diff --git a/.mailmap b/.mailmap deleted file mode 100644 index fd6219293057..000000000000 --- a/.mailmap +++ /dev/null @@ -1,273 +0,0 @@ -# -# This list is used by git-shortlog to fix a few botched name translations -# in the git archive, either because the author's full name was messed up -# and/or not always written the same way, making contributions from the -# same person appearing not to be so or badly displayed. -# -# repo-abbrev: /pub/scm/linux/kernel/git/ -# - -Aaron Durbin -Adam Oldham -Adam Radford -Adrian Bunk -Adriana Reus -Alan Cox -Alan Cox -Aleksey Gorelov -Aleksandar Markovic -Alex Shi -Alex Shi -Alexei Starovoitov -Alexei Starovoitov -Alexei Starovoitov -Al Viro -Al Viro -Andi Shyti -Andreas Herrmann -Andrey Ryabinin -Andrew Morton -Andrew Vasquez -Andy Adamson -Antoine Tenart -Antonio Ospite -Archit Taneja -Arnaud Patard -Arnd Bergmann -Axel Dyks -Axel Lin -Bart Van Assche -Bart Van Assche -Ben Gardner -Ben M Cahill -Björn Steinbrink -Boris Brezillon -Boris Brezillon -Boris Brezillon -Boris Brezillon -Brian Avery -Brian King -Chao Yu -Chao Yu -Christoph Hellwig -Christophe Ricard -Corey Minyard -Damian Hobson-Garcia -Daniel Borkmann -Daniel Borkmann -Daniel Borkmann -Daniel Borkmann -Daniel Borkmann -Daniel Borkmann -David Brownell -David Woodhouse -Dengcheng Zhu -Dengcheng Zhu -Dengcheng Zhu -Dengcheng Zhu - -Dmitry Eremin-Solenikov -Dmitry Safonov <0x7f454c46@gmail.com> -Dmitry Safonov <0x7f454c46@gmail.com> -Dmitry Safonov <0x7f454c46@gmail.com> -Domen Puncer -Douglas Gilbert -Ed L. Cashin -Evgeniy Polyakov -Felipe W Damasio -Felix Kuhling -Felix Moeller -Filipe Lautert -Franck Bui-Huu -Frank Rowand -Frank Rowand -Frank Rowand -Frank Zago -Gao Xiang -Gao Xiang -Greg Kroah-Hartman -Greg Kroah-Hartman -Greg Kroah-Hartman -Gregory CLEMENT -Hanjun Guo -Henk Vergonet -Henrik Kretzschmar -Henrik Rydberg -Herbert Xu -Jacob Shin -Jaegeuk Kim -Jaegeuk Kim -Jaegeuk Kim -James Bottomley -James Bottomley -James E Wilson -James Hogan -James Hogan -James Ketrenos -Jason Gunthorpe -Jason Gunthorpe -Javi Merino - -Jayachandran C -Jayachandran C -Jayachandran C -Jayachandran C -Jean Tourrilhes - -Jeff Garzik -Jeff Layton -Jeff Layton -Jeff Layton -Jens Axboe -Jens Osterkamp -Johan Hovold -Johan Hovold -John Paul Adrian Glaubitz -John Stultz - - - - - -Juha Yrjola -Juha Yrjola -Juha Yrjola -Julien Thierry -Kay Sievers -Kenneth W Chen -Konstantin Khlebnikov -Koushik -Krzysztof Kozlowski -Krzysztof Kozlowski -Kuninori Morimoto -Leon Romanovsky -Leon Romanovsky -Leonid I Ananiev -Linas Vepstas -Linus Lüssing -Linus Lüssing -Li Yang -Li Yang -Maciej W. 
Rozycki -Marc Zyngier -Marcin Nowakowski -Mark Brown -Mark Yao -Martin Kepplinger -Martin Kepplinger -Mathieu Othacehe -Matthew Wilcox -Matthew Wilcox -Matthew Wilcox -Matthew Wilcox -Matthew Wilcox -Matthew Wilcox -Matthew Wilcox -Matthieu CASTET -Mauro Carvalho Chehab -Mauro Carvalho Chehab -Mauro Carvalho Chehab -Mauro Carvalho Chehab -Mauro Carvalho Chehab -Mauro Carvalho Chehab -Mauro Carvalho Chehab -Matt Ranostay Matthew Ranostay -Matt Ranostay -Matt Ranostay -Matt Redfearn -Maxime Ripard -Maxime Ripard -Mayuresh Janorkar -Michael Buesch -Michel Dänzer -Miodrag Dinic -Miquel Raynal -Mitesh shah -Mohit Kumar -Morten Welinder -Morten Welinder -Morten Welinder -Morten Welinder -Mythri P K -Nguyen Anh Quynh -Nicolas Ferre -Nicolas Pitre -Nicolas Pitre -Oleksij Rempel -Oleksij Rempel -Oleksij Rempel -Oleksij Rempel -Oleksij Rempel -Paolo 'Blaisorblade' Giarrusso -Patrick Mochel -Paul Burton -Paul Burton -Peter A Jonsson -Peter Oruba -Peter Oruba -Pratyush Anand -Praveen BP -Punit Agrawal -Qais Yousef -Quentin Perret -Rajesh Shah -Ralf Baechle -Ralf Wildenhues -Randy Dunlap -Rémi Denis-Courmont -Ricardo Ribalda Delgado -Ross Zwisler -Rudolf Marek -Rui Saraiva -Sachin P Sant -Sarangdhar Joshi -Sam Ravnborg -Santosh Shilimkar -Santosh Shilimkar -Sascha Hauer -S.Çağlar Onur -Sean Nyekjaer -Sebastian Reichel -Sebastian Reichel -Shiraz Hashim -Shuah Khan -Shuah Khan -Shuah Khan -Shuah Khan -Simon Arlott -Simon Kelley -Stéphane Witzmann -Stephen Hemminger -Subash Abhinov Kasiviswanathan -Subhash Jadavani -Sudeep Holla Sudeep KarkadaNagesha -Sumit Semwal -Tejun Heo -Thomas Graf -Thomas Pedersen -Todor Tomov -Tony Luck -TripleX Chung -TripleX Chung -Tsuneo Yoshioka -Uwe Kleine-König -Uwe Kleine-König -Uwe Kleine-König -Valdis Kletnieks -Vinod Koul -Vinod Koul -Vinod Koul -Viresh Kumar -Viresh Kumar -Viresh Kumar -Vlad Dogaru -Vladimir Davydov -Vladimir Davydov -Takashi YOSHII -Will Deacon -Yakir Yang -Yusuke Goda -Gustavo Padovan -Gustavo Padovan -Changbin Du -Changbin Du diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 8ca498447aeb..05601a90a9b6 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io @@ -29,13 +29,13 @@ Description: This file shows the system fans direction: The files are read only. -What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version Date: November 2018 KernelVersion: 5.0 Contact: Vadim Pasternak Description: These files show with which CPLD versions have been burned - on LED board. + on LED or Gearbox board. The files are read only. @@ -121,6 +121,15 @@ Description: These files show the system reset cause, as following: ComEx The files are read only. +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version +Date: November 2018 +KernelVersion: 5.0 +Contact: Vadim Pasternak +Description: These files show with which CPLD versions have been burned + on LED board. + + The files are read only.
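As an aside, the cpld*_version attributes above are plain read-only sysfs files. A minimal userspace sketch for dumping them could look like this; the sysfs paths are the ones documented in this hunk, the hwmon index varies per boot (hence the glob), and everything else is illustrative only, not part of the patch::

  /* Hedged sketch: read the read-only CPLD version attributes above. */
  #include <glob.h>
  #include <stdio.h>

  int main(void)
  {
          glob_t g;
          char buf[64];

          if (glob("/sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld*_version",
                   0, NULL, &g))
                  return 1;
          for (size_t i = 0; i < g.gl_pathc; i++) {
                  FILE *f = fopen(g.gl_pathv[i], "r");

                  if (f && fgets(buf, sizeof(buf), f))
                          printf("%s: %s", g.gl_pathv[i], buf);
                  if (f)
                          fclose(f);
          }
          globfree(&g);
          return 0;
  }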
+ Date: June 2019 KernelVersion: 5.3 Contact: Vadim Pasternak diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei index 6bd45346ac7e..3f8701e8fa24 100644 --- a/Documentation/ABI/testing/sysfs-bus-mei +++ b/Documentation/ABI/testing/sysfs-bus-mei @@ -4,7 +4,7 @@ KernelVersion: 3.10 Contact: Samuel Ortiz linux-mei@linux.intel.com Description: Stores the same MODALIAS value emitted by uevent - Format: mei::: + Format: mei::: What: /sys/bus/mei/devices/.../name Date: May 2015 diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq index 01196e19afca..75897e2fde43 100644 --- a/Documentation/ABI/testing/sysfs-class-devfreq +++ b/Documentation/ABI/testing/sysfs-class-devfreq @@ -7,6 +7,13 @@ Description: The name of devfreq object denoted as ... is same as the name of device using devfreq. +What: /sys/class/devfreq/.../name +Date: November 2019 +Contact: Chanwoo Choi +Description: + The /sys/class/devfreq/.../name shows the name of device + of the corresponding devfreq object. + What: /sys/class/devfreq/.../governor Date: September 2011 Contact: MyungJoo Ham diff --git a/Documentation/admin-guide/device-mapper/index.rst b/Documentation/admin-guide/device-mapper/index.rst index c77c58b8f67b..d8dec8911eb3 100644 --- a/Documentation/admin-guide/device-mapper/index.rst +++ b/Documentation/admin-guide/device-mapper/index.rst @@ -8,6 +8,7 @@ Device Mapper cache-policies cache delay + dm-clone dm-crypt dm-flakey dm-init diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst index e3a796c0d3a2..2d19c9f4c1fe 100644 --- a/Documentation/admin-guide/hw-vuln/mds.rst +++ b/Documentation/admin-guide/hw-vuln/mds.rst @@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are: ============ ============================================================= -Not specifying this option is equivalent to "mds=full". - +Not specifying this option is equivalent to "mds=full". For processors +that are affected by both TAA (TSX Asynchronous Abort) and MDS, +specifying just "mds=off" without an accompanying "tsx_async_abort=off" +will have no effect as the same mitigation is used for both +vulnerabilities. Mitigation selection guide -------------------------- diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst index fddbd7579c53..af6865b822d2 100644 --- a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst +++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst @@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are: CPU is not vulnerable to cross-thread TAA attacks. ============ ============================================================= -Not specifying this option is equivalent to "tsx_async_abort=full". +Not specifying this option is equivalent to "tsx_async_abort=full". For +processors that are affected by both TAA and MDS, specifying just +"tsx_async_abort=off" without an accompanying "mds=off" will have no +effect as the same mitigation is used for both vulnerabilities. The kernel command line also allows to control the TSX feature using the parameter "tsx=" on CPUs which support TSX control. 
MSR_IA32_TSX_CTRL is used diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 8dee8f68fe15..b5c933fa971f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -113,7 +113,7 @@ the GPE dispatcher. This facility can be used to prevent such uncontrolled GPE floodings. - Format: + Format: acpi_no_auto_serialize [HW,ACPI] Disable auto-serialization of AML methods @@ -136,6 +136,10 @@ dynamic table installation which will install SSDT tables to /sys/firmware/acpi/tables/dynamic. + acpi_no_watchdog [HW,ACPI,WDT] + Ignore the ACPI-based watchdog interface (WDAT) and let + a native driver control the watchdog device instead. + acpi_rsdp= [ACPI,EFI,KEXEC] Pass the RSDP address to the kernel, mostly used on machines running EFI runtime service to boot the @@ -2473,6 +2477,12 @@ SMT on vulnerable CPUs off - Unconditionally disable MDS mitigation + On TAA-affected machines, mds=off can be prevented by + an active TAA mitigation as both vulnerabilities are + mitigated with the same mechanism so in order to disable + this mitigation, you need to specify tsx_async_abort=off + too. + Not specifying this option is equivalent to mds=full. @@ -4931,6 +4941,11 @@ vulnerable to cross-thread TAA attacks. off - Unconditionally disable TAA mitigation + On MDS-affected machines, tsx_async_abort=off can be + prevented by an active MDS mitigation as both vulnerabilities + are mitigated with the same mechanism so in order to disable + this mitigation, you need to specify mds=off too. + Not specifying this option is equivalent to tsx_async_abort=full. On CPUs which are MDS affected and deploy MDS mitigation, TAA mitigation is not @@ -5090,13 +5105,13 @@ Flags is a set of characters, each corresponding to a common usb-storage quirk flag as follows: a = SANE_SENSE (collect more than 18 bytes - of sense data); + of sense data, not on uas); b = BAD_SENSE (don't collect more than 18 - bytes of sense data); + bytes of sense data, not on uas); c = FIX_CAPACITY (decrease the reported device capacity by one sector); d = NO_READ_DISC_INFO (don't use - READ_DISC_INFO command); + READ_DISC_INFO command, not on uas); e = NO_READ_CAPACITY_16 (don't use READ_CAPACITY_16 command); f = NO_REPORT_OPCODES (don't use report opcodes @@ -5111,17 +5126,18 @@ j = NO_REPORT_LUNS (don't use report luns command, uas only); l = NOT_LOCKABLE (don't try to lock and - unlock ejectable media); + unlock ejectable media, not on uas); m = MAX_SECTORS_64 (don't transfer more - than 64 sectors = 32 KB at a time); + than 64 sectors = 32 KB at a time, + not on uas); n = INITIAL_READ10 (force a retry of the - initial READ(10) command); + initial READ(10) command, not on uas); o = CAPACITY_OK (accept the capacity - reported by the device); + reported by the device, not on uas); p = WRITE_CACHE (the device cache is ON - by default); + by default, not on uas); r = IGNORE_RESIDUE (the device reports - bogus residue values); + bogus residue values, not on uas); s = SINGLE_LUN (the device has only one Logical Unit); t = NO_ATA_1X (don't allow ATA(12) and ATA(16) @@ -5130,7 +5146,8 @@ w = NO_WP_DETECT (don't test whether the medium is write-protected). 
y = ALWAYS_SYNC (issue a SYNCHRONIZE_CACHE - even if the device claims no cache) + even if the device claims no cache, + not on uas) Example: quirks=0419:aaf5:rl,0421:0433:rc user_debug= [KNL,ARM] diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst index d4a85d535bf9..4a9d9c794ee5 100644 --- a/Documentation/arm64/tagged-address-abi.rst +++ b/Documentation/arm64/tagged-address-abi.rst @@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending how the user addresses are used by the kernel: 1. User addresses not accessed by the kernel but used for address space - management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use - of valid tagged pointers in this context is always allowed. + management (e.g. ``mprotect()``, ``madvise()``). The use of valid + tagged pointers in this context is allowed with the exception of + ``brk()``, ``mmap()`` and the ``new_address`` argument to + ``mremap()`` as these have the potential to alias with existing + user addresses. + + NOTE: This behaviour changed in v5.6 and so some earlier kernels may + incorrectly accept valid tagged pointers for the ``brk()``, + ``mmap()`` and ``mremap()`` system calls. 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI relaxation is disabled by default and the application thread needs to diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile index 5138a2f6232a..646cb3525373 100644 --- a/Documentation/devicetree/bindings/Makefile +++ b/Documentation/devicetree/bindings/Makefile @@ -12,7 +12,6 @@ $(obj)/%.example.dts: $(src)/%.yaml FORCE $(call if_changed,chk_binding) DT_TMP_SCHEMA := processed-schema.yaml -extra-y += $(DT_TMP_SCHEMA) quiet_cmd_mk_schema = SCHEMA $@ cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs) @@ -26,8 +25,12 @@ DT_DOCS = $(shell \ DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) +ifeq ($(CHECK_DTBS),) extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) extra-y += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES)) +endif $(obj)/$(DT_TMP_SCHEMA): $(DT_SCHEMA_FILES) FORCE $(call if_changed,mk_schema) + +extra-y += $(DT_TMP_SCHEMA) diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt index e96e085271c1..83f6c6a7c41c 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt @@ -46,7 +46,7 @@ Required properties: Example (R-Car H3): usb2_clksel: clock-controller@e6590630 { - compatible = "renesas,r8a77950-rcar-usb2-clock-sel", + compatible = "renesas,r8a7795-rcar-usb2-clock-sel", "renesas,rcar-gen3-usb2-clock-sel"; reg = <0 0xe6590630 0 0x02>; clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml index cc544fdc38be..bc8aed17800d 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml @@ -85,7 +85,7 @@ properties: Must be the device tree identifier of the over-sampling mode pins. As the line is active high, it should be marked GPIO_ACTIVE_HIGH. 
- maxItems: 1 + maxItems: 3 adi,sw-mode: description: @@ -128,9 +128,9 @@ examples: adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>; reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>; adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>; - adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH - &gpio 23 GPIO_ACTIVE_HIGH - &gpio 26 GPIO_ACTIVE_HIGH>; + adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>, + <&gpio 23 GPIO_ACTIVE_HIGH>, + <&gpio 26 GPIO_ACTIVE_HIGH>; standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>; adi,sw-mode; }; diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index 299c0dcd67db..1316f0aec0cf 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt @@ -110,6 +110,13 @@ PROPERTIES Usage: required Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt +- fsl,erratum-a050385 + Usage: optional + Value type: boolean + Definition: A boolean property. Indicates the presence of the + erratum A050385 which indicates that DMA transactions that are + split can result in a FMan lock. + ============================================================================= FMan MURAM Node diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 4845e29411e4..e08cd4c4d568 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -347,6 +347,7 @@ allOf: - st,spear600-gmac then: + properties: snps,tso: $ref: /schemas/types.yaml#definitions/flag description: diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt index ae661e65354e..f9499b20d840 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt @@ -81,6 +81,12 @@ Optional properties: Definition: Name of external front end module used. Some valid FEM names for example: "microsemi-lx5586", "sky85703-11" and "sky85803" etc. +- qcom,snoc-host-cap-8bit-quirk: + Usage: Optional + Value type: + Definition: Quirk specifying that the firmware expects the 8bit version + of the host capability QMI request + Example (to supply PCI based wifi block details): diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt index 6e5341b4f891..ee59409640f2 100644 --- a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt +++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt @@ -22,6 +22,6 @@ Example: }; &ethernet_switch { - resets = <&reset>; + resets = <&reset 26>; reset-names = "switch"; }; diff --git a/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt new file mode 100644 index 000000000000..f315c9723bd2 --- /dev/null +++ b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt @@ -0,0 +1,27 @@ +OMAP ROM RNG driver binding + +Secure SoCs may provide RNG via secure ROM calls like Nokia N900 does. The +implementation can depend on the SoC secure ROM used.
+ +- compatible: + Usage: required + Value type: + Definition: must be "nokia,n900-rom-rng" + +- clocks: + Usage: required + Value type: + Definition: reference to the RNG interface clock + +- clock-names: + Usage: required + Value type: + Definition: must be "ick" + +Example: + + rom_rng: rng { + compatible = "nokia,n900-rom-rng"; + clocks = <&rng_ick>; + clock-names = "ick"; + }; diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt index d6d5207fa996..17ff3892f439 100644 --- a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt +++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt @@ -2,9 +2,11 @@ MT8183 with MT6358, TS3A227 and MAX98357 CODECS Required properties: - compatible : "mediatek,mt8183_mt6358_ts3a227_max98357" -- mediatek,headset-codec: the phandles of ts3a227 codecs - mediatek,platform: the phandle of MT8183 ASoC platform +Optional properties: +- mediatek,headset-codec: the phandles of ts3a227 codecs + Example: sound { diff --git a/Documentation/devicetree/writing-schema.rst b/Documentation/devicetree/writing-schema.rst index f4a638072262..83e04e5c342d 100644 --- a/Documentation/devicetree/writing-schema.rst +++ b/Documentation/devicetree/writing-schema.rst @@ -130,11 +130,13 @@ binding schema. All of the DT binding documents can be validated using the make dt_binding_check -In order to perform validation of DT source files, use the ``dtbs_check`` target:: +In order to perform validation of DT source files, use the ``dtbs_check`` target:: make dtbs_check -This will first run the ``dt_binding_check`` which generates the processed schema. +Note that ``dtbs_check`` will skip any binding schema files with errors. It is +necessary to use ``dt_binding_check`` to get all the validation errors in the +binding schema files. It is also possible to run checks with a single schema file by setting the ``DT_SCHEMA_FILES`` variable to a specific schema file. diff --git a/Documentation/fb/fbcon.rst b/Documentation/fb/fbcon.rst index ebca41785abe..65ba40255137 100644 --- a/Documentation/fb/fbcon.rst +++ b/Documentation/fb/fbcon.rst @@ -127,7 +127,7 @@ C. Boot options is typically located on the same video card. Thus, the consoles that are controlled by the VGA console will be garbled. -4. fbcon=rotate: +5. fbcon=rotate: This option changes the orientation angle of the console display. The value 'n' accepts the following: @@ -152,21 +152,21 @@ C. Boot options Actually, the underlying fb driver is totally ignorant of console rotation. -5. fbcon=margin: +6. fbcon=margin: This option specifies the color of the margins. The margins are the leftover area at the right and the bottom of the screen that are not used by text. By default, this area will be black. The 'color' value is an integer number that depends on the framebuffer driver being used. -6. fbcon=nodefer +7. fbcon=nodefer If the kernel is compiled with deferred fbcon takeover support, normally the framebuffer contents, left in place by the firmware/bootloader, will be preserved until there actually is some text output to the console. This option causes fbcon to bind immediately to the fbdev device. -7. fbcon=logo-pos: +8.
fbcon=logo-pos: The only possible 'location' is 'center' (without quotes), and when given, the bootup logo is moved from the default top-left corner diff --git a/Documentation/filesystems/porting.rst index f18506083ced..26c093969573 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -850,3 +850,11 @@ business doing so. d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are very suspect (and won't work in modules). Such uses are very likely to be misspelled d_alloc_anon(). + +--- + +**mandatory** + +[should've been added in 2016] stale comment in finish_open() notwithstanding, +failure exits in ->atomic_open() instances should *NOT* fput() the file, +no matter what. Everything is handled by the caller. diff --git a/Documentation/kbuild/makefiles.rst index b89c88168d6a..b9b50553bfc5 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -1115,23 +1115,6 @@ When kbuild executes, the following steps are followed (roughly): In this example, extra-y is used to list object files that shall be built, but shall not be linked as part of built-in.a. - header-test-y - - header-test-y specifies headers (`*.h`) in the current directory that - should be compile tested to ensure they are self-contained, - i.e. compilable as standalone units. If CONFIG_HEADER_TEST is enabled, - this builds them as part of extra-y. - - header-test-pattern-y - - This works as a weaker version of header-test-y, and accepts wildcard - patterns. The typical usage is:: - - header-test-pattern-y += *.h - - This specifies all the files that matches to `*.h` in the current - directory, but the files in 'header-test-' are excluded. - 6.7 Commands useful for building a boot image --------------------------------------------- diff --git a/Documentation/kbuild/modules.rst index 774a998dcf37..199ce72bf922 100644 --- a/Documentation/kbuild/modules.rst +++ b/Documentation/kbuild/modules.rst @@ -470,9 +470,9 @@ build. The syntax of the Module.symvers file is:: - + - 0xe1cc2a05 usb_stor_suspend USB_STORAGE drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL + 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE The fields are separated by tabs and values may be empty (e.g. if no namespace is defined for an exported symbol). diff --git a/Documentation/networking/ip-sysctl.txt index 8d4ad1d1ae26..f274aede80a4 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -369,6 +369,14 @@ tcp_invalid_ratelimit - INTEGER Default: 500 (milliseconds). +tcp_inherit_buffsize - BOOLEAN + If set to 0, a new passive TCP connection uses the current tcp_wmem/tcp_rmem + parameters for its initial snd/rcv buffer sizes. This is useful for online + services that need to pick up new buffer defaults without being restarted. + By default, a new passive connection inherits its snd/rcv buffer sizes from + the listener socket. + + Default: 1 + tcp_keepalive_time - INTEGER How often TCP sends out keepalive messages when keepalive is enabled. Default: 2 hours.
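The tcp_inherit_buffsize semantics above are easiest to see from the listener side. Below is a minimal sketch of the behaviour the sysctl toggles; the sysctl itself appears to be specific to this kernel, while the socket API shown is standard and the buffer size is an arbitrary illustration::

  /* Sketch: a listener whose snd/rcv buffer sizes are (or are not)
   * inherited by new passive connections, per tcp_inherit_buffsize. */
  #include <netinet/in.h>
  #include <stdint.h>
  #include <sys/socket.h>
  #include <unistd.h>

  int make_listener(uint16_t port)
  {
          int fd = socket(AF_INET, SOCK_STREAM, 0);
          int rcv = 1 << 20; /* 1 MiB; the kernel doubles this value */
          struct sockaddr_in sa = { 0 };

          if (fd < 0)
                  return -1;
          /* With tcp_inherit_buffsize=1 (the default above), this size
           * carries over to each accepted socket; with 0, accepted
           * sockets start from the current tcp_rmem/tcp_wmem defaults. */
          setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));

          sa.sin_family = AF_INET;
          sa.sin_addr.s_addr = htonl(INADDR_ANY);
          sa.sin_port = htons(port);
          if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) ||
              listen(fd, 128)) {
                  close(fd);
                  return -1;
          }
          return fd;
  }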
diff --git a/Documentation/networking/j1939.rst index dc60b13fcd09..f5be243d250a 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -339,7 +339,7 @@ To claim an address following code example can be used: .pgn = J1939_PGN_ADDRESS_CLAIMED, .pgn_mask = J1939_PGN_PDU1_MAX, }, { - .pgn = J1939_PGN_ADDRESS_REQUEST, + .pgn = J1939_PGN_REQUEST, .pgn_mask = J1939_PGN_PDU1_MAX, }, { .pgn = J1939_PGN_ADDRESS_COMMANDED, diff --git a/Documentation/networking/nf_flowtable.txt index ca2136c76042..0bf32d1121be 100644 --- a/Documentation/networking/nf_flowtable.txt +++ b/Documentation/networking/nf_flowtable.txt @@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain. table inet x { flowtable f { - hook ingress priority 0 devices = { eth0, eth1 }; + hook ingress priority 0; devices = { eth0, eth1 }; } chain y { type filter hook forward priority 0; policy accept; diff --git a/Documentation/scsi/smartpqi.txt index 201f80c7c050..df129f55ace5 100644 --- a/Documentation/scsi/smartpqi.txt +++ b/Documentation/scsi/smartpqi.txt @@ -29,7 +29,7 @@ smartpqi specific entries in /sys smartpqi host attributes: ------------------------- /sys/class/scsi_host/host*/rescan - /sys/class/scsi_host/host*/version + /sys/class/scsi_host/host*/driver_version The host rescan attribute is a write only attribute. Writing to this attribute will trigger the driver to scan for new, changed, or removed diff --git a/Documentation/vm/pagecache-limit new file mode 100644 index 000000000000..0050a9019070 --- /dev/null +++ b/Documentation/vm/pagecache-limit @@ -0,0 +1,49 @@ +Functionality: +------------- +The patch introduces three new tunables in the proc filesystem: + +/proc/sys/vm/pagecache_limit_ratio + +This tunable sets the limit on unmapped pages in the pagecache as a ratio of totalram. +If zero, it disables the pagecache limit function; any non-zero value enables it. + +Examples: +echo 50 >/proc/sys/vm/pagecache_limit_ratio + +This sets a baseline limit for the page cache (not the buffer cache!) of 50% of totalram. +As we only consider pagecache pages that are unmapped, currently mapped pages (files that are mmap'ed such as e.g. binaries and libraries as well as SysV shared memory) are not limited by this. + +/proc/sys/vm/pagecache_limit_reclaim_ratio + +This sets the actual reclaim ratio of totalram; it defaults to 2% (ADDITIONAL_RECLAIM_RATIO) more than pagecache_limit_ratio. pagecache_limit_ratio is the ratio at which the pagecache is checked, and we reclaim somewhat more than that so the pagecache does not have to be reclaimed again immediately. + +/proc/sys/vm/pagecache_limit_ignore_dirty + +The default for this setting is 1; this means that we don't consider dirty memory to be part of the limited pagecache, as we can not easily free up dirty memory (we'd need to do writes for this). By setting this to 0, we actually consider dirty (unmapped) memory to be freeable and do a third pass in shrink_page_cache() where we schedule the pages for writeout. Values larger than 1 are also possible and result in a fraction of the dirty pages to be considered non-freeable. + + + + +How it works: +------------ +The heart of this patch is a new function called shrink_page_cache(). It is called from balance_pgdat (which is the worker for kswapd) or add_to_page_cache if the pagecache is above the limit. +balance_pgdat() is also called from __alloc_pages_slowpath().
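As a quick aside before the pass-by-pass description below: the three tunables above can also be driven programmatically. A minimal sketch, using exactly the proc paths documented here; the values mirror the echo example and the 2% reclaim default, and everything else is illustrative::

  /* Sketch: program the pagecache-limit tunables documented above.
   * Equivalent to: echo 50 > /proc/sys/vm/pagecache_limit_ratio, etc. */
  #include <stdio.h>

  static int write_tunable(const char *path, int val)
  {
          FILE *f = fopen(path, "w");

          if (!f)
                  return -1;
          fprintf(f, "%d\n", val);
          return fclose(f);
  }

  int main(void)
  {
          /* Limit unmapped pagecache to 50% of totalram ... */
          write_tunable("/proc/sys/vm/pagecache_limit_ratio", 50);
          /* ... reclaim down to slightly below the limit when hit ... */
          write_tunable("/proc/sys/vm/pagecache_limit_reclaim_ratio", 52);
          /* ... and keep treating dirty pages as non-freeable. */
          return write_tunable("/proc/sys/vm/pagecache_limit_ignore_dirty", 1);
  }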
+ +shrink_page_cache() calculates the number of pages by which the cache exceeds its limit, and then shrinks the pagecache by that amount (using the kernel LRUs). + +shrink_page_cache() does several passes: +- Just reclaiming from inactive pagecache memory. + This is fast -- but it might not find enough free pages; if that happens, + the second pass runs. +- In the second pass, pages from the active list are also considered. +- The third pass only happens if pagecache_limit_ignore_dirty is not 1. + In that case, the third pass is a repetition of the second pass, but this + time we allow pages to be written out. + +In all passes, only unmapped pages will be considered. + + +Foreground vs. background shrinking: +----------------------------------- + +Usually, the Linux kernel reclaims its memory using the kernel thread kswapd. It reclaims memory in the background. If it can't reclaim memory fast enough, it retries with higher priority, and if this still doesn't succeed, it uses a direct reclaim path. \ No newline at end of file diff --git a/MAINTAINERS index 9d3a5c54a41d..fe6fa5d3a63e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6973,6 +6973,7 @@ L: linux-acpi@vger.kernel.org S: Maintained F: Documentation/firmware-guide/acpi/gpio-properties.rst F: drivers/gpio/gpiolib-acpi.c +F: drivers/gpio/gpiolib-acpi.h GPIO IR Transmitter M: Sean Young @@ -8200,7 +8201,7 @@ M: Joonas Lahtinen M: Rodrigo Vivi L: intel-gfx@lists.freedesktop.org W: https://01.org/linuxgraphics/ -B: https://01.org/linuxgraphics/documentation/how-report-bugs +B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs C: irc://chat.freenode.net/intel-gfx Q: http://patchwork.freedesktop.org/project/intel-gfx/ T: git git://anongit.freedesktop.org/drm-intel @@ -8703,8 +8704,10 @@ L: isdn4linux@listserv.isdn4linux.de (subscribers-only) L: netdev@vger.kernel.org W: http://www.isdn4linux.de S: Maintained -F: drivers/isdn/mISDN -F: drivers/isdn/hardware +F: drivers/isdn/mISDN/ +F: drivers/isdn/hardware/ +F: drivers/isdn/Kconfig +F: drivers/isdn/Makefile ISDN/CAPI SUBSYSTEM M: Karsten Keil diff --git a/Makefile index d4d36c61940b..d6587d9dc476 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 4 -SUBLEVEL = 0 -EXTRAVERSION = +SUBLEVEL = 32 +EXTRAVERSION = -1 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -618,7 +618,6 @@ ifeq ($(KBUILD_EXTMOD),) init-y := init/ drivers-y := drivers/ sound/ drivers-$(CONFIG_SAMPLES) += samples/ -drivers-$(CONFIG_KERNEL_HEADER_TEST) += include/ net-y := net/ libs-y := lib/ core-y := usr/ @@ -1196,19 +1195,15 @@ headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts $(Q)$(MAKE) $(hdr-inst)=include/uapi $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi +# Deprecated. It is a no-op now.
PHONY += headers_check -headers_check: headers - $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 - $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi HDRCHECK=1 +headers_check: + @: ifdef CONFIG_HEADERS_INSTALL prepare: headers endif -ifdef CONFIG_HEADERS_CHECK -all: headers_check -endif - PHONY += scripts_unifdef scripts_unifdef: scripts_basic $(Q)$(MAKE) $(build)=scripts scripts/unifdef @@ -1242,7 +1237,7 @@ ifneq ($(dtstree),) %.dtb: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ -PHONY += dtbs dtbs_install dt_binding_check +PHONY += dtbs dtbs_install dtbs_check dtbs dtbs_check: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) @@ -1262,6 +1257,7 @@ PHONY += scripts_dtc scripts_dtc: scripts_basic $(Q)$(MAKE) $(build)=scripts/dtc +PHONY += dt_binding_check dt_binding_check: scripts_dtc $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings @@ -1476,7 +1472,6 @@ help: @echo ' versioncheck - Sanity check on version.h usage' @echo ' includecheck - Check for duplicate included header files' @echo ' export_report - List the usages of all exported symbols' - @echo ' headers_check - Sanity check on exported headers' @echo ' headerdep - Detect inclusion cycles in headers' @echo ' coccicheck - Check with Coccinelle' @echo '' @@ -1641,6 +1636,50 @@ help: PHONY += prepare endif # KBUILD_EXTMOD +# Single targets +# --------------------------------------------------------------------------- +# To build individual files in subdirectories, you can do like this: +# +# make foo/bar/baz.s +# +# The supported suffixes for single-target are listed in 'single-targets' +# +# To build only under specific subdirectories, you can do like this: +# +# make foo/bar/baz/ + +ifdef single-build + +# .ko is special because modpost is needed +single-ko := $(sort $(filter %.ko, $(MAKECMDGOALS))) +single-no-ko := $(sort $(patsubst %.ko,%.mod, $(MAKECMDGOALS))) + +$(single-ko): single_modpost + @: +$(single-no-ko): descend + @: + +ifeq ($(KBUILD_EXTMOD),) +# For the single build of in-tree modules, use a temporary file to avoid +# the situation of modules_install installing an invalid modules.order. +MODORDER := .modules.tmp +endif + +PHONY += single_modpost +single_modpost: $(single-no-ko) + $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + +KBUILD_MODULES := 1 + +export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko)) + +# trim unrelated directories +build-dirs := $(foreach d, $(build-dirs), \ + $(if $(filter $(d)/%, $(KBUILD_SINGLE_TARGETS)), $(d))) + +endif + # Handle descending into subdirectories listed in $(build-dirs) # Preset locale variables to speed up the build process. 
Limit locale # tweaks to this spot to avoid wrong language settings when running @@ -1649,7 +1688,9 @@ endif # KBUILD_EXTMOD PHONY += descend $(build-dirs) descend: $(build-dirs) $(build-dirs): prepare - $(Q)$(MAKE) $(build)=$@ single-build=$(single-build) need-builtin=1 need-modorder=1 + $(Q)$(MAKE) $(build)=$@ \ + single-build=$(if $(filter-out $@/, $(single-no-ko)),1) \ + need-builtin=1 need-modorder=1 clean-dirs := $(addprefix _clean_, $(clean-dirs)) PHONY += $(clean-dirs) clean @@ -1753,50 +1794,6 @@ tools/%: FORCE $(Q)mkdir -p $(objtree)/tools $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* -# Single targets -# --------------------------------------------------------------------------- -# To build individual files in subdirectories, you can do like this: -# -# make foo/bar/baz.s -# -# The supported suffixes for single-target are listed in 'single-targets' -# -# To build only under specific subdirectories, you can do like this: -# -# make foo/bar/baz/ - -ifdef single-build - -single-all := $(filter $(single-targets), $(MAKECMDGOALS)) - -# .ko is special because modpost is needed -single-ko := $(sort $(filter %.ko, $(single-all))) -single-no-ko := $(sort $(patsubst %.ko,%.mod, $(single-all))) - -$(single-ko): single_modpost - @: -$(single-no-ko): descend - @: - -ifeq ($(KBUILD_EXTMOD),) -# For the single build of in-tree modules, use a temporary file to avoid -# the situation of modules_install installing an invalid modules.order. -MODORDER := .modules.tmp -endif - -PHONY += single_modpost -single_modpost: $(single-no-ko) - $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER) - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost - -KBUILD_MODULES := 1 - -export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko)) - -single-build = $(if $(filter-out $@/, $(single-no-ko)),1) - -endif - # FIXME Should go into a make.lib or something # =========================================================================== diff --git a/README.md b/README.md new file mode 100644 index 000000000000..fe7d2d88c4d8 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# Tencent Linux Kernel 4.0 diff --git a/arch/Kconfig b/arch/Kconfig index 5f8a5d84dbbe..238dccfa7691 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -396,10 +396,10 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE config HAVE_RCU_TABLE_FREE bool -config HAVE_RCU_TABLE_NO_INVALIDATE +config HAVE_MMU_GATHER_PAGE_SIZE bool -config HAVE_MMU_GATHER_PAGE_SIZE +config MMU_GATHER_NO_RANGE bool config HAVE_MMU_GATHER_NO_GATHER diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index 08bcfed6b80f..134cc223ea81 100644 --- a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi @@ -77,6 +77,7 @@ interrupt-names = "macirq"; phy-mode = "rgmii"; snps,pbl = < 32 >; + snps,multicast-filter-bins = <256>; clocks = <&apbclk>; clock-names = "stmmaceth"; max-speed = <100>; diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h index d9ee43c6b7db..fe19f1d412e7 100644 --- a/arch/arc/include/asm/linkage.h +++ b/arch/arc/include/asm/linkage.h @@ -29,6 +29,8 @@ .endm #define ASM_NL ` /* use '`' to mark new line in macro */ +#define __ALIGN .align 4 +#define __ALIGN_STR __stringify(__ALIGN) /* annotation for data we want in DCCM - if enabled in .config */ .macro ARCFP_DATA nm diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig index a376a50d3fea..a931d0a256d0 100644 --- 
a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -7,7 +7,7 @@ menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" select CPU_BIG_ENDIAN - select CLKSRC_NPS + select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET help diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 8a50efb559f3..05c9bbfe444d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -73,8 +73,9 @@ config ARM select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT - select HAVE_DEBUG_KMEMLEAK + select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE @@ -1906,7 +1907,7 @@ config XIP_DEFLATED_DATA config KEXEC bool "Kexec system call (EXPERIMENTAL)" depends on (!SMP || PM_SLEEP_SMP) - depends on !CPU_V7M + depends on MMU select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your diff --git a/arch/arm/Makefile b/arch/arm/Makefile index db857d07114f..1fc32b611f8a 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -307,13 +307,15 @@ endif ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) prepare: stack_protector_prepare stack_protector_prepare: prepare0 - $(eval KBUILD_CFLAGS += \ + $(eval SSP_PLUGIN_CFLAGS := \ -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \ awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\ include/generated/asm-offsets.h) \ -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \ awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\ include/generated/asm-offsets.h)) + $(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS)) + $(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS)) endif all: $(notdir $(KBUILD_IMAGE)) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 9219389bbe61..1483966dcf23 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \ $(libfdt) $(libfdt_hdrs) hyp-stub.S KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN) ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) @@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp_flags) CFLAGS_fdt_rw.o := $(nossp_flags) CFLAGS_fdt_wip.o := $(nossp_flags) -ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) +ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \ + -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN) asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. 
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h index b36c0289a308..6a0f1f524466 100644 --- a/arch/arm/boot/compressed/libfdt_env.h +++ b/arch/arm/boot/compressed/libfdt_env.h @@ -2,11 +2,13 @@ #ifndef _ARM_LIBFDT_ENV_H #define _ARM_LIBFDT_ENV_H +#include #include #include #include -#define INT_MAX ((int)(~0U>>1)) +#define INT32_MAX S32_MAX +#define UINT32_MAX U32_MAX typedef __be16 fdt16_t; typedef __be32 fdt32_t; diff --git a/arch/arm/boot/dts/am335x-boneblack-common.dtsi b/arch/arm/boot/dts/am335x-boneblack-common.dtsi index 7ad079861efd..91f93bc89716 100644 --- a/arch/arm/boot/dts/am335x-boneblack-common.dtsi +++ b/arch/arm/boot/dts/am335x-boneblack-common.dtsi @@ -131,6 +131,11 @@ }; / { + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x20000000>; /* 512 MB */ + }; + clk_mcasp0_fixed: clk_mcasp0_fixed { #clock-cells = <0>; compatible = "fixed-clock"; diff --git a/arch/arm/boot/dts/am335x-sancloud-bbe.dts b/arch/arm/boot/dts/am335x-sancloud-bbe.dts index 8678e6e35493..e5fdb7abb0d5 100644 --- a/arch/arm/boot/dts/am335x-sancloud-bbe.dts +++ b/arch/arm/boot/dts/am335x-sancloud-bbe.dts @@ -108,7 +108,7 @@ &cpsw_emac0 { phy-handle = <&ethphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &i2c0 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index cae4500194fe..811c8cae315b 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -86,7 +86,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index f3ced6df0c9b..9f66f96d09c9 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts @@ -526,11 +526,11 @@ * Supply voltage supervisor on board will not allow opp50 so * disable it and set opp100 as suspend OPP.
*/ - opp50@300000000 { + opp50-300000000 { status = "disabled"; }; - opp100@600000000 { + opp100-600000000 { opp-suspend; }; }; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 95314121d111..a6fbc088daa8 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -42,7 +42,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; @@ -848,6 +848,7 @@ pinctrl-names = "default", "sleep"; pinctrl-0 = <&spi0_pins_default>; pinctrl-1 = <&spi0_pins_sleep>; + ti,pindir-d0-out-d1-in = <1>; }; &spi1 { @@ -855,6 +856,7 @@ pinctrl-names = "default", "sleep"; pinctrl-0 = <&spi1_pins_default>; pinctrl-1 = <&spi1_pins_sleep>; + ti,pindir-d0-out-d1-in = <1>; }; &usb2_phy1 { diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi index 091356f2a8c1..c726cd8dbdf1 100644 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi @@ -704,6 +704,60 @@ ti,bit-shift = <8>; reg = <0x2a48>; }; + + clkout1_osc_div_ck: clkout1-osc-div-ck { + #clock-cells = <0>; + compatible = "ti,divider-clock"; + clocks = <&sys_clkin_ck>; + ti,bit-shift = <20>; + ti,max-div = <4>; + reg = <0x4100>; + }; + + clkout1_src2_mux_ck: clkout1-src2-mux-ck { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>, + <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>, + <&dpll_mpu_m2_ck>; + reg = <0x4100>; + }; + + clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck { + #clock-cells = <0>; + compatible = "ti,divider-clock"; + clocks = <&clkout1_src2_mux_ck>; + ti,bit-shift = <4>; + ti,max-div = <8>; + reg = <0x4100>; + }; + + clkout1_src2_post_div_ck: clkout1-src2-post-div-ck { + #clock-cells = <0>; + compatible = "ti,divider-clock"; + clocks = <&clkout1_src2_pre_div_ck>; + ti,bit-shift = <8>; + ti,max-div = <32>; + ti,index-power-of-two; + reg = <0x4100>; + }; + + clkout1_mux_ck: clkout1-mux-ck { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>, + <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>; + ti,bit-shift = <16>; + reg = <0x4100>; + }; + + clkout1_ck: clkout1-ck { + #clock-cells = <0>; + compatible = "ti,gate-clock"; + clocks = <&clkout1_mux_ck>; + ti,bit-shift = <23>; + reg = <0x4100>; + }; }; &prcm { diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index 0aaacea1d887..10105a497c1a 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -167,11 +167,7 @@ &pcie1_rc { status = "okay"; - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -}; - -&pcie1_ep { - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; + gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>; }; &mmc1 { diff --git a/arch/arm/boot/dts/am572x-idk-common.dtsi b/arch/arm/boot/dts/am572x-idk-common.dtsi index a064f13b3880..ddf123620e96 100644 --- a/arch/arm/boot/dts/am572x-idk-common.dtsi +++ b/arch/arm/boot/dts/am572x-idk-common.dtsi @@ -147,10 +147,6 @@ gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; }; -&pcie1_ep { - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -}; - &mailbox5 { status = "okay"; mbox_ipu1_ipc3x: mbox_ipu1_ipc3x { diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi index bc76f1705c0f..a813a0cf3ff3 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi @@ -29,6 +29,27 @@ reg 
= <0x0 0x80000000 0x0 0x80000000>; }; + main_12v0: fixedregulator-main_12v0 { + /* main supply */ + compatible = "regulator-fixed"; + regulator-name = "main_12v0"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + regulator-boot-on; + }; + + evm_5v0: fixedregulator-evm_5v0 { + /* Output of TPS54531D */ + compatible = "regulator-fixed"; + regulator-name = "evm_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&main_12v0>; + regulator-always-on; + regulator-boot-on; + }; + vdd_3v3: fixedregulator-vdd_3v3 { compatible = "regulator-fixed"; regulator-name = "vdd_3v3"; @@ -547,10 +568,6 @@ gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; }; -&pcie1_ep { - gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; -}; - &mcasp3 { #sound-dai-cells = <0>; assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index dee9c0c8a096..16c6fd3c4246 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -187,7 +187,7 @@ usart0 { pinctrl_usart0: usart0-0 { atmel,pins = - ; }; @@ -221,7 +221,7 @@ usart1 { pinctrl_usart1: usart1-0 { atmel,pins = - ; }; @@ -239,7 +239,7 @@ usart2 { pinctrl_usart2: usart2-0 { atmel,pins = - ; }; @@ -257,7 +257,7 @@ usart3 { pinctrl_usart3: usart3-0 { atmel,pins = - ; }; @@ -275,7 +275,7 @@ uart0 { pinctrl_uart0: uart0-0 { atmel,pins = - ; }; }; @@ -283,7 +283,7 @@ uart1 { pinctrl_uart1: uart1-0 { atmel,pins = - ; }; }; diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index dba025a98527..5ed3d745ac86 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi @@ -329,7 +329,7 @@ usart0 { pinctrl_usart0: usart0-0 { atmel,pins = - , + , ; }; @@ -347,7 +347,7 @@ usart1 { pinctrl_usart1: usart1-0 { atmel,pins = - , + , ; }; @@ -365,7 +365,7 @@ usart2 { pinctrl_usart2: usart2-0 { atmel,pins = - , + , ; }; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 99678abdda93..5c990cfae254 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -183,7 +183,7 @@ usart0 { pinctrl_usart0: usart0-0 { atmel,pins = - ; }; @@ -201,7 +201,7 @@ usart1 { pinctrl_usart1: usart1-0 { atmel,pins = - ; }; @@ -219,7 +219,7 @@ usart2 { pinctrl_usart2: usart2-0 { atmel,pins = - ; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 691c95ea6175..fd179097a4bf 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -556,7 +556,7 @@ usart0 { pinctrl_usart0: usart0-0 { atmel,pins = - ; }; @@ -574,7 +574,7 @@ usart1 { pinctrl_usart1: usart1-0 { atmel,pins = - ; }; @@ -592,7 +592,7 @@ usart2 { pinctrl_usart2: usart2-0 { atmel,pins = - ; }; @@ -610,7 +610,7 @@ usart3 { pinctrl_usart3: usart3-0 { atmel,pins = - ; }; diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi index 8643b7151565..ea024e4b6e09 100644 --- a/arch/arm/boot/dts/at91sam9rl.dtsi +++ b/arch/arm/boot/dts/at91sam9rl.dtsi @@ -682,7 +682,7 @@ usart0 { pinctrl_usart0: usart0-0 { atmel,pins = - , + , ; }; @@ -721,7 +721,7 @@ usart1 { pinctrl_usart1: usart1-0 { atmel,pins = - , + , ; }; @@ -744,7 +744,7 @@ usart2 { pinctrl_usart2: usart2-0 { atmel,pins = - , + , ; }; @@ -767,7 +767,7 @@ usart3 { pinctrl_usart3: usart3-0 { atmel,pins = - , + , ; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi 
b/arch/arm/boot/dts/bcm-cygnus.dtsi index 2dac3efc7640..1bc45cfd5453 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -174,8 +174,8 @@ mdio: mdio@18002000 { compatible = "brcm,iproc-mdio"; reg = <0x18002000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; status = "disabled"; gphy0: ethernet-phy@0 { diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index b75af21069f9..4c3f606e5b8d 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -112,6 +112,7 @@ &sdhci { #address-cells = <1>; #size-cells = <0>; + pinctrl-names = "default"; pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>; bus-width = <4>; mmc-pwrseq = <&wifi_pwrseq>; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 2d191fcbc2cc..90125ce19a1b 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -40,7 +40,7 @@ trips { cpu-crit { - temperature = <80000>; + temperature = <90000>; hysteresis = <0>; type = "critical"; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 372dc1eb88a0..2d9b4dd05830 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -353,8 +353,8 @@ mdio: mdio@18003000 { compatible = "brcm,iproc-mdio"; reg = <0x18003000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; }; mdio-bus-mux@18003000 { diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 5cac2dd58241..3ae4f6358da4 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -3059,7 +3059,7 @@ davinci_mdio: mdio@1000 { compatible = "ti,cpsw-mdio","ti,davinci_mdio"; - clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 0>; + clocks = <&gmac_main_clk>; clock-names = "fck"; #address-cells = <1>; #size-cells = <0>; @@ -3413,6 +3413,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; @@ -3441,6 +3442,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; @@ -3469,6 +3471,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; @@ -3497,6 +3500,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 953f0ffce2a9..c6be65249f42 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -148,6 +148,7 @@ #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x0 0x0 0xc0000000>; + dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>; ti,hwmods = "l3_main_1", "l3_main_2"; reg = <0x0 0x44000000 0x0 0x1000000>, <0x0 0x45000000 0x0 0x1000>; @@ -184,6 +185,7 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x20013000 0x13000 0 0xffed000>; + dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; @@ -238,6 +240,7 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x30013000 0x13000 0 0xffed000>; + dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; diff --git a/arch/arm/boot/dts/dra76x.dtsi 
b/arch/arm/boot/dts/dra76x.dtsi index cdcba3f561c4..9f6fbe4c1fee 100644 --- a/arch/arm/boot/dts/dra76x.dtsi +++ b/arch/arm/boot/dts/dra76x.dtsi @@ -86,3 +86,8 @@ &usb4_tm { status = "disabled"; }; + +&mmc3 { + /* dra76x is not affected by i887 */ + max-frequency = <96000000>; +}; diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 93e1eb83bed9..d7d98d2069df 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -796,16 +796,6 @@ clock-div = <1>; }; - ipu1_gfclk_mux: ipu1_gfclk_mux@520 { - #clock-cells = <0>; - compatible = "ti,mux-clock"; - clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>; - ti,bit-shift = <24>; - reg = <0x0520>; - assigned-clocks = <&ipu1_gfclk_mux>; - assigned-clock-parents = <&dpll_core_h22x2_ck>; - }; - dummy_ck: dummy_ck { #clock-cells = <0>; compatible = "fixed-clock"; @@ -1564,6 +1554,8 @@ compatible = "ti,clkctrl"; reg = <0x20 0x4>; #clock-cells = <2>; + assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>; + assigned-clock-parents = <&dpll_core_h22x2_ck>; }; ipu_clkctrl: ipu-clkctrl@50 { diff --git a/arch/arm/boot/dts/imx6dl-icore-mipi.dts b/arch/arm/boot/dts/imx6dl-icore-mipi.dts index e43bccb78ab2..d8f3821a0ffd 100644 --- a/arch/arm/boot/dts/imx6dl-icore-mipi.dts +++ b/arch/arm/boot/dts/imx6dl-icore-mipi.dts @@ -8,7 +8,7 @@ /dts-v1/; #include "imx6dl.dtsi" -#include "imx6qdl-icore.dtsi" +#include "imx6qdl-icore-1.5.dtsi" / { model = "Engicam i.CoreM6 DualLite/Solo MIPI Starter Kit"; diff --git a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts index 9c61e3be2d9a..1c46df6827f5 100644 --- a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts +++ b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts @@ -55,7 +55,7 @@ #sound-dai-cells = <0>; clocks = <&clk_ext_audio_codec>; VDDA-supply = <&reg_3p3v>; - VDDIO-supply = <&reg_3p3v>; + VDDIO-supply = <&sw2_reg>; }; }; diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi index 387801dde02e..08a2e17e0539 100644 --- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi +++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi @@ -206,7 +206,7 @@ }; rtc@56 { - compatible = "rv3029c2"; + compatible = "microcrystal,rv3029"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rtc_hw300>; reg = <0x56>; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi index 6486df3e2942..31fa37d2fe47 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi @@ -107,14 +107,14 @@ regulators { vdd_arm: buck1 { regulator-name = "vdd_arm"; - regulator-min-microvolt = <730000>; + regulator-min-microvolt = <925000>; regulator-max-microvolt = <1380000>; regulator-always-on; }; vdd_soc: buck2 { regulator-name = "vdd_soc"; - regulator-min-microvolt = <730000>; + regulator-min-microvolt = <1150000>; regulator-max-microvolt = <1380000>; regulator-always-on; }; @@ -183,7 +183,6 @@ pinctrl-0 = <&pinctrl_usdhc4>; bus-width = <8>; non-removable; - vmmc-supply = <&vdd_emmc_1p8>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index 71ca76a5e4a5..fe59dde41b64 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -749,10 +749,6 @@ vin-supply = <&vgen5_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen5_reg>; }; diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi index 93be00a60c88..a66c4fac6baf 100644 --- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi @@ -627,7 +627,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; + disable-wp; vmmc-supply = <&reg_3p3v_sd>; vqmmc-supply = <&reg_3p3v>; no-1-8-v; @@ -640,7 +640,7 @@ pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; + disable-wp; vmmc-supply = <&reg_3p3v_sd>; vqmmc-supply = <&reg_3p3v>; no-1-8-v; @@ -774,6 +774,7 @@ &usbh1 { vbus-supply = <&reg_5p0v_main>; disable-over-current; + maximum-speed = "full-speed"; status = "okay"; }; @@ -1055,7 +1056,6 @@ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059 - MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x40010040 MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x40010040 >; }; @@ -1068,7 +1068,6 @@ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 - MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x40010040 MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x40010040 >; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 4829aa682aeb..bc86cfaaa9c2 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -584,10 +584,6 @@ vin-supply = <&sw2_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&sw2_reg>; }; diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index 3e1d32fdf4b8..5ace9e6acf85 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -265,10 +265,6 @@ status = "okay"; }; -&reg_3p0 { - vin-supply = <&sw2_reg>; -}; - &snvs_poweroff { status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts index f1830ed387a5..91a7548fdb8d 100644 --- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts +++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts @@ -159,10 +159,6 @@ vin-supply = <&vgen6_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen6_reg>; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index a8ee7087af5a..5a63ca615722 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -141,10 +141,6 @@ vin-supply = <&vgen6_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen6_reg>; }; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index c2a9dd57e56a..aa86341adaaa 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -215,7 +215,7 @@ flash0: n25q256a@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "micron,n25q256a"; + compatible = "micron,n25q256a", "jedec,spi-nor"; spi-max-frequency = <29000000>; spi-rx-bus-width = <4>; spi-tx-bus-width = <4>; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6310-s.dts b/arch/arm/boot/dts/imx6ul-kontron-n6310-s.dts index 0205fd56d975..4e99e6c79a68 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6310-s.dts +++ b/arch/arm/boot/dts/imx6ul-kontron-n6310-s.dts @@ -157,10 +157,6 @@ status = "okay"; }; -&snvs_poweroff { - status = "okay"; -}; - &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi index
917eb0b58b13..eed78f12e79e 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -337,7 +337,6 @@ assigned-clock-rates = <400000000>; bus-width = <8>; fsl,tuning-step = <2>; - max-frequency = <100000000>; vmmc-supply = <&reg_module_3v3>; vqmmc-supply = <&reg_DCDC3>; non-removable; diff --git a/arch/arm/boot/dts/imx7s-colibri.dtsi b/arch/arm/boot/dts/imx7s-colibri.dtsi index 1fb1ec5d3d70..6d16e32aed89 100644 --- a/arch/arm/boot/dts/imx7s-colibri.dtsi +++ b/arch/arm/boot/dts/imx7s-colibri.dtsi @@ -49,3 +49,7 @@ reg = <0x80000000 0x10000000>; }; }; + +&gpmi { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index 6859a3a83750..3dac6898cdc5 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -37,10 +37,10 @@ #address-cells = <1>; #size-cells = <0>; - cpu0: cpu@0 { + cpu0: cpu@f00 { compatible = "arm,cortex-a7"; device_type = "cpu"; - reg = <0>; + reg = <0xf00>; }; }; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts index 07ac99b9cda6..cdb89b3e2a9b 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts @@ -11,22 +11,6 @@ #include "logicpd-torpedo-37xx-devkit.dts" &lcd0 { - - label = "28"; - - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <3>; - hback-porch = <2>; - hsync-len = <42>; - vback-porch = <3>; - vfront-porch = <2>; - vsync-len = <11>; - hsync-active = <1>; - vsync-active = <1>; - de-active = <1>; - pixelclk-active = <0>; - }; + /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */ + compatible = "logicpd,type28"; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 2f6977ada447..63d9f4a066e3 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -728,7 +728,7 @@ }; mdio0: mdio@2d24000 { - compatible = "fsl,etsec2-mdio"; + compatible = "gianfar"; device_type = "mdio"; #address-cells = <1>; #size-cells = <0>; @@ -737,7 +737,7 @@ }; mdio1: mdio@2d64000 { - compatible = "fsl,etsec2-mdio"; + compatible = "gianfar"; device_type = "mdio"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index 5a7e3e5caebe..db2033f674c6 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -129,8 +129,8 @@ gpu_opp_table: gpu-opp-table { compatible = "operating-points-v2"; - opp-182150000 { - opp-hz = /bits/ 64 <182150000>; + opp-182142857 { + opp-hz = /bits/ 64 <182142857>; opp-microvolt = <1150000>; }; opp-318750000 { @@ -253,7 +253,7 @@ &aobus { pmu: pmu@e0 { compatible = "amlogic,meson8-pmu", "syscon"; - reg = <0xe0 0x8>; + reg = <0xe0 0x18>; }; pinctrl_aobus: pinctrl@84 { diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 099bf8e711c9..1e8c5d7bc824 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -125,8 +125,8 @@ opp-hz = /bits/ 64 <255000000>; opp-microvolt = <1100000>; }; - opp-364300000 { - opp-hz = /bits/ 64 <364300000>; + opp-364285714 { + opp-hz = /bits/ 64 <364285714>; opp-microvolt = <1100000>; }; opp-425000000 { diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index d1eae47b83f6..82f7ae030600 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++
b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -160,12 +160,12 @@ regulator-enable-ramp-delay = <1000>; }; - /* Used by DSS */ + /* Used by DSS and is the "zerov_regulator" trigger for SoC off mode */ vcsi: VCSI { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-enable-ramp-delay = <1000>; - regulator-boot-on; + regulator-always-on; }; vdac: VDAC { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 84a5ade1e865..7f2ddb78da5f 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -155,6 +155,12 @@ pwms = <&pwm9 0 26316 0>; /* 38000 Hz */ }; + rom_rng: rng { + compatible = "nokia,n900-rom-rng"; + clocks = <&rng_ick>; + clock-names = "ick"; + }; + /* controlled (enabled/disabled) directly by bcm2048 and wl1251 */ vctcxo: vctcxo { compatible = "fixed-clock"; @@ -843,34 +849,46 @@ compatible = "ti,omap2-onenand"; reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ + /* + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported + * bootloader set values when booted with v5.1 + * (OneNAND Manufacturer: Samsung): + * + * cs0 GPMC_CS_CONFIG1: 0xfb001202 + * cs0 GPMC_CS_CONFIG2: 0x00111100 + * cs0 GPMC_CS_CONFIG3: 0x00020200 + * cs0 GPMC_CS_CONFIG4: 0x11001102 + * cs0 GPMC_CS_CONFIG5: 0x03101616 + * cs0 GPMC_CS_CONFIG6: 0x90060000 + */ gpmc,sync-read; gpmc,sync-write; gpmc,burst-length = <16>; gpmc,burst-read; gpmc,burst-wrap; gpmc,burst-write; - gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */ - gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */ + gpmc,device-width = <2>; + gpmc,mux-add-data = <2>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <87>; - gpmc,cs-wr-off-ns = <87>; + gpmc,cs-rd-off-ns = <102>; + gpmc,cs-wr-off-ns = <102>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <10>; - gpmc,adv-wr-off-ns = <10>; - gpmc,oe-on-ns = <15>; - gpmc,oe-off-ns = <87>; + gpmc,adv-rd-off-ns = <12>; + gpmc,adv-wr-off-ns = <12>; + gpmc,oe-on-ns = <12>; + gpmc,oe-off-ns = <102>; gpmc,we-on-ns = <0>; - gpmc,we-off-ns = <87>; - gpmc,rd-cycle-ns = <112>; - gpmc,wr-cycle-ns = <112>; - gpmc,access-ns = <81>; - gpmc,page-burst-access-ns = <15>; + gpmc,we-off-ns = <102>; + gpmc,rd-cycle-ns = <132>; + gpmc,wr-cycle-ns = <132>; + gpmc,access-ns = <96>; + gpmc,page-burst-access-ns = <18>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,wait-monitoring-ns = <0>; - gpmc,clk-activation-ns = <5>; - gpmc,wr-data-mux-bus-ns = <30>; - gpmc,wr-access-ns = <81>; + gpmc,clk-activation-ns = <6>; + gpmc,wr-data-mux-bus-ns = <36>; + gpmc,wr-access-ns = <96>; gpmc,sync-clk-ps = <15000>; /* diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi index ec5891718ae6..150d5be42d27 100644 --- a/arch/arm/boot/dts/omap3-pandora-common.dtsi +++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi @@ -226,6 +226,17 @@ gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* GPIO_164 */ }; + /* wl1251 wifi+bt module */ + wlan_en: fixed-regulator-wg7210_en { + compatible = "regulator-fixed"; + regulator-name = "vwlan"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + startup-delay-us = <50000>; + enable-active-high; + gpio = <&gpio1 23 GPIO_ACTIVE_HIGH>; + }; + /* wg7210 (wifi+bt module) 32k clock buffer */ wg7210_32k: fixed-regulator-wg7210_32k { compatible = "regulator-fixed"; @@ -522,9 +533,30 @@ /*wp-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;*/ /* GPIO_127 */ }; -/* mmc3 is probed using pdata-quirks to pass wl1251 card data */ &mmc3 { - status = "disabled"; + 
vmmc-supply = <&wlan_en>; + + bus-width = <4>; + non-removable; + ti,non-removable; + cap-power-off-card; + + pinctrl-names = "default"; + pinctrl-0 = <&mmc3_pins>; + + #address-cells = <1>; + #size-cells = <0>; + + wlan: wifi@1 { + compatible = "ti,wl1251"; + + reg = <1>; + + interrupt-parent = <&gpio1>; + interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */ + + ti,wl1251-has-eeprom; + }; }; /* bluetooth*/ diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi index a7a04d78deeb..f24e2326cfa7 100644 --- a/arch/arm/boot/dts/omap3-tao3530.dtsi +++ b/arch/arm/boot/dts/omap3-tao3530.dtsi @@ -222,7 +222,7 @@ pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; vqmmc-supply = <&vsim>; - cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 7cc95bc1598b..e5506ab669fc 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -330,8 +330,8 @@ target-module@56000000 { compatible = "ti,sysc-omap4", "ti,sysc"; - reg = <0x5601fc00 0x4>, - <0x5601fc10 0x4>; + reg = <0x5600fe00 0x4>, + <0x5600fe10 0x4>; reg-names = "rev", "sysc"; ti,sysc-midle = , , diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 1fb7937638f0..041646fabb2d 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -143,6 +143,7 @@ #address-cells = <1>; #size-cells = <1>; ranges = <0 0 0 0xc0000000>; + dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>; ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3"; reg = <0 0x44000000 0 0x2000>, <0 0x44800000 0 0x3000>, diff --git a/arch/arm/boot/dts/ox810se.dtsi b/arch/arm/boot/dts/ox810se.dtsi index 9f6c2b660ed3..0755e5864c4a 100644 --- a/arch/arm/boot/dts/ox810se.dtsi +++ b/arch/arm/boot/dts/ox810se.dtsi @@ -323,8 +323,8 @@ interrupt-controller; reg = <0 0x200>; #interrupt-cells = <1>; - valid-mask = <0xFFFFFFFF>; - clear-mask = <0>; + valid-mask = <0xffffffff>; + clear-mask = <0xffffffff>; }; timer0: timer@200 { diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi index c9b327732063..90846a7655b4 100644 --- a/arch/arm/boot/dts/ox820.dtsi +++ b/arch/arm/boot/dts/ox820.dtsi @@ -240,8 +240,8 @@ reg = <0 0x200>; interrupts = ; #interrupt-cells = <1>; - valid-mask = <0xFFFFFFFF>; - clear-mask = <0>; + valid-mask = <0xffffffff>; + clear-mask = <0xffffffff>; }; timer0: timer@200 { diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi index ebf5b7cfe215..63341635bddf 100644 --- a/arch/arm/boot/dts/r8a7779.dtsi +++ b/arch/arm/boot/dts/r8a7779.dtsi @@ -68,6 +68,14 @@ <0xf0000100 0x100>; }; + timer@f0000200 { + compatible = "arm,cortex-a9-global-timer"; + reg = <0xf0000200 0x100>; + interrupts = ; + clocks = <&cpg_clocks R8A7779_CLK_ZS>; + }; + timer@f0000600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xf0000600 0x20>; diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts index c8b62bbd6a4a..ad1afd403052 100644 --- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts +++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts @@ -466,9 +466,12 @@ pinctrl-names = "default"; pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>; vmmcq-supply = <&vccio_wl>; + #address-cells = <1>; + #size-cells = <0>; status = "okay"; brcmf: wifi@1 { + reg = <1>; compatible = "brcm,bcm4329-fmac"; interrupt-parent = <&gpio3>; interrupts = ; diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts 
index 0e159c884f97..1aeac33b0d34 100644 --- a/arch/arm/boot/dts/s3c6410-mini6410.dts +++ b/arch/arm/boot/dts/s3c6410-mini6410.dts @@ -165,6 +165,10 @@ }; }; +&clocks { + clocks = <&fin_pll>; +}; + &sdhci0 { pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; diff --git a/arch/arm/boot/dts/s3c6410-smdk6410.dts b/arch/arm/boot/dts/s3c6410-smdk6410.dts index a9a5689dc462..3bf6c450a26e 100644 --- a/arch/arm/boot/dts/s3c6410-smdk6410.dts +++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts @@ -69,6 +69,10 @@ }; }; +&clocks { + clocks = <&fin_pll>; +}; + &sdhci0 { pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index f770aace0efd..203d40be70a5 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1188,49 +1188,49 @@ usart0_clk: usart0_clk { #clock-cells = <0>; reg = <12>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; usart1_clk: usart1_clk { #clock-cells = <0>; reg = <13>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; usart2_clk: usart2_clk { #clock-cells = <0>; reg = <14>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; usart3_clk: usart3_clk { #clock-cells = <0>; reg = <15>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; uart0_clk: uart0_clk { #clock-cells = <0>; reg = <16>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; twi0_clk: twi0_clk { reg = <18>; #clock-cells = <0>; - atmel,clk-output-range = <0 16625000>; + atmel,clk-output-range = <0 41500000>; }; twi1_clk: twi1_clk { #clock-cells = <0>; reg = <19>; - atmel,clk-output-range = <0 16625000>; + atmel,clk-output-range = <0 41500000>; }; twi2_clk: twi2_clk { #clock-cells = <0>; reg = <20>; - atmel,clk-output-range = <0 16625000>; + atmel,clk-output-range = <0 41500000>; }; mci0_clk: mci0_clk { @@ -1246,19 +1246,19 @@ spi0_clk: spi0_clk { #clock-cells = <0>; reg = <24>; - atmel,clk-output-range = <0 133000000>; + atmel,clk-output-range = <0 166000000>; }; spi1_clk: spi1_clk { #clock-cells = <0>; reg = <25>; - atmel,clk-output-range = <0 133000000>; + atmel,clk-output-range = <0 166000000>; }; tcb0_clk: tcb0_clk { #clock-cells = <0>; reg = <26>; - atmel,clk-output-range = <0 133000000>; + atmel,clk-output-range = <0 166000000>; }; pwm_clk: pwm_clk { @@ -1269,7 +1269,7 @@ adc_clk: adc_clk { #clock-cells = <0>; reg = <29>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; dma0_clk: dma0_clk { @@ -1300,13 +1300,13 @@ ssc0_clk: ssc0_clk { #clock-cells = <0>; reg = <38>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; ssc1_clk: ssc1_clk { #clock-cells = <0>; reg = <39>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; sha_clk: sha_clk { diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi index cf06a018ed0f..2470dd3fff25 100644 --- a/arch/arm/boot/dts/sama5d3_can.dtsi +++ b/arch/arm/boot/dts/sama5d3_can.dtsi @@ -36,13 +36,13 @@ can0_clk: can0_clk { #clock-cells = <0>; reg = <40>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; can1_clk: can1_clk { #clock-cells = <0>; reg = <41>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; }; }; diff --git 
a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi index 1584035daf51..215802b8db30 100644 --- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi +++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi @@ -22,6 +22,7 @@ tcb1_clk: tcb1_clk { #clock-cells = <0>; reg = <27>; + atmel,clk-output-range = <0 166000000>; }; }; }; diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi index 4316bdbdc25d..cb62adbd28ed 100644 --- a/arch/arm/boot/dts/sama5d3_uart.dtsi +++ b/arch/arm/boot/dts/sama5d3_uart.dtsi @@ -41,13 +41,13 @@ uart0_clk: uart0_clk { #clock-cells = <0>; reg = <16>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; uart1_clk: uart1_clk { #clock-cells = <0>; reg = <17>; - atmel,clk-output-range = <0 66000000>; + atmel,clk-output-range = <0 83000000>; }; }; }; diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi index 60e11045ad76..d051f080e52e 100644 --- a/arch/arm/boot/dts/stihxxx-b2120.dtsi +++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi @@ -46,7 +46,7 @@ /* DAC */ format = "i2s"; mclk-fs = <256>; - frame-inversion = <1>; + frame-inversion; cpu { sound-dai = <&sti_uni_player2>; }; diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts index a3ff04940aec..c6dc6d1a051b 100644 --- a/arch/arm/boot/dts/stm32f469-disco.dts +++ b/arch/arm/boot/dts/stm32f469-disco.dts @@ -76,6 +76,13 @@ regulator-max-microvolt = <3300000>; }; + vdd_dsi: vdd-dsi { + compatible = "regulator-fixed"; + regulator-name = "vdd_dsi"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + soc { dma-ranges = <0xc0000000 0x0 0x10000000>; }; @@ -155,6 +162,7 @@ compatible = "orisetech,otm8009a"; reg = <0>; /* dsi virtual channel (0..3) */ reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>; + power-supply = <&vdd_dsi>; status = "okay"; port { diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index fb928503ad45..d9be511f054f 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ -101,7 +101,7 @@ initial-mode = <1>; /* initialize in HUB mode */ disabled-ports = <1>; intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ - reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */ + reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */ connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */ refclk-frequency = <19200000>; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 3bec3e0a81b2..397140454132 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -482,7 +482,8 @@ }; &usbphy { - usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */ + usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */ + usb0_vbus_power-supply = <&usb_power_supply>; usb0_vbus-supply = <&reg_drivevbus>; usb1_vbus-supply = <&reg_vmain>; usb2_vbus-supply = <&reg_vmain>; diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi index e37c30e811d3..6056f206c9e3 100644 --- a/arch/arm/boot/dts/sun8i-h3.dtsi +++ b/arch/arm/boot/dts/sun8i-h3.dtsi @@ -80,7 +80,7 @@ #cooling-cells = <2>; }; - cpu@1 { + cpu1: cpu@1 { compatible = "arm,cortex-a7"; device_type = "cpu"; reg = <1>; @@ -90,7 +90,7 @@ #cooling-cells = <2>; }; - cpu@2 { + cpu2: cpu@2 { compatible = "arm,cortex-a7"; device_type = "cpu"; reg = <2>; @@ -100,7 +100,7 @@ #cooling-cells = <2>;
}; - cpu@3 { + cpu3: cpu@3 { compatible = "arm,cortex-a7"; device_type = "cpu"; reg = <3>; @@ -111,6 +111,15 @@ }; }; + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = , + , + , + ; + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; + }; + timer { compatible = "arm,armv7-timer"; interrupts = , diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi index c9c2688db66d..80f4dc34df34 100644 --- a/arch/arm/boot/dts/sun8i-r40.dtsi +++ b/arch/arm/boot/dts/sun8i-r40.dtsi @@ -266,6 +266,16 @@ #phy-cells = <1>; }; + ahci: sata@1c18000 { + compatible = "allwinner,sun8i-r40-ahci"; + reg = <0x01c18000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>; + resets = <&ccu RST_BUS_SATA>; + reset-names = "ahci"; + status = "disabled"; + }; + ehci1: usb@1c19000 { compatible = "allwinner,sun8i-r40-ehci", "generic-ehci"; reg = <0x01c19000 0x100>; @@ -557,17 +567,6 @@ #size-cells = <0>; }; - ahci: sata@1c18000 { - compatible = "allwinner,sun8i-r40-ahci"; - reg = <0x01c18000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>; - resets = <&ccu RST_BUS_SATA>; - reset-names = "ahci"; - status = "disabled"; - - }; - gmac: ethernet@1c50000 { compatible = "allwinner,sun8i-r40-gmac"; syscon = <&ccu>; diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig index 597536cc9573..b87508c7056c 100644 --- a/arch/arm/configs/aspeed_g5_defconfig +++ b/arch/arm/configs/aspeed_g5_defconfig @@ -139,6 +139,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_ASPEED_VUART=y CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_ASPEED_KCS_IPMI_BMC=y CONFIG_ASPEED_BT_IPMI_BMC=y diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 08db1c83eb2d..34d4acbcee34 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -38,6 +38,7 @@ CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_KALLSYMS_ALL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -92,6 +93,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y CONFIG_ATA=y @@ -290,6 +292,7 @@ CONFIG_CROS_EC_SPI=y CONFIG_COMMON_CLK_MAX77686=y CONFIG_COMMON_CLK_S2MPS11=y CONFIG_EXYNOS_IOMMU=y +CONFIG_PM_DEVFREQ=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y @@ -348,9 +351,13 @@ CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_DETECT_HUNG_TASK is not set CONFIG_PROVE_LOCKING=y CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_USER=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 0f7381ee0c37..dabb80453249 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -460,6 +460,7 @@ CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set CONFIG_PROVE_LOCKING=y # CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 40d7f1a4fc45..4ec69fb8a698 100644 --- 
a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -552,5 +552,6 @@ CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_SPLIT=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 40002416efec..8e995ec796c8 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -14,13 +14,25 @@ #include /* arm64 compatibility macros */ +#define PSR_AA32_MODE_FIQ FIQ_MODE +#define PSR_AA32_MODE_SVC SVC_MODE #define PSR_AA32_MODE_ABT ABT_MODE #define PSR_AA32_MODE_UND UND_MODE #define PSR_AA32_T_BIT PSR_T_BIT +#define PSR_AA32_F_BIT PSR_F_BIT #define PSR_AA32_I_BIT PSR_I_BIT #define PSR_AA32_A_BIT PSR_A_BIT #define PSR_AA32_E_BIT PSR_E_BIT #define PSR_AA32_IT_MASK PSR_IT_MASK +#define PSR_AA32_GE_MASK 0x000f0000 +#define PSR_AA32_DIT_BIT 0x00200000 +#define PSR_AA32_PAN_BIT 0x00400000 +#define PSR_AA32_SSBS_BIT 0x00800000 +#define PSR_AA32_Q_BIT PSR_Q_BIT +#define PSR_AA32_V_BIT PSR_V_BIT +#define PSR_AA32_C_BIT PSR_C_BIT +#define PSR_AA32_Z_BIT PSR_Z_BIT +#define PSR_AA32_N_BIT PSR_N_BIT unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); @@ -41,6 +53,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v) *__vcpu_spsr(vcpu) = v; } +static inline unsigned long host_spsr_to_spsr32(unsigned long spsr) +{ + return spsr; +} + static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, u8 reg_num) { @@ -177,6 +194,11 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; } +static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) +{ + return false; +} + static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) { return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h index 7c0eddb0adb2..32fbf82e3ebc 100644 --- a/arch/arm/include/asm/kvm_mmio.h +++ b/arch/arm/include/asm/kvm_mmio.h @@ -14,6 +14,8 @@ struct kvm_decode { unsigned long rt; bool sign_extend; + /* Not used on 32-bit arm */ + bool sixty_four; }; void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index ae5020302de4..6607fa817bba 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -146,10 +146,9 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER) @ make CNTP_* and CNTPCT accessible from PL1 mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1 - lsr r7, #16 - and r7, #0xf - cmp r7, #1 - bne 1f + ubfx r7, r7, #16, #4 + teq r7, #0 + beq 1f mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL orr r7, r7, #3 @ PL1PCEN | PL1PCTEN mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 9485acc520a4..e7fac125ea0d 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -224,8 +224,8 @@ void release_thread(struct task_struct *dead_task) asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int -copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); @@ -259,7 +259,7 
@@ copy_thread(unsigned long clone_flags, unsigned long stack_start, clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) - thread->tp_value[0] = childregs->ARM_r3; + thread->tp_value[0] = tls; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 4b0bab2607e4..46e1be9e57a8 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -240,6 +240,10 @@ int __cpu_disable(void) if (ret) return ret; +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY + remove_cpu_topology(cpu); +#endif + /* * Take this CPU offline. Once we clear this, we can't return, * and we must not schedule until we're ready to give up the cpu. diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 5b9faba03afb..8d2e61d9e7a6 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -196,9 +196,8 @@ void store_cpu_topology(unsigned int cpuid) struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; unsigned int mpidr; - /* If the cpu topology has been already set, just return */ - if (cpuid_topo->core_id != -1) - return; + if (cpuid_topo->package_id != -1) + goto topology_populated; mpidr = read_cpuid_mpidr(); @@ -231,14 +230,15 @@ void store_cpu_topology(unsigned int cpuid) cpuid_topo->package_id = -1; } - update_siblings_masks(cpuid); - update_cpu_capacity(cpuid); pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", cpuid, cpu_topology[cpuid].thread_id, cpu_topology[cpuid].core_id, cpu_topology[cpuid].package_id, mpidr); + +topology_populated: + update_siblings_masks(cpuid); } static inline int cpu_corepower_flags(void) diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index 9bf16c93ee6a..f00e45fa62c4 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -92,6 +92,8 @@ static bool __init cntvct_functional(void) * this. */ np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); + if (!np) + np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer"); if (!np) goto out_put; diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 95b2e1ce559c..f8016e3db65d 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user) ENDPROC(arm_copy_from_user) - .pushsection .fixup,"ax" + .pushsection .text.fixup,"ax" .align 0 copy_abort_preamble ldmfd sp!, {r1, r2, r3} diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index d5af6aedc02c..52665f30d236 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode) soc_pm.data.suspend_mode = AT91_PM_ULP0; } +static const struct of_device_id atmel_shdwc_ids[] = { + { .compatible = "atmel,sama5d2-shdwc" }, + { .compatible = "microchip,sam9x60-shdwc" }, + { /* sentinel. 
*/ } +}; + static void __init at91_pm_modes_init(void) { struct device_node *np; @@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void) !at91_is_pm_mode_active(AT91_PM_ULP1)) return; - np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc"); + np = of_find_matching_node(NULL, atmel_shdwc_ids); if (!np) { pr_warn("%s: failed to find shdwc!\n", __func__); goto ulp1_default; @@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = { { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] }, { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] }, { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] }, + { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] }, { /* sentinel */ }, }; diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index dd427bd2768c..02b180ad7245 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -9,6 +9,7 @@ menuconfig ARCH_DAVINCI select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select REGMAP_MMIO + select RESET_CONTROLLER select HAVE_IDE select PINCTRL_SINGLE diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 593bf1519608..95584ee02b55 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -520,6 +520,7 @@ config SOC_IMX6UL bool "i.MX6 UltraLite support" select PINCTRL_IMX6UL select SOC_IMX6 + select ARM_ERRATA_814220 help This enables support for Freescale i.MX6 UltraLite processor. @@ -556,6 +557,7 @@ config SOC_IMX7D select PINCTRL_IMX7D select SOC_IMX7D_CA7 if ARCH_MULTI_V7 select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M + select ARM_ERRATA_814220 if ARCH_MULTI_V7 help This enables support for Freescale i.MX7 Dual processor. diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 35ff620537e6..03506ce46149 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o endif +AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a +obj-$(CONFIG_SOC_IMX6) += resume-imx6.o obj-$(CONFIG_SOC_IMX6) += pm-imx6.o obj-$(CONFIG_SOC_IMX1) += mach-imx1.o diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 912aeceb4ff8..5aa5796cff0e 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu); int imx_cpu_kill(unsigned int cpu); #ifdef CONFIG_SUSPEND -void v7_cpu_resume(void); void imx53_suspend(void __iomem *ocram_vbase); extern const u32 imx53_suspend_sz; void imx6_suspend(void __iomem *ocram_vbase); #else -static inline void v7_cpu_resume(void) {} static inline void imx53_suspend(void __iomem *ocram_vbase) {} static const u32 imx53_suspend_sz; static inline void imx6_suspend(void __iomem *ocram_vbase) {} #endif +void v7_cpu_resume(void); + void imx6_pm_ccm_init(const char *ccm_compat); void imx6q_pm_init(void); void imx6dl_pm_init(void); diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S new file mode 100644 index 000000000000..5bd1ba7ef15b --- /dev/null +++ b/arch/arm/mach-imx/resume-imx6.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2014 Freescale Semiconductor, Inc. 
+ */ + +#include +#include +#include +#include +#include "hardware.h" + +/* + * The following code must assume it is running from physical address + * where absolute virtual addresses to the data section have to be + * turned into relative ones. + */ + +ENTRY(v7_cpu_resume) + bl v7_invalidate_l1 +#ifdef CONFIG_CACHE_L2X0 + bl l2c310_early_resume +#endif + b cpu_resume +ENDPROC(v7_cpu_resume) diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S index 062391ff13da..1eabf2d2834b 100644 --- a/arch/arm/mach-imx/suspend-imx6.S +++ b/arch/arm/mach-imx/suspend-imx6.S @@ -327,17 +327,3 @@ resume: ret lr ENDPROC(imx6_suspend) - -/* - * The following code must assume it is running from physical address - * where absolute virtual addresses to the data section have to be - * turned into relative ones. - */ - -ENTRY(v7_cpu_resume) - bl v7_invalidate_l1 -#ifdef CONFIG_CACHE_L2X0 - bl l2c310_early_resume -#endif - b cpu_resume -ENDPROC(v7_cpu_resume) diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig index 880bc2a5cada..7f7002dc2b21 100644 --- a/arch/arm/mach-npcm/Kconfig +++ b/arch/arm/mach-npcm/Kconfig @@ -11,7 +11,7 @@ config ARCH_NPCM7XX depends on ARCH_MULTI_V7 select PINCTRL_NPCM7XX select NPCM7XX_TIMER - select ARCH_REQUIRE_GPIOLIB + select GPIOLIB select CACHE_L2X0 select ARM_GIC select HAVE_ARM_TWD if SMP diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 439e143cad7b..46012ca812f4 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -265,6 +265,7 @@ static int __init omapdss_init_of(void) r = of_platform_populate(node, NULL, NULL, &pdev->dev); if (r) { pr_err("Unable to populate DSS submodule devices\n"); + put_device(&pdev->dev); return r; } diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 2efd18e8824c..247e3f8acffe 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -269,14 +268,6 @@ static void __init am3517_evm_legacy_init(void) am35xx_emac_reset(); } -static struct platform_device omap3_rom_rng_device = { - .name = "omap3-rom-rng", - .id = -1, - .dev = { - .platform_data = rx51_secure_rng_call, - }, -}; - static void __init nokia_n900_legacy_init(void) { hsmmc2_internal_input_clk(); @@ -292,9 +283,6 @@ static void __init nokia_n900_legacy_init(void) pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n"); pr_warn("Thumb binaries may crash randomly without this workaround\n"); } - - pr_info("RX-51: Registering OMAP3 HWRNG device\n"); - platform_device_register(&omap3_rom_rng_device); } } @@ -311,118 +299,15 @@ static void __init omap3_logicpd_torpedo_init(void) } /* omap3pandora legacy devices */ -#define PANDORA_WIFI_IRQ_GPIO 21 -#define PANDORA_WIFI_NRESET_GPIO 23 static struct platform_device pandora_backlight = { .name = "pandora-backlight", .id = -1, }; -static struct regulator_consumer_supply pandora_vmmc3_supply[] = { - REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"), -}; - -static struct regulator_init_data pandora_vmmc3 = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply), - .consumer_supplies = pandora_vmmc3_supply, -}; - -static struct fixed_voltage_config pandora_vwlan = { - .supply_name = "vwlan", - .microvolts = 1800000, /* 1.8V */ - .startup_delay = 50000, /* 50ms */ - .init_data = &pandora_vmmc3, -}; - -static struct platform_device 
pandora_vwlan_device = { - .name = "reg-fixed-voltage", - .id = 1, - .dev = { - .platform_data = &pandora_vwlan, - }, -}; - -static struct gpiod_lookup_table pandora_vwlan_gpiod_table = { - .dev_id = "reg-fixed-voltage.1", - .table = { - /* - * As this is a low GPIO number it should be at the first - * GPIO bank. - */ - GPIO_LOOKUP("gpio-0-31", PANDORA_WIFI_NRESET_GPIO, - NULL, GPIO_ACTIVE_HIGH), - { }, - }, -}; - -static void pandora_wl1251_init_card(struct mmc_card *card) -{ - /* - * We have TI wl1251 attached to MMC3. Pass this information to - * SDIO core because it can't be probed by normal methods. - */ - if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) { - card->quirks |= MMC_QUIRK_NONSTD_SDIO; - card->cccr.wide_bus = 1; - card->cis.vendor = 0x104c; - card->cis.device = 0x9066; - card->cis.blksize = 512; - card->cis.max_dtr = 24000000; - card->ocr = 0x80; - } -} - -static struct omap2_hsmmc_info pandora_mmc3[] = { - { - .mmc = 3, - .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, - .init_card = pandora_wl1251_init_card, - }, - {} /* Terminator */ -}; - -static void __init pandora_wl1251_init(void) -{ - struct wl1251_platform_data pandora_wl1251_pdata; - int ret; - - memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata)); - - pandora_wl1251_pdata.power_gpio = -1; - - ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq"); - if (ret < 0) - goto fail; - - pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO); - if (pandora_wl1251_pdata.irq < 0) - goto fail_irq; - - pandora_wl1251_pdata.use_eeprom = true; - ret = wl1251_set_platform_data(&pandora_wl1251_pdata); - if (ret < 0) - goto fail_irq; - - return; - -fail_irq: - gpio_free(PANDORA_WIFI_IRQ_GPIO); -fail: - pr_err("wl1251 board initialisation failed\n"); -} - static void __init omap3_pandora_legacy_init(void) { platform_device_register(&pandora_backlight); - gpiod_add_lookup_table(&pandora_vwlan_gpiod_table); - platform_device_register(&pandora_vwlan_device); - omap_hsmmc_init(pandora_mmc3); - omap_hsmmc_late_init(pandora_mmc3); - pandora_wl1251_init(); } #endif /* CONFIG_ARCH_OMAP3 */ @@ -472,10 +357,14 @@ static void __init dra7x_evm_mmc_quirk(void) static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk) { + struct clk_hw *hw = __clk_get_hw(clk); struct clockdomain *clkdm = NULL; struct clk_hw_omap *hwclk; - hwclk = to_clk_hw_omap(__clk_get_hw(clk)); + hwclk = to_clk_hw_omap(hw); + if (!omap2_clk_is_hw_omap(hw)) + return NULL; + if (hwclk && hwclk->clkdm_name) clkdm = clkdm_lookup(hwclk->clkdm_name); @@ -638,6 +527,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = { OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL), OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0", &am35xx_emac_pdata), + OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call), /* McBSP modules with sidetone core */ #if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata), diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S index 67b763fea005..e3f34815c9da 100644 --- a/arch/arm/mach-tegra/reset-handler.S +++ b/arch/arm/mach-tegra/reset-handler.S @@ -44,16 +44,16 @@ ENTRY(tegra_resume) cmp r6, #TEGRA20 beq 1f @ Yes /* Clear the flow controller flags for this CPU. 
*/ - cpu_to_csr_reg r1, r0 + cpu_to_csr_reg r3, r0 mov32 r2, TEGRA_FLOW_CTRL_BASE - ldr r1, [r2, r1] + ldr r1, [r2, r3] /* Clear event & intr flag */ orr r1, r1, \ #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG movw r0, #0x3FFD @ enable, cluster_switch, immed, bitmaps @ & ext flags for CPU power mgnt bic r1, r1, r0 - str r1, [r2] + str r1, [r2, r3] 1: mov32 r9, 0xc09 diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S index b408fa56eb89..6922dd8d3e2d 100644 --- a/arch/arm/mach-tegra/sleep-tegra30.S +++ b/arch/arm/mach-tegra/sleep-tegra30.S @@ -370,6 +370,14 @@ _pll_m_c_x_done: pll_locked r1, r0, CLK_RESET_PLLC_BASE pll_locked r1, r0, CLK_RESET_PLLX_BASE + tegra_get_soc_id TEGRA_APB_MISC_BASE, r1 + cmp r1, #TEGRA30 + beq 1f + ldr r1, [r0, #CLK_RESET_PLLP_BASE] + bic r1, r1, #(1<<31) @ disable PllP bypass + str r1, [r0, #CLK_RESET_PLLP_BASE] +1: + mov32 r7, TEGRA_TMRUS_BASE ldr r1, [r7] add r1, r1, #LOCK_DELAY @@ -630,7 +638,10 @@ tegra30_switch_cpu_to_clk32k: str r0, [r4, #PMC_PLLP_WB0_OVERRIDE] /* disable PLLP, PLLA, PLLC and PLLX */ + tegra_get_soc_id TEGRA_APB_MISC_BASE, r1 + cmp r1, #TEGRA30 ldr r0, [r5, #CLK_RESET_PLLP_BASE] + orrne r0, r0, #(1 << 31) @ enable PllP bypass on fast cluster bic r0, r0, #(1 << 30) str r0, [r5, #CLK_RESET_PLLP_BASE] ldr r0, [r5, #CLK_RESET_PLLA_BASE] diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 354e0e7025ae..1da11bdb1dfb 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev) static int __init ve_spc_clk_init(void) { - int cpu; + int cpu, cluster; struct clk *clk; + bool init_opp_table[MAX_CLUSTERS] = { false }; if (!info) return 0; /* Continue only if SPC is initialised */ @@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void) continue; } + cluster = topology_physical_package_id(cpu_dev->id); + if (init_opp_table[cluster]) + continue; + if (ve_init_opp_table(cpu_dev)) pr_warn("failed to initialise cpu%d opp table\n", cpu); + else if (dev_pm_opp_set_sharing_cpus(cpu_dev, + topology_core_cpumask(cpu_dev->id))) + pr_warn("failed to mark OPPs shared for cpu%d\n", cpu); + else + init_opp_table[cluster] = true; } platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index db9247898300..287ef898a55e 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, unsigned long attrs) { - void *ret = dma_alloc_from_global_coherent(size, dma_handle); + void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle); /* * dma_alloc_from_global_coherent() may fail because: diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7d042d5c43e3..27576c7b836e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -221,7 +221,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops); static int __dma_supported(struct device *dev, u64 mask, bool warn) { - unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); + unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit); /* * Translate the device's DMA mask to a PFN limit. 
This diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index b4be3baa83d4..6f19ba53fd1f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -323,7 +323,7 @@ static inline void poison_init_mem(void *s, size_t count) *p++ = 0xe7fddef0; } -static inline void +static inline void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn) { struct page *start_pg, *end_pg; diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index 9a07916af8dd..a6554fdb56c5 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -65,6 +65,9 @@ static void cpu_v7_spectre_init(void) break; #ifdef CONFIG_ARM_PSCI + case ARM_CPU_PART_BRAHMA_B53: + /* Requires no workaround */ + break; default: /* Other ARM CPUs require no workaround */ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3f047afb982c..a3c45f6f97db 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -139,10 +139,13 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS \ if $(cc-option,-fpatchable-function-entry=2) select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD @@ -313,6 +316,7 @@ config KASAN_SHADOW_OFFSET default 0xffffffffffffffff source "arch/arm64/Kconfig.platforms" +source "kernel/tkernel/kpatch/Kconfig" menu "Kernel Features" @@ -1524,6 +1528,7 @@ endif config RELOCATABLE bool select ARCH_HAS_RELR + default y help This builds the kernel as a Position Independent Executable (PIE), which retains all relocation metadata required to relocate the diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 16d761475a86..fa2d6e200ae1 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -298,4 +298,14 @@ config ARCH_ZYNQMP help This enables support for Xilinx ZynqMP Family +config ARCH_PHYTIUM + bool "Phytium SOC Family" + select ARCH_MIGHT_HAVE_PC_SERIO + help + This enables support for the Phytium SoC family + +config FIX_OPTION_ROM + bool "FIX_OPTION_ROM" + help + This enables support for class storage endmenu diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 2c0238ce0551..1fbe24d4fdb6 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -95,6 +95,11 @@ ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) KBUILD_LDS_MODULE += $(srctree)/arch/arm64/kernel/module.lds endif +ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y) + KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY + CC_FLAGS_FTRACE := -fpatchable-function-entry=2 +endif + # Default value head-y := arch/arm64/kernel/head.o diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index 1f012c506434..cd3414898d10 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -16,7 +16,7 @@ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S -targets := Image Image.gz +targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts index 96ab0227e82d..121e6cc4849b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts @@ -15,7 +15,7 @@ pinctrl-names = 
"default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <®_dcdc1>; - vqmmc-supply = <®_dcdc1>; + vqmmc-supply = <®_eldo1>; bus-width = <8>; non-removable; cap-mmc-hw-reset; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts index 01a9a52edae4..393c1948a495 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -140,7 +140,7 @@ &mmc1 { pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; - vmmc-supply = <®_aldo2>; + vmmc-supply = <®_dcdc1>; vqmmc-supply = <®_dldo4>; mmc-pwrseq = <&wifi_pwrseq>; bus-width = <4>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 70f4cce6be43..ba41c1b85887 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -142,6 +142,15 @@ clock-output-names = "ext-osc32k"; }; + pmu { + compatible = "arm,cortex-a53-pmu"; + interrupts = , + , + , + ; + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; + }; + psci { compatible = "arm,psci-0.2"; method = "smc"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi index f002a496d7cb..1d34e3eefda3 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi @@ -54,21 +54,21 @@ enable-method = "psci"; }; - cpu@1 { + cpu1: cpu@1 { compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <1>; enable-method = "psci"; }; - cpu@2 { + cpu2: cpu@2 { compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <2>; enable-method = "psci"; }; - cpu@3 { + cpu3: cpu@3 { compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <3>; @@ -76,6 +76,16 @@ }; }; + pmu { + compatible = "arm,cortex-a53-pmu", + "arm,armv8-pmuv3"; + interrupts = , + , + , + ; + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; + }; + psci { compatible = "arm,psci-0.2"; method = "smc"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index 0d5ea19336a1..d19253891672 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -70,6 +70,16 @@ clock-output-names = "ext_osc32k"; }; + pmu { + compatible = "arm,cortex-a53-pmu", + "arm,armv8-pmuv3"; + interrupts = , + , + , + ; + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; + }; + psci { compatible = "arm,psci-0.2"; method = "smc"; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 144a2c19ac02..d1fc9c2055f4 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -61,10 +61,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 120 8>, - <0 121 8>, - <0 122 8>, - <0 123 8>; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index 82919b106010..bb4a2acb9970 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -1162,7 +1162,7 @@ toddr_a: audio-controller@100 { compatible = "amlogic,axg-toddr"; - reg = <0x0 0x100 0x0 0x1c>; + reg = <0x0 0x100 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "TODDR_A"; interrupts = ; @@ -1173,7 +1173,7 @@ toddr_b: audio-controller@140 { 
compatible = "amlogic,axg-toddr"; - reg = <0x0 0x140 0x0 0x1c>; + reg = <0x0 0x140 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "TODDR_B"; interrupts = ; @@ -1184,7 +1184,7 @@ toddr_c: audio-controller@180 { compatible = "amlogic,axg-toddr"; - reg = <0x0 0x180 0x0 0x1c>; + reg = <0x0 0x180 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "TODDR_C"; interrupts = ; @@ -1195,7 +1195,7 @@ frddr_a: audio-controller@1c0 { compatible = "amlogic,axg-frddr"; - reg = <0x0 0x1c0 0x0 0x1c>; + reg = <0x0 0x1c0 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "FRDDR_A"; interrupts = ; @@ -1206,7 +1206,7 @@ frddr_b: audio-controller@200 { compatible = "amlogic,axg-frddr"; - reg = <0x0 0x200 0x0 0x1c>; + reg = <0x0 0x200 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "FRDDR_B"; interrupts = ; @@ -1217,7 +1217,7 @@ frddr_c: audio-controller@240 { compatible = "amlogic,axg-frddr"; - reg = <0x0 0x240 0x0 0x1c>; + reg = <0x0 0x240 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "FRDDR_C"; interrupts = ; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 3f39e020f74e..0ee8a369c547 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -1509,7 +1509,7 @@ toddr_a: audio-controller@100 { compatible = "amlogic,g12a-toddr", "amlogic,axg-toddr"; - reg = <0x0 0x100 0x0 0x1c>; + reg = <0x0 0x100 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "TODDR_A"; interrupts = ; @@ -1521,7 +1521,7 @@ toddr_b: audio-controller@140 { compatible = "amlogic,g12a-toddr", "amlogic,axg-toddr"; - reg = <0x0 0x140 0x0 0x1c>; + reg = <0x0 0x140 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "TODDR_B"; interrupts = ; @@ -1533,7 +1533,7 @@ toddr_c: audio-controller@180 { compatible = "amlogic,g12a-toddr", "amlogic,axg-toddr"; - reg = <0x0 0x180 0x0 0x1c>; + reg = <0x0 0x180 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "TODDR_C"; interrupts = ; @@ -1545,7 +1545,7 @@ frddr_a: audio-controller@1c0 { compatible = "amlogic,g12a-frddr", "amlogic,axg-frddr"; - reg = <0x0 0x1c0 0x0 0x1c>; + reg = <0x0 0x1c0 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "FRDDR_A"; interrupts = ; @@ -1557,7 +1557,7 @@ frddr_b: audio-controller@200 { compatible = "amlogic,g12a-frddr", "amlogic,axg-frddr"; - reg = <0x0 0x200 0x0 0x1c>; + reg = <0x0 0x200 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "FRDDR_B"; interrupts = ; @@ -1569,7 +1569,7 @@ frddr_c: audio-controller@240 { compatible = "amlogic,g12a-frddr", "amlogic,axg-frddr"; - reg = <0x0 0x240 0x0 0x1c>; + reg = <0x0 0x240 0x0 0x2c>; #sound-dai-cells = <0>; sound-name-prefix = "FRDDR_C"; interrupts = ; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 6039adda12ee..b0b12e389835 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -296,7 +296,7 @@ }; &usb0_phy { - status = "okay"; + status = "disabled"; phy-supply = <&usb_otg_pwr>; }; @@ -306,7 +306,7 @@ }; &usb0 { - status = "okay"; + status = "disabled"; }; &usb1 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts index 2a5cd303123d..440bc23c7342 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts @@ -33,11 +33,9 @@ gpio-keys-polled { 
compatible = "gpio-keys-polled"; - #address-cells = <1>; - #size-cells = <0>; poll-interval = <100>; - button@0 { + power-button { label = "power"; linux,code = ; gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>; @@ -192,6 +190,9 @@ bluetooth { compatible = "brcm,bcm43438-bt"; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; + max-speed = <2000000>; + clocks = <&wifi32k>; + clock-names = "lpo"; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index f25ddd18a607..3f43716d5c45 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -327,7 +327,7 @@ #size-cells = <0>; bus-width = <4>; - max-frequency = <50000000>; + max-frequency = <60000000>; non-removable; disable-wp; @@ -409,6 +409,9 @@ bluetooth { compatible = "brcm,bcm43438-bt"; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; + max-speed = <2000000>; + clocks = <&wifi32k>; + clock-names = "lpo"; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index 3435aaa4e8db..29ac78ddc057 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -361,6 +361,9 @@ bluetooth { compatible = "brcm,bcm43438-bt"; + interrupt-parent = <&gpio_intc>; + interrupts = <95 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host-wakeup"; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; max-speed = <2000000>; clocks = <&wifi32k>; diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts index 62ab0d54ff71..335fff762451 100644 --- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts +++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts @@ -161,10 +161,10 @@ bus-range = <0x0 0x1>; reg = <0x0 0x40000000 0x0 0x10000000>; ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>; - interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; msi-map = <0x0 &its 0x0 0x10000>; iommu-map = <0x0 &smmu 0x0 0x10000>; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 26a039a028b8..8c11660bbe40 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -6,7 +6,6 @@ /* * Devices shared by all Juno boards */ - dma-ranges = <0 0 0 0 0x100 0>; memtimer: timer@2a810000 { compatible = "arm,armv7-timer-mem"; diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi index e5e265dfa902..2870b5eeb198 100644 --- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi +++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi @@ -8,10 +8,10 @@ */ / { /* SoC fixed clocks */ - soc_uartclk: refclk7273800hz { + soc_uartclk: refclk7372800hz { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <7273800>; + clock-frequency = <7372800>; clock-output-names = "juno:uartclk"; }; diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index a76f620f7f35..a5f8752f607b 100644 --- 
a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -18,8 +18,8 @@ / { compatible = "samsung,exynos5433"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; interrupt-parent = <&gic>; @@ -311,7 +311,7 @@ compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; - ranges; + ranges = <0x0 0x0 0x0 0x18000000>; chipid@10000000 { compatible = "samsung,exynos4210-chipid"; diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index bcb9d8cee267..0821489a874d 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -12,8 +12,8 @@ / { compatible = "samsung,exynos7"; interrupt-parent = <&gic>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; aliases { pinctrl0 = &pinctrl_alive; @@ -98,7 +98,7 @@ compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; - ranges; + ranges = <0 0 0 0x18000000>; chipid@10000000 { compatible = "samsung,exynos4210-chipid"; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 72b9a75976a1..9589b15693d6 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -102,7 +102,7 @@ reboot { compatible ="syscon-reboot"; - regmap = <&dcfg>; + regmap = <&rst>; offset = <0xb0>; mask = <0x02>; }; @@ -158,7 +158,13 @@ dcfg: syscon@1e00000 { compatible = "fsl,ls1028a-dcfg", "syscon"; reg = <0x0 0x1e00000 0x0 0x10000>; - big-endian; + little-endian; + }; + + rst: syscon@1e60000 { + compatible = "syscon"; + reg = <0x0 0x1e60000 0x0 0x10000>; + little-endian; }; scfg: syscon@1fc0000 { @@ -567,7 +573,7 @@ 0x00010004 0x0000003d 0x00010005 0x00000045 0x00010006 0x0000004d - 0x00010007 0x00000045 + 0x00010007 0x00000055 0x00010008 0x0000005e 0x00010009 0x00000066 0x0001000a 0x0000006e diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi index 6082ae022136..d237162a8744 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi @@ -20,6 +20,8 @@ }; &fman0 { + fsl,erratum-a050385; + /* these aliases provide the FMan ports mapping */ enet0: ethernet@e0000 { }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts index 4223a2352d45..dde50c88f5e3 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts @@ -119,12 +119,12 @@ ethernet@e4000 { phy-handle = <&rgmii_phy1>; - phy-connection-type = "rgmii-txid"; + phy-connection-type = "rgmii-id"; }; ethernet@e6000 { phy-handle = <&rgmii_phy2>; - phy-connection-type = "rgmii-txid"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts index 6a6514d0e5a9..274339759114 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts @@ -127,12 +127,12 @@ &fman0 { ethernet@e4000 { phy-handle = <&rgmii_phy1>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e6000 { phy-handle = <&rgmii_phy2>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts 
b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts index f7a15f3904c2..13137451b438 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts @@ -62,6 +62,8 @@ cpudai: simple-audio-card,cpu { sound-dai = <&sai3>; + dai-tdm-slot-num = <2>; + dai-tdm-slot-width = <32>; }; simple-audio-card,codec { diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index 23c8fad7932b..fde1849d36ca 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -479,14 +479,18 @@ <&clk IMX8MM_CLK_AUDIO_AHB>, <&clk IMX8MM_CLK_IPG_AUDIO_ROOT>, <&clk IMX8MM_SYS_PLL3>, - <&clk IMX8MM_VIDEO_PLL1>; + <&clk IMX8MM_VIDEO_PLL1>, + <&clk IMX8MM_AUDIO_PLL1>, + <&clk IMX8MM_AUDIO_PLL2>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL3_OUT>, <&clk IMX8MM_SYS_PLL1_800M>; assigned-clock-rates = <0>, <400000000>, <400000000>, <750000000>, - <594000000>; + <594000000>, + <393216000>, + <361267200>; }; src: reset-controller@30390000 { @@ -741,7 +745,7 @@ reg = <0x30bd0000 0x10000>; interrupts = ; clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>, - <&clk IMX8MM_CLK_SDMA1_ROOT>; + <&clk IMX8MM_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts index 683a11035643..98cfe67b7db7 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts @@ -421,7 +421,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_imu>; interrupt-parent = <&gpio3>; - interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <®_3v3_p>; vddio-supply = <®_3v3_p>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts index 19468058e6ae..8148196902dd 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts +++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts @@ -52,11 +52,6 @@ compatible = "ethernet-phy-ieee802.3-c22"; reg = <0>; }; - - ethphy1: ethernet-phy@1 { - compatible = "ethernet-phy-ieee802.3-c22"; - reg = <1>; - }; }; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 36abc25320a8..d911d38877e5 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -47,10 +47,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 120 8>, - <0 121 8>, - <0 122 8>, - <0 123 8>; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, @@ -82,7 +82,7 @@ ranges = <0 0 0 0xffffffff>; gmac0: ethernet@ff800000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff800000 0x2000>; interrupts = <0 90 4>; interrupt-names = "macirq"; @@ -97,7 +97,7 @@ }; gmac1: ethernet@ff802000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff802000 0x2000>; interrupts = <0 91 4>; interrupt-names = "macirq"; @@ -112,7 +112,7 @@ }; gmac2: ethernet@ff804000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff804000 
0x2000>; interrupts = <0 92 4>; interrupt-names = "macirq"; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts index bd4aab6092e0..e31813a4f972 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts @@ -143,6 +143,7 @@ phy-mode = "sgmii"; status = "okay"; managed = "in-band-status"; + phys = <&comphy1 0>; sfp = <&sfp_eth0>; }; @@ -150,11 +151,14 @@ phy-mode = "sgmii"; status = "okay"; managed = "in-band-status"; + phys = <&comphy0 1>; sfp = <&sfp_eth1>; }; &usb3 { status = "okay"; + phys = <&usb2_utmi_otg_phy>; + phy-names = "usb2-utmi-otg-phy"; }; &uart0 { diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index bd881497b872..a211a046b2f2 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -408,6 +408,8 @@ reg = <5>; label = "cpu"; ethernet = <&cp1_eth2>; + phy-mode = "2500base-x"; + managed = "in-band-status"; }; }; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi index 9024a2d9db07..62ae016ee6aa 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi @@ -21,6 +21,7 @@ reg = <0x000>; enable-method = "psci"; #cooling-cells = <2>; + clocks = <&cpu_clk 0>; }; cpu1: cpu@1 { device_type = "cpu"; @@ -28,6 +29,7 @@ reg = <0x001>; enable-method = "psci"; #cooling-cells = <2>; + clocks = <&cpu_clk 0>; }; }; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi index d81944902650..8259fc8f86f2 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi @@ -438,10 +438,10 @@ CP110_LABEL(nand_controller): nand@720000 { /* - * Due to the limitation of the pins available - * this controller is only usable on the CPM - * for A7K and on the CPS for A8K. - */ + * Due to the limitation of the pins available + * this controller is only usable on the CPM + * for A7K and on the CPS for A8K. 
+ */ compatible = "marvell,armada-8k-nand-controller", "marvell,armada370-nand-controller"; reg = <0x720000 0x54>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi index 4c38426a6969..02909a48dfcd 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi @@ -309,9 +309,8 @@ regulator-name = "VDD_12V"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; - gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>; + gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_HIGH>; regulator-boot-on; - enable-active-low; }; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi index a7dc319214a4..b0095072bc28 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi @@ -1612,7 +1612,7 @@ regulator-name = "VDD_HDMI_5V0"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; - gpio = <&exp1 12 GPIO_ACTIVE_LOW>; + gpio = <&exp1 12 GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&vdd_5v0_sys>; }; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 04ad2fb22b9a..dba3488492f1 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -623,6 +623,8 @@ l21 { regulator-min-microvolt = <2950000>; regulator-max-microvolt = <2950000>; + regulator-allow-set-load; + regulator-system-load = <200000>; }; l22 { regulator-min-microvolt = <3300000>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 87f4d9c1b0d4..fbb8ce78f95b 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -1598,6 +1598,8 @@ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>; phys = <&hsusb_phy2>; phy-names = "usb2-phy"; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; }; }; @@ -1628,6 +1630,8 @@ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>; phys = <&hsusb_phy1>, <&ssusb_phy_0>; phy-names = "usb2-phy", "usb3-phy"; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi index 9682d4dd7496..1bae90705746 100644 --- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi @@ -23,6 +23,43 @@ }; }; +/* + * The laptop FW does not appear to support the retention state as it is + * not advertised as enabled in ACPI, and enabling it in DT can cause boot + * hangs. 
+ */ +&CPU0 { + cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; +}; + +&CPU1 { + cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; +}; + +&CPU2 { + cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; +}; + +&CPU3 { + cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; +}; + +&CPU4 { + cpu-idle-states = <&BIG_CPU_SLEEP_1>; +}; + +&CPU5 { + cpu-idle-states = <&BIG_CPU_SLEEP_1>; +}; + +&CPU6 { + cpu-idle-states = <&BIG_CPU_SLEEP_1>; +}; + +&CPU7 { + cpu-idle-states = <&BIG_CPU_SLEEP_1>; +}; + &qusb2phy { status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi index 108667ce4f31..8d15572d18e6 100644 --- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi @@ -27,6 +27,66 @@ status = "okay"; }; +&etf { + status = "okay"; +}; + +&etm1 { + status = "okay"; +}; + +&etm2 { + status = "okay"; +}; + +&etm3 { + status = "okay"; +}; + +&etm4 { + status = "okay"; +}; + +&etm5 { + status = "okay"; +}; + +&etm6 { + status = "okay"; +}; + +&etm7 { + status = "okay"; +}; + +&etm8 { + status = "okay"; +}; + +&etr { + status = "okay"; +}; + +&funnel1 { + status = "okay"; +}; + +&funnel2 { + status = "okay"; +}; + +&funnel3 { + status = "okay"; +}; + +&funnel4 { + status = "okay"; +}; + +&funnel5 { + status = "okay"; +}; + &pm8005_lsid1 { pm8005-regulators { compatible = "qcom,pm8005-regulators"; @@ -51,6 +111,10 @@ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; }; +&replicator1 { + status = "okay"; +}; + &rpm_requests { pm8998-regulators { compatible = "qcom,rpm-pm8998-regulators"; @@ -249,6 +313,10 @@ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; }; +&stm { + status = "okay"; +}; + &ufshc { vcc-supply = <&vreg_l20a_2p95>; vccq-supply = <&vreg_l26a_1p2>; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index c6f81431983e..ccd535edbf4e 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -985,7 +985,7 @@ tcsr_mutex_regs: syscon@1f40000 { compatible = "syscon"; - reg = <0x01f40000 0x20000>; + reg = <0x01f40000 0x40000>; }; tlmm: pinctrl@3400000 { @@ -998,11 +998,12 @@ #interrupt-cells = <0x2>; }; - stm@6002000 { + stm: stm@6002000 { compatible = "arm,coresight-stm", "arm,primecell"; reg = <0x06002000 0x1000>, <0x16280000 0x180000>; reg-names = "stm-base", "stm-data-base"; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1016,9 +1017,10 @@ }; }; - funnel@6041000 { + funnel1: funnel@6041000 { compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; reg = <0x06041000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1045,9 +1047,10 @@ }; }; - funnel@6042000 { + funnel2: funnel@6042000 { compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; reg = <0x06042000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1075,9 +1078,10 @@ }; }; - funnel@6045000 { + funnel3: funnel@6045000 { compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; reg = <0x06045000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1113,9 +1117,10 @@ }; }; - replicator@6046000 { + replicator1: replicator@6046000 { compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; reg = <0x06046000 0x1000>; + status = "disabled"; 
clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1137,9 +1142,10 @@ }; }; - etf@6047000 { + etf: etf@6047000 { compatible = "arm,coresight-tmc", "arm,primecell"; reg = <0x06047000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1163,9 +1169,10 @@ }; }; - etr@6048000 { + etr: etr@6048000 { compatible = "arm,coresight-tmc", "arm,primecell"; reg = <0x06048000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1181,9 +1188,10 @@ }; }; - etm@7840000 { + etm1: etm@7840000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07840000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1200,9 +1208,10 @@ }; }; - etm@7940000 { + etm2: etm@7940000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07940000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1219,9 +1228,10 @@ }; }; - etm@7a40000 { + etm3: etm@7a40000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07a40000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1238,9 +1248,10 @@ }; }; - etm@7b40000 { + etm4: etm@7b40000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07b40000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1257,9 +1268,10 @@ }; }; - funnel@7b60000 { /* APSS Funnel */ + funnel4: funnel@7b60000 { /* APSS Funnel */ compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07b60000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1343,9 +1355,10 @@ }; }; - funnel@7b70000 { + funnel5: funnel@7b70000 { compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; reg = <0x07b70000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1369,9 +1382,10 @@ }; }; - etm@7c40000 { + etm5: etm@7c40000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07c40000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1385,9 +1399,10 @@ }; }; - etm@7d40000 { + etm6: etm@7d40000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07d40000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1401,9 +1416,10 @@ }; }; - etm@7e40000 { + etm7: etm@7e40000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07e40000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; @@ -1417,9 +1433,10 @@ }; }; - etm@7f40000 { + etm8: etm@7f40000 { compatible = "arm,coresight-etm4x", "arm,primecell"; reg = <0x07f40000 0x1000>; + status = "disabled"; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; clock-names = "apb_pclk", "atclk"; diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi index 501a7330dbc8..522d3ef72df5 100644 --- 
a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi @@ -73,6 +73,7 @@ regulator-always-on; regulator-boot-on; regulator-name = "vdd_apc"; + regulator-initial-mode = <1>; regulator-min-microvolt = <1048000>; regulator-max-microvolt = <1384000>; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi index 34881c0113cb..99a28d64ee62 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi @@ -165,6 +165,8 @@ /delete-node/ &venus_mem; /delete-node/ &cdsp_mem; /delete-node/ &cdsp_pas; +/delete-node/ &zap_shader; +/delete-node/ &gpu_mem; /* Increase the size from 120 MB to 128 MB */ &mpss_region { diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index f5a85caff1a3..751651a6cd81 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -517,6 +517,8 @@ vdd-1.8-xo-supply = <&vreg_l7a_1p8>; vdd-1.3-rfa-supply = <&vreg_l17a_1p3>; vdd-3.3-ch0-supply = <&vreg_l25a_3p3>; + + qcom,snoc-host-cap-8bit-quirk; }; /* PINCTRL - additions to nodes defined in sdm845.dtsi */ diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index f406a4340b05..2287354fef86 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2824,7 +2824,7 @@ qcom,gmu = <&gmu>; - zap-shader { + zap_shader: zap-shader { memory-region = <&gpu_mem>; }; diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi index 3e376d29a730..69585d6e3653 100644 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi @@ -86,7 +86,7 @@ label = "rcar-sound"; - dais = <&rsnd_port0>; + dais = <&rsnd_port>; }; vbus0_usb2: regulator-vbus0-usb2 { @@ -191,7 +191,7 @@ port@2 { reg = <2>; dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint0>; + remote-endpoint = <&rsnd_endpoint>; }; }; }; @@ -327,17 +327,15 @@ /* Single DAI */ #sound-dai-cells = <0>; - ports { - rsnd_port0: port@0 { - rsnd_endpoint0: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; + rsnd_port: port { + rsnd_endpoint: endpoint { + remote-endpoint = <&dw_hdmi0_snd_in>; - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint0>; - frame-master = <&rsnd_endpoint0>; + dai-format = "i2s"; + bitclock-master = <&rsnd_endpoint>; + frame-master = <&rsnd_endpoint>; - playback = <&ssi2>; - }; + playback = <&ssi2>; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 06c7c849c8ab..c2a7ec3fc209 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi @@ -1726,17 +1726,6 @@ "ssi.1", "ssi.0"; status = "disabled"; - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - }; - port@1 { - reg = <1>; - }; - }; - rcar_sound,ctu { ctu00: ctu-0 { }; ctu01: ctu-1 { }; diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi index 0cd3b376635d..4952981bb6ba 100644 --- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi @@ -652,7 +652,7 @@ }; pwm3: pwm@e6e33000 { - compatible = "renesas,pwm-r8a7790", "renesas,pwm-rcar"; + compatible = "renesas,pwm-r8a77970", "renesas,pwm-rcar"; reg = <0 0xe6e33000 0 8>; #pwm-cells = <2>; clocks = <&cpg CPG_MOD 523>; diff --git 
a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts index b38f9d442fc0..e6d700f8c194 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts @@ -636,7 +636,6 @@ /* audio_clkout0/1/2/3 */ #clock-cells = <1>; clock-frequency = <12288000 11289600>; - clkout-lr-synchronous; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi index eb992d60e6ba..9e09909a510a 100644 --- a/arch/arm64/boot/dts/rockchip/px30.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30.dtsi @@ -768,7 +768,7 @@ interrupts = ; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu-drv", "ciu-sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; max-frequency = <150000000>; pinctrl-names = "default"; @@ -783,7 +783,7 @@ interrupts = ; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu-drv", "ciu-sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; max-frequency = <150000000>; pinctrl-names = "default"; @@ -798,7 +798,7 @@ interrupts = ; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu-drv", "ciu-sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; max-frequency = <150000000>; power-domains = <&power PX30_PD_MMC_NAND>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index c706db0ee9ec..76f5db696009 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts @@ -669,9 +669,12 @@ vqmmc-supply = <&vcc1v8_s3>; /* IO line */ vmmc-supply = <&vcc_sdio>; /* card's power */ + #address-cells = <1>; + #size-cells = <0>; status = "okay"; brcmf: wifi@1 { + reg = <1>; compatible = "brcm,bcm4329-fmac"; interrupt-parent = <&gpio0>; interrupts = ; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi index 4944d78a0a1c..e87a04477440 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi @@ -654,9 +654,12 @@ sd-uhs-sdr104; vqmmc-supply = <&vcc1v8_s3>; vmmc-supply = <&vccio_sd>; + #address-cells = <1>; + #size-cells = <0>; status = "okay"; brcmf: wifi@1 { + reg = <1>; compatible = "brcm,bcm4329-fmac"; interrupt-parent = <&gpio0>; interrupts = ; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts index 2a127985ab17..d3ed8e5e770f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts @@ -94,33 +94,6 @@ }; }; -&gpu_thermal { - trips { - gpu_warm: gpu_warm { - temperature = <55000>; - hysteresis = <2000>; - type = "active"; - }; - - gpu_hot: gpu_hot { - temperature = <65000>; - hysteresis = <2000>; - type = "active"; - }; - }; - cooling-maps { - map1 { - trip = <&gpu_warm>; - cooling-device = <&fan THERMAL_NO_LIMIT 1>; - }; - - map2 { - trip = <&gpu_hot>; - cooling-device = <&fan 2 THERMAL_NO_LIMIT>; - }; - }; -}; - &pinctrl { ir { ir_rx: ir-rx { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts index 0541dfce924d..9c659f3115c8 100644 ---
a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts @@ -648,9 +648,12 @@ pinctrl-names = "default"; pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>; sd-uhs-sdr104; + #address-cells = <1>; + #size-cells = <0>; status = "okay"; brcmf: wifi@1 { + reg = <1>; compatible = "brcm,bcm4329-fmac"; interrupt-parent = <&gpio0>; interrupts = ; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 698ef9a1d5b7..96445111e398 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -43,6 +43,7 @@ smmu0: smmu@36600000 { compatible = "arm,smmu-v3"; reg = <0x0 0x36600000 0x0 0x100000>; + power-domains = <&k3_pds 229 TI_SCI_PD_EXCLUSIVE>; interrupt-parent = <&gic500>; interrupts = , ; diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index ea873b8904c4..e3e27349a9fe 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -384,7 +384,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, goto xts_tail; kernel_neon_end(); - skcipher_walk_done(&walk, nbytes); + err = skcipher_walk_done(&walk, nbytes); } if (err || likely(!tail)) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 70b1469783f9..24bc0a3f26e2 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -261,7 +261,7 @@ static int ghash_setkey(struct crypto_shash *tfm, static struct shash_alg ghash_alg[] = {{ .base.cra_name = "ghash", .base.cra_driver_name = "ghash-neon", - .base.cra_priority = 100, + .base.cra_priority = 150, .base.cra_blocksize = GHASH_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ghash_key), .base.cra_module = THIS_MODULE, diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index b9f8d787eea9..5e5dc05d63a0 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif -#define ALTINSTR_ENTRY(feature,cb) \ +#define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ - " .else\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +#define ALTINSTR_ENTRY_CB(feature, cb) \ + " .word 661b - .\n" /* label */ \ " .word " __stringify(cb) "- .\n" /* callback */ \ - " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { } * * Alternatives with callbacks do not generate replacement instructions. 
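 *
 * As a usage sketch (modelled on ARM64_LSE_ATOMIC_INSN in asm/lse.h, which
 * is updated later in this patch; llsc_code and lse_code are placeholders):
 *
 *	asm volatile(ALTERNATIVE(llsc_code, lse_code, ARM64_HAS_LSE_ATOMICS));
 *
 * If the boot-time capability check sets ARM64_HAS_LSE_ATOMICS, the
 * patching code overwrites the LL/SC sequence with the LSE one in place.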
*/ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature,cb) \ + ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - " .if " __stringify(cb) " == 0\n" \ ".pushsection .altinstr_replacement, \"a\"\n" \ "663:\n\t" \ newinstr "\n" \ @@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { } ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ ".org . - (662b-661b) + (664b-663b)\n" \ - ".else\n\t" \ + ".endif\n" + +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY_CB(feature, cb) \ + ".popsection\n" \ "663:\n\t" \ "664:\n\t" \ - ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) #else #include @@ -211,7 +221,7 @@ alternative_endif .macro user_alt, label, oldinstr, newinstr, cond 9999: alternative_insn "\oldinstr", "\newinstr", \cond - _ASM_EXTABLE 9999b, \label + _asm_extable 9999b, \label .endm /* diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 574808b9df4c..da3280f639cd 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -14,6 +14,7 @@ static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op " %w[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ @@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd) static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op #mb " %w[i], %w[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ @@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ u32 tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ " add %w[i], %w[i], %w[tmp]" \ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ @@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") static inline void __lse_atomic_and(int i, atomic_t *v) { asm volatile( + __LSE_PREAMBLE " mvn %w[i], %w[i]\n" " stclr %w[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v) static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " mvn %w[i], %w[i]\n" \ " ldclr" #mb " %w[i], %w[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory") static inline void __lse_atomic_sub(int i, atomic_t *v) { asm volatile( + __LSE_PREAMBLE " neg %w[i], %w[i]\n" " stadd %w[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ u32 tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ " add %w[i], %w[i], %w[tmp]" \ @@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") 
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory") static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op " %[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ @@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd) static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op #mb " %[i], %[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ @@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ " add %[i], %[i], %x[tmp]" \ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ @@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { asm volatile( + __LSE_PREAMBLE " mvn %[i], %[i]\n" " stclr %[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v) static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " mvn %[i], %[i]\n" \ " ldclr" #mb " %[i], %[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) { asm volatile( + __LSE_PREAMBLE " neg %[i], %[i]\n" " stadd %[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ " add %[i], %[i], %x[tmp]" \ @@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile( + __LSE_PREAMBLE "1: ldr %x[tmp], %[v]\n" " subs %[ret], %x[tmp], #1\n" " b.lt 2f\n" @@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " mov %" #w "[tmp], %" #w "[old]\n" \ " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \ " mov %" #w "[ret], %" #w "[tmp]" \ @@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ \ asm volatile( \ + __LSE_PREAMBLE \ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ " eor %[old1], %[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 063c964af705..48bfbf70dbb0 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -36,7 +36,7 @@ static inline void local_daif_mask(void) trace_hardirqs_off(); } -static inline unsigned long local_daif_save(void) +static inline unsigned long local_daif_save_flags(void) { unsigned long flags; @@ -48,6 +48,15 @@ static inline unsigned long local_daif_save(void) flags |= PSR_I_BIT; } + return flags; +} + 
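+/*
+ * A descriptive note on the split above and below: local_daif_save_flags()
+ * only computes and returns the flags that would be saved, leaving the DAIF
+ * state untouched, while local_daif_save() also masks all exceptions via
+ * local_daif_mask().
+ */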
+static inline unsigned long local_daif_save(void) +{ + unsigned long flags; + + flags = local_daif_save_flags(); + local_daif_mask(); return flags; diff --git a/arch/arm64/include/asm/fb.h b/arch/arm64/include/asm/fb.h index bdc735ee1f67..d48be330f2e2 100644 --- a/arch/arm64/include/asm/fb.h +++ b/arch/arm64/include/asm/fb.h @@ -12,7 +12,8 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, unsigned long off) { - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + //vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + vma->vm_page_prot = pgprot_device(vma->vm_page_prot); } static inline int fb_is_primary_device(struct fb_info *info) diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index d48667b04c41..91fa4baa1a93 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -11,9 +11,20 @@ #include #define HAVE_FUNCTION_GRAPH_FP_TEST + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#else #define MCOUNT_ADDR ((unsigned long)_mcount) +#endif + +/* The BL at the callsite's adjusted rec->ip */ #define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE +#define FTRACE_PLT_IDX 0 +#define FTRACE_REGS_PLT_IDX 1 +#define NR_FTRACE_PLTS 2 + /* * Currently, gcc tends to save the link register after the local variables * on the stack. This causes the max stack tracer to report the function @@ -43,6 +54,12 @@ extern void return_to_handler(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { + /* + * Adjust addr to point at the BL in the callsite. + * See ftrace_init_nop() for the callsite sequence. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + return addr + AARCH64_INSN_SIZE; /* * addr is the address of the mcount call instruction. * recordmcount does the necessary offset calculation. @@ -50,6 +67,12 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +struct dyn_ftrace; +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop +#endif + #define ftrace_return_address(n) return_address(n) /* diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 39e7780bedd6..bb313dde58a4 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -440,6 +440,9 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, int shift, enum aarch64_insn_variant variant, enum aarch64_insn_logic_type type); +u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_variant variant); u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type, enum aarch64_insn_variant variant, enum aarch64_insn_register Rn, diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d69c1efc63e7..6ff84f1f3b4c 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -204,6 +204,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v) vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v; } +/* + * The layout of SPSR for an AArch32 state is different when observed from an + * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32 + * view given an AArch64 view. 
+ * + * In ARM DDI 0487E.a see: + * + * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426 + * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256 + * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280 + * + * Which show the following differences: + * + * | Bit | AA64 | AA32 | Notes | + * +-----+------+------+-----------------------------| + * | 24 | DIT | J | J is RES0 in ARMv8 | + * | 21 | SS | DIT | SS doesn't exist in AArch32 | + * + * ... and all other bits are (currently) common. + */ +static inline unsigned long host_spsr_to_spsr32(unsigned long spsr) +{ + const unsigned long overlap = BIT(24) | BIT(21); + unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT); + + spsr &= ~overlap; + + spsr |= dit << 21; + + return spsr; +} + static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) { u32 mode; @@ -263,6 +295,11 @@ static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); } +static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF); +} + static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) { return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h index 02b5c48fd467..b204501a0c39 100644 --- a/arch/arm64/include/asm/kvm_mmio.h +++ b/arch/arm64/include/asm/kvm_mmio.h @@ -10,13 +10,11 @@ #include #include -/* - * This is annoying. The mmio code requires this, even if we don't - * need any decoding. To be fixed. - */ struct kvm_decode { unsigned long rt; bool sign_extend; + /* Width of the register accessed by the faulting instruction is 64 bits */ + bool sixty_four; }; void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 80b388278149..5de132100b6d 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -6,6 +6,8 @@ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#define __LSE_PREAMBLE ".arch_extension lse\n" + #include #include #include @@ -14,8 +16,6 @@ #include #include -__asm__(".arch_extension lse"); - extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; @@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void) /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ - ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) + ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS) #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ #include diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index c23c47360664..95362e37555a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -21,7 +21,11 @@ * Size of the PCI I/O space. This must remain a power of two so that * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
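 *
 * A worked example (assuming IO_SPACE_LIMIT is defined as PCI_IO_SIZE - 1):
 * with PCI_IO_SIZE = SZ_16M the mask is 0x00ffffff, and with SZ_256M it is
 * 0x0fffffff; any power-of-two size keeps the limit a contiguous low-bit
 * mask.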
*/ +#ifdef CONFIG_ARCH_PHYTIUM +#define PCI_IO_SIZE SZ_256M +#else #define PCI_IO_SIZE SZ_16M +#endif /* * VMEMMAP_SIZE - allows the whole linear region to be covered by @@ -52,9 +56,13 @@ #define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_VADDR (BPF_JIT_REGION_END) -#define MODULES_VSIZE (SZ_128M) +#define MODULES_VSIZE (SZ_1G) #define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M) +#ifdef CONFIG_ARCH_PHYTIUM +#define PCI_IO_END (VMEMMAP_START - SZ_2G) +#else #define PCI_IO_END (VMEMMAP_START - SZ_2M) +#endif #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) @@ -219,7 +227,7 @@ static inline unsigned long kaslr_offset(void) ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) #define untagged_addr(addr) ({ \ - u64 __addr = (__force u64)addr; \ + u64 __addr = (__force u64)(addr); \ __addr &= __untagged_addr(__addr); \ (__force __typeof__(addr))__addr; \ }) diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index f80e13cbf8ec..9af3316e6071 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -21,13 +21,15 @@ struct mod_arch_specific { struct mod_plt_sec init; /* for CONFIG_DYNAMIC_FTRACE */ - struct plt_entry *ftrace_trampoline; + struct plt_entry *ftrace_trampolines; }; #endif u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, void *loc, const Elf64_Rela *rela, Elf64_Sym *sym); +u64 module_emit_plt_entry_kpatch(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, unsigned long val); u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, void *loc, u64 val); diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 8dc6c5cdabe6..baf52baaa2a5 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -85,13 +85,12 @@ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) -#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_EXECONLY +#define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -100,7 +99,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECONLY +#define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 565aa45ef134..13ebe2bad79f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -/* - * Execute-only user mappings do not have the PTE_USER bit set. All valid - * kernel mappings have the PTE_UXN bit set. 
- */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) #define pte_valid_user(pte) \ @@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check) other than user execute-only which do not have the - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit + * set. */ #define pte_access_permitted(pte, write) \ (pte_valid_user(pte) && (!(write) || pte_write(pte))) diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index fbebb411ae20..bf57308fcd63 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -62,6 +62,7 @@ #define PSR_AA32_I_BIT 0x00000080 #define PSR_AA32_A_BIT 0x00000100 #define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_PAN_BIT 0x00400000 #define PSR_AA32_SSBS_BIT 0x00800000 #define PSR_AA32_DIT_BIT 0x01000000 #define PSR_AA32_Q_BIT 0x08000000 diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 788ae971f11c..25a73aab438f 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[]; extern char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; +extern char __exittext_begin[], __exittext_end[]; extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 127712b0b970..32fc8061aa76 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si { unsigned long ret, limit = current_thread_info()->addr_limit; + /* + * Asynchronous I/O running in a kernel thread does not have the + * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag + * the user address before checking. 
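+ * For example, a kernel worker completing asynchronous I/O on behalf of a
+ * task that passed in a tagged pointer still arrives here with the tag set
+ * (an illustrative case, not an exhaustive list).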
+ */ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && - test_thread_flag(TIF_TAGGED_ADDR)) + (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR))) addr = untagged_addr(addr); __chk_user_ptr(addr); diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 2629a68b8724..8c1b73dc8f55 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -25,8 +25,8 @@ #define __NR_compat_gettimeofday 78 #define __NR_compat_sigreturn 119 #define __NR_compat_rt_sigreturn 173 -#define __NR_compat_clock_getres 247 #define __NR_compat_clock_gettime 263 +#define __NR_compat_clock_getres 264 #define __NR_compat_clock_gettime64 403 #define __NR_compat_clock_getres_time64 406 @@ -42,7 +42,6 @@ #endif #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #ifndef __COMPAT_SYSCALL_NR #include diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 7ed9294e2004..d1bb5b69f1ce 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -49,6 +49,7 @@ #define PSR_SSBS_BIT 0x00001000 #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 +#define PSR_DIT_BIT 0x01000000 #define PSR_V_BIT 0x10000000 #define PSR_C_BIT 0x20000000 #define PSR_Z_BIT 0x40000000 diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 4703d218663a..f83a70e07df8 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -19,5 +19,6 @@ #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS +#define __ARCH_WANT_SYS_CLONE3 #include diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 478491f07b4f..4fa92ca3b72e 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -18,7 +18,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ hyp-stub.o psci.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ - smp.o smp_spin_table.o topology.o smccc-call.o \ + smp.o smp_spin_table.o topology.o smccc-call.o processor-type.o \ syscall.o extra-$(CONFIG_EFI) := efi-entry.o diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 3a58e9db5cfe..a100483b47c4 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -274,7 +274,7 @@ int apei_claim_sea(struct pt_regs *regs) if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) return err; - current_flags = arch_local_save_flags(); + current_flags = local_daif_save_flags(); /* * SEA can interrupt SError, mask it and describe this as an NMI so diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 214685760e1c..a5bdce8af65b 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -56,6 +56,7 @@ int main(void) DEFINE(S_X24, offsetof(struct pt_regs, regs[24])); DEFINE(S_X26, offsetof(struct pt_regs, regs[26])); DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); + DEFINE(S_FP, offsetof(struct pt_regs, regs[29])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 93f34b4eca25..96f576e9ea46 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -575,6 +575,7 @@ static const struct midr_range spectre_v2_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), 
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), { /* sentinel */ } }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 80f459ad0190..f400cb29b811 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly; #define COMPAT_ELF_HWCAP_DEFAULT \ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ - COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ - COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ - COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ COMPAT_HWCAP_LPAE) unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; unsigned int compat_elf_hwcap2 __read_mostly; @@ -1367,7 +1365,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, .min_field_value = 0, .matches = has_no_fpsimd, }, @@ -1595,6 +1593,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .match_list = list, \ } +#define HWCAP_CAP_MATCH(match, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + .matches = match, \ + } + #ifdef CONFIG_ARM64_PTR_AUTH static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { { @@ -1668,8 +1672,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { {}, }; +#ifdef CONFIG_COMPAT +static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) +{ + /* + * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available, + * in line with that of arm32 as in vfp_init(). We make sure that the + * check is future proof, by making sure value is non-zero. + */ + u32 mvfr1; + + WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); + if (scope == SCOPE_SYSTEM) + mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1); + else + mvfr1 = read_sysreg_s(SYS_MVFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT); +} +#endif + static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { #ifdef CONFIG_COMPAT + HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), + /* Arm v8 mandates MVFR0.FPDP == {0, 2}. 
So, piggy back on this for the presence of VFP support */ + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 05933c065732..4fb9e88d466c 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include /* * In case the boot CPU is hotpluggable, we record its initial state and @@ -139,9 +141,25 @@ static int c_show(struct seq_file *m, void *v) * "processor". Give glibc what it expects. */ seq_printf(m, "processor\t: %d\n", i); + + if (is_phytium2000()) + seq_puts(m, "model name\t: Phytium 2000 Series\n"); + else if (is_phytium1500()) + seq_puts(m, "model name\t: Phytium 1500 Series\n"); + else if (is_virtual()) + seq_puts(m, "model name\t: Virtual\n"); + else if (is_huawei()) + seq_printf(m, "model name\t: %s %s\n", + dmi_get_system_info(DMI_SYS_VENDOR), + dmi_get_system_info(DMI_PRODUCT_NAME)); + else + seq_puts(m, "model name\t: Unknown(jws) Model\n"); + if (compat) - seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", + seq_printf(m, "model type\t: ARMv8 Processor rev %d (%s)\n", MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); + else + seq_puts(m, "model type\t: General Arm64 Type(jws)\n"); seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", loops_per_jiffy / (500000UL/HZ), @@ -153,7 +171,7 @@ static int c_show(struct seq_file *m, void *v) * rather than attempting to parse this, but there's a body of * software which does already (at least for 32-bit). */ - seq_puts(m, "Features\t:"); + seq_puts(m, "flags\t\t:"); if (compat) { #ifdef CONFIG_COMPAT for (j = 0; compat_hwcap_str[j]; j++) diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 33d003d80121..7d02f9966d34 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -7,10 +7,136 @@ */ #include +#include #include #include #include +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +/* + * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before + * the regular function prologue. For an enabled callsite, ftrace_init_nop() and + * ftrace_make_call() have patched those NOPs to: + * + * MOV X9, LR + * BL <entry> + * + * ... where <entry> is either ftrace_caller or ftrace_regs_caller. + * + * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are + * live, and x9-x18 are safe to clobber. + * + * We save the callsite's context into a pt_regs before invoking any ftrace + * callbacks. So that we can get a sensible backtrace, we create a stack record + * for the callsite and the ftrace entry assembly. This is not sufficient for + * reliable stacktrace: until we create the callsite stack record, its caller + * is missing from the LR and existing chain of frame records.
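+ *
+ * Before patching, each callsite is just the two compiler-emitted NOPs (a
+ * sketch implied by -fpatchable-function-entry=2 above):
+ *
+ *	func:
+ *		NOP		// becomes: MOV X9, LR
+ *		NOP		// becomes: BL <entry>
+ *		<regular function prologue>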
+ */ + .macro ftrace_regs_entry, allregs=0 + /* Make room for pt_regs, plus a callee frame */ + sub sp, sp, #(S_FRAME_SIZE + 16) + + /* Save function arguments (and x9 for simplicity) */ + stp x0, x1, [sp, #S_X0] + stp x2, x3, [sp, #S_X2] + stp x4, x5, [sp, #S_X4] + stp x6, x7, [sp, #S_X6] + stp x8, x9, [sp, #S_X8] + + /* Optionally save the callee-saved registers, always save the FP */ + .if \allregs == 1 + stp x10, x11, [sp, #S_X10] + stp x12, x13, [sp, #S_X12] + stp x14, x15, [sp, #S_X14] + stp x16, x17, [sp, #S_X16] + stp x18, x19, [sp, #S_X18] + stp x20, x21, [sp, #S_X20] + stp x22, x23, [sp, #S_X22] + stp x24, x25, [sp, #S_X24] + stp x26, x27, [sp, #S_X26] + stp x28, x29, [sp, #S_X28] + .else + str x29, [sp, #S_FP] + .endif + + /* Save the callsite's SP and LR */ + add x10, sp, #(S_FRAME_SIZE + 16) + stp x9, x10, [sp, #S_LR] + + /* Save the PC after the ftrace callsite */ + str x30, [sp, #S_PC] + + /* Create a frame record for the callsite above pt_regs */ + stp x29, x9, [sp, #S_FRAME_SIZE] + add x29, sp, #S_FRAME_SIZE + + /* Create our frame record within pt_regs. */ + stp x29, x30, [sp, #S_STACKFRAME] + add x29, sp, #S_STACKFRAME + .endm + +ENTRY(ftrace_regs_caller) + ftrace_regs_entry 1 + b ftrace_common +ENDPROC(ftrace_regs_caller) + +ENTRY(ftrace_caller) + ftrace_regs_entry 0 + b ftrace_common +ENDPROC(ftrace_caller) + +ENTRY(ftrace_common) + sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) + mov x1, x9 // parent_ip (callsite's LR) + ldr_l x2, function_trace_op // op + mov x3, sp // regs + +GLOBAL(ftrace_call) + bl ftrace_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +GLOBAL(ftrace_graph_call) // ftrace_graph_caller(); + nop // If enabled, this will be replaced + // "b ftrace_graph_caller" +#endif + +/* + * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved + * x19-x29 per the AAPCS, and we created frame records upon entry, so we need + * to restore x0-x8, x29, and x30. 
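(A rough map of what ftrace_regs_entry leaves on the stack, assuming S_FRAME_SIZE is sizeof(struct pt_regs) and the extra 16 bytes hold the callsite frame record; the struct name is hypothetical and for illustration only:)

/*
 *   high addresses
 *   +---------------------------+ <- SP at callsite entry
 *   | callsite frame record     |    { saved x29, original LR (x9) }
 *   +---------------------------+ <- x29 after "add x29, sp, #S_FRAME_SIZE"
 *   | struct pt_regs            |    x0-x30, sp, pc, pstate, ...
 *   |   .stackframe             | <- inner frame record for this assembly
 *   +---------------------------+ <- SP while the ftrace callbacks run
 *   low addresses
 */
struct ftrace_regs_frame {
        struct pt_regs regs;            /* sized S_FRAME_SIZE */
        unsigned long callsite_fp;      /* callsite's saved x29 */
        unsigned long callsite_lr;      /* callsite's LR, staged in x9 */
};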
+ */ +ftrace_common_return: + /* Restore function arguments */ + ldp x0, x1, [sp] + ldp x2, x3, [sp, #S_X2] + ldp x4, x5, [sp, #S_X4] + ldp x6, x7, [sp, #S_X6] + ldr x8, [sp, #S_X8] + + /* Restore the callsite's FP, LR, PC */ + ldr x29, [sp, #S_FP] + ldr x30, [sp, #S_LR] + ldr x9, [sp, #S_PC] + + /* Restore the callsite's SP */ + add sp, sp, #S_FRAME_SIZE + 16 + + ret x9 +ENDPROC(ftrace_common) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + ldr x0, [sp, #S_PC] + sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn) + add x1, sp, #S_LR // parent_ip (callsite's LR) + ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP) + bl prepare_ftrace_return + b ftrace_common_return +ENDPROC(ftrace_graph_caller) +#endif + +#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + /* * Gcc with -pg will put the following code in the beginning of each function: * mov x0, x30 @@ -162,10 +288,6 @@ GLOBAL(ftrace_graph_call) // ftrace_graph_caller(); ENDPROC(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE */ -ENTRY(ftrace_stub) - ret -ENDPROC(ftrace_stub) - #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * void ftrace_graph_caller(void) @@ -184,7 +306,14 @@ ENTRY(ftrace_graph_caller) mcount_exit ENDPROC(ftrace_graph_caller) +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +ENTRY(ftrace_stub) + ret +ENDPROC(ftrace_stub) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * void return_to_handler(void) * diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 37d3912cfe06..1765e5284994 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task) */ static void task_fpsimd_load(void) { + WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); if (system_supports_sve() && test_thread_flag(TIF_SVE)) @@ -289,6 +290,7 @@ static void fpsimd_save(void) this_cpu_ptr(&fpsimd_last_state); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ + WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { @@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void) struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); + WARN_ON(!system_supports_fpsimd()); last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; last->sve_vl = current->thread.sve_vl; @@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); + WARN_ON(!system_supports_fpsimd()); WARN_ON(!in_softirq() && !irqs_disabled()); last->st = st; @@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, */ void fpsimd_restore_current_state(void) { - if (!system_supports_fpsimd()) + /* + * For the tasks that were created before we detected the absence of + * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), + * e.g, init. This could be then inherited by the children processes. + * If we later detect that the system doesn't support FP/SIMD, + * we must clear the flag for all the tasks to indicate that the + * FPSTATE is clean (as we can't have one) to avoid looping for ever in + * do_notify_resume(). 
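(The livelock being avoided looks roughly like this -- simplified pseudo-kernel code, with the loop body condensed; the names match the arm64 return-to-user path but the function itself is invented:)

static void ret_to_user_work(unsigned long *thread_flags)
{
        do {
                if (*thread_flags & _TIF_FOREIGN_FPSTATE)
                        /* must end with the flag clear, or we never exit */
                        fpsimd_restore_current_state();
                /* ... signals, reschedule, other work bits ... */
        } while (*thread_flags & _TIF_WORK_MASK);
}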
+ */ + if (!system_supports_fpsimd()) { + clear_thread_flag(TIF_FOREIGN_FPSTATE); return; + } get_cpu_fpsimd_context(); @@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void) */ void fpsimd_update_current_state(struct user_fpsimd_state const *state) { - if (!system_supports_fpsimd()) + if (WARN_ON(!system_supports_fpsimd())) return; get_cpu_fpsimd_context(); @@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) void fpsimd_flush_task_state(struct task_struct *t) { t->thread.fpsimd_cpu = NR_CPUS; - + /* + * If we don't support fpsimd, bail out after we have + * reset the fpsimd_cpu for this task and clear the + * FPSTATE. + */ + if (!system_supports_fpsimd()) + return; barrier(); set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); @@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t) */ static void fpsimd_flush_cpu_state(void) { + WARN_ON(!system_supports_fpsimd()); __this_cpu_write(fpsimd_last_state.st, NULL); set_thread_flag(TIF_FOREIGN_FPSTATE); } @@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void) */ void fpsimd_save_and_flush_cpu_state(void) { + if (!system_supports_fpsimd()) + return; WARN_ON(preemptible()); __get_cpu_fpsimd_context(); fpsimd_save(); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 06e56b470315..da3e97a991d3 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -62,6 +62,19 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ftrace_modify_code(pc, 0, new, false); } +static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) +{ +#ifdef CONFIG_ARM64_MODULE_PLTS + struct plt_entry *plt = mod->arch.ftrace_trampolines; + + if (addr == FTRACE_ADDR) + return &plt[FTRACE_PLT_IDX]; + if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + return &plt[FTRACE_REGS_PLT_IDX]; +#endif + return NULL; +} + /* * Turn on the call to ftrace_caller() in instrumented function */ @@ -72,9 +85,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) long offset = (long)pc - (long)addr; if (offset < -SZ_128M || offset >= SZ_128M) { -#ifdef CONFIG_ARM64_MODULE_PLTS - struct plt_entry trampoline, *dst; struct module *mod; + struct plt_entry *plt; + + if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) + return -EINVAL; /* * On kernels that support module PLTs, the offset between the @@ -93,49 +108,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (WARN_ON(!mod)) return -EINVAL; - /* - * There is only one ftrace trampoline per module. For now, - * this is not a problem since on arm64, all dynamic ftrace - * invocations are routed via ftrace_caller(). This will need - * to be revisited if support for multiple ftrace entry points - * is added in the future, but for now, the pr_err() below - * deals with a theoretical issue only. - * - * Note that PLTs are place relative, and plt_entries_equal() - * checks whether they point to the same target. Here, we need - * to check if the actual opcodes are in fact identical, - * regardless of the offset in memory so use memcmp() instead. 
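(For context on the ftrace_make_call() change here: an A64 immediate BL only reaches +/-128MiB from the callsite, so out-of-range callsites in modules must bounce through a PLT entry instead. A self-contained sketch of that range check, with SZ_128M spelled out:)

#include <stdbool.h>

#define SZ_128M (128L * 1024 * 1024)

static bool branch_needs_plt(unsigned long pc, unsigned long target)
{
        long offset = (long)target - (long)pc;

        return offset < -SZ_128M || offset >= SZ_128M;
}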
- */ - dst = mod->arch.ftrace_trampoline; - trampoline = get_plt_entry(addr, dst); - if (memcmp(dst, &trampoline, sizeof(trampoline))) { - if (plt_entry_is_initialized(dst)) { - pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); - return -EINVAL; - } - - /* point the trampoline to our ftrace entry point */ - module_disable_ro(mod); - *dst = trampoline; - module_enable_ro(mod, true); - - /* - * Ensure updated trampoline is visible to instruction - * fetch before we patch in the branch. Although the - * architecture doesn't require an IPI in this case, - * Neoverse-N1 erratum #1542419 does require one - * if the TLB maintenance in module_enable_ro() is - * skipped due to rodata_enabled. It doesn't seem worth - * it to make it conditional given that this is - * certainly not a fast-path. - */ - flush_icache_range((unsigned long)&dst[0], - (unsigned long)&dst[1]); + plt = get_ftrace_plt(mod, addr); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)addr); + return -EINVAL; } - addr = (unsigned long)dst; -#else /* CONFIG_ARM64_MODULE_PLTS */ - return -EINVAL; -#endif /* CONFIG_ARM64_MODULE_PLTS */ + + addr = (unsigned long)plt; } old = aarch64_insn_gen_nop(); @@ -144,6 +123,55 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_modify_code(pc, old, new, true); } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + unsigned long pc = rec->ip; + u32 old, new; + + old = aarch64_insn_gen_branch_imm(pc, old_addr, + AARCH64_INSN_BRANCH_LINK); + new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); + + return ftrace_modify_code(pc, old, new, true); +} + +/* + * The compiler has inserted two NOPs before the regular function prologue. + * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live, + * and x9-x18 are free for our use. + * + * At runtime we want to be able to swing a single NOP <-> BL to enable or + * disable the ftrace call. The BL requires us to save the original LR value, + * so here we insert a `MOV X9, LR` over the first NOP so the instructions + * before the regular prologue are: + * + * | Compiled | Disabled | Enabled | + * +----------+------------+------------+ + * | NOP | MOV X9, LR | MOV X9, LR | + * | NOP | NOP | BL <entry> | + * + * The LR value will be recovered by ftrace_regs_entry, and restored into LR + * before returning to the regular function prologue. When a function is not + * being traced, the MOV is not harmful given x9 is not live per the AAPCS. + * + * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of + * the BL.
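(The MOV X9, LR written by ftrace_init_nop() comes from aarch64_insn_gen_move_reg(), added to insn.c later in this patch, which builds the architectural alias ORR Xd, XZR, Xm. A standalone sketch of that encoding, with the opcode constant spelled out:)

#include <stdint.h>

#define ORR_X_SHIFTED_REG 0xaa000000u   /* sf=1, ORR (shifted register) */
#define REG_ZR            31u

static uint32_t gen_mov_reg(unsigned int rd, unsigned int rm)
{
        /* ORR Xd, XZR, Xm, LSL #0  ==  MOV Xd, Xm */
        return ORR_X_SHIFTED_REG | (rm << 16) | (REG_ZR << 5) | rd;
}

/* gen_mov_reg(9, 30) == 0xaa1e03e9 == MOV X9, X30, i.e. MOV X9, LR */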
+ */ +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + unsigned long pc = rec->ip - AARCH64_INSN_SIZE; + u32 old, new; + + old = aarch64_insn_gen_nop(); + new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9, + AARCH64_INSN_REG_LR, + AARCH64_INSN_VARIANT_64BIT); + return ftrace_modify_code(pc, old, new, true); +} +#endif + /* * Turn off the call to ftrace_caller() in instrumented function */ @@ -156,9 +184,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, long offset = (long)pc - (long)addr; if (offset < -SZ_128M || offset >= SZ_128M) { -#ifdef CONFIG_ARM64_MODULE_PLTS u32 replaced; + if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) + return -EINVAL; + /* * 'mod' is only set at module load time, but if we end up * dealing with an out-of-range condition, we can assume it @@ -189,9 +219,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return -EINVAL; validate = false; -#else /* CONFIG_ARM64_MODULE_PLTS */ - return -EINVAL; -#endif /* CONFIG_ARM64_MODULE_PLTS */ } else { old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index d801a7094076..4a9e773a177f 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -21,6 +21,7 @@ #include #include #include +#include #define AARCH64_INSN_SF_BIT BIT(31) #define AARCH64_INSN_N_BIT BIT(22) @@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn) static DEFINE_RAW_SPINLOCK(patch_lock); +static bool is_exit_text(unsigned long addr) +{ + /* discarded with init text/data */ + return system_state < SYSTEM_RUNNING && + addr >= (unsigned long)__exittext_begin && + addr < (unsigned long)__exittext_end; +} + +static bool is_image_text(unsigned long addr) +{ + return core_kernel_text(addr) || is_exit_text(addr); +} + static void __kprobes *patch_map(void *addr, int fixmap) { unsigned long uintaddr = (uintptr_t) addr; - bool module = !core_kernel_text(uintaddr); + bool image = is_image_text(uintaddr); struct page *page; - if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) - page = vmalloc_to_page(addr); - else if (!module) + if (image) page = phys_to_page(__pa_symbol(addr)); + else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + page = vmalloc_to_page(addr); else return addr; @@ -1268,6 +1282,19 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); } +/* + * MOV (register) is architecturally an alias of ORR (shifted register) where + * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m> + */ +u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_variant variant) +{ + return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR, + src, 0, variant, + AARCH64_INSN_LOGIC_ORR); +} + u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr, enum aarch64_insn_register reg, enum aarch64_insn_adr_type type) diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index b182442b87a3..915a39dcf32c 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -4,9 +4,11 @@ */ #include +#include #include #include #include +#include static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc, enum aarch64_insn_register reg) @@ -99,6 +101,34 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, return (u64)&plt[i]; } +u64 module_emit_plt_entry_kpatch(struct module *mod, Elf64_Shdr *sechdrs, + void 
*loc, unsigned long val) +{ + struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : + &mod->arch.init; + struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr; + int i = pltsec->plt_num_entries; + int j = i - 1; + + if (is_forbidden_offset_for_adrp(&plt[i].adrp)) + i++; + + plt[i] = get_plt_entry(val, &plt[i]); + + /* + * Check if the entry we just created is a duplicate. Given that the + * relocations are sorted, this will be the last entry we allocated. + * (if one exists). + */ + if (j >= 0 && plt_entries_equal(plt + i, plt + j)) + return (u64)&plt[j]; + + pltsec->plt_num_entries += i - j; + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) + return 0; + + return (u64)&plt[i]; +} #ifdef CONFIG_ARM64_ERRATUM_843419 u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, void *loc, u64 val) @@ -169,7 +199,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, switch (ELF64_R_TYPE(rela[i].r_info)) { case R_AARCH64_JUMP26: case R_AARCH64_CALL26: - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + if (!IS_ENABLED(CONFIG_RELOCATABLE)) break; /* @@ -257,6 +287,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, { unsigned long core_plts = 0; unsigned long init_plts = 0; + unsigned long core_extra_plts = 0; Elf64_Sym *syms = NULL; Elf_Shdr *pltsec, *tramp = NULL; int i; @@ -292,6 +323,11 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela); Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info; + if (!strcmp(secstrings + sechdrs[i].sh_name, ".kpatch.dynrelas")) { + core_extra_plts += sechdrs[i].sh_size / sizeof(struct kpatch_patch_dynrela); + continue; + } + if (sechdrs[i].sh_type != SHT_RELA) continue; @@ -310,6 +346,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, sechdrs[i].sh_info, dstsec); } + core_plts += core_extra_plts; pltsec = sechdrs + mod->arch.core.plt_shndx; pltsec->sh_type = SHT_NOBITS; pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC; @@ -330,7 +367,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, tramp->sh_type = SHT_NOBITS; tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; tramp->sh_addralign = __alignof__(struct plt_entry); - tramp->sh_size = sizeof(struct plt_entry); + tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry); } return 0; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 03ff15bffbb6..cd0abc83d46d 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -252,6 +253,197 @@ static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs, return 0; } +int static_relocate(struct module *me, int type, void *loc, + unsigned long val) +{ + int ovf = 0; + bool overflow_check = true; + /* Perform the static relocation. */ + switch (type) { + /* Null relocations. */ + case R_ARM_NONE: + case R_AARCH64_NONE: + ovf = 0; + break; + + /* Data relocations. 
*/ + case R_AARCH64_ABS64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_ABS, loc, val, 64); + break; + case R_AARCH64_ABS32: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 32); + break; + case R_AARCH64_ABS16: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 16); + break; + case R_AARCH64_PREL64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_PREL, loc, val, 64); + break; + case R_AARCH64_PREL32: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 32); + break; + case R_AARCH64_PREL16: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 16); + break; + + /* MOVW instruction relocations. */ + case R_AARCH64_MOVW_UABS_G0_NC: + overflow_check = false; + /* Fall through */ + case R_AARCH64_MOVW_UABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G1_NC: + overflow_check = false; + /* Fall through */ + case R_AARCH64_MOVW_UABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G2_NC: + overflow_check = false; + /* Fall through */ + case R_AARCH64_MOVW_UABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G3: + /* We're using the top bits so we can't overflow. */ + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_SABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_SABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_SABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G0_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_PREL_G0: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G1_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_PREL_G1: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G2_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_PREL_G2: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G3: + /* We're using the top bits so we can't overflow. */ + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, + AARCH64_INSN_IMM_MOVNZ); + break; + + /* Immediate instruction relocations. 
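(Before the immediate cases that follow, it may help to see one such relocation in isolation. A hedged sketch of the CALL26/JUMP26 case: the byte offset is converted to a word offset and must fit a signed 26-bit field, i.e. +/-128MiB; the helper name and error convention are invented:)

#include <stdint.h>

static int patch_branch26(uint32_t *place, uint64_t val)
{
        int64_t off = (int64_t)(val - (uint64_t)(uintptr_t)place) >> 2;

        if (off >= (1 << 25) || off < -(1 << 25))
                return -1;              /* -ERANGE in the real code */

        *place = (*place & ~0x03ffffffu) | ((uint32_t)off & 0x03ffffff);
        return 0;
}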
*/ + case R_AARCH64_LD_PREL_LO19: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, + AARCH64_INSN_IMM_19); + break; + case R_AARCH64_ADR_PREL_LO21: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, + AARCH64_INSN_IMM_ADR); + break; + case R_AARCH64_ADR_PREL_PG_HI21_NC: + overflow_check = false; + /* Fall through */ + case R_AARCH64_ADR_PREL_PG_HI21: + ovf = reloc_insn_adrp(me, me->kpatch_info->sechdrs, loc, val); + if (ovf && ovf != -ERANGE) + return ovf; + break; + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST16_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST32_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST64_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST128_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_TSTBR14: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, + AARCH64_INSN_IMM_14); + break; + case R_AARCH64_CONDBR19: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, + AARCH64_INSN_IMM_19); + break; + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, + AARCH64_INSN_IMM_26); + + if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && + ovf == -ERANGE) { + val = module_emit_plt_entry_kpatch(me, me->kpatch_info->sechdrs, loc, val); + if (!val) + return -ENOEXEC; + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, + 26, AARCH64_INSN_IMM_26); + } + break; + + default: + pr_err("module %s: unsupported RELA relocation: %d\n", + me->name, type); + return -ENOEXEC; + } + + if (overflow_check && ovf == -ERANGE) { + pr_err("module %s: overflow in relocation type %d val %lx\n", + me->name, type, val); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL_GPL(static_relocate); + int apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, @@ -470,22 +662,58 @@ overflow: return -ENOEXEC; } -int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) +static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) { const Elf_Shdr *s, *se; const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { - if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) - apply_alternatives_module((void *)s->sh_addr, s->sh_size); -#ifdef CONFIG_ARM64_MODULE_PLTS - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && - !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name)) - me->arch.ftrace_trampoline = (void *)s->sh_addr; -#endif + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; } + return NULL; +} + +static inline void __init_plt(struct plt_entry *plt, unsigned long addr) +{ + *plt = get_plt_entry(addr, plt); +} + +static int module_init_ftrace_plt(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod) +{ +#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE) + const Elf_Shdr *s; + struct plt_entry *plts; + + s = find_section(hdr, sechdrs, ".text.ftrace_trampoline"); + if (!s) + return -ENOEXEC; + + plts = 
(void *)s->sh_addr; + + __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR); + + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR); + + mod->arch.ftrace_trampolines = plts; +#endif return 0; } + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + s = find_section(hdr, sechdrs, ".altinstructions"); + if (s) + apply_alternatives_module((void *)s->sh_addr, s->sh_size); + + return module_init_ftrace_plt(hdr, sechdrs, me); +} diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c index 570988c7a7ff..1ca2acc6da54 100644 --- a/arch/arm64/kernel/pci.c +++ b/arch/arm64/kernel/pci.c @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI /* @@ -217,4 +218,13 @@ void pcibios_remove_bus(struct pci_bus *bus) acpi_pci_remove_bus(bus); } +/* + * Called after each bus is probed, but before its children are examined + */ +void pcibios_fixup_bus(struct pci_bus *bus) +{ + if (likely(bus->self != NULL)) + pci_read_bridge_bases(bus); +} + #endif diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 71f788cd2b18..81b761d1da8f 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -66,6 +67,30 @@ EXPORT_SYMBOL(__stack_chk_guard); void (*pm_power_off)(void); EXPORT_SYMBOL_GPL(pm_power_off); +void _pm_power_off(void) +{ + if (is_phytium2000()) { + unsigned long gpio_ctl_base; + unsigned long value; + + gpio_ctl_base = (unsigned long)ioremap(0x80028006000, 0x10000); + gpio_ctl_base += 0x24; + + value = 0; + value = readl((void __iomem *)(gpio_ctl_base)+0x4); //GPIO_DDR + value = value&(~0x18); + value = value | 0x18; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x4)); //GPIO_DDR + + pr_info("---system restart by gpio---\n"); + value = readl((void __iomem *)(gpio_ctl_base)+0x0); //GPIO_DR + value = value & (~0x18); + value = value | 0x8; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x0)); //GPIO_DR + } else if (pm_power_off) + pm_power_off(); +} + void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); static void __cpu_do_idle(void) @@ -170,8 +195,7 @@ void machine_power_off(void) { local_irq_disable(); smp_send_stop(); - if (pm_power_off) - pm_power_off(); + _pm_power_off(); } /* @@ -360,8 +384,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) asmlinkage void ret_from_fork(void) asm("ret_from_fork"); -int copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -394,11 +418,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, } /* - * If a TLS pointer was passed to clone (4th argument), use it - * for the new thread. + * If a TLS pointer was passed to clone, use it for the new + * thread. */ if (clone_flags & CLONE_SETTLS) - p->thread.uw.tp_value = childregs->regs[3]; + p->thread.uw.tp_value = tls; } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; @@ -466,6 +490,13 @@ static void ssbs_thread_switch(struct task_struct *next) if (unlikely(next->flags & PF_KTHREAD)) return; + /* + * If all CPUs implement the SSBS extension, then we just need to + * context-switch the PSTATE field. 
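(On parts without the SSBS extension, the mitigation state lives in the task's saved PSTATE and is flipped by software on context switch. A sketch of that fallback, assuming the kernel's struct pt_regs; the bit position matches the AArch64 PSTATE layout, the helper name is hypothetical:)

#include <stdbool.h>

#define PSR_SSBS_BIT    (1UL << 12)     /* PSTATE.SSBS */

/* mitigated == true keeps SSBS clear, so speculative store bypass
 * stays disabled for the task on return to it. */
static void task_set_ssbs(struct pt_regs *regs, bool mitigated)
{
        if (mitigated)
                regs->pstate &= ~PSR_SSBS_BIT;
        else
                regs->pstate |= PSR_SSBS_BIT;
}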
+ */ + if (cpu_have_feature(cpu_feature(SSBS))) + return; + /* If the mitigation is enabled, then we leave SSBS clear. */ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || test_tsk_thread_flag(next, TIF_SSBD)) diff --git a/arch/arm64/kernel/processor-type.c b/arch/arm64/kernel/processor-type.c new file mode 100644 index 000000000000..3e57330e034a --- /dev/null +++ b/arch/arm64/kernel/processor-type.c @@ -0,0 +1,42 @@ +/* +* Process Type Helper for Arm +* +* Jasperwang, Tencent inc. +*/ +#include +#include +#include +#include + +/* check phytium via dtb */ +unsigned int is_phytium2000(void) +{ + return of_machine_is_compatible("phytium,ft-2000a") + || of_machine_is_compatible("phytium,ft-2000ahk") + || of_machine_is_compatible("phytium,ft-2000plus"); +} +EXPORT_SYMBOL(is_phytium2000); + +/* check phytium via dtb */ +unsigned int is_phytium1500(void) +{ + return of_machine_is_compatible("phytium,ft-1500a"); +} +EXPORT_SYMBOL(is_phytium1500); + +/* check huawei via dmi info + * notice: only available after subsys initcall ! +*/ +unsigned int is_huawei(void) +{ + return dmi_name_in_vendors("HUAWEI") + || dmi_name_in_vendors("Huawei"); +} +EXPORT_SYMBOL(is_huawei); + +unsigned int is_virtual(void) +{ + return dmi_name_in_vendors("QEMU") + || dmi_match(DMI_PRODUCT_NAME, "KVM Virtual Machine"); +} +EXPORT_SYMBOL(is_virtual); diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index c9f72b2665f1..43ae4e0c968f 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -81,7 +81,8 @@ static void cpu_psci_cpu_die(unsigned int cpu) static int cpu_psci_cpu_kill(unsigned int cpu) { - int err, i; + int err; + unsigned long start, end; if (!psci_ops.affinity_info) return 0; @@ -91,16 +92,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu) * while it is dying. So, try again a few times. 
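(The rewrite below replaces a fixed ten-iteration msleep() loop with a jiffies-bounded poll. The general pattern, as a sketch; the callback and return codes are placeholders, the timekeeping helpers are the usual kernel ones:)

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_until(bool (*done)(void), unsigned int timeout_ms)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if (done())
                        return 0;
                usleep_range(100, 1000);        /* sleep, don't spin */
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}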
*/ - for (i = 0; i < 10; i++) { + start = jiffies; + end = start + msecs_to_jiffies(100); + do { err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { - pr_info("CPU%d killed.\n", cpu); + pr_info("CPU%d killed (polled %d ms)\n", cpu, + jiffies_to_msecs(jiffies - start)); return 0; } - msleep(10); - pr_info("Retrying again to check for CPU kill\n"); - } + usleep_range(100, 1000); + } while (time_before(jiffies, end)); pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", cpu, err); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 21176d02e21a..9168c4f1a37f 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, return 0; } +static int fpr_active(struct task_struct *target, const struct user_regset *regset) +{ + if (!system_supports_fpsimd()) + return -ENODEV; + return regset->n; +} + /* * TODO: update fp accessors for lazy context switching (sync/flush hwstate) */ @@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + if (!system_supports_fpsimd()) + return -EINVAL; + if (target == current) fpsimd_preserve_current_state(); @@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, { int ret; + if (!system_supports_fpsimd()) + return -EINVAL; + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); if (ret) return ret; @@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = { */ .size = sizeof(u32), .align = sizeof(u32), + .active = fpr_active, .get = fpr_get, .set = fpr_set }, @@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target, compat_ulong_t fpscr; int ret, vregs_end_pos; + if (!system_supports_fpsimd()) + return -EINVAL; + uregs = &target->thread.uw.fpsimd_state; if (target == current) @@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target, compat_ulong_t fpscr; int ret, vregs_end_pos; + if (!system_supports_fpsimd()) + return -EINVAL; + uregs = &target->thread.uw.fpsimd_state; vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); @@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = { .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), .size = sizeof(compat_ulong_t), .align = sizeof(compat_ulong_t), + .active = fpr_active, .get = compat_vfp_get, .set = compat_vfp_set }, diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index dc9fe879c279..993a4aedfd37 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -955,11 +955,22 @@ void tick_broadcast(const struct cpumask *mask) } #endif +/* + * The number of CPUs online, not counting this CPU (which may not be + * fully online and so not counted in num_online_cpus()). 
+ */ +static inline unsigned int num_other_online_cpus(void) +{ + unsigned int this_cpu_online = cpu_online(smp_processor_id()); + + return num_online_cpus() - this_cpu_online; +} + void smp_send_stop(void) { unsigned long timeout; - if (num_online_cpus() > 1) { + if (num_other_online_cpus()) { cpumask_t mask; cpumask_copy(&mask, cpu_online_mask); @@ -972,10 +983,10 @@ void smp_send_stop(void) /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && timeout--) + while (num_other_online_cpus() && timeout--) udelay(1); - if (num_online_cpus() > 1) + if (num_other_online_cpus()) pr_warning("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(cpu_online_mask)); @@ -998,7 +1009,11 @@ void crash_smp_send_stop(void) cpus_stopped = 1; - if (num_online_cpus() == 1) { + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. + */ + if (num_other_online_cpus() == 0) { sdei_mask_local_cpu(); return; } @@ -1006,7 +1021,7 @@ void crash_smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); pr_crit("SMP: stopping secondary CPUs\n"); smp_cross_call(&mask, IPI_CPU_CRASH_STOP); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index aa76f7259668..93752718f9d5 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -161,9 +161,12 @@ SECTIONS __inittext_begin = .; INIT_TEXT_SECTION(8) + + __exittext_begin = .; .exit.text : { ARM_EXIT_KEEP(EXIT_TEXT) } + __exittext_end = .; . = ALIGN(4); .altinstructions : { diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 43487f035385..7a7e425616b5 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -101,7 +101,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) { bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY); - unsigned long mdscr; + unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2; trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug); @@ -197,6 +197,10 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; + /* Write mdcr_el2 changes since vcpu_load on VHE systems */ + if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2) + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1)); } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 799e84a40335..d76a3d39b269 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -28,7 +28,15 @@ /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { - if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) + /* + * When the system doesn't support FP/SIMD, we cannot rely on + * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an + * abort on the very first access to FP and thus we should never + * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always + * trap the accesses. 
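(Concretely, "trap the accesses" on the non-VHE path means leaving CPTR_EL2.TFP set, so any guest FP/SIMD instruction traps to EL2. A sketch; the bit position is architectural, while the function and the zero baseline are simplifications -- the real code starts from the CPTR_EL2 RES1/default value:)

#define CPTR_EL2_TFP    (1UL << 10)     /* trap FP/SIMD to EL2 */

static unsigned long cptr_for_guest(int fp_enabled)
{
        unsigned long cptr = 0;         /* RES1 bits omitted in this sketch */

        if (!fp_enabled)
                cptr |= CPTR_EL2_TFP;   /* guest FP use traps to the hyp */
        return cptr;
}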
+ */ + if (!system_supports_fpsimd() || + vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_FP_HOST); diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index a9d25a305af5..a364a4ad5479 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -14,9 +14,6 @@ #include #include -#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ - PSR_I_BIT | PSR_D_BIT) - #define CURRENT_EL_SP_EL0_VECTOR 0x0 #define CURRENT_EL_SP_ELx_VECTOR 0x200 #define LOWER_EL_AArch64_VECTOR 0x400 @@ -50,6 +47,69 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type) return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type; } +/* + * When an exception is taken, most PSTATE fields are left unchanged in the + * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all + * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx + * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0. + * + * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429. + * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426. + * + * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from + * MSB to LSB. + */ +static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu) +{ + unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); + unsigned long old, new; + + old = *vcpu_cpsr(vcpu); + new = 0; + + new |= (old & PSR_N_BIT); + new |= (old & PSR_Z_BIT); + new |= (old & PSR_C_BIT); + new |= (old & PSR_V_BIT); + + // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests) + + new |= (old & PSR_DIT_BIT); + + // PSTATE.UAO is set to zero upon any exception to AArch64 + // See ARM DDI 0487E.a, page D5-2579. + + // PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0 + // SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented + // See ARM DDI 0487E.a, page D5-2578. + new |= (old & PSR_PAN_BIT); + if (!(sctlr & SCTLR_EL1_SPAN)) + new |= PSR_PAN_BIT; + + // PSTATE.SS is set to zero upon any exception to AArch64 + // See ARM DDI 0487E.a, page D2-2452. + + // PSTATE.IL is set to zero upon any exception to AArch64 + // See ARM DDI 0487E.a, page D1-2306. + + // PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64 + // See ARM DDI 0487E.a, page D13-3258 + if (sctlr & SCTLR_ELx_DSSBS) + new |= PSR_SSBS_BIT; + + // PSTATE.BTYPE is set to zero upon any exception to AArch64 + // See ARM DDI 0487E.a, pages D1-2293 to D1-2294. 
+ + new |= PSR_D_BIT; + new |= PSR_A_BIT; + new |= PSR_I_BIT; + new |= PSR_F_BIT; + + new |= PSR_MODE_EL1h; + + return new; +} + static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) { unsigned long cpsr = *vcpu_cpsr(vcpu); @@ -59,7 +119,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu)); *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); - *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; + *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu); vcpu_write_spsr(vcpu, cpsr); vcpu_write_sys_reg(vcpu, addr, FAR_EL1); @@ -94,7 +154,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu)); *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); - *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; + *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu); vcpu_write_spsr(vcpu, cpsr); /* diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 46822afc57e0..01a515e0171e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2360,8 +2360,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; + if (!index_to_params(id, ¶ms)) + return NULL; + table = get_target_table(vcpu->arch.target, true, &num); - r = find_reg_by_id(id, ¶ms, table, num); + r = find_reg(¶ms, table, num); if (!r) r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9fc6db0bcbad..d26e6cd28953 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -454,7 +454,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE; + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (kprobe_page_fault(regs, esr)) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 45c00a54909c..7be414815780 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -68,7 +68,7 @@ phys_addr_t arm64_dma_phys_limit __ro_after_init; */ static void __init reserve_crashkernel(void) { - unsigned long long crash_base, crash_size; + unsigned long long crash_base, crash_size, addr_limit; int ret; ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), @@ -80,8 +80,9 @@ static void __init reserve_crashkernel(void) crash_size = PAGE_ALIGN(crash_size); if (crash_base == 0) { + addr_limit = 0xFFFFFFFF; /* Current arm64 boot protocol requires 2MB alignment */ - crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT, + crash_base = memblock_find_in_range(0, addr_limit, crash_size, SZ_2M); if (crash_base == 0) { pr_warn("cannot allocate crashkernel (size:0x%llx)\n", diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 60c929f3683b..d10247fab0fd 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1069,7 +1069,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; /* * FIXME: Cleanup page tables (also in arch_add_memory() in case @@ -1078,7 +1077,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be * unlocked yet. 
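(The arch_remove_memory() hunks here and in the other architectures below all reduce to the same shape, because __remove_pages() now derives the zone from the pfn range itself. Side by side, the change is:)

/* old: the caller had to resolve the zone first */
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);

/* new: the zone argument is gone */
__remove_pages(start_pfn, nr_pages, altmap);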
*/ - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 3973847b5f42..48b2e1b59119 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -36,6 +36,7 @@ config CSKY select GX6605S_TIMER if CPU_CK610 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_AUDITSYSCALL + select HAVE_COPY_THREAD_TLS select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER @@ -74,7 +75,7 @@ config CPU_HAS_TLBI config CPU_HAS_LDSTEX bool help - For SMP, CPU needs "ldex&stex" instrcutions to atomic operations. + For SMP, CPU needs "ldex&stex" instructions for atomic operations. config CPU_NEED_TLBSYNC bool diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h index 7ab78bd0f3b1..f35a9f3315ee 100644 --- a/arch/csky/abiv1/inc/abi/entry.h +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -16,14 +16,16 @@ #define LSAVE_A4 40 #define LSAVE_A5 44 +#define usp ss1 + .macro USPTOKSP - mtcr sp, ss1 + mtcr sp, usp mfcr sp, ss0 .endm .macro KSPTOUSP mtcr sp, ss0 - mfcr sp, ss1 + mfcr sp, usp .endm .macro SAVE_ALL epc_inc @@ -45,7 +47,13 @@ add lr, r13 stw lr, (sp, 8) + mov lr, sp + addi lr, 32 + addi lr, 32 + addi lr, 16 + bt 2f mfcr lr, ss1 +2: stw lr, (sp, 16) stw a0, (sp, 20) @@ -79,9 +87,10 @@ ldw a0, (sp, 12) mtcr a0, epsr btsti a0, 31 + bt 1f ldw a0, (sp, 16) mtcr a0, ss1 - +1: ldw a0, (sp, 24) ldw a1, (sp, 28) ldw a2, (sp, 32) @@ -102,9 +111,9 @@ addi sp, 32 addi sp, 8 - bt 1f + bt 2f KSPTOUSP -1: +2: rte .endm diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h index 9897a16b45e5..94a7a58765df 100644 --- a/arch/csky/abiv2/inc/abi/entry.h +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -31,7 +31,13 @@ mfcr lr, epsr stw lr, (sp, 12) + btsti lr, 31 + bf 1f + addi lr, sp, 152 + br 2f +1: mfcr lr, usp +2: stw lr, (sp, 16) stw a0, (sp, 20) @@ -64,8 +70,10 @@ mtcr a0, epc ldw a0, (sp, 12) mtcr a0, epsr + btsti a0, 31 ldw a0, (sp, 16) mtcr a0, usp + mtcr a0, ss0 #ifdef CONFIG_CPU_HAS_HILO ldw a0, (sp, 140) @@ -86,6 +94,9 @@ addi sp, 40 ldm r16-r30, (sp) addi sp, 72 + bf 1f + mfcr sp, ss0 +1: rte .endm diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index 211c983c7282..ba4018929733 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -1,7 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
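(One of the defines that follows, __ARCH_WANT_SYS_CLONE3, wires up the clone3() syscall for csky. From userspace it is reached roughly as below -- the raw syscall is shown since libc wrappers may be absent, and struct clone_args/SYS_clone3 require kernel headers from v5.3 or later; sketch only:)

#define _GNU_SOURCE
#include <linux/sched.h>        /* struct clone_args */
#include <sys/syscall.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static pid_t fork_via_clone3(void)
{
        struct clone_args args;

        memset(&args, 0, sizeof(args));
        args.exit_signal = SIGCHLD;     /* plain fork()-like child */

        return (pid_t)syscall(SYS_clone3, &args, sizeof(args));
}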
+#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS #include diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S index 5b84f11485ae..3821ef9b7567 100644 --- a/arch/csky/kernel/atomic.S +++ b/arch/csky/kernel/atomic.S @@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg) mfcr a3, epc addi a3, TRAP0_SIZE - subi sp, 8 + subi sp, 16 stw a3, (sp, 0) mfcr a3, epsr stw a3, (sp, 4) + mfcr a3, usp + stw a3, (sp, 8) psrset ee #ifdef CONFIG_CPU_HAS_LDSTEX @@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg) mtcr a3, epc ldw a3, (sp, 4) mtcr a3, epsr - addi sp, 8 + ldw a3, (sp, 8) + mtcr a3, usp + addi sp, 16 KSPTOUSP rte END(csky_cmpxchg) diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index f320d9248a22..397962e11bd1 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -34,10 +34,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sw->r15; } -int copy_thread(unsigned long clone_flags, +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, - struct task_struct *p) + struct task_struct *p, + unsigned long tls) { struct switch_stack *childstack; struct pt_regs *childregs = task_pt_regs(p); @@ -64,7 +65,7 @@ int copy_thread(unsigned long clone_flags, childregs->usp = usp; if (clone_flags & CLONE_SETTLS) task_thread_info(p)->tp_value = childregs->tls - = childregs->regs[0]; + = tls; childregs->a0 = 0; childstack->r15 = (unsigned long) ret_from_fork; diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index b753d382e4ce..0bb0954d5570 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -120,7 +120,7 @@ void __init setup_smp_ipi(void) int rc; if (ipi_irq == 0) - panic("%s IRQ mapping failed\n", __func__); + return; rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", &ipi_dummy_dev); diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile index c94ef6481098..efb7ebab342b 100644 --- a/arch/csky/mm/Makefile +++ b/arch/csky/mm/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) obj-y += cachev2.o +CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) else obj-y += cachev1.o +CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) endif obj-y += dma-mapping.o diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index d4c2292ea46b..00e96278b377 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -31,6 +31,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pte_table); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 12cd9231c4b8..0231d69c8bf2 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : 
"memory", "p3" \ @@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%2);\n" \ " %1 = "#op "(%0,%3);\n" \ " memw_locked(%2,P3)=%1;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output), "=&r" (val) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) " }" " memw_locked(%2, p3) = %1;" " {" - " if !p3 jump 1b;" + " if (!p3) jump 1b;" " }" "2:" : "=&r" (__oldval), "=&r" (tmp) diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 47384b094b94..71429f756af0 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -223,7 +223,7 @@ static inline int ffs(int x) int r; asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" : "=&r" (r) : "r" (x) : "p0"); diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h index 6091322c3af9..92b8a02e588a 100644 --- a/arch/hexagon/include/asm/cmpxchg.h +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, __asm__ __volatile__ ( "1: %0 = memw_locked(%1);\n" /* load into retval */ " memw_locked(%1,P0) = %2;\n" /* store into memory */ - " if !P0 jump 1b;\n" + " if (!P0) jump 1b;\n" : "=&r" (retval) : "r" (ptr), "r" (x) : "memory", "p0" diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index cb635216a732..0191f7c7193e 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -16,7 +16,7 @@ /* For example: %1 = %4 */ \ insn \ "2: memw_locked(%3,p2) = %1;\n" \ - " if !p2 jump 1b;\n" \ + " if (!p2) jump 1b;\n" \ " %1 = #0;\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ @@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "1: %1 = memw_locked(%3)\n" " {\n" " p2 = cmp.eq(%1,%4)\n" - " if !p2.new jump:NT 3f\n" + " if (!p2.new) jump:NT 3f\n" " }\n" "2: memw_locked(%3,p2) = %5\n" - " if !p2 jump 1b\n" + " if (!p2) jump 1b\n" "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h index bfe07d842ff3..ef103b73bec8 100644 --- a/arch/hexagon/include/asm/spinlock.h +++ 
b/arch/hexagon/include/asm/spinlock.h @@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock) "1: R6 = memw_locked(%0);\n" " R6 = add(R6,#-1);\n" " memw_locked(%0,P3) = R6\n" - " if !P3 jump 1b;\n" + " if (!P3) jump 1b;\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " { %0 = P3 }\n" "1:\n" @@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0)\n" " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1)\n" " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" @@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1b; R6 = #1; }\n" + " { if (!P3) jump 1b; R6 = #1; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c index 35f29423fda8..5ed02f699479 100644 --- a/arch/hexagon/kernel/stacktrace.c +++ b/arch/hexagon/kernel/stacktrace.c @@ -11,8 +11,6 @@ #include #include -register unsigned long current_frame_pointer asm("r30"); - struct stackframe { unsigned long fp; unsigned long rets; @@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace) low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; - fp = current_frame_pointer; + fp = (unsigned long)__builtin_frame_address(0); while (fp >= low && fp <= (high - sizeof(*frame))) { frame = (struct stackframe *)fp; diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index 12242c27e2df..4023fdbea490 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S @@ -369,7 +369,7 @@ ret_from_fork: R26.L = #LO(do_work_pending); R0 = #VM_INT_DISABLE; } - if P0 jump check_work_pending + if (P0) jump check_work_pending { R0 = R25; callr R24 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index bf9df2625bc8..a6dd80a2c939 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> 
PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index 0bde47e4fa69..dcba53803fa5 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c @@ -92,7 +92,8 @@ static inline void __disable_dcache_nomsr(void) #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ do { \ int align = ~(cache_line_length - 1); \ - end = min(start + cache_size, end); \ + if (start < UINT_MAX - cache_size) \ + end = min(start + cache_size, end); \ start &= align; \ } while (0) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a0bd9bdb5f83..e5c2d47608fe 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -46,7 +46,7 @@ config MIPS select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES select HAVE_ASM_MODVERSIONS - select HAVE_EBPF_JIT if (!CPU_MICROMIPS) + select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT diff --git a/arch/mips/Makefile.postlink b/arch/mips/Makefile.postlink index 4eea4188cb20..13e0beb9eee3 100644 --- a/arch/mips/Makefile.postlink +++ b/arch/mips/Makefile.postlink @@ -12,7 +12,7 @@ __archpost: include scripts/Kbuild.include CMD_RELOCS = arch/mips/boot/tools/relocs -quiet_cmd_relocs = RELOCS $@ +quiet_cmd_relocs = RELOCS $@ cmd_relocs = $(CMD_RELOCS) $@ # `@true` prevents complaint when there is nothing to be done diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 528bd73d530a..4ed45ade32a1 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -123,7 +123,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS targets += vmlinux.its targets += vmlinux.gz.its targets += vmlinux.bz2.its -targets += vmlinux.lzmo.its +targets += vmlinux.lzma.its targets += vmlinux.lzo.its quiet_cmd_cpp_its_S = ITS $@ diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 172801ed35b8..d859f079b771 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS) +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
+KCOV_INSTRUMENT := n + # decompressor objects (linked with vmlinuz) vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 93a9dce31f25..813dfe5f45a5 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -18,10 +18,12 @@ #include #define __ARCH_USE_5LEVEL_HACK -#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48) +#if CONFIG_PGTABLE_LEVELS == 2 #include -#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48)) +#elif CONFIG_PGTABLE_LEVELS == 3 #include +#else +#include #endif /* @@ -216,6 +218,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) return pgd_val(pgd); } +#define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd)) +#define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT)) + static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) { return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address); diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 4993db40482c..ee26f9a4575d 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -49,8 +49,26 @@ struct thread_info { .addr_limit = KERNEL_DS, \ } -/* How to get the thread information struct from C. */ +/* + * A pointer to the struct thread_info for the currently executing thread is + * held in register $28/$gp. + * + * We declare __current_thread_info as a global register variable rather than a + * local register variable within current_thread_info() because clang doesn't + * support explicit local register variables. + * + * When building the VDSO we take care not to declare the global register + * variable because this causes GCC to not preserve the value of $28/$gp in + * functions that change its value (which is common in the PIC VDSO when + * accessing the GOT). Since the VDSO shouldn't be accessing + * __current_thread_info anyway we declare it extern in order to cause a link + * failure if it's referenced. + */ +#ifdef __VDSO__ +extern struct thread_info *__current_thread_info; +#else register struct thread_info *__current_thread_info __asm__("$28"); +#endif static inline struct thread_info *current_thread_info(void) { diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index b08825531e9f..0ae9b4cbc153 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -26,8 +26,6 @@ #define __VDSO_USE_SYSCALL ULLONG_MAX -#ifdef CONFIG_MIPS_CLOCK_VSYSCALL - static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback( return error ? 
-ret : ret; } -#else - -static __always_inline long gettimeofday_fallback( - struct __kernel_old_timeval *_tv, - struct timezone *_tz) -{ - return -1; -} - -#endif - static __always_inline long clock_gettime_fallback( clockid_t _clkid, struct __kernel_timespec *_ts) diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index f777e44653d5..47312c529410 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu) return 0; } +static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + + for_each_possible_cpu(cpu1) + if (cpus_are_siblings(cpu, cpu1)) + cpumask_set_cpu(cpu1, cpu_map); +} + +static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + int cluster = cpu_cluster(&cpu_data[cpu]); + + for_each_possible_cpu(cpu1) + if (cpu_cluster(&cpu_data[cpu1]) == cluster) + cpumask_set_cpu(cpu1, cpu_map); +} + static int __populate_cache_leaves(unsigned int cpu) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu) struct cacheinfo *this_leaf = this_cpu_ci->info_list; if (c->icache.waysize) { + /* L1 caches are per core */ + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); } else { populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); } - if (c->scache.waysize) + if (c->scache.waysize) { + /* L2 cache is per cluster */ + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); + } if (c->tcache.waysize) populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index a3d4bec695c6..6efb2f6889a7 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -18,7 +18,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_pfx_$(basetarget))' \ '$(syshdr_offset_$(basetarget))' -quiet_cmd_sysnr = SYSNR $@ +quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ '$(sysnr_abis_$(basetarget))' \ '$(sysnr_pfx_$(basetarget))' \ diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6176b9acba95..d0d832ab3d3b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -134,7 +134,7 @@ void release_vpe(struct vpe *v) { list_del(&v->list); if (v->load_addr) - release_progmem(v); + release_progmem(v->load_addr); kfree(v); } diff --git a/arch/mips/loongson64/loongson-3/platform.c b/arch/mips/loongson64/loongson-3/platform.c index 13f3404f0030..9674ae1361a8 100644 --- a/arch/mips/loongson64/loongson-3/platform.c +++ b/arch/mips/loongson64/loongson-3/platform.c @@ -27,6 +27,9 @@ static int __init loongson3_platform_init(void) continue; pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); + if (!pdev) + return -ENOMEM; + pdev->name = loongson_sysconf.sensors[i].name; pdev->id = loongson_sysconf.sensors[i].id; pdev->dev.platform_data = &loongson_sysconf.sensors[i]; diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 46b76751f3a5..561154cbcc40 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) { int off, b_off; + int tcc_reg; ctx->flags |= 
EBPF_SEEN_TC; /* @@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) b_off = b_imm(this_idx + 1, ctx); emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); /* - * if (--TCC < 0) + * if (TCC-- < 0) * goto out; */ /* Delay slot */ - emit_instr(ctx, daddiu, MIPS_R_T5, - (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); + tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4; + emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1); b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, bltz, MIPS_R_T5, b_off); + emit_instr(ctx, bltz, tcc_reg, b_off); /* * prog = array->ptrs[index]; * if (prog == NULL) @@ -1803,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) unsigned int image_size; u8 *image_ptr; - if (!prog->jit_requested || MIPS_ISA_REV < 2) + if (!prog->jit_requested) return prog; tmp = bpf_jit_blind_constants(prog); diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 7b4d40354ee7..30017d5945bc 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -279,16 +279,15 @@ static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask, struct bridge_irq_chip_data *data = d->chip_data; int bit = d->parent_data->hwirq; int pin = d->hwirq; - nasid_t nasid; int ret, cpu; ret = irq_chip_set_affinity_parent(d, mask, force); if (ret >= 0) { cpu = cpumask_first_and(mask, cpu_online_mask); - nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + data->nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); bridge_write(data->bc, b_int_addr[pin].addr, (((data->bc->intr_addr >> 30) & 0x30000) | - bit | (nasid << 8))); + bit | (data->nasid << 8))); bridge_read(data->bc, b_wid_tflush); } return ret; } diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 1434fa60f3db..94e9ce994494 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -51,6 +51,7 @@ choice select MIPS_GIC select COMMON_CLK select CLKSRC_MIPS_GIC + select HAVE_PCI if PCI_MT7621 endchoice choice diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 37be04975831..79a2f6bd2b5a 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -73,6 +73,9 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask) int cpu; cpu = cpumask_first_and(mask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_any(cpu_online_mask); + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); hd->cpu = cpu; if (!cputoslice(cpu)) { @@ -139,6 +142,7 @@ static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq, /* use CPU connected to nearest hub */ hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid)); setup_hub_mask(hd, &hub->h_cpus); + info->nasid = cpu_to_node(hd->cpu); /* Make sure it's not already pending when we connect it. */ REMOTE_HUB_CLR_INTR(info->nasid, swlevel); diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c index 6ebdc37c89fc..6b83b6376a4b 100644 --- a/arch/mips/vdso/vgettimeofday.c +++ b/arch/mips/vdso/vgettimeofday.c @@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime32(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path.
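For illustration, the libc-side fallback that comment relies on could look like the sketch below. This is hypothetical userspace code, not part of this patch: real libcs resolve the vDSO through the auxiliary vector rather than dlopen(), and symbol versioning is omitted here.

/* Hypothetical sketch: prefer __vdso_gettimeofday when the kernel
 * exports it, otherwise take the standard syscall path. */
#include <dlfcn.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>

typedef int (*gtod_fn)(struct timeval *, struct timezone *);

int my_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        static gtod_fn vdso_gtod;
        static int probed;

        if (!probed) {
                /* The vDSO is already mapped; RTLD_NOLOAD only looks it up. */
                void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);

                if (vdso)
                        vdso_gtod = (gtod_fn)dlsym(vdso, "__vdso_gettimeofday");
                probed = 1;
        }

        if (vdso_gtod)
                return vdso_gtod(tv, tz);
        return (int)syscall(SYS_gettimeofday, tv, tz);
}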
+ */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct old_timespec32 *res) { @@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. + */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index d9ac7e6408ef..caddded56e77 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h @@ -9,7 +9,11 @@ #define PG_dcache_dirty PG_arch_1 void flush_icache_range(unsigned long start, unsigned long end); +#define flush_icache_range flush_icache_range + void flush_icache_page(struct vm_area_struct *vma, struct page *page); +#define flush_icache_page flush_icache_page + #ifdef CONFIG_CPU_CACHE_ALIASING void flush_cache_mm(struct mm_struct *mm); void flush_cache_dup_mm(struct mm_struct *mm); @@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size); #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) #else -#include -#undef flush_icache_range -#undef flush_icache_page -#undef flush_icache_user_range void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len); +#define flush_icache_user_range flush_icache_user_range + +#include #endif #endif /* __NDS32_CACHEFLUSH_H__ */ diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b16237c95ea3..0c29d6cb2c8d 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -62,6 +62,7 @@ config PARISC select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_COPY_THREAD_TLS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index f627c37dad9c..ab5c215cf46c 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size) ** if (((unsigned long)p & 0xf) == 0) ** return __ldcw(p); */ -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) +#define xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __typeof__(*(ptr)) _x_ = (x); \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \ + __ret; \ +}) /* bug catcher for when unsupported size is used - won't link */ extern void __cmpxchg_called_with_bad_pointer(void); diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h index a99ea747d7ed..87e174006995 100644 --- a/arch/parisc/include/asm/kexec.h +++ b/arch/parisc/include/asm/kexec.h @@ -2,8 +2,6 @@ #ifndef _ASM_PARISC_KEXEC_H #define _ASM_PARISC_KEXEC_H -#ifdef CONFIG_KEXEC - /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address 
we can reach in physical address mode */ @@ -32,6 +30,4 @@ static inline void crash_setup_regs(struct pt_regs *newregs, #endif /* __ASSEMBLY__ */ -#endif /* CONFIG_KEXEC */ - #endif /* _ASM_PARISC_KEXEC_H */ diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 2663c8f8be11..068d90950d93 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -37,5 +37,5 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o +obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o obj-$(CONFIG_KEXEC_FILE) += kexec_file.o diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 3b330e58a4f0..a5f3e50fe976 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -810,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath); static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, struct device *parent); -static void walk_lower_bus(struct parisc_device *dev) +static void __init walk_lower_bus(struct parisc_device *dev) { unsigned long io_io_low, io_io_high; @@ -889,8 +889,8 @@ static void print_parisc_device(struct parisc_device *dev) static int count; print_pa_hwpath(dev, hw_path); - pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, + pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", + ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); if (dev->num_addrs) { diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index ecc5c2771208..230a6422b99f 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init); * Copy architecture-specific thread state */ int -copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, unsigned long tls) { struct pt_regs *cregs = &(p->thread.regs); void *stack = task_stack_page(p); @@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE; cregs->kpc = (unsigned long) &child_return; - /* Setup thread TLS area from the 4th parameter in clone */ + /* Setup thread TLS area */ if (clone_flags & CLONE_SETTLS) - cregs->cr27 = cregs->gr[23]; + cregs->cr27 = tls; } return 0; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3e56c9c2f16e..2b1033f13210 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -221,8 +221,7 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP - select HAVE_RCU_TABLE_FREE if SMP - select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE + select HAVE_RCU_TABLE_FREE select HAVE_MMU_GATHER_PAGE_SIZE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN @@ -237,6 +236,7 @@ config PPC select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE select NEED_SG_DMA_LENGTH select OF + select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE select OF_EARLY_FLATTREE select OLD_SIGACTION if PPC32 select OLD_SIGSUSPEND diff --git 
a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index c59920920ddc..b915fe658979 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -371,7 +371,7 @@ config PPC_PTDUMP config PPC_DEBUG_WX bool "Warn on W+X mappings at boot" - depends on PPC_PTDUMP + depends on PPC_PTDUMP && STRICT_KERNEL_RWX help Generate a warning if any W+X mappings are found at boot. diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 83522c9fc7b6..37ac731a556b 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -91,11 +91,13 @@ MULTIPLEWORD := -mmultiple endif ifdef CONFIG_PPC64 +ifndef CONFIG_CC_IS_CLANG cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc) aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 endif +endif ifndef CONFIG_CC_IS_CLANG cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align @@ -141,6 +143,7 @@ endif endif CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no) +ifndef CONFIG_CC_IS_CLANG ifdef CONFIG_CPU_LITTLE_ENDIAN CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) @@ -149,6 +152,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) endif +endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink index 134f12f89b92..2268396ff4bb 100644 --- a/arch/powerpc/Makefile.postlink +++ b/arch/powerpc/Makefile.postlink @@ -17,11 +17,11 @@ quiet_cmd_head_check = CHKHEAD $@ quiet_cmd_relocs_check = CHKREL $@ ifdef CONFIG_PPC_BOOK3S_64 cmd_relocs_check = \ - $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \ + $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \ $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@" else cmd_relocs_check = \ - $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" + $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" endif # `@true` prevents complaint when there is nothing to be done diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c index 1699e9531552..00c4d843a023 100644 --- a/arch/powerpc/boot/4xx.c +++ b/arch/powerpc/boot/4xx.c @@ -228,7 +228,7 @@ void ibm4xx_denali_fixup_memsize(void) dpath = 8; /* 64 bits */ /* get address pins (rows) */ - val = SDRAM0_READ(DDR0_42); + val = SDRAM0_READ(DDR0_42); row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT); if (row > max_row) diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi index e1a961f05dcd..baa0c503e741 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi @@ -63,6 +63,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy0: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi 
b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi index c288f3c6c637..93095600e808 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi @@ -60,6 +60,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xf1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy6: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi index 94f3e7175012..ff4bd38f0645 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi @@ -63,6 +63,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy1: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi index 94a76982d214..1fa38ed6f59e 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi @@ -60,6 +60,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xf3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy7: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi index b5ff5f71c6b8..a8cc9780c0c4 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi @@ -59,6 +59,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy0: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi index ee44182c6348..8b8bd70c9382 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi @@ -59,6 +59,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy1: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi index f05f0d775039..619c880b54d8 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi @@ -59,6 +59,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe5000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy2: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi index a9114ec51075..d7ebb73a400d 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi @@ -59,6 +59,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe7000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy3: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi index 
44dd00ac7367..b151d696a069 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi @@ -59,6 +59,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe9000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy4: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi index 5b1b84b58602..adc0ae0013a3 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi @@ -59,6 +59,7 @@ fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xeb000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy5: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi index 0e1daaef9e74..435047e0e250 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi @@ -60,6 +60,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xf1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy14: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi index 68c5ef779266..c098657cca0a 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi @@ -60,6 +60,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xf3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy15: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi index 605363cc1117..9d06824815f3 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi @@ -59,6 +59,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy8: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi index 1955dfa13634..70e947730c4b 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi @@ -59,6 +59,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy9: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi index 2c1476454ee0..ad96e6529595 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi @@ -59,6 +59,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe5000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy10: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi index b8b541ff5fb0..034bc4b71f7a 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi +++ 
b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi @@ -59,6 +59,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe7000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy11: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi index 4b2cfddd1b15..93ca23d82b39 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi @@ -59,6 +59,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xe9000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy12: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi index 0a52ddf7cc17..23b3117a2fd2 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi @@ -59,6 +59,7 @@ fman@500000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xeb000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ pcsphy13: ethernet-phy@0 { reg = <0x0>; diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h index 2abc8e83b95e..9757d4f6331e 100644 --- a/arch/powerpc/boot/libfdt_env.h +++ b/arch/powerpc/boot/libfdt_env.h @@ -6,6 +6,8 @@ #include #define INT_MAX ((int)(~0U>>1)) +#define UINT32_MAX ((u32)~0U) +#define INT32_MAX ((s32)(UINT32_MAX >> 1)) #include "of.h" diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h index 9c63b596e6ce..a09595f00cab 100644 --- a/arch/powerpc/include/asm/archrandom.h +++ b/arch/powerpc/include/asm/archrandom.h @@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v) unsigned long val; int rc; - rc = arch_get_random_long(&val); + rc = arch_get_random_seed_long(&val); if (rc) *v = val; diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 8561498e653c..d84d1417ddb6 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -152,9 +152,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); /* Patch sites */ extern s32 patch__call_flush_count_cache; extern s32 patch__flush_count_cache_return; +extern s32 patch__flush_link_stack_return; +extern s32 patch__call_kvm_flush_link_stack; extern s32 patch__memset_nocache, patch__memcpy_nocache; extern long flush_count_cache; +extern long kvm_flush_link_stack; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index f9dc597b0b86..91c8f1d9bcee 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -102,11 +102,13 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end) isync(); /* Context sync required after mtsrin() */ } -static inline void allow_user_access(void __user *to, const void __user *from, u32 size) +static __always_inline void allow_user_access(void __user *to, const void __user *from, + u32 size, unsigned long dir) { u32 addr, end; - if (__builtin_constant_p(to) && to == NULL) + BUILD_BUG_ON(!__builtin_constant_p(dir)); + if (!(dir & KUAP_WRITE)) return; addr = (__force u32)to; @@ -119,11 +121,16 @@ static inline void 
allow_user_access(void __user *to, const void __user *from, u kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */ } -static inline void prevent_user_access(void __user *to, const void __user *from, u32 size) +static __always_inline void prevent_user_access(void __user *to, const void __user *from, + u32 size, unsigned long dir) { u32 addr = (__force u32)to; u32 end = min(addr + size, TASK_SIZE); + BUILD_BUG_ON(!__builtin_constant_p(dir)); + if (!(dir & KUAP_WRITE)) + return; + if (!addr || addr >= TASK_SIZE || !size) return; @@ -131,12 +138,17 @@ static inline void prevent_user_access(void __user *to, const void __user *from, kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */ } -static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) +static inline bool +bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { + unsigned long begin = regs->kuap & 0xf0000000; + unsigned long end = regs->kuap << 28; + if (!is_write) return false; - return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !"); + return WARN(address < begin || address >= end, + "Bug: write fault blocked by segment registers !"); } #endif /* CONFIG_PPC_KUAP */ diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index 998317702630..dc5c039eb28e 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size) #define get_hugepd_cache_index(x) (x) -#ifdef CONFIG_SMP static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { @@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table) pgtable_free(table, shift); } -#else -static inline void pgtable_free_tlb(struct mmu_gather *tlb, - void *table, int shift) -{ - pgtable_free(table, shift); -} -#endif static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h index f254de956d6a..c8d1076e0ebb 100644 --- a/arch/powerpc/include/asm/book3s/64/kup-radix.h +++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h @@ -77,25 +77,27 @@ static inline void set_kuap(unsigned long value) isync(); } -static inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size) +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) { // This is written so we can resolve to a single case at build time - if (__builtin_constant_p(to) && to == NULL) + BUILD_BUG_ON(!__builtin_constant_p(dir)); + if (dir == KUAP_READ) set_kuap(AMR_KUAP_BLOCK_WRITE); - else if (__builtin_constant_p(from) && from == NULL) + else if (dir == KUAP_WRITE) set_kuap(AMR_KUAP_BLOCK_READ); else set_kuap(0); } static inline void prevent_user_access(void __user *to, const void __user *from, - unsigned long size) + unsigned long size, unsigned long dir) { set_kuap(AMR_KUAP_BLOCKED); } -static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) +static inline bool +bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && (regs->kuap & (is_write ? 
AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 15b75005bc34..3fa1b962dc27 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -600,8 +600,11 @@ extern void slb_set_size(u16 size); * */ #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2) + +// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel #define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \ - MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT) + MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2) + /* * For platforms that support on 65bit VA we limit the context bits */ diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index d5a44912902f..cae9e814593a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list; extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long); extern void pmd_fragment_free(unsigned long *); extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); -#ifdef CONFIG_SMP extern void __tlb_remove_table(void *_table); -#endif void pte_frag_destroy(void *pte_frag); static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 45e3137ccd71..72b81015cebe 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -55,42 +55,48 @@ struct ppc64_caches { extern struct ppc64_caches ppc64_caches; -static inline u32 l1_cache_shift(void) +static inline u32 l1_dcache_shift(void) { return ppc64_caches.l1d.log_block_size; } -static inline u32 l1_cache_bytes(void) +static inline u32 l1_dcache_bytes(void) { return ppc64_caches.l1d.block_size; } + +static inline u32 l1_icache_shift(void) +{ + return ppc64_caches.l1i.log_block_size; +} + +static inline u32 l1_icache_bytes(void) +{ + return ppc64_caches.l1i.block_size; +} #else -static inline u32 l1_cache_shift(void) +static inline u32 l1_dcache_shift(void) { return L1_CACHE_SHIFT; } -static inline u32 l1_cache_bytes(void) +static inline u32 l1_dcache_bytes(void) { return L1_CACHE_BYTES; } + +static inline u32 l1_icache_shift(void) +{ + return L1_CACHE_SHIFT; +} + +static inline u32 l1_icache_bytes(void) +{ + return L1_CACHE_BYTES; +} + #endif -#endif /* ! __ASSEMBLY__ */ -#if defined(__ASSEMBLY__) -/* - * For a snooping icache, we still need a dummy icbi to purge all the - * prefetched instructions from the ifetch buffers. We also need a sync - * before the icbi to order the the actual stores to memory that might - * have modified instructions with the icbi. 
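The ordering this comment describes is the general powerpc recipe for making modified instructions visible on a non-snooping icache. As a minimal sketch (illustrative only; the helper name is made up), patching a single instruction safely looks like:

/* Illustrative sketch: store a new instruction, push the data-cache line
 * out to memory, invalidate the stale icache copy, then resync the
 * instruction stream. */
static inline void patch_one_insn(unsigned int *addr, unsigned int insn)
{
        *addr = insn;
        asm volatile("dcbst 0,%0" : : "r"(addr) : "memory"); /* clean to RAM */
        asm volatile("sync");           /* order the store before the icbi */
        asm volatile("icbi 0,%0" : : "r"(addr) : "memory");
        asm volatile("sync; isync");    /* complete invalidate, then refetch */
}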
- */ -#define PURGE_PREFETCHED_INS \ - sync; \ - icbi 0,r3; \ - sync; \ - isync - -#else #define __read_mostly __attribute__((__section__(".data..read_mostly"))) #ifdef CONFIG_PPC_BOOK3S_32 @@ -124,6 +130,17 @@ static inline void dcbst(void *addr) { __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory"); } + +static inline void icbi(void *addr) +{ + asm volatile ("icbi 0, %0" : : "r"(addr) : "memory"); +} + +static inline void iccci(void *addr) +{ + asm volatile ("iccci 0, %0" : : "r"(addr) : "memory"); +} + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_CACHE_H */ diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index eef388f2659f..4a1c9f0200e1 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -42,29 +42,25 @@ extern void flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) -extern void flush_icache_range(unsigned long, unsigned long); +void flush_icache_range(unsigned long start, unsigned long stop); extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len); -extern void __flush_dcache_icache(void *page_va); extern void flush_dcache_icache_page(struct page *page); -#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE) -extern void __flush_dcache_icache_phys(unsigned long physaddr); -#else -static inline void __flush_dcache_icache_phys(unsigned long physaddr) -{ - BUG(); -} -#endif +void __flush_dcache_icache(void *page); -/* - * Write any modified data cache blocks out to memory and invalidate them. - * Does not invalidate the corresponding instruction cache blocks. +/** + * flush_dcache_range(): Write any modified data cache blocks out to memory and + * invalidate them. Does not invalidate the corresponding instruction cache + * blocks. 
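As a usage note, a typical caller flushes a freshly written buffer before handing it to a device that is not cache-coherent. A hypothetical sketch (my_desc and DESC_READY are made-up names, not from this patch):

/* Hypothetical usage of flush_dcache_range(): publish a descriptor that a
 * non-coherent DMA master will read directly from memory. */
static void publish_desc(struct my_desc *d)
{
        d->status = DESC_READY;
        flush_dcache_range((unsigned long)d,
                           (unsigned long)d + sizeof(*d));
}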
+ * + * @start: the start address + * @stop: the stop address (exclusive) */ static inline void flush_dcache_range(unsigned long start, unsigned long stop) { - unsigned long shift = l1_cache_shift(); - unsigned long bytes = l1_cache_bytes(); + unsigned long shift = l1_dcache_shift(); + unsigned long bytes = l1_dcache_bytes(); void *addr = (void *)(start & ~(bytes - 1)); unsigned long size = stop - (unsigned long)addr + (bytes - 1); unsigned long i; @@ -89,8 +85,8 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop) */ static inline void clean_dcache_range(unsigned long start, unsigned long stop) { - unsigned long shift = l1_cache_shift(); - unsigned long bytes = l1_cache_bytes(); + unsigned long shift = l1_dcache_shift(); + unsigned long bytes = l1_dcache_bytes(); void *addr = (void *)(start & ~(bytes - 1)); unsigned long size = stop - (unsigned long)addr + (bytes - 1); unsigned long i; @@ -108,8 +104,8 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop) static inline void invalidate_dcache_range(unsigned long start, unsigned long stop) { - unsigned long shift = l1_cache_shift(); - unsigned long bytes = l1_cache_bytes(); + unsigned long shift = l1_dcache_shift(); + unsigned long bytes = l1_dcache_bytes(); void *addr = (void *)(start & ~(bytes - 1)); unsigned long size = stop - (unsigned long)addr + (bytes - 1); unsigned long i; diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 0cfc365d814b..722289a1d000 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -77,7 +77,12 @@ enum fixed_addresses { static inline void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { - map_kernel_page(fix_to_virt(idx), phys, flags); + if (__builtin_constant_p(idx)) + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); + else if (WARN_ON(idx >= __end_of_fixed_addresses)) + return; + + map_kernel_page(__fix_to_virt(idx), phys, flags); } #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index eea28ca679db..bc7d9d06a6d9 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -35,7 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, { int oldval = 0, ret; - allow_write_to_user(uaddr, sizeof(*uaddr)); + allow_read_write_user(uaddr, uaddr, sizeof(*uaddr)); pagefault_disable(); switch (op) { @@ -62,7 +62,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, *oval = oldval; - prevent_write_to_user(uaddr, sizeof(*uaddr)); + prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr)); return ret; } @@ -76,7 +76,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; - allow_write_to_user(uaddr, sizeof(*uaddr)); + allow_read_write_user(uaddr, uaddr, sizeof(*uaddr)); + __asm__ __volatile__ ( PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ @@ -97,7 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "cc", "memory"); *uval = prev; - prevent_write_to_user(uaddr, sizeof(*uaddr)); + prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr)); + return ret; } diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 5b5e39643a27..94f24928916a 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -2,6 +2,10 @@ #ifndef _ASM_POWERPC_KUP_H_ #define _ASM_POWERPC_KUP_H_ +#define 
KUAP_READ 1 +#define KUAP_WRITE 2 +#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE) + #ifdef CONFIG_PPC64 #include #endif @@ -42,32 +46,48 @@ void setup_kuap(bool disabled); #else static inline void setup_kuap(bool disabled) { } static inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size) { } + unsigned long size, unsigned long dir) { } static inline void prevent_user_access(void __user *to, const void __user *from, - unsigned long size) { } -static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; } + unsigned long size, unsigned long dir) { } +static inline bool +bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) +{ + return false; +} #endif /* CONFIG_PPC_KUAP */ static inline void allow_read_from_user(const void __user *from, unsigned long size) { - allow_user_access(NULL, from, size); + allow_user_access(NULL, from, size, KUAP_READ); } static inline void allow_write_to_user(void __user *to, unsigned long size) { - allow_user_access(to, NULL, size); + allow_user_access(to, NULL, size, KUAP_WRITE); +} + +static inline void allow_read_write_user(void __user *to, const void __user *from, + unsigned long size) +{ + allow_user_access(to, from, size, KUAP_READ_WRITE); } static inline void prevent_read_from_user(const void __user *from, unsigned long size) { - prevent_user_access(NULL, from, size); + prevent_user_access(NULL, from, size, KUAP_READ); } static inline void prevent_write_to_user(void __user *to, unsigned long size) { - prevent_user_access(to, NULL, size); + prevent_user_access(to, NULL, size, KUAP_WRITE); +} + +static inline void prevent_read_write_user(void __user *to, const void __user *from, + unsigned long size) +{ + prevent_user_access(to, from, size, KUAP_READ_WRITE); } #endif /* !__ASSEMBLY__ */ -#endif /* _ASM_POWERPC_KUP_H_ */ +#endif /* _ASM_POWERPC_KUAP_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h index 1c3133b5f86a..6fe97465e350 100644 --- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h @@ -34,18 +34,19 @@ #include static inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size) + unsigned long size, unsigned long dir) { mtspr(SPRN_MD_AP, MD_APG_INIT); } static inline void prevent_user_access(void __user *to, const void __user *from, - unsigned long size) + unsigned long size, unsigned long dir) { mtspr(SPRN_MD_AP, MD_APG_KUAP); } -static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) +static inline bool +bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000), "Bug: fault blocked by AP register !"); diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h index 332b13b4ecdb..29c43665a753 100644 --- a/arch/powerpc/include/asm/nohash/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift) #define get_hugepd_cache_index(x) (x) -#ifdef CONFIG_SMP static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { unsigned long pgf = (unsigned long)table; @@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table) pgtable_free(table, shift); } -#else -static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) -{ - pgtable_free(table, shift); -} -#endif - static inline 
void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index c8bb14ff4713..6ba5adb96a3b 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn) /* * Some number of bits at the level of the page table that points to * a hugepte are used to encode the size. This masks those bits. + * On 8xx, HW assistance requires 4k alignment for the hugepte. */ +#ifdef CONFIG_PPC_8xx +#define HUGEPD_SHIFT_MASK 0xfff +#else #define HUGEPD_SHIFT_MASK 0x3f +#endif #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 5a9b6eb651b6..d19871763ed4 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -5,8 +5,22 @@ #include #include + +#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed + #include +extern bool init_mem_is_free; + +static inline int arch_is_kernel_initmem_freed(unsigned long addr) +{ + if (!init_mem_is_free) + return 0; + + return addr >= (unsigned long)__init_begin && + addr < (unsigned long)__init_end; +} + extern char __head_end[]; #ifdef __powerpc64__ diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 759597bf0fd8..7c05e95a5c44 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -9,7 +9,7 @@ #define _ASM_POWERPC_SECURITY_FEATURES_H -extern unsigned long powerpc_security_features; +extern u64 powerpc_security_features; extern bool rfi_flush; /* These are bit flags */ @@ -24,17 +24,17 @@ void setup_stf_barrier(void); void do_stf_barrier_fixups(enum stf_barrier_type types); void setup_count_cache_flush(void); -static inline void security_ftr_set(unsigned long feature) +static inline void security_ftr_set(u64 feature) { powerpc_security_features |= feature; } -static inline void security_ftr_clear(unsigned long feature) +static inline void security_ftr_clear(u64 feature) { powerpc_security_features &= ~feature; } -static inline bool security_ftr_enabled(unsigned long feature) +static inline bool security_ftr_enabled(u64 feature) { return !!(powerpc_security_features & feature); } @@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature) // Software required to flush count cache on context switch #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull +// Software required to flush link stack on context switch +#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull + // Features enabled by default #define SEC_FTR_DEFAULT \ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index e9a960e28f3c..d6102e34eb21 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -15,6 +15,7 @@ * * (the type definitions are in asm/spinlock_types.h) */ +#include #include #ifdef CONFIG_PPC64 #include @@ -36,10 +37,12 @@ #endif #ifdef CONFIG_PPC_PSERIES +DECLARE_STATIC_KEY_FALSE(shared_processor); + #define vcpu_is_preempted vcpu_is_preempted static inline bool vcpu_is_preempted(int cpu) { - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + if (!static_branch_unlikely(&shared_processor)) return false; return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); } diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index b2c0be93929d..7f3a8b902325 100644 --- 
a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -26,6 +26,17 @@ #define tlb_flush tlb_flush extern void tlb_flush(struct mmu_gather *tlb); +/* + * book3s: + * Hash does not use the linux page-tables, so we can avoid + * the TLB invalidate for page-table freeing, Radix otoh does use the + * page-tables and needs the TLBI. + * + * nohash: + * We still do TLB invalidate in the __pte_free_tlb routine before we + * add the page table pages to mmu gather table batch. + */ +#define tlb_needs_table_invalidate() radix_enabled() /* Get the generic bits... */ #include diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 15002b51ff18..cafad1960e76 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -313,9 +313,9 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) unsigned long ret; barrier_nospec(); - allow_user_access(to, from, n); + allow_read_write_user(to, from, n); ret = __copy_tofrom_user(to, from, n); - prevent_user_access(to, from, n); + prevent_read_write_user(to, from, n); return ret; } #endif /* __powerpc64__ */ @@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) return n; } -extern unsigned long __clear_user(void __user *addr, unsigned long size); +unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) { @@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) might_fault(); if (likely(access_ok(addr, size))) { allow_write_to_user(addr, size); - ret = __clear_user(addr, size); + ret = __arch_clear_user(addr, size); prevent_write_to_user(addr, size); } return ret; } +static inline unsigned long __clear_user(void __user *addr, unsigned long size) +{ + return clear_user(addr, size); +} + extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index c61d59ed3b45..2ccb938d8544 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -82,6 +82,7 @@ struct vdso_data { __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */ __s64 wtom_clock_sec; /* Wall to monotonic clock sec */ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ + __u32 hrtimer_res; /* hrtimer resolution */ __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; @@ -103,6 +104,7 @@ struct vdso_data { __s32 wtom_clock_nsec; struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ + __u32 hrtimer_res; /* hrtimer resolution */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 dcache_block_size; /* L1 d-cache block size */ __u32 icache_block_size; /* L1 i-cache block size */ diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h index f2dfcd50a2d3..33aee7490cbb 100644 --- a/arch/powerpc/include/asm/xive-regs.h +++ b/arch/powerpc/include/asm/xive-regs.h @@ -39,6 +39,7 @@ #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 +#define XIVE_ESB_INVALID 0xFF /* * Thread Management (aka "TM") registers diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 
a7ca8fe62368..3c02445cf086 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -5,8 +5,8 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -# Disable clang warning for using setjmp without setjmp.h header -CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header) +# Avoid clang warnings around longjmp/setjmp declarations +CFLAGS_crash.o += -ffreestanding ifdef CONFIG_PPC64 CFLAGS_prom_init.o += $(NO_MINIMAL_TOC) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 484f54dab247..5c0a1e17219b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -387,6 +387,7 @@ int main(void) OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec); OFFSET(STAMP_XTIME, vdso_data, stamp_xtime); OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction); + OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res); OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size); OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size); OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size); @@ -417,7 +418,6 @@ int main(void) DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); - DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index e745abc5457a..245be4fafe13 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, * oprofile_cpu_type already has a value, then we are * possibly overriding a real PVR with a logical one, * and, in that case, keep the current value for - * oprofile_cpu_type. + * oprofile_cpu_type. Furthermore, let's ensure that the + * fix for the PMAO bug is enabled in compatibility mode. */ if (old.oprofile_cpu_type != NULL) { t->oprofile_cpu_type = old.oprofile_cpu_type; t->oprofile_type = old.oprofile_type; + t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG; } } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index d9279d0ee9f5..4fd7efdf2a53 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -541,12 +541,6 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata) pci_iov_remove_virtfn(edev->physfn, pdn->vf_index); edev->pdev = NULL; - - /* - * We have to set the VF PE number to invalid one, which is - * required to plug the VF successfully.
- */ - pdn->pe_number = IODA_INVALID_PE; #endif if (rmv_data) list_add(&edev->rmv_entry, &rmv_data->removed_vf_list); @@ -897,12 +891,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe) /* Log the event */ if (pe->type & EEH_PE_PHB) { - pr_err("EEH: PHB#%x failure detected, location: %s\n", + pr_err("EEH: Recovering PHB#%x, location: %s\n", pe->phb->global_number, eeh_pe_loc_get(pe)); } else { struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb); - pr_err("EEH: Frozen PHB#%x-PE#%x detected\n", + pr_err("EEH: Recovering PHB#%x-PE#%x\n", pe->phb->global_number, pe->addr); pr_err("EEH: PE location: %s, PHB location: %s\n", eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe)); @@ -1206,6 +1200,17 @@ void eeh_handle_special_event(void) eeh_pe_state_mark(pe, EEH_PE_RECOVERING); eeh_handle_normal_event(pe); } else { + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) + edev->mode &= ~EEH_DEV_NO_HANDLER; + + /* Notify all devices to be down */ + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); + eeh_pe_report( + "error_detected(permanent failure)", pe, + eeh_report_failure, NULL); + pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { phb_pe = eeh_phb_pe_get(hose); @@ -1214,16 +1219,6 @@ void eeh_handle_special_event(void) (phb_pe->state & EEH_PE_RECOVERING)) continue; - eeh_for_each_pe(pe, tmp_pe) - eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) - edev->mode &= ~EEH_DEV_NO_HANDLER; - - /* Notify all devices to be down */ - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_set_channel_state(pe, pci_channel_io_perm_failure); - eeh_pe_report( - "error_detected(permanent failure)", pe, - eeh_report_failure, NULL); bus = eeh_pe_bus_get(phb_pe); if (!bus) { pr_err("%s: Cannot find PCI bus for " diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d60908ea37fb..13f699256258 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -179,7 +179,7 @@ transfer_to_handler: 2: /* if from kernel, check interrupted DOZE/NAP mode and * check for stack overflow */ - kuap_save_and_lock r11, r12, r9, r2, r0 + kuap_save_and_lock r11, r12, r9, r2, r6 addi r2, r12, -THREAD lwz r9,KSP_LIMIT(r12) cmplw r1,r9 /* if r1 <= ksp_limit */ @@ -284,6 +284,7 @@ reenable_mmu: rlwinm r9,r9,0,~MSR_EE lwz r12,_LINK(r11) /* and return to address in LR */ kuap_restore r11, r2, r3, r4, r5 + lwz r2, GPR2(r11) b fast_exception_return #endif @@ -777,7 +778,7 @@ fast_exception_return: 1: lis r3,exc_exit_restart_end@ha addi r3,r3,exc_exit_restart_end@l cmplw r12,r3 -#if CONFIG_PPC_BOOK3S_601 +#ifdef CONFIG_PPC_BOOK3S_601 bge 2b #else bge 3f @@ -785,7 +786,7 @@ fast_exception_return: lis r4,exc_exit_restart@ha addi r4,r4,exc_exit_restart@l cmplw r12,r4 -#if CONFIG_PPC_BOOK3S_601 +#ifdef CONFIG_PPC_BOOK3S_601 blt 2b #else blt 3f diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 6467bdab8d40..3fd3ef352e3f 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -537,6 +537,7 @@ flush_count_cache: /* Save LR into r9 */ mflr r9 + // Flush the link stack .rept 64 bl .+4 .endr @@ -546,6 +547,11 @@ flush_count_cache: .balign 32 /* Restore LR */ 1: mtlr r9 + + // If we're just flushing the link stack, return here +3: nop + patch_site 3b patch__flush_link_stack_return + li r9,0x7fff mtctr r9 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 19f583e18402..98d8b6832fcb 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ 
b/arch/powerpc/kernel/head_8xx.S @@ -289,7 +289,7 @@ InstructionTLBMiss: * set. All other Linux PTE bits control the behavior * of the MMU. */ - rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */ + rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */ rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index adf0505dbe02..519d49547e2f 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -238,6 +238,9 @@ set_ivor: bl early_init +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif #ifdef CONFIG_RELOCATABLE mr r3,r30 mr r4,r31 @@ -264,9 +267,6 @@ set_ivor: /* * Decide what sort of machine this is and initialize the MMU. */ -#ifdef CONFIG_KASAN - bl kasan_early_init -#endif mr r3,r30 mr r4,r31 bl machine_init diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5645bc9cbc09..add67498c126 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs) trace_irq_entry(regs); - check_stack_overflow(); - /* * Query the platform PIC for the interrupt & ack it. * @@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs) irqsp = hardirq_ctx[raw_smp_processor_id()]; sirqsp = softirq_ctx[raw_smp_processor_id()]; + check_stack_overflow(); + /* Already there ? */ if (unlikely(cursp == irqsp || cursp == sirqsp)) { __do_irq(regs); diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 82df4b09e79f..f4e4a1926a7a 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -316,126 +316,6 @@ _GLOBAL(flush_instruction_cache) EXPORT_SYMBOL(flush_instruction_cache) #endif /* CONFIG_PPC_8xx */ -/* - * Write any modified data cache blocks out to memory - * and invalidate the corresponding instruction cache blocks. - * This is a no-op on the 601. - * - * flush_icache_range(unsigned long start, unsigned long stop) - */ -_GLOBAL(flush_icache_range) -#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200) - PURGE_PREFETCHED_INS - blr /* for 601 and e200, do nothing */ -#else - rlwinm r3,r3,0,0,31 - L1_CACHE_SHIFT - subf r4,r3,r4 - addi r4,r4,L1_CACHE_BYTES - 1 - srwi. r4,r4,L1_CACHE_SHIFT - beqlr - mtctr r4 - mr r6,r3 -1: dcbst 0,r3 - addi r3,r3,L1_CACHE_BYTES - bdnz 1b - sync /* wait for dcbst's to get to ram */ -#ifndef CONFIG_44x - mtctr r4 -2: icbi 0,r6 - addi r6,r6,L1_CACHE_BYTES - bdnz 2b -#else - /* Flash invalidate on 44x because we are passed kmapped addresses and - this doesn't work for userspace pages due to the virtually tagged - icache. Sigh. */ - iccci 0, r0 -#endif - sync /* additional sync needed on g4 */ - isync - blr -#endif -_ASM_NOKPROBE_SYMBOL(flush_icache_range) -EXPORT_SYMBOL(flush_icache_range) - -/* - * Flush a particular page from the data cache to RAM. - * Note: this is necessary because the instruction cache does *not* - * snoop from the data cache. - * This is a no-op on the 601 and e200 which have a unified cache. 
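The head_8xx.S one-liner earlier in this hunk swaps rlwimi for rlwinm: clearing bits 20-23 needs a rotate-and-AND with the complement mask (~0x0f00, effectively r10 &= ~0x0f00), whereas rlwimi with identical source and destination and a zero shift only re-inserts the register's own bits under the mask and leaves them set. A standalone check of the two computations in C:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t r10 = 0xffffffffu;

        /* What the fixed rlwinm computes: clear the 0x0f00 bits. */
        uint32_t rlwinm = r10 & ~0x0f00u;

        /* What the buggy rlwimi computed: insert r10's own bits under
         * the mask, i.e. keep 0x0f00 unchanged, a no-op here. */
        uint32_t rlwimi = (r10 & ~0x0f00u) | (r10 & 0x0f00u);

        assert(rlwinm == 0xfffff0ffu);  /* bits actually cleared */
        assert(rlwimi == r10);          /* bits were never cleared */
        return 0;
}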
- * - * void __flush_dcache_icache(void *page) - */ -_GLOBAL(__flush_dcache_icache) -#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200) - PURGE_PREFETCHED_INS - blr -#else - rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ - li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ - mtctr r4 - mr r6,r3 -0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_BYTES - bdnz 0b - sync -#ifdef CONFIG_44x - /* We don't flush the icache on 44x. Those have a virtual icache - * and we don't have access to the virtual address here (it's - * not the page vaddr but where it's mapped in user space). The - * flushing of the icache on these is handled elsewhere, when - * a change in the address space occurs, before returning to - * user space - */ -BEGIN_MMU_FTR_SECTION - blr -END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x) -#endif /* CONFIG_44x */ - mtctr r4 -1: icbi 0,r6 - addi r6,r6,L1_CACHE_BYTES - bdnz 1b - sync - isync - blr -#endif - -#ifndef CONFIG_BOOKE -/* - * Flush a particular page from the data cache to RAM, identified - * by its physical address. We turn off the MMU so we can just use - * the physical address (this may be a highmem page without a kernel - * mapping). - * - * void __flush_dcache_icache_phys(unsigned long physaddr) - */ -_GLOBAL(__flush_dcache_icache_phys) -#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200) - PURGE_PREFETCHED_INS - blr /* for 601 and e200, do nothing */ -#else - mfmsr r10 - rlwinm r0,r10,0,28,26 /* clear DR */ - mtmsr r0 - isync - rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ - li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ - mtctr r4 - mr r6,r3 -0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_BYTES - bdnz 0b - sync - mtctr r4 -1: icbi 0,r6 - addi r6,r6,L1_CACHE_BYTES - bdnz 1b - sync - mtmsr r10 /* restore DR */ - isync - blr -#endif -#endif /* CONFIG_BOOKE */ - /* * Copy a whole page. We use the dcbz instruction on the destination * to reduce memory traffic (it eliminates the unnecessary reads of diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index b55a7b4cb543..ff20c253f273 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -49,108 +49,6 @@ _GLOBAL(call_do_irq) mtlr r0 blr - .section ".toc","aw" -PPC64_CACHES: - .tc ppc64_caches[TC],ppc64_caches - .section ".text" - -/* - * Write any modified data cache blocks out to memory - * and invalidate the corresponding instruction cache blocks. - * - * flush_icache_range(unsigned long start, unsigned long stop) - * - * flush all bytes from start through stop-1 inclusive - */ - -_GLOBAL_TOC(flush_icache_range) -BEGIN_FTR_SECTION - PURGE_PREFETCHED_INS - blr -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) -/* - * Flush the data cache to memory - * - * Different systems have different cache line sizes - * and in some cases i-cache and d-cache line sizes differ from - * each other. - */ - ld r10,PPC64_CACHES@toc(r2) - lwz r7,DCACHEL1BLOCKSIZE(r10)/* Get cache block size */ - addi r5,r7,-1 - andc r6,r3,r5 /* round low to line bdy */ - subf r8,r6,r4 /* compute length */ - add r8,r8,r5 /* ensure we get enough */ - lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of cache block size */ - srw. r8,r8,r9 /* compute line count */ - beqlr /* nothing to do? 
*/ - mtctr r8 -1: dcbst 0,r6 - add r6,r6,r7 - bdnz 1b - sync - -/* Now invalidate the instruction cache */ - - lwz r7,ICACHEL1BLOCKSIZE(r10) /* Get Icache block size */ - addi r5,r7,-1 - andc r6,r3,r5 /* round low to line bdy */ - subf r8,r6,r4 /* compute length */ - add r8,r8,r5 - lwz r9,ICACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of Icache block size */ - srw. r8,r8,r9 /* compute line count */ - beqlr /* nothing to do? */ - mtctr r8 -2: icbi 0,r6 - add r6,r6,r7 - bdnz 2b - isync - blr -_ASM_NOKPROBE_SYMBOL(flush_icache_range) -EXPORT_SYMBOL(flush_icache_range) - -/* - * Flush a particular page from the data cache to RAM. - * Note: this is necessary because the instruction cache does *not* - * snoop from the data cache. - * - * void __flush_dcache_icache(void *page) - */ -_GLOBAL(__flush_dcache_icache) -/* - * Flush the data cache to memory - * - * Different systems have different cache line sizes - */ - -BEGIN_FTR_SECTION - PURGE_PREFETCHED_INS - blr -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) - -/* Flush the dcache */ - ld r7,PPC64_CACHES@toc(r2) - clrrdi r3,r3,PAGE_SHIFT /* Page align */ - lwz r4,DCACHEL1BLOCKSPERPAGE(r7) /* Get # dcache blocks per page */ - lwz r5,DCACHEL1BLOCKSIZE(r7) /* Get dcache block size */ - mr r6,r3 - mtctr r4 -0: dcbst 0,r6 - add r6,r6,r5 - bdnz 0b - sync - -/* Now invalidate the icache */ - - lwz r4,ICACHEL1BLOCKSPERPAGE(r7) /* Get # icache blocks per page */ - lwz r5,ICACHEL1BLOCKSIZE(r7) /* Get icache block size */ - mtctr r4 -1: icbi 0,r3 - add r3,r3,r5 - bdnz 1b - isync - blr - _GLOBAL(__bswapdi2) EXPORT_SYMBOL(__bswapdi2) srdi r8,r3,32 diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 9524009ca1ae..d876eda92609 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -244,9 +244,22 @@ void remove_dev_pci_data(struct pci_dev *pdev) continue; #ifdef CONFIG_EEH - /* Release EEH device for the VF */ + /* + * Release EEH state for this VF. The PCI core + * has already torn down the pci_dev for this VF, but + * we're responsible for removing the eeh_dev since it + * has the same lifetime as the pci_dn that spawned it. + */ edev = pdn_to_eeh_dev(pdn); if (edev) { + /* + * We allocate pci_dn's for the totalvfs count, + * but only the vfs that were activated + * have a configured PE.
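The assembly flush_icache_range()/__flush_dcache_icache() routines deleted above (and their 32-bit counterparts earlier) are replaced by the C implementations added to arch/powerpc/mm/mem.c later in this diff. Both versions use the same line-walking arithmetic; a standalone model of just the loop bounds, with the cache-touching instructions replaced by a count so it runs anywhere (line size assumed 128 bytes):

#include <stdio.h>

#define L1_CACHE_BYTES 128UL

/* Model of the flush_icache_range() bounds: round start down to a
 * line boundary, then cover every line up to stop-1 inclusive; one
 * dcbst plus one icbi is issued per line in the real code. */
static unsigned long lines_to_flush(unsigned long start, unsigned long stop)
{
        unsigned long addr = start & ~(L1_CACHE_BYTES - 1);
        unsigned long size = stop - addr + (L1_CACHE_BYTES - 1);

        return size / L1_CACHE_BYTES;
}

int main(void)
{
        printf("%lu\n", lines_to_flush(0x1000, 0x1001));   /* 1 line  */
        printf("%lu\n", lines_to_flush(0x107f, 0x1081));   /* 2 lines */
        return 0;
}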
+ */ + if (edev->pe) + eeh_rmv_from_parent_pe(edev); + pdn->edev = NULL; kfree(edev); } diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 100f1b57ec2f..eba9d4ee4baf 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1053,7 +1053,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { .reserved2 = 0, .reserved3 = 0, .subprocessors = 1, - .byte22 = OV5_FEAT(OV5_DRMEM_V2), + .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO), .intarch = 0, .mmu = 0, .hash_ext = 0, diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 7cfcb294b11c..1740a66cea84 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -16,7 +16,7 @@ #include -unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; +u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; enum count_cache_flush_type { COUNT_CACHE_FLUSH_NONE = 0x1, @@ -24,6 +24,7 @@ enum count_cache_flush_type { COUNT_CACHE_FLUSH_HW = 0x4, }; static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; +static bool link_stack_flush_enabled; bool barrier_nospec_enabled; static bool no_nospec; @@ -108,7 +109,7 @@ device_initcall(barrier_nospec_debugfs_init); static __init int security_feature_debugfs_init(void) { debugfs_create_x64("security_features", 0400, powerpc_debugfs_root, - (u64 *)&powerpc_security_features); + &powerpc_security_features); return 0; } device_initcall(security_feature_debugfs_init); @@ -141,32 +142,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); - if (rfi_flush || thread_priv) { + if (rfi_flush) { struct seq_buf s; seq_buf_init(&s, buf, PAGE_SIZE - 1); - seq_buf_printf(&s, "Mitigation: "); - - if (rfi_flush) - seq_buf_printf(&s, "RFI Flush"); - - if (rfi_flush && thread_priv) - seq_buf_printf(&s, ", "); - + seq_buf_printf(&s, "Mitigation: RFI Flush"); if (thread_priv) - seq_buf_printf(&s, "L1D private per thread"); + seq_buf_printf(&s, ", L1D private per thread"); seq_buf_printf(&s, "\n"); return s.len; } + if (thread_priv) + return sprintf(buf, "Vulnerable: L1D private per thread\n"); + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) return sprintf(buf, "Not affected\n"); return sprintf(buf, "Vulnerable\n"); } + +ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_meltdown(dev, attr, buf); +} #endif ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) @@ -212,11 +214,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (ccd) seq_buf_printf(&s, "Indirect branch cache disabled"); + + if (link_stack_flush_enabled) + seq_buf_printf(&s, ", Software link stack flush"); + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { seq_buf_printf(&s, "Mitigation: Software count cache flush"); if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) seq_buf_printf(&s, " (hardware accelerated)"); + + if (link_stack_flush_enabled) + seq_buf_printf(&s, ", Software link stack flush"); + } else if (btb_flush_enabled) { seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); } else { @@ -377,18 +387,49 @@ static __init int stf_barrier_debugfs_init(void) device_initcall(stf_barrier_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +static void no_count_cache_flush(void) +{ + 
count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; + pr_info("count-cache-flush: software flush disabled.\n"); +} + static void toggle_count_cache_flush(bool enable) { - if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && + !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) + enable = false; + + if (!enable) { patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); - count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; - pr_info("count-cache-flush: software flush disabled.\n"); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); +#endif + pr_info("link-stack-flush: software flush disabled.\n"); + link_stack_flush_enabled = false; + no_count_cache_flush(); return; } + // This enables the branch from _switch to flush_count_cache patch_branch_site(&patch__call_flush_count_cache, (u64)&flush_count_cache, BRANCH_SET_LINK); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + // This enables the branch from guest_exit_cont to kvm_flush_link_stack + patch_branch_site(&patch__call_kvm_flush_link_stack, + (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); +#endif + + pr_info("link-stack-flush: software flush enabled.\n"); + link_stack_flush_enabled = true; + + // If we just need to flush the link stack, patch an early return + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); + no_count_cache_flush(); + return; + } + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { count_cache_flush_type = COUNT_CACHE_FLUSH_SW; pr_info("count-cache-flush: full software flush sequence enabled.\n"); @@ -407,11 +448,20 @@ void setup_count_cache_flush(void) if (no_spectrev2 || cpu_mitigations_off()) { if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) - pr_warn("Spectre v2 mitigations not under software control, can't disable\n"); + pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); enable = false; } + /* + * There's no firmware feature flag/hypervisor bit to tell us we need to + * flush the link stack on context switch. So we set it here if we see + * either of the Spectre v2 mitigations that aim to protect userspace. + */ + if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || + security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) + security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); + toggle_count_cache_flush(enable); } diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index e6c30cee6abf..d215f9554553 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk) * normal/non-checkpointed stack pointer. */ + unsigned long ret = tsk->thread.regs->gpr[1]; + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BUG_ON(tsk != current); if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) { + preempt_disable(); tm_reclaim_current(TM_CAUSE_SIGNAL); if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr)) - return tsk->thread.ckpt_regs.gpr[1]; + ret = tsk->thread.ckpt_regs.gpr[1]; + + /* + * If we treclaim, we must clear the current thread's TM bits + * before re-enabling preemption. Otherwise we might be + * preempted and have the live MSR[TS] changed behind our back + * (tm_recheckpoint_new_task() would recheckpoint). Besides, we + * enter the signal handler in non-transactional state. 
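The toggle_count_cache_flush() rework earlier in this hunk separates two decisions: whether any flush sequence is patched in at all, and whether a blr is patched at patch__flush_link_stack_return so the sequence returns early when only the link stack (not the count cache) needs flushing. A standalone model of that decision table; the flag handling mirrors the hunk, while the real code acts by patching instructions via patch_instruction_site()/patch_branch_site():

#include <stdbool.h>
#include <stdio.h>

enum patch { PATCH_NOP, PATCH_BRANCH, PATCH_EARLY_RETURN };

struct decision {
        enum patch call_site;     /* patch__call_flush_count_cache */
        enum patch return_site;   /* patch__flush_link_stack_return */
};

static struct decision decide(bool enable, bool ftr_count_cache, bool ftr_link_stack)
{
        struct decision d = { PATCH_NOP, PATCH_NOP };

        if (!ftr_count_cache && !ftr_link_stack)
                enable = false;
        if (!enable)
                return d;                       /* everything stays a nop */

        d.call_site = PATCH_BRANCH;             /* branch into the flush */
        if (!ftr_count_cache)
                d.return_site = PATCH_EARLY_RETURN; /* blr after link-stack part */
        return d;
}

int main(void)
{
        /* link-stack flush wanted, count-cache flush not needed */
        struct decision d = decide(true, false, true);

        printf("call=%d return=%d\n", d.call_site, d.return_site); /* 1 2 */
        return 0;
}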
+ */ + tsk->thread.regs->msr &= ~MSR_TS_MASK; + preempt_enable(); } #endif - return tsk->thread.regs->gpr[1]; + return ret; } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 98600b276f76..1b090a76b444 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, */ static int save_tm_user_regs(struct pt_regs *regs, struct mcontext __user *frame, - struct mcontext __user *tm_frame, int sigret) + struct mcontext __user *tm_frame, int sigret, + unsigned long msr) { - unsigned long msr = regs->msr; - WARN_ON(tm_suspend_disabled); - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state. This also ensures - * that flush_fp_to_thread won't set TIF_RESTORE_TM again. - */ - regs->msr &= ~MSR_TS_MASK; - /* Save both sets of general registers */ if (save_general_regs(¤t->thread.ckpt_regs, frame) || save_general_regs(regs, tm_frame)) @@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, int sigret; unsigned long tramp; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_frame = &rt_sf->uc_transact.uc_mcontext; - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { if (__put_user((unsigned long)&rt_sf->uc_transact, &rt_sf->uc.uc_link) || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs)) goto badframe; - if (save_tm_user_regs(regs, frame, tm_frame, sigret)) + if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr)) goto badframe; } else @@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, int sigret; unsigned long tramp; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_mctx = &frame->mctx_transact; - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, - sigret)) + sigret, msr)) goto badframe; } else diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 117515564ec7..84ed2e77ef9c 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, static long setup_tm_sigcontexts(struct sigcontext __user *sc, struct sigcontext __user *tm_sc, struct task_struct *tsk, - int signr, sigset_t *set, unsigned long handler) + int signr, sigset_t *set, unsigned long handler, + unsigned long msr) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of @@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc); #endif struct pt_regs *regs = tsk->thread.regs; - unsigned long msr = tsk->thread.regs->msr; long err = 0; BUG_ON(tsk != 
current); - BUG_ON(!MSR_TM_ACTIVE(regs->msr)); + BUG_ON(!MSR_TM_ACTIVE(msr)); WARN_ON(tm_suspend_disabled); @@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, */ msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX); - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state. This also ensures - * that flush_fp_to_thread won't set TIF_RESTORE_TM again. - */ - regs->msr &= ~MSR_TS_MASK; - #ifdef CONFIG_ALTIVEC err |= __put_user(v_regs, &sc->v_regs); err |= __put_user(tm_v_regs, &tm_sc->v_regs); @@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, unsigned long newsp = 0; long err = 0; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, err |= __put_user(0, &frame->uc.uc_flags); err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { /* The ucontext_t passed to userland points to the second * ucontext_t (for transactional state) with its uc_link ptr. */ @@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, &frame->uc_transact.uc_mcontext, tsk, ksig->sig, NULL, - (unsigned long)ksig->ka.sa.sa_handler); + (unsigned long)ksig->ka.sa.sa_handler, + msr); } else #endif { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 694522308cd5..11301a1187f3 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -232,7 +232,7 @@ static u64 scan_dispatch_log(u64 stop_tb) * Accumulate stolen time by scanning the dispatch trace log. * Called on entry from user mode. */ -void accumulate_stolen_time(void) +void notrace accumulate_stolen_time(void) { u64 sst, ust; unsigned long save_irq_soft_mask = irq_soft_mask_return(); @@ -959,6 +959,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec; vdso_data->stamp_xtime = xt; vdso_data->stamp_sec_fraction = frac_sec; + vdso_data->hrtimer_res = hrtimer_resolution; smp_wmb(); ++(vdso_data->tb_update_count); } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 82f43535e686..014ff0701f24 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -250,15 +250,22 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, } NOKPROBE_SYMBOL(oops_end); +static char *get_mmu_str(void) +{ + if (early_radix_enabled()) + return " MMU=Radix"; + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return " MMU=Hash"; + return ""; +} + static int __die(const char *str, struct pt_regs *regs, long err) { printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); - printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n", + printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", - PAGE_SIZE / 1024, - early_radix_enabled() ? " MMU=Radix" : "", - early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "", + PAGE_SIZE / 1024, get_mmu_str(), IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", IS_ENABLED(CONFIG_SMP) ? " SMP" : "", IS_ENABLED(CONFIG_SMP) ? 
(" NR_CPUS=" __stringify(NR_CPUS)) : "", diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index becd9f8767ed..a967e795b96d 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -156,12 +156,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) cror cr0*4+eq,cr0*4+eq,cr1*4+eq bne cr0,99f + mflr r12 + .cfi_register lr,r12 + bl __get_datapage@local /* get data page */ + lwz r5, CLOCK_HRTIMER_RES(r3) + mtlr r12 li r3,0 cmpli cr0,r4,0 crclr cr0*4+so beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l stw r3,TSPC32_TV_SEC(r4) stw r5,TSPC32_TV_NSEC(r4) blr diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S index 3f92561a64c4..526f5ba2593e 100644 --- a/arch/powerpc/kernel/vdso64/cacheflush.S +++ b/arch/powerpc/kernel/vdso64/cacheflush.S @@ -35,7 +35,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10) - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ crclr cr0*4+so beqlr /* nothing to do? */ mtctr r8 @@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) subf r8,r6,r4 /* compute length */ add r8,r8,r5 lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10) - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ crclr cr0*4+so beqlr /* nothing to do? */ mtctr r8 diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index 07bfe33fe874..81757f06bbd7 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -186,12 +186,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) cror cr0*4+eq,cr0*4+eq,cr1*4+eq bne cr0,99f + mflr r12 + .cfi_register lr,r12 + bl V_LOCAL_FUNC(__get_datapage) + lwz r5, CLOCK_HRTIMER_RES(r3) + mtlr r12 li r3,0 cmpldi cr0,r4,0 crclr cr0*4+so beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l std r3,TSPC64_TV_SEC(r4) std r5,TSPC64_TV_NSEC(r4) blr diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 060a1acd7c6d..4638d2863388 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -326,6 +326,12 @@ SECTIONS *(.branch_lt) } +#ifdef CONFIG_DEBUG_INFO_BTF + .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { + *(.BTF) + } +#endif + .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; KEEP(*(.opd)) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 709cf1fd4cf4..36abbe3c346d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2354,7 +2354,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, mutex_unlock(&kvm->lock); if (!vcore) - goto free_vcpu; + goto uninit_vcpu; spin_lock(&vcore->lock); ++vcore->num_threads; @@ -2371,6 +2371,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, return vcpu; +uninit_vcpu: + kvm_vcpu_uninit(vcpu); free_vcpu: kmem_cache_free(kvm_vcpu_cache, vcpu); out: diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index faebcbb8c4db..c6fbbd29bd87 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -1116,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r7, VCPU_GPR(R7)(r4) bne ret_to_ultra - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, 
VCPU_GPR(R0)(r4) @@ -1136,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) * R3 = UV_RETURN */ ret_to_ultra: - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R3)(r4) @@ -1487,6 +1488,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1: #endif /* CONFIG_KVM_XICS */ + /* + * Possibly flush the link stack here, before we do a blr in + * guest_exit_short_path. + */ +1: nop + patch_site 1b patch__call_kvm_flush_link_stack + /* If we came in through the P9 short path, go back out to C now */ lwz r0, STACK_SLOT_SHORT_PATH(r1) cmpwi r0, 0 @@ -1963,6 +1971,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) mtlr r0 blr +.balign 32 +.global kvm_flush_link_stack +kvm_flush_link_stack: + /* Save LR into r0 */ + mflr r0 + + /* Flush the link stack. On Power8 it's up to 32 entries in size. */ + .rept 32 + bl .+4 + .endr + + /* And on Power9 it's up to 64. */ +BEGIN_FTR_SECTION + .rept 32 + bl .+4 + .endr +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + + /* Restore LR */ + mtlr r0 + blr + kvmppc_guest_external: /* External interrupt, first check for host_ipi. If this is * set, we know the host wants us out so let's do it now diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index cc65af8fe6f7..3f6ad3f58628 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1769,10 +1769,12 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, err = kvmppc_mmu_init(vcpu); if (err < 0) - goto uninit_vcpu; + goto free_shared_page; return vcpu; +free_shared_page: + free_page((unsigned long)vcpu->arch.shared); uninit_vcpu: kvm_vcpu_uninit(vcpu); free_shadow_vcpu: diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index a3f9c665bb5b..baa740815b3c 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -2005,6 +2005,10 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) pr_devel("Creating xive for partition\n"); + /* Already there ? */ + if (kvm->arch.xive) + return -EEXIST; + xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; @@ -2014,12 +2018,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) xive->kvm = kvm; mutex_init(&xive->lock); - /* Already there ? 
*/ - if (kvm->arch.xive) - ret = -EEXIST; - else - kvm->arch.xive = xive; - /* We use the default queue size set by the host */ xive->q_order = xive_native_default_eq_shift(); if (xive->q_order < PAGE_SHIFT) @@ -2039,6 +2037,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) if (ret) return ret; + kvm->arch.xive = xive; return 0; } diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 78b906ffa0d2..235d57d6c205 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -50,6 +50,24 @@ static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio) } } +static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q, + u8 prio, __be32 *qpage, + u32 order, bool can_escalate) +{ + int rc; + __be32 *qpage_prev = q->qpage; + + rc = xive_native_configure_queue(vp_id, q, prio, qpage, order, + can_escalate); + if (rc) + return rc; + + if (qpage_prev) + put_page(virt_to_page(qpage_prev)); + + return rc; +} + void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; @@ -582,19 +600,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, q->guest_qaddr = 0; q->guest_qshift = 0; - rc = xive_native_configure_queue(xc->vp_id, q, priority, - NULL, 0, true); + rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, + NULL, 0, true); if (rc) { pr_err("Failed to reset queue %d for VCPU %d: %d\n", priority, xc->server_num, rc); return rc; } - if (q->qpage) { - put_page(virt_to_page(q->qpage)); - q->qpage = NULL; - } - return 0; } @@ -624,6 +637,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, srcu_idx = srcu_read_lock(&kvm->srcu); gfn = gpa_to_gfn(kvm_eq.qaddr); + + page_size = kvm_host_page_size(vcpu, gfn); + if (1ull << kvm_eq.qshift > page_size) { + srcu_read_unlock(&kvm->srcu, srcu_idx); + pr_warn("Incompatible host page size %lx!\n", page_size); + return -EINVAL; + } + page = gfn_to_page(kvm, gfn); if (is_error_page(page)) { srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -631,13 +652,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, return -EINVAL; } - page_size = kvm_host_page_size(kvm, gfn); - if (1ull << kvm_eq.qshift > page_size) { - srcu_read_unlock(&kvm->srcu, srcu_idx); - pr_warn("Incompatible host page size %lx!\n", page_size); - return -EINVAL; - } - qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK); srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -653,8 +667,8 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, * OPAL level because the use of END ESBs is not supported by * Linux. 
*/ - rc = xive_native_configure_queue(xc->vp_id, q, priority, - (__be32 *) qaddr, kvm_eq.qshift, true); + rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, + (__be32 *) qaddr, kvm_eq.qshift, true); if (rc) { pr_err("Failed to configure queue %d for VCPU %d: %d\n", priority, xc->server_num, rc); @@ -1081,7 +1095,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) dev->private = xive; xive->dev = dev; xive->kvm = kvm; - kvm->arch.xive = xive; mutex_init(&xive->mapping_lock); mutex_init(&xive->lock); @@ -1102,6 +1115,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) if (ret) return ret; + kvm->arch.xive = xive; return 0; } diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 2e496eb86e94..1139bc56e004 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -73,7 +73,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; u32 inst; - int ra, rs, rt; enum emulation_result emulated = EMULATE_FAIL; int advance = 1; struct instruction_op op; @@ -85,10 +84,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) if (emulated != EMULATE_DONE) return emulated; - ra = get_ra(inst); - rs = get_rs(inst); - rt = get_rt(inst); - vcpu->arch.mmio_vsx_copy_nums = 0; vcpu->arch.mmio_vsx_offset = 0; vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE; diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S index f69a6aab7bfb..1ddb26394e8a 100644 --- a/arch/powerpc/lib/string_32.S +++ b/arch/powerpc/lib/string_32.S @@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) -_GLOBAL(__clear_user) +_GLOBAL(__arch_clear_user) /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination @@ -87,4 +87,4 @@ _GLOBAL(__clear_user) EX_TABLE(8b, 91b) EX_TABLE(9b, 91b) -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 507b18b1660e..169872bc0892 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -17,7 +17,7 @@ PPC64_CACHES: .section ".text" /** - * __clear_user: - Zero a block of memory in user space, with less checking. + * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. 
* @@ -58,7 +58,7 @@ err3; stb r0,0(r3) mr r3,r4 blr -_GLOBAL_TOC(__clear_user) +_GLOBAL_TOC(__arch_clear_user) cmpdi r4,32 neg r6,r3 li r0,0 @@ -181,4 +181,4 @@ err1; dcbz 0,r3 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 6c123760164e..83c51a7d7eee 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -294,10 +294,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot, HPTE_V_BOLTED, psize, psize, ssize); - + if (ret == -1) { + /* Try to remove a non bolted entry */ + ret = mmu_hash_ops.hpte_remove(hpteg); + if (ret != -1) + ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot, + HPTE_V_BOLTED, psize, psize, + ssize); + } if (ret < 0) break; + cond_resched(); #ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled() && (paddr >> PAGE_SHIFT) < linear_map_hash_count) diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 75483b40fcb1..2bf7e1b4fd82 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -378,7 +378,6 @@ static inline void pgtable_free(void *table, int index) } } -#ifdef CONFIG_SMP void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) { unsigned long pgf = (unsigned long)table; @@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table) return pgtable_free(table, index); } -#else -void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) -{ - return pgtable_free(table, index); -} -#endif #ifdef CONFIG_PROC_FS atomic_long_t direct_pages_count[MMU_PAGE_COUNT]; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 8432c281de92..881a026a603a 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, // Read/write fault in a valid region (the exception table search passed // above), but blocked by KUAP is bad, it can never succeed. - if (bad_kuap_fault(regs, is_write)) + if (bad_kuap_fault(regs, address, is_write)) return true; // What's left? Kernel fault on user in well defined regions (extable @@ -354,6 +354,9 @@ static void sanity_check_fault(bool is_write, bool is_user, * Userspace trying to access kernel address, we get PROTFAULT for that. */ if (is_user && address >= TASK_SIZE) { + if ((long)address == -1) + return; + pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? 
(uid: %d)\n", current->comm, current->pid, address, from_kuid(&init_user_ns, current_uid())); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 73d4873fc7f8..33b3461d91e8 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (pshift >= pdshift) { cachep = PGT_CACHE(PTE_T_ORDER); num_hugepd = 1 << (pshift - pdshift); + new = NULL; } else if (IS_ENABLED(CONFIG_PPC_8xx)) { - cachep = PGT_CACHE(PTE_INDEX_SIZE); + cachep = NULL; num_hugepd = 1; + new = pte_alloc_one(mm); } else { cachep = PGT_CACHE(pdshift - pshift); num_hugepd = 1; + new = NULL; } - if (!cachep) { + if (!cachep && !new) { WARN_ONCE(1, "No page table cache created for hugetlb tables"); return -ENOMEM; } - new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); + if (cachep) + new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); @@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); - kmem_cache_free(cachep, new); + if (cachep) + kmem_cache_free(cachep, new); + else + pte_free(mm, new); } else { kmemleak_ignore(new); } @@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif if (shift >= pdshift) hugepd_free(tlb, hugepte); else if (IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_free_tlb(tlb, hugepte, - get_hugepd_cache_index(PTE_INDEX_SIZE)); + pgtable_free_tlb(tlb, hugepte, 0); else pgtable_free_tlb(tlb, hugepte, get_hugepd_cache_index(pdshift - shift)); @@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void) * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. */ - if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_cache_add(PTE_INDEX_SIZE); - else if (pdshift > shift) - pgtable_cache_add(pdshift - shift); - else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx)) + if (pdshift > shift) { + if (!IS_ENABLED(CONFIG_PPC_8xx)) + pgtable_cache_add(pdshift - shift); + } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || + IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); + } configured = true; } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index be941d382c8d..96ca90ce0264 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -104,6 +104,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) return -ENODEV; } +#define FLUSH_CHUNK_SIZE SZ_1G +/** + * flush_dcache_range_chunked(): Write any modified data cache blocks out to + * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE + * Does not invalidate the corresponding instruction cache blocks. 
+ * + * @start: the start address + * @stop: the stop address (exclusive) + * @chunk: the max size of the chunks + */ +static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, + unsigned long chunk) +{ + unsigned long i; + + for (i = start; i < stop; i += chunk) { + flush_dcache_range(i, min(stop, i + chunk)); + cond_resched(); + } +} + int __ref arch_add_memory(int nid, u64 start, u64 size, struct mhp_restrictions *restrictions) { @@ -120,7 +141,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, start, start + size, rc); return -EFAULT; } - flush_dcache_range(start, start + size); + + flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); return __add_pages(nid, start_pfn, nr_pages, restrictions); } @@ -130,14 +152,14 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); int ret; - __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); - flush_dcache_range(start, start + size); + flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); + ret = remove_section_mapping(start, start + size); WARN_ON_ONCE(ret); @@ -260,6 +282,14 @@ void __init mem_init(void) BUILD_BUG_ON(MMU_PAGE_COUNT > 16); #ifdef CONFIG_SWIOTLB + /* + * Some platforms (e.g. 85xx) limit DMA-able memory way below + * 4G. We force memblock to bottom-up mode to ensure that the + * memory allocated in swiotlb_init() is DMA-able. + * As it's the last memblock allocation, no need to reset it + * back to top-down. + */ + memblock_set_bottom_up(true); swiotlb_init(0); #endif @@ -318,6 +348,122 @@ void free_initmem(void) free_initmem_default(POISON_FREE_INITMEM); } +/** + * flush_coherent_icache() - if a CPU has a coherent icache, flush it + * @addr: The base address to use (can be any valid address, the whole cache will be flushed) + * Return true if the cache was flushed, false otherwise + */ +static inline bool flush_coherent_icache(unsigned long addr) +{ + /* + * For a snooping icache, we still need a dummy icbi to purge all the + * prefetched instructions from the ifetch buffers. We also need a sync + * before the icbi to order the actual stores to memory that might + * have modified instructions with the icbi.
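flush_dcache_range_chunked() above caps each flush_dcache_range() call at FLUSH_CHUNK_SIZE and calls cond_resched() between chunks, so flushing a huge hotplugged range no longer monopolises a CPU. A standalone model of the chunking loop, printing the sub-ranges instead of flushing:

#include <stdio.h>

static void flush_dcache_range(unsigned long start, unsigned long stop)
{
        printf("flush [%lu, %lu)\n", start, stop);
}

/* Bound each flush to `chunk` bytes; the kernel version calls
 * cond_resched() between iterations. */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
                                       unsigned long chunk)
{
        unsigned long i;

        for (i = start; i < stop; i += chunk) {
                unsigned long end = (i + chunk < stop) ? i + chunk : stop;

                flush_dcache_range(i, end);
                /* cond_resched() here in the kernel version */
        }
}

int main(void)
{
        flush_dcache_range_chunked(0, 10, 4);   /* [0,4) [4,8) [8,10) */
        return 0;
}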
+ */ + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { + mb(); /* sync */ + allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES); + icbi((void *)addr); + prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES); + mb(); /* sync */ + isync(); + return true; + } + + return false; +} + +/** + * invalidate_icache_range() - Flush the icache by issuing icbi across an address range + * @start: the start address + * @stop: the stop address (exclusive) + */ +static void invalidate_icache_range(unsigned long start, unsigned long stop) +{ + unsigned long shift = l1_icache_shift(); + unsigned long bytes = l1_icache_bytes(); + char *addr = (char *)(start & ~(bytes - 1)); + unsigned long size = stop - (unsigned long)addr + (bytes - 1); + unsigned long i; + + for (i = 0; i < size >> shift; i++, addr += bytes) + icbi(addr); + + mb(); /* sync */ + isync(); +} + +/** + * flush_icache_range: Write any modified data cache blocks out to memory + * and invalidate the corresponding blocks in the instruction cache + * + * Generic code will call this after writing memory, before executing from it. + * + * @start: the start address + * @stop: the stop address (exclusive) + */ +void flush_icache_range(unsigned long start, unsigned long stop) +{ + if (flush_coherent_icache(start)) + return; + + clean_dcache_range(start, stop); + + if (IS_ENABLED(CONFIG_44x)) { + /* + * Flash invalidate on 44x because we are passed kmapped + * addresses and this doesn't work for userspace pages due to + * the virtually tagged icache. + */ + iccci((void *)start); + mb(); /* sync */ + isync(); + } else + invalidate_icache_range(start, stop); +} +EXPORT_SYMBOL(flush_icache_range); + +#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64) +/** + * flush_dcache_icache_phys() - Flush a page by its physical address + * @physaddr: the physical address of the page + */ +static void flush_dcache_icache_phys(unsigned long physaddr) +{ + unsigned long bytes = l1_dcache_bytes(); + unsigned long nb = PAGE_SIZE / bytes; + unsigned long addr = physaddr & PAGE_MASK; + unsigned long msr, msr0; + unsigned long loop1 = addr, loop2 = addr; + + msr0 = mfmsr(); + msr = msr0 & ~MSR_DR; + /* + * This must remain as ASM to prevent potential memory accesses + * while the data MMU is disabled + */ + asm volatile( + " mtctr %2;\n" + " mtmsr %3;\n" + " isync;\n" + "0: dcbst 0, %0;\n" + " addi %0, %0, %4;\n" + " bdnz 0b;\n" + " sync;\n" + " mtctr %2;\n" + "1: icbi 0, %1;\n" + " addi %1, %1, %4;\n" + " bdnz 1b;\n" + " sync;\n" + " mtmsr %5;\n" + " isync;\n" + : "+&r" (loop1), "+&r" (loop2) + : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0) + : "ctr", "memory"); +} +#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64) + /* * This is called when a page has been modified by the kernel. * It just marks the page as not i-cache clean. We do the i-cache @@ -350,12 +496,46 @@ void flush_dcache_icache_page(struct page *page) __flush_dcache_icache(start); kunmap_atomic(start); } else { - __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); + unsigned long addr = page_to_pfn(page) << PAGE_SHIFT; + + if (flush_coherent_icache(addr)) + return; + flush_dcache_icache_phys(addr); } #endif } EXPORT_SYMBOL(flush_dcache_icache_page); +/** + * __flush_dcache_icache(): Flush a particular page from the data cache to RAM. + * Note: this is necessary because the instruction cache does *not* + * snoop from the data cache.
+ * + * @page: the address of the page to flush + */ +void __flush_dcache_icache(void *p) +{ + unsigned long addr = (unsigned long)p; + + if (flush_coherent_icache(addr)) + return; + + clean_dcache_range(addr, addr + PAGE_SIZE); + + /* + * We don't flush the icache on 44x. Those have a virtual icache and we + * don't have access to the virtual address here (it's not the page + * vaddr but where it's mapped in user space). The flushing of the + * icache on these is handled elsewhere, when a change in the address + * space occurs, before returning to user space. + */ + + if (cpu_has_feature(MMU_FTR_TYPE_44x)) + return; + + invalidate_icache_range(addr, addr + PAGE_SIZE); +} + void clear_user_page(void *page, unsigned long vaddr, struct page *pg) { clear_page(page); diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 8ec5dfb65b2e..784cae9f5697 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -221,6 +221,7 @@ void mark_rodata_ro(void) if (v_block_mapped((unsigned long)_sinittext)) { mmu_mark_rodata_ro(); + ptdump_check_wx(); return; } diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 2f9ddc29c535..c73205172447 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -173,10 +173,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr) static void note_prot_wx(struct pg_state *st, unsigned long addr) { + pte_t pte = __pte(st->current_flags); + if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx) return; - if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X))) + if (!pte_write(pte) || !pte_exec(pte)) return; WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n", diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 42bbcd47cc85..dffe1a45b6ed 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif -static inline bool slice_addr_is_low(unsigned long addr) +static inline notrace bool slice_addr_is_low(unsigned long addr) { u64 tmp = (u64)addr; @@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, mm_ctx_user_psize(¤t->mm->context), 1); } -unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) +unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) { unsigned char *psizes; int index, mask_index; diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index e04b20625cb9..7ccc5c85c74e 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -285,7 +285,14 @@ static int opal_imc_counters_probe(struct platform_device *pdev) domain = IMC_DOMAIN_THREAD; break; case IMC_TYPE_TRACE: - domain = IMC_DOMAIN_TRACE; + /* + * FIXME. Using trace_imc events to monitor application + * or KVM thread performance can cause a checkstop + * (system crash). + * Disable it for now. 
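The ptdump note_prot_wx() change earlier in this hunk replaces an exact pgprot comparison with pte_write() && pte_exec(): the old test only flagged mappings whose flags contained every bit of PAGE_KERNEL_X, so a writable-and-executable mapping with a different accessed/dirty combination escaped the W+X warning. A standalone model with illustrative bit values:

#include <assert.h>
#include <stdbool.h>

/* illustrative flag bits; real pgprot values carry more state */
#define PTE_RW    0x1UL
#define PTE_EXEC  0x2UL
#define PTE_DIRTY 0x4UL
#define PAGE_KERNEL_X_VAL (PTE_RW | PTE_EXEC | PTE_DIRTY)

/* Old check: every PAGE_KERNEL_X bit must be present. */
static bool old_check(unsigned long f)
{
        return (f & PAGE_KERNEL_X_VAL) == PAGE_KERNEL_X_VAL;
}

/* New check: pte_write() && pte_exec(), other bits irrelevant. */
static bool new_check(unsigned long f)
{
        return (f & PTE_RW) && (f & PTE_EXEC);
}

int main(void)
{
        unsigned long wx_not_dirty = PTE_RW | PTE_EXEC;

        assert(!old_check(wx_not_dirty));  /* false negative before */
        assert(new_check(wx_not_dirty));   /* caught now */
        return 0;
}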
+ */ + pr_info_once("IMC: disabling trace_imc PMU\n"); + domain = -1; break; default: pr_warn("IMC Unknown Device type \n"); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c28d0d9b7ee0..058223233088 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1558,6 +1558,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) /* Reserve PE for each VF */ for (vf_index = 0; vf_index < num_vfs; vf_index++) { + int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index); + int vf_bus = pci_iov_virtfn_bus(pdev, vf_index); + struct pci_dn *vf_pdn; + if (pdn->m64_single_mode) pe_num = pdn->pe_num_map[vf_index]; else @@ -1570,13 +1574,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pe->pbus = NULL; pe->parent_dev = pdev; pe->mve_number = -1; - pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) | - pci_iov_virtfn_devfn(pdev, vf_index); + pe->rid = (vf_bus << 8) | vf_devfn; pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", hose->global_number, pdev->bus->number, - PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)), - PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num); + PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num); if (pnv_ioda_configure_pe(phb, pe)) { /* XXX What do we do here ? */ @@ -1590,6 +1592,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) list_add_tail(&pe->list, &phb->ioda.pe_list); mutex_unlock(&phb->ioda.pe_list_mutex); + /* associate this pe with its pdn */ + list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { + if (vf_pdn->busno == vf_bus && + vf_pdn->devfn == vf_devfn) { + vf_pdn->pe_number = pe_num; + break; + } + } + pnv_pci_ioda2_setup_dma_pe(phb, pe); #ifdef CONFIG_IOMMU_API iommu_register_group(&pe->table_group, @@ -2889,9 +2900,6 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) struct pci_dn *pdn; int mul, total_vfs; - if (!pdev->is_physfn || pci_dev_is_added(pdev)) - return; - pdn = pci_get_pdn(pdev); pdn->vfs_expanded = 0; pdn->m64_single_mode = false; @@ -2966,6 +2974,30 @@ truncate_iov: res->end = res->start - 1; } } + +static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) +{ + if (WARN_ON(pci_dev_is_added(pdev))) + return; + + if (pdev->is_virtfn) { + struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev); + + /* + * VF PEs are single-device PEs so their pdev pointer needs to + * be set. The pdev doesn't exist when the PE is allocated (in + * pcibios_sriov_enable()) so we fix it up here. + */ + pe->pdev = pdev; + WARN_ON(!(pe->flags & PNV_IODA_PE_VF)); + } else if (pdev->is_physfn) { + /* + * For PFs adjust their allocated IOV resources to match what + * the PHB can support using its M64 BAR table.
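The pnv_ioda_setup_vf_PE() hunk above records the PE number directly on the matching child pci_dn at SR-IOV enable time, replacing the PE-list search previously done in pnv_pci_dma_dev_setup() (removed further below). A standalone model of the association walk, with the struct reduced to the fields the hunk uses and a plain linked list standing in for list_for_each_entry():

#include <stdio.h>

struct pci_dn {
        int busno, devfn, pe_number;
        struct pci_dn *next;
};

/* Record the PE number on the child pci_dn matching the VF's
 * bus/devfn, so later lookups need no PE-list scan. */
static void associate_vf_pe(struct pci_dn *children, int vf_bus,
                            int vf_devfn, int pe_num)
{
        struct pci_dn *p;

        for (p = children; p; p = p->next) {
                if (p->busno == vf_bus && p->devfn == vf_devfn) {
                        p->pe_number = pe_num;
                        break;
                }
        }
}

int main(void)
{
        struct pci_dn vf1 = { 1, 0x10, -1, 0 };
        struct pci_dn vf0 = { 1, 0x08, -1, &vf1 };

        associate_vf_pe(&vf0, 1, 0x10, 42);
        printf("vf1 PE = %d\n", vf1.pe_number);   /* 42 */
        return 0;
}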
+ */ + pnv_pci_ioda_fixup_iov_resources(pdev); + } +} #endif /* CONFIG_PCI_IOV */ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, @@ -3862,7 +3894,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, ppc_md.pcibios_default_alignment = pnv_pci_default_alignment; #ifdef CONFIG_PCI_IOV - ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; + ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov; ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment; ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable; ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 2825d004dece..8307e1f4086c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -814,24 +814,6 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; -#ifdef CONFIG_PCI_IOV - struct pnv_ioda_pe *pe; - struct pci_dn *pdn; - - /* Fix the VF pdn PE number */ - if (pdev->is_virtfn) { - pdn = pci_get_pdn(pdev); - WARN_ON(pdn->pe_number != IODA_INVALID_PE); - list_for_each_entry(pe, &phb->ioda.pe_list, list) { - if (pe->rid == ((pdev->bus->number << 8) | - (pdev->devfn & 0xff))) { - pdn->pe_number = pe->pe_number; - pe->pdev = pdev; - break; - } - } - } -#endif /* CONFIG_PCI_IOV */ if (phb && phb->dma_dev_setup) phb->dma_dev_setup(phb, pdev); @@ -945,6 +927,23 @@ void __init pnv_pci_init(void) if (!firmware_has_feature(FW_FEATURE_OPAL)) return; +#ifdef CONFIG_PCIEPORTBUS + /* + * On PowerNV PCIe devices are (currently) managed in cooperation + * with firmware. This isn't *strictly* required, but there are enough + * assumptions baked into both firmware and the platform code that + * it's unwise to allow the portbus services to be used. + * + * We need to fix this eventually, but for now set this flag to disable + * the portbus driver. The AER service isn't required since AER + * events are handled via EEH. The pciehp hotplug driver can't work + * without kernel changes (and portbus binding breaks pnv_php). The + * other services also require some thinking about how we're going + * to integrate them. + */ + pcie_ports_disabled = true; +#endif + /* Look for IODA IO-Hubs.
*/ for_each_compatible_node(np, NULL, "ibm,ioda-hub") { pnv_pci_init_ioda_hub(np); diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index b33251d75927..572651a5c87b 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -411,6 +411,10 @@ static struct bus_type cmm_subsys = { .dev_name = "cmm", }; +static void cmm_release_device(struct device *dev) +{ +} + /** * cmm_sysfs_register - Register with sysfs * @@ -426,6 +430,7 @@ static int cmm_sysfs_register(struct device *dev) dev->id = 0; dev->bus = &cmm_subsys; + dev->release = cmm_release_device; if ((rc = device_register(dev))) goto subsys_unregister; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 8e700390f3d6..4c3af2e9eb8e 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -360,8 +360,10 @@ static bool lmb_is_removable(struct drmem_lmb *lmb) for (i = 0; i < scns_per_block; i++) { pfn = PFN_DOWN(phys_addr); - if (!pfn_present(pfn)) + if (!pfn_present(pfn)) { + phys_addr += MIN_MEMORY_BLOCK_SIZE; continue; + } rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION); phys_addr += MIN_MEMORY_BLOCK_SIZE; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 6ba081dd61c9..b4ce9d472dfe 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -36,7 +36,6 @@ #include #include #include -#include #include "pseries.h" @@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) return be64_to_cpu(*tcep); } -static void tce_free_pSeriesLP(struct iommu_table*, long, long); +static void tce_free_pSeriesLP(unsigned long liobn, long, long); static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); -static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, +static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) @@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, int ret = 0; long tcenum_start = tcenum, npages_start = npages; - rpn = __pa(uaddr) >> TCE_SHIFT; + rpn = __pa(uaddr) >> tceshift; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; while (npages--) { - tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; - rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce); + tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift; + rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce); if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { ret = (int)rc; - tce_free_pSeriesLP(tbl, tcenum_start, + tce_free_pSeriesLP(liobn, tcenum_start, (npages_start - (npages + 1))); break; } if (rc && printk_ratelimit()) { printk("tce_build_pSeriesLP: plpar_tce_put failed. 
rc=%lld\n", rc); - printk("\tindex = 0x%llx\n", (u64)tbl->it_index); + printk("\tindex = 0x%llx\n", (u64)liobn); printk("\ttcenum = 0x%llx\n", (u64)tcenum); printk("\ttce val = 0x%llx\n", tce ); dump_stack(); @@ -194,7 +193,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, unsigned long flags; if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) { - return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, + return tce_build_pSeriesLP(tbl->it_index, tcenum, + tbl->it_page_shift, npages, uaddr, direction, attrs); } @@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, /* If allocation fails, fall back to the loop implementation */ if (!tcep) { local_irq_restore(flags); - return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, - direction, attrs); + return tce_build_pSeriesLP(tbl->it_index, tcenum, + tbl->it_page_shift, + npages, uaddr, direction, attrs); } __this_cpu_write(tce_page, tcep); } @@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, return ret; } -static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) +static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages) { u64 rc; while (npages--) { - rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0); + rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0); if (rc && printk_ratelimit()) { printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); - printk("\tindex = 0x%llx\n", (u64)tbl->it_index); + printk("\tindex = 0x%llx\n", (u64)liobn); printk("\ttcenum = 0x%llx\n", (u64)tcenum); dump_stack(); } @@ -286,7 +287,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n u64 rc; if (!firmware_has_feature(FW_FEATURE_MULTITCE)) - return tce_free_pSeriesLP(tbl, tcenum, npages); + return tce_free_pSeriesLP(tbl->it_index, tcenum, npages); rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages); @@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, u64 rc = 0; long l, limit; + if (!firmware_has_feature(FW_FEATURE_MULTITCE)) { + unsigned long tceshift = be32_to_cpu(maprange->tce_shift); + unsigned long dmastart = (start_pfn << PAGE_SHIFT) + + be64_to_cpu(maprange->dma_base); + unsigned long tcenum = dmastart >> tceshift; + unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift; + void *uaddr = __va(start_pfn << PAGE_SHIFT); + + return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn), + tcenum, tceshift, npages, (unsigned long) uaddr, + DMA_BIDIRECTIONAL, 0); + } + local_irq_disable(); /* to protect tcep and the page behind it */ tcep = __this_cpu_read(tce_page); @@ -1320,15 +1334,7 @@ void iommu_init_early_pSeries(void) of_reconfig_notifier_register(&iommu_reconfig_nb); register_memory_notifier(&iommu_mem_nb); - /* - * Secure guest memory is inacessible to devices so regular DMA isn't - * possible. - * - * In that case keep devices' dma_map_ops as NULL so that the generic - * DMA code path will use SWIOTLB to bounce buffers for DMA. 
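The pseries/iommu.c hunk above threads liobn and tceshift through tce_build_pSeriesLP() so it can be called before an iommu_table exists (as the new tce_setrange_multi_pSeriesLP() fallback does) and so the RPN and H_PUT_TCE index honour the real IOMMU page size instead of a hard-coded 4K shift. A standalone sketch of just that arithmetic; the flag values are illustrative and TCE_RPN_MASK is omitted for brevity:

#include <stdint.h>
#include <stdio.h>

#define TCE_PCI_READ  0x1ULL   /* illustrative flag values */
#define TCE_PCI_WRITE 0x2ULL

/* Derive the TCE word from the caller-supplied tceshift, so 64K
 * IOMMU pages (shift 16) are programmed as correctly as 4K ones. */
static uint64_t make_tce(uint64_t paddr, unsigned int tceshift, int writable)
{
        uint64_t rpn = paddr >> tceshift;
        uint64_t proto = TCE_PCI_READ | (writable ? TCE_PCI_WRITE : 0);

        return proto | (rpn << tceshift);
}

int main(void)
{
        /* 64K IOMMU page: shift of 16 instead of the old fixed 12 */
        printf("tce = %#llx\n", (unsigned long long)make_tce(0x30000, 16, 1));
        return 0;
}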
- */ - if (!is_secure_guest()) - set_pci_dma_ops(&dma_iommu_ops); + set_pci_dma_ops(&dma_iommu_ops); } static int __init disable_multitce(char *str) diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e33e8bc4b69b..38c306551f76 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -435,10 +435,10 @@ static void maxmem_data(struct seq_file *m) { unsigned long maxmem = 0; - maxmem += drmem_info->n_lmbs * drmem_info->lmb_size; + maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size; maxmem += hugetlb_total_pages() * PAGE_SIZE; - seq_printf(m, "MaxMem=%ld\n", maxmem); + seq_printf(m, "MaxMem=%lu\n", maxmem); } static int pseries_lparcfg_data(struct seq_file *m, void *v) diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 61883291defc..66fd517c4816 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -152,7 +152,7 @@ static int papr_scm_meta_get(struct papr_scm_priv *p, int len, read; int64_t ret; - if ((hdr->in_offset + hdr->in_length) >= p->metadata_size) + if ((hdr->in_offset + hdr->in_length) > p->metadata_size) return -EINVAL; for (len = hdr->in_length; len; len -= read) { @@ -206,7 +206,7 @@ static int papr_scm_meta_set(struct papr_scm_priv *p, __be64 data_be; int64_t ret; - if ((hdr->in_offset + hdr->in_length) >= p->metadata_size) + if ((hdr->in_offset + hdr->in_length) > p->metadata_size) return -EINVAL; for (len = hdr->in_length; len; len -= wrote) { @@ -342,6 +342,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) p->bus = nvdimm_bus_register(NULL, &p->bus_desc); if (!p->bus) { dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn); + kfree(p->bus_desc.provider_name); return -ENXIO; } @@ -498,6 +499,7 @@ static int papr_scm_remove(struct platform_device *pdev) nvdimm_bus_unregister(p->bus); drc_pmem_unbind(p); + kfree(p->bus_desc.provider_name); kfree(p); return 0; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 0a40201f315f..0c8421dd01ab 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -74,6 +74,9 @@ #include "pseries.h" #include "../../../../drivers/pci/pci.h" +DEFINE_STATIC_KEY_FALSE(shared_processor); +EXPORT_SYMBOL_GPL(shared_processor); + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); + + if (lppaca_shared_proc(get_lppaca())) + static_branch_enable(&shared_processor); + ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; #ifdef CONFIG_PCI_IOV diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 79e2287991db..f682b7babc09 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) if (tbl == NULL) return NULL; + kref_init(&tbl->it_kref); + of_parse_dma_window(dev->dev.of_node, dma_window, &tbl->it_index, &offset, &size); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index df832b09e3e9..9651ca061828 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -972,12 +972,21 @@ static int 
xive_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) { struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); + u8 pq; switch (which) { case IRQCHIP_STATE_ACTIVE: - *state = !xd->stale_p && - (xd->saved_p || - !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P)); + pq = xive_esb_read(xd, XIVE_ESB_GET); + + /* + * The ESB value being all 1's means we couldn't get + * the PQ state of the interrupt through mmio. This may + * happen, for example, when querying a PHB interrupt + * while the PHB is in an error state. We consider the + * interrupt to be inactive in that case. + */ + *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && + (xd->saved_p || !!(pq & XIVE_ESB_VAL_P)); return 0; default: return -EINVAL; @@ -1035,6 +1044,15 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) xd->target = XIVE_INVALID_TARGET; irq_set_handler_data(virq, xd); + /* + * Turn OFF by default the interrupt being mapped. A side + * effect of this check is the mapping of the ESB page of the + * interrupt in the Linux address space. This prevents page + * fault issues in the crash handler which masks all + * interrupts. + */ + xive_esb_read(xd, XIVE_ESB_SET_PQ_01); + return 0; } diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 33c10749edec..55dc61cb4867 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -392,20 +392,28 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->esb_shift = esb_shift; data->trig_page = trig_page; + data->hw_irq = hw_irq; + /* * No chip-id for the sPAPR backend. This has an impact on how we * pick a target. See xive_pick_irq_target(). */ data->src_chip = XIVE_INVALID_CHIP_ID; + /* + * When the H_INT_ESB flag is set, the H_INT_ESB hcall should + * be used for interrupt management. Skip the remapping of the + * ESB pages which are not available. + */ + if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB) + return 0; + data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift); if (!data->eoi_mmio) { pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq); return -ENOMEM; } - data->hw_irq = hw_irq; - /* Full function page supports trigger */ if (flags & XIVE_SRC_TRIGGER) { data->trig_mmio = data->eoi_mmio; diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh index 2b4e959caa36..014e00e74d2b 100755 --- a/arch/powerpc/tools/relocs_check.sh +++ b/arch/powerpc/tools/relocs_check.sh @@ -10,24 +10,29 @@ # based on relocs_check.pl # Copyright © 2009 IBM Corporation -if [ $# -lt 2 ]; then - echo "$0 [path to objdump] [path to vmlinux]" 1>&2 exit 1 fi +if [ $# -lt 3 ]; then + echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2 exit 1 fi -# Have Kbuild supply the path to objdump so we handle cross compilation. +# Have Kbuild supply the path to objdump and nm so we handle cross compilation. objdump="$1" -vmlinux="$2" +nm="$2" +vmlinux="$3" + +# Remove from the bad relocations those that match an undefined weak symbol +# which will result in an absolute relocation to 0. +# Weak unresolved symbols are of the following form in nm output: +# " w _binary__btf_vmlinux_bin_end" undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }') bad_relocs=$( -"$objdump" -R "$vmlinux" | +$objdump -R "$vmlinux" | # Only look at relocation lines. 
grep -E '\<R_' | # These relocations are okay # On PPC64: # R_PPC64_RELATIVE, R_PPC64_NONE - # R_PPC64_ADDR64 mach_<name> - # R_PPC64_ADDR64 __crc_<name> # On PPC: # R_PPC_RELATIVE, R_PPC_ADDR16_HI, # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO, @@ -39,8 +44,7 @@ R_PPC_ADDR16_HI R_PPC_ADDR16_HA R_PPC_RELATIVE R_PPC_NONE' | - grep -E -v '\:' | awk '{print $1}' ) BRANCHES=$( -"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \ --stop-address=${end_intr} | grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" | grep -v '\<__start_initialization_multiplatform>' | diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index f142570ad860..c3842dbeb1b7 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for xmon -# Disable clang warning for using setjmp without setjmp.h header -subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header) +# Avoid clang warnings around longjmp/setjmp declarations +subdir-ccflags-y := -ffreestanding GCOV_PROFILE := n KCOV_INSTRUMENT := n diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index d83364ebc5c5..8057aafd5f5e 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1894,15 +1894,14 @@ static void dump_300_sprs(void) printf("pidr = %.16lx tidr = %.16lx\n", mfspr(SPRN_PID), mfspr(SPRN_TIDR)); - printf("asdr = %.16lx psscr = %.16lx\n", - mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR)); + printf("psscr = %.16lx\n", + hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR)); if (!hv) return; - printf("ptcr = %.16lx\n", - mfspr(SPRN_PTCR)); + printf("ptcr = %.16lx asdr = %.16lx\n", + mfspr(SPRN_PTCR), mfspr(SPRN_ASDR)); #endif } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 8eebbc8860bb..a0fa4be94a68 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -61,6 +61,7 @@ config RISCV select SPARSEMEM_STATIC if 32BIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select HAVE_ARCH_MMAP_RND_BITS + select HAVE_COPY_THREAD_TLS config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT @@ -100,6 +101,7 @@ config ARCH_FLATMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool y + depends on MMU select SPARSEMEM_VMEMMAP_ENABLE config ARCH_SELECT_MEMORY_MODEL diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index b94d8db5ddcc..c40fdcdeb950 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - if (function_graph_enter(old, self_addr, frame_pointer, parent)) + if (!function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = return_hooker; } diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 70bb94ae61c5..6bf5b1674384 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -8,6 +8,10 @@ #include #include #include +#include +#include +#include +#include static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { @@ -386,3 +390,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, return 0; } + +#if defined(CONFIG_MMU) && defined(CONFIG_64BIT) +#define VMALLOC_MODULE_START \ + max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START) +void *module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START, + VMALLOC_END, GFP_KERNEL, + PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} +#endif diff --git a/arch/riscv/kernel/process.c 
b/arch/riscv/kernel/process.c index 85e3c39bb60b..330b34706aa0 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -120,7 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (usp) /* User fork */ childregs->sp = usp; if (clone_flags & CLONE_SETTLS) - childregs->tp = childregs->a5; + childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ p->thread.ra = (unsigned long)ret_from_fork; } diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 49a5852fd07d..33b16f4212f7 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -58,7 +58,8 @@ quiet_cmd_vdsold = VDSOLD $@ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ $(CROSS_COMPILE)objcopy \ - $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 3f15938dec89..c54bd3c79955 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -14,6 +14,7 @@ void flush_icache_all(void) { sbi_remote_fence_i(NULL); } +EXPORT_SYMBOL(flush_icache_all); /* * Performs an icache flush for the given MM context. RISC-V has no direct diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 573463d1c799..f5d813c1304d 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -98,7 +98,7 @@ void __init setup_bootmem(void) for_each_memblock(memory, reg) { phys_addr_t end = reg->base + reg->size; - if (reg->base <= vmlinux_end && vmlinux_end <= end) { + if (reg->base <= vmlinux_start && vmlinux_end <= end) { mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); /* diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c index 5451ef3845f2..e2279fed8f56 100644 --- a/arch/riscv/net/bpf_jit_comp.c +++ b/arch/riscv/net/bpf_jit_comp.c @@ -120,6 +120,11 @@ static bool seen_reg(int reg, struct rv_jit_context *ctx) return false; } +static void mark_fp(struct rv_jit_context *ctx) +{ + __set_bit(RV_CTX_F_SEEN_S5, &ctx->flags); +} + static void mark_call(struct rv_jit_context *ctx) { __set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); @@ -596,7 +601,8 @@ static void __build_epilogue(u8 reg, struct rv_jit_context *ctx) emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx); /* Set return value. 
*/ - emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx); + if (reg == RV_REG_RA) + emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx); emit(rv_jalr(RV_REG_ZERO, reg, 0), ctx); } @@ -631,14 +637,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) return -1; emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx); - /* if (--TCC < 0) + /* if (TCC-- < 0) * goto out; */ emit(rv_addi(RV_REG_T1, tcc, -1), ctx); off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; if (is_13b_check(off, insn)) return -1; - emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx); + emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx); /* prog = array->ptrs[index]; * if (!prog) @@ -1426,6 +1432,10 @@ static void build_prologue(struct rv_jit_context *ctx) { int stack_adjust = 0, store_offset, bpf_stack_adjust; + bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); + if (bpf_stack_adjust) + mark_fp(ctx); + if (seen_reg(RV_REG_RA, ctx)) stack_adjust += 8; stack_adjust += 8; /* RV_REG_FP */ @@ -1443,7 +1453,6 @@ static void build_prologue(struct rv_jit_context *ctx) stack_adjust += 8; stack_adjust = round_up(stack_adjust, 16); - bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); stack_adjust += bpf_stack_adjust; store_offset = stack_adjust - 8; diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 478b645b20dd..2faaf456956a 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -69,7 +69,7 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include # cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls -ifeq ($(call cc-option-yn,-mpacked-stack),y) +ifeq ($(call cc-option-yn,-mpacked-stack -mbackchain -msoft-float),y) cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK endif @@ -146,7 +146,7 @@ all: bzImage #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg... KBUILD_IMAGE := $(boot)/bzImage -install: vmlinux +install: $(Q)$(MAKE) $(build)=$(boot) $@ bzImage: vmlinux diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index e2c47d3a1c89..0ff9261c915e 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE $(obj)/startup.a: $(OBJECTS) FORCE $(call if_changed,ar) -install: $(CONFIGURE) $(obj)/bzImage +install: sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index 5d12352545c5..5591243d673e 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit) *(unsigned long *) prng.parm_block ^= seed; for (i = 0; i < 16; i++) { cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, - (char *) entropy, (char *) entropy, + (u8 *) entropy, (u8 *) entropy, sizeof(entropy)); memcpy(prng.parm_block, entropy, sizeof(entropy)); } diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 5367950510f6..fa0150285d38 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -170,6 +170,11 @@ void startup_kernel(void) handle_relocs(__kaslr_offset); if (__kaslr_offset) { + /* + * Save KASLR offset for early dumps, before vmcore_info is set. + * Mark as uneven to distinguish from real vmcore_info pointer. 
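A hedged aside on the startup.c hunk above: lowcore's vmcore_info field otherwise holds a pointer to the vmcoreinfo note, which is always even, so a deliberately odd ("uneven") value can only be the stashed KASLR offset. A minimal standalone sketch of that encode/decode convention, with illustrative names of my own choosing:

#include <stdint.h>
#include <stdio.h>

/* Encode: stash the KASLR offset with bit 0 set, i.e. as an odd value. */
static uint64_t stash_kaslr_offset(uint64_t kaslr_offset)
{
	return kaslr_offset | 0x1ULL;
}

/* Decode: an odd value can only be a stashed offset, because a real
 * vmcore_info pointer is always at least 2-byte aligned. Returns 1 and
 * stores the offset if val carries one, 0 for a genuine pointer. */
static int stashed_kaslr_offset(uint64_t val, uint64_t *offset)
{
	if (!(val & 0x1ULL))
		return 0;
	*offset = val & ~0x1ULL;
	return 1;
}

int main(void)
{
	uint64_t off;

	if (stashed_kaslr_offset(stash_kaslr_offset(0x1c0000000ULL), &off))
		printf("kaslr offset = %#llx\n", (unsigned long long)off);
	return 0;
}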
+ */ + S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL; /* Clear non-relocated kernel */ if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) memset(img, 0, vmlinux.image_size); diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c index ed007f4a6444..3f501159ee9f 100644 --- a/arch/s390/boot/uv.c +++ b/arch/s390/boot/uv.c @@ -15,7 +15,8 @@ void uv_query_info(void) if (!test_facility(158)) return; - if (uv_call(0, (uint64_t)&uvcb)) + /* rc==0x100 means that there is additional data we do not process */ + if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100) return; if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) && diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index d39e0f079217..686fe7aa192f 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) struct s390_sha_ctx *ctx = shash_desc_ctx(desc); unsigned int bsize = crypto_shash_blocksize(desc->tfm); u64 bits; - unsigned int n, mbl_offset; + unsigned int n; + int mbl_offset; n = ctx->count % bsize; bits = ctx->count * 8; - mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32); + mbl_offset = s390_crypto_shash_parmsize(ctx->func); if (mbl_offset < 0) return -EINVAL; + mbl_offset = mbl_offset / sizeof(u32); + /* set total msg bit length (mbl) in CPACF parmblock */ switch (ctx->func) { case CPACF_KLMD_SHA_1: diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 237ee0c4169f..612ed3c6d581 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -141,7 +141,9 @@ struct lowcore { /* br %r1 trampoline */ __u16 br_r1_trampoline; /* 0x0400 */ - __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */ + __u32 return_lpswe; /* 0x0402 */ + __u32 return_mcck_lpswe; /* 0x0406 */ + __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */ /* * 0xe00 contains the address of the IPL Parameter Information diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 823578c6b9e2..e399102367af 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -33,6 +33,8 @@ #define ARCH_HAS_PREPARE_HUGEPAGE #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA + #include #ifndef __ASSEMBLY__ @@ -40,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end); static inline void storage_key_init_range(unsigned long start, unsigned long end) { - if (PAGE_DEFAULT_KEY) + if (PAGE_DEFAULT_KEY != 0) __storage_key_init_range(start, end); } diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index a2399eff84ca..6087a4e9b2bf 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -183,7 +183,7 @@ void zpci_remove_reserved_devices(void); /* CLP */ int clp_scan_pci_devices(void); int clp_rescan_pci_devices(void); -int clp_rescan_pci_devices_simple(void); +int clp_rescan_pci_devices_simple(u32 *fid); int clp_add_pci_device(u32, u32, int); int clp_enable_fh(struct zpci_dev *, u8); int clp_disable_fh(struct zpci_dev *); diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index bccb8f4a63e2..77606c4acd58 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) crst_table_init(table, _REGION2_ENTRY_EMPTY); return (p4d_t *) table; } -#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d) + 
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (!mm_p4d_folded(mm)) + crst_table_free(mm, (unsigned long *) p4d); +} static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { @@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) crst_table_init(table, _REGION3_ENTRY_EMPTY); return (pud_t *) table; } -#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud) + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + if (!mm_pud_folded(mm)) + crst_table_free(mm, (unsigned long *) pud); +} static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) { @@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { + if (mm_pmd_folded(mm)) + return; pgtable_pmd_page_dtor(virt_to_page(pmd)); crst_table_free(mm, (unsigned long *) pmd); } diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 5ff98d76a66c..34a655ad7123 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -756,6 +756,12 @@ static inline int pmd_write(pmd_t pmd) return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; } +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0; +} + static inline int pmd_dirty(pmd_t pmd) { int dirty = 1; @@ -1173,8 +1179,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - if (!MACHINE_HAS_NX) - pte_val(entry) &= ~_PAGE_NOEXEC; if (pte_present(entry)) pte_val(entry) &= ~_PAGE_UNUSED; if (mm_has_pgste(mm)) @@ -1191,6 +1195,8 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); + if (!MACHINE_HAS_NX) + pte_val(__pte) &= ~_PAGE_NOEXEC; return pte_mkyoung(__pte); } diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 51a0e4a2dc96..560d8b77b1d1 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -162,6 +162,7 @@ typedef struct thread_struct thread_struct; #define INIT_THREAD { \ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ .fpu.regs = (void *) init_task.thread.fpu.fprs, \ + .last_break = 1, \ } /* diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index e3f238e8c611..4f35b1055f30 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -227,7 +227,7 @@ struct qdio_buffer { * @sbal: absolute SBAL address */ struct sl_element { - unsigned long sbal; + u64 sbal; } __attribute__ ((packed)); /** diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 6dc6c4fbc8e2..1932088686a6 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -8,6 +8,7 @@ #include #include +#include #define EP_OFFSET 0x10008 #define EP_STRING "S390EP" @@ -157,6 +158,12 @@ static inline unsigned long kaslr_offset(void) return __kaslr_offset; } +static inline u32 gen_lpswe(unsigned long addr) +{ + BUILD_BUG_ON(addr > 0xfff); + return 0xb2b20000 | addr; +} + #else /* __ASSEMBLY__ */ #define IPL_DEVICE (IPL_DEVICE_OFFSET) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 64539c221672..b6a4ce9dafaf 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h 
@@ -10,8 +10,9 @@ #ifndef _ASM_S390_TIMEX_H #define _ASM_S390_TIMEX_H -#include +#include #include +#include /* The value of the TOD clock for 1.1.1970. */ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL @@ -154,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk) static inline unsigned long long get_tod_clock(void) { - unsigned char clk[STORE_CLOCK_EXT_SIZE]; + char clk[STORE_CLOCK_EXT_SIZE]; get_tod_clock_ext(clk); return *((unsigned long long *)&clk[1]); @@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8); /** * get_clock_monotonic - returns current time in clock rate units * - * The caller must ensure that preemption is disabled. * The clock and tod_clock_base get changed via stop_machine. - * Therefore preemption must be disabled when calling this - * function, otherwise the returned value is not guaranteed to - * be monotonic. + * Therefore preemption must be disabled, otherwise the returned + * value is not guaranteed to be monotonic. */ static inline unsigned long long get_tod_clock_monotonic(void) { - return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + unsigned long long tod; + + preempt_disable_notrace(); + tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + preempt_enable_notrace(); + return tod; } /** diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 41ac4ad21311..b6628586ab70 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -125,6 +125,8 @@ int main(void) OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code); OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address); OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr); + OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe); + OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe); OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw); OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw); OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 7abe6ae261b4..f304802ecf7b 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) ptr += sprintf(ptr, "%%c%i", value); else if (operand->flags & OPERAND_VR) ptr += sprintf(ptr, "%%v%i", value); - else if (operand->flags & OPERAND_PCREL) - ptr += sprintf(ptr, "%lx", (signed int) value - + addr); - else if (operand->flags & OPERAND_SIGNED) + else if (operand->flags & OPERAND_PCREL) { + void *pcrel = (void *)((int)value + addr); + + ptr += sprintf(ptr, "%px", pcrel); + } else if (operand->flags & OPERAND_SIGNED) ptr += sprintf(ptr, "%i", value); else ptr += sprintf(ptr, "%u", value); @@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs) else *ptr++ = ' '; addr = regs->psw.addr + start - 32; - ptr += sprintf(ptr, "%016lx: ", addr); + ptr += sprintf(ptr, "%px: ", (void *)addr); if (start + opsize >= end) break; for (i = 0; i < opsize; i++) @@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len) opsize = insn_length(*code); if (opsize > len) break; - ptr += sprintf(ptr, "%p: ", code); + ptr += sprintf(ptr, "%px: ", code); for (i = 0; i < opsize; i++) ptr += sprintf(ptr, "%02x", code[i]); *ptr++ = '\t'; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 270d1d145761..bc85987727f0 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP .macro SWITCH_ASYNC savearea,timer tmhh %r8,0x0001 # 
interrupting from user ? - jnz 1f + jnz 2f lgr %r14,%r9 + cghi %r14,__LC_RETURN_LPSWE + je 0f slg %r14,BASED(.Lcritical_start) clg %r14,BASED(.Lcritical_length) - jhe 0f + jhe 1f +0: lghi %r11,\savearea # inside critical section, do cleanup brasl %r14,cleanup_critical tmhh %r8,0x0001 # retest problem state after cleanup - jnz 1f -0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? + jnz 2f +1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? slgr %r14,%r15 srag %r14,%r14,STACK_SHIFT - jnz 2f + jnz 3f CHECK_STACK \savearea aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 3f -1: UPDATE_VTIME %r14,%r15,\timer + j 4f +2: UPDATE_VTIME %r14,%r15,\timer BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP -2: lg %r15,__LC_ASYNC_STACK # load async stack -3: la %r11,STACK_FRAME_OVERHEAD(%r15) +3: lg %r15,__LC_ASYNC_STACK # load async stack +4: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro UPDATE_VTIME w1,w2,enter_timer @@ -401,7 +404,7 @@ ENTRY(system_call) stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER lmg %r11,%r15,__PT_R11(%r11) - lpswe __LC_RETURN_PSW + b __LC_RETURN_LPSWE(%r0) .Lsysc_done: # @@ -608,43 +611,50 @@ ENTRY(pgm_check_handler) BPOFF stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK - lg %r12,__LC_CURRENT + srag %r11,%r10,12 + jnz 0f + /* if __LC_LAST_BREAK is < 4096, it contains one of + * the lpswe addresses in lowcore. Set it to 1 (initial state) + * to prevent leaking that address to userspace. + */ + lghi %r10,1 +0: lg %r12,__LC_CURRENT lghi %r11,0 larl %r13,cleanup_critical lmg %r8,%r9,__LC_PGM_OLD_PSW tmhh %r8,0x0001 # test problem state bit - jnz 2f # -> fault in user space + jnz 3f # -> fault in user space #if IS_ENABLED(CONFIG_KVM) # cleanup critical section for program checks in sie64a lgr %r14,%r9 slg %r14,BASED(.Lsie_critical_start) clg %r14,BASED(.Lsie_critical_length) - jhe 0f + jhe 1f lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit lghi %r11,_PIF_GUEST_FAULT #endif -0: tmhh %r8,0x4000 # PER bit set in old PSW ? - jnz 1f # -> enabled, can't be a double fault +1: tmhh %r8,0x4000 # PER bit set in old PSW ? + jnz 2f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception jnz .Lpgm_svcper # -> single stepped svc -1: CHECK_STACK __LC_SAVE_AREA_SYNC +2: CHECK_STACK __LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - # CHECK_VMAP_STACK branches to stack_overflow or 4f - CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f -2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER + # CHECK_VMAP_STACK branches to stack_overflow or 5f + CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f +3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP lg %r15,__LC_KERNEL_STACK lgr %r14,%r12 aghi %r14,__TASK_thread # pointer to thread_struct lghi %r13,__LC_PGM_TDB tm __LC_PGM_ILC+2,0x02 # check for transaction abort - jz 3f + jz 4f mvc __THREAD_trap_tdb(256,%r14),0(%r13) -3: stg %r10,__THREAD_last_break(%r14) -4: lgr %r13,%r11 +4: stg %r10,__THREAD_last_break(%r14) +5: lgr %r13,%r11 la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use @@ -663,14 +673,14 @@ ENTRY(pgm_check_handler) stg %r13,__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 5f + jz 6f tmhh %r8,0x0001 # kernel per event ? 
jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -5: REENABLE_IRQS +6: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) @@ -775,7 +785,7 @@ ENTRY(io_int_handler) mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER .Lio_exit_kernel: lmg %r11,%r15,__PT_R11(%r11) - lpswe __LC_RETURN_PSW + b __LC_RETURN_LPSWE(%r0) .Lio_done: # @@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler) stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 0: lmg %r11,%r15,__PT_R11(%r11) - lpswe __LC_RETURN_MCCK_PSW + b __LC_RETURN_MCCK_LPSWE .Lmcck_panic: lg %r15,__LC_NODAT_STACK @@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow) #endif ENTRY(cleanup_critical) + cghi %r9,__LC_RETURN_LPSWE + je .Lcleanup_lpswe #if IS_ENABLED(CONFIG_KVM) clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap jl 0f @@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical) mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) +.Lcleanup_lpswe: 1: lmg %r8,%r9,__LC_RETURN_PSW BR_EX %r14,%r11 .Lcleanup_sysc_restore_insn: diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 444a19125a81..cb8b1cc285c9 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image) #ifdef CONFIG_CRASH_DUMP int rc; + preempt_disable(); rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image); + preempt_enable(); return rc == 0; #else return false; @@ -254,10 +256,10 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_SYMBOL(lowcore_ptr); VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); - mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); vmcoreinfo_append_str("SDMA=%lx\n", __sdma); vmcoreinfo_append_str("EDMA=%lx\n", __edma); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); } void machine_shutdown(void) diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 9e1660a6b9db..3431b2d5e334 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -26,6 +26,12 @@ ENDPROC(ftrace_stub) #define STACK_PTREGS (STACK_FRAME_OVERHEAD) #define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS) #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) +#ifdef __PACK_STACK +/* allocate just enough for r14, r15 and backchain */ +#define TRACED_FUNC_FRAME_SIZE 24 +#else +#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD +#endif ENTRY(_mcount) BR_EX %r14 @@ -39,9 +45,16 @@ ENTRY(ftrace_caller) #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) aghi %r0,MCOUNT_RETURN_FIXUP #endif - aghi %r15,-STACK_FRAME_SIZE + # allocate stack frame for ftrace_caller to contain traced function + aghi %r15,-TRACED_FUNC_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) + stg %r0,(__SF_GPRS+8*8)(%r15) + stg %r15,(__SF_GPRS+9*8)(%r15) + # allocate pt_regs and stack frame for ftrace_trace_function + aghi %r15,-STACK_FRAME_SIZE stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) + aghi %r1,-TRACED_FUNC_FRAME_SIZE + stg %r1,__SF_BACKCHAIN(%r15) stg %r0,(STACK_PTREGS_PSW+8)(%r15) stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15) #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 48d48b6187c0..0eb1d1cc53a8 100644 --- 
a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = { [PERF_COUNT_HW_BUS_CYCLES] = -1, }; -static int __hw_perf_event_init(struct perf_event *event) +static int __hw_perf_event_init(struct perf_event *event, unsigned int type) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; @@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event) int err = 0; u64 ev; - switch (attr->type) { + switch (type) { case PERF_TYPE_RAW: /* Raw events are used to access counters directly, * hence do not permit excludes */ @@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event) static int cpumf_pmu_event_init(struct perf_event *event) { + unsigned int type = event->attr.type; int err; - switch (event->attr.type) { - case PERF_TYPE_HARDWARE: - case PERF_TYPE_HW_CACHE: - case PERF_TYPE_RAW: - err = __hw_perf_event_init(event); - break; - default: + if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW) + err = __hw_perf_event_init(event, type); + else if (event->pmu->type == type) + /* Registered as unknown PMU */ + err = __hw_perf_event_init(event, PERF_TYPE_RAW); + else return -ENOENT; - } if (unlikely(err) && event->destroy) event->destroy(event); @@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void) return -ENODEV; cpumf_pmu.attr_groups = cpumf_cf_event_group(); - rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); + rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1); if (rc) pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); return rc; diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index 2654e348801a..e949ab832ed7 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event) int err = -ENOENT; debug_sprintf_event(cf_diag_dbg, 5, - "%s event %p cpu %d config %#llx " + "%s event %p cpu %d config %#llx type:%u " "sample_type %#llx cf_diag_events %d\n", __func__, - event, event->cpu, attr->config, attr->sample_type, - atomic_read(&cf_diag_events)); + event, event->cpu, attr->config, event->pmu->type, + attr->sample_type, atomic_read(&cf_diag_events)); if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG || - event->attr.type != PERF_TYPE_RAW) + event->attr.type != event->pmu->type) goto out; /* Raw events are used to access counters directly, @@ -693,7 +693,7 @@ static int __init cf_diag_init(void) } debug_register_view(cf_diag_dbg, &debug_sprintf_view); - rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW); + rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1); if (rc) { debug_unregister_view(cf_diag_dbg, &debug_sprintf_view); debug_unregister(cf_diag_dbg); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 3d8b12a9a6ff..fdb8083e7870 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb, gfp_t gfp_flags) { int i, rc; - unsigned long *new, *tail; + unsigned long *new, *tail, *tail_prev = NULL; if (!sfb->sdbt || !sfb->tail) return -EINVAL; @@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, sfb->num_sdbt++; /* Link current page to tail of chain */ *tail = (unsigned long)(void *) new + 1; + tail_prev = tail; tail = new; } @@ -241,10 +242,22 @@ static int 
realloc_sampling_buffer(struct sf_buffer *sfb, * issue, a new realloc call (if required) might succeed. */ rc = alloc_sample_data_block(tail, gfp_flags); - if (rc) + if (rc) { + /* Undo last SDBT. An SDBT with no SDB at its first + * entry but with an SDBT entry instead cannot be + * handled by the interrupt handler code. + * Avoid this situation. + */ + if (tail_prev) { + sfb->num_sdbt--; + free_page((unsigned long) new); + tail = tail_prev; + } break; + } sfb->num_sdb++; tail++; + tail_prev = new = NULL; /* Allocated at least one SDB */ } /* Link sampling buffer to its origin */ @@ -1300,18 +1313,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) */ if (flush_all && done) break; - - /* If an event overflow happened, discard samples by - * processing any remaining sample-data-blocks. - */ - if (event_overflow) - flush_all = 1; } /* Account sample overflows in the event hardware structure */ if (sampl_overflow) OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + sampl_overflow, 1 + num_sdb); + + /* perf_event_overflow() and perf_event_account_interrupt() limit + * the interrupt rate to an upper limit. Roughly 1000 samples per + * task tick. + * Hitting this limit results in a large number + * of throttled PERF_RECORD_THROTTLE entries and the samples + * are dropped. + * Slightly increase the interval to avoid hitting this limit. + */ + if (event_overflow) { + SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); + debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", + __func__, + DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); + } + if (sampl_overflow || event_overflow) debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " "overflow stats: sample=%llu event=%llu\n", diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index fcb6c2e92b07..1e75cc983546 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { struct unwind_state state; + unsigned long addr; - unwind_for_each_frame(&state, current, regs, 0) - perf_callchain_store(entry, state.ip); + unwind_for_each_frame(&state, current, regs, 0) { + addr = unwind_get_return_address(&state); + if (!addr || perf_callchain_store(entry, addr)) + return; + } } /* Perf definitions for PMU event attributes in sysfs */ diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index b0afec673f77..4e6299e2ca94 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -105,6 +105,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, p->thread.system_timer = 0; p->thread.hardirq_timer = 0; p->thread.softirq_timer = 0; + p->thread.last_break = 1; frame->sf.back_chain = 0; /* new return point is ret_from_fork */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 3ff291bc63b7..4366962f4930 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -73,6 +73,7 @@ #include #include #include +#include #include "entry.h" /* @@ -457,6 +458,8 @@ static void __init setup_lowcore_dat_off(void) lc->spinlock_index = 0; arch_spin_lock_setup(0); lc->br_r1_trampoline = 0x07f1; /* br %r1 */ + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; @@ -1059,7 +1062,7 @@ static void __init log_component_list(void) if (!early_ipl_comp_list_addr) return; - if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR) + if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL) pr_info("Linux is running with Secure-IPL enabled\n"); else pr_info("Linux is running with Secure-IPL disabled\n"); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 44974654cbd0..f468a10e5206 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc->spinlock_lockval = arch_spin_lockval(cpu); lc->spinlock_index = 0; lc->br_r1_trampoline = 0x07f1; /* br %r1 */ + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); if (nmi_alloc_per_cpu(lc)) goto out_async; if (vdso_alloc_per_cpu(lc)) @@ -262,10 +264,13 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) lc->spinlock_index = 0; lc->percpu_offset = __per_cpu_offset[cpu]; lc->kernel_asce = S390_lowcore.kernel_asce; + lc->user_asce = S390_lowcore.kernel_asce; lc->machine_flags = S390_lowcore.machine_flags; lc->user_timer = lc->system_timer = lc->steal_timer = lc->avg_steal_timer = 0; __ctl_store(lc->cregs_save_area, 0, 15); + lc->cregs_save_area[1] = lc->kernel_asce; + lc->cregs_save_area[7] = lc->vdso_asce; save_access_regs((unsigned int *) lc->access_regs_save_area); memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, sizeof(lc->stfle_fac_list)); @@ -724,39 +729,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early) static int smp_add_present_cpu(int cpu); -static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add) +static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, + bool configured, bool early) { struct pcpu *pcpu; - cpumask_t avail; - int cpu, nr, i, j; + int cpu, nr, i; u16 address; nr = 0; - cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); - cpu = cpumask_first(&avail); - for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { - if (sclp.has_core_type && info->core[i].type != boot_core_type) + if (sclp.has_core_type && core->type != boot_core_type) + return nr; + cpu = cpumask_first(avail); + address = core->core_id << smp_cpu_mt_shift; + for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { + if (pcpu_find_address(cpu_present_mask, address + i)) continue; - address = info->core[i].core_id << smp_cpu_mt_shift; - for (j = 0; j <= smp_cpu_mtid; j++) { - if (pcpu_find_address(cpu_present_mask, address + j)) - continue; - pcpu = pcpu_devices + cpu; - pcpu->address = address + j; - pcpu->state = - (cpu >= info->configured*(smp_cpu_mtid + 1)) ? 
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - set_cpu_present(cpu, true); - if (sysfs_add && smp_add_present_cpu(cpu) != 0) - set_cpu_present(cpu, false); - else - nr++; - cpu = cpumask_next(cpu, &avail); - if (cpu >= nr_cpu_ids) + pcpu = pcpu_devices + cpu; + pcpu->address = address + i; + if (configured) + pcpu->state = CPU_STATE_CONFIGURED; + else + pcpu->state = CPU_STATE_STANDBY; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + set_cpu_present(cpu, true); + if (!early && smp_add_present_cpu(cpu) != 0) + set_cpu_present(cpu, false); + else + nr++; + cpumask_clear_cpu(cpu, avail); + cpu = cpumask_next(cpu, avail); + } + return nr; +} + +static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) +{ + struct sclp_core_entry *core; + cpumask_t avail; + bool configured; + u16 core_id; + int nr, i; + + nr = 0; + cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); + /* + * Add IPL core first (which got logical CPU number 0) to make sure + * that all SMT threads get subsequent logical CPU numbers. + */ + if (early) { + core_id = pcpu_devices[0].address >> smp_cpu_mt_shift; + for (i = 0; i < info->configured; i++) { + core = &info->core[i]; + if (core->core_id == core_id) { + nr += smp_add_core(core, &avail, true, early); break; + } } } + for (i = 0; i < info->combined; i++) { + configured = i < info->configured; + nr += smp_add_core(&info->core[i], &avail, configured, early); + } return nr; } @@ -805,7 +838,7 @@ void __init smp_detect_cpus(void) /* Add CPUs present at boot */ get_online_cpus(); - __smp_rescan_cpus(info, 0); + __smp_rescan_cpus(info, true); put_online_cpus(); memblock_free_early((unsigned long)info, sizeof(*info)); } @@ -816,6 +849,8 @@ static void smp_init_secondary(void) S390_lowcore.last_update_clock = get_tod_clock(); restore_access_regs(S390_lowcore.access_regs_save_area); + set_cpu_flag(CIF_ASCE_PRIMARY); + set_cpu_flag(CIF_ASCE_SECONDARY); cpu_init(); preempt_disable(); init_cpu_timer(); @@ -1148,7 +1183,7 @@ int __ref smp_rescan_cpus(void) smp_get_core_info(info, 0); get_online_cpus(); mutex_lock(&smp_cpu_state_mutex); - nr = __smp_rescan_cpus(info, 1); + nr = __smp_rescan_cpus(info, false); mutex_unlock(&smp_cpu_state_mutex); put_online_cpus(); kfree(info); diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c index a8204f952315..6e609b13c0ce 100644 --- a/arch/s390/kernel/unwind_bc.c +++ b/arch/s390/kernel/unwind_bc.c @@ -60,6 +60,11 @@ bool unwind_next_frame(struct unwind_state *state) ip = READ_ONCE_NOCHECK(sf->gprs[8]); reliable = false; regs = NULL; + if (!__kernel_text_address(ip)) { + /* skip bogus %r14 */ + state->regs = NULL; + return unwind_next_frame(state); + } } else { sf = (struct stack_frame *) state->sp; sp = READ_ONCE_NOCHECK(sf->back_chain); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index d1ccc168c071..62388a678b91 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2191,7 +2191,7 @@ static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr) return -EINVAL; if (!test_kvm_facility(kvm, 72)) - return -ENOTSUPP; + return -EOPNOTSUPP; mutex_lock(&fi->ais_lock); ais.simm = fi->simm; @@ -2500,7 +2500,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr) int ret = 0; if (!test_kvm_facility(kvm, 72)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) return -EFAULT; @@ -2580,7 +2580,7 @@ static int 
flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr) struct kvm_s390_ais_all ais; if (!test_kvm_facility(kvm, 72)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais))) return -EFAULT; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d047e846e1b9..756c627f7e54 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2863,9 +2863,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 | CR14_UNUSED_33 | CR14_EXTERNAL_DAMAGE_SUBMASK; - /* make sure the new fpc will be lazily loaded */ - save_fpu_regs(); - current->thread.fpu.fpc = 0; + vcpu->run->s.regs.fpc = 0; vcpu->arch.sie_block->gbea = 1; vcpu->arch.sie_block->pp = 0; vcpu->arch.sie_block->fpf &= ~FPF_BPBC; @@ -4354,7 +4352,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, switch (ioctl) { case KVM_S390_STORE_STATUS: idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvm_s390_vcpu_store_status(vcpu, arg); + r = kvm_s390_store_status_unloaded(vcpu, arg); srcu_read_unlock(&vcpu->kvm->srcu, idx); break; case KVM_S390_SET_INITIAL_PSW: { diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index b0246c705a19..5674710a4841 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -2,7 +2,7 @@ /* * IBM System z Huge TLB Page Support for Kernel. * - * Copyright IBM Corp. 2007,2016 + * Copyright IBM Corp. 2007,2020 * Author(s): Gerald Schaefer */ @@ -11,6 +11,9 @@ #include #include +#include +#include +#include /* * If the bit selected by single-bit bitmask "a" is set within "x", move @@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); + +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
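A hedged aside with a worked example: vm_unmapped_area() takes align_mask as the set of address bits that must be zero in a candidate address, so the PAGE_MASK & ~huge_page_mask(h) expression used in these helpers demands huge-page alignment expressed at base-page granularity. A standalone sketch assuming 4 KiB base pages and 1 MiB huge pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long huge_page_mask = ~((1UL << 20) - 1);	/* 1 MiB huge pages */
	unsigned long align_mask = PAGE_MASK & ~huge_page_mask;

	/* Prints 0xff000: candidate addresses must have bits 12..19
	 * clear, i.e. be 1 MiB aligned at page granularity. */
	printf("align_mask = %#lx\n", align_mask);
	return 0;
}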
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int rc; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + goto check_asce_limit; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma))) + goto check_asce_limit; + } + + if (mm->get_unmapped_area == arch_get_unmapped_area) + addr = hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + addr = hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); + if (addr & ~PAGE_MASK) + return addr; + +check_asce_limit: + if (addr + len > current->mm->context.asce_limit && + addr + len <= TASK_SIZE) { + rc = crst_table_upgrade(mm, addr + len); + if (rc) + return (unsigned long) rc; + } + return addr; +} diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index a124f19f7b3c..c1d96e588152 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -291,10 +291,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); vmem_remove_mapping(start, size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 1864a8bb9622..de7ca4b6718f 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size) spin_unlock_irqrestore(&s390_kernel_write_lock, flags); } -static int __memcpy_real(void *dest, void *src, size_t count) +static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count) { register unsigned long _dest asm("2") = (unsigned long) dest; register unsigned long _len1 asm("3") = (unsigned long) count; @@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count) return rc; } -static unsigned long _memcpy_real(unsigned long dest, unsigned long src, - unsigned long count) +static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest, + unsigned long src, + unsigned long count) { int irqs_disabled, rc; unsigned long flags; if (!count) return 0; - flags = __arch_local_irq_stnsm(0xf8UL); + flags = arch_local_irq_save(); irqs_disabled = arch_irqs_disabled_flags(flags); if (!irqs_disabled) trace_hardirqs_off(); + __arch_local_irq_stnsm(0xf8); // disable DAT rc = __memcpy_real((void *) dest, (void *) src, (size_t) count); + if (flags & PSW_MASK_DAT) + __arch_local_irq_stosm(0x04); // enable DAT if (!irqs_disabled) trace_hardirqs_on(); __arch_local_irq_ssm(flags); @@ -115,9 +119,15 @@ static unsigned long _memcpy_real(unsigned long dest, unsigned long src, */ int memcpy_real(void *dest, void *src, size_t count) { - if (S390_lowcore.nodat_stack != 0) - return CALL_ON_STACK(_memcpy_real, 
S390_lowcore.nodat_stack, - 3, dest, src, count); + int rc; + + if (S390_lowcore.nodat_stack != 0) { + preempt_disable(); + rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3, + dest, src, count); + preempt_enable(); + return rc; + } /* * This is a really early memcpy_real call, the stacks are * not set up yet. Just call _memcpy_real on the early boot diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index b403fa14847d..f810930aff42 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -415,6 +415,10 @@ void __init vmem_map_init(void) SET_MEMORY_RO | SET_MEMORY_X); __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT, SET_MEMORY_RO | SET_MEMORY_X); + + /* we need lowcore executable for our LPSWE instructions */ + set_memory_x(0, 1); + pr_info("Write protected kernel read-only data: %luk\n", (unsigned long)(__end_rodata - _stext) >> 10); } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ce88211b9c6c..c8c16b5eed6b 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -1369,7 +1370,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) } memset(&jit, 0, sizeof(jit)); - jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); + jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); if (jit.addrs == NULL) { fp = orig_fp; goto out; @@ -1422,7 +1423,7 @@ skip_init_ctx: if (!fp->is_func || extra_pass) { bpf_prog_fill_jited_linfo(fp, jit.addrs + 1); free_addrs: - kfree(jit.addrs); + kvfree(jit.addrs); kfree(jit_data); fp->aux->jit_data = NULL; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index c7fea9bea8cb..6105b1b6e49b 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -423,7 +423,7 @@ static void zpci_map_resources(struct pci_dev *pdev) if (zpci_use_mio(zdev)) pdev->resource[i].start = - (resource_size_t __force) zdev->bars[i].mio_wb; + (resource_size_t __force) zdev->bars[i].mio_wt; else pdev->resource[i].start = (resource_size_t __force) pci_iomap_range_fh(pdev, i, 0, 0); @@ -530,7 +530,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev, flags |= IORESOURCE_MEM_64; if (zpci_use_mio(zdev)) - addr = (unsigned long) zdev->bars[i].mio_wb; + addr = (unsigned long) zdev->bars[i].mio_wt; else addr = ZPCI_ADDR(entry); size = 1UL << zdev->bars[i].size; @@ -934,5 +934,5 @@ subsys_initcall_sync(pci_base_init); void zpci_rescan(void) { if (zpci_is_enabled()) - clp_rescan_pci_devices_simple(); + clp_rescan_pci_devices_simple(NULL); } diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index e585a62d6530..281e0dd4c614 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -240,12 +240,14 @@ error: } /* - * Enable/Disable a given PCI function defined by its function handle. 
+ * Enable/Disable a given PCI function and update its function handle if + * necessary */ -static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) +static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command) { struct clp_req_rsp_set_pci *rrb; int rc, retries = 100; + u32 fid = zdev->fid; rrb = clp_alloc_block(GFP_KERNEL); if (!rrb) @@ -256,7 +258,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) rrb->request.hdr.len = sizeof(rrb->request); rrb->request.hdr.cmd = CLP_SET_PCI_FN; rrb->response.hdr.len = sizeof(rrb->response); - rrb->request.fh = *fh; + rrb->request.fh = zdev->fh; rrb->request.oc = command; rrb->request.ndas = nr_dma_as; @@ -269,12 +271,17 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) } } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY); - if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) - *fh = rrb->response.fh; - else { + if (rc || rrb->response.hdr.rsp != CLP_RC_OK) { zpci_err("Set PCI FN:\n"); zpci_err_clp(rrb->response.hdr.rsp, rc); - rc = -EIO; + } + + if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) { + zdev->fh = rrb->response.fh; + } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY && + rrb->response.fh == 0) { + /* Function is already in desired state - update handle */ + rc = clp_rescan_pci_devices_simple(&fid); } clp_free_block(rrb); return rc; @@ -282,18 +289,17 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as) { - u32 fh = zdev->fh; int rc; - rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN); - zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc); + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN); + zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); if (rc) goto out; - zdev->fh = fh; if (zpci_use_mio(zdev)) { - rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_MIO); - zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc); + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO); + zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n", + zdev->fid, zdev->fh, rc); if (rc) clp_disable_fh(zdev); } @@ -309,11 +315,8 @@ int clp_disable_fh(struct zpci_dev *zdev) if (!zdev_enabled(zdev)) return 0; - rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN); + rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN); zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc); - if (!rc) - zdev->fh = fh; - return rc; } @@ -370,10 +373,14 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) static void __clp_update(struct clp_fh_list_entry *entry, void *data) { struct zpci_dev *zdev; + u32 *fid = data; if (!entry->vendor_id) return; + if (fid && *fid != entry->fid) + return; + zdev = get_zdev_by_fid(entry->fid); if (!zdev) return; @@ -413,7 +420,10 @@ int clp_rescan_pci_devices(void) return rc; } -int clp_rescan_pci_devices_simple(void) +/* Rescan PCI functions and refresh function handles. 
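 * (clp_set_pci_fn() uses the single-function form to refresh a stale handle when firmware reports the function already in the desired state.)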
If fid is non-NULL, only + * refresh the handle of the function matching @fid + */ +int clp_rescan_pci_devices_simple(u32 *fid) { struct clp_req_rsp_list_pci *rrb; int rc; @@ -422,7 +432,7 @@ if (!rrb) return -ENOMEM; - rc = clp_list_pci(rrb, NULL, __clp_update); + rc = clp_list_pci(rrb, fid, __clp_update); clp_free_block(rrb); return rc; diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index a433ba01a317..215f17437a4f 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -13,6 +13,8 @@ #include #include +#include "../../../drivers/pci/pci.h" + #include #define zpci_attr(name, fmt, member) \ @@ -49,31 +51,50 @@ static DEVICE_ATTR_RO(mio_enabled); static ssize_t recover_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct kernfs_node *kn; struct pci_dev *pdev = to_pci_dev(dev); struct zpci_dev *zdev = to_zpci(pdev); - int ret; + int ret = 0; - if (!device_remove_file_self(dev, attr)) - return count; + /* Can't use device_remove_file_self() here as that would lead us to lock + * the pci_rescan_remove_lock while holding the device's kernfs lock. + * This would create a possible deadlock with disable_slot() which is + * not directly protected by the device's kernfs lock but takes it + * during the device removal which happens under + * pci_rescan_remove_lock. + * + * This is analogous to sdev_store_delete() in + * drivers/scsi/scsi_sysfs.c + */ + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + WARN_ON_ONCE(!kn); + /* device_remove_file() serializes concurrent calls, ignoring all but + * the first + */ + device_remove_file(dev, attr); + /* A concurrent call to recover_store() may slip between + * sysfs_break_active_protection() and the sysfs file removal. + * Once it unblocks from pci_lock_rescan_remove() the original pdev + * will already be removed. + */ pci_lock_rescan_remove(); - pci_stop_and_remove_bus_device(pdev); - ret = zpci_disable_device(zdev); - if (ret) - goto error; + if (pci_dev_is_added(pdev)) { + pci_stop_and_remove_bus_device(pdev); + ret = zpci_disable_device(zdev); + if (ret) + goto out; - ret = zpci_enable_device(zdev); - if (ret) - goto error; - - pci_rescan_bus(zdev->bus); + ret = zpci_enable_device(zdev); + if (ret) + goto out; + pci_rescan_bus(zdev->bus); + } +out: pci_unlock_rescan_remove(); - - return count; - -error: - pci_unlock_rescan_remove(); - return ret + if (kn) + sysfs_unbreak_active_protection(kn); + return ret ?
ret : count; } static DEVICE_ATTR_WO(recover); diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index bc0d7a0d0394..9de56065f28c 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -15,8 +15,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE $(call if_changed_rule,as_o_S) -$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE - $(call if_changed_rule,cc_o_c) +KCOV_INSTRUMENT := n +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare diff --git a/arch/s390/purgatory/string.c b/arch/s390/purgatory/string.c new file mode 100644 index 000000000000..c98c22a72db7 --- /dev/null +++ b/arch/s390/purgatory/string.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define __HAVE_ARCH_MEMCMP /* arch function */ +#include "../lib/string.c" diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h index d516e5d48818..b887cc402b71 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h @@ -78,8 +78,15 @@ enum { GPIO_FN_WDTOVF, /* CAN */ - GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1, - GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2, + GPIO_FN_CTX2, GPIO_FN_CRX2, + GPIO_FN_CTX1, GPIO_FN_CRX1, + GPIO_FN_CTX0, GPIO_FN_CRX0, + GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1, + GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2, + GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20, + GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22, + GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22, + GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20, /* DMAC */ GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0, diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h index 96f0246ad2f2..82b63208135a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7734.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h @@ -134,7 +134,7 @@ enum { GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C, GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A, GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B, - GPIO_FN_RD_WR, GPIO_FN_TCLK0, + GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4, GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B, GPIO_FN_ET0_ETXD3_A, GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B, diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index dfdbaa50946e..d1b1ff2be17a 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = PFN_DOWN(start); unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index eb24cb1afc11..18e9fb6fcf1b 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -65,7 +65,6 @@ config SPARC64 select HAVE_KRETPROBES select HAVE_KPROBES select HAVE_RCU_TABLE_FREE if SMP - select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE select HAVE_MEMBLOCK_NODE_MAP select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_DYNAMIC_FTRACE diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 688911051b44..f4afa301954a 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -407,6 
+407,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) } #define ioremap_nocache(X,Y) ioremap((X),(Y)) +#define ioremap_uc(X,Y) ioremap((X),(Y)) #define ioremap_wc(X,Y) ioremap((X),(Y)) #define ioremap_wt(X,Y) ioremap((X),(Y)) diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index a2f3fa61ee36..8cb8f3833239 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h @@ -28,6 +28,15 @@ void flush_tlb_pending(void); #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #define tlb_flush(tlb) flush_tlb_pending() +/* + * SPARC64's hardware TLB fill does not use the Linux page-tables + * and therefore we don't need a TLBI when freeing page-table pages. + */ + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +#define tlb_needs_table_invalidate() (false) +#endif + #include #endif /* _SPARC64_TLB_H */ diff --git a/arch/sparc/include/uapi/asm/ipcbuf.h b/arch/sparc/include/uapi/asm/ipcbuf.h index 9d0d125500e2..084b8949ddff 100644 --- a/arch/sparc/include/uapi/asm/ipcbuf.h +++ b/arch/sparc/include/uapi/asm/ipcbuf.h @@ -15,19 +15,19 @@ struct ipc64_perm { - __kernel_key_t key; - __kernel_uid_t uid; - __kernel_gid_t gid; - __kernel_uid_t cuid; - __kernel_gid_t cgid; + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; #ifndef __arch64__ - unsigned short __pad0; + unsigned short __pad0; #endif - __kernel_mode_t mode; - unsigned short __pad1; - unsigned short seq; - unsigned long long __unused1; - unsigned long long __unused2; + __kernel_mode_t mode; + unsigned short __pad1; + unsigned short seq; + unsigned long long __unused1; + unsigned long long __unused2; }; #endif /* __SPARC_IPCBUF_H */ diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 61afd787bd0c..59b6df13ddea 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -172,12 +172,14 @@ SECTIONS } PERCPU_SECTION(SMP_CACHE_BYTES) -#ifdef CONFIG_JUMP_LABEL . = ALIGN(PAGE_SIZE); .exit.text : { EXIT_TEXT } -#endif + + .exit.data : { + EXIT_DATA + } . = ALIGN(PAGE_SIZE); __init_end = .; diff --git a/arch/um/Kconfig b/arch/um/Kconfig index fec6b4ca2b6e..c56d3526a3bd 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -14,6 +14,7 @@ config UML select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_BUGVERBOSE + select HAVE_COPY_THREAD_TLS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig index fea5a0d522dc..388096fb45a2 100644 --- a/arch/um/drivers/Kconfig +++ b/arch/um/drivers/Kconfig @@ -337,7 +337,7 @@ config UML_NET_SLIRP endmenu config VIRTIO_UML - tristate "UML driver for virtio devices" + bool "UML driver for virtio devices" select VIRTIO help This driver provides support for virtio based paravirtual device diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index fc8c52cff5aa..179b41ad63ba 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -4,12 +4,12 @@ * * Copyright(c) 2019 Intel Corporation * - * This module allows virtio devices to be used over a vhost-user socket. + * This driver allows virtio devices to be used over a vhost-user socket. * * Guest devices can be instantiated by kernel module or command line * parameters. One device will be created for each parameter. 
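 * (As of the Kconfig change above the driver is bool rather than tristate, i.e. always built in, so in practice the parameter is given on the kernel command line.)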
Syntax: * - * [virtio_uml.]device=:[:] + * virtio_uml.device=:[:] * where: * := vhost-user socket path to connect * := virtio device id (as in virtio_ids.h) @@ -83,7 +83,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len, return 0; } -static int full_read(int fd, void *buf, int len) +static int full_read(int fd, void *buf, int len, bool abortable) { int rc; @@ -93,7 +93,7 @@ static int full_read(int fd, void *buf, int len) buf += rc; len -= rc; } - } while (len && (rc > 0 || rc == -EINTR)); + } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN))); if (rc < 0) return rc; @@ -104,7 +104,7 @@ static int full_read(int fd, void *buf, int len) static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg) { - return full_read(fd, msg, sizeof(msg->header)); + return full_read(fd, msg, sizeof(msg->header), true); } static int vhost_user_recv(int fd, struct vhost_user_msg *msg, @@ -118,7 +118,7 @@ static int vhost_user_recv(int fd, struct vhost_user_msg *msg, size = msg->header.size; if (size > max_payload_size) return -EPROTO; - return full_read(fd, &msg->payload, size); + return full_read(fd, &msg->payload, size, false); } static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev, diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index d7086b985f27..4049f2c46387 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -83,8 +83,8 @@ __preinit_array_end = .; } .init_array : { - /* dummy - we call this ourselves */ __init_array_start = .; + *(.init_array) __init_array_end = .; } .fini_array : { diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index 81c647ef9c6c..adf91ef553ae 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request, extern unsigned long getreg(struct task_struct *child, int regno); extern int putreg(struct task_struct *child, int regno, unsigned long value); -extern int arch_copy_tls(struct task_struct *new); +extern int arch_set_tls(struct task_struct *new, unsigned long tls); extern void clear_flushed_tls(struct task_struct *task); extern int syscall_trace_enter(struct pt_regs *regs); extern void syscall_trace_leave(struct pt_regs *regs); diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index c69d69ee96be..f5001481010c 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -103,6 +103,7 @@ SECTIONS be empty, which isn't pretty. */ . = ALIGN(32 / 8); .preinit_array : { *(.preinit_array) } + .init_array : { *(.init_array) } .fini_array : { *(.fini_array) } .data : { INIT_TASK_DATA(KERNEL_STACK_SIZE) diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 263a8f069133..17045e7211bf 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -153,8 +153,8 @@ void fork_handler(void) userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } -int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct * p) +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); int kthread = current->flags & PF_KTHREAD; @@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set a new TLS for the child thread? 
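 * The TLS value is now passed in explicitly via the tls argument instead of being read from the parent's register state.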
*/ if (clone_flags & CLONE_SETTLS) - ret = arch_copy_tls(p); + ret = arch_set_tls(p, tls); } return ret; diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c index 8014dfac644d..c8a42ecbd7a2 100644 --- a/arch/um/os-Linux/main.c +++ b/arch/um/os-Linux/main.c @@ -170,7 +170,7 @@ int __init main(int argc, char **argv, char **envp) * that they won't be delivered after the exec, when * they are definitely not expected. */ - unblock_signals_trace(); + unblock_signals(); os_info("\n"); /* Reboot */ diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c index 25019d42ae93..ef2ad7253cd5 100644 --- a/arch/x86/boot/compressed/acpi.c +++ b/arch/x86/boot/compressed/acpi.c @@ -393,7 +393,13 @@ int count_immovable_mem_regions(void) table = table_addr + sizeof(struct acpi_table_srat); while (table + sizeof(struct acpi_subtable_header) < table_end) { + sub_table = (struct acpi_subtable_header *)table; + if (!sub_table->length) { + debug_putstr("Invalid zero length SRAT subtable.\n"); + return 0; + } + if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) { struct acpi_srat_mem_affinity *ma; diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index d98cd483377e..e9a7f7cadb12 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -244,6 +244,11 @@ ENTRY(efi32_stub_entry) leal efi32_config(%ebp), %eax movl %eax, efi_config(%ebp) + /* Disable paging */ + movl %cr0, %eax + btrl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + jmp startup_32 ENDPROC(efi32_stub_entry) #endif diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c index 748456c365f4..9557c5a15b91 100644 --- a/arch/x86/boot/compressed/kaslr_64.c +++ b/arch/x86/boot/compressed/kaslr_64.c @@ -29,9 +29,6 @@ #define __PAGE_OFFSET __PAGE_OFFSET_BASE #include "../../mm/ident_map.c" -/* Used by pgtable.h asm code to force instruction serialization. */ -unsigned long __force_order; - /* Used to track our page table allocation area. */ struct alloc_pgt_data { unsigned char *pgt_buf; diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index f83ca5aa8b77..f07baf0388bc 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -172,7 +172,7 @@ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI .if \no_user_check == 0 /* coming from usermode? */ - testl $SEGMENT_RPL_MASK, PT_CS(%esp) + testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp) jz .Lend_\@ .endif /* On user-cr3? */ @@ -205,64 +205,76 @@ #define CS_FROM_ENTRY_STACK (1 << 31) #define CS_FROM_USER_CR3 (1 << 30) #define CS_FROM_KERNEL (1 << 29) +#define CS_FROM_ESPFIX (1 << 28) .macro FIXUP_FRAME /* * The high bits of the CS dword (__csh) are used for CS_FROM_*. * Clear them in case hardware didn't do this for us. 
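 * (Only the low 16 bits of the saved CS slot are architecturally defined; some CPUs leave the high word of the slot unmodified.)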
*/ - andl $0x0000ffff, 3*4(%esp) + andl $0x0000ffff, 4*4(%esp) #ifdef CONFIG_VM86 - testl $X86_EFLAGS_VM, 4*4(%esp) + testl $X86_EFLAGS_VM, 5*4(%esp) jnz .Lfrom_usermode_no_fixup_\@ #endif - testl $SEGMENT_RPL_MASK, 3*4(%esp) + testl $USER_SEGMENT_RPL_MASK, 4*4(%esp) jnz .Lfrom_usermode_no_fixup_\@ - orl $CS_FROM_KERNEL, 3*4(%esp) + orl $CS_FROM_KERNEL, 4*4(%esp) /* * When we're here from kernel mode, the (exception) stack looks like: * - * 5*4(%esp) - - * 4*4(%esp) - flags - * 3*4(%esp) - cs - * 2*4(%esp) - ip - * 1*4(%esp) - orig_eax - * 0*4(%esp) - gs / function + * 6*4(%esp) - + * 5*4(%esp) - flags + * 4*4(%esp) - cs + * 3*4(%esp) - ip + * 2*4(%esp) - orig_eax + * 1*4(%esp) - gs / function + * 0*4(%esp) - fs * * Let's build a 5-entry IRET frame after that, such that struct pt_regs * is complete and in particular regs->sp is correct. This gives us - * the original 5 entries as gap: + * the original 6 entries as gap: * - * 12*4(%esp) - - * 11*4(%esp) - gap / flags - * 10*4(%esp) - gap / cs - * 9*4(%esp) - gap / ip - * 8*4(%esp) - gap / orig_eax - * 7*4(%esp) - gap / gs / function - * 6*4(%esp) - ss - * 5*4(%esp) - sp - * 4*4(%esp) - flags - * 3*4(%esp) - cs - * 2*4(%esp) - ip - * 1*4(%esp) - orig_eax - * 0*4(%esp) - gs / function + * 14*4(%esp) - + * 13*4(%esp) - gap / flags + * 12*4(%esp) - gap / cs + * 11*4(%esp) - gap / ip + * 10*4(%esp) - gap / orig_eax + * 9*4(%esp) - gap / gs / function + * 8*4(%esp) - gap / fs + * 7*4(%esp) - ss + * 6*4(%esp) - sp + * 5*4(%esp) - flags + * 4*4(%esp) - cs + * 3*4(%esp) - ip + * 2*4(%esp) - orig_eax + * 1*4(%esp) - gs / function + * 0*4(%esp) - fs */ pushl %ss # ss pushl %esp # sp (points at ss) - addl $6*4, (%esp) # point sp back at the previous context - pushl 6*4(%esp) # flags - pushl 6*4(%esp) # cs - pushl 6*4(%esp) # ip - pushl 6*4(%esp) # orig_eax - pushl 6*4(%esp) # gs / function + addl $7*4, (%esp) # point sp back at the previous context + pushl 7*4(%esp) # flags + pushl 7*4(%esp) # cs + pushl 7*4(%esp) # ip + pushl 7*4(%esp) # orig_eax + pushl 7*4(%esp) # gs / function + pushl 7*4(%esp) # fs .Lfrom_usermode_no_fixup_\@: .endm .macro IRET_FRAME + /* + * We're called with %ds, %es, %fs, and %gs from the interrupted + * frame, so we shouldn't use them. Also, we may be in ESPFIX + * mode and therefore have a nonzero SS base and an offset ESP, + * so any attempt to access the stack needs to use SS. (except for + * accesses through %esp, which automatically use SS.)
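+ * (This is why the stores below use explicit %ss: segment overrides.)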
+ */ testl $CS_FROM_KERNEL, 1*4(%esp) jz .Lfinished_frame_\@ @@ -276,31 +288,40 @@ movl 5*4(%esp), %eax # (modified) regs->sp movl 4*4(%esp), %ecx # flags - movl %ecx, -4(%eax) + movl %ecx, %ss:-1*4(%eax) movl 3*4(%esp), %ecx # cs andl $0x0000ffff, %ecx - movl %ecx, -8(%eax) + movl %ecx, %ss:-2*4(%eax) movl 2*4(%esp), %ecx # ip - movl %ecx, -12(%eax) + movl %ecx, %ss:-3*4(%eax) movl 1*4(%esp), %ecx # eax - movl %ecx, -16(%eax) + movl %ecx, %ss:-4*4(%eax) popl %ecx - lea -16(%eax), %esp + lea -4*4(%eax), %esp popl %eax .Lfinished_frame_\@: .endm -.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 +.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0 cld .if \skip_gs == 0 PUSH_GS .endif - FIXUP_FRAME pushl %fs + + pushl %eax + movl $(__KERNEL_PERCPU), %eax + movl %eax, %fs +.if \unwind_espfix > 0 + UNWIND_ESPFIX_STACK +.endif + popl %eax + + FIXUP_FRAME pushl %es pushl %ds pushl \pt_regs_ax @@ -313,8 +334,6 @@ movl $(__USER_DS), %edx movl %edx, %ds movl %edx, %es - movl $(__KERNEL_PERCPU), %edx - movl %edx, %fs .if \skip_gs == 0 SET_KERNEL_GS %edx .endif @@ -324,8 +343,8 @@ .endif .endm -.macro SAVE_ALL_NMI cr3_reg:req - SAVE_ALL +.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0 + SAVE_ALL unwind_espfix=\unwind_espfix BUG_IF_WRONG_CR3 @@ -357,6 +376,7 @@ 2: popl %es 3: popl %fs POP_GS \pop + IRET_FRAME .pushsection .fixup, "ax" 4: movl $0, (%esp) jmp 1b @@ -395,7 +415,8 @@ .macro CHECK_AND_APPLY_ESPFIX #ifdef CONFIG_X86_ESPFIX32 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) +#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8) +#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX @@ -1075,7 +1096,6 @@ restore_all: /* Restore user state */ RESTORE_REGS pop=4 # skip orig_eax/error_code .Lirq_return: - IRET_FRAME /* * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization * when returning from IPI handler and when returning from @@ -1128,30 +1148,43 @@ ENDPROC(entry_INT80_32) * We can't call C functions using the ESPFIX stack. This code reads * the high word of the segment base from the GDT and switches to the * normal stack and adjusts ESP with the matching offset. + * + * We might be on user CR3 here, so percpu data is not mapped and we can't + * access the GDT through the percpu segment. Instead, use SGDT to find + * the cpu_entry_area alias of the GDT. */ #ifdef CONFIG_X86_ESPFIX32 /* fixup the stack */ - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ + pushl %ecx + subl $2*4, %esp + sgdt (%esp) + movl 2(%esp), %ecx /* GDT address */ + /* + * Careful: ECX is a linear pointer, so we need to force base + * zero. %cs is the only known-linear segment we have right now.
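+ * (%ss in particular may still be the ESPFIX segment, which has a nonzero base.)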
+ */ + mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */ + mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */ shl $16, %eax + addl $2*4, %esp + popl %ecx addl %esp, %eax /* the adjusted stack pointer */ pushl $__KERNEL_DS pushl %eax lss (%esp), %esp /* switch to the normal stack segment */ #endif .endm + .macro UNWIND_ESPFIX_STACK + /* It's safe to clobber %eax, all other regs need to be preserved */ #ifdef CONFIG_X86_ESPFIX32 movl %ss, %eax /* see if on espfix stack */ cmpw $__ESPFIX_SS, %ax - jne 27f - movl $__KERNEL_DS, %eax - movl %eax, %ds - movl %eax, %es + jne .Lno_fixup_\@ /* switch to normal stack */ FIXUP_ESPFIX_STACK -27: +.Lno_fixup_\@: #endif .endm @@ -1341,11 +1374,6 @@ END(spurious_interrupt_bug) #ifdef CONFIG_XEN_PV ENTRY(xen_hypervisor_callback) - pushl $-1 /* orig_ax = -1 => not a system call */ - SAVE_ALL - ENCODE_FRAME_POINTER - TRACE_IRQS_OFF - /* * Check to see if we got the event in the critical * region in xen_iret_direct, after we've reenabled @@ -1353,16 +1381,17 @@ ENTRY(xen_hypervisor_callback) * iret instruction's behaviour where it delivers a * pending interrupt when enabling interrupts: */ - movl PT_EIP(%esp), %eax - cmpl $xen_iret_start_crit, %eax + cmpl $xen_iret_start_crit, (%esp) jb 1f - cmpl $xen_iret_end_crit, %eax + cmpl $xen_iret_end_crit, (%esp) jae 1f - - jmp xen_iret_crit_fixup - -ENTRY(xen_do_upcall) -1: mov %esp, %eax + call xen_iret_crit_fixup +1: + pushl $-1 /* orig_ax = -1 => not a system call */ + SAVE_ALL + ENCODE_FRAME_POINTER + TRACE_IRQS_OFF + mov %esp, %eax call xen_evtchn_do_upcall #ifndef CONFIG_PREEMPTION call xen_maybe_preempt_hcall @@ -1449,10 +1478,9 @@ END(page_fault) common_exception_read_cr2: /* the function address is in %gs's slot on the stack */ - SAVE_ALL switch_stacks=1 skip_gs=1 + SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 ENCODE_FRAME_POINTER - UNWIND_ESPFIX_STACK /* fixup %gs */ GS_TO_REG %ecx @@ -1474,9 +1502,8 @@ END(common_exception_read_cr2) common_exception: /* the function address is in %gs's slot on the stack */ - SAVE_ALL switch_stacks=1 skip_gs=1 + SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 ENCODE_FRAME_POINTER - UNWIND_ESPFIX_STACK /* fixup %gs */ GS_TO_REG %ecx @@ -1515,6 +1542,10 @@ ENTRY(nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 + /* + * ESPFIX_SS is only ever set on the return to user path + * after we've switched to the entry stack. 
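+ * (So if %ss below matches __ESPFIX_SS, the NMI interrupted that narrow return-to-user window.)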
+ */ pushl %eax movl %ss, %eax cmpw $__ESPFIX_SS, %ax @@ -1550,6 +1581,11 @@ ENTRY(nmi) movl %ebx, %esp .Lnmi_return: +#ifdef CONFIG_X86_ESPFIX32 + testl $CS_FROM_ESPFIX, PT_CS(%esp) + jnz .Lnmi_from_espfix +#endif + CHECK_AND_APPLY_ESPFIX RESTORE_ALL_NMI cr3_reg=%edi pop=4 jmp .Lirq_return @@ -1557,23 +1593,42 @@ ENTRY(nmi) #ifdef CONFIG_X86_ESPFIX32 .Lnmi_espfix_stack: /* - * create the pointer to lss back + * Create the pointer to LSS back */ pushl %ss pushl %esp addl $4, (%esp) - /* copy the iret frame of 12 bytes */ - .rept 3 - pushl 16(%esp) - .endr - pushl %eax - SAVE_ALL_NMI cr3_reg=%edi + + /* Copy the (short) IRET frame */ + pushl 4*4(%esp) # flags + pushl 4*4(%esp) # cs + pushl 4*4(%esp) # ip + + pushl %eax # orig_ax + + SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1 ENCODE_FRAME_POINTER - FIXUP_ESPFIX_STACK # %eax == %esp + + /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */ + xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp) + xorl %edx, %edx # zero error code - call do_nmi + movl %esp, %eax # pt_regs pointer + jmp .Lnmi_from_sysenter_stack + +.Lnmi_from_espfix: RESTORE_ALL_NMI cr3_reg=%edi - lss 12+4(%esp), %esp # back to espfix stack + /* + * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to + * fix up the gap and long frame: + * + * 3 - original frame (exception) + * 2 - ESPFIX block (above) + * 6 - gap (FIXUP_FRAME) + * 5 - long frame (FIXUP_FRAME) + * 1 - orig_ax + */ + lss (1+5+6)*4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif END(nmi) diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index aa3336a7cb15..7d17b3addbbb 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -10,13 +10,11 @@ #ifdef CONFIG_IA32_EMULATION /* On X86_64, we use struct pt_regs * to pass parameters to syscalls */ #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *); - -/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */ -extern asmlinkage long sys_ni_syscall(const struct pt_regs *); - +#define __sys_ni_syscall __ia32_sys_ni_syscall #else /* CONFIG_IA32_EMULATION */ #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#define __sys_ni_syscall sys_ni_syscall #endif /* CONFIG_IA32_EMULATION */ #include @@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = * Smells like a compiler bug -- it doesn't work * when the & below is removed. */ - [0 ... __NR_syscall_compat_max] = &sys_ni_syscall, + [0 ... 
__NR_syscall_compat_max] = &__sys_ni_syscall, #include }; diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index b1bf31713374..adf619a856e8 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -4,11 +4,17 @@ #include #include #include +#include #include #include -/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */ -extern asmlinkage long sys_ni_syscall(const struct pt_regs *); +extern asmlinkage long sys_ni_syscall(void); + +SYSCALL_DEFINE0(ni_syscall) +{ + return sys_ni_syscall(); +} + #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *); #define __SYSCALL_X32(nr, sym, qual) __SYSCALL_64(nr, sym, qual) #include @@ -23,7 +29,7 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { * Smells like a compiler bug -- it doesn't work * when the & below is removed. */ - [0 ... __NR_syscall_max] = &sys_ni_syscall, + [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall, #include }; @@ -40,7 +46,7 @@ asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_syscall_x32_max+1] = { * Smells like a compiler bug -- it doesn't work * when the & below is removed. */ - [0 ... __NR_syscall_x32_max] = &sys_ni_syscall, + [0 ... __NR_syscall_x32_max] = &__x64_sys_ni_syscall, #include }; diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 3fe02546aed3..15908eb9b17e 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -124,13 +124,13 @@ 110 i386 iopl sys_iopl __ia32_sys_iopl 111 i386 vhangup sys_vhangup __ia32_sys_vhangup 112 i386 idle -113 i386 vm86old sys_vm86old sys_ni_syscall +113 i386 vm86old sys_vm86old __ia32_sys_ni_syscall 114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4 115 i386 swapoff sys_swapoff __ia32_sys_swapoff 116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo 117 i386 ipc sys_ipc __ia32_compat_sys_ipc 118 i386 fsync sys_fsync __ia32_sys_fsync -119 i386 sigreturn sys_sigreturn sys32_sigreturn +119 i386 sigreturn sys_sigreturn __ia32_compat_sys_sigreturn 120 i386 clone sys_clone __ia32_compat_sys_x86_clone 121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname 122 i386 uname sys_newuname __ia32_sys_newuname @@ -177,14 +177,14 @@ 163 i386 mremap sys_mremap __ia32_sys_mremap 164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16 165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16 -166 i386 vm86 sys_vm86 sys_ni_syscall +166 i386 vm86 sys_vm86 __ia32_sys_ni_syscall 167 i386 query_module 168 i386 poll sys_poll __ia32_sys_poll 169 i386 nfsservctl 170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16 171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16 172 i386 prctl sys_prctl __ia32_sys_prctl -173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn +173 i386 rt_sigreturn sys_rt_sigreturn __ia32_compat_sys_rt_sigreturn 174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction 175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_compat_sys_rt_sigprocmask 176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c index 240626e7f55a..43842fade8fa 100644 --- a/arch/x86/entry/vdso/vdso32-setup.c +++ b/arch/x86/entry/vdso/vdso32-setup.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 64c3e70b0556..3ea8056148d8 100644 --- a/arch/x86/events/amd/core.c +++ 
b/arch/x86/events/amd/core.c @@ -246,6 +246,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] = [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0964, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287, @@ -301,6 +302,25 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel) return offset; } +/* + * AMD64 events are detected based on their event codes. + */ +static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) +{ + return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); +} + +static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc) +{ + if (!(x86_pmu.flags & PMU_FL_PAIR)) + return false; + + switch (amd_get_event_code(hwc)) { + case 0x003: return true; /* Retired SSE/AVX FLOPs */ + default: return false; + } +} + static int amd_core_hw_config(struct perf_event *event) { if (event->attr.exclude_host && event->attr.exclude_guest) @@ -319,14 +339,6 @@ static int amd_core_hw_config(struct perf_event *event) return 0; } -/* - * AMD64 events are detected based on their event codes. - */ -static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) -{ - return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); -} - static inline int amd_is_nb_event(struct hw_perf_event *hwc) { return (hwc->config & 0xe0) == 0xe0; @@ -864,6 +876,20 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, } } +static struct event_constraint pair_constraint; + +static struct event_constraint * +amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (amd_is_pair_event_code(hwc)) + return &pair_constraint; + + return &unconstrained; +} + static ssize_t amd_event_sysfs_show(char *page, u64 config) { u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | @@ -907,33 +933,15 @@ static __initconst const struct x86_pmu amd_pmu = { static int __init amd_core_pmu_init(void) { + u64 even_ctr_mask = 0ULL; + int i; + if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) return 0; - /* Avoid calulating the value each time in the NMI handler */ + /* Avoid calculating the value each time in the NMI handler */ perf_nmi_window = msecs_to_jiffies(100); - switch (boot_cpu_data.x86) { - case 0x15: - pr_cont("Fam15h "); - x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; - break; - case 0x17: - pr_cont("Fam17h "); - /* - * In family 17h, there are no event constraints in the PMC hardware. - * We fallback to using default amd_get_event_constraints. - */ - break; - case 0x18: - pr_cont("Fam18h "); - /* Using default amd_get_event_constraints. */ - break; - default: - pr_err("core perfctr but no constraints; unknown hardware!\n"); - return -ENODEV; - } - /* * If core performance counter extensions exists, we must use * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. 
See also @@ -948,6 +956,30 @@ static int __init amd_core_pmu_init(void) */ x86_pmu.amd_nb_constraints = 0; + if (boot_cpu_data.x86 == 0x15) { + pr_cont("Fam15h "); + x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; + } + if (boot_cpu_data.x86 >= 0x17) { + pr_cont("Fam17h+ "); + /* + * Family 17h and compatibles have constraints for Large + * Increment per Cycle events: they may only be assigned an + * even numbered counter that has a consecutive adjacent odd + * numbered counter following it. + */ + for (i = 0; i < x86_pmu.num_counters - 1; i += 2) + even_ctr_mask |= 1 << i; + + pair_constraint = (struct event_constraint) + __EVENT_CONSTRAINT(0, even_ctr_mask, 0, + x86_pmu.num_counters / 2, 0, + PERF_X86_EVENT_PAIR); + + x86_pmu.get_event_constraints = amd_get_event_constraints_f17h; + x86_pmu.flags |= PMU_FL_PAIR; + } + pr_cont("core perfctr, "); return 0; } diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index a6ea07f2aa84..4d867a752f0e 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event) /* * NB and Last level cache counters (MSRs) are shared across all cores - * that share the same NB / Last level cache. Interrupts can be directed - * to a single target core, however, event counts generated by processes - * running on other cores cannot be masked out. So we do not support - * sampling and per-thread events. + * that share the same NB / Last level cache. On family 16h and below, + * Interrupts can be directed to a single target core, however, event + * counts generated by processes running on other cores cannot be masked + * out. So we do not support sampling and per-thread events via + * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts: */ - if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) - return -EINVAL; - - /* and we do not enable counter overflow interrupts */ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; hwc->idx = -1; @@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, }; static struct pmu amd_llc_pmu = { @@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, }; static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 7b21455d7504..e622158f5659 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what) * LBR and BTS are still mutually exclusive. */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) - return 0; + goto out; if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { mutex_lock(&pmc_reserve_mutex); @@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what) mutex_unlock(&pmc_reserve_mutex); } +out: atomic_inc(&active_events); return 0; @@ -397,11 +398,15 @@ fail_unlock: void x86_del_exclusive(unsigned int what) { + atomic_dec(&active_events); + + /* + * See the comment in x86_add_exclusive(). 
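+ * (x86_add_exclusive() bumps active_events even for PT events, so the decrement above must come before this early return.)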
+ */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) return; atomic_dec(&x86_pmu.lbr_exclusive[what]); - atomic_dec(&active_events); } int x86_setup_perfctr(struct perf_event *event) @@ -1641,9 +1646,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = { ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) { - struct perf_pmu_events_attr *pmu_attr = \ + struct perf_pmu_events_attr *pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); - u64 config = x86_pmu.event_map(pmu_attr->id); + u64 config = 0; + + if (pmu_attr->id < x86_pmu.max_events) + config = x86_pmu.event_map(pmu_attr->id); /* string trumps id */ if (pmu_attr->event_str) @@ -1712,6 +1720,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct perf_pmu_events_attr *pmu_attr; + if (idx >= x86_pmu.max_events) + return 0; + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); /* str trumps id */ return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 5ee3fed881d3..741540d849f3 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -63,9 +63,17 @@ struct bts_buffer { static struct pmu bts_pmu; +static int buf_nr_pages(struct page *page) +{ + if (!PagePrivate(page)) + return 1; + + return 1 << page_private(page); +} + static size_t buf_size(struct page *page) { - return 1 << (PAGE_SHIFT + page_private(page)); + return buf_nr_pages(page) * PAGE_SIZE; } static void * @@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, /* count all the high order buffers */ for (pg = 0, nbuf = 0; pg < nr_pages;) { page = virt_to_page(pages[pg]); - if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) - return NULL; - pg += 1 << page_private(page); + pg += buf_nr_pages(page); nbuf++; } @@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, unsigned int __nr_pages; page = virt_to_page(pages[pg]); - __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; + __nr_pages = buf_nr_pages(page); buf->buf[nbuf].page = page; buf->buf[nbuf].offset = offset; buf->buf[nbuf].displacement = (pad ? 
BTS_RECORD_SIZE - pad : 0); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index fcef678c3423..c531e3f3269e 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4746,6 +4746,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_TREMONT_D: + case INTEL_FAM6_ATOM_TREMONT: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index e1daf4151e11..4814c964692c 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -40,17 +40,18 @@ * Model specific counters: * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 - * Available model: SLM,AMT,GLM,CNL + * Available model: SLM,AMT,GLM,CNL,TNT * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM, - * CNL,KBL,CML + * CNL,KBL,CML,TNT * Scope: Core * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, - * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL + * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL, + * TNT * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -60,17 +61,18 @@ * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. * perf code: 0x00 * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL, - * KBL,CML,ICL,TGL + * KBL,CML,ICL,TGL,TNT * Scope: Package (physical package) * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. * perf code: 0x01 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL, - * GLM,CNL,KBL,CML,ICL,TGL + * GLM,CNL,KBL,CML,ICL,TGL,TNT * Scope: Package (physical package) * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. * perf code: 0x02 - * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW - * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, + * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL, + * TNT * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 @@ -87,7 +89,8 @@ * Scope: Package (physical package) * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. 
* perf code: 0x06 - * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL + * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, + * TNT * Scope: Package (physical package) * */ @@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates), diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index ce83950036c5..e5ad97a82342 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) old = ((s64)(prev_raw_count << shift) >> shift); local64_add(new - old + count * period, &event->count); + local64_set(&hwc->period_left, -new); + perf_event_update_userpage(event); return 0; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index dbaa1b088a30..c37cb12d0ef6 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -15,6 +15,7 @@ #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f +#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c #define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 @@ -657,6 +658,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* IMC */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), @@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ + IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index b10a5ec79e48..ad20220af303 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -369,11 +369,6 @@ #define SNR_M2M_PCI_PMON_BOX_CTL 0x438 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff -/* SNR PCIE3 */ -#define SNR_PCIE3_PCI_PMON_CTL0 0x508 -#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8 -#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4 - /* SNR IMC */ #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38 @@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = { .format_group = &snr_m2m_uncore_format_group, }; -static struct intel_uncore_type snr_uncore_pcie3 = { - .name = "pcie3", - .num_counters = 4, - .num_boxes = 1, 
- .perf_ctr_bits = 48, - .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, - .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, - .event_mask = SNBEP_PMON_RAW_EVENT_MASK, - .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, - .ops = &ivbep_uncore_pci_ops, - .format_group = &ivbep_uncore_format_group, -}; - enum { SNR_PCI_UNCORE_M2M, - SNR_PCI_UNCORE_PCIE3, }; static struct intel_uncore_type *snr_pci_uncores[] = { [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, - [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, NULL, }; @@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0), }, - { /* PCIe3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), - .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), - }, { /* end: all zeroes */ } }; @@ -4536,6 +4512,7 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + { /* end: all zeroes */ }, }; static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = { diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 6f86650b3f77..a949f6f55991 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_D: - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: + case INTEL_FAM6_ATOM_TREMONT_D: + case INTEL_FAM6_ATOM_TREMONT: case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index ecacfbf4ebc1..0ed910237c4d 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -77,6 +77,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode) #define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */ #define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */ #define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */ +#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */ struct amd_nb { int nb_id; /* NorthBridge id */ @@ -735,6 +736,7 @@ do { \ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ #define PMU_FL_TFA 0x20 /* deal with TSX force abort */ +#define PMU_FL_PAIR 0x40 /* merge counters for large incr. 
events */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 1cee10091b9f..30416d7f19d4 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -118,7 +119,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, return err; } -asmlinkage long sys32_sigreturn(void) +COMPAT_SYSCALL_DEFINE0(sigreturn) { struct pt_regs *regs = current_pt_regs(); struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); @@ -144,7 +145,7 @@ badframe: return 0; } -asmlinkage long sys32_rt_sigreturn(void) +COMPAT_SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe_ia32 __user *frame; diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 2ebc17d9c72c..19e94af9cc5d 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -140,6 +140,7 @@ extern void apic_soft_disable(void); extern void lapic_shutdown(void); extern void sync_Arb_IDs(void); extern void init_bsp_APIC(void); +extern void apic_intr_mode_select(void); extern void apic_intr_mode_init(void); extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); @@ -188,6 +189,7 @@ static inline void disable_local_APIC(void) { } # define setup_secondary_APIC_clock x86_init_noop static inline void lapic_update_tsc_freq(void) { } static inline void init_bsp_APIC(void) { } +static inline void apic_intr_mode_select(void) { } static inline void apic_intr_mode_init(void) { } static inline void lapic_assign_system_vectors(void) { } static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { } @@ -452,6 +454,14 @@ static inline void ack_APIC_irq(void) apic_eoi(); } + +static inline bool lapic_vector_set_in_irr(unsigned int vector) +{ + u32 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); + + return !!(irr & (1U << (vector % 32))); +} + static inline unsigned default_get_apic_id(unsigned long x) { unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 8348f7d69fd5..ea866c7bf31d 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -78,8 +78,12 @@ struct cpu_entry_area { /* * The GDT is just below entry_stack and thus serves (on x86_64) as - * a a read-only guard page. + * a read-only guard page. On 32-bit the GDT must be writeable, so + * it needs an extra guard page. */ +#ifdef CONFIG_X86_32 + char guard_entry_stack[PAGE_SIZE]; +#endif struct entry_stack_page entry_stack_page; /* @@ -94,7 +98,6 @@ struct cpu_entry_area { */ struct cea_exception_stacks estacks; #endif -#ifdef CONFIG_CPU_SUP_INTEL /* * Per CPU debug store for Intel performance monitoring. Wastes a * full page at the moment. @@ -105,11 +108,13 @@ struct cpu_entry_area { * Reserve enough fixmap PTEs. 
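 * (Now reserved unconditionally: the CONFIG_CPU_SUP_INTEL guards are gone, so the cpu_entry_area layout no longer depends on the CPU vendor.)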
*/ struct debug_store_buffers cpu_debug_buffers; -#endif }; -#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) -#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) +#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) +#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) + +/* Total size includes the readonly IDT mapping page as well: */ +#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE) DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks); @@ -117,13 +122,14 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks); extern void setup_cpu_entry_areas(void); extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); +/* Single page reserved for the readonly IDT mapping: */ #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) #define CPU_ENTRY_AREA_MAP_SIZE \ - (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE) extern struct cpu_entry_area *get_cpu_entry_area(int cpu); diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h index 0acf5ee45a21..ef5638f641f2 100644 --- a/arch/x86/include/asm/crash.h +++ b/arch/x86/include/asm/crash.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_CRASH_H #define _ASM_X86_CRASH_H +struct kimage; + int crash_load_segments(struct kimage *image); int crash_copy_backup_region(struct kimage *image); int crash_setup_memmap_entries(struct kimage *image, diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 0c47aa82e2e2..28183ee3cc42 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -156,7 +156,7 @@ extern pte_t *kmap_pte; extern pte_t *pkmap_page_table; void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); -void native_set_fixmap(enum fixed_addresses idx, +void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags); #ifndef CONFIG_PARAVIRT_XXL diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 4c95c365058a..44c48e34d799 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu) static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) { - return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; + return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; } /* diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4fc61483919a..734a3334e0f0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -380,12 +380,12 @@ struct kvm_mmu { void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); unsigned long (*get_cr3)(struct kvm_vcpu *vcpu); u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index); - int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, + int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err, bool prefault); void (*inject_page_fault)(struct kvm_vcpu *vcpu, struct x86_exception *fault); - gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, - struct x86_exception *exception); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa, + u32 access, struct 
x86_exception *exception); gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception); int (*sync_page)(struct kvm_vcpu *vcpu, @@ -667,10 +667,10 @@ struct kvm_vcpu_arch { bool pvclock_set_guest_stopped_request; struct { + u8 preempted; u64 msr_val; u64 last_steal; - struct gfn_to_hva_cache stime; - struct kvm_steal_time steal; + struct gfn_to_pfn_cache cache; } st; u64 tsc_offset; @@ -1098,7 +1098,7 @@ struct kvm_x86_ops { void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); - void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); + int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr); @@ -1128,6 +1128,7 @@ struct kvm_x86_ops { bool (*xsaves_supported)(void); bool (*umip_emulated)(void); bool (*pt_supported)(void); + bool (*pku_supported)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); void (*request_immediate_exit)(struct kvm_vcpu *vcpu); @@ -1450,7 +1451,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu); int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); -int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code, +int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 6a3124664289..1682e4b5ce75 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -510,6 +510,8 @@ #define MSR_K7_HWCR 0xc0010015 #define MSR_K7_HWCR_SMMLOCK_BIT 0 #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT) +#define MSR_K7_HWCR_IRPERF_EN_BIT 30 +#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT) #define MSR_K7_FID_VID_CTL 0xc0010041 #define MSR_K7_FID_VID_STATUS 0xc0010042 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 75ded1d13d98..9d5d949e662e 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -41,7 +41,6 @@ struct nmiaction { struct list_head list; nmi_handler_t handler; u64 max_duration; - struct irq_work irq_work; unsigned long flags; const char *name; }; diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index b0bc0fff5f1f..1636eb8e5a5b 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c * to avoid include recursion hell */ -#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 39) -#define CPU_ENTRY_AREA_BASE \ - ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \ - & PMD_MASK) +/* The +1 is for the readonly IDT page: */ +#define CPU_ENTRY_AREA_BASE \ + ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK) #define LDT_BASE_ADDR \ ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index ac3892920419..6669164abadc 100644 --- 
a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -31,6 +31,18 @@ */ #define SEGMENT_RPL_MASK 0x3 +/* + * When running on Xen PV, the actual privilege level of the kernel is 1, + * not 0. Testing the Requested Privilege Level in a segment selector to + * determine whether the context is user mode or kernel mode with + * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level + * matches the 0x3 mask. + * + * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV + * kernels because privilege level 2 is never used. + */ +#define USER_SEGMENT_RPL_MASK 0x2 + /* User mode is privilege level 3: */ #define USER_RPL 0x3 diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h index e046a405743d..e2389ce9bf58 100644 --- a/arch/x86/include/asm/syscall_wrapper.h +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -6,6 +6,8 @@ #ifndef _ASM_X86_SYSCALL_WRAPPER_H #define _ASM_X86_SYSCALL_WRAPPER_H +struct pt_regs; + /* Mapping of registers to parameters for syscalls on x86-64 and x32 */ #define SC_X86_64_REGS_TO_ARGS(x, ...) \ __MAP(x,__SC_ARGS \ @@ -28,13 +30,21 @@ * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this * case as well. */ +#define __IA32_COMPAT_SYS_STUB0(x, name) \ + asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs);\ + ALLOW_ERROR_INJECTION(__ia32_compat_sys_##name, ERRNO); \ + asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs)\ + { \ + return __se_compat_sys_##name(); \ + } + #define __IA32_COMPAT_SYS_STUBx(x, name, ...) \ asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\ ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \ asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\ { \ return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\ - } \ + } #define __IA32_SYS_STUBx(x, name, ...) \ asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \ @@ -48,16 +58,23 @@ * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias * named __ia32_sys_*() */ -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __x64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ - SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \ - asmlinkage long __x64_sys_##sname(void) -#define COND_SYSCALL(name) \ - cond_syscall(__x64_sys_##name); \ - cond_syscall(__ia32_sys_##name) +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\ + ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ + SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused) + +#define COND_SYSCALL(name) \ + asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \ + { \ + return sys_ni_syscall(); \ + } \ + asmlinkage __weak long __ia32_sys_##name(const struct pt_regs *__unused)\ + { \ + return sys_ni_syscall(); \ + } #define SYS_NI(name) \ SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \ @@ -75,15 +92,24 @@ * of the x86-64-style parameter ordering of x32 syscalls. The syscalls common * with x86_64 obviously do not need such care. */ +#define __X32_COMPAT_SYS_STUB0(x, name, ...) 
\ + asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs);\ + ALLOW_ERROR_INJECTION(__x32_compat_sys_##name, ERRNO); \ + asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs)\ + { \ + return __se_compat_sys_##name();\ + } + #define __X32_COMPAT_SYS_STUBx(x, name, ...) \ asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\ ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \ asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\ { \ return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\ - } \ + } #else /* CONFIG_X86_X32 */ +#define __X32_COMPAT_SYS_STUB0(x, name) #define __X32_COMPAT_SYS_STUBx(x, name, ...) #endif /* CONFIG_X86_X32 */ @@ -94,6 +120,17 @@ * mapping of registers to parameters, we need to generate stubs for each * of them. */ +#define COMPAT_SYSCALL_DEFINE0(name) \ + static long __se_compat_sys_##name(void); \ + static inline long __do_compat_sys_##name(void); \ + __IA32_COMPAT_SYS_STUB0(x, name) \ + __X32_COMPAT_SYS_STUB0(x, name) \ + static long __se_compat_sys_##name(void) \ + { \ + return __do_compat_sys_##name(); \ + } \ + static inline long __do_compat_sys_##name(void) + #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ @@ -181,15 +218,19 @@ * macros to work correctly. */ #ifndef SYSCALL_DEFINE0 -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __x64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ - asmlinkage long __x64_sys_##sname(void) +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\ + ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused) #endif #ifndef COND_SYSCALL -#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name) +#define COND_SYSCALL(name) \ + asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \ + { \ + return sys_ni_syscall(); \ + } #endif #ifndef SYS_NI @@ -201,7 +242,6 @@ * For VSYSCALLS, we need to declare these three syscalls with the new * pt_regs-based calling convention for in-kernel use. */ -struct pt_regs; asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs); asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs); asmlinkage long __x64_sys_time(const struct pt_regs *regs); diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 19435858df5f..96d9cd208610 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -51,12 +51,14 @@ struct x86_init_resources { * are set up. * @intr_init: interrupt init code * @trap_init: platform specific trap setup + * @intr_mode_select: interrupt delivery mode selection * @intr_mode_init: interrupt delivery mode setup */ struct x86_init_irqs { void (*pre_vector_init)(void); void (*intr_init)(void); void (*trap_init)(void); + void (*intr_mode_select)(void); void (*intr_mode_init)(void); }; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 2b0faf86da1b..df891f874614 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -830,8 +830,17 @@ bool __init apic_needs_pit(void) if (!tsc_khz || !cpu_khz) return true; - /* Is there an APIC at all? */ - if (!boot_cpu_has(X86_FEATURE_APIC)) + /* Is there an APIC at all or is it disabled? 
*/ + if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic) + return true; + + /* + * If interrupt delivery mode is legacy PIC or virtual wire without + * configuration, the local APIC timer won't be set up. Make sure + * that the PIT is initialized. + */ + if (apic_intr_mode == APIC_PIC || + apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG) return true; /* Virt guests may lack ARAT, but still have DEADLINE */ @@ -1322,7 +1331,7 @@ void __init sync_Arb_IDs(void) enum apic_intr_mode_id apic_intr_mode __ro_after_init; -static int __init apic_intr_mode_select(void) +static int __init __apic_intr_mode_select(void) { /* Check kernel option */ if (disable_apic) { @@ -1384,6 +1393,12 @@ static int __init apic_intr_mode_select(void) return APIC_SYMMETRIC_IO; } +/* Select the interrupt delivery mode for the BSP */ +void __init apic_intr_mode_select(void) +{ + apic_intr_mode = __apic_intr_mode_select(); +} + /* * An initial setup of the virtual wire mode. */ @@ -1440,8 +1455,6 @@ void __init apic_intr_mode_init(void) { bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT); - apic_intr_mode = apic_intr_mode_select(); - switch (apic_intr_mode) { case APIC_PIC: pr_info("APIC: Keep in PIC mode(8259)\n"); diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index d1fc62a67320..be66cfcaeb8a 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -38,6 +38,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) nmi_trigger_cpumask_backtrace(mask, exclude_self, nmi_raise_cpu_backtrace); } +EXPORT_SYMBOL_GPL(arch_trigger_cpumask_backtrace); static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) { diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d6af97fd170a..f0262cb5657a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1727,9 +1727,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) static inline bool ioapic_irqd_mask(struct irq_data *data) { - /* If we are moving the irq we need to mask it */ + /* If we are moving the IRQ we need to mask it */ if (unlikely(irqd_is_setaffinity_pending(data))) { - mask_ioapic_irq(data); + if (!irqd_irq_masked(data)) + mask_ioapic_irq(data); return true; } return false; @@ -1766,7 +1767,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) */ if (!io_apic_level_ack_pending(data->chip_data)) irq_move_masked_irq(data); - unmask_ioapic_irq(data); + /* If the IRQ is masked in the core, leave it: */ + if (!irqd_irq_masked(data)) + unmask_ioapic_irq(data); } } #else diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 7f7533462474..159bd0cb8548 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -23,10 +23,8 @@ static struct irq_domain *msi_default_domain; -static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg) { - struct irq_cfg *cfg = irqd_cfg(data); - msg->address_hi = MSI_ADDR_BASE_HI; if (x2apic_enabled()) @@ -47,6 +45,127 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) MSI_DATA_VECTOR(cfg->vector); } +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + __irq_msi_compose_msg(irqd_cfg(data), msg); +} + +static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg) +{ + struct msi_msg msg[2] = { [1] = { }, }; + + __irq_msi_compose_msg(cfg, msg); + 
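/* Hand the composed address/data pair to the underlying irq chip: */ +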
irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg); +} + +static int +msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force) +{ + struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd); + struct irq_data *parent = irqd->parent_data; + unsigned int cpu; + int ret; + + /* Save the current configuration */ + cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd)); + old_cfg = *cfg; + + /* Allocate a new target vector */ + ret = parent->chip->irq_set_affinity(parent, mask, force); + if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) + return ret; + + /* + * For non-maskable and non-remapped MSI interrupts the migration + * to a different destination CPU and a different vector has to be + * done carefully to handle the possible stray interrupt which can be + * caused by the non-atomic update of the address/data pair. + * + * Direct update is possible when: + * - The MSI is maskable (remapped MSI does not use this code path). + * The quirk bit is not set in this case. + * - The new vector is the same as the old vector + * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) + * - The new destination CPU is the same as the old destination CPU + */ + if (!irqd_msi_nomask_quirk(irqd) || + cfg->vector == old_cfg.vector || + old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || + cfg->dest_apicid == old_cfg.dest_apicid) { + irq_msi_update_msg(irqd, cfg); + return ret; + } + + /* + * Paranoia: Validate that the interrupt target is the local + * CPU. + */ + if (WARN_ON_ONCE(cpu != smp_processor_id())) { + irq_msi_update_msg(irqd, cfg); + return ret; + } + + /* + * Redirect the interrupt to the new vector on the current CPU + * first. This might cause a spurious interrupt on this vector if + * the device raises an interrupt right between this update and the + * update to the final destination CPU. + * + * If the vector is in use then the installed device handler will + * denote it as spurious, which is no harm as this is a rare event + * and interrupt handlers have to cope with spurious interrupts + * anyway. If the vector is unused, then it is marked so it won't + * trigger the 'No irq handler for vector' warning in do_IRQ(). + * + * This requires holding the vector lock to prevent concurrent updates to + * the affected vector. + */ + lock_vector_lock(); + + /* + * Mark the new target vector on the local CPU if it is currently + * unused. Reuse the VECTOR_RETRIGGERED state which is also used in + * the CPU hotplug path for a similar purpose. This cannot be + * undone here as the current CPU has interrupts disabled and + * cannot handle the interrupt before the whole set_affinity() + * section is done. In the CPU unplug case, the current CPU is + * about to vanish and will not handle any interrupts anymore. The + * vector is cleaned up when the CPU comes online again. + */ + if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector]))) + this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED); + + /* Redirect it to the new vector on the local CPU temporarily */ + old_cfg.vector = cfg->vector; + irq_msi_update_msg(irqd, &old_cfg); + + /* Now transition it to the target CPU */ + irq_msi_update_msg(irqd, cfg); + + /* + * All interrupts after this point are now targeted at the new + * vector/CPU. + * + * Drop vector lock before testing whether the temporary assignment + * to the local CPU was hit by an interrupt raised in the device, + * because the retrigger function acquires vector lock again. 
+ */ + unlock_vector_lock(); + + /* + * Check whether the transition raced with a device interrupt and + * is pending in the local APIC's IRR. It is safe to do this outside + * of vector lock as the irq_desc::lock of this interrupt is still + * held and interrupts are disabled: The check is not accessing the + * underlying vector store. It's just checking the local APIC's + * IRR. + */ + if (lapic_vector_set_in_irr(cfg->vector)) + irq_data_get_irq_chip(irqd)->irq_retrigger(irqd); + + return ret; +} + /* * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, * which implement the MSI or MSI-X Capability Structure. @@ -58,6 +177,7 @@ static struct irq_chip pci_msi_controller = { .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_compose_msi_msg = irq_msi_compose_msg, + .irq_set_affinity = msi_set_affinity, .flags = IRQCHIP_SKIP_SET_WAKE, }; @@ -146,6 +266,8 @@ void __init arch_init_msi_domain(struct irq_domain *parent) } if (!msi_default_domain) pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); + else + msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; } #ifdef CONFIG_IRQ_REMAP diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 90f75e515876..c3f4dd4ae155 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -28,6 +28,7 @@ static const int amd_erratum_383[]; static const int amd_erratum_400[]; +static const int amd_erratum_1054[]; static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); /* @@ -615,9 +616,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) return; clear_all: - clear_cpu_cap(c, X86_FEATURE_SME); + setup_clear_cpu_cap(X86_FEATURE_SME); clear_sev: - clear_cpu_cap(c, X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV); } } @@ -978,6 +979,15 @@ static void init_amd(struct cpuinfo_x86 *c) /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ if (!cpu_has(c, X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); + + /* + * Turn on the Instructions Retired free counter on machines not + * susceptible to erratum #1054 "Instructions Retired Performance + * Counter May Be Inaccurate". + */ + if (cpu_has(c, X86_FEATURE_IRPERF) && + !cpu_has_amd_erratum(c, amd_erratum_1054)) + msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); } #ifdef CONFIG_X86_32 @@ -1105,6 +1115,10 @@ static const int amd_erratum_400[] = static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); +/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ +static const int amd_erratum_1054[] = + AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4c7b0fa15a19..8bf64899f56a 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); +static void __init mds_print_mitigation(void); static void __init taa_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ @@ -108,6 +109,12 @@ void __init check_bugs(void) mds_select_mitigation(); taa_select_mitigation(); + /* + * As MDS and TAA mitigations are inter-related, defer printing the MDS + * mitigation until after TAA mitigation selection is done. 
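+ * (taa_select_mitigation() may upgrade mds_mitigation to full and + * re-run mds_select_mitigation(), so the state is only final here.)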
+ */ + mds_print_mitigation(); + arch_smt_update(); #ifdef CONFIG_X86_32 @@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void) (mds_nosmt || cpu_mitigations_auto_nosmt())) cpu_smt_disable(false); } +} + +static void __init mds_print_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) + return; pr_info("%s\n", mds_strings[mds_mitigation]); } @@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void) return; } - /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */ - if (taa_mitigation == TAA_MITIGATION_OFF) + /* + * TAA mitigation via VERW is turned off if both + * tsx_async_abort=off and mds=off are specified. + */ + if (taa_mitigation == TAA_MITIGATION_OFF && + mds_mitigation == MDS_MITIGATION_OFF) goto out; if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) @@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void) if (taa_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); + /* + * Update MDS mitigation, if necessary, as the mds_user_clear is + * now enabled for TAA mitigation. + */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } out: pr_info("%s\n", taa_strings[taa_mitigation]); } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index fffe21945374..704caec136cf 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -464,7 +464,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c) * cpuid bit to be set. We need to ensure that we * update that bit in this CPU's "cpu_info". */ - get_cpu_cap(c); + set_cpu_cap(c, X86_FEATURE_OSPKE); } #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 6ea7fdc82f3c..1cf34fcc3a8e 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -266,10 +266,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu) smca_set_misc_banks_map(bank, cpu); /* Return early if this bank was already initialized. 
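* An entry whose hwid_mcatype is still zero is treated as not yet * initialized and is probed again.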
*/ - if (smca_banks[bank].hwid) + if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0) return; - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { + if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { pr_warn("Failed to read MCA_IPID for bank %d\n", bank); return; } @@ -1161,9 +1161,12 @@ static const struct sysfs_ops threshold_ops = { .store = store, }; +static void threshold_block_release(struct kobject *kobj); + static struct kobj_type threshold_ktype = { .sysfs_ops = &threshold_ops, .default_attrs = default_attrs, + .release = threshold_block_release, }; static const char *get_name(unsigned int bank, struct threshold_block *b) @@ -1196,8 +1199,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b) return buf_mcatype; } -static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, - unsigned int block, u32 address) +static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb, + unsigned int bank, unsigned int block, + u32 address) { struct threshold_block *b = NULL; u32 low, high; @@ -1241,16 +1245,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, INIT_LIST_HEAD(&b->miscj); - if (per_cpu(threshold_banks, cpu)[bank]->blocks) { - list_add(&b->miscj, - &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); - } else { - per_cpu(threshold_banks, cpu)[bank]->blocks = b; - } + if (tb->blocks) + list_add(&b->miscj, &tb->blocks->miscj); + else + tb->blocks = b; - err = kobject_init_and_add(&b->kobj, &threshold_ktype, - per_cpu(threshold_banks, cpu)[bank]->kobj, - get_name(bank, b)); + err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b)); if (err) goto out_free; recurse: @@ -1258,7 +1258,7 @@ recurse: if (!address) return 0; - err = allocate_threshold_blocks(cpu, bank, block, address); + err = allocate_threshold_blocks(cpu, tb, bank, block, address); if (err) goto out_free; @@ -1343,8 +1343,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) goto out_free; } - per_cpu(threshold_banks, cpu)[bank] = b; - if (is_shared_bank(bank)) { refcount_set(&b->cpus, 1); @@ -1355,9 +1353,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) } } - err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank)); - if (!err) - goto out; + err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); + if (err) + goto out_free; + + per_cpu(threshold_banks, cpu)[bank] = b; + + return 0; out_free: kfree(b); @@ -1366,8 +1368,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) return err; } -static void deallocate_threshold_block(unsigned int cpu, - unsigned int bank) +static void threshold_block_release(struct kobject *kobj) +{ + kfree(to_block(kobj)); +} + +static void deallocate_threshold_block(unsigned int cpu, unsigned int bank) { struct threshold_block *pos = NULL; struct threshold_block *tmp = NULL; @@ -1377,13 +1383,11 @@ static void deallocate_threshold_block(unsigned int cpu, return; list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { - kobject_put(&pos->kobj); list_del(&pos->miscj); - kfree(pos); + kobject_put(&pos->kobj); } - kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); - per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; + kobject_put(&head->blocks->kobj); } static void __threshold_remove_blocks(struct threshold_bank *b) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 743370ee4983..aecb15ba66cd 100644 --- 
a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -814,8 +814,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); + m->bank = i; if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { - m->bank = i; mce_read_aux(m, i); *msg = tmp; return 1; diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index 88cd9598fa57..f2350967a898 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -489,17 +489,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) return; if ((val & 3UL) == 1UL) { - /* PPIN available but disabled: */ + /* PPIN locked in disabled mode */ return; } - /* If PPIN is disabled, but not locked, try to enable: */ - if (!(val & 3UL)) { + /* If PPIN is disabled, try to enable */ + if (!(val & 2UL)) { wrmsrl_safe(MSR_PPIN_CTL, val | 2UL); rdmsrl_safe(MSR_PPIN_CTL, &val); } - if ((val & 3UL) == 2UL) + /* Is the enable bit set? */ + if (val & 2UL) set_cpu_cap(c, X86_FEATURE_INTEL_PPIN); } } diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c index 6e2becf547c5..bc441d68d060 100644 --- a/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -188,7 +188,7 @@ static void therm_throt_process(bool new_event, int event, int level) /* if we just entered the thermal event */ if (new_event) { if (event == THERMAL_THROTTLING_EVENT) - pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n", + pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, level == CORE_LEVEL ? "Core" : "Package", state->count); diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 03eb90d00af0..89049b343c7a 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -618,7 +618,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); list_del(&d->list); - if (is_mbm_enabled()) + if (r->mon_capable && is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { /* diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index e49b77283924..181c992f448c 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -57,6 +57,7 @@ static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) } DECLARE_STATIC_KEY_FALSE(rdt_enable_key); +DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); /** * struct mon_evt - Entry in the event list of a resource diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 397206f23d14..773124b0e18a 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -514,7 +514,7 @@ void mbm_handle_overflow(struct work_struct *work) mutex_lock(&rdtgroup_mutex); - if (!static_branch_likely(&rdt_enable_key)) + if (!static_branch_likely(&rdt_mon_enable_key)) goto out_unlock; d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]); @@ -543,7 +543,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - if (!static_branch_likely(&rdt_enable_key)) + if (!static_branch_likely(&rdt_mon_enable_key)) return; cpu = 
cpumask_any(&dom->cpu_mask); dom->mbm_work_cpu = cpu; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2e3b06d6bbc6..954fd048ad9b 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1741,9 +1741,6 @@ static int set_cache_qos_cfg(int level, bool enable) struct rdt_domain *d; int cpu; - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - if (level == RDT_RESOURCE_L3) update = l3_qos_cfg_update; else if (level == RDT_RESOURCE_L2) @@ -1751,6 +1748,9 @@ static int set_cache_qos_cfg(int level, bool enable) else return -EINVAL; + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + r_l = &rdt_resources_all[level]; list_for_each_entry(d, &r_l->domains, list) { /* Pick one CPU from each domain instance to update MSR */ @@ -1970,7 +1970,7 @@ static int rdt_get_tree(struct fs_context *fc) if (rdt_mon_capable) { ret = mongroup_create_dir(rdtgroup_default.kn, - NULL, "mon_groups", + &rdtgroup_default, "mon_groups", &kn_mongrp); if (ret < 0) goto out_info; @@ -2205,7 +2205,11 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { free_rmid(sentry->mon.rmid); list_del(&sentry->mon.crdtgrp_list); - kfree(sentry); + + if (atomic_read(&sentry->waitcount) != 0) + sentry->flags = RDT_DELETED; + else + kfree(sentry); } } @@ -2243,7 +2247,11 @@ static void rmdir_all_sub(void) kernfs_remove(rdtgrp->kn); list_del(&rdtgrp->rdtgroup_list); - kfree(rdtgrp); + + if (atomic_read(&rdtgrp->waitcount) != 0) + rdtgrp->flags = RDT_DELETED; + else + kfree(rdtgrp); } /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ update_closid_rmid(cpu_online_mask, &rdtgroup_default); @@ -2446,7 +2454,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, /* * Create the mon_data directory first. */ - ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn); + ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); if (ret) return ret; @@ -2645,7 +2653,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, uint files = 0; int ret; - prdtgrp = rdtgroup_kn_lock_live(prgrp_kn); + prdtgrp = rdtgroup_kn_lock_live(parent_kn); if (!prdtgrp) { ret = -ENODEV; goto out_unlock; @@ -2718,7 +2726,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, kernfs_activate(kn); /* - * The caller unlocks the prgrp_kn upon success. + * The caller unlocks the parent_kn upon success. */ return 0; @@ -2729,7 +2737,7 @@ out_destroy: out_free_rgrp: kfree(rdtgrp); out_unlock: - rdtgroup_kn_unlock(prgrp_kn); + rdtgroup_kn_unlock(parent_kn); return ret; } @@ -2767,7 +2775,7 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, */ list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); - rdtgroup_kn_unlock(prgrp_kn); + rdtgroup_kn_unlock(parent_kn); return ret; } @@ -2810,7 +2818,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, * Create an empty mon_groups directory to hold the subset * of tasks and cpus to monitor. 
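* Monitoring subgroups created in it are tracked on this group's * mon.crdtgrp_list.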
*/ - ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL); + ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); if (ret) { rdt_last_cmd_puts("kernfs subdir error\n"); goto out_del_list; @@ -2826,7 +2834,7 @@ out_id_free: out_common_fail: mkdir_rdt_prepare_clean(rdtgrp); out_unlock: - rdtgroup_kn_unlock(prgrp_kn); + rdtgroup_kn_unlock(parent_kn); return ret; } @@ -2952,13 +2960,13 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp, closid_free(rdtgrp->closid); free_rmid(rdtgrp->mon.rmid); + rdtgroup_ctrl_remove(kn, rdtgrp); + /* * Free all the child monitor group rmids. */ free_all_child_rdtgrp(rdtgrp); - rdtgroup_ctrl_remove(kn, rdtgrp); - return 0; } diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c index 3e20d322bc98..032509adf9de 100644 --- a/arch/x86/kernel/cpu/tsx.c +++ b/arch/x86/kernel/cpu/tsx.c @@ -115,11 +115,12 @@ void __init tsx_init(void) tsx_disable(); /* - * tsx_disable() will change the state of the - * RTM CPUID bit. Clear it here since it is now - * expected to be not set. + * tsx_disable() will change the state of the RTM and HLE CPUID + * bits. Clear them here since they are now expected to be not + * set. */ setup_clear_cpu_cap(X86_FEATURE_RTM); + setup_clear_cpu_cap(X86_FEATURE_HLE); } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) { /* @@ -131,10 +132,10 @@ void __init tsx_init(void) tsx_enable(); /* - * tsx_enable() will change the state of the - * RTM CPUID bit. Force it here since it is now - * expected to be set. + * tsx_enable() will change the state of the RTM and HLE CPUID + * bits. Force them here since they are now expected to be set. */ setup_force_cpu_cap(X86_FEATURE_RTM); + setup_force_cpu_cap(X86_FEATURE_HLE); } } diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 0b8cedb20d6d..d5c9b13bafdf 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = { .ss = __KERNEL_DS, .ds = __USER_DS, .fs = __KERNEL_PERCPU, +#ifndef CONFIG_X86_32_LAZY_GS + .gs = __KERNEL_STACK_CANARY, +#endif .__cr3 = __pa_nodebug(swapper_pg_dir), }; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 4cba91ec8049..2f9ec14be3b1 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -710,8 +710,12 @@ static struct chipset early_qrk[] __initdata = { */ { PCI_VENDOR_ID_INTEL, 0x0f00, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_INTEL, 0x3e20, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_INTEL, 0x3ec4, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_INTEL, 0x8a12, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, {} diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 0071b794ed19..400a05e1c1c5 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -352,6 +352,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) fpregs_unlock(); return 0; } + fpregs_deactivate(fpu); fpregs_unlock(); } @@ -403,6 +404,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) } if (!ret) fpregs_mark_activate(); + else + fpregs_deactivate(fpu); fpregs_unlock(); err_out: diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 30f9cb2c0b55..2e6a0676c1f4 100644 
--- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -571,6 +571,16 @@ ENTRY(initial_page_table) # error "Kernel PMDs should be 1, 2 or 3" # endif .align PAGE_SIZE /* needs to be page-sized too */ + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + /* + * PTI needs another page so sync_initial_pagetable() works correctly + * and does not scribble over the data which is placed behind the + * actual initial_page_table. See clone_pgd_range(). + */ + .fill 1024, 4, 0 +#endif + #endif .data diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c index 4d4f5d9faac3..23054909c8dd 100644 --- a/arch/x86/kernel/ima_arch.c +++ b/arch/x86/kernel/ima_arch.c @@ -10,8 +10,6 @@ extern struct boot_params boot_params; static enum efi_secureboot_mode get_sb_mode(void) { - efi_char16_t efi_SecureBoot_name[] = L"SecureBoot"; - efi_char16_t efi_SetupMode_name[] = L"SecureBoot"; efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; efi_status_t status; unsigned long size; @@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void) } /* Get variable contents into buffer */ - status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid, + status = efi.get_variable(L"SecureBoot", &efi_variable_guid, NULL, &size, &secboot); if (status == EFI_NOT_FOUND) { pr_info("ima: secureboot mode disabled\n"); @@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void) } size = sizeof(setupmode); - status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid, + status = efi.get_variable(L"SetupMode", &efi_variable_guid, NULL, &size, &setupmode); if (status != EFI_SUCCESS) /* ignore unknown SetupMode */ diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index e676a9916c49..54c21d6abd5a 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -104,18 +104,22 @@ static int __init nmi_warning_debugfs(void) } fs_initcall(nmi_warning_debugfs); -static void nmi_max_handler(struct irq_work *w) +static void nmi_check_duration(struct nmiaction *action, u64 duration) { - struct nmiaction *a = container_of(w, struct nmiaction, irq_work); + u64 whole_msecs = READ_ONCE(action->max_duration); int remainder_ns, decimal_msecs; - u64 whole_msecs = READ_ONCE(a->max_duration); + + if (duration < nmi_longest_ns || duration < action->max_duration) + return; + + action->max_duration = duration; remainder_ns = do_div(whole_msecs, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", - a->handler, whole_msecs, decimal_msecs); + action->handler, whole_msecs, decimal_msecs); } static int nmi_handle(unsigned int type, struct pt_regs *regs) @@ -142,11 +146,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs) delta = sched_clock() - delta; trace_nmi_handler(a->handler, (int)delta, thishandled); - if (delta < nmi_longest_ns || delta < a->max_duration) - continue; - - a->max_duration = delta; - irq_work_queue(&a->irq_work); + nmi_check_duration(a, delta); } rcu_read_unlock(); @@ -164,8 +164,6 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) if (!action->handler) return -EINVAL; - init_irq_work(&action->irq_work, nmi_max_handler); - raw_spin_lock_irqsave(&desc->lock, flags); /* diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 01f0e2263b86..298fc1edd9c9 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c @@ -90,11 +90,11 @@ __init int create_simplefb(const struct screen_info *si, if 
(si->orig_video_isVGA == VIDEO_TYPE_VLFB) size <<= 16; length = mode->height * mode->stride; - length = PAGE_ALIGN(length); if (length > size) { printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); return -EINVAL; } + length = PAGE_ALIGN(length); /* setup IORESOURCE_MEM as framebuffer memory */ memset(&res, 0, sizeof(res)); diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 7ce29cee9f9e..d8673d8a779b 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -91,10 +91,18 @@ void __init hpet_time_init(void) static __init void x86_late_time_init(void) { - x86_init.timers.timer_init(); /* - * After PIT/HPET timers init, select and setup - * the final interrupt mode for delivering IRQs. + * Before PIT/HPET init, select the interrupt mode. This is required + * to make the decision whether the PIT should be initialized correctly. + */ + x86_init.irqs.intr_mode_select(); + + /* Set up the legacy timers */ + x86_init.timers.timer_init(); + + /* + * After PIT/HPET timers init, set up the final interrupt mode for + * delivering IRQs. */ x86_init.irqs.intr_mode_init(); tsc_init(); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 18a799c8fa28..1838b10a299c 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -58,6 +58,7 @@ struct x86_init_ops x86_init __initdata = { .pre_vector_init = init_ISA_irqs, .intr_init = native_init_IRQ, .trap_init = x86_init_noop, + .intr_mode_select = apic_intr_mode_select, .intr_mode_init = apic_intr_mode_init }, diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index f68c0c753c38..6fa946f983c9 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -352,6 +352,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; unsigned f_la57; + unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0; /* cpuid 7.0.ebx */ const u32 kvm_cpuid_7_0_ebx_x86_features = @@ -363,7 +364,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) /* cpuid 7.0.ecx*/ const u32 kvm_cpuid_7_0_ecx_x86_features = - F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | + F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/; @@ -392,6 +393,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) /* Set LA57 based on hardware capability. */ entry->ecx |= f_la57; entry->ecx |= f_umip; + entry->ecx |= f_pku; /* PKU is not yet implemented for shadow paging. 
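* It is therefore advertised only when TDP is enabled and the host * itself has OSPKE enabled: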
*/ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); @@ -402,7 +404,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) entry->edx |= F(SPEC_CTRL); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->edx |= F(INTEL_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->edx |= F(SPEC_CTRL_SSBD); /* * We emulate ARCH_CAPABILITIES in software even @@ -504,7 +507,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, r = -E2BIG; - if (*nent >= maxnent) + if (WARN_ON(*nent >= maxnent)) goto out; do_host_cpuid(entry, function, 0); @@ -759,7 +762,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, entry->ebx |= F(AMD_IBRS); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->ebx |= F(AMD_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->ebx |= F(AMD_SSBD); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) entry->ebx |= F(AMD_SSB_NO); @@ -810,6 +814,9 @@ out: static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func, int *nent, int maxnent, unsigned int type) { + if (*nent >= maxnent) + return -E2BIG; + if (type == KVM_GET_EMULATED_CPUID) return __do_cpuid_func_emulated(entry, func, nent, maxnent); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 698efb8c3897..128d3ad46e96 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -22,6 +22,7 @@ #include "kvm_cache_regs.h" #include #include +#include #include #include @@ -1075,8 +1076,23 @@ static void fetch_register_operand(struct operand *op) } } +static void emulator_get_fpu(void) +{ + fpregs_lock(); + + fpregs_assert_state_consistent(); + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + switch_fpu_return(); +} + +static void emulator_put_fpu(void) +{ + fpregs_unlock(); +} + static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { + emulator_get_fpu(); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; @@ -1098,11 +1114,13 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) #endif default: BUG(); } + emulator_put_fpu(); } static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { + emulator_get_fpu(); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; @@ -1124,10 +1142,12 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, #endif default: BUG(); } + emulator_put_fpu(); } static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { + emulator_get_fpu(); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; @@ -1139,10 +1159,12 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } + emulator_put_fpu(); } static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { + emulator_get_fpu(); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; @@ -1154,6 +1176,7 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } + emulator_put_fpu(); } 
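/*
 * A minimal illustrative sketch, not part of the patch above: every
 * emulator helper that executes a raw FPU/MMX/SSE instruction now has to
 * bracket it with emulator_get_fpu()/emulator_put_fpu(), so that a pending
 * TIF_NEED_FPU_LOAD reloads the guest's FPU state before it is touched.
 * The helper name read_mm0() is hypothetical.
 */
static void read_mm0(struct x86_emulate_ctxt *ctxt, u64 *data)
{
	emulator_get_fpu();	/* fpregs_lock() + switch_fpu_return() if needed */
	asm("movq %%mm0, %0" : "=m"(*data));
	emulator_put_fpu();	/* fpregs_unlock() */
}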
static int em_fninit(struct x86_emulate_ctxt *ctxt) @@ -1161,7 +1184,9 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); + emulator_get_fpu(); asm volatile("fninit"); + emulator_put_fpu(); return X86EMUL_CONTINUE; } @@ -1172,7 +1197,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); + emulator_get_fpu(); asm volatile("fnstcw %0": "+m"(fcw)); + emulator_put_fpu(); ctxt->dst.val = fcw; @@ -1186,7 +1213,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); + emulator_get_fpu(); asm volatile("fnstsw %0": "+m"(fsw)); + emulator_put_fpu(); ctxt->dst.val = fsw; @@ -4094,8 +4123,12 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; + emulator_get_fpu(); + rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); + emulator_put_fpu(); + if (rc != X86EMUL_CONTINUE) return rc; @@ -4138,6 +4171,8 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; + emulator_get_fpu(); + if (size < __fxstate_size(16)) { rc = fxregs_fixup(&fx_state, size); if (rc != X86EMUL_CONTINUE) @@ -4153,6 +4188,8 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); out: + emulator_put_fpu(); + return rc; } @@ -5160,6 +5197,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; + ctxt->intercept = x86_intercept_none; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { @@ -5212,16 +5250,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ + has_seg_override = true; + ctxt->seg_override = VCPU_SREG_ES; + break; case 0x2e: /* CS override */ + has_seg_override = true; + ctxt->seg_override = VCPU_SREG_CS; + break; case 0x36: /* SS override */ + has_seg_override = true; + ctxt->seg_override = VCPU_SREG_SS; + break; case 0x3e: /* DS override */ has_seg_override = true; - ctxt->seg_override = (ctxt->b >> 3) & 3; + ctxt->seg_override = VCPU_SREG_DS; break; case 0x64: /* FS override */ + has_seg_override = true; + ctxt->seg_override = VCPU_SREG_FS; + break; case 0x65: /* GS override */ has_seg_override = true; - ctxt->seg_override = ctxt->b & 7; + ctxt->seg_override = VCPU_SREG_GS; break; case 0x40 ... 
0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) @@ -5305,10 +5355,15 @@ done_prefixes: } break; case Escape: - if (ctxt->modrm > 0xbf) - opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; - else + if (ctxt->modrm > 0xbf) { + size_t size = ARRAY_SIZE(opcode.u.esc->high); + u32 index = array_index_nospec( + ctxt->modrm - 0xc0, size); + + opcode = opcode.u.esc->high[index]; + } else { opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; + } break; case InstrDual: if ((ctxt->modrm >> 6) == 3) @@ -5450,7 +5505,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { int rc; + emulator_get_fpu(); rc = asm_safe("fwait"); + emulator_put_fpu(); if (unlikely(rc != X86EMUL_CONTINUE)) return emulate_exception(ctxt, MF_VECTOR, 0, false); diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 23ff65504d7e..26408434b9bc 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -809,11 +809,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, u32 index, u64 *pdata) { struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + size_t size = ARRAY_SIZE(hv->hv_crash_param); - if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) + if (WARN_ON_ONCE(index >= size)) return -EINVAL; - *pdata = hv->hv_crash_param[index]; + *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; return 0; } @@ -852,11 +853,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, u32 index, u64 data) { struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + size_t size = ARRAY_SIZE(hv->hv_crash_param); - if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) + if (WARN_ON_ONCE(index >= size)) return -EINVAL; - hv->hv_crash_param[index] = data; + hv->hv_crash_param[array_index_nospec(index, size)] = data; return 0; } diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 8b38bb4868a6..629a09ca9860 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -460,10 +460,14 @@ static int picdev_write(struct kvm_pic *s, switch (addr) { case 0x20: case 0x21: + pic_lock(s); + pic_ioport_write(&s->pics[0], addr, data); + pic_unlock(s); + break; case 0xa0: case 0xa1: pic_lock(s); - pic_ioport_write(&s->pics[addr >> 7], addr, data); + pic_ioport_write(&s->pics[1], addr, data); pic_unlock(s); break; case 0x4d0: diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index d859ae8890d0..24a6905d60ee 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -68,13 +69,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, default: { u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; - u64 redir_content; + u64 redir_content = ~0ULL; - if (redir_index < IOAPIC_NUM_PINS) - redir_content = - ioapic->redirtbl[redir_index].bits; - else - redir_content = ~0ULL; + if (redir_index < IOAPIC_NUM_PINS) { + u32 index = array_index_nospec( + redir_index, IOAPIC_NUM_PINS); + + redir_content = ioapic->redirtbl[index].bits; + } result = (ioapic->ioregsel & 0x1) ? 
(redir_content >> 32) & 0xffffffff : @@ -291,6 +293,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) if (index >= IOAPIC_NUM_PINS) return; + index = array_index_nospec(index, IOAPIC_NUM_PINS); e = &ioapic->redirtbl[index]; mask_before = e->fields.mask; /* Preserve read-only fields */ diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 8ecd48d31800..5ddcaacef291 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -416,7 +416,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, kvm_set_msi_irq(vcpu->kvm, entry, &irq); - if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0, + if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0, irq.dest_id, irq.dest_mode)) __set_bit(irq.vector, ioapic_handled_vectors); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index b29d00b661ff..5d2587005d0e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -637,9 +637,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) { u8 val; - if (pv_eoi_get_user(vcpu, &val) < 0) + if (pv_eoi_get_user(vcpu, &val) < 0) { printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n", (unsigned long long)vcpu->arch.pv_eoi.msr_val); + return false; + } return val & 0x1; } @@ -1056,11 +1058,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, apic->regs + APIC_TMR); } - if (vcpu->arch.apicv_active) - kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); - else { + if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) { kvm_lapic_set_irr(vector, apic); - kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); } @@ -1926,15 +1925,20 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) case APIC_LVTTHMR: case APIC_LVTPC: case APIC_LVT1: - case APIC_LVTERR: + case APIC_LVTERR: { /* TODO: Check vector */ + size_t size; + u32 index; + if (!kvm_apic_sw_enabled(apic)) val |= APIC_LVT_MASKED; - - val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4]; + size = ARRAY_SIZE(apic_lvt_mask); + index = array_index_nospec( + (reg - APIC_LVTT) >> 4, size); + val &= apic_lvt_mask[index]; kvm_lapic_set_reg(apic, reg, val); - break; + } case APIC_LVTT: if (!kvm_apic_sw_enabled(apic)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2ce9da58611e..518100ea5ef4 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -418,22 +418,24 @@ static inline bool is_access_track_spte(u64 spte) * requires a full MMU zap). The flag is instead explicitly queried when * checking for MMIO spte cache hits. 
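* The generation value itself is stored split between a low range of * spte bits and a second software-available range, see the * MMIO_SPTE_GEN_* masks below.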
*/ -#define MMIO_SPTE_GEN_MASK GENMASK_ULL(18, 0) +#define MMIO_SPTE_GEN_MASK GENMASK_ULL(17, 0) #define MMIO_SPTE_GEN_LOW_START 3 #define MMIO_SPTE_GEN_LOW_END 11 #define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ MMIO_SPTE_GEN_LOW_START) -#define MMIO_SPTE_GEN_HIGH_START 52 -#define MMIO_SPTE_GEN_HIGH_END 61 +#define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT +#define MMIO_SPTE_GEN_HIGH_END 62 #define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ MMIO_SPTE_GEN_HIGH_START) + static u64 generation_mmio_spte_mask(u64 gen) { u64 mask; WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); + BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK); mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK; mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK; @@ -444,8 +446,6 @@ static u64 get_mmio_spte_generation(u64 spte) { u64 gen; - spte &= ~shadow_mmio_mask; - gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START; gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START; return gen; @@ -538,16 +538,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); static u8 kvm_get_shadow_phys_bits(void) { /* - * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected - * in CPU detection code, but MKTME treats those reduced bits as - * 'keyID' thus they are not reserved bits. Therefore for MKTME - * we should still return physical address bits reported by CPUID. + * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected + * in CPU detection code, but the processor treats those reduced bits as + * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at + * the physical address bits reported by CPUID. */ - if (!boot_cpu_has(X86_FEATURE_TME) || - WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008)) - return boot_cpu_data.x86_phys_bits; + if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008)) + return cpuid_eax(0x80000008) & 0xff; - return cpuid_eax(0x80000008) & 0xff; + /* + * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with + * custom CPUID. Proceed with whatever the kernel found since these features + * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008). + */ + return boot_cpu_data.x86_phys_bits; } static void kvm_mmu_reset_all_pte_masks(void) @@ -1282,12 +1286,12 @@ static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn, return __mmu_gfn_lpage_is_disallowed(gfn, level, slot); } -static int host_mapping_level(struct kvm *kvm, gfn_t gfn) +static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn) { unsigned long page_size; int i, ret = 0; - page_size = kvm_host_page_size(kvm, gfn); + page_size = kvm_host_page_size(vcpu, gfn); for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { if (page_size >= KVM_HPAGE_SIZE(i)) @@ -1337,7 +1341,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn, if (unlikely(*force_pt_level)) return PT_PAGE_TABLE_LEVEL; - host_level = host_mapping_level(vcpu->kvm, large_gfn); + host_level = host_mapping_level(vcpu, large_gfn); if (host_level == PT_PAGE_TABLE_LEVEL) return host_level; @@ -3528,7 +3532,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte) * - true: let the vcpu access the same address again. * - false: let the real page fault path fix it. 
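* The walk below runs locklessly (walk_shadow_page_lockless_begin/end) * and retries in a loop instead of taking mmu_lock.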
*/ -static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, +static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level, u32 error_code) { struct kvm_shadow_walk_iterator iterator; @@ -3548,7 +3552,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, do { u64 new_spte; - for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) + for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte) if (!is_shadow_present_pte(spte) || iterator.level < level) break; @@ -3626,7 +3630,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, } while (true); - trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, + trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep, spte, fault_handled); walk_shadow_page_lockless_end(vcpu); @@ -3634,10 +3638,11 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, } static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable); + gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, + bool *writable); static int make_mmu_pages_available(struct kvm_vcpu *vcpu); -static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, +static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, gfn_t gfn, bool prefault) { int r; @@ -3663,16 +3668,16 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); } - if (fast_page_fault(vcpu, v, level, error_code)) + if (fast_page_fault(vcpu, gpa, level, error_code)) return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) + if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) return RET_PF_RETRY; - if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) + if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r)) return r; r = RET_PF_RETRY; @@ -3683,7 +3688,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, goto out_unlock; if (likely(!force_pt_level)) transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); - r = __direct_map(vcpu, v, write, map_writable, level, pfn, + r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault, false); out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); @@ -3981,7 +3986,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, u32 access, struct x86_exception *exception) { if (exception) @@ -3989,7 +3994,7 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, return vaddr; } -static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, +static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr, u32 access, struct x86_exception *exception) { @@ -4149,13 +4154,14 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) walk_shadow_page_lockless_end(vcpu); } -static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, +static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, bool prefault) { - gfn_t gfn = gva >> PAGE_SHIFT; + gfn_t gfn = gpa >> PAGE_SHIFT; int r; - pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); + /* Note, paging is disabled, ergo gva == gpa. 
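+ * so the gfn above is derived directly from the raw fault address.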
*/ + pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); if (page_fault_handle_page_track(vcpu, error_code, gfn)) return RET_PF_EMULATE; @@ -4167,11 +4173,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); - return nonpaging_map(vcpu, gva & PAGE_MASK, + return nonpaging_map(vcpu, gpa & PAGE_MASK, error_code, gfn, prefault); } -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) +static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + gfn_t gfn) { struct kvm_arch_async_pf arch; @@ -4180,11 +4187,13 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) arch.direct_map = vcpu->arch.mmu->direct_map; arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); - return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); + return kvm_setup_async_pf(vcpu, cr2_or_gpa, + kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); } static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable) + gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, + bool *writable) { struct kvm_memory_slot *slot; bool async; @@ -4204,12 +4213,12 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, return false; /* *pfn has correct page already */ if (!prefault && kvm_can_do_async_pf(vcpu)) { - trace_kvm_try_async_get_page(gva, gfn); + trace_kvm_try_async_get_page(cr2_or_gpa, gfn); if (kvm_find_async_pf_gfn(vcpu, gfn)) { - trace_kvm_async_pf_doublefault(gva, gfn); + trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn); kvm_make_request(KVM_REQ_APF_HALT, vcpu); return true; - } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) + } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn)) return true; } @@ -4222,6 +4231,12 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, { int r = 1; +#ifndef CONFIG_X86_64 + /* A 64-bit CR2 should be impossible on 32-bit KVM. */ + if (WARN_ON_ONCE(fault_address >> 32)) + return -EFAULT; +#endif + vcpu->arch.l1tf_flush_l1d = true; switch (vcpu->arch.apf.host_apf_reason) { default: @@ -4259,7 +4274,7 @@ check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num); } -static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, +static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, bool prefault) { kvm_pfn_t pfn; @@ -5516,7 +5531,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) return 0; } -int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, +int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len) { int r, emulation_type = 0; @@ -5525,18 +5540,18 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, /* With shadow page tables, fault_address contains a GVA or nGPA. 
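* With TDP (direct_map) it is a GPA, which is why it can be cached below.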
*/ if (vcpu->arch.mmu->direct_map) { vcpu->arch.gpa_available = true; - vcpu->arch.gpa_val = cr2; + vcpu->arch.gpa_val = cr2_or_gpa; } r = RET_PF_INVALID; if (unlikely(error_code & PFERR_RSVD_MASK)) { - r = handle_mmio_page_fault(vcpu, cr2, direct); + r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct); if (r == RET_PF_EMULATE) goto emulate; } if (r == RET_PF_INVALID) { - r = vcpu->arch.mmu->page_fault(vcpu, cr2, + r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, lower_32_bits(error_code), false); WARN_ON(r == RET_PF_INVALID); @@ -5556,7 +5571,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, */ if (vcpu->arch.mmu->direct_map && (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { - kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); + kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)); return 1; } @@ -5571,7 +5586,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, * explicitly shadowing L1's page tables, i.e. unprotecting something * for L1 isn't going to magically fix whatever issue cause L2 to fail. */ - if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) + if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu)) emulation_type = EMULTYPE_ALLOW_RETRY; emulate: /* @@ -5586,7 +5601,7 @@ emulate: return 1; } - return x86_emulate_instruction(vcpu, cr2, emulation_type, insn, + return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, insn_len); } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); @@ -6249,7 +6264,7 @@ static void kvm_set_mmio_spte_mask(void) * If reserved bit is not supported, clear the present bit to disable * mmio page fault. */ - if (IS_ENABLED(CONFIG_X86_64) && shadow_phys_bits == 52) + if (shadow_phys_bits == 52) mask &= ~1ull; kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 7ca8831c7d1a..3c6522b84ff1 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -249,13 +249,13 @@ TRACE_EVENT( TRACE_EVENT( fast_page_fault, - TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, + TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code, u64 *sptep, u64 old_spte, bool retry), - TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry), + TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry), TP_STRUCT__entry( __field(int, vcpu_id) - __field(gva_t, gva) + __field(gpa_t, cr2_or_gpa) __field(u32, error_code) __field(u64 *, sptep) __field(u64, old_spte) @@ -265,7 +265,7 @@ TRACE_EVENT( TP_fast_assign( __entry->vcpu_id = vcpu->vcpu_id; - __entry->gva = gva; + __entry->cr2_or_gpa = cr2_or_gpa; __entry->error_code = error_code; __entry->sptep = sptep; __entry->old_spte = old_spte; @@ -273,9 +273,9 @@ TRACE_EVENT( __entry->retry = retry; ), - TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx" + TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx" " new %llx spurious %d fixed %d", __entry->vcpu_id, - __entry->gva, __print_flags(__entry->error_code, "|", + __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|", kvm_mmu_trace_pferr_flags), __entry->sptep, __entry->old_spte, __entry->new_spte, __spte_satisfied(old_spte), __spte_satisfied(new_spte) diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index 25ce3edd1872..7f0059aa30e1 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -192,11 +192,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) break; case MSR_MTRRfix16K_80000 ... 
MSR_MTRRfix16K_A0000: *seg = 1; - *unit = msr - MSR_MTRRfix16K_80000; + *unit = array_index_nospec( + msr - MSR_MTRRfix16K_80000, + MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1); break; case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: *seg = 2; - *unit = msr - MSR_MTRRfix4K_C0000; + *unit = array_index_nospec( + msr - MSR_MTRRfix4K_C0000, + MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1); break; default: return false; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 97b21e7fd013..4e3f137ffa8c 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -33,7 +33,7 @@ #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT #define PT_HAVE_ACCESSED_DIRTY(mmu) true #ifdef CONFIG_X86_64 - #define PT_MAX_FULL_LEVELS 4 + #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL #define CMPXCHG cmpxchg #else #define CMPXCHG cmpxchg64 @@ -291,11 +291,11 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte) } /* - * Fetch a guest pte for a guest virtual address + * Fetch a guest pte for a guest virtual address, or for an L2's GPA. */ static int FNAME(walk_addr_generic)(struct guest_walker *walker, struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gva_t addr, u32 access) + gpa_t addr, u32 access) { int ret; pt_element_t pte; @@ -496,7 +496,7 @@ error: } static int FNAME(walk_addr)(struct guest_walker *walker, - struct kvm_vcpu *vcpu, gva_t addr, u32 access) + struct kvm_vcpu *vcpu, gpa_t addr, u32 access) { return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, access); @@ -611,7 +611,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, * If the guest tries to write a write-protected page, we need to * emulate this operation, return 1 to indicate this case. */ -static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, +static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, struct guest_walker *gw, int write_fault, int hlevel, kvm_pfn_t pfn, bool map_writable, bool prefault, @@ -765,7 +765,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, * Returns: 1 if we need to emulate the instruction, 0 otherwise, or * a negative value on error. */ -static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, +static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, bool prefault) { int write_fault = error_code & PFERR_WRITE_MASK; @@ -945,18 +945,19 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) spin_unlock(&vcpu->kvm->mmu_lock); } -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, +/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */ +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access, struct x86_exception *exception) { struct guest_walker walker; gpa_t gpa = UNMAPPED_GVA; int r; - r = FNAME(walk_addr)(&walker, vcpu, vaddr, access); + r = FNAME(walk_addr)(&walker, vcpu, addr, access); if (r) { gpa = gfn_to_gpa(walker.gfn); - gpa |= vaddr & ~PAGE_MASK; + gpa |= addr & ~PAGE_MASK; } else if (exception) *exception = walker.fault; @@ -964,7 +965,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, } #if PTTYPE != PTTYPE_EPT -static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, +/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. 
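+ * L2 GPAs are instead translated by gva_to_gpa(), per the comment above.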
*/ +static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr, u32 access, struct x86_exception *exception) { @@ -972,6 +974,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, gpa_t gpa = UNMAPPED_GVA; int r; +#ifndef CONFIG_X86_64 + /* A 64-bit GVA should be impossible on 32-bit KVM. */ + WARN_ON_ONCE(vaddr >> 32); +#endif + r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); if (r) { diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 58265f761c3b..3fc98afd72a8 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -2,6 +2,8 @@ #ifndef __KVM_X86_PMU_H #define __KVM_X86_PMU_H +#include + #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu) #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu)) #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu) @@ -86,8 +88,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc) static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, u32 base) { - if (msr >= base && msr < base + pmu->nr_arch_gp_counters) - return &pmu->gp_counters[msr - base]; + if (msr >= base && msr < base + pmu->nr_arch_gp_counters) { + u32 index = array_index_nospec(msr - base, + pmu->nr_arch_gp_counters); + + return &pmu->gp_counters[index]; + } return NULL; } @@ -97,8 +103,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) { int base = MSR_CORE_PERF_FIXED_CTR0; - if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) - return &pmu->fixed_counters[msr - base]; + if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) { + u32 index = array_index_nospec(msr - base, + pmu->nr_arch_fixed_counters); + + return &pmu->fixed_counters[index]; + } return NULL; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c5673bda4b66..07459120a222 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1298,6 +1298,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu) } } +/* + * The default MMIO mask is a single bit (excluding the present bit), + * which could conflict with the memory encryption bit. Check for + * memory encryption support and override the default MMIO mask if + * memory encryption is enabled. + */ +static __init void svm_adjust_mmio_mask(void) +{ + unsigned int enc_bit, mask_bit; + u64 msr, mask; + + /* If there is no memory encryption support, use existing mask */ + if (cpuid_eax(0x80000000) < 0x8000001f) + return; + + /* If memory encryption is not enabled, use existing mask */ + rdmsrl(MSR_K8_SYSCFG, msr); + if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) + return; + + enc_bit = cpuid_ebx(0x8000001f) & 0x3f; + mask_bit = boot_cpu_data.x86_phys_bits; + + /* Increment the mask bit if it is the same as the encryption bit */ + if (enc_bit == mask_bit) + mask_bit++; + + /* + * If the mask bit location is below 52, then some bits above the + * physical addressing limit will always be reserved, so use the + * rsvd_bits() function to generate the mask. This mask, along with + * the present bit, will be used to generate a page fault with + * PFER.RSV = 1. + * + * If the mask bit location is 52 (or above), then clear the mask. + */ + mask = (mask_bit < 52) ? 
rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; + + kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); +} + static __init int svm_hardware_setup(void) { int cpu; @@ -1352,6 +1393,8 @@ static __init int svm_hardware_setup(void) } } + svm_adjust_mmio_mask(); + for_each_possible_cpu(cpu) { r = svm_cpu_init(cpu); if (r) @@ -5141,8 +5184,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) return; } -static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) +static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) { + if (!vcpu->arch.apicv_active) + return -1; + kvm_lapic_set_irr(vec, vcpu->arch.apic); smp_mb__after_atomic(); @@ -5154,6 +5200,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) put_cpu(); } else kvm_vcpu_wake_up(vcpu); + + return 0; } static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) @@ -5986,6 +6034,11 @@ static bool svm_has_wbinvd_exit(void) return true; } +static bool svm_pku_supported(void) +{ + return false; +} + #define PRE_EX(exit) { .exit_code = (exit), \ .stage = X86_ICPT_PRE_EXCEPT, } #define POST_EX(exit) { .exit_code = (exit), \ @@ -7278,6 +7331,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .xsaves_supported = svm_xsaves_supported, .umip_emulated = svm_umip_emulated, .pt_supported = svm_pt_supported, + .pku_supported = svm_pku_supported, .set_supported_cpuid = svm_set_supported_cpuid, diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 7aa69716d516..f486e2606247 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept; extern bool __read_mostly enable_unrestricted_guest; extern bool __read_mostly enable_ept_ad_bits; extern bool __read_mostly enable_pml; +extern bool __read_mostly enable_apicv; extern int __read_mostly pt_mode; #define PT_MODE_SYSTEM 0 @@ -145,6 +146,11 @@ static inline bool vmx_umip_emulated(void) SECONDARY_EXEC_DESC; } +static inline bool vmx_pku_supported(void) +{ + return boot_cpu_has(X86_FEATURE_PKU); +} + static inline bool cpu_has_vmx_rdtscp(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 0e7c9301fe86..2b44554baf28 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -223,7 +223,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) return; kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); - vmx->nested.hv_evmcs_vmptr = -1ull; + vmx->nested.hv_evmcs_vmptr = 0; vmx->nested.hv_evmcs = NULL; } @@ -1828,7 +1828,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) return 1; - if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { + if (unlikely(!vmx->nested.hv_evmcs || + evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { if (!vmx->nested.hv_evmcs) vmx->nested.current_vmptr = -1ull; @@ -2418,6 +2419,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, entry_failure_code)) return -EINVAL; + /* + * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12 + * on nested VM-Exit, which can occur without actually running L2 and + * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with + * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the + * transition to HLT instead of running L2. 
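+ * (vmx_set_cr3() skips its own GUEST_CR3 write while in guest mode, deferring to this one.)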
+ */ + if (enable_ept) + vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); + /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { @@ -4599,32 +4610,28 @@ static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; u64 field_value; + struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); int len; gva_t gva = 0; - struct vmcs12 *vmcs12; + struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) + : get_vmcs12(vcpu); struct x86_exception e; short offset; if (!nested_vmx_check_permission(vcpu)) return 1; - if (to_vmx(vcpu)->nested.current_vmptr == -1ull) + /* + * In VMX non-root operation, when the VMCS-link pointer is -1ull, + * any VMREAD sets the ALU flags for VMfailInvalid. + */ + if (vmx->nested.current_vmptr == -1ull || + (is_guest_mode(vcpu) && + get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) return nested_vmx_failInvalid(vcpu); - if (!is_guest_mode(vcpu)) - vmcs12 = get_vmcs12(vcpu); - else { - /* - * When vmcs->vmcs_link_pointer is -1ull, any VMREAD - * to shadowed-field sets the ALU flags for VMfailInvalid. - */ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) - return nested_vmx_failInvalid(vcpu); - vmcs12 = get_shadow_vmcs12(vcpu); - } - /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); @@ -4653,8 +4660,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu) vmx_instruction_info, true, len, &gva)) return 1; /* _system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) + if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) { kvm_inject_page_fault(vcpu, &e); + return 1; + } } return nested_vmx_succeed(vcpu); @@ -4701,13 +4710,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) */ u64 field_value = 0; struct x86_exception e; - struct vmcs12 *vmcs12; + struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) + : get_vmcs12(vcpu); short offset; if (!nested_vmx_check_permission(vcpu)) return 1; - if (vmx->nested.current_vmptr == -1ull) + /* + * In VMX non-root operation, when the VMCS-link pointer is -1ull, + * any VMWRITE sets the ALU flags for VMfailInvalid. + */ + if (vmx->nested.current_vmptr == -1ull || + (is_guest_mode(vcpu) && + get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) return nested_vmx_failInvalid(vcpu); if (vmx_instruction_info & (1u << 10)) @@ -4726,6 +4742,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + + offset = vmcs_field_to_offset(field); + if (offset < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); + /* * If the vCPU supports "VMWRITE to any supported field in the * VMCS," then the "read-only" fields are actually read/write. @@ -4735,29 +4757,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) return nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); - if (!is_guest_mode(vcpu)) { - vmcs12 = get_vmcs12(vcpu); - - /* - * Ensure vmcs12 is up-to-date before any VMWRITE that dirties - * vmcs12, else we may crush a field or consume a stale value. 
- */ - if (!is_shadow_field_rw(field)) - copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); - } else { - /* - * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE - * to shadowed-field sets the ALU flags for VMfailInvalid. - */ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) - return nested_vmx_failInvalid(vcpu); - vmcs12 = get_shadow_vmcs12(vcpu); - } - - offset = vmcs_field_to_offset(field); - if (offset < 0) - return nested_vmx_failValid(vcpu, - VMXERR_UNSUPPORTED_VMCS_COMPONENT); + /* + * Ensure vmcs12 is up-to-date before any VMWRITE that dirties + * vmcs12, else we may crush a field or consume a stale value. + */ + if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) + copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); /* * Some Intel CPUs intentionally drop the reserved bits of the AR byte @@ -5120,24 +5125,17 @@ fail: return 1; } - -static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) +/* + * Return true if an IO instruction with the specified port and size should cause + * a VM-exit into L1. + */ +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, + int size) { - unsigned long exit_qualification; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); gpa_t bitmap, last_bitmap; - unsigned int port; - int size; u8 b; - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) - return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - port = exit_qualification >> 16; - size = (exit_qualification & 7) + 1; - last_bitmap = (gpa_t)-1; b = -1; @@ -5164,6 +5162,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, return false; } +static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + unsigned long exit_qualification; + unsigned short port; + int size; + + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + port = exit_qualification >> 16; + size = (exit_qualification & 7) + 1; + + return nested_vmx_check_io_bitmaps(vcpu, port, size); +} + /* * Return 1 if we should exit from L2 to L1 to handle an MSR access access, * rather than handle it ourselves in L0. I.e., check whether L1 expressed @@ -5784,8 +5800,7 @@ void nested_vmx_vcpu_setup(void) * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). */ -void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, - bool apicv) +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) { /* * Note that as a general rule, the high half of the MSRs (bits in @@ -5812,7 +5827,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS | - (apicv ? PIN_BASED_POSTED_INTR : 0); + (enable_apicv ? 
PIN_BASED_POSTED_INTR : 0); msrs->pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 6280f33e5fa6..b8521c451bb0 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -17,8 +17,7 @@ enum nvmx_vmentry_status { }; void vmx_leave_nested(struct kvm_vcpu *vcpu); -void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, - bool apicv); +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps); void nested_vmx_hardware_unsetup(void); __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); void nested_vmx_vcpu_setup(void); @@ -33,6 +32,8 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata); int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret); +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, + int size); static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 3e9c059099e9..f8998a7bc7d5 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -84,10 +84,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu, static unsigned intel_find_fixed_event(int idx) { - if (idx >= ARRAY_SIZE(fixed_pmc_events)) + u32 event; + size_t size = ARRAY_SIZE(fixed_pmc_events); + + if (idx >= size) return PERF_COUNT_HW_MAX; - return intel_arch_events[fixed_pmc_events[idx]].event_type; + event = fixed_pmc_events[array_index_nospec(idx, size)]; + return intel_arch_events[event].event_type; } /* check if a PMC is enabled by comparing it with globl_ctrl bits. */ @@ -128,16 +132,20 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); bool fixed = idx & (1u << 30); struct kvm_pmc *counters; + unsigned int num_counters; idx &= ~(3u << 30); - if (!fixed && idx >= pmu->nr_arch_gp_counters) + if (fixed) { + counters = pmu->fixed_counters; + num_counters = pmu->nr_arch_fixed_counters; + } else { + counters = pmu->gp_counters; + num_counters = pmu->nr_arch_gp_counters; + } + if (idx >= num_counters) return NULL; - if (fixed && idx >= pmu->nr_arch_fixed_counters) - return NULL; - counters = fixed ? pmu->fixed_counters : pmu->gp_counters; *mask &= pmu->counter_bitmask[fixed ? 
KVM_PMC_FIXED : KVM_PMC_GP]; - - return &counters[idx]; + return &counters[array_index_nospec(idx, num_counters)]; } static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 04a8212704c1..8129b6b27c93 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); -static bool __read_mostly enable_apicv = 1; +bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); /* @@ -2140,6 +2140,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_num_address_ranges))) return 1; + if (is_noncanonical_address(data, vcpu)) + return 1; if (index % 2) vmx->pt_desc.guest.addr_b[index / 2] = data; else @@ -2973,6 +2975,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) static int get_ept_level(struct kvm_vcpu *vcpu) { + /* Nested EPT currently only supports 4-level walks. */ + if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu))) + return 4; if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) return 5; return 4; @@ -2995,6 +3000,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { struct kvm *kvm = vcpu->kvm; + bool update_guest_cr3 = true; unsigned long guest_cr3; u64 eptp; @@ -3011,15 +3017,18 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); } - if (enable_unrestricted_guest || is_paging(vcpu) || - is_guest_mode(vcpu)) + /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */ + if (is_guest_mode(vcpu)) + update_guest_cr3 = false; + else if (enable_unrestricted_guest || is_paging(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; ept_load_pdptrs(vcpu); } - vmcs_writel(GUEST_CR3, guest_cr3); + if (update_guest_cr3) + vmcs_writel(GUEST_CR3, guest_cr3); } int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -3844,24 +3853,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ -static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) +static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; r = vmx_deliver_nested_posted_interrupt(vcpu, vector); if (!r) - return; + return 0; + + if (!vcpu->arch.apicv_active) + return -1; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) - return; + return 0; /* If a previous notification has sent the IPI, nothing to do. 
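* (i.e. the outstanding-notification bit in the PI descriptor was already set.)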
*/ if (pi_test_and_set_on(&vmx->pi_desc)) - return; + return 0; if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) kvm_vcpu_kick(vcpu); + + return 0; } /* @@ -6793,8 +6807,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (nested) nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, - vmx_capability.ept, - kvm_vcpu_apicv_active(&vmx->vcpu)); + vmx_capability.ept); else memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); @@ -6876,8 +6889,7 @@ static int __init vmx_check_processor_compat(void) if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) return -EIO; if (nested) - nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept, - enable_apicv); + nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept); if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); @@ -7123,6 +7135,40 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) to_vmx(vcpu)->req_immediate_exit = true; } +static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, + struct x86_instruction_info *info) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned short port; + bool intercept; + int size; + + if (info->intercept == x86_intercept_in || + info->intercept == x86_intercept_ins) { + port = info->src_val; + size = info->dst_bytes; + } else { + port = info->dst_val; + size = info->src_bytes; + } + + /* + * If the 'use IO bitmaps' VM-execution control is 0, IO instruction + * VM-exits depend on the 'unconditional IO exiting' VM-execution + * control. + * + * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. + */ + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + intercept = nested_cpu_has(vmcs12, + CPU_BASED_UNCOND_IO_EXITING); + else + intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); + + /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ + return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; +} + static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) @@ -7130,19 +7176,45 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; + switch (info->intercept) { /* * RDPID causes #UD if disabled through secondary execution controls. * Because it is marked as EmulateOnUD, we need to intercept it here. */ - if (info->intercept == x86_intercept_rdtscp && - !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { - ctxt->exception.vector = UD_VECTOR; - ctxt->exception.error_code_valid = false; - return X86EMUL_PROPAGATE_FAULT; - } + case x86_intercept_rdtscp: + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { + ctxt->exception.vector = UD_VECTOR; + ctxt->exception.error_code_valid = false; + return X86EMUL_PROPAGATE_FAULT; + } + break; + + case x86_intercept_in: + case x86_intercept_ins: + case x86_intercept_out: + case x86_intercept_outs: + return vmx_check_intercept_io(vcpu, info); + + case x86_intercept_lgdt: + case x86_intercept_lidt: + case x86_intercept_lldt: + case x86_intercept_ltr: + case x86_intercept_sgdt: + case x86_intercept_sidt: + case x86_intercept_sldt: + case x86_intercept_str: + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC)) + return X86EMUL_CONTINUE; + + /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ + break; /* TODO: check more intercepts... 
*/ - return X86EMUL_CONTINUE; + default: + break; + } + + return X86EMUL_UNHANDLEABLE; } #ifdef CONFIG_X86_64 @@ -7727,7 +7799,7 @@ static __init int hardware_setup(void) if (nested) { nested_vmx_setup_ctls_msrs(&vmcs_config.nested, - vmx_capability.ept, enable_apicv); + vmx_capability.ept); r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); if (r) @@ -7861,6 +7933,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .xsaves_supported = vmx_xsaves_supported, .umip_emulated = vmx_umip_emulated, .pt_supported = vmx_pt_supported, + .pku_supported = vmx_pku_supported, .request_immediate_exit = vmx_request_immediate_exit, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5d530521f11d..c5e15eba8052 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -92,6 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif +static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; + #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ #define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ @@ -300,13 +302,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); int err; - if (((value ^ smsr->values[slot].curr) & mask) == 0) + value = (value & mask) | (smsr->values[slot].host & ~mask); + if (value == smsr->values[slot].curr) return 0; - smsr->values[slot].curr = value; err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); if (err) return 1; + smsr->values[slot].curr = value; if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); @@ -442,6 +445,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) * for #DB exceptions under VMX. */ vcpu->arch.dr6 ^= payload & DR6_RTM; + + /* + * The #DB payload is defined as compatible with the 'pending + * debug exceptions' field under VMX, not DR6. While bit 12 is + * defined in the 'pending debug exceptions' field (enabled + * breakpoint), it is reserved and must be zero in DR6. + */ + vcpu->arch.dr6 &= ~BIT(12); break; case PF_VECTOR: vcpu->arch.cr2 = payload; @@ -885,9 +896,38 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) } EXPORT_SYMBOL_GPL(kvm_set_xcr); +static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c) +{ + u64 reserved_bits = CR4_RESERVED_BITS; + + if (!cpu_has(c, X86_FEATURE_XSAVE)) + reserved_bits |= X86_CR4_OSXSAVE; + + if (!cpu_has(c, X86_FEATURE_SMEP)) + reserved_bits |= X86_CR4_SMEP; + + if (!cpu_has(c, X86_FEATURE_SMAP)) + reserved_bits |= X86_CR4_SMAP; + + if (!cpu_has(c, X86_FEATURE_FSGSBASE)) + reserved_bits |= X86_CR4_FSGSBASE; + + if (!cpu_has(c, X86_FEATURE_PKU)) + reserved_bits |= X86_CR4_PKE; + + if (!cpu_has(c, X86_FEATURE_LA57) && + !(cpuid_ecx(0x7) & bit(X86_FEATURE_LA57))) + reserved_bits |= X86_CR4_LA57; + + if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated()) + reserved_bits |= X86_CR4_UMIP; + + return reserved_bits; +} + static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { - if (cr4 & CR4_RESERVED_BITS) + if (cr4 & cr4_reserved_bits) return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE)) @@ -1053,9 +1093,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { + size_t size = ARRAY_SIZE(vcpu->arch.db); + switch (dr) { case 0 ... 
3: - vcpu->arch.db[dr] = val; + vcpu->arch.db[array_index_nospec(dr, size)] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; @@ -1092,9 +1134,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr); int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { + size_t size = ARRAY_SIZE(vcpu->arch.db); + switch (dr) { case 0 ... 3: - *val = vcpu->arch.db[dr]; + *val = vcpu->arch.db[array_index_nospec(dr, size)]; break; case 4: /* fall through */ @@ -1327,10 +1371,15 @@ static u64 kvm_get_arch_capabilities(void) * If TSX is disabled on the system, guests are also mitigated against * TAA and clear CPU buffer mitigation is not required for guests. */ - if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) && - (data & ARCH_CAP_TSX_CTRL_MSR)) + if (!boot_cpu_has(X86_FEATURE_RTM)) + data &= ~ARCH_CAP_TAA_NO; + else if (!boot_cpu_has_bug(X86_BUG_TAA)) + data |= ARCH_CAP_TAA_NO; + else if (data & ARCH_CAP_TSX_CTRL_MSR) data &= ~ARCH_CAP_MDS_NO; + /* KVM does not emulate MSR_IA32_TSX_CTRL. */ + data &= ~ARCH_CAP_TSX_CTRL_MSR; return data; } @@ -2484,7 +2533,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MCx_CTL(bank_num)) { - u32 offset = msr - MSR_IA32_MC0_CTL; + u32 offset = array_index_nospec( + msr - MSR_IA32_MC0_CTL, + MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); + /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore @@ -2580,45 +2632,47 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) static void record_steal_time(struct kvm_vcpu *vcpu) { + struct kvm_host_map map; + struct kvm_steal_time *st; + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; - if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) + /* -EAGAIN is returned in atomic context so we can just return. */ + if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, + &map, &vcpu->arch.st.cache, false)) return; + st = map.hva + + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); + /* * Doing a TLB flush here, on the guest's behalf, can avoid * expensive IPIs. 
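* The guest requests the flush by setting KVM_VCPU_FLUSH_TLB in st->preempted;
* the xchg() below consumes the request atomically.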
*/ trace_kvm_pv_tlb_flush(vcpu->vcpu_id, - vcpu->arch.st.steal.preempted & KVM_VCPU_FLUSH_TLB); - if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB) + st->preempted & KVM_VCPU_FLUSH_TLB); + if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) kvm_vcpu_flush_tlb(vcpu, false); - if (vcpu->arch.st.steal.version & 1) - vcpu->arch.st.steal.version += 1; /* first time write, random junk */ + vcpu->arch.st.preempted = 0; - vcpu->arch.st.steal.version += 1; + if (st->version & 1) + st->version += 1; /* first time write, random junk */ - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); + st->version += 1; smp_wmb(); - vcpu->arch.st.steal.steal += current->sched_info.run_delay - + st->steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); - smp_wmb(); - vcpu->arch.st.steal.version += 1; + st->version += 1; - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); + kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -2771,11 +2825,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & KVM_STEAL_RESERVED_MASK) return 1; - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS, - sizeof(struct kvm_steal_time))) - return 1; - vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) @@ -2911,7 +2960,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MCx_CTL(bank_num)) { - u32 offset = msr - MSR_IA32_MC0_CTL; + u32 offset = array_index_nospec( + msr - MSR_IA32_MC0_CTL, + MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); + data = vcpu->arch.mce_banks[offset]; break; } @@ -3437,10 +3489,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_x86_ops->vcpu_load(vcpu, cpu); - fpregs_assert_state_consistent(); - if (test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_return(); - /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); @@ -3480,15 +3528,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) { + struct kvm_host_map map; + struct kvm_steal_time *st; + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; - vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED; + if (vcpu->arch.st.preempted) + return; - kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal.preempted, - offsetof(struct kvm_steal_time, preempted), - sizeof(vcpu->arch.st.steal.preempted)); + if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, + &vcpu->arch.st.cache, true)) + return; + + st = map.hva + + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); + + st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; + + kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -4421,6 +4479,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_NESTED_STATE: { struct kvm_nested_state __user *user_kvm_nested_state = argp; struct kvm_nested_state kvm_state; + int 
idx; r = -EINVAL; if (!kvm_x86_ops->set_nested_state) @@ -4444,7 +4503,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) break; + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } case KVM_GET_SUPPORTED_HV_CPUID: { @@ -6356,11 +6417,11 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) return 1; } -static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, +static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, bool write_fault_to_shadow_pgtable, int emulation_type) { - gpa_t gpa = cr2; + gpa_t gpa = cr2_or_gpa; kvm_pfn_t pfn; if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) @@ -6374,7 +6435,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, * Write permission should be allowed since only * write access need to be emulated. */ - gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); + gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); /* * If the mapping is invalid in guest, let cpu retry @@ -6431,10 +6492,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, - unsigned long cr2, int emulation_type) + gpa_t cr2_or_gpa, int emulation_type) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - unsigned long last_retry_eip, last_retry_addr, gpa = cr2; + unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; last_retry_eip = vcpu->arch.last_retry_eip; last_retry_addr = vcpu->arch.last_retry_addr; @@ -6463,14 +6524,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, if (x86_page_table_writing_insn(ctxt)) return false; - if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) + if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) return false; vcpu->arch.last_retry_eip = ctxt->eip; - vcpu->arch.last_retry_addr = cr2; + vcpu->arch.last_retry_addr = cr2_or_gpa; if (!vcpu->arch.mmu->direct_map) - gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); + gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); @@ -6616,11 +6677,8 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) return false; } -int x86_emulate_instruction(struct kvm_vcpu *vcpu, - unsigned long cr2, - int emulation_type, - void *insn, - int insn_len) +int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + int emulation_type, void *insn, int insn_len) { int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; @@ -6666,8 +6724,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, kvm_queue_exception(vcpu, UD_VECTOR); return 1; } - if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, - emulation_type)) + if (reexecute_instruction(vcpu, cr2_or_gpa, + write_fault_to_spt, + emulation_type)) return 1; if (ctxt->have_exception) { /* @@ -6701,7 +6760,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, return 1; } - if (retry_instruction(ctxt, cr2, emulation_type)) + if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) return 1; /* this is needed for vmware backdoor interface to work since it @@ -6713,7 +6772,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, restart: /* Save the faulting GPA (cr2) in the address field */ - ctxt->exception.address = cr2; + ctxt->exception.address = cr2_or_gpa; r = x86_emulate_insn(ctxt); @@ -6721,7 +6780,7 @@ restart: return 1; if (r == 
EMULATION_FAILED) { - if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, + if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, emulation_type)) return 1; @@ -8163,8 +8222,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu->vcpu_id); guest_enter_irqoff(); - /* The preempt notifier should have taken care of the FPU already. */ - WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD)); + fpregs_assert_state_consistent(); + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + switch_fpu_return(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); @@ -8436,12 +8496,26 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) return 0; } +static void kvm_save_current_fpu(struct fpu *fpu) +{ + /* + * If the target FPU state is not resident in the CPU registers, just + * memcpy() from current, else save CPU state directly to the target. + */ + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + memcpy(&fpu->state, ¤t->thread.fpu.state, + fpu_kernel_xstate_size); + else + copy_fpregs_to_fpstate(fpu); +} + /* Swap (qemu) user FPU context for the guest FPU context. */ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { fpregs_lock(); - copy_fpregs_to_fpstate(vcpu->arch.user_fpu); + kvm_save_current_fpu(vcpu->arch.user_fpu); + /* PKRU is separately restored in kvm_x86_ops->run. */ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, ~XFEATURE_MASK_PKRU); @@ -8457,7 +8531,8 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { fpregs_lock(); - copy_fpregs_to_fpstate(vcpu->arch.guest_fpu); + kvm_save_current_fpu(vcpu->arch.guest_fpu); + copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); fpregs_mark_activate(); @@ -8679,6 +8754,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { vcpu_load(vcpu); + if (kvm_mpx_supported()) + kvm_load_guest_fpu(vcpu); kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && @@ -8687,6 +8764,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, else mp_state->mp_state = vcpu->arch.mp_state; + if (kvm_mpx_supported()) + kvm_put_guest_fpu(vcpu); vcpu_put(vcpu); return 0; } @@ -9046,6 +9125,9 @@ static void fx_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; + struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; + + kvm_release_pfn(cache->pfn, cache->dirty, cache); kvmclock_reset(vcpu); @@ -9110,13 +9192,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { - vcpu->arch.apf.msr_val = 0; - - vcpu_load(vcpu); - kvm_mmu_unload(vcpu); - vcpu_put(vcpu); - - kvm_x86_ops->vcpu_free(vcpu); + kvm_arch_vcpu_free(vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) @@ -9308,6 +9384,8 @@ int kvm_arch_hardware_setup(void) if (r != 0) return r; + cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data); + if (kvm_has_tsc_control) { /* * Make sure the user can only configure tsc_khz values that @@ -9710,11 +9788,18 @@ out_free: void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) { + struct kvm_vcpu *vcpu; + int i; + /* * memslots->generation has been incremented. * mmio generation may have reached its maximum value. 
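* If it has wrapped, stale MMIO sptes tagged with the old generation must be
* zapped before the value is reused.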
*/ kvm_mmu_invalidate_mmio_sptes(kvm, gen); + + /* Force re-initialization of steal_time cache */ + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vcpu_kick(vcpu); } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -9966,7 +10051,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu)) return; - vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true); + vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) @@ -10079,7 +10164,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, { struct x86_exception fault; - trace_kvm_async_pf_not_present(work->arch.token, work->gva); + trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (kvm_can_deliver_async_pf(vcpu) && @@ -10114,7 +10199,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); - trace_kvm_async_pf_ready(work->arch.token, work->gva); + trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED && !apf_get_user(vcpu, &val)) { diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index dbf7442a822b..de6b55484876 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -286,7 +286,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num); bool kvm_vector_hashing_enabled(void); -int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, +int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len); #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index e0b85930dd77..5cb9f009f2be 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -333,7 +333,7 @@ AVXcode: 1 06: CLTS 07: SYSRET (o64) 08: INVD -09: WBINVD +09: WBINVD | WBNOINVD (F3) 0a: 0b: UD2 (1B) 0c: @@ -364,7 +364,7 @@ AVXcode: 1 # a ModR/M byte. 
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv -1c: +1c: Grp20 (1A),(1C) 1d: 1e: 1f: NOP Ev @@ -792,6 +792,8 @@ f3: Grp17 (1A) f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) +f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3) +f9: MOVDIRI My,Gy EndTable Table: 3-byte opcode 2 (0x0f 0x3a) @@ -907,7 +909,7 @@ EndTable GrpTable: Grp3_2 0: TEST Ev,Iz -1: +1: TEST Ev,Iz 2: NOT Ev 3: NEG Ev 4: MUL rAX,Ev @@ -943,9 +945,9 @@ GrpTable: Grp6 EndTable GrpTable: Grp7 -0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) -1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) -2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) +0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) +1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B) 3: LIDT Ms 4: SMSW Mw/Rv 5: rdpkru (110),(11B) | wrpkru (111),(11B) @@ -1020,7 +1022,7 @@ GrpTable: Grp15 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) 4: XSAVE | ptwrite Ey (F3),(11B) 5: XRSTOR | lfence (11B) -6: XSAVEOPT | clwb (66) | mfence (11B) +6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B) 7: clflush | clflushopt (66) | sfence (11B) EndTable @@ -1051,6 +1053,10 @@ GrpTable: Grp19 6: vscatterpf1qps/d Wx (66),(ev) EndTable +GrpTable: Grp20 +0: cldemote Mb +EndTable + # AMD's Prefetch Group GrpTable: GrpP 0: PREFETCH diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h index f98a0c956764..9b41391867dc 100644 --- a/arch/x86/math-emu/fpu_system.h +++ b/arch/x86/math-emu/fpu_system.h @@ -107,6 +107,8 @@ static inline bool seg_writable(struct desc_struct *d) #define FPU_access_ok(y,z) if ( !access_ok(y,z) ) \ math_abort(FPU_info,SIGSEGV) #define FPU_abort math_abort(FPU_info, SIGSEGV) +#define FPU_copy_from_user(to, from, n) \ + do { if (copy_from_user(to, from, n)) FPU_abort; } while (0) #undef FPU_IGNORE_CODE_SEGV #ifdef FPU_IGNORE_CODE_SEGV @@ -122,7 +124,7 @@ static inline bool seg_writable(struct desc_struct *d) #define FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z) #endif -#define FPU_get_user(x,y) get_user((x),(y)) -#define FPU_put_user(x,y) put_user((x),(y)) +#define FPU_get_user(x,y) do { if (get_user((x),(y))) FPU_abort; } while (0) +#define FPU_put_user(x,y) do { if (put_user((x),(y))) FPU_abort; } while (0) #endif diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c index f3779743d15e..fe6246ff9887 100644 --- a/arch/x86/math-emu/reg_ld_str.c +++ b/arch/x86/math-emu/reg_ld_str.c @@ -85,7 +85,7 @@ int FPU_load_extended(long double __user *s, int stnr) RE_ENTRANT_CHECK_OFF; FPU_access_ok(s, 10); - __copy_from_user(sti_ptr, s, 10); + FPU_copy_from_user(sti_ptr, s, 10); RE_ENTRANT_CHECK_ON; return FPU_tagof(sti_ptr); @@ -1126,9 +1126,9 @@ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address) /* Copy all 
registers in stack order. */ RE_ENTRANT_CHECK_OFF; FPU_access_ok(s, 80); - __copy_from_user(register_base + offset, s, other); + FPU_copy_from_user(register_base + offset, s, other); if (offset) - __copy_from_user(register_base, s + other, offset); + FPU_copy_from_user(register_base, s + other, offset); RE_ENTRANT_CHECK_ON; for (i = 0; i < 8; i++) { diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 752ad11d6868..d9643647a9ce 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -178,7 +178,9 @@ static __init void setup_cpu_entry_area_ptes(void) #ifdef CONFIG_X86_32 unsigned long start, end; - BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); + /* The +1 is for the readonly IDT: */ + BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE); + BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE); BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); start = CPU_ENTRY_AREA_BASE; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9ceacd1156db..c494c8c05824 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -189,7 +189,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) return pmd_k; } -void vmalloc_sync_all(void) +static void vmalloc_sync(void) { unsigned long address; @@ -197,7 +197,7 @@ void vmalloc_sync_all(void) return; for (address = VMALLOC_START & PMD_MASK; - address >= TASK_SIZE_MAX && address < FIXADDR_TOP; + address >= TASK_SIZE_MAX && address < VMALLOC_END; address += PMD_SIZE) { struct page *page; @@ -216,6 +216,16 @@ void vmalloc_sync_all(void) } } +void vmalloc_sync_mappings(void) +{ + vmalloc_sync(); +} + +void vmalloc_sync_unmappings(void) +{ + vmalloc_sync(); +} + /* * 32-bit: * @@ -318,11 +328,23 @@ out: #else /* CONFIG_X86_64: */ -void vmalloc_sync_all(void) +void vmalloc_sync_mappings(void) { + /* + * 64-bit mappings might allocate new p4d/pud pages + * that need to be propagated to all tasks' PGDs. + */ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); } +void vmalloc_sync_unmappings(void) +{ + /* + * Unmappings never allocate or free p4d/pud pages. + * No work is required here. 
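+ * Lower-level entries live in page-table pages already shared by all PGDs,
+ * so clearing them is immediately visible everywhere.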
+ */ +} + /* * 64-bit: * diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 930edeb41ec3..0a74407ef92e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a6b5c653727b..b8541d77452c 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); - struct zone *zone = page_zone(page); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); kernel_physical_mapping_remove(start, start + size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index a39dcdb5ae34..a353f88d299d 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -106,6 +106,22 @@ static unsigned int __ioremap_check_encrypted(struct resource *res) return 0; } +/* + * The EFI runtime services data area is not covered by walk_mem_res(), but must + * be mapped encrypted when SEV is active. + */ +static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc) +{ + if (!sev_active()) + return; + + if (!IS_ENABLED(CONFIG_EFI)) + return; + + if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA) + desc->flags |= IORES_MAP_ENCRYPTED; +} + static int __ioremap_collect_map_flags(struct resource *res, void *arg) { struct ioremap_desc *desc = arg; @@ -124,6 +140,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg) * To avoid multiple resource walks, this function walks resources marked as * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES). + * + * After that, deal with misc other ranges in __ioremap_check_other() which do + * not fall into the above category. 
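+ * (Currently that is only the EFI runtime services data range when SEV is active.)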
*/ static void __ioremap_check_mem(resource_size_t addr, unsigned long size, struct ioremap_desc *desc) @@ -135,6 +154,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size, memset(desc, 0, sizeof(struct ioremap_desc)); walk_mem_res(start, end, desc, __ioremap_collect_map_flags); + + __ioremap_check_other(addr, desc); } /* diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 0d09cc5aad61..a19a71b4d185 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -2215,7 +2215,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, .pgd = pgd, .numpages = numpages, .mask_set = __pgprot(0), - .mask_clr = __pgprot(0), + .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)), .flags = 0, }; @@ -2224,12 +2224,6 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, if (!(__supported_pte_mask & _PAGE_NX)) goto out; - if (!(page_flags & _PAGE_NX)) - cpa.mask_clr = __pgprot(_PAGE_NX); - - if (!(page_flags & _PAGE_RW)) - cpa.mask_clr = __pgprot(_PAGE_RW); - if (!(page_flags & _PAGE_ENC)) cpa.mask_clr = pgprot_encrypted(cpa.mask_clr); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 3e4b9035bb9a..7bd2c3a52297 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -643,8 +643,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) fixmaps_set++; } -void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, - pgprot_t flags) +void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags) { /* Sanitize 'prot' against any unsupported bits: */ pgprot_val(flags) &= __default_kernel_pte_mask; diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 393d251798c0..4d2a7a764602 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); - /* and dreg_hi,sreg_hi */ - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); - /* or dreg_lo,dreg_hi */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + if (is_jmp64) { + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + } goto emit_cond_jmp; } case BPF_JMP | BPF_JSET | BPF_K: diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 9acab6ac28f5..b339ce5e7f59 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -285,6 +285,7 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"), }, }, +#if 0 { .callback = find_sort_method, .ident = "Dell System", @@ -292,6 +293,7 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), }, }, +#endif { .callback = set_bf_sort, .ident = "HP ProLiant BL20p G3", diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 527e69b12002..e723559c386a 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -588,6 +588,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme); +/* + * Device [1022:7914] + * When in D0, PME# doesn't get asserted when plugging USB 2.0 device. 
+ */ +static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev) +{ + dev_info(&dev->dev, "PME# does not work under D0, disabling it\n"); + dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme); + /* * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff] * diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 425e025341db..01d7ca492741 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -504,7 +504,6 @@ void __init efi_init(void) efi_char16_t *c16; char vendor[100] = "unknown"; int i = 0; - void *tmp; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -529,14 +528,16 @@ void __init efi_init(void) /* * Show what we know for posterity */ - c16 = tmp = early_memremap(efi.systab->fw_vendor, 2); + c16 = early_memremap_ro(efi.systab->fw_vendor, + sizeof(vendor) * sizeof(efi_char16_t)); if (c16) { - for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) - vendor[i] = *c16++; + for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i) + vendor[i] = c16[i]; vendor[i] = '\0'; - } else + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } else { pr_err("Could not map the firmware vendor!\n"); - early_memunmap(tmp, 2); + } pr_info("EFI v%u.%.02u by %s\n", efi.systab->hdr.revision >> 16, @@ -953,16 +954,14 @@ static void __init __efi_enter_virtual_mode(void) if (efi_alloc_page_tables()) { pr_err("Failed to allocate EFI page tables\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; + goto err; } efi_merge_regions(); new_memmap = efi_map_regions(&count, &pg_shift); if (!new_memmap) { pr_err("Error reallocating memory, EFI runtime non-functional!\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; + goto err; } pa = __pa(new_memmap); @@ -976,8 +975,7 @@ static void __init __efi_enter_virtual_mode(void) if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) { pr_err("Failed to remap late EFI memory map\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; + goto err; } if (efi_enabled(EFI_DBG)) { @@ -985,12 +983,11 @@ static void __init __efi_enter_virtual_mode(void) efi_print_memmap(); } - BUG_ON(!efi.systab); + if (WARN_ON(!efi.systab)) + goto err; - if (efi_setup_page_tables(pa, 1 << pg_shift)) { - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; - } + if (efi_setup_page_tables(pa, 1 << pg_shift)) + goto err; efi_sync_low_kernel_mappings(); @@ -1010,9 +1007,9 @@ static void __init __efi_enter_virtual_mode(void) } if (status != EFI_SUCCESS) { - pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", - status); - panic("EFI call to SetVirtualAddressMap() failed!"); + pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n", + status); + goto err; } efi_free_boot_services(); @@ -1041,6 +1038,10 @@ static void __init __efi_enter_virtual_mode(void) /* clean DUMMY object */ efi_delete_dummy_variable(); + return; + +err: + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); } void __init efi_enter_virtual_mode(void) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 08ce8177c3af..fe0e647411da 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -316,7 +316,7 @@ void efi_sync_low_kernel_mappings(void) static inline phys_addr_t virt_to_phys_or_null_size(void *va, unsigned long size) { - bool bad_size; + phys_addr_t pa; if (!va) return 0; @@ -324,16 +324,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size) if (virt_addr_valid(va)) 
return virt_to_phys(va); - /* - * A fully aligned variable on the stack is guaranteed not to - * cross a page bounary. Try to catch strings on the stack by - * checking that 'size' is a power of two. - */ - bad_size = size > PAGE_SIZE || !is_power_of_2(size); + pa = slow_virt_to_phys(va); - WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); + /* check if the object crosses a page boundary */ + if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK)) + return 0; - return slow_virt_to_phys(va); + return pa; } #define virt_to_phys_or_null(addr) \ @@ -392,11 +389,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) return 0; page = alloc_page(GFP_KERNEL|__GFP_DMA32); - if (!page) - panic("Unable to allocate EFI runtime stack < 4GB\n"); + if (!page) { + pr_err("Unable to allocate EFI runtime stack < 4GB\n"); + return 1; + } - efi_scratch.phys_stack = virt_to_phys(page_address(page)); - efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */ + efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */ npages = (_etext - _text) >> PAGE_SHIFT; text = __pa(_text); @@ -790,6 +788,8 @@ static efi_status_t efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, u32 *attr, unsigned long *data_size, void *data) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); efi_status_t status; u32 phys_name, phys_vendor, phys_attr; u32 phys_data_size, phys_data; @@ -797,14 +797,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, spin_lock_irqsave(&efi_runtime_lock, flags); + *vnd = *vendor; + phys_data_size = virt_to_phys_or_null(data_size); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); phys_attr = virt_to_phys_or_null(attr); phys_data = virt_to_phys_or_null_size(data, *data_size); - status = efi_thunk(get_variable, phys_name, phys_vendor, - phys_attr, phys_data_size, phys_data); + if (!phys_name || (data && !phys_data)) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(get_variable, phys_name, phys_vendor, + phys_attr, phys_data_size, phys_data); spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -815,19 +820,25 @@ static efi_status_t efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); u32 phys_name, phys_vendor, phys_data; efi_status_t status; unsigned long flags; spin_lock_irqsave(&efi_runtime_lock, flags); + *vnd = *vendor; + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - /* If data_size is > sizeof(u32) we've got problems */ - status = efi_thunk(set_variable, phys_name, phys_vendor, - attr, data_size, phys_data); + if (!phys_name || !phys_data) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(set_variable, phys_name, phys_vendor, + attr, data_size, phys_data); spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -839,6 +850,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); u32 phys_name, phys_vendor, phys_data; efi_status_t status; unsigned long flags; @@ -846,13 +859,17 @@ 
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) if (!spin_trylock_irqsave(&efi_runtime_lock, flags)) return EFI_NOT_READY; + *vnd = *vendor; + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - /* If data_size is > sizeof(u32) we've got problems */ - status = efi_thunk(set_variable, phys_name, phys_vendor, - attr, data_size, phys_data); + if (!phys_name || !phys_data) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(set_variable, phys_name, phys_vendor, + attr, data_size, phys_data); spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -864,21 +881,29 @@ efi_thunk_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); efi_status_t status; u32 phys_name_size, phys_name, phys_vendor; unsigned long flags; spin_lock_irqsave(&efi_runtime_lock, flags); + *vnd = *vendor; + phys_name_size = virt_to_phys_or_null(name_size); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_name = virt_to_phys_or_null_size(name, *name_size); - status = efi_thunk(get_next_variable, phys_name_size, - phys_name, phys_vendor); + if (!phys_name) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(get_next_variable, phys_name_size, + phys_name, phys_vendor); spin_unlock_irqrestore(&efi_runtime_lock, flags); + *vendor = *vnd; return status; }
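The buffer dance repeated in each thunk above exists because the mixed-mode thunk passes 32-bit physical addresses, and virt_to_phys_or_null_size() now returns 0 for any object that straddles a page boundary (its physical pages need not be contiguous). Copying the vendor GUID into an aligned scratch buffer makes the page-cross check trivially pass. A sketch of both ingredients, under the assumption that efi_guid_t is 16 bytes (crosses_page() and demo_stack_guid() are illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/efi.h>

/* crosses_page() is a hypothetical helper mirroring the check above */
static bool crosses_page(phys_addr_t pa, unsigned long size)
{
	/* for size >= 1: do the first and last byte land on different pages? */
	return ((pa ^ (pa + size - 1)) & PAGE_MASK) != 0;
}

static void demo_stack_guid(const efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));

	/*
	 * sizeof(efi_guid_t) is 16: aligning up within the 24-byte,
	 * 8-byte-aligned buffer advances by at most 8 bytes, so the
	 * copy stays in bounds, and a 16-byte object on a 16-byte
	 * boundary can never straddle a page.
	 */
	*vnd = *vendor;
}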
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 3b9fd679cea9..aefe845dff59 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -260,10 +260,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } - /* No need to reserve regions that will never be freed. */ - if (md.attribute & EFI_MEMORY_RUNTIME) - return; - size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); @@ -293,6 +289,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) early_memunmap(new, new_size); efi_memmap_install(new_phys, num_entries); + e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); + e820__update_table(e820_table); } /* diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index b02a36b2c14f..a42015b305f4 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -69,7 +69,7 @@ BEGIN { lprefix1_expr = "\\((66|!F3)\\)" lprefix2_expr = "\\(F3\\)" - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" + lprefix3_expr = "\\((F2|!F3|66&F2)\\)" lprefix_expr = "\\((66|F2|F3)\\)" max_lprefix = 4 @@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod) return add_flags(imm, mod) } -/^[0-9a-f]+\:/ { +/^[0-9a-f]+:/ { if (NR == 1) next # get index diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index 5bd949da7a4a..ac8eee093f9c 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info, return 0; } -int arch_copy_tls(struct task_struct *new) +int arch_set_tls(struct task_struct *new, unsigned long tls) { struct user_desc info; int idx, ret = -EFAULT; - if (copy_from_user(&info, - (void __user *) UPT_SI(&new->thread.regs.regs), - sizeof(info))) + if (copy_from_user(&info, (void __user *) tls, sizeof(info))) goto out; ret = -EINVAL; diff --git a/arch/x86/um/tls_64.c b/arch/x86/um/tls_64.c index 3a621e0d3925..ebd3855d9b13 100644 --- a/arch/x86/um/tls_64.c +++ b/arch/x86/um/tls_64.c @@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task) { } -int arch_copy_tls(struct task_struct *t) +int arch_set_tls(struct task_struct *t, unsigned long tls) { /* * If CLONE_SETTLS is set, we need to save the thread id - * (which is argument 5, child_tid, of clone) so it can be set - * during context switches. + * so it can be set during context switches.
*/ - t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; + t->thread.arch.fs = tls; return 0; } diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 5bfea374a160..6d4d8a5700b7 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -905,14 +905,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; +#ifdef CONFIG_X86_64 + unsigned int which; + u64 base; +#endif ret = 0; switch (msr) { #ifdef CONFIG_X86_64 - unsigned which; - u64 base; - case MSR_FS_BASE: which = SEGBASE_FS; goto set; case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; @@ -1215,6 +1216,7 @@ asmlinkage __visible void __init xen_start_kernel(void) x86_platform.get_nmi_reason = xen_get_nmi_reason; x86_init.resources.memory_setup = xen_memory_setup; + x86_init.irqs.intr_mode_select = x86_init_noop; x86_init.irqs.intr_mode_init = x86_init_noop; x86_init.oem.arch_setup = xen_arch_setup; x86_init.oem.banner = xen_banner; diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index c15db060a242..cd177772fe4d 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -126,10 +126,9 @@ hyper_iret: .globl xen_iret_start_crit, xen_iret_end_crit /* - * This is called by xen_hypervisor_callback in entry.S when it sees + * This is called by xen_hypervisor_callback in entry_32.S when it sees * that the EIP at the time of interrupt was between - * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in - * %eax so we can do a more refined determination of what to do. + * xen_iret_start_crit and xen_iret_end_crit. * * The stack format at this point is: * ---------------- @@ -138,70 +137,46 @@ hyper_iret: * eflags } outer exception info * cs } * eip } - * ---------------- <- edi (copy dest) + * ---------------- * eax : outer eax if it hasn't been restored * ---------------- - * eflags } nested exception info - * cs } (no ss/esp because we're nested - * eip } from the same ring) - * orig_eax }<- esi (copy src) - * - - - - - - - - - * fs } - * es } - * ds } SAVE_ALL state - * eax } - * : : - * ebx }<- esp - * ---------------- + * eflags } + * cs } nested exception info + * eip } + * return address : (into xen_hypervisor_callback) * - * In order to deliver the nested exception properly, we need to shift - * everything from the return addr up to the error code so it sits - * just under the outer exception info. This means that when we - * handle the exception, we do it in the context of the outer - * exception rather than starting a new one. + * In order to deliver the nested exception properly, we need to discard the + * nested exception frame such that when we handle the exception, we do it + * in the context of the outer exception rather than starting a new one. * - * The only caveat is that if the outer eax hasn't been restored yet - * (ie, it's still on stack), we need to insert its value into the - * SAVE_ALL state before going on, since it's usermode state which we - * eventually need to restore. + * The only caveat is that if the outer eax hasn't been restored yet (i.e. + * it's still on stack), we need to restore its value here. */ ENTRY(xen_iret_crit_fixup) /* * Paranoia: Make sure we're really coming from kernel space. * One could imagine a case where userspace jumps into the * critical range address, but just before the CPU delivers a - * GP, it decides to deliver an interrupt instead. 
Unlikely? - * Definitely. Easy to avoid? Yes. The Intel documents - * explicitly say that the reported EIP for a bad jump is the - * jump instruction itself, not the destination, but some - * virtual environments get this wrong. + * PF, it decides to deliver an interrupt instead. Unlikely? + * Definitely. Easy to avoid? Yes. */ - movl PT_CS(%esp), %ecx - andl $SEGMENT_RPL_MASK, %ecx - cmpl $USER_RPL, %ecx - je 2f - - lea PT_ORIG_EAX(%esp), %esi - lea PT_EFLAGS(%esp), %edi + testb $2, 2*4(%esp) /* nested CS */ + jnz 2f /* * If eip is before iret_restore_end then stack * hasn't been restored yet. */ - cmp $iret_restore_end, %eax + cmpl $iret_restore_end, 1*4(%esp) jae 1f - movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */ - movl %eax, PT_EAX(%esp) + movl 4*4(%esp), %eax /* load outer EAX */ + ret $4*4 /* discard nested EIP, CS, and EFLAGS as + * well as the just restored EAX */ - lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */ - - /* set up the copy */ -1: std - mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */ - rep movsl - cld - - lea 4(%edi), %esp /* point esp to new frame */ -2: jmp xen_do_upcall +1: + ret $3*4 /* discard nested EIP, CS, and EFLAGS */ +2: + ret +END(xen_iret_crit_fixup) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index a8e7beb6b7b5..8352037322df 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -22,6 +22,7 @@ config XTENSA select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KASAN if MMU select HAVE_ARCH_TRACEHOOK + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 359ab40e935a..c90fb944f9d8 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - regs->areg[0] = (long) error ? error : val; + regs->areg[2] = (long) error ? error : val; } #define SYSCALL_MAX_ARGS 6 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index db278a9e80c7..7cbf8bd6d922 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * involved. Much simpler to just not copy those live frames across. 
*/ -int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, - unsigned long thread_fn_arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn, + unsigned long thread_fn_arg, struct task_struct *p, + unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -264,9 +265,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, &regs->areg[XCHAL_NUM_AREGS - len/4], len); } - /* The thread pointer is passed in the '4th argument' (= a5) */ if (clone_flags & CLONE_SETTLS) - childregs->threadptr = childregs->areg[5]; + childregs->threadptr = tls; } else { p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index af7152560bc3..b771459778fe 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -56,7 +56,9 @@ static void __init populate(void *start, void *end) for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { phys_addr_t phys = - memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, + 0, + MEMBLOCK_ALLOC_ANYWHERE); if (!phys) panic("Failed to allocate page table page\n"); diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 59153d0aa890..b43f03620843 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -216,6 +216,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) unsigned tlbidx = w | (e << PAGE_SHIFT); unsigned r0 = dtlb ? read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx); + unsigned r1 = dtlb ? + read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx); unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); unsigned pte = get_pte_for_vaddr(vpn); unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK; @@ -231,8 +233,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) } if (tlb_asid == mm_asid) { - unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) : - read_itlb_translation(tlbidx); if ((pte ^ r1) & PAGE_MASK) { pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n", dtlb ? 'D' : 'I', w, e, r0, r1, pte); diff --git a/backport_remove_lists.txt b/backport_remove_lists.txt new file mode 100644 index 000000000000..3d7c5323bbc3 --- /dev/null +++ b/backport_remove_lists.txt @@ -0,0 +1,11 @@ +remove lists: +1. Architectures except x86, ARM (supported since 4.14) and powerpc (the data platform team is using powerpc; support may be needed some day). +2. CAN bus. +3. ALSA drivers, ASoC. +4. all Android files. +5. Bluetooth. +6. i40e & i40evf (replaced with the sourceforge version; no need to backport changes from upstream).
+ + +replace .gitignore directly + diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 86a607cf19a1..86cd718e0380 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg) kfree(bfqg); } -static void bfqg_and_blkg_get(struct bfq_group *bfqg) +void bfqg_and_blkg_get(struct bfq_group *bfqg) { /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ bfqg_get(bfqg); @@ -593,12 +593,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, */ entity = &bfqg->entity; for_each_entity(entity) { - bfqg = container_of(entity, struct bfq_group, entity); - if (bfqg != bfqd->root_group) { - parent = bfqg_parent(bfqg); + struct bfq_group *curr_bfqg = container_of(entity, + struct bfq_group, entity); + if (curr_bfqg != bfqd->root_group) { + parent = bfqg_parent(curr_bfqg); if (!parent) parent = bfqd->root_group; - bfq_group_set_parent(bfqg, parent); + bfq_group_set_parent(curr_bfqg, parent); } } @@ -634,6 +635,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_bfqq_expire(bfqd, bfqd->in_service_queue, false, BFQQE_PREEMPTED); + /* + * get extra reference to prevent bfqq from being freed in + * next possible deactivate + */ + bfqq->ref++; + if (bfq_bfqq_busy(bfqq)) bfq_deactivate_bfqq(bfqd, bfqq, false, false); else if (entity->on_st) @@ -653,6 +660,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (!bfqd->in_service_queue && !bfqd->rq_in_driver) bfq_schedule_dispatch(bfqd); + /* release extra ref taken above */ + bfq_put_queue(bfqq); } /** @@ -1380,6 +1389,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq) return bfqq->bfqd->root_group; } +void bfqg_and_blkg_get(struct bfq_group *bfqg) {} + +void bfqg_and_blkg_put(struct bfq_group *bfqg) {} + struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) { struct bfq_group *bfqg; diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 0c6214497fcc..48189ff88916 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -614,6 +614,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqq->pos_root = NULL; } + /* oom_bfqq does not participate in queue merging */ + if (bfqq == &bfqd->oom_bfqq) + return; + /* * bfqq cannot be merged any longer (see comments in * bfq_setup_cooperator): no point in adding bfqq into the @@ -3444,6 +3448,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, struct bfq_queue *bfqq) { + /* No point in idling for bfqq if it won't get requests any longer */ + if (unlikely(!bfqq_process_refs(bfqq))) + return false; + return (bfqq->wr_coeff > 1 && (bfqd->wr_busy_queues < bfq_tot_busy_queues(bfqd) || @@ -4077,6 +4085,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, bfqq_sequential_and_IO_bound, idling_boosts_thr; + /* No point in idling for bfqq if it won't get requests any longer */ + if (unlikely(!bfqq_process_refs(bfqq))) + return false; + bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); @@ -4170,6 +4182,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) struct bfq_data *bfqd = bfqq->bfqd; bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar; + /* No point in idling for bfqq if it won't get requests any longer */ + if (unlikely(!bfqq_process_refs(bfqq))) + return false; + if (unlikely(bfqd->strict_guarantees)) return true; @@ 
-4810,9 +4826,7 @@ void bfq_put_queue(struct bfq_queue *bfqq) { struct bfq_queue *item; struct hlist_node *n; -#ifdef CONFIG_BFQ_GROUP_IOSCHED struct bfq_group *bfqg = bfqq_group(bfqq); -#endif if (bfqq->bfqd) bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", @@ -4885,9 +4899,7 @@ void bfq_put_queue(struct bfq_queue *bfqq) bfqq->bfqd->last_completed_rq_bfqq = NULL; kmem_cache_free(bfq_pool, bfqq); -#ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_and_blkg_put(bfqg); -#endif } static void bfq_put_cooperator(struct bfq_queue *bfqq) @@ -6371,10 +6383,10 @@ static void bfq_exit_queue(struct elevator_queue *e) hrtimer_cancel(&bfqd->idle_slice_timer); -#ifdef CONFIG_BFQ_GROUP_IOSCHED /* release oom-queue reference to root group */ bfqg_and_blkg_put(bfqd->root_group); +#ifdef CONFIG_BFQ_GROUP_IOSCHED blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); #else spin_lock_irq(&bfqd->lock); diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 5d1a519640f6..1553a4e8f7ad 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -916,6 +916,7 @@ struct bfq_group { #else struct bfq_group { + struct bfq_entity entity; struct bfq_sched_data sched_data; struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; @@ -978,6 +979,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); +void bfqg_and_blkg_get(struct bfq_group *bfqg); void bfqg_and_blkg_put(struct bfq_group *bfqg); #ifdef CONFIG_BFQ_GROUP_IOSCHED diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 05f0bf4a1144..44079147e396 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -536,7 +536,9 @@ static void bfq_get_entity(struct bfq_entity *entity) bfqq->ref++; bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", bfqq, bfqq->ref); - } + } else + bfqg_and_blkg_get(container_of(entity, struct bfq_group, + entity)); } /** @@ -650,8 +652,14 @@ static void bfq_forget_entity(struct bfq_service_tree *st, entity->on_st = false; st->wsum -= entity->weight; - if (bfqq && !is_in_service) + if (is_in_service) + return; + + if (bfqq) bfq_put_queue(bfqq); + else + bfqg_and_blkg_put(container_of(entity, struct bfq_group, + entity)); } /** diff --git a/block/bio-integrity.c b/block/bio-integrity.c index fb95dbb21dd8..bf62c25cde8f 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -87,7 +87,7 @@ EXPORT_SYMBOL(bio_integrity_alloc); * Description: Used to free the integrity portion of a bio. Usually * called from bio_free(). */ -static void bio_integrity_free(struct bio *bio) +void bio_integrity_free(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_set *bs = bio->bi_pool; diff --git a/block/bio.c b/block/bio.c index b1170ec18464..94d697217887 100644 --- a/block/bio.c +++ b/block/bio.c @@ -233,6 +233,9 @@ fallback: void bio_uninit(struct bio *bio) { bio_disassociate_blkg(bio); + + if (bio_integrity(bio)) + bio_integrity_free(bio); } EXPORT_SYMBOL(bio_uninit); @@ -535,6 +538,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) } EXPORT_SYMBOL(zero_fill_bio_iter); +/** + * bio_truncate - truncate the bio to a smaller size @new_size + * @bio: the bio to be truncated + * @new_size: new size for truncating the bio + * + * Description: + * Truncate the bio to the new size @new_size. If bio_op(bio) is + * REQ_OP_READ, zero the truncated part. This function should only + * be used for handling corner cases, such as bio eod. + */ +void bio_truncate(struct bio *bio, unsigned new_size) +{ + struct bio_vec bv; + struct bvec_iter iter; + unsigned int done = 0; + bool truncated = false; + + if (new_size >= bio->bi_iter.bi_size) + return; + + if (bio_op(bio) != REQ_OP_READ) + goto exit; + + bio_for_each_segment(bv, bio, iter) { + if (done + bv.bv_len > new_size) { + unsigned offset; + + if (!truncated) + offset = new_size - done; + else + offset = 0; + zero_user(bv.bv_page, offset, bv.bv_len - offset); + truncated = true; + } + done += bv.bv_len; + } + + exit: + /* + * Don't touch bvec table here and make it really immutable, since + * fs bio user has to retrieve all pages via bio_for_each_segment_all + * in its .end_bio() callback. + * + * It is enough to truncate bio by updating .bi_size since we can make + * correct bvec with the updated .bi_size for drivers. + */ + bio->bi_iter.bi_size = new_size; +} +
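bio_truncate() above targets the end-of-device corner case: a read that runs past the last sector is clamped so the driver never sees out-of-range sectors, and the pages beyond the clamp are zeroed so the reader does not observe stale data. A sketch of the kind of caller this serves (demo_guard_eod() is a hypothetical stand-in for an fs-level guard):

static void demo_guard_eod(struct bio *bio, sector_t maxsector)
{
	sector_t start = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);

	if (!nr_sectors || start >= maxsector)
		return;	/* empty, or entirely past EOD: caller must error out */

	if (start + nr_sectors > maxsector)
		/* keep the in-range head; zero the clamped tail on reads */
		bio_truncate(bio, (unsigned int)(maxsector - start) << 9);
}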
/** * bio_put - release a reference to a bio * @bio: bio to release reference to @@ -751,10 +803,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return false; - if (bio->bi_vcnt > 0 && !bio_full(bio, len)) { + if (bio->bi_vcnt > 0) { struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (page_is_mergeable(bv, page, len, off, same_page)) { + if (bio->bi_iter.bi_size > UINT_MAX - len) + return false; bv->bv_len += len; bio->bi_iter.bi_size += len; return true; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 1eb8895be4c6..ba25f771e751 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "blk.h" #define MAX_KEY_LEN 100 @@ -215,6 +216,216 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, } EXPORT_SYMBOL_GPL(blkg_lookup_slowpath); +#ifdef CONFIG_SMP +static LIST_HEAD(alloc_list); +static DEFINE_SPINLOCK(alloc_lock); +static void dkstats_alloc_fn(struct work_struct *); +static DECLARE_DELAYED_WORK(dkstats_alloc_work, dkstats_alloc_fn); + +static void dkstats_alloc_fn(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + static struct disk_stats *dkstats; + struct blkcg_dkstats *ds; + bool empty = false; + + dkstats_alloc: + if (!dkstats) { + dkstats = alloc_percpu(struct disk_stats); + if (!dkstats) { + /* allocation failed, try again after some time */ + schedule_delayed_work(dwork, msecs_to_jiffies(10)); + return; + } + } + + /* the allocation succeeded, so it is now safe to take the lock */ + spin_lock_irq(&alloc_lock); + if (list_empty(&alloc_list)) { + empty = true; + goto out; + } + + ds = list_first_entry(&alloc_list, struct blkcg_dkstats, alloc_node); + list_del_init(&ds->alloc_node); + swap(ds->dkstats, dkstats); + + empty = list_empty(&alloc_list); + out: + spin_unlock_irq(&alloc_lock); + if (!empty) { + cond_resched(); + goto dkstats_alloc; + } +} +#endif + +static struct blkcg_dkstats *blkcg_dkstats_alloc(struct hd_struct *hd) +{ + struct blkcg_dkstats *ds; + unsigned long flags; + + /* inside the IO path, do not consider GFP_KERNEL */ + ds = kzalloc(sizeof(struct blkcg_dkstats), GFP_ATOMIC); + if (!ds) + return NULL; + + ds->part = hd; + INIT_LIST_HEAD(&ds->list_node); + INIT_LIST_HEAD(&ds->alloc_node); + +#ifdef CONFIG_SMP + /* percpu memory can't be allocated inside the IO path */ + spin_lock_irqsave(&alloc_lock, flags); + list_add(&ds->alloc_node, &alloc_list); + schedule_delayed_work(&dkstats_alloc_work, 0); + spin_unlock_irqrestore(&alloc_lock, flags); +#else +
memset(&ds->dkstats, 0, sizeof(ds->dkstats)); +#endif + return ds; +} + +static void blkcg_dkstats_rcu_free(struct rcu_head *rcu_head) +{ + struct blkcg_dkstats *ds; + ds = container_of(rcu_head, struct blkcg_dkstats, rcu_head); + +#ifdef CONFIG_SMP + if (ds->dkstats) + free_percpu(ds->dkstats); +#endif + kfree(ds); +} + +static void blkcg_dkstats_free(struct blkcg_dkstats *ds) +{ +#ifdef CONFIG_SMP + if (!list_empty(&ds->alloc_node)) { + spin_lock_irq(&alloc_lock); + list_del_init(&ds->alloc_node); + spin_unlock_irq(&alloc_lock); + } +#endif + list_del_init(&ds->list_node); + call_rcu(&ds->rcu_head, blkcg_dkstats_rcu_free); +} + +static void blkcg_dkstats_free_all(struct request_queue *q) +{ + struct blkcg_gq *blkg; + + spin_lock_irq(&q->queue_lock); + list_for_each_entry(blkg, &q->blkg_list, q_node) { + struct blkcg *blkcg = blkg->blkcg; + struct blkcg_dkstats *ds, *ns; + + spin_lock_irq(&blkcg->lock); + list_for_each_entry_safe(ds, ns, &blkcg->dkstats_list, list_node) { + if (part_to_disk(ds->part)->queue != q) + continue; + + if (blkcg->dkstats_hint == ds) + blkcg->dkstats_hint = NULL; + + blkcg_dkstats_free(ds); + } + spin_unlock_irq(&blkcg->lock); + } + spin_unlock_irq(&q->queue_lock); +} + +static inline int blkcg_do_io_stat(struct blkcg *blkcg) +{ + return blkcg->dkstats_on; +} + +struct disk_stats *blkcg_dkstats_find(struct blkcg *blkcg, int cpu, struct hd_struct *hd, int *alloc) +{ + struct blkcg_dkstats *ds; + + /* blkcg_dkstats list protected by rcu */ + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (!blkcg_do_io_stat(blkcg)) + return NULL; + + ds = rcu_dereference(blkcg->dkstats_hint); + if (ds && ds->part == hd) + goto out; + + list_for_each_entry(ds, &blkcg->dkstats_list, list_node) { + if (ds->part == hd) { + rcu_assign_pointer(blkcg->dkstats_hint, ds); + goto out; + } + } + return NULL; + out: + if (alloc) + *alloc = 0; +#ifdef CONFIG_SMP + return ds->dkstats ? per_cpu_ptr(ds->dkstats, cpu) : NULL; +#else + return &ds->dkstats; +#endif +} +EXPORT_SYMBOL_GPL(blkcg_dkstats_find); + +static struct blkcg_dkstats *blkcg_dkstats_find_locked(struct blkcg *blkcg, int cpu, struct hd_struct *hd) +{ + struct blkcg_dkstats *ds; + + ds = rcu_dereference(blkcg->dkstats_hint); + if (ds && ds->part == hd) + return ds; + + list_for_each_entry(ds, &blkcg->dkstats_list, list_node) { + if (ds->part == hd) { + rcu_assign_pointer(blkcg->dkstats_hint, ds); + return ds; + } + } + return NULL; +} + +struct disk_stats *blkcg_dkstats_find_create(struct blkcg *blkcg, int cpu, struct hd_struct *hd) +{ + struct blkcg_dkstats *ds; + struct disk_stats *dk; + unsigned long flags; + int alloc = 1; + + /* blkcg_dkstats list protected by rcu */ + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (!blkcg_do_io_stat(blkcg)) + return NULL; + + /* double lock, singleton here*/ + dk = blkcg_dkstats_find(blkcg, cpu, hd, &alloc); + if (dk || !alloc) + return dk; + + spin_lock_irqsave(&blkcg->lock, flags); + ds = blkcg_dkstats_find_locked(blkcg, cpu, hd); + if (!ds) { + ds = blkcg_dkstats_alloc(hd); + if (!ds) + goto out; + list_add(&ds->list_node, &blkcg->dkstats_list); + } +#ifdef CONFIG_SMP + dk = ds->dkstats ? per_cpu_ptr(ds->dkstats, cpu) : NULL; +#else + dk = &ds->dkstats; +#endif +out: + spin_unlock_irqrestore(&blkcg->lock, flags); + return dk; +} +EXPORT_SYMBOL_GPL(blkcg_dkstats_find_create); + /* * If @new_blkg is %NULL, this function tries to allocate a new one as * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return. 
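Both lookup helpers above are meant to be called under rcu_read_lock(), and on SMP blkcg_dkstats_find_create() can still return NULL until the deferred worker attaches the per-CPU area. A sketch of the intended calling convention (demo_account_read() is hypothetical; the real accounting goes through the blkcg_part_stat_* helpers used by blk-core.c below):

static void demo_account_read(struct blkcg *blkcg, struct hd_struct *hd,
			      unsigned long sectors)
{
	struct disk_stats *dk;
	int cpu;

	rcu_read_lock();
	cpu = get_cpu();
	dk = blkcg_dkstats_find_create(blkcg, cpu, hd);
	/* NULL while stats are disabled or the percpu area is pending */
	if (dk)
		dk->sectors[READ] += sectors;
	put_cpu();
	rcu_read_unlock();
}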
@@ -491,6 +702,128 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css, return 0; } +static void blkcg_dkstats_seqf_dummy(struct blkcg *blkcg, struct hd_struct *hd, + struct seq_file *seqf) +{ + char buf[BDEVNAME_SIZE]; + seq_printf(seqf, "%4d %7d %s %lu %lu %lu " + "%u %lu %lu %lu %u %u %u %u %u %u\n", + MAJOR(part_devt(hd)), MINOR(part_devt(hd)), + disk_name(part_to_disk(hd), hd->partno, buf), + 0UL, 0UL, 0UL, 0U, 0UL, 0UL, 0UL, 0U, 0U, 0U, + 0U, jiffies_to_msecs(part_stat_read(hd, io_ticks)), + jiffies_to_msecs(part_stat_read(hd, time_in_queue))); +} + +static void blkcg_dkstats_seqf_print(struct blkcg *blkcg, struct hd_struct *hd, + struct seq_file *seqf) +{ + char buf[BDEVNAME_SIZE]; + unsigned long rd_nsecs = blkcg_part_stat_read(blkcg, hd, nsecs[READ]); + unsigned long wr_nsecs = blkcg_part_stat_read(blkcg, hd, nsecs[WRITE]); + + seq_printf(seqf, "%4d %7d %s %lu %lu %lu " + "%u %lu %lu %lu %u %u %u %u %u %u\n", + MAJOR(part_devt(hd)), MINOR(part_devt(hd)), + disk_name(part_to_disk(hd), hd->partno, buf), + blkcg_part_stat_read(blkcg, hd, ios[READ]), + blkcg_part_stat_read(blkcg, hd, merges[READ]), + blkcg_part_stat_read(blkcg, hd, sectors[READ]), + (unsigned int)(rd_nsecs / 1000000), + blkcg_part_stat_read(blkcg, hd, ios[WRITE]), + blkcg_part_stat_read(blkcg, hd, merges[WRITE]), + blkcg_part_stat_read(blkcg, hd, sectors[WRITE]), + (unsigned int)(wr_nsecs / 1000000), 0, + (unsigned int)((rd_nsecs + wr_nsecs) / 1000000), 0, + jiffies_to_msecs(part_stat_read(hd, io_ticks)), + jiffies_to_msecs(part_stat_read(hd, time_in_queue))); +} + +static int blkcg_dkstats_show_partition(struct blkcg *blkcg, struct gendisk *gd, + struct seq_file *seqf) +{ + struct disk_part_iter piter; + struct hd_struct *part; + + disk_part_iter_init(&piter, gd, DISK_PITER_INCL_EMPTY_PART0); + while ((part = disk_part_iter_next(&piter))) { + struct blkcg_dkstats *ds; + bool found = false; + + spin_lock_irq(&blkcg->lock); + list_for_each_entry(ds, &blkcg->dkstats_list, list_node) { + if (ds->part == part) { + blkcg_dkstats_seqf_print(blkcg, part, seqf); + found = true; + break; + } + } + /* the loop cursor is invalid after a full walk; test the flag */ + if (!found) + blkcg_dkstats_seqf_dummy(blkcg, part, seqf); + + spin_unlock_irq(&blkcg->lock); + } + + disk_part_iter_exit(&piter); + return 0; +} + +static int blkcg_dkstats_enable(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 val) +{ + struct blkcg *blkcg = css_to_blkcg(css); + struct blkcg_dkstats *ds; + + mutex_lock(&blkcg_pol_mutex); + spin_lock_irq(&blkcg->lock); + + if (blkcg->dkstats_on) { + blkcg->dkstats_on = val; + goto out; + } + + list_for_each_entry(ds, &blkcg->dkstats_list, list_node) { +#ifdef CONFIG_SMP + if (ds->dkstats) { + unsigned int cpu; + for_each_possible_cpu(cpu) { + struct disk_stats *dk = per_cpu_ptr(ds->dkstats, cpu); + memset(dk, 0, sizeof(struct disk_stats)); + } + } +#else + memset(&ds->dkstats, 0, sizeof(ds->dkstats)); +#endif + } + blkcg->dkstats_on = val; + + out: + spin_unlock_irq(&blkcg->lock); + mutex_unlock(&blkcg_pol_mutex); + return 0; +} + +static int blkcg_dkstats_show(struct seq_file *sf, void *v) +{ + struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); + struct class_dev_iter iter; + struct device *dev; + + mutex_lock(&blkcg_pol_mutex); + + if (!blkcg_do_io_stat(blkcg)) + goto out; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while ((dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + blkcg_dkstats_show_partition(blkcg, disk, sf); + } + + class_dev_iter_exit(&iter); + out: + mutex_unlock(&blkcg_pol_mutex); + return 0; +} + const char
*blkg_dev_name(struct blkcg_gq *blkg) { /* some drivers (floppy) instantiate a queue w/o disk registered */ @@ -1022,6 +1355,11 @@ static struct cftype blkcg_legacy_files[] = { .name = "reset_stats", .write_u64 = blkcg_reset_stats, }, + { + .name = "diskstats", + .write_u64 = blkcg_dkstats_enable, + .seq_show = blkcg_dkstats_show, + }, { } /* terminate */ }; @@ -1057,6 +1395,13 @@ static struct cftype blkcg_legacy_files[] = { static void blkcg_css_offline(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); + struct blkcg_dkstats *ds, *ns; + + spin_lock_irq(&blkcg->lock); + blkcg->dkstats_hint = NULL; + list_for_each_entry_safe(ds, ns, &blkcg->dkstats_list, list_node) + blkcg_dkstats_free(ds); + spin_unlock_irq(&blkcg->lock); /* this prevents anyone from attaching or migrating to this blkcg */ wb_blkcg_offline(blkcg); @@ -1163,6 +1508,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) spin_lock_init(&blkcg->lock); INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN); INIT_HLIST_HEAD(&blkcg->blkg_list); + INIT_LIST_HEAD(&blkcg->dkstats_list); #ifdef CONFIG_CGROUP_WRITEBACK INIT_LIST_HEAD(&blkcg->cgwb_list); refcount_set(&blkcg->cgwb_refcnt, 1); @@ -1267,6 +1613,7 @@ void blkcg_drain_queue(struct request_queue *q) */ void blkcg_exit_queue(struct request_queue *q) { + blkcg_dkstats_free_all(q); blkg_destroy_all(q); blk_throtl_exit(q); } diff --git a/block/blk-core.c b/block/blk-core.c index d5e668ec751b..00a1d478ddc5 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -70,6 +70,55 @@ struct kmem_cache *blk_requestq_cachep; */ static struct workqueue_struct *kblockd_workqueue; +static void blkcg_stat_acct(struct blkcg *blkcg, struct request *req, int new_io) +{ + struct hd_struct *part = req->part; + int rw = rq_data_dir(req), cpu; + + if (!new_io) { + cpu = part_stat_lock(); + blkcg_part_stat_inc(blkcg, cpu, part, merges[rw]); + part_stat_unlock(); + } +} + +static void blkcg_account_io_completion(struct request *req, struct bio *bio, + unsigned int bytes) +{ + if (blk_do_io_stat(req)) { + const int rw = bio_data_dir(bio); + struct blkcg * blkcg = bio_blkcg(bio); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = req->part; + blkcg_part_stat_add(blkcg, cpu, part, sectors[rw], bytes >> 9); + part_stat_unlock(); + } +} + +static void blkcg_account_io_done(struct request *req, struct bio *bio) +{ + /* + * Account IO completion. flush_rq isn't accounted as a + * normal IO on queueing nor completion. Accounting the + * containing request is enough. 
+ */ + if (blk_do_io_stat(req) && !(req->cmd_flags & RQF_FLUSH_SEQ)) { + unsigned long duration = ktime_get_ns() - req->start_time_ns; + const int rw = rq_data_dir(req); + struct hd_struct *part = req->part; + struct blkcg *blkcg = bio_blkcg(bio); + int cpu; + + cpu = part_stat_lock(); + blkcg_part_stat_inc(blkcg, cpu, part, ios[rw]); + blkcg_part_stat_add(blkcg, cpu, part, nsecs[rw], duration); + part_stat_unlock(); + } +} + /** * blk_queue_flag_set - atomically set a queue flag * @flag: flag to be set @@ -639,6 +688,7 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio, req->__data_len += bio->bi_iter.bi_size; blk_account_io_start(req, false); + blkcg_stat_acct(bio_blkcg(bio), req, 0); return true; } @@ -661,6 +711,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, req->nr_phys_segments = segments + 1; blk_account_io_start(req, false); + blkcg_stat_acct(bio_blkcg(bio), req, 0); return true; no_merge: req_set_nomerge(q, req); @@ -886,11 +937,14 @@ generic_make_request_checks(struct bio *bio) } /* - * For a REQ_NOWAIT based request, return -EOPNOTSUPP - * if queue is not a request based queue. + * Non-mq queues do not honor REQ_NOWAIT, so complete a bio + * with BLK_STS_AGAIN status in order to catch -EAGAIN and + * to give a chance to the caller to repeat request gracefully. */ - if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) - goto not_supported; + if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) { + status = BLK_STS_AGAIN; + goto end_io; + } if (should_fail_bio(bio)) goto end_io; @@ -959,6 +1013,9 @@ generic_make_request_checks(struct bio *bio) */ create_io_context(GFP_ATOMIC, q->node); + /* attach cgroup */ + bio_associate_blkg(bio); + if (!blkcg_bio_issue_check(q, bio)) return false; @@ -1454,6 +1511,10 @@ bool blk_update_request(struct request *req, blk_status_t error, struct bio *bio = req->bio; unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); + blkcg_account_io_completion(req, bio, bio_bytes); + if (req->bio == req->biotail) + blkcg_account_io_done(req, bio); + if (bio_bytes == bio->bi_iter.bi_size) req->bio = bio->bi_next; diff --git a/block/blk-flush.c b/block/blk-flush.c index 1eec9cbe5a0a..5aa6fada2259 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -69,6 +69,7 @@ #include #include #include +#include #include "blk.h" #include "blk-mq.h" @@ -398,7 +399,7 @@ void blk_insert_flush(struct request *rq) */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { - blk_mq_request_bypass_insert(rq, false); + blk_mq_request_bypass_insert(rq, false, false); return; } @@ -492,6 +493,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); + lockdep_register_key(&fq->key); + lockdep_set_class(&fq->mq_flush_lock, &fq->key); + return fq; fail_rq: @@ -506,6 +510,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq) if (!fq) return; + lockdep_unregister_key(&fq->key); kfree(fq->flush_rq); kfree(fq); } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index e01267f99183..9a599cc28c29 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } -static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) +static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) { struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = 
iocg_to_blkg(iocg); @@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) /* clear or maintain depending on the overage */ if (time_before_eq64(vtime, now->vnow)) { blkcg_clear_delay(blkg); - return; + return false; } if (!atomic_read(&blkg->use_delay) && time_before_eq64(vtime, now->vnow + vmargin)) - return; + return false; /* use delay */ if (cost) { @@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); if (hrtimer_is_queued(&iocg->delay_timer) && abs(oexpires - expires) <= margin_ns / 4) - return; + return true; hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), margin_ns / 4, HRTIMER_MODE_ABS); + return true; } static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) @@ -1317,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg) return false; /* is something in flight? */ - if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime)) + if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime)) return false; return true; @@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) */ if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { atomic64_add(abs_cost, &iocg->abs_vdebt); - iocg_kick_delay(iocg, &now, cost); + if (iocg_kick_delay(iocg, &now, cost)) + blkcg_schedule_throttle(rqos->q, + (bio->bi_opf & REQ_SWAP) == REQ_SWAP); return; } diff --git a/block/blk-map.c b/block/blk-map.c index 3a62e471d81b..b0790268ed9d 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, return 0; unmap_rq: - __blk_rq_unmap_user(bio); + blk_rq_unmap_user(bio); fail: rq->bio = NULL; return ret; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index ca22afd47b3d..74cedea56034 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, bool has_sched, struct request *rq) { - /* dispatch flush rq directly */ - if (rq->rq_flags & RQF_FLUSH_SEQ) { - spin_lock(&hctx->lock); - list_add(&rq->queuelist, &hctx->dispatch); - spin_unlock(&hctx->lock); + /* + * dispatch flush and passthrough rq directly + * + * passthrough request has to be added to hctx->dispatch directly. + * For some reason, device may be in one situation which can't + * handle FS request, so STS_RESOURCE is always returned and the + * FS request will be added to hctx->dispatch. However passthrough + * request may be required at that time for fixing the problem. If + * passthrough request is added to scheduler queue, there isn't any + * chance to dispatch it given we prioritize requests in hctx->dispatch. + */ + if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) return true; - } if (has_sched) rq->rq_flags |= RQF_SORTED; @@ -391,8 +397,32 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head, WARN_ON(e && (rq->tag != -1)); - if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) + if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { + /* + * Firstly normal IO request is inserted to scheduler queue or + * sw queue, meantime we add flush request to dispatch queue( + * hctx->dispatch) directly and there is at most one in-flight + * flush request for each hw queue, so it doesn't matter to add + * flush request to tail or front of the dispatch queue. 
+ * + * Secondly in case of NCQ, flush request belongs to non-NCQ + * command, and queueing it will fail when there is any + * in-flight normal IO request(NCQ command). When adding flush + * rq to the front of hctx->dispatch, it is easier to introduce + * extra time to flush rq's latency because of S_SCHED_RESTART + * compared with adding to the tail of dispatch queue, then + * chance of flush merge is increased, and less flush requests + * will be issued to controller. It is observed that ~10% time + * is saved in blktests block/004 on disk attached to AHCI/NCQ + * drive when adding flush rq to the front of hctx->dispatch. + * + * Simply queue flush rq to the front of hctx->dispatch so that + * intensive flush workloads can benefit in case of NCQ HW. + */ + at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head; + blk_mq_request_bypass_insert(rq, at_head, false); goto run; + } if (e && e->type->ops.insert_requests) { LIST_HEAD(list); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index a0d3ce30fa08..a09ab0c3d074 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -166,20 +166,25 @@ static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { + const size_t size = PAGE_SIZE - 1; unsigned int i, first = 1; - ssize_t ret = 0; + int ret = 0, pos = 0; for_each_cpu(i, hctx->cpumask) { if (first) - ret += sprintf(ret + page, "%u", i); + ret = snprintf(pos + page, size - pos, "%u", i); else - ret += sprintf(ret + page, ", %u", i); + ret = snprintf(pos + page, size - pos, ", %u", i); + + if (ret >= size - pos) + break; first = 0; + pos += ret; } - ret += sprintf(ret + page, "\n"); - return ret; + ret = snprintf(pos + page, size + 1 - pos, "\n"); + return pos + ret; } static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = { diff --git a/block/blk-mq.c b/block/blk-mq.c index ec791156e9cc..a8c1a45cedde 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -761,7 +761,7 @@ static void blk_mq_requeue_work(struct work_struct *work) * merge. */ if (rq->rq_flags & RQF_DONTPREP) - blk_mq_request_bypass_insert(rq, false); + blk_mq_request_bypass_insert(rq, false, false); else blk_mq_sched_insert_request(rq, true, false, false); } @@ -1313,7 +1313,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, q->mq_ops->commit_rqs(hctx); spin_lock(&hctx->lock); - list_splice_init(list, &hctx->dispatch); + list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); /* @@ -1668,12 +1668,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, * Should only be used carefully, when the caller knows we want to * bypass a potential IO scheduler on the target device. 
*/ -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) +void blk_mq_request_bypass_insert(struct request *rq, bool at_head, + bool run_queue) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; spin_lock(&hctx->lock); - list_add_tail(&rq->queuelist, &hctx->dispatch); + if (at_head) + list_add(&rq->queuelist, &hctx->dispatch); + else + list_add_tail(&rq->queuelist, &hctx->dispatch); spin_unlock(&hctx->lock); if (run_queue) @@ -1863,7 +1867,7 @@ insert: if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_request_bypass_insert(rq, run_queue); + blk_mq_request_bypass_insert(rq, false, run_queue); return BLK_STS_OK; } @@ -1879,7 +1883,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) - blk_mq_request_bypass_insert(rq, true); + blk_mq_request_bypass_insert(rq, false, true); else if (ret != BLK_STS_OK) blk_mq_end_request(rq, ret); @@ -1913,7 +1917,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, if (ret != BLK_STS_OK) { if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { - blk_mq_request_bypass_insert(rq, + blk_mq_request_bypass_insert(rq, false, list_empty(list)); break; } @@ -3003,6 +3007,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) { + /* + * blk_mq_map_queues() and multiple .map_queues() implementations + * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the + * number of hardware queues. + */ + if (set->nr_maps == 1) + set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; + if (set->ops->map_queues && !is_kdump_kernel()) { int i; diff --git a/block/blk-mq.h b/block/blk-mq.h index 32c62c64e6c2..f2075978db50 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, */ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head); -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); +void blk_mq_request_bypass_insert(struct request *rq, bool at_head, + bool run_queue); void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); diff --git a/block/blk-settings.c b/block/blk-settings.c index 5f6dcc7a47bd..5119211390a3 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -58,6 +58,7 @@ void blk_set_default_limits(struct queue_limits *lim) lim->alignment_offset = 0; lim->io_opt = 0; lim->misaligned = 0; + lim->nbd_ignore_blksize_set = 0; lim->zoned = BLK_ZONED_NONE; } EXPORT_SYMBOL(blk_set_default_limits); @@ -328,7 +329,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); * storage device can address. The default of 512 covers most * hardware. 
**/ -void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) +void blk_queue_logical_block_size(struct request_queue *q, unsigned int size) { q->limits.logical_block_size = size; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 46f5198be017..f69e0249f2da 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -489,6 +489,24 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, return count; } +static ssize_t queue_nbd_ignore_blksize_set_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.nbd_ignore_blksize_set, (page)); +} + +static ssize_t queue_nbd_ignore_blksize_set_store(struct request_queue *q, + const char *page, size_t count) +{ + unsigned long ignore_blksize_set; + ssize_t ret = queue_var_store(&ignore_blksize_set, page, count); + + if (ret < 0) + return ret; + + q->limits.nbd_ignore_blksize_set = ignore_blksize_set; + return ret; +} + static ssize_t queue_wc_show(struct request_queue *q, char *page) { if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) @@ -719,6 +737,12 @@ static struct queue_sysfs_entry queue_wb_lat_entry = { .store = queue_wb_lat_store, }; +static struct queue_sysfs_entry queue_nbd_ignore_blksize_set_entry = { + .attr = {.name = "nbd_ignore_blksize_set", .mode = S_IRUGO | S_IWUSR }, + .show = queue_nbd_ignore_blksize_set_show, + .store = queue_nbd_ignore_blksize_set_store, +}; + #ifdef CONFIG_BLK_DEV_THROTTLING_LOW static struct queue_sysfs_entry throtl_sample_time_entry = { .attr = {.name = "throttle_sample_time", .mode = 0644 }, @@ -763,6 +787,7 @@ static struct attribute *queue_attrs[] = { &queue_wb_lat_entry.attr, &queue_poll_delay_entry.attr, &queue_io_timeout_entry.attr, + &queue_nbd_ignore_blksize_set_entry.attr, #ifdef CONFIG_BLK_DEV_THROTTLING_LOW &throtl_sample_time_entry.attr, #endif diff --git a/block/blk.h b/block/blk.h index 47fba9362e60..ee3d5664d962 100644 --- a/block/blk.h +++ b/block/blk.h @@ -30,6 +30,7 @@ struct blk_flush_queue { * at the same time */ struct request *orig_rq; + struct lock_class_key key; spinlock_t mq_flush_lock; }; @@ -121,6 +122,7 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, #ifdef CONFIG_BLK_DEV_INTEGRITY void blk_flush_integrity(void); bool __bio_integrity_endio(struct bio *); +void bio_integrity_free(struct bio *bio); static inline bool bio_integrity_endio(struct bio *bio) { if (bio_integrity(bio)) @@ -166,6 +168,9 @@ static inline bool bio_integrity_endio(struct bio *bio) { return true; } +static inline void bio_integrity_free(struct bio *bio) +{ +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ unsigned long blk_rq_timeout(unsigned long timeout); diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 347dda16c2f4..6cbb7926534c 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct bsg_set *bset = container_of(q->tag_set, struct bsg_set, tag_set); - int sts = BLK_STS_IOERR; + blk_status_t sts = BLK_STS_IOERR; int ret; blk_mq_start_request(req); diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 6ca015f92766..7f053468b50d 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -354,6 +355,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) * but we call blkdev_ioctl, which gets the lock for us */ case BLKRRPART: + case BLKREPORTZONE: + case BLKRESETZONE: + case BLKGETZONESZ: + case 
BLKGETNRZONES: return blkdev_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg)); case BLKBSZSET_32: @@ -401,6 +406,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKTRACETEARDOWN: /* compatible */ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); return ret; + case IOC_PR_REGISTER: + case IOC_PR_RESERVE: + case IOC_PR_RELEASE: + case IOC_PR_PREEMPT: + case IOC_PR_PREEMPT_ABORT: + case IOC_PR_CLEAR: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); default: if (disk->fops->compat_ioctl) ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); diff --git a/block/genhd.c b/block/genhd.c index 26b31fcae217..ecc45a34e581 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -37,7 +37,7 @@ struct kobject *block_depr; static DEFINE_SPINLOCK(ext_devt_lock); static DEFINE_IDR(ext_devt_idr); -static const struct device_type disk_type; +const struct device_type disk_type; static void disk_check_events(struct disk_events *ev, unsigned int *clearing_ptr); @@ -1347,7 +1347,7 @@ static char *block_devnode(struct device *dev, umode_t *mode, return NULL; } -static const struct device_type disk_type = { +const struct device_type disk_type = { .name = "disk", .groups = disk_attr_groups, .release = disk_release, diff --git a/crypto/Kconfig b/crypto/Kconfig index 9e524044d312..b2cc0ad3792a 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -309,6 +309,7 @@ config CRYPTO_AEGIS128 config CRYPTO_AEGIS128_SIMD bool "Support SIMD acceleration for AEGIS-128" depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) + depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800 default y config CRYPTO_AEGIS128_AESNI_SSE2 @@ -499,10 +500,10 @@ config CRYPTO_ESSIV encryption. This driver implements a crypto API template that can be - instantiated either as a skcipher or as a aead (depending on the + instantiated either as an skcipher or as an AEAD (depending on the type of the first template argument), and which defers encryption and decryption requests to the encapsulated cipher after applying - ESSIV to the input IV. Note that in the aead case, it is assumed + ESSIV to the input IV. Note that in the AEAD case, it is assumed that the keys are presented in the same format used by the authenc template, and that the IV appears at the end of the authenticated associated data (AAD) region (which is how dm-crypt uses it.) diff --git a/crypto/Makefile b/crypto/Makefile index fcb1ee679782..aa740c8492b9 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -93,7 +93,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o aegis128-y := aegis128-core.o ifeq ($(ARCH),arm) -CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp +CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv8-a -mfloat-abi=softfp CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8 aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o endif diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 879cf23f7489..3d8e53010cda 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -134,11 +134,13 @@ void af_alg_release_parent(struct sock *sk) sk = ask->parent; ask = alg_sk(sk); - lock_sock(sk); + local_bh_disable(); + bh_lock_sock(sk); ask->nokey_refcnt -= nokey; if (!last) last = !--ask->refcnt; - release_sock(sk); + bh_unlock_sock(sk); + local_bh_enable(); if (last) sock_put(sk); @@ -1043,7 +1045,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) af_alg_free_resources(areq); sock_put(sk); - iocb->ki_complete(iocb, err ? 
err : resultlen, 0); + iocb->ki_complete(iocb, err ? err : (int)resultlen, 0); } EXPORT_SYMBOL_GPL(af_alg_async_cb); diff --git a/crypto/algapi.c b/crypto/algapi.c index de30ddc952d8..bb8329e49956 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -257,6 +257,7 @@ void crypto_alg_tested(const char *name, int err) struct crypto_alg *alg; struct crypto_alg *q; LIST_HEAD(list); + bool best; down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -280,6 +281,21 @@ found: alg->cra_flags |= CRYPTO_ALG_TESTED; + /* Only satisfy larval waiters if we are the best. */ + best = true; + list_for_each_entry(q, &crypto_alg_list, cra_list) { + if (crypto_is_moribund(q) || !crypto_is_larval(q)) + continue; + + if (strcmp(alg->cra_name, q->cra_name)) + continue; + + if (q->cra_priority > alg->cra_priority) { + best = false; + break; + } + } + list_for_each_entry(q, &crypto_alg_list, cra_list) { if (q == alg) continue; @@ -303,10 +319,12 @@ found: continue; if ((q->cra_flags ^ alg->cra_flags) & larval->mask) continue; - if (!crypto_mod_get(alg)) - continue; - larval->adult = alg; + if (best && crypto_mod_get(alg)) + larval->adult = alg; + else + larval->adult = ERR_PTR(-EAGAIN); + continue; } @@ -669,11 +687,9 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn); void crypto_drop_spawn(struct crypto_spawn *spawn) { - if (!spawn->alg) - return; - down_write(&crypto_alg_sem); - list_del(&spawn->list); + if (spawn->alg) + list_del(&spawn->list); up_write(&crypto_alg_sem); } EXPORT_SYMBOL_GPL(crypto_drop_spawn); @@ -681,22 +697,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn); static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { struct crypto_alg *alg; - struct crypto_alg *alg2; down_read(&crypto_alg_sem); alg = spawn->alg; - alg2 = alg; - if (alg2) - alg2 = crypto_mod_get(alg2); + if (alg && !crypto_mod_get(alg)) { + alg->cra_flags |= CRYPTO_ALG_DYING; + alg = NULL; + } up_read(&crypto_alg_sem); - if (!alg2) { - if (alg) - crypto_shoot_alg(alg); - return ERR_PTR(-EAGAIN); - } - - return alg; + return alg ?: ERR_PTR(-EAGAIN); } struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index c1601edd70e3..e2c8ab408bed 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -56,7 +56,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; struct crypto_skcipher *tfm = pask->private; - unsigned int bs = crypto_skcipher_blocksize(tfm); + unsigned int bs = crypto_skcipher_chunksize(tfm); struct af_alg_async_req *areq; int err = 0; size_t len = 0; diff --git a/crypto/api.c b/crypto/api.c index d8ba54142620..eda0c56b8615 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -97,7 +97,7 @@ static void crypto_larval_destroy(struct crypto_alg *alg) struct crypto_larval *larval = (void *)alg; BUG_ON(!crypto_is_larval(alg)); - if (larval->adult) + if (!IS_ERR_OR_NULL(larval->adult)) crypto_mod_put(larval->adult); kfree(larval); } @@ -178,6 +178,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = ERR_PTR(-ETIMEDOUT); else if (!alg) alg = ERR_PTR(-ENOENT); + else if (IS_ERR(alg)) + ; else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); @@ -344,13 +346,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) return len; } -void crypto_shoot_alg(struct crypto_alg *alg) +static void crypto_shoot_alg(struct 
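The crypto_alg_tested() change above makes a freshly tested algorithm satisfy waiting larvals only when no same-name larval on the registration list carries a higher cra_priority; otherwise the waiters get ERR_PTR(-EAGAIN) and retry the lookup, eventually picking up the better candidate. A standalone sketch of that selection rule, with illustrative names and types rather than the kernel's:

#include <stdio.h>
#include <string.h>

struct alg {
	const char *name;
	int prio;
};

/* Returns 1 when 'tested' should be handed to waiters, i.e. no
 * same-name candidate outranks it, mirroring the 'best' flag above. */
static int is_best(const struct alg *tested, const struct alg *all, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(tested->name, all[i].name) &&
		    all[i].prio > tested->prio)
			return 0;	/* waiters retry instead */
	return 1;
}

int main(void)
{
	struct alg algs[] = { { "aes", 100 }, { "aes", 300 } };

	printf("%d %d\n", is_best(&algs[0], algs, 2),
	       is_best(&algs[1], algs, 2));	/* prints: 0 1 */
	return 0;
}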
crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } -EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c index 76d2ce3a1b5b..5154e280ada2 100644 --- a/crypto/asymmetric_keys/asym_tpm.c +++ b/crypto/asymmetric_keys/asym_tpm.c @@ -486,6 +486,7 @@ static int tpm_key_encrypt(struct tpm_key *tk, if (ret < 0) goto error_free_tfm; + ret = -ENOMEM; req = akcipher_request_alloc(tfm, GFP_KERNEL); if (!req) goto error_free_tfm; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 364b9df9d631..d7f43d4ea925 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -184,6 +184,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; req = akcipher_request_alloc(tfm, GFP_KERNEL); if (!req) goto error_free_tfm; diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c index 910e0b46012e..b785c476de67 100644 --- a/crypto/crypto_user_base.c +++ b/crypto/crypto_user_base.c @@ -213,8 +213,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, drop_alg: crypto_mod_put(alg); - if (err) + if (err) { + kfree_skb(skb); return err; + } return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c index 8bad88413de1..1be95432fa23 100644 --- a/crypto/crypto_user_stat.c +++ b/crypto/crypto_user_stat.c @@ -328,8 +328,10 @@ int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, drop_alg: crypto_mod_put(alg); - if (err) + if (err) { + kfree_skb(skb); return err; + } return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } diff --git a/crypto/ecc.c b/crypto/ecc.c index dfe114bc0c4a..8ee787723c5c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -1284,10 +1284,11 @@ EXPORT_SYMBOL(ecc_point_mult_shamir); static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits) { + const __be64 *src = (__force __be64 *)in; int i; for (i = 0; i < ndigits; i++) - out[i] = __swab64(in[ndigits - 1 - i]); + out[i] = be64_to_cpu(src[ndigits - 1 - i]); } static int __ecc_is_key_valid(const struct ecc_curve *curve, diff --git a/crypto/hash_info.c b/crypto/hash_info.c index c754cb75dd1a..a49ff96bde77 100644 --- a/crypto/hash_info.c +++ b/crypto/hash_info.c @@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = { [HASH_ALGO_TGR_128] = "tgr128", [HASH_ALGO_TGR_160] = "tgr160", [HASH_ALGO_TGR_192] = "tgr192", - [HASH_ALGO_SM3_256] = "sm3-256", + [HASH_ALGO_SM3_256] = "sm3", [HASH_ALGO_STREEBOG_256] = "streebog256", [HASH_ALGO_STREEBOG_512] = "streebog512", }; diff --git a/crypto/internal.h b/crypto/internal.h index 93df7bec844a..e506a57e2243 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -68,7 +68,6 @@ void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); -void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm(struct crypto_alg *alg, diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 543792e0ebf0..a4f3b3f342c8 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -24,6 
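The ecc_swap_digits() fix above matters on big-endian hosts: the input is a sequence of big-endian 64-bit digits in reverse order, so the portable conversion is be64_to_cpu(), not an unconditional __swab64() (which is only correct on little-endian CPUs). A userspace sketch of the corrected routine, with be64toh()/htobe64() standing in for the kernel helpers:

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

static void ecc_swap_digits(const uint64_t *in, uint64_t *out,
			    unsigned int ndigits)
{
	unsigned int i;

	/* Reverse the digit order and convert each digit from big
	 * endian; on big-endian hosts be64toh() is a no-op, which is
	 * exactly where the old __swab64() version went wrong. */
	for (i = 0; i < ndigits; i++)
		out[i] = be64toh(in[ndigits - 1 - i]);
}

int main(void)
{
	uint64_t in[2] = { htobe64(0x1111222233334444ULL),
			   htobe64(0x5555666677778888ULL) };
	uint64_t out[2];

	ecc_swap_digits(in, out, 2);
	printf("%" PRIx64 " %" PRIx64 "\n", out[0], out[1]);
	return 0;
}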
+24,8 @@ static struct kset *pcrypt_kset; struct pcrypt_instance_ctx { struct crypto_aead_spawn spawn; + struct padata_shell *psenc; + struct padata_shell *psdec; atomic_t tfm_count; }; @@ -32,6 +34,12 @@ struct pcrypt_aead_ctx { unsigned int cb_cpu; }; +static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx( + struct crypto_aead *tfm) +{ + return aead_instance_ctx(aead_alg_instance(tfm)); +} + static int pcrypt_aead_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { @@ -63,7 +71,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err) struct padata_priv *padata = pcrypt_request_padata(preq); padata->info = err; - req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; padata_do_serial(padata); } @@ -90,6 +97,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req) struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); + struct pcrypt_instance_ctx *ictx; + + ictx = pcrypt_tfm_ictx(aead); memset(padata, 0, sizeof(struct padata_priv)); @@ -103,7 +113,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu); + err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; @@ -132,6 +142,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req) struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); + struct pcrypt_instance_ctx *ictx; + + ictx = pcrypt_tfm_ictx(aead); memset(padata, 0, sizeof(struct padata_priv)); @@ -145,7 +158,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu); + err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; @@ -192,6 +205,8 @@ static void pcrypt_free(struct aead_instance *inst) struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_aead(&ctx->spawn); + padata_free_shell(ctx->psdec); + padata_free_shell(ctx->psenc); kfree(inst); } @@ -233,12 +248,22 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (!inst) return -ENOMEM; + err = -ENOMEM; + ctx = aead_instance_ctx(inst); + ctx->psenc = padata_alloc_shell(pencrypt); + if (!ctx->psenc) + goto out_free_inst; + + ctx->psdec = padata_alloc_shell(pdecrypt); + if (!ctx->psdec) + goto out_free_psenc; + crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst)); err = crypto_grab_aead(&ctx->spawn, name, 0, 0); if (err) - goto out_free_inst; + goto out_free_psdec; alg = crypto_spawn_aead_alg(&ctx->spawn); err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base); @@ -271,6 +296,10 @@ out: out_drop_aead: crypto_drop_aead(&ctx->spawn); +out_free_psdec: + padata_free_shell(ctx->psdec); +out_free_psenc: + padata_free_shell(ctx->psenc); out_free_inst: kfree(inst); goto out; @@ -362,11 +391,12 @@ err: static void __exit pcrypt_exit(void) { + crypto_unregister_template(&pcrypt_tmpl); + pcrypt_fini_padata(pencrypt); pcrypt_fini_padata(pdecrypt); kset_unregister(pcrypt_kset); - crypto_unregister_template(&pcrypt_tmpl); } subsys_initcall(pcrypt_init); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c39e39e55dc2..7473c5bc06b1 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2102,6 +2102,7 @@ static void 
generate_random_aead_testvec(struct aead_request *req, * If the key or authentication tag size couldn't be set, no need to * continue to encrypt. */ + vec->crypt_error = 0; if (vec->setkey_error || vec->setauthsize_error) goto done; @@ -2245,10 +2246,12 @@ static int test_aead_vs_generic_impl(const char *driver, req, tsgls); if (err) goto out; - err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg, - req, tsgls); - if (err) - goto out; + if (vec.crypt_error == 0) { + err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, + cfg, req, tsgls); + if (err) + goto out; + } cond_resched(); } err = 0; @@ -2678,6 +2681,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req, skcipher_request_set_callback(req, 0, crypto_req_done, &wait); skcipher_request_set_crypt(req, &src, &dst, vec->len, iv); vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + if (vec->crypt_error != 0) { + /* + * The only acceptable error here is for an invalid length, so + * skcipher decryption should fail with the same error too. + * We'll test for this. But to keep the API usage well-defined, + * explicitly initialize the ciphertext buffer too. + */ + memset((u8 *)vec->ctext, 0, vec->len); + } done: snprintf(name, max_namelen, "\"random: len=%u klen=%u\"", vec->len, vec->klen); diff --git a/drivers/Makefile b/drivers/Makefile index aaef17cc6512..b3cb100a6483 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -76,9 +76,9 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ -obj-y += scsi/ obj-y += nvme/ obj-$(CONFIG_ATA) += ata/ +obj-y += scsi/ obj-$(CONFIG_TARGET_CORE) += target/ obj-$(CONFIG_MTD) += mtd/ obj-$(CONFIG_SPI) += spi/ diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 60bbc5090abe..751ed38f2a10 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -463,6 +464,18 @@ struct lpss_device_links { const char *consumer_hid; const char *consumer_uid; u32 flags; + const struct dmi_system_id *dep_missing_ids; +}; + +/* Please keep this list sorted alphabetically by vendor and model */ +static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} }; /* @@ -473,9 +486,17 @@ struct lpss_device_links { * the supplier is not enumerated until after the consumer is probed. 
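The testmgr.c changes above share one idea: a randomly generated vector is only usable for the decryption pass when generating it (the encryption step) succeeded, and crypt_error must be reset explicitly so a stale value cannot leak between iterations. A stub-level sketch of the fixed control flow; run_one() is a hypothetical stand-in for the real test helpers, which would record vec->crypt_error:

#include <stdio.h>

enum op { ENCRYPT, DECRYPT };

struct testvec {
	int crypt_error;
};

static int run_one(enum op op) { return 0; }	/* hypothetical helper */

static int test_vec(struct testvec *vec)
{
	int err;

	vec->crypt_error = 0;		/* reset, as the fix above does */
	err = run_one(ENCRYPT);
	if (err)
		return err;
	if (vec->crypt_error == 0)	/* only decrypt valid vectors */
		err = run_one(DECRYPT);
	return err;
}

int main(void)
{
	struct testvec vec = { 0 };

	printf("%d\n", test_vec(&vec));
	return 0;
}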
*/ static const struct lpss_device_links lpss_device_links[] = { + /* CHT External sdcard slot controller depends on PMIC I2C ctrl */ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME}, + /* CHT iGPU depends on PMIC I2C controller */ {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, + /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */ + {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME, + i2c1_dep_missing_dmi_ids}, + /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */ {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, + /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */ + {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, }; static bool hid_uid_match(struct acpi_device *adev, @@ -570,7 +591,8 @@ static void acpi_lpss_link_consumer(struct device *dev1, if (!dev2) return; - if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1))) + if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) + || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1))) device_link_add(dev2, dev1, link->flags); put_device(dev2); @@ -585,7 +607,8 @@ static void acpi_lpss_link_supplier(struct device *dev1, if (!dev2) return; - if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2))) + if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) + || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2))) device_link_add(dev1, dev2, link->flags); put_device(dev2); diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 00ec4f2bf015..c05050f474cd 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -31,6 +31,44 @@ static const struct acpi_device_id forbidden_id_list[] = { {"", 0}, }; +static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev) +{ + struct device *dev; + + dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev); + return dev ? 
to_platform_device(dev) : NULL; +} + +static int acpi_platform_device_remove_notify(struct notifier_block *nb, + unsigned long value, void *arg) +{ + struct acpi_device *adev = arg; + struct platform_device *pdev; + + switch (value) { + case ACPI_RECONFIG_DEVICE_ADD: + /* Nothing to do here */ + break; + case ACPI_RECONFIG_DEVICE_REMOVE: + if (!acpi_device_enumerated(adev)) + break; + + pdev = acpi_platform_device_find_by_companion(adev); + if (!pdev) + break; + + platform_device_unregister(pdev); + put_device(&pdev->dev); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block acpi_platform_notifier = { + .notifier_call = acpi_platform_device_remove_notify, +}; + static void acpi_platform_fill_resource(struct acpi_device *adev, const struct resource *src, struct resource *dest) { @@ -130,3 +168,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, return pdev; } EXPORT_SYMBOL_GPL(acpi_create_platform_device); + +void __init acpi_platform_init(void) +{ + acpi_reconfig_notifier_register(&acpi_platform_notifier); +} diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index b5516b04ffc0..6e9ec6e3fe47 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat) } #endif +static bool acpi_no_watchdog; + static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) { const struct acpi_table_wdat *wdat = NULL; acpi_status status; - if (acpi_disabled) + if (acpi_disabled || acpi_no_watchdog) return NULL; status = acpi_get_table(ACPI_SIG_WDAT, 0, @@ -88,6 +90,14 @@ bool acpi_has_watchdog(void) } EXPORT_SYMBOL_GPL(acpi_has_watchdog); +/* ACPI watchdog can be disabled on boot command line */ +static int __init disable_acpi_watchdog(char *str) +{ + acpi_no_watchdog = true; + return 1; +} +__setup("acpi_no_watchdog", disable_acpi_watchdog); + void __init acpi_watchdog_init(void) { const struct acpi_wdat_entry *entries; @@ -126,12 +136,11 @@ void __init acpi_watchdog_init(void) gas = &entries[i].register_region; res.start = gas->address; + res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { res.flags = IORESOURCE_MEM; - res.end = res.start + ALIGN(gas->access_width, 4) - 1; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { res.flags = IORESOURCE_IO; - res.end = res.start + gas->access_width - 1; } else { pr_warn("Unsupported address space: %u\n", gas->space_id); diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index bcf8f7501db7..a74c1a0e892d 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void); acpi_status acpi_hw_enable_all_wakeup_gpes(void); +u8 acpi_hw_check_all_gpes(void); + acpi_status acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index cf4e061bb0f0..8438e33aa447 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -244,7 +244,7 @@ cleanup: * FUNCTION: acpi_ds_get_field_names * * PARAMETERS: info - create_field info structure - * ` walk_state - Current method state + * walk_state - Current method state * arg - First parser arg for the field name list * * RETURN: Status diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 
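For the acpi_watchdog.c fix above, note that the GAS access_width field is an encoded size (1 = byte, 2 = word, 3 = dword, 4 = qword), not a byte count, which is why neither ALIGN(access_width, 4) nor access_width itself gave the right region length. A sketch of the computation; the macro bodies are reproduced here on the assumption that they match ACPICA's actypes.h:

#include <stdio.h>

/* Assumed to match the ACPICA definitions used above. */
#define ACPI_ACCESS_BIT_WIDTH(size)	(1 << ((size) + 2))
#define ACPI_ACCESS_BYTE_WIDTH(size)	(ACPI_ACCESS_BIT_WIDTH(size) >> 3)

int main(void)
{
	unsigned int w;

	/* Prints 1, 2, 4 and 8 bytes for access widths 1..4. */
	for (w = 1; w <= 4; w++)
		printf("access_width %u -> %u byte(s)\n",
		       w, ACPI_ACCESS_BYTE_WIDTH(w));
	return 0;
}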
c88fd31208a5..4bcf15bf03de 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); + /* + * Disassembler: handle create field operators here. + * + * create_buffer_field is a deferred op that is typically processed in load + * pass 2. However, disassembly of control method contents walk the parse + * tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed + * in a later walk. This is a problem when there is a control method that + * has the same name as the AML_CREATE object. In this case, any use of the + * name segment will be detected as a method call rather than a reference + * to a buffer field. + * + * This earlier creation during disassembly solves this issue by inserting + * the named object in the ACPI namespace so that references to this name + * would be a name string rather than a method call. + */ + if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) && + (walk_state->op_info->flags & AML_CREATE)) { + status = acpi_ds_create_buffer_field(op, walk_state); + return_ACPI_STATUS(status); + } + /* We are only interested in opcodes that have an associated name */ if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index 9e2f5a05c066..bad2257356fe 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event) handler) (acpi_gbl_fixed_event_handlers[event].context)); } +/******************************************************************************* + * + * FUNCTION: acpi_any_fixed_event_status_set + * + * PARAMETERS: None + * + * RETURN: TRUE or FALSE + * + * DESCRIPTION: Checks the PM status register for active fixed events + * + ******************************************************************************/ + +u32 acpi_any_fixed_event_status_set(void) +{ + acpi_status status; + u32 in_status; + u32 in_enable; + u32 i; + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + /* + * Check for all possible Fixed Events and dispatch those that are active + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + + /* Both the status and enable bits must be on for this event */ + + if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && + (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { + return (TRUE); + } + } + + return (FALSE); +} + #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 04a40d563dd6..84b0b410310e 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void) ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) +/****************************************************************************** + * + * FUNCTION: acpi_any_gpe_status_set + * + * PARAMETERS: None + * + * RETURN: Whether or not the status bit is set for any GPE + * + * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any + * of them is set or FALSE otherwise. 
+ * + ******************************************************************************/ +u32 acpi_any_gpe_status_set(void) +{ + acpi_status status; + u8 ret; + + ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + ret = acpi_hw_check_all_gpes(); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return (ret); +} + +ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set) + /******************************************************************************* * * FUNCTION: acpi_install_gpe_block diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 565bd3f29f31..b1d7d5f92495 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, return (AE_OK); } +/****************************************************************************** + * + * FUNCTION: acpi_hw_get_gpe_block_status + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info + * + * RETURN: Success + * + * DESCRIPTION: Produce a combined GPE status bits mask for the given block. + * + ******************************************************************************/ + +static acpi_status +acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block, + void *ret_ptr) +{ + struct acpi_gpe_register_info *gpe_register_info; + u64 in_enable, in_status; + acpi_status status; + u8 *ret = ret_ptr; + u32 i; + + /* Examine each GPE Register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { + gpe_register_info = &gpe_block->register_info[i]; + + status = acpi_hw_read(&in_enable, + &gpe_register_info->enable_address); + if (ACPI_FAILURE(status)) { + continue; + } + + status = acpi_hw_read(&in_status, + &gpe_register_info->status_address); + if (ACPI_FAILURE(status)) { + continue; + } + + *ret |= in_enable & in_status; + } + + return (AE_OK); +} + /****************************************************************************** * * FUNCTION: acpi_hw_disable_all_gpes @@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) return_ACPI_STATUS(status); } +/****************************************************************************** + * + * FUNCTION: acpi_hw_check_all_gpes + * + * PARAMETERS: None + * + * RETURN: Combined status of all GPEs + * + * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the + * status bit is set for at least one of them or FALSE otherwise. + * + ******************************************************************************/ + +u8 acpi_hw_check_all_gpes(void) +{ + u8 ret = 0; + + ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); + + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret); + + return (ret != 0); +} + #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 777f6f7122b4..e0d82fab1f44 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes) * New allocation must be visible in all pgd before it can be found by * an NMI allocating from the pool. 
*/ - vmalloc_sync_all(); + vmalloc_sync_mappings(); rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); if (rc) diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 558fedf8a7a1..254a7d98b9d4 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -38,6 +38,8 @@ #define PREFIX "ACPI: " #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF +#define ACPI_BATTERY_CAPACITY_VALID(capacity) \ + ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN) #define ACPI_BATTERY_DEVICE_NAME "Battery" @@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) static bool acpi_battery_is_degraded(struct acpi_battery *battery) { - return battery->full_charge_capacity && battery->design_capacity && + return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) && battery->full_charge_capacity < battery->design_capacity; } @@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { - int ret = 0; + int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0; struct acpi_battery *battery = to_acpi_battery(psy); if (acpi_battery_present(battery)) { @@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: - if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) ret = -ENODEV; else val->intval = battery->design_capacity * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_ENERGY_FULL: - if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) ret = -ENODEV; else val->intval = battery->full_charge_capacity * 1000; @@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = battery->capacity_now * 1000; break; case POWER_SUPPLY_PROP_CAPACITY: - if (battery->capacity_now && battery->full_charge_capacity) - val->intval = battery->capacity_now * 100/ - battery->full_charge_capacity; + if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) + full_capacity = battery->full_charge_capacity; + else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_capacity = battery->design_capacity; + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN || + full_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + ret = -ENODEV; else - val->intval = 0; + val->intval = battery->capacity_now * 100/ + full_capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: if (battery->state & ACPI_BATTERY_STATE_CRITICAL) @@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +static enum power_supply_property charge_battery_full_cap_broken_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void) static int sysfs_add_battery(struct acpi_battery *battery) 
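The POWER_SUPPLY_PROP_CAPACITY branch above now falls back from a broken full_charge_capacity to design_capacity, and fails cleanly when neither (or capacity_now) is usable. Restated as a standalone function, returning -1 where the driver returns -ENODEV:

#include <stdio.h>

#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
	((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)

static int capacity_percent(unsigned int now, unsigned int full,
			    unsigned int design, int *percent)
{
	unsigned int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN;

	/* Prefer the reported full charge, fall back to the design value. */
	if (ACPI_BATTERY_CAPACITY_VALID(full))
		full_capacity = full;
	else if (ACPI_BATTERY_CAPACITY_VALID(design))
		full_capacity = design;

	if (now == ACPI_BATTERY_VALUE_UNKNOWN ||
	    full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
		return -1;

	*percent = now * 100 / full_capacity;
	return 0;
}

int main(void)
{
	int pct;

	/* Firmware reports full_charge_capacity as 0: fall back to design. */
	if (!capacity_percent(4200, 0, 5600, &pct))
		printf("%d%%\n", pct);	/* prints 75% */
	return 0;
}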
{ struct power_supply_config psy_cfg = { .drv_data = battery, }; + bool full_cap_broken = false; + + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_cap_broken = true; if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) { - battery->bat_desc.properties = charge_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(charge_battery_props); - } else if (battery->full_charge_capacity == 0) { - battery->bat_desc.properties = - energy_battery_full_cap_broken_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_full_cap_broken_props); + if (full_cap_broken) { + battery->bat_desc.properties = + charge_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = charge_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_props); + } } else { - battery->bat_desc.properties = energy_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_props); + if (full_cap_broken) { + battery->bat_desc.properties = + energy_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = energy_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_props); + } } battery->bat_desc.name = acpi_device_bid(battery->device); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 48bc96d45bab..54002670cb7a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -153,7 +153,7 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) { acpi_status status; - if (!*data) + if (!data) return -EINVAL; status = acpi_get_data(handle, acpi_bus_private_data_handler, data); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 4a2cde2c536a..985afc62da82 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -78,6 +78,28 @@ static const struct dmi_system_id lid_blacklst[] = { DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"), }, }, + { + /* + * Medion Akoya E2215T, notification of the LID device only + * happens on close, not on open and _LID always returns closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, + { + /* + * Razer Blade Stealth 13 late 2019, notification of the LID device + * only happens on close, not on open and _LID always returns closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, {} }; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 08bb9f2f2d23..5e4a8860a9c0 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1314,9 +1314,19 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off) */ int acpi_dev_pm_attach(struct device *dev, bool power_on) { + /* + * Skip devices whose ACPI companions match the device IDs below, + * because they require special power management handling incompatible + * with the generic ACPI PM domain. 
+ */ + static const struct acpi_device_id special_pm_ids[] = { + {"PNP0C0B", }, /* Generic ACPI fan */ + {"INT3404", }, /* Fan */ + {} + }; struct acpi_device *adev = ACPI_COMPANION(dev); - if (!adev) + if (!adev || !acpi_match_device_ids(adev, special_pm_ids)) return 0; /* diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index da1e5c5ce150..ca5cdb621c2a 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec); static struct acpi_ec *boot_ec; static bool boot_ec_is_ecdt = false; +static struct workqueue_struct *ec_wq; static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ @@ -461,7 +462,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec) ec_dbg_evt("Command(%s) submitted/blocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); ec->nr_pending_queries++; - schedule_work(&ec->work); + queue_work(ec_wq, &ec->work); } } @@ -525,26 +526,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) } #ifdef CONFIG_PM_SLEEP -static bool acpi_ec_query_flushed(struct acpi_ec *ec) +static void __acpi_ec_flush_work(void) { - bool flushed; - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); - flushed = !ec->nr_pending_queries; - spin_unlock_irqrestore(&ec->lock, flags); - return flushed; -} - -static void __acpi_ec_flush_event(struct acpi_ec *ec) -{ - /* - * When ec_freeze_events is true, we need to flush events in - * the proper position before entering the noirq stage. - */ - wait_event(ec->wait, acpi_ec_query_flushed(ec)); - if (ec_query_wq) - flush_workqueue(ec_query_wq); + drain_workqueue(ec_wq); /* flush ec->work */ + flush_workqueue(ec_query_wq); /* flush queries */ } static void acpi_ec_disable_event(struct acpi_ec *ec) @@ -554,15 +539,21 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) spin_lock_irqsave(&ec->lock, flags); __acpi_ec_disable_event(ec); spin_unlock_irqrestore(&ec->lock, flags); - __acpi_ec_flush_event(ec); + + /* + * When ec_freeze_events is true, we need to flush events in + * the proper position before entering the noirq stage. + */ + __acpi_ec_flush_work(); } void acpi_ec_flush_work(void) { - if (first_ec) - __acpi_ec_flush_event(first_ec); + /* Without ec_wq there is nothing to flush. 
*/ + if (!ec_wq) + return; - flush_scheduled_work(); + __acpi_ec_flush_work(); } #endif /* CONFIG_PM_SLEEP */ @@ -2042,25 +2033,33 @@ static struct acpi_driver acpi_ec_driver = { .drv.pm = &acpi_ec_pm, }; -static inline int acpi_ec_query_init(void) +static void acpi_ec_destroy_workqueues(void) { - if (!ec_query_wq) { - ec_query_wq = alloc_workqueue("kec_query", 0, - ec_max_queries); - if (!ec_query_wq) - return -ENODEV; + if (ec_wq) { + destroy_workqueue(ec_wq); + ec_wq = NULL; } - return 0; -} - -static inline void acpi_ec_query_exit(void) -{ if (ec_query_wq) { destroy_workqueue(ec_query_wq); ec_query_wq = NULL; } } +static int acpi_ec_init_workqueues(void) +{ + if (!ec_wq) + ec_wq = alloc_ordered_workqueue("kec", 0); + + if (!ec_query_wq) + ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries); + + if (!ec_wq || !ec_query_wq) { + acpi_ec_destroy_workqueues(); + return -ENODEV; + } + return 0; +} + static const struct dmi_system_id acpi_ec_no_wakeup[] = { { .ident = "Thinkpad X1 Carbon 6th", @@ -2091,8 +2090,7 @@ int __init acpi_ec_init(void) int result; int ecdt_fail, dsdt_fail; - /* register workqueue for _Qxx evaluations */ - result = acpi_ec_query_init(); + result = acpi_ec_init_workqueues(); if (result) return result; @@ -2123,6 +2121,6 @@ static void __exit acpi_ec_exit(void) { acpi_bus_unregister_driver(&acpi_ec_driver); - acpi_ec_query_exit(); + acpi_ec_destroy_workqueues(); } #endif /* 0 */ diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index a2e844a8e9ed..41168c027a5a 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -374,19 +374,21 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) } EXPORT_SYMBOL_GPL(acpi_os_map_memory); -static void acpi_os_drop_map_ref(struct acpi_ioremap *map) +/* Must be called with mutex_lock(&acpi_ioremap_lock) */ +static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map) { - if (!--map->refcount) + unsigned long refcount = --map->refcount; + + if (!refcount) list_del_rcu(&map->list); + return refcount; } static void acpi_os_map_cleanup(struct acpi_ioremap *map) { - if (!map->refcount) { - synchronize_rcu_expedited(); - acpi_unmap(map->phys, map->virt); - kfree(map); - } + synchronize_rcu_expedited(); + acpi_unmap(map->phys, map->virt); + kfree(map); } /** @@ -406,6 +408,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map) void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) { struct acpi_ioremap *map; + unsigned long refcount; if (!acpi_permanent_mmap) { __acpi_unmap_table(virt, size); @@ -419,10 +422,11 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); return; } - acpi_os_drop_map_ref(map); + refcount = acpi_os_drop_map_ref(map); mutex_unlock(&acpi_ioremap_lock); - acpi_os_map_cleanup(map); + if (!refcount) + acpi_os_map_cleanup(map); } EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); @@ -457,6 +461,7 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) { u64 addr; struct acpi_ioremap *map; + unsigned long refcount; if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) return; @@ -472,10 +477,11 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) mutex_unlock(&acpi_ioremap_lock); return; } - acpi_os_drop_map_ref(map); + refcount = acpi_os_drop_map_ref(map); mutex_unlock(&acpi_ioremap_lock); - acpi_os_map_cleanup(map); + if (!refcount) + acpi_os_map_cleanup(map); } EXPORT_SYMBOL(acpi_os_unmap_generic_address); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c 
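The acpi_os_unmap_iomem()/acpi_os_unmap_generic_address() rework above is an instance of a general pattern: answer the last-reference question while holding the lock, then run the potentially sleeping cleanup only after dropping it, so cleanup never races with a concurrent lookup. A generic userspace sketch of the same shape using pthreads:

#include <pthread.h>
#include <stdlib.h>

struct mapping {
	unsigned long refcount;
	/* ...mapped resources... */
};

static pthread_mutex_t maps_lock = PTHREAD_MUTEX_INITIALIZER;

static void mapping_put(struct mapping *map)
{
	unsigned long refcount;

	pthread_mutex_lock(&maps_lock);
	refcount = --map->refcount;	/* unlink from lookup lists here */
	pthread_mutex_unlock(&maps_lock);

	/* Cleanup runs unlocked: only the thread that saw zero gets here. */
	if (!refcount)
		free(map);
}

int main(void)
{
	struct mapping *map = calloc(1, sizeof(*map));

	if (!map)
		return 1;
	map->refcount = 1;
	mapping_put(map);
	return 0;
}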
index aad6be5c0af0..915650bf519f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2174,6 +2174,7 @@ int __init acpi_scan_init(void) acpi_pci_root_init(); acpi_pci_link_init(); acpi_processor_init(); + acpi_platform_init(); acpi_lpss_init(); acpi_apd_init(); acpi_cmos_rtc_init(); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2af937a8b1c5..abd39cc5ff88 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -977,21 +977,55 @@ static int acpi_s2idle_prepare_late(void) return 0; } -static void acpi_s2idle_wake(void) +static void acpi_s2idle_sync(void) { /* - * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has - * not triggered while suspended, so bail out. + * The EC driver uses the system workqueue and an additional special + * one, so those need to be flushed too. */ - if (!acpi_sci_irq_valid() || - irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) - return; + acpi_ec_flush_work(); + acpi_os_wait_events_complete(); /* synchronize Notify handling */ +} + +static bool acpi_s2idle_wake(void) +{ + if (!acpi_sci_irq_valid()) + return pm_wakeup_pending(); + + while (pm_wakeup_pending()) { + /* + * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the + * SCI has not triggered while suspended, so bail out (the + * wakeup is pending anyway and the SCI is not the source of + * it). + */ + if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) + return true; + + /* + * If the status bit of any enabled fixed event is set, the + * wakeup is regarded as valid. + */ + if (acpi_any_fixed_event_status_set()) + return true; + + /* Check wakeups from drivers sharing the SCI. */ + if (acpi_check_wakeup_handlers()) + return true; + + /* + * If there are no EC events to process and at least one of the + * other enabled GPEs is active, the wakeup is regarded as a + * genuine one. + * + * Note that the checks below must be carried out in this order + * to avoid returning prematurely due to a change of the EC GPE + * status bit from unset to set between the checks with the + * status bits of all the other GPEs unset. + */ + if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe()) + return true; - /* - * If there are EC events to process, the wakeup may be a spurious one - * coming from the EC. - */ - if (acpi_ec_dispatch_gpe()) { /* * Cancel the wakeup and process all pending events in case * there are any wakeup ones in there. @@ -1001,16 +1035,22 @@ static void acpi_s2idle_wake(void) * should be missed by canceling the wakeup here. */ pm_system_cancel_wakeup(); + + acpi_s2idle_sync(); + /* - * The EC driver uses the system workqueue and an additional - * special one, so those need to be flushed too. + * The SCI is in the "suspended" state now and it cannot produce + * new wakeup events till the rearming below, so if any of them + * are pending here, they must be resulting from the processing + * of EC events above or coming from somewhere else. */ - acpi_os_wait_events_complete(); /* synchronize EC GPE processing */ - acpi_ec_flush_work(); - acpi_os_wait_events_complete(); /* synchronize Notify handling */ + if (pm_wakeup_pending()) + return true; rearm_wake_irq(acpi_sci_irq); } + + return false; } static void acpi_s2idle_restore_early(void) @@ -1024,6 +1064,13 @@ static void acpi_s2idle_restore_early(void) static void acpi_s2idle_restore(void) { + /* + * Drain pending events before restoring the working-state configuration + * of GPEs. 
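acpi_s2idle_wake() above turns the old single EC check into a loop over increasingly specific tests, and the ordering is the point: fixed events, handlers sharing the SCI, then the GPEs, with the EC GPE dispatched last. A condensed restatement with every ACPI query reduced to a stub:

#include <stdbool.h>
#include <stdio.h>

/* All predicates are stubs standing in for the ACPI queries above. */
static bool wakeup_pending(void)	{ return false; }
static bool sci_wakeup_armed(void)	{ return false; }
static bool fixed_event_set(void)	{ return false; }
static bool shared_handler_wake(void)	{ return false; }
static bool non_ec_gpe_active(void)	{ return false; }
static void drain_ec_and_cancel(void)	{ }

static bool s2idle_wake(void)
{
	while (wakeup_pending()) {
		if (sci_wakeup_armed())		/* SCI did not fire */
			return true;
		if (fixed_event_set())		/* PM1 fixed event */
			return true;
		if (shared_handler_wake())	/* driver sharing the SCI */
			return true;
		if (non_ec_gpe_active())	/* a non-EC GPE is pending */
			return true;
		drain_ec_and_cancel();		/* spurious EC wake */
		if (wakeup_pending())		/* new event while draining */
			return true;
		/* rearm the wake IRQ and loop */
	}
	return false;
}

int main(void)
{
	printf("%d\n", s2idle_wake());	/* 0: no wakeup pending */
	return 0;
}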
+ */ + acpi_os_wait_events_complete(); /* synchronize GPE processing */ + acpi_s2idle_sync(); + s2idle_wakeup = false; acpi_enable_all_runtime_gpes(); diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 41675d24a9bc..3d90480ce1b1 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -2,6 +2,7 @@ extern void acpi_enable_wakeup_devices(u8 sleep_state); extern void acpi_disable_wakeup_devices(u8 sleep_state); +extern bool acpi_check_wakeup_handlers(void); extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 75948a3f1a20..c60d2c6d31d6 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -819,14 +819,14 @@ end: * interface: * echo unmask > /sys/firmware/acpi/interrupts/gpe00 */ -#define ACPI_MASKABLE_GPE_MAX 0xFF +#define ACPI_MASKABLE_GPE_MAX 0x100 static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata; static int __init acpi_gpe_set_masked_gpes(char *val) { u8 gpe; - if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX) + if (kstrtou8(val, 0, &gpe)) return -EINVAL; set_bit(gpe, acpi_masked_gpes_map); @@ -838,7 +838,7 @@ void __init acpi_gpe_apply_masked_gpes(void) { acpi_handle handle; acpi_status status; - u8 gpe; + u16 gpe; for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) { status = acpi_get_gpe_device(gpe, &handle); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 31014c7d3793..e63fd7bfd3a5 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -336,6 +336,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + + /* + * Desktops which falsely report a backlight and which our heuristics + * for this do not catch. + */ { .callback = video_detect_force_none, .ident = "Dell OptiPlex 9020M", @@ -344,6 +349,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), }, }, + { + .callback = video_detect_force_none, + .ident = "MSI MS-7721", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"), + }, + }, { }, }; diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index 9614126bf56e..90c40f992e13 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -12,6 +12,15 @@ #include "internal.h" #include "sleep.h" +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *context); + void *context; +}; + +static LIST_HEAD(acpi_wakeup_handler_head); +static DEFINE_MUTEX(acpi_wakeup_handler_mutex); + /* * We didn't lock acpi_device_lock in the file, because it invokes oops in * suspend/resume and isn't really required as this is called in S-state. At @@ -96,3 +105,75 @@ int __init acpi_wakeup_device_init(void) mutex_unlock(&acpi_device_lock); return 0; } + +/** + * acpi_register_wakeup_handler - Register wakeup handler + * @wake_irq: The IRQ through which the device may receive wakeups + * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup + * @context: Context to pass to the handler when calling it + * + * Drivers which may share an IRQ with the SCI can use this to register + * a handler which returns true when the device they are managing wants + * to trigger a wakeup. 
+ */ +int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + /* + * If the device is not sharing its IRQ with the SCI, there is no + * need to register the handler. + */ + if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq) + return 0; + + handler = kmalloc(sizeof(*handler), GFP_KERNEL); + if (!handler) + return -ENOMEM; + + handler->wakeup = wakeup; + handler->context = context; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_add(&handler->list_node, &acpi_wakeup_handler_head); + mutex_unlock(&acpi_wakeup_handler_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler); + +/** + * acpi_unregister_wakeup_handler - Unregister wakeup handler + * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler() + * @context: Context passed to acpi_register_wakeup_handler() + */ +void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup == wakeup && handler->context == context) { + list_del(&handler->list_node); + kfree(handler); + break; + } + } + mutex_unlock(&acpi_wakeup_handler_mutex); +} +EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler); + +bool acpi_check_wakeup_handlers(void) +{ + struct acpi_wakeup_handler *handler; + + /* No need to lock, nothing else is running when we're called. */ + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup(handler->context)) + return true; + } + + return false; +} diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 265d9dd46a5e..34a6de65aa7e 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3314,7 +3314,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t parent_offset; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); - size_t num_valid = (buffer_offset - off_start_offset) * + size_t num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); struct binder_buffer_object *parent = binder_validate_ptr(target_proc, t->buffer, @@ -3388,7 +3388,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->user_data + sg_buf_offset; sg_buf_offset += ALIGN(bp->length, sizeof(u64)); - num_valid = (buffer_offset - off_start_offset) * + num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); ret = binder_fixup_parent(t, thread, bp, off_start_offset, @@ -5203,10 +5203,11 @@ err_bad_arg: static int binder_open(struct inode *nodp, struct file *filp) { - struct binder_proc *proc; + struct binder_proc *proc, *itr; struct binder_device *binder_dev; struct binderfs_info *info; struct dentry *binder_binderfs_dir_entry_proc = NULL; + bool existing_pid = false; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, current->group_leader->pid, current->pid); @@ -5229,6 +5230,7 @@ static int binder_open(struct inode *nodp, struct file *filp) binder_dev = container_of(filp->private_data, struct binder_device, miscdev); } + refcount_inc(&binder_dev->ref); proc->context = &binder_dev->context; binder_alloc_init(&proc->alloc); @@ -5239,19 +5241,24 @@ static int binder_open(struct inode *nodp, struct file *filp) filp->private_data = proc; mutex_lock(&binder_procs_lock); + hlist_for_each_entry(itr, &binder_procs, proc_node) { + if (itr->pid == proc->pid) { + existing_pid = true; + break; + } + } 
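Stepping back to the acpi_{register,unregister}_wakeup_handler() pair completed just above: a driver whose interrupt is shared with the SCI registers a predicate that acpi_check_wakeup_handlers() consults during s2idle wakeups. A hedged driver-side sketch; my_device and my_device_irq_pending() are hypothetical, only the two API calls come from the patch:

/* Hypothetical consumer of the wakeup-handler API added above. */
struct my_device {
	int irq;
	/* ... */
};

static bool my_device_wants_wake(void *context)
{
	struct my_device *mydev = context;

	return my_device_irq_pending(mydev);	/* hypothetical helper */
}

static int my_device_setup_wakeup(struct my_device *mydev)
{
	/* A no-op (returns 0) unless mydev->irq is shared with the SCI. */
	return acpi_register_wakeup_handler(mydev->irq,
					    my_device_wants_wake, mydev);
}

static void my_device_teardown_wakeup(struct my_device *mydev)
{
	acpi_unregister_wakeup_handler(my_device_wants_wake, mydev);
}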
hlist_add_head(&proc->proc_node, &binder_procs); mutex_unlock(&binder_procs_lock); - if (binder_debugfs_dir_entry_proc) { + if (binder_debugfs_dir_entry_proc && !existing_pid) { char strbuf[11]; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); /* - * proc debug entries are shared between contexts, so - * this will fail if the process tries to open the driver - * again with a different context. The priting code will - * anyway print all contexts that a given PID has, so this - * is not a problem. + * proc debug entries are shared between contexts. + * Only create for the first PID to avoid debugfs log spamming + * The printing code will anyway print all contexts for a given + * PID so this is not a problem. */ proc->debugfs_entry = debugfs_create_file(strbuf, 0444, binder_debugfs_dir_entry_proc, @@ -5259,19 +5266,16 @@ static int binder_open(struct inode *nodp, struct file *filp) &proc_fops); } - if (binder_binderfs_dir_entry_proc) { + if (binder_binderfs_dir_entry_proc && !existing_pid) { char strbuf[11]; struct dentry *binderfs_entry; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); /* * Similar to debugfs, the process specific log file is shared - * between contexts. If the file has already been created for a - * process, the following binderfs_create_file() call will - * fail with error code EEXIST if another context of the same - * process invoked binder_open(). This is ok since same as - * debugfs, the log file will contain information on all - * contexts of a given PID. + * between contexts. Only create for the first PID. + * This is ok since same as debugfs, the log file will contain + * information on all contexts of a given PID. */ binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, strbuf, &proc_fops, (void *)(unsigned long)proc->pid); @@ -5281,10 +5285,8 @@ static int binder_open(struct inode *nodp, struct file *filp) int error; error = PTR_ERR(binderfs_entry); - if (error != -EEXIST) { - pr_warn("Unable to create file %s in binderfs (error %d)\n", - strbuf, error); - } + pr_warn("Unable to create file %s in binderfs (error %d)\n", + strbuf, error); } } @@ -5406,6 +5408,7 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; + struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5422,6 +5425,12 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(context->name); + kfree(device); + } + proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we @@ -6078,6 +6087,7 @@ static int __init init_binder_device(const char *name) binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; + refcount_set(&binder_device->ref, 1); binder_device->context.binder_context_mgr_uid = INVALID_UID; binder_device->context.name = name; mutex_init(&binder_device->context.context_mgr_node_lock); diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index eb76a823fbb2..7067d5542a82 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -277,8 +277,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, return 0; free_range: - for 
(page_addr = end - PAGE_SIZE; page_addr >= start; - page_addr -= PAGE_SIZE) { + for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { bool ret; size_t index; @@ -291,6 +290,8 @@ free_range: WARN_ON(!ret); trace_binder_free_lru_end(alloc, index); + if (page_addr == start) + break; continue; err_vm_insert_page_failed: @@ -298,7 +299,8 @@ err_vm_insert_page_failed: page->page_ptr = NULL; err_alloc_page_failed: err_page_ptr_cleared: - ; + if (page_addr == start) + break; } err_no_vma: if (mm) { @@ -681,17 +683,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct binder_buffer *buffer; mutex_lock(&binder_alloc_mmap_lock); - if (alloc->buffer) { + if (alloc->buffer_size) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } - - alloc->buffer = (void __user *)vma->vm_start; - mutex_unlock(&binder_alloc_mmap_lock); - alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, SZ_4M); + mutex_unlock(&binder_alloc_mmap_lock); + + alloc->buffer = (void __user *)vma->vm_start; + alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, sizeof(alloc->pages[0]), GFP_KERNEL); @@ -722,8 +724,9 @@ err_alloc_buf_struct_failed: kfree(alloc->pages); alloc->pages = NULL; err_alloc_pages_failed: - mutex_lock(&binder_alloc_mmap_lock); alloc->buffer = NULL; + mutex_lock(&binder_alloc_mmap_lock); + alloc->buffer_size = 0; err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); binder_alloc_debug(BINDER_DEBUG_USER_ERROR, @@ -841,14 +844,20 @@ void binder_alloc_print_pages(struct seq_file *m, int free = 0; mutex_lock(&alloc->mutex); - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - page = &alloc->pages[i]; - if (!page->page_ptr) - free++; - else if (list_empty(&page->lru)) - active++; - else - lru++; + /* + * Make sure the binder_alloc is fully initialized, otherwise we might + * read inconsistent state. 
+ */ + if (binder_alloc_get_vma(alloc) != NULL) { + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + page = &alloc->pages[i]; + if (!page->page_ptr) + free++; + else if (list_empty(&page->lru)) + active++; + else + lru++; + } } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index ae991097d14d..283d3cb9c16e 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ struct binder_device { struct miscdevice miscdev; struct binder_context context; struct inode *binderfs_inode; + refcount_t ref; }; /** diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index e2580e5316a2..f303106b3362 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode, if (!name) goto err; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode) ida_free(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); - kfree(device->context.name); - kfree(device); + if (refcount_dec_and_test(&device->ref)) { + kfree(device->context.name); + kfree(device); + } } /** @@ -445,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->miscdev.minor = minor; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 05c2b32dcc4d..d33528033042 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -80,6 +80,7 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); +static void ahci_shutdown_one(struct pci_dev *dev); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, @@ -392,6 +393,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ + { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. 
AHCI */ @@ -593,6 +595,7 @@ static struct pci_driver ahci_pci_driver = { .id_table = ahci_pci_tbl, .probe = ahci_init_one, .remove = ahci_remove_one, + .shutdown = ahci_shutdown_one, .driver = { .pm = &ahci_pci_pm_ops, }, @@ -1864,6 +1867,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } +static void ahci_shutdown_one(struct pci_dev *pdev) +{ + ata_pci_shutdown_one(pdev); +} + static void ahci_remove_one(struct pci_dev *pdev) { pm_runtime_get_noresume(&pdev->dev); diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f41744b9b38a..66a570d0da83 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -76,8 +76,7 @@ enum brcm_ahci_version { }; enum brcm_ahci_quirks { - BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), - BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), + BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0), }; struct brcm_ahci_priv { @@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv) brcm_sata_phy_disable(priv, i); } -static u32 brcm_ahci_get_portmask(struct platform_device *pdev, +static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv, struct brcm_ahci_priv *priv) { - void __iomem *ahci; - struct resource *res; u32 impl; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"); - ahci = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(ahci)) - return 0; - - impl = readl(ahci + HOST_PORTS_IMPL); + impl = readl(hpriv->mmio + HOST_PORTS_IMPL); if (fls(impl) > SATA_TOP_MAX_PHYS) dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n", @@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, else if (!impl) dev_info(priv->dev, "no ports found\n"); - devm_iounmap(&pdev->dev, ahci); - devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); - return impl; } @@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev, /* Perform the SATA PHY reset sequence */ brcm_sata_phy_disable(priv, ap->port_no); + /* Reset the SATA clock */ + ahci_platform_disable_clks(hpriv); + msleep(10); + + ahci_platform_enable_clks(hpriv); + msleep(10); + /* Bring the PHY back on */ brcm_sata_phy_enable(priv, ap->port_no); @@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; - int ret; - ret = ahci_platform_suspend(dev); brcm_sata_phys_disable(priv); - return ret; + + return ahci_platform_suspend(dev); } static int brcm_ahci_resume(struct device *dev) @@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; + + /* Make sure clocks are turned on before re-configuration */ + ret = ahci_platform_enable_clks(hpriv); + if (ret) + return ret; brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); - return ahci_platform_resume(dev); + + /* Since we had to enable clocks earlier on, we cannot use + * ahci_platform_resume() as-is since a second call to + * ahci_platform_enable_resources() would bump up the resources + * (regulators, clocks, PHYs) count artificially so we copy the part + * after ahci_platform_enable_resources(). 
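The resume comment above is the key to the whole ahci_brcm rework: clocks are already on by this point, so calling ahci_platform_resume() whole would enable the shared resources a second time and skew their enable counts; only the tail of it is repeated here, and the error labels release strictly what this function acquired, in reverse order. A compact stand-alone illustration of that acquire/unwind discipline (the resource names are placeholders, not the ahci_platform API):

#include <stdio.h>

static int  enable_clks(void)  { puts("clk on");  return 0; }
static void disable_clks(void) { puts("clk off"); }
static int  enable_phys(void)  { puts("phy on");  return 0; }
static void disable_phys(void) { puts("phy off"); }
static int  resume_host(void)  { puts("host up"); return -1; /* force unwind */ }

static int resume(void)
{
	int ret;

	ret = enable_clks();		/* acquired first ... */
	if (ret)
		return ret;

	ret = enable_phys();
	if (ret)
		goto out_disable_clks;

	ret = resume_host();
	if (ret)
		goto out_disable_phys;

	return 0;

out_disable_phys:			/* ... released in reverse order */
	disable_phys();
out_disable_clks:
	disable_clks();
	return ret;
}

int main(void)
{
	return resume() ? 1 : 0;
}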
+ */ + ret = ahci_platform_enable_phys(hpriv); + if (ret) + goto out_disable_phys; + + ret = ahci_platform_resume_host(dev); + if (ret) + goto out_disable_platform_phys; + + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); + ahci_platform_disable_clks(hpriv); + return ret; } #endif @@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (!IS_ERR_OR_NULL(priv->rcdev)) reset_control_deassert(priv->rcdev); - if ((priv->version == BRCM_SATA_BCM7425) || - (priv->version == BRCM_SATA_NSP)) { - priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; - priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + hpriv = ahci_platform_get_resources(pdev, 0); + if (IS_ERR(hpriv)) { + ret = PTR_ERR(hpriv); + goto out_reset; } + hpriv->plat_data = priv; + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; + + switch (priv->version) { + case BRCM_SATA_BCM7425: + hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; + /* fall through */ + case BRCM_SATA_NSP: + hpriv->flags |= AHCI_HFLAG_NO_NCQ; + priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + break; + default: + break; + } + + ret = ahci_platform_enable_clks(hpriv); + if (ret) + goto out_reset; + + /* Must be first so as to configure endianness including that + * of the standard AHCI register space. + */ brcm_sata_init(priv); - priv->port_mask = brcm_ahci_get_portmask(pdev, priv); - if (!priv->port_mask) - return -ENODEV; + /* Initializes priv->port_mask which is used below */ + priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); + if (!priv->port_mask) { + ret = -ENODEV; + goto out_disable_clks; + } + /* Must be done before ahci_platform_enable_phys() */ brcm_sata_phys_enable(priv); - hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) - return PTR_ERR(hpriv); - hpriv->plat_data = priv; - hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; - brcm_sata_alpm_init(hpriv); - ret = ahci_platform_enable_resources(hpriv); + ret = ahci_platform_enable_phys(hpriv); if (ret) - return ret; - - if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO; + goto out_disable_phys; ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, &ahci_platform_sht); if (ret) - return ret; + goto out_disable_platform_phys; dev_info(dev, "Broadcom AHCI SATA3 registered\n"); return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); +out_disable_clks: + ahci_platform_disable_clks(hpriv); +out_reset: + if (!IS_ERR_OR_NULL(priv->rcdev)) + reset_control_assert(priv->rcdev); + return ret; } static int brcm_ahci_remove(struct platform_device *pdev) @@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev) struct brcm_ahci_priv *priv = hpriv->plat_data; int ret; + brcm_sata_phys_disable(priv); + ret = ata_platform_remove_one(pdev); if (ret) return ret; - brcm_sata_phys_disable(priv); - return 0; } diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 8befce036af8..129556fcf6be 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops); * RETURNS: * 0 on success otherwise a negative error code */ -static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) +int 
ahci_platform_enable_phys(struct ahci_host_priv *hpriv) { int rc, i; @@ -74,6 +74,7 @@ disable_phys: } return rc; } +EXPORT_SYMBOL_GPL(ahci_platform_enable_phys); /** * ahci_platform_disable_phys - Disable PHYs @@ -81,7 +82,7 @@ disable_phys: * * This function disables all PHYs found in hpriv->phys. */ -static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) { int i; @@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) phy_exit(hpriv->phys[i]); } } +EXPORT_SYMBOL_GPL(ahci_platform_disable_phys); /** * ahci_platform_enable_clks - Enable platform clocks diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 28c492be0a57..581595b35573 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5325,6 +5325,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc) } } +/** + * ata_qc_get_active - get bitmask of active qcs + * @ap: port in question + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Bitmask of active qcs + */ +u64 ata_qc_get_active(struct ata_port *ap) +{ + u64 qc_active = ap->qc_active; + + /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ + if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (1 << 0); + qc_active &= ~(1ULL << ATA_TAG_INTERNAL); + } + + return qc_active; +} +EXPORT_SYMBOL_GPL(ata_qc_get_active); + /** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question @@ -6708,6 +6732,9 @@ void ata_host_detach(struct ata_host *host) { int i; + /* Ensure ata_port probe has completed */ + async_synchronize_full(); + for (i = 0; i < host->n_ports; i++) ata_port_detach(host->ports[i]); @@ -6735,6 +6762,26 @@ void ata_pci_remove_one(struct pci_dev *pdev) ata_host_detach(host); } +void ata_pci_shutdown_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int i; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ap->pflags |= ATA_PFLAG_FROZEN; + + /* Disable port interrupts */ + if (ap->ops->freeze) + ap->ops->freeze(ap); + + /* Stop the port DMA engines */ + if (ap->ops->port_stop) + ap->ops->port_stop(ap); + } +} + /* move to PCI subsystem */ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) { @@ -7355,6 +7402,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); +EXPORT_SYMBOL_GPL(ata_pci_shutdown_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); #ifdef CONFIG_PM EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 8e9cb198fcd1..ca6c706e9c25 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1278,7 +1278,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); return; } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index ad385a113391..bde695a32097 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2827,7 +2827,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp } if (work_done) { - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); /* Update the software queue position index in hardware */ 
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 56946012d113..7510303111fa 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) check_commands = 0; check_commands &= ~(1 << pos); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); } } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b23d1e4bad33..9d0d65efcd94 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, here = (eni_vcc->descr+skip) & (eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; @@ -447,7 +447,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 2bbab0230aeb..d287837ed755 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc) } if (!to) { printk ("No more free channels for FS50..\n"); + kfree(vcc); return -EBUSY; } vcc->channo = dev->channo; @@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc) if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) || ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) { printk ("Channel is in use for FS155.\n"); + kfree(vcc); return -EBUSY; } } @@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc) tc, sizeof (struct fs_transmit_config)); if (!tc) { fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n"); + kfree(vcc); return -ENOMEM; } diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f1a500205313..8fbd36eb8941 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -1414,12 +1414,14 @@ fore200e_open(struct atm_vcc *vcc) static void fore200e_close(struct atm_vcc* vcc) { - struct fore200e* fore200e = FORE200E_DEV(vcc->dev); struct fore200e_vcc* fore200e_vcc; + struct fore200e* fore200e; struct fore200e_vc_map* vc_map; unsigned long flags; ASSERT(vcc); + fore200e = FORE200E_DEV(vcc->dev); + ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); @@ ... @@ static int fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) { - struct fore200e* fore200e = FORE200E_DEV(vcc->dev); - struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); + struct fore200e* fore200e; + struct fore200e_vcc* fore200e_vcc; struct fore200e_vc_map* vc_map; - struct host_txq* txq = &fore200e->host_txq; + struct host_txq* txq; struct host_txq_entry* entry; struct tpd* tpd; struct tpd_haddr tpd_haddr; @@ -1480,9 +1482,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) unsigned char* data; unsigned long flags; - ASSERT(vcc); - ASSERT(fore200e); - ASSERT(fore200e_vcc); + if (!vcc) + return -EINVAL; + + fore200e = FORE200E_DEV(vcc->dev); + fore200e_vcc = FORE200E_VCC(vcc); + + if (!fore200e) + return -EINVAL; + + txq = &fore200e->host_txq; + if (!fore200e_vcc) + return -EINVAL; if (!test_bit(ATM_VF_READY, &vcc->flags)) { DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); diff --git a/drivers/base/component.c b/drivers/base/component.c index 532a3a5d8f63..1fdbd6ff2058 100644 ---
a/drivers/base/component.c +++ b/drivers/base/component.c @@ -102,11 +102,11 @@ static int component_devices_show(struct seq_file *s, void *data) seq_printf(s, "%-40s %20s\n", "device name", "status"); seq_puts(s, "-------------------------------------------------------------\n"); for (i = 0; i < match->num; i++) { - struct device *d = (struct device *)match->compare[i].data; + struct component *component = match->compare[i].component; - seq_printf(s, "%-40s %20s\n", dev_name(d), - match->compare[i].component ? - "registered" : "not registered"); + seq_printf(s, "%-40s %20s\n", + component ? dev_name(component->dev) : "(unknown)", + component ? (component->bound ? "bound" : "not bound") : "not registered"); } mutex_unlock(&component_mutex); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index d811e60610d3..b25bcab2a26b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -516,7 +516,10 @@ static int really_probe(struct device *dev, struct device_driver *drv) atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", drv->bus->name, __func__, drv->name, dev_name(dev)); - WARN_ON(!list_empty(&dev->devres_head)); + if (!list_empty(&dev->devres_head)) { + dev_crit(dev, "Resources present before probing\n"); + return -EBUSY; + } re_probe: dev->driver = drv; diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile index 37e5ae387400..5fa7ce3745a0 100644 --- a/drivers/base/firmware_loader/builtin/Makefile +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -8,7 +8,8 @@ fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE))) FWNAME = $(patsubst $(obj)/%.gen.S,%,$@) -FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))) +comma := , +FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))) ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long) ASM_ALIGN = $(if $(CONFIG_64BIT),3,2) PROGBITS = $(if $(CONFIG_ARM),%,@)progbits @@ -16,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits filechk_fwbin = \ echo "/* Generated by $(src)/Makefile */" ;\ echo " .section .rodata" ;\ - echo " .p2align $(ASM_ALIGN)" ;\ + echo " .p2align 4" ;\ echo "_fw_$(FWSTR)_bin:" ;\ echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ echo "_fw_end:" ;\ diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 84c4e1f72cbd..5a8c430fb8ff 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -114,30 +114,13 @@ static ssize_t phys_index_show(struct device *dev, } /* - * Show whether the memory block is likely to be offlineable (or is already - * offline). Once offline, the memory block could be removed. The return - * value does, however, not indicate that there is a way to remove the - * memory block. + * Legacy interface that we cannot remove. Always indicate "removable" + * with CONFIG_MEMORY_HOTREMOVE - bad heuristic. 
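The removable_show() rewrite just below, like the test_async_driver_probe and btusb hunks later in this series, leans on IS_ENABLED(): a macro that collapses to the integer 1 or 0 at compile time, so a config check can sit inside a normal C expression and both branches stay visible to the compiler, unlike #ifdef. A self-contained approximation of the idiom (the kernel's real IS_ENABLED() is a preprocessor trick over CONFIG_* symbols; this two-line stand-in only mimics its effect):

#include <stdio.h>

#ifdef CONFIG_MEMORY_HOTREMOVE
#define MEMORY_HOTREMOVE_ENABLED 1
#else
#define MEMORY_HOTREMOVE_ENABLED 0
#endif

/* Legacy-ABI style show function: always report the compile-time
 * capability instead of probing state that no longer means much. */
static int removable_show(char *buf, size_t len)
{
	return snprintf(buf, len, "%d\n", MEMORY_HOTREMOVE_ENABLED);
}

int main(void)
{
	char buf[8];

	removable_show(buf, sizeof(buf));
	fputs(buf, stdout);	/* "0"; build with -DCONFIG_MEMORY_HOTREMOVE for "1" */
	return 0;
}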
*/ static ssize_t removable_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct memory_block *mem = to_memory_block(dev); - unsigned long pfn; - int ret = 1, i; - - if (mem->state != MEM_ONLINE) - goto out; - - for (i = 0; i < sections_per_block; i++) { - if (!present_section_nr(mem->start_section_nr + i)) - continue; - pfn = section_nr_to_pfn(mem->start_section_nr + i); - ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); - } - -out: - return sprintf(buf, "%d\n", ret); + return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)); } /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index b230beb6ccb4..604a461848c9 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "base.h" #include "power/power.h" @@ -48,7 +49,7 @@ EXPORT_SYMBOL_GPL(platform_bus); struct resource *platform_get_resource(struct platform_device *dev, unsigned int type, unsigned int num) { - int i; + u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; @@ -226,7 +227,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev, unsigned int type, const char *name) { - int i; + u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; @@ -334,10 +335,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev) { if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - if (!pdev->dma_mask) - pdev->dma_mask = DMA_BIT_MASK(32); - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dma_mask; + if (!pdev->dev.dma_mask) { + pdev->platform_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->platform_dma_mask; + } }; /** @@ -473,7 +474,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties); */ int platform_device_add(struct platform_device *pdev) { - int i, ret; + u32 i; + int ret; if (!pdev) return -EINVAL; @@ -541,7 +543,7 @@ int platform_device_add(struct platform_device *pdev) pdev->id = PLATFORM_DEVID_AUTO; } - while (--i >= 0) { + while (i--) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); @@ -562,7 +564,7 @@ EXPORT_SYMBOL_GPL(platform_device_add); */ void platform_device_del(struct platform_device *pdev) { - int i; + u32 i; if (!IS_ERR_OR_NULL(pdev)) { device_del(&pdev->dev); @@ -632,20 +634,8 @@ struct platform_device *platform_device_register_full( pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { - /* - * This memory isn't freed when the device is put, - * I don't have a nice idea for that though. Conceptually - * dma_mask in struct device should not be a pointer. 
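The platform.c change around this removed comment replaces a kmalloc'ed DMA mask, which the old comment concedes was never freed, with a u64 embedded in struct platform_device itself; dev.dma_mask then points into the containing object and shares its lifetime. A stripped-down sketch of the ownership pattern, with the structures reduced to the relevant fields:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK_32 0xffffffffULL

struct device {
	uint64_t *dma_mask;		/* historically a bare pointer */
	uint64_t coherent_dma_mask;
};

struct platform_device {
	struct device dev;
	uint64_t platform_dma_mask;	/* storage now lives with the device */
};

static void setup_pdev_dma_masks(struct platform_device *pdev)
{
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK_32;
	if (!pdev->dev.dma_mask) {
		pdev->platform_dma_mask = DMA_BIT_MASK_32;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
	}
}

int main(void)
{
	struct platform_device pdev = { { 0, 0 }, 0 };

	setup_pdev_dma_masks(&pdev);
	printf("mask %#llx\n", (unsigned long long)*pdev.dev.dma_mask);
	return 0;	/* nothing to free: the mask dies with pdev */
}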
- * See http://thread.gmane.org/gmane.linux.kernel.pci/9081 - */ - pdev->dev.dma_mask = - kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); - if (!pdev->dev.dma_mask) - goto err; - - kmemleak_ignore(pdev->dev.dma_mask); - - *pdev->dev.dma_mask = pdevinfo->dma_mask; + pdev->platform_dma_mask = pdevinfo->dma_mask; + pdev->dev.dma_mask = &pdev->platform_dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } @@ -670,7 +660,6 @@ struct platform_device *platform_device_register_full( if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); - kfree(pdev->dev.dma_mask); platform_device_put(pdev); return ERR_PTR(ret); } @@ -1278,6 +1267,11 @@ struct bus_type platform_bus_type = { }; EXPORT_SYMBOL_GPL(platform_bus_type); +static inline int __platform_match(struct device *dev, const void *drv) +{ + return platform_match(dev, (struct device_driver *)drv); +} + /** * platform_find_device_by_driver - Find a platform device with a given * driver. @@ -1288,7 +1282,7 @@ struct device *platform_find_device_by_driver(struct device *start, const struct device_driver *drv) { return bus_find_device(&platform_bus_type, start, drv, - (void *)platform_match); + __platform_match); } EXPORT_SYMBOL_GPL(platform_find_device_by_driver); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 134a8af51511..0e99a760aebd 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -273,10 +273,38 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async) device_links_read_unlock(idx); } -static void dpm_wait_for_superior(struct device *dev, bool async) +static bool dpm_wait_for_superior(struct device *dev, bool async) { - dpm_wait(dev->parent, async); + struct device *parent; + + /* + * If the device is resumed asynchronously and the parent's callback + * deletes both the device and the parent itself, the parent object may + * be freed while this function is running, so avoid that by reference + * counting the parent once more unless the device has been deleted + * already (in which case return right away). + */ + mutex_lock(&dpm_list_mtx); + + if (!device_pm_initialized(dev)) { + mutex_unlock(&dpm_list_mtx); + return false; + } + + parent = get_device(dev->parent); + + mutex_unlock(&dpm_list_mtx); + + dpm_wait(parent, async); + put_device(parent); + dpm_wait_for_suppliers(dev, async); + + /* + * If the parent's callback has deleted the device, attempting to resume + * it would be invalid, so avoid doing that then. 
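The dpm_wait_for_superior() comment above describes a standard lookup-under-lock dance: dev->parent may only be trusted while dpm_list_mtx is held, so the code pins the parent with get_device() before dropping the lock, waits, releases the reference, and then re-checks the device's own registration, since an async parent callback may have deleted it in the meantime. A generic user-space rendering of the same pattern with C11 atomics (structure and function names here are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct object {
	atomic_int refcnt;
	struct object *parent;
	bool registered;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct object *obj_get(struct object *o)
{
	if (o)
		atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static void obj_put(struct object *o)
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1) {
		/* last reference dropped: safe to free here */
	}
}

static void wait_for(struct object *o) { (void)o; /* block until o settles */ }

/* Returns false if the object was deleted while waiting, mirroring
 * the bool result dpm_wait_for_superior() gains in this hunk. */
static bool wait_for_parent(struct object *o)
{
	struct object *parent;

	pthread_mutex_lock(&list_lock);
	if (!o->registered) {			/* already gone */
		pthread_mutex_unlock(&list_lock);
		return false;
	}
	parent = obj_get(o->parent);		/* pin before unlocking */
	pthread_mutex_unlock(&list_lock);

	wait_for(parent);
	obj_put(parent);

	return o->registered;	/* parent callback may have deleted us */
}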
+ */ + return device_pm_initialized(dev); } static void dpm_wait_for_consumers(struct device *dev, bool async) @@ -621,7 +649,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn if (!dev->power.is_noirq_suspended) goto Out; - dpm_wait_for_superior(dev, async); + if (!dpm_wait_for_superior(dev, async)) + goto Out; skip_resume = dev_pm_may_skip_resume(dev); @@ -829,7 +858,8 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn if (!dev->power.is_late_suspended) goto Out; - dpm_wait_for_superior(dev, async); + if (!dpm_wait_for_superior(dev, async)) + goto Out; callback = dpm_subsys_resume_early_cb(dev, state, &info); @@ -944,7 +974,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) goto Complete; } - dpm_wait_for_superior(dev, async); + if (!dpm_wait_for_superior(dev, async)) + goto Complete; + dpm_watchdog_set(&wd, dev); device_lock(dev); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 19f57ccfbe1d..59f911e57719 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, WARN_ON(!map->bus); - /* Check for unwritable registers before we start */ - for (i = 0; i < val_len / map->format.val_bytes; i++) - if (!regmap_writeable(map, - reg + regmap_get_offset(map, i))) - return -EINVAL; + /* Check for unwritable or noinc registers in range + * before we start + */ + if (!regmap_writeable_noinc(map, reg)) { + for (i = 0; i < val_len / map->format.val_bytes; i++) { + unsigned int element = + reg + regmap_get_offset(map, i); + if (!regmap_writeable(map, element) || + regmap_writeable_noinc(map, element)) + return -EINVAL; + } + } if (!map->cache_bypass && map->format.parse_val) { unsigned int ival; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index a1f3f0994f9f..d5b4905e2adb 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -520,7 +520,10 @@ software_node_get_parent(const struct fwnode_handle *fwnode) { struct swnode *swnode = to_swnode(fwnode); - return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL; + if (!swnode || !swnode->parent) + return NULL; + + return fwnode_handle_get(&swnode->parent->fwnode); } static struct fwnode_handle * diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c index f4b1d8e54daf..3bb7beb127a9 100644 --- a/drivers/base/test/test_async_driver_probe.c +++ b/drivers/base/test/test_async_driver_probe.c @@ -44,7 +44,8 @@ static int test_probe(struct platform_device *pdev) * performing an async init on that node. */ if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) { - if (dev_to_node(dev) != numa_node_id()) { + if (IS_ENABLED(CONFIG_NUMA) && + dev_to_node(dev) != numa_node_id()) { dev_warn(dev, "NUMA node mismatch %d != %d\n", dev_to_node(dev), numa_node_id()); atomic_inc(&warnings); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index c548a5a6c1a0..79f18cfa7049 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -470,6 +470,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) return kobj; } +static inline void brd_check_and_reset_par(void) +{ + if (unlikely(!max_part)) + max_part = 1; + + /* + * make sure 'max_part' divides (1U << MINORBITS) exactly, + * otherwise it is possible to get the same dev_t when adding partitions.
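A worked example of the constraint this comment states and of the power-of-two rounding applied just below it: with MINORBITS = 20 there are 1048576 minors per major, and a max_part of 6 does not divide that evenly, so two distinct (disk, partition) pairs could collide on one dev_t; the kernel's "1UL << fls(max_part)" rounds any such value up to the next power of two (6 becomes 8), which always divides a power-of-two minor space. A runnable check of the arithmetic (round_up_pow2() stands in for the fls() expression):

#include <stdio.h>

#define MINORBITS 20

/* Smallest power of two >= x (x > 0), matching "1UL << fls(x)"
 * for the non-power-of-two inputs that reach this branch. */
static unsigned int round_up_pow2(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_part = 6;

	if ((1U << MINORBITS) % max_part != 0)
		max_part = round_up_pow2(max_part);

	printf("max_part -> %u\n", max_part);	/* prints 8 */
	return 0;
}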
+ */ + if ((1U << MINORBITS) % max_part != 0) + max_part = 1UL << fls(max_part); + + if (max_part > DISK_MAX_PARTS) { + pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n", + DISK_MAX_PARTS, DISK_MAX_PARTS); + max_part = DISK_MAX_PARTS; + } +} + static int __init brd_init(void) { struct brd_device *brd, *next; @@ -493,8 +512,7 @@ static int __init brd_init(void) if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) return -EIO; - if (unlikely(!max_part)) - max_part = 1; + brd_check_and_reset_par(); for (i = 0; i < rd_nr; i++) { brd = brd_alloc(i); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 485865fd0412..f19a03b62365 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -853,14 +853,17 @@ static void reset_fdc_info(int mode) /* selects the fdc and drive, and enables the fdc's input/dma. */ static void set_fdc(int drive) { + unsigned int new_fdc = fdc; + if (drive >= 0 && drive < N_DRIVE) { - fdc = FDC(drive); + new_fdc = FDC(drive); current_drive = drive; } - if (fdc != 1 && fdc != 0) { + if (new_fdc >= N_FDC) { pr_info("bad fdc value\n"); return; } + fdc = new_fdc; set_dor(fdc, ~0, 8); #if N_FDC > 1 set_dor(1 - fdc, ~8, 0); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f6f77eaa7217..ef6e251857c8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -417,18 +417,20 @@ out_free_page: return ret; } -static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) +static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, + int mode) { /* - * We use punch hole to reclaim the free space used by the - * image a.k.a. discard. However we do not support discard if - * encryption is enabled, because it may give an attacker - * useful information. + * We use fallocate to manipulate the space mappings used by the image + * a.k.a. discard/zerorange. However we do not support this if + * encryption is enabled, because it may give an attacker useful + * information. */ struct file *file = lo->lo_backing_file; - int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; int ret; + mode |= FALLOC_FL_KEEP_SIZE; + if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { ret = -EOPNOTSUPP; goto out; @@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) switch (req_op(rq)) { case REQ_OP_FLUSH: return lo_req_flush(lo, rq); - case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: - return lo_discard(lo, rq, pos); + /* + * If the caller doesn't want deallocation, call zeroout to + * write zeroes the range. Otherwise, punch them out. + */ + return lo_fallocate(lo, rq, pos, + (rq->cmd_flags & REQ_NOUNMAP) ? 
+ FALLOC_FL_ZERO_RANGE : + FALLOC_FL_PUNCH_HOLE); + case REQ_OP_DISCARD: + return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); case REQ_OP_WRITE: if (lo->transfer) return lo_write_transfer(lo, rq, pos); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 19e75999bb15..2bc6e7e149bc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -306,7 +306,8 @@ static void nbd_size_update(struct nbd_device *nbd) nbd->disk->queue->limits.discard_alignment = config->blksize; blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); } - blk_queue_logical_block_size(nbd->disk->queue, config->blksize); + if (!nbd->disk->queue->limits.nbd_ignore_blksize_set) + blk_queue_logical_block_size(nbd->disk->queue, config->blksize); blk_queue_physical_block_size(nbd->disk->queue, config->blksize); set_capacity(nbd->disk, config->bytesize >> 9); if (bdev) { @@ -1032,14 +1033,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, sockfd_put(sock); return -ENOMEM; } + + config->socks = socks; + nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); if (!nsock) { sockfd_put(sock); return -ENOMEM; } - config->socks = socks; - nsock->fallback_index = -1; nsock->dead = false; mutex_init(&nsock->tx_lock); @@ -1264,6 +1266,16 @@ static int nbd_start_device(struct nbd_device *nbd) args = kzalloc(sizeof(*args), GFP_KERNEL); if (!args) { sock_shutdown(nbd); + /* + * If num_connections is m (2 < m), + * and NO.1 ~ NO.n(1 < n < m) kzallocs are successful. + * But NO.(n + 1) failed. We still have n recv threads. + * So, add flush_workqueue here to prevent recv threads + * dropping the last config_refs and trying to destroy + * the workqueue from inside the workqueue. + */ + if (i) + flush_workqueue(nbd->recv_workq); return -ENOMEM; } sk_set_memalloc(config->socks[i]->sock->sk); @@ -1295,10 +1307,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) { + if (ret) sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); - } + flush_workqueue(nbd->recv_workq); + mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ @@ -1464,6 +1476,8 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) mutex_unlock(&nbd->config_lock); goto out; } + if (nbd->disk->queue->limits.nbd_ignore_blksize_set) + nbd->config->blksize = 512; refcount_set(&nbd->config_refs, 1); refcount_inc(&nbd->refs); mutex_unlock(&nbd->config_lock); @@ -1891,6 +1905,8 @@ again: printk(KERN_ERR "nbd: couldn't allocate config\n"); return -ENOMEM; } + if (nbd->disk->queue->limits.nbd_ignore_blksize_set) + nbd->config->blksize = 512; refcount_set(&nbd->config_refs, 1); set_bit(NBD_RT_BOUND, &config->runtime_flags); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 13527a0b4e44..a67315786db4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2739,7 +2739,7 @@ static int rbd_img_fill_nodata(struct rbd_img_request *img_req, u64 off, u64 len) { struct ceph_file_extent ex = { off, len }; - union rbd_img_fill_iter dummy; + union rbd_img_fill_iter dummy = {}; struct rbd_img_fill_ctx fctx = { .pos_type = OBJ_REQUEST_NODATA, .pos = &dummy, diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7ffd719d89de..c2ed3e9128e3 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -339,10 +339,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, err = 
virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); - blk_mq_stop_hw_queue(hctx); + /* Don't stop the queue if -ENOMEM: we may have failed to + * bounce the buffer due to global resource outage. + */ + if (err == -ENOSPC) + blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - /* Out of mem doesn't actually happen, since we fall back - * to direct descriptors */ if (err == -ENOMEM || err == -ENOSPC) return BLK_STS_DEV_RESOURCE; return BLK_STS_IOERR; diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index fd1e19f1a49f..3666afa639d1 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -936,6 +936,8 @@ next: out_of_memory: pr_alert("%s: out of memory\n", __func__); put_free_pages(ring, pages_to_gnt, segs_to_map); + for (i = last_map; i < num; i++) + pages[i]->handle = BLKBACK_INVALID_HANDLE; return -ENOMEM; } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index b90dbcd99c03..c4cd68116e7f 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -171,6 +171,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) blkif->domid = domid; atomic_set(&blkif->refcnt, 1); init_completion(&blkif->drain_complete); + + /* + * Because freeing back to the cache may be deferred, it is not + * safe to unload the module (and hence destroy the cache) until + * this has completed. To prevent premature unloading, take an + * extra module reference here and release only when the object + * has been freed back to the cache. + */ + __module_get(THIS_MODULE); INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); return blkif; @@ -320,6 +329,7 @@ static void xen_blkif_free(struct xen_blkif *blkif) /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); + module_put(THIS_MODULE); } int __init xen_blkif_interface_init(void) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a74d03913822..c02be06c5299 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1113,8 +1113,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, if (!VDEV_IS_EXTENDED(info->vdevice)) { err = xen_translate_vdev(info->vdevice, &minor, &offset); if (err) - return err; - nr_parts = PARTS_PER_DISK; + return err; + nr_parts = PARTS_PER_DISK; } else { minor = BLKIF_MINOR_EXT(info->vdevice); nr_parts = PARTS_PER_EXT_DISK; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 4285e75e52c3..1bf4a908a0bd 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -626,7 +626,7 @@ static ssize_t writeback_store(struct device *dev, struct bio bio; struct bio_vec bio_vec; struct page *page; - ssize_t ret; + ssize_t ret = len; int mode; unsigned long blk_idx = 0; @@ -762,7 +762,6 @@ next: if (blk_idx) free_block_bdev(zram, blk_idx); - ret = len; __free_page(page); release_init_lock: up_read(&zram->init_lock); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 2d2e6d862068..f02a4bdc0ca7 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -440,6 +440,12 @@ int btbcm_finalize(struct hci_dev *hdev) set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + /* Some devices ship with the controller default address. + * Allow the bootloader to set a valid address through the + * device tree. 
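Stepping back to the xen-blkback hunk above: it pins the module for as long as a blkif can still be freed back to the kmem cache from deferred work, taking the reference at allocation and dropping it only after kmem_cache_free(), so the module (and the cache it owns) cannot be unloaded with a free still in flight. The pairing, reduced to a user-space sketch with an atomic counter standing in for try_module_get()/module_put():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int module_refs = 1;	/* the module's own base ref */

struct blkif { int domid; };

static struct blkif *blkif_alloc(int domid)
{
	struct blkif *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->domid = domid;
	atomic_fetch_add(&module_refs, 1);	/* __module_get(): pin until freed */
	return b;
}

/* Runs later, e.g. from a work item; the "module" is guaranteed
 * to still be loaded because the object holds a reference. */
static void blkif_deferred_free(struct blkif *b)
{
	free(b);				/* kmem_cache_free() upstream */
	atomic_fetch_sub(&module_refs, 1);	/* module_put(): unload may proceed */
}

int main(void)
{
	struct blkif *b = blkif_alloc(7);

	printf("refs with object alive: %d\n", atomic_load(&module_refs));
	blkif_deferred_free(b);
	printf("refs after free: %d\n", atomic_load(&module_refs));
	return 0;
}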
+ */ + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + return 0; } EXPORT_SYMBOL_GPL(btbcm_finalize); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a9c35ebb30f8..9c3b063e1a1f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1200,7 +1200,7 @@ static int btusb_open(struct hci_dev *hdev) if (data->setup_on_usb) { err = data->setup_on_usb(hdev); if (err < 0) - return err; + goto setup_fail; } data->intf->needs_remote_wakeup = 1; @@ -1239,6 +1239,7 @@ done: failed: clear_bit(BTUSB_INTR_RUNNING, &data->flags); +setup_fail: usb_autopm_put_interface(data->intf); return err; } @@ -2584,7 +2585,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) * and being processed the events from there then. */ if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { - data->evt_skb = skb_clone(skb, GFP_KERNEL); + data->evt_skb = skb_clone(skb, GFP_ATOMIC); if (!data->evt_skb) goto err_out; } @@ -2849,7 +2850,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt rst (%d)", err); - return err; + goto err_release_fw; } /* Wait a few moments for firmware activation done */ @@ -3807,8 +3808,8 @@ static int btusb_probe(struct usb_interface *intf, btusb_check_needs_reset_resume(intf); } -#ifdef CONFIG_BT_HCIBTUSB_RTL - if (id->driver_info & BTUSB_REALTEK) { + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) && + (id->driver_info & BTUSB_REALTEK)) { hdev->setup = btrtl_setup_realtek; hdev->shutdown = btrtl_shutdown_realtek; hdev->cmd_timeout = btusb_rtl_cmd_timeout; @@ -3818,8 +3819,11 @@ static int btusb_probe(struct usb_interface *intf, * (DEVICE_REMOTE_WAKEUP) */ set_bit(BTUSB_WAKEUP_DISABLE, &data->flags); + + err = usb_autopm_get_interface(intf); + if (err < 0) + goto out_free_dev; } -#endif if (id->driver_info & BTUSB_AMP) { /* AMP controllers do not support SCO packets */ diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index fe2e307009f4..cf4a56095817 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) if (*ptr == 0xc0) { BT_ERR("Short BCSP packet"); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_count = 0; } else @@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { BT_ERR("Error in BCSP hdr checksum"); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; @@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) bscp_get_crc(bcsp)); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 285706618f8a..d9a4c6c691e0 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu) serdev_device_set_flow_control(serdev, true); - if (hu->oper_speed) - speed = hu->oper_speed; - else if (hu->proto->oper_speed) - speed = hu->proto->oper_speed; - else - speed = 0; - do { /* Reset the Bluetooth device */ gpiod_set_value_cansleep(lldev->enable_gpio, 0); @@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu) return err; } - if (speed) { - __le32 speed_le = 
cpu_to_le32(speed); - struct sk_buff *skb; - - skb = __hci_cmd_sync(hu->hdev, - HCI_VS_UPDATE_UART_HCI_BAUDRATE, - sizeof(speed_le), &speed_le, - HCI_INIT_TIMEOUT); - if (!IS_ERR(skb)) { - kfree_skb(skb); - serdev_device_set_baudrate(serdev, speed); - } - } - err = download_firmware(lldev); if (!err) break; @@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu) } /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + if (speed) { + __le32 speed_le = cpu_to_le32(speed); + struct sk_buff *skb; + + skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE, + sizeof(speed_le), &speed_le, + HCI_INIT_TIMEOUT); + if (!IS_ERR(skb)) { + kfree_skb(skb); + serdev_device_set_baudrate(serdev, speed); + } + } return 0; } diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 6b331061d34b..47c2bb444ab4 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -138,7 +138,6 @@ config TEGRA_ACONNECT tristate "Tegra ACONNECT Bus Driver" depends on ARCH_TEGRA_210_SOC depends on OF && PM - select PM_CLK help Driver for the Tegra ACONNECT bus which is used to interface with the devices inside the Audio Processing Engine (APE) for Tegra210. diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index 36cf13eee6b8..68413bf9cf87 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -466,7 +466,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len, { struct moxtet *moxtet = file->private_data; u8 bin[TURRIS_MOX_MAX_MODULES]; - u8 hex[sizeof(buf) * 2 + 1]; + u8 hex[sizeof(bin) * 2 + 1]; int ret, n; ret = moxtet_spi_read(moxtet, bin); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 2b6670daf7fc..f0bc0841cbc4 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } + /* Always add a slot for main clocks fck and ick even if unused */ + if (!nr_fck) + ddata->nr_clocks++; + if (!nr_ick) + ddata->nr_clocks++; + ddata->clocks = devm_kcalloc(ddata->dev, ddata->nr_clocks, sizeof(*ddata->clocks), GFP_KERNEL); @@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata) struct clk *clock; int i, error; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return 0; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata) struct clk *clock; int i; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -473,7 +479,7 @@ static void sysc_clkdm_deny_idle(struct sysc *ddata) { struct ti_sysc_platform_data *pdata; - if (ddata->legacy_mode) + if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) return; pdata = dev_get_platdata(ddata->dev); @@ -485,7 +491,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata) { struct ti_sysc_platform_data *pdata; - if (ddata->legacy_mode) + if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) return; pdata = dev_get_platdata(ddata->dev); @@ -917,6 +923,9 @@ set_midle: return -EINVAL; } + if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY) + best_mode = SYSC_IDLE_NO; + reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); @@ -978,6 +987,10 @@ static int sysc_disable_module(struct device *dev) 
return ret; } + if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) || + ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY)) + best_mode = SYSC_IDLE_FORCE; + reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); @@ -1236,6 +1249,14 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE), /* Quirks that need to be set based on detected module */ + SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, + SYSC_MODULE_QUIRK_AESS), + SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff, + SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, SYSC_MODULE_QUIRK_HDQ1W), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, @@ -1251,6 +1272,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, + 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT), /* Watchdog on am3 and am4 */ @@ -1260,7 +1285,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { #ifdef DEBUG SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0), SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0), - SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, 0), SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0), SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, @@ -1309,8 +1333,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), - SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, - 0xffffffff, 0), SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0), #endif }; @@ -1384,7 +1406,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata) } /* 1-wire needs module's internal clocks enabled for reset */ -static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata) +static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) { int offset = 0x0c; /* HDQ_CTRL_STATUS */ u16 val; @@ -1394,6 +1416,14 @@ static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata) sysc_write(ddata, offset, val); } +/* AESS (Audio Engine SubSystem) needs autogating set after enable */ +static void sysc_module_enable_quirk_aess(struct sysc *ddata) +{ + int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */ + + sysc_write(ddata, offset, 1); +} + /* I2C needs extra enable bit toggling for reset */ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) { @@ -1464,7 +1494,7 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { - ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w; + 
ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; return; } @@ -1476,6 +1506,9 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; } + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) + ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; @@ -1594,6 +1627,10 @@ static int sysc_reset(struct sysc *ddata) sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + if (ddata->cfg.srst_udelay) + usleep_range(ddata->cfg.srst_udelay, + ddata->cfg.srst_udelay * 2); + if (ddata->clk_enable_quirk) ddata->clk_enable_quirk(ddata); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index ac42ae4651ce..eebdcbef0578 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks) tracks->xa = 0; tracks->error = 0; cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) { + tracks->error = CDS_NO_INFO; + return; + } + /* Grab the TOC header so we can see how many tracks there are */ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header); if (ret) { @@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, ret = open_for_data(cdi); if (ret) goto err; - cdrom_mmc3_profile(cdi); + if (CDROM_CAN(CDC_GENERIC_PACKET)) + cdrom_mmc3_profile(cdi); if (mode & FMODE_WRITE) { ret = -EROFS; if (cdrom_open_write(cdi)) @@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) it doesn't give enough information or fails. then we return the toc contents. */ use_toc: + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + toc.cdte_format = CDROM_MSF; toc.cdte_track = CDROM_LEADOUT; if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 9ac6671bb514..f69609b47fef 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -855,7 +855,7 @@ int hpet_alloc(struct hpet_data *hdp) return 0; } - hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1), + hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs), GFP_KERNEL); if (!hpetp) diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 30cf00f8e9a0..0576801944fd 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc) return -ETIMEDOUT; } - if (rngc->err_reg != 0) + if (rngc->err_reg != 0) { + imx_rngc_irq_mask_clear(rngc); return -EIO; + } return 0; } diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index b27f39688b5e..e329f82c0467 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -66,6 +66,13 @@ #define OMAP4_RNG_OUTPUT_SIZE 0x8 #define EIP76_RNG_OUTPUT_SIZE 0x10 +/* + * EIP76 RNG takes approx. 700us to produce 16 bytes of output data + * as per testing results. And to account for the lack of udelay()'s + * reliability, we keep the timeout as 1000us. 
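The constant defined just after this comment counts polling rounds, not microseconds: with the loop's existing udelay(10) per round (outside this hunk), 100 rounds give a budget of roughly 1 ms, safely above the ~700 us the comment cites for 16 bytes of output, where the old bound of 20 rounds was too tight. The loop shape, as a generic helper (illustrative, not the driver's code):

#include <stdbool.h>
#include <unistd.h>

#define POLL_ROUNDS	100	/* rounds, not microseconds */
#define POLL_DELAY_US	10	/* ~10 us per round -> ~1 ms budget */

static bool poll_until_ready(bool (*ready)(void), bool wait)
{
	int i;

	for (i = 0; i < POLL_ROUNDS; i++) {
		if (ready())
			return true;
		if (!wait)
			return false;	/* caller wanted a single probe */
		usleep(POLL_DELAY_US);
	}
	return false;	/* timed out after ~POLL_ROUNDS * POLL_DELAY_US */
}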
+ */ +#define RNG_DATA_FILL_TIMEOUT 100 + enum { RNG_OUTPUT_0_REG = 0, RNG_OUTPUT_1_REG, @@ -176,7 +183,7 @@ static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max, if (max < priv->pdata->data_size) return 0; - for (i = 0; i < 20; i++) { + for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) { present = priv->pdata->data_present(priv); if (present || !wait) break; diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 38b719017186..8df3cad7c97a 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #define RNG_RESET 0x01 @@ -86,14 +88,18 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w) static struct hwrng omap3_rom_rng_ops = { .name = "omap3-rom", - .read = omap3_rom_rng_read, }; static int omap3_rom_rng_probe(struct platform_device *pdev) { int ret = 0; - pr_info("initializing\n"); + omap3_rom_rng_ops.read = of_device_get_match_data(&pdev->dev); + if (!omap3_rom_rng_ops.read) { + dev_err(&pdev->dev, "missing rom code handler\n"); + + return -ENODEV; + } omap3_rom_rng_call = pdev->dev.platform_data; if (!omap3_rom_rng_call) { @@ -121,13 +127,21 @@ static int omap3_rom_rng_remove(struct platform_device *pdev) { cancel_delayed_work_sync(&idle_work); hwrng_unregister(&omap3_rom_rng_ops); - clk_disable_unprepare(rng_clk); + if (!rng_idle) + clk_disable_unprepare(rng_clk); return 0; } +static const struct of_device_id omap_rom_rng_match[] = { + { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, omap_rom_rng_match); + static struct platform_driver omap3_rom_rng_driver = { .driver = { .name = "omap3-rom-rng", + .of_match_table = omap_rom_rng_match, }, .probe = omap3_rom_rng_probe, .remove = omap3_rom_rng_remove, diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index 285e0b8f9a97..09e3e25562a8 100644 --- a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c @@ -265,7 +265,7 @@ static int ipmb_slave_cb(struct i2c_client *client, break; case I2C_SLAVE_WRITE_RECEIVED: - if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg)) + if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1) break; buf[++ipmb_dev->msg_idx] = *val; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 2aab80e19ae0..0b6e7f8d9729 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -448,6 +448,8 @@ enum ipmi_stat_indexes { #define IPMI_IPMB_NUM_SEQ 64 struct ipmi_smi { + struct module *owner; + /* What interface number are we? */ int intf_num; @@ -1220,6 +1222,11 @@ int ipmi_create_user(unsigned int if_num, if (rv) goto out_kfree; + if (!try_module_get(intf->owner)) { + rv = -ENODEV; + goto out_kfree; + } + /* Note that each existing user holds a refcount to the interface. */ kref_get(&intf->refcount); @@ -1349,6 +1356,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) } kref_put(&intf->refcount, intf_free); + module_put(intf->owner); } int ipmi_destroy_user(struct ipmi_user *user) @@ -2459,7 +2467,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) * been recently fetched, this will just use the cached data. Otherwise * it will run a new fetch. 
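Back in the ipmb_dev_int hunk above, the corrected bound accounts for a pre-increment: the slave callback stores each byte with buf[++msg_idx], so the index must still be strictly below size - 1 before the increment; the old ">= sizeof(struct ipmb_msg)" guard let one byte land just past the end of the buffer. A minimal reproduction of the bounds rule:

#include <stdio.h>

#define MSG_SIZE 4

int main(void)
{
	unsigned char buf[MSG_SIZE] = { 0xA0 };	/* slot 0 filled elsewhere */
	unsigned int idx = 0;
	int byte;

	for (byte = 0x01; byte <= 0x10; byte++) {
		/* Pre-increment store: with "idx >= MSG_SIZE" this loop
		 * would still write buf[MSG_SIZE], one past the end. */
		if (idx >= MSG_SIZE - 1)
			break;
		buf[++idx] = (unsigned char)byte;
	}

	printf("last index written: %u\n", idx);	/* MSG_SIZE - 1 */
	return 0;
}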
* - * Except for the first time this is called (in ipmi_register_smi()), + * Except for the first time this is called (in ipmi_add_smi()), * this will always return good data; */ static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, @@ -3031,8 +3039,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf, bmc->pdev.name = "ipmi_bmc"; rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); - if (rv < 0) + if (rv < 0) { + kfree(bmc); goto out; + } + bmc->pdev.dev.driver = &ipmidriver.driver; bmc->pdev.id = rv; bmc->pdev.dev.release = release_bmc_device; @@ -3377,10 +3388,11 @@ static void redo_bmc_reg(struct work_struct *work) kref_put(&intf->refcount, intf_free); } -int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, - void *send_info, - struct device *si_dev, - unsigned char slave_addr) +int ipmi_add_smi(struct module *owner, + const struct ipmi_smi_handlers *handlers, + void *send_info, + struct device *si_dev, + unsigned char slave_addr) { int i, j; int rv; @@ -3406,7 +3418,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, return rv; } - + intf->owner = owner; intf->bmc = &intf->tmp_bmc; INIT_LIST_HEAD(&intf->bmc->intfs); mutex_init(&intf->bmc->dyn_mutex); @@ -3514,7 +3526,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, return rv; } -EXPORT_SYMBOL(ipmi_register_smi); +EXPORT_SYMBOL(ipmi_add_smi); static void deliver_smi_err_response(struct ipmi_smi *intf, struct ipmi_smi_msg *msg, diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 6b9a0593d2eb..c950f0aea2fe 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -253,7 +253,7 @@ struct smi_info { static int force_kipmid[IPMI_MAX_INTFS]; static int num_force_kipmid; -static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS]; +static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS] = { 1, 1, 1, 1 }; static int num_max_busy_us; static bool unload_when_empty = true; diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index c78127ccbc0d..638c693e17ad 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev) else io.slave_addr = slave_addr; - io.irq = platform_get_irq(pdev, 0); + io.irq = platform_get_irq_optional(pdev, 0); if (io.irq > 0) io.irq_setup = ipmi_std_irq_setup; else @@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev) io.irq = tmp; io.irq_setup = acpi_gpe_irq_setup; } else { - int irq = platform_get_irq(pdev, 0); + int irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { io.irq = irq; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 22c6a2e61236..8ac390c2b514 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); msg = ssif_info->curr_msg; if (msg) { + if (data) { + if (len > IPMI_MAX_MSG_LENGTH) + len = IPMI_MAX_MSG_LENGTH; + memcpy(msg->rsp, data, len); + } else { + len = 0; + } msg->rsp_size = len; - if (msg->rsp_size > IPMI_MAX_MSG_LENGTH) - msg->rsp_size = IPMI_MAX_MSG_LENGTH; - memcpy(msg->rsp, data, msg->rsp_size); ssif_info->curr_msg = NULL; } diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 7c9269e3477a..bd95aba1f9fe 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -713,6 +713,10 @@ static int 
lp_set_timeout64(unsigned int minor, void __user *arg) if (copy_from_user(karg, arg, sizeof(karg))) return -EFAULT; + /* sparc64 suseconds_t is 32-bit only */ + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) + karg[1] >>= 32; + return lp_set_timeout(minor, karg[0], karg[1]); } diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index c86f18aa8985..34bb88fe0b0a 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -619,20 +619,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (copy_from_user(time32, argp, sizeof(time32))) return -EFAULT; + if ((time32[0] < 0) || (time32[1] < 0)) + return -EINVAL; + return pp_set_timeout(pp->pdev, time32[0], time32[1]); case PPSETTIME64: if (copy_from_user(time64, argp, sizeof(time64))) return -EFAULT; + if ((time64[0] < 0) || (time64[1] < 0)) + return -EINVAL; + + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) + time64[1] >>= 32; + return pp_set_timeout(pp->pdev, time64[0], time64[1]); case PPGETTIME32: jiffies_to_timespec64(pp->pdev->timeout, &ts); time32[0] = ts.tv_sec; time32[1] = ts.tv_nsec / NSEC_PER_USEC; - if ((time32[0] < 0) || (time32[1] < 0)) - return -EINVAL; if (copy_to_user(argp, time32, sizeof(time32))) return -EFAULT; @@ -643,8 +650,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) jiffies_to_timespec64(pp->pdev->timeout, &ts); time64[0] = ts.tv_sec; time64[1] = ts.tv_nsec / NSEC_PER_USEC; - if ((time64[0] < 0) || (time64[1] < 0)) - return -EINVAL; + + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) + time64[1] <<= 32; if (copy_to_user(argp, time64, sizeof(time64))) return -EFAULT; diff --git a/drivers/char/random.c b/drivers/char/random.c index 01b8868b9bed..8ff28c14af7e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1687,8 +1687,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, print_once = true; #endif if (__ratelimit(&unseeded_warning)) - pr_notice("random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); + printk_deferred(KERN_NOTICE "random: %s called from %pS " "with crng_init=%d\n", func_name, caller, + crng_init); } /* @@ -2357,11 +2358,11 @@ struct batched_entropy { /* * Get a random word for internal kernel use only. The quality of the random - * number is either as good as RDRAND or as good as /dev/urandom, with the - * goal of being quite fast and not depleting entropy. In order to ensure + * number is as good as /dev/urandom, but there is no backtrack protection, with + * the goal of being quite fast and not depleting entropy. In order to ensure * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * wait_for_random_bytes() should be called and return 0 at least once at any + * point prior.
*/ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), @@ -2374,15 +2375,6 @@ u64 get_random_u64(void) struct batched_entropy *batch; static void *previous; -#if BITS_PER_LONG == 64 - if (arch_get_random_long((unsigned long *)&ret)) - return ret; -#else - if (arch_get_random_long((unsigned long *)&ret) && - arch_get_random_long((unsigned long *)&ret + 1)) - return ret; -#endif - warn_unseeded_randomness(&previous); batch = raw_cpu_ptr(&batched_entropy_u64); @@ -2407,9 +2399,6 @@ u32 get_random_u32(void) struct batched_entropy *batch; static void *previous; - if (arch_get_random_int(&ret)) - return ret; - warn_unseeded_randomness(&previous); batch = raw_cpu_ptr(&batched_entropy_u32); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 2ec47a69a2a6..87f449340202 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work) mutex_lock(&priv->buffer_mutex); priv->command_enqueued = false; + ret = tpm_try_get_ops(priv->chip); + if (ret) { + priv->response_length = ret; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); @@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work) priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } +out: mutex_unlock(&priv->buffer_mutex); wake_up_interruptible(&priv->async_wait); } @@ -123,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, priv->response_read = true; ret_size = min_t(ssize_t, size, priv->response_length); - if (!ret_size) { + if (ret_size <= 0) { priv->response_length = 0; goto out; } @@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); + tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index 1089fc0bb290..f3742bcc73e3 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -14,7 +14,7 @@ struct file_priv { struct work_struct timeout_work; struct work_struct async_work; wait_queue_head_t async_wait; - size_t response_length; + ssize_t response_length; bool response_read; bool command_enqueued; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index ba9acae83bff..2f8026b71933 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -831,6 +831,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index) return 0; } + bank->crypto_id = HASH_ALGO__LAST; + return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size); } @@ -939,6 +941,10 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands, GFP_KERNEL); + if (!chip->cc_attrs_tbl) { + rc = -ENOMEM; + goto out; + } rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); if (rc) diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 6640a14dbe48..22bf553ccf9d 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid = 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** - * ftpm_tee_tpm_op_recv - retrieve fTPM response. 
+ * ftpm_tee_tpm_op_recv() - retrieve fTPM response. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. * @buf: the buffer to store data. * @count: the number of bytes to read. @@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) } /** - * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory. + * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h * @buf: the buffer to send. * @len: the number of bytes to send. @@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data) } /** - * ftpm_tee_probe - initialize the fTPM + * ftpm_tee_probe() - initialize the fTPM * @pdev: the platform_device description. * * Return: @@ -298,7 +298,7 @@ out_tee_session: } /** - * ftpm_tee_remove - remove the TPM device + * ftpm_tee_remove() - remove the TPM device * @pdev: the platform_device description. * * Return: @@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev) return 0; } +/** + * ftpm_tee_shutdown() - shutdown the TPM device + * @pdev: the platform_device description. + */ +static void ftpm_tee_shutdown(struct platform_device *pdev) +{ + struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev); + + tee_shm_free(pvt_data->shm); + tee_client_close_session(pvt_data->ctx, pvt_data->session); + tee_client_close_context(pvt_data->ctx); +} + static const struct of_device_id of_ftpm_tee_ids[] = { { .compatible = "microsoft,ftpm" }, { } @@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = { }, .probe = ftpm_tee_probe, .remove = ftpm_tee_remove, + .shutdown = ftpm_tee_shutdown, }; module_platform_driver(ftpm_tee_driver); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index e4fdde93ed4c..e7df342a317d 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -286,7 +286,7 @@ static int tpm_tis_plat_probe(struct platform_device *pdev) } tpm_info.res = *res; - tpm_info.irq = platform_get_irq(pdev, 0); + tpm_info.irq = platform_get_irq_optional(pdev, 0); if (tpm_info.irq <= 0) { if (pdev != force_pdev) tpm_info.irq = -1; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 270f43acbb77..c3181ea9f271 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -980,8 +980,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, goto out_err; } - tpm_chip_start(chip); - chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); @@ -991,7 +989,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } else { tpm_tis_probe_irq(chip, intmask); } - tpm_chip_stop(chip); } rc = tpm_chip_register(chip); diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index 4f24e46ebe7c..56db949a7b70 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -15,10 +15,11 @@ #include #include #include +#include struct ttyprintk_port { struct tty_port port; - struct mutex port_write_mutex; + spinlock_t spinlock; }; static struct ttyprintk_port tpk_port; @@ -99,11 +100,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp) static void tpk_close(struct tty_struct *tty, struct file *filp) { struct ttyprintk_port *tpkp = tty->driver_data; + unsigned long flags; - mutex_lock(&tpkp->port_write_mutex); + spin_lock_irqsave(&tpkp->spinlock, flags); /* flush 
tpk_printk buffer */ tpk_printk(NULL, 0); - mutex_unlock(&tpkp->port_write_mutex); + spin_unlock_irqrestore(&tpkp->spinlock, flags); tty_port_close(&tpkp->port, tty, filp); } @@ -115,13 +117,14 @@ static int tpk_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct ttyprintk_port *tpkp = tty->driver_data; + unsigned long flags; int ret; /* exclusive use of tpk_printk within this tty */ - mutex_lock(&tpkp->port_write_mutex); + spin_lock_irqsave(&tpkp->spinlock, flags); ret = tpk_printk(buf, count); - mutex_unlock(&tpkp->port_write_mutex); + spin_unlock_irqrestore(&tpkp->spinlock, flags); return ret; } @@ -171,7 +174,7 @@ static int __init ttyprintk_init(void) { int ret = -ENOMEM; - mutex_init(&tpk_port.port_write_mutex); + spin_lock_init(&tpk_port.spinlock); ttyprintk_driver = tty_alloc_driver(1, TTY_DRIVER_RESET_TERMIOS | diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 0aabe49aed09..a9d4234758d7 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index 0ac34cdaa106..77fe83a73bf4 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 0855f3a80cc7..086cf0b4955c 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 0b03cfae3a9d..b71515acdec1 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -275,7 +275,7 @@ static int __init pmc_register_ops(void) np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids); - pmcreg = syscon_node_to_regmap(np); + pmcreg = device_node_to_regmap(np); if (IS_ERR(pmcreg)) return PTR_ERR(pmcreg); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index 86238d5ecb4d..77398aefeb6d 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -47,6 +47,7 @@ static const struct clk_programmable_layout sam9x60_programmable_layout = { .pres_shift = 8, .css_mask = 0x1f, .have_slck_mck = 0, + .is_pres_direct = 1, }; static const struct clk_pcr_layout sam9x60_pcr_layout = { diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 0de1108737db..ff7e3f727082 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 25b156d4e645..a6dee4a3b6e4 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -136,7 
+136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 9d930edd6516..13304cf5f2a8 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -280,7 +280,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev) else clk = clk_register_gpio_gate(&pdev->dev, node->name, parent_names ? parent_names[0] : NULL, gpiod, - 0); + CLK_SET_RATE_PARENT); if (IS_ERR(clk)) return PTR_ERR(clk); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 1c677d7f7f53..62d0fc486d3a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3231,6 +3231,34 @@ static inline void clk_debug_unregister(struct clk_core *core) } #endif +static void clk_core_reparent_orphans_nolock(void) +{ + struct clk_core *orphan; + struct hlist_node *tmp2; + + /* + * walk the list of orphan clocks and reparent any that newly find a + * parent. + */ + hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { + struct clk_core *parent = __clk_init_parent(orphan); + + /* + * We need to use __clk_set_parent_before() and _after() to + * properly migrate any prepare/enable count of the orphan + * clock. This is important for CLK_IS_CRITICAL clocks, which + * are enabled during init but might not have a parent yet. + */ + if (parent) { + /* update the clk tree topology */ + __clk_set_parent_before(orphan, parent); + __clk_set_parent_after(orphan, parent, NULL); + __clk_recalc_accuracies(orphan); + __clk_recalc_rates(orphan, 0); + } + } +} + /** * __clk_core_init - initialize the data structures in a struct clk_core * @core: clk_core being initialized @@ -3241,8 +3269,6 @@ static inline void clk_debug_unregister(struct clk_core *core) static int __clk_core_init(struct clk_core *core) { int ret; - struct clk_core *orphan; - struct hlist_node *tmp2; unsigned long rate; if (!core) @@ -3294,6 +3320,21 @@ static int __clk_core_init(struct clk_core *core) goto out; } + /* + * optional platform-specific magic + * + * The .init callback is not used by any of the basic clock types, but + * exists for weird hardware that must perform initialization magic. + * Please consider other ways of solving initialization problems before + * using this callback, as its use is discouraged. + * + * If it exists, this callback should be called before any other callback of + * the clock + */ + if (core->ops->init) + core->ops->init(core->hw); + + core->parent = __clk_init_parent(core); /* @@ -3318,17 +3359,6 @@ static int __clk_core_init(struct clk_core *core) core->orphan = true; } - /* - * optional platform-specific magic - * - * The .init callback is not used by any of the basic clock types, but - * exists for weird hardware that must perform initialization magic. - * Please consider other ways of solving initialization problems before - * using this callback, as its use is discouraged. - */ - if (core->ops->init) - core->ops->init(core->hw); - /* * Set clk's accuracy. The preferred method is to use * .recalc_accuracy.
For simple clocks and lazy developers the default @@ -3382,35 +3412,22 @@ static int __clk_core_init(struct clk_core *core) if (core->flags & CLK_IS_CRITICAL) { unsigned long flags; - clk_core_prepare(core); + ret = clk_core_prepare(core); + if (ret) + goto out; flags = clk_enable_lock(); - clk_core_enable(core); + ret = clk_core_enable(core); clk_enable_unlock(flags); - } - - /* - * walk the list of orphan clocks and reparent any that newly finds a - * parent. - */ - hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { - struct clk_core *parent = __clk_init_parent(orphan); - - /* - * We need to use __clk_set_parent_before() and _after() to - * to properly migrate any prepare/enable count of the orphan - * clock. This is important for CLK_IS_CRITICAL clocks, which - * are enabled during init but might not have a parent yet. - */ - if (parent) { - /* update the clk tree topology */ - __clk_set_parent_before(orphan, parent); - __clk_set_parent_after(orphan, parent, NULL); - __clk_recalc_accuracies(orphan); - __clk_recalc_rates(orphan, 0); + if (ret) { + clk_core_unprepare(core); + goto out; } } + clk_core_reparent_orphans_nolock(); + + kref_init(&core->ref); out: clk_pm_runtime_put(core); @@ -3701,6 +3718,28 @@ fail_out: return ERR_PTR(ret); } +/** + * dev_or_parent_of_node() - Get device node of @dev or @dev's parent + * @dev: Device to get device node of + * + * Return: device node pointer of @dev, or the device node pointer of + * @dev->parent if @dev doesn't have a device node, or NULL if neither + * @dev nor @dev->parent has a device node. + */ +static struct device_node *dev_or_parent_of_node(struct device *dev) +{ + struct device_node *np; + + if (!dev) + return NULL; + + np = dev_of_node(dev); + if (!np) + np = dev_of_node(dev->parent); + + return np; +} + /** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock @@ -3716,7 +3755,7 @@ fail_out: */ struct clk *clk_register(struct device *dev, struct clk_hw *hw) { - return __clk_register(dev, dev_of_node(dev), hw); + return __clk_register(dev, dev_or_parent_of_node(dev), hw); } EXPORT_SYMBOL_GPL(clk_register); @@ -3732,7 +3771,8 @@ EXPORT_SYMBOL_GPL(clk_register); */ int clk_hw_register(struct device *dev, struct clk_hw *hw) { - return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw)); + return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev), + hw)); } EXPORT_SYMBOL_GPL(clk_hw_register); @@ -3879,6 +3919,7 @@ void clk_unregister(struct clk *clk) __func__, clk->core->name); kref_put(&clk->core->ref, __clk_release); + free_clk(clk); unlock: clk_prepare_unlock(); } @@ -4160,6 +4201,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) EXPORT_SYMBOL_GPL(clk_notifier_unregister); #ifdef CONFIG_OF +static void clk_core_reparent_orphans(void) +{ + clk_prepare_lock(); + clk_core_reparent_orphans_nolock(); + clk_prepare_unlock(); +} + /** * struct of_clk_provider - Clock provider registration structure * @link: Entry in global list of clock providers @@ -4255,6 +4303,8 @@ int of_clk_add_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clock from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); @@ -4290,6 +4340,8 @@ int of_clk_add_hw_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clk_hw provider from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np,
true); if (ret < 0) of_clk_del_provider(np); diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 388bdb94f841..d3486ee79ab5 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, mux->reg = reg; mux->shift = PCG_PCS_SHIFT; mux->mask = PCG_PCS_MASK; + mux->lock = &imx_ccm_lock; div = kzalloc(sizeof(*div), GFP_KERNEL); if (!div) @@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, gate_hw = &gate->hw; gate->reg = reg; gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ops, div_hw, diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 2022d9bead91..04a3ae979281 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -24,8 +24,8 @@ static const char * const spll_pfd_sels[] = { "spll_pfd0", "spll_pfd1", "spll_pf static const char * const spll_sels[] = { "spll", "spll_pfd_sel", }; static const char * const apll_pfd_sels[] = { "apll_pfd0", "apll_pfd1", "apll_pfd2", "apll_pfd3", }; static const char * const apll_sels[] = { "apll", "apll_pfd_sel", }; -static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "upll", }; -static const char * const ddr_sels[] = { "apll_pfd_sel", "upll", }; +static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "dummy", }; +static const char * const ddr_sels[] = { "apll_pfd_sel", "dummy", "dummy", "dummy", }; static const char * const nic_sels[] = { "firc", "ddr_clk", }; static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", }; static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "mpll", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", }; @@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = { { .val = 5, .div = 16, }, { .val = 6, .div = 32, }, { .val = 7, .div = 64, }, + { /* sentinel */ }, }; static const int pcc2_uart_clk_ids[] __initconst = { @@ -118,7 +119,7 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np) clks[IMX7ULP_CLK_SYS_SEL] = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels)); clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels)); clks[IMX7ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels)); - clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 1, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE); + clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE); clks[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT); clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT); diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 7a815ec76aa5..047f1d8fe323 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -112,48 +112,22 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw, return fvco; } 
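(The clk-pll14xx hunk that follows consolidates the *_mp_change() helpers and reads the divider fields mask-first. A minimal standalone sketch of why mask-then-shift is the required order once a mask macro is defined in-place rather than right-aligned; the field layout here is assumed for illustration, not the real i.MX register map:)

#include <stdint.h>
#include <stdio.h>

/* assumed layout for the sketch: mdiv occupies bits [21:12] */
#define MDIV_SHIFT	12
#define MDIV_MASK	(0x3ffu << MDIV_SHIFT)	/* in-place mask */

int main(void)
{
	uint32_t pll_div = 0x00014000;	/* mdiv field holds 0x14 */
	/* shift-then-mask loses the field: the bits move out from under the mask */
	uint32_t bad = (pll_div >> MDIV_SHIFT) & MDIV_MASK;
	/* mask-then-shift isolates the field first, then right-aligns it */
	uint32_t good = (pll_div & MDIV_MASK) >> MDIV_SHIFT;

	printf("bad=0x%x good=0x%x\n", bad, good);	/* prints bad=0x0 good=0x14 */
	return 0;
}

The same ordering is what lets the single clk_pll14xx_mp_change() helper below serve both the 1416x and 1443x set_rate paths.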
-static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate, +static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate, u32 pll_div) { u32 old_mdiv, old_pdiv; - old_mdiv = (pll_div >> MDIV_SHIFT) & MDIV_MASK; - old_pdiv = (pll_div >> PDIV_SHIFT) & PDIV_MASK; + old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT; + old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT; return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv; } -static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate, - u32 pll_div_ctl0, u32 pll_div_ctl1) -{ - u32 old_mdiv, old_pdiv, old_kdiv; - - old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK; - old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK; - old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK; - - return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv || - rate->kdiv != old_kdiv; -} - -static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate, - u32 pll_div_ctl0, u32 pll_div_ctl1) -{ - u32 old_mdiv, old_pdiv, old_kdiv; - - old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK; - old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK; - old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK; - - return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv || - rate->kdiv != old_kdiv; -} - static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll) { u32 val; - return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0, + return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0, LOCK_TIMEOUT_US); } @@ -174,7 +148,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate, tmp = readl_relaxed(pll->base + 4); - if (!clk_pll1416x_mp_change(rate, tmp)) { + if (!clk_pll14xx_mp_change(rate, tmp)) { tmp &= ~(SDIV_MASK) << SDIV_SHIFT; tmp |= rate->sdiv << SDIV_SHIFT; writel_relaxed(tmp, pll->base + 4); @@ -239,13 +213,15 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate, } tmp = readl_relaxed(pll->base + 4); - div_val = readl_relaxed(pll->base + 8); - if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) { + if (!clk_pll14xx_mp_change(rate, tmp)) { tmp &= ~(SDIV_MASK) << SDIV_SHIFT; tmp |= rate->sdiv << SDIV_SHIFT; writel_relaxed(tmp, pll->base + 4); + tmp = rate->kdiv << KDIV_SHIFT; + writel_relaxed(tmp, pll->base + 8); + return 0; } diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index fbef740704d0..b8b2072742a5 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c @@ -43,12 +43,12 @@ struct imx_sc_msg_req_set_clock_rate { __le32 rate; __le16 resource; u8 clk; -} __packed; +} __packed __aligned(4); struct req_get_clock_rate { __le16 resource; u8 clk; -} __packed; +} __packed __aligned(4); struct resp_get_clock_rate { __le32 rate; @@ -84,7 +84,7 @@ struct imx_sc_msg_get_clock_parent { struct req_get_clock_parent { __le16 resource; u8 clk; - } __packed req; + } __packed __aligned(4) req; struct resp_get_clock_parent { u8 parent; } resp; @@ -121,7 +121,7 @@ struct imx_sc_msg_req_clock_enable { u8 clk; u8 enable; u8 autog; -} __packed; +} __packed __aligned(4); static inline struct clk_scu *to_clk_scu(struct clk_hw *hw) { diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index f7a389a50401..6fe64ff8ffa1 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -51,48 +51,48 @@ struct imx_pll14xx_clk { }; #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \ - imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk + to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)) 
#define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \ cgr_val, clk_gate_flags, lock, share_count) \ - clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \ - cgr_val, clk_gate_flags, lock, share_count)->clk + to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \ + cgr_val, clk_gate_flags, lock, share_count)) #define imx_clk_pllv3(type, name, parent_name, base, div_mask) \ - imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk + to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)) #define imx_clk_pfd(name, parent_name, reg, idx) \ - imx_clk_hw_pfd(name, parent_name, reg, idx)->clk + to_clk(imx_clk_hw_pfd(name, parent_name, reg, idx)) #define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \ - imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk + to_clk(imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)) #define imx_clk_fixed_factor(name, parent, mult, div) \ - imx_clk_hw_fixed_factor(name, parent, mult, div)->clk + to_clk(imx_clk_hw_fixed_factor(name, parent, mult, div)) #define imx_clk_divider2(name, parent, reg, shift, width) \ - imx_clk_hw_divider2(name, parent, reg, shift, width)->clk + to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width)) #define imx_clk_gate_dis(name, parent, reg, shift) \ - imx_clk_hw_gate_dis(name, parent, reg, shift)->clk + to_clk(imx_clk_hw_gate_dis(name, parent, reg, shift)) #define imx_clk_gate2(name, parent, reg, shift) \ - imx_clk_hw_gate2(name, parent, reg, shift)->clk + to_clk(imx_clk_hw_gate2(name, parent, reg, shift)) #define imx_clk_gate2_flags(name, parent, reg, shift, flags) \ - imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk + to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)) #define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \ - imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk + to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)) #define imx_clk_gate3(name, parent, reg, shift) \ - imx_clk_hw_gate3(name, parent, reg, shift)->clk + to_clk(imx_clk_hw_gate3(name, parent, reg, shift)) #define imx_clk_gate4(name, parent, reg, shift) \ - imx_clk_hw_gate4(name, parent, reg, shift)->clk + to_clk(imx_clk_hw_gate4(name, parent, reg, shift)) #define imx_clk_mux(name, reg, shift, width, parents, num_parents) \ - imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk + to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)) struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, void __iomem *base, const struct imx_pll14xx_clk *pll_clk); @@ -195,6 +195,13 @@ struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)); +static inline struct clk *to_clk(struct clk_hw *hw) +{ + if (IS_ERR_OR_NULL(hw)) + return ERR_CAST(hw); + return hw->clk; +} + static inline struct clk *imx_clk_fixed(const char *name, int rate) { return clk_register_fixed_rate(NULL, name, NULL, 0, rate); diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index 18b23cdf679c..aa2522624fd3 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -1001,7 +1001,7 @@ static const struct regmap_config axg_audio_regmap_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = AUDIO_CLK_PDMIN_CTRL1, + .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL, }; struct audioclk_data { diff 
--git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index ddb1e5634739..3a5853ca98c6 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -77,6 +77,15 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, unsigned int m, n, frac; n = meson_parm_read(clk->map, &pll->n); + + /* + * On some HW, N is set to zero on init. This value is invalid as + * it would result in a division by zero. The rate can't be + * calculated in this case. + */ + if (n == 0) + return 0; + m = meson_parm_read(clk->map, &pll->m); frac = MESON_PARM_APPLICABLE(&pll->frac) ? diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index b3af61cc6fb9..d2760a021301 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = { &g12a_bt656, &g12a_usb1_to_ddr, &g12a_mmc_pclk, + &g12a_uart2, &g12a_vpu_intr, &g12a_gic, &g12a_sd_emmc_a_clk0, diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 67e6691e080c..8856ce476ccf 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1764,8 +1764,11 @@ static struct clk_regmap meson8b_hdmi_sys = { /* * The MALI IP is clocked by two identical clocks (mali_0 and mali_1) - * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only - * has mali_0 and no glitch-free mux. + * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can + * actually manage this glitch-free mux because it does top-to-bottom + * updates of each clock tree and switches to the "inactive" one when + * CLK_SET_RATE_GATE is set. + * Meson8 only has mali_0 and no glitch-free mux. */ static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = { &meson8b_xtal.hw, @@ -1830,7 +1833,7 @@ static struct clk_regmap meson8b_mali_0 = { &meson8b_mali_0_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; @@ -1885,7 +1888,7 @@ static struct clk_regmap meson8b_mali_1 = { &meson8b_mali_1_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index a60a1be937ad..b4a95cbbda98 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock); static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; static DEFINE_SPINLOCK(timer_lock); -static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"}; +static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; static DEFINE_SPINLOCK(reset_lock); diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c index 287fdeae7c7c..7b123105b5de 100644 --- a/drivers/clk/pxa/clk-pxa27x.c +++ b/drivers/clk/pxa/clk-pxa27x.c @@ -459,6 +459,7 @@ struct dummy_clk { }; static struct dummy_clk dummy_clks[] __initdata = { DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"), + DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"), DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"), DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"), }; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index b98b81ef43a1..a88101480e33 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -217,9 +217,14 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, clk_flags = clk_hw_get_flags(hw); p =
clk_hw_get_parent_by_index(hw, index); + if (!p) + return -EINVAL; + if (clk_flags & CLK_SET_RATE_PARENT) { rate = f->freq; if (f->pre_div) { + if (!rate) + rate = req->rate; rate /= 2; rate *= f->pre_div + 1; } @@ -950,7 +955,7 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct clk_hw *p; unsigned long prate = 0; - u32 val, mask, cfg, mode; + u32 val, mask, cfg, mode, src; int i, num_parents; regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); @@ -960,12 +965,12 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, if (cfg & mask) f->pre_div = cfg & mask; - cfg &= CFG_SRC_SEL_MASK; - cfg >>= CFG_SRC_SEL_SHIFT; + src = cfg & CFG_SRC_SEL_MASK; + src >>= CFG_SRC_SEL_SHIFT; num_parents = clk_hw_get_num_parents(hw); for (i = 0; i < num_parents; i++) { - if (cfg == rcg->parent_map[i].cfg) { + if (src == rcg->parent_map[i].cfg) { f->src = rcg->parent_map[i].src; p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i); prate = clk_hw_get_rate(p); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index fef5e8157061..e5c3db11bf26 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -648,6 +648,8 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = { }; /* msm8998 */ +DEFINE_CLK_SMD_RPM(msm8998, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); @@ -670,6 +672,10 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5); DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6); DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6); static struct clk_smd_rpm *msm8998_clks[] = { + [RPM_SMD_BIMC_CLK] = &msm8998_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8998_bimc_a_clk, + [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk, [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk, [RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk, [RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk, diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 28ddc747d703..bdeacebbf0e4 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate) if (!f) return NULL; + if (!f->freq) + return f; + for (; f->freq; f++) if (rate <= f->freq) return f; diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 95be125c3bdd..56d22dd225c9 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { @@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { @@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct 
gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { @@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { @@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { @@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct clk_regmap *gcc_sdm845_clocks[] = { diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index d25c8ba00a65..532626946b8d 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -464,7 +464,8 @@ static struct clk * __init cpg_rpc_clk_register(const char *name, clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, &rpc->div.hw, &clk_divider_ops, - &rpc->gate.hw, &clk_gate_ops, 0); + &rpc->gate.hw, &clk_gate_ops, + CLK_SET_RATE_PARENT); if (IS_ERR(clk)) { kfree(rpc); return clk; @@ -500,7 +501,8 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name, clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, &rpcd2->fixed.hw, &clk_fixed_factor_ops, - &rpcd2->gate.hw, &clk_gate_ops, 0); + &rpcd2->gate.hw, &clk_gate_ops, + CLK_SET_RATE_PARENT); if (IS_ERR(clk)) kfree(rpcd2); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 31466cd1842f..27fd274e92f8 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "clk.h" #include "clk-cpu.h" @@ -165,6 +166,8 @@ static const unsigned long exynos5x_clk_regs[] __initconst = { GATE_BUS_CPU, GATE_SCLK_CPU, CLKOUT_CMU_CPU, + APLL_CON0, + KPLL_CON0, CPLL_CON0, DPLL_CON0, EPLL_CON0, @@ -1628,6 +1631,13 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_subcmus); } + /* + * Keep top part of G3D clock path enabled permanently to ensure + * that the internal busses get their clock regardless of the + * main G3D clock enablement status. 
+ */ + clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d")); + samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c index 9d56eac43832..7ad5ba26dfba 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c @@ -46,7 +46,7 @@ int sprd_clk_regmap_init(struct platform_device *pdev, if (of_find_property(node, "sprd,syscon", NULL)) { regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon"); - if (IS_ERR_OR_NULL(regmap)) { + if (IS_ERR(regmap)) { pr_err("%s: failed to get syscon regmap\n", __func__); return PTR_ERR(regmap); } diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index 49bd7a4c015c..5f66bf879772 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -921,11 +921,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = { .num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets), }; +static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = { + .common = &pll_cpux_clk.common, + /* copy from pll_cpux_clk */ + .enable = BIT(31), + .lock = BIT(28), +}; + +static struct ccu_mux_nb sun50i_a64_cpu_nb = { + .common = &cpux_clk.common, + .cm = &cpux_clk.mux, + .delay_us = 1, /* > 8 clock cycles at 24 MHz */ + .bypass_index = 1, /* index of 24 MHz oscillator */ +}; + static int sun50i_a64_ccu_probe(struct platform_device *pdev) { struct resource *res; void __iomem *reg; u32 val; + int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(&pdev->dev, res); @@ -939,7 +954,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev) writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG); - return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc); + ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc); + if (ret) + return ret; + + /* Gate then ungate PLL CPU after any rate changes */ + ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb); + + /* Reparent CPU during PLL CPU rate changes */ + ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, + &sun50i_a64_cpu_nb); + + return 0; } static const struct of_device_id sun50i_a64_ccu_ids[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 45a1ed3fe674..ab194143e06c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -23,9 +23,9 @@ */ static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k", - "pll-periph0", "iosc" }; + "iosc", "pll-periph0" }; static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = { - { .index = 2, .shift = 0, .width = 5 }, + { .index = 3, .shift = 0, .width = 5 }, }; static struct ccu_div ar100_clk = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index 4646fdc61053..4c8c491b87c2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div apb0_clk = { - .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), - - .common = { - .reg = 0x0c, - .hw.init = CLK_HW_INIT_HW("apb0", - &ahb0_clk.hw, - &ccu_div_ops, - 0), - }, -}; - -static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); +static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); /* * Define the parent as an array that can be reused to save space @@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = { static struct ccu_common 
*sun8i_a83t_r_ccu_clks[] = { &ar100_clk.common, - &a83t_apb0_clk.common, + &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, @@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, - [CLK_APB0] = &a83t_apb0_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, @@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node, static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { - /* Fix apb0 bus gate parents here */ - apb0_gate_parent[0] = &a83t_apb0_clk.common.hw; - sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 897490800102..23bfe1d12f21 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = { .reg = 0x1f0, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; @@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = { .reg = 0x1f4, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 5c779eec454b..0e36ca3bf3d5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_PLL_DDR1 + 1, }; static struct clk_hw_onecell_data sun8i_v3_hw_clks = { @@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_I2S0 + 1, }; static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h index b0160d305a67..108eeeedcbf7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -51,6 +51,4 @@ #define CLK_PLL_DDR1 74 -#define CLK_NUMBER (CLK_I2S0 + 1) - #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 1ed85f120a1b..49b9f2f85bad 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -785,7 +785,11 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0), GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0), GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0), - GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0), + /* + * Critical for RAM re-repair operation, which must occur on resume + * from LP1 system suspend and as part of CCPLEX cluster switching. 
+ */ + GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL), GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0), GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0), GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0), diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 2782d91838ac..2cca1ced913d 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -78,7 +78,7 @@ static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst }; static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = { - { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ick" }, { 0 }, }; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9dd6185a4b4e..66e4b2b9ec60 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = { - { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" }, + { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" }, { 0 }, }; diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c index 9caa52944b1c..3e32db9dad81 100644 --- a/drivers/clk/uniphier/clk-uniphier-peri.c +++ b/drivers/clk/uniphier/clk-uniphier-peri.c @@ -18,8 +18,8 @@ #define UNIPHIER_PERI_CLK_FI2C(idx, ch) \ UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch)) -#define UNIPHIER_PERI_CLK_SCSSI(idx) \ - UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17) +#define UNIPHIER_PERI_CLK_SCSSI(idx, ch) \ + UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch)) #define UNIPHIER_PERI_CLK_MCSSI(idx) \ UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14) @@ -35,7 +35,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = { UNIPHIER_PERI_CLK_I2C(6, 2), UNIPHIER_PERI_CLK_I2C(7, 3), UNIPHIER_PERI_CLK_I2C(8, 4), - UNIPHIER_PERI_CLK_SCSSI(11), + UNIPHIER_PERI_CLK_SCSSI(11, 0), { /* sentinel */ } }; @@ -51,7 +51,10 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = { UNIPHIER_PERI_CLK_FI2C(8, 4), UNIPHIER_PERI_CLK_FI2C(9, 5), UNIPHIER_PERI_CLK_FI2C(10, 6), - UNIPHIER_PERI_CLK_SCSSI(11), - UNIPHIER_PERI_CLK_MCSSI(12), + UNIPHIER_PERI_CLK_SCSSI(11, 0), + UNIPHIER_PERI_CLK_SCSSI(12, 1), + UNIPHIER_PERI_CLK_SCSSI(13, 2), + UNIPHIER_PERI_CLK_SCSSI(14, 3), + UNIPHIER_PERI_CLK_MCSSI(15), { /* sentinel */ } }; diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c index 9f09a59161e7..5b39d3701fa3 100644 --- a/drivers/clocksource/asm9260_timer.c +++ b/drivers/clocksource/asm9260_timer.c @@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np) } clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get clk!\n"); + return PTR_ERR(clk); + } ret = clk_prepare_enable(clk); if (ret) { diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 2b196cbfadb6..b235f446ee50 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = setup_irq(irq, &timer->act); if (ret) { pr_err("Can't set up timer IRQ\n"); - goto err_iounmap; + goto err_timer_free; } 
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); @@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node) return 0; +err_timer_free: + kfree(timer); + err_iounmap: iounmap(base); return ret; diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 2317d4e3daaf..36933e2b3b0d 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -233,7 +233,8 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg) static u64 read_hv_sched_clock_tsc(void) { - return read_hv_clock_tsc(NULL) - hv_sched_clock_offset; + return (read_hv_clock_tsc(NULL) - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); } static struct clocksource hyperv_cs_tsc = { @@ -258,7 +259,8 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg) static u64 read_hv_sched_clock_msr(void) { - return read_hv_clock_msr(NULL) - hv_sched_clock_offset; + return (read_hv_clock_msr(NULL) - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); } static struct clocksource hyperv_cs_msr = { diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c index 62745c962049..e421946a91c5 100644 --- a/drivers/clocksource/timer-davinci.c +++ b/drivers/clocksource/timer-davinci.c @@ -302,10 +302,6 @@ int __init davinci_timer_register(struct clk *clk, return rv; } - clockevents_config_and_register(&clockevent->dev, tick_rate, - DAVINCI_TIMER_MIN_DELTA, - DAVINCI_TIMER_MAX_DELTA); - davinci_clocksource.dev.rating = 300; davinci_clocksource.dev.read = davinci_clocksource_read; davinci_clocksource.dev.mask = @@ -323,6 +319,10 @@ int __init davinci_timer_register(struct clk *clk, davinci_clocksource_init_tim34(base); } + clockevents_config_and_register(&clockevent->dev, tick_rate, + DAVINCI_TIMER_MIN_DELTA, + DAVINCI_TIMER_MAX_DELTA); + rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate); if (rv) { pr_err("Unable to register clocksource"); diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 11ff701ff4bb..a3c73e972fce 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to) } if (!to->clkevt.name) - to->clkevt.name = np->name; + to->clkevt.name = np->full_name; to->np = np; diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 470c7ef02ea4..4b04ffbe5e7e 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -41,7 +41,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs) return get_cycles64(); } -static u64 riscv_sched_clock(void) +static u64 notrace riscv_sched_clock(void) { return get_cycles64(); } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8d8da763adc5..8910fd1ae3c6 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -217,7 +217,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, return ret; } -static int cppc_verify_policy(struct cpufreq_policy *policy) +static int cppc_verify_policy(struct cpufreq_policy_data *policy) { cpufreq_verify_within_cpu_limits(policy); return 0; diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index cd53272e2fa2..f7a7bcf6f52e 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -291,7 +291,7 @@ static int nforce2_target(struct cpufreq_policy *policy, * 
nforce2_verify - verifies a new CPUFreq policy * @policy: new policy */ -static int nforce2_verify(struct cpufreq_policy *policy) +static int nforce2_verify(struct cpufreq_policy_data *policy) { unsigned int fsb_pol_max; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 48a224a6b178..35f8e098e9fa 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -74,6 +74,9 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy); static int cpufreq_start_governor(struct cpufreq_policy *policy); static void cpufreq_stop_governor(struct cpufreq_policy *policy); static void cpufreq_governor_limits(struct cpufreq_policy *policy); +static int cpufreq_set_policy(struct cpufreq_policy *policy, + struct cpufreq_governor *new_gov, + unsigned int new_pol); /** * Two notifier lists: the "policy" list is involved in the @@ -613,25 +616,22 @@ static struct cpufreq_governor *find_governor(const char *str_governor) return NULL; } -static int cpufreq_parse_policy(char *str_governor, - struct cpufreq_policy *policy) +static unsigned int cpufreq_parse_policy(char *str_governor) { - if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { - policy->policy = CPUFREQ_POLICY_PERFORMANCE; - return 0; - } - if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { - policy->policy = CPUFREQ_POLICY_POWERSAVE; - return 0; - } - return -EINVAL; + if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) + return CPUFREQ_POLICY_PERFORMANCE; + + if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) + return CPUFREQ_POLICY_POWERSAVE; + + return CPUFREQ_POLICY_UNKNOWN; } /** * cpufreq_parse_governor - parse a governor string only for has_target() + * @str_governor: Governor name. */ -static int cpufreq_parse_governor(char *str_governor, - struct cpufreq_policy *policy) +static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor) { struct cpufreq_governor *t; @@ -645,7 +645,7 @@ static int cpufreq_parse_governor(char *str_governor, ret = request_module("cpufreq_%s", str_governor); if (ret) - return -EINVAL; + return NULL; mutex_lock(&cpufreq_governor_mutex); @@ -656,12 +656,7 @@ static int cpufreq_parse_governor(char *str_governor, mutex_unlock(&cpufreq_governor_mutex); - if (t) { - policy->governor = t; - return 0; - } - - return -EINVAL; + return t; } /** @@ -762,29 +757,34 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { + char str_governor[16]; int ret; - char str_governor[16]; - struct cpufreq_policy new_policy; - - memcpy(&new_policy, policy, sizeof(*policy)); ret = sscanf(buf, "%15s", str_governor); if (ret != 1) return -EINVAL; if (cpufreq_driver->setpolicy) { - if (cpufreq_parse_policy(str_governor, &new_policy)) + unsigned int new_pol; + + new_pol = cpufreq_parse_policy(str_governor); + if (!new_pol) return -EINVAL; + + ret = cpufreq_set_policy(policy, NULL, new_pol); } else { - if (cpufreq_parse_governor(str_governor, &new_policy)) + struct cpufreq_governor *new_gov; + + new_gov = cpufreq_parse_governor(str_governor); + if (!new_gov) return -EINVAL; + + ret = cpufreq_set_policy(policy, new_gov, + CPUFREQ_POLICY_UNKNOWN); + + module_put(new_gov->owner); } - ret = cpufreq_set_policy(policy, &new_policy); - - if (new_policy.governor) - module_put(new_policy.governor->owner); - return ret ? 
ret : count; } @@ -933,6 +933,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) struct freq_attr *fattr = to_attr(attr); ssize_t ret; + if (!fattr->show) + return -EIO; + down_read(&policy->rwsem); ret = fattr->show(policy, buf); up_read(&policy->rwsem); @@ -947,6 +950,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; + if (!fattr->store) + return -EIO; + /* * cpus_read_trylock() is used here to work around a circular lock * dependency problem with respect to the cpufreq_register_driver(). @@ -1044,40 +1050,41 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void) static int cpufreq_init_policy(struct cpufreq_policy *policy) { - struct cpufreq_governor *gov = NULL, *def_gov = NULL; - struct cpufreq_policy new_policy; - - memcpy(&new_policy, policy, sizeof(*policy)); - - def_gov = cpufreq_default_governor(); + struct cpufreq_governor *def_gov = cpufreq_default_governor(); + struct cpufreq_governor *gov = NULL; + unsigned int pol = CPUFREQ_POLICY_UNKNOWN; if (has_target()) { - /* - * Update governor of new_policy to the governor used before - * hotplug - */ + /* Update policy governor to the one used before hotplug. */ gov = find_governor(policy->last_governor); if (gov) { pr_debug("Restoring governor %s for cpu %d\n", - policy->governor->name, policy->cpu); - } else { - if (!def_gov) - return -ENODATA; + policy->governor->name, policy->cpu); + } else if (def_gov) { gov = def_gov; + } else { + return -ENODATA; } - new_policy.governor = gov; } else { /* Use the default policy if there is no last_policy. */ if (policy->last_policy) { - new_policy.policy = policy->last_policy; - } else { - if (!def_gov) - return -ENODATA; - cpufreq_parse_policy(def_gov->name, &new_policy); + pol = policy->last_policy; + } else if (def_gov) { + pol = cpufreq_parse_policy(def_gov->name); + /* + * In case the default governor is neither "performance" + * nor "powersave", fall back to the initial policy + * value set by the driver. + */ + if (pol == CPUFREQ_POLICY_UNKNOWN) + pol = policy->policy; } + if (pol != CPUFREQ_POLICY_PERFORMANCE && + pol != CPUFREQ_POLICY_POWERSAVE) + return -ENODATA; } - return cpufreq_set_policy(policy, &new_policy); + return cpufreq_set_policy(policy, gov, pol); } static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) @@ -1105,13 +1112,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp void refresh_frequency_limits(struct cpufreq_policy *policy) { - struct cpufreq_policy new_policy; - if (!policy_is_inactive(policy)) { - new_policy = *policy; pr_debug("updating policy for CPU %u\n", policy->cpu); - cpufreq_set_policy(policy, &new_policy); + cpufreq_set_policy(policy, policy->governor, policy->policy); } } EXPORT_SYMBOL(refresh_frequency_limits);
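Aside: the cpufreq.c hunks above drop the old pattern of copying a whole struct cpufreq_policy around just to carry a parsed result; the parsers now return their value directly, with CPUFREQ_POLICY_UNKNOWN (and NULL for governor pointers) serving as failure sentinels that feed straight into cpufreq_set_policy(). A minimal userspace sketch of that return-a-sentinel shape; the names below are illustrative stand-ins, not the kernel API:

#include <stdio.h>
#include <strings.h>

/* Illustrative stand-ins; not the kernel's definitions. */
#define POLICY_UNKNOWN     0u
#define POLICY_PERFORMANCE 1u
#define POLICY_POWERSAVE   2u

/* Return the parsed value directly; 0 doubles as the failure sentinel. */
static unsigned int parse_policy(const char *s)
{
	if (!strcasecmp(s, "performance"))
		return POLICY_PERFORMANCE;
	if (!strcasecmp(s, "powersave"))
		return POLICY_POWERSAVE;
	return POLICY_UNKNOWN;
}

int main(void)
{
	unsigned int pol = parse_policy("powersave");

	if (pol == POLICY_UNKNOWN)
		return 1;	/* reject, as store_scaling_governor() does */

	printf("applying policy %u\n", pol);	/* cpufreq_set_policy() stand-in */
	return 0;
}

Returning the value keeps the parser free of side effects, so each caller decides for itself what to mutate and when to give up.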
@@ -2355,43 +2359,46 @@ EXPORT_SYMBOL(cpufreq_get_policy); /** * cpufreq_set_policy - Modify cpufreq policy parameters. * @policy: Policy object to modify. - * @new_policy: New policy data. + * @new_gov: Policy governor pointer. + * @new_pol: Policy value (for drivers with built-in governors). * - * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the - * min and max parameters of @new_policy to @policy and either invoke the - * driver's ->setpolicy() callback (if present) or carry out a governor update - * for @policy. That is, run the current governor's ->limits() callback (if the - * governor field in @new_policy points to the same object as the one in - * @policy) or replace the governor for @policy with the new one stored in - * @new_policy. + * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency + * limits to be set for the policy, update @policy with the verified limits + * values and either invoke the driver's ->setpolicy() callback (if present) or + * carry out a governor update for @policy. That is, run the current governor's + * ->limits() callback (if @new_gov points to the same object as the one in + * @policy) or replace the governor for @policy with @new_gov. * * The cpuinfo part of @policy is not updated by this function. */ -int cpufreq_set_policy(struct cpufreq_policy *policy, - struct cpufreq_policy *new_policy) +static int cpufreq_set_policy(struct cpufreq_policy *policy, + struct cpufreq_governor *new_gov, + unsigned int new_pol) { + struct cpufreq_policy_data new_data; struct cpufreq_governor *old_gov; int ret; - pr_debug("setting new policy for CPU %u: %u - %u kHz\n", - new_policy->cpu, new_policy->min, new_policy->max); - - memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); - + memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); + new_data.freq_table = policy->freq_table; + new_data.cpu = policy->cpu; /* * PM QoS framework collects all the requests from users and provide us * the final aggregated value here. */ - new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN); - new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX); + new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN); + new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX); + + pr_debug("setting new policy for CPU %u: %u - %u kHz\n", + new_data.cpu, new_data.min, new_data.max); /* verify the cpu speed can be set within this limit */ - ret = cpufreq_driver->verify(new_policy); + ret = cpufreq_driver->verify(&new_data); if (ret) return ret; - policy->min = new_policy->min; - policy->max = new_policy->max; + policy->min = new_data.min; + policy->max = new_data.max; trace_cpu_frequency_limits(policy); policy->cached_target_freq = UINT_MAX; @@ -2400,12 +2407,12 @@ int cpufreq_set_policy(struct cpufreq_policy *policy, policy->min, policy->max); if (cpufreq_driver->setpolicy) { - policy->policy = new_policy->policy; + policy->policy = new_pol; pr_debug("setting range\n"); return cpufreq_driver->setpolicy(policy); } - if (new_policy->governor == policy->governor) { + if (new_gov == policy->governor) { pr_debug("governor limits update\n"); cpufreq_governor_limits(policy); return 0; @@ -2422,7 +2429,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy, } /* start new governor */ - policy->governor = new_policy->governor; + policy->governor = new_gov; ret = cpufreq_init_governor(policy); if (!ret) { ret = cpufreq_start_governor(policy); @@ -2628,6 +2635,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (cpufreq_disabled()) return -ENODEV; + /* + * The cpufreq core depends heavily on the availability of device + * structures, so make sure they are available before proceeding further.
+ */ + if (!get_cpu_device(0)) + return -EPROBE_DEFER; + if (!driver_data || !driver_data->verify || !driver_data->init || !(driver_data->setpolicy || driver_data->target_index || driver_data->target) || diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index ded427e0a488..e117b0059123 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -60,7 +60,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, return 0; } -int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, +int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, struct cpufreq_frequency_table *table) { struct cpufreq_frequency_table *pos; @@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); * Generic routine to verify policy & frequency table, requires driver to set * policy->freq_table prior to it. */ -int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) +int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) { if (!policy->freq_table) return -ENODEV; diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index e97b5733aa24..75b3ef7ec679 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c @@ -328,7 +328,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) * for the hardware supported by the driver. */ -static int cpufreq_gx_verify(struct cpufreq_policy *policy) +static int cpufreq_gx_verify(struct cpufreq_policy_data *policy) { unsigned int tmp_freq = 0; u8 tmp1, tmp2; diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 35db14cf3102..85a6efd6b68f 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT; /* - * Early samples without fuses written report "0 0" which means - * consumer segment and minimum speed grading. - * - * According to datasheet minimum speed grading is not supported for - * consumer parts so clamp to 1 to avoid warning for "no OPPs" + * Early samples without fuses written report "0 0" which may NOT + * match any OPP defined in DT. So clamp to minimum OPP defined in + * DT to avoid warning for "no OPPs". * * Applies to i.MX8M series SoCs. 
*/ - if (mkt_segment == 0 && speed_grade == 0 && ( - of_machine_is_compatible("fsl,imx8mm") || - of_machine_is_compatible("fsl,imx8mn") || - of_machine_is_compatible("fsl,imx8mq"))) - speed_grade = 1; + if (mkt_segment == 0 && speed_grade == 0) { + if (of_machine_is_compatible("fsl,imx8mm") || + of_machine_is_compatible("fsl,imx8mq")) + speed_grade = 1; + if (of_machine_is_compatible("fsl,imx8mn")) + speed_grade = 0xb; + } supported_hw[0] = BIT(speed_grade); supported_hw[1] = BIT(mkt_segment); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8ab31702cf6a..45499e0b9f2f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2036,8 +2036,9 @@ static int intel_pstate_get_max_freq(struct cpudata *cpu) cpu->pstate.max_freq : cpu->pstate.turbo_freq; } -static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, - struct cpudata *cpu) +static void intel_pstate_update_perf_limits(struct cpudata *cpu, + unsigned int policy_min, + unsigned int policy_max) { int max_freq = intel_pstate_get_max_freq(cpu); int32_t max_policy_perf, min_policy_perf; @@ -2056,18 +2057,17 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, turbo_max = cpu->pstate.turbo_pstate; } - max_policy_perf = max_state * policy->max / max_freq; - if (policy->max == policy->min) { + max_policy_perf = max_state * policy_max / max_freq; + if (policy_max == policy_min) { min_policy_perf = max_policy_perf; } else { - min_policy_perf = max_state * policy->min / max_freq; + min_policy_perf = max_state * policy_min / max_freq; min_policy_perf = clamp_t(int32_t, min_policy_perf, 0, max_policy_perf); } pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", - policy->cpu, max_state, - min_policy_perf, max_policy_perf); + cpu->cpu, max_state, min_policy_perf, max_policy_perf); /* Normalize user input to [min_perf, max_perf] */ if (per_cpu_limits) { @@ -2081,7 +2081,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); global_min = clamp_t(int32_t, global_min, 0, global_max); - pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu, + pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, global_min, global_max); cpu->min_perf_ratio = max(min_policy_perf, global_min); @@ -2094,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, cpu->max_perf_ratio); } - pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu, + pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, cpu->max_perf_ratio, cpu->min_perf_ratio); } @@ -2114,7 +2114,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) mutex_lock(&intel_pstate_limits_lock); - intel_pstate_update_perf_limits(policy, cpu); + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { /* @@ -2143,8 +2143,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) return 0; } -static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, - struct cpudata *cpu) +static void intel_pstate_adjust_policy_max(struct cpudata *cpu, + struct cpufreq_policy_data *policy) { if (!hwp_active && cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && @@ -2155,7 +2155,7 @@ static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, } } -static int intel_pstate_verify_policy(struct cpufreq_policy *policy) +static int 
intel_pstate_verify_policy(struct cpufreq_policy_data *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; @@ -2163,11 +2163,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, intel_pstate_get_max_freq(cpu)); - if (policy->policy != CPUFREQ_POLICY_POWERSAVE && - policy->policy != CPUFREQ_POLICY_PERFORMANCE) - return -EINVAL; - - intel_pstate_adjust_policy_max(policy, cpu); + intel_pstate_adjust_policy_max(cpu, policy); return 0; } @@ -2268,7 +2264,7 @@ static struct cpufreq_driver intel_pstate = { .name = "intel_pstate", }; -static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) +static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; @@ -2276,9 +2272,9 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, intel_pstate_get_max_freq(cpu)); - intel_pstate_adjust_policy_max(policy, cpu); + intel_pstate_adjust_policy_max(cpu, policy); - intel_pstate_update_perf_limits(policy, cpu); + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); return 0; } diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 64b8689f7a4a..0b08be8bff76 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -122,7 +122,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy) * Validates a new CPUFreq policy. This function has to be called with * cpufreq_driver locked. */ -static int longrun_verify_policy(struct cpufreq_policy *policy) +static int longrun_verify_policy(struct cpufreq_policy_data *policy) { if (!policy) return -EINVAL; @@ -130,10 +130,6 @@ static int longrun_verify_policy(struct cpufreq_policy *policy) policy->cpu = 0; cpufreq_verify_within_cpu_limits(policy); - if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && - (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) - return -EINVAL; - return 0; } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index fdc767fdbe6a..f90273006553 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -109,7 +109,7 @@ struct pcc_cpu { static struct pcc_cpu __percpu *pcc_cpu_info; -static int pcc_cpufreq_verify(struct cpufreq_policy *policy) +static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy) { cpufreq_verify_within_cpu_limits(policy); return 0; diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 6061850e59c9..56f4bc0d209e 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -1041,9 +1041,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = { static int init_chip_info(void) { - unsigned int chip[256]; + unsigned int *chip; unsigned int cpu, i; unsigned int prev_chip_id = UINT_MAX; + int ret = 0; + + chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; for_each_possible_cpu(cpu) { unsigned int id = cpu_to_chip_id(cpu); @@ -1055,8 +1060,10 @@ static int init_chip_info(void) } chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL); - if (!chips) - return -ENOMEM; + if (!chips) { + ret = -ENOMEM; + goto free_and_return; + } for (i = 0; i < nr_chips; i++) { chips[i].id = chip[i]; @@ -1066,7 +1073,9 @@ static int init_chip_info(void) per_cpu(chip_info, cpu) = &chips[i]; } - return 0; +free_and_return: + kfree(chip); + return ret; } static inline void clean_chip_info(void) diff --git 
a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 5096c0ab781b..0ac265d47ef0 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -87,7 +87,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy, return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data); } -static int sh_cpufreq_verify(struct cpufreq_policy *policy) +static int sh_cpufreq_verify(struct cpufreq_policy_data *policy) { struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); struct cpufreq_frequency_table *freq_table; diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index eca32e443716..9907a165135b 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -25,7 +25,7 @@ static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev; /** - * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC + * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value * @versions: Set to the value parsed from efuse * * Returns 0 if success. @@ -69,21 +69,16 @@ static int sun50i_cpufreq_get_efuse(u32 *versions) return PTR_ERR(speedbin); efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK; - switch (efuse_value) { - case 0b0001: - *versions = 1; - break; - case 0b0011: - *versions = 2; - break; - default: - /* - * For other situations, we treat it as bin0. - * This vf table can be run for any good cpu. - */ + + /* + * We treat unexpected efuse values as if the SoC was from + * the slowest bin. Expected efuse values are 1-3, slowest + * to fastest. + */ + if (efuse_value >= 1 && efuse_value <= 3) + *versions = efuse_value - 1; + else *versions = 0; - break; - } kfree(speedbin); return 0; diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index 707dbc1b7ac8..98d392196df2 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c @@ -22,7 +22,7 @@ static struct cpufreq_driver ucv2_driver; /* make sure that only the "userspace" governor is run * -- anything else wouldn't make sense on this platform, anyway. */ -static int ucv2_verify_speed(struct cpufreq_policy *policy) +static int ucv2_verify_speed(struct cpufreq_policy_data *policy) { if (policy->cpu) return -EINVAL; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 0895b988fa92..29d2d7a21bd7 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -384,6 +384,7 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv, continue; limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC; + break; } dev->poll_limit_ns = limit_ns; diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 80c1a830d991..9db154224999 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -62,25 +62,24 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv) * __cpuidle_set_driver - set per CPU driver variables for the given driver. * @drv: a valid pointer to a struct cpuidle_driver * - * For each CPU in the driver's cpumask, unset the registered driver per CPU - * to @drv. - * - * Returns 0 on success, -EBUSY if the CPUs have driver(s) already. + * Returns 0 on success, -EBUSY if any CPU in the cpumask has a driver + * different from @drv already.
*/ static inline int __cpuidle_set_driver(struct cpuidle_driver *drv) { int cpu; for_each_cpu(cpu, drv->cpumask) { + struct cpuidle_driver *old_drv; - if (__cpuidle_get_cpu_driver(cpu)) { - __cpuidle_unset_driver(drv); + old_drv = __cpuidle_get_cpu_driver(cpu); + if (old_drv && old_drv != drv) return -EBUSY; - } - - per_cpu(cpuidle_drivers, cpu) = drv; } + for_each_cpu(cpu, drv->cpumask) + per_cpu(cpuidle_drivers, cpu) = drv; + return 0; } diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index b5a0e498f798..c71773c88890 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -194,7 +194,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * pattern detection. */ cpu_data->intervals[cpu_data->interval_idx++] = measured_us; - if (cpu_data->interval_idx > INTERVALS) + if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } @@ -233,8 +233,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); int latency_req = cpuidle_governor_latency_req(dev->cpu); - unsigned int duration_us, count; - int max_early_idx, constraint_idx, idx, i; + unsigned int duration_us, hits, misses, early_hits; + int max_early_idx, prev_max_early_idx, constraint_idx, idx, i; ktime_t delta_tick; if (dev->last_state_idx >= 0) { @@ -247,8 +247,11 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick); duration_us = ktime_to_us(cpu_data->sleep_length_ns); - count = 0; + hits = 0; + misses = 0; + early_hits = 0; max_early_idx = -1; + prev_max_early_idx = -1; constraint_idx = drv->state_count; idx = -1; @@ -258,23 +261,62 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (s->disabled || su->disable) { /* - * If the "early hits" metric of a disabled state is - * greater than the current maximum, it should be taken - * into account, because it would be a mistake to select - * a deeper state with lower "early hits" metric. The - * index cannot be changed to point to it, however, so - * just increase the max count alone and let the index - * still point to a shallower idle state. + * Ignore disabled states with target residencies beyond + * the anticipated idle duration. */ - if (max_early_idx >= 0 && - count < cpu_data->states[i].early_hits) - count = cpu_data->states[i].early_hits; + if (s->target_residency > duration_us) + continue; + + /* + * This state is disabled, so the range of idle duration + * values corresponding to it is covered by the current + * candidate state, but still the "hits" and "misses" + * metrics of the disabled state need to be used to + * decide whether or not the state covering the range in + * question is good enough. + */ + hits = cpu_data->states[i].hits; + misses = cpu_data->states[i].misses; + + if (early_hits >= cpu_data->states[i].early_hits || + idx < 0) + continue; + + /* + * If the current candidate state has been the one with + * the maximum "early hits" metric so far, the "early + * hits" metric of the disabled state replaces the + * current "early hits" count to avoid selecting a + * deeper state with lower "early hits" metric. 
+ */ + if (max_early_idx == idx) { + early_hits = cpu_data->states[i].early_hits; + continue; + } + + /* + * The current candidate state is closer to the disabled + * one than the current maximum "early hits" state, so + * replace the latter with it, but in case the maximum + * "early hits" state index has not been set so far, + * check if the current candidate state is not too + * shallow for that role. + */ + if (!(tick_nohz_tick_stopped() && + drv->states[idx].target_residency < TICK_USEC)) { + prev_max_early_idx = max_early_idx; + early_hits = cpu_data->states[i].early_hits; + max_early_idx = idx; + } continue; } - if (idx < 0) + if (idx < 0) { idx = i; /* first enabled state */ + hits = cpu_data->states[i].hits; + misses = cpu_data->states[i].misses; + } if (s->target_residency > duration_us) break; @@ -283,11 +325,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, constraint_idx = i; idx = i; + hits = cpu_data->states[i].hits; + misses = cpu_data->states[i].misses; - if (count < cpu_data->states[i].early_hits && + if (early_hits < cpu_data->states[i].early_hits && !(tick_nohz_tick_stopped() && drv->states[i].target_residency < TICK_USEC)) { - count = cpu_data->states[i].early_hits; + prev_max_early_idx = max_early_idx; + early_hits = cpu_data->states[i].early_hits; max_early_idx = i; } } @@ -300,10 +345,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * "early hits" metric, but if that cannot be determined, just use the * state selected so far. */ - if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses && - max_early_idx >= 0) { - idx = max_early_idx; - duration_us = drv->states[idx].target_residency; + if (hits <= misses) { + /* + * The current candidate state is not suitable, so take the one + * whose "early hits" metric is the maximum for the range of + * shallower states. + */ + if (idx == max_early_idx) + max_early_idx = prev_max_early_idx; + + if (max_early_idx >= 0) { + idx = max_early_idx; + duration_us = drv->states[idx].target_residency; + } } /* @@ -316,10 +370,9 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx < 0) { idx = 0; /* No states enabled. Must use 0. */ } else if (idx > 0) { + unsigned int count = 0; u64 sum = 0; - count = 0; - /* * Count and sum the most recent idle duration values less than * the current expected idle duration value. 
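Aside: the teo_select() rework above is easier to follow in isolation. The governor now carries the candidate state's own "hits"/"misses" counters while scanning, and remembers both the best and the previous best "early hits" index, so that when the candidate is rejected (misses outweigh hits) and happens to hold the early-hits maximum itself, selection can still fall back one step. A toy model of just that bookkeeping, with the disabled-state, idle-duration and tick handling deliberately omitted:

#include <stdio.h>

struct state_stats {
	unsigned int hits, misses, early_hits;
	int enabled;
};

static int pick_state(const struct state_stats *s, int n)
{
	int idx = -1, max_early_idx = -1, prev_max_early_idx = -1;
	unsigned int hits = 0, misses = 0, early_hits = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!s[i].enabled)
			continue;

		idx = i;		/* deepest usable state so far */
		hits = s[i].hits;	/* metrics travel with the candidate */
		misses = s[i].misses;

		if (early_hits < s[i].early_hits) {
			prev_max_early_idx = max_early_idx;
			early_hits = s[i].early_hits;
			max_early_idx = i;
		}
	}

	/* The candidate is wrong more often than right: fall back to the
	 * best "early hits" state, stepping past it if it is the candidate. */
	if (hits <= misses) {
		if (idx == max_early_idx)
			max_early_idx = prev_max_early_idx;
		if (max_early_idx >= 0)
			idx = max_early_idx;
	}
	return idx;
}

int main(void)
{
	const struct state_stats states[] = {
		{ .hits = 2, .misses = 8, .early_hits = 9, .enabled = 1 },
		{ .hits = 3, .misses = 7, .early_hits = 4, .enabled = 1 },
	};

	printf("selected state %d\n", pick_state(states, 2));	/* prints 0 */
	return 0;
}

Here state 1 looks wrong more often than right, so the early-hits maximum (state 0) wins, mirroring the fallback path the patch adds.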
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1fb622f2a87d..06b2b3fa5206 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -287,6 +287,7 @@ config CRYPTO_DEV_TALITOS select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER select CRYPTO_HASH + select CRYPTO_LIB_DES select HW_RANDOM depends on FSL_SOC help @@ -332,6 +333,7 @@ config CRYPTO_DEV_PPC4XX depends on PPC && 4xx select CRYPTO_HASH select CRYPTO_AEAD + select CRYPTO_AES select CRYPTO_LIB_AES select CRYPTO_CCM select CRYPTO_CTR @@ -737,7 +739,7 @@ source "drivers/crypto/stm32/Kconfig" config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" - depends on OF || PCI || COMPILE_TEST + depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM select CRYPTO_LIB_AES select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index de5e9352e920..7d6b695c4ab3 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) dma_alloc_coherent(dev->core_dev->device, PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, &dev->scatter_buffer_pa, GFP_ATOMIC); - if (!dev->scatter_buffer_va) { - dma_free_coherent(dev->core_dev->device, - sizeof(struct ce_sd) * PPC4XX_NUM_SD, - dev->sdr, dev->sdr_pa); + if (!dev->scatter_buffer_va) return -ENOMEM; - } for (i = 0; i < PPC4XX_NUM_SD; i++) { dev->sdr[i].ptr = dev->scatter_buffer_pa + diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 026f193556f9..89f79d763ab8 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -88,7 +88,6 @@ struct atmel_aes_caps { bool has_dualbuff; bool has_cfb64; - bool has_ctr32; bool has_gcm; bool has_xts; bool has_authenc; @@ -145,7 +144,7 @@ struct atmel_aes_xts_ctx { u32 key2[AES_KEYSIZE_256 / sizeof(u32)]; }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) struct atmel_aes_authenc_ctx { struct atmel_aes_base_ctx base; struct atmel_sha_authenc_ctx *auth; @@ -157,7 +156,7 @@ struct atmel_aes_reqctx { u32 lastc[AES_BLOCK_SIZE / sizeof(u32)]; }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) struct atmel_aes_authenc_reqctx { struct atmel_aes_reqctx base; @@ -486,13 +485,36 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) return (dd->flags & AES_FLAGS_ENCRYPT); } -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err); #endif +static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + if (req->nbytes < ivsize) + return; + + if (rctx->mode & AES_FLAGS_ENCRYPT) { + scatterwalk_map_and_copy(req->info, req->dst, + req->nbytes - ivsize, ivsize, 0); + } else { + if (req->src == req->dst) + memcpy(req->info, rctx->lastc, ivsize); + else + scatterwalk_map_and_copy(req->info, req->src, + req->nbytes - ivsize, + ivsize, 0); + } +} + static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->ctx->is_aead) 
atmel_aes_authenc_complete(dd, err); #endif @@ -500,26 +522,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) clk_disable(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; - if (!dd->ctx->is_aead) { - struct ablkcipher_request *req = - ablkcipher_request_cast(dd->areq); - struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); - struct crypto_ablkcipher *ablkcipher = - crypto_ablkcipher_reqtfm(req); - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - - if (rctx->mode & AES_FLAGS_ENCRYPT) { - scatterwalk_map_and_copy(req->info, req->dst, - req->nbytes - ivsize, ivsize, 0); - } else { - if (req->src == req->dst) { - memcpy(req->info, rctx->lastc, ivsize); - } else { - scatterwalk_map_and_copy(req->info, req->src, - req->nbytes - ivsize, ivsize, 0); - } - } - } + if (!dd->ctx->is_aead) + atmel_aes_set_iv_as_last_ciphertext_block(dd); if (dd->is_async) dd->areq->complete(dd->areq, err); @@ -1008,8 +1012,9 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); struct scatterlist *src, *dst; - u32 ctr, blocks; size_t datalen; + u32 ctr; + u16 blocks, start, end; bool use_dma, fragmented = false; /* Check for transfer completion. */ @@ -1021,27 +1026,17 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) datalen = req->nbytes - ctx->offset; blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); ctr = be32_to_cpu(ctx->iv[3]); - if (dd->caps.has_ctr32) { - /* Check 32bit counter overflow. */ - u32 start = ctr; - u32 end = start + blocks - 1; - if (end < start) { - ctr |= 0xffffffff; - datalen = AES_BLOCK_SIZE * -start; - fragmented = true; - } - } else { - /* Check 16bit counter overflow. */ - u16 start = ctr & 0xffff; - u16 end = start + (u16)blocks - 1; + /* Check 16bit counter overflow. */ + start = ctr & 0xffff; + end = start + blocks - 1; - if (blocks >> 16 || end < start) { - ctr |= 0xffff; - datalen = AES_BLOCK_SIZE * (0x10000-start); - fragmented = true; - } + if (blocks >> 16 || end < start) { + ctr |= 0xffff; + datalen = AES_BLOCK_SIZE * (0x10000 - start); + fragmented = true; } + use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD); /* Jump to offset. 
*/ @@ -1125,10 +1120,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) rctx->mode = mode; if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) { - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - scatterwalk_map_and_copy(rctx->lastc, req->src, - (req->nbytes - ivsize), ivsize, 0); + if (req->nbytes >= ivsize) + scatterwalk_map_and_copy(rctx->lastc, req->src, + req->nbytes - ivsize, + ivsize, 0); } return atmel_aes_handle_queue(dd, &req->base); @@ -1973,7 +1970,7 @@ static struct crypto_alg aes_xts_alg = { } }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) /* authenc aead functions */ static int atmel_aes_authenc_start(struct atmel_aes_dev *dd); @@ -2460,7 +2457,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) { int i; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->caps.has_authenc) for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) crypto_unregister_aead(&aes_authenc_algs[i]); @@ -2507,7 +2504,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) goto err_aes_xts_alg; } -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->caps.has_authenc) { for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) { err = crypto_register_aead(&aes_authenc_algs[i]); @@ -2519,7 +2516,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) return 0; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) /* i = ARRAY_SIZE(aes_authenc_algs); */ err_aes_authenc_alg: for (j = 0; j < i; j++) @@ -2543,7 +2540,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) { dd->caps.has_dualbuff = 0; dd->caps.has_cfb64 = 0; - dd->caps.has_ctr32 = 0; dd->caps.has_gcm = 0; dd->caps.has_xts = 0; dd->caps.has_authenc = 0; @@ -2554,7 +2550,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) case 0x500: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; - dd->caps.has_ctr32 = 1; dd->caps.has_gcm = 1; dd->caps.has_xts = 1; dd->caps.has_authenc = 1; @@ -2563,7 +2558,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) case 0x200: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; - dd->caps.has_ctr32 = 1; dd->caps.has_gcm = 1; dd->caps.max_burst_size = 4; break; @@ -2709,7 +2703,7 @@ static int atmel_aes_probe(struct platform_device *pdev) atmel_aes_get_cap(aes_dd); -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { err = -EPROBE_DEFER; goto iclk_unprepare; diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h index cbd37a2edada..d6de810df44f 100644 --- a/drivers/crypto/atmel-authenc.h +++ b/drivers/crypto/atmel-authenc.h @@ -12,7 +12,7 @@ #ifndef __ATMEL_AUTHENC_H__ #define __ATMEL_AUTHENC_H__ -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) #include #include diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 84cb8748a795..1f9c16395a3f 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1918,12 +1918,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, { struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); - if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return 0; + 
return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen); } static int atmel_sha_hmac_init(struct ahash_request *req) @@ -2212,7 +2207,7 @@ static struct ahash_alg sha_hmac_algs[] = { }, }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) /* authenc functions */ static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd); diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 4b20606983a4..22ebe40f09f5 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -1251,7 +1251,7 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, if (len != 16 && len != 24 && len != 32) { crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -1; + return -EINVAL; } ctx->key_length = len; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 3443f6d6dd83..6863d7097674 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -2481,7 +2481,7 @@ static struct caam_aead_alg driver_aeads[] = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" - "hmac-sha256-cbc-desi-" + "hmac-sha256-cbc-des-" "caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index db22777d59b4..62930351ccd9 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -685,11 +685,9 @@ static int caam_probe(struct platform_device *pdev) of_node_put(np); if (!ctrlpriv->mc_en) - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | - MCFGR_WDENABLE | MCFGR_LARGE_BURST | - (sizeof(dma_addr_t) == sizeof(u64) ? 
- MCFGR_LONG_PTR : 0)); + MCFGR_WDENABLE | MCFGR_LARGE_BURST); handle_imx6_err005766(&ctrl->mcr); diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index bc924980e10c..c4632d84c9a1 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -103,8 +103,7 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size, offset = UCD_UCODE_LOAD_BLOCK_NUM; nitrox_write_csr(ndev, offset, block_num); - code_size = ucode_size; - code_size = roundup(code_size, 8); + code_size = roundup(ucode_size, 16); while (code_size) { data = ucode_data[i]; /* write 8 bytes at a time */ @@ -220,11 +219,11 @@ static int nitrox_load_fw(struct nitrox_device *ndev) /* write block number and firmware length * bit:<2:0> block number - * bit:3 is set SE uses 32KB microcode - * bit:3 is clear SE uses 64KB microcode + * bit:3 is set AE uses 32KB microcode + * bit:3 is clear AE uses 64KB microcode */ core_2_eid_val.value = 0ULL; - core_2_eid_val.ucode_blk = 0; + core_2_eid_val.ucode_blk = 2; if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) core_2_eid_val.ucode_len = 1; else diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 0186b3df4c87..0d5576f6ad21 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = { .setup = NULL, .perform = &ccp3_actions, .offset = 0, + .rsamax = CCP_RSA_MAX_WIDTH, }; const struct ccp_vdata ccpv3 = { diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index a54f9367a580..0770a83bf1a5 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan, desc->tx_desc.flags = flags; desc->tx_desc.tx_submit = ccp_tx_submit; desc->ccp = chan->ccp; + INIT_LIST_HEAD(&desc->entry); INIT_LIST_HEAD(&desc->pending); INIT_LIST_HEAD(&desc->active); desc->status = DMA_IN_PROGRESS; diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index d3e8faa03f15..3d7c8d9e54b9 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) * revealed the decrypted message --> zero its memory. 
*/ sg_zero_buffer(areq->dst, sg_nents(areq->dst), - areq->cryptlen, 0); + areq->cryptlen, areq->assoclen); err = -EBADMSG; } /*ENCRYPT*/ diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 254b48797799..cd9c60268bf8 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -523,6 +523,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm, } } + static void cc_setup_state_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, unsigned int ivsize, unsigned int nbytes, @@ -534,8 +535,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm, int cipher_mode = ctx_p->cipher_mode; int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; - dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; - unsigned int key_len = ctx_p->keylen; dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; unsigned int du_size = nbytes; @@ -570,6 +569,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm, break; case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: + case DRV_CIPHER_BITLOCKER: + break; + default: + dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode); + } +} + + +static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, + struct cipher_req_ctx *req_ctx, + unsigned int ivsize, unsigned int nbytes, + struct cc_hw_desc desc[], + unsigned int *seq_size) +{ + struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); + struct device *dev = drvdata_to_dev(ctx_p->drvdata); + int cipher_mode = ctx_p->cipher_mode; + int flow_mode = ctx_p->flow_mode; + int direction = req_ctx->gen_ctx.op_type; + dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; + unsigned int key_len = ctx_p->keylen; + dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; + unsigned int du_size = nbytes; + + struct cc_crypto_alg *cc_alg = + container_of(tfm->__crt_alg, struct cc_crypto_alg, + skcipher_alg.base); + + if (cc_alg->data_unit) + du_size = cc_alg->data_unit; + + switch (cipher_mode) { + case DRV_CIPHER_ECB: + break; + case DRV_CIPHER_CBC: + case DRV_CIPHER_CBC_CTS: + case DRV_CIPHER_CTR: + case DRV_CIPHER_OFB: + break; + case DRV_CIPHER_XTS: + case DRV_CIPHER_ESSIV: case DRV_CIPHER_BITLOCKER: /* load XEX key */ hw_desc_init(&desc[*seq_size]); @@ -881,12 +921,14 @@ static int cc_cipher_process(struct skcipher_request *req, /* STAT_PHASE_2: Create sequence */ - /* Setup IV and XEX key used */ + /* Setup state (IV) */ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Setup MLLI line, if needed */ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len); /* Setup key */ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len); + /* Setup state (IV and XEX key) */ + cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Data processing */ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len); /* Read next IV */ diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index ab31d4a68c80..7d2f7e2c0bb5 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -161,6 +161,7 @@ struct cc_drvdata { int std_bodies; bool sec_disabled; u32 comp_mask; + bool pm_on; }; struct cc_crypto_alg {
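Aside: the cc_pm.c hunk below keys all runtime-PM calls off the new drvdata->pm_on flag instead of querying the request queue, and it also normalizes the return of pm_runtime_get_sync(), which reports 1 when the device was already active. A standalone sketch of that gate-and-normalize pattern; the helpers here are stand-ins for illustration, not the kernel's runtime-PM API:

#include <stdio.h>

/* Stand-in for pm_runtime_get_sync(): 0 = resumed, 1 = already active,
 * negative = error. Hard-coded purely for illustration. */
static int fake_runtime_get_sync(void)
{
	return 1;
}

/* Model of the driver-side wrapper: only touch runtime PM once the
 * device is flagged as managed (pm_on), and collapse the "already
 * active" success code into plain 0 so callers see 0 or -errno. */
static int pm_get(int pm_on)
{
	int rc = 0;

	if (pm_on)
		rc = fake_runtime_get_sync();

	return rc == 1 ? 0 : rc;
}

int main(void)
{
	printf("pm_get -> %d\n", pm_get(1));	/* prints 0 */
	return 0;
}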
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index dbc508fb719b..452bd77a9ba0 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = { int cc_pm_suspend(struct device *dev) { struct cc_drvdata *drvdata = dev_get_drvdata(dev); - int rc; dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); - rc = cc_suspend_req_queue(drvdata); - if (rc) { - dev_err(dev, "cc_suspend_req_queue (%x)\n", rc); - return rc; - } fini_cc_regs(drvdata); cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); cc_clk_off(drvdata); @@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev) /* check if tee fips error occurred during power down */ cc_tee_handle_fips_error(drvdata); - rc = cc_resume_req_queue(drvdata); - if (rc) { - dev_err(dev, "cc_resume_req_queue (%x)\n", rc); - return rc; - } - - /* must be after the queue resuming as it uses the HW queue*/ cc_init_hash_sram(drvdata); return 0; @@ -80,12 +67,10 @@ int cc_pm_get(struct device *dev) int rc = 0; struct cc_drvdata *drvdata = dev_get_drvdata(dev); - if (cc_req_queue_suspended(drvdata)) + if (drvdata->pm_on) rc = pm_runtime_get_sync(dev); - else - pm_runtime_get_noresume(dev); - return rc; + return (rc == 1 ? 0 : rc); } int cc_pm_put_suspend(struct device *dev) @@ -93,14 +78,11 @@ int cc_pm_put_suspend(struct device *dev) int rc = 0; struct cc_drvdata *drvdata = dev_get_drvdata(dev); - if (!cc_req_queue_suspended(drvdata)) { + if (drvdata->pm_on) { pm_runtime_mark_last_busy(dev); rc = pm_runtime_put_autosuspend(dev); - } else { - /* Something wrong happens*/ - dev_err(dev, "request to suspend already suspended queue"); - rc = -EBUSY; } + return rc; } @@ -117,7 +99,7 @@ int cc_pm_init(struct cc_drvdata *drvdata) /* must be before the enabling to avoid redundant suspending */ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); - /* activate the PM module */ + /* set us as active - note we won't do PM ops until cc_pm_go()! */ return pm_runtime_set_active(dev); } @@ -125,9 +107,11 @@ int cc_pm_init(struct cc_drvdata *drvdata) void cc_pm_go(struct cc_drvdata *drvdata) { pm_runtime_enable(drvdata_to_dev(drvdata)); + drvdata->pm_on = true; } void cc_pm_fini(struct cc_drvdata *drvdata) { pm_runtime_disable(drvdata_to_dev(drvdata)); + drvdata->pm_on = false; } diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c index a947d5a2cf35..37e6fee37b13 100644 --- a/drivers/crypto/ccree/cc_request_mgr.c +++ b/drivers/crypto/ccree/cc_request_mgr.c @@ -41,7 +41,6 @@ struct cc_req_mgr_handle { #else struct tasklet_struct comptask; #endif - bool is_runtime_suspended; }; struct cc_bl_item { @@ -404,6 +403,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) spin_lock(&mgr->bl_lock); list_del(&bli->list); --mgr->bl_len; + kfree(bli); } spin_unlock(&mgr->bl_lock); @@ -677,52 +677,3 @@ static void comp_handler(unsigned long devarg) cc_proc_backlog(drvdata); dev_dbg(dev, "Comp. handler done.\n"); } - -/* - * resume the queue configuration - no need to take the lock as this happens - * inside the spin lock protection - */ -#if defined(CONFIG_PM) -int cc_resume_req_queue(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - spin_lock_bh(&request_mgr_handle->hw_lock); - request_mgr_handle->is_runtime_suspended = false; - spin_unlock_bh(&request_mgr_handle->hw_lock); - - return 0; -} - -/* - * suspend the queue configuration. Since it is used for the runtime suspend - * only verify that the queue can be suspended.
- */ -int cc_suspend_req_queue(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - /* lock the send_request */ - spin_lock_bh(&request_mgr_handle->hw_lock); - if (request_mgr_handle->req_queue_head != - request_mgr_handle->req_queue_tail) { - spin_unlock_bh(&request_mgr_handle->hw_lock); - return -EBUSY; - } - request_mgr_handle->is_runtime_suspended = true; - spin_unlock_bh(&request_mgr_handle->hw_lock); - - return 0; -} - -bool cc_req_queue_suspended(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - return request_mgr_handle->is_runtime_suspended; -} - -#endif diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h index f46cf766fe4d..ff7746aaaf35 100644 --- a/drivers/crypto/ccree/cc_request_mgr.h +++ b/drivers/crypto/ccree/cc_request_mgr.h @@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata); void cc_req_mgr_fini(struct cc_drvdata *drvdata); -#if defined(CONFIG_PM) -int cc_resume_req_queue(struct cc_drvdata *drvdata); - -int cc_suspend_req_queue(struct cc_drvdata *drvdata); - -bool cc_req_queue_suspended(struct cc_drvdata *drvdata); -#endif - #endif /*__REQUEST_MGR_H__*/ diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 38ee38b37ae6..01dd418bdadc 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -3194,9 +3194,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) aeadctx->mayverify = VERIFY_SW; break; default: - - crypto_tfm_set_flags((struct crypto_tfm *) tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3221,8 +3218,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, aeadctx->mayverify = VERIFY_HW; break; default: - crypto_tfm_set_flags((struct crypto_tfm *)tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3263,8 +3258,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm, aeadctx->mayverify = VERIFY_HW; break; default: - crypto_tfm_set_flags((struct crypto_tfm *)tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3289,8 +3282,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; } else { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3328,8 +3320,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, int error; if (keylen < 3) { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3379,8 +3370,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, } else if (keylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); pr_err("GCM: Invalid key length %d\n", keylen); ret = -EINVAL; goto out; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 
aca75237bbcf..dffa2aa855fd 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb) return 0; } +static void chtls_purge_wr_queue(struct sock *sk) +{ + struct sk_buff *skb; + + while ((skb = dequeue_wr(sk)) != NULL) + kfree_skb(skb); +} + static void chtls_release_resources(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); @@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk) kfree_skb(csk->txdata_skb_cache); csk->txdata_skb_cache = NULL; + if (csk->wr_credits != csk->wr_max_credits) { + chtls_purge_wr_queue(sk); + chtls_reset_wr_list(csk); + } + if (csk->l2t_entry) { cxgb4_l2t_release(csk->l2t_entry); csk->l2t_entry = NULL; @@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb) else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } + kfree_skb(skb); } static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb) @@ -2062,19 +2076,6 @@ rel_skb: return 0; } -static struct sk_buff *dequeue_wr(struct sock *sk) -{ - struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); - struct sk_buff *skb = csk->wr_skb_head; - - if (likely(skb)) { - /* Don't bother clearing the tail */ - csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; - WR_SKB_CB(skb)->next_wr = NULL; - } - return skb; -} - static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) { struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h index 129d7ac649a9..3fac0c74a41f 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h @@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } +static inline void chtls_reset_wr_list(struct chtls_sock *csk) +{ + csk->wr_skb_head = NULL; + csk->wr_skb_tail = NULL; +} + static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb) { WR_SKB_CB(skb)->next_wr = NULL; @@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb) WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb; csk->wr_skb_tail = skb; } + +static inline struct sk_buff *dequeue_wr(struct sock *sk) +{ + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); + struct sk_buff *skb = NULL; + + skb = csk->wr_skb_head; + + if (likely(skb)) { + /* Don't bother clearing the tail */ + csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; + WR_SKB_CB(skb)->next_wr = NULL; + } + return skb; +} #endif diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index 2a34035d3cfb..a217fe72602d 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -350,6 +350,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname) kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM)); kwr->sc_imm.len = cpu_to_be32(klen); + lock_sock(sk); /* key info */ kctx = (struct _key_ctx *)(kwr + 1); ret = chtls_key_info(csk, kctx, keylen, optname); @@ -388,8 +389,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname) csk->tlshws.txkey = keyid; } + release_sock(sk); return ret; out_notcb: + release_sock(sk); free_tls_keyid(sk); out_nokey: kfree_skb(skb); diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index d81a1297cb9e..73a899e6f837 100644 --- a/drivers/crypto/geode-aes.c +++ 
b/drivers/crypto/geode-aes.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -23,12 +24,12 @@ static spinlock_t lock; /* Write a 128 bit field (either a writable key or IV) */ static inline void -_writefield(u32 offset, void *value) +_writefield(u32 offset, const void *value) { int i; for (i = 0; i < 4; i++) - iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); + iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4)); } /* Read a 128 bit field (either a writable key or IV) */ @@ -42,12 +43,12 @@ _readfield(u32 offset, void *value) } static int -do_crypt(void *src, void *dst, int len, u32 flags) +do_crypt(const void *src, void *dst, u32 len, u32 flags) { u32 status; u32 counter = AES_OP_TIMEOUT; - iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG); + iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG); iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG); iowrite32(len, _iobase + AES_LENA_REG); @@ -64,16 +65,14 @@ do_crypt(void *src, void *dst, int len, u32 flags) return counter ? 0 : 1; } -static unsigned int -geode_aes_crypt(struct geode_aes_op *op) +static void +geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src, + void *dst, u32 len, u8 *iv, int mode, int dir) { u32 flags = 0; unsigned long iflags; int ret; - if (op->len == 0) - return 0; - /* If the source and destination is the same, then * we need to turn on the coherent flags, otherwise * we don't need to worry @@ -81,32 +80,28 @@ geode_aes_crypt(struct geode_aes_op *op) flags |= (AES_CTRL_DCA | AES_CTRL_SCA); - if (op->dir == AES_DIR_ENCRYPT) + if (dir == AES_DIR_ENCRYPT) flags |= AES_CTRL_ENCRYPT; /* Start the critical section */ spin_lock_irqsave(&lock, iflags); - if (op->mode == AES_MODE_CBC) { + if (mode == AES_MODE_CBC) { flags |= AES_CTRL_CBC; - _writefield(AES_WRITEIV0_REG, op->iv); + _writefield(AES_WRITEIV0_REG, iv); } - if (!(op->flags & AES_FLAGS_HIDDENKEY)) { - flags |= AES_CTRL_WRKEY; - _writefield(AES_WRITEKEY0_REG, op->key); - } + flags |= AES_CTRL_WRKEY; + _writefield(AES_WRITEKEY0_REG, tctx->key); - ret = do_crypt(op->src, op->dst, op->len, flags); + ret = do_crypt(src, dst, len, flags); BUG_ON(ret); - if (op->mode == AES_MODE_CBC) - _readfield(AES_WRITEIV0_REG, op->iv); + if (mode == AES_MODE_CBC) + _readfield(AES_WRITEIV0_REG, iv); spin_unlock_irqrestore(&lock, iflags); - - return op->len; } /* CRYPTO-API Functions */ @@ -114,13 +109,13 @@ geode_aes_crypt(struct geode_aes_op *op) static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, unsigned int len) { - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); unsigned int ret; - op->keylen = len; + tctx->keylen = len; if (len == AES_KEYSIZE_128) { - memcpy(op->key, key, len); + memcpy(tctx->key, key, len); return 0; } @@ -133,135 +128,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, /* * The requested key size is not supported by HW, do a fallback */ - op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); + tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + tctx->fallback.cip->base.crt_flags |= + (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(op->fallback.cip, key, len); + ret = crypto_cipher_setkey(tctx->fallback.cip, key, len); if (ret) { tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK); + tfm->crt_flags |= 
(tctx->fallback.cip->base.crt_flags & + CRYPTO_TFM_RES_MASK); } return ret; } -static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key, - unsigned int len) +static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, + unsigned int len) { - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); unsigned int ret; - op->keylen = len; + tctx->keylen = len; if (len == AES_KEYSIZE_128) { - memcpy(op->key, key, len); + memcpy(tctx->key, key, len); return 0; } if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { /* not supported at all */ - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } /* * The requested key size is not supported by HW, do a fallback */ - op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); - - ret = crypto_blkcipher_setkey(op->fallback.blk, key, len); - if (ret) { - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); - } - return ret; -} - -static int fallback_blk_dec(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - unsigned int ret; - struct crypto_blkcipher *tfm; - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); - - tfm = desc->tfm; - desc->tfm = op->fallback.blk; - - ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); - - desc->tfm = tfm; - return ret; -} -static int fallback_blk_enc(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - unsigned int ret; - struct crypto_blkcipher *tfm; - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); - - tfm = desc->tfm; - desc->tfm = op->fallback.blk; - - ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); - - desc->tfm = tfm; + crypto_skcipher_clear_flags(tctx->fallback.skcipher, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(tctx->fallback.skcipher, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len); + crypto_skcipher_set_flags(tfm, + crypto_skcipher_get_flags(tctx->fallback.skcipher) & + CRYPTO_TFM_RES_MASK); return ret; } static void geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - if (unlikely(op->keylen != AES_KEYSIZE_128)) { - crypto_cipher_encrypt_one(op->fallback.cip, out, in); + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { + crypto_cipher_encrypt_one(tctx->fallback.cip, out, in); return; } - op->src = (void *) in; - op->dst = (void *) out; - op->mode = AES_MODE_ECB; - op->flags = 0; - op->len = AES_BLOCK_SIZE; - op->dir = AES_DIR_ENCRYPT; - - geode_aes_crypt(op); + geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL, + AES_MODE_ECB, AES_DIR_ENCRYPT); } static void geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - if (unlikely(op->keylen != AES_KEYSIZE_128)) { - crypto_cipher_decrypt_one(op->fallback.cip, out, in); + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { + crypto_cipher_decrypt_one(tctx->fallback.cip, out, in); return; } - op->src = (void *) in; - op->dst = (void *) out; - op->mode = AES_MODE_ECB; - op->flags = 0; - op->len = 
AES_BLOCK_SIZE; - op->dir = AES_DIR_DECRYPT; - - geode_aes_crypt(op); + geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL, + AES_MODE_ECB, AES_DIR_DECRYPT); } static int fallback_init_cip(struct crypto_tfm *tfm) { const char *name = crypto_tfm_alg_name(tfm); - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - op->fallback.cip = crypto_alloc_cipher(name, 0, - CRYPTO_ALG_NEED_FALLBACK); + tctx->fallback.cip = crypto_alloc_cipher(name, 0, + CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(op->fallback.cip)) { + if (IS_ERR(tctx->fallback.cip)) { printk(KERN_ERR "Error allocating fallback algo %s\n", name); - return PTR_ERR(op->fallback.cip); + return PTR_ERR(tctx->fallback.cip); } return 0; @@ -269,10 +222,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm) static void fallback_exit_cip(struct crypto_tfm *tfm) { - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - crypto_free_cipher(op->fallback.cip); - op->fallback.cip = NULL; + crypto_free_cipher(tctx->fallback.cip); } static struct crypto_alg geode_alg = { @@ -285,7 +237,7 @@ static struct crypto_alg geode_alg = { .cra_init = fallback_init_cip, .cra_exit = fallback_exit_cip, .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct geode_aes_op), + .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { @@ -298,209 +250,126 @@ static struct crypto_alg geode_alg = { } }; -static int -geode_cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int geode_init_skcipher(struct crypto_skcipher *tfm) { - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - int err, ret; + const char *name = crypto_tfm_alg_name(&tfm->base); + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); - if (unlikely(op->keylen != AES_KEYSIZE_128)) - return fallback_blk_dec(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - op->iv = walk.iv; - - while ((nbytes = walk.nbytes)) { - op->src = walk.src.virt.addr, - op->dst = walk.dst.virt.addr; - op->mode = AES_MODE_CBC; - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); - op->dir = AES_DIR_DECRYPT; - - ret = geode_aes_crypt(op); - - nbytes -= ret; - err = blkcipher_walk_done(desc, &walk, nbytes); - } - - return err; -} - -static int -geode_cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - int err, ret; - - if (unlikely(op->keylen != AES_KEYSIZE_128)) - return fallback_blk_enc(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - op->iv = walk.iv; - - while ((nbytes = walk.nbytes)) { - op->src = walk.src.virt.addr, - op->dst = walk.dst.virt.addr; - op->mode = AES_MODE_CBC; - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); - op->dir = AES_DIR_ENCRYPT; - - ret = geode_aes_crypt(op); - nbytes -= ret; - err = blkcipher_walk_done(desc, &walk, nbytes); - } - - return err; -} - -static int fallback_init_blk(struct crypto_tfm *tfm) -{ - const char *name = crypto_tfm_alg_name(tfm); - struct geode_aes_op *op = crypto_tfm_ctx(tfm); - - op->fallback.blk = crypto_alloc_blkcipher(name, 0, - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); - - if (IS_ERR(op->fallback.blk)) { + 
tctx->fallback.skcipher = + crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + if (IS_ERR(tctx->fallback.skcipher)) { printk(KERN_ERR "Error allocating fallback algo %s\n", name); - return PTR_ERR(op->fallback.blk); + return PTR_ERR(tctx->fallback.skcipher); } + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tctx->fallback.skcipher)); return 0; } -static void fallback_exit_blk(struct crypto_tfm *tfm) +static void geode_exit_skcipher(struct crypto_skcipher *tfm) { - struct geode_aes_op *op = crypto_tfm_ctx(tfm); + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); - crypto_free_blkcipher(op->fallback.blk); - op->fallback.blk = NULL; + crypto_free_skcipher(tctx->fallback.skcipher); } -static struct crypto_alg geode_cbc_alg = { - .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-geode", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | - CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK, - .cra_init = fallback_init_blk, - .cra_exit = fallback_exit_blk, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct geode_aes_op), - .cra_alignmask = 15, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = geode_setkey_blk, - .encrypt = geode_cbc_encrypt, - .decrypt = geode_cbc_decrypt, - .ivsize = AES_BLOCK_SIZE, - } - } -}; - -static int -geode_ecb_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir) { - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - int err, ret; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; - if (unlikely(op->keylen != AES_KEYSIZE_128)) - return fallback_blk_dec(desc, dst, src, nbytes); + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { + struct skcipher_request *subreq = skcipher_request_ctx(req); - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); + *subreq = *req; + skcipher_request_set_tfm(subreq, tctx->fallback.skcipher); + if (dir == AES_DIR_DECRYPT) + return crypto_skcipher_decrypt(subreq); + else + return crypto_skcipher_encrypt(subreq); + } - while ((nbytes = walk.nbytes)) { - op->src = walk.src.virt.addr, - op->dst = walk.dst.virt.addr; - op->mode = AES_MODE_ECB; - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); - op->dir = AES_DIR_DECRYPT; + err = skcipher_walk_virt(&walk, req, false); - ret = geode_aes_crypt(op); - nbytes -= ret; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr, + round_down(nbytes, AES_BLOCK_SIZE), + walk.iv, mode, dir); + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } return err; } -static int -geode_ecb_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int geode_cbc_encrypt(struct skcipher_request *req) { - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - int err, ret; - - if (unlikely(op->keylen != AES_KEYSIZE_128)) - return fallback_blk_enc(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = 
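An illustrative aside, not part of the patch: the geode conversion above is the stock skcipher-with-fallback pattern — allocate a software fallback at init time, reserve room for one subrequest in every request, and forward whatever the engine cannot serve. A minimal sketch assuming only the kernel crypto API; every toy_* name is hypothetical:

#include <crypto/internal/skcipher.h>
#include <linux/err.h>

struct toy_tfm_ctx {
        struct crypto_skcipher *fallback;
        unsigned int keylen;            /* set in setkey(), elided here */
};

static int toy_init_tfm(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct toy_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->fallback = crypto_alloc_skcipher(name, 0,
                                              CRYPTO_ALG_NEED_FALLBACK |
                                              CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->fallback))
                return PTR_ERR(ctx->fallback);

        /* Reserve per-request space for one subrequest to the fallback. */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
                                    crypto_skcipher_reqsize(ctx->fallback));
        return 0;
}

static int toy_encrypt(struct skcipher_request *req)
{
        struct toy_tfm_ctx *ctx =
                crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

        if (ctx->keylen != 16) {        /* pretend HW only does AES-128 */
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                *subreq = *req;         /* same src/dst/iv, new tfm below */
                skcipher_request_set_tfm(subreq, ctx->fallback);
                return crypto_skcipher_encrypt(subreq);
        }
        return 0;                       /* hardware path elided */
}

The subrequest copy works because skcipher_request_ctx() points into memory sized by the crypto_skcipher_set_reqsize() call above.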
blkcipher_walk_virt(desc, &walk); - - while ((nbytes = walk.nbytes)) { - op->src = walk.src.virt.addr, - op->dst = walk.dst.virt.addr; - op->mode = AES_MODE_ECB; - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); - op->dir = AES_DIR_ENCRYPT; - - ret = geode_aes_crypt(op); - nbytes -= ret; - ret = blkcipher_walk_done(desc, &walk, nbytes); - } - - return err; + return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT); } -static struct crypto_alg geode_ecb_alg = { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-geode", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | - CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK, - .cra_init = fallback_init_blk, - .cra_exit = fallback_exit_blk, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct geode_aes_op), - .cra_alignmask = 15, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = geode_setkey_blk, - .encrypt = geode_ecb_encrypt, - .decrypt = geode_ecb_decrypt, - } - } +static int geode_cbc_decrypt(struct skcipher_request *req) +{ + return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT); +} + +static int geode_ecb_encrypt(struct skcipher_request *req) +{ + return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT); +} + +static int geode_ecb_decrypt(struct skcipher_request *req) +{ + return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT); +} + +static struct skcipher_alg geode_skcipher_algs[] = { + { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-geode", + .base.cra_priority = 400, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), + .base.cra_alignmask = 15, + .base.cra_module = THIS_MODULE, + .init = geode_init_skcipher, + .exit = geode_exit_skcipher, + .setkey = geode_setkey_skcipher, + .encrypt = geode_cbc_encrypt, + .decrypt = geode_cbc_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-geode", + .base.cra_priority = 400, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), + .base.cra_alignmask = 15, + .base.cra_module = THIS_MODULE, + .init = geode_init_skcipher, + .exit = geode_exit_skcipher, + .setkey = geode_setkey_skcipher, + .encrypt = geode_ecb_encrypt, + .decrypt = geode_ecb_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, }; static void geode_aes_remove(struct pci_dev *dev) { crypto_unregister_alg(&geode_alg); - crypto_unregister_alg(&geode_ecb_alg); - crypto_unregister_alg(&geode_cbc_alg); + crypto_unregister_skciphers(geode_skcipher_algs, + ARRAY_SIZE(geode_skcipher_algs)); pci_iounmap(dev, _iobase); _iobase = NULL; @@ -538,20 +407,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) if (ret) goto eiomap; - ret = crypto_register_alg(&geode_ecb_alg); + ret = crypto_register_skciphers(geode_skcipher_algs, + ARRAY_SIZE(geode_skcipher_algs)); if (ret) goto ealg; - ret = crypto_register_alg(&geode_cbc_alg); - if (ret) - goto eecb; - dev_notice(&dev->dev, "GEODE AES engine enabled.\n"); return 0; - eecb: - crypto_unregister_alg(&geode_ecb_alg); - ealg: crypto_unregister_alg(&geode_alg); diff 
--git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h index 5c6e131a8f9d..6d0a0cdc7647 100644 --- a/drivers/crypto/geode-aes.h +++ b/drivers/crypto/geode-aes.h @@ -46,21 +46,10 @@ #define AES_OP_TIMEOUT 0x50000 -struct geode_aes_op { - - void *src; - void *dst; - - u32 mode; - u32 dir; - u32 flags; - int len; - +struct geode_aes_tfm_ctx { u8 key[AES_KEYSIZE_128]; - u8 *iv; - union { - struct crypto_blkcipher *blk; + struct crypto_skcipher *skcipher; struct crypto_cipher *cip; } fallback; u32 keylen; diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index ebaf91e0146d..f7f0a1fb6895 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -17,6 +17,7 @@ config CRYPTO_DEV_HISI_SEC config CRYPTO_DEV_HISI_QM tristate depends on ARM64 && PCI && PCI_MSI + select NEED_SG_DMA_LENGTH help HiSilicon accelerator engines use a common queue management interface. Specific engine driver may use this module. @@ -34,6 +35,5 @@ config CRYPTO_DEV_HISI_ZIP depends on ARM64 && PCI && PCI_MSI select CRYPTO_DEV_HISI_QM select CRYPTO_HISI_SGL - select SG_SPLIT help Support for HiSilicon ZIP Driver diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index ffb00d987d02..99f21d848d4f 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -12,6 +12,10 @@ /* hisi_zip_sqe dw3 */ #define HZIP_BD_STATUS_M GENMASK(7, 0) +/* hisi_zip_sqe dw7 */ +#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0) +/* hisi_zip_sqe dw8 */ +#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) /* hisi_zip_sqe dw9 */ #define HZIP_REQ_TYPE_M GENMASK(7, 0) #define HZIP_ALG_TYPE_ZLIB 0x02 diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 59023545a1c4..cf34bfdfb3e6 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -45,10 +45,8 @@ enum hisi_zip_alg_type { struct hisi_zip_req { struct acomp_req *req; - struct scatterlist *src; - struct scatterlist *dst; - size_t slen; - size_t dlen; + int sskip; + int dskip; struct hisi_acc_hw_sgl *hw_src; struct hisi_acc_hw_sgl *hw_dst; dma_addr_t dma_src; @@ -94,13 +92,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, - u32 dlen) + u32 dlen, int sskip, int dskip) { memset(sqe, 0, sizeof(struct hisi_zip_sqe)); - sqe->input_data_length = slen; + sqe->input_data_length = slen - sskip; + sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip); + sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip); sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); - sqe->dest_avail_out = dlen; + sqe->dest_avail_out = dlen - dskip; sqe->source_addr_l = lower_32_bits(s_addr); sqe->source_addr_h = upper_32_bits(s_addr); sqe->dest_addr_l = lower_32_bits(d_addr); @@ -301,11 +301,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP) - kfree(req->dst); - else - kfree(req->src); - write_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); memset(req, 0, sizeof(struct hisi_zip_req)); @@ -333,8 +328,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) } dlen = sqe->produced; - hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); - hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); + hisi_acc_sg_buf_unmap(dev, acomp_req->src, 
req->hw_src); + hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; acomp_req->dlen = dlen + head_size; @@ -428,20 +423,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type) } } -static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes, - size_t remains, struct scatterlist **out) -{ -#define SPLIT_NUM 2 - size_t split_sizes[SPLIT_NUM]; - int out_mapped_nents[SPLIT_NUM]; - - split_sizes[0] = bytes; - split_sizes[1] = remains; - - return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out, - out_mapped_nents, GFP_KERNEL); -} - static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, struct hisi_zip_qp_ctx *qp_ctx, size_t head_size, bool is_comp) @@ -449,31 +430,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct hisi_zip_req *q = req_q->q; struct hisi_zip_req *req_cache; - struct scatterlist *out[2]; - struct scatterlist *sgl; - size_t len; - int ret, req_id; - - /* - * remove/add zlib/gzip head, as hardware operations do not include - * comp head. so split req->src to get sgl without heads in acomp, or - * add comp head to req->dst ahead of that hardware output compressed - * data in sgl splited from req->dst without comp head. - */ - if (is_comp) { - sgl = req->dst; - len = req->dlen - head_size; - } else { - sgl = req->src; - len = req->slen - head_size; - } - - ret = get_sg_skip_bytes(sgl, head_size, len, out); - if (ret) - return ERR_PTR(ret); - - /* sgl for comp head is useless, so free it now */ - kfree(out[0]); + int req_id; write_lock(&req_q->req_lock); @@ -481,7 +438,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, if (req_id >= req_q->size) { write_unlock(&req_q->req_lock); dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); - kfree(out[1]); return ERR_PTR(-EBUSY); } set_bit(req_id, req_q->req_bitmap); @@ -489,16 +445,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, req_cache = q + req_id; req_cache->req_id = req_id; req_cache->req = req; + if (is_comp) { - req_cache->src = req->src; - req_cache->dst = out[1]; - req_cache->slen = req->slen; - req_cache->dlen = req->dlen - head_size; + req_cache->sskip = 0; + req_cache->dskip = head_size; } else { - req_cache->src = out[1]; - req_cache->dst = req->dst; - req_cache->slen = req->slen - head_size; - req_cache->dlen = req->dlen; + req_cache->sskip = head_size; + req_cache->dskip = 0; } write_unlock(&req_q->req_lock); @@ -510,6 +463,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_qp_ctx *qp_ctx) { struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; + struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool; @@ -517,16 +471,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, dma_addr_t output; int ret; - if (!req->src || !req->slen || !req->dst || !req->dlen) + if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) return -EINVAL; - req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool, + req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, req->req_id << 1, &input); if (IS_ERR(req->hw_src)) return PTR_ERR(req->hw_src); req->dma_src = input; - req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool, + req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, (req->req_id << 1) + 1, &output); if 
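An illustrative aside, not part of the patch: the hisi_zip rework above drops sg_split() and instead tells the hardware where the payload starts inside the mapped scatterlists, via 24-bit offset fields in the descriptor. FIELD_PREP() and GENMASK() are real kernel helpers; the DESC_* layout and fill_desc() are hypothetical:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DESC_IN_OFFSET  GENMASK(23, 0)  /* hypothetical dw7 layout */
#define DESC_OUT_OFFSET GENMASK(23, 0)  /* hypothetical dw8 layout */

/* Skip a compression header in place instead of splitting the sgl. */
static void fill_desc(u32 *dw7, u32 *dw8, u32 *in_len, u32 *out_len,
                      u32 slen, u32 dlen, int sskip, int dskip)
{
        *dw7 = FIELD_PREP(DESC_IN_OFFSET, sskip);   /* payload starts here */
        *dw8 = FIELD_PREP(DESC_OUT_OFFSET, dskip);  /* leave room for header */
        *in_len = slen - sskip;
        *out_len = dlen - dskip;
}

Skipping in place removes one allocation (and its kfree() paths) per request, which is why hisi_zip_create_req() and hisi_zip_remove_req() shrink so much above.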
(IS_ERR(req->hw_dst)) { @@ -535,8 +489,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, } req->dma_dst = output; - hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen, - req->dlen); + hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen, + a_req->dlen, req->sskip, req->dskip); hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); hisi_zip_config_tag(zip_sqe, req->req_id); @@ -548,9 +502,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, return -EINPROGRESS; err_unmap_output: - hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); + hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); err_unmap_input: - hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); + hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); return ret; } diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 4ab1bde8dd9b..991a4425f006 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -221,9 +221,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) /* Step #3: Determine log2 of hash table size */ cs_ht_sz = __fls(asize - cs_rc_max) - 2; /* Step #4: determine current size of hash table in dwords */ - cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */ + cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */ /* Step #5: add back excess words and see if we can fit more RC's */ - cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4)); + cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2)); /* Clear the cache RAMs */ eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc); @@ -1120,6 +1120,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid, irq_name, irq); return irq; } + } else { + return -ENXIO; } ret = devm_request_threaded_irq(dev, irq, handler, diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 3cbefb41b099..2680e1525db5 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1613,6 +1613,11 @@ static const struct of_device_id spacc_of_id_table[] = { MODULE_DEVICE_TABLE(of, spacc_of_id_table); #endif /* CONFIG_OF */ +static void spacc_tasklet_kill(void *data) +{ + tasklet_kill(data); +} + static int spacc_probe(struct platform_device *pdev) { int i, err, ret; @@ -1655,6 +1660,14 @@ static int spacc_probe(struct platform_device *pdev) return -ENXIO; } + tasklet_init(&engine->complete, spacc_spacc_complete, + (unsigned long)engine); + + ret = devm_add_action(&pdev->dev, spacc_tasklet_kill, + &engine->complete); + if (ret) + return ret; + if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, engine->name, engine)) { dev_err(engine->dev, "failed to request IRQ\n"); @@ -1712,8 +1725,6 @@ static int spacc_probe(struct platform_device *pdev) INIT_LIST_HEAD(&engine->completed); INIT_LIST_HEAD(&engine->in_progress); engine->in_flight = 0; - tasklet_init(&engine->complete, spacc_spacc_complete, - (unsigned long)engine); platform_set_drvdata(pdev, engine); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 6536fd4bee65..7e5e092a23b3 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -72,7 +72,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) oi = 0; oo = 0; do { - todo = min3(rx_cnt, ileft, (mi.length - oi) / 4); + todo = min(rx_cnt, ileft); + todo = min_t(size_t, todo, (mi.length - oi) / 4); if (todo) { ileft -= todo; writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); @@ -87,7 +88,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - todo = 
min3(tx_cnt, oleft, (mo.length - oo) / 4); + todo = min(tx_cnt, oleft); + todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { oleft -= todo; readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); @@ -239,7 +241,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * todo is the number of consecutive 4byte word that we * can read from current SG */ - todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4); + todo = min(rx_cnt, ileft / 4); + todo = min_t(size_t, todo, (mi.length - oi) / 4); if (todo && !ob) { writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); @@ -253,8 +256,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * we need to be able to write all buf in one * pass, so it is why we min() with rx_cnt */ - todo = min3(rx_cnt * 4 - ob, ileft, - mi.length - oi); + todo = min(rx_cnt * 4 - ob, ileft); + todo = min_t(size_t, todo, mi.length - oi); memcpy(buf + ob, mi.addr + oi, todo); ileft -= todo; oi += todo; @@ -274,7 +277,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n", + dev_dbg(ss->dev, + "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n", mode, oi, mi.length, ileft, areq->cryptlen, rx_cnt, oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob); @@ -282,7 +286,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) if (!tx_cnt) continue; /* todo in 4bytes word */ - todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4); + todo = min(tx_cnt, oleft / 4); + todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oleft -= todo * 4; @@ -308,7 +313,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * no more than remaining buffer * no need to test against oleft */ - todo = min(mo.length - oo, obl - obo); + todo = min_t(size_t, + mo.length - oo, obl - obo); memcpy(mo.addr + oo, bufo + obo, todo); oleft -= todo; obo += todo; diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index fcffba5ef927..07df012893bb 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -175,7 +175,7 @@ static int sun4i_hash(struct ahash_request *areq) */ unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo; unsigned int in_i = 0; - u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0; + u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0; struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); @@ -184,6 +184,7 @@ static int sun4i_hash(struct ahash_request *areq) struct sg_mapping_iter mi; int in_r, err = 0; size_t copied = 0; + __le32 wb = 0; dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", __func__, crypto_tfm_alg_name(areq->base.tfm), @@ -272,8 +273,8 @@ static int sun4i_hash(struct ahash_request *areq) */ while (op->len < 64 && i < end) { /* how many bytes we can read from current SG */ - in_r = min3(mi.length - in_i, end - i, - 64 - op->len); + in_r = min(end - i, 64 - op->len); + in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; @@ -293,8 +294,8 @@ static int sun4i_hash(struct ahash_request *areq) } if (mi.length - in_i > 3 && i < end) { /* how many bytes we can read from current SG */ - 
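An illustrative aside, not part of the patch: the kernel's min()/min3() macros reject operands of distinct types, so mixing a u32 FIFO counter with size_t scatterlist arithmetic produces build warnings on 64-bit. The sun4i-ss hunks above therefore split each min3() into a same-type min() plus an explicit min_t(). A hypothetical helper with the same shape:

#include <linux/kernel.h>       /* min(), min_t() */
#include <linux/types.h>

static u32 words_to_copy(u32 fifo_space, u32 words_left, size_t sg_bytes_left)
{
        u32 todo;

        /*
         * min3(fifo_space, words_left, sg_bytes_left / 4) would trip the
         * type check: sg_bytes_left is size_t, the counters are u32.
         * Compare like types with min(), then do the single mixed-type
         * comparison with an explicit min_t() cast.
         */
        todo = min(fifo_space, words_left);
        todo = min_t(size_t, todo, sg_bytes_left / 4);
        return todo;
}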
in_r = min3(mi.length - in_i, areq->nbytes - i, - ((mi.length - in_i) / 4) * 4); + in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i); + in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r); /* how many bytes we can write in the device*/ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4); writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo); @@ -320,8 +321,8 @@ static int sun4i_hash(struct ahash_request *areq) if ((areq->nbytes - i) < 64) { while (i < areq->nbytes && in_i < mi.length && op->len < 64) { /* how many bytes we can read from current SG */ - in_r = min3(mi.length - in_i, areq->nbytes - i, - 64 - op->len); + in_r = min(areq->nbytes - i, 64 - op->len); + in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; @@ -395,7 +396,7 @@ hash_final: nbw = op->len - 4 * nwait; if (nbw) { - wb = *(u32 *)(op->buf + nwait * 4); + wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4)); wb &= GENMASK((nbw * 8) - 1, 0); op->byte_count += nbw; @@ -404,7 +405,7 @@ hash_final: /* write the remaining bytes of the nbw buffer */ wb |= ((1 << 7) << (nbw * 8)); - bf[j++] = wb; + bf[j++] = le32_to_cpu(wb); /* * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) @@ -423,13 +424,13 @@ hash_final: /* write the length of data */ if (op->mode == SS_OP_SHA1) { - __be64 bits = cpu_to_be64(op->byte_count << 3); - bf[j++] = lower_32_bits(bits); - bf[j++] = upper_32_bits(bits); + __be64 *bits = (__be64 *)&bf[j]; + *bits = cpu_to_be64(op->byte_count << 3); + j += 2; } else { - __le64 bits = op->byte_count << 3; - bf[j++] = lower_32_bits(bits); - bf[j++] = upper_32_bits(bits); + __le64 *bits = (__le64 *)&bf[j]; + *bits = cpu_to_le64(op->byte_count << 3); + j += 2; } writesl(ss->base + SS_RXFIFO, bf, j); @@ -471,7 +472,7 @@ hash_final: } } else { for (i = 0; i < 4; i++) { - v = readl(ss->base + SS_MD0 + i * 4); + v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4)); memcpy(areq->result + i * 4, &v, 4); } } diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 42d19205166b..82b316b2f537 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -105,8 +105,6 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg) *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC; break; default: - pr_err("virtio_crypto: Unsupported key length: %d\n", - key_len); return -EINVAL; } return 0; @@ -437,6 +435,11 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, goto free; } memcpy(iv, req->info, ivsize); + if (!vc_sym_req->encrypt) + scatterwalk_map_and_copy(req->info, req->src, + req->nbytes - AES_BLOCK_SIZE, + AES_BLOCK_SIZE, 0); + sg_init_one(&iv_sg, iv, ivsize); sgs[num_out++] = &iv_sg; vc_sym_req->iv = iv; @@ -484,6 +487,11 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; + if (!req->nbytes) + return 0; + if (req->nbytes % AES_BLOCK_SIZE) + return -EINVAL; + vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; vc_sym_req->ablkcipher_ctx = ctx; @@ -504,6 +512,11 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; + if (!req->nbytes) + return 0; + if (req->nbytes % AES_BLOCK_SIZE) + return -EINVAL; + vc_req->dataq = data_vq; vc_req->alg_cb = 
virtio_crypto_dataq_sym_callback; vc_sym_req->ablkcipher_ctx = ctx; @@ -563,6 +576,10 @@ static void virtio_crypto_ablkcipher_finalize_req( struct ablkcipher_request *req, int err) { + if (vc_sym_req->encrypt) + scatterwalk_map_and_copy(req->info, req->dst, + req->nbytes - AES_BLOCK_SIZE, + AES_BLOCK_SIZE, 0); crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, req, err); kzfree(vc_sym_req->iv); diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index cab32cfec9c4..709670d2b553 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile @@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) -TARGET := linux-ppc64le +override flavour := linux-ppc64le else -TARGET := linux-ppc64 +override flavour := linux-ppc64 endif quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $(<) $(TARGET) > $(@) + cmd_perl = $(PERL) $(<) $(flavour) > $(@) targets += aesp8-ppc.S ghashp8-ppc.S diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index d59e736882f6..9fee1b1532a4 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) u8 tweak[AES_BLOCK_SIZE]; int ret; + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) { struct skcipher_request *subreq = skcipher_request_ctx(req); diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index defe1d438710..1433f2ba9d3b 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -99,6 +99,7 @@ config ARM_TEGRA_DEVFREQ ARCH_TEGRA_210_SOC || \ COMPILE_TEST select PM_OPP + depends on COMMON_CLK help This adds the DEVFREQ driver for the Tegra family of SoCs. It reads ACTMON counters of memory controllers and adjusts the @@ -117,7 +118,8 @@ config ARM_TEGRA20_DEVFREQ config ARM_RK3399_DMC_DEVFREQ tristate "ARM RK3399 DMC DEVFREQ Driver" - depends on ARCH_ROCKCHIP + depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) select DEVFREQ_EVENT_ROCKCHIP_DFI select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 446490c9d635..ff81b7cdab71 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -160,6 +160,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) int lev, prev_lev, ret = 0; unsigned long cur_time; + lockdep_assert_held(&devfreq->lock); cur_time = jiffies; /* Immediately exit if previous_freq is not initialized yet. 
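An illustrative aside, not part of the patch: the virtio-crypto hunks above implement the CBC contract that req->info holds the next IV — the last ciphertext block — once a request completes. On decryption the ciphertext sits in req->src and must be captured before the operation runs (src and dst may alias for in-place requests); on encryption it is read back from req->dst in the finalize path. scatterwalk_map_and_copy() is the real helper; save_next_iv() and its use of the legacy ablkcipher request type are a hypothetical reduction:

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <linux/crypto.h>

static void save_next_iv(struct ablkcipher_request *req, bool encrypt)
{
        /* Ciphertext is the input when decrypting, the output when
         * encrypting; either way its last block becomes the next IV. */
        struct scatterlist *cipher_sg = encrypt ? req->dst : req->src;

        scatterwalk_map_and_copy(req->info, cipher_sg,
                                 req->nbytes - AES_BLOCK_SIZE,
                                 AES_BLOCK_SIZE, 0);
}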
*/ @@ -550,26 +551,30 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, void *devp) { struct devfreq *devfreq = container_of(nb, struct devfreq, nb); - int ret; + int err = -EINVAL; mutex_lock(&devfreq->lock); devfreq->scaling_min_freq = find_available_min_freq(devfreq); - if (!devfreq->scaling_min_freq) { - mutex_unlock(&devfreq->lock); - return -EINVAL; - } + if (!devfreq->scaling_min_freq) + goto out; devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { - mutex_unlock(&devfreq->lock); - return -EINVAL; + devfreq->scaling_max_freq = ULONG_MAX; + goto out; } - ret = update_devfreq(devfreq); - mutex_unlock(&devfreq->lock); + err = update_devfreq(devfreq); - return ret; +out: + mutex_unlock(&devfreq->lock); + if (err) + dev_err(devfreq->dev.parent, + "failed to update frequency from OPP notifier (%d)\n", + err); + + return NOTIFY_OK; } /** @@ -583,11 +588,6 @@ static void devfreq_dev_release(struct device *dev) struct devfreq *devfreq = to_devfreq(dev); mutex_lock(&devfreq_list_lock); - if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { - mutex_unlock(&devfreq_list_lock); - dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); - return; - } list_del(&devfreq->node); mutex_unlock(&devfreq_list_lock); @@ -613,7 +613,6 @@ struct devfreq *devfreq_add_device(struct device *dev, { struct devfreq *devfreq; struct devfreq_governor *governor; - static atomic_t devfreq_no = ATOMIC_INIT(-1); int err = 0; if (!dev || !profile || !governor_name) { @@ -642,6 +641,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->dev.parent = dev; devfreq->dev.class = devfreq_class; devfreq->dev.release = devfreq_dev_release; + INIT_LIST_HEAD(&devfreq->node); devfreq->profile = profile; strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); devfreq->previous_freq = profile->initial_freq; @@ -676,8 +676,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); atomic_set(&devfreq->suspend_count, 0); - dev_set_name(&devfreq->dev, "devfreq%d", - atomic_inc_return(&devfreq_no)); + dev_set_name(&devfreq->dev, "%s", dev_name(dev)); err = device_register(&devfreq->dev); if (err) { mutex_unlock(&devfreq->lock); @@ -1111,6 +1110,14 @@ err_out: } EXPORT_SYMBOL(devfreq_remove_governor); +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct devfreq *devfreq = to_devfreq(dev); + return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent)); +} +static DEVICE_ATTR_RO(name); + static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1195,7 +1202,7 @@ static ssize_t available_governors_show(struct device *d, * The devfreq with immutable governor (e.g., passive) shows * only own governor. 
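An illustrative aside, not part of the patch: the devfreq_notifier_call() rework above is a common notifier idiom — one unlock behind a single label, the error logged instead of returned, and NOTIFY_OK returned unconditionally because the OPP notifier chain cannot act on an -errno anyway. A self-contained sketch; all toy_* names are hypothetical:

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

struct toy_dev {
        struct notifier_block nb;
        struct mutex lock;
        struct device *dev;
};

static bool toy_refresh_limits(struct toy_dev *d) { return true; } /* stub */
static int toy_apply_limits(struct toy_dev *d) { return 0; }       /* stub */

static int toy_notifier_call(struct notifier_block *nb, unsigned long type,
                             void *data)
{
        struct toy_dev *d = container_of(nb, struct toy_dev, nb);
        int err = -EINVAL;

        mutex_lock(&d->lock);
        if (!toy_refresh_limits(d))
                goto out;                       /* keep err = -EINVAL */

        err = toy_apply_limits(d);
out:
        mutex_unlock(&d->lock);
        if (err)
                dev_err(d->dev, "failed to update frequency (%d)\n", err);

        return NOTIFY_OK;                       /* never an -errno */
}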
*/ - if (df->governor->immutable) { + if (df->governor && df->governor->immutable) { count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, "%s ", df->governor_name); /* @@ -1397,12 +1404,17 @@ static ssize_t trans_stat_show(struct device *dev, int i, j; unsigned int max_state = devfreq->profile->max_state; - if (!devfreq->stop_polling && - devfreq_update_status(devfreq, devfreq->previous_freq)) - return 0; if (max_state == 0) return sprintf(buf, "Not Supported.\n"); + mutex_lock(&devfreq->lock); + if (!devfreq->stop_polling && + devfreq_update_status(devfreq, devfreq->previous_freq)) { + mutex_unlock(&devfreq->lock); + return 0; + } + mutex_unlock(&devfreq->lock); + len = sprintf(buf, " From : To\n"); len += sprintf(buf + len, " :"); for (i = 0; i < max_state; i++) @@ -1434,6 +1446,7 @@ static ssize_t trans_stat_show(struct device *dev, static DEVICE_ATTR_RO(trans_stat); static struct attribute *devfreq_attrs[] = { + &dev_attr_name.attr, &dev_attr_governor.attr, &dev_attr_available_governors.attr, &dev_attr_cur_freq.attr, diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index cef2cf5347ca..a53e0a6ffdfe 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU config DEVFREQ_EVENT_ROCKCHIP_DFI tristate "ROCKCHIP DFI DEVFREQ event Driver" - depends on ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST help This add the devfreq-event driver for Rockchip SoC. It provides DFI (DDR Monitor Module) driver to count ddr load. diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 87b42055e6bc..c4873bb791f8 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -101,17 +101,22 @@ static struct __exynos_ppmu_events { PPMU_EVENT(dmc1_1), }; -static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +static int __exynos_ppmu_find_ppmu_id(const char *edev_name) { int i; for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) - if (!strcmp(edev->desc->name, ppmu_events[i].name)) + if (!strcmp(edev_name, ppmu_events[i].name)) return ppmu_events[i].id; return -EINVAL; } +static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +{ + return __exynos_ppmu_find_ppmu_id(edev->desc->name); +} + /* * The devfreq-event ops structure for PPMU v1.1 */ @@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np, * use default if not. */ if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) { - struct devfreq_event_dev edev; int id; /* Not all registers take the same value for * read+write data count. 
*/ - edev.desc = &desc[j]; - id = exynos_ppmu_find_ppmu_id(&edev); + id = __exynos_ppmu_find_ppmu_id(desc[j].name); switch (id) { case PPMU_PMNCNT0: diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 433d91d710e4..0fb0358f0073 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file) dma_resv_fini(dmabuf->resv); module_put(dmabuf->owner); + kfree(dmabuf->name); kfree(dmabuf); return 0; } diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 25c5c071645b..91185db9a952 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, a_fences = get_fences(a, &a_num_fences); b_fences = get_fences(b, &b_num_fences); if (a_num_fences > INT_MAX - b_num_fences) - return NULL; + goto err; num_fences = a_num_fences + b_num_fences; diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e51d836afcc7..1092d4ce723e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) return; } - spin_lock(&cohc->lock); - /* * When we reach this point, at least one queue item * should have been moved over from cohc->queue to @@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) if (coh901318_queue_start(cohc) == NULL) cohc->busy = 0; - spin_unlock(&cohc->lock); - /* * This tasklet will remove items from cohc->active * and thus terminates them. diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index a0ee404b736e..f1d149e32839 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev) struct dma_device *dma_dev; struct axi_dmac *dmac; struct resource *res; + struct regmap *regmap; int ret; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); @@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); - devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, + &axi_dmac_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto err_free_irq; + } return 0; +err_free_irq: + free_irq(dmac->irq, dmac); err_unregister_of: of_dma_controller_free(pdev->dev.of_node); err_unregister_device: diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index cafb1cc065bb..bf95f1d551c5 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1004,7 +1004,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = { static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { .nb_channels = 6, .transfer_ord_max = 5, - .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | + JZ_SOC_DATA_BREAK_LINKS, }; static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 03ac4b96117c..4b604086b1b3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -179,7 +179,7 @@ __dma_device_satisfies_mask(struct dma_device *device, static struct module *dma_chan_to_owner(struct dma_chan *chan) { - return chan->device->dev->driver->owner; + return chan->device->owner; } /** @@ -919,6 +919,8 @@ int dma_async_device_register(struct dma_device 
*device) return -EIO; } + device->owner = device->dev->driver->owner; + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index c90c798e5ec3..0585d749d935 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev) data->chip = chip; - chip->clk = devm_clk_get(chip->dev, "hclk"); + chip->clk = devm_clk_get_optional(chip->dev, "hclk"); if (IS_ERR(chip->clk)) return PTR_ERR(chip->clk); err = clk_prepare_enable(chip->clk); diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 06664fbd2d91..95cc0256b387 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) vchan_dma_desc_free_list(&fsl_chan->vchan, &head); - if (!fsl_queue->comp_pool && !fsl_queue->comp_pool) + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) return; list_for_each_entry_safe(comp_temp, _comp_temp, @@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev) return ret; fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); + if (fsl_qdma->irq_base < 0) + return fsl_qdma->irq_base; + fsl_qdma->feature = of_property_read_bool(np, "big-endian"); INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c27e206a764c..67736c801f3c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1328,13 +1328,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan) sdma_channel_synchronize(chan); - if (sdmac->event_id0) + if (sdmac->event_id0 >= 0) sdma_event_disable(sdmac, sdmac->event_id0); if (sdmac->event_id1) sdma_event_disable(sdmac, sdmac->event_id1); sdmac->event_id0 = 0; sdmac->event_id1 = 0; + sdmac->context_loaded = false; sdma_set_channel_priority(sdmac, 0); @@ -1628,7 +1629,7 @@ static int sdma_config(struct dma_chan *chan, memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); /* Set ENBLn earlier to make sure dma request triggered after that */ - if (sdmac->event_id0) { + if (sdmac->event_id0 >= 0) { if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) return -EINVAL; sdma_event_enable(sdmac, sdmac->event_id0); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a422a8b43cf..18c011e57592 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) descs->virt = dma_alloc_coherent(to_dev(ioat_chan), SZ_2M, &descs->hw, flags); - if (!descs->virt && (i > 0)) { + if (!descs->virt) { int idx; for (idx = 0; idx < i; idx++) { + descs = &ioat_chan->descs[idx]; dma_free_coherent(to_dev(ioat_chan), SZ_2M, descs->virt, descs->hw); descs->virt = NULL; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 4b36c8810517..d05471653224 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) c = p->vchan; if (c && (tc1 & BIT(i))) { spin_lock_irqsave(&c->vc.lock, flags); - vchan_cookie_complete(&p->ds_run->vd); - p->ds_done = p->ds_run; - p->ds_run = NULL; + if (p->ds_run != NULL) { + vchan_cookie_complete(&p->ds_run->vd); + p->ds_done = p->ds_run; + p->ds_run = NULL; + } spin_unlock_irqrestore(&c->vc.lock, flags); } if (c && (tc2 & BIT(i))) { @@ -271,6 +273,10 @@ static 
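An illustrative aside, not part of the patch: the ioat change above fixes a classic unwind bug — on allocation failure, the cleanup loop kept freeing through the pointer left over from the failed iteration instead of re-deriving each previously allocated element. A hypothetical reduction:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct ring_ent {
        void *virt;
        dma_addr_t hw;
};

static int alloc_ring(struct device *dev, struct ring_ent *ents, int n,
                      size_t sz)
{
        int i;

        for (i = 0; i < n; i++) {
                ents[i].virt = dma_alloc_coherent(dev, sz, &ents[i].hw,
                                                  GFP_KERNEL);
                if (!ents[i].virt) {
                        int idx;

                        for (idx = 0; idx < i; idx++) {
                                /* Re-derive the element each pass: freeing
                                 * through the stale, failed pointer would
                                 * leak every earlier allocation. */
                                struct ring_ent *e = &ents[idx];

                                dma_free_coherent(dev, sz, e->virt, e->hw);
                                e->virt = NULL;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}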
int k3_dma_start_txd(struct k3_dma_chan *c) if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) return -EAGAIN; + /* Avoid losing track of ds_run if a transaction is in flight */ + if (c->phy->ds_run) + return -EAGAIN; + if (vd) { struct k3_dma_desc_sw *ds = container_of(vd, struct k3_dma_desc_sw, vd); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 3a45079d11ec..4a750e29bfb5 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Do not allocate if desc are waiting for ack */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { - if (async_tx_test_ack(&dma_desc->txd)) { + if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) { list_del(&dma_desc->node); spin_unlock_irqrestore(&tdc->lock, flags); dma_desc->txd.flags = 0; @@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) bool was_busy; spin_lock_irqsave(&tdc->lock, flags); - if (list_empty(&tdc->pending_sg_req)) { - spin_unlock_irqrestore(&tdc->lock, flags); - return 0; - } if (!tdc->busy) goto skip_dma_stop; diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index ba7c4f07fcd6..80b780e49971 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2403,8 +2403,10 @@ static int edma_probe(struct platform_device *pdev) ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, sizeof(*ecc->tc_list), GFP_KERNEL); - if (!ecc->tc_list) - return -ENOMEM; + if (!ecc->tc_list) { + ret = -ENOMEM; + goto err_reg1; + } for (i = 0;; i++) { ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index ec4adf4260a0..256fc662c500 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg) dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); - vchan_vdesc_fini(vd); - dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + vchan_vdesc_fini(vd); } } diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 5d56f1e4d332..43acba2a1c0e 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1433,6 +1433,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan) chan->err = false; chan->idle = true; + chan->desc_pendingcount = 0; chan->desc_submitcount = 0; return err; diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index fbda4b876afd..0be3d1b17f03 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -560,6 +560,7 @@ static const struct regmap_config s10_sdram_regmap_cfg = { .reg_write = s10_protected_reg_write, .use_single_read = true, .use_single_write = true, + .fast_io = true, }; /************** ***********/ diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index c1d4536ae466..cc5e56d752c8 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2936,6 +2936,7 @@ static int init_csrows_df(struct mem_ctl_info *mci) dimm->mtype = pvt->dram_type; dimm->edac_mode = edac_mode; dimm->dtype = dev_type; + dimm->grain = 64; } } @@ -3012,6 +3013,7 @@ static int init_csrows(struct mem_ctl_info *mci) dimm = csrow->channels[j]->dimm; dimm->mtype = pvt->dram_type; dimm->edac_mode = edac_mode; + dimm->grain = 64; } } diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index e6fd079783bd..e73ca303f1a7 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -503,16 +503,10 @@ void edac_mc_free(struct 
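An illustrative aside, not part of the patch: the virt-dma reorder above enforces invoke-then-free. The completion callback is handed &vd->tx_result, which lives inside the descriptor, so vchan_vdesc_fini() must run only after the callback returns. A hypothetical reduction of the rule:

#include <linux/list.h>
#include <linux/slab.h>

typedef void (*toy_callback_t)(void *param, int *result);

struct toy_desc {
        struct list_head node;
        toy_callback_t cb;
        void *cb_param;
        int result;
};

static void toy_desc_free(struct toy_desc *d)
{
        kfree(d);
}

static void complete_one(struct toy_desc *d)
{
        list_del(&d->node);
        /* Invoke while the descriptor (and d->result) is still alive... */
        if (d->cb)
                d->cb(d->cb_param, &d->result);
        /* ...and free strictly afterwards. */
        toy_desc_free(d);
}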
mem_ctl_info *mci) { edac_dbg(1, "\n"); - /* If we're not yet registered with sysfs free only what was allocated - * in edac_mc_alloc(). - */ - if (!device_is_registered(&mci->dev)) { - _edac_mc_free(mci); - return; - } + if (device_is_registered(&mci->dev)) + edac_unregister_sysfs(mci); - /* the mci instance is freed here, when the sysfs object is dropped */ - edac_unregister_sysfs(mci); + _edac_mc_free(mci); } EXPORT_SYMBOL_GPL(edac_mc_free); diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 32d016f1ecd1..0287884ae28c 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = { static void csrow_attr_release(struct device *dev) { - struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); - - edac_dbg(1, "device %s released\n", dev_name(dev)); - kfree(csrow); + /* release device with _edac_mc_free() */ } static const struct device_type csrow_attr_type = { @@ -447,8 +444,7 @@ error: csrow = mci->csrows[i]; if (!nr_pages_per_csrow(csrow)) continue; - - device_del(&mci->csrows[i]->dev); + device_unregister(&mci->csrows[i]->dev); } return err; @@ -620,10 +616,7 @@ static const struct attribute_group *dimm_attr_groups[] = { static void dimm_attr_release(struct device *dev) { - struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); - - edac_dbg(1, "device %s released\n", dev_name(dev)); - kfree(dimm); + /* release device with _edac_mc_free() */ } static const struct device_type dimm_attr_type = { @@ -906,10 +899,7 @@ static const struct attribute_group *mci_attr_groups[] = { static void mci_attr_release(struct device *dev) { - struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); - - edac_dbg(1, "device %s released\n", dev_name(dev)); - kfree(mci); + /* release device with _edac_mc_free() */ } static const struct device_type mci_attr_type = { diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 0bb62857ffb2..523dd56a798c 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -26,9 +26,18 @@ struct ghes_edac_pvt { char msg[80]; }; -static atomic_t ghes_init = ATOMIC_INIT(0); +static refcount_t ghes_refcount = REFCOUNT_INIT(0); + +/* + * Access to ghes_pvt must be protected by ghes_lock. The spinlock + * also provides the necessary (implicit) memory barrier for the SMP + * case to make the pointer visible on another CPU. + */ static struct ghes_edac_pvt *ghes_pvt; +/* GHES registration mutex */ +static DEFINE_MUTEX(ghes_reg_mutex); + /* * Sync with other, potentially concurrent callers of * ghes_edac_report_mem_error(). We don't know what the @@ -79,9 +88,8 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg) (*num_dimm)++; } -static int get_dimm_smbios_index(u16 handle) +static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle) { - struct mem_ctl_info *mci = ghes_pvt->mci; int i; for (i = 0; i < mci->tot_dimms; i++) { @@ -198,14 +206,11 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) enum hw_event_mc_err_type type; struct edac_raw_error_desc *e; struct mem_ctl_info *mci; - struct ghes_edac_pvt *pvt = ghes_pvt; + struct ghes_edac_pvt *pvt; unsigned long flags; char *p; u8 grain_bits; - if (!pvt) - return; - /* * We can do the locking below because GHES defers error processing * from NMI to IRQ context. 
Whenever that changes, we'd at least @@ -216,12 +221,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) spin_lock_irqsave(&ghes_lock, flags); + pvt = ghes_pvt; + if (!pvt) + goto unlock; + mci = pvt->mci; e = &mci->error_desc; /* Cleans the error report buffer */ memset(e, 0, sizeof (*e)); e->error_count = 1; + e->grain = 1; strcpy(e->label, "unknown label"); e->msg = pvt->msg; e->other_detail = pvt->other_detail; @@ -317,7 +327,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) /* Error grain */ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK) - e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK); + e->grain = ~mem_err->physical_addr_mask + 1; /* Memory error location, mapped on e->location */ p = e->location; @@ -348,7 +358,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) p += sprintf(p, "DIMM DMI handle: 0x%.4x ", mem_err->mem_dev_handle); - index = get_dimm_smbios_index(mem_err->mem_dev_handle); + index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle); if (index >= 0) { e->top_layer = index; e->enable_per_layer_report = true; @@ -433,8 +443,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) if (p > pvt->other_detail) *(p - 1) = '\0'; + /* Sanity-check driver-supplied grain value. */ + if (WARN_ON_ONCE(!e->grain)) + e->grain = 1; + + grain_bits = fls_long(e->grain - 1); + /* Generate the trace event */ - grain_bits = fls_long(e->grain); snprintf(pvt->detail_location, sizeof(pvt->detail_location), "APEI location: %s %s", e->location, e->other_detail); trace_mc_event(type, e->msg, e->label, e->error_count, @@ -443,6 +458,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) grain_bits, e->syndrome, pvt->detail_location); edac_raw_mc_handle_error(type, mci, e); + +unlock: spin_unlock_irqrestore(&ghes_lock, flags); } @@ -457,10 +474,12 @@ static struct acpi_platform_list plat_list[] = { int ghes_edac_register(struct ghes *ghes, struct device *dev) { bool fake = false; - int rc, num_dimm = 0; + int rc = 0, num_dimm = 0; struct mem_ctl_info *mci; + struct ghes_edac_pvt *pvt; struct edac_mc_layer layers[1]; struct ghes_edac_dimm_fill dimm_fill; + unsigned long flags; int idx = -1; if (IS_ENABLED(CONFIG_X86)) { @@ -472,11 +491,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) idx = 0; } + /* finish another registration/unregistration instance first */ + mutex_lock(&ghes_reg_mutex); + /* * We have only one logical memory controller to which all DIMMs belong. 
*/ - if (atomic_inc_return(&ghes_init) > 1) - return 0; + if (refcount_inc_not_zero(&ghes_refcount)) + goto unlock; /* Get the number of DIMMs */ dmi_walk(ghes_edac_count_dimms, &num_dimm); @@ -494,12 +516,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt)); if (!mci) { pr_info("Can't allocate memory for EDAC data\n"); - return -ENOMEM; + rc = -ENOMEM; + goto unlock; } - ghes_pvt = mci->pvt_info; - ghes_pvt->ghes = ghes; - ghes_pvt->mci = mci; + pvt = mci->pvt_info; + pvt->ghes = ghes; + pvt->mci = mci; mci->pdev = dev; mci->mtype_cap = MEM_FLAG_EMPTY; @@ -541,23 +564,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) if (rc < 0) { pr_info("Can't register at EDAC core\n"); edac_mc_free(mci); - return -ENODEV; + rc = -ENODEV; + goto unlock; } - return 0; + + spin_lock_irqsave(&ghes_lock, flags); + ghes_pvt = pvt; + spin_unlock_irqrestore(&ghes_lock, flags); + + /* only set on success */ + refcount_set(&ghes_refcount, 1); + +unlock: + mutex_unlock(&ghes_reg_mutex); + + return rc; } void ghes_edac_unregister(struct ghes *ghes) { struct mem_ctl_info *mci; + unsigned long flags; - if (!ghes_pvt) - return; + mutex_lock(&ghes_reg_mutex); - if (atomic_dec_return(&ghes_init)) - return; + if (!refcount_dec_and_test(&ghes_refcount)) + goto unlock; - mci = ghes_pvt->mci; + /* + * Wait for the irq handler being finished. + */ + spin_lock_irqsave(&ghes_lock, flags); + mci = ghes_pvt ? ghes_pvt->mci : NULL; ghes_pvt = NULL; - edac_mc_del_mc(mci->pdev); - edac_mc_free(mci); + spin_unlock_irqrestore(&ghes_lock, flags); + + if (!mci) + goto unlock; + + mci = edac_mc_del_mc(mci->pdev); + if (mci) + edac_mc_free(mci); + +unlock: + mutex_unlock(&ghes_reg_mutex); } diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index 413cdb4a591d..bb9ceeaf29bf 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev) p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc", 1, 1, NULL, 0, edac_device_alloc_index()); - if (IS_ERR(p->dci)) - return PTR_ERR(p->dci); + if (!p->dci) + return -ENOMEM; p->dci->dev = &pdev->dev; p->dci->mod_name = "Sifive ECC Manager"; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index d8ff63d91b86..a04349c6d17e 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -235,7 +235,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); if (!pdev) { - skx_printk(KERN_ERR, "Can't get tolm/tohm\n"); + edac_dbg(2, "Can't get tolm/tohm\n"); return -ENODEV; } diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 2d263382d797..880ffd833718 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) pinf = &p->ceinfo; if (!priv->p_data->quirks) { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d ", - "CE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "Bit Position: %d Data: 0x%08x\n", + "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x", + "CE", pinf->row, pinf->bank, pinf->col, pinf->bitpos, pinf->data); } else { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d ", - "CE", pinf->row, 
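An illustrative aside, not part of the patch: the ghes_edac rework above swaps a bare atomic counter for a mutex plus refcount. The mutex makes each whole register/unregister sequence atomic — a refcount alone cannot, since refcount_inc() from zero is forbidden and the first caller must finish constructing the shared instance before later callers may skip setup. A minimal sketch of the scheme:

#include <linux/mutex.h>
#include <linux/refcount.h>

static refcount_t instance_refcount = REFCOUNT_INIT(0);
static DEFINE_MUTEX(reg_mutex);

static int toy_register(void)
{
        int rc = 0;

        mutex_lock(&reg_mutex);
        if (refcount_inc_not_zero(&instance_refcount))
                goto unlock;            /* instance already published */

        /* ...allocate and publish the shared instance here, set rc... */

        refcount_set(&instance_refcount, 1);    /* only on success */
unlock:
        mutex_unlock(&reg_mutex);
        return rc;
}

static void toy_unregister(void)
{
        mutex_lock(&reg_mutex);
        if (refcount_dec_and_test(&instance_refcount)) {
                /* last user: tear down the shared instance */
        }
        mutex_unlock(&reg_mutex);
}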
pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "BankGroup Number %d Block Number %d ", - pinf->bankgrpnr, pinf->blknr); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "Bit Position: %d Data: 0x%08x\n", + "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x", + "CE", pinf->row, pinf->bank, pinf->col, + pinf->bankgrpnr, pinf->blknr, pinf->bitpos, + pinf->data); } @@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) "UE", pinf->row, pinf->bank, pinf->col); } else { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type :%s Row %d Bank %d Col %d ", - "UE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "BankGroup Number %d Block Number %d", + "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d", + "UE", pinf->row, pinf->bank, pinf->col, + pinf->bankgrpnr, pinf->blknr); } diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index 415afaf479e7..0840c0f1bf2e 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -423,9 +423,40 @@ static int axp288_extcon_probe(struct platform_device *pdev) /* Start charger cable type detection */ axp288_extcon_enable(info); + device_init_wakeup(dev, true); + platform_set_drvdata(pdev, info); + return 0; } +static int __maybe_unused axp288_extcon_suspend(struct device *dev) +{ + struct axp288_extcon_info *info = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(info->irq[VBUS_RISING_IRQ]); + + return 0; +} + +static int __maybe_unused axp288_extcon_resume(struct device *dev) +{ + struct axp288_extcon_info *info = dev_get_drvdata(dev); + + /* + * Wakeup when a charger is connected to do charger-type + * detection and generate an extcon event which makes the + * axp288 charger driver set the input current limit. 
+ */ + if (device_may_wakeup(dev)) + disable_irq_wake(info->irq[VBUS_RISING_IRQ]); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend, + axp288_extcon_resume); + static const struct platform_device_id axp288_extcon_table[] = { { .name = "axp288_extcon" }, {}, @@ -437,6 +468,7 @@ static struct platform_driver axp288_extcon_driver = { .id_table = axp288_extcon_table, .driver = { .name = "axp288_extcon", + .pm = &axp288_extcon_pm_ops, }, }; diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c index 9d32150e68db..771f6f4cf92e 100644 --- a/drivers/extcon/extcon-intel-cht-wc.c +++ b/drivers/extcon/extcon-intel-cht-wc.c @@ -338,6 +338,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev) struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); struct cht_wc_extcon_data *ext; unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK); + int pwrsrc_sts, id; int irq, ret; irq = platform_get_irq(pdev, 0); @@ -387,8 +388,19 @@ static int cht_wc_extcon_probe(struct platform_device *pdev) goto disable_sw_control; } - /* Route D+ and D- to PMIC for initial charger detection */ - cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC); + ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts); + if (ret) { + dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret); + goto disable_sw_control; + } + + /* + * If no USB host or device connected, route D+ and D- to PMIC for + * initial charger detection + */ + id = cht_wc_extcon_get_id(ext, pwrsrc_sts); + if (id != INTEL_USB_ID_GND) + cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC); /* Get initial state */ cht_wc_extcon_pwrsrc_event(ext); diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c index dc43847ad2b0..b3d93baf4fc5 100644 --- a/drivers/extcon/extcon-sm5502.c +++ b/drivers/extcon/extcon-sm5502.c @@ -65,6 +65,10 @@ struct sm5502_muic_info { /* Default value of SM5502 register to bring up MUIC device. */ static struct reg_data sm5502_reg_data[] = { { + .reg = SM5502_REG_RESET, + .val = SM5502_REG_RESET_MASK, + .invert = true, + }, { .reg = SM5502_REG_CONTROL, .val = SM5502_REG_CONTROL_MASK_INT_MASK, .invert = false, diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h index 9dbb634d213b..ce1f1ec310c4 100644 --- a/drivers/extcon/extcon-sm5502.h +++ b/drivers/extcon/extcon-sm5502.h @@ -237,6 +237,8 @@ enum sm5502_reg { #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <hh_data + HH_DATA_OFF(sizeof(*h))); h->h_proto = type; memcpy(h->h_dest, neigh->ha, net->addr_len); - hh->hh_len = FWNET_HLEN; + + /* Pairs with the READ_ONCE() in neigh_resolve_output(), + * neigh_hh_output() and neigh_update_hhs(). 
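Aside: the comment above names the exact lockless readers this release store pairs with. The underlying idiom is worth spelling out: fully initialize the payload, then publish it with a release store, so a reader that observes the published length is guaranteed to also observe the data it guards. A minimal sketch with a hypothetical struct (kernel context, linux/compiler.h and asm/barrier.h); the real readers use READ_ONCE(), smp_load_acquire() is shown here as the fully explicit counterpart:

struct pub_hdr {
	char data[16];
	int len;	/* 0 until data[] is valid */
};

static void publish(struct pub_hdr *h, const void *src, int n)
{
	memcpy(h->data, src, n);
	/* Order the memcpy() before the length store. */
	smp_store_release(&h->len, n);
}

static int consume(struct pub_hdr *h, void *dst)
{
	/* Pairs with the release store above. */
	int n = smp_load_acquire(&h->len);

	if (!n)
		return -EAGAIN;	/* not yet published */
	memcpy(dst, h->data, n);
	return n;
}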
+ */ + smp_store_release(&hh->hh_len, FWNET_HLEN); return 0; } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 92f843eaf1e0..7a30952b463d 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -135,8 +135,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) return NULL; id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); - if (id < 0) - goto free_mem; + if (id < 0) { + kfree(scmi_dev); + return NULL; + } scmi_dev->id = id; scmi_dev->protocol_id = protocol; @@ -154,8 +156,6 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) put_dev: put_device(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, id); -free_mem: - kfree(scmi_dev); return NULL; } diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 4a8012e3cb8c..601af4edad5e 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -323,7 +323,7 @@ static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db) if (db->mask) val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set, db->addr); + iowrite64_hi_lo(db->set | val, db->addr); } #endif } diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index c9a0efca17b0..5d4f84781aa0 100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -13,18 +13,58 @@ #include +static const struct console *earlycon_console __initdata; static const struct font_desc *font; static u32 efi_x, efi_y; static u64 fb_base; -static pgprot_t fb_prot; +static bool fb_wb; +static void *efi_fb; + +/* + * EFI earlycon needs to use early_memremap() to map the framebuffer. + * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon', + * memremap() should be used instead. memremap() will be available after + * paging_init() which is earlier than initcall callbacks. Thus adding this + * early initcall function early_efi_map_fb() to map the whole EFI framebuffer. + */ +static int __init efi_earlycon_remap_fb(void) +{ + /* bail if there is no bootconsole or it has been disabled already */ + if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED)) + return 0; + + efi_fb = memremap(fb_base, screen_info.lfb_size, + fb_wb ? MEMREMAP_WB : MEMREMAP_WC); + + return efi_fb ? 0 : -ENOMEM; +} +early_initcall(efi_earlycon_remap_fb); + +static int __init efi_earlycon_unmap_fb(void) +{ + /* unmap the bootconsole fb unless keep_bootcon has left it enabled */ + if (efi_fb && !(earlycon_console->flags & CON_ENABLED)) + memunmap(efi_fb); + return 0; +} +late_initcall(efi_earlycon_unmap_fb); static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) { + pgprot_t fb_prot; + + if (efi_fb) + return efi_fb + start; + + fb_prot = fb_wb ? 
PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL); return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); } static __ref void efi_earlycon_unmap(void *addr, unsigned long len) { + if (efi_fb) + return; + early_memunmap(addr, len); } @@ -176,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) fb_base |= (u64)screen_info.ext_lfb_base << 32; - if (opt && !strcmp(opt, "ram")) - fb_prot = PAGE_KERNEL; - else - fb_prot = pgprot_writecombine(PAGE_KERNEL); + fb_wb = opt && !strcmp(opt, "ram"); si = &screen_info; xres = si->lfb_width; @@ -201,6 +238,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, efi_earlycon_scroll_up(); device->con->write = efi_earlycon_write; + earlycon_console = device->con; return 0; } EARLYCON_DECLARE(efifb, efi_earlycon_setup); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index e98bbf8e56d9..ad8a4bc074fb 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -544,7 +544,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, seed = early_memremap(efi.rng_seed, sizeof(*seed)); if (seed != NULL) { - size = seed->size; + size = READ_ONCE(seed->size); early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); @@ -554,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, sizeof(*seed) + size); if (seed != NULL) { pr_notice("seeding entropy pool\n"); - add_bootloader_randomness(seed->bits, seed->size); + add_bootloader_randomness(seed->bits, size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); @@ -970,6 +970,24 @@ static int __init efi_memreserve_map_root(void) return 0; } +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? 
request_resource(parent, res) : 0; +} + int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) { struct linux_efi_memreserve *rsv; @@ -994,7 +1012,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) rsv->entry[index].size = size; memunmap(rsv); - return 0; + return efi_mem_reserve_iomem(addr, size); } memunmap(rsv); } @@ -1004,6 +1022,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) if (!rsv) return -ENOMEM; + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + /* * The memremap() call above assumes that a linux_efi_memreserve entry * never crosses a page boundary, so let's ensure that this remains true @@ -1020,7 +1044,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) efi_memreserve_root->next = __pa(rsv); spin_unlock(&efi_mem_reserve_persistent_lock); - return 0; + return efi_mem_reserve_iomem(addr, size); } static int __init efi_memreserve_root_init(void) diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 7576450c8254..aff3dfb4d7ba 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -83,13 +83,16 @@ static ssize_t efivar_attr_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) @@ -116,13 +119,16 @@ static ssize_t efivar_size_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; str += sprintf(str, "0x%lx\n", var->DataSize); @@ -133,12 +139,15 @@ static ssize_t efivar_data_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; memcpy(buf, var->Data, var->DataSize); @@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; + if (!entry || !buf) + return -EINVAL; + if (in_compat_syscall()) { struct compat_efi_variable *compat; @@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; struct compat_efi_variable *compat; + unsigned long datasize = sizeof(var->Data); size_t size; + int ret; if (!entry || !buf) return 0; - var->DataSize = 1024; - if (efivar_entry_get(entry, &entry->var.Attributes, - &entry->var.DataSize, entry->var.Data)) + ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data); + var->DataSize = datasize; + if (ret) return -EIO; if (in_compat_syscall()) { diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 0101ca4c13b1..b7bf1e993b8b 100644 
--- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -83,30 +83,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_status_t -__gop_query32(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_32 *gop32, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_32 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop32->mode; - mode = (struct efi_graphics_output_protocol_mode_32 *)m; - query_mode = (void *)(unsigned long)gop32->query_mode; - - status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; -} - static efi_status_t setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, efi_guid_t *proto, unsigned long size, void **gop_handle) @@ -119,7 +95,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u32 *handles = (u32 *)(unsigned long)gop_handle; int i; @@ -128,6 +104,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u32); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_32 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -145,9 +122,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query32(sys_table_arg, gop32, &info, &size, - &current_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop32->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -175,7 +154,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs?
*/ - if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -197,32 +176,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; -} -static efi_status_t -__gop_query64(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_64 *gop64, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_64 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop64->mode; - mode = (struct efi_graphics_output_protocol_mode_64 *)m; - query_mode = (void *)(unsigned long)gop64->query_mode; - - status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; + return EFI_SUCCESS; } static efi_status_t @@ -237,7 +192,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u64 *handles = (u64 *)(unsigned long)gop_handle; int i; @@ -246,6 +201,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u64); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_64 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -263,9 +219,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(sys_table_arg, gop64, &info, &size, - &current_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop64->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -293,7 +251,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs?
*/ - if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -315,8 +273,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; + + return EFI_SUCCESS; } /* diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index 76b0c354a027..de1a9a1f9f14 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void) struct kobject *tables_kobj; int ret = -ENOMEM; + if (rci2_table_phys == EFI_INVALID_TABLE_ADDR) + return 0; + rci2_base = memremap(rci2_table_phys, sizeof(struct rci2_table_global_hdr), MEMREMAP_WB); diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c index a43d2db5cbdb..4265e9dbed84 100644 --- a/drivers/firmware/imx/imx-dsp.c +++ b/drivers/firmware/imx/imx-dsp.c @@ -114,7 +114,7 @@ static int imx_dsp_probe(struct platform_device *pdev) dev_info(dev, "NXP i.MX DSP IPC initialized\n"); - return devm_of_platform_populate(dev); + return 0; out: kfree(chan_name); for (j = 0; j < i; j++) { diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 04a24a863d6e..35a5f8f8eea5 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -29,6 +29,7 @@ struct imx_sc_chan { struct mbox_client cl; struct mbox_chan *ch; int idx; + struct completion tx_done; }; struct imx_sc_ipc { @@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc) } EXPORT_SYMBOL(imx_scu_get_handle); +/* Callback called when a word of a message is acked, e.g. read by the SCU */ +static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r) +{ + struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl); + + complete(&sc_chan->tx_done); +} + static void imx_scu_rx_callback(struct mbox_client *c, void *msg) { struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl); @@ -143,6 +152,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) for (i = 0; i < hdr->size; i++) { sc_chan = &sc_ipc->chans[i % 4]; + + /* + * SCU requires that all message words are written + * sequentially, but the Linux MU driver implements multiple + * independent channels for each register, so ordering between + * different channels must be ensured by the SCU API interface. + * + * Wait for tx_done before every send to ensure that no + * queueing happens at the mailbox channel level.
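One detail of the change above that is easy to miss: the completion is deliberately created in the "done" state (probe calls complete() right after init_completion()), so the first wait_for_completion() in this write path falls straight through, and every later send blocks until the previous word has been acked. A condensed sketch of that lifecycle; hw_write() is a hypothetical stand-in for mbox_send_message():

struct tx_chan {
	struct completion tx_done;
};

static void tx_chan_init(struct tx_chan *c)
{
	init_completion(&c->tx_done);
	complete(&c->tx_done);	/* prime it: the first send must not wait */
}

static void tx_chan_send(struct tx_chan *c, u32 word)
{
	wait_for_completion(&c->tx_done);	/* previous word acked? */
	reinit_completion(&c->tx_done);
	hw_write(word);	/* hypothetical; the ack path calls tx_chan_ack() */
}

static void tx_chan_ack(struct tx_chan *c)	/* from the tx-done callback */
{
	complete(&c->tx_done);
}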
+ */ + wait_for_completion(&sc_chan->tx_done); + reinit_completion(&sc_chan->tx_done); + ret = mbox_send_message(sc_chan->ch, &data[i]); if (ret < 0) return ret; @@ -225,6 +247,11 @@ static int imx_scu_probe(struct platform_device *pdev) cl->knows_txdone = true; cl->rx_callback = imx_scu_rx_callback; + /* Initial tx_done completion as "done" */ + cl->tx_done = imx_scu_tx_done; + init_completion(&sc_chan->tx_done); + complete(&sc_chan->tx_done); + sc_chan->sc_ipc = sc_ipc; sc_chan->idx = i % 4; sc_chan->ch = mbox_request_channel_byname(cl, chan_name); diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c index 4b56a587dacd..d073cb3ce699 100644 --- a/drivers/firmware/imx/misc.c +++ b/drivers/firmware/imx/misc.c @@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl { u32 ctrl; u32 val; u16 resource; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_cpu_start { struct imx_sc_rpc_msg hdr; @@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start { u32 address_lo; u16 resource; u8 enable; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 ctrl; u16 resource; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_resp_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 val; -} __packed; +} __packed __aligned(4); /* * This function sets a miscellaneous control value. diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index b556612207e5..af3ae0087de4 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode { struct imx_sc_rpc_msg hdr; u16 resource; u8 mode; -} __packed; +} __packed __aligned(4); #define IMX_SCU_PD_NAME_SIZE 20 struct imx_sc_pm_domain { diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 91d5ad7cf58b..25e0f60c759a 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -150,7 +150,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, kfree(args_virt); } - if (res->a0 < 0) + if ((long)res->a0 < 0) return qcom_scm_remap_error(res->a0); return 0; diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c index 1f76740f33b6..9282239b4d95 100644 --- a/drivers/fsi/fsi-core.c +++ b/drivers/fsi/fsi-core.c @@ -544,6 +544,31 @@ static int fsi_slave_scan(struct fsi_slave *slave) return 0; } +static unsigned long aligned_access_size(size_t offset, size_t count) +{ + unsigned long offset_unit, count_unit; + + /* Criteria: + * + * 1. Access size must be less than or equal to the maximum access + * width or the highest power-of-two factor of offset + * 2. Access size must be less than or equal to the amount specified by + * count + * + * The access width is optimal if we can calculate 1 to be strictly + * equal while still satisfying 2. 
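A worked example of the two criteria before the implementation below: for offset = 7 and count = 13, criterion 1 gives BIT(ctz(7 | 4)) = 1 byte (the offset is odd), criterion 2 gives the highest power of two not exceeding 13, i.e. 8 bytes, and the minimum of the two is 1; after that single byte the offset is 8-aligned, so the remaining 12 bytes go out as 4 + 4 + 4, with the `| 4` capping every access at four bytes. The helper can be exercised stand-alone in userspace:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))

/* Same body as the helper added above. */
static unsigned long aligned_access_size(size_t offset, size_t count)
{
	unsigned long offset_unit = BIT(__builtin_ctzl(offset | 4));
	unsigned long count_unit = BIT(8 * sizeof(unsigned long) - 1 -
				       __builtin_clzl(count));

	return BIT(__builtin_ctzl(offset_unit | count_unit));
}

int main(void)
{
	size_t off = 7, count = 13, len;

	/* Prints accesses of 1, 4, 4 and 4 bytes. */
	for (; count; off += len, count -= len) {
		len = aligned_access_size(off, count);
		printf("%zu bytes at offset %zu\n", len, off);
	}
	return 0;
}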
+ */ + + /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */ + offset_unit = BIT(__builtin_ctzl(offset | 4)); + + /* Find 2 by the top bit of count */ + count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count)); + + /* Constrain the maximum access width to the minimum of both criteria */ + return BIT(__builtin_ctzl(offset_unit | count_unit)); +} + static ssize_t fsi_slave_sysfs_raw_read(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) @@ -559,8 +584,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file, return -EINVAL; for (total_len = 0; total_len < count; total_len += read_len) { - read_len = min_t(size_t, count, 4); - read_len -= off & 0x3; + read_len = aligned_access_size(off, count - total_len); rc = fsi_slave_read(slave, off, buf + total_len, read_len); if (rc) @@ -587,8 +611,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file, return -EINVAL; for (total_len = 0; total_len < count; total_len += write_len) { - write_len = min_t(size_t, count, 4); - write_len -= off & 0x3; + write_len = aligned_access_size(off, count - total_len); rc = fsi_slave_write(slave, off, buf + total_len, write_len); if (rc) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 38e096e6925f..f9263426af03 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -546,7 +546,6 @@ config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) depends on PCI_MSI - select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help @@ -1121,6 +1120,7 @@ config GPIO_MADERA config GPIO_MAX77620 tristate "GPIO support for PMIC MAX77620 and MAX20024" depends on MFD_MAX77620 + select GPIOLIB_IRQCHIP help GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor. MAX77620 PMIC has 8 pins that can be configured as GPIOs. 
The diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 08234e64993a..3224933f4c8f 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, lirq->irq = irq; uirq = &priv->uirqs[lirq->index]; if (uirq->refcnt == 0) { + spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); ret = request_irq(uirq->uirq, grgpio_irq_handler, 0, dev_name(priv->dev), priv); if (ret) { dev_err(priv->dev, "Could not request underlying irq %d\n", uirq->uirq); - - spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - return ret; } + spin_lock_irqsave(&priv->gc.bgpio_lock, flags); } uirq->refcnt++; @@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq) if (index >= 0) { uirq = &priv->uirqs[lirq->index]; uirq->refcnt--; - if (uirq->refcnt == 0) + if (uirq->refcnt == 0) { + spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); free_irq(uirq->uirq, priv); + return; + } } spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index e9e47c0d5be7..490ce7bae25e 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -164,6 +164,12 @@ static int lp_irq_type(struct irq_data *d, unsigned type) value |= TRIG_SEL_BIT | INT_INV_BIT; outl(value, reg); + + if (type & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + else if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + spin_unlock_irqrestore(&lg->lock, flags); return 0; diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 16a47de29c94..d72a3a5507b0 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) return -ENOMEM; gc = &mpc8xxx_gc->gc; + gc->parent = &pdev->dev; if (of_property_read_bool(np, "little-endian")) { ret = bgpio_init(gc, &pdev->dev, 4, @@ -377,7 +378,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) * It's assumed that only a single type of gpio controller is available * on the current machine, so overwriting global data is fine. 
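A note on the grgpio hunk above: request_irq() and free_irq() may sleep, so they must not be called with a spinlock held; the fix drops the lock around the call and re-takes it afterwards. A distilled sketch of the pattern, with hypothetical ctrl/shared_irq types and my_handler, and with the same caveat as the hunk itself: any state the lock protected has to be treated as stale after re-acquiring it.

static int shared_irq_get(struct ctrl *priv, struct shared_irq *u)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (u->refcnt == 0) {
		/* request_irq() may sleep: never call it under a spinlock. */
		spin_unlock_irqrestore(&priv->lock, flags);
		ret = request_irq(u->irq, my_handler, 0, "ctrl", priv);
		if (ret)
			return ret;
		spin_lock_irqsave(&priv->lock, flags);
	}
	u->refcnt++;
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}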
*/ - mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type; + if (devtype->irq_set_type) + mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type; if (devtype->gpio_dir_out) gc->direction_output = devtype->gpio_dir_out; @@ -386,6 +388,9 @@ static int mpc8xxx_probe(struct platform_device *pdev) gc->to_irq = mpc8xxx_gpio_to_irq; + if (of_device_is_compatible(np, "fsl,qoriq-gpio")) + gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff); + ret = gpiochip_add_data(gc, mpc8xxx_gc); if (ret) { pr_err("%pOF: GPIO chip registration failed with status %d\n", diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 7907a8755866..c77d474185f3 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -411,6 +411,7 @@ static int mxc_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mxc_gpio_port *port; + int irq_count; int irq_base; int err; @@ -426,9 +427,15 @@ static int mxc_gpio_probe(struct platform_device *pdev) if (IS_ERR(port->base)) return PTR_ERR(port->base); - port->irq_high = platform_get_irq(pdev, 1); - if (port->irq_high < 0) - port->irq_high = 0; + irq_count = platform_irq_count(pdev); + if (irq_count < 0) + return irq_count; + + if (irq_count > 1) { + port->irq_high = platform_get_irq(pdev, 1); + if (port->irq_high < 0) + port->irq_high = 0; + } port->irq = platform_get_irq(pdev, 0); if (port->irq < 0) diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index ddad5c7ea617..715371b5102a 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -53,6 +53,7 @@ struct thunderx_line { struct thunderx_gpio { struct gpio_chip chip; u8 __iomem *register_base; + struct irq_domain *irqd; struct msix_entry *msix_entries; /* per line MSI-X */ struct thunderx_line *line_entries; /* per line irq info */ raw_spinlock_t lock; @@ -282,60 +283,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip, } } -static void thunderx_gpio_irq_ack(struct irq_data *d) +static void thunderx_gpio_irq_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask(struct irq_data *d) +static void thunderx_gpio_irq_mask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask_ack(struct irq_data *d) +static void thunderx_gpio_irq_mask_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_unmask(struct irq_data *d) +static void thunderx_gpio_irq_unmask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = 
irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1S, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static int thunderx_gpio_irq_set_type(struct irq_data *d, +static int thunderx_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - struct thunderx_line *txline = - &txgpio->line_entries[irqd_to_hwirq(d)]; + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; u64 bit_cfg; - irqd_set_trigger_type(d, flow_type); + irqd_set_trigger_type(data, flow_type); bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN; if (flow_type & IRQ_TYPE_EDGE_BOTH) { - irq_set_handler_locked(d, handle_fasteoi_ack_irq); + irq_set_handler_locked(data, handle_fasteoi_ack_irq); bit_cfg |= GPIO_BIT_CFG_INT_TYPE; } else { - irq_set_handler_locked(d, handle_fasteoi_mask_irq); + irq_set_handler_locked(data, handle_fasteoi_mask_irq); } raw_spin_lock(&txgpio->lock); @@ -364,6 +359,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data) irq_chip_disable_parent(data); } +static int thunderx_gpio_irq_request_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + int r; + + r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); + if (r) + return r; + + r = irq_chip_request_resources_parent(data); + if (r) + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); + + return r; +} + +static void thunderx_gpio_irq_release_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + + irq_chip_release_resources_parent(data); + + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); +} + /* * Interrupts are chained from underlying MSI-X vectors. 
We have * these irq_chip functions to be able to handle level triggering @@ -380,24 +402,50 @@ static struct irq_chip thunderx_gpio_irq_chip = { .irq_unmask = thunderx_gpio_irq_unmask, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_request_resources = thunderx_gpio_irq_request_resources, + .irq_release_resources = thunderx_gpio_irq_release_resources, .irq_set_type = thunderx_gpio_irq_set_type, .flags = IRQCHIP_SET_TYPE_MASKED }; -static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, - unsigned int child, - unsigned int child_type, - unsigned int *parent, - unsigned int *parent_type) +static int thunderx_gpio_irq_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) { - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_gpio *txgpio = d->host_data; - *parent = txgpio->base_msi + (2 * child); - *parent_type = IRQ_TYPE_LEVEL_HIGH; + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (fwspec->param[0] >= txgpio->chip.ngpio) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } +static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct thunderx_line *txline = arg; + + return irq_domain_set_hwirq_and_chip(d, virq, txline->line, + &thunderx_gpio_irq_chip, txline); +} + +static const struct irq_domain_ops thunderx_gpio_irqd_ops = { + .alloc = thunderx_gpio_irq_alloc, + .translate = thunderx_gpio_irq_translate +}; + +static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) +{ + struct thunderx_gpio *txgpio = gpiochip_get_data(chip); + + return irq_find_mapping(txgpio->irqd, offset); +} + static int thunderx_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -405,7 +453,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct thunderx_gpio *txgpio; struct gpio_chip *chip; - struct gpio_irq_chip *girq; int ngpio, i; int err = 0; @@ -450,8 +497,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, } txgpio->msix_entries = devm_kcalloc(dev, - ngpio, sizeof(struct msix_entry), - GFP_KERNEL); + ngpio, sizeof(struct msix_entry), + GFP_KERNEL); if (!txgpio->msix_entries) { err = -ENOMEM; goto out; @@ -492,6 +539,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, if (err < 0) goto out; + /* + * Push GPIO specific irqdomain on hierarchy created as a side + * effect of the pci_enable_msix() + */ + txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, + 0, 0, of_node_to_fwnode(dev->of_node), + &thunderx_gpio_irqd_ops, txgpio); + if (!txgpio->irqd) { + err = -ENOMEM; + goto out; + } + + /* Push on irq_data and the domain for each line. 
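The probe changes below are easier to follow with the two-step shape pulled out: irq_domain_create_hierarchy() stacks a GPIO-specific domain on top of the MSI-X parent domain, then irq_domain_push_irq() splices each already-allocated MSI-X virq into that new level, attaching a per-line struct thunderx_line as chip data. Condensed from the hunk itself, nothing new added:

	/* 1. Stack a child domain on the hierarchy that pci_enable_msix()
	 *    created as a side effect.
	 */
	txgpio->irqd = irq_domain_create_hierarchy(
			irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
			0, 0, of_node_to_fwnode(dev->of_node),
			&thunderx_gpio_irqd_ops, txgpio);

	/* 2. Push each MSI-X virq down into the new domain so every GPIO
	 *    line gets its own irq_data (see thunderx_gpio_irq_alloc()).
	 */
	for (i = 0; i < ngpio; i++)
		irq_domain_push_irq(txgpio->irqd,
				    txgpio->msix_entries[i].vector,
				    &txgpio->line_entries[i]);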
*/ + for (i = 0; i < ngpio; i++) { + err = irq_domain_push_irq(txgpio->irqd, + txgpio->msix_entries[i].vector, + &txgpio->line_entries[i]); + if (err < 0) + dev_err(dev, "irq_domain_push_irq: %d\n", err); + } + chip->label = KBUILD_MODNAME; chip->parent = dev; chip->owner = THIS_MODULE; @@ -506,28 +574,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->set = thunderx_gpio_set; chip->set_multiple = thunderx_gpio_set_multiple; chip->set_config = thunderx_gpio_set_config; - girq = &chip->irq; - girq->chip = &thunderx_gpio_irq_chip; - girq->fwnode = of_node_to_fwnode(dev->of_node); - girq->parent_domain = - irq_get_irq_data(txgpio->msix_entries[0].vector)->domain; - girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq; - girq->handler = handle_bad_irq; - girq->default_type = IRQ_TYPE_NONE; - + chip->to_irq = thunderx_gpio_to_irq; err = devm_gpiochip_add_data(dev, chip, txgpio); if (err) goto out; - /* Push on irq_data and the domain for each line. */ - for (i = 0; i < ngpio; i++) { - err = irq_domain_push_irq(chip->irq.domain, - txgpio->msix_entries[i].vector, - chip); - if (err < 0) - dev_err(dev, "irq_domain_push_irq: %d\n", err); - } - dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n", ngpio, chip->base); return 0; @@ -542,10 +593,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev) struct thunderx_gpio *txgpio = pci_get_drvdata(pdev); for (i = 0; i < txgpio->chip.ngpio; i++) - irq_domain_pop_irq(txgpio->chip.irq.domain, + irq_domain_pop_irq(txgpio->irqd, txgpio->msix_entries[i].vector); - irq_domain_remove(txgpio->chip.irq.domain); + irq_domain_remove(txgpio->irqd); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index a9748b5198e6..67f9f82e0db0 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, for (i = 0; i < gc->ngpio; i++) { if (*mask == 0) break; + /* Once finished with an index write it out to the register */ if (index != xgpio_index(chip, i)) { xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + - xgpio_regoffset(chip, i), + index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]); spin_unlock_irqrestore(&chip->gpio_lock[index], flags); index = xgpio_index(chip, i); @@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, } xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + - xgpio_regoffset(chip, i), chip->gpio_state[index]); + index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]); spin_unlock_irqrestore(&chip->gpio_lock[index], flags); } diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index 43d3fa5f511a..0fb2211f9573 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c @@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable) unsigned long flags; local_irq_save(flags); - RSR_CPENABLE(*cpenable); - WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP)); - + *cpenable = xtensa_get_sr(cpenable); + xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable); return flags; } static inline void disable_cp(unsigned long flags, unsigned long cpenable) { - WSR_CPENABLE(cpenable); + xtensa_set_sr(cpenable, cpenable); local_irq_restore(flags); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index cd475ff4bcad..7835aad6d162 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -681,6 +681,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) unsigned 
int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); writel_relaxed(gpio->context.datalsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); @@ -690,9 +692,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.dirm[bank_num], gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); - writel_relaxed(gpio->context.int_en[bank_num], - gpio->base_addr + - ZYNQ_GPIO_INTEN_OFFSET(bank_num)); writel_relaxed(gpio->context.int_type[bank_num], gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); @@ -702,6 +701,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.int_any[bank_num], gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + writel_relaxed(~(gpio->context.int_en[bank_num]), + gpio->base_addr + + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); } } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 59ccfd24627d..b2e186047014 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -26,6 +26,17 @@ module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); +static char *ignore_wake; +module_param(ignore_wake, charp, 0444); +MODULE_PARM_DESC(ignore_wake, + "controller@pin combos on which to ignore the ACPI wake flag " + "ignore_wake=controller@pin[,controller@pin[,...]]"); + +struct acpi_gpiolib_dmi_quirk { + bool no_edge_events_on_boot; + char *ignore_wake; +}; + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -194,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) acpi_gpiochip_request_irq(acpi_gpio, event); } +static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in) +{ + const char *controller, *pin_str; + int len, pin; + char *endp; + + controller = ignore_wake; + while (controller) { + pin_str = strchr(controller, '@'); + if (!pin_str) + goto err; + + len = pin_str - controller; + if (len == strlen(controller_in) && + strncmp(controller, controller_in, len) == 0) { + pin = simple_strtoul(pin_str + 1, &endp, 10); + if (*endp != 0 && *endp != ',') + goto err; + + if (pin == pin_in) + return true; + } + + controller = strchr(controller, ','); + if (controller) + controller++; + } + + return false; +err: + pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n", + ignore_wake); + return false; +} + +static bool acpi_gpio_irq_is_wake(struct device *parent, + struct acpi_resource_gpio *agpio) +{ + int pin = agpio->pin_table[0]; + + if (agpio->wake_capable != ACPI_WAKE_CAPABLE) + return false; + + if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) { + dev_info(parent, "Ignoring wakeup on pin %d\n", pin); + return false; + } + + return true; +} + static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, void *context) { @@ -274,7 +336,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio); event->pin = pin; event->desc = desc; @@ -1302,7 +1364,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void) /* We must use _sync so that this runs after the first deferred_probe run */ 
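For reference, the ignore_wake string parsed by acpi_gpio_in_ignore_list() above is a comma-separated list of controller@pin tokens, e.g. gpiolib_acpi.ignore_wake=INT33FF:01@0,INT0002:00@2 on the kernel command line, which is exactly the form the DMI quirk table below supplies. The matcher is simple enough to check in userspace; a stand-alone re-implementation, simplified in that malformed entries just fail the match instead of logging an error:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool in_ignore_list(const char *list, const char *ctrl, int pin)
{
	const char *p = list;

	while (p) {
		const char *at = strchr(p, '@');
		char *end;
		long pin_val;

		if (!at)
			return false;
		if ((size_t)(at - p) == strlen(ctrl) &&
		    !strncmp(p, ctrl, at - p)) {
			pin_val = strtol(at + 1, &end, 10);
			if ((*end == '\0' || *end == ',') && pin_val == pin)
				return true;
		}
		p = strchr(p, ',');	/* advance to the next token */
		if (p)
			p++;
	}
	return false;
}

int main(void)
{
	const char *list = "INT33FF:01@0,INT0002:00@2";

	printf("%d\n", in_ignore_list(list, "INT33FF:01", 0));	/* 1 */
	printf("%d\n", in_ignore_list(list, "INT0002:00", 1));	/* 0 */
	return 0;
}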
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); -static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { +static const struct dmi_system_id gpiolib_acpi_quirks[] = { { /* * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for @@ -1312,7 +1374,10 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - } + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, }, { /* @@ -1324,20 +1389,84 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), - } + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, + }, + { + /* + * HP X2 10 models with Cherry Trail SoC + TI PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + * When suspending by closing the LID, the power to the USB + * keyboard is turned off, causing INT0002 ACPI events to + * trigger once the XHCI controller notices the keyboard is + * gone. So INT0002 events cause spurious wakeups too. Ignoring + * EC wakes breaks wakeup when opening the lid, the user needs + * to press the power-button to wakeup the system. The + * alternative is suspend simply not working, which is worse. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0,INT0002:00@2", + }, + }, + { + /* + * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FC:02 pin 28, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "815D"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FC:02@28", + }, + }, + { + /* + * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "813E"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0", + }, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct acpi_gpiolib_dmi_quirk *quirk = NULL; + const struct dmi_system_id *id; + + id = dmi_first_match(gpiolib_acpi_quirks); + if (id) + quirk = id->driver_data; + if (run_edge_events_on_boot < 0) { - if (dmi_check_system(run_edge_events_on_boot_blacklist)) + if (quirk && quirk->no_edge_events_on_boot) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } + if (ignore_wake == NULL && quirk && quirk->ignore_wake) + ignore_wake = quirk->ignore_wake; + return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 80ea49f570f4..3ece59185d37 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -23,6 +23,29 @@ #include "gpiolib.h" #include "gpiolib-of.h" +/** + * of_gpio_spi_cs_get_count() - special GPIO counting for SPI + * Some elder GPIO controllers need special quirks. 
Currently we handle + the Freescale GPIO controller with bindings that don't use + the established "cs-gpios" for chip selects but instead rely on + "gpios" for the chip select lines. If we detect this, we redirect + the counting of "cs-gpios" to count "gpios" transparently to the + driver. + */ +int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id) +{ + struct device_node *np = dev->of_node; + + if (!IS_ENABLED(CONFIG_SPI_MASTER)) + return 0; + if (!con_id || strcmp(con_id, "cs")) + return 0; + if (!of_device_is_compatible(np, "fsl,spi") && + !of_device_is_compatible(np, "aeroflexgaisler,spictrl")) + return 0; + return of_gpio_named_count(np, "gpios"); +} + /* * This is used by external users of of_gpio_count() from <linux/of_gpio.h> * @@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id) char propname[32]; unsigned int i; + ret = of_gpio_spi_cs_get_count(dev, con_id); + if (ret > 0) + return ret; + for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { if (con_id) snprintf(propname, sizeof(propname), "%s-%s", @@ -119,10 +146,6 @@ static void of_gpio_flags_quirks(struct device_node *np, if (of_property_read_bool(np, "cd-inverted")) *flags ^= OF_GPIO_ACTIVE_LOW; } - if (!strcmp(propname, "wp-gpios")) { - if (of_property_read_bool(np, "wp-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } } /* * Some GPIO fixed regulator quirks. @@ -882,16 +905,13 @@ int of_gpiochip_add(struct gpio_chip *chip) of_node_get(chip->of_node); ret = of_gpiochip_scan_gpios(chip); - if (ret) { + if (ret) of_node_put(chip->of_node); - gpiochip_remove_pin_ranges(chip); - } return ret; } void of_gpiochip_remove(struct gpio_chip *chip) { - gpiochip_remove_pin_ranges(chip); of_node_put(chip->of_node); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 104ed299d5ea..a8cf55eb54d8 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc) chip = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); + /* + * Open drain emulation using input mode may incorrectly report + * input here, fix that up. + */ + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && + test_bit(FLAG_IS_OUT, &desc->flags)) + return 0; + if (!chip->get_direction) return -ENOTSUPP; @@ -1444,6 +1452,7 @@ err_remove_of_chip: gpiochip_free_hogs(chip); of_gpiochip_remove(chip); err_free_gpiochip_mask: + gpiochip_remove_pin_ranges(chip); gpiochip_free_valid_mask(chip); err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); @@ -1499,8 +1508,8 @@ void gpiochip_remove(struct gpio_chip *chip) gdev->chip = NULL; gpiochip_irqchip_remove(chip); acpi_gpiochip_remove(chip); - gpiochip_remove_pin_ranges(chip); of_gpiochip_remove(chip); + gpiochip_remove_pin_ranges(chip); gpiochip_free_valid_mask(chip); /* * We accept no more calls into the driver from this point, so @@ -1915,6 +1924,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, parent_type); chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n", irq, parent_hwirq); + irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key); ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec); if (ret) chip_err(gc, @@ -2184,9 +2194,16 @@ static void gpiochip_irq_disable(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + /* + * Since we override .irq_disable() we need to mimic the + * behaviour of __irq_disable() in irq/chip.c.
+ * First call .irq_disable() if it exists, else mimic the + * behaviour of mask_irq() which calls .irq_mask() if + * it exists. + */ if (chip->irq.irq_disable) chip->irq.irq_disable(d); - else + else if (chip->irq.chip->irq_mask) chip->irq.chip->irq_mask(d); gpiochip_disable_irq(chip, d->hwirq); } @@ -3211,6 +3228,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc) } EXPORT_SYMBOL_GPL(gpiod_is_active_low); +/** + * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not + * @desc: the gpio descriptor to change + */ +void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + VALIDATE_DESC_VOID(desc); + change_bit(FLAG_ACTIVE_LOW, &desc->flags); +} +EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); + /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * @@ -4320,8 +4348,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, if (chip->ngpio <= p->chip_hwnum) { dev_err(dev, - "requested GPIO %d is out of range [0..%d] for chip %s\n", - idx, chip->ngpio, chip->label); + "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", + idx, p->chip_hwnum, chip->ngpio - 1, + chip->label); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c index 7e99860ca447..8319812593e3 100644 --- a/drivers/gpio/sgpio-aspeed.c +++ b/drivers/gpio/sgpio-aspeed.c @@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS; default: /* actually if code runs to here, it's an error case */ - BUG_ON(1); + BUG(); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bd37df5dd6d0..d1e278e999ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -813,6 +813,7 @@ struct amdgpu_device { uint8_t *bios; uint32_t bios_size; struct amdgpu_bo *stolen_vga_memory; + struct amdgpu_bo *discovery_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1c9d40f97a9b..c687432da426 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { - uint8_t con_obj_id, con_obj_num, con_obj_type; - - con_obj_id = + uint8_t con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - con_obj_num = - (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) - >> ENUM_ID_SHIFT; - con_obj_type = - (le16_to_cpu(path->usConnObjectId) & - OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; /* Skip TV/CV support */ if ((le16_to_cpu(path->usDeviceTag) == @@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { - uint8_t grph_obj_id, grph_obj_num, grph_obj_type; - - grph_obj_id = - (le16_to_cpu(path->usGraphicObjIds[j]) & - OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - grph_obj_num = - (le16_to_cpu(path->usGraphicObjIds[j]) & - ENUM_ID_MASK) >> ENUM_ID_SHIFT; - grph_obj_type = + uint8_t grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; diff
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 5652cc72ed3a..1e25ca34d876 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -694,11 +694,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, ssize_t result = 0; uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; - if (size & 3 || *pos & 3) + if (size > 4096 || size & 3 || *pos & 3) return -EINVAL; /* decode offset */ - offset = *pos & GENMASK_ULL(11, 0); + offset = (*pos & GENMASK_ULL(11, 0)) >> 2; se = (*pos & GENMASK_ULL(19, 12)) >> 12; sh = (*pos & GENMASK_ULL(27, 20)) >> 20; cu = (*pos & GENMASK_ULL(35, 28)) >> 28; @@ -729,7 +729,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = data[offset++]; + value = data[result >> 2]; r = put_user(value, (uint32_t *)buf); if (r) { result = r; @@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; int r = 0, i; + /* Avoid accidentally unparking the sched thread during GPU reset */ + mutex_lock(&adev->lock_reset); + /* hold on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) kthread_unpark(ring->sched.thread); } + mutex_unlock(&adev->lock_reset); + return 0; } @@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) if (!fences) return -ENOMEM; + /* Avoid accidentally unparking the sched thread during GPU reset */ + mutex_lock(&adev->lock_reset); + /* stop the scheduler */ kthread_park(ring->sched.thread); @@ -1075,6 +1083,8 @@ failure: /* restart the scheduler */ kthread_unpark(ring->sched.thread); + mutex_unlock(&adev->lock_reset); + ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); if (fences) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7a6c837c0a85..13694d5eba47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3466,8 +3466,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - amdgpu_amdkfd_pre_reset(adev); - /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 1481899f86c1..71198c5318e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin { uint32_t *p = (uint32_t *)binary; uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; - uint64_t pos = vram_size - BINARY_MAX_SIZE; + uint64_t pos = vram_size - DISCOVERY_TMR_SIZE; unsigned long flags; while (pos < vram_size) { @@ -179,7 +179,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) uint16_t checksum; int r; - adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL); + adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); if (!adev->discovery) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 85b8c4d4d576..5a6693d7d269 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,6
+24,8 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ +#define DISCOVERY_TMR_SIZE (64 << 10) + int amdgpu_discovery_init(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b19157b19fa0..05d114a72ca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -145,7 +145,7 @@ int amdgpu_async_gfx_ring = 1; int amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; -int amdgpu_noretry = 1; +int amdgpu_noretry; struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), @@ -613,7 +613,7 @@ MODULE_PARM_DESC(mes, module_param_named(mes, amdgpu_mes, int, 0444); MODULE_PARM_DESC(noretry, - "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); + "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)"); module_param_named(noretry, amdgpu_noretry, int, 0644); #ifdef CONFIG_HSA_AMD @@ -1023,6 +1023,7 @@ static const struct pci_device_id pciidlist[] = { /* Navi12 */ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, {0, 0, 0} }; @@ -1420,7 +1421,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_ATOMIC | + DRIVER_ATOMIC | DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, .load = amdgpu_driver_load_kms, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b6e1d98ef01e..8c0ac66d31d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -77,6 +77,7 @@ struct amdgpu_gmc_fault { struct amdgpu_vmhub { uint32_t ctx0_ptb_addr_lo32; uint32_t ctx0_ptb_addr_hi32; + uint32_t vm_inv_eng0_sem; uint32_t vm_inv_eng0_req; uint32_t vm_inv_eng0_ack; uint32_t vm_context0_cntl; @@ -156,6 +157,7 @@ struct amdgpu_gmc { uint32_t srbm_soft_reset; bool prt_warning; uint64_t stolen_size; + uint32_t sdpif_register; /* apertures */ u64 shared_aperture_start; u64 shared_aperture_end; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 7289e1b4fb60..28361a9c5add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -342,6 +342,67 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, return 0; } +/** + * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location + * + * @adev: amdgpu device object + * @offset: offset of the BO + * @size: size of the BO + * @domain: where to place it + * @bo_ptr: used to initialize BOs in structures + * @cpu_addr: optional CPU address mapping + * + * Creates a kernel BO at a specific offset in the address space of the domain. + * + * Returns: + * 0 on success, negative error code otherwise. 
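Before the function body below, a usage sketch: this is how the RAS bad-page code at the end of this series pins a single VRAM page at a fixed offset. retired_page_pfn is a hypothetical page-frame number; passing NULL for cpu_addr skips the CPU mapping.

	struct amdgpu_bo *bo = NULL;
	uint64_t bp = retired_page_pfn;	/* hypothetical bad-page PFN */

	/* Reserve one page of VRAM at byte offset bp << PAGE_SHIFT. */
	if (amdgpu_bo_create_kernel_at(adev, bp << PAGE_SHIFT, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL))
		DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);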
+ */ +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + unsigned int i; + int r; + + offset &= PAGE_MASK; + size = ALIGN(size, PAGE_SIZE); + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr, + NULL, NULL); + if (r) + return r; + + /* + * Remove the original mem node and create a new one at the requested + * position. + */ + for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { + (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; + (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; + } + + ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, + &(*bo_ptr)->tbo.mem, &ctx); + if (r) + goto error; + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) + goto error; + } + + amdgpu_bo_unreserve(*bo_ptr); + return 0; + +error: + amdgpu_bo_unreserve(*bo_ptr); + amdgpu_bo_unref(bo_ptr); + return r; +} + /** * amdgpu_bo_free_kernel - free BO for kernel use * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 658f4c9779b7..4fcea23ee516 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -237,6 +237,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, u64 *gpu_addr, void **cpu_addr); +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr); void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 016ea274b955..9c5cbc47edf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -65,12 +65,6 @@ const char *ras_block_string[] = { /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr); -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr); - static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1214,75 +1208,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) atomic_set(&ras->in_recovery, 0); } -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr) -{ - /* no need to free it actually.
*/ - amdgpu_bo_free_kernel(bo_ptr, NULL, NULL); - return 0; -} - -/* reserve vram with size@offset */ -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - struct amdgpu_bo *bo; - - if (bo_ptr) - *bo_ptr = NULL; - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; - - r = amdgpu_bo_create(adev, &bp, &bo); - if (r) - return -EINVAL; - - r = amdgpu_bo_reserve(bo, false); - if (r) - goto error_reserve; - - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } - - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx); - if (r) - goto error_pin; - - r = amdgpu_bo_pin_restricted(bo, - AMDGPU_GEM_DOMAIN_VRAM, - offset, - offset + size); - if (r) - goto error_pin; - - if (bo_ptr) - *bo_ptr = bo; - - amdgpu_bo_unreserve(bo); - return r; - -error_pin: - amdgpu_bo_unreserve(bo); -error_reserve: - amdgpu_bo_unref(&bo); - return r; -} - /* alloc/realloc bps array */ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, struct ras_err_handler_data *data, int pages) @@ -1345,7 +1270,7 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; uint64_t bp; - struct amdgpu_bo *bo; + struct amdgpu_bo *bo = NULL; int i; if (!con || !con->eh_data) @@ -1359,12 +1284,14 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) for (i = data->last_reserved; i < data->count; i++) { bp = data->bps[i].bp; - if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT, - PAGE_SIZE, &bo)) + if (amdgpu_bo_create_kernel_at(adev, bp << PAGE_SHIFT, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL)) DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp); data->bps[i].bo = bo; data->last_reserved = i + 1; + bo = NULL; } out: mutex_unlock(&con->recovery_lock); @@ -1390,7 +1317,7 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) for (i = data->last_reserved - 1; i >= 0; i--) { bo = data->bps[i].bo; - amdgpu_ras_release_vram(adev, &bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); data->bps[i].bo = bo; data->last_reserved = i; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b66d29d5ffa2..b158230af8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 77674a7b9616..91899d28fa72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __field(unsigned int, context) __field(unsigned int, seqno)
__field(struct dma_fence *, fence) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); TRACE_EVENT(amdgpu_sched_run_job, @@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __field(unsigned int, context) __field(unsigned int, seqno) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -468,7 +468,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence), TP_ARGS(sched_job, fence), TP_STRUCT__entry( - __field(const char *,name) + __string(ring, sched_job->base.sched->name); __field(uint64_t, id) __field(struct dma_fence *, fence) __field(uint64_t, ctx) @@ -476,14 +476,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, ), TP_fast_assign( - __entry->name = sched_job->base.sched->name; + __assign_str(ring, sched_job->base.sched->name) __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", - __entry->name, __entry->id, + __get_str(ring), __entry->id, __entry->fence, __entry->ctx, __entry->seqno) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dff41d0a85fe..f15ded1ce905 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -35,6 +35,7 @@ #include #include #include +#include <linux/sched/mm.h> #include #include #include @@ -788,7 +789,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) struct hmm_mirror *mirror = bo->mn ?
&bo->mn->mirror : NULL; struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm; - struct mm_struct *mm = gtt->usertask->mm; + struct mm_struct *mm; unsigned long start = gtt->userptr; struct vm_area_struct *vma; struct hmm_range *range; @@ -796,25 +797,14 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) uint64_t *pfns; int r = 0; - if (!mm) /* Happens during process shutdown */ - return -ESRCH; - if (unlikely(!mirror)) { DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n"); - r = -EFAULT; - goto out; + return -EFAULT; } - vma = find_vma(mm, start); - if (unlikely(!vma || start < vma->vm_start)) { - r = -EFAULT; - goto out; - } - if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) && - vma->vm_file)) { - r = -EPERM; - goto out; - } + mm = mirror->hmm->mmu_notifier.mm; + if (!mmget_not_zero(mm)) /* Happens during process shutdown */ + return -ESRCH; range = kzalloc(sizeof(*range), GFP_KERNEL); if (unlikely(!range)) { @@ -847,6 +837,17 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT); down_read(&mm->mmap_sem); + vma = find_vma(mm, start); + if (unlikely(!vma || start < vma->vm_start)) { + r = -EFAULT; + goto out_unlock; + } + if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) && + vma->vm_file)) { + r = -EPERM; + goto out_unlock; + } + r = hmm_range_fault(range, 0); up_read(&mm->mmap_sem); @@ -865,15 +866,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) } gtt->range = range; + mmput(mm); return 0; +out_unlock: + up_read(&mm->mmap_sem); out_free_pfns: hmm_range_unregister(range); kvfree(pfns); out_free_ranges: kfree(range); out: + mmput(mm); return r; } @@ -1634,81 +1639,25 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) */ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - u64 vram_size = adev->gmc.visible_vram_size; - u64 offset = adev->fw_vram_usage.start_offset; - u64 size = adev->fw_vram_usage.size; - struct amdgpu_bo *bo; + uint64_t vram_size = adev->gmc.visible_vram_size; + int r; - memset(&bp, 0, sizeof(bp)); - bp.size = adev->fw_vram_usage.size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; adev->fw_vram_usage.va = NULL; adev->fw_vram_usage.reserved_bo = NULL; - if (adev->fw_vram_usage.size > 0 && - adev->fw_vram_usage.size <= vram_size) { + if (adev->fw_vram_usage.size == 0 || + adev->fw_vram_usage.size > vram_size) + return 0; - r = amdgpu_bo_create(adev, &bp, - &adev->fw_vram_usage.reserved_bo); - if (r) - goto error_create; - - r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); - if (r) - goto error_reserve; - - /* remove the original mem node and create a new one at the - * request position - */ - bo = adev->fw_vram_usage.reserved_bo; - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } - - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, - &bo->tbo.mem, &ctx); - if (r) - goto error_pin; - - r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, - AMDGPU_GEM_DOMAIN_VRAM, - 
adev->fw_vram_usage.start_offset, - (adev->fw_vram_usage.start_offset + - adev->fw_vram_usage.size)); - if (r) - goto error_pin; - r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); - if (r) - goto error_kmap; - - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); - } - return r; - -error_kmap: - amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); -error_pin: - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); -error_reserve: - amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); -error_create: - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; + return amdgpu_bo_create_kernel_at(adev, + adev->fw_vram_usage.start_offset, + adev->fw_vram_usage.size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->fw_vram_usage.reserved_bo, + &adev->fw_vram_usage.va); return r; } + /** * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. @@ -1781,6 +1730,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) NULL, &stolen_vga_buf); if (r) return r; + + /* + * reserve a 64K TMR region at the top of VRAM; it holds the IP + * Discovery data and is protected by the PSP. + */ + r = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, + DISCOVERY_TMR_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->discovery_memory, + NULL); + if (r) + return r; + DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); @@ -1845,6 +1808,9 @@ void amdgpu_ttm_late_init(struct amdgpu_device *adev) void *stolen_vga_buf; /* return the VGA stolen memory (if any) back to VRAM */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); + + /* return the IP Discovery TMR memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5251352f5922..c7514f743409 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1034,10 +1034,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ id->oa_base != job->oa_base || id->oa_size != job->oa_size); bool vm_flush_needed = job->vm_needs_flush; - bool pasid_mapping_needed = id->pasid != job->pasid || - !id->pasid_mapping || - !dma_fence_is_signaled(id->pasid_mapping); struct dma_fence *fence = NULL; + bool pasid_mapping_needed = false; unsigned patch_offset = 0; int r; @@ -1047,6 +1045,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ pasid_mapping_needed = true; } + mutex_lock(&id_mgr->lock); + if (id->pasid != job->pasid || !id->pasid_mapping || + !dma_fence_is_signaled(id->pasid_mapping)) + pasid_mapping_needed = true; + mutex_unlock(&id_mgr->lock); + gds_switch_needed &= !!ring->funcs->emit_gds_switch; vm_flush_needed &= !!ring->funcs->emit_vm_flush && job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; @@ -1086,9 +1090,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } if (pasid_mapping_needed) { + mutex_lock(&id_mgr->lock); id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); id->pasid_mapping = dma_fence_get(fence); + mutex_unlock(&id_mgr->lock); } dma_fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 5850c8e34caa..97d11d792351 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -261,23 +261,29 @@ static void
df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, { u32 tmp; - /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); + if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } else { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_DISABLE; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + if (enable) { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } + + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); } - - /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); } static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 53090eae0082..14417cebe38b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); } -static void gfx_v10_0_init_csb(struct amdgpu_device *adev) +static int gfx_v10_0_init_csb(struct amdgpu_device *adev) { + int r; + + if (adev->in_gpu_reset) { + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (r) + return r; + + r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, + (void **)&adev->gfx.rlc.cs_ptr); + if (!r) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, + adev->gfx.rlc.cs_ptr); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + } + + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + if (r) + return r; + } + /* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + + return 0; } -static void gfx_v10_0_init_pg(struct amdgpu_device *adev) +static int gfx_v10_0_init_pg(struct amdgpu_device *adev) { int i; + int r; - gfx_v10_0_init_csb(adev); + r = gfx_v10_0_init_csb(adev); + if (r) + return r; for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); /* TODO: init power gating */ - return; + return 0; } void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) @@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); if (r) return r; - gfx_v10_0_init_pg(adev); + + r = gfx_v10_0_init_pg(adev); + if (r) + return r; /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); @@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } - gfx_v10_0_init_pg(adev); + r = gfx_v10_0_init_pg(adev); + if 
(r) + return r; + adev->gfx.rlc.funcs->start(adev); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { @@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) return 0; } -static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { int i; u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); @@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) adev->gfx.gfx_ring[i].sched.ready = false; } WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); - udelay(50); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); + + return 0; } static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) @@ -3514,6 +3555,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } else { amdgpu_ring_clear_ring(ring); @@ -3936,11 +3978,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; + amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); return clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 87dd55e9d72b..cc88ba76a8d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6184,7 +6184,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. 
+ */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + amdgpu_ring_write(ring, lower_32_bits(seq - 1)); + amdgpu_ring_write(ring, upper_32_bits(seq - 1)); + + /* Then send the real EOP event down the pipe: + * EVENT_WRITE_EOP - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -6926,7 +6942,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 5 + /* COND_EXEC */ 7 + /* PIPELINE_SYNC */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ - 8 + /* FENCE for VM_FLUSH */ + 12 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 4 + /* double SWITCH_BUFFER, the first COND_EXEC jump to the place just @@ -6938,7 +6954,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 31 + /* DE_META */ 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ - 8 + 8 + /* FENCE x2 */ + 12 + 12 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 97cf0b536873..40034efa64bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1038,17 +1038,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - /* Disable GFXOFF on original raven. There are combinations - * of sbios and platforms that are not stable. - */ - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - &&((adev->gfx.rlc_fw_version != 106 && - adev->gfx.rlc_fw_version < 531) || - (adev->gfx.rlc_fw_version == 53815) || - (adev->gfx.rlc_feature_version < 1) || - !adev->gfx.rlc.is_rlc_v2_1)) + if (!(adev->rev_id >= 0x8 || + adev->pdev->device == 0x15d8) && + (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ + !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (adev->pm.pp_feature & PP_GFXOFF_MASK) @@ -2930,7 +2923,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. 
*/ if (adev->gfx.rlc.is_rlc_v2_1) { - gfx_v9_1_init_rlc_save_restore_list(adev); + if (adev->asic_type == CHIP_VEGA12 || + (adev->asic_type == CHIP_RAVEN && + adev->rev_id >= 8)) + gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); } @@ -3760,6 +3756,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } else { amdgpu_ring_clear_ring(ring); @@ -4084,11 +4081,13 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; + amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); return clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 6ce37ce77d14..d6fbdc6c0548 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -365,6 +365,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index db10640a3b2f..fbe06c13a09c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -350,6 +350,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5c7d5f73f54f..f642e066e67a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -230,12 +230,36 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int vmhub, uint32_t flush_type) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 tmp; /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + spin_lock(&adev->gmc.invalidate_lock); + /* + * The GPU may lose the gpuvm invalidate acknowledge state across a + * power-gating off cycle, so add a semaphore acquire before the + * invalidation and a semaphore release after it to avoid entering + * a power-gated state, which works around the issue. + */ + + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well.
*/ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) { + for (i = 0; i < adev->usec_timeout; i++) { + /* a read return value of 1 means semaphore acquire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -254,6 +278,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, udelay(1); } + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) + /* + * Add the semaphore release after invalidation; + * a write of 0 releases the semaphore. + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + + spin_unlock(&adev->gmc.invalidate_lock); + if (i < adev->usec_timeout) return; @@ -338,6 +373,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * The GPU may lose the gpuvm invalidate acknowledge state across a + * power-gating off cycle, so add a semaphore acquire before the + * invalidation and a semaphore release after it to avoid entering + * a power-gated state, which works around the issue. + */ + + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* a read return value of 1 means semaphore acquire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -348,6 +397,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* + * Add the semaphore release after invalidation; + * a write of 0 releases the semaphore. + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f91337030dc0..688111ef814d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -448,6 +448,24 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, return req; } +/** + * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore + * + * @adev: amdgpu_device pointer + * @vmhub: vmhub type + * + */ +static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, + uint32_t vmhub) +{ + return ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!amdgpu_sriov_vf(adev)) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel.
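For orientation, the semaphore-guarded invalidation that the gmc_v10_0 hunks above add (and that the gmc_v9_0 hunks below mirror) reduces to an acquire/request/ack/release register sequence. The following is a condensed sketch under the usual amdgpu driver headers, not the patch's literal code; the function name is illustrative, while the accessors and the vm_inv_eng0_* fields are the ones introduced in these hunks:

/* Sketch: acquire the per-engine invalidation semaphore, issue the
 * request, wait for the per-VMID ack, then release the semaphore.
 * vm_flush_semaphore_sketch() is an illustrative name only. */
static void vm_flush_semaphore_sketch(struct amdgpu_device *adev,
				      struct amdgpu_vmhub *hub,
				      unsigned int eng, u32 inv_req,
				      unsigned int vmid)
{
	unsigned int i;

	/* acquire: a read returning 1 means the semaphore is held */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng) & 0x1)
			break;
		udelay(1);
	}

	/* issue the invalidation request */
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);

	/* wait for the ack bit of this VMID to latch */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng) & (1 << vmid))
			break;
		udelay(1);
	}

	/* release: writing 0 drops the semaphore */
	WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
}

The command-stream variant emits the same acquire/release bracket around the flush via amdgpu_ring_emit_reg_wait() and amdgpu_ring_emit_wreg(), which is why the per-flush dword budgets (SOC15_FLUSH_GPU_TLB_NUM_WREG and SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT) are raised in the soc15.h hunk further down.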
@@ -467,14 +485,15 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; - u32 j, tmp; + u32 j, inv_req, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= adev->num_vmhubs); hub = &adev->vmhub[vmhub]; - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal @@ -485,13 +504,35 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t req = hub->vm_inv_eng0_req + eng; uint32_t ack = hub->vm_inv_eng0_ack + eng; - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); return; } spin_lock(&adev->gmc.invalidate_lock); + + /* + * The GPU may lose the gpuvm invalidate acknowledge state across a + * power-gating off cycle, so add a semaphore acquire before the + * invalidation and a semaphore release after it to avoid entering + * a power-gated state, which works around the issue. + */ + + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well. */ + if (use_semaphore) { + for (j = 0; j < adev->usec_timeout; j++) { + /* a read return value of 1 means semaphore acquire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (j >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -506,7 +547,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, break; udelay(1); } + + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well. */ + if (use_semaphore) + /* + * Add the semaphore release after invalidation; + * a write of 0 releases the semaphore. + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + spin_unlock(&adev->gmc.invalidate_lock); + if (j < adev->usec_timeout) return; @@ -516,11 +567,25 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * The GPU may lose the gpuvm invalidate acknowledge state across a + * power-gating off cycle, so add a semaphore acquire before the + * invalidation and a semaphore release after it to avoid entering + * a power-gated state, which works around the issue. + */ + + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well.
*/ + if (use_semaphore) + /* a read return value of 1 means semaphore acquire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -531,6 +596,14 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: Use of the semaphore for GFXHUB still needs to be debugged as well. */ + if (use_semaphore) + /* + * Add the semaphore release after invalidation; + * a write of 0 releases the semaphore. + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } @@ -1309,6 +1382,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) } } +/** + * gmc_v9_0_restore_registers - restores regs + * + * @adev: amdgpu_device pointer + * + * This restores register values, saved at suspend. + */ +static void gmc_v9_0_restore_registers(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_RAVEN) + WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); +} + /** * gmc_v9_0_gart_enable - gart enable * @@ -1405,6 +1491,20 @@ static int gmc_v9_0_hw_init(void *handle) return r; } +/** + * gmc_v9_0_save_registers - saves regs + * + * @adev: amdgpu_device pointer + * + * This saves potential register values that should be + * restored upon resume. + */ +static void gmc_v9_0_save_registers(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_RAVEN) + adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); +} + /** * gmc_v9_0_gart_disable - gart disable * @@ -1441,9 +1541,16 @@ static int gmc_v9_0_hw_fini(void *handle) static int gmc_v9_0_suspend(void *handle) { + int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return gmc_v9_0_hw_fini(adev); + r = gmc_v9_0_hw_fini(adev); + if (r) + return r; + + gmc_v9_0_save_registers(adev); + + return 0; } static int gmc_v9_0_resume(void *handle) @@ -1451,6 +1558,7 @@ static int gmc_v9_0_resume(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v9_0_restore_registers(adev); r = gmc_v9_0_hw_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 04cd4b6f95d4..641f1258f08d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -418,6 +418,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index b39bea6f54e9..096bb883c29d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -341,6 +341,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 9ed178fa241c..fb161c83e409 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++
b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -502,6 +502,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_SEM) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; hub[i]->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_INVALIDATE_ENG0_REQ) + diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index de9b995b65b1..2d780820ba00 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -660,6 +660,12 @@ static int nv_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_ATHUB; + /* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0; + * as a consequence, the rev_id and external_rev_id are wrong. + * Work around it by hardcoding rev_id to 0 (default value). + */ + if (amdgpu_sriov_vf(adev)) + adev->rev_id = 0; adev->external_rev_id = adev->rev_id + 0xa; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 10166104b8a3..d483684db95b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -398,6 +398,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) return false; } +static int psp_v11_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + /* Write the ring destroy command */ + if (psp_v11_0_support_vmr_ring(psp)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) */ + if (psp_v11_0_support_vmr_ring(psp)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + return ret; +} + static int psp_v11_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -407,6 +435,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; if (psp_v11_0_support_vmr_ring(psp)) { + ret = psp_v11_0_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -451,33 +485,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp, return ret; } -static int psp_v11_0_ring_stop(struct psp_context *psp, - enum psp_ring_type ring_type) -{ - int ret = 0; - struct amdgpu_device *adev = psp->adev; - - /* Write the ring destroy command*/ - if (psp_v11_0_support_vmr_ring(psp)) - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); - else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, - GFX_CTRL_CMD_ID_DESTROY_RINGS); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) */ - if (psp_v11_0_support_vmr_ring(psp)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); - else - ret =
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); - - return ret; -} static int psp_v11_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 57bb5f9e08b2..88ae27a5a03d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev) u32 interrupt_cntl, ih_cntl, ih_rb_cntl; si_ih_disable_interrupts(adev); - WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4ccfcdf8f16a..c086262cc181 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -84,6 +84,13 @@ #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 + +/* for Vega20/arcturus register offset change */ +#define mmROM_INDEX_VG20 0x00e4 +#define mmROM_INDEX_VG20_BASE_IDX 0 +#define mmROM_DATA_VG20 0x00e5 +#define mmROM_DATA_VG20_BASE_IDX 0 + /* * Indirect registers accessor */ @@ -267,7 +274,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev) static u32 soc15_get_xclk(struct amdgpu_device *adev) { - return adev->clock.spll.reference_freq; + u32 reference_clock = adev->clock.spll.reference_freq; + + if (adev->asic_type == CHIP_RAVEN) + return reference_clock / 4; + + return reference_clock; } @@ -299,6 +311,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + uint32_t rom_index_offset; + uint32_t rom_data_offset; if (bios == NULL) return false; @@ -311,11 +325,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20); + break; + default: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); + break; + } + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index a3dde0c31f57..a1d4ea69a284 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -28,8 +28,8 @@ #include "nbio_v7_0.h" #include "nbio_v7_4.h" -#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4 -#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1 +#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 +#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 extern const struct amd_ip_funcs soc15_common_ip_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 839f186e1182..19e870c79896 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -52,6
uint32_t old_ = 0; \ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ uint32_t loop = adev->usec_timeout; \ + ret = 0; \ while ((tmp_ & (mask)) != (expected_value)) { \ if (old_ != tmp_) { \ loop = adev->usec_timeout; \ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 93b3500e522b..4f0f0de83293 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1375,7 +1375,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, if (enable) { /* wait for STATUS to clear */ - if (vcn_v1_0_is_idle(handle)) + if (!vcn_v1_0_is_idle(handle)) return -EBUSY; vcn_v1_0_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 395c2259f979..9d778a0b2c5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -423,7 +423,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) * vcn_v2_5_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Disable clock gating for VCN block */ @@ -542,7 +541,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) * vcn_v2_5_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Enable clock gating for VCN block */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index 15c523027285..511712c2e382 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -93,7 +93,7 @@ void kfd_debugfs_init(void) kfd_debugfs_hqds_by_device, &kfd_debugfs_fops); debugfs_create_file("rls", S_IFREG | 0444, debugfs_root, kfd_debugfs_rls_by_device, &kfd_debugfs_fops); - debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root, + debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root, NULL, &kfd_debugfs_hang_hws_fops); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d985e31fcc1e..a2ed9c257cb0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1181,16 +1181,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, list_add(&q->list, &qpd->queues_list); qpd->queue_count++; + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count++; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) + dqm->xgmi_sdma_queue_count++; + if (q->properties.is_active) { dqm->queue_count++; retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); } - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) - dqm->sdma_queue_count++; - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - dqm->xgmi_sdma_queue_count++; /* * Unconditionally increment this counter, regardless of the queue's * type or whether the queue is active. 
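The create_queue_cpsch() reordering above is easy to miss: the SDMA queue counters are now incremented before execute_queues_cpsch() runs, presumably so that the unmap/map cycle it triggers already sees the new queue's type accounted. A minimal sketch of the enforced order follows; is_sdma_queue() is an illustrative helper, not a real kfd function:

/* Sketch of the ordering the hunk above enforces: update the
 * accounting first, then trigger the runlist rebuild that consumes it. */
static int create_queue_order_sketch(struct device_queue_manager *dqm,
				     struct queue *q)
{
	int retval = 0;

	if (is_sdma_queue(q))
		dqm->sdma_queue_count++;	/* 1. account the new queue */

	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,	/* 2. rebuild runlist */
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	return retval;
}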
@@ -1676,7 +1678,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_dev *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * - dev->device_info->num_sdma_engines * + (dev->device_info->num_sdma_engines + + dev->device_info->num_xgmi_sdma_engines) * dev->device_info->num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index c56ac47cd318..bc47f6a44456 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd) } kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!kfd->ih_wq)) { + kfifo_free(&kfd->ih_fifo); + dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n"); + return -ENOMEM; + } spin_lock_init(&kfd->interrupt_lock); INIT_WORK(&kfd->interrupt_work, interrupt_wq); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4139f129eafb..360c87ba4595 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -940,6 +940,11 @@ static int dm_late_init(void *handle) params.backlight_lut_array_size = 16; params.backlight_lut_array = linear_lut; + /* Min backlight level after ABM reduction; don't allow below 1%: + * 0xFFFF x 0.01 = 0x28F + */ + params.min_abm_backlight = 0x28F; + /* todo will enable for navi10 */ if (adev->asic_type <= CHIP_RAVEN) { ret = dmcu_load_iram(dmcu, params); @@ -3261,27 +3266,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } -static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) -{ - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; - - timing_out->display_color_depth--; -} - -static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, - const struct drm_display_info *info) +static bool adjust_colour_depth_from_display_info( + struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) { + enum dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) normalized_clk /= 2; /* Adjusting pix clock following on HDMI spec based on colour depth */ - switch (timing_out->display_color_depth) { + switch (depth) { + case COLOR_DEPTH_888: + break; case COLOR_DEPTH_101010: normalized_clk = (normalized_clk * 30) / 24; break; @@ -3292,14 +3291,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ normalized_clk = (normalized_clk * 48) / 24; break; default: - return; + /* The above depths are the only ones valid for HDMI.
*/ + return false; } - if (normalized_clk <= info->max_tmds_clock) - return; - reduce_mode_colour_depth(timing_out); - - } while (timing_out->display_color_depth > COLOR_DEPTH_888); - + if (normalized_clk <= info->max_tmds_clock) { + timing_out->display_color_depth = depth; + return true; + } + } while (--depth > COLOR_DEPTH_666); + return false; } static void fill_stream_properties_from_drm_display_mode( @@ -3365,8 +3365,14 @@ static void fill_stream_properties_from_drm_display_mode( stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - adjust_colour_depth_from_display_info(timing_out, info); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (!adjust_colour_depth_from_display_info(timing_out, info) && + drm_mode_is_420_also(info, mode_in) && + timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + adjust_colour_depth_from_display_info(timing_out, info); + } + } } static void fill_audio_info(struct audio_info *audio_info, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ee1dc75f5ddc..1d733b57e60f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -247,7 +247,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); } - ret = drm_dp_update_payload_part1(mst_mgr); + /* It's OK for this to fail */ + drm_dp_update_payload_part1(mst_mgr); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each @@ -256,9 +257,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( get_payload_table(aconnector, proposed_table); - if (ret) - return false; - return true; } @@ -316,7 +314,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int ret; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; @@ -330,10 +327,8 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (!mst_mgr->mst_state) return false; - ret = drm_dp_update_payload_part2(mst_mgr); - - if (ret) - return false; + /* It's OK for this to fail */ + drm_dp_update_payload_part2(mst_mgr); if (!enable) drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 16218a202b59..28a6c7b2ef4b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -379,6 +379,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; + aconnector->dc_link->cur_link_settings.lane_count = 0; } drm_connector_unregister(connector); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index f4cfa0caeba8..785322cd4c6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -342,7 +342,8 @@ bool dm_pp_get_clock_levels_by_type( if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) 
{ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clks)) { - /* Error in pplib. Provide default values. */ + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); return true; } } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 9b2cb57bf2ba..c9a241fe46cf 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1438,6 +1438,7 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dc_context *ctx = dc->ctx; struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( @@ -1449,17 +1450,28 @@ void dcn_bw_update_from_pplib(struct dc *dc) res = verify_clock_values(&fclks); if (res) { - ASSERT(fclks.num_levels >= 3); - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 
2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; } else BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index b864869cc7e3..6fa7422c51da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -91,6 +91,12 @@ ifdef CONFIG_DRM_AMD_DC_DCN2_1 ############################################################################### CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o +# prevent build errors regarding soft-float vs hard-float FP ABI tags +# this code is currently unused on ppc64, as it applies to Renoir APUs only +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute) +endif + AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 3e8ac303bd52..23ec283eb07b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -320,6 +320,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */ int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000; @@ -357,14 +359,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; } - /* Both fclk and dppclk ref are run on the same scemi clock so we - * need to keep the same value for both + /* Both fclk and ref_dppclk run on the same scemi clock. + * So take the higher value since the DPP DTO is typically programmed + * such that max dppclk is 1:1 with ref_dppclk. */ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz) clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz; if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz) clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz; + // Both fclk and ref_dppclk run on the same scemi clock. 
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; + dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 787f94d815f4..dd92f9c295b4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -91,6 +91,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz); } + // workaround: limit dppclk to at least 100 MHz to avoid underflow when switching from a lower-resolution eDP panel to eDP plus a 4K monitor. + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; + } + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) dpp_clock_lowered = true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 50984c1811bb..468c6bb0e311 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,7 +33,7 @@ #include "mp/mp_12_0_0_sh_mask.h" #define REG(reg_name) \ - (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) #define FN(reg_name, field) \ FD(reg_name##__##field) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4b8819c27fcd..4704aac336c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2267,12 +2267,7 @@ void dc_set_power_state( enum dc_acpi_cm_power_state power_state) { struct kref refcount; - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib), - GFP_KERNEL); - - ASSERT(dml); - if (!dml) - return; + struct display_mode_lib *dml; switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: @@ -2294,6 +2289,12 @@ void dc_set_power_state( * clean state, and dc hw programming optimizations will not * cause any trouble.
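* Note that dml is only needed on this suspend path, so it is allocated below and freed again as soon as its contents have been copied into current_state; the D0 path never pays for the allocation.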
*/ + dml = kzalloc(sizeof(struct display_mode_lib), + GFP_KERNEL); + + ASSERT(dml); + if (!dml) + return; /* Preserve refcount */ refcount = dc->current_state->refcount; @@ -2307,10 +2308,10 @@ void dc_set_power_state( dc->current_state->refcount = refcount; dc->current_state->bw_ctx.dml = *dml; + kfree(dml); + break; } - - kfree(dml); } void dc_resume(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ca20b150afcc..3aedc724241e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -373,7 +373,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) if (GPIO_RESULT_OK != dal_ddc_open( ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { - dal_gpio_destroy_ddc(&ddc); + dal_ddc_close(ddc); return present; } @@ -809,8 +809,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { - read_edp_current_link_settings_on_detect(link); detect_edp_sink_caps(link); + read_edp_current_link_settings_on_detect(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; @@ -948,8 +948,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && - reason != DETECT_REASON_HPDRX) { + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why Dell 2413 doesn't like * two link trainings @@ -2169,8 +2168,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_set_fec_ready(link, false); } #endif - } else - link->link_enc->funcs->disable_output(link->link_enc, signal); + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { /* MST disable link only when no stream use the link */ @@ -2217,7 +2218,7 @@ static bool dp_active_dongle_validate_timing( break; } - if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || + if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; @@ -2767,6 +2768,15 @@ void core_link_enable_stream( CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); + /* This second call is needed to reconfigure the DIG + * as a workaround for the incorrect value being applied + * from transmitter control. 
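+ * (The DIG is the digital encoder front end; re-running link_enc->funcs->setup() after the test-pattern programming above appears to re-apply the encoder configuration that transmitter control clobbers.)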
+ */ + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) + stream->link->link_enc->funcs->setup( + stream->link->link_enc, + pipe_ctx->stream->signal); + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f5742719b5d9..6dd2334dd5e6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2545,6 +2545,7 @@ static void get_active_converter_info( uint8_t data, struct dc_link *link) { union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); /* decode converter info*/ if (!ds_port.fields.PORT_PRESENT) { @@ -2691,6 +2692,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, * keep receiver powered all the time.*/ case DP_BRANCH_DEVICE_ID_0010FA: case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: link->wa_flags.dp_keep_receiver_powered = true; break; @@ -2877,6 +2879,17 @@ static bool retrieve_link_cap(struct dc_link *link) sink_id.ieee_device_id, sizeof(sink_id.ieee_device_id)); + /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; + + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, + sizeof(str_mbp_2017))) { + link->reported_link_cap.link_rate = 0x0c; + } + } + core_link_read_dpcd( link, DP_SINK_HW_REVISION_START, @@ -3490,7 +3503,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) if (link_enc->funcs->fec_set_enable && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { if (link->fec_state == dc_link_fec_ready && enable) { - msleep(1); + /* According to the DP spec, the FEC enable sequence can first + * be transmitted any time after 1000 LL codes have + * been transmitted on the link after link training + * completion. Using 1 lane at RBR gives the maximum + * time for transmitting 1000 LL codes, which is 6.173 us. + * So use a 7 microsecond delay instead.
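+ * For reference: RBR is 1.62 Gb/s per lane, and 8b/10b coding makes each LL code a 10-bit symbol, so one lane carries 162M symbols/s; 1000 / 162e6 s is roughly 6.17 us, rounded up to 7 us here.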
+ */ + udelay(7); link_enc->funcs->fec_set_enable(link_enc, true); link->fec_state = dc_link_fec_enabled; } else if (link->fec_state == dc_link_fec_enabled && !enable) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 79438c4f1e20..a519dbc5ecb6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, if (pipes[i].stream != NULL && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL) { + pipes[i].stream_res.stream_enc != NULL && + pipes[i].stream->link == link) { udelay(100); pipes[i].stream_res.stream_enc->funcs->dp_blank( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bf1d7bb90e0f..bb09243758fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc, if (dwb->funcs->is_enabled(dwb)) { /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, stream_status, wb_info); + dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); } else { /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, stream_status, wb_info); + dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 58bd131d5b48..7700a855d77c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id) /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + 1, 80000); + return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index c3f9f4185ce8..cf877238fff9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -386,7 +386,7 @@ static bool acquire( { enum gpio_result result; - if (!is_engine_available(engine)) + if ((engine == NULL) || !is_engine_available(engine)) return false; result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index a02c10e23e0d..d163388c99a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -840,8 +840,8 @@ static void hubbub1_det_request_size( hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe); - swath_bytes_horz_wc = height * blk256_height * bpe; - swath_bytes_vert_wc = width * blk256_width * bpe; + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? 
false : /* full 256B request */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 1212da12c414..e933f6a369f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -493,7 +493,6 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) dpp->funcs->dpp_dppclk_control(dpp, false, false); hubp->power_gated = true; - dc->optimized_required = false; /* We're powering off, no need to optimize */ dc->hwss.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, @@ -1103,6 +1102,25 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + const int TIMEOUT_FOR_FLIP_PENDING = 100000; + int i; + + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) + break; + udelay(1); + } + + if (pipe->bottom_pipe != NULL) { + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) + break; + udelay(1); + } + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. */ @@ -1337,7 +1355,8 @@ bool dcn20_update_bandwidth( static void dcn20_enable_writeback( struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info) + struct dc_writeback_info *wb_info, + struct dc_state *context) { struct dwbc *dwb; struct mcif_wb *mcif_wb; @@ -1354,7 +1373,7 @@ static void dcn20_enable_writeback( optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); - mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); /* Enable MCIF_WB */ mcif_wb->funcs->enable_mcif(mcif_wb); /* Enable DWB */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 2137e2be2140..8d5cfd5357c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -233,12 +233,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) / opp_cnt; - int memory_mask = mpcc_hactive <= 2560 ? 
0x3 : 0xf; + uint32_t memory_mask; uint32_t data_fmt = 0; + ASSERT(opp_cnt == 2); + /* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start @@ -246,9 +247,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c * MASTER_UPDATE_LOCK_DB_X, 160, * MASTER_UPDATE_LOCK_DB_Y, 240); */ + + /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192, + * however, for ODM combine we can simplify by always using 4. + * To make sure there's no overlap, each instance "reserves" 2 memories and + * they are uniquely combined here. + */ + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + if (REG(OPTC_MEMORY_CONFIG)) REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask << (optc->inst * 4)); + OPTC_MEM_SEL, memory_mask); if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) data_fmt = 1; @@ -257,7 +266,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); - ASSERT(opp_cnt == 2); REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 1, OPTC_SEG0_SRC_SEL, opp_id[0], @@ -287,6 +295,10 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 2; else *num_of_src_opp = 1; + + /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ + if (*src_opp_id_1 == 0xf) + *num_of_src_opp = 1; } void optc2_set_dwb_source(struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6b2f2f1a1c9c..c13dce760098 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -269,6 +269,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { .use_urgent_burst_bw = 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, + .urgent_latency_us = 4.0, + 
.urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 8, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; + struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -1419,13 +1530,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, - struct display_stream_compressor **dsc) + struct display_stream_compressor **dsc, + int pipe_idx) { int i; ASSERT(*dsc == NULL); *dsc = NULL; + if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { + *dsc = pool->dscs[pipe_idx]; + res_ctx->is_dsc_acquired[pipe_idx] = true; + return; + } + /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { @@ -1468,7 +1586,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream != dc_stream) continue; - acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); + acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { @@ -1669,7 +1787,7 @@ static bool dcn20_split_stream_for_odm( next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { - acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); + acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; @@ -1765,7 +1883,7 @@ int dcn20_populate_dml_pipes_from_context( pipe_cnt = i; continue; } - if (!resource_are_streams_timing_synchronizable( + if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, res_ctx->pipe_ctx[i].stream)) { synchronized_vblank = false; @@ -2474,6 +2592,7 @@ bool dcn20_fast_validate_bw( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); } else dcn20_split_stream_for_mpc( 
&context->res_ctx, dc->res_pool, @@ -3040,7 +3159,7 @@ static void cap_soc_clocks( static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; + struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; int i; int num_calculated_states = 0; int min_dcfclk = 0; @@ -3048,6 +3167,8 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (num_states == 0) return; + memset(calculated_states, 0, sizeof(calculated_states)); + if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; else @@ -3125,6 +3246,9 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_soc; + if (ASICREV_IS_NAVI12_P(hw_internal_rev)) return &dcn2_0_nv12_soc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 5ab9d6240498..e95025b1d14d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank( DP_VID_N_MUL, n_multiply); } - /* set DIG_START to 0x1 to reset FIFO */ + /* make sure stream is disabled before resetting steer fifo */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); + REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); + /* set DIG_START to 0x1 to reset FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(1); /* write 0 to take the FIFO out of reset */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); - /* switch DP encoder to CRTC data */ + /* switch DP encoder to CRTC data, but reset the fifo first. It may happen + * that it overflows during a mode transition, and sometimes doesn't recover.
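+ * The sequence below asserts DP_STEER_FIFO_RESET, holds it for 10 us, then releases it, so the fifo restarts empty rather than carrying an overflowed state into the new mode.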
+ */ + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); + udelay(10); REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index d1266741763b..f5f6b4a0f0aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include <linux/delay.h> #include "dm_services.h" #include "dcn20/dcn20_hubbub.h" #include "dcn21_hubbub.h" @@ -71,30 +72,39 @@ static uint32_t convert_and_clamp( void dcn21_dchvm_init(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; //Init DCHVM block REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); //Poll until RIOMMU_ACTIVE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100); + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); - //Reflect the power status of DCHUBBUB - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + if (riommu_active) + break; + else + udelay(5); + } - //Start rIOMMU prefetching - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + if (riommu_active) { + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); - // Enable dynamic clock gating - REG_UPDATE_4(DCHVM_CLK_CTRL, - HVM_DISPCLK_R_GATE_DIS, 0, - HVM_DISPCLK_G_GATE_DIS, 0, - HVM_DCFCLK_R_GATE_DIS, 0, - HVM_DCFCLK_G_GATE_DIS, 0); + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); - //Poll until HOSTVM_PREFETCH_DONE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); + + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + } } static int hubbub21_init_dchub(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index de182185fe1f..161bf7caf3ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -57,6 +57,7 @@ #include "dcn20/dcn20_dccg.h" #include "dcn21_hubbub.h" #include "dcn10/dcn10_resource.h" +#include "dce110/dce110_resource.h" #include "dcn20/dcn20_dwb.h" #include "dcn20/dcn20_mmhubbub.h" @@ -258,7 +259,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 23.84, .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3550, + .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 4, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 1, @@ -824,6 +825,7 @@ static const struct dc_debug_options debug_defaults_diags = { enum dcn20_clk_src_array_id { DCN20_CLK_SRC_PLL0, DCN20_CLK_SRC_PLL1, + DCN20_CLK_SRC_PLL2, DCN20_CLK_SRC_TOTAL_DCN21 }; @@ -1492,6 +1494,10 @@ static bool construct( dcn21_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21; diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c index b953b02a1512..723af0b2dda0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c @@ -24,7 +24,7 @@ */ #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h" #include "dml_inline_defs.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index eca140da13d8..ded71ea82413 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -27,7 +27,7 @@ #define __DML_INLINE_DEFS_H__ #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h" #include "dml_logger.h" static inline double dml_min(double a, double b) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h rename to drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3a938cd414ea..f6cc2d6f576d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -321,10 +321,12 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*update_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*enable_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); #endif diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 18961707db23..9ad49da50a17 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -31,6 +31,8 @@ #define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9 #define DP_BRANCH_DEVICE_ID_00001A 0x00001A #define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1 +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 +#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C enum ddc_result { DDC_RESULT_UNKNOWN = 0, diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ec70c9b12e1a..7d67cb2c61f0 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -743,6 +743,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); + /* Rounded to the nearest Hz */ + nominal_field_rate_in_uhz = 1000000ULL * + div_u64(nominal_field_rate_in_uhz + 500000, 1000000); + min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -799,6 +803,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, 2 * in_out_vrr->min_refresh_in_uhz) in_out_vrr->btr.btr_enabled = false; + in_out_vrr->fixed.fixed_active = false; in_out_vrr->btr.btr_active = false; in_out_vrr->btr.inserted_duration_in_us = 0; 
in_out_vrr->btr.frames_to_insert = 0; @@ -818,6 +823,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->adjust.v_total_max = stream->timing.v_total; } else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE && refresh_range >= MIN_REFRESH_RANGE_IN_US) { + in_out_vrr->adjust.v_total_min = calc_v_total_from_refresh(stream, in_out_vrr->max_refresh_in_uhz); @@ -996,14 +1002,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate( const struct dc_stream_state *stream) { unsigned long long nominal_field_rate_in_uhz = 0; + unsigned int total = stream->timing.h_total * stream->timing.v_total; - /* Calculate nominal field rate for stream */ + /* Calculate nominal field rate for stream, rounded up to nearest integer */ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); + + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); return nominal_field_rate_in_uhz; } diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 05e2be856037..ba1aafe40512 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = { /* NOTE: iRAM is 256B in size */ struct iram_table_v_2 { /* flags */ - uint16_t flags; /* 0x00 U16 */ + uint16_t min_abm_backlight; /* 0x00 U16 */ /* parameters for ABM2.0 algorithm */ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ @@ -140,10 +140,10 @@ struct iram_table_v_2 { /* For reading PSR State directly from IRAM */ uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ uint8_t dmcu_state; /* 0xf6 */ uint16_t blRampReduction; /* 0xf7 */ @@ -164,42 +164,43 @@ struct iram_table_v_2_2 { uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ - uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ - uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ - uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ - uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ - uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ - uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ - uint8_t pad[21]; /* 0x6b U0.8 */ + uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ + uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ + uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ + uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ + uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ + uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ + uint16_t min_abm_backlight; /* 0x6b U16 */ + uint8_t pad[19]; /* 0x6d U0.8 */ /* parameters for crgb conversion */ - uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ - uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 
U1.15 */ - uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ + uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ /* parameters for custom curve */ /* thresholds for brightness --> backlight */ - uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ /* offsets for brightness --> backlight */ - uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ /* For reading PSR State directly from IRAM */ - uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ - uint8_t dmcu_state; /* 0xf6 */ + uint8_t psr_state; /* 0xf0 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_state; /* 0xf6 */ - uint8_t dummy1; /* 0xf7 */ - uint8_t dummy2; /* 0xf8 */ - uint8_t dummy3; /* 0xf9 */ - uint8_t dummy4; /* 0xfa */ - uint8_t dummy5; /* 0xfb */ - uint8_t dummy6; /* 0xfc */ - uint8_t dummy7; /* 0xfd */ - uint8_t dummy8; /* 0xfe */ - uint8_t dummy9; /* 0xff */ + uint8_t dummy1; /* 0xf7 */ + uint8_t dummy2; /* 0xf8 */ + uint8_t dummy3; /* 0xf9 */ + uint8_t dummy4; /* 0xfa */ + uint8_t dummy5; /* 0xfb */ + uint8_t dummy6; /* 0xfc */ + uint8_t dummy7; /* 0xfd */ + uint8_t dummy8; /* 0xfe */ + uint8_t dummy9; /* 0xff */ }; #pragma pack(pop) @@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters { unsigned int set = params.set; - ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); ram_table->deviation_gain = 0xb3; ram_table->blRampReduction = @@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + ram_table->deviation_gain[0] = 0xb3; ram_table->deviation_gain[1] = 0xa8; ram_table->deviation_gain[2] = 0x98; @@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame unsigned int set = params.set; ram_table->flags = 0x0; + + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + for (i = 0; i < NUM_AGGR_LEVEL; i++) { ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain; ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index da5df00fedce..e54157026330 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -38,6 +38,7 @@ struct dmcu_iram_parameters { unsigned int backlight_lut_array_size; unsigned int backlight_ramping_reduction; unsigned int backlight_ramping_start; + unsigned int min_abm_backlight; unsigned int set; }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h index b6f74bf4af02..27bb8c1ab858 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h @@ -7376,6 +7376,8 @@ #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2 +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 // addressBlock: dce_dc_fmt4_dispdec // base address: 0x2000 diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index 5dcb776548d8..7ec4331e67f2 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -25,7 +25,6 @@ #define _DISCOVERY_H_ #define PSP_HEADER_SIZE 256 -#define BINARY_MAX_SIZE (64 << 10) #define BINARY_SIGNATURE 0x28211407 #define DISCOVERY_TABLE_SIGNATURE 0x53445049 diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index fa8ad7db2b3a..d306cc711997 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -1421,6 +1421,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) { struct pp_hwmgr *hwmgr = handle; + *cap = false; if (!hwmgr) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 4acf139ea014..a066e9297777 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -844,6 +844,7 @@ static int smu_sw_init(void *handle) smu->smu_baco.platform_support = false; mutex_init(&smu->sensor_lock); + mutex_init(&smu->metrics_lock); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -1344,7 +1345,10 @@ static int smu_suspend(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); + bool baco_feature_is_enabled = false; + + if(!(adev->flags & AMD_IS_APU)) + baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); ret = smu_system_features_control(smu, false); if (ret) diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index d493a3f8c07a..08a717a34bd6 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -910,18 +910,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } @@ -1388,12 +1391,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, "VR", "COMPUTE", "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)"}; uint32_t i, size = 0; int16_t workload_type = 0; if (!smu->pm_enabled || !buf) return -EINVAL; + size += sprintf(buf + size, "%16s\n", + title[0]); + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c 
index 1115761982a7..fed3fc4bb57a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].latency_in_us = latency_required ? - smu10_get_mem_latency(hwmgr, - pclk_vol_table->entries[i].clk) : - 0; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].latency_in_us = latency_required ? + smu10_get_mem_latency(hwmgr, + pclk_vol_table->entries[i].clk) : + 0; + clocks->num_levels++; + } } return 0; @@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol; + clocks->num_levels++; + } } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c index df6ff9252401..b068d1c7b44d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c @@ -29,7 +29,7 @@ #include "vega20_baco.h" #include "vega20_smumgr.h" - +#include "amdgpu_ras.h" static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { @@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum BACO_STATE cur_state; uint32_t data; @@ -84,10 +85,11 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) return 0; if (state == BACO_STATE_IN) { - data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); - data |= 0x80000000; - WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - + if (!ras || !ras->supported) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + } if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 23171a4d9a31..5ad9a7878f6b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -345,6 +345,7 @@ struct smu_context const struct pptable_funcs *ppt_funcs; struct mutex mutex; struct mutex sensor_lock; + struct mutex metrics_lock; uint64_t pool_size; struct smu_table_context smu_table; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 328e258a6895..7d913a06ebac 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -547,17 +547,20 @@ static int navi10_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + 
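/* metrics_lock serializes the staleness check and refresh of the cached SMU metrics table, so concurrent sensor reads cannot race each other into smu_update_table(). */ +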
mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index e62bfba51562..e5283dafc414 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -183,11 +183,13 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_t metrics = {0}; + SmuMetrics_t metrics; if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; + memset(&metrics, 0, sizeof(metrics)); + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); if (ret) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c5257ae3188a..0922d9cd858a 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -988,8 +988,12 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks; int ret = 0; - max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), + if (!smu->smu_table.max_sustainable_clocks) + max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL); + else + max_sustainable_clocks = smu->smu_table.max_sustainable_clocks; + smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks; max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 92c393f613d3..3c3f719971f7 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -1691,17 +1691,20 @@ static int vega20_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 624d257da20f..52c42569a111 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -250,6 +250,7 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc, { komeda_crtc_prepare(to_kcrtc(crtc)); drm_crtc_vblank_on(crtc); + WARN_ON(drm_crtc_vblank_get(crtc)); komeda_crtc_do_flush(crtc, old); } @@ -319,6 +320,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc, } } + drm_crtc_vblank_put(crtc); drm_crtc_vblank_off(crtc); 
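/* the vblank_put above balances the vblank_get taken in komeda_crtc_atomic_enable(), so the vblank reference count stays even across an enable/disable cycle */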
komeda_crtc_unprepare(kcrtc); } diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 875a3a9eabfa..7d0e7b031e44 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { +static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { .get_modes = malidp_mw_connector_get_modes, .mode_valid = malidp_mw_connector_mode_valid, }; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index f2e73e6d46b8..10985134ce0b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -73,7 +73,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) unsigned long prate; unsigned int mask = ATMEL_HLCDC_CLKDIV_MASK | ATMEL_HLCDC_CLKPOL; unsigned int cfg = 0; - int div; + int div, ret; + + ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk); + if (ret) + return; vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay; vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end; @@ -95,14 +99,14 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) (adj->crtc_hdisplay - 1) | ((adj->crtc_vdisplay - 1) << 16)); + prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); + mode_rate = adj->crtc_clock * 1000; if (!crtc->dc->desc->fixed_clksrc) { + prate *= 2; cfg |= ATMEL_HLCDC_CLKSEL; mask |= ATMEL_HLCDC_CLKSEL; } - prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk); - mode_rate = adj->crtc_clock * 1000; - div = DIV_ROUND_UP(prate, mode_rate); if (div < 2) { div = 2; @@ -117,8 +121,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) int div_low = prate / mode_rate; if (div_low >= 2 && - ((prate / div_low - mode_rate) < - 10 * (mode_rate - prate / div))) + (10 * (prate / div_low - mode_rate) < + (mode_rate - prate / div))) /* * At least 10 times better when using a higher * frequency than requested, instead of a lower. 
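* For example, if prate / div_low overshoots the requested rate by 0.4 MHz while prate / div undershoots it by 5 MHz, 10 * 0.4 < 5 holds and the slightly-too-fast divider wins.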
@@ -147,6 +151,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO | ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK, cfg); + + clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); } static enum drm_mode_status diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c index e567bdfa2ab8..bb1391784caf 100644 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ b/drivers/gpu/drm/bochs/bochs_hw.c @@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) { - DRM_ERROR("Cannot request framebuffer\n"); - return -EBUSY; - } + if (pci_request_region(pdev, 0, "bochs-drm") != 0) + DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); bochs->fb_map = ioremap(addr, size); if (bochs->fb_map == NULL) { diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index 3c7cc5af735c..56df07cdab68 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -715,7 +715,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx) /* 1.0V digital core power regulator */ pdata->dvdd10 = devm_regulator_get(dev, "dvdd10"); if (IS_ERR(pdata->dvdd10)) { - DRM_ERROR("DVDD10 regulator not found\n"); + if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER) + DRM_ERROR("DVDD10 regulator not found\n"); + return PTR_ERR(pdata->dvdd10); } @@ -1332,7 +1334,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client, err = anx78xx_init_pdata(anx78xx); if (err) { - DRM_ERROR("Failed to initialize pdata: %d\n", err); + if (err != -EPROBE_DEFER) + DRM_ERROR("Failed to initialize pdata: %d\n", err); + return err; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 521d689413c8..41bf4aaff21c 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -36,6 +36,7 @@ #include "dw-hdmi-cec.h" #include "dw-hdmi.h" +#define DDC_CI_ADDR 0x37 #define DDC_SEGMENT_ADDR 0x30 #define HDMI_EDID_LEN 512 @@ -398,6 +399,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap, u8 addr = msgs[0].addr; int i, ret = 0; + if (addr == DDC_CI_ADDR) + /* + * The internal I2C controller does not support the multi-byte + * read and write operations needed for DDC/CI. + * TOFIX: Blacklist the DDC/CI address until we filter out + * unsupported I2C operations. 
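+ * (DDC/CI tunnels MCCS monitor-control messages over I2C address 0x37, which is exactly such a multi-byte exchange, so those transfers are refused here rather than attempted and corrupted.)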
+ */ + return -EOPNOTSUPP; + dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr); for (i = 0; i < num; i++) { @@ -1566,28 +1576,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) frame.colorspace = HDMI_COLORSPACE_RGB; /* Set up colorimetry */ - switch (hdmi->hdmi_data.enc_out_encoding) { - case V4L2_YCBCR_ENC_601: - if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) - frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; - else + if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { + switch (hdmi->hdmi_data.enc_out_encoding) { + case V4L2_YCBCR_ENC_601: + if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; + else + frame.colorimetry = HDMI_COLORIMETRY_ITU_601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; + break; + case V4L2_YCBCR_ENC_709: + if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; + else + frame.colorimetry = HDMI_COLORIMETRY_ITU_709; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; + break; + default: /* Carries no data */ frame.colorimetry = HDMI_COLORIMETRY_ITU_601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; + break; + } + } else { + frame.colorimetry = HDMI_COLORIMETRY_NONE; frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; - break; - case V4L2_YCBCR_ENC_709: - if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) - frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; - else - frame.colorimetry = HDMI_COLORIMETRY_ITU_709; - frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; - break; - default: /* Carries no data */ - frame.colorimetry = HDMI_COLORIMETRY_ITU_601; - frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; - break; + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } frame.scan_mode = HDMI_SCAN_MODE_NONE; @@ -2023,7 +2039,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); - hdmi_enable_audio_clk(hdmi, true); + hdmi_enable_audio_clk(hdmi, hdmi->audio_enable); } /* not for DVI mode */ diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8a8d605021f0..0454675a44cb 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -294,7 +294,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr, static int tc_aux_wait_busy(struct tc_data *tc) { - return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000); + return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000); } static int tc_aux_write_data(struct tc_data *tc, const void *data, @@ -637,7 +637,7 @@ static int tc_aux_link_setup(struct tc_data *tc) if (ret) goto err; - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000); + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000); if (ret == -ETIMEDOUT) { dev_err(tc->dev, "Timeout waiting for PHY to become ready"); return ret; @@ -861,7 +861,7 @@ static int tc_wait_link_training(struct tc_data *tc) int ret; ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE, - LT_LOOPDONE, 1, 1000); + LT_LOOPDONE, 500, 100000); if (ret) { dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n"); return ret; @@ -934,7 +934,7 @@ static int tc_main_link_enable(struct tc_data *tc) dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | 
PHY_M0_RST); ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000); + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000); if (ret) { dev_err(dev, "timeout waiting for phy become ready"); return ret; diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index c8922b7cac09..bf1bdb0aac19 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -880,7 +880,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) * depending on the hardware this may require the framebuffer * to be in a specific tiling format. */ - if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 || + if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 && + (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) || !plane->rotation_property) return false; diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index 8230dac01a89..3a4126dc2520 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -212,8 +212,14 @@ retry: drm_for_each_plane(plane, fb->dev) { struct drm_plane_state *plane_state; - if (plane->state->fb != fb) + ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); + if (ret) + goto out; + + if (plane->state->fb != fb) { + drm_modeset_unlock(&plane->mutex); continue; + } plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index be1b7ba92ffe..6a626c82e264 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -140,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf, if (IS_ERR(source)) return PTR_ERR(source); - if (source[len] == '\n') - source[len] = '\0'; + if (source[len - 1] == '\n') + source[len - 1] = '\0'; ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt); if (ret) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 82add736e17d..c5e9e2305fff 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -339,7 +339,7 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); idx += req->u.i2c_read.transactions[i].num_bytes; - buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); idx++; } @@ -2465,9 +2465,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) drm_dp_mst_topology_put_port(port); } - for (i = 0; i < mgr->max_payloads; i++) { - if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) + for (i = 0; i < mgr->max_payloads; /* do nothing */) { + if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { + i++; continue; + } DRM_DEBUG_KMS("removing payload %d\n", i); for (j = i; j < mgr->max_payloads - 1; j++) { @@ -2692,6 +2694,7 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw, int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) { int ret = 0; + int i = 0; struct drm_dp_mst_branch *mstb = NULL; mutex_lock(&mgr->lock); @@ -2752,10 +2755,21 @@ int drm_dp_mst_topology_mgr_set_mst(struct 
drm_dp_mst_topology_mgr *mgr, bool ms /* this can fail if the device is gone */ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; + mutex_lock(&mgr->payload_lock); memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); mgr->payload_mask = 0; set_bit(0, &mgr->payload_mask); + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + + if (vcpi) { + vcpi->vcpi = 0; + vcpi->num_slots = 0; + } + mgr->proposed_vcpis[i] = NULL; + } mgr->vcpi_mask = 0; + mutex_unlock(&mgr->payload_lock); } out_unlock: diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6b0177112e18..3f50b8865db4 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3722,7 +3722,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end) if (*end < 4 || *end > 127) return -ERANGE; } else { - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; @@ -4191,7 +4191,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads) if (cea_revision(cea) < 3) { DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } if (cea_db_offsets(cea, &start, &end)) { @@ -4252,7 +4252,7 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) if (cea_revision(cea) < 3) { DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } if (cea_db_offsets(cea, &start, &end)) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a7ba5b4902d6..8d193a58363d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1320,7 +1320,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. */ - if (var->bits_per_pixel != fb->format->cpp[0] * 8 || + if (var->bits_per_pixel > fb->format->cpp[0] * 8 || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb requested width/height/bpp can't fit in current fb " @@ -1345,6 +1345,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); } + /* + * Likewise, bits_per_pixel should be rounded up to a supported value. + */ + var->bits_per_pixel = fb->format->cpp[0] * 8; + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. 
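The drm_dp_update_payload_part1() hunk above fixes a classic in-place compaction bug: the original loop incremented its index unconditionally, so after shifting the payload table down over a deleted entry it skipped the element that had just moved into the current slot. The rewritten loop advances the index only when the slot is kept. A minimal, self-contained sketch of the same idiom follows, using a toy payload array rather than the kernel's structures (struct payload, DELETE_LOCAL and compact_payloads are illustrative names, not the driver's):

#include <stdio.h>

/* Toy stand-in for the payload table, not the kernel's struct. */
struct payload { int state; };
#define DELETE_LOCAL 1

/*
 * Remove every DELETE_LOCAL entry by shifting the tail down.
 * The index advances only when the current slot survives; after
 * a shift, slot i holds a new, unexamined entry and is re-checked.
 */
static int compact_payloads(struct payload *p, int n)
{
	int i = 0;

	while (i < n) {
		if (p[i].state != DELETE_LOCAL) {
			i++;
			continue;
		}
		for (int j = i; j < n - 1; j++)
			p[j] = p[j + 1];
		n--;	/* table shrank; re-examine slot i next pass */
	}
	return n;
}

int main(void)
{
	struct payload p[] = { {0}, {DELETE_LOCAL}, {DELETE_LOCAL}, {0} };

	printf("%d entries remain\n", compact_payloads(p, 4)); /* prints 2 */
	return 0;
}

The kernel loop iterates over a fixed max_payloads table rather than shrinking a count, but the control flow is the same: no increment on the deletion path.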
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index b481cafdde28..825abe38201a 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, } DRM_DEBUG_LEASE("Creating lease\n"); + /* lessee will take the ownership of leases */ lessee = drm_lease_create(lessor, &leases); if (IS_ERR(lessee)) { ret = PTR_ERR(lessee); + idr_destroy(&leases); goto out_leases; } @@ -580,7 +582,6 @@ out_lessee: out_leases: put_unused_fd(fd); - idr_destroy(&leases); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 1961f713aaab..a05e64e3d80b 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -367,9 +367,9 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev) memset(dbidev->tx_buf, 0, len); mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0, - (width >> 8) & 0xFF, (width - 1) & 0xFF); + ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF); mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0, - (height >> 8) & 0xFF, (height - 1) & 0xFF); + ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF); mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, (u8 *)dbidev->tx_buf, len); @@ -955,7 +955,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd, int ret; if (mipi_dbi_command_is_read(dbi, *cmd)) - return -ENOTSUPP; + return -EOPNOTSUPP; MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); @@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, struct mipi_dbi_dev *dbidev = m->private; u8 val, cmd = 0, parameters[64]; char *buf, *pos, *token; - unsigned int i; - int ret, idx; + int i, ret, idx; if (!drm_dev_enter(&dbidev->drm, &idx)) return -ENODEV; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 88232698d7a0..3fd35e6b9d53 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1672,6 +1672,13 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, } } + if (!(rotation & DRM_MODE_ROTATE_MASK)) + rotation |= DRM_MODE_ROTATE_0; + + /* Make sure there is exactly one rotation defined */ + if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK)) + return -EINVAL; + mode->rotation_reflection = rotation; return 0; diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 892ce636ef72..6ee04803c362 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) + if (!length || length > INT_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index b8363aaa9032..818738e83d06 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -54,7 +54,12 @@ EXPORT_SYMBOL(drm_rect_intersect); static u32 clip_scaled(u32 src, u32 dst, u32 clip) { - u64 tmp = mul_u32_u32(src, dst - clip); + u64 tmp; + + if (dst == 0) + return 0; + + tmp = mul_u32_u32(src, dst - clip); /* * Round toward 1.0 when clipping so that we don't accidentally diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index fd1fbc77871f..552ec82e9bc5 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ 
b/drivers/gpu/drm/drm_vblank.c @@ -1581,7 +1581,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, unsigned int flags, pipe, high_pipe; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return -EINVAL; @@ -1838,7 +1838,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, return -EOPNOTSUPP; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); if (!crtc) @@ -1896,7 +1896,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, return -EOPNOTSUPP; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); if (!crtc) diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 2d5cbfda3ca7..9c262daf5816 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = { struct decon_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data) decon_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, dev); + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); } static void decon_unbind(struct device *dev, struct device *master, void *data) @@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data) decon_disable(ctx->crtc); /* detach this sub driver from iommu mapping if supported. */ - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); } static const struct component_ops decon_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f0640950bd46..6fd40410dfd2 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -40,6 +40,7 @@ struct decon_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx, decon_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, ctx->dev); + return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } static void decon_ctx_remove(struct decon_context *ctx) { /* detach this sub driver from iommu mapping if supported. */ - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); } static u32 decon_calc_clkdiv(struct decon_context *ctx, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 9ebc02768847..619f81435c1b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev) * mapping. 
*/ static int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) + struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; int ret; @@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, return ret; if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { - if (to_dma_iommu_mapping(subdrv_dev)) + /* + * Keep the original DMA mapping of the sub-device and + * restore it on Exynos DRM detach, otherwise the DMA + * framework considers it as IOMMU-less during the next + * probe (in case of deferred probe or modular build) + */ + *dma_priv = to_dma_iommu_mapping(subdrv_dev); + if (*dma_priv) arm_iommu_detach_device(subdrv_dev); ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); @@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, * mapping */ static void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) + struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; - if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { arm_iommu_detach_device(subdrv_dev); - else if (IS_ENABLED(CONFIG_IOMMU_DMA)) + arm_iommu_attach_device(subdrv_dev, *dma_priv); + } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) iommu_detach_device(priv->mapping, subdrv_dev); clear_dma_max_seg_size(subdrv_dev); } -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, + void **dma_priv) { struct exynos_drm_private *priv = drm->dev_private; @@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) priv->mapping = mapping; } - return drm_iommu_attach_device(drm, dev); + return drm_iommu_attach_device(drm, dev, dma_priv); } -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev) +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, + void **dma_priv) { if (IS_ENABLED(CONFIG_EXYNOS_IOMMU)) - drm_iommu_detach_device(drm, dev); + drm_iommu_detach_device(drm, dev, dma_priv); } void exynos_drm_cleanup_dma(struct drm_device *drm) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d4014ba592fd..735f436c857c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) return priv->mapping ? 
true : false; } -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev); -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev); +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, + void **dma_priv); +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, + void **dma_priv); void exynos_drm_cleanup_dma(struct drm_device *drm); #ifdef CONFIG_DRM_EXYNOS_DPI diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 6926cee91b36..8ed94c994800 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1750,8 +1750,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), dsi->supplies); if (ret) { - dev_info(dev, "failed to get regulators: %d\n", ret); - return -EPROBE_DEFER; + if (ret != -EPROBE_DEFER) + dev_info(dev, "failed to get regulators: %d\n", ret); + return ret; } dsi->clks = devm_kcalloc(dev, @@ -1764,9 +1765,10 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->clks[i] = devm_clk_get(dev, clk_names[i]); if (IS_ERR(dsi->clks[i])) { if (strcmp(clk_names[i], "sclk_mipi") == 0) { - strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME); - i--; - continue; + dsi->clks[i] = devm_clk_get(dev, + OLD_SCLK_MIPI_CLK_NAME); + if (!IS_ERR(dsi->clks[i])) + continue; } dev_info(dev, "failed to get the clock: %s\n", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 8ea2e1d77802..29ab8be8604c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -97,6 +97,7 @@ struct fimc_scaler { struct fimc_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; struct exynos_drm_ipp_task *task; struct exynos_drm_ipp_formats *formats; @@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data) ctx->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &ctx->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(drm_dev, dev); + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); } static const struct component_ops fimc_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 8d0a929104e5..34e6b22173fa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { struct fimd_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) if (is_drm_iommu_supported(drm_dev)) fimd_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, dev); + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); } static void fimd_unbind(struct device *dev, struct device *master, @@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device 
*master, fimd_disable(ctx->crtc); - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); if (ctx->encoder) exynos_dpi_remove(ctx->encoder); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2a3382d43bc9..fcee33a43aca 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -232,6 +232,7 @@ struct g2d_runqueue_node { struct g2d_data { struct device *dev; + void *dma_priv; struct clk *gate_clk; void __iomem *regs; int irq; @@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data) return ret; } - ret = exynos_drm_register_dma(drm_dev, dev); + ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv); if (ret < 0) { dev_err(dev, "failed to enable iommu.\n"); g2d_fini_cmdlist(g2d); @@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data) priv->g2d_dev = NULL; cancel_work_sync(&g2d->runqueue_work); - exynos_drm_unregister_dma(g2d->drm_dev, dev); + exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv); } static const struct component_ops g2d_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7ae087b0504d..45e9aee8366a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -97,6 +97,7 @@ struct gsc_scaler { struct gsc_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; struct exynos_drm_ipp_task *task; struct exynos_drm_ipp_formats *formats; @@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data) ctx->drm_dev = drm_dev; ctx->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &ctx->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(drm_dev, dev); + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); } static const struct component_ops gsc_component_ops = { @@ -1313,6 +1314,7 @@ static int gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + component_del(dev, &gsc_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index b98482990d1a..dafa87b82052 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -56,6 +56,7 @@ struct rot_variant { struct rot_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; void __iomem *regs; struct clk *clock; @@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data) rot->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE, @@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &rot->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(rot->drm_dev, rot->dev); 
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv); } static const struct component_ops rotator_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 497973e9b2c5..93c43c8d914e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -39,6 +39,7 @@ struct scaler_data { struct scaler_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; void __iomem *regs; struct clk *clock[SCALER_MAX_CLK]; @@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data) scaler->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &scaler->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev); + exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev, + &scaler->dma_priv); } static const struct component_ops scaler_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index bc1565f1822a..0073a2b3b80a 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -852,6 +852,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector, static void hdmi_connector_destroy(struct drm_connector *connector) { + struct hdmi_context *hdata = connector_to_hdmi(connector); + + cec_notifier_conn_unregister(hdata->notifier); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -935,6 +939,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder) { struct hdmi_context *hdata = encoder_to_hdmi(encoder); struct drm_connector *connector = &hdata->connector; + struct cec_connector_info conn_info; int ret; connector->interlace_allowed = true; @@ -957,6 +962,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder) DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n"); } + cec_fill_conn_info_from_drm(&conn_info, connector); + + hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL, + &conn_info); + if (!hdata->notifier) { + ret = -ENOMEM; + DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n"); + } + return ret; } @@ -1528,8 +1542,8 @@ static void hdmi_disable(struct drm_encoder *encoder) */ mutex_unlock(&hdata->mutex); cancel_delayed_work(&hdata->hotplug_work); - cec_notifier_set_phys_addr(hdata->notifier, - CEC_PHYS_ADDR_INVALID); + if (hdata->notifier) + cec_notifier_phys_addr_invalidate(hdata->notifier); return; } @@ -1788,18 +1802,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata) hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); - if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) { + if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) if (IS_ERR(hdata->reg_hdmi_en)) return PTR_ERR(hdata->reg_hdmi_en); - ret = regulator_enable(hdata->reg_hdmi_en); - if (ret) { - DRM_DEV_ERROR(dev, - "failed to enable hdmi-en regulator\n"); - return ret; - } - } - return hdmi_bridge_init(hdata); } @@ -2006,10 +2012,13 @@ static int hdmi_probe(struct platform_device *pdev) } } - hdata->notifier = cec_notifier_get(&pdev->dev); - if (hdata->notifier == NULL) { - ret = -ENOMEM; - goto 
err_hdmiphy; + if (!IS_ERR(hdata->reg_hdmi_en)) { + ret = regulator_enable(hdata->reg_hdmi_en); + if (ret) { + DRM_DEV_ERROR(dev, + "failed to enable hdmi-en regulator\n"); + goto err_hdmiphy; + } } pm_runtime_enable(dev); @@ -2023,7 +2032,7 @@ static int hdmi_probe(struct platform_device *pdev) ret = hdmi_register_audio_device(hdata); if (ret) - goto err_notifier_put; + goto err_rpm_disable; ret = component_add(&pdev->dev, &hdmi_component_ops); if (ret) @@ -2034,10 +2043,10 @@ static int hdmi_probe(struct platform_device *pdev) err_unregister_audio: platform_device_unregister(hdata->audio.pdev); -err_notifier_put: - cec_notifier_put(hdata->notifier); +err_rpm_disable: pm_runtime_disable(dev); - + if (!IS_ERR(hdata->reg_hdmi_en)) + regulator_disable(hdata->reg_hdmi_en); err_hdmiphy: if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); @@ -2054,12 +2063,10 @@ static int hdmi_remove(struct platform_device *pdev) struct hdmi_context *hdata = platform_get_drvdata(pdev); cancel_delayed_work_sync(&hdata->hotplug_work); - cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); component_del(&pdev->dev, &hdmi_component_ops); platform_device_unregister(hdata->audio.pdev); - cec_notifier_put(hdata->notifier); pm_runtime_disable(&pdev->dev); if (!IS_ERR(hdata->reg_hdmi_en)) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 7b24338fad3c..22f494145411 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -94,6 +94,7 @@ struct mixer_context { struct platform_device *pdev; struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[MIXER_WIN_NR]; unsigned long flags; @@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, } } - return exynos_drm_register_dma(drm_dev, mixer_ctx->dev); + return exynos_drm_register_dma(drm_dev, mixer_ctx->dev, + &mixer_ctx->dma_priv); } static void mixer_ctx_remove(struct mixer_context *mixer_ctx) { - exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev); + exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev, + &mixer_ctx->dma_priv); } static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 218f3bb15276..90237abee088 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -462,6 +462,7 @@ static int psbfb_probe(struct drm_fb_helper *helper, container_of(helper, struct psb_fbdev, psb_fb_helper); struct drm_device *dev = psb_fbdev->psb_fb_helper.dev; struct drm_psb_private *dev_priv = dev->dev_private; + unsigned int fb_size; int bytespp; bytespp = sizes->surface_bpp / 8; @@ -471,8 +472,11 @@ static int psbfb_probe(struct drm_fb_helper *helper, /* If the mode will not fit in 32bit then switch to 16bit to get a console on full resolution. 
The X mode setting server will allocate its own 32bit GEM framebuffer */ - if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height > - dev_priv->vram_stolen_size) { + fb_size = ALIGN(sizes->surface_width * bytespp, 64) * + sizes->surface_height; + fb_size = ALIGN(fb_size, PAGE_SIZE); + + if (fb_size > dev_priv->vram_stolen_size) { sizes->surface_bpp = 16; sizes->surface_depth = 16; } diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 167c10767dd4..900e5499249d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit, s32 freq_error, min_error = 100000; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; @@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit, int err = target; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h index 0da860200410..e2ac09894a6d 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h @@ -83,7 +83,6 @@ #define VSIZE_OFST 20 #define LDI_INT_EN 0x741C #define FRAME_END_INT_EN_OFST 1 -#define UNDERFLOW_INT_EN_OFST 2 #define LDI_CTRL 0x7420 #define BPP_OFST 3 #define DATA_GATE_EN BIT(2) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 73cd28a6ea07..86000127d4ee 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -46,7 +46,6 @@ struct ade_hw_ctx { struct clk *media_noc_clk; struct clk *ade_pix_clk; struct reset_control *reset; - struct work_struct display_reset_wq; bool power_on; int irq; @@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx) */ ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST, FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND); - ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1); } static bool ade_crtc_mode_fixup(struct drm_crtc *crtc, @@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc) MASK(1), 0); } -static void drm_underflow_wq(struct work_struct *work) -{ - struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx, - display_reset_wq); - struct drm_device *drm_dev = ctx->crtc->dev; - struct drm_atomic_state *state; - - state = drm_atomic_helper_suspend(drm_dev); - drm_atomic_helper_resume(drm_dev, state); -} - static irqreturn_t ade_irq_handler(int irq, void *data) { struct ade_hw_ctx *ctx = data; @@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data) MASK(1), 1); drm_crtc_handle_vblank(crtc); } - if (status & BIT(UNDERFLOW_INT_EN_OFST)) { - ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST, - MASK(1), 1); - DRM_ERROR("LDI underflow!"); - schedule_work(&ctx->display_reset_wq); - } return IRQ_HANDLED; } @@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev, if (ret) return ERR_PTR(-EIO); - INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq); ctx->crtc = crtc; return ctx; diff --git a/drivers/gpu/drm/i810/i810_dma.c 
b/drivers/gpu/drm/i810/i810_dma.c index 2a77823b8e9a..e66c38332df4 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev, if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; if (sarea_priv->dirty) @@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in if (u != I810_BUF_CLIENT) DRM_DEBUG("MC found buffer that isn't mine!\n"); - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; sarea_priv->dirty = 0x7f; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 0d21402945ab..3317798945e8 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR help This option enables capturing the GPU state when a hang is detected. This information is vital for triaging hangs and assists in debugging. - Please report any hang to - https://bugs.freedesktop.org/enter_bug.cgi?product=DRI - for triaging. + Please report any hang for triaging according to: + https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs If in doubt, say "Y". diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 00786a142ff0..41c8e39a73ba 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -7,7 +7,6 @@ config DRM_I915_WERROR # We use the dependency on !COMPILE_TEST to not be enabled in # allmodconfig or allyesconfig configurations depends on !COMPILE_TEST - select HEADER_TEST default n help Add -Werror to the build flags for (and only for) i915.ko. diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index af50f05f4e9d..272503615378 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10510,7 +10510,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) u32 base; if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) - base = obj->phys_handle->busaddr; + base = sg_dma_address(obj->mm.pages->sgl); else base = intel_plane_ggtt_offset(plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c002f234ff31..2422af6a99a0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -4205,13 +4205,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - u32 val; + u32 mask, val; - val = MBUS_ABOX_BT_CREDIT_POOL1(16) | - MBUS_ABOX_BT_CREDIT_POOL2(16) | - MBUS_ABOX_B_CREDIT(1) | - MBUS_ABOX_BW_CREDIT(1); + mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | + MBUS_ABOX_BT_CREDIT_POOL2_MASK | + MBUS_ABOX_B_CREDIT_MASK | + MBUS_ABOX_BW_CREDIT_MASK; + val = I915_READ(MBUS_ABOX_CTL); + val &= ~mask; + val |= MBUS_ABOX_BT_CREDIT_POOL1(16) | + MBUS_ABOX_BT_CREDIT_POOL2(16) | + MBUS_ABOX_B_CREDIT(1) | + MBUS_ABOX_BW_CREDIT(1); I915_WRITE(MBUS_ABOX_CTL, val); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 16ed44bfd734..07a038f21619 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1284,7 +1284,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) return 
0; /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */ - if (IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return 0; if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 3d4f5775a4ba..25235ef630c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -9,16 +9,16 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" -static __always_inline u32 __busy_read_flag(u8 id) +static __always_inline u32 __busy_read_flag(u16 id) { - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffff0000u; GEM_BUG_ON(id >= 16); return 0x10000u << id; } -static __always_inline u32 __busy_write_id(u8 id) +static __always_inline u32 __busy_write_id(u16 id) { /* * The uABI guarantees an active writer is also amongst the read @@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id) * last_read - hence we always set both read and write busy for * last_write. */ - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffffffffu; return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) +__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; @@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) return 0; /* Beware type-expansion follies! */ - BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); + BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); return flag(rq->engine->uabi_class); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index e41fd94ae5a9..b3d2b91575cb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -2094,8 +2094,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, ext_data.fpriv = file->driver_priv; if (client_is_banned(ext_data.fpriv)) { DRM_DEBUG("client %s[%d] banned from creating ctx\n", - current->comm, - pid_nr(get_task_pid(current, PIDTYPE_PID))); + current->comm, task_pid_nr(current)); return -EIO; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index e635e1e5f4d3..bd4e41380777 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -439,7 +439,8 @@ eb_validate_vma(struct i915_execbuffer *eb, if (unlikely(entry->flags & eb->invalid_flags)) return -EINVAL; - if (unlikely(entry->alignment && !is_power_of_2(entry->alignment))) + if (unlikely(entry->alignment && + !is_power_of_2_u64(entry->alignment))) return -EINVAL; /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 646859fea224..08b35587bc6d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -240,9 +240,6 @@ struct drm_i915_gem_object { void *gvt_info; }; - - /** for phys allocated objects */ - struct drm_dma_handle *phys_handle; }; static inline struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 768356908160..0cfe9bd76377 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -21,88 +21,87 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { struct address_space *mapping = obj->base.filp->f_mapping; - struct drm_dma_handle *phys; - struct sg_table *st; struct scatterlist *sg; - char *vaddr; + struct sg_table *st; + dma_addr_t dma; + void *vaddr; + void *dst; int i; - int err; if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) return -EINVAL; - /* Always aligning to the object size, allows a single allocation + /* + * Always aligning to the object size, allows a single allocation * to handle all possible callers, and given typical object sizes, * the alignment of the buddy allocation will naturally match. */ - phys = drm_pci_alloc(obj->base.dev, - roundup_pow_of_two(obj->base.size), - roundup_pow_of_two(obj->base.size)); - if (!phys) + vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev, + roundup_pow_of_two(obj->base.size), + &dma, GFP_KERNEL); + if (!vaddr) return -ENOMEM; - vaddr = phys->vaddr; - for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { - struct page *page; - char *src; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto err_phys; - } - - src = kmap_atomic(page); - memcpy(vaddr, src, PAGE_SIZE); - drm_clflush_virt_range(vaddr, PAGE_SIZE); - kunmap_atomic(src); - - put_page(page); - vaddr += PAGE_SIZE; - } - - intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) { - err = -ENOMEM; - goto err_phys; - } + if (!st) + goto err_pci; - if (sg_alloc_table(st, 1, GFP_KERNEL)) { - kfree(st); - err = -ENOMEM; - goto err_phys; - } + if (sg_alloc_table(st, 1, GFP_KERNEL)) + goto err_st; sg = st->sgl; sg->offset = 0; sg->length = obj->base.size; - sg_dma_address(sg) = phys->busaddr; + sg_assign_page(sg, (struct page *)vaddr); + sg_dma_address(sg) = dma; sg_dma_len(sg) = obj->base.size; - obj->phys_handle = phys; + dst = vaddr; + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + void *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) + goto err_st; + + src = kmap_atomic(page); + memcpy(dst, src, PAGE_SIZE); + drm_clflush_virt_range(dst, PAGE_SIZE); + kunmap_atomic(src); + + put_page(page); + dst += PAGE_SIZE; + } + + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); __i915_gem_object_set_pages(obj, st, sg->length); return 0; -err_phys: - drm_pci_free(obj->base.dev, phys); - - return err; +err_st: + kfree(st); +err_pci: + dma_free_coherent(&obj->base.dev->pdev->dev, + roundup_pow_of_two(obj->base.size), + vaddr, dma); + return -ENOMEM; } static void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, struct sg_table *pages) { + dma_addr_t dma = sg_dma_address(pages->sgl); + void *vaddr = sg_page(pages->sgl); + __i915_gem_object_release_shmem(obj, pages, false); if (obj->mm.dirty) { struct address_space *mapping = obj->base.filp->f_mapping; - char *vaddr = obj->phys_handle->vaddr; + void *src = vaddr; int i; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { @@ -114,15 +113,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, continue; dst = kmap_atomic(page); - drm_clflush_virt_range(vaddr, PAGE_SIZE); - memcpy(dst, vaddr, PAGE_SIZE); + drm_clflush_virt_range(src, PAGE_SIZE); + memcpy(dst, src, PAGE_SIZE); kunmap_atomic(dst); set_page_dirty(page); if (obj->mm.madv == I915_MADV_WILLNEED) mark_page_accessed(page); put_page(page); - vaddr += PAGE_SIZE; + + src += PAGE_SIZE; } obj->mm.dirty = false; } @@ -130,7 
+130,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, sg_free_table(pages); kfree(pages); - drm_pci_free(obj->base.dev, obj->phys_handle); + dma_free_coherent(&obj->base.dev->pdev->dev, + roundup_pow_of_two(obj->base.size), + vaddr, dma); } static void phys_release(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index abfbac49b8e8..968d9b2705d0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -427,7 +427,7 @@ struct get_pages_work { static struct sg_table * __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, - struct page **pvec, int num_pages) + struct page **pvec, unsigned long num_pages) { unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; @@ -473,9 +473,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { struct get_pages_work *work = container_of(_work, typeof(*work), work); struct drm_i915_gem_object *obj = work->obj; - const int npages = obj->base.size >> PAGE_SHIFT; + const unsigned long npages = obj->base.size >> PAGE_SHIFT; + unsigned long pinned; struct page **pvec; - int pinned, ret; + int ret; ret = -ENOMEM; pinned = 0; @@ -578,7 +579,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { - const int num_pages = obj->base.size >> PAGE_SHIFT; + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; struct mm_struct *mm = obj->userptr.mm->mm; struct page **pvec; struct sg_table *pages; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 1d27babff0ce..dc9d3a5ec4a6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -375,7 +375,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) - return PTR_ERR(obj); + return false; err = create_mmap_offset(obj); i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 22aab8593abf..926272b5a0ca 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -250,6 +250,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) return pos & (ring->size - 1); } +static inline int intel_ring_direction(const struct intel_ring *ring, + u32 next, u32 prev) +{ + typecheck(typeof(ring->size), next); + typecheck(typeof(ring->size), prev); + return (next - prev) << ring->wrap; +} + static inline bool intel_ring_offset_valid(const struct intel_ring *ring, unsigned int pos) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 9dd8c299cb2d..c77c9518c58b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -107,6 +107,7 @@ struct intel_ring { u32 space; u32 size; + u32 wrap; u32 effective_size; }; @@ -300,8 +301,8 @@ struct intel_engine_cs { u8 class; u8 instance; - u8 uabi_class; - u8 uabi_instance; + u16 uabi_class; + u16 uabi_instance; u32 context_size; u32 mmio_base; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 06a506c29463..66f6d1a897f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ 
-471,12 +471,6 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) return desc; } -static void unwind_wa_tail(struct i915_request *rq) -{ - rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); - assert_ring_tail_valid(rq->ring, rq->tail); -} - static struct i915_request * __unwind_incomplete_requests(struct intel_engine_cs *engine) { @@ -495,7 +489,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) continue; /* XXX */ __i915_request_unsubmit(rq); - unwind_wa_tail(rq); /* * Push the request back into the queue for later resubmission. @@ -525,7 +518,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) */ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) { - spin_lock(&rq->lock); + spin_lock_nested(&rq->lock, + SINGLE_DEPTH_NESTING); i915_request_cancel_breadcrumb(rq); spin_unlock(&rq->lock); } @@ -649,13 +643,35 @@ execlists_schedule_out(struct i915_request *rq) i915_request_put(rq); } -static u64 execlists_update_context(const struct i915_request *rq) +static u64 execlists_update_context(struct i915_request *rq) { struct intel_context *ce = rq->hw_context; - u64 desc; + u64 desc = ce->lrc_desc; + u32 tail, prev; - ce->lrc_reg_state[CTX_RING_TAIL + 1] = - intel_ring_set_tail(rq->ring, rq->tail); + /* + * WaIdleLiteRestore:bdw,skl + * + * We should never submit the context with the same RING_TAIL twice + * just in case we submit an empty ring, which confuses the HW. + * + * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of + * the normal request to be able to always advance the RING_TAIL on + * subsequent resubmissions (for lite restore). Should that fail us, + * and we try and submit the same tail again, force the context + * reload. + * + * If we need to return to a preempted context, we need to skip the + * lite-restore and force it to reload the RING_TAIL. Otherwise, the + * HW has a tendency to ignore us rewinding the TAIL to the end of + * an earlier request. + */ + tail = intel_ring_set_tail(rq->ring, rq->tail); + prev = ce->lrc_reg_state[CTX_RING_TAIL + 1]; + if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) + desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_reg_state[CTX_RING_TAIL + 1] = tail; + rq->tail = rq->wa_tail; /* * Make sure the context image is complete before we submit it to HW. @@ -674,7 +690,6 @@ static u64 execlists_update_context(const struct i915_request *rq) */ mb(); - desc = ce->lrc_desc; ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; return desc; @@ -918,6 +933,11 @@ last_active(const struct intel_engine_execlists *execlists) return *last; } +#define for_each_waiter(p__, rq__) \ + list_for_each_entry_lockless(p__, \ + &(rq__)->sched.waiters_list, \ + wait_link) + static void defer_request(struct i915_request *rq, struct list_head * const pl) { LIST_HEAD(list); @@ -935,7 +955,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl) GEM_BUG_ON(i915_request_is_active(rq)); list_move_tail(&rq->sched.link, pl); - list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + for_each_waiter(p, rq) { struct i915_request *w = container_of(p->waiter, typeof(*w), sched); @@ -1101,14 +1121,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ __unwind_incomplete_requests(engine); - /* - * If we need to return to the preempted context, we - * need to skip the lite-restore and force it to - * reload the RING_TAIL. Otherwise, the HW has a - * tendency to ignore us rewinding the TAIL to the - * end of an earlier request. 
- */ - last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; last = NULL; } else if (need_timeslice(engine, last) && !timer_pending(&engine->execlists.timer)) { @@ -1149,16 +1161,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (!list_is_last(&last->sched.link, &engine->active.requests)) return; - - /* - * WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent - * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_fini_breadcrumb() for - * where we prepare the padding after the - * end of the request. - */ - last->tail = last->wa_tail; } } @@ -2131,6 +2133,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + slm_offset(engine)); + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaMediaPoolStateCmdInWABB:bxt,glk */ @@ -3715,9 +3725,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.i915 = ctx->i915; ve->base.gt = siblings[0]->gt; ve->base.id = -1; + ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; /* * The decision on whether to submit a request using semaphores diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index bacaa7bb8c9a..eee9fcbe0434 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1312,6 +1312,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) kref_init(&ring->ref); ring->size = size; + ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size); + /* Workaround an erratum on the i830 which causes a hang if * the TAIL pointer points to within the last 2 cachelines * of the buffer. 
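The intel_ring_direction() helper introduced above relies on ring offsets occupying only the low ilog2(size) bits of a u32. ring->wrap is precomputed as BITS_PER_TYPE(ring->size) - ilog2(size), so shifting (next - prev) left by that amount moves the modular difference's sign into the machine sign bit: positive when next has advanced past prev by less than half the ring, negative when it has moved backwards, zero when equal. execlists_update_context() then uses intel_ring_direction(rq->ring, tail, prev) <= 0 to set CTX_DESC_FORCE_RESTORE when the new RING_TAIL does not advance, avoiding the lite-restore hazard described in the WaIdleLiteRestore comment. A standalone sketch of the trick, assuming a power-of-two ring size (ring_direction and its parameters are illustrative names, not the driver's):

#include <stdint.h>
#include <stdio.h>

/*
 * Offsets in a power-of-two ring of `size` bytes live in the low
 * log2(size) bits. Shifting the difference up by 32 - log2(size)
 * puts the modular sign into the sign bit: > 0 means `next` is
 * ahead of `prev` (by less than half the ring), < 0 behind,
 * == 0 equal.
 */
static int ring_direction(uint32_t size, uint32_t next, uint32_t prev)
{
	uint32_t wrap = 32 - (uint32_t)__builtin_ctz(size); /* size is 2^n */

	return (int32_t)((next - prev) << wrap);
}

int main(void)
{
	uint32_t size = 4096;

	printf("%d\n", ring_direction(size, 64, 32) > 0);	/* 1: ahead */
	printf("%d\n", ring_direction(size, 32, 64) < 0);	/* 1: behind */
	printf("%d\n", ring_direction(size, 16, 4080) > 0);	/* 1: ahead across the wrap */
	return 0;
}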
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index e753b1e706e2..fc29a3705354 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1597,9 +1597,9 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s) if (!(cmd_val(s, 0) & (1 << 22))) return ret; - /* check if QWORD */ - if (DWORD_FIELD(0, 20, 19) == 1) - valid_len += 8; + /* check inline data */ + if (cmd_val(s, 0) & BIT(18)) + valid_len = CMD_LEN(9); ret = gvt_check_valid_cmd_length(cmd_length(s), valid_len); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index e1c313da6c00..a62bdf9be682 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; /* TODO: add more platforms support */ - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv)) { if (connected) { vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 4bfaefdf548d..c0347956f7cf 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -96,12 +96,12 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { + list_del(pos); intel_gvt_hypervisor_put_vfio_device(vgpu); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); kfree(dmabuf_obj); - list_del(pos); break; } } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 4b04af569c05..7dc7bb850d0a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) if (mm->type == INTEL_GVT_MM_PPGTT) { list_del(&mm->ppgtt_mm.list); + + mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); list_del(&mm->ppgtt_mm.lru_list); + mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); + invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index d5a6e4e3d0fd..32e57635709a 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - mutex_lock(&vgpu->vgpu_lock); - WARN(vgpu->active, "vGPU is still active!\n"); + /* + * remove idr first so later clean can judge if need to stop + * service if no active vgpu. 
+ */ + mutex_lock(&gvt->lock); + idr_remove(&gvt->vgpu_idr, vgpu->id); + mutex_unlock(&gvt->lock); + + mutex_lock(&vgpu->vgpu_lock); intel_gvt_debugfs_remove_vgpu(vgpu); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); @@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) mutex_unlock(&vgpu->vgpu_lock); mutex_lock(&gvt->lock); - idr_remove(&gvt->vgpu_idr, vgpu->id); if (idr_is_empty(&gvt->vgpu_idr)) intel_gvt_clean_irq(gvt); intel_gvt_update_vgpu_types(gvt); @@ -560,9 +566,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); - intel_vgpu_reset_display(vgpu); if (dmlr) { + intel_vgpu_reset_display(vgpu); intel_vgpu_reset_cfg_space(vgpu); /* only reset the failsafe mode when dmlr reset */ vgpu->failsafe = false; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 48e16ad93bbd..51dc8753b527 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -121,7 +121,7 @@ static inline void debug_active_assert(struct i915_active *ref) { } #endif static void -__active_retire(struct i915_active *ref) +__active_retire(struct i915_active *ref, bool lock) { struct active_node *it, *n; struct rb_root root; @@ -138,7 +138,8 @@ __active_retire(struct i915_active *ref) retire = true; } - mutex_unlock(&ref->mutex); + if (likely(lock)) + mutex_unlock(&ref->mutex); if (!retire) return; @@ -153,21 +154,28 @@ __active_retire(struct i915_active *ref) } static void -active_retire(struct i915_active *ref) +active_retire(struct i915_active *ref, bool lock) { GEM_BUG_ON(!atomic_read(&ref->count)); if (atomic_add_unless(&ref->count, -1, 1)) return; /* One active may be flushed from inside the acquire of another */ - mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); - __active_retire(ref); + if (likely(lock)) + mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); + __active_retire(ref, lock); } static void node_retire(struct i915_active_request *base, struct i915_request *rq) { - active_retire(node_from_active(base)->ref); + active_retire(node_from_active(base)->ref, true); +} + +static void +node_retire_nolock(struct i915_active_request *base, struct i915_request *rq) +{ + active_retire(node_from_active(base)->ref, false); } static struct i915_active_request * @@ -364,7 +372,7 @@ int i915_active_acquire(struct i915_active *ref) void i915_active_release(struct i915_active *ref) { debug_active_assert(ref); - active_retire(ref); + active_retire(ref, true); } static void __active_ungrab(struct i915_active *ref) @@ -391,7 +399,7 @@ void i915_active_ungrab(struct i915_active *ref) { GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)); - active_retire(ref); + active_retire(ref, true); __active_ungrab(ref); } @@ -421,12 +429,13 @@ int i915_active_wait(struct i915_active *ref) break; } - err = i915_active_request_retire(&it->base, BKL(ref)); + err = i915_active_request_retire(&it->base, BKL(ref), + node_retire_nolock); if (err) break; } - __active_retire(ref); + __active_retire(ref, true); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index f95058f99057..0ad7ef60d15f 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -309,7 +309,7 @@ i915_active_request_isset(const struct i915_active_request *active) */ static inline int __must_check i915_active_request_retire(struct i915_active_request *active, - struct mutex *mutex) + 
struct mutex *mutex, i915_active_retire_fn retire) { struct i915_request *request; long ret; @@ -327,7 +327,7 @@ i915_active_request_retire(struct i915_active_request *active, list_del_init(&active->link); RCU_INIT_POINTER(active->request, NULL); - active->retire(active, request); + retire(active, request); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 98305d987ac1..4d561da3dcea 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -136,7 +136,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - void *vaddr = obj->phys_handle->vaddr + args->offset; + void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; char __user *user_data = u64_to_user_ptr(args->data_ptr); /* @@ -802,10 +802,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_gtt_pwrite_fast(obj, args); if (ret == -EFAULT || ret == -ENOSPC) { - if (obj->phys_handle) - ret = i915_gem_phys_pwrite(obj, args, file); - else + if (i915_gem_object_has_struct_page(obj)) ret = i915_gem_shmem_pwrite(obj, args); + else + ret = i915_gem_phys_pwrite(obj, args, file); } i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b1a7a8b9b46a..f614646ed3f9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1178,6 +1178,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { + GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE); vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; @@ -1657,6 +1658,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { + GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); iter.dma += I915_GTT_PAGE_SIZE; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index e284bd76fa86..fe9edbba997c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1768,7 +1768,8 @@ void i915_capture_error_state(struct drm_i915_private *i915, if (!xchg(&warned, true) && ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); - pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); + pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); + pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n", diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f8ee9aba3955..7b6e68f082f8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4049,7 +4049,13 @@ enum { #define GWUNIT_CLKGATE_DIS (1 << 16) #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) -#define VFUNIT_CLKGATE_DIS (1 << 20) +#define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define 
HSUNIT_CLKGATE_DIS REG_BIT(8) +#define VSUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) +#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) +#define PSDUNIT_CLKGATE_DIS REG_BIT(5) #define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) #define CGPSF_CLKGATE_DIS (1 << 3) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 1c5506822dc7..0d39038898d4 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -560,19 +560,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } +static void irq_semaphore_cb(struct irq_work *wrk) +{ + struct i915_request *rq = + container_of(wrk, typeof(*rq), semaphore_work); + + i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE); + i915_request_put(rq); +} + static int __i915_sw_fence_call semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { - struct i915_request *request = - container_of(fence, typeof(*request), semaphore); + struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); switch (state) { case FENCE_COMPLETE: - i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE); + if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) { + i915_request_get(rq); + init_irq_work(&rq->semaphore_work, irq_semaphore_cb); + irq_work_queue(&rq->semaphore_work); + } break; case FENCE_FREE: - i915_request_put(request); + i915_request_put(rq); break; } @@ -1215,9 +1227,9 @@ void __i915_request_queue(struct i915_request *rq, * decide whether to preempt the entire chain so that it is ready to * run at the earliest possible convenience. */ - i915_sw_fence_commit(&rq->semaphore); if (attr && rq->engine->schedule) rq->engine->schedule(rq, attr); + i915_sw_fence_commit(&rq->semaphore); i915_sw_fence_commit(&rq->submit); } diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index e4dd013761e8..3a3e7bbf19ff 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -26,6 +26,7 @@ #define I915_REQUEST_H #include +#include #include #include "gt/intel_context_types.h" @@ -147,6 +148,7 @@ struct i915_request { }; struct list_head execute_cb; struct i915_sw_fence semaphore; + struct irq_work semaphore_work; /* * A list of everyone we wait upon, and everyone who waits upon us. diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 3eba8a2b39c2..0ef205fe5e29 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -418,8 +418,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, if (!node_signaled(signal)) { INIT_LIST_HEAD(&dep->dfs_link); - list_add(&dep->wait_link, &signal->waiters_list); - list_add(&dep->signal_link, &node->signalers_list); dep->signaler = signal; dep->waiter = node; dep->flags = flags; @@ -429,6 +427,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, !node_started(signal)) node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; + /* All set, now publish. Beware the lockless walkers. 
*/ + list_add(&dep->signal_link, &node->signalers_list); + list_add_rcu(&dep->wait_link, &signal->waiters_list); + /* * As we do not allow WAIT to preempt inflight requests, * once we have executed a request, along with triggering diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 16acdf7bdbe6..17cfeef35a24 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -8,9 +8,8 @@ #include "i915_drv.h" #include "i915_utils.h" -#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" -#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ - "providing the dmesg log by booting with drm.debug=0xf" +#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" +#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details." void __i915_printk(struct drm_i915_private *dev_priv, const char *level, diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 562f756da421..5b1622a40baa 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -233,6 +233,11 @@ static inline u64 ptr_to_u64(const void *ptr) __idx; \ }) +static inline bool is_power_of_2_u64(u64 n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + static inline void __list_del_many(struct list_head *head, struct list_head *first) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2efe1d12d5a9..3ccfc025fde2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -9194,6 +9194,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) /* WaEnable32PlaneMode:icl */ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); + + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, + 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, PSDUNIT_CLKGATE_DIS); } static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 8e1ff9c105b6..22335cee2203 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -25,6 +25,7 @@ #ifndef __I915_SELFTESTS_RANDOM_H__ #define __I915_SELFTESTS_RANDOM_H__ +#include #include #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 2e2ed653e9c6..f156f245fdec 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -371,14 +371,18 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, struct ingenic_drm *priv = drm_plane_get_priv(plane); struct drm_plane_state *state = plane->state; unsigned int width, height, cpp; + dma_addr_t addr; - width = state->crtc->state->adjusted_mode.hdisplay; - height = state->crtc->state->adjusted_mode.vdisplay; - cpp = state->fb->format->cpp[plane->index]; + if (state && state->fb) { + addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); + width = state->crtc->state->adjusted_mode.hdisplay; + height = state->crtc->state->adjusted_mode.vdisplay; + cpp = state->fb->format->cpp[plane->index]; - priv->dma_hwdesc->addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); - 
priv->dma_hwdesc->cmd = width * height * cpp / 4; - priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ; + priv->dma_hwdesc->addr = addr; + priv->dma_hwdesc->cmd = width * height * cpp / 4; + priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ; + } } static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 9a09eba53182..5649887d2b90 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev) } if (!match) { dev_err(dev, "no matching components\n"); - return -ENODEV; + ret = -ENODEV; + goto clk_disable; } if (IS_ERR(match)) { dev_err(dev, "could not create component match\n"); diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index f9c9e32b299c..35bb825d1918 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, for_each_available_child_of_node(dev->of_node, child) { panel = of_drm_find_panel(child); if (IS_ERR(panel)) { - dev_err(dev, "failed to find panel try bridge (%lu)\n", + dev_err(dev, "failed to find panel try bridge (%ld)\n", PTR_ERR(panel)); + panel = NULL; + bridge = of_drm_find_bridge(child); if (IS_ERR(bridge)) { - dev_err(dev, "failed to find bridge (%lu)\n", + dev_err(dev, "failed to find bridge (%ld)\n", PTR_ERR(bridge)); return PTR_ERR(bridge); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 34a731755791..f9455f2724d2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -298,6 +298,7 @@ err_pm_runtime_put: static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; + struct drm_crtc *crtc = &mtk_crtc->base; int i; DRM_DEBUG_DRIVER("%s\n", __func__); @@ -319,6 +320,13 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) mtk_disp_mutex_unprepare(mtk_crtc->mutex); pm_runtime_put(drm->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); + } } static void mtk_crtc_ddp_config(struct drm_crtc *crtc) @@ -488,10 +496,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { static int mtk_drm_crtc_init(struct drm_device *drm, struct mtk_drm_crtc *mtk_crtc, - struct drm_plane *primary, - struct drm_plane *cursor, unsigned int pipe) + unsigned int pipe) { - int ret; + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + int i, ret; + + for (i = 0; i < mtk_crtc->layer_nr; i++) { + if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY) + primary = &mtk_crtc->planes[i]; + else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR) + cursor = &mtk_crtc->planes[i]; + } ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor, &mtk_crtc_funcs, NULL); @@ -529,6 +545,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, int pipe = priv->num_pipes; int ret; int i; + uint gamma_lut_size = 0; if (!path) return 0; @@ -579,6 +596,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, } mtk_crtc->ddp_comp[i] = comp; + + if (comp->funcs && comp->funcs->gamma_set) + gamma_lut_size = MTK_LUT_SIZE; } mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); @@ -596,13 +616,13 @@ 
int mtk_drm_crtc_create(struct drm_device *drm_dev, return ret; } - ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], - mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : - NULL, pipe); + ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe); if (ret < 0) return ret; - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); + + if (gamma_lut_size) + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, gamma_lut_size); priv->num_pipes++; return 0; diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index ac491a781952..f690793ae2d5 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -638,13 +638,18 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, if (frac >= HDMI_FRAC_MAX_GXBB) return false; } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || - meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) || - meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { /* Empiric supported min/max dividers */ if (m < 106 || m > 247) return false; if (frac >= HDMI_FRAC_MAX_GXL) return false; + } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + /* Empiric supported min/max dividers */ + if (m < 106 || m > 247) + return false; + if (frac >= HDMI_FRAC_MAX_G12A) + return false; } return true; diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index 9ab27aecfcf3..1bd6b6d15ffb 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { }, }; +static const struct meson_cvbs_mode * +meson_cvbs_get_mode(const struct drm_display_mode *req_mode) +{ + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + if (drm_mode_match(req_mode, &meson_mode->mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return meson_mode; + } + + return NULL; +} + /* Connector */ static void meson_cvbs_connector_destroy(struct drm_connector *connector) @@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode)) - return 0; - } + if (meson_cvbs_get_mode(&crtc_state->mode)) + return 0; return -EINVAL; } @@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode); struct meson_venc_cvbs *meson_venc_cvbs = encoder_to_meson_venc_cvbs(encoder); struct meson_drm *priv = meson_venc_cvbs->priv; - int i; - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + if (meson_mode) { + meson_venci_cvbs_mode_set(priv, meson_mode->enci); - if (drm_mode_equal(mode, &meson_mode->mode)) { - meson_venci_cvbs_mode_set(priv, - meson_mode->enci); - - /* Setup 27MHz vclk2 for ENCI and VDAC */ - meson_vclk_setup(priv, 
MESON_VCLK_TARGET_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - MESON_VCLK_CVBS, true); - break; - } + /* Setup 27MHz vclk2 for ENCI and VDAC */ + meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, true); } } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index afd9119b6cf1..c96c4393b124 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -30,7 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400); static struct drm_driver driver; static const struct pci_device_id pciidlist[] = { - { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A }, + { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD}, { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B }, { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV }, { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB }, @@ -63,6 +64,35 @@ static const struct file_operations mgag200_driver_fops = { DRM_VRAM_MM_FILE_OPERATIONS }; +static bool mgag200_pin_bo_at_0(const struct mga_device *mdev) +{ + return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD; +} + +int mgag200_driver_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct mga_device *mdev = dev->dev_private; + unsigned long pg_align; + + if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized")) + return -EINVAL; + + pg_align = 0ul; + + /* + * Aligning scanout buffers to the size of the video ram forces + * placement at offset 0. Works around a bug where HW does not + * respect 'startadd' field. + */ + if (mgag200_pin_bo_at_0(mdev)) + pg_align = PFN_UP(mdev->mc.vram_size); + + return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, + pg_align, false, args); +} + static struct drm_driver driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, .load = mgag200_driver_load, @@ -74,7 +104,9 @@ static struct drm_driver driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - DRM_GEM_VRAM_DRIVER + .dumb_create = mgag200_driver_dumb_create, + .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, + .gem_prime_mmap = drm_gem_prime_mmap, }; static struct pci_driver mgag200_pci_driver = { diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 1c93f8dc08c7..7cc1a242df5f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -159,6 +159,12 @@ enum mga_type { G200_EW3, }; +/* HW does not handle 'startadd' field correctly.
*/ +#define MGAG200_FLAG_HW_BUG_NO_STARTADD (1ul << 8) + +#define MGAG200_TYPE_MASK (0x000000ff) +#define MGAG200_FLAG_MASK (0x00ffff00) + #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B) struct mga_device { @@ -188,6 +194,18 @@ struct mga_device { u32 unique_rev_id; }; +static inline enum mga_type +mgag200_type_from_driver_data(kernel_ulong_t driver_data) +{ + return (enum mga_type)(driver_data & MGAG200_TYPE_MASK); +} + +static inline unsigned long +mgag200_flags_from_driver_data(kernel_ulong_t driver_data) +{ + return driver_data & MGAG200_FLAG_MASK; +} + /* mgag200_mode.c */ int mgag200_modeset_init(struct mga_device *mdev); void mgag200_modeset_fini(struct mga_device *mdev); diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index a9773334dedf..388212b2d63f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -94,7 +94,8 @@ static int mgag200_device_init(struct drm_device *dev, struct mga_device *mdev = dev->dev_private; int ret, option; - mdev->type = flags; + mdev->flags = mgag200_flags_from_driver_data(flags); + mdev->type = mgag200_type_from_driver_data(flags); /* Hardcode the number of CRTCs to 1 */ mdev->num_crtc = 1; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e9c55d1d6c04..99cd6e62a971 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -726,11 +726,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu->funcs->flush(gpu, gpu->rb[0]); if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; - } else { - /* Print a warning so if we die, we know why */ + } else if (ret == -ENODEV) { + /* + * This device does not use zap shader (but print a warning + * just in case someone got their dt wrong.. hopefully they + * have a debug UART to realize the error of their ways... + * if you mess this up you are about to crash horribly) + */ dev_warn_once(gpu->dev->dev, "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); + } else { + return ret; } /* Last step - yield the ringbuffer */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index dc8ec2c94301..686c34d706b0 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -537,12 +537,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu) a6xx_flush(gpu, gpu->rb[0]); if (!a6xx_idle(gpu, gpu->rb[0])) return -EINVAL; - } else { - /* Print a warning so if we die, we know why */ + } else if (ret == -ENODEV) { + /* + * This device does not use zap shader (but print a warning + * just in case someone got their dt wrong.. hopefully they + * have a debug UART to realize the error of their ways... 
+ * if you mess this up you are about to crash horribly) + */ dev_warn_once(gpu->dev->dev, "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); ret = 0; + } else { + return ret; } out: diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e686331fa089..691c1a277d91 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, cxdbg = ioremap(res->start, resource_size(res)); if (cxdbg) { - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT, A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM, A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); } a6xx_state->debugbus = state_kcalloc(a6xx_state, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 24ab6249083a..6f420cc73dbd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = { INTERLEAVED_RGB_FMT(RGB565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(BGR565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c index 772f0753ed38..aaf2f26f8505 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c @@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder) if (mdp4_dsi_encoder->enabled) return; - mdp4_crtc_set_config(encoder->crtc, + mdp4_crtc_set_config(encoder->crtc, MDP4_DMA_CONFIG_PACK_ALIGN_MSB | MDP4_DMA_CONFIG_DEFLKR_EN | MDP4_DMA_CONFIG_DITHER_EN | diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index eb0b4b7dc7cc..03c6d6157e4d 100644 --- 
a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1112,8 +1112,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, msecs_to_jiffies(50)); if (ret == 0) - dev_warn(dev->dev, "pp done time out, lm=%d\n", - mdp5_cstate->pipeline.mixer->lm); + dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", + mdp5_cstate->pipeline.mixer->lm); } static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 271aa7bbca92..73127948f54d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) return num; } -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int id = dsi_mgr_connector_get_id(connector); @@ -479,6 +479,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; + struct msm_dsi_pll *src_pll; bool is_dual_dsi = IS_DUAL_DSI(); int ret; @@ -519,6 +520,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) id, ret); } + /* Save PLL status if it is a clock source */ + src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); + msm_dsi_pll_save_state(src_pll); + ret = msm_dsi_host_power_off(host); if (ret) pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 3522863a4984..21519229fe73 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) if (!phy || !phy->cfg->ops.disable) return; - /* Save PLL status if it is a clock source */ - if (phy->usecase != MSM_DSI_PHY_SLAVE) - msm_dsi_pll_save_state(phy->pll); - phy->cfg->ops.disable(phy); dsi_phy_regulator_disable(phy); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 8f6100db90ed..aa9385d5bfff 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) if (pll_10nm->slave) dsi_pll_enable_pll_bias(pll_10nm->slave); + rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0); + if (rc) { + pr_err("vco_set_rate failed, rc=%d\n", rc); + return rc; + } + /* Start PLL */ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x01); diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 6be879578140..1c74381a4fc9 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file) struct msm_gpu_show_priv *show_priv = m->private; struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_gpu *gpu = priv->gpu; - int ret; - - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex); - if (ret) - return ret; + mutex_lock(&show_priv->dev->struct_mutex); gpu->funcs->gpu_state_put(show_priv->state); mutex_unlock(&show_priv->dev->struct_mutex); diff --git
a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c84f0a8b3f2c..b73fbb65e14b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto err_msm_uninit; + if (!dev->dma_parms) { + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), + GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + } + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + msm_gem_shrinker_init(ddev); switch (get_mdp_ver(pdev)) { diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index a052364a5d74..edd45f434ccd 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -16,6 +16,7 @@ #include #include #include +#include /* * Power Management: diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h index 43df86c38f58..24f7700768da 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/atom.h +++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h @@ -114,6 +114,7 @@ struct nv50_head_atom { u8 nhsync:1; u8 nvsync:1; u8 depth:4; + u8 bpc; } or; /* Currently only used for MST */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b46be8a091e9..d735ea7e2d88 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder, * same size as the native one (e.g. different * refresh rate) */ - if (adjusted_mode->hdisplay == native_mode->hdisplay && - adjusted_mode->vdisplay == native_mode->vdisplay && - adjusted_mode->type & DRM_MODE_TYPE_DRIVER) + if (mode->hdisplay == native_mode->hdisplay && + mode->vdisplay == native_mode->vdisplay && + mode->type & DRM_MODE_TYPE_DRIVER) break; mode = native_mode; asyc->scaler.full = true; @@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct nouveau_connector *nv_connector = - nouveau_connector(conn_state->connector); - return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, - nv_connector->native_mode); + struct drm_connector *connector = conn_state->connector; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); + int ret; + + ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, + nv_connector->native_mode); + if (ret) + return ret; + + if (crtc_state->mode_changed || crtc_state->connectors_changed) + asyh->or.bpc = connector->display_info.bpc; + + return 0; } /****************************************************************************** @@ -770,32 +780,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, struct nv50_mstm *mstm = mstc->mstm; struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); int slots; + int ret; + + ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, + mstc->native); + if (ret) + return ret; + + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) + return 0; + + /* + * When restoring duplicated states, we need to make sure that the bw + * remains the same and avoid recalculating it, as the connector's bpc + * may have changed after the state was duplicated + */ + if (!state->duplicated) { + const int clock = crtc_state->adjusted_mode.clock; - if (crtc_state->mode_changed || crtc_state->connectors_changed) { /* - * When restoring duplicated states, we need to 
make sure that - * the bw remains the same and avoid recalculating it, as the - * connector's bpc may have changed after the state was - * duplicated + * XXX: Since we don't use HDR in userspace quite yet, limit + * the bpc to 8 to save bandwidth on the topology. In the + * future, we'll want to properly fix this by dynamically + * selecting the highest possible bpc that would fit in the + * topology */ - if (!state->duplicated) { - const int bpp = connector->display_info.bpc * 3; - const int clock = crtc_state->adjusted_mode.clock; - - asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp); - } - - slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, - mstc->port, - asyh->dp.pbn); - if (slots < 0) - return slots; - - asyh->dp.tu = slots; + asyh->or.bpc = min(connector->display_info.bpc, 8U); + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3); } - return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, - mstc->native); + slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, + asyh->dp.pbn); + if (slots < 0) + return slots; + + asyh->dp.tu = slots; + + return 0; +} + +static u8 +nv50_dp_bpc_to_depth(unsigned int bpc) +{ + switch (bpc) { + case 6: return 0x2; + case 8: return 0x5; + case 10: /* fall-through */ + default: return 0x6; + } } static void @@ -808,7 +840,7 @@ nv50_msto_enable(struct drm_encoder *encoder) struct nv50_mstm *mstm = NULL; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - u8 proto, depth; + u8 proto; bool r; drm_connector_list_iter_begin(encoder->dev, &conn_iter); @@ -837,14 +869,8 @@ nv50_msto_enable(struct drm_encoder *encoder) else proto = 0x9; - switch (mstc->connector.display_info.bpc) { - case 6: depth = 0x2; break; - case 8: depth = 0x5; break; - case 10: - default: depth = 0x6; break; - } - - mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth); + mstm->outp->update(mstm->outp, head->base.index, armh, proto, + nv50_dp_bpc_to_depth(armh->or.bpc)); msto->head = head; msto->mstc = mstc; @@ -1498,20 +1524,14 @@ nv50_sor_enable(struct drm_encoder *encoder) lvds.lvds.script |= 0x0200; } - if (nv_connector->base.display_info.bpc == 8) + if (asyh->or.bpc == 8) lvds.lvds.script |= 0x0200; } nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds)); break; case DCB_OUTPUT_DP: - if (nv_connector->base.display_info.bpc == 6) - depth = 0x2; - else - if (nv_connector->base.display_info.bpc == 8) - depth = 0x5; - else - depth = 0x6; + depth = nv50_dp_bpc_to_depth(asyh->or.bpc); if (nv_encoder->link & 1) proto = 0x8; @@ -1662,7 +1682,7 @@ nv50_pior_enable(struct drm_encoder *encoder) nv50_outp_acquire(nv_encoder); nv_connector = nouveau_encoder_connector_get(nv_encoder); - switch (nv_connector->base.display_info.bpc) { + switch (asyh->or.bpc) { case 10: asyh->or.depth = 0x6; break; case 8: asyh->or.depth = 0x5; break; case 6: asyh->or.depth = 0x2; break; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 71c23bf1fe25..c9692df2b76c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh, struct nv50_head_atom *asyh, struct nouveau_conn_atom *asyc) { - struct drm_connector *connector = asyc->state.connector; u32 mode = 0x00; if (asyc->dither.mode == DITHERING_MODE_AUTO) { - if (asyh->base.depth > connector->display_info.bpc * 3) + if (asyh->base.depth > asyh->or.bpc * 3) mode = DITHERING_MODE_DYNAMIC2X2; } else { mode = 
asyc->dither.mode; } if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { - if (connector->display_info.bpc >= 8) + if (asyh->or.bpc >= 8) mode |= DITHERING_DEPTH_8BPC; } else { mode |= asyc->dither.depth; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 5193b6257061..b856e87574fd 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -451,6 +451,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) asyw->clr.ntfy = armw->ntfy.handle != 0; asyw->clr.sema = armw->sema.handle != 0; asyw->clr.xlut = armw->xlut.handle != 0; + if (asyw->clr.xlut && asyw->visible) + asyw->set.xlut = asyw->xlut.handle != 0; asyw->clr.csc = armw->csc.valid; if (wndw->func->image_clr) asyw->clr.image = armw->image.handle[0] != 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 94dfa2e5a9ab..eb31c5b6c8e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector) void nouveau_conn_reset(struct drm_connector *connector) { + struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_conn_atom *asyc; - if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) - return; + if (drm_drv_uses_atomic_modeset(connector->dev)) { + if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) + return; + + if (connector->state) + nouveau_conn_atomic_destroy_state(connector, + connector->state); + + __drm_atomic_helper_connector_reset(connector, &asyc->state); + } else { + asyc = &nv_connector->properties_state; + } - if (connector->state) - nouveau_conn_atomic_destroy_state(connector, connector->state); - __drm_atomic_helper_connector_reset(connector, &asyc->state); asyc->dither.mode = DITHERING_MODE_AUTO; asyc->dither.depth = DITHERING_DEPTH_AUTO; asyc->scaler.mode = DRM_MODE_SCALE_NONE; @@ -276,8 +284,14 @@ void nouveau_conn_attach_properties(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state); struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_conn_atom *armc; + + if (drm_drv_uses_atomic_modeset(connector->dev)) + armc = nouveau_conn_atom(connector->state); + else + armc = &nv_connector->properties_state; /* Init DVI-I specific properties. 
*/ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) @@ -749,9 +763,9 @@ static int nouveau_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state); struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct nouveau_conn_atom *asyc = &nv_connector->properties_state; struct drm_encoder *encoder = to_drm_encoder(nv_encoder); int ret; @@ -1131,6 +1145,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const char *name = connector->name; struct nouveau_encoder *nv_encoder; int ret; + bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); + + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { + NV_DEBUG(drm, "service %s\n", name); + drm_dp_cec_irq(&nv_connector->aux); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) + nv50_mstm_service(nv_encoder->dp.mstm); + + return NVIF_NOTIFY_KEEP; + } ret = pm_runtime_get(drm->dev->dev); if (ret == 0) { @@ -1151,25 +1175,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) return NVIF_NOTIFY_DROP; } - if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { - NV_DEBUG(drm, "service %s\n", name); - drm_dp_cec_irq(&nv_connector->aux); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) - nv50_mstm_service(nv_encoder->dp.mstm); - } else { - bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); - + if (!plugged) + drm_dp_cec_unset_edid(&nv_connector->aux); + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { if (!plugged) - drm_dp_cec_unset_edid(&nv_connector->aux); - NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { - if (!plugged) - nv50_mstm_remove(nv_encoder->dp.mstm); - } - - drm_helper_hpd_irq_event(connector->dev); + nv50_mstm_remove(nv_encoder->dp.mstm); } + drm_helper_hpd_irq_event(connector->dev); + pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index f43a8d63aef8..de84fb4708c7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -29,6 +29,7 @@ #include +#include #include #include #include @@ -44,6 +45,60 @@ struct dcb_output; struct nouveau_backlight; #endif +#define nouveau_conn_atom(p) \ + container_of((p), struct nouveau_conn_atom, state) + +struct nouveau_conn_atom { + struct drm_connector_state state; + + struct { + /* The enum values specifically defined here match nv50/gf119 + * hw values, and the code relies on this. 
+ */ + enum { + DITHERING_MODE_OFF = 0x00, + DITHERING_MODE_ON = 0x01, + DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, + DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, + DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, + DITHERING_MODE_AUTO + } mode; + enum { + DITHERING_DEPTH_6BPC = 0x00, + DITHERING_DEPTH_8BPC = 0x02, + DITHERING_DEPTH_AUTO + } depth; + } dither; + + struct { + int mode; /* DRM_MODE_SCALE_* */ + struct { + enum { + UNDERSCAN_OFF, + UNDERSCAN_ON, + UNDERSCAN_AUTO, + } mode; + u32 hborder; + u32 vborder; + } underscan; + bool full; + } scaler; + + struct { + int color_vibrance; + int vibrant_hue; + } procamp; + + union { + struct { + bool dither:1; + bool scaler:1; + bool procamp:1; + }; + u8 mask; + } set; +}; + struct nouveau_connector { struct drm_connector base; enum dcb_connector_type type; @@ -63,6 +118,12 @@ struct nouveau_connector { #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT struct nouveau_backlight *backlight; #endif + /* + * Our connector property code expects a nouveau_conn_atom struct + * even on pre-nv50 where we do not support atomic. This embedded + * version gets used in the non atomic modeset case. + */ + struct nouveau_conn_atom properties_state; }; static inline struct nouveau_connector *nouveau_connector( @@ -121,61 +182,6 @@ extern int nouveau_ignorelid; extern int nouveau_duallink; extern int nouveau_hdmimhz; -#include -#define nouveau_conn_atom(p) \ - container_of((p), struct nouveau_conn_atom, state) - -struct nouveau_conn_atom { - struct drm_connector_state state; - - struct { - /* The enum values specifically defined here match nv50/gf119 - * hw values, and the code relies on this. - */ - enum { - DITHERING_MODE_OFF = 0x00, - DITHERING_MODE_ON = 0x01, - DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, - DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, - DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, - DITHERING_MODE_AUTO - } mode; - enum { - DITHERING_DEPTH_6BPC = 0x00, - DITHERING_DEPTH_8BPC = 0x02, - DITHERING_DEPTH_AUTO - } depth; - } dither; - - struct { - int mode; /* DRM_MODE_SCALE_* */ - struct { - enum { - UNDERSCAN_OFF, - UNDERSCAN_ON, - UNDERSCAN_AUTO, - } mode; - u32 hborder; - u32 vborder; - } underscan; - bool full; - } scaler; - - struct { - int color_vibrance; - int vibrant_hue; - } procamp; - - union { - struct { - bool dither:1; - bool scaler:1; - bool procamp:1; - }; - u8 mask; - } set; -}; - void nouveau_conn_attach_properties(struct drm_connector *); void nouveau_conn_reset(struct drm_connector *); struct drm_connector_state * diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index fa1439941596..0ad5d87b5a8e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, unsigned long c, i; int ret = -ENOMEM; - args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL); + args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL); if (!args.src) goto out; - args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL); + args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL); if (!args.dst) goto out_free_src; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 9118df035b28..70bb6bb97af8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -156,7 +156,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) fence = list_entry(fctx->pending.next, typeof(*fence), head); 
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); - if (nouveau_fence_update(fence->channel, fctx)) + if (nouveau_fence_update(chan, fctx)) ret = NVIF_NOTIFY_DROP; } spin_unlock_irqrestore(&fctx->lock, flags); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index f0daf958e03a..621d28f094bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); - struct nouveau_mem *mem; int ret; if (drm->client.device.info.ram_size == 0) return -ENOMEM; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); - mem = nouveau_mem(reg); if (ret) return ret; @@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); - struct nouveau_mem *mem; int ret; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); - mem = nouveau_mem(reg); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c index e85a08ecd9da..4cc186262d34 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c @@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, } refcount_set(&tags->refcount, 1); + *ptags = memory->tags = tags; mutex_unlock(&fb->subdev.mutex); - *ptags = tags; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c index bcf32d92ee5a..50e3539f33d2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c @@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug) if (debug > subdev->debug) return; + if (!mthd) + return; for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) { u32 base = chan->head * mthd->addr; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c index 500cb08dd608..b57ab5cea9a1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c @@ -143,23 +143,24 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, nent = (fuc.size / sizeof(struct gk20a_fw_av)); - pack = vzalloc((sizeof(*pack) * max_classes) + - (sizeof(*init) * (nent + 1))); + pack = vzalloc((sizeof(*pack) * (max_classes + 1)) + + (sizeof(*init) * (nent + max_classes + 1))); if (!pack) { ret = -ENOMEM; goto end; } - init = (void *)(pack + max_classes); + init = (void *)(pack + max_classes + 1); - for (i = 0; i < nent; i++) { - struct gf100_gr_init *ent = &init[i]; + for (i = 0; i < nent; i++, init++) { struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; u32 class = av->addr & 0xffff; u32 addr = (av->addr & 0xffff0000) >> 14; if (prevclass != class) { - pack[classidx].init = ent; + if (prevclass) /* Add terminator to the method list. 
*/ + init++; + pack[classidx].init = init; pack[classidx].type = class; prevclass = class; if (++classidx >= max_classes) { @@ -169,10 +170,10 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, } } - ent->addr = addr; - ent->data = av->data; - ent->count = 1; - ent->pitch = 1; + init->addr = addr; + init->data = av->data; + init->count = 1; + init->pitch = 1; } *ppack = pack; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index ca251560d3e0..bb4a4266897c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev) struct nvkm_fault *fault = nvkm_fault(subdev); int i; + nvkm_notify_fini(&fault->nrpfb); nvkm_event_fini(&fault->event); for (i = 0; i < fault->buffer_nr; i++) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c index df8b919dcf09..ace6fefba428 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c @@ -108,6 +108,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index, struct gm200_secboot *gsb; struct nvkm_acr *acr; + *psb = NULL; acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | BIT(NVKM_SECBOOT_FALCON_PMU)); if (IS_ERR(acr)) @@ -116,10 +117,8 @@ gm20b_secboot_new(struct nvkm_device *device, int index, acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU); gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; + if (!gsb) return -ENOMEM; - } *psb = &gsb->base; ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base); diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index ad47cc95459e..bf5fcc3e5379 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) static int panel_lvds_probe(struct platform_device *pdev) { struct panel_lvds *lvds; - struct device_node *np; int ret; lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL); @@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev) return ret; } - np = of_parse_phandle(lvds->dev->of_node, "backlight", 0); - if (np) { - lvds->backlight = of_find_backlight_by_node(np); - of_node_put(np); - - if (!lvds->backlight) - return -EPROBE_DEFER; - } + lvds->backlight = devm_of_find_backlight(lvds->dev); + if (IS_ERR(lvds->backlight)) + return PTR_ERR(lvds->backlight); /* * TODO: Handle all power supplies specified in the DT node in a generic @@ -266,14 +260,10 @@ static int panel_lvds_probe(struct platform_device *pdev) ret = drm_panel_add(&lvds->panel); if (ret < 0) - goto error; + return ret; dev_set_drvdata(lvds->dev, lvds); return 0; - -error: - put_device(&lvds->backlight->dev); - return ret; } static int panel_lvds_remove(struct platform_device *pdev) @@ -284,9 +274,6 @@ static int panel_lvds_remove(struct platform_device *pdev) panel_lvds_disable(&lvds->panel); - if (lvds->backlight) - put_device(&lvds->backlight->dev); - return 0; } diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index b5b14aa059ea..2aa89eaecf6f 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -426,6 +426,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, return 
PTR_ERR(ts->dsi); } + drm_panel_init(&ts->base); ts->base.dev = dev; ts->base.funcs = &rpi_touchscreen_funcs; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 28fa6ba7b767..8abb31f83ffc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2048,6 +2048,40 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = { .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; +static const struct drm_display_mode logicpd_type_28_mode = { + .clock = 9000, + .hdisplay = 480, + .hsync_start = 480 + 3, + .hsync_end = 480 + 3 + 42, + .htotal = 480 + 3 + 42 + 2, + + .vdisplay = 272, + .vsync_start = 272 + 2, + .vsync_end = 272 + 2 + 11, + .vtotal = 272 + 2 + 11 + 3, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static const struct panel_desc logicpd_type_28 = { + .modes = &logicpd_type_28_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 105, + .height = 67, + }, + .delay = { + .prepare = 200, + .enable = 200, + .unprepare = 200, + .disable = 200, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE, +}; + static const struct panel_desc mitsubishi_aa070mc01 = { .modes = &mitsubishi_aa070mc01_mode, .num_modes = 1, @@ -3264,6 +3298,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "lg,lp129qe", .data = &lg_lp129qe, + }, { + .compatible = "logicpd,type28", + .data = &logicpd_type_28, }, { .compatible = "mitsubishi,aa070mc01-ca1", .data = &mitsubishi_aa070mc01, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 5e3e92ea9ea6..3b2612ae931e 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -381,6 +381,7 @@ static int st7789v_probe(struct spi_device *spi) spi_set_drvdata(spi, ctx); ctx->spi = spi; + drm_panel_init(&ctx->panel); ctx->panel.dev = &spi->dev; ctx->panel.funcs = &st7789v_drm_funcs; diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 12ff77dacc95..c1eb8cfe6aeb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -53,8 +53,10 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, if (err) { dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, err); - regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt, - pfdev->devfreq.cur_volt); + if (pfdev->regulator) + regulator_set_voltage(pfdev->regulator, + pfdev->devfreq.cur_volt, + pfdev->devfreq.cur_volt); return err; } diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index f21bc8a7ee3a..f57dd195dfb8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file) { + struct panfrost_file_priv *priv = file->driver_priv; struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; + struct panfrost_gem_mapping *mapping; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, if (IS_ERR(bo)) return 
PTR_ERR(bo); - args->offset = bo->node.start << PAGE_SHIFT; + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + drm_gem_object_put_unlocked(&bo->base.base); + return -EINVAL; + } + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } @@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev, struct drm_panfrost_submit *args, struct panfrost_job *job) { + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo; + unsigned int i; + int ret; + job->bo_count = args->bo_handle_count; if (!job->bo_count) @@ -130,9 +144,33 @@ panfrost_lookup_bos(struct drm_device *dev, if (!job->implicit_fences) return -ENOMEM; - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)args->bo_handles, - job->bo_count, &job->bos); + ret = drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)args->bo_handles, + job->bo_count, &job->bos); + if (ret) + return ret; + + job->mappings = kvmalloc_array(job->bo_count, + sizeof(struct panfrost_gem_mapping *), + GFP_KERNEL | __GFP_ZERO); + if (!job->mappings) + return -ENOMEM; + + for (i = 0; i < job->bo_count; i++) { + struct panfrost_gem_mapping *mapping; + + bo = to_panfrost_bo(job->bos[i]); + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + ret = -EINVAL; + break; + } + + atomic_inc(&bo->gpu_usecount); + job->mappings[i] = mapping; + } + + return ret; } /** @@ -303,21 +341,26 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data, } /* Don't allow mmapping of heap objects as pages are not pinned. */ - if (to_panfrost_bo(gem_obj)->is_heap) - return -EINVAL; + if (to_panfrost_bo(gem_obj)->is_heap) { + ret = -EINVAL; + goto out; + } ret = drm_gem_create_mmap_offset(gem_obj); if (ret == 0) args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_put_unlocked(gem_obj); +out: + drm_gem_object_put_unlocked(gem_obj); return ret; } static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_get_bo_offset *args = data; + struct panfrost_gem_mapping *mapping; struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; @@ -328,18 +371,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, } bo = to_panfrost_bo(gem_obj); - args->offset = bo->node.start << PAGE_SHIFT; - + mapping = panfrost_gem_mapping_get(bo, priv); drm_gem_object_put_unlocked(gem_obj); + + if (!mapping) + return -EINVAL; + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_madvise *args = data; struct panfrost_device *pfdev = dev->dev_private; struct drm_gem_object *gem_obj; + struct panfrost_gem_object *bo; + int ret = 0; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { @@ -347,23 +398,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, return -ENOENT; } + bo = to_panfrost_bo(gem_obj); + + mutex_lock(&pfdev->shrinker_lock); + mutex_lock(&bo->mappings.lock); + if (args->madv == PANFROST_MADV_DONTNEED) { + struct panfrost_gem_mapping *first; + + first = list_first_entry(&bo->mappings.list, + struct panfrost_gem_mapping, + node); + + /* + * If we want to mark the BO purgeable, there must be only one + * 
user: the caller FD. + * We could do something smarter and mark the BO purgeable only + * when all its users have marked it purgeable, but globally + * visible/shared BOs are likely to never be marked purgeable + * anyway, so let's not bother. + */ + if (!list_is_singular(&bo->mappings.list) || + WARN_ON_ONCE(first->mmu != &priv->mmu)) { + ret = -EINVAL; + goto out_unlock_mappings; + } + } + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); if (args->retained) { - struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj); - - mutex_lock(&pfdev->shrinker_lock); - if (args->madv == PANFROST_MADV_DONTNEED) - list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list); + list_add_tail(&bo->base.madv_list, + &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); - - mutex_unlock(&pfdev->shrinker_lock); } +out_unlock_mappings: + mutex_unlock(&bo->mappings.lock); + mutex_unlock(&pfdev->shrinker_lock); + drm_gem_object_put_unlocked(gem_obj); - return 0; + return ret; } int panfrost_unstable_ioctl_check(void) @@ -443,7 +519,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) { struct panfrost_file_priv *panfrost_priv = file->driver_priv; - panfrost_perfcnt_close(panfrost_priv); + panfrost_perfcnt_close(file); panfrost_job_close(panfrost_priv); panfrost_mmu_pgtable_free(panfrost_priv); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index acb07fe06580..77c3a3855c68 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -19,6 +19,22 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_device *pfdev = obj->dev->dev_private; + /* + * Make sure the BO is no longer inserted in the shrinker list before + * taking care of the destruction itself. If we don't do that we have a + * race condition between this function and what's done in + * panfrost_gem_shrinker_scan(). + */ + mutex_lock(&pfdev->shrinker_lock); + list_del_init(&bo->base.madv_list); + mutex_unlock(&pfdev->shrinker_lock); + + /* + * If we still have mappings attached to the BO, there's a problem in + * our refcounting. 
+ */ + WARN_ON_ONCE(!list_empty(&bo->mappings.list)); + if (bo->sgts) { int i; int n_sgt = bo->base.base.size / SZ_2M; @@ -33,15 +49,73 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) kfree(bo->sgts); } - mutex_lock(&pfdev->shrinker_lock); - if (!list_empty(&bo->base.madv_list)) - list_del(&bo->base.madv_list); - mutex_unlock(&pfdev->shrinker_lock); - drm_gem_shmem_free_object(obj); } -static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv) +{ + struct panfrost_gem_mapping *iter, *mapping = NULL; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + kref_get(&iter->refcount); + mapping = iter; + break; + } + } + mutex_unlock(&bo->mappings.lock); + + return mapping; +} + +static void +panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) +{ + struct panfrost_file_priv *priv; + + if (mapping->active) + panfrost_mmu_unmap(mapping); + + priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); + spin_lock(&priv->mm_lock); + if (drm_mm_node_allocated(&mapping->mmnode)) + drm_mm_remove_node(&mapping->mmnode); + spin_unlock(&priv->mm_lock); +} + +static void panfrost_gem_mapping_release(struct kref *kref) +{ + struct panfrost_gem_mapping *mapping; + + mapping = container_of(kref, struct panfrost_gem_mapping, refcount); + + panfrost_gem_teardown_mapping(mapping); + drm_gem_object_put_unlocked(&mapping->obj->base.base); + kfree(mapping); +} + +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) +{ + if (!mapping) + return; + + kref_put(&mapping->refcount, panfrost_gem_mapping_release); +} + +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +{ + struct panfrost_gem_mapping *mapping; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(mapping, &bo->mappings.list, node) + panfrost_gem_teardown_mapping(mapping); + mutex_unlock(&bo->mappings.lock); +} + +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) { int ret; size_t size = obj->size; @@ -49,6 +123,16 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p struct panfrost_gem_object *bo = to_panfrost_bo(obj); unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0; struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_mapping *mapping; + + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOMEM; + + INIT_LIST_HEAD(&mapping->node); + kref_init(&mapping->refcount); + drm_gem_object_get(obj); + mapping->obj = bo; /* * Executable buffers cannot cross a 16MB boundary as the program @@ -61,37 +145,48 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p else align = size >= SZ_2M ? 
SZ_2M >> PAGE_SHIFT : 0; - bo->mmu = &priv->mmu; + mapping->mmu = &priv->mmu; spin_lock(&priv->mm_lock); - ret = drm_mm_insert_node_generic(&priv->mm, &bo->node, + ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, size >> PAGE_SHIFT, align, color, 0); spin_unlock(&priv->mm_lock); if (ret) - return ret; + goto err; if (!bo->is_heap) { - ret = panfrost_mmu_map(bo); - if (ret) { - spin_lock(&priv->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); - } + ret = panfrost_mmu_map(mapping); + if (ret) + goto err; } + + mutex_lock(&bo->mappings.lock); + WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED); + list_add_tail(&mapping->node, &bo->mappings.list); + mutex_unlock(&bo->mappings.lock); + +err: + if (ret) + panfrost_gem_mapping_put(mapping); return ret; } -static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) +void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) { - struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + struct panfrost_gem_mapping *mapping = NULL, *iter; - if (bo->is_mapped) - panfrost_mmu_unmap(bo); + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + mapping = iter; + list_del(&iter->node); + break; + } + } + mutex_unlock(&bo->mappings.lock); - spin_lock(&priv->mm_lock); - if (drm_mm_node_allocated(&bo->node)) - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); + panfrost_gem_mapping_put(mapping); } static int panfrost_gem_pin(struct drm_gem_object *obj) @@ -131,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t if (!obj) return NULL; + INIT_LIST_HEAD(&obj->mappings.list); + mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; return &obj->base.base; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 50920819cc16..b3517ff9630c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -13,23 +13,52 @@ struct panfrost_gem_object { struct drm_gem_shmem_object base; struct sg_table *sgts; - struct panfrost_mmu *mmu; - struct drm_mm_node node; - bool is_mapped :1; + /* + * Use a list for now. If searching a mapping ever becomes the + * bottleneck, we should consider using an RB-tree, or even better, + * let the core store drm_gem_object_mapping entries (where we + * could place driver specific data) instead of drm_gem_object ones + * in its drm_file->object_idr table. + * + * struct drm_gem_object_mapping { + * struct drm_gem_object *obj; + * void *driver_priv; + * }; + */ + struct { + struct list_head list; + struct mutex lock; + } mappings; + + /* + * Count the number of jobs referencing this BO so we don't let the + * shrinker reclaim this object prematurely. 
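The mappings-list-plus-kref pattern above boils down to: search the mutex-protected list, take a reference while the lock is still held, and let the last put free the entry. A minimal user-space model of that discipline (illustrative names only, not the driver's actual types):

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

struct mapping {
	struct mapping *next;
	const void *owner;	/* plays the role of mapping->mmu */
	int refcount;		/* plays the role of the kref */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mapping *mappings;

/* Like panfrost_gem_mapping_get(): the reference is taken while the
 * list lock is held, so a concurrent release cannot free the entry
 * between "found it" and "own it". */
static struct mapping *mapping_get(const void *owner)
{
	struct mapping *m, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (m = mappings; m; m = m->next) {
		if (m->owner == owner) {
			m->refcount++;
			found = m;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return found;
}

/* Like panfrost_gem_mapping_put(): the last reference frees the entry
 * (list unlinking elided for brevity). */
static void mapping_put(struct mapping *m)
{
	pthread_mutex_lock(&list_lock);
	if (m && --m->refcount == 0)
		free(m);
	pthread_mutex_unlock(&list_lock);
}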
+ */ + atomic_t gpu_usecount; + bool noexec :1; bool is_heap :1; }; +struct panfrost_gem_mapping { + struct list_head node; + struct kref refcount; + struct panfrost_gem_object *obj; + struct drm_mm_node mmnode; + struct panfrost_mmu *mmu; + bool active :1; +}; + static inline struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj) { return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); } -static inline -struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node) +static inline struct panfrost_gem_mapping * +drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node) { - return container_of(node, struct panfrost_gem_object, node); + return container_of(node, struct panfrost_gem_mapping, mmnode); } struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); @@ -45,6 +74,16 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv, u32 flags, uint32_t *handle); +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); +void panfrost_gem_close(struct drm_gem_object *obj, + struct drm_file *file_priv); + +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv); +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); + void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 458f0fa68111..288e46c40673 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -39,11 +39,15 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc static bool panfrost_gem_purge(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + + if (atomic_read(&bo->gpu_usecount)) + return false; if (!mutex_trylock(&shmem->pages_lock)) return false; - panfrost_mmu_unmap(to_panfrost_bo(obj)); + panfrost_gem_teardown_mappings(bo); drm_gem_shmem_purge_locked(obj); mutex_unlock(&shmem->pages_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 21f34d44aac2..9f770d454684 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -269,9 +269,25 @@ static void panfrost_job_cleanup(struct kref *ref) dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); + if (job->mappings) { + for (i = 0; i < job->bo_count; i++) { + if (!job->mappings[i]) + break; + + atomic_dec(&job->mappings[i]->obj->gpu_usecount); + panfrost_gem_mapping_put(job->mappings[i]); + } + kvfree(job->mappings); + } + if (job->bos) { - for (i = 0; i < job->bo_count; i++) + struct panfrost_gem_object *bo; + + for (i = 0; i < job->bo_count; i++) { + bo = to_panfrost_bo(job->bos[i]); drm_gem_object_put_unlocked(job->bos[i]); + } + kvfree(job->bos); } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 62454128a792..bbd3ba97ff67 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -32,6 +32,7 @@ struct panfrost_job { /* Exclusive fences we have taken from the BOs to wait for */ struct dma_fence **implicit_fences; + struct panfrost_gem_mapping 
**mappings; struct drm_gem_object **bos; u32 bo_count; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index a3ed64a1f15e..5d75f8cf6477 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) as = mmu->as; if (as >= 0) { int en = atomic_inc_return(&mmu->as_count); - WARN_ON(en >= NUM_JOB_SLOTS); + + /* + * AS can be retained by active jobs or a perfcnt context, + * hence the '+ 1' here. + */ + WARN_ON(en >= (NUM_JOB_SLOTS + 1)); list_move(&mmu->list, &pfdev->as_lru_list); goto out; @@ -269,14 +274,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, return 0; } -int panfrost_mmu_map(struct panfrost_gem_object *bo) +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE; - if (WARN_ON(bo->is_mapped)) + if (WARN_ON(mapping->active)) return 0; if (bo->noexec) @@ -286,25 +292,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt); - bo->is_mapped = true; + mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, + prot, sgt); + mapping->active = true; return 0; } -void panfrost_mmu_unmap(struct panfrost_gem_object *bo) +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); - struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops; - u64 iova = bo->node.start << PAGE_SHIFT; - size_t len = bo->node.size << PAGE_SHIFT; + struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops; + u64 iova = mapping->mmnode.start << PAGE_SHIFT; + size_t len = mapping->mmnode.size << PAGE_SHIFT; size_t unmapped_len = 0; - if (WARN_ON(!bo->is_mapped)) + if (WARN_ON(!mapping->active)) return; - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len); + dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", + mapping->mmu->as, iova, len); while (unmapped_len < len) { size_t unmapped_page; @@ -318,8 +327,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) unmapped_len += pgsize; } - panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len); - bo->is_mapped = false; + panfrost_mmu_flush_range(pfdev, mapping->mmu, + mapping->mmnode.start << PAGE_SHIFT, len); + mapping->active = false; } static void mmu_tlb_inv_context_s1(void *cookie) @@ -394,10 +404,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) free_io_pgtable_ops(mmu->pgtbl_ops); } -static struct panfrost_gem_object * -addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) +static struct panfrost_gem_mapping * +addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) { - struct panfrost_gem_object *bo = NULL; + struct panfrost_gem_mapping *mapping = NULL; struct panfrost_file_priv *priv; struct drm_mm_node *node; u64 offset = addr >> PAGE_SHIFT; @@ -418,8 +428,9 @@ found_mmu: drm_mm_for_each_node(node, &priv->mm) { if (offset >= node->start && offset < (node->start + node->size)) { - bo = drm_mm_node_to_panfrost_bo(node); - 
drm_gem_object_get(&bo->base.base); + mapping = drm_mm_node_to_panfrost_mapping(node); + + kref_get(&mapping->refcount); break; } } @@ -427,7 +438,7 @@ found_mmu: spin_unlock(&priv->mm_lock); out: spin_unlock(&pfdev->as_lock); - return bo; + return mapping; } #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) @@ -436,28 +447,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) { int ret, i; + struct panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; pgoff_t page_offset; struct sg_table *sgt; struct page **pages; - bo = addr_to_drm_mm_node(pfdev, as, addr); - if (!bo) + bomapping = addr_to_mapping(pfdev, as, addr); + if (!bomapping) return -ENOENT; + bo = bomapping->obj; if (!bo->is_heap) { dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", - bo->node.start << PAGE_SHIFT); + bomapping->mmnode.start << PAGE_SHIFT); ret = -EINVAL; goto err_bo; } - WARN_ON(bo->mmu->as != as); + WARN_ON(bomapping->mmu->as != as); /* Assume 2MB alignment and size multiple */ addr &= ~((u64)SZ_2M - 1); page_offset = addr >> PAGE_SHIFT; - page_offset -= bo->node.start; + page_offset -= bomapping->mmnode.start; mutex_lock(&bo->base.pages_lock); @@ -509,13 +522,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_map; } - mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + mmu_map_sg(pfdev, bomapping->mmu, addr, + IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); - bo->is_mapped = true; + bomapping->active = true; dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); - drm_gem_object_put_unlocked(&bo->base.base); + panfrost_gem_mapping_put(bomapping); return 0; @@ -587,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) source_id = (fault_status >> 16); /* Page fault only */ - if ((status & mask) == BIT(i)) { - WARN_ON(exception_type < 0xC1 || exception_type > 0xC4); - + ret = -1; + if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0) ret = panfrost_mmu_map_fault_addr(pfdev, i, addr); - if (!ret) { - mmu_write(pfdev, MMU_INT_CLEAR, BIT(i)); - status &= ~mask; - continue; - } - } - /* terminal fault, print info about the fault */ - dev_err(pfdev->dev, - "Unhandled Page fault in AS%d at VA 0x%016llX\n" - "Reason: %s\n" - "raw fault status: 0x%X\n" - "decoded fault status: %s\n" - "exception type 0x%X: %s\n" - "access type 0x%X: %s\n" - "source id 0x%X\n", - i, addr, - "TODO", - fault_status, - (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), - exception_type, panfrost_exception_name(pfdev, exception_type), - access_type, access_type_name(pfdev, fault_status), - source_id); + if (ret) + /* terminal fault, print info about the fault */ + dev_err(pfdev->dev, + "Unhandled Page fault in AS%d at VA 0x%016llX\n" + "Reason: %s\n" + "raw fault status: 0x%X\n" + "decoded fault status: %s\n" + "exception type 0x%X: %s\n" + "access type 0x%X: %s\n" + "source id 0x%X\n", + i, addr, + "TODO", + fault_status, + (fault_status & (1 << 10) ? 
"DECODER FAULT" : "SLAVE FAULT"), + exception_type, panfrost_exception_name(pfdev, exception_type), + access_type, access_type_name(pfdev, fault_status), + source_id); mmu_write(pfdev, MMU_INT_CLEAR, mask); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 7c5b6775ae23..44fc2edf63ce 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -4,12 +4,12 @@ #ifndef __PANFROST_MMU_H__ #define __PANFROST_MMU_H__ -struct panfrost_gem_object; +struct panfrost_gem_mapping; struct panfrost_file_priv; struct panfrost_mmu; -int panfrost_mmu_map(struct panfrost_gem_object *bo); -void panfrost_mmu_unmap(struct panfrost_gem_object *bo); +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping); +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); int panfrost_mmu_init(struct panfrost_device *pfdev); void panfrost_mmu_fini(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 2dba192bf198..6913578d5aa7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -25,7 +25,7 @@ #define V4_SHADERS_PER_COREGROUP 4 struct panfrost_perfcnt { - struct panfrost_gem_object *bo; + struct panfrost_gem_mapping *mapping; size_t bosize; void *buf; struct panfrost_file_priv *user; @@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) int ret; reinit_completion(&pfdev->perfcnt->dump_comp); - gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT; + gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva); gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32); gpu_write(pfdev, GPU_INT_CLEAR, @@ -67,12 +67,13 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) } static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, - struct panfrost_file_priv *user, + struct drm_file *file_priv, unsigned int counterset) { + struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_gem_shmem_object *bo; - u32 cfg; + u32 cfg, as; int ret; if (user == perfcnt->user) @@ -88,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (IS_ERR(bo)) return PTR_ERR(bo); - perfcnt->bo = to_panfrost_bo(&bo->base); - /* Map the perfcnt buf in the address space attached to file_priv. */ - ret = panfrost_mmu_map(perfcnt->bo); + ret = panfrost_gem_open(&bo->base, file_priv); if (ret) goto err_put_bo; + perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base), + user); + if (!perfcnt->mapping) { + ret = -EINVAL; + goto err_close_bo; + } + perfcnt->buf = drm_gem_shmem_vmap(&bo->base); if (IS_ERR(perfcnt->buf)) { ret = PTR_ERR(perfcnt->buf); - goto err_put_bo; + goto err_put_mapping; } /* @@ -120,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, perfcnt->user = user; - /* - * Always use address space 0 for now. - * FIXME: this needs to be updated when we start using different - * address space. 
- */ - cfg = GPU_PERFCNT_CFG_AS(0) | + as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); + cfg = GPU_PERFCNT_CFG_AS(as) | GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL); /* @@ -153,18 +155,26 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186)) gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff); + /* The BO ref is retained by the mapping. */ + drm_gem_object_put_unlocked(&bo->base); + return 0; err_vunmap: - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); +err_put_mapping: + panfrost_gem_mapping_put(perfcnt->mapping); +err_close_bo: + panfrost_gem_close(&bo->base, file_priv); err_put_bo: drm_gem_object_put_unlocked(&bo->base); return ret; } static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, - struct panfrost_file_priv *user) + struct drm_file *file_priv) { + struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; if (user != perfcnt->user) @@ -178,10 +188,12 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); perfcnt->buf = NULL; - drm_gem_object_put_unlocked(&perfcnt->bo->base.base); - perfcnt->bo = NULL; + panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); + panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); + panfrost_gem_mapping_put(perfcnt->mapping); + perfcnt->mapping = NULL; pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); @@ -191,7 +203,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_file_priv *pfile = file_priv->driver_priv; struct panfrost_device *pfdev = dev->dev_private; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_enable *req = data; @@ -207,10 +218,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, mutex_lock(&perfcnt->lock); if (req->enable) - ret = panfrost_perfcnt_enable_locked(pfdev, pfile, + ret = panfrost_perfcnt_enable_locked(pfdev, file_priv, req->counterset); else - ret = panfrost_perfcnt_disable_locked(pfdev, pfile); + ret = panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); return ret; @@ -248,15 +259,16 @@ out: return ret; } -void panfrost_perfcnt_close(struct panfrost_file_priv *pfile) +void panfrost_perfcnt_close(struct drm_file *file_priv) { + struct panfrost_file_priv *pfile = file_priv->driver_priv; struct panfrost_device *pfdev = pfile->pfdev; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; pm_runtime_get_sync(pfdev->dev); mutex_lock(&perfcnt->lock); if (perfcnt->user == pfile) - panfrost_perfcnt_disable_locked(pfdev, pfile); + panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h index 13b8fdaa1b43..8bbcf5f5fb33 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h @@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev); void 
panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev); int panfrost_perfcnt_init(struct panfrost_device *pfdev); void panfrost_perfcnt_fini(struct panfrost_device *pfdev); -void panfrost_perfcnt_close(struct panfrost_file_priv *pfile); +void panfrost_perfcnt_close(struct drm_file *file_priv); int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv); int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 611cbe7aee69..bfc1631093e9 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -184,7 +184,7 @@ int qxl_device_init(struct qxl_device *qdev, if (!qxl_check_device(qdev)) { r = -ENODEV; - goto surface_mapping_free; + goto rom_unmap; } r = qxl_bo_init(qdev); diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 6f60f4840cc5..e0c632af6fdf 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -6,3 +6,6 @@ config DRM_RADEON_USERPTR help This option selects CONFIG_MMU_NOTIFIER if it isn't already selected to enabled full userptr support. + +config ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE + tristate "1500a set radeon mem type is device and nocache" diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 62eab82a64f9..897442754fd0 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6969,8 +6969,8 @@ static int cik_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* XXX this should actually be a bus address, not an MC address. same on older asics */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 7089dfc8c2a9..110fb38004b1 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1826,8 +1826,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; - track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); - track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); + track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); + track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); } if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) track->textures[i].tex_coord_type = 2; diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 840401413c58..f5f2ffea5ab2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; - track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); - track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); + track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); + 
track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); } if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE) track->textures[i].lookup_disable = true; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e937cc01910d..033bc466a862 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* set dummy read address to ring address */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e81b01f8db90..0826efd9b5f5 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc) DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); + msleep(10); + WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4528f4dc0b2d..6128792ab883 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -37,6 +37,7 @@ #include #include +#include #include #include #include @@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long flags = 0; + struct drm_device *dev; int ret; if (!ent) @@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret) return ret; - return drm_get_pci_dev(pdev, ent, &kms_driver); + dev = drm_dev_alloc(&kms_driver, &pdev->dev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + ret = pci_enable_device(pdev); + if (ret) + goto err_free; + + dev->pdev = pdev; +#ifdef __alpha__ + dev->hose = pdev->sysdata; +#endif + + pci_set_drvdata(pdev, dev); + + if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) + dev->agp = drm_agp_init(dev); + if (dev->agp) { + dev->agp->agp_mtrr = arch_phys_wc_add( + dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * + 1024 * 1024); + } + + ret = drm_dev_register(dev, ent->driver_data); + if (ret) + goto err_agp; + + return 0; + +err_agp: + if (dev->agp) + arch_phys_wc_del(dev->agp->agp_mtrr); + kfree(dev->agp); + pci_disable_device(pdev); +err_free: + drm_dev_put(dev); + return ret; } static void @@ -578,7 +617,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER, + DRIVER_GEM | DRIVER_RENDER, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index e85c554eeaa9..2bb0187c5bc7 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev) radeon_modeset_fini(rdev); radeon_device_fini(rdev); + if (dev->agp) + arch_phys_wc_del(dev->agp->agp_mtrr); 
+ kfree(dev->agp); + dev->agp = NULL; + done_free: kfree(rdev); dev->dev_private = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index a05e10724d46..5a1303276a8c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -112,8 +112,13 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; +#ifdef CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE + man->available_caching = TTM_PL_FLAG_UNCACHED; + man->default_caching = TTM_PL_FLAG_UNCACHED; +#else man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; +#endif break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1ad5c3b86b64..ebfda5e32826 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -285,17 +285,21 @@ int radeon_uvd_resume(struct radeon_device *rdev) if (rdev->uvd.vcpu_bo == NULL) return -EINVAL; - +#ifdef CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE + memcpy_toio(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); +#else memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); - +#endif size = radeon_bo_size(rdev->uvd.vcpu_bo); size -= rdev->uvd_fw->size; ptr = rdev->uvd.cpu_addr; ptr += rdev->uvd_fw->size; - +#ifdef CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE + memset_io(ptr, 0, size); +#else memset(ptr, 0, size); - +#endif return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 59db54ace428..6fbc014aae87 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -239,12 +239,19 @@ int radeon_vce_resume(struct radeon_device *rdev) return r; } +#ifdef CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE + memset_io(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); +#else memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); +#endif if (rdev->family < CHIP_BONAIRE) r = vce_v1_0_load_fw(rdev, cpu_addr); else +#ifdef CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE + memcpy_toio(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); +#else memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); - +#endif radeon_bo_kunmap(rdev->vce.vcpu_bo); radeon_bo_unreserve(rdev->vce.vcpu_bo); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 05894d198a79..1d8efb0eefdb 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* set dummy read address to ring address */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 3fc7e6899cab..50c11a7f0467 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds) return 0; } +static const struct rcar_lvds_device_info 
rcar_lvds_r8a7790es1_info = { + .gen = 2, + .quirks = RCAR_LVDS_QUIRK_LANES, + .pll_setup = rcar_lvds_pll_setup_gen2, +}; + +static const struct soc_device_attribute lvds_quirk_matches[] = { + { + .soc_id = "r8a7790", .revision = "ES1.*", + .data = &rcar_lvds_r8a7790es1_info, + }, + { /* sentinel */ } +}; + static int rcar_lvds_probe(struct platform_device *pdev) { + const struct soc_device_attribute *attr; struct rcar_lvds *lvds; struct resource *mem; int ret; @@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev) lvds->dev = &pdev->dev; lvds->info = of_device_get_match_data(&pdev->dev); + attr = soc_device_match(lvds_quirk_matches); + if (attr) + lvds->info = attr->data; + ret = rcar_lvds_parse_dt(lvds); if (ret < 0) return ret; @@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = { .pll_setup = rcar_lvds_pll_setup_gen2, }; -static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = { - .gen = 2, - .quirks = RCAR_LVDS_QUIRK_LANES, - .pll_setup = rcar_lvds_pll_setup_gen2, -}; - static const struct rcar_lvds_device_info rcar_lvds_gen3_info = { .gen = 3, .quirks = RCAR_LVDS_QUIRK_PWD, @@ -930,7 +944,7 @@ static const struct of_device_id rcar_lvds_of_table[] = { { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info }, - { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info }, + { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info }, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 613404f86668..84e3decb17b1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1040,10 +1040,41 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct vop *vop = to_vop(crtc); + unsigned long rate; - adjusted_mode->clock = - DIV_ROUND_UP(clk_round_rate(vop->dclk, - adjusted_mode->clock * 1000), 1000); + /* + * Clock craziness. + * + * Key points: + * + * - DRM works in kHz. + * - Clock framework works in Hz. + * - Rockchip's clock driver picks the clock rate that is the + * same _OR LOWER_ than the one requested. + * + * Action plan: + * + * 1. When DRM gives us a mode, we should add 999 Hz to it. That way + * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to + * make 60000 kHz then the clock framework will actually give us + * the right clock. + * + * NOTE: if the PLL (maybe through a divider) could actually make + * a clock rate 999 Hz higher instead of the one we want then this + * could be a problem. Unfortunately there's not much we can do + * since it's baked into DRM to use kHz. It shouldn't matter in + * practice since Rockchip PLLs are controlled by tables and + * even if there is a divider in the middle I wouldn't expect PLL + * rates in the table that are just a few kHz different. + * + * 2. Get the clock framework to round the rate for us to tell us + * what it will actually make. + * + * 3. Store the rounded up rate so that we don't need to worry about + * this in the actual clk_set_rate().
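The arithmetic in this plan is worth seeing end to end. A self-contained sketch, with a made-up "same or lower" clock table whose only entry is 60000001 Hz standing in for the real PLL driver (none of this is Rockchip code):

#include <stdio.h>

/* Toy stand-in for clk_round_rate() with Rockchip's "same or lower"
 * policy and a single 60000001 Hz table entry. */
static unsigned long toy_round_rate(unsigned long req_hz)
{
	return req_hz >= 60000001UL ? 60000001UL : 59400000UL;
}

int main(void)
{
	unsigned long khz = 60000;	/* the mode clock DRM hands us */
	unsigned long naive = toy_round_rate(khz * 1000);
	unsigned long nudged = toy_round_rate(khz * 1000 + 999);

	/* Naive rounding falls short of the 60000001 Hz entry; the +999
	 * nudge reaches it, and DIV_ROUND_UP stores it as 60001 kHz. */
	printf("naive=%lu Hz, nudged=%lu Hz, stored=%lu kHz\n",
	       naive, nudged, (nudged + 999) / 1000);
	return 0;
}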
+ */ + rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999); + adjusted_mode->clock = DIV_ROUND_UP(rate, 1000); return true; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index f39b97ed4ade..2af64459b3d7 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -632,43 +632,41 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) } /** - * drm_sched_cleanup_jobs - destroy finished jobs + * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed * * @sched: scheduler instance * - * Remove all finished jobs from the mirror list and destroy them. + * Returns the next finished job from the mirror list (if there is one) + * ready for it to be destroyed. */ -static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched) +static struct drm_sched_job * +drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) { + struct drm_sched_job *job; unsigned long flags; /* Don't destroy jobs while the timeout worker is running */ if (sched->timeout != MAX_SCHEDULE_TIMEOUT && !cancel_delayed_work(&sched->work_tdr)) - return; + return NULL; + spin_lock_irqsave(&sched->job_list_lock, flags); - while (!list_empty(&sched->ring_mirror_list)) { - struct drm_sched_job *job; - - job = list_first_entry(&sched->ring_mirror_list, + job = list_first_entry_or_null(&sched->ring_mirror_list, struct drm_sched_job, node); - if (!dma_fence_is_signaled(&job->s_fence->finished)) - break; - spin_lock_irqsave(&sched->job_list_lock, flags); + if (job && dma_fence_is_signaled(&job->s_fence->finished)) { /* remove job from ring_mirror_list */ list_del_init(&job->node); - spin_unlock_irqrestore(&sched->job_list_lock, flags); - - sched->ops->free_job(job); + } else { + job = NULL; + /* queue timeout for next job */ + drm_sched_start_timeout(sched); } - /* queue timeout for next job */ - spin_lock_irqsave(&sched->job_list_lock, flags); - drm_sched_start_timeout(sched); spin_unlock_irqrestore(&sched->job_list_lock, flags); + return job; } /** @@ -708,12 +706,19 @@ static int drm_sched_main(void *param) struct drm_sched_fence *s_fence; struct drm_sched_job *sched_job; struct dma_fence *fence; + struct drm_sched_job *cleanup_job = NULL; wait_event_interruptible(sched->wake_up_worker, - (drm_sched_cleanup_jobs(sched), + (cleanup_job = drm_sched_get_cleanup_job(sched)) || (!drm_sched_blocked(sched) && (entity = drm_sched_select_entity(sched))) || - kthread_should_stop())); + kthread_should_stop()); + + if (cleanup_job) { + sched->ops->free_job(cleanup_job); + /* queue timeout for next job */ + drm_sched_start_timeout(sched); + } if (!entity) continue; diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h index 6d61a0eb5d64..84e6bc050bf2 100644 --- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h +++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h @@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0) cmdline_test(drm_cmdline_test_rotate_90) cmdline_test(drm_cmdline_test_rotate_180) cmdline_test(drm_cmdline_test_rotate_270) +cmdline_test(drm_cmdline_test_rotate_multiple) cmdline_test(drm_cmdline_test_rotate_invalid_val) cmdline_test(drm_cmdline_test_rotate_truncated) cmdline_test(drm_cmdline_test_hmirror) diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c index 013de9d27c35..035f86c5d648 100644 --- 
a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c +++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c @@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored) return 0; } +static int drm_cmdline_test_rotate_multiple(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90", + &no_connector, + &mode)); + + return 0; +} + static int drm_cmdline_test_rotate_invalid_val(void *ignored) { struct drm_cmdline_mode mode = { }; @@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored) FAIL_ON(!mode.specified); FAIL_ON(mode.xres != 720); FAIL_ON(mode.yres != 480); - FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X); + FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X)); FAIL_ON(mode.refresh_specified); @@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored) FAIL_ON(!mode.specified); FAIL_ON(mode.xres != 720); FAIL_ON(mode.yres != 480); - FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y); + FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y)); FAIL_ON(mode.refresh_specified); diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index a5757b11b730..5b54eff12cc0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev) } drm_mode_config_init(drm); - drm->mode_config.allow_fb_modifiers = true; ret = component_bind_all(drm->dev, drm); if (ret) { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index eb8071a4d6d0..9c3bdfd20337 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -683,8 +683,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, struct sun4i_hdmi *hdmi = dev_get_drvdata(dev); cec_unregister_adapter(hdmi->cec_adap); - drm_connector_cleanup(&hdmi->connector); - drm_encoder_cleanup(&hdmi->encoder); i2c_del_adapter(hdmi->i2c); i2c_put_adapter(hdmi->ddc_i2c); clk_disable_unprepare(hdmi->mod_clk); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index b89439ed210d..27c80c9e2b83 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, WARN_ON(!tcon->quirks->has_channel_0); - tcon->dclk_min_div = 1; + tcon->dclk_min_div = tcon->quirks->dclk_min_div; tcon->dclk_max_div = 127; sun4i_tcon0_mode_set_common(tcon, mode); @@ -1425,12 +1425,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, static const struct sun4i_tcon_quirks sun4i_a10_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun4i_a10_tcon_set_mux, }; static const struct sun4i_tcon_quirks sun5i_a13_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun5i_a13_tcon_set_mux, }; @@ -1439,6 +1441,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = { .has_channel_1 = true, .has_lvds_alt = true, .needs_de_be_mux = true, + .dclk_min_div = 1, .set_mux = sun6i_tcon_set_mux, }; @@ -1446,11 +1449,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = { .has_channel_0 = true, .has_channel_1 = true, .needs_de_be_mux = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun7i_a20_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, 
/* Same display pipeline structure as A10 */ .set_mux = sun4i_a10_tcon_set_mux, }; @@ -1458,11 +1463,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = { static const struct sun4i_tcon_quirks sun8i_a33_quirks = { .has_channel_0 = true, .has_lvds_alt = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { .supports_lvds = true, .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { @@ -1476,11 +1483,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = { - .has_channel_0 = true, - .needs_edp_reset = true, + .has_channel_0 = true, + .needs_edp_reset = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index f9f1fe80b206..a62ec826ae71 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -224,6 +224,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? */ + u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 1636344ba9ec..f83522717488 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -437,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT)); val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE; - } else if ((mode->hsync_end - mode->hdisplay) > 20) { + } else if ((mode->hsync_start - mode->hdisplay) > 20) { /* Maaaaaagic */ - u16 drq = (mode->hsync_end - mode->hdisplay) - 20; + u16 drq = (mode->hsync_start - mode->hdisplay) - 20; drq *= mipi_dsi_pixel_format_to_bpp(device->format); drq /= 32; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 8b803eb903b8..18b4881f4481 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -106,48 +106,128 @@ static const struct de2_fmt_info de2_formats[] = { .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XRGB4444, + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_ABGR4444, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XBGR4444, + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_RGBA4444, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_RGBX4444, + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_BGRA4444, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_BGRX4444, + .de2_fmt = 
SUN8I_MIXER_FBFMT_BGRA4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_ARGB1555, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XRGB1555, + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_ABGR1555, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XBGR1555, + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_RGBA5551, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_RGBX5551, + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_BGRA5551, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, + { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_BGRX5551, + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_ARGB2101010, + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_ABGR2101010, + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_RGBA1010102, + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_BGRA1010102, + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, { .drm_fmt = DRM_FORMAT_UYVY, .de2_fmt = SUN8I_MIXER_FBFMT_UYVY, @@ -196,12 +276,6 @@ static const struct de2_fmt_info de2_formats[] = { .rgb = false, .csc = SUN8I_CSC_MODE_YUV2RGB, }, - { - .drm_fmt = DRM_FORMAT_YUV444, - .de2_fmt = SUN8I_MIXER_FBFMT_RGB888, - .rgb = true, - .csc = SUN8I_CSC_MODE_YUV2RGB, - }, { .drm_fmt = DRM_FORMAT_YUV422, .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, @@ -220,12 +294,6 @@ static const struct de2_fmt_info de2_formats[] = { .rgb = false, .csc = SUN8I_CSC_MODE_YUV2RGB, }, - { - .drm_fmt = DRM_FORMAT_YVU444, - .de2_fmt = SUN8I_MIXER_FBFMT_RGB888, - .rgb = true, - .csc = SUN8I_CSC_MODE_YVU2RGB, - }, { .drm_fmt = DRM_FORMAT_YVU422, .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, @@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = { .rgb = false, .csc = SUN8I_CSC_MODE_YVU2RGB, }, + { + .drm_fmt = DRM_FORMAT_P010, + .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV, + .rgb = false, + .csc = SUN8I_CSC_MODE_YUV2RGB, + }, + { + .drm_fmt = DRM_FORMAT_P210, + .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV, + .rgb = false, + .csc = SUN8I_CSC_MODE_YUV2RGB, + }, }; const struct de2_fmt_info *sun8i_mixer_format_info(u32 format) diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index c6cc94057faf..345b28b0a80a 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -93,6 +93,10 @@ #define SUN8I_MIXER_FBFMT_ABGR1555 17 #define SUN8I_MIXER_FBFMT_RGBA5551 18 #define SUN8I_MIXER_FBFMT_BGRA5551 19 +#define SUN8I_MIXER_FBFMT_ARGB2101010 20 +#define SUN8I_MIXER_FBFMT_ABGR2101010 21 +#define SUN8I_MIXER_FBFMT_RGBA1010102 22 +#define SUN8I_MIXER_FBFMT_BGRA1010102 23 #define SUN8I_MIXER_FBFMT_YUYV 0 #define SUN8I_MIXER_FBFMT_UYVY 1 @@ -109,6 +113,13 @@ /* format 12 is semi-planar YUV411 UVUV */ 
/* format 13 is semi-planar YUV411 VUVU */ #define SUN8I_MIXER_FBFMT_YUV411 14 +/* format 15 doesn't exist */ +/* format 16 is P010 YVU */ +#define SUN8I_MIXER_FBFMT_P010_YUV 17 +/* format 18 is P210 YVU */ +#define SUN8I_MIXER_FBFMT_P210_YUV 19 +/* format 20 is packed YVU444 10-bit */ +/* format 21 is packed YUV444 10-bit */ /* * Sub-engines listed bellow are unused for now. The EN registers are here only diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 42d445d23773..b8398ca18b0f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -398,26 +398,26 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = { }; /* - * While all RGB formats are supported, VI planes don't support - * alpha blending, so there is no point having formats with alpha - * channel if their opaque analog exist. + * While DE2 VI layer supports same RGB formats as UI layer, alpha + * channel is ignored. This structure lists all unique variants + * where alpha channel is replaced with "don't care" (X) channel. */ static const u32 sun8i_vi_layer_formats[] = { - DRM_FORMAT_ABGR1555, - DRM_FORMAT_ABGR4444, - DRM_FORMAT_ARGB1555, - DRM_FORMAT_ARGB4444, DRM_FORMAT_BGR565, DRM_FORMAT_BGR888, - DRM_FORMAT_BGRA5551, - DRM_FORMAT_BGRA4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_BGRX5551, DRM_FORMAT_BGRX8888, DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, - DRM_FORMAT_RGBA4444, - DRM_FORMAT_RGBA5551, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_RGBX5551, DRM_FORMAT_RGBX8888, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_XBGR4444, DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XRGB4444, DRM_FORMAT_XRGB8888, DRM_FORMAT_NV16, @@ -431,11 +431,53 @@ static const u32 sun8i_vi_layer_formats[] = { DRM_FORMAT_YUV411, DRM_FORMAT_YUV420, DRM_FORMAT_YUV422, - DRM_FORMAT_YUV444, DRM_FORMAT_YVU411, DRM_FORMAT_YVU420, DRM_FORMAT_YVU422, - DRM_FORMAT_YVU444, +}; + +static const u32 sun8i_vi_layer_de3_formats[] = { + DRM_FORMAT_ABGR1555, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGR565, + DRM_FORMAT_BGR888, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV61, + DRM_FORMAT_P010, + DRM_FORMAT_P210, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV411, + DRM_FORMAT_YUV420, + DRM_FORMAT_YUV422, + DRM_FORMAT_YVU411, + DRM_FORMAT_YVU420, + DRM_FORMAT_YVU422, }; struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, @@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, int index) { u32 supported_encodings, supported_ranges; + unsigned int plane_cnt, format_count; struct sun8i_vi_layer *layer; - unsigned int plane_cnt; + const u32 *formats; int ret; layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL); if (!layer) return ERR_PTR(-ENOMEM); + if (mixer->cfg->is_de3) { + formats = sun8i_vi_layer_de3_formats; + format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats); + } else { + formats = sun8i_vi_layer_formats; + format_count = ARRAY_SIZE(sun8i_vi_layer_formats); + } + /* possible crtcs are set later 
*/ ret = drm_universal_plane_init(drm, &layer->plane, 0, &sun8i_vi_layer_funcs, - sun8i_vi_layer_formats, - ARRAY_SIZE(sun8i_vi_layer_formats), + formats, format_count, NULL, DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { dev_err(drm->dev, "Couldn't initialize layer\n"); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6fb7d74ff553..bc7cc32140f8 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -201,19 +201,19 @@ hub: if (tegra->hub) tegra_display_hub_cleanup(tegra->hub); device: - host1x_device_exit(device); -fbdev: - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_free(drm); -config: - drm_mode_config_cleanup(drm); - if (tegra->domain) { mutex_destroy(&tegra->mm_lock); drm_mm_takedown(&tegra->mm); put_iova_domain(&tegra->carveout.domain); iova_cache_put(); } + + host1x_device_exit(device); +fbdev: + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_free(drm); +config: + drm_mode_config_cleanup(drm); domain: if (tegra->domain) iommu_domain_free(tegra->domain); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index e1669ada0a40..75e65d9536d5 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3200,6 +3200,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor) * earlier */ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index; + } else { + if (sor->soc->supports_edp) + sor->index = 0; + else + sor->index = 1; } err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 98819462f025..f07803699809 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -926,7 +926,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *mem, + bool no_wait_gpu) { struct dma_fence *fence; int ret; @@ -935,19 +936,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); - if (fence) { - dma_resv_add_shared_fence(bo->base.resv, fence); + if (!fence) + return 0; - ret = dma_resv_reserve_shared(bo->base.resv, 1); - if (unlikely(ret)) { - dma_fence_put(fence); - return ret; - } + if (no_wait_gpu) + return -EBUSY; - dma_fence_put(bo->moving); - bo->moving = fence; + dma_resv_add_shared_fence(bo->base.resv, fence); + + ret = dma_resv_reserve_shared(bo->base.resv, 1); + if (unlikely(ret)) { + dma_fence_put(fence); + return ret; } + dma_fence_put(bo->moving); + bo->moving = fence; return 0; } @@ -978,7 +982,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; } while (1); - return ttm_bo_add_move_fence(bo, man, mem); + return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, @@ -1120,14 +1124,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (unlikely(ret)) goto error; - if (mem->mm_node) { - ret = ttm_bo_add_move_fence(bo, man, mem); - if (unlikely(ret)) { - (*man->func->put_node)(man, mem); - goto error; - } - return 0; + if (!mem->mm_node) + continue; + + ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); + if (unlikely(ret)) { + (*man->func->put_node)(man, mem); + if (ret == -EBUSY) + continue; + + goto error; } + return 0; } for (i = 0; i < placement->num_busy_placement; ++i) { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ee7d4e7b0ee3..0853b980bcb3 
100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1285,6 +1285,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) { +#ifdef CONFIG_DRM_VC4_HDMI_CEC + struct cec_connector_info conn_info; +#endif struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; @@ -1403,13 +1406,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) #ifdef CONFIG_DRM_VC4_HDMI_CEC hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, vc4, "vc4", - CEC_CAP_TRANSMIT | - CEC_CAP_LOG_ADDRS | - CEC_CAP_PASSTHROUGH | - CEC_CAP_RC, 1); + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, 1); ret = PTR_ERR_OR_ZERO(hdmi->cec_adap); if (ret < 0) goto err_destroy_conn; + + cec_fill_conn_info_from_drm(&conn_info, hdmi->connector); + cec_s_conn_info(hdmi->cec_adap, &conn_info); + HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff); value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 5bd60ded3d81..909eba43664a 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, return ERR_CAST(obj); ret = drm_gem_handle_create(file, &obj->base, handle); - drm_gem_object_put_unlocked(&obj->base); - if (ret) + if (ret) { + drm_gem_object_put_unlocked(&obj->base); return ERR_PTR(ret); + } return &obj->base; } @@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, args->size = gem_object->size; args->pitch = pitch; - DRM_DEBUG("Created object of size %lld\n", size); + drm_gem_object_put_unlocked(gem_object); + + DRM_DEBUG("Created object of size %llu\n", args->size); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 09b526518f5a..8de93ccacaac 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -23,38 +23,44 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include + #include #include "virtgpu_drv.h" +static int virtio_gpu_virglrenderer_workaround = 1; +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) { -#if 0 - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); - - if (handle < 0) - return handle; -#else - static int handle; - - /* - * FIXME: dirty hack to avoid re-using IDs, virglrenderer - * can't deal with that. Needs fixing in virglrenderer, also - * should figure a better way to handle that in the guest. - */ - handle++; -#endif - - *resid = handle + 1; + if (virtio_gpu_virglrenderer_workaround) { + /* + * Hack to avoid re-using resource IDs. + * + * virglrenderer versions up to (and including) 0.7.0 + * can't deal with that. virglrenderer commit + * "f91a9dd35715 Fix unlinking resources from hash + * table." (Feb 2019) fixes the bug. 
+ */ + static atomic_t seqno = ATOMIC_INIT(0); + int handle = atomic_inc_return(&seqno); + *resid = handle + 1; + } else { + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) + return handle; + *resid = handle + 1; + } return 0; } static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { -#if 0 - ida_free(&vgdev->resource_ida, id - 1); -#endif + if (!virtio_gpu_virglrenderer_workaround) { + ida_free(&vgdev->resource_ida, id - 1); + } } static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 4ac55fc2bf97..44d858ce4ce7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, cres->hash.key = user_key | (res_type << 24); ret = drm_ht_insert_item(&man->resources, &cres->hash); - if (unlikely(ret != 0)) + if (unlikely(ret != 0)) { + kfree(cres); goto out_invalid_key; + } cres->state = VMW_CMDBUF_RES_ADD; cres->res = vmw_resource_reference(res); diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index eaa5c3352c13..22559670faee 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -436,7 +436,8 @@ out: return err; } -static inline int copy_gathers(struct host1x_job *job, struct device *dev) +static inline int copy_gathers(struct device *host, struct host1x_job *job, + struct device *dev) { struct host1x_firewall fw; size_t size = 0;
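
The copy_gathers() hunk resuming below keeps host1x's two-step buffer strategy: an opportunistic non-blocking attempt first, then a blocking fallback. A userspace analogue of that fallback shape; alloc_wc() is an illustrative stub for dma_alloc_wc(), and the "empty fast pool" merely simulates a failed GFP_NOWAIT allocation:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for dma_alloc_wc(): may_block == 0 models GFP_NOWAIT and is
 * simulated as an exhausted atomic pool; may_block == 1 models
 * GFP_KERNEL and simply uses malloc(). */
static void *alloc_wc(size_t size, int may_block)
{
    if (!may_block)
        return NULL;    /* pretend the fast pool is empty */
    return malloc(size);
}

int main(void)
{
    size_t size = 4096;

    /* Opportunistic non-blocking attempt first... */
    void *buf = alloc_wc(size, 0);

    /* ...fall back to the blocking allocator only if that failed. */
    if (!buf)
        buf = alloc_wc(size, 1);

    printf("buffer at %p\n", buf);
    free(buf);
    return 0;
}

@@ -459,12 +460,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) * Try a non-blocking allocation from a higher priority pool first, * as waiting for the allocation here is a major performance hit.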
*/ - job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_NOWAIT); /* the higher priority allocation failed, try the generic-blocking */ if (!job->gather_copy_mapped) - job->gather_copy_mapped = dma_alloc_wc(dev, size, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_KERNEL); if (!job->gather_copy_mapped) @@ -512,7 +513,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev) goto out; if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) { - err = copy_gathers(job, dev); + err = copy_gathers(host->dev, job, dev); if (err) goto out; } @@ -573,7 +574,7 @@ void host1x_job_unpin(struct host1x_job *job) job->num_unpins = 0; if (job->gather_copy_size) - dma_free_wc(job->channel->dev, job->gather_copy_size, + dma_free_wc(host->dev, job->gather_copy_size, job->gather_copy_mapped, job->gather_copy); } EXPORT_SYMBOL(host1x_job_unpin); diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index ae79a7c66737..fa704153cb00 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -730,7 +730,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) if (data->has_sp) { input2 = input_allocate_device(); if (!input2) { - input_free_device(input2); + ret = -ENOMEM; goto exit; } diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 6ac8becc2372..d732d1d10caf 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -340,7 +340,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, unsigned long **bit, int *max) { if (usage->hid == (HID_UP_CUSTOM | 0x0003) || - usage->hid == (HID_UP_MSVENDOR | 0x0003)) { + usage->hid == (HID_UP_MSVENDOR | 0x0003) || + usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 8063b1d567b1..e6e4c841fb06 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && - (usage->hid & HID_USAGE) != 0x00 && !usage->type) { + (usage->hid & HID_USAGE) != 0x00 && + (usage->hid & HID_USAGE) != 0xff && !usage->type) { hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", usage->hid & HID_USAGE); } diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index 3f6abd190df4..db6da21ade06 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = { struct bigben_device { struct hid_device *hid; struct hid_report *report; + bool removed; u8 led_state; /* LED1 = 1 .. 
LED4 = 8 */ u8 right_motor_on; /* right motor off/on 0/1 */ u8 left_motor_force; /* left motor force 0-255 */ @@ -190,6 +191,9 @@ static void bigben_worker(struct work_struct *work) struct bigben_device, worker); struct hid_field *report_field = bigben->report->field[0]; + if (bigben->removed) + return; + if (bigben->work_led) { bigben->work_led = false; report_field->value[0] = 0x01; /* 1 = led message */ @@ -220,10 +224,16 @@ static void bigben_worker(struct work_struct *work) static int hid_bigben_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { - struct bigben_device *bigben = data; + struct hid_device *hid = input_get_drvdata(dev); + struct bigben_device *bigben = hid_get_drvdata(hid); u8 right_motor_on; u8 left_motor_force; + if (!bigben) { + hid_err(hid, "no device data\n"); + return 0; + } + if (effect->type != FF_RUMBLE) return 0; @@ -298,8 +308,8 @@ static void bigben_remove(struct hid_device *hid) { struct bigben_device *bigben = hid_get_drvdata(hid); + bigben->removed = true; cancel_work_sync(&bigben->worker); - hid_hw_close(hid); hid_hw_stop(hid); } @@ -319,6 +329,7 @@ static int bigben_probe(struct hid_device *hid, return -ENOMEM; hid_set_drvdata(hid, bigben); bigben->hid = hid; + bigben->removed = false; error = hid_parse(hid); if (error) { @@ -341,10 +352,10 @@ static int bigben_probe(struct hid_device *hid, INIT_WORK(&bigben->worker, bigben_worker); - error = input_ff_create_memless(hidinput->input, bigben, + error = input_ff_create_memless(hidinput->input, NULL, hid_bigben_play_effect); if (error) - return error; + goto error_hw_stop; name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1; @@ -354,8 +365,10 @@ static int bigben_probe(struct hid_device *hid, sizeof(struct led_classdev) + name_sz, GFP_KERNEL ); - if (!led) - return -ENOMEM; + if (!led) { + error = -ENOMEM; + goto error_hw_stop; + } name = (void *)(&led[1]); snprintf(name, name_sz, "%s:red:bigben%d", @@ -369,7 +382,7 @@ static int bigben_probe(struct hid_device *hid, bigben->leds[n] = led; error = devm_led_classdev_register(&hid->dev, led); if (error) - return error; + goto error_hw_stop; } /* initial state: LED1 is on, no rumble effect */ @@ -383,6 +396,10 @@ static int bigben_probe(struct hid_device *hid, hid_info(hid, "LED and force feedback support for BigBen gamepad\n"); return 0; + +error_hw_stop: + hid_hw_stop(hid); + return error; } static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 63fdbf09b044..359616e3efbb 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -211,6 +211,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) return 0; /* we know nothing about this usage type */ } +/* + * Concatenate usage which defines 16 bits or less with the + * currently defined usage page to form a 32 bit usage + */ + +static void complete_usage(struct hid_parser *parser, unsigned int index) +{ + parser->local.usage[index] &= 0xFFFF; + parser->local.usage[index] |= + (parser->global.usage_page & 0xFFFF) << 16; +} + /* * Add a usage to the temporary parser table. 
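
complete_usage() above is plain bit surgery: a Usage item that supplied 16 bits or less keeps its id in the low half and takes the current Usage Page in the high half. The same operation in isolation, with real HID table values in the example call:

#include <stdio.h>
#include <stdint.h>

/* Widen a short HID usage with the currently active usage page, as
 * complete_usage() does: low 16 bits keep the usage id, high 16 bits
 * take the page. */
static uint32_t complete_usage(uint32_t usage, uint32_t usage_page)
{
    usage &= 0xFFFF;
    usage |= (usage_page & 0xFFFF) << 16;
    return usage;
}

int main(void)
{
    /* Usage 0x0039 (hat switch) on the Generic Desktop page 0x0001. */
    printf("0x%08x\n", complete_usage(0x0039, 0x0001)); /* 0x00010039 */
    return 0;
}
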
*/ @@ -222,6 +234,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) return -1; } parser->local.usage[parser->local.usage_index] = usage; + + /* + * If Usage item only includes usage id, concatenate it with + * currently defined usage page + */ + if (size <= 2) + complete_usage(parser, parser->local.usage_index); + parser->local.usage_size[parser->local.usage_index] = size; parser->local.collection_index[parser->local.usage_index] = parser->collection_stack_ptr ? @@ -268,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign offset = report->size; report->size += parser->global.report_size * parser->global.report_count; + /* Total size check: Allow for possible report index byte */ + if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { + hid_err(parser->device, "report is too long\n"); + return -1; + } + if (!parser->local.usage_index) /* Ignore padding fields */ return 0; @@ -543,13 +569,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) * usage value." */ -static void hid_concatenate_usage_page(struct hid_parser *parser) +static void hid_concatenate_last_usage_page(struct hid_parser *parser) { int i; + unsigned int usage_page; + unsigned int current_page; - for (i = 0; i < parser->local.usage_index; i++) - if (parser->local.usage_size[i] <= 2) - parser->local.usage[i] += parser->global.usage_page << 16; + if (!parser->local.usage_index) + return; + + usage_page = parser->global.usage_page; + + /* + * Concatenate usage page again only if last declared Usage Page + * has not been already used in previous usages concatenation + */ + for (i = parser->local.usage_index - 1; i >= 0; i--) { + if (parser->local.usage_size[i] > 2) + /* Ignore extended usages */ + continue; + + current_page = parser->local.usage[i] >> 16; + if (current_page == usage_page) + break; + + complete_usage(parser, i); + } } /* @@ -561,7 +606,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) __u32 data; int ret; - hid_concatenate_usage_page(parser); + hid_concatenate_last_usage_page(parser); data = item_udata(item); @@ -742,6 +787,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) if (usage == 0xff0000c5 && parser->global.report_count == 256 && parser->global.report_size == 8) parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; + + if (usage == 0xff0000c6 && parser->global.report_count == 1 && + parser->global.report_size == 8) + parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; } static void hid_scan_collection(struct hid_parser *parser, unsigned type) @@ -772,7 +821,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) __u32 data; int i; - hid_concatenate_usage_page(parser); + hid_concatenate_last_usage_page(parser); data = item_udata(item); @@ -1692,7 +1741,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, rsize = ((report->size - 1) >> 3) + 1; - if (rsize > HID_MAX_BUFFER_SIZE) + if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) + rsize = HID_MAX_BUFFER_SIZE - 1; + else if (rsize > HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE; if (csize < rsize) { diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index d86a9189e88f..aeb351658ad3 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -473,6 +473,8 @@ static const struct hid_device_id hammer_devices[] = { USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, { HID_DEVICE(BUS_USB, 
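
The size checks in hid-core above convert a report length in bits to bytes with ((size - 1) >> 3) + 1 and clamp the result, keeping one byte free for the report ID on numbered reports. A sketch of that arithmetic; the 4096-byte cap here is illustrative, the real bound is HID_MAX_BUFFER_SIZE from hid.h:

#include <stdio.h>

#define MAX_BUFFER_SIZE 4096    /* illustrative; see HID_MAX_BUFFER_SIZE */

/* Round a bit count up to bytes, then clamp the way
 * hid_report_raw_event() does: numbered reports keep one byte free
 * for the report ID prefix. */
static unsigned int clamped_rsize(unsigned int size_bits, int numbered)
{
    unsigned int rsize = ((size_bits - 1) >> 3) + 1;

    if (numbered && rsize >= MAX_BUFFER_SIZE)
        rsize = MAX_BUFFER_SIZE - 1;
    else if (rsize > MAX_BUFFER_SIZE)
        rsize = MAX_BUFFER_SIZE;
    return rsize;
}

int main(void)
{
    printf("%u\n", clamped_rsize(9, 0));     /* 2 bytes for 9 bits */
    printf("%u\n", clamped_rsize(40000, 1)); /* 4095: ID byte reserved */
    return 0;
}
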
HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 447e8db21174..646b98809ed3 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -478,6 +478,7 @@ #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030 #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d +#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f @@ -573,6 +574,7 @@ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641 +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e @@ -630,6 +632,7 @@ #define USB_VENDOR_ID_ITE 0x048d #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 +#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a #define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396 #define USB_DEVICE_ID_ITE8595 0x8595 @@ -724,11 +727,13 @@ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 #define I2C_DEVICE_ID_LG_8001 0x8001 +#define I2C_DEVICE_ID_LG_7010 0x7010 #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e @@ -959,6 +964,7 @@ #define I2C_VENDOR_ID_RAYDIUM 0x2386 #define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33 +#define I2C_PRODUCT_ID_RAYDIUM_3118 0x3118 #define USB_VENDOR_ID_RAZER 0x1532 #define USB_DEVICE_ID_RAZER_BLADE_14 0x011D @@ -1096,6 +1102,7 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 63855f275a38..dea9cc65bf80 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: - if (device->driver->input_mapped && device->driver->input_mapped(device, - hidinput, field, usage, &bit, &max) < 0) - goto ignore; + if (device->driver->input_mapped && + device->driver->input_mapped(device, hidinput, field, usage, + &bit, &max) < 0) { + /* + * The driver indicated that no further generic handling + * of the usage is desired. 
+ */ + return; + } set_bit(usage->type, input->evbit); @@ -1215,9 +1221,11 @@ mapped: set_bit(MSC_SCAN, input->mscbit); } -ignore: return; +ignore: + usage->type = 0; + usage->code = 0; } static void hidinput_handle_scroll(struct hid_usage *usage, diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index a45f2352618d..6c55682c5974 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -40,6 +40,10 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 8e91e2f06cb4..cd9193078525 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -1102,6 +1102,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp, ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS, NULL, 0, &response); + /* Ignore these intermittent errors */ + if (ret == HIDPP_ERROR_RESOURCE_ERROR) + return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 3cfeb1629f79..362805ddf377 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, tool = MT_TOOL_DIAL; else if (unlikely(!confidence_state)) { tool = MT_TOOL_PALM; - if (!active && + if (!active && mt && input_mt_is_active(&mt->slots[slotnum])) { /* * The non-confidence was reported for @@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_LG, HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, + { .driver_data = MT_CLS_LG, + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index c50bcd967d99..ae64a286a68f 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT }, @@ -102,6 +103,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 
USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET }, @@ -173,6 +175,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, { 0 } }; diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 7c6abd7e0979..9ce22acdfaca 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -744,7 +744,8 @@ static void rmi_remove(struct hid_device *hdev) { struct rmi_data *hdata = hid_get_drvdata(hdev); - if (hdata->device_flags & RMI_DEVICE) { + if ((hdata->device_flags & RMI_DEVICE) + && test_bit(RMI_STARTED, &hdata->flags)) { clear_bit(RMI_STARTED, &hdata->flags); cancel_work_sync(&hdata->reset_work); rmi_unregister_transport_device(&hdata->xport); diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8dae0f9b819e..6286204d4c56 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); + /* If using a wireless adaptor ask for connection status */ + steam->connected = false; steam_request_conn_status(steam); } else { + /* A wired connection is always present */ + steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index bbc6ec1aa5cb..b382c6bf2c5c 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -249,13 +249,14 @@ out: static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) - return EPOLLIN | EPOLLRDNORM | EPOLLOUT; + mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist) - return EPOLLERR | EPOLLHUP; - return 0; + mask |= EPOLLERR | EPOLLHUP; + return mask; } static int hidraw_open(struct inode *inode, struct file *file) diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 04c088131e04..479934f7d241 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -48,6 +48,9 @@ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) +#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) +#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) + /* flags */ #define I2C_HID_STARTED 0 @@ -170,8 +173,14 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, { I2C_VENDOR_ID_HANTICK, 
I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118, + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, I2C_HID_QUIRK_BOGUS_IRQ }, + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, + I2C_HID_QUIRK_RESET_ON_RESUME }, + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, + I2C_HID_QUIRK_BAD_INPUT_SIZE }, { 0, 0 } }; @@ -493,9 +502,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) } if ((ret_size > size) || (ret_size < 2)) { - dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", - __func__, size, ret_size); - return; + if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) { + ihid->inbuf[0] = size & 0xff; + ihid->inbuf[1] = size >> 8; + ret_size = size; + } else { + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", + __func__, size, ret_size); + return; + } } i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf); @@ -1212,8 +1227,15 @@ static int i2c_hid_resume(struct device *dev) * solves "incomplete reports" on Raydium devices 2386:3118 and * 2386:4B33 and fixes various SIS touchscreens no longer sending * data after a suspend/resume. + * + * However some ALPS touchpads generate IRQ storm without reset, so + * let's still reset them here. */ - ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); + if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME) + ret = i2c_hid_hwreset(client); + else + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); + if (ret) return ret; diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index d31ea82b84c1..a66f08041a1a 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -341,6 +341,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Trekstor SURFBOOK E11B", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Direkt-Tek DTLAPY116-2", .matches = { diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 6c1e6110867f..1fb294ca463e 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,7 +24,9 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define CMP_H_DEVICE_ID 0x06FC #define EHL_Ax_DEVICE_ID 0x4BB3 +#define TGL_LP_DEVICE_ID 0xA0FC #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 784dcc8c7022..f491d8b4e24c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index fa0cc0899827..8fe3efcb8327 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -766,13 +766,14 @@ unlock: static __poll_t uhid_char_poll(struct file *file, poll_table *wait) { struct uhid_device *uhid = 
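
uhid_char_poll() being reworked here gets the same fix as hidraw_poll() above: build a mask instead of returning the first matching condition, so writability is always reported alongside readability and hang-up. A compilable userspace sketch of the mask-building shape, using the standard poll(2) flag names:

#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>

/* Start from "always writable" and OR in the other conditions,
 * rather than returning early and losing them. */
static short poll_mask(int have_data, int device_gone)
{
    short mask = POLLOUT | POLLWRNORM;  /* device is always writable */

    if (have_data)
        mask |= POLLIN | POLLRDNORM;
    if (device_gone)
        mask |= POLLERR | POLLHUP;
    return mask;
}

int main(void)
{
    printf("0x%x\n", poll_mask(1, 0));  /* readable and writable */
    printf("0x%x\n", poll_mask(0, 1));  /* writable, but hung up */
    return 0;
}
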
file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */ poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return 0; + return mask; } static const struct file_operations uhid_fops = { diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 1f9bc4483465..35b1fa6d962e 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file) return 0; } +static int __hiddev_open(struct hiddev *hiddev, struct file *file) +{ + struct hiddev_list *list; + int error; + + lockdep_assert_held(&hiddev->existancelock); + + list = vzalloc(sizeof(*list)); + if (!list) + return -ENOMEM; + + mutex_init(&list->thread_lock); + list->hiddev = hiddev; + + if (!hiddev->open++) { + error = hid_hw_power(hiddev->hid, PM_HINT_FULLON); + if (error < 0) + goto err_drop_count; + + error = hid_hw_open(hiddev->hid); + if (error < 0) + goto err_normal_power; + } + + spin_lock_irq(&hiddev->list_lock); + list_add_tail(&list->node, &hiddev->list); + spin_unlock_irq(&hiddev->list_lock); + + file->private_data = list; + + return 0; + +err_normal_power: + hid_hw_power(hiddev->hid, PM_HINT_NORMAL); +err_drop_count: + hiddev->open--; + vfree(list); + return error; +} + /* * open file op */ static int hiddev_open(struct inode *inode, struct file *file) { - struct hiddev_list *list; struct usb_interface *intf; struct hid_device *hid; struct hiddev *hiddev; @@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file) intf = usbhid_find_interface(iminor(inode)); if (!intf) return -ENODEV; + hid = usb_get_intfdata(intf); hiddev = hid->hiddev; - if (!(list = vzalloc(sizeof(struct hiddev_list)))) - return -ENOMEM; - mutex_init(&list->thread_lock); - list->hiddev = hiddev; - file->private_data = list; - - /* - * no need for locking because the USB major number - * is shared which usbcore guards against disconnect - */ - if (list->hiddev->exist) { - if (!list->hiddev->open++) { - res = hid_hw_open(hiddev->hid); - if (res < 0) - goto bail; - } - } else { - res = -ENODEV; - goto bail; - } - - spin_lock_irq(&list->hiddev->list_lock); - list_add_tail(&list->node, &hiddev->list); - spin_unlock_irq(&list->hiddev->list_lock); - mutex_lock(&hiddev->existancelock); - /* - * recheck exist with existance lock held to - * avoid opening a disconnected device - */ - if (!list->hiddev->exist) { - res = -ENODEV; - goto bail_unlock; - } - if (!list->hiddev->open++) - if (list->hiddev->exist) { - struct hid_device *hid = hiddev->hid; - res = hid_hw_power(hid, PM_HINT_FULLON); - if (res < 0) - goto bail_unlock; - res = hid_hw_open(hid); - if (res < 0) - goto bail_normal_power; - } - mutex_unlock(&hiddev->existancelock); - return 0; -bail_normal_power: - hid_hw_power(hid, PM_HINT_NORMAL); -bail_unlock: + res = hiddev->exist ? 
__hiddev_open(hiddev, file) : -ENODEV; mutex_unlock(&hiddev->existancelock); - spin_lock_irq(&list->hiddev->list_lock); - list_del(&list->node); - spin_unlock_irq(&list->hiddev->list_lock); -bail: - file->private_data = NULL; - vfree(list); return res; } @@ -954,9 +941,9 @@ void hiddev_disconnect(struct hid_device *hid) hiddev->exist = 0; if (hiddev->open) { - mutex_unlock(&hiddev->existancelock); hid_hw_close(hiddev->hid); wake_up_interruptible(&hiddev->wait); + mutex_unlock(&hiddev->existancelock); } else { mutex_unlock(&hiddev->existancelock); kfree(hiddev); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index ccb74529bc78..d99a9d407671 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ hdev->product == 0x392 || /* Intuos Pro 2 */ - hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */ + hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */ + hdev->product == 0x3AA)) { /* MobileStudio Pro */ value = (field->logical_maximum - value); if (hdev->product == 0x357 || hdev->product == 0x358 || hdev->product == 0x392) value = wacom_offset_rotation(input, usage, value, 3, 16); else if (hdev->product == 0x34d || hdev->product == 0x34e || - hdev->product == 0x398 || hdev->product == 0x399) + hdev->product == 0x398 || hdev->product == 0x399 || + hdev->product == 0x3AA) value = wacom_offset_rotation(input, usage, value, 1, 2); } else { diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 34bd73526afd..930674117533 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1213,10 +1213,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, unsigned int i, j; struct page *pg; - if (num_pages < alloc_unit) - return 0; - - for (i = 0; (i * alloc_unit) < num_pages; i++) { + for (i = 0; i < num_pages / alloc_unit; i++) { if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) > PAGE_SIZE) return i * alloc_unit; @@ -1254,7 +1251,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, } - return num_pages; + return i * alloc_unit; } static void balloon_up(struct work_struct *dummy) @@ -1269,9 +1266,6 @@ static void balloon_up(struct work_struct *dummy) long avail_pages; unsigned long floor; - /* The host balloons pages in 2M granularity. */ - WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0); - /* * We will attempt 2M allocations. However, if we fail to * allocate 2M chunks, we will go back to 4k allocations. @@ -1281,14 +1275,13 @@ static void balloon_up(struct work_struct *dummy) avail_pages = si_mem_available(); floor = compute_balloon_floor(); - /* Refuse to balloon below the floor, keep the 2M granularity. */ + /* Refuse to balloon below the floor. */ if (avail_pages < num_pages || avail_pages - num_pages < floor) { pr_warn("Balloon request will be partially fulfilled. %s\n", avail_pages < num_pages ? "Not enough memory." : "Balloon floor reached."); num_pages = avail_pages > floor ? 
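
The alloc_balloon_pages() rework above makes the loop count whole alloc_unit chunks and report exactly what was handed out (i * alloc_unit), which also covers the old num_pages < alloc_unit special case for free. A small sketch of that accounting, with a stub allocator that fails partway through:

#include <stdio.h>

/* Stub: pretend only the first 3 chunk allocations succeed. */
static int alloc_chunk(unsigned int i)
{
    return i < 3;
}

/* Allocate as many whole alloc_unit chunks as fit in num_pages and
 * return the number of pages actually obtained, never more. */
static unsigned int alloc_balloon_pages(unsigned int num_pages,
                                        unsigned int alloc_unit)
{
    unsigned int i;

    for (i = 0; i < num_pages / alloc_unit; i++)
        if (!alloc_chunk(i))
            break;
    return i * alloc_unit;  /* pages handed out, not pages requested */
}

int main(void)
{
    /* 2048 pages requested in 512-page units; allocator dies after 3. */
    printf("%u\n", alloc_balloon_pages(2048, 512)); /* 1536 */
    return 0;
}
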
(avail_pages - floor) : 0; - num_pages -= num_pages % PAGES_IN_2M; } while (!done) { diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 53a60c81e220..05ead1735c6e 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2308,7 +2308,7 @@ static void hv_crash_handler(struct pt_regs *regs) vmbus_connection.conn_state = DISCONNECTED; cpu = smp_processor_id(); hv_stimer_cleanup(cpu); - hv_synic_cleanup(cpu); + hv_synic_disable_regs(cpu); hyperv_cleanup(); }; diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index 9632e2e3c4bb..319a0519ebdb 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which) return 0x95; break; } - return -ENODEV; + return 0; } /* Provide labels for sysfs */ diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 6c64d50c9aae..01c2eeb02aa9 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) long reg; if (bypass_attn & (1 << channel)) - reg = (volt * 1024) / 2250; + reg = DIV_ROUND_CLOSEST(volt * 1024, 2250); else - reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250); + reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024, + (r[0] + r[1]) * 2250); return clamp_val(reg, 0, 1023) & (0xff << 2); } diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 1f3b30b085b9..d018b20089ec 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -51,6 +51,7 @@ struct hwmon_device_attribute { #define to_hwmon_attr(d) \ container_of(d, struct hwmon_device_attribute, dev_attr) +#define to_dev_attr(a) container_of(a, struct device_attribute, attr) /* * Thermal zone information @@ -58,7 +59,7 @@ struct hwmon_device_attribute { * also provides the sensor index. 
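
The adt7475 change above swaps truncating division for round-to-nearest when encoding millivolts into a register value, halving the worst-case conversion error. A worked example; DIV_ROUND_CLOSEST is re-derived here for positive operands only:

#include <stdio.h>

/* Round-to-nearest integer division for positive operands, matching
 * what the kernel's DIV_ROUND_CLOSEST() computes in this case. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
    long volt = 2201;   /* millivolts */

    /* Truncation always rounds down; rounding picks the nearer code. */
    printf("truncated: %ld\n", (volt * 1024) / 2250);                 /* 1001 */
    printf("rounded:   %ld\n", DIV_ROUND_CLOSEST(volt * 1024, 2250)); /* 1002 */
    return 0;
}
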
*/ struct hwmon_thermal_data { - struct hwmon_device *hwdev; /* Reference to hwmon device */ + struct device *dev; /* Reference to hwmon device */ int index; /* sensor index */ }; @@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = { NULL }; +static void hwmon_free_attrs(struct attribute **attrs) +{ + int i; + + for (i = 0; attrs[i]; i++) { + struct device_attribute *dattr = to_dev_attr(attrs[i]); + struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr); + + kfree(hattr); + } + kfree(attrs); +} + static void hwmon_dev_release(struct device *dev) { - kfree(to_hwmon_device(dev)); + struct hwmon_device *hwdev = to_hwmon_device(dev); + + if (hwdev->group.attrs) + hwmon_free_attrs(hwdev->group.attrs); + kfree(hwdev->groups); + kfree(hwdev); } static struct class hwmon_class = { @@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida); static int hwmon_thermal_get_temp(void *data, int *temp) { struct hwmon_thermal_data *tdata = data; - struct hwmon_device *hwdev = tdata->hwdev; + struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); int ret; long t; - ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input, + ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input, tdata->index, &t); if (ret < 0) return ret; @@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = { .get_temp = hwmon_thermal_get_temp, }; -static int hwmon_thermal_add_sensor(struct device *dev, - struct hwmon_device *hwdev, int index) +static int hwmon_thermal_add_sensor(struct device *dev, int index) { struct hwmon_thermal_data *tdata; struct thermal_zone_device *tzd; @@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev, if (!tdata) return -ENOMEM; - tdata->hwdev = hwdev; + tdata->dev = dev; tdata->index = index; - tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata, + tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); /* * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, @@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, return 0; } #else -static int hwmon_thermal_add_sensor(struct device *dev, - struct hwmon_device *hwdev, int index) +static int hwmon_thermal_add_sensor(struct device *dev, int index) { return 0; } @@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr) (type == hwmon_fan && attr == hwmon_fan_label); } -static struct attribute *hwmon_genattr(struct device *dev, - const void *drvdata, +static struct attribute *hwmon_genattr(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int index, @@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev, if ((mode & 0222) && !ops->write) return ERR_PTR(-EINVAL); - hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL); + hattr = kzalloc(sizeof(*hattr), GFP_KERNEL); if (!hattr) return ERR_PTR(-ENOMEM); @@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) return n; } -static int hwmon_genattrs(struct device *dev, - const void *drvdata, +static int hwmon_genattrs(const void *drvdata, struct attribute **attrs, const struct hwmon_ops *ops, const struct hwmon_channel_info *info) @@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device *dev, attr_mask &= ~BIT(attr); if (attr >= template_size) return -EINVAL; - a = hwmon_genattr(dev, drvdata, info->type, attr, i, + a = hwmon_genattr(drvdata, info->type, attr, i, templates[attr], ops); if (IS_ERR(a)) { if (PTR_ERR(a) != -ENOENT) @@ 
-533,8 +548,7 @@ static int hwmon_genattrs(struct device *dev, } static struct attribute ** -__hwmon_create_attrs(struct device *dev, const void *drvdata, - const struct hwmon_chip_info *chip) +__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip) { int ret, i, aindex = 0, nattrs = 0; struct attribute **attrs; @@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata, if (nattrs == 0) return ERR_PTR(-EINVAL); - attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL); + attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) return ERR_PTR(-ENOMEM); for (i = 0; chip->info[i]; i++) { - ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops, + ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops, chip->info[i]); - if (ret < 0) + if (ret < 0) { + hwmon_free_attrs(attrs); return ERR_PTR(ret); + } aindex += ret; } @@ -595,14 +611,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, for (i = 0; groups[i]; i++) ngroups++; - hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), - GFP_KERNEL); + hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL); if (!hwdev->groups) { err = -ENOMEM; goto free_hwmon; } - attrs = __hwmon_create_attrs(dev, drvdata, chip); + attrs = __hwmon_create_attrs(drvdata, chip); if (IS_ERR(attrs)) { err = PTR_ERR(attrs); goto free_hwmon; @@ -647,8 +662,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, hwmon_temp_input, j)) continue; if (info[i]->config[j] & HWMON_T_INPUT) { - err = hwmon_thermal_add_sensor(dev, - hwdev, j); + err = hwmon_thermal_add_sensor(hdev, j); if (err) { device_unregister(hdev); /* @@ -667,7 +681,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, return hdev; free_hwmon: - kfree(hwdev); + hwmon_dev_release(hdev); ida_remove: ida_simple_remove(&hwmon_ida, id); return ERR_PTR(err); diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index f3dd2a17bd42..2e97e56c72c7 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -23,8 +23,8 @@ static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e }; static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = { - { 0x40, 0x00, 0x42, 0x44, 0x46 }, - { 0x3f, 0x00, 0x41, 0x43, 0x45 }, + { 0x46, 0x00, 0x40, 0x42, 0x44 }, + { 0x45, 0x00, 0x3f, 0x41, 0x43 }, }; static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 }; @@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = { struct nct7802_data { struct regmap *regmap; struct mutex access_lock; /* for multi-byte read and write operations */ + u8 in_status; + struct mutex in_alarm_lock; }; static ssize_t temp_type_show(struct device *dev, @@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr, return err ? : count; } +static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + struct nct7802_data *data = dev_get_drvdata(dev); + int volt, min, max, ret; + unsigned int val; + + mutex_lock(&data->in_alarm_lock); + + /* + * The SMI Voltage status register is the only register giving a status + * for voltages. A bit is set for each input crossing a threshold, in + * both direction, but the "inside" or "outside" limits info is not + * available. Also this register is cleared on read. + * Note: this is not explicitly spelled out in the datasheet, but + * from experiment. 
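
The caching scheme this comment goes on to describe, one validity bit and one alarm bit per input packed into a single u8, looks like this in isolation; the clear-on-read register and the limit comparisons are reduced to function parameters:

#include <stdio.h>
#include <stdint.h>

static uint8_t cache;   /* bits 7..4: entry valid, bits 3..0: alarm state */

/* Fold a clear-on-read hardware status byte into the cache and return
 * the alarm state for one input, recomputing it only when the hardware
 * flagged a change for that input. */
static int in_alarm(unsigned int index, uint8_t hw_status, int out_of_range)
{
    /* any flagged input loses its validity bit */
    cache &= ~((hw_status & 0x0f) << 4);

    if (!(cache & (0x10 << index))) {   /* cache miss: recompute */
        if (out_of_range)
            cache |= 1 << index;
        else
            cache &= ~(1 << index);
        cache |= 0x10 << index;         /* mark valid again */
    }
    return !!(cache & (1 << index));
}

int main(void)
{
    printf("%d\n", in_alarm(2, 0x04, 1)); /* input 2 flagged, out of range: 1 */
    printf("%d\n", in_alarm(2, 0x00, 0)); /* no change reported: still 1 */
    return 0;
}
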
+ * To deal with this we use a status cache with one validity bit and + * one status bit for each input. Validity is cleared at startup and + * each time the register reports a change, and the status is processed + * by software based on current input value and limits. + */ + ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */ + if (ret < 0) + goto abort; + + /* invalidate cached status for all inputs crossing a threshold */ + data->in_status &= ~((val & 0x0f) << 4); + + /* if cached status for requested input is invalid, update it */ + if (!(data->in_status & (0x10 << sattr->index))) { + ret = nct7802_read_voltage(data, sattr->nr, 0); + if (ret < 0) + goto abort; + volt = ret; + + ret = nct7802_read_voltage(data, sattr->nr, 1); + if (ret < 0) + goto abort; + min = ret; + + ret = nct7802_read_voltage(data, sattr->nr, 2); + if (ret < 0) + goto abort; + max = ret; + + if (volt < min || volt > max) + data->in_status |= (1 << sattr->index); + else + data->in_status &= ~(1 << sattr->index); + + data->in_status |= 0x10 << sattr->index; + } + + ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index))); +abort: + mutex_unlock(&data->in_alarm_lock); + return ret; +} + static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = { static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0); static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1); static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2); -static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3); +static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3); static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3); static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0); @@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0); static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0); static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1); static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2); -static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0); +static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0); static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0); static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0); static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1); static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2); -static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1); +static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1); static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1); static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0); static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1); static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2); -static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2); +static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2); static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2); static struct attribute *nct7802_in_attrs[] = { @@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client, return PTR_ERR(data->regmap); mutex_init(&data->access_lock); + mutex_init(&data->in_alarm_lock); ret = nct7802_init_chip(data); if (ret < 0) diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c index d44745e498e7..929c909ac27a 100644 --- a/drivers/hwmon/pmbus/ibm-cffps.c +++ b/drivers/hwmon/pmbus/ibm-cffps.c @@ -39,9 +39,13 @@ #define CFFPS_MFR_VAUX_FAULT BIT(6) #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7) +/* + * LED off state actually relinquishes LED control to PSU firmware, so it can + * turn on the LED for faults. 
+ */ +#define CFFPS_LED_OFF 0 #define CFFPS_LED_BLINK BIT(0) #define CFFPS_LED_ON BIT(1) -#define CFFPS_LED_OFF BIT(2) #define CFFPS_BLINK_RATE_MS 250 enum { @@ -292,28 +296,38 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page, return rc; } -static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) +static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) { int rc; + u8 next_led_state; struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led); if (brightness == LED_OFF) { - psu->led_state = CFFPS_LED_OFF; + next_led_state = CFFPS_LED_OFF; } else { brightness = LED_FULL; + if (psu->led_state != CFFPS_LED_BLINK) - psu->led_state = CFFPS_LED_ON; + next_led_state = CFFPS_LED_ON; + else + next_led_state = CFFPS_LED_BLINK; } + dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n", + brightness, next_led_state); + pmbus_set_page(psu->client, 0); rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD, - psu->led_state); + next_led_state); if (rc < 0) - return; + return rc; + psu->led_state = next_led_state; led_cdev->brightness = brightness; + + return 0; } static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev, @@ -323,10 +337,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev, int rc; struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led); - psu->led_state = CFFPS_LED_BLINK; - - if (led_cdev->brightness == LED_OFF) - return 0; + dev_dbg(&psu->client->dev, "LED blink set.\n"); pmbus_set_page(psu->client, 0); @@ -335,6 +346,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev, if (rc < 0) return rc; + psu->led_state = CFFPS_LED_BLINK; + led_cdev->brightness = LED_FULL; *delay_on = CFFPS_BLINK_RATE_MS; *delay_off = CFFPS_BLINK_RATE_MS; @@ -351,7 +364,7 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu) client->addr); psu->led.name = psu->led_name; psu->led.max_brightness = LED_FULL; - psu->led.brightness_set = ibm_cffps_led_brightness_set; + psu->led.brightness_set_blocking = ibm_cffps_led_brightness_set; psu->led.blink_set = ibm_cffps_led_blink_set; rc = devm_led_classdev_register(dev, &psu->led); diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c index f01f4887fb2e..a91ed01abb68 100644 --- a/drivers/hwmon/pmbus/ltc2978.c +++ b/drivers/hwmon/pmbus/ltc2978.c @@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882, #define LTC_POLL_TIMEOUT 100 /* in milli-seconds */ -#define LTC_NOT_BUSY BIT(5) -#define LTC_NOT_PENDING BIT(4) +#define LTC_NOT_BUSY BIT(6) +#define LTC_NOT_PENDING BIT(5) /* * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 219c10eb752c..ee44640edeb5 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; + + /* mask off max threshold before checking min value */ + val &= ETM_CYC_THRESHOLD_MASK; if (val < drvdata->ccitmin) return -EINVAL; - config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; + config->ccctlr = val; return size; } static DEVICE_ATTR_RW(cyc_threshold); @@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev, return -EINVAL; if 
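
The cyc_threshold fix above masks the written value to the field width before the minimum check, so an overwide value can no longer pass the check and then be silently truncated below ccitmin. The same validate-after-masking shape in isolation; the 12-bit field width and the minimum are illustrative values:

#include <stdio.h>
#include <errno.h>

#define FIELD_MASK 0xFFF    /* illustrative 12-bit threshold field */

/* Mask to the field width first, then range-check: checking the raw
 * value would let e.g. 0x1004 pass a min-of-16 test and then be
 * stored as 4 once truncated. */
static int store_threshold(unsigned long val, unsigned long min,
                           unsigned long *reg)
{
    val &= FIELD_MASK;
    if (val < min)
        return -EINVAL;
    *reg = val;
    return 0;
}

int main(void)
{
    unsigned long reg;

    printf("%d\n", store_threshold(0x1004, 16, &reg)); /* -EINVAL now */
    printf("%d\n", store_threshold(0x020, 16, &reg));  /* 0, stored */
    return 0;
}
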
(!drvdata->nr_addr_cmp) return -EINVAL; + /* - * Bit[7:0] selects which address range comparator is used for - * branch broadcast control. + * Bit[8] controls include(1) / exclude(0), bits[0-7] select + * individual range comparators. If include then at least 1 + * range must be selected. */ - if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) + if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0)) return -EINVAL; - config->bb_ctrl = val; + config->bb_ctrl = val & GENMASK(8, 0); return size; } static DEVICE_ATTR_RW(bb_ctrl); @@ -1324,8 +1329,8 @@ static ssize_t seq_event_store(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->seq_idx; - /* RST, bits[7:0] */ - config->seq_ctrl[idx] = val & 0xFF; + /* Seq control has two masks B[15:8] F[7:0] */ + config->seq_ctrl[idx] = val & 0xFFFF; spin_unlock(&drvdata->spinlock); return size; } @@ -1580,7 +1585,7 @@ static ssize_t res_ctrl_store(struct device *dev, if (idx % 2 != 0) /* PAIRINV, bit[21] */ val &= ~BIT(21); - config->res_ctrl[idx] = val; + config->res_ctrl[idx] = val & GENMASK(21, 0); spin_unlock(&drvdata->spinlock); return size; } diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 05f7896c3a01..b605889b507a 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -38,12 +38,14 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel"); * @atclk: optional clock for the core parts of the funnel. * @csdev: component vitals needed by the framework. * @priority: port selection order. + * @spinlock: serialize enable/disable operations. */ struct funnel_drvdata { void __iomem *base; struct clk *atclk; struct coresight_device *csdev; unsigned long priority; + spinlock_t spinlock; }; static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port) @@ -76,11 +78,21 @@ static int funnel_enable(struct coresight_device *csdev, int inport, { int rc = 0; struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + unsigned long flags; + bool first_enable = false; - if (drvdata->base) - rc = dynamic_funnel_enable_hw(drvdata, inport); - + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_read(&csdev->refcnt[inport]) == 0) { + if (drvdata->base) + rc = dynamic_funnel_enable_hw(drvdata, inport); + if (!rc) + first_enable = true; + } if (!rc) + atomic_inc(&csdev->refcnt[inport]); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + if (first_enable) dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport); return rc; } @@ -107,11 +119,19 @@ static void funnel_disable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + unsigned long flags; + bool last_disable = false; - if (drvdata->base) - dynamic_funnel_disable_hw(drvdata, inport); + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_dec_return(&csdev->refcnt[inport]) == 0) { + if (drvdata->base) + dynamic_funnel_disable_hw(drvdata, inport); + last_disable = true; + } + spin_unlock_irqrestore(&drvdata->spinlock, flags); - dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport); + if (last_disable) + dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport); } static const struct coresight_ops_link funnel_link_ops = { diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index b29ba640eb25..43304196a1a6 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ 
b/drivers/hwtracing/coresight/coresight-replicator.c @@ -31,11 +31,13 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator"); * whether this one is programmable or not. * @atclk: optional clock for the core parts of the replicator. * @csdev: component vitals needed by the framework + * @spinlock: serialize enable/disable operations. */ struct replicator_drvdata { void __iomem *base; struct clk *atclk; struct coresight_device *csdev; + spinlock_t spinlock; }; static void dynamic_replicator_reset(struct replicator_drvdata *drvdata) @@ -97,10 +99,22 @@ static int replicator_enable(struct coresight_device *csdev, int inport, { int rc = 0; struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + unsigned long flags; + bool first_enable = false; - if (drvdata->base) - rc = dynamic_replicator_enable(drvdata, inport, outport); + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_read(&csdev->refcnt[outport]) == 0) { + if (drvdata->base) + rc = dynamic_replicator_enable(drvdata, inport, + outport); + if (!rc) + first_enable = true; + } if (!rc) + atomic_inc(&csdev->refcnt[outport]); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + if (first_enable) dev_dbg(&csdev->dev, "REPLICATOR enabled\n"); return rc; } @@ -137,10 +151,19 @@ static void replicator_disable(struct coresight_device *csdev, int inport, int outport) { struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + unsigned long flags; + bool last_disable = false; - if (drvdata->base) - dynamic_replicator_disable(drvdata, inport, outport); - dev_dbg(&csdev->dev, "REPLICATOR disabled\n"); + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_dec_return(&csdev->refcnt[outport]) == 0) { + if (drvdata->base) + dynamic_replicator_disable(drvdata, inport, outport); + last_disable = true; + } + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + if (last_disable) + dev_dbg(&csdev->dev, "REPLICATOR disabled\n"); } static const struct coresight_ops_link replicator_link_ops = { diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 807416b75ecc..d0cc3985b72a 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -334,9 +334,10 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev) static int tmc_enable_etf_link(struct coresight_device *csdev, int inport, int outport) { - int ret; + int ret = 0; unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + bool first_enable = false; spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { @@ -344,12 +345,18 @@ static int tmc_enable_etf_link(struct coresight_device *csdev, return -EBUSY; } - ret = tmc_etf_enable_hw(drvdata); + if (atomic_read(&csdev->refcnt[0]) == 0) { + ret = tmc_etf_enable_hw(drvdata); + if (!ret) { + drvdata->mode = CS_MODE_SYSFS; + first_enable = true; + } + } if (!ret) - drvdata->mode = CS_MODE_SYSFS; + atomic_inc(&csdev->refcnt[0]); spin_unlock_irqrestore(&drvdata->spinlock, flags); - if (!ret) + if (first_enable) dev_dbg(&csdev->dev, "TMC-ETF enabled\n"); return ret; } @@ -359,6 +366,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev, { unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + bool last_disable = false; spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { @@ -366,11 +374,15 @@ static void tmc_disable_etf_link(struct coresight_device *csdev, return; } - 
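
The funnel, replicator and TMC-ETF hunks all converge on one pattern: serialize with a lock, touch the hardware only on the 0 -> 1 and 1 -> 0 refcount transitions, and log after dropping the lock. A userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlock and a flag for the register write:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt;
static int hw_enabled;      /* stands in for the real register write */

/* Program the hardware only on the 0 -> 1 transition. */
static void link_enable(void)
{
    int first_enable = 0;

    pthread_mutex_lock(&lock);
    if (refcnt++ == 0) {
        hw_enabled = 1;
        first_enable = 1;
    }
    pthread_mutex_unlock(&lock);

    if (first_enable)       /* log outside the lock, as the drivers do */
        printf("link enabled\n");
}

/* Undo the hardware programming only on the 1 -> 0 transition. */
static void link_disable(void)
{
    int last_disable = 0;

    pthread_mutex_lock(&lock);
    if (--refcnt == 0) {
        hw_enabled = 0;
        last_disable = 1;
    }
    pthread_mutex_unlock(&lock);

    if (last_disable)
        printf("link disabled\n");
}

int main(void)
{
    link_enable();
    link_enable();  /* second user: no hardware touch, no log */
    link_disable();
    link_disable(); /* last user gone: hardware disabled */
    return 0;
}
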
tmc_etf_disable_hw(drvdata); - drvdata->mode = CS_MODE_DISABLED; + if (atomic_dec_return(&csdev->refcnt[0]) == 0) { + tmc_etf_disable_hw(drvdata); + drvdata->mode = CS_MODE_DISABLED; + last_disable = true; + } spin_unlock_irqrestore(&drvdata->spinlock, flags); - dev_dbg(&csdev->dev, "TMC-ETF disabled\n"); + if (last_disable) + dev_dbg(&csdev->dev, "TMC-ETF disabled\n"); } static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 6453c67a4d01..0bbce0d29158 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -253,9 +253,9 @@ static int coresight_enable_link(struct coresight_device *csdev, struct coresight_device *parent, struct coresight_device *child) { - int ret; + int ret = 0; int link_subtype; - int refport, inport, outport; + int inport, outport; if (!parent || !child) return -EINVAL; @@ -264,29 +264,17 @@ static int coresight_enable_link(struct coresight_device *csdev, outport = coresight_find_link_outport(csdev, child); link_subtype = csdev->subtype.link_subtype; - if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) - refport = inport; - else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) - refport = outport; - else - refport = 0; + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0) + return inport; + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0) + return outport; - if (refport < 0) - return refport; + if (link_ops(csdev)->enable) + ret = link_ops(csdev)->enable(csdev, inport, outport); + if (!ret) + csdev->enable = true; - if (atomic_inc_return(&csdev->refcnt[refport]) == 1) { - if (link_ops(csdev)->enable) { - ret = link_ops(csdev)->enable(csdev, inport, outport); - if (ret) { - atomic_dec(&csdev->refcnt[refport]); - return ret; - } - } - } - - csdev->enable = true; - - return 0; + return ret; } static void coresight_disable_link(struct coresight_device *csdev, @@ -295,7 +283,7 @@ static void coresight_disable_link(struct coresight_device *csdev, { int i, nr_conns; int link_subtype; - int refport, inport, outport; + int inport, outport; if (!parent || !child) return; @@ -305,20 +293,15 @@ static void coresight_disable_link(struct coresight_device *csdev, link_subtype = csdev->subtype.link_subtype; if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { - refport = inport; nr_conns = csdev->pdata->nr_inport; } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) { - refport = outport; nr_conns = csdev->pdata->nr_outport; } else { - refport = 0; nr_conns = 1; } - if (atomic_dec_return(&csdev->refcnt[refport]) == 0) { - if (link_ops(csdev)->disable) - link_ops(csdev)->disable(csdev, inport, outport); - } + if (link_ops(csdev)->disable) + link_ops(csdev)->disable(csdev, inport, outport); for (i = 0; i < nr_conns; i++) if (atomic_read(&csdev->refcnt[i]) != 0) diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index d5c1821b31c6..ca232ec565e8 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -649,10 +649,8 @@ intel_th_subdevice_alloc(struct intel_th *th, } err = intel_th_device_add_resources(thdev, res, subdev->nres); - if (err) { - put_device(&thdev->dev); + if (err) goto fail_put_device; - } if (subdev->type == INTEL_TH_OUTPUT) { if (subdev->mknode) @@ -667,10 +665,8 @@ intel_th_subdevice_alloc(struct intel_th *th, } err = device_add(&thdev->dev); - if (err) { - put_device(&thdev->dev); + if (err) goto 
fail_free_res; - } /* need switch driver to be loaded to enumerate the rest */ if (subdev->type == INTEL_TH_SWITCH && !req) { @@ -838,9 +834,6 @@ static irqreturn_t intel_th_irq(int irq, void *data) ret |= d->irq(th->thdev[i]); } - if (ret == IRQ_NONE) - pr_warn_ratelimited("nobody cared for irq\n"); - return ret; } @@ -891,6 +884,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, if (th->irq == -1) th->irq = devres[r].start; + th->num_irqs++; break; default: dev_warn(dev, "Unknown resource type %lx\n", @@ -944,6 +938,9 @@ void intel_th_free(struct intel_th *th) th->num_thdevs = 0; + for (i = 0; i < th->num_irqs; i++) + devm_free_irq(th->dev, th->irq + i, th); + pm_runtime_get_sync(th->dev); pm_runtime_forbid(th->dev); diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 0df480072b6c..6f4f5486fe6d 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -261,6 +261,7 @@ enum th_mmio_idx { * @num_thdevs: number of devices in the @thdev array * @num_resources: number of resources in the @resource array * @irq: irq number + * @num_irqs: number of IRQs in use * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -277,6 +278,7 @@ struct intel_th { unsigned int num_thdevs; unsigned int num_resources; int irq; + int num_irqs; int id; int major; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 6d240dfae9d9..255f8f41c8ff 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -718,9 +718,6 @@ static int msc_win_set_lockout(struct msc_window *win, if (old != expect) { ret = -EINVAL; - dev_warn_ratelimited(msc_dev(win->msc), - "expected lockout state %d, got %d\n", - expect, old); goto unlock; } @@ -741,6 +738,10 @@ unlock: /* from intel_th_msc_window_unlock(), don't warn if not locked */ if (expect == WIN_LOCKED && old == new) return 0; + + dev_warn_ratelimited(msc_dev(win->msc), + "expected lockout state %d, got %d\n", + expect, old); } return ret; @@ -760,7 +761,7 @@ static int msc_configure(struct msc *msc) lockdep_assert_held(&msc->buf_mutex); if (msc->mode > MSC_MODE_MULTI) - return -ENOTSUPP; + return -EINVAL; if (msc->mode == MSC_MODE_MULTI) { if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE)) @@ -1294,7 +1295,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages, } else if (msc->mode == MSC_MODE_MULTI) { ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins); } else { - ret = -ENOTSUPP; + ret = -EINVAL; } if (!ret) { @@ -1530,7 +1531,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, if (ret >= 0) *ppos = iter->offset; } else { - ret = -ENOTSUPP; + ret = -EINVAL; } put_count: @@ -1676,10 +1677,13 @@ static int intel_th_msc_init(struct msc *msc) return 0; } -static void msc_win_switch(struct msc *msc) +static int msc_win_switch(struct msc *msc) { struct msc_window *first; + if (list_empty(&msc->win_list)) + return -EINVAL; + first = list_first_entry(&msc->win_list, struct msc_window, entry); if (msc_is_last_win(msc->cur_win)) @@ -1691,6 +1695,8 @@ static void msc_win_switch(struct msc *msc) msc->base_addr = msc_win_base_dma(msc->cur_win); intel_th_trace_switch(msc->thdev); + + return 0; } /** @@ -2025,16 +2031,15 @@ win_switch_store(struct device *dev, struct device_attribute *attr, if (val != 1) return -EINVAL; + ret = -EINVAL; mutex_lock(&msc->buf_mutex);
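The intel_th hunks above pair a counter with an explicit teardown: intel_th_alloc() bumps num_irqs for every IRQ resource it is handed, and intel_th_free() releases each devm-requested handler before the bus glue (for PCI, pci_free_irq_vectors()) destroys the vectors, rather than letting devm free them too late. That bookkeeping, restated as a hypothetical helper assuming the struct intel_th fields shown above and one handler per consecutive vector:

/* devm would release these handlers eventually, but only after the
 * PCI code has already freed the vectors; freeing them here keeps
 * the ordering safe. */
static void th_free_irqs(struct intel_th *th)
{
	int i;

	for (i = 0; i < th->num_irqs; i++)
		devm_free_irq(th->dev, th->irq + i, th);

	th->num_irqs = 0;
}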
/* * Window switch can only happen in the "multi" mode. * If an external buffer is engaged, it has full * control over window switching. */ - if (msc->mode != MSC_MODE_MULTI || msc->mbuf) - ret = -ENOTSUPP; - else - msc_win_switch(msc); + if (msc->mode == MSC_MODE_MULTI && !msc->mbuf) + ret = msc_win_switch(msc); mutex_unlock(&msc->buf_mutex); return ret ? ret : size; } diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 03ca5b1bef9f..86aa6a46bcba 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -204,11 +204,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Comet Lake PCH-V */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Ice Lake NNPI */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Ice Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8a29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Tiger Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a33), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Tiger Lake PCH */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), @@ -219,6 +234,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Elkhart Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Elkhart Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c index b178a5495b67..360b5c03df95 100644 --- a/drivers/hwtracing/stm/p_sys-t.c +++ b/drivers/hwtracing/stm/p_sys-t.c @@ -238,7 +238,7 @@ static struct configfs_attribute *sys_t_policy_attrs[] = { static inline bool sys_t_need_ts(struct sys_t_output *op) { if (op->node.ts_interval && - time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) { + time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) { op->ts_jiffies = jiffies; return true; @@ -250,8 +250,8 @@ static inline bool sys_t_need_ts(struct sys_t_output *op) static bool sys_t_need_clock_sync(struct sys_t_output *op) { if (op->node.clocksync_interval && - time_after(op->clocksync_jiffies + op->node.clocksync_interval, - jiffies)) { + time_after(jiffies, + op->clocksync_jiffies + op->node.clocksync_interval)) { op->clocksync_jiffies = jiffies; return true; diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 4b9e44b227d8..4f932a419752 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c @@ -345,7 +345,11 @@ void stp_policy_unbind(struct stp_policy *policy) stm->policy = NULL; policy->stm = NULL; + /* + * Drop the reference on the protocol driver and lose the link.
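The p_sys-t change above is easy to misread, so it is worth spelling out: time_after(a, b) is true when a is after b, which means an "interval has elapsed" test must pass jiffies as the first argument. With the old argument order the test was true throughout the interval and false once it had passed, inverting the rate limiting. Restated as a self-contained helper:

#include <linux/jiffies.h>

/* True once 'interval' jiffies have elapsed since 'last'; the
 * time_after() macro also copes with jiffies wraparound. */
static bool interval_elapsed(unsigned long last, unsigned long interval)
{
	return time_after(jiffies, last + interval);
}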
+ */ stm_put_protocol(stm->pdrv); + stm->pdrv = NULL; stm_put_device(stm); } diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 5255d3755411..1de23b4f3809 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -171,7 +171,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev) /* SCL Low Time */ writel(t_low, idev->base + ALTR_I2C_SCL_LOW); /* SDA Hold Time, 300ns */ - writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD); + writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD); /* Mask all master interrupt bits */ altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index e01b2b57e724..5ab901ad615d 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -58,6 +58,7 @@ struct bcm2835_i2c_dev { struct i2c_adapter adapter; struct completion completion; struct i2c_msg *curr_msg; + struct clk *bus_clk; int num_msgs; u32 msg_err; u8 *msg_buf; @@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) struct resource *mem, *irq; int ret; struct i2c_adapter *adap; - struct clk *bus_clk; struct clk *mclk; u32 bus_clk_rate; @@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) return PTR_ERR(mclk); } - bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); + i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); - if (IS_ERR(bus_clk)) { + if (IS_ERR(i2c_dev->bus_clk)) { dev_err(&pdev->dev, "Could not register clock\n"); - return PTR_ERR(bus_clk); + return PTR_ERR(i2c_dev->bus_clk); } ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", @@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) bus_clk_rate = 100000; } - ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate); + ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate); if (ret < 0) { dev_err(&pdev->dev, "Could not set clock frequency\n"); return ret; } - ret = clk_prepare_enable(bus_clk); + ret = clk_prepare_enable(i2c_dev->bus_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare clock"); return ret; @@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) static int bcm2835_i2c_remove(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev); - struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div"); - clk_rate_exclusive_put(bus_clk); - clk_disable_unprepare(bus_clk); + clk_rate_exclusive_put(i2c_dev->bus_clk); + clk_disable_unprepare(i2c_dev->bus_clk); free_irq(i2c_dev->irq, i2c_dev); i2c_del_adapter(&i2c_dev->adapter); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 050adda7c1bd..05b35ac33ce3 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev) pm_runtime_get_noresume(&pdev->dev); i2c_del_adapter(&dev->adapter); + devm_free_irq(&pdev->dev, dev->irq, dev); pci_free_irq_vectors(pdev); } diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 3a9e840a3546..a4a6825c8758 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev, if (ret == -ENOENT) retdesc = ERR_PTR(-EPROBE_DEFER); - if (ret != -EPROBE_DEFER) + if (PTR_ERR(retdesc) != -EPROBE_DEFER) dev_err(dev, "error 
trying to get descriptor: %d\n", ret); return retdesc; diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 8497c7a95dd4..224f830f77f9 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&priv->adap); pm_runtime_disable(priv->dev); pm_runtime_set_suspended(priv->dev); + clk_disable_unprepare(priv->clk); return 0; } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index f1c714acc280..3ff6fbd79b12 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -129,11 +129,6 @@ #define TCOBASE 0x050 #define TCOCTL 0x054 -#define ACPIBASE 0x040 -#define ACPIBASE_SMI_OFF 0x030 -#define ACPICTRL 0x044 -#define ACPICTRL_EN 0x080 - #define SBREG_BAR 0x10 #define SBREG_SMBCTRL 0xc6000c #define SBREG_SMBCTRL_DNV 0xcf000c @@ -1544,7 +1539,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden); spin_unlock(&p2sb_spinlock); - res = &tco_res[ICH_RES_MEM_OFF]; + res = &tco_res[1]; if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; else @@ -1554,7 +1549,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, res->flags = IORESOURCE_MEM; return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, - tco_res, 3, &spt_tco_platform_data, + tco_res, 2, &spt_tco_platform_data, sizeof(spt_tco_platform_data)); } @@ -1567,17 +1562,16 @@ static struct platform_device * i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev, struct resource *tco_res) { - return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, - tco_res, 2, &cnl_tco_platform_data, - sizeof(cnl_tco_platform_data)); + return platform_device_register_resndata(&pci_dev->dev, + "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data, + sizeof(cnl_tco_platform_data)); } static void i801_add_tco(struct i801_priv *priv) { - u32 base_addr, tco_base, tco_ctl, ctrl_val; struct pci_dev *pci_dev = priv->pci_dev; - struct resource tco_res[3], *res; - unsigned int devfn; + struct resource tco_res[2], *res; + u32 tco_base, tco_ctl; /* If we have ACPI based watchdog use that instead */ if (acpi_has_watchdog()) @@ -1592,30 +1586,15 @@ static void i801_add_tco(struct i801_priv *priv) return; memset(tco_res, 0, sizeof(tco_res)); - - res = &tco_res[ICH_RES_IO_TCO]; + /* + * Always populate the main iTCO IO resource here. The second entry + * for NO_REBOOT MMIO is filled by the SPT specific function. + */ + res = &tco_res[0]; res->start = tco_base & ~1; res->end = res->start + 32 - 1; res->flags = IORESOURCE_IO; - /* - * Power Management registers. - */ - devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2); - pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr); - - res = &tco_res[ICH_RES_IO_SMI]; - res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF; - res->end = res->start + 3; - res->flags = IORESOURCE_IO; - - /* - * Enable the ACPI I/O space. 
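The bcm2835 hunks above illustrate a remove()-path rule the old code broke: resources that remove() must release belong in the driver state, captured at probe time. Calling devm_clk_get(..., "div") again at teardown looked up a clock by name and handed back a fresh handle, not the one probe had rate-locked and enabled. The shape of the fix, sketched with a hypothetical my_dev and a plain devm-managed clock standing in for the registered divider:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct my_dev {
	struct device *dev;
	struct clk *bus_clk;	/* cached at probe for remove() */
};

static int my_probe(struct my_dev *d)
{
	d->bus_clk = devm_clk_get(d->dev, "bus");
	if (IS_ERR(d->bus_clk))
		return PTR_ERR(d->bus_clk);

	/* Enable exactly once; remove() undoes exactly this. */
	return clk_prepare_enable(d->bus_clk);
}

static void my_remove(struct my_dev *d)
{
	clk_disable_unprepare(d->bus_clk);
}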
- */ - pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val); - ctrl_val |= ACPICTRL_EN; - pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val); - if (priv->features & FEATURE_TCO_CNL) priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res); else diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 38556381f4ca..2f8b8050a223 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev) adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev, "scl", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_scl)) - return PTR_ERR(adapter_data->gpio_scl); + if (IS_ERR(adapter_data->gpio_scl)) { + ret = PTR_ERR(adapter_data->gpio_scl); + goto free_both; + } adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_sda)) - return PTR_ERR(adapter_data->gpio_sda); + if (IS_ERR(adapter_data->gpio_sda)) { + ret = PTR_ERR(adapter_data->gpio_sda); + goto free_both; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 25dcd73acd63..8f0e1f802f2d 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -73,25 +73,6 @@ #define JZ4780_I2C_STA_TFNF BIT(1) #define JZ4780_I2C_STA_ACT BIT(0) -static const char * const jz4780_i2c_abrt_src[] = { - "ABRT_7B_ADDR_NOACK", - "ABRT_10ADDR1_NOACK", - "ABRT_10ADDR2_NOACK", - "ABRT_XDATA_NOACK", - "ABRT_GCALL_NOACK", - "ABRT_GCALL_READ", - "ABRT_HS_ACKD", - "SBYTE_ACKDET", - "ABRT_HS_NORSTRT", - "SBYTE_NORSTRT", - "ABRT_10B_RD_NORSTRT", - "ABRT_MASTER_DIS", - "ARB_LOST", - "SLVFLUSH_TXFIFO", - "SLV_ARBLOST", - "SLVRD_INTX", -}; - #define JZ4780_I2C_INTST_IGC BIT(11) #define JZ4780_I2C_INTST_ISTT BIT(10) #define JZ4780_I2C_INTST_ISTP BIT(9) @@ -529,21 +510,8 @@ done: static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src) { - int i; - - dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src); - dev_err(&i2c->adap.dev, "device addr=%x\n", - jz4780_i2c_readw(i2c, JZ4780_I2C_TAR)); - dev_err(&i2c->adap.dev, "send cmd count:%d %d\n", - i2c->cmd, i2c->cmd_buf[i2c->cmd]); - dev_err(&i2c->adap.dev, "receive data count:%d %d\n", - i2c->cmd, i2c->data_buf[i2c->cmd]); - - for (i = 0; i < 16; i++) { - if (src & BIT(i)) - dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n", - i, jz4780_i2c_abrt_src[i]); - } + dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n", + src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]); } static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c, diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index 5a1235fd86bb..32cd62188a3d 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd) static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd) { - unsigned long target = jiffies + msecs_to_jiffies(1000); u32 val; + int ret; - do { - val = readl(i2cd->regs + I2C_MST_CNTL); - if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER)) - break; - if ((val & I2C_MST_CNTL_STATUS) != - I2C_MST_CNTL_STATUS_BUS_BUSY) - break; - usleep_range(500, 600); - } while (time_is_after_jiffies(target)); + ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val, + !(val & I2C_MST_CNTL_CYCLE_TRIGGER) || + (val & 
I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY, + 500, 1000 * USEC_PER_MSEC); - if (time_is_before_jiffies(target)) { + if (ret) { dev_err(i2cd->dev, "i2c timeout error %x\n", val); return -ETIMEDOUT; } diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c index 07d5dfce68d4..1da347e6a358 100644 --- a/drivers/i2c/busses/i2c-stm32.c +++ b/drivers/i2c/busses/i2c-stm32.c @@ -20,13 +20,13 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); if (!dma) - return NULL; + return ERR_PTR(-ENOMEM); /* Request and configure I2C TX dma channel */ - dma->chan_tx = dma_request_slave_channel(dev, "tx"); - if (!dma->chan_tx) { + dma->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(dma->chan_tx)) { dev_dbg(dev, "can't request DMA tx channel\n"); - ret = -EINVAL; + ret = PTR_ERR(dma->chan_tx); goto fail_al; } @@ -42,10 +42,10 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, } /* Request and configure I2C RX dma channel */ - dma->chan_rx = dma_request_slave_channel(dev, "rx"); - if (!dma->chan_rx) { + dma->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(dma->chan_rx)) { dev_err(dev, "can't request DMA rx channel\n"); - ret = -EINVAL; + ret = PTR_ERR(dma->chan_rx); goto fail_tx; } @@ -75,7 +75,7 @@ fail_al: devm_kfree(dev, dma); dev_info(dev, "can't use DMA\n"); - return NULL; + return ERR_PTR(ret); } void stm32_i2c_dma_free(struct stm32_i2c_dma *dma) diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index b24e7b937f21..b2634afe066d 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1267,8 +1267,8 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev, * slave[0] supports 7-bit and 10-bit slave address * slave[1] supports 7-bit slave address only */ - for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) { - if (i == 1 && (slave->flags & I2C_CLIENT_PEC)) + for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) { + if (i == 1 && (slave->flags & I2C_CLIENT_TEN)) continue; if (!i2c_dev->slave[i]) { *id = i; @@ -1955,6 +1955,15 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr, STM32F7_I2C_TXDR, STM32F7_I2C_RXDR); + if (PTR_ERR(i2c_dev->dma) == -ENODEV) + i2c_dev->dma = NULL; + else if (IS_ERR(i2c_dev->dma)) { + ret = PTR_ERR(i2c_dev->dma); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "Failed to request dma error %i\n", ret); + goto clk_free; + } platform_set_drvdata(pdev, i2c_dev); @@ -1985,6 +1994,11 @@ pm_disable: pm_runtime_set_suspended(i2c_dev->dev); pm_runtime_dont_use_autosuspend(i2c_dev->dev); + if (i2c_dev->dma) { + stm32_i2c_dma_free(i2c_dev->dma); + i2c_dev->dma = NULL; + } + clk_free: clk_disable_unprepare(i2c_dev->clk); @@ -1995,21 +2009,21 @@ static int stm32f7_i2c_remove(struct platform_device *pdev) { struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev); - if (i2c_dev->dma) { - stm32_i2c_dma_free(i2c_dev->dma); - i2c_dev->dma = NULL; - } - i2c_del_adapter(&i2c_dev->adap); pm_runtime_get_sync(i2c_dev->dev); - clk_disable_unprepare(i2c_dev->clk); - pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); pm_runtime_set_suspended(i2c_dev->dev); pm_runtime_dont_use_autosuspend(i2c_dev->dev); + if (i2c_dev->dma) { + stm32_i2c_dma_free(i2c_dev->dma); + i2c_dev->dma = NULL; + } + + clk_disable_unprepare(i2c_dev->clk); + return 0; } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 
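The stm32 conversion above from dma_request_slave_channel() to dma_request_chan() is not cosmetic: the latter returns an ERR_PTR instead of NULL, so callers can distinguish -EPROBE_DEFER (try again once the DMA controller has probed) from -ENODEV (no channel described, fall back to PIO) from genuine failures, which is exactly what the stm32f7 probe hunk then does. The caller-side pattern, sketched with a hypothetical helper:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int my_setup_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan)) {
		int ret = PTR_ERR(chan);

		*out = NULL;
		if (ret == -ENODEV)
			return 0;	/* optional DMA: use PIO instead */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "DMA request failed: %d\n", ret);
		return ret;		/* includes -EPROBE_DEFER */
	}

	*out = chan;
	return 0;
}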
c1683f9338b4..dbc43cfec19d 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) + if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_i2c_runtime_resume(&pdev->dev); - else + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto unprepare_div_clk; + } + } else { ret = pm_runtime_get_sync(i2c_dev->dev); - - if (ret < 0) { - dev_err(&pdev->dev, "runtime resume failed\n"); - goto unprepare_div_clk; + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto disable_rpm; + } } if (i2c_dev->is_multimaster_mode) { @@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(i2c_dev->dev, "div_clk enable failed %d\n", ret); - goto disable_rpm; + goto put_rpm; } } @@ -1671,11 +1675,16 @@ disable_div_clk: if (i2c_dev->is_multimaster_mode) clk_disable(i2c_dev->div_clk); -disable_rpm: - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) +put_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_put_sync(&pdev->dev); + else tegra_i2c_runtime_suspend(&pdev->dev); +disable_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_disable(&pdev->dev); + unprepare_div_clk: clk_unprepare(i2c_dev->div_clk); @@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); + err = pm_runtime_force_suspend(dev); + if (err < 0) + return err; + return 0; } @@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; + err = pm_runtime_force_resume(dev); + if (err < 0) + return err; + i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 62a1c92ab803..ce70b5288472 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle); static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { struct device *dev; + struct i2c_client *client; dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev); - return dev ? i2c_verify_client(dev) : NULL; + if (!dev) + return NULL; + + client = i2c_verify_client(dev); + if (!client) + put_device(dev); + + return client; } static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 5f6a4985f2bc..810a942eaa8e 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) * If we can set SDA, we will always create a STOP to ensure additional * pulses will do no harm. This is achieved by letting SDA follow SCL * half a cycle later. Check the 'incomplete_write_byte' fault injector - * for details. + * for details. Note that we must honour tsu:sto, 4us, but let's use 5us + * here for simplicity.
*/ bri->set_scl(adap, scl); - ndelay(RECOVERY_NDELAY / 2); + ndelay(RECOVERY_NDELAY); if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); @@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) scl = !scl; bri->set_scl(adap, scl); /* Creating STOP again, see above */ - ndelay(RECOVERY_NDELAY / 2); + if (scl) { + /* Honour minimum tsu:sto */ + ndelay(RECOVERY_NDELAY); + } else { + /* Honour minimum tf and thd:dat */ + ndelay(RECOVERY_NDELAY / 2); + } if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c index a1898e11b04e..943bf944bf72 100644 --- a/drivers/ide/cmd64x.c +++ b/drivers/ide/cmd64x.c @@ -66,6 +66,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode) struct ide_timing t; u8 arttim = 0; + if (drive->dn >= ARRAY_SIZE(drwtim_regs)) + return; + ide_timing_compute(drive, mode, &t, T, 0); /* diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c index ac6fc3fffa0d..458e72e034b0 100644 --- a/drivers/ide/serverworks.c +++ b/drivers/ide/serverworks.c @@ -115,6 +115,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) struct pci_dev *dev = to_pci_dev(hwif->dev); const u8 pio = drive->pio_mode - XFER_PIO_0; + if (drive->dn >= ARRAY_SIZE(drive_pci)) + return; + pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]); if (svwks_csb_check(dev)) { @@ -141,6 +144,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0; + if (drive->dn >= ARRAY_SIZE(drive_pci2)) + return; + pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing); pci_read_config_byte(dev, 0x54, &ultra_enable); diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index 67b8817995c0..60daf04ce188 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -237,6 +237,7 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = { .realbits = 12, \ .storagebits = 16, \ .shift = 4, \ + .endianness = IIO_BE, \ }, \ } diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 2e37f8a6d8cf..be661396095c 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -993,6 +993,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = { #define ST_ACCEL_TRIGGER_OPS NULL #endif +#ifdef CONFIG_ACPI static const struct iio_mount_matrix * get_mount_matrix(const struct iio_dev *indio_dev, const struct iio_chan_spec *chan) @@ -1013,7 +1014,6 @@ static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = { static int apply_acpi_orientation(struct iio_dev *indio_dev, struct iio_chan_spec *channels) { -#ifdef CONFIG_ACPI struct st_sensor_data *adata = iio_priv(indio_dev); struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_device *adev; @@ -1141,10 +1141,14 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev, out: kfree(buffer.pointer); return ret; -#else /* !CONFIG_ACPI */ - return 0; -#endif } +#else /* !CONFIG_ACPI */ +static int apply_acpi_orientation(struct iio_dev *indio_dev, + struct iio_chan_spec *channels) +{ + return 0; +} +#endif /* * st_accel_get_settings() - get sensor settings from device name diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c index 50fa0fc32baa..0a0bffe04217 100644 --- a/drivers/iio/accel/st_accel_i2c.c +++ b/drivers/iio/accel/st_accel_i2c.c @@ -114,7 +114,7 @@ MODULE_DEVICE_TABLE(of, 
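The cmd64x and serverworks hunks above add the same guard: drive->dn indexes fixed per-drive register tables, so any value at or beyond ARRAY_SIZE() must be rejected before the table is read. The idiom in isolation, with a made-up table:

#include <linux/errno.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static const u8 drive_regs[] = { 0x41, 0x42, 0x43, 0x44 };

/* Bounds-check the index before it can walk off the table. */
static int drive_reg(unsigned int dn, u8 *reg)
{
	if (dn >= ARRAY_SIZE(drive_regs))
		return -EINVAL;

	*reg = drive_regs[dn];
	return 0;
}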
st_accel_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id st_accel_acpi_match[] = { - {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME}, + {"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME}, {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME}, { }, }; diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index edc6f1cc90b2..306bf15023a7 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -39,6 +39,8 @@ #define AD7124_STATUS_POR_FLAG_MSK BIT(4) /* AD7124_ADC_CONTROL */ +#define AD7124_ADC_CTRL_REF_EN_MSK BIT(8) +#define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x) #define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6) #define AD7124_ADC_CTRL_PWR(x) FIELD_PREP(AD7124_ADC_CTRL_PWR_MSK, x) #define AD7124_ADC_CTRL_MODE_MSK GENMASK(5, 2) @@ -424,7 +426,10 @@ static int ad7124_init_channel_vref(struct ad7124_state *st, break; case AD7124_INT_REF: st->channel_config[channel_number].vref_mv = 2500; - break; + st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK; + st->adc_control |= AD7124_ADC_CTRL_REF_EN(1); + return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, + 2, st->adc_control); default: dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel); return -EINVAL; @@ -489,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, st->channel_config[channel].buf_negative = of_property_read_bool(child, "adi,buffered-negative"); - *chan = ad7124_channel_template; - chan->address = channel; - chan->scan_index = channel; - chan->channel = ain[0]; - chan->channel2 = ain[1]; - - chan++; + chan[channel] = ad7124_channel_template; + chan[channel].address = channel; + chan[channel].scan_index = channel; + chan[channel].channel = ain[0]; + chan[channel].channel2 = ain[1]; } return 0; diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index f5ba94c03a8d..e4683a68522a 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -85,7 +85,7 @@ err_unlock: static int ad7606_read_samples(struct ad7606_state *st) { - unsigned int num = st->chip_info->num_channels; + unsigned int num = st->chip_info->num_channels - 1; u16 *data = st->data; int ret; diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index ac0ffff6c5ae..6b51bfcad0d0 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -57,29 +57,11 @@ struct ad7949_adc_chip { u32 buffer ____cacheline_aligned; }; -static bool ad7949_spi_cfg_is_read_back(struct ad7949_adc_chip *ad7949_adc) -{ - if (!(ad7949_adc->cfg & AD7949_CFG_READ_BACK)) - return true; - - return false; -} - -static int ad7949_spi_bits_per_word(struct ad7949_adc_chip *ad7949_adc) -{ - int ret = ad7949_adc->resolution; - - if (ad7949_spi_cfg_is_read_back(ad7949_adc)) - ret += AD7949_CFG_REG_SIZE_BITS; - - return ret; -} - static int ad7949_spi_write_cfg(struct ad7949_adc_chip *ad7949_adc, u16 val, u16 mask) { int ret; - int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc); + int bits_per_word = ad7949_adc->resolution; int shift = bits_per_word - AD7949_CFG_REG_SIZE_BITS; struct spi_message msg; struct spi_transfer tx[] = { @@ -107,7 +89,8 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, unsigned int channel) { int ret; - int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc); + int i; + int bits_per_word = ad7949_adc->resolution; int mask = GENMASK(ad7949_adc->resolution, 0); struct spi_message msg; struct spi_transfer tx[] = { @@ -118,12 +101,23 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, 
int *val, }, }; - ret = ad7949_spi_write_cfg(ad7949_adc, - channel << AD7949_OFFSET_CHANNEL_SEL, - AD7949_MASK_CHANNEL_SEL); - if (ret) - return ret; + /* + * 1: write CFG for sample N and read old data (sample N-2) + * 2: if CFG was not changed since sample N-1 then we'll get good data + * at the next xfer, so we bail out now, otherwise we write something + * and we read garbage (sample N-1 configuration). + */ + for (i = 0; i < 2; i++) { + ret = ad7949_spi_write_cfg(ad7949_adc, + channel << AD7949_OFFSET_CHANNEL_SEL, + AD7949_MASK_CHANNEL_SEL); + if (ret) + return ret; + if (channel == ad7949_adc->current_channel) + break; + } + /* 3: write something and read actual data */ ad7949_adc->buffer = 0; spi_message_init_with_transfers(&msg, tx, 1); ret = spi_sync(ad7949_adc->spi, &msg); @@ -138,10 +132,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, ad7949_adc->current_channel = channel; - if (ad7949_spi_cfg_is_read_back(ad7949_adc)) - *val = (ad7949_adc->buffer >> AD7949_CFG_REG_SIZE_BITS) & mask; - else - *val = ad7949_adc->buffer & mask; + *val = ad7949_adc->buffer & mask; return 0; } diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index a2837a0e7cba..2c01963a6a5c 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -723,6 +723,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit); + u32 cor; if (!chan) continue; @@ -731,6 +732,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) chan->type == IIO_PRESSURE) continue; + if (state) { + cor = at91_adc_readl(st, AT91_SAMA5D2_COR); + + if (chan->differential) + cor |= (BIT(chan->channel) | + BIT(chan->channel2)) << + AT91_SAMA5D2_COR_DIFF_OFFSET; + else + cor &= ~(BIT(chan->channel) << + AT91_SAMA5D2_COR_DIFF_OFFSET); + + at91_adc_writel(st, AT91_SAMA5D2_COR, cor); + } + if (state) { at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel)); diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index 5fa78c273a25..65c7c9329b1c 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -524,6 +524,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) u16 conflict; unsigned int trigger_chan; + ret = iio_triggered_buffer_postenable(indio_dev); + if (ret) + return ret; + mutex_lock(&dln2->mutex); /* Enable ADC */ @@ -537,6 +541,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) (int)conflict); ret = -EBUSY; } + iio_triggered_buffer_predisable(indio_dev); return ret; } @@ -550,6 +555,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&dln2->mutex); if (ret < 0) { dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + iio_triggered_buffer_predisable(indio_dev); return ret; } } else { @@ -557,12 +563,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&dln2->mutex); } - return iio_triggered_buffer_postenable(indio_dev); + return 0; } static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) { - int ret; + int ret, ret2; struct dln2_adc *dln2 = iio_priv(indio_dev); mutex_lock(&dln2->mutex); @@ -577,12 +583,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) ret = dln2_adc_set_port_enabled(dln2, false, NULL); mutex_unlock(&dln2->mutex); 
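The dln2-adc rework above settles the ordering question for triggered buffers: arm the IIO core first, then the hardware, and make every later failure unwind the core again, so postenable and predisable stay balanced no matter where an error hits. Roughly, with my_hw_enable() standing in for the port/ADC setup:

#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>

static int my_hw_enable(struct iio_dev *indio_dev);

static int my_postenable(struct iio_dev *indio_dev)
{
	int ret = iio_triggered_buffer_postenable(indio_dev);

	if (ret)
		return ret;

	ret = my_hw_enable(indio_dev);
	if (ret)
		/* Unwind the core state set up just above. */
		iio_triggered_buffer_predisable(indio_dev);

	return ret;
}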
- if (ret < 0) { + if (ret < 0) dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); - return ret; - } - return iio_triggered_buffer_predisable(indio_dev); + ret2 = iio_triggered_buffer_predisable(indio_dev); + if (ret == 0) + ret = ret2; + + return ret; } static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = { diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c index 214883458582..e3be8eedd773 100644 --- a/drivers/iio/adc/max1027.c +++ b/drivers/iio/adc/max1027.c @@ -458,6 +458,14 @@ static int max1027_probe(struct spi_device *spi) return ret; } + /* Internal reset */ + st->reg = MAX1027_RST_REG; + ret = spi_write(st->spi, &st->reg, 1); + if (ret < 0) { + dev_err(&indio_dev->dev, "Failed to reset the ADC\n"); + return ret; + } + /* Disable averaging */ st->reg = MAX1027_AVG_REG; ret = spi_write(st->spi, &st->reg, 1); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index da073d72f649..e480529b3f04 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -89,6 +89,12 @@ #define MAX9611_TEMP_SCALE_NUM 1000000 #define MAX9611_TEMP_SCALE_DIV 2083 +/* + * Conversion time is 2 ms (typically) at Ta = 25 degrees C. + * No maximum value is known, so play it safe. + */ +#define MAX9611_CONV_TIME_US_RANGE 3000, 3300 + struct max9611_dev { struct device *dev; struct i2c_client *i2c_client; @@ -236,11 +242,9 @@ static int max9611_read_single(struct max9611_dev *max9611, return ret; } - /* - * need a delay here to make register configuration - * stabilize. 1 msec at least, from empirical testing. - */ - usleep_range(1000, 2000); + /* need a delay here to make register configuration stabilize. */ + + usleep_range(MAX9611_CONV_TIME_US_RANGE); ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr); if (ret < 0) { @@ -507,7 +511,7 @@ static int max9611_init(struct max9611_dev *max9611) MAX9611_REG_CTRL2, 0); return ret; } - usleep_range(1000, 2000); + usleep_range(MAX9611_CONV_TIME_US_RANGE); return 0; } diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index e493242c266e..3ae0366a7b58 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -842,31 +842,6 @@ static inline void stm32_dfsdm_process_data(struct stm32_dfsdm_adc *adc, } } -static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p) -{ - struct iio_poll_func *pf = p; - struct iio_dev *indio_dev = pf->indio_dev; - struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); - int available = stm32_dfsdm_adc_dma_residue(adc); - - while (available >= indio_dev->scan_bytes) { - s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi]; - - stm32_dfsdm_process_data(adc, buffer); - - iio_push_to_buffers_with_timestamp(indio_dev, buffer, - pf->timestamp); - available -= indio_dev->scan_bytes; - adc->bufi += indio_dev->scan_bytes; - if (adc->bufi >= adc->buf_sz) - adc->bufi = 0; - } - - iio_trigger_notify_done(indio_dev->trig); - - return IRQ_HANDLED; -} - static void stm32_dfsdm_dma_buffer_done(void *data) { struct iio_dev *indio_dev = data; @@ -874,11 +849,6 @@ static void stm32_dfsdm_dma_buffer_done(void *data) int available = stm32_dfsdm_adc_dma_residue(adc); size_t old_pos; - if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) { - iio_trigger_poll_chained(indio_dev->trig); - return; - } - /* * FIXME: the in-kernel interface does not support cyclic DMA buffers, and * offers only a way to push data sample by sample.
@@ -906,7 +876,15 @@ static void stm32_dfsdm_dma_buffer_done(void *data) adc->bufi = 0; old_pos = 0; } - /* regular iio buffer without trigger */ + /* + * In DMA mode the trigger services of IIO are not used + * (e.g. no call to iio_trigger_poll). + * Calling the irq handler associated with the hardware trigger is + * not relevant, as the conversions have already been done. Data + * transfers are performed directly in the DMA callback instead. + * This implementation avoids calling a trigger irq handler that + * may sleep from an atomic context (the DMA irq handler context). + */ if (adc->dev_data->type == DFSDM_IIO) iio_push_to_buffers(indio_dev, buffer); } @@ -1204,6 +1182,8 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev, stm32_dfsdm_stop_conv(adc); + stm32_dfsdm_process_data(adc, res); + stop_dfsdm: stm32_dfsdm_stop_dfsdm(adc->dfsdm); @@ -1515,8 +1495,7 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) } ret = iio_triggered_buffer_setup(indio_dev, - &iio_pollfunc_store_time, - &stm32_dfsdm_adc_trigger_handler, + &iio_pollfunc_store_time, NULL, &stm32_dfsdm_buffer_setup_ops); if (ret) { stm32_dfsdm_dma_release(indio_dev); diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index fa4586037bb8..a7e65a59bf42 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -65,6 +65,7 @@ config IAQCORE config PMS7003 tristate "Plantower PMS7003 particulate matter sensor" depends on SERIAL_DEV_BUS + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Plantower PMS7003 particulate @@ -90,6 +91,8 @@ config SPS30 tristate "SPS30 particulate matter sensor" depends on I2C select CRC8 + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Sensirion SPS30 particulate matter sensor. diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index cc42219a64f7..979070196da9 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -60,8 +60,8 @@ config AD5446 help Say yes here to build support for Analog Devices AD5300, AD5301, AD5310, AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, - AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612, - AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs + AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611, + AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
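The stm32-dfsdm rework above is the standard cure for a "sleeping function called from invalid context" splat: rather than bouncing DMA completions through the trigger machinery (iio_trigger_poll_chained() can end up in a handler that sleeps), the DMA callback pushes finished samples straight into the IIO buffer. Schematically, with the demux elided:

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/* Hypothetical DMA completion callback: runs in atomic context, so
 * it must not call anything that may sleep. Pushing to the buffer
 * directly is fine; chaining the trigger was not. */
static void my_dma_done(void *data)
{
	struct iio_dev *indio_dev = data;
	s32 sample = 0;		/* stands in for data from the DMA ring */

	iio_push_to_buffers(indio_dev, &sample);
}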
To compile this driver as a module, choose M here: the diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 7df8b4cc295d..61c670f7fc5f 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -327,6 +327,7 @@ enum ad5446_supported_spi_device_ids { ID_AD5541A, ID_AD5512A, ID_AD5553, + ID_AD5600, ID_AD5601, ID_AD5611, ID_AD5621, @@ -381,6 +382,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = { .channel = AD5446_CHANNEL(14, 16, 0), .write = ad5446_write, }, + [ID_AD5600] = { + .channel = AD5446_CHANNEL(16, 16, 0), + .write = ad5446_write, + }, [ID_AD5601] = { .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6), .write = ad5446_write, @@ -448,6 +453,7 @@ static const struct spi_device_id ad5446_spi_ids[] = { {"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */ {"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */ {"ad5553", ID_AD5553}, + {"ad5600", ID_AD5600}, {"ad5601", ID_AD5601}, {"ad5611", ID_AD5611}, {"ad5621", ID_AD5621}, diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index c0acbb5d2ffb..b21f72dd473c 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -139,7 +139,6 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { [2] = LSM330DLC_GYRO_DEV_NAME, [3] = L3G4IS_GYRO_DEV_NAME, [4] = LSM330_GYRO_DEV_NAME, - [5] = LSM9DS0_GYRO_DEV_NAME, }, .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, .odr = { @@ -209,6 +208,80 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { .multi_read_bit = true, .bootime = 2, }, + { + .wai = 0xd4, + .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, + .sensors_supported = { + [0] = LSM9DS0_GYRO_DEV_NAME, + }, + .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, + .odr = { + .addr = 0x20, + .mask = GENMASK(7, 6), + .odr_avl = { + { .hz = 95, .value = 0x00, }, + { .hz = 190, .value = 0x01, }, + { .hz = 380, .value = 0x02, }, + { .hz = 760, .value = 0x03, }, + }, + }, + .pw = { + .addr = 0x20, + .mask = BIT(3), + .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, + }, + .enable_axis = { + .addr = ST_SENSORS_DEFAULT_AXIS_ADDR, + .mask = ST_SENSORS_DEFAULT_AXIS_MASK, + }, + .fs = { + .addr = 0x23, + .mask = GENMASK(5, 4), + .fs_avl = { + [0] = { + .num = ST_GYRO_FS_AVL_245DPS, + .value = 0x00, + .gain = IIO_DEGREE_TO_RAD(8750), + }, + [1] = { + .num = ST_GYRO_FS_AVL_500DPS, + .value = 0x01, + .gain = IIO_DEGREE_TO_RAD(17500), + }, + [2] = { + .num = ST_GYRO_FS_AVL_2000DPS, + .value = 0x02, + .gain = IIO_DEGREE_TO_RAD(70000), + }, + }, + }, + .bdu = { + .addr = 0x23, + .mask = BIT(7), + }, + .drdy_irq = { + .int2 = { + .addr = 0x22, + .mask = BIT(3), + }, + /* + * The sensor has IHL (active low) and open + * drain settings, but only for INT1 and not + * for the DRDY line on INT2. 
+ */ + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = GENMASK(2, 0), + }, + }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, + .multi_read_bit = true, + .bootime = 2, + }, { .wai = 0xd7, .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index bfe1cdb16846..dcf5a5bdfaa8 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -229,7 +229,7 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, *val2 = 65536; return IIO_VAL_FRACTIONAL; } else { - *val = 100; + *val = 100000; *val2 = 65536; return IIO_VAL_FRACTIONAL; } diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 8743b2f376e2..cf7b59d97802 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -454,12 +454,14 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev, case IIO_MAGN: case IIO_PRESSURE: ret = adis_read_reg_16(&st->adis, reg, &val16); - *bias = sign_extend32(val16, 15); + if (ret == 0) + *bias = sign_extend32(val16, 15); break; case IIO_ANGL_VEL: case IIO_ACCEL: ret = adis_read_reg_32(&st->adis, reg, &val32); - *bias = sign_extend32(val32, 31); + if (ret == 0) + *bias = sign_extend32(val32, 31); break; default: ret = -EINVAL; @@ -623,9 +625,13 @@ static int adis16480_read_raw(struct iio_dev *indio_dev, *val2 = (st->chip_info->temp_scale % 1000) * 1000; return IIO_VAL_INT_PLUS_MICRO; case IIO_PRESSURE: - *val = 0; - *val2 = 4000; /* 40ubar = 0.004 kPa */ - return IIO_VAL_INT_PLUS_MICRO; + /* + * max scale is 1310 mbar + * max raw value is 32767 shifted for 32bits + */ + *val = 131; /* 1310mbar = 131 kPa */ + *val2 = 32767 << 16; + return IIO_VAL_FRACTIONAL; default: return -EINVAL; } @@ -786,13 +792,14 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), /* - * storing the value in rad/degree and the scale in degree - * gives us the result in rad and better precession than - * storing the scale directly in rad. + * Typically we do IIO_RAD_TO_DEGREE in the denominator, which + * is exactly the same as IIO_DEGREE_TO_RAD in numerator, since + * it gives better approximation. However, in this case we + * cannot do it since it would not fit in a 32bit variable. 
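A note on the pressure-scale hunk above: IIO_VAL_FRACTIONAL reports val/val2 as an exact quotient, so with a 1310 mbar (131 kPa) full scale over a raw range of 32767 << 16, userspace sees scale = 131 / 2147418112 kPa per LSB, and a full-scale reading maps back to exactly 131 kPa, which the old fixed 0.004 kPa step did not match. As a worked check, assuming the raw maximum quoted in the comment:

/* raw * scale, returned in milli-kPa to keep it integral:
 * full scale: (32767LL << 16) * 131 * 1000 / (32767LL << 16)
 *           = 131000 milli-kPa = 1310 mbar. */
static long long raw_to_millikpa(long long raw)
{
	return raw * 131 * 1000 / (32767LL << 16);
}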
*/ - .gyro_max_val = IIO_RAD_TO_DEGREE(22887), - .gyro_max_scale = 300, - .accel_max_val = IIO_M_S_2_TO_G(21973), + .gyro_max_val = 22887 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(300), + .accel_max_val = IIO_M_S_2_TO_G(21973 << 16), .accel_max_scale = 18, .temp_scale = 5650, /* 5.65 milli degree Celsius */ .int_clk = 2460000, @@ -802,9 +809,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16480] = { .channels = adis16480_channels, .num_channels = ARRAY_SIZE(adis16480_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(22500), - .gyro_max_scale = 450, - .accel_max_val = IIO_M_S_2_TO_G(12500), + .gyro_max_val = 22500 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(450), + .accel_max_val = IIO_M_S_2_TO_G(12500 << 16), .accel_max_scale = 10, .temp_scale = 5650, /* 5.65 milli degree Celsius */ .int_clk = 2460000, @@ -814,9 +821,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16485] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(22500), - .gyro_max_scale = 450, - .accel_max_val = IIO_M_S_2_TO_G(20000), + .gyro_max_val = 22500 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(450), + .accel_max_val = IIO_M_S_2_TO_G(20000 << 16), .accel_max_scale = 5, .temp_scale = 5650, /* 5.65 milli degree Celsius */ .int_clk = 2460000, @@ -826,9 +833,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16488] = { .channels = adis16480_channels, .num_channels = ARRAY_SIZE(adis16480_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(22500), - .gyro_max_scale = 450, - .accel_max_val = IIO_M_S_2_TO_G(22500), + .gyro_max_val = 22500 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(450), + .accel_max_val = IIO_M_S_2_TO_G(22500 << 16), .accel_max_scale = 18, .temp_scale = 5650, /* 5.65 milli degree Celsius */ .int_clk = 2460000, @@ -838,9 +845,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16495_1] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(20000), - .gyro_max_scale = 125, - .accel_max_val = IIO_M_S_2_TO_G(32000), + .gyro_max_val = 20000 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(125), + .accel_max_val = IIO_M_S_2_TO_G(32000 << 16), .accel_max_scale = 8, .temp_scale = 12500, /* 12.5 milli degree Celsius */ .int_clk = 4250000, @@ -851,9 +858,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16495_2] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(18000), - .gyro_max_scale = 450, - .accel_max_val = IIO_M_S_2_TO_G(32000), + .gyro_max_val = 18000 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(450), + .accel_max_val = IIO_M_S_2_TO_G(32000 << 16), .accel_max_scale = 8, .temp_scale = 12500, /* 12.5 milli degree Celsius */ .int_clk = 4250000, @@ -864,9 +871,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16495_3] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(20000), - .gyro_max_scale = 2000, - .accel_max_val = IIO_M_S_2_TO_G(32000), + .gyro_max_val = 20000 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(2000), + .accel_max_val = IIO_M_S_2_TO_G(32000 << 16), .accel_max_scale = 8, .temp_scale = 12500, /* 12.5 milli degree Celsius */ .int_clk = 4250000, @@ -877,9 +884,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16497_1] = { .channels = adis16485_channels, .num_channels 
= ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(20000), - .gyro_max_scale = 125, - .accel_max_val = IIO_M_S_2_TO_G(32000), + .gyro_max_val = 20000 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(125), + .accel_max_val = IIO_M_S_2_TO_G(32000 << 16), .accel_max_scale = 40, .temp_scale = 12500, /* 12.5 milli degree Celsius */ .int_clk = 4250000, @@ -890,9 +897,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16497_2] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(18000), - .gyro_max_scale = 450, - .accel_max_val = IIO_M_S_2_TO_G(32000), + .gyro_max_val = 18000 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(450), + .accel_max_val = IIO_M_S_2_TO_G(32000 << 16), .accel_max_scale = 40, .temp_scale = 12500, /* 12.5 milli degree Celsius */ .int_clk = 4250000, @@ -903,9 +910,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { [ADIS16497_3] = { .channels = adis16485_channels, .num_channels = ARRAY_SIZE(adis16485_channels), - .gyro_max_val = IIO_RAD_TO_DEGREE(20000), - .gyro_max_scale = 2000, - .accel_max_val = IIO_M_S_2_TO_G(32000), + .gyro_max_val = 20000 << 16, + .gyro_max_scale = IIO_DEGREE_TO_RAD(2000), + .accel_max_val = IIO_M_S_2_TO_G(32000 << 16), .accel_max_scale = 40, .temp_scale = 12500, /* 12.5 milli degree Celsius */ .int_clk = 4250000, @@ -919,6 +926,7 @@ static const struct iio_info adis16480_info = { .read_raw = &adis16480_read_raw, .write_raw = &adis16480_write_raw, .update_scan_mode = adis_update_scan_mode, + .debugfs_reg_access = adis_debugfs_reg_access, }; static int adis16480_stop_device(struct iio_dev *indio_dev) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 868281b8adb0..2261c6c4ac65 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -115,6 +115,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6050, .config = &chip_config_6050, .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU6500_WHOAMI_VALUE, @@ -122,6 +123,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU6515_WHOAMI_VALUE, @@ -129,6 +131,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU6000_WHOAMI_VALUE, @@ -136,6 +139,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6050, .config = &chip_config_6050, .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU9150_WHOAMI_VALUE, @@ -143,6 +147,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6050, .config = &chip_config_6050, .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU9250_WHOAMI_VALUE, @@ -150,6 +155,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU9255_WHOAMI_VALUE, @@ -157,6 +163,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { 
.whoami = INV_ICM20608_WHOAMI_VALUE, @@ -164,6 +171,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, { .whoami = INV_ICM20602_WHOAMI_VALUE, @@ -171,6 +179,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_icm20602, .config = &chip_config_6050, .fifo_size = 1008, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, }; @@ -471,12 +480,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_MICRO; case IIO_TEMP: - *val = 0; - if (st->chip_type == INV_ICM20602) - *val2 = INV_ICM20602_TEMP_SCALE; - else - *val2 = INV_MPU6050_TEMP_SCALE; - + *val = st->hw->temp.scale / 1000000; + *val2 = st->hw->temp.scale % 1000000; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; @@ -484,11 +489,7 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_OFFSET: switch (chan->type) { case IIO_TEMP: - if (st->chip_type == INV_ICM20602) - *val = INV_ICM20602_TEMP_OFFSET; - else - *val = INV_MPU6050_TEMP_OFFSET; - + *val = st->hw->temp.offset; return IIO_VAL_INT; default: return -EINVAL; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index 51235677c534..c32bd0c012b5 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -101,6 +101,7 @@ struct inv_mpu6050_chip_config { * @reg: register map of the chip. * @config: configuration of the chip. * @fifo_size: size of the FIFO in bytes. + * @temp: offset and scale to apply to raw temperature. */ struct inv_mpu6050_hw { u8 whoami; @@ -108,6 +109,10 @@ struct inv_mpu6050_hw { const struct inv_mpu6050_reg_map *reg; const struct inv_mpu6050_chip_config *config; size_t fifo_size; + struct { + int offset; + int scale; + } temp; }; /* @@ -218,16 +223,19 @@ struct inv_mpu6050_state { #define INV_MPU6050_REG_UP_TIME_MIN 5000 #define INV_MPU6050_REG_UP_TIME_MAX 10000 -#define INV_MPU6050_TEMP_OFFSET 12421 -#define INV_MPU6050_TEMP_SCALE 2941 +#define INV_MPU6050_TEMP_OFFSET 12420 +#define INV_MPU6050_TEMP_SCALE 2941176 #define INV_MPU6050_MAX_GYRO_FS_PARAM 3 #define INV_MPU6050_MAX_ACCL_FS_PARAM 3 #define INV_MPU6050_THREE_AXIS 3 #define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3 #define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3 -#define INV_ICM20602_TEMP_OFFSET 8170 -#define INV_ICM20602_TEMP_SCALE 3060 +#define INV_MPU6500_TEMP_OFFSET 7011 +#define INV_MPU6500_TEMP_SCALE 2995178 + +#define INV_ICM20608_TEMP_OFFSET 8170 +#define INV_ICM20608_TEMP_SCALE 3059976 /* 6 + 6 round up and plus 8 */ #define INV_MPU6050_OUTPUT_DATA_SIZE 24 diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index fd5ebe1e1594..057a4b010010 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -152,9 +152,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .addr = 0x10, .mask = GENMASK(4, 3), }, - .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 }, - .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 }, - .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 }, + + .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 }, + .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, + .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, .fs_len = 3, }, }, @@ -910,7 +911,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id, for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) { for (j = 0; j < 
ST_LSM6DSX_MAX_ID; j++) { - if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) + if (st_lsm6dsx_sensor_settings[i].id[j].name && + id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) break; } if (j < ST_LSM6DSX_MAX_ID) @@ -985,8 +987,7 @@ int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val) return -EINVAL; *val = odr_table->odr_avl[i].val; - - return 0; + return odr_table->odr_avl[i].hz; } static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr, @@ -1149,8 +1150,10 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev, case IIO_CHAN_INFO_SAMP_FREQ: { u8 data; - err = st_lsm6dsx_check_odr(sensor, val, &data); - if (!err) + val = st_lsm6dsx_check_odr(sensor, val, &data); + if (val < 0) + err = val; + else sensor->odr = val; break; } diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index c193d64e5217..112225c0e486 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const unsigned long *mask, bool timestamp) { unsigned bytes = 0; - int length, i; + int length, i, largest = 0; /* How much space will the demuxed element take? */ for_each_set_bit(i, mask, @@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, length = iio_storage_bytes_for_si(indio_dev, i); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } if (timestamp) { length = iio_storage_bytes_for_timestamp(indio_dev); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } + + bytes = ALIGN(bytes, largest); return bytes; } diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c index 28347df78cff..adb5ab9e3439 100644 --- a/drivers/iio/light/bh1750.c +++ b/drivers/iio/light/bh1750.c @@ -59,9 +59,9 @@ struct bh1750_chip_info { u16 int_time_low_mask; u16 int_time_high_mask; -} +}; -static const bh1750_chip_info_tbl[] = { +static const struct bh1750_chip_info bh1750_chip_info_tbl[] = { [BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 }, [BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 }, [BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 }, diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 16dacea9eadf..e5b00a6611ac 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -163,20 +163,22 @@ static int vcnl4200_init(struct vcnl4000_data *data) if (ret < 0) return ret; - data->al_scale = 24000; data->vcnl4200_al.reg = VCNL4200_AL_DATA; data->vcnl4200_ps.reg = VCNL4200_PS_DATA; switch (id) { case VCNL4200_PROD_ID: - /* Integration time is 50ms, but the experiments */ - /* show 54ms in total. */ - data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000); - data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000); + /* Default wait time is 50ms, add 20% tolerance. */ + data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000); + /* Default wait time is 4.8ms, add 20% tolerance. */ + data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000); + data->al_scale = 24000; break; case VCNL4040_PROD_ID: - /* Integration time is 80ms, add 10ms. */ - data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000); - data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000); + /* Default wait time is 80ms, add 20% tolerance. */ + data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000); + /* Default wait time is 5ms, add 20% tolerance. 
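
An aside on the inv_mpu6050 hunks above: the temperature offset and scale move into per-chip hw_info, and the scale is now stored in micro-units so read_raw() can split it into the integer/fractional pair that IIO_VAL_INT_PLUS_MICRO reports (2941176 becomes val = 2, val2 = 941176). A quick standalone check of the MPU-6050 numbers against the datasheet formula temp = raw/340 + 36.53 degC; this is a userspace sketch of the arithmetic, not driver code:

        #include <stdio.h>

        int main(void)
        {
                const long offset = 12420;         /* INV_MPU6050_TEMP_OFFSET */
                const long scale_micro = 2941176;  /* INV_MPU6050_TEMP_SCALE */
                long raw = 0;                      /* raw temperature sample */

                /* IIO convention: processed = (raw + offset) * scale, where
                 * the scale here is milli-degrees Celsius per LSB.
                 */
                double milli_c = (raw + offset) * (scale_micro / 1e6);

                printf("%.2f degC\n", milli_c / 1000.0); /* 36.53 at raw == 0 */
                return 0;
        }

At raw == 0 this yields 12420 * 2.941176 = 36529 m degC, i.e. the datasheet's 36.53 degC, confirming the new constants.
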
*/ + data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000); + data->al_scale = 120000; break; } data->vcnl4200_al.last_measurement = ktime_set(0, 0); diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index fc7e910f8e8b..d32996702110 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -564,7 +564,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev, * We read all axes and discard all but one, for optimized * reading, use the triggered buffer. */ - *val = le16_to_cpu(hw_values[chan->address]); + *val = (s16)le16_to_cpu(hw_values[chan->address]); ret = IIO_VAL_INT; } diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c index 2354302375de..52f53f3123b1 100644 --- a/drivers/iio/pressure/cros_ec_baro.c +++ b/drivers/iio/pressure/cros_ec_baro.c @@ -114,6 +114,7 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev, static const struct iio_info cros_ec_baro_info = { .read_raw = &cros_ec_baro_read, .write_raw = &cros_ec_baro_write, + .read_avail = &cros_ec_sensors_core_read_avail, }; static int cros_ec_baro_probe(struct platform_device *pdev) @@ -149,6 +150,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev) BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_FREQUENCY); + channel->info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; channel->scan_type.storagebits = CROS_EC_SENSOR_BITS; channel->scan_type.shift = 0; diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c index f184ba5601d9..73ed550e3fc9 100644 --- a/drivers/iio/temperature/max31856.c +++ b/drivers/iio/temperature/max31856.c @@ -284,6 +284,8 @@ static int max31856_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); indio_dev->info = &max31856_info; + indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = id->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = max31856_channels; diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index a5dfe65cd9b9..f98510c714b5 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c @@ -161,7 +161,8 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv, return 0; } -static void stm32_timer_stop(struct stm32_timer_trigger *priv) +static void stm32_timer_stop(struct stm32_timer_trigger *priv, + struct iio_trigger *trig) { u32 ccer, cr1; @@ -179,6 +180,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv) regmap_write(priv->regmap, TIM_PSC, 0); regmap_write(priv->regmap, TIM_ARR, 0); + /* Force disable master mode */ + if (stm32_timer_is_trgo2_name(trig->name)) + regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0); + else + regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0); + /* Make sure that registers are updated */ regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG); } @@ -197,7 +204,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev, return ret; if (freq == 0) { - stm32_timer_stop(priv); + stm32_timer_stop(priv, trig); } else { ret = stm32_timer_start(priv, trig, freq); if (ret) diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 6d7ec371e7b2..1753a9801b70 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct 
sk_buff *skb, if (ib_nl_is_good_ip_resp(nlh)) ib_nl_process_good_ip_rsep(nlh); - return skb->len; + return 0; } static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, @@ -421,16 +421,15 @@ static int addr6_resolve(struct sockaddr *src_sock, (const struct sockaddr_in6 *)dst_sock; struct flowi6 fl6; struct dst_entry *dst; - int ret; memset(&fl6, 0, sizeof fl6); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); if (ipv6_addr_any(&src_in->sin6_addr)) src_in->sin6_addr = fl6.saddr; diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 00fb3eacda19..65b10efca2b8 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -51,9 +51,8 @@ struct ib_pkey_cache { struct ib_update_work { struct work_struct work; - struct ib_device *device; - u8 port_num; - bool enforce_security; + struct ib_event event; + bool enforce_security; }; union ib_gid zgid; @@ -130,7 +129,7 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port) event.element.port_num = port; event.event = IB_EVENT_GID_CHANGE; - ib_dispatch_event(&event); + ib_dispatch_event_clients(&event); } static const char * const gid_type_str[] = { @@ -1387,9 +1386,8 @@ err: return ret; } -static void ib_cache_update(struct ib_device *device, - u8 port, - bool enforce_security) +static int +ib_cache_update(struct ib_device *device, u8 port, bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1397,11 +1395,11 @@ static void ib_cache_update(struct ib_device *device, int ret; if (!rdma_is_port_valid(device, port)) - return; + return -EINVAL; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) - return; + return -ENOMEM; ret = ib_query_port(device, port, tprops); if (ret) { @@ -1419,8 +1417,10 @@ static void ib_cache_update(struct ib_device *device, pkey_cache = kmalloc(struct_size(pkey_cache, table, tprops->pkey_tbl_len), GFP_KERNEL); - if (!pkey_cache) + if (!pkey_cache) { + ret = -ENOMEM; goto err; + } pkey_cache->table_len = tprops->pkey_tbl_len; @@ -1452,50 +1452,84 @@ static void ib_cache_update(struct ib_device *device, kfree(old_pkey_cache); kfree(tprops); - return; + return 0; err: kfree(pkey_cache); kfree(tprops); + return ret; } -static void ib_cache_task(struct work_struct *_work) +static void ib_cache_event_task(struct work_struct *_work) +{ + struct ib_update_work *work = + container_of(_work, struct ib_update_work, work); + int ret; + + /* Before distributing the cache update event, first sync + * the cache. + */ + ret = ib_cache_update(work->event.device, work->event.element.port_num, + work->enforce_security); + + /* GID event is notified already for individual GID entries by + * dispatch_gid_change_event(). Hence, notify for the rest of the + * events.
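
The key idea in this cache.c rework is that ib_dispatch_event() may be called from atomic context, so the event is snapshotted by value into a GFP_ATOMIC work item and everything that can sleep (the cache refresh, the client callbacks) runs later from a workqueue. A minimal sketch of that pattern in isolation, with hypothetical names (handle_event() stands in for the real consumers):

        #include <linux/slab.h>
        #include <linux/workqueue.h>
        #include <rdma/ib_verbs.h>

        struct deferred_event {
                struct work_struct work;
                struct ib_event event;  /* copied by value, not referenced */
        };

        static void deferred_event_fn(struct work_struct *_work)
        {
                struct deferred_event *w =
                        container_of(_work, struct deferred_event, work);

                /* Workqueue context: sleeping is fine, the event is our copy. */
                handle_event(&w->event);        /* hypothetical consumer */
                kfree(w);
        }

        static void defer_event(const struct ib_event *event)
        {
                struct deferred_event *w = kzalloc(sizeof(*w), GFP_ATOMIC);

                if (!w)
                        return;                 /* best effort, as in the patch */
                INIT_WORK(&w->work, deferred_event_fn);
                w->event = *event;              /* snapshot before returning */
                queue_work(system_wq, &w->work);
        }

Copying the event rather than pointing at it is what removes any lifetime dependency on the caller's stack frame.
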
+ */ + if (!ret && work->event.event != IB_EVENT_GID_CHANGE) + ib_dispatch_event_clients(&work->event); + + kfree(work); +} + +static void ib_generic_event_task(struct work_struct *_work) { struct ib_update_work *work = container_of(_work, struct ib_update_work, work); - ib_cache_update(work->device, - work->port_num, - work->enforce_security); + ib_dispatch_event_clients(&work->event); kfree(work); } -static void ib_cache_event(struct ib_event_handler *handler, - struct ib_event *event) +static bool is_cache_update_event(const struct ib_event *event) +{ + return (event->event == IB_EVENT_PORT_ERR || + event->event == IB_EVENT_PORT_ACTIVE || + event->event == IB_EVENT_LID_CHANGE || + event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_CLIENT_REREGISTER || + event->event == IB_EVENT_GID_CHANGE); +} + +/** + * ib_dispatch_event - Dispatch an asynchronous event + * @event:Event to dispatch + * + * Low-level drivers must call ib_dispatch_event() to dispatch the + * event to all registered event handlers when an asynchronous event + * occurs. + */ +void ib_dispatch_event(const struct ib_event *event) { struct ib_update_work *work; - if (event->event == IB_EVENT_PORT_ERR || - event->event == IB_EVENT_PORT_ACTIVE || - event->event == IB_EVENT_LID_CHANGE || - event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_CLIENT_REREGISTER || - event->event == IB_EVENT_GID_CHANGE) { - work = kmalloc(sizeof *work, GFP_ATOMIC); - if (work) { - INIT_WORK(&work->work, ib_cache_task); - work->device = event->device; - work->port_num = event->element.port_num; - if (event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_GID_CHANGE) - work->enforce_security = true; - else - work->enforce_security = false; + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; - queue_work(ib_wq, &work->work); - } - } + if (is_cache_update_event(event)) + INIT_WORK(&work->work, ib_cache_event_task); + else + INIT_WORK(&work->work, ib_generic_event_task); + + work->event = *event; + if (event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_GID_CHANGE) + work->enforce_security = true; + + queue_work(ib_wq, &work->work); } +EXPORT_SYMBOL(ib_dispatch_event); int ib_cache_setup_one(struct ib_device *device) { @@ -1511,9 +1545,6 @@ int ib_cache_setup_one(struct ib_device *device) rdma_for_each_port (device, p) ib_cache_update(device, p, true); - INIT_IB_EVENT_HANDLER(&device->cache.event_handler, - device, ib_cache_event); - ib_register_event_handler(&device->cache.event_handler); return 0; } @@ -1535,14 +1566,12 @@ void ib_cache_release_one(struct ib_device *device) void ib_cache_cleanup_one(struct ib_device *device) { - /* The cleanup function unregisters the event handler, - * waits for all in-progress workqueue elements and cleans - * up the GID cache. This function should be called after - * the device was removed from the devices list and all - * clients were removed, so the cache exists but is + /* The cleanup function waits for all in-progress workqueue + * elements and cleans up the GID cache. This function should be + * called after the device was removed from the devices list and + * all clients were removed, so the cache exists but is * non-functional and shouldn't be updated anymore. 
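
The new is_cache_update_event() above tests the event code with a chain of || comparisons; the same predicate reads a little more naturally as a switch, and compilers emit equivalent code for either form. Purely an illustrative alternative, the patch itself keeps the || chain:

        static bool is_cache_update_event(const struct ib_event *event)
        {
                switch (event->event) {
                case IB_EVENT_PORT_ERR:
                case IB_EVENT_PORT_ACTIVE:
                case IB_EVENT_LID_CHANGE:
                case IB_EVENT_PKEY_CHANGE:
                case IB_EVENT_CLIENT_REREGISTER:
                case IB_EVENT_GID_CHANGE:
                        return true;
                default:
                        return false;
                }
        }
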
*/ - ib_unregister_event_handler(&device->cache.event_handler); flush_workqueue(ib_wq); gid_table_cleanup_one(device); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 5920c0085d35..319e4b4ae639 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1228,6 +1228,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, /* Sharing an ib_cm_id with different handlers is not * supported */ spin_unlock_irqrestore(&cm.lock, flags); + ib_destroy_cm_id(cm_id); return ERR_PTR(-EINVAL); } atomic_inc(&cm_id_priv->refcount); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d78f67623f24..8f776b7de45e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2911,6 +2911,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) err2: kfree(route->path_rec); route->path_rec = NULL; + route->num_paths = 0; err1: kfree(work); return ret; @@ -3091,6 +3092,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); + atomic_inc(&id_priv->refcount); cma_init_resolve_addr_work(work, id_priv); queue_work(cma_wq, &work->work); return 0; @@ -3117,6 +3119,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); + atomic_inc(&id_priv->refcount); cma_init_resolve_addr_work(work, id_priv); queue_work(cma_wq, &work->work); return 0; @@ -3153,19 +3156,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, int ret; id_priv = container_of(id, struct rdma_id_private, id); + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); if (id_priv->state == RDMA_CM_IDLE) { ret = cma_bind_addr(id, src_addr, dst_addr); - if (ret) + if (ret) { + memset(cma_dst_addr(id_priv), 0, + rdma_addr_size(dst_addr)); return ret; + } } - if (cma_family(id_priv) != dst_addr->sa_family) + if (cma_family(id_priv) != dst_addr->sa_family) { + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); return -EINVAL; + } - if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); return -EINVAL; + } - memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); if (cma_any_addr(dst_addr)) { ret = cma_resolve_loopback(id_priv); } else { @@ -4710,6 +4720,19 @@ static int __init cma_init(void) { int ret; + /* + * There is a rare lock ordering dependency in cma_netdev_callback() + * that only happens when bonding is enabled. Teach lockdep that rtnl + * must never be nested under lock so it can find these without having + * to test with bonding. 
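
A few hunks up, cma_resolve_loopback() and cma_resolve_ib_addr() each gained an atomic_inc(&id_priv->refcount) before queue_work(). That is the standard rule for deferred work: pin the object before queueing so it cannot be freed while the work is pending, and drop the reference when the handler finishes. Schematically, with the handler body abridged (cma_deref_id() is the put-side helper this file uses; treat the exact names as illustrative):

        /* producer: take a reference, then hand off to the workqueue */
        atomic_inc(&id_priv->refcount);
        cma_init_resolve_addr_work(work, id_priv);
        queue_work(cma_wq, &work->work);

        /* consumer side, simplified */
        static void work_handler(struct work_struct *_work)
        {
                struct cma_work *work = container_of(_work, struct cma_work, work);
                struct rdma_id_private *id_priv = work->id;

                /* ... deliver the event to id_priv ... */
                cma_deref_id(id_priv);  /* drop the producer's reference */
                kfree(work);
        }

Without the inc, a racing rdma_destroy_id() could free id_priv between queue_work() and the handler running.
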
+ */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + rtnl_lock(); + mutex_lock(&lock); + mutex_unlock(&lock); + rtnl_unlock(); + } + cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); if (!cma_wq) return -ENOMEM; @@ -4736,6 +4759,7 @@ err_ib: err: unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); + unregister_pernet_subsys(&cma_pernet_operations); err_wq: destroy_workqueue(cma_wq); return ret; diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 9d07378b5b42..86a17a87f181 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -149,6 +149,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port); int ib_cache_setup_one(struct ib_device *device); void ib_cache_cleanup_one(struct ib_device *device); void ib_cache_release_one(struct ib_device *device); +void ib_dispatch_event_clients(struct ib_event *event); #ifdef CONFIG_CGROUP_RDMA void ib_device_register_rdmacg(struct ib_device *device); @@ -337,6 +338,21 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, qp->pd = pd; qp->uobject = uobj; qp->real_qp = qp; + + qp->qp_type = attr->qp_type; + qp->qp_context = attr->qp_context; + qp->rwq_ind_tbl = attr->rwq_ind_tbl; + qp->send_cq = attr->send_cq; + qp->recv_cq = attr->recv_cq; + qp->srq = attr->srq; + qp->rwq_ind_tbl = attr->rwq_ind_tbl; + qp->event_handler = attr->event_handler; + + atomic_set(&qp->usecnt, 0); + spin_lock_init(&qp->mr_lock); + INIT_LIST_HEAD(&qp->rdma_mrs); + INIT_LIST_HEAD(&qp->sig_mrs); + /* * We don't track XRC QPs for now, because they don't have PD * and more importantly they are created internaly by driver, diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 680ad27f497d..46dd50ff7c85 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -282,6 +282,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) struct rdma_counter *counter; int ret; + if (!qp->res.valid) + return 0; + if (!rdma_is_port_valid(dev, port)) return -EINVAL; @@ -463,10 +466,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, u32 qp_num, u32 counter_id) { + struct rdma_port_counter *port_counter; struct rdma_counter *counter; struct ib_qp *qp; int ret; + port_counter = &dev->port_data[port].port_counter; + if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) + return -EINVAL; + qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; @@ -503,6 +511,7 @@ err: int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, u32 qp_num, u32 *counter_id) { + struct rdma_port_counter *port_counter; struct rdma_counter *counter; struct ib_qp *qp; int ret; @@ -510,9 +519,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, if (!rdma_is_port_valid(dev, port)) return -EINVAL; - if (!dev->port_data[port].port_counter.hstats) + port_counter = &dev->port_data[port].port_counter; + if (!port_counter->hstats) return -EOPNOTSUPP; + if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) + return -EINVAL; + qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 50a92442c4f7..10ae6c6eab0a 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -591,6 +591,7 @@ struct ib_device *_ib_alloc_device(size_t size) INIT_LIST_HEAD(&device->event_handler_list); 
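
The unregister_pernet_subsys() added to cma_init()'s error path above fixes the classic goto-unwind bug: each error label must undo exactly the setup steps that already succeeded, in reverse order, and a step added to the success path must also be added to every later error path. The skeleton of the idiom, with placeholder setup_*/teardown_* names:

        static int __init my_init(void)
        {
                int ret;

                ret = setup_a();
                if (ret)
                        return ret;
                ret = setup_b();
                if (ret)
                        goto err_a;
                ret = setup_c();
                if (ret)
                        goto err_b;
                return 0;

        err_b:
                teardown_b();
        err_a:
                teardown_a();
                return ret;
        }
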
spin_lock_init(&device->event_handler_lock); + init_rwsem(&device->event_handler_rwsem); mutex_init(&device->unregistration_lock); /* * client_data needs to be alloc because we don't want our mark to be @@ -898,7 +899,9 @@ static int add_one_compat_dev(struct ib_device *device, cdev->dev.parent = device->dev.parent; rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); cdev->dev.release = compatdev_release; - dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); + ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); + if (ret) + goto add_err; ret = device_add(&cdev->dev); if (ret) @@ -1199,9 +1202,21 @@ static void setup_dma_device(struct ib_device *device) WARN_ON_ONCE(!parent); device->dma_device = parent; } - /* Setup default max segment size for all IB devices */ - dma_set_max_seg_size(device->dma_device, SZ_2G); + if (!device->dev.dma_parms) { + if (parent) { + /* + * The caller did not provide DMA parameters, so + * 'parent' probably represents a PCI device. The PCI + * core sets the maximum segment size to 64 + * KB. Increase this parameter to 2 GB. + */ + device->dev.dma_parms = parent->dma_parms; + dma_set_max_seg_size(device->dma_device, SZ_2G); + } else { + WARN_ON_ONCE(true); + } + } } /* @@ -1920,17 +1935,15 @@ EXPORT_SYMBOL(ib_set_client_data); * * ib_register_event_handler() registers an event handler that will be * called back when asynchronous IB events occur (as defined in - * chapter 11 of the InfiniBand Architecture Specification). This - * callback may occur in interrupt context. + * chapter 11 of the InfiniBand Architecture Specification). This + * callback occurs in workqueue context. */ void ib_register_event_handler(struct ib_event_handler *event_handler) { - unsigned long flags; - - spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); + down_write(&event_handler->device->event_handler_rwsem); list_add_tail(&event_handler->list, &event_handler->device->event_handler_list); - spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); + up_write(&event_handler->device->event_handler_rwsem); } EXPORT_SYMBOL(ib_register_event_handler); @@ -1943,35 +1956,23 @@ EXPORT_SYMBOL(ib_register_event_handler); */ void ib_unregister_event_handler(struct ib_event_handler *event_handler) { - unsigned long flags; - - spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); + down_write(&event_handler->device->event_handler_rwsem); list_del(&event_handler->list); - spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); + up_write(&event_handler->device->event_handler_rwsem); } EXPORT_SYMBOL(ib_unregister_event_handler); -/** - * ib_dispatch_event - Dispatch an asynchronous event - * @event:Event to dispatch - * - * Low-level drivers must call ib_dispatch_event() to dispatch the - * event to all registered event handlers when an asynchronous event - * occurs. 
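
With dispatch moved off the caller's (possibly atomic) context, the updated kernel-doc above can promise workqueue context, which means handlers are now allowed to sleep. Client-side usage is unchanged; a minimal sketch, with a hypothetical handler body:

        #include <rdma/ib_verbs.h>

        static struct ib_event_handler my_handler;

        static void my_handler_fn(struct ib_event_handler *handler,
                                  struct ib_event *event)
        {
                /* After this series: workqueue context, sleeping is allowed. */
                if (event->event == IB_EVENT_PORT_ACTIVE)
                        dev_info(&event->device->dev, "port %u went active\n",
                                 event->element.port_num);
        }

        static void my_client_add(struct ib_device *device)
        {
                INIT_IB_EVENT_HANDLER(&my_handler, device, my_handler_fn);
                ib_register_event_handler(&my_handler);
        }

The rwsem replacing the spinlock is what makes this legal: the read side held across the handler list walk must itself be sleepable once handlers can block.
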
- */ -void ib_dispatch_event(struct ib_event *event) +void ib_dispatch_event_clients(struct ib_event *event) { - unsigned long flags; struct ib_event_handler *handler; - spin_lock_irqsave(&event->device->event_handler_lock, flags); + down_read(&event->device->event_handler_rwsem); list_for_each_entry(handler, &event->device->event_handler_list, list) handler->handler(handler, event); - spin_unlock_irqrestore(&event->device->event_handler_lock, flags); + up_read(&event->device->event_handler_rwsem); } -EXPORT_SYMBOL(ib_dispatch_event); static int iw_query_port(struct ib_device *device, u8 port_num, @@ -2397,8 +2398,12 @@ int ib_modify_port(struct ib_device *device, rc = device->ops.modify_port(device, port_num, port_modify_mask, port_modify); + else if (rdma_protocol_roce(device, port_num) && + ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 || + (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0)) + rc = 0; else - rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS; + rc = -EOPNOTSUPP; return rc; } EXPORT_SYMBOL(ib_modify_port); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index ade71823370f..da8adadf4755 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv) { struct list_head *e, *tmp; - list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) + list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) { + list_del(e); kfree(list_entry(e, struct iwcm_work, free_list)); + } } static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count) diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index c03af08b80e7..ef4b0c7061e4 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -863,6 +863,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], IB_DEVICE_NAME_MAX); + if (strlen(name) == 0) { + err = -EINVAL; + goto done; + } err = ib_device_rename(device, name); goto done; } @@ -1468,7 +1472,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(ibdev_name)); - if (strchr(ibdev_name, '%')) + if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) return -EINVAL; nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); @@ -1711,6 +1715,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, if (ret) goto err_msg; } else { + if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) + goto err_msg; qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 5337393d4dfe..bb10cbfa0070 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -268,6 +268,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp, return 1; } +static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg, + u32 sg_cnt, enum dma_data_direction dir) +{ + if (is_pci_p2pdma_page(sg_page(sg))) + pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir); + else + ib_dma_unmap_sg(dev, sg, sg_cnt, dir); +} + +static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg, + u32 sg_cnt, enum dma_data_direction dir) +{ + if (is_pci_p2pdma_page(sg_page(sg))) + return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, 
dir); + return ib_dma_map_sg(dev, sg, sg_cnt, dir); +} + /** * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context * @ctx: context to initialize @@ -290,11 +307,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, struct ib_device *dev = qp->pd->device; int ret; - if (is_pci_p2pdma_page(sg_page(sg))) - ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir); - else - ret = ib_dma_map_sg(dev, sg, sg_cnt, dir); - + ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir); if (!ret) return -ENOMEM; sg_cnt = ret; @@ -333,7 +346,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, return ret; out_unmap_sg: - ib_dma_unmap_sg(dev, sg, sg_cnt, dir); + rdma_rw_unmap_sg(dev, sg, sg_cnt, dir); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_init); @@ -583,11 +596,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, break; } - if (is_pci_p2pdma_page(sg_page(sg))) - pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg, - sg_cnt, dir); - else - ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); + rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir); } EXPORT_SYMBOL(rdma_rw_ctx_destroy); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 17fc2936c077..bddb5434fbed 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, } settimeout_out: - return skb->len; + return 0; } static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) @@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb, } resp_out: - return skb->len; + return 0; } static void free_sm_ah(struct kref *kref) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 6eb6d2717ca5..75e7ec017836 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -339,27 +339,20 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, if (!new_pps) return NULL; - if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { - if (!qp_pps) { - new_pps->main.port_num = qp_attr->port_num; - new_pps->main.pkey_index = qp_attr->pkey_index; - } else { - new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? - qp_attr->port_num : - qp_pps->main.port_num; - - new_pps->main.pkey_index = - (qp_attr_mask & IB_QP_PKEY_INDEX) ? 
- qp_attr->pkey_index : - qp_pps->main.pkey_index; - } - new_pps->main.state = IB_PORT_PKEY_VALID; - } else if (qp_pps) { + if (qp_attr_mask & IB_QP_PORT) + new_pps->main.port_num = qp_attr->port_num; + else if (qp_pps) new_pps->main.port_num = qp_pps->main.port_num; + + if (qp_attr_mask & IB_QP_PKEY_INDEX) + new_pps->main.pkey_index = qp_attr->pkey_index; + else if (qp_pps) new_pps->main.pkey_index = qp_pps->main.pkey_index; - if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) - new_pps->main.state = IB_PORT_PKEY_VALID; - } + + if (((qp_attr_mask & IB_QP_PKEY_INDEX) && + (qp_attr_mask & IB_QP_PORT)) || + (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)) + new_pps->main.state = IB_PORT_PKEY_VALID; if (qp_attr_mask & IB_QP_ALT_PATH) { new_pps->alt.port_num = qp_attr->alt_port_num; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 0274e9b704be..f4f79f1292b9 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -91,6 +91,7 @@ struct ucma_context { struct ucma_file *file; struct rdma_cm_id *cm_id; + struct mutex mutex; u64 uid; struct list_head list; @@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; + mutex_init(&ctx->mutex); if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL)) goto error; @@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) } events_reported = ctx->events_reported; + mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; } @@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); + ucma_put_ctx(ctx); return ret; } @@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? 
@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: + mutex_unlock(&ctx->mutex); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; @@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); @@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file, ret = -ENOSYS; break; } + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); + mutex_lock(&ctx->mutex); ret = rdma_connect(ctx->cm_id, &conn_param); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? cmd.backlog : max_backlog; + mutex_lock(&ctx->mutex); ret = rdma_listen(ctx->cm_id, ctx->backlog); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); + mutex_unlock(&ctx->mutex); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); - } else + } else { + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, NULL, NULL); - + mutex_unlock(&ctx->mutex); + } ucma_put_ctx(ctx); return ret; } @@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_disconnect(ctx->cm_id); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; + mutex_lock(&ctx->mutex); ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); + mutex_unlock(&ctx->mutex); if (ret) goto out; @@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx, struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &opa); + mutex_unlock(&ctx->mutex); } else { + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &sa_path); + mutex_unlock(&ctx->mutex); } if (ret) return ret; @@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level, switch (level) { case RDMA_OPTION_ID: + mutex_lock(&ctx->mutex); ret = ucma_set_option_id(ctx, optname, optval, optlen); + mutex_unlock(&ctx->mutex); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); @@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + 
mutex_lock(&ctx->mutex); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); + mutex_unlock(&ctx->mutex); if (ret) goto err2; @@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, goto out; } + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); + mutex_unlock(&mc->ctx->mutex); + mutex_lock(&mc->ctx->file->mut); ucma_cleanup_mc_events(mc); list_del(&mc->list); diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 24244a2f68cc..0d42ba8c0b69 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, * for any address. */ mask |= (sg_dma_address(sg) + pgoff) ^ va; - if (i && i != (umem->nmap - 1)) - /* restrict by length as well for interior SGEs */ - mask |= sg_dma_len(sg); va += sg_dma_len(sg) - pgoff; + /* Except for the last entry, the ending iova alignment sets + * the maximum possible page size as the low bits of the iova + * must be zero when starting the next chunk. + */ + if (i != (umem->nmap - 1)) + mask |= va; pgoff = 0; } best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap); diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 163ff7ba92b7..fedf6829cdec 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -632,7 +632,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt, while (bcnt > 0) { const size_t gup_num_pages = min_t(size_t, - (bcnt + BIT(page_shift) - 1) >> page_shift, + ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE, PAGE_SIZE / sizeof(struct page *)); down_read(&owning_mm->mmap_sem); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index d1407fa378e8..da229eab5903 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = { .llseek = no_llseek, }; +static struct ib_umad_port *get_port(struct ib_device *ibdev, + struct ib_umad_device *umad_dev, + unsigned int port) +{ + if (!umad_dev) + return ERR_PTR(-EOPNOTSUPP); + if (!rdma_is_port_valid(ibdev, port)) + return ERR_PTR(-EINVAL); + if (!rdma_cap_ib_mad(ibdev, port)) + return ERR_PTR(-EOPNOTSUPP); + + return &umad_dev->ports[port - rdma_start_port(ibdev)]; +} + static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { - struct ib_umad_device *umad_dev = client_data; + struct ib_umad_port *port = get_port(ibdev, client_data, res->port); - if (!rdma_is_port_valid(ibdev, res->port)) - return -EINVAL; + if (IS_ERR(port)) + return PTR_ERR(port); res->abi = IB_USER_MAD_ABI_VERSION; - res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev; - + res->cdev = &port->dev; return 0; } @@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad"); static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { - struct ib_umad_device *umad_dev = - ib_get_client_data(ibdev, &umad_client); + struct ib_umad_port *port = get_port(ibdev, 
client_data, res->port); - if (!rdma_is_port_valid(ibdev, res->port)) - return -EINVAL; + if (IS_ERR(port)) + return PTR_ERR(port); res->abi = IB_USER_MAD_ABI_VERSION; - res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev; - + res->cdev = &port->sm_dev; return 0; } @@ -1312,6 +1323,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port) struct ib_umad_file *file; int id; + cdev_device_del(&port->sm_cdev, &port->sm_dev); + cdev_device_del(&port->cdev, &port->dev); + mutex_lock(&port->file_mutex); /* Mark ib_dev NULL and block ioctl or other file ops to progress @@ -1331,8 +1345,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port) mutex_unlock(&port->file_mutex); - cdev_device_del(&port->sm_cdev, &port->sm_dev); - cdev_device_del(&port->cdev, &port->dev); ida_free(&umad_ida, port->dev_num); /* balances device_initialize() */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 14a80fd9f464..e2ddcb0dc4ee 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1431,17 +1431,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs, if (ret) goto err_cb; - qp->pd = pd; - qp->send_cq = attr.send_cq; - qp->recv_cq = attr.recv_cq; - qp->srq = attr.srq; - qp->rwq_ind_tbl = ind_tbl; - qp->event_handler = attr.event_handler; - qp->qp_context = attr.qp_context; - qp->qp_type = attr.qp_type; - atomic_set(&qp->usecnt, 0); atomic_inc(&pd->usecnt); - qp->port = 0; if (attr.send_cq) atomic_inc(&attr.send_cq->usecnt); if (attr.recv_cq) @@ -2718,12 +2708,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, return 0; } -static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec) -{ - /* Returns user space filter size, includes padding */ - return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2; -} - static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size, u16 ib_real_filter_sz) { @@ -2867,11 +2851,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec) { - ssize_t kern_filter_sz; + size_t kern_filter_sz; void *kern_spec_mask; void *kern_spec_val; - kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr); + if (check_sub_overflow((size_t)kern_spec->hdr.size, + sizeof(struct ib_uverbs_flow_spec_hdr), + &kern_filter_sz)) + return -EINVAL; + + kern_filter_sz /= 2; kern_spec_val = (void *)kern_spec + sizeof(struct ib_uverbs_flow_spec_hdr); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index db98111b47f4..f2a2d1246c19 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -220,7 +220,6 @@ void ib_uverbs_release_file(struct kref *ref) } static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, - struct ib_uverbs_file *uverbs_file, struct file *filp, char __user *buf, size_t count, loff_t *pos, size_t eventsz) @@ -238,19 +237,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, if (wait_event_interruptible(ev_queue->poll_wait, (!list_empty(&ev_queue->event_list) || - /* The barriers built into wait_event_interruptible() - * and wake_up() guarentee this will see the null set - * without using RCU - */ - !uverbs_file->device->ib_dev))) + ev_queue->is_closed))) return -ERESTARTSYS; - /* If device was disassociated and no event exists set an error */ - if 
(list_empty(&ev_queue->event_list) && - !uverbs_file->device->ib_dev) - return -EIO; - spin_lock_irq(&ev_queue->lock); + + /* If device was disassociated and no event exists set an error */ + if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) { + spin_unlock_irq(&ev_queue->lock); + return -EIO; + } } event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); @@ -285,8 +281,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf, { struct ib_uverbs_async_event_file *file = filp->private_data; - return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp, - buf, count, pos, + return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos, sizeof(struct ib_uverbs_async_event_desc)); } @@ -296,9 +291,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf, struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; - return ib_uverbs_event_read(&comp_ev_file->ev_queue, - comp_ev_file->uobj.ufile, filp, - buf, count, pos, + return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count, + pos, sizeof(struct ib_uverbs_comp_event_desc)); } @@ -321,7 +315,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, static __poll_t ib_uverbs_async_event_poll(struct file *filp, struct poll_table_struct *wait) { - return ib_uverbs_event_poll(filp->private_data, filp, wait); + struct ib_uverbs_async_event_file *file = filp->private_data; + + return ib_uverbs_event_poll(&file->ev_queue, filp, wait); } static __poll_t ib_uverbs_comp_event_poll(struct file *filp, @@ -335,9 +331,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp, static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on) { - struct ib_uverbs_event_queue *ev_queue = filp->private_data; + struct ib_uverbs_async_event_file *file = filp->private_data; - return fasync_helper(fd, filp, on, &ev_queue->async_queue); + return fasync_helper(fd, filp, on, &file->ev_queue.async_queue); } static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 35c2841a569e..6c4093d0a91d 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1180,16 +1180,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd, if (ret) goto err; - qp->qp_type = qp_init_attr->qp_type; - qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; - - atomic_set(&qp->usecnt, 0); - qp->mrs_used = 0; - spin_lock_init(&qp->mr_lock); - INIT_LIST_HEAD(&qp->rdma_mrs); - INIT_LIST_HEAD(&qp->sig_mrs); - qp->port = 0; - if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { struct ib_qp *xrc_qp = create_xrc_qp_user(qp, qp_init_attr, udata); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index b4149dc9e824..ebc3e3d4a6e2 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3323,8 +3323,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) int rc; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 30a54f8aa42c..27e2df44d043 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -477,6 +477,7 @@ 
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); req.update_period_ms = cpu_to_le32(1000); req.stats_dma_addr = cpu_to_le64(dma_map); + req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext)); req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE; bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); @@ -1270,10 +1271,10 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) return; } rdev->qplib_ctx.hwrm_intf_ver = - (u64)resp.hwrm_intf_major << 48 | - (u64)resp.hwrm_intf_minor << 32 | - (u64)resp.hwrm_intf_build << 16 | - resp.hwrm_intf_patch; + (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 | + (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 | + (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 | + le16_to_cpu(resp.hwrm_intf_patch); } static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 958c1ff9c515..4d07d22bfa7b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Add qp to flush list of the CQ */ bnxt_qplib_add_flush_qp(qp); } else { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { - /* Before we complete, do WA 9060 */ - if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, - cqe_sq_cons)) { - *lib_qp = qp; - goto out; - } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index fbda11a7ab1a..aaa76d792185 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -186,7 +186,9 @@ struct bnxt_qplib_chip_ctx { u8 chip_metal; }; -#define CHIP_NUM_57500 0x1750 +#define CHIP_NUM_57508 0x1750 +#define CHIP_NUM_57504 0x1751 +#define CHIP_NUM_57502 0x1752 struct bnxt_qplib_res { struct pci_dev *pdev; @@ -203,7 +205,9 @@ struct bnxt_qplib_res { static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx) { - return (cctx->chip_num == CHIP_NUM_57500); + return (cctx->chip_num == CHIP_NUM_57508 || + cctx->chip_num == CHIP_NUM_57504 || + cctx->chip_num == CHIP_NUM_57502); } static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 347dc242fb88..d82e0589cfd2 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } + /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3, + * when entering the TERM state the RNIC MUST initiate a CLOSE. 
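
A few hunks up, the bnxt_re hwrm_intf_ver assembly gained le16_to_cpu() on every field. That fix is a pure endianness bug: the firmware reply carries each version component as __le16, so it must be converted to host order before being shifted into the u64; without the conversion the packed version is byte-swapped on big-endian hosts. A portable userspace demonstration of the correct packing (the wire layout and version numbers are illustrative):

        #include <stdio.h>
        #include <stdint.h>

        /* Portable stand-in for le16_to_cpu(): assemble from explicit bytes. */
        static uint16_t le16_to_host(const uint8_t *p)
        {
                return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
        }

        int main(void)
        {
                /* Firmware reports version 1.10.2.16 as four LE u16 fields. */
                const uint8_t wire[8] = { 1, 0, 10, 0, 2, 0, 16, 0 };
                uint64_t ver = 0;

                for (int i = 0; i < 4; i++)
                        ver = ver << 16 | le16_to_host(&wire[2 * i]);

                /* Prints 0x0001000a00020010 on any host endianness. */
                printf("0x%016llx\n", (unsigned long long)ver);
                return 0;
        }
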
+ */ + c4iw_ep_disconnect(ep, 1, GFP_KERNEL); c4iw_put_ep(&ep->com); } else pr_warn("TERM received tid %u no ep/qp\n", tid); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index bbcac539777a..89ac2f9ae6dd 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, qhp->attr.layer_etype = attrs->layer_etype; qhp->attr.ecode = attrs->ecode; ep = qhp->ep; - c4iw_get_ep(&ep->com); - disconnect = 1; if (!internal) { + c4iw_get_ep(&ep->com); terminate = 1; + disconnect = 1; } else { terminate = qhp->attr.send_term; ret = rdma_fini(rhp, qhp, ep); diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c index 3c412bc5b94f..0778f4f7dccd 100644 --- a/drivers/infiniband/hw/efa/efa_com.c +++ b/drivers/infiniband/hw/efa/efa_com.c @@ -317,6 +317,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu struct efa_admin_acq_entry *comp, size_t comp_size_in_bytes) { + struct efa_admin_aq_entry *aqe; struct efa_comp_ctx *comp_ctx; u16 queue_size_mask; u16 cmd_id; @@ -350,7 +351,9 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu reinit_completion(&comp_ctx->wait_event); - memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes); + aqe = &aq->sq.entries[pi]; + memset(aqe, 0, sizeof(*aqe)); + memcpy(aqe, cmd, cmd_size_in_bytes); aq->sq.pc++; atomic64_inc(&aq->stats.submitted_cmd); diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index c142b23bb401..1aeea5d65c01 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd, rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu); } + free_cpumask_var(available_cpus); + free_cpumask_var(non_intr_cpus); return 0; fail: diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9b1fb84a3d45..10924f122072 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1685,6 +1685,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry, return dd->verbs_dev.n_piodrain; } +static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry, + void *context, int vl, int mode, u64 data) +{ + struct hfi1_devdata *dd = context; + + return dd->ctx0_seq_drop; +} + static u64 access_sw_vtx_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { @@ -4106,6 +4114,7 @@ def_access_ibp_counter(rc_crwaits); static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH), +[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH), [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH), [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH), [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT, @@ -4249,6 +4258,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { access_sw_cpu_intr), [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL, access_sw_cpu_rcv_limit), +[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL, + access_sw_ctx0_seq_drop), [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL, access_sw_vtx_wait), [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 
0, CNTR_NORMAL, diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 4ca5ac8d7e9e..af0061936c66 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -859,6 +859,7 @@ static inline int idx_from_vl(int vl) enum { C_RCV_OVF = 0, C_RX_LEN_ERR, + C_RX_SHORT_ERR, C_RX_ICRC_ERR, C_RX_EBP, C_RX_TID_FULL, @@ -926,6 +927,7 @@ enum { C_DC_PG_STS_TX_MBE_CNT, C_SW_CPU_INTR, C_SW_CPU_RCV_LIM, + C_SW_CTX0_SEQ_DROP, C_SW_VTX_WAIT, C_SW_PIO_WAIT, C_SW_PIO_DRAIN, diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index ab3589d17aee..fb3ec9bff7a2 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h @@ -381,6 +381,7 @@ #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468) #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0) #define RCV_LENGTH_ERR_CNT 0 +#define RCV_SHORT_ERR_CNT 2 #define RCV_ICRC_ERR_CNT 6 #define RCV_EBP_CNT 9 #define RCV_BUF_OVFL_CNT 10 diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 01aa1f132f55..941b465244ab 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -734,6 +734,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread) { int ret; + packet->rcd->dd->ctx0_seq_drop++; /* Set up for the next packet */ packet->rhqoff += packet->rsize; if (packet->rhqoff >= packet->maxcnt) diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index f9a7e9d29c8b..89e1dfd07a1b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) fd = kzalloc(sizeof(*fd), GFP_KERNEL); - if (fd) { - fd->rec_cpu_num = -1; /* no cpu affinity by default */ - fd->mm = current->mm; - mmgrab(fd->mm); - fd->dd = dd; - kobject_get(&fd->dd->kobj); - fp->private_data = fd; - } else { - fp->private_data = NULL; - - if (atomic_dec_and_test(&dd->user_refcount)) - complete(&dd->user_comp); - - return -ENOMEM; - } - + if (!fd || init_srcu_struct(&fd->pq_srcu)) + goto nomem; + spin_lock_init(&fd->pq_rcu_lock); + spin_lock_init(&fd->tid_lock); + spin_lock_init(&fd->invalid_lock); + fd->rec_cpu_num = -1; /* no cpu affinity by default */ + fd->mm = current->mm; + mmgrab(fd->mm); + fd->dd = dd; + kobject_get(&fd->dd->kobj); + fp->private_data = fd; return 0; +nomem: + kfree(fd); + fp->private_data = NULL; + if (atomic_dec_and_test(&dd->user_refcount)) + complete(&dd->user_comp); + return -ENOMEM; } static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, @@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) { struct hfi1_filedata *fd = kiocb->ki_filp->private_data; - struct hfi1_user_sdma_pkt_q *pq = fd->pq; + struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq = fd->cq; int done = 0, reqs = 0; unsigned long dim = from->nr_segs; + int idx; - if (!cq || !pq) + idx = srcu_read_lock(&fd->pq_srcu); + pq = srcu_dereference(fd->pq, &fd->pq_srcu); + if (!cq || !pq) { + srcu_read_unlock(&fd->pq_srcu, idx); return -EIO; + } - if (!iter_is_iovec(from) || !dim) + if (!iter_is_iovec(from) || !dim) { + srcu_read_unlock(&fd->pq_srcu, idx); return -EINVAL; + } trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); - if 
(atomic_read(&pq->n_reqs) == pq->n_max_reqs) + if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) { + srcu_read_unlock(&fd->pq_srcu, idx); return -ENOSPC; + } while (dim) { int ret; @@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) reqs++; } + srcu_read_unlock(&fd->pq_srcu, idx); return reqs; } @@ -707,6 +718,7 @@ done: if (atomic_dec_and_test(&dd->user_refcount)) complete(&dd->user_comp); + cleanup_srcu_struct(&fdata->pq_srcu); kfree(fdata); return 0; } diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index fa45350a9a1d..b79931cc74ab 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1153,6 +1153,8 @@ struct hfi1_devdata { char *boardname; /* human readable board info */ + u64 ctx0_seq_drop; + /* reset value */ u64 z_int_counter; u64 z_rcv_limit; @@ -1436,10 +1438,13 @@ struct mmu_rb_handler; /* Private data for file operations */ struct hfi1_filedata { + struct srcu_struct pq_srcu; struct hfi1_devdata *dd; struct hfi1_ctxtdata *uctxt; struct hfi1_user_sdma_comp_q *cq; - struct hfi1_user_sdma_pkt_q *pq; + /* update side lock for SRCU */ + spinlock_t pq_rcu_lock; + struct hfi1_user_sdma_pkt_q __rcu *pq; u16 subctxt; /* for cpu affinity; -1 if none */ int rec_cpu_num; diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c index adb4a1ba921b..5836fe7b2817 100644 --- a/drivers/infiniband/hw/hfi1/iowait.c +++ b/drivers/infiniband/hw/hfi1/iowait.c @@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit, void iowait_cancel_work(struct iowait *w) { cancel_work_sync(&iowait_get_ib_work(w)->iowork); - cancel_work_sync(&iowait_get_tid_work(w)->iowork); + /* Make sure that the iowork for TID RDMA is used */ + if (iowait_get_tid_work(w)->iowork.func) + cancel_work_sync(&iowait_get_tid_work(w)->iowork); } /** diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 90f62c4bddba..074ec71772d2 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping sc2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + /* + * Based on the documentation for kobject_init_and_add(), the + * caller should call kobject_put even if this call fails. 
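
The comment being added here cites a kobject rule worth spelling out: kobject_init_and_add() takes a reference even when it fails, so the only valid error path is kobject_put(), which releases that reference and frees the object through the ktype's release() method, never a direct kfree(). Reduced to a skeleton with placeholder names:

        ret = kobject_init_and_add(&obj->kobj, &my_ktype, parent_kobj, "name");
        if (ret) {
                /* init_and_add took a reference: put() releases it and
                 * frees via the ktype's ->release(), never kfree(obj).
                 */
                kobject_put(&obj->kobj);
                return ret;
        }
        kobject_uevent(&obj->kobj, KOBJ_ADD);

The same reasoning explains the unwind loop below, where kobject_put() is used instead of kobject_del() because put() handles both the added and the init-only cases.
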
+ */ + goto bail_sc2vl; } kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD); @@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping sl2sc sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sc2vl; + goto bail_sl2sc; } kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD); @@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping vl2mtu sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl2sc; + goto bail_vl2mtu; } kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD); @@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_vl2mtu; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -742,7 +746,6 @@ bail_sl2sc: kobject_put(&ppd->sl2sc_kobj); bail_sc2vl: kobject_put(&ppd->sc2vl_kobj); -bail: return ret; } @@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd) return 0; bail: - for (i = 0; i < dd->num_sdma; i++) - kobject_del(&dd->per_sdma[i].kobj); + /* + * The function kobject_put() will call kobject_del() if the kobject + * has been added successfully. The sysfs files created under the + * kobject directory will also be removed during the process. + */ + for (; i >= 0; i--) + kobject_put(&dd->per_sdma[i].kobj); return ret; } @@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd) struct hfi1_pportdata *ppd; int i; + /* Unwind operations in hfi1_verbs_register_sysfs() */ + for (i = 0; i < dd->num_sdma; i++) + kobject_put(&dd->per_sdma[i].kobj); + for (i = 0; i < dd->num_pports; i++) { ppd = &dd->pport[i]; diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index e53f542b60af..8a2e0d9351e9 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) */ fpsn = full_flow_psn(flow, flow->flow_state.spsn); req->r_ack_psn = psn; + /* + * If resync_psn points to the last flow PSN for a + * segment and the new segment (likely from a new + * request) starts with a new generation number, we + * need to adjust resync_psn accordingly. 
+ */ + if (flow->flow_state.generation != + (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT)) + resync_psn = mask_psn(fpsn - 1); flow->resync_npkts += delta_psn(mask_psn(resync_psn + 1), fpsn); /* diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index 3592a9ec155e..4d732353379d 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -90,9 +90,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd, struct hfi1_devdata *dd = uctxt->dd; int ret = 0; - spin_lock_init(&fd->tid_lock); - spin_lock_init(&fd->invalid_lock); - fd->entry_to_rb = kcalloc(uctxt->expected_count, sizeof(struct rb_node *), GFP_KERNEL); @@ -165,10 +162,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) if (fd->handler) { hfi1_mmu_rb_unregister(fd->handler); } else { + mutex_lock(&uctxt->exp_mutex); if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); + mutex_unlock(&uctxt->exp_mutex); } kfree(fd->invalid_tids); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index fd754a16475a..13e4203497b3 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -141,6 +141,7 @@ static int defer_packet_queue( */ xchg(&pq->state, SDMA_PKT_Q_DEFERRED); if (list_empty(&pq->busy.list)) { + pq->busy.lock = &sde->waitlock; iowait_get_priority(&pq->busy); iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); } @@ -155,6 +156,7 @@ static void activate_packet_queue(struct iowait *wait, int reason) { struct hfi1_user_sdma_pkt_q *pq = container_of(wait, struct hfi1_user_sdma_pkt_q, busy); + pq->busy.lock = NULL; xchg(&pq->state, SDMA_PKT_Q_ACTIVE); wake_up(&wait->wait_dma); }; @@ -179,7 +181,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, pq = kzalloc(sizeof(*pq), GFP_KERNEL); if (!pq) return -ENOMEM; - pq->dd = dd; pq->ctxt = uctxt->ctxt; pq->subctxt = fd->subctxt; @@ -236,7 +237,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, goto pq_mmu_fail; } - fd->pq = pq; + rcu_assign_pointer(fd->pq, pq); fd->cq = cq; return 0; @@ -257,6 +258,21 @@ pq_reqs_nomem: return ret; } +static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq) +{ + unsigned long flags; + seqlock_t *lock = pq->busy.lock; + + if (!lock) + return; + write_seqlock_irqsave(lock, flags); + if (!list_empty(&pq->busy.list)) { + list_del_init(&pq->busy.list); + pq->busy.lock = NULL; + } + write_sequnlock_irqrestore(lock, flags); +} + int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt) { @@ -264,8 +280,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt); - pq = fd->pq; + spin_lock(&fd->pq_rcu_lock); + pq = srcu_dereference_check(fd->pq, &fd->pq_srcu, + lockdep_is_held(&fd->pq_rcu_lock)); if (pq) { + rcu_assign_pointer(fd->pq, NULL); + spin_unlock(&fd->pq_rcu_lock); + synchronize_srcu(&fd->pq_srcu); + /* at this point there can be no more new requests */ if (pq->handler) hfi1_mmu_rb_unregister(pq->handler); iowait_sdma_drain(&pq->busy); @@ -276,8 +298,10 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, kfree(pq->reqs); kfree(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); + flush_pq_iowait(pq); kfree(pq); - fd->pq = NULL; + } else { + spin_unlock(&fd->pq_rcu_lock); } if (fd->cq) { vfree(fd->cq->comps); @@ 
-321,7 +345,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, { int ret = 0, i; struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct hfi1_user_sdma_pkt_q *pq = fd->pq; + struct hfi1_user_sdma_pkt_q *pq = + srcu_dereference(fd->pq, &fd->pq_srcu); struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_devdata *dd = pq->dd; unsigned long idx = 0; @@ -580,11 +605,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, if (ret < 0) { if (ret != -EBUSY) goto free_req; - wait_event_interruptible_timeout( + if (wait_event_interruptible_timeout( pq->busy.wait_dma, - (pq->state == SDMA_PKT_Q_ACTIVE), + pq->state == SDMA_PKT_Q_ACTIVE, msecs_to_jiffies( - SDMA_IOWAIT_TIMEOUT)); + SDMA_IOWAIT_TIMEOUT)) <= 0) + flush_pq_iowait(pq); } } *count += idx; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 089e201d7550..2f6323ad9c59 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet, opa_get_lid(packet->dlid, 9B)); if (!mcast) goto drop; + rcu_read_lock(); list_for_each_entry_rcu(p, &mcast->qp_list, list) { packet->qp = p->qp; if (hfi1_do_pkey_check(packet)) - goto drop; + goto unlock_drop; spin_lock_irqsave(&packet->qp->r_lock, flags); packet_handler = qp_ok(packet); if (likely(packet_handler)) @@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet, ibp->rvp.n_pkt_drops++; spin_unlock_irqrestore(&packet->qp->r_lock, flags); } + rcu_read_unlock(); /* * Notify rvt_multicast_detach() if it is waiting for us * to finish. diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index d602b698b57e..4921c1e40ccd 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -1,23 +1,34 @@ # SPDX-License-Identifier: GPL-2.0-only config INFINIBAND_HNS - bool "HNS RoCE Driver" + tristate "HNS RoCE Driver" depends on NET_VENDOR_HISILICON depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on (HNS_DSAF && HNS_ENET) || HNS3 ---help--- This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine is used in Hisilicon Hip06 and more further ICT SoC based on platform device. + To compile HIP06 or HIP08 driver as module, choose M here. + config INFINIBAND_HNS_HIP06 - tristate "Hisilicon Hip06 Family RoCE support" + bool "Hisilicon Hip06 Family RoCE support" depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET + depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y) ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and Hip07 SoC. These RoCE engines are platform devices. + To compile this driver, choose Y here: if INFINIBAND_HNS is m, this + module will be called hns-roce-hw-v1 + config INFINIBAND_HNS_HIP08 - tristate "Hisilicon Hip08 Family RoCE support" + bool "Hisilicon Hip08 Family RoCE support" depends on INFINIBAND_HNS && PCI && HNS3 + depends on INFINIBAND_HNS=m || HNS3=y ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. The RoCE engine is a PCI device. + + To compile this driver, choose Y here: if INFINIBAND_HNS is m, this + module will be called hns-roce-hw-v2. 
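The hfi1 hunks above close a race between the write() path and context teardown by making fd->pq an SRCU-protected pointer: the submit side dereferences it inside an srcu_read_lock() section, while the free side publishes NULL under pq_rcu_lock and waits in synchronize_srcu() before releasing the queue. A minimal sketch of that reader/updater pattern, with illustrative names (pkt_q, filedata, submit, free_queue are placeholders, not the driver's types):

#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct pkt_q { int n_reqs; };

struct filedata {
	struct srcu_struct pq_srcu;	/* init_srcu_struct() at open time */
	spinlock_t pq_rcu_lock;		/* update-side lock for pq */
	struct pkt_q __rcu *pq;
};

/* Reader: may run concurrently with teardown; never sees freed memory. */
static int submit(struct filedata *fd)
{
	struct pkt_q *pq;
	int idx, ret = 0;

	idx = srcu_read_lock(&fd->pq_srcu);
	pq = srcu_dereference(fd->pq, &fd->pq_srcu);
	if (!pq)
		ret = -EIO;	/* queue already torn down */
	/* ... otherwise use pq under the SRCU read lock ... */
	srcu_read_unlock(&fd->pq_srcu, idx);
	return ret;
}

/* Updater: unpublish first, wait for readers, then free. */
static void free_queue(struct filedata *fd)
{
	struct pkt_q *pq;

	spin_lock(&fd->pq_rcu_lock);
	pq = rcu_dereference_protected(fd->pq,
				       lockdep_is_held(&fd->pq_rcu_lock));
	rcu_assign_pointer(fd->pq, NULL);
	spin_unlock(&fd->pq_rcu_lock);

	if (pq) {
		synchronize_srcu(&fd->pq_srcu);	/* all readers are done */
		kfree(pq);
	}
}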
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index 449a2d81319d..e105945b94a1 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o +ifdef CONFIG_INFINIBAND_HNS_HIP06 hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o +obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o +endif +ifdef CONFIG_INFINIBAND_HNS_HIP08 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o +obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o +endif diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 96d1302abde1..e36d31569081 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -425,7 +425,7 @@ struct hns_roce_mr_table { struct hns_roce_wq { u64 *wrid; /* Work request ID */ spinlock_t lock; - int wqe_cnt; /* WQE num */ + u32 wqe_cnt; /* WQE num */ u32 max_post; int max_gs; int offset; @@ -658,7 +658,6 @@ struct hns_roce_qp { u8 sdb_en; u32 doorbell_qpn; u32 sq_signal_bits; - u32 sq_next_wqe; struct hns_roce_wq sq; struct ib_umem *umem; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 5f74bf55f471..a79fa67df871 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -74,8 +74,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, unsigned long flags = 0; void *wqe = NULL; __le32 doorbell[2]; + u32 wqe_idx = 0; int nreq = 0; - u32 ind = 0; int ret = 0; u8 *smac; int loopback; @@ -88,7 +88,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, } spin_lock_irqsave(&qp->sq.lock, flags); - ind = qp->sq_next_wqe; + for (nreq = 0; wr; ++nreq, wr = wr->next) { if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { ret = -ENOMEM; @@ -96,6 +96,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, goto out; } + wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); + if (unlikely(wr->num_sge > qp->sq.max_gs)) { dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", wr->num_sge, qp->sq.max_gs); @@ -104,9 +106,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, goto out; } - wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); - qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = - wr->wr_id; + wqe = get_send_wqe(qp, wqe_idx); + qp->sq.wrid[wqe_idx] = wr->wr_id; /* Corresponding to the RC and RD type wqe process separately */ if (ibqp->qp_type == IB_QPT_GSI) { @@ -210,7 +211,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, cpu_to_le32((wr->sg_list[1].addr) >> 32); ud_sq_wqe->l_key1 = cpu_to_le32(wr->sg_list[1].lkey); - ind++; } else if (ibqp->qp_type == IB_QPT_RC) { u32 tmp_len = 0; @@ -308,7 +308,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, ctrl->flag |= cpu_to_le32(wr->num_sge << HNS_ROCE_WQE_SGE_NUM_BIT); } - ind++; } } @@ -336,7 +335,6 @@ out: doorbell[1] = sq_db.u32_8; hns_roce_write64_k(doorbell, qp->sq.db_reg_l); - qp->sq_next_wqe = ind; } spin_unlock_irqrestore(&qp->sq.lock, flags); @@ -348,12 +346,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { - int ret = 0; - 
int nreq = 0; - int ind = 0; - int i = 0; - u32 reg_val; - unsigned long flags = 0; struct hns_roce_rq_wqe_ctrl *ctrl = NULL; struct hns_roce_wqe_data_seg *scat = NULL; struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); @@ -361,9 +353,14 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct device *dev = &hr_dev->pdev->dev; struct hns_roce_rq_db rq_db; __le32 doorbell[2] = {0}; + unsigned long flags = 0; + unsigned int wqe_idx; + int ret = 0; + int nreq = 0; + int i = 0; + u32 reg_val; spin_lock_irqsave(&hr_qp->rq.lock, flags); - ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (hns_roce_wq_overflow(&hr_qp->rq, nreq, @@ -373,6 +370,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, goto out; } + wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", wr->num_sge, hr_qp->rq.max_gs); @@ -381,7 +380,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, goto out; } - ctrl = get_recv_wqe(hr_qp, ind); + ctrl = get_recv_wqe(hr_qp, wqe_idx); roce_set_field(ctrl->rwqe_byte_12, RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M, @@ -393,9 +392,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, for (i = 0; i < wr->num_sge; i++) set_data_seg(scat + i, wr->sg_list + i); - hr_qp->rq.wrid[ind] = wr->wr_id; - - ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1); + hr_qp->rq.wrid[wqe_idx] = wr->wr_id; } out: @@ -2702,7 +2699,6 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, hr_qp->rq.tail = 0; hr_qp->sq.head = 0; hr_qp->sq.tail = 0; - hr_qp->sq_next_wqe = 0; } kfree(context); @@ -3316,7 +3312,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, hr_qp->rq.tail = 0; hr_qp->sq.head = 0; hr_qp->sq.tail = 0; - hr_qp->sq_next_wqe = 0; } out: kfree(context); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index e82567fcdeb7..4540b00ccee9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -110,7 +110,7 @@ static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg, } static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, - unsigned int *sge_ind) + unsigned int *sge_ind, int valid_num_sge) { struct hns_roce_v2_wqe_data_seg *dseg; struct ib_sge *sg; @@ -123,7 +123,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; - extend_sge_num = wr->num_sge - num_in_wqe; + extend_sge_num = valid_num_sge - num_in_wqe; sg = wr->sg_list + num_in_wqe; shift = qp->hr_buf.page_shift; @@ -159,14 +159,16 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, void *wqe, unsigned int *sge_ind, + int valid_num_sge, const struct ib_send_wr **bad_wr) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_v2_wqe_data_seg *dseg = wqe; struct hns_roce_qp *qp = to_hr_qp(ibqp); + int j = 0; int i; - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { + if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) { if (le32_to_cpu(rc_sq_wqe->msg_len) > hr_dev->caps.max_sq_inline) { *bad_wr = wr; @@ -190,7 +192,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, 
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1); } else { - if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) { + if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) { for (i = 0; i < wr->num_sge; i++) { if (likely(wr->sg_list[i].length)) { set_data_seg_v2(dseg, wr->sg_list + i); @@ -203,19 +205,21 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, (*sge_ind) & (qp->sge.sge_cnt - 1)); - for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) { + for (i = 0; i < wr->num_sge && + j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) { if (likely(wr->sg_list[i].length)) { set_data_seg_v2(dseg, wr->sg_list + i); dseg++; + j++; } } - set_extend_sge(qp, wr, sge_ind); + set_extend_sge(qp, wr, sge_ind, valid_num_sge); } roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge); + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); } return 0; @@ -239,10 +243,11 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct device *dev = hr_dev->dev; struct hns_roce_v2_db sq_db; struct ib_qp_attr attr; - unsigned int sge_ind; unsigned int owner_bit; + unsigned int sge_idx; + unsigned int wqe_idx; unsigned long flags; - unsigned int ind; + int valid_num_sge; void *wqe = NULL; bool loopback; int attr_mask; @@ -269,8 +274,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, } spin_lock_irqsave(&qp->sq.lock, flags); - ind = qp->sq_next_wqe; - sge_ind = qp->next_sge; + sge_idx = qp->next_sge; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { @@ -279,6 +283,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, goto out; } + wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); + if (unlikely(wr->num_sge > qp->sq.max_gs)) { dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", wr->num_sge, qp->sq.max_gs); @@ -287,14 +293,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, goto out; } - wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); - qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = - wr->wr_id; - + wqe = get_send_wqe(qp, wqe_idx); + qp->sq.wrid[wqe_idx] = wr->wr_id; owner_bit = ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); + valid_num_sge = 0; tmp_len = 0; + for (i = 0; i < wr->num_sge; i++) { + if (likely(wr->sg_list[i].length)) { + tmp_len += wr->sg_list[i].length; + valid_num_sge++; + } + } + /* Corresponding to the QP type, wqe process separately */ if (ibqp->qp_type == IB_QPT_GSI) { ud_sq_wqe = wqe; @@ -330,9 +342,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, V2_UD_SEND_WQE_BYTE_4_OPCODE_S, HNS_ROCE_V2_WQE_OP_SEND); - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; - ud_sq_wqe->msg_len = cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len); @@ -368,12 +377,12 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, - wr->num_sge); + valid_num_sge); roce_set_field(ud_sq_wqe->byte_20, V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - sge_ind & (qp->sge.sge_cnt - 1)); + sge_idx & (qp->sge.sge_cnt - 1)); roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, @@ -389,7 +398,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, V2_UD_SEND_WQE_BYTE_36_VLAN_S, - le16_to_cpu(ah->av.vlan)); + ah->av.vlan); 
roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, @@ -423,13 +432,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2); - set_extend_sge(qp, wr, &sge_ind); - ind++; + set_extend_sge(qp, wr, &sge_idx, valid_num_sge); } else if (ibqp->qp_type == IB_QPT_RC) { rc_sq_wqe = wqe; memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe)); - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; rc_sq_wqe->msg_len = cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len); @@ -550,15 +556,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, - wr->num_sge); + valid_num_sge); } else if (wr->opcode != IB_WR_REG_MR) { ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, - wqe, &sge_ind, bad_wr); + wqe, &sge_idx, + valid_num_sge, bad_wr); if (ret) goto out; } - - ind++; } else { dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); spin_unlock_irqrestore(&qp->sq.lock, flags); @@ -588,8 +593,7 @@ out: hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l); - qp->sq_next_wqe = ind; - qp->next_sge = sge_ind; + qp->next_sge = sge_idx; if (qp->state == IB_QPS_ERR) { attr_mask = IB_QP_STATE; @@ -623,13 +627,12 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, unsigned long flags; void *wqe = NULL; int attr_mask; + u32 wqe_idx; int ret = 0; int nreq; - int ind; int i; spin_lock_irqsave(&hr_qp->rq.lock, flags); - ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); if (hr_qp->state == IB_QPS_RESET) { spin_unlock_irqrestore(&hr_qp->rq.lock, flags); @@ -645,6 +648,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, goto out; } + wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", wr->num_sge, hr_qp->rq.max_gs); @@ -653,7 +658,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, goto out; } - wqe = get_recv_wqe(hr_qp, ind); + wqe = get_recv_wqe(hr_qp, wqe_idx); dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; for (i = 0; i < wr->num_sge; i++) { if (!wr->sg_list[i].length) @@ -669,8 +674,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, /* rq support inline data */ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) { - sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list; - hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = + sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list; + hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge; for (i = 0; i < wr->num_sge; i++) { sge_list[i].addr = @@ -679,9 +684,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, } } - hr_qp->rq.wrid[ind] = wr->wr_id; - - ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1); + hr_qp->rq.wrid[wqe_idx] = wr->wr_id; } out: @@ -4465,7 +4468,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, hr_qp->rq.tail = 0; hr_qp->sq.head = 0; hr_qp->sq.tail = 0; - hr_qp->sq_next_wqe = 0; hr_qp->next_sge = 0; if (hr_qp->rq.wqe_cnt) *hr_qp->rdb.db_record = 0; @@ -4650,16 +4652,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, { struct hns_roce_cq *send_cq, *recv_cq; struct ib_device *ibdev = &hr_dev->ib_dev; - int ret; + int ret = 0; if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) { /* Modify qp to reset before destroying qp */ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); - if (ret) { + if (ret) ibdev_err(ibdev, "modify QP to 
Reset failed.\n"); - return ret; - } } send_cq = to_hr_cq(hr_qp->ibqp.send_cq); @@ -4715,7 +4715,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, kfree(hr_qp->rq_inl_buf.wqe_list); } - return 0; + return ret; } static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) @@ -4725,11 +4725,9 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) int ret; ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); - if (ret) { + if (ret) ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", hr_qp->qpn, ret); - return ret; - } if (hr_qp->ibqp.qp_type == IB_QPT_GSI) kfree(hr_to_hr_sqp(hr_qp)); @@ -6092,11 +6090,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, roce_set_field(srq_context->byte_44_idxbufpgsz_addr, SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M, SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S, - hr_dev->caps.idx_ba_pg_sz); + hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET); roce_set_field(srq_context->byte_44_idxbufpgsz_addr, SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M, SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S, - hr_dev->caps.idx_buf_pg_sz); + hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET); srq_context->idx_nxt_blk_addr = cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 43219d2f7de0..76a14db7028d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -87,8 +87,8 @@ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32 #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32 -#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096 -#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096 +#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE +#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 #define HNS_ROCE_INVALID_LKEY 0x100 diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 5f8416ba09a9..702b59f0dab9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -1062,8 +1062,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) { if (page_addr & ((1 << mtt->page_shift) - 1)) { dev_err(dev, - "page_addr 0x%llx is not page_shift %d alignment!\n", - page_addr, mtt->page_shift); + "page_addr is not page_shift %d alignment!\n", + mtt->page_shift); ret = -EINVAL; goto out; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index bd78ff90d998..8dd2d666f687 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, u8 max_sq_stride = ilog2(roundup_sq_stride); /* Sanity check SQ size before proceeding */ - if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || - ucmd->log_sq_stride > max_sq_stride || - ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { + if (ucmd->log_sq_stride > max_sq_stride || + ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n"); return -EINVAL; } @@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, u32 max_cnt; int ret; + if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) || + hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) + return -EINVAL; + ret = 
check_sq_size_with_integrity(hr_dev, cap, ucmd); if (ret) { ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n"); return ret; } - hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; hr_qp->sq.wqe_shift = ucmd->log_sq_stride; max_cnt = max(1U, cap->max_send_sge); diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 0a31d0a3d657..39c08217e861 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -95,14 +95,18 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg, ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context); if (ret) - goto err; + return -EINVAL; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); - if (!table_attr) + if (!table_attr) { + ret = -EMSGSIZE; goto err; + } - if (hns_roce_fill_cq(msg, context)) + if (hns_roce_fill_cq(msg, context)) { + ret = -EMSGSIZE; goto err_cancel_table; + } nla_nest_end(msg, table_attr); kfree(context); @@ -113,7 +117,7 @@ err_cancel_table: nla_nest_cancel(msg, table_attr); err: kfree(context); - return -EMSGSIZE; + return ret; } int hns_roce_fill_res_entry(struct sk_buff *msg, diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 43ea2c13b212..108667ae6b14 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -180,8 +180,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, { struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); struct hns_roce_ib_create_srq ucmd; - u32 page_shift; - u32 npages; + struct hns_roce_buf *buf; int ret; if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) @@ -191,11 +190,13 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, if (IS_ERR(srq->umem)) return PTR_ERR(srq->umem); - npages = (ib_umem_page_count(srq->umem) + - (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / - (1 << hr_dev->caps.srqwqe_buf_pg_sz); - page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt); + buf = &srq->buf; + buf->npages = (ib_umem_page_count(srq->umem) + + (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / + (1 << hr_dev->caps.srqwqe_buf_pg_sz); + buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, + &srq->mtt); if (ret) goto err_user_buf; @@ -212,9 +213,12 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, goto err_user_srq_mtt; } - ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem), - PAGE_SHIFT, &srq->idx_que.mtt); - + buf = &srq->idx_que.idx_buf; + buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem), + 1 << hr_dev->caps.idx_buf_pg_sz); + buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, + &srq->idx_que.mtt); if (ret) { dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n"); goto err_user_idx_mtt; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index d44cf33df81a..238614370927 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev) const struct in_ifaddr *ifa; idev = in_dev_get(dev); + if (!idev) + continue; in_dev_for_each_ifa_rtnl(ifa, idev) { i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "IP=%pI4, 
vlan_id=%d, MAC=%pM\n", &ifa->ifa_address, diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index ecd6cadd529a..b591861934b3 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -186,23 +186,6 @@ out: kfree(ent); } -static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) -{ - struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; - struct rb_root *sl_id_map = &sriov->sl_id_map; - struct id_map_entry *ent, *found_ent; - - spin_lock(&sriov->id_map_lock); - ent = xa_erase(&sriov->pv_id_table, pv_cm_id); - if (!ent) - goto out; - found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id); - if (found_ent && found_ent == ent) - rb_erase(&found_ent->node, sl_id_map); -out: - spin_unlock(&sriov->id_map_lock); -} - static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) { struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; @@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) spin_lock(&sriov->id_map_lock); spin_lock_irqsave(&sriov->going_down_lock, flags); /*make sure that there is no schedule inside the scheduled work.*/ - if (!sriov->is_going_down) { + if (!sriov->is_going_down && !id->scheduled_delete) { id->scheduled_delete = 1; schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); } @@ -341,9 +324,6 @@ cont: if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) schedule_delayed(ibdev, id); - else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) - id_map_find_del(ibdev, pv_cm_id); - return 0; } @@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, *slave = id->slave_id; set_remote_comm_id(mad, id->sl_cm_id); - if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) + if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID || + mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) schedule_delayed(ibdev, id); - else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID || - mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) { - id_map_find_del(ibdev, (int) pv_cm_id); - } return 0; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8d2f1e38b891..369a203332a2 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids, return mlx4_ib_update_gids_v1(gids, ibdev, port_num); } +static void free_gid_entry(struct gid_entry *entry) +{ + memset(&entry->gid, 0, sizeof(entry->gid)); + kfree(entry->ctx); + entry->ctx = NULL; +} + static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context) { struct mlx4_ib_dev *ibdev = to_mdev(attr->device); @@ -306,6 +313,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context) GFP_ATOMIC); if (!gids) { ret = -ENOMEM; + *context = NULL; + free_gid_entry(&port_gid_table->gids[free]); } else { for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); @@ -317,6 +326,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context) if (!ret && hw_update) { ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); + if (ret) { + spin_lock_bh(&iboe->lock); + *context = NULL; + free_gid_entry(&port_gid_table->gids[free]); + spin_unlock_bh(&iboe->lock); + } kfree(gids); } @@ -346,10 +361,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context) if (!ctx->refcount) { unsigned int real_index = ctx->real_index; - memset(&port_gid_table->gids[real_index].gid, 0, - 
sizeof(port_gid_table->gids[real_index].gid)); - kfree(port_gid_table->gids[real_index].ctx); - port_gid_table->gids[real_index].ctx = NULL; + free_gid_entry(&port_gid_table->gids[real_index]); hw_update = 1; } } @@ -3008,16 +3020,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) ibdev->ib_active = false; flush_workqueue(wq); - mlx4_ib_close_sriov(ibdev); - mlx4_ib_mad_cleanup(ibdev); - ib_unregister_device(&ibdev->ib_dev); - mlx4_ib_diag_cleanup(ibdev); if (ibdev->iboe.nb.notifier_call) { if (unregister_netdevice_notifier(&ibdev->iboe.nb)) pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } + mlx4_ib_close_sriov(ibdev); + mlx4_ib_mad_cleanup(ibdev); + ib_unregister_device(&ibdev->ib_dev); + mlx4_ib_diag_cleanup(ibdev); + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); kfree(ibdev->ib_uc_qpns_bitmap); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 45f48cde6b9d..ff664355de55 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, dump_cqe(dev, cqe); } +static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, + u16 tail, u16 head) +{ + u16 idx; + + do { + idx = tail & (qp->sq.wqe_cnt - 1); + if (idx == head) + break; + + tail = qp->sq.w_list[idx].next; + } while (1); + tail = qp->sq.w_list[idx].next; + qp->sq.last_poll = tail; +} + static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) { mlx5_frag_buf_free(dev->mdev, &buf->frag_buf); @@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, } static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc, - int *npolled, int is_send) + int *npolled, bool is_send) { struct mlx5_ib_wq *wq; unsigned int cur; @@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc, return; for (i = 0; i < cur && np < num_entries; i++) { - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + unsigned int idx; + + idx = (is_send) ? 
wq->last_poll : wq->tail; + idx &= (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; wc->status = IB_WC_WR_FLUSH_ERR; wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; wq->tail++; + if (is_send) + wq->last_poll = wq->w_list[idx].next; np++; wc->qp = &qp->ibqp; wc++; @@ -476,6 +498,7 @@ repoll: wqe_ctr = be16_to_cpu(cqe64->wqe_counter); idx = wqe_ctr & (wq->wqe_cnt - 1); handle_good_req(wc, cqe64, wq, idx); + handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); wc->wr_id = wq->wrid[idx]; wq->tail = wq->wqe_head[idx] + 1; wc->status = IB_WC_SUCCESS; diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c index 4950df3f71b6..5c73c0a790fa 100644 --- a/drivers/infiniband/hw/mlx5/gsi.c +++ b/drivers/infiniband/hw/mlx5/gsi.c @@ -507,8 +507,7 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr, ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr); if (ret) { /* Undo the effect of adding the outstanding wr */ - gsi->outstanding_pi = (gsi->outstanding_pi - 1) % - gsi->cap.max_send_wr; + gsi->outstanding_pi--; goto err; } spin_unlock_irqrestore(&gsi->lock, flags); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 831539419c30..4f44a731a48e 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -829,6 +829,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { + size_t uhw_outlen = (uhw) ? uhw->outlen : 0; struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; int err = -ENOMEM; @@ -842,12 +843,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, u64 max_tso; resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); - if (uhw->outlen && uhw->outlen < resp_len) + if (uhw_outlen && uhw_outlen < resp_len) return -EINVAL; else resp.response_length = resp_len; - if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) + if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) return -EINVAL; memset(props, 0, sizeof(*props)); @@ -911,7 +912,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->raw_packet_caps |= IB_RAW_PACKET_CAP_CVLAN_STRIPPING; - if (field_avail(typeof(resp), tso_caps, uhw->outlen)) { + if (field_avail(typeof(resp), tso_caps, uhw_outlen)) { max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); if (max_tso) { resp.tso_caps.max_tso = 1 << max_tso; @@ -921,7 +922,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), rss_caps, uhw->outlen)) { + if (field_avail(typeof(resp), rss_caps, uhw_outlen)) { resp.rss_caps.rx_hash_function = MLX5_RX_HASH_FUNC_TOEPLITZ; resp.rss_caps.rx_hash_fields_mask = @@ -941,9 +942,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.response_length += sizeof(resp.rss_caps); } } else { - if (field_avail(typeof(resp), tso_caps, uhw->outlen)) + if (field_avail(typeof(resp), tso_caps, uhw_outlen)) resp.response_length += sizeof(resp.tso_caps); - if (field_avail(typeof(resp), rss_caps, uhw->outlen)) + if (field_avail(typeof(resp), rss_caps, uhw_outlen)) resp.response_length += sizeof(resp.rss_caps); } @@ -1066,7 +1067,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, MLX5_MAX_CQ_PERIOD; } - if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { + if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) { resp.response_length += sizeof(resp.cqe_comp_caps); if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) { @@ -1084,7 +1085,7 @@ static int mlx5_ib_query_device(struct 
ib_device *ibdev, } } - if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) && + if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) && raw_support) { if (MLX5_CAP_QOS(mdev, packet_pacing) && MLX5_CAP_GEN(mdev, qos)) { @@ -1103,7 +1104,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, - uhw->outlen)) { + uhw_outlen)) { if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe)) resp.mlx5_ib_support_multi_pkt_send_wqes = MLX5_IB_ALLOW_MPW; @@ -1116,7 +1117,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); } - if (field_avail(typeof(resp), flags, uhw->outlen)) { + if (field_avail(typeof(resp), flags, uhw_outlen)) { resp.response_length += sizeof(resp.flags); if (MLX5_CAP_GEN(mdev, cqe_compression_128)) @@ -1132,8 +1133,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT; } - if (field_avail(typeof(resp), sw_parsing_caps, - uhw->outlen)) { + if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) { resp.response_length += sizeof(resp.sw_parsing_caps); if (MLX5_CAP_ETH(mdev, swp)) { resp.sw_parsing_caps.sw_parsing_offloads |= @@ -1153,7 +1153,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) && + if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) && raw_support) { resp.response_length += sizeof(resp.striding_rq_caps); if (MLX5_CAP_GEN(mdev, striding_rq)) { @@ -1170,8 +1170,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), tunnel_offloads_caps, - uhw->outlen)) { + if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) { resp.response_length += sizeof(resp.tunnel_offloads_caps); if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan)) resp.tunnel_offloads_caps |= @@ -1182,17 +1181,15 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_GRE) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_UDP) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP; } - if (uhw->outlen) { + if (uhw_outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); if (err) @@ -3548,10 +3545,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, } INIT_LIST_HEAD(&handler->list); - if (dst) { - memcpy(&dest_arr[0], dst, sizeof(*dst)); - dest_num++; - } for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { err = parse_flow_attr(dev->mdev, spec, @@ -3564,6 +3557,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, ib_flow += ((union ib_flow_spec *)ib_flow)->size; } + if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { + memcpy(&dest_arr[0], dst, sizeof(*dst)); + dest_num++; + } + if (!flow_is_multicast_only(flow_attr)) set_underlay_qp(dev, spec, underlay_qpn); @@ -3604,10 +3602,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { - if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) { 
+ if (!dest_num) rule_dst = NULL; - dest_num = 0; - } } else { if (is_egress) flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; @@ -4739,7 +4735,6 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port) struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; int err = -ENOMEM; - struct ib_udata uhw = {.inlen = 0, .outlen = 0}; pprops = kzalloc(sizeof(*pprops), GFP_KERNEL); if (!pprops) @@ -4749,7 +4744,7 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port) if (!dprops) goto out; - err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); + err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL); if (err) { mlx5_ib_warn(dev, "query_device failed %d\n", err); goto out; @@ -5641,9 +5636,10 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port - 1); - /* Q counters are in the beginning of all counters */ return rdma_alloc_hw_stats_struct(cnts->names, - cnts->num_q_counters, + cnts->num_q_counters + + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 1a98ee2e01c4..a9ce46c4c1ae 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -283,6 +283,7 @@ struct mlx5_ib_wq { unsigned head; unsigned tail; u16 cur_post; + u16 last_poll; void *cur_edge; }; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 7019c12005f4..99d563dba91b 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -428,7 +428,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) { mlx5_ib_err(dev, "cache entry %d is out of range\n", entry); - return NULL; + return ERR_PTR(-EINVAL); } ent = &cache->ent[entry]; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 5fd071c05944..881decb1309a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3391,9 +3391,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, struct mlx5_ib_qp_base *base; u32 set_id; - if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) - return 0; - if (counter) set_id = counter->id; else @@ -3728,6 +3725,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, qp->sq.cur_post = 0; if (qp->sq.wqe_cnt) qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); + qp->sq.last_poll = 0; qp->db.db[MLX5_RCV_DBR] = 0; qp->db.db[MLX5_SND_DBR] = 0; } @@ -6134,6 +6132,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, if (udata->outlen && udata->outlen < min_resp_len) return ERR_PTR(-EINVAL); + if (!capable(CAP_SYS_RAWIO) && + init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) + return ERR_PTR(-EPERM); + dev = to_mdev(pd->device); switch (init_attr->wq_type) { case IB_WQT_RQ: @@ -6503,6 +6505,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp) */ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) { + struct mlx5_ib_dev *dev = to_mdev(qp->device); struct mlx5_ib_qp *mqp = to_mqp(qp); int err = 0; @@ -6512,6 +6515,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) goto out; } + if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) { + err = -EOPNOTSUPP; + goto out; + } + if (mqp->state == IB_QPS_RTS) { err = __mlx5_ib_qp_set_counter(qp, counter); if (!err) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 
dc71b6e16a07..b462eaca1ee3 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -357,6 +357,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) return -ENOMEM; spin_lock_init(&dev->sgid_lock); + xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ); if (IS_IWARP(dev)) { xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 6f3ce86019b7..a7ccca3c4f89 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1577,6 +1577,14 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp) ib_umem_release(qp->urq.umem); qp->urq.umem = NULL; + + if (rdma_protocol_roce(&dev->ibdev, 1)) { + qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); + qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); + } else { + kfree(qp->usq.pbl_tbl); + kfree(qp->urq.pbl_tbl); + } } static int qedr_create_user_qp(struct qedr_dev *dev, @@ -2673,8 +2681,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); - if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR)) - qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); + if (mr->type != QEDR_MR_DMA) + free_mr_info(dev, &mr->info); /* it could be user registered memory. */ ib_umem_release(mr->umem); diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 3926be78036e..568b21eb6ea1 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj, struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, pport_kobj); + if (!pattr->show) + return -EIO; + return pattr->show(ppd, buf); } @@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj, struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, pport_kobj); + if (!pattr->store) + return -EIO; + return pattr->store(ppd, buf, len); } diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 33778d451b82..5ef93f8f17a1 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) if (mcast == NULL) goto drop; this_cpu_inc(ibp->pmastats->n_multicast_rcv); + rcu_read_lock(); list_for_each_entry_rcu(p, &mcast->qp_list, list) qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); + rcu_read_unlock(); /* * Notify rvt_multicast_detach() if it is waiting for us * to finish. diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index a85571a4cf57..0fee3c87776b 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) if (cq->ip) kref_put(&cq->ip->ref, rvt_release_mmap_info); else - vfree(cq->queue); + vfree(cq->kqueue); } /** diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 0b0a241c57ff..799254a049ba 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -61,6 +61,8 @@ #define RVT_RWQ_COUNT_THRESHOLD 16 static void rvt_rc_timeout(struct timer_list *t); +static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, + enum ib_qp_type type); /* * Convert the AETH RNR timeout code into the number of microseconds. 
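The rdmavt hunks below restructure QP reset in two ways: rvt_free_all_qps() now walks the hash table through rvt_qp_iter() with a callback instead of open-coding the RCU traversal, and rvt_reset_qp() becomes a locking wrapper around _rvt_reset_qp() so callers that already hold r_lock/s_hlock/s_lock (modify-QP) and callers that do not (destroy, iteration) share one reset body. A rough sketch of that wrapper-plus-callback shape, using hypothetical names (qp, reset_qp, free_qp_cb) rather than the rvt internals:

#include <linux/spinlock.h>
#include <linux/types.h>

struct qp {
	spinlock_t r_lock;	/* receive side */
	spinlock_t s_hlock;	/* post-send serialization */
	spinlock_t s_lock;	/* send side */
};

/* Core reset; caller must hold r_lock, s_hlock and s_lock. */
static void _reset_qp(struct qp *qp)
{
	/* ... move the QP state machine back to RESET ... */
}

/* Wrapper for callers that do not already hold the locks. */
static void reset_qp(struct qp *qp)
{
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	_reset_qp(qp);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
}

/* Iteration callback: reset each QP and count it via the u64 cookie. */
static void free_qp_cb(struct qp *qp, u64 v)
{
	unsigned int *qp_inuse = (unsigned int *)v;

	reset_qp(qp);
	(*qp_inuse)++;
}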
@@ -452,40 +454,41 @@ no_qp_table: } /** - * free_all_qps - check for QPs still in use + * rvt_free_qp_cb - callback function to reset a qp + * @qp: the qp to reset + * @v: a 64-bit value + * + * This function resets the qp and removes it from the + * qp hash table. + */ +static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v) +{ + unsigned int *qp_inuse = (unsigned int *)v; + struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); + + /* Reset the qp and remove it from the qp hash list */ + rvt_reset_qp(rdi, qp, qp->ibqp.qp_type); + + /* Increment the qp_inuse count */ + (*qp_inuse)++; +} + +/** + * rvt_free_all_qps - check for QPs still in use * @rdi: rvt device info structure * * There should not be any QPs still in use. * Free memory for table. + * Return the number of QPs still in use. */ static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi) { - unsigned long flags; - struct rvt_qp *qp; - unsigned n, qp_inuse = 0; - spinlock_t *ql; /* work around too long line below */ - - if (rdi->driver_f.free_all_qps) - qp_inuse = rdi->driver_f.free_all_qps(rdi); + unsigned int qp_inuse = 0; qp_inuse += rvt_mcast_tree_empty(rdi); - if (!rdi->qp_dev) - return qp_inuse; + rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb); - ql = &rdi->qp_dev->qpt_lock; - spin_lock_irqsave(ql, flags); - for (n = 0; n < rdi->qp_dev->qp_table_size; n++) { - qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n], - lockdep_is_held(ql)); - RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL); - - for (; qp; qp = rcu_dereference_protected(qp->next, - lockdep_is_held(ql))) - qp_inuse++; - } - spin_unlock_irqrestore(ql, flags); - synchronize_rcu(); return qp_inuse; } @@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, } /** - * rvt_reset_qp - initialize the QP state to the reset state + * _rvt_reset_qp - initialize the QP state to the reset state * @qp: the QP to reset * @type: the QP type * * r_lock, s_hlock, and s_lock are required to be held by the caller */ -static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, - enum ib_qp_type type) +static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, + enum ib_qp_type type) __must_hold(&qp->s_lock) __must_hold(&qp->s_hlock) __must_hold(&qp->r_lock) @@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, lockdep_assert_held(&qp->s_lock); } +/** + * rvt_reset_qp - initialize the QP state to the reset state + * @rdi: the device info + * @qp: the QP to reset + * @type: the QP type + * + * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock + * before calling _rvt_reset_qp(). 
+ */ +static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, + enum ib_qp_type type) +{ + spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_hlock); + spin_lock(&qp->s_lock); + _rvt_reset_qp(rdi, qp, type); + spin_unlock(&qp->s_lock); + spin_unlock(&qp->s_hlock); + spin_unlock_irq(&qp->r_lock); +} + /** rvt_free_qpn - Free a qpn from the bit map * @qpt: QP table * @qpn: queue pair number to free @@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, switch (new_state) { case IB_QPS_RESET: if (qp->state != IB_QPS_RESET) - rvt_reset_qp(rdi, qp, ibqp->qp_type); + _rvt_reset_qp(rdi, qp, ibqp->qp_type); break; case IB_QPS_RTR: @@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_hlock); - spin_lock(&qp->s_lock); rvt_reset_qp(rdi, qp, ibqp->qp_type); - spin_unlock(&qp->s_lock); - spin_unlock(&qp->s_hlock); - spin_unlock_irq(&qp->r_lock); wait_event(qp->wait, !atomic_read(&qp->refcount)); /* qpn is now available for use again */ diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 116cafc9afcf..4bc88708b355 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, qp->comp.psn = pkt->psn; if (qp->req.wait_psn) { qp->req.wait_psn = 0; - rxe_run_task(&qp->req.task, 1); + rxe_run_task(&qp->req.task, 0); } } return COMPST_ERROR_RETRY; @@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) */ if (qp->req.wait_fence) { qp->req.wait_fence = 0; - rxe_run_task(&qp->req.task, 1); + rxe_run_task(&qp->req.task, 0); } } @@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, if (qp->req.need_rd_atomic) { qp->comp.timeout_retry = 0; qp->req.need_rd_atomic = 0; - rxe_run_task(&qp->req.task, 1); + rxe_run_task(&qp->req.task, 0); } } @@ -725,7 +725,7 @@ int rxe_completer(void *arg) RXE_CNT_COMP_RETRY); qp->req.need_retry = 1; qp->comp.started_retry = 1; - rxe_run_task(&qp->req.task, 1); + rxe_run_task(&qp->req.task, 0); } if (pkt) { diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 5a3474f9351b..312c2fc961c0 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -117,10 +117,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, memcpy(&fl6.daddr, daddr, sizeof(*daddr)); fl6.flowi6_proto = IPPROTO_UDP; - if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), - recv_sockets.sk6->sk, &ndst, &fl6))) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), + recv_sockets.sk6->sk, &fl6, + NULL); + if (unlikely(IS_ERR(ndst))) { pr_err_ratelimited("no route to %pI6\n", daddr); - goto put; + return NULL; } if (unlikely(ndst->error)) { diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index f9a492ed900b..831ad578a7b2 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb) calc_icrc = rxe_icrc_hdr(pkt, skb); calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt), - payload_size(pkt)); + payload_size(pkt) + bth_pad(pkt)); calc_icrc = (__force u32)cpu_to_be32(~calc_icrc); if (unlikely(calc_icrc != pack_icrc)) { if (skb->protocol == 
htons(ETH_P_IPV6)) diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index c5d9b558fa90..e5031172c019 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (err) return err; } + if (bth_pad(pkt)) { + u8 *pad = payload_addr(pkt) + paylen; + + memset(pad, 0, bth_pad(pkt)); + crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt)); + } } p = payload_addr(pkt) + paylen + bth_pad(pkt); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 1cbfbd98eb22..c4a8195bf670 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp, if (err) pr_err("Failed copying memory\n"); + if (bth_pad(&ack_pkt)) { + struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + u8 *pad = payload_addr(&ack_pkt) + payload; + + memset(pad, 0, bth_pad(&ack_pkt)); + icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt)); + } p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt); *p = ~icrc; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 5c4b2239129c..b0a02d4c8b93 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -407,7 +407,7 @@ struct rxe_dev { struct list_head pending_mmaps; spinlock_t mmap_offset_lock; /* guard mmap_offset */ - int mmap_offset; + u64 mmap_offset; atomic64_t stats_counters[RXE_NUM_OF_COUNTERS]; diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 8c1931a57f4a..e3bac1a877bb 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk) read_lock(&sk->sk_callback_lock); cep = sk_to_cep(sk); - if (!cep) { - WARN_ON(1); + if (!cep) goto out; - } + siw_dbg_cep(cep, "state: %d\n", cep->state); switch (cep->state) { @@ -1784,14 +1783,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len) return 0; } -static int siw_listen_address(struct iw_cm_id *id, int backlog, - struct sockaddr *laddr, int addr_family) +/* + * siw_create_listen - Create resources for a listener's IWCM ID @id + * + * Starts listen on the socket address id->local_addr. + * + */ +int siw_create_listen(struct iw_cm_id *id, int backlog) { struct socket *s; struct siw_cep *cep = NULL; struct siw_device *sdev = to_siw_dev(id->device); + int addr_family = id->local_addr.ss_family; int rv = 0, s_val; + if (addr_family != AF_INET && addr_family != AF_INET6) + return -EAFNOSUPPORT; + rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s); if (rv < 0) return rv; @@ -1806,9 +1814,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, siw_dbg(id->device, "setsockopt error: %d\n", rv); goto error; } - rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 
- sizeof(struct sockaddr_in) : - sizeof(struct sockaddr_in6)); + if (addr_family == AF_INET) { + struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr); + + /* For wildcard addr, limit binding to current device only */ + if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) + s->sk->sk_bound_dev_if = sdev->netdev->ifindex; + + rv = s->ops->bind(s, (struct sockaddr *)laddr, + sizeof(struct sockaddr_in)); + } else { + struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr); + + /* For wildcard addr, limit binding to current device only */ + if (ipv6_addr_any(&laddr->sin6_addr)) + s->sk->sk_bound_dev_if = sdev->netdev->ifindex; + + rv = s->ops->bind(s, (struct sockaddr *)laddr, + sizeof(struct sockaddr_in6)); + } if (rv) { siw_dbg(id->device, "socket bind error: %d\n", rv); goto error; @@ -1867,14 +1891,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, list_add_tail(&cep->listenq, (struct list_head *)id->provider_data); cep->state = SIW_EPSTATE_LISTENING; - if (addr_family == AF_INET) - siw_dbg(id->device, "Listen at laddr %pI4 %u\n", - &(((struct sockaddr_in *)laddr)->sin_addr), - ((struct sockaddr_in *)laddr)->sin_port); - else - siw_dbg(id->device, "Listen at laddr %pI6 %u\n", - &(((struct sockaddr_in6 *)laddr)->sin6_addr), - ((struct sockaddr_in6 *)laddr)->sin6_port); + siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr); return 0; @@ -1932,114 +1949,6 @@ static void siw_drop_listeners(struct iw_cm_id *id) } } -/* - * siw_create_listen - Create resources for a listener's IWCM ID @id - * - * Listens on the socket addresses id->local_addr and id->remote_addr. - * - * If the listener's @id provides a specific local IP address, at most one - * listening socket is created and associated with @id. - * - * If the listener's @id provides the wildcard (zero) local IP address, - * a separate listen is performed for each local IP address of the device - * by creating a listening socket and binding to that local IP address. - * - */ -int siw_create_listen(struct iw_cm_id *id, int backlog) -{ - struct net_device *dev = to_siw_dev(id->device)->netdev; - int rv = 0, listeners = 0; - - siw_dbg(id->device, "backlog %d\n", backlog); - - /* - * For each attached address of the interface, create a - * listening socket, if id->local_addr is the wildcard - * IP address or matches the IP address. 
- */ - if (id->local_addr.ss_family == AF_INET) { - struct in_device *in_dev = in_dev_get(dev); - struct sockaddr_in s_laddr, *s_raddr; - const struct in_ifaddr *ifa; - - if (!in_dev) { - rv = -ENODEV; - goto out; - } - memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); - s_raddr = (struct sockaddr_in *)&id->remote_addr; - - siw_dbg(id->device, - "laddr %pI4:%d, raddr %pI4:%d\n", - &s_laddr.sin_addr, ntohs(s_laddr.sin_port), - &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); - - rtnl_lock(); - in_dev_for_each_ifa_rtnl(ifa, in_dev) { - if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) || - s_laddr.sin_addr.s_addr == ifa->ifa_address) { - s_laddr.sin_addr.s_addr = ifa->ifa_address; - - rv = siw_listen_address(id, backlog, - (struct sockaddr *)&s_laddr, - AF_INET); - if (!rv) - listeners++; - } - } - rtnl_unlock(); - in_dev_put(in_dev); - } else if (id->local_addr.ss_family == AF_INET6) { - struct inet6_dev *in6_dev = in6_dev_get(dev); - struct inet6_ifaddr *ifp; - struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), - *s_raddr = &to_sockaddr_in6(id->remote_addr); - - if (!in6_dev) { - rv = -ENODEV; - goto out; - } - siw_dbg(id->device, - "laddr %pI6:%d, raddr %pI6:%d\n", - &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), - &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); - - rtnl_lock(); - list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { - if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) - continue; - if (ipv6_addr_any(&s_laddr->sin6_addr) || - ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { - struct sockaddr_in6 bind_addr = { - .sin6_family = AF_INET6, - .sin6_port = s_laddr->sin6_port, - .sin6_flowinfo = 0, - .sin6_addr = ifp->addr, - .sin6_scope_id = dev->ifindex }; - - rv = siw_listen_address(id, backlog, - (struct sockaddr *)&bind_addr, - AF_INET6); - if (!rv) - listeners++; - } - } - rtnl_unlock(); - in6_dev_put(in6_dev); - } else { - rv = -EAFNOSUPPORT; - } -out: - if (listeners) - rv = 0; - else if (!rv) - rv = -EINVAL; - - siw_dbg(id->device, "%s\n", rv ? 
"FAIL" : "OK"); - - return rv; -} - int siw_destroy_listen(struct iw_cm_id *id) { if (!id->provider_data) { diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 05a92f997f60..130b1e31b978 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -248,24 +248,6 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id) return NULL; } -static void siw_verbs_sq_flush(struct ib_qp *base_qp) -{ - struct siw_qp *qp = to_siw_qp(base_qp); - - down_write(&qp->state_lock); - siw_sq_flush(qp); - up_write(&qp->state_lock); -} - -static void siw_verbs_rq_flush(struct ib_qp *base_qp) -{ - struct siw_qp *qp = to_siw_qp(base_qp); - - down_write(&qp->state_lock); - siw_rq_flush(qp); - up_write(&qp->state_lock); -} - static const struct ib_device_ops siw_device_ops = { .owner = THIS_MODULE, .uverbs_abi_ver = SIW_ABI_VERSION, @@ -284,8 +266,6 @@ static const struct ib_device_ops siw_device_ops = { .destroy_cq = siw_destroy_cq, .destroy_qp = siw_destroy_qp, .destroy_srq = siw_destroy_srq, - .drain_rq = siw_verbs_rq_flush, - .drain_sq = siw_verbs_sq_flush, .get_dma_mr = siw_get_dma_mr, .get_port_immutable = siw_get_port_immutable, .iw_accept = siw_accept, @@ -399,6 +379,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev) base_dev->dev.dma_ops = &dma_virt_ops; base_dev->num_comp_vectors = num_possible_cpus(); + xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); + xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1); + ib_set_device_ops(base_dev, &siw_device_ops); rv = ib_device_set_netdev(base_dev, netdev, 1); if (rv) @@ -426,9 +409,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev) sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR; sdev->attrs.max_srq_sge = SIW_MAX_SGE; - xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); - xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1); - INIT_LIST_HEAD(&sdev->cep_list); INIT_LIST_HEAD(&sdev->qp_list); diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index b18a677832e1..1b1a40db529c 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -685,6 +685,47 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, return bytes; } +/* Complete SQ WR's without processing */ +static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad_wr) +{ + struct siw_sqe sqe = {}; + int rv = 0; + + while (wr) { + sqe.id = wr->wr_id; + sqe.opcode = wr->opcode; + rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); + if (rv) { + if (bad_wr) + *bad_wr = wr; + break; + } + wr = wr->next; + } + return rv; +} + +/* Complete RQ WR's without processing */ +static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr) +{ + struct siw_rqe rqe = {}; + int rv = 0; + + while (wr) { + rqe.id = wr->wr_id; + rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR); + if (rv) { + if (bad_wr) + *bad_wr = wr; + break; + } + wr = wr->next; + } + return rv; +} + /* * siw_post_send() * @@ -703,26 +744,54 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, unsigned long flags; int rv = 0; + if (wr && !qp->kernel_verbs) { + siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); + *bad_wr = wr; + return -EINVAL; + } + /* * Try to acquire QP state lock. Must be non-blocking * to accommodate kernel clients needs. 
*/ if (!down_read_trylock(&qp->state_lock)) { - *bad_wr = wr; - siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state); - return -ENOTCONN; + if (qp->attrs.state == SIW_QP_STATE_ERROR) { + /* + * ERROR state is final, so we can be sure + * this state will not change as long as the QP + * exists. + * + * This handles an ib_drain_sq() call with + * a concurrent request to set the QP state + * to ERROR. + */ + rv = siw_sq_flush_wr(qp, wr, bad_wr); + } else { + siw_dbg_qp(qp, "QP locked, state %d\n", + qp->attrs.state); + *bad_wr = wr; + rv = -ENOTCONN; + } + return rv; } if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) { + if (qp->attrs.state == SIW_QP_STATE_ERROR) { + /* + * Immediately flush this WR to CQ, if QP + * is in ERROR state. SQ is guaranteed to + * be empty, so WR completes in-order. + * + * Typically triggered by ib_drain_sq(). + */ + rv = siw_sq_flush_wr(qp, wr, bad_wr); + } else { + siw_dbg_qp(qp, "QP out of state %d\n", + qp->attrs.state); + *bad_wr = wr; + rv = -ENOTCONN; + } up_read(&qp->state_lock); - *bad_wr = wr; - siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state); - return -ENOTCONN; - } - if (wr && !qp->kernel_verbs) { - siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); - up_read(&qp->state_lock); - *bad_wr = wr; - return -EINVAL; + return rv; } spin_lock_irqsave(&qp->sq_lock, flags); @@ -917,24 +986,54 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, *bad_wr = wr; return -EOPNOTSUPP; /* what else from errno.h? */ } + if (!qp->kernel_verbs) { + siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n"); + *bad_wr = wr; + return -EINVAL; + } + /* * Try to acquire QP state lock. Must be non-blocking * to accommodate kernel clients needs. */ if (!down_read_trylock(&qp->state_lock)) { - *bad_wr = wr; - return -ENOTCONN; - } - if (!qp->kernel_verbs) { - siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n"); - up_read(&qp->state_lock); - *bad_wr = wr; - return -EINVAL; + if (qp->attrs.state == SIW_QP_STATE_ERROR) { + /* + * ERROR state is final, so we can be sure + * this state will not change as long as the QP + * exists. + * + * This handles an ib_drain_rq() call with + * a concurrent request to set the QP state + * to ERROR. + */ + rv = siw_rq_flush_wr(qp, wr, bad_wr); + } else { + siw_dbg_qp(qp, "QP locked, state %d\n", + qp->attrs.state); + *bad_wr = wr; + rv = -ENOTCONN; + } + return rv; } if (qp->attrs.state > SIW_QP_STATE_RTS) { + if (qp->attrs.state == SIW_QP_STATE_ERROR) { + /* + * Immediately flush this WR to CQ, if QP + * is in ERROR state. RQ is guaranteed to + * be empty, so WR completes in-order. + * + * Typically triggered by ib_drain_rq(). + */ + rv = siw_rq_flush_wr(qp, wr, bad_wr); + } else { + siw_dbg_qp(qp, "QP out of state %d\n", + qp->attrs.state); + *bad_wr = wr; + rv = -ENOTCONN; + } up_read(&qp->state_lock); - *bad_wr = wr; - return -EINVAL; + return rv; } /* * Serialize potentially multiple producers.
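The ERROR-state fast paths above exist so that the generic drain helpers still terminate after the dedicated drain_sq/drain_rq callbacks are dropped from siw_device_ops in the siw_main.c hunk above: ib_drain_sq()/ib_drain_rq() move the QP to ERROR, post one marker WR, and wait for its flush completion, so siw_post_send()/siw_post_receive() must keep accepting WRs in ERROR state. Below is a minimal sketch of that drain pattern, modeled on the core ib_drain_sq() helper; the sketch_* names are illustrative, and it assumes the send CQ was created with a kernel poll context so the .done callback actually runs.

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct sketch_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void sketch_drain_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct sketch_drain_cqe *cqe =
		container_of(wc->wr_cqe, struct sketch_drain_cqe, cqe);

	complete(&cqe->done);
}

static void sketch_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct sketch_drain_cqe sdrain = { .cqe.done = sketch_drain_done };
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			.wr_cqe = &sdrain.cqe,
			.opcode = IB_WR_RDMA_WRITE,
		},
	};

	init_completion(&sdrain.done);

	/* Once the QP is in ERROR, the marker WR below takes the
	 * siw_sq_flush_wr() path and completes with IB_WC_WR_FLUSH_ERR,
	 * which wakes the waiter. */
	if (ib_modify_qp(qp, &attr, IB_QP_STATE))
		return;

	if (ib_post_send(qp, &swr.wr, NULL))
		return;

	wait_for_completion(&sdrain.done);
}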
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 2e72fc5af157..c4c015c60446 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -646,6 +646,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, if (ib_conn->pi_support) { u32 sig_caps = ib_dev->attrs.sig_prot_cap; + shost->sg_prot_tablesize = shost->sg_tablesize; scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index b5960351bec0..8708ed5477e9 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2536,7 +2536,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, if (lrsp->opcode == SRP_LOGIN_RSP) { ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); - ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP; + ch->use_imm_data = srp_use_imm_data && + (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP); ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, ch->use_imm_data); WARN_ON_ONCE(ch->max_it_iu_len > diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index e25c70a56be6..02b92e3cd9a8 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1364,9 +1364,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, struct srpt_send_ioctx *ioctx, u64 tag, int status) { + struct se_cmd *cmd = &ioctx->cmd; struct srp_rsp *srp_rsp; const u8 *sense_data; int sense_data_len, max_sense_len; + u32 resid = cmd->residual_count; /* * The lowest bit of all SAM-3 status codes is zero (see also @@ -1388,6 +1390,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, srp_rsp->tag = tag; srp_rsp->status = status; + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + if (cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an underflow write */ + srp_rsp->flags = SRP_RSP_FLAG_DOUNDER; + srp_rsp->data_out_res_cnt = cpu_to_be32(resid); + } else if (cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an underflow read */ + srp_rsp->flags = SRP_RSP_FLAG_DIUNDER; + srp_rsp->data_in_res_cnt = cpu_to_be32(resid); + } + } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + if (cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an overflow write */ + srp_rsp->flags = SRP_RSP_FLAG_DOOVER; + srp_rsp->data_out_res_cnt = cpu_to_be32(resid); + } else if (cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an overflow read */ + srp_rsp->flags = SRP_RSP_FLAG_DIOVER; + srp_rsp->data_in_res_cnt = cpu_to_be32(resid); + } + } + if (sense_data_len) { BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp)); max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index d7dd6fcf2db0..cb6e3a5f509c 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client, */ client->tail = (client->head - 2) & (client->bufsize - 1); - client->buffer[client->tail].input_event_sec = - event->input_event_sec; - client->buffer[client->tail].input_event_usec = - event->input_event_usec; - client->buffer[client->tail].type = EV_SYN; - client->buffer[client->tail].code = SYN_DROPPED; - client->buffer[client->tail].value = 0; + client->buffer[client->tail] = (struct 
input_event) { + .input_event_sec = event->input_event_sec, + .input_event_usec = event->input_event_usec, + .type = EV_SYN, + .code = SYN_DROPPED, + .value = 0, + }; client->packet_head = client->tail; } @@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file) struct evdev_client *client; int error; - client = kzalloc(struct_size(client, buffer, bufsize), - GFP_KERNEL | __GFP_NOWARN); - if (!client) - client = vzalloc(struct_size(client, buffer, bufsize)); + client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL); if (!client) return -ENOMEM; diff --git a/drivers/input/input.c b/drivers/input/input.c index 55086279d044..e2eb9b9b8363 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t) input_value_sync }; + input_set_timestamp(dev, ktime_get()); input_pass_values(dev, vals, ARRAY_SIZE(vals)); if (dev->rep[REP_PERIOD]) @@ -878,16 +879,18 @@ static int input_default_setkeycode(struct input_dev *dev, } } - __clear_bit(*old_keycode, dev->keybit); - __set_bit(ke->keycode, dev->keybit); - - for (i = 0; i < dev->keycodemax; i++) { - if (input_fetch_keycode(dev, i) == *old_keycode) { - __set_bit(*old_keycode, dev->keybit); - break; /* Setting the bit twice is useless, so break */ + if (*old_keycode <= KEY_MAX) { + __clear_bit(*old_keycode, dev->keybit); + for (i = 0; i < dev->keycodemax; i++) { + if (input_fetch_keycode(dev, i) == *old_keycode) { + __set_bit(*old_keycode, dev->keybit); + /* Setting the bit twice is useless, so break */ + break; + } } } + __set_bit(ke->keycode, dev->keybit); return 0; } @@ -943,9 +946,13 @@ int input_set_keycode(struct input_dev *dev, * Simulate keyup event if keycode is not present * in the keymap anymore */ - if (test_bit(EV_KEY, dev->evbit) && - !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && - __test_and_clear_bit(old_keycode, dev->key)) { + if (old_keycode > KEY_MAX) { + dev_warn(dev->dev.parent ?: &dev->dev, + "%s: got too big old keycode %#x\n", + __func__, old_keycode); + } else if (test_bit(EV_KEY, dev->evbit) && + !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && + __test_and_clear_bit(old_keycode, dev->key)) { struct input_value vals[] = { { EV_KEY, old_keycode, 0 }, input_value_sync diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c index 7eee1b0e360f..99a6052500ca 100644 --- a/drivers/input/joystick/psxpad-spi.c +++ b/drivers/input/joystick/psxpad-spi.c @@ -292,7 +292,7 @@ static int psxpad_spi_probe(struct spi_device *spi) if (!pad) return -ENOMEM; - pdev = input_allocate_polled_device(); + pdev = devm_input_allocate_polled_device(&spi->dev); if (!pdev) { dev_err(&spi->dev, "failed to allocate input device\n"); return -ENOMEM; diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index 83368f1e7c4e..4650f4a94989 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c @@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev) int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0); + 0x11, 0x40, 0x5601, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); @@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x44, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x44, 0x40, 0x0, 0x0, NULL, 0, + 
USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); @@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x22, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x22, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c index 4d875f2ac13d..ee55f22dbca5 100644 --- a/drivers/input/misc/max77650-onkey.c +++ b/drivers/input/misc/max77650-onkey.c @@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev) return input_register_device(onkey->input); } +static const struct of_device_id max77650_onkey_of_match[] = { + { .compatible = "maxim,max77650-onkey" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_onkey_of_match); + static struct platform_driver max77650_onkey_driver = { .driver = { .name = "max77650-onkey", + .of_match_table = max77650_onkey_of_match, }, .probe = max77650_onkey_probe, }; diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c index ecd762f93732..53ad25eaf1a2 100644 --- a/drivers/input/misc/pm8xxx-vibrator.c +++ b/drivers/input/misc/pm8xxx-vibrator.c @@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on) if (regs->enable_mask) rc = regmap_update_bits(vib->regmap, regs->enable_addr, - on ? regs->enable_mask : 0, val); + regs->enable_mask, on ? ~0 : 0); return rc; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 84051f20b18a..002654ec7040 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev, struct uinput_device *udev = input_get_drvdata(dev); struct timespec64 ts; - udev->buff[udev->head].type = type; - udev->buff[udev->head].code = code; - udev->buff[udev->head].value = value; ktime_get_ts64(&ts); - udev->buff[udev->head].input_event_sec = ts.tv_sec; - udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC; + + udev->buff[udev->head] = (struct input_event) { + .input_event_sec = ts.tv_sec, + .input_event_usec = ts.tv_nsec / NSEC_PER_USEC, + .type = type, + .code = code, + .value = value, + }; + udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; wake_up_interruptible(&udev->waitq); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 56fae3472114..4d2036209b45 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0042", /* Yoga */ "LEN0045", "LEN0047", - "LEN0049", "LEN2000", /* S540 */ "LEN2001", /* Edge E431 */ "LEN2002", /* Edge E531 */ @@ -166,23 +165,28 @@ static const char * const smbus_pnp_ids[] = { /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ + "LEN0049", /* Yoga 11e */ "LEN004a", /* W541 */ "LEN005b", /* P50 */ "LEN005e", /* T560 */ + "LEN006c", /* T470s */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ + "LEN0091", /* X1 Carbon 6 */ "LEN0092", /* X1 Carbon 6 */ "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ "LEN009b", /* T580 */ "LEN200f", /* T450s */ + "LEN2044", /* L470 */ "LEN2054", /* E480 */ "LEN2055", 
/* E580 */ "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ + "SYN3257", /* HP Envy 13-ad105ng */ NULL }; diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c index a4cabf52740c..74f7c6f214ff 100644 --- a/drivers/input/rmi4/rmi_f34v7.c +++ b/drivers/input/rmi4/rmi_f34v7.c @@ -1189,6 +1189,9 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw) { int ret; + f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev, + f34->fn->irq_mask); + rmi_f34v7_read_queries_bl_version(f34); f34->v7.image = fw->data; diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 2ca586fb914f..06d0ffef4a17 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); /* Verify that a device really has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "interface has %d endpoints, but must have minimum 1\n", - intf->altsetting[0].desc.bNumEndpoints); + intf->cur_altsetting->desc.bNumEndpoints); err = -EINVAL; goto fail3; } - endpoint = &intf->altsetting[0].endpoint[0].desc; + endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index 35031228a6d0..799c94dda651 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c @@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface, } /* Sanity check that a device has an endpoint */ - if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { + if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&usbinterface->dev, "Invalid number of endpoints\n"); error = -EINVAL; goto err_free_urb; } - /* - * The endpoint is always altsetting 0, we know this since we know - * this device only has one interrupt endpoint - */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; + endpoint = &usbinterface->cur_altsetting->endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); @@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface, input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; + endpoint = &usbinterface->cur_altsetting->endpoint[0].desc; usb_fill_int_urb(gtco->urbinfo, udev, diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index a1f3a0cb197e..38f087404f7a 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf, return -ENODEV; /* Sanity check that the device has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "Invalid number of endpoints\n"); return -EINVAL; } diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 24c4b691b1c9..ae60442efda0 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -3156,6 +3156,8 @@ 
static int __maybe_unused mxt_suspend(struct device *dev) mutex_unlock(&input_dev->mutex); + disable_irq(data->irq); + return 0; } @@ -3168,6 +3170,8 @@ static int __maybe_unused mxt_resume(struct device *dev) if (!input_dev) return 0; + enable_irq(data->irq); + mutex_lock(&input_dev->mutex); if (input_dev->users) diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 5525f1fb1526..240e8de24cd2 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -1041,6 +1041,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client, { const struct edt_i2c_chip_data *chip_data; struct edt_ft5x06_ts_data *tsdata; + u8 buf[2] = { 0xfc, 0x00 }; struct input_dev *input; unsigned long irq_flags; int error; @@ -1110,6 +1111,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client, return error; } + /* + * Dummy read access. EP0700MLP1 returns bogus data on the first + * register read access and ignores writes. + */ + edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf); + edt_ft5x06_ts_set_regs(tsdata); edt_ft5x06_ts_get_defaults(&client->dev, tsdata); edt_ft5x06_ts_get_parameters(tsdata); diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index fb43aa708660..0403102e807e 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -128,6 +128,15 @@ static const unsigned long goodix_irq_flags[] = { */ static const struct dmi_system_id rotated_screen[] = { #if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "Teclast X89", + .matches = { + /* tPAD is too generic, also match on bios date */ + DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), + DMI_MATCH(DMI_BOARD_NAME, "tPAD"), + DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"), + }, + }, { .ident = "WinBook TW100", .matches = { diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index e9006407c9bc..f4ebdab06280 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -334,7 +334,12 @@ static int ili210x_i2c_probe(struct i2c_client *client, input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0); touchscreen_parse_properties(input, true, &priv->prop); - input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT); + + error = input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT); + if (error) { + dev_err(dev, "Unable to set up slots, err: %d\n", error); + return error; + } error = devm_add_action(dev, ili210x_cancel_work, priv); if (error) diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 6ed9f22e6401..fe245439adee 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, return 0; } -static bool raydium_i2c_boot_trigger(struct i2c_client *client) +static int raydium_i2c_boot_trigger(struct i2c_client *client) { static const u8 cmd[7][6] = { { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 }, @@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client) } } - return false; + return 0; } -static bool raydium_i2c_fw_trigger(struct i2c_client *client) +static int raydium_i2c_fw_trigger(struct i2c_client *client) { static const u8 cmd[5][11] = { { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 }, @@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct 
i2c_client *client) } } - return false; + return 0; } static int raydium_i2c_check_path(struct i2c_client *client) diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index 1139714e72e2..1c5f8875cb79 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -149,6 +149,11 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron) gpiod_set_value_cansleep(ts->reset_gpio, !poweron); } +static void st1232_ts_power_off(void *data) +{ + st1232_ts_power(data, false); +} + static const struct st_chip_info st1232_chip_info = { .have_z = true, .max_x = 0x31f, /* 800 - 1 */ @@ -229,6 +234,13 @@ static int st1232_ts_probe(struct i2c_client *client, st1232_ts_power(ts, true); + error = devm_add_action_or_reset(&client->dev, st1232_ts_power_off, ts); + if (error) { + dev_err(&client->dev, + "Failed to install power off action: %d\n", error); + return error; + } + input_dev->name = "st1232-touchscreen"; input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; @@ -271,15 +283,6 @@ static int st1232_ts_probe(struct i2c_client *client, return 0; } -static int st1232_ts_remove(struct i2c_client *client) -{ - struct st1232_ts_data *ts = i2c_get_clientdata(client); - - st1232_ts_power(ts, false); - - return 0; -} - static int __maybe_unused st1232_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -329,7 +332,6 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids); static struct i2c_driver st1232_ts_driver = { .probe = st1232_ts_probe, - .remove = st1232_ts_remove, .id_table = st1232_ts_id, .driver = { .name = ST1232_TS_NAME, diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 0af0fe8c40d7..742a7e96c1b5 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device *hwmon; + struct thermal_zone_device *thermal; int error; u32 reg; bool ts_attached; @@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev) if (IS_ERR(hwmon)) return PTR_ERR(hwmon); - devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops); + thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, + &sun4i_ts_tz_ops); + if (IS_ERR(thermal)) + return PTR_ERR(thermal); writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 3fd3e862269b..2e2ea5719c90 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -653,7 +653,7 @@ static int sur40_probe(struct usb_interface *interface, int error; /* Check if we really have the right interface. 
*/ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index b4966d8f3348..8e0735a87040 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -414,7 +414,7 @@ static int qnoc_probe(struct platform_device *pdev) struct icc_provider *provider; struct qcom_icc_node **qnodes; struct qcom_icc_provider *qp; - struct icc_node *node; + struct icc_node *node, *tmp; size_t num_nodes, i; int ret; @@ -494,7 +494,7 @@ static int qnoc_probe(struct platform_device *pdev) return 0; err: - list_for_each_entry(node, &provider->nodes, node_list) { + list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) { icc_node_del(node); icc_node_destroy(node->id); } @@ -508,9 +508,9 @@ static int qnoc_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); struct icc_provider *provider = &qp->provider; - struct icc_node *n; + struct icc_node *n, *tmp; - list_for_each_entry(n, &provider->nodes, node_list) { + list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { icc_node_del(n); icc_node_destroy(n->id); } diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index 502a6c22b41e..387267ee9648 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -868,9 +868,9 @@ static int qnoc_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); struct icc_provider *provider = &qp->provider; - struct icc_node *n; + struct icc_node *n, *tmp; - list_for_each_entry(n, &provider->nodes, node_list) { + list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { icc_node_del(n); icc_node_destroy(n->id); } diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e3842eabcfdd..d69d43b4355b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -485,4 +485,10 @@ config VIRTIO_IOMMU Say Y here if you intend to run this kernel as a guest. +config SMMU_BYPASS_DEV + bool "SMMU bypass streams for some specific devices" + help + According to the smmu.bypassdev cmdline option, the SMMU performs + attribute transformation only, with no address translation. + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index dd555078258c..bc7771498342 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -226,71 +226,58 @@ static struct iommu_dev_data *search_dev_data(u16 devid) return NULL; } -static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) +static int clone_alias(struct pci_dev *pdev, u16 alias, void *data) { - *(u16 *)data = alias; + u16 devid = pci_dev_id(pdev); + + if (devid == alias) + return 0; + + amd_iommu_rlookup_table[alias] = + amd_iommu_rlookup_table[devid]; + memcpy(amd_iommu_dev_table[alias].data, + amd_iommu_dev_table[devid].data, + sizeof(amd_iommu_dev_table[alias].data)); + + return 0; } -static u16 get_alias(struct device *dev) +static void clone_aliases(struct pci_dev *pdev) +{ + if (!pdev) + return; + + /* + * The IVRS alias stored in the alias table may not be + * part of the PCI DMA aliases if its bus differs + * from the original device.
+ */ + clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL); + + pci_for_each_dma_alias(pdev, clone_alias, NULL); +} + +static struct pci_dev *setup_aliases(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - u16 devid, ivrs_alias, pci_alias; + u16 ivrs_alias; - /* The callers make sure that get_device_id() does not fail here */ - devid = get_device_id(dev); - - /* For ACPI HID devices, we simply return the devid as such */ + /* For ACPI HID devices, there are no aliases */ if (!dev_is_pci(dev)) - return devid; - - ivrs_alias = amd_iommu_alias_table[devid]; - - pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); - - if (ivrs_alias == pci_alias) - return ivrs_alias; + return NULL; /* - * DMA alias showdown - * - * The IVRS is fairly reliable in telling us about aliases, but it - * can't know about every screwy device. If we don't have an IVRS - * reported alias, use the PCI reported alias. In that case we may - * still need to initialize the rlookup and dev_table entries if the - * alias is to a non-existent device. + * Add the IVRS alias to the pci aliases if it is on the same + * bus. The IVRS table may know about a quirk that we don't. */ - if (ivrs_alias == devid) { - if (!amd_iommu_rlookup_table[pci_alias]) { - amd_iommu_rlookup_table[pci_alias] = - amd_iommu_rlookup_table[devid]; - memcpy(amd_iommu_dev_table[pci_alias].data, - amd_iommu_dev_table[devid].data, - sizeof(amd_iommu_dev_table[pci_alias].data)); - } + ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)]; + if (ivrs_alias != pci_dev_id(pdev) && + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) + pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1); - return pci_alias; - } + clone_aliases(pdev); - pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d " - "for device [%04x:%04x], kernel reported alias " - "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), - PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device, - PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), - PCI_FUNC(pci_alias)); - - /* - * If we don't have a PCI DMA alias and the IVRS alias is on the same - * bus, then the IVRS table may know about a quirk that we don't. - */ - if (pci_alias == devid && - PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { - pci_add_dma_alias(pdev, ivrs_alias & 0xff); - pci_info(pdev, "Added PCI DMA alias %02x.%d\n", - PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias)); - } - - return ivrs_alias; + return pdev; } static struct iommu_dev_data *find_dev_data(u16 devid) @@ -428,7 +415,7 @@ static int iommu_init_device(struct device *dev) if (!dev_data) return -ENOMEM; - dev_data->alias = get_alias(dev); + dev_data->pdev = setup_aliases(dev); /* * By default we use passthrough mode for IOMMUv2 capable device. 
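Rather than tracking a single cached alias per device, clone_aliases() above leans on pci_for_each_dma_alias(), which invokes its callback for the device's own requester ID first and then once per quirk- or bridge-generated alias. A hedged illustration of the same walk follows; the dump_alias()/dump_aliases() helpers are hypothetical and only log the 16-bit requester IDs that index amd_iommu_dev_table and amd_iommu_rlookup_table.

#include <linux/pci.h>

/* Hypothetical helper: log every requester ID that DMA from @pdev
 * may carry on the bus.  A non-zero return stops the walk early. */
static int dump_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	pci_info(pdev, "DMA alias %02x:%02x.%d\n",
		 PCI_BUS_NUM(alias), PCI_SLOT(alias), PCI_FUNC(alias));
	return 0;
}

static void dump_aliases(struct pci_dev *pdev)
{
	/* pci_dev_id(pdev) itself is visited first, then each alias */
	pci_for_each_dma_alias(pdev, dump_alias, NULL);
}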
@@ -453,20 +440,16 @@ static int iommu_init_device(struct device *dev) static void iommu_ignore_device(struct device *dev) { - u16 alias; int devid; devid = get_device_id(dev); if (devid < 0) return; - alias = get_alias(dev); - - memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); - memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); - amd_iommu_rlookup_table[devid] = NULL; - amd_iommu_rlookup_table[alias] = NULL; + memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); + + setup_aliases(dev); } static void iommu_uninit_device(struct device *dev) @@ -1236,6 +1219,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data, return iommu_queue_command(iommu, &cmd); } +static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct amd_iommu *iommu = data; + + return iommu_flush_dte(iommu, alias); +} + /* * Command send function for invalidating a device table entry */ @@ -1246,14 +1236,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data) int ret; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; - ret = iommu_flush_dte(iommu, dev_data->devid); - if (!ret && alias != dev_data->devid) - ret = iommu_flush_dte(iommu, alias); + if (dev_data->pdev) + ret = pci_for_each_dma_alias(dev_data->pdev, + device_flush_dte_alias, iommu); + else + ret = iommu_flush_dte(iommu, dev_data->devid); if (ret) return ret; + alias = amd_iommu_alias_table[dev_data->devid]; + if (alias != dev_data->devid) { + ret = iommu_flush_dte(iommu, alias); + if (ret) + return ret; + } + if (dev_data->ats.enabled) ret = device_flush_iotlb(dev_data, 0, ~0UL); @@ -2035,11 +2033,9 @@ static void do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { struct amd_iommu *iommu; - u16 alias; bool ats; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; ats = dev_data->ats.enabled; /* Update data structures */ @@ -2052,8 +2048,7 @@ static void do_attach(struct iommu_dev_data *dev_data, /* Update device table */ set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2); - if (alias != dev_data->devid) - set_dte_entry(alias, domain, ats, dev_data->iommu_v2); + clone_aliases(dev_data->pdev); device_flush_dte(dev_data); } @@ -2062,17 +2057,14 @@ static void do_detach(struct iommu_dev_data *dev_data) { struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu; - u16 alias; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); clear_dte_entry(dev_data->devid); - if (alias != dev_data->devid) - clear_dte_entry(alias); + clone_aliases(dev_data->pdev); /* Flush the DTE entry */ device_flush_dte(dev_data); @@ -2384,13 +2376,7 @@ static void update_device_table(struct protection_domain *domain) list_for_each_entry(dev_data, &domain->dev_list, list) { set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, dev_data->iommu_v2); - - if (dev_data->devid == dev_data->alias) - continue; - - /* There is an alias, update device table entry for it */ - set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled, - dev_data->iommu_v2); + clone_aliases(dev_data->pdev); } } @@ -3752,7 +3738,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, iommu_flush_dte(iommu, devid); } -static struct irq_remap_table *alloc_irq_table(u16 devid) +static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias, + void *data) +{ + 
struct irq_remap_table *table = data; + + irq_lookup_table[alias] = table; + set_dte_irq_entry(alias, table); + + iommu_flush_dte(amd_iommu_rlookup_table[alias], alias); + + return 0; +} + +static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev) { struct irq_remap_table *table = NULL; struct irq_remap_table *new_table = NULL; @@ -3798,7 +3797,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid) table = new_table; new_table = NULL; - set_remap_table_entry(iommu, devid, table); + if (pdev) + pci_for_each_dma_alias(pdev, set_remap_table_entry_alias, + table); + else + set_remap_table_entry(iommu, devid, table); + if (devid != alias) set_remap_table_entry(iommu, alias, table); @@ -3815,7 +3819,8 @@ out_unlock: return table; } -static int alloc_irq_index(u16 devid, int count, bool align) +static int alloc_irq_index(u16 devid, int count, bool align, + struct pci_dev *pdev) { struct irq_remap_table *table; int index, c, alignment = 1; @@ -3825,7 +3830,7 @@ static int alloc_irq_index(u16 devid, int count, bool align) if (!iommu) return -ENODEV; - table = alloc_irq_table(devid); + table = alloc_irq_table(devid, pdev); if (!table) return -ENODEV; @@ -4258,7 +4263,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, struct irq_remap_table *table; struct amd_iommu *iommu; - table = alloc_irq_table(devid); + table = alloc_irq_table(devid, NULL); if (table) { if (!table->min_index) { /* @@ -4275,11 +4280,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, } else { index = -ENOMEM; } - } else { + } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI || + info->type == X86_IRQ_ALLOC_TYPE_MSIX) { bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI); - index = alloc_irq_index(devid, nr_irqs, align); + index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev); + } else { + index = alloc_irq_index(devid, nr_irqs, false, NULL); } + if (index < 0) { pr_warn("Failed to allocate IRTE\n"); ret = index; @@ -4412,7 +4421,7 @@ int amd_iommu_activate_guest_mode(void *data) entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; return modify_irte_ga(ir_data->irq_2_irte.devid, - ir_data->irq_2_irte.index, entry, NULL); + ir_data->irq_2_irte.index, entry, ir_data); } EXPORT_SYMBOL(amd_iommu_activate_guest_mode); @@ -4438,7 +4447,7 @@ int amd_iommu_deactivate_guest_mode(void *data) APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); return modify_irte_ga(ir_data->irq_2_irte.devid, - ir_data->irq_2_irte.index, entry, NULL); + ir_data->irq_2_irte.index, entry, ir_data); } EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 568c52317757..00319f404a30 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -147,10 +147,10 @@ bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; -static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; +static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; static bool amd_iommu_detected; -static bool __initdata amd_iommu_disabled; +static bool __initdata amd_iommu_disabled = true; static int amd_iommu_target_ivhd_type; u16 amd_iommu_last_bdf; /* largest PCI device id we have @@ -1523,8 +1523,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; - if (((h->efr_attr & (0x1 << 
IOMMU_FEAT_XTSUP_SHIFT)) == 0)) - amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; break; case 0x11: case 0x40: @@ -1534,8 +1532,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; - if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0)) - amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; + /* + * Note: Since iommu_update_intcapxt() leverages + * the IOMMU MMIO access to MSI capability block registers + * for MSI address lo/hi/data, we need to check both + * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support. + */ + if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) && + (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT))) + amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; break; default: return -EINVAL; @@ -1655,27 +1660,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, static void init_iommu_perf_ctr(struct amd_iommu *iommu) { struct pci_dev *pdev = iommu->dev; - u64 val = 0xabcd, val2 = 0; + u64 val = 0xabcd, val2 = 0, save_reg = 0; if (!iommu_feature(iommu, FEATURE_PC)) return; amd_iommu_pc_present = true; + /* save the value to restore, if writable */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) + goto pc_false; + /* Check if the performance counters can be written to */ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || - (val != val2)) { - pci_err(pdev, "Unable to write to IOMMU perf counter.\n"); - amd_iommu_pc_present = false; - return; - } + (val != val2)) + goto pc_false; + + /* restore */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) + goto pc_false; pci_info(pdev, "IOMMU performance counters supported\n"); val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); iommu->max_banks = (u8) ((val >> 12) & 0x3f); iommu->max_counters = (u8) ((val >> 7) & 0xf); + + return; + +pc_false: + pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; + return; } static ssize_t amd_iommu_show_cap(struct device *dev, @@ -1984,8 +2001,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu) struct irq_affinity_notify *notify = &iommu->intcapxt_notify; /** - * IntCapXT requires XTSup=1, which can be inferred - * amd_iommu_xt_mode. + * IntCapXT requires XTSup=1 and MsiCapMmioSup=1, + * which can be inferred from amd_iommu_xt_mode. */ if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE) return 0; @@ -2516,6 +2533,7 @@ static int __init early_amd_iommu_init(void) struct acpi_table_header *ivrs_base; acpi_status status; int i, remap_cache_sz, ret = 0; + u32 pci_id; if (!amd_iommu_detected) return -ENODEV; @@ -2603,6 +2621,16 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; + /* Disable IOMMU if there's Stoney Ridge graphics */ + for (i = 0; i < 32; i++) { + pci_id = read_pci_config(0, i, 0, 0); + if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { + pr_info("Disable IOMMU on Stoney Ridge\n"); + amd_iommu_disabled = true; + break; + } + } + /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) disable_iommus(); @@ -2711,7 +2739,7 @@ static int __init state_next(void) ret = early_amd_iommu_init(); init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { - pr_info("AMD IOMMU disabled on kernel command-line\n"); + pr_info("AMD IOMMU disabled\n"); init_state = IOMMU_CMDLINE_DISABLED; ret = -EINVAL; } @@ -2936,6 +2964,8 @@ static int __init parse_amd_iommu_options(char *str) amd_iommu_unmap_flush = true; if (strncmp(str, "off", 3) == 0) amd_iommu_disabled = true; + if (strncmp(str, "on", 2) == 0) + amd_iommu_disabled = false; if (strncmp(str, "force_isolation", 15) == 0) amd_iommu_force_isolation = true; } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 17bd5a349119..daeabd98c60e 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -377,12 +377,12 @@ #define IOMMU_CAP_EFR 27 /* IOMMU Feature Reporting Field (for IVHD type 10h */ -#define IOMMU_FEAT_XTSUP_SHIFT 0 #define IOMMU_FEAT_GASUP_SHIFT 6 /* IOMMU Extended Feature Register (EFR) */ #define IOMMU_EFR_XTSUP_SHIFT 2 #define IOMMU_EFR_GASUP_SHIFT 7 +#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46 #define MAX_DOMAIN_ID 65536 @@ -639,8 +639,8 @@ struct iommu_dev_data { struct list_head list; /* For domain->dev_list */ struct llist_node dev_data_list; /* For global dev_data_list */ struct protection_domain *domain; /* Domain the device is bound to */ + struct pci_dev *pdev; u16 devid; /* PCI Device ID */ - u16 alias; /* Alias Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ bool passthrough; /* Device is identity mapped */ struct { diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8da93e730d6f..4736d8612d30 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -393,6 +393,63 @@ module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); +#ifdef CONFIG_SMMU_BYPASS_DEV +struct smmu_bypass_device { + unsigned short vendor; + unsigned short device; +}; +#define MAX_CMDLINE_SMMU_BYPASS_DEV 32 + +static struct smmu_bypass_device smmu_bypass_devices[MAX_CMDLINE_SMMU_BYPASS_DEV]; +static int smmu_bypass_devices_num; +static bool smmu_bypass_all_blkdev = false; + +static int __init arm_smmu_bypass_dev_setup(char *str) +{ +#define PARAMS_LEN 256 + unsigned short vendor; + unsigned short device; + int ret, len; + char params[PARAMS_LEN]; + char *p1, *p2, *e; + + if (!str) + return -EINVAL; + + len = min((size_t)(PARAMS_LEN - 1), strlen(str)); + strscpy(params, str, PARAMS_LEN); + p1 = params; + e = params + len; + + while (p1 <= e) { + p2 = strstr(p1, ","); + if (p2) + *p2 = 0; + if (!strcmp(p1, "blk")) { + smmu_bypass_all_blkdev = true; + } else { + ret = sscanf(p1, "%hx:%hx", &vendor, &device); + if (ret != 2) + return -EINVAL; + + if (smmu_bypass_devices_num >= MAX_CMDLINE_SMMU_BYPASS_DEV) + return -ERANGE; + + smmu_bypass_devices[smmu_bypass_devices_num].vendor = vendor; + smmu_bypass_devices[smmu_bypass_devices_num].device = device; + smmu_bypass_devices_num++; + } + if (!p2) + break; + p1 = p2 + 1; + } + + return 0; +} + +__setup("smmu.bypassdev=", arm_smmu_bypass_dev_setup); +#endif + enum pri_resp { PRI_RESP_DENY = 0, PRI_RESP_FAIL = 1, @@ -856,6 +913,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31); break; case CMDQ_OP_TLBI_NH_VA: + cmd[0] |=
FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; @@ -1642,7 +1700,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, STRTAB_STE_1_EATS_TRANS)); arm_smmu_sync_ste_for_sid(smmu, sid); - dst[0] = cpu_to_le64(val); + /* See comment in arm_smmu_write_ctx_desc() */ + WRITE_ONCE(dst[0], cpu_to_le64(val)); arm_smmu_sync_ste_for_sid(smmu, sid); /* It's likely that we'll want to use the new STE soon */ @@ -2719,6 +2778,39 @@ static void arm_smmu_put_resv_regions(struct device *dev, kfree(entry); } +#ifdef CONFIG_SMMU_BYPASS_DEV +static int arm_smmu_device_domain_type(struct device *dev, unsigned int *type) +{ + int i; + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) + return -ERANGE; + + pdev = to_pci_dev(dev); + + if (smmu_bypass_all_blkdev && + (pdev->class >> 16) == PCI_BASE_CLASS_STORAGE) { + dev_info(dev, "device 0x%hx:0x%hx uses identity mapping.", + pdev->vendor, pdev->device); + *type = IOMMU_DOMAIN_IDENTITY; + return 0; + } + + for (i = 0; i < smmu_bypass_devices_num; i++) { + if ((smmu_bypass_devices[i].vendor == pdev->vendor) + && (smmu_bypass_devices[i].device == pdev->device)) { + dev_info(dev, "device 0x%hx:0x%hx uses identity mapping.", + pdev->vendor, pdev->device); + *type = IOMMU_DOMAIN_IDENTITY; + return 0; + } + } + + return -ERANGE; +} +#endif + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -2737,6 +2829,9 @@ static struct iommu_ops arm_smmu_ops = { .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = arm_smmu_put_resv_regions, +#ifdef CONFIG_SMMU_BYPASS_DEV + .device_domain_type = arm_smmu_device_domain_type, +#endif .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -2863,12 +2958,60 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) return 0; } +#ifdef CONFIG_SMMU_BYPASS_DEV +static void arm_smmu_install_bypass_ste_for_dev(struct arm_smmu_device *smmu, + u32 sid) +{ + u64 val; + __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); + + if (!step) + return; + + val = STRTAB_STE_0_V; + val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS); + step[0] = cpu_to_le64(val); + step[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, + STRTAB_STE_1_SHCFG_INCOMING)); + step[2] = 0; + +} + +static int arm_smmu_prepare_init_l2_strtab(struct device *dev, void *data) +{ + u32 sid; + int ret; + unsigned int type; + struct pci_dev *pdev; + struct arm_smmu_device *smmu = (struct arm_smmu_device *)data; + + if (arm_smmu_device_domain_type(dev, &type)) + return 0; + + pdev = to_pci_dev(dev); + sid = PCI_DEVID(pdev->bus->number, pdev->devfn); + if (!arm_smmu_sid_in_range(smmu, sid)) + return -ERANGE; + + ret = arm_smmu_init_l2_strtab(smmu, sid); + if (ret) + return ret; + + arm_smmu_install_bypass_ste_for_dev(smmu, sid); + + return 0; +} +#endif + static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { void *strtab; u64 reg; u32 size, l1size; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; +#ifdef CONFIG_SMMU_BYPASS_DEV + int ret; +#endif /* Calculate the L1 size, capped to the SIDSIZE. 
*/ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); @@ -2898,7 +3041,19 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); cfg->strtab_base_cfg = reg; +#ifdef CONFIG_SMMU_BYPASS_DEV + ret = arm_smmu_init_l1_strtab(smmu); + if (ret) + return ret; + + if (smmu_bypass_devices_num) { + ret = bus_for_each_dev(&pci_bus_type, NULL, (void *)smmu, + arm_smmu_prepare_init_l2_strtab); + } + return ret; +#else return arm_smmu_init_l1_strtab(smmu); +#endif } static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) @@ -3611,19 +3766,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev) /* Interrupt lines */ - irq = platform_get_irq_byname(pdev, "combined"); + irq = platform_get_irq_byname_optional(pdev, "combined"); if (irq > 0) smmu->combined_irq = irq; else { - irq = platform_get_irq_byname(pdev, "eventq"); + irq = platform_get_irq_byname_optional(pdev, "eventq"); if (irq > 0) smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "priq"); + irq = platform_get_irq_byname_optional(pdev, "priq"); if (irq > 0) smmu->priq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "gerror"); + irq = platform_get_irq_byname_optional(pdev, "gerror"); if (irq > 0) smmu->gerr_irq = irq; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f321279baf9e..76bd2309e023 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -43,7 +44,6 @@ struct iommu_dma_cookie { dma_addr_t msi_iova; }; struct list_head msi_page_list; - spinlock_t msi_lock; /* Domain for flush queue callback; NULL if flush queue not in use */ struct iommu_domain *fq_domain; @@ -62,7 +62,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (cookie) { - spin_lock_init(&cookie->msi_lock); INIT_LIST_HEAD(&cookie->msi_page_list); cookie->type = type; } @@ -177,15 +176,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, start -= iova_offset(iovad, start); num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); - msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); - if (!msi_page) - return -ENOMEM; - for (i = 0; i < num_pages; i++) { - msi_page[i].phys = start; - msi_page[i].iova = start; - INIT_LIST_HEAD(&msi_page[i].list); - list_add(&msi_page[i].list, &cookie->msi_page_list); + msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL); + if (!msi_page) + return -ENOMEM; + + msi_page->phys = start; + msi_page->iova = start; + INIT_LIST_HEAD(&msi_page->list); + list_add(&msi_page->list, &cookie->msi_page_list); start += iovad->granule; } @@ -1150,7 +1149,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (msi_page->phys == msi_addr) return msi_page; - msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); + msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); if (!msi_page) return NULL; @@ -1178,25 +1177,22 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { struct device *dev = msi_desc_to_dev(desc); struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_dma_cookie *cookie; struct iommu_dma_msi_page *msi_page; - unsigned long flags; + static DEFINE_MUTEX(msi_prepare_lock); /* see below */ if (!domain || !domain->iova_cookie) { desc->iommu_cookie = NULL; return 0; } - cookie = domain->iova_cookie; - /* - * We disable IRQs to rule out 
a possible inversion against - * irq_desc_lock if, say, someone tries to retarget the affinity - * of an MSI from within an IPI handler. + * In fact the whole prepare operation should already be serialised by + * irq_domain_mutex further up the callchain, but that's pretty subtle + * on its own, so consider this locking as failsafe documentation... */ - spin_lock_irqsave(&cookie->msi_lock, flags); + mutex_lock(&msi_prepare_lock); msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); - spin_unlock_irqrestore(&cookie->msi_lock, flags); + mutex_unlock(&msi_prepare_lock); msi_desc_set_iommu_cookie(desc, msi_page); diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index eecd6a421667..9e393b9c5091 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) BUG_ON(dev->is_virtfn); + /* + * Ignore devices that have a domain number higher than what can + * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000 + */ + if (pci_domain_nr(dev->bus) > U16_MAX) + return NULL; + /* Only generate path[] for device addition event */ if (event == BUS_NOTIFY_ADD_DEVICE) for (tmp = dev; tmp; tmp = tmp->bus->self) @@ -363,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd) { struct dmar_drhd_unit *dmaru; - list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list) + list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list, + dmar_rcu_check()) if (dmaru->segment == drhd->segment && dmaru->reg_base_addr == drhd->address) return dmaru; @@ -440,12 +449,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header, /* Check for NUL termination within the designated length */ if (strnlen(andd->device_name, header->length - 8) == header->length - 8) { - WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, + pr_warn(FW_BUG "Your BIOS is broken; ANDD object name is not NUL-terminated\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); return -EINVAL; } pr_info("ANDD device: %x name: %s\n", andd->device_number, @@ -471,14 +481,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) return 0; } } - WARN_TAINT( - 1, TAINT_FIRMWARE_WORKAROUND, + pr_warn(FW_BUG "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - drhd->reg_base_addr, + rhsa->base_address, dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); return 0; } @@ -827,14 +837,14 @@ int __init dmar_table_init(void) static void warn_invalid_dmar(u64 addr, const char *message) { - WARN_TAINT_ONCE( - 1, TAINT_FIRMWARE_WORKAROUND, + pr_warn_once(FW_BUG "Your BIOS is broken; DMAR reported at address %llx%s!\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", addr, message, dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); } static int __ref @@ -1351,7 +1361,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, struct qi_desc desc; if (mask) { - WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)); addr |= (1ULL << (VTD_PAGE_SHIFT + 
mask - 1)) - 1; desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index 471f05d452e0..bdf095e9dbe0 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c @@ -32,38 +32,42 @@ struct iommu_regset { #define IOMMU_REGSET_ENTRY(_reg_) \ { DMAR_##_reg_##_REG, __stringify(_reg_) } -static const struct iommu_regset iommu_regs[] = { + +static const struct iommu_regset iommu_regs_32[] = { IOMMU_REGSET_ENTRY(VER), - IOMMU_REGSET_ENTRY(CAP), - IOMMU_REGSET_ENTRY(ECAP), IOMMU_REGSET_ENTRY(GCMD), IOMMU_REGSET_ENTRY(GSTS), - IOMMU_REGSET_ENTRY(RTADDR), - IOMMU_REGSET_ENTRY(CCMD), IOMMU_REGSET_ENTRY(FSTS), IOMMU_REGSET_ENTRY(FECTL), IOMMU_REGSET_ENTRY(FEDATA), IOMMU_REGSET_ENTRY(FEADDR), IOMMU_REGSET_ENTRY(FEUADDR), - IOMMU_REGSET_ENTRY(AFLOG), IOMMU_REGSET_ENTRY(PMEN), IOMMU_REGSET_ENTRY(PLMBASE), IOMMU_REGSET_ENTRY(PLMLIMIT), - IOMMU_REGSET_ENTRY(PHMBASE), - IOMMU_REGSET_ENTRY(PHMLIMIT), - IOMMU_REGSET_ENTRY(IQH), - IOMMU_REGSET_ENTRY(IQT), - IOMMU_REGSET_ENTRY(IQA), IOMMU_REGSET_ENTRY(ICS), - IOMMU_REGSET_ENTRY(IRTA), - IOMMU_REGSET_ENTRY(PQH), - IOMMU_REGSET_ENTRY(PQT), - IOMMU_REGSET_ENTRY(PQA), IOMMU_REGSET_ENTRY(PRS), IOMMU_REGSET_ENTRY(PECTL), IOMMU_REGSET_ENTRY(PEDATA), IOMMU_REGSET_ENTRY(PEADDR), IOMMU_REGSET_ENTRY(PEUADDR), +}; + +static const struct iommu_regset iommu_regs_64[] = { + IOMMU_REGSET_ENTRY(CAP), + IOMMU_REGSET_ENTRY(ECAP), + IOMMU_REGSET_ENTRY(RTADDR), + IOMMU_REGSET_ENTRY(CCMD), + IOMMU_REGSET_ENTRY(AFLOG), + IOMMU_REGSET_ENTRY(PHMBASE), + IOMMU_REGSET_ENTRY(PHMLIMIT), + IOMMU_REGSET_ENTRY(IQH), + IOMMU_REGSET_ENTRY(IQT), + IOMMU_REGSET_ENTRY(IQA), + IOMMU_REGSET_ENTRY(IRTA), + IOMMU_REGSET_ENTRY(PQH), + IOMMU_REGSET_ENTRY(PQT), + IOMMU_REGSET_ENTRY(PQA), IOMMU_REGSET_ENTRY(MTRRCAP), IOMMU_REGSET_ENTRY(MTRRDEF), IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000), @@ -126,10 +130,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused) * by adding the offset to the pointer (virtual address). 
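 * (The iommu_regs_32/iommu_regs_64 split above lets each register be
 * dumped through an accessor of matching width, dmar_readl() for the
 * 32-bit registers and dmar_readq() for the 64-bit ones; presumably a
 * 64-bit read of a 32-bit VT-d register, which is what the old single
 * table relied on, is not guaranteed to behave on every implementation.)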
*/ raw_spin_lock_irqsave(&iommu->register_lock, flag); - for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) { - value = dmar_readq(iommu->reg + iommu_regs[i].offset); + for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) { + value = dmar_readl(iommu->reg + iommu_regs_32[i].offset); seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n", - iommu_regs[i].regs, iommu_regs[i].offset, + iommu_regs_32[i].regs, iommu_regs_32[i].offset, + value); + } + for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) { + value = dmar_readq(iommu->reg + iommu_regs_64[i].offset); + seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n", + iommu_regs_64[i].regs, iommu_regs_64[i].offset, value); } raw_spin_unlock_irqrestore(&iommu->register_lock, flag); @@ -271,9 +281,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; + u32 sts; rcu_read_lock(); for_each_active_iommu(iommu, drhd) { + sts = dmar_readl(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_TES)) { + seq_printf(m, "DMA Remapping is not enabled on %s\n", + iommu->name); + continue; + } root_tbl_walk(m, iommu); seq_putc(m, '\n'); } @@ -343,6 +360,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused) struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; u64 irta; + u32 sts; rcu_read_lock(); for_each_active_iommu(iommu, drhd) { @@ -352,7 +370,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused) seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n", iommu->name); - if (iommu->ir_table) { + sts = dmar_readl(iommu->reg + DMAR_GSTS_REG); + if (iommu->ir_table && (sts & DMA_GSTS_IRES)) { irta = virt_to_phys(iommu->ir_table->base); seq_printf(m, " IR table address:%llx\n", irta); ir_tbl_remap_entry_show(m, iommu); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 6db6d969e31c..0d922eeae357 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2762,10 +2762,8 @@ static int __init si_domain_init(int hw) } /* - * Normally we use DMA domains for devices which have RMRRs. But we - * loose this requirement for graphic and usb devices. Identity map - * the RMRRs for graphic and USB devices so that they could use the - * si_domain. + * Identity map the RMRRs so that devices with RMRRs could also use + * the si_domain. */ for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, @@ -2773,9 +2771,6 @@ static int __init si_domain_init(int hw) unsigned long long start = rmrr->base_address; unsigned long long end = rmrr->end_address; - if (device_is_rmrr_locked(dev)) - continue; - if (WARN_ON(end < start || end >> agaw_to_width(si_domain->agaw))) continue; @@ -2914,9 +2909,6 @@ static int device_def_domain_type(struct device *dev) if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); - if (device_is_rmrr_locked(dev)) - return IOMMU_DOMAIN_DMA; - /* * Prevent any device marked as untrusted from getting * placed into the statically identity mapping domain. @@ -2954,9 +2946,6 @@ static int device_def_domain_type(struct device *dev) return IOMMU_DOMAIN_DMA; } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) return IOMMU_DOMAIN_DMA; - } else { - if (device_has_rmrr(dev)) - return IOMMU_DOMAIN_DMA; } return (iommu_identity_mapping & IDENTMAP_ALL) ? 
@@ -3401,7 +3390,8 @@ static unsigned long intel_alloc_iova(struct device *dev, iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask), true); if (unlikely(!iova_pfn)) { - dev_err(dev, "Allocating %ld-page iova failed", nrpages); + dev_err_once(dev, "Allocating %ld-page iova failed\n", + nrpages); return 0; } @@ -4128,10 +4118,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) /* we know that the this iommu should be at offset 0xa000 from vtbar */ drhd = dmar_find_matched_drhd_unit(pdev); - if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, - TAINT_FIRMWARE_WORKAROUND, - "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) + if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { + pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); @@ -4959,6 +4950,9 @@ int __init intel_iommu_init(void) down_write(&dmar_global_lock); + if (!no_iommu) + intel_iommu_debugfs_init(); + if (no_iommu || dmar_disabled) { /* * We exit the function here to ensure IOMMU's remapping and @@ -5022,6 +5016,7 @@ int __init intel_iommu_init(void) init_iommu_pm_ops(); + down_read(&dmar_global_lock); for_each_active_iommu(iommu, drhd) { iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, @@ -5029,6 +5024,7 @@ int __init intel_iommu_init(void) iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); iommu_device_register(&iommu->iommu); } + up_read(&dmar_global_lock); bus_set_iommu(&pci_bus_type, &intel_iommu_ops); if (si_domain && !hw_pass_through) @@ -5039,7 +5035,6 @@ int __init intel_iommu_init(void) down_read(&dmar_global_lock); if (probe_acpi_namespace_devices()) pr_warn("ACPI name space devices didn't probe correctly\n"); - up_read(&dmar_global_lock); /* Finally, we enable the DMA remapping hardware. */ for_each_iommu(iommu, drhd) { @@ -5048,10 +5043,11 @@ int __init intel_iommu_init(void) iommu_disable_protect_mem_regions(iommu); } + up_read(&dmar_global_lock); + pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); intel_iommu_enabled = 1; - intel_iommu_debugfs_init(); return 0; @@ -5132,7 +5128,8 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; - if (info) + if (info && info != DEFER_DEVICE_DOMAIN_INFO + && info != DUMMY_DEVICE_DOMAIN_INFO) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); } @@ -5447,9 +5444,6 @@ static int intel_iommu_map(struct iommu_domain *domain, int prot = 0; int ret; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return -EINVAL; - if (iommu_prot & IOMMU_READ) prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) @@ -5492,8 +5486,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, /* Cope with horrid API which requires us to unmap more than the size argument if it happens to be a large-page mapping. 
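 * For example, a request to unmap a single 4KiB page that lands inside
 * a 2MiB superpage tears down the whole superpage, so the level check
 * below rounds the reported size up to the full mapping.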
*/ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)); - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) size = VTD_PAGE_SIZE << level_to_offset_bits(level); @@ -5525,12 +5517,11 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, int level = 0; u64 phys = 0; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); - if (pte) - phys = dma_pte_addr(pte); + if (pte && dma_pte_present(pte)) + phys = dma_pte_addr(pte) + + (iova & (BIT_MASK(level_to_offset_bits(level) + + VTD_PAGE_SHIFT) - 1)); return phys; } @@ -5601,8 +5592,10 @@ static int intel_iommu_add_device(struct device *dev) group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto unlink; + } iommu_group_put(group); @@ -5628,7 +5621,8 @@ static int intel_iommu_add_device(struct device *dev) if (!get_private_domain_for_dev(dev)) { dev_warn(dev, "Failed to get a private domain.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unlink; } dev_info(dev, @@ -5643,6 +5637,10 @@ static int intel_iommu_add_device(struct device *dev) } return 0; + +unlink: + iommu_device_unlink(&iommu->iommu, dev); + return ret; } static void intel_iommu_remove_device(struct device *dev) @@ -5705,8 +5703,8 @@ static void intel_iommu_get_resv_regions(struct device *device, struct pci_dev *pdev = to_pci_dev(device); if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { - reg = iommu_alloc_resv_region(0, 1UL << 24, 0, - IOMMU_RESV_DIRECT); + reg = iommu_alloc_resv_region(0, 1UL << 24, prot, + IOMMU_RESV_DIRECT_RELAXABLE); if (reg) list_add_tail(®->list, head); } @@ -5794,6 +5792,13 @@ static void intel_iommu_apply_resv_region(struct device *dev, WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); } +static struct iommu_group *intel_iommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + return generic_device_group(dev); +} + #ifdef CONFIG_INTEL_IOMMU_SVM struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { @@ -5966,7 +5971,7 @@ const struct iommu_ops intel_iommu_ops = { .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = intel_iommu_put_resv_regions, .apply_resv_region = intel_iommu_apply_resv_region, - .device_group = pci_device_group, + .device_group = intel_iommu_device_group, .dev_has_feat = intel_iommu_dev_has_feat, .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c index 040a445be300..e7cb0b8a7332 100644 --- a/drivers/iommu/intel-pasid.c +++ b/drivers/iommu/intel-pasid.c @@ -499,8 +499,16 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, } #ifdef CONFIG_X86 - if (cpu_feature_enabled(X86_FEATURE_LA57)) - pasid_set_flpm(pte, 1); + /* Both CPU and IOMMU paging mode need to match */ + if (cpu_feature_enabled(X86_FEATURE_LA57)) { + if (cap_5lp_support(iommu->cap)) { + pasid_set_flpm(pte, 1); + } else { + pr_err("VT-d has no 5-level paging support for CPU\n"); + pasid_clear_entry(pte); + return -EINVAL; + } + } #endif /* CONFIG_X86 */ pasid_set_domain_id(pte, did); diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 9b159132405d..518d0b2d12af 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -104,11 +104,7 @@ static void 
intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d { struct qi_desc desc; - /* - * Do PASID granu IOTLB invalidation if page selective capability is - * not available. - */ - if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { + if (pages == -1) { desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) | QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | @@ -321,7 +317,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ ret = intel_pasid_alloc_id(svm, !!cap_caching_mode(iommu->cap), - pasid_max - 1, GFP_KERNEL); + pasid_max, GFP_KERNEL); if (ret < 0) { kfree(svm); kfree(sdev); @@ -658,11 +654,10 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (req->priv_data_present) memcpy(&resp.qw2, req->priv_data, sizeof(req->priv_data)); + resp.qw2 = 0; + resp.qw3 = 0; + qi_submit_sync(&resp, iommu); } - resp.qw2 = 0; - resp.qw3 = 0; - qi_submit_sync(&resp, iommu); - head = (head + sizeof(*req)) & PRQ_RING_MASK; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d658c7c6a2ab..b20fa3bb4939 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -312,8 +312,8 @@ int iommu_insert_resv_region(struct iommu_resv_region *new, list_for_each_entry_safe(iter, tmp, regions, list) { phys_addr_t top_end, iter_end = iter->start + iter->length - 1; - /* no merge needed on elements of different types than @nr */ - if (iter->type != nr->type) { + /* no merge needed on elements of different types than @new */ + if (iter->type != new->type) { list_move_tail(&iter->list, &stack); continue; } @@ -751,6 +751,7 @@ err_put_group: mutex_unlock(&group->mutex); dev->iommu_group = NULL; kobject_put(group->devices_kobj); + sysfs_remove_link(group->devices_kobj, device->name); err_free_name: kfree(device->name); err_remove_link: @@ -1354,7 +1355,9 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_group *group; int ret; - +#ifdef CONFIG_SMMU_BYPASS_DEV + unsigned int type = iommu_def_domain_type; +#endif group = iommu_group_get(dev); if (group) return group; @@ -1376,8 +1379,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (!group->default_domain) { struct iommu_domain *dom; +#ifdef CONFIG_SMMU_BYPASS_DEV + /* direct allocate required default domain type for some specific devices. */ + if (ops->device_domain_type != NULL) { + if (ops->device_domain_type(dev, &type)) + type = iommu_def_domain_type; + } + + dom = __iommu_domain_alloc(dev->bus, type); + if (!dom && type != IOMMU_DOMAIN_DMA) { +#else dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { +#endif dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); if (dom) { dev_warn(dev, @@ -2221,13 +2235,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) goto out; } - iommu_group_create_direct_mappings(group, dev); - /* Make the domain the default for this group */ if (group->default_domain) iommu_domain_free(group->default_domain); group->default_domain = domain; + iommu_group_create_direct_mappings(group, dev); + dev_info(dev, "Using iommu %s mapping\n", type == IOMMU_DOMAIN_DMA ? 
"dma" : "direct"); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 41c605b0058f..0e6a9536eca6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex); struct iova *alloc_iova_mem(void) { - return kmem_cache_alloc(iova_cache, GFP_ATOMIC); + return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN); } EXPORT_SYMBOL(alloc_iova_mem); diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 67a483c1a935..c2f6c78fee44 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -219,22 +219,37 @@ static void mtk_iommu_tlb_sync(void *cookie) static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct mtk_iommu_data *data = cookie; + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie); mtk_iommu_tlb_sync(cookie); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct mtk_iommu_data *data = cookie; + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie); mtk_iommu_tlb_sync(cookie); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) { + struct mtk_iommu_data *data = cookie; + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static const struct iommu_flush_ops mtk_iommu_flush_ops = { @@ -447,13 +462,18 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) { - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); + mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data()); } static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); + mtk_iommu_tlb_sync(data); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, @@ -733,6 +753,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + spin_lock_init(&data->tlb_lock); list_add_tail(&data->list, &m4ulist); if (!iommu_present(&platform_bus_type)) diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index fc0f16eabacd..8cae22de7663 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -58,6 +58,7 @@ struct mtk_iommu_data { struct iommu_group *m4u_group; bool enable_4GB; bool tlb_flush_active; + spinlock_t tlb_lock; /* lock for tlb range flush */ struct iommu_device iommu; const struct mtk_iommu_plat_data *plat_data; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c31e7bc4ccbe..e0b3fa2bb7ab 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -345,21 +345,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain) { struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); - if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? 
*/ - return; - iommu_put_dma_cookie(domain); - /* NOTE: unmap can be called after client device is powered off, - * for example, with GPUs or anything involving dma-buf. So we - * cannot rely on the device_link. Make sure the IOMMU is on to - * avoid unclocked accesses in the TLB inv path: - */ - pm_runtime_get_sync(qcom_domain->iommu->dev); - - free_io_pgtable_ops(qcom_domain->pgtbl_ops); - - pm_runtime_put_sync(qcom_domain->iommu->dev); + if (qcom_domain->iommu) { + /* + * NOTE: unmap can be called after client device is powered + * off, for example, with GPUs or anything involving dma-buf. + * So we cannot rely on the device_link. Make sure the IOMMU + * is on to avoid unclocked accesses in the TLB inv path: + */ + pm_runtime_get_sync(qcom_domain->iommu->dev); + free_io_pgtable_ops(qcom_domain->pgtbl_ops); + pm_runtime_put_sync(qcom_domain->iommu->dev); + } kfree(qcom_domain); } @@ -405,7 +403,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); unsigned i; - if (!qcom_domain->iommu) + if (WARN_ON(!qcom_domain->iommu)) return; pm_runtime_get_sync(qcom_iommu->dev); @@ -418,8 +416,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de ctx->domain = NULL; } pm_runtime_put_sync(qcom_iommu->dev); - - qcom_domain->iommu = NULL; } static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4dcbf68dfda4..0df091934361 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) if (!dma_dev) return NULL; - rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL); + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); if (!rk_domain) return NULL; if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&rk_domain->domain)) - return NULL; + goto err_free_domain; /* * rk32xx iommus use a 2 level pagetable. 
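The kzalloc()/kfree() conversion above is a lifetime fix: devm_kzalloc() ties the allocation to dma_dev, so each domain alloc/free cycle would leave its struct rk_iommu_domain on that device's devres list until the device itself went away, and the early error return reclaimed nothing at that point. A hypothetical caller, only to illustrate the pairing that the kfree() calls added below establish:

	/* illustrative only: domain memory now lives exactly as long as
	 * the domain, not as long as dma_dev */
	struct iommu_domain *dom = rk_iommu_domain_alloc(IOMMU_DOMAIN_DMA);

	if (dom)
		rk_iommu_domain_free(dom);	/* kfree(rk_domain) runs here */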
@@ -1021,6 +1021,8 @@ err_free_dt: err_put_cookie: if (type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); +err_free_domain: + kfree(rk_domain); return NULL; } @@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) if (domain->type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); + kfree(rk_domain); } static int rk_iommu_add_device(struct device *dev) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 7293fc3f796d..dd486233e282 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) return (addr & smmu->pfn_mask) == addr; } -static dma_addr_t smmu_pde_to_dma(u32 pde) +static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde) { - return pde << 12; + return (dma_addr_t)(pde & smmu->pfn_mask) << 12; } static void smmu_flush_ptc_all(struct tegra_smmu *smmu) @@ -549,6 +549,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, dma_addr_t *dmap) { unsigned int pd_index = iova_pd_index(iova); + struct tegra_smmu *smmu = as->smmu; struct page *pt_page; u32 *pd; @@ -557,7 +558,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, return NULL; pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pd_index]); + *dmap = smmu_pde_to_dma(smmu, pd[pd_index]); return tegra_smmu_pte_offset(pt_page, iova); } @@ -599,7 +600,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, } else { u32 *pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pde]); + *dmap = smmu_pde_to_dma(smmu, pd[pde]); } return tegra_smmu_pte_offset(as->pts[pde], iova); @@ -624,7 +625,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) if (--as->count[pde] == 0) { struct tegra_smmu *smmu = as->smmu; u32 *pd = page_address(as->pd); - dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]); + dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]); tegra_smmu_set_pde(as, iova, 0); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index ccbb8973a324..97f9c001d8ff 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -483,8 +483,6 @@ config TI_SCI_INTA_IRQCHIP If you wish to use interrupt aggregator irq resources managed by the TI System Controller, say Y here. Otherwise, say N. -endmenu - config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV @@ -496,3 +494,5 @@ config SIFIVE_PLIC interrupt sources are subordinate to the PLIC. If you don't know what to do here, say Y. 
+ +endmenu diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index fc75c61233aa..58bec2126966 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -281,6 +281,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, pr_err("failed to map parent interrupt %d\n", parent_irq); return -EINVAL; } + + if (of_property_read_bool(dn, "brcm,irq-can-wake")) + enable_irq_wake(parent_irq); + irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle, intc); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 787e8eec9a7f..11f3b50dcdcb 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -571,7 +571,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_INVALL); - its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); its_fixup_cmd(cmd); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1edc99335a94..446603efbc90 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1801,6 +1801,7 @@ static struct struct redist_region *redist_regs; u32 nr_redist_regions; bool single_redist; + int enabled_rdists; u32 maint_irq; int maint_irq_mode; phys_addr_t vcpu_base; @@ -1895,8 +1896,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, * If GICC is enabled and has valid gicr base address, then it means * GICR base is presented via GICC */ - if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { + acpi_data.enabled_rdists++; return 0; + } /* * It's perfectly valid firmware can pass disabled GICC entry, driver @@ -1926,8 +1929,10 @@ static int __init gic_acpi_count_gicr_regions(void) count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_match_gicc, 0); - if (count > 0) + if (count > 0) { acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } return count; } diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index f126255b3260..dda512dfe2c1 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -108,6 +108,14 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } + domain = irq_domain_add_legacy(node, num_chips * 32, + JZ4740_IRQ_BASE, 0, + &irq_domain_simple_ops, NULL); + if (!domain) { + err = -ENOMEM; + goto out_unmap_base; + } + for (i = 0; i < num_chips; i++) { /* Mask all irqs */ writel(0xffffffff, intc->base + (i * CHIP_SIZE) + @@ -134,14 +142,11 @@ static int __init ingenic_intc_of_init(struct device_node *node, IRQ_NOPROBE | IRQ_LEVEL); } - domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0, - &irq_domain_simple_ops, NULL); - if (!domain) - pr_warn("unable to register IRQ domain\n"); - setup_irq(parent_irq, &intc_cascade_action); return 0; +out_unmap_base: + iounmap(intc->base); out_unmap_irq: irq_dispose_mapping(parent_irq); out_free: diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3f09f658e8e2..6b566bba263b 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = { .name = "Hisilicon MBIGEN-V2", .of_match_table = mbigen_of_match, .acpi_match_table = ACPI_PTR(mbigen_acpi_match), + 
.suppress_bind_attrs = true, }, .probe = mbigen_device_probe, }; diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c index 250dc9d6f635..82350a28a564 100644 --- a/drivers/leds/leds-an30259a.c +++ b/drivers/leds/leds-an30259a.c @@ -305,6 +305,13 @@ static int an30259a_probe(struct i2c_client *client) chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config); + if (IS_ERR(chip->regmap)) { + err = PTR_ERR(chip->regmap); + dev_err(&client->dev, "Failed to allocate register map: %d\n", + err); + goto exit; + } + for (i = 0; i < chip->num_leds; i++) { struct led_init_data init_data = {}; diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index a5c73f3d5f79..2bf74595610f 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) struct gpio_led led = {}; const char *state = NULL; + /* + * Acquire gpiod from DT with uninitialized label, which + * will be updated after LED class device is registered, + * Only then the final LED name is known. + */ led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child, GPIOD_ASIS, - led.name); + NULL); if (IS_ERR(led.gpiod)) { fwnode_handle_put(child); return ERR_CAST(led.gpiod); @@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) fwnode_handle_put(child); return ERR_PTR(ret); } + /* Set gpiod label to match the corresponding LED name. */ + gpiod_set_consumer_name(led_dat->gpiod, + led_dat->cdev.dev->kobj.name); priv->num_leds++; } diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c index 3d381f2f73d0..1ac9a44570ee 100644 --- a/drivers/leds/leds-lm3692x.c +++ b/drivers/leds/leds-lm3692x.c @@ -334,9 +334,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led) return ret; } - led->regulator = devm_regulator_get(&led->client->dev, "vled"); - if (IS_ERR(led->regulator)) + led->regulator = devm_regulator_get_optional(&led->client->dev, "vled"); + if (IS_ERR(led->regulator)) { + ret = PTR_ERR(led->regulator); + if (ret != -ENODEV) { + if (ret != -EPROBE_DEFER) + dev_err(&led->client->dev, + "Failed to get vled regulator: %d\n", + ret); + return ret; + } led->regulator = NULL; + } child = device_get_next_child_node(&led->client->dev, child); if (!child) { diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c index 4c2d0b3c6dad..a0d4b725c917 100644 --- a/drivers/leds/leds-max77650.c +++ b/drivers/leds/leds-max77650.c @@ -135,9 +135,16 @@ err_node_put: return rv; } +static const struct of_device_id max77650_led_of_match[] = { + { .compatible = "maxim,max77650-led" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_led_of_match); + static struct platform_driver max77650_led_driver = { .driver = { .name = "max77650-led", + .of_match_table = max77650_led_of_match, }, .probe = max77650_led_probe, }; diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c index 4afc317901a8..66cdc003b8f4 100644 --- a/drivers/leds/leds-pca963x.c +++ b/drivers/leds/leds-pca963x.c @@ -40,6 +40,8 @@ #define PCA963X_LED_PWM 0x2 /* Controlled through PWM */ #define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */ +#define PCA963X_MODE2_OUTDRV 0x04 /* Open-drain or totem pole */ +#define PCA963X_MODE2_INVRT 0x10 /* Normal or inverted direction */ #define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */ #define PCA963X_MODE1 0x00 @@ -438,12 +440,12 @@ static int pca963x_probe(struct i2c_client *client, PCA963X_MODE2); /* Configure output: 
open-drain or totem pole (push-pull) */ if (pdata->outdrv == PCA963X_OPEN_DRAIN) - mode2 |= 0x01; + mode2 &= ~PCA963X_MODE2_OUTDRV; else - mode2 |= 0x05; + mode2 |= PCA963X_MODE2_OUTDRV; /* Configure direction: normal or inverted */ if (pdata->dir == PCA963X_INVERTED) - mode2 |= 0x10; + mode2 |= PCA963X_MODE2_INVRT; i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2, mode2); } diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c index 59ff088c7d75..9feaec3c8329 100644 --- a/drivers/leds/leds-tlc591xx.c +++ b/drivers/leds/leds-tlc591xx.c @@ -13,6 +13,7 @@ #include #define TLC591XX_MAX_LEDS 16 +#define TLC591XX_MAX_BRIGHTNESS 256 #define TLC591XX_REG_MODE1 0x00 #define MODE1_RESPON_ADDR_MASK 0xF0 @@ -112,11 +113,11 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev, struct tlc591xx_priv *priv = led->priv; int err; - switch (brightness) { + switch ((int)brightness) { case 0: err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF); break; - case LED_FULL: + case TLC591XX_MAX_BRIGHTNESS: err = tlc591xx_set_ledout(priv, led, LEDOUT_ON); break; default: @@ -157,7 +158,7 @@ tlc591xx_configure(struct device *dev, led->priv = priv; led->led_no = i; led->ldev.brightness_set_blocking = tlc591xx_brightness_set; - led->ldev.max_brightness = LED_FULL; + led->ldev.max_brightness = TLC591XX_MAX_BRIGHTNESS; err = led_classdev_register(dev, &led->ldev); if (err < 0) { dev_err(dev, "couldn't register LED %s\n", diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 136f86a1627d..d5e774d83021 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -302,10 +302,12 @@ static int netdev_trig_notify(struct notifier_block *nb, container_of(nb, struct led_netdev_data, notifier); if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE - && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER) + && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER + && evt != NETDEV_CHANGENAME) return NOTIFY_DONE; if (!(dev == trigger_data->net_dev || + (evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) || (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name)))) return NOTIFY_DONE; @@ -315,6 +317,7 @@ static int netdev_trig_notify(struct notifier_block *nb, clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode); switch (evt) { + case NETDEV_CHANGENAME: case NETDEV_REGISTER: if (trigger_data->net_dev) dev_put(trigger_data->net_dev); diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 8c744578122a..a0d87ed9da69 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -300,9 +300,11 @@ static int control_loop(void *dummy) /* i2c probing and setup */ /************************************************************************/ -static int -do_attach( struct i2c_adapter *adapter ) +static void do_attach(struct i2c_adapter *adapter) { + struct i2c_board_info info = { }; + struct device_node *np; + /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */ static const unsigned short scan_ds1775[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, @@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter ) I2C_CLIENT_END }; - if( strncmp(adapter->name, "uni-n", 5) ) - return 0; + if (x.running || strncmp(adapter->name, "uni-n", 5)) + return; - if( !x.running ) { - struct i2c_board_info info; - - memset(&info, 0, sizeof(struct i2c_board_info)); - strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE); + 
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775"); + if (np) { + of_node_put(np); + } else { + strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE); i2c_new_probed_device(adapter, &info, scan_ds1775, NULL); - - strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE); - i2c_new_probed_device(adapter, &info, scan_adm1030, NULL); - - if( x.thermostat && x.fan ) { - x.running = 1; - x.poll_task = kthread_run(control_loop, NULL, "g4fand"); - } } - return 0; + + np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030"); + if (np) { + of_node_put(np); + } else { + strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE); + i2c_new_probed_device(adapter, &info, scan_adm1030, NULL); + } } static int @@ -404,8 +405,8 @@ out: enum chip { ds1775, adm1030 }; static const struct i2c_device_id therm_windtunnel_id[] = { - { "therm_ds1775", ds1775 }, - { "therm_adm1030", adm1030 }, + { "MAC,ds1775", ds1775 }, + { "MAC,adm1030", adm1030 }, { } }; MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id); @@ -414,6 +415,7 @@ static int do_probe(struct i2c_client *cl, const struct i2c_device_id *id) { struct i2c_adapter *adapter = cl->adapter; + int ret = 0; if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_WRITE_BYTE) ) @@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id) switch (id->driver_data) { case adm1030: - return attach_fan( cl ); + ret = attach_fan(cl); + break; case ds1775: - return attach_thermostat(cl); + ret = attach_thermostat(cl); + break; } - return 0; + + if (!x.running && x.thermostat && x.fan) { + x.running = 1; + x.poll_task = kthread_run(control_loop, NULL, "g4fand"); + } + + return ret; } static struct i2c_driver g4fan_driver = { diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c index 125605987b44..e7dec328c7cf 100644 --- a/drivers/macintosh/windfarm_ad7417_sensor.c +++ b/drivers/macintosh/windfarm_ad7417_sensor.c @@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_ad7417_id); +static const struct of_device_id wf_ad7417_of_id[] = { + { .compatible = "ad7417", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_ad7417_of_id); + static struct i2c_driver wf_ad7417_driver = { .driver = { .name = "wf_ad7417", + .of_match_table = wf_ad7417_of_id, }, .probe = wf_ad7417_probe, .remove = wf_ad7417_remove, diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c index 3c971297b6dc..89b48e8ac6ef 100644 --- a/drivers/macintosh/windfarm_fcu_controls.c +++ b/drivers/macintosh/windfarm_fcu_controls.c @@ -582,9 +582,16 @@ static const struct i2c_device_id wf_fcu_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_fcu_id); +static const struct of_device_id wf_fcu_of_id[] = { + { .compatible = "fcu", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_fcu_of_id); + static struct i2c_driver wf_fcu_driver = { .driver = { .name = "wf_fcu", + .of_match_table = wf_fcu_of_id, }, .probe = wf_fcu_probe, .remove = wf_fcu_remove, diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 282c28a17ea1..1e5fa09845e7 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wf_lm75_sensor *lm; - int rc, ds1775 = id->driver_data; + int rc, ds1775; const char *name, 
*loc; + if (id) + ds1775 = id->driver_data; + else + ds1775 = !!of_device_get_match_data(&client->dev); + DBG("wf_lm75: creating %s device at address 0x%02x\n", ds1775 ? "ds1775" : "lm75", client->addr); @@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_lm75_id); +static const struct of_device_id wf_lm75_of_id[] = { + { .compatible = "lm75", .data = (void *)0}, + { .compatible = "ds1775", .data = (void *)1 }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_lm75_of_id); + static struct i2c_driver wf_lm75_driver = { .driver = { .name = "wf_lm75", + .of_match_table = wf_lm75_of_id, }, .probe = wf_lm75_probe, .remove = wf_lm75_remove, diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c index e44525b19071..1a1f8f3f0abc 100644 --- a/drivers/macintosh/windfarm_lm87_sensor.c +++ b/drivers/macintosh/windfarm_lm87_sensor.c @@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_lm87_id); +static const struct of_device_id wf_lm87_of_id[] = { + { .compatible = "lm87cimt", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_lm87_of_id); + static struct i2c_driver wf_lm87_driver = { .driver = { .name = "wf_lm87", + .of_match_table = wf_lm87_of_id, }, .probe = wf_lm87_probe, .remove = wf_lm87_remove, diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index e666cc020683..1e7b03d44ad9 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c @@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_max6690_id); +static const struct of_device_id wf_max6690_of_id[] = { + { .compatible = "max6690", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_max6690_of_id); + static struct i2c_driver wf_max6690_driver = { .driver = { .name = "wf_max6690", + .of_match_table = wf_max6690_of_id, }, .probe = wf_max6690_probe, .remove = wf_max6690_remove, diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index c84ec49c3741..cb75dc035616 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_sat_id); +static const struct of_device_id wf_sat_of_id[] = { + { .compatible = "smu-sat", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_sat_of_id); + static struct i2c_driver wf_sat_driver = { .driver = { .name = "wf_smu_sat", + .of_match_table = wf_sat_of_id, }, .probe = wf_sat_probe, .remove = wf_sat_remove, diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 9f74dee1a58c..afe625e88a5c 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -214,11 +214,24 @@ static void imx_mu_shutdown(struct mbox_chan *chan) struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; - if (cp->type == IMX_MU_TYPE_TXDB) + if (cp->type == IMX_MU_TYPE_TXDB) { tasklet_kill(&cp->txdb_tasklet); + return; + } - imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) | - IMX_MU_xCR_RIEn(cp->idx) | IMX_MU_xCR_GIEn(cp->idx)); + switch (cp->type) { + case IMX_MU_TYPE_TX: + imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx)); + break; + case IMX_MU_TYPE_RX: + imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx)); + break; + case IMX_MU_TYPE_RXDB: + imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx)); + break; + default: + break; + } free_irq(priv->irq, 
chan); } diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index 4c5ba35d48d4..834b35dc3b13 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev) hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK; hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK; - err = platform_get_irq_byname(pdev, "doorbell"); + err = platform_get_irq_byname_optional(pdev, "doorbell"); if (err >= 0) hsp->doorbell_irq = err; @@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev) if (!name) return -ENOMEM; - err = platform_get_irq_byname(pdev, name); + err = platform_get_irq_byname_optional(pdev, name); if (err >= 0) { hsp->shared_irqs[i] = err; count++; diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 6f776823b9ba..a1df0d95151c 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -377,7 +377,10 @@ retry_invalidate: if (!fifo_full(&ca->free_inc)) goto retry_invalidate; - bch_prio_write(ca); + if (bch_prio_write(ca, false) < 0) { + ca->invalidate_needs_gc = 1; + wake_up_gc(ca->set); + } } } out: diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 013e35a9e317..3d2b63585da9 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -329,6 +329,9 @@ struct cached_dev { */ atomic_t has_dirty; +#define BCH_CACHE_READA_ALL 0 +#define BCH_CACHE_READA_META_ONLY 1 + unsigned int cache_readahead_policy; struct bch_ratelimit writeback_rate; struct delayed_work writeback_rate_update; @@ -977,7 +980,7 @@ bool bch_cached_dev_error(struct cached_dev *dc); __printf(2, 3) bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...); -void bch_prio_write(struct cache *ca); +int bch_prio_write(struct cache *ca, bool wait); void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); extern struct workqueue_struct *bcache_wq; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index c71365e7c1fa..a50dcfda656f 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state); /* Bkey utility code */ -#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys) +#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \ + (unsigned int)(i)->keys) static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx) { diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index ba434d9ac720..46a8b5a91c38 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -723,6 +723,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, * IO can always make forward progress: */ nr /= c->btree_pages; + if (nr == 0) + nr = 1; nr = min_t(unsigned long, nr, mca_can_free(c)); i = 0; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index be2a2a201603..6730820780b0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -417,10 +417,15 @@ err: /* Journalling */ +#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask)) + static void btree_flush_write(struct cache_set *c) { struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR]; - unsigned int i, n; + unsigned int i, nr; + int ref_nr; + atomic_t *fifo_front_p, *now_fifo_front_p; + size_t mask; if (c->journal.btree_flushing) return; @@ -433,12 +438,50 @@ static void btree_flush_write(struct cache_set *c) c->journal.btree_flushing = true; 
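The additions below make btree_flush_write() select only btree nodes whose journal reference pins the oldest entry in the journal FIFO, rather than any journalled node the scan happens to meet. The selection test is the nr_to_fifo_front() macro defined above; a rough sketch of what it computes, with illustrative names:

	atomic_t *pin = btree_current_write(b)->journal;
	atomic_t *front = &fifo_front(&c->journal.pin);
	size_t mask = c->journal.pin.mask;	/* FIFO size - 1, a power of two */

	/* distance of this node's journal pin from the FIFO front */
	if (((pin - front) & mask) == 0)
		select_for_flush(b);	/* hypothetical helper: b references
					 * the oldest journal entry */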
spin_unlock(&c->journal.flush_write_lock); + /* get the oldest journal entry and check its refcount */ + spin_lock(&c->journal.lock); + fifo_front_p = &fifo_front(&c->journal.pin); + ref_nr = atomic_read(fifo_front_p); + if (ref_nr <= 0) { + /* + * do nothing if no btree node references + * the oldest journal entry + */ + spin_unlock(&c->journal.lock); + goto out; + } + spin_unlock(&c->journal.lock); + + mask = c->journal.pin.mask; + nr = 0; atomic_long_inc(&c->flush_write); memset(btree_nodes, 0, sizeof(btree_nodes)); - n = 0; mutex_lock(&c->bucket_lock); list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { + /* + * It is safe to get now_fifo_front_p without holding + * c->journal.lock here, because we don't need to know + * the exact value, just to check whether the + * front pointer of c->journal.pin has changed. + */ + now_fifo_front_p = &fifo_front(&c->journal.pin); + /* + * If the oldest journal entry is reclaimed and the front + * pointer of c->journal.pin changes, it is unnecessary + * to scan c->btree_cache anymore, just quit the loop and + * flush out what we have already. + */ + if (now_fifo_front_p != fifo_front_p) + break; + /* + * quit this loop if all matching btree nodes are + * scanned and recorded in btree_nodes[] already. + */ + ref_nr = atomic_read(fifo_front_p); + if (nr >= ref_nr) + break; + if (btree_node_journal_flush(b)) pr_err("BUG: flush_write bit should not be set here!"); @@ -454,17 +497,44 @@ static void btree_flush_write(struct cache_set *c) continue; } + /* + * Only select the btree node which exactly references + * the oldest journal entry. + * + * If the journal entry pointed to by fifo_front_p is + * reclaimed in parallel, don't worry: + * - the list_for_each_xxx loop will quit when checking the + * next now_fifo_front_p. + * - If there are matched nodes recorded in btree_nodes[], + * they are clean now (this is why and how the oldest + * journal entry can be reclaimed). These selected nodes + * will be ignored and skipped in the following for-loop. + */ + if (nr_to_fifo_front(btree_current_write(b)->journal, + fifo_front_p, + mask) != 0) { + mutex_unlock(&b->write_lock); + continue; + } + set_btree_node_journal_flush(b); mutex_unlock(&b->write_lock); - btree_nodes[n++] = b; - if (n == BTREE_FLUSH_NR) + btree_nodes[nr++] = b; + /* + * To avoid holding c->bucket_lock for too long, + * only scan for BTREE_FLUSH_NR matched btree nodes + * at most. If more btree nodes reference + * the oldest journal entry, try to flush them next + * time when btree_flush_write() is called. 
+ */ + if (nr == BTREE_FLUSH_NR) break; } mutex_unlock(&c->bucket_lock); - for (i = 0; i < n; i++) { + for (i = 0; i < nr; i++) { b = btree_nodes[i]; if (!b) { pr_err("BUG: btree_nodes[%d] is NULL", i); @@ -497,6 +567,7 @@ static void btree_flush_write(struct cache_set *c) mutex_unlock(&b->write_lock); } +out: spin_lock(&c->journal.flush_write_lock); c->journal.btree_flushing = false; spin_unlock(&c->journal.flush_write_lock); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 41adcd1546f1..4045ae748f17 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -391,13 +391,20 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) goto skip; /* - * Flag for bypass if the IO is for read-ahead or background, - * unless the read-ahead request is for metadata + * If the bio is for read-ahead or background IO, whether to + * bypass it or not depends on the following cases: + * - If the IO is for metadata, always cache it and do not bypass + * - If the IO is not for metadata, check dc->cache_readahead_policy, + * BCH_CACHE_READA_ALL: cache it and do not bypass + * BCH_CACHE_READA_META_ONLY: do not cache it and bypass + * That is, read-ahead requests for metadata always get cached * (eg, for gfs2 or xfs). */ - if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) && - !(bio->bi_opf & (REQ_META|REQ_PRIO))) - goto skip; + if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) { + if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) && + (dc->cache_readahead_policy != BCH_CACHE_READA_ALL)) + goto skip; + } if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || bio_sectors(bio) & (c->sb.block_size - 1)) { diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index ba1c93791d8d..503aafe188dc 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, void bch_cache_accounting_clear(struct cache_accounting *acc) { - memset(&acc->total.cache_hits, - 0, - sizeof(struct cache_stats)); + acc->total.cache_hits = 0; + acc->total.cache_misses = 0; + acc->total.cache_bypass_hits = 0; + acc->total.cache_bypass_misses = 0; + acc->total.cache_readaheads = 0; + acc->total.cache_miss_collisions = 0; + acc->total.sectors_bypassed = 0; } void bch_cache_accounting_destroy(struct cache_accounting *acc) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 20ed838e9413..658b0f4a01f5 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -529,12 +529,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op, closure_sync(cl); } -void bch_prio_write(struct cache *ca) +int bch_prio_write(struct cache *ca, bool wait) { int i; struct bucket *b; struct closure cl; + pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu", + fifo_used(&ca->free[RESERVE_PRIO]), + fifo_used(&ca->free[RESERVE_NONE]), + fifo_used(&ca->free_inc)); + + /* + * Pre-check if there are enough free buckets. In the non-blocking + * scenario it's better to fail early rather than to start allocating + * buckets and clean up later in case of failure. 
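 * (When this pre-check fails, the non-blocking caller in the alloc.c
 * hunk above sets ca->invalidate_needs_gc and wakes the GC thread
 * instead of blocking, so the prio write can be retried once garbage
 * collection has freed buckets.)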
+ */ + if (!wait) { + size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + + fifo_used(&ca->free[RESERVE_NONE]); + if (prio_buckets(ca) > avail) + return -ENOMEM; + } + closure_init_stack(&cl); lockdep_assert_held(&ca->set->bucket_lock); @@ -544,9 +561,6 @@ void bch_prio_write(struct cache *ca) atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), &ca->meta_sectors_written); - //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), - // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); - for (i = prio_buckets(ca) - 1; i >= 0; --i) { long bucket; struct prio_set *p = ca->disk_buckets; @@ -564,7 +578,7 @@ void bch_prio_write(struct cache *ca) p->magic = pset_magic(&ca->sb); p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); - bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); + bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); BUG_ON(bucket == -1); mutex_unlock(&ca->set->bucket_lock); @@ -593,6 +607,7 @@ void bch_prio_write(struct cache *ca) ca->prio_last_buckets[i] = ca->prio_buckets[i]; } + return 0; } static void prio_read(struct cache *ca, uint64_t bucket) @@ -761,20 +776,28 @@ static inline int idx_to_first_minor(int idx) static void bcache_device_free(struct bcache_device *d) { + struct gendisk *disk = d->disk; + lockdep_assert_held(&bch_register_lock); - pr_info("%s stopped", d->disk->disk_name); + if (disk) + pr_info("%s stopped", disk->disk_name); + else + pr_err("bcache device (NULL gendisk) stopped"); if (d->c) bcache_device_detach(d); - if (d->disk && d->disk->flags & GENHD_FL_UP) - del_gendisk(d->disk); - if (d->disk && d->disk->queue) - blk_cleanup_queue(d->disk->queue); - if (d->disk) { + + if (disk) { + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + + if (disk->queue) + blk_cleanup_queue(disk->queue); + ida_simple_remove(&bcache_device_idx, - first_minor_to_idx(d->disk->first_minor)); - put_disk(d->disk); + first_minor_to_idx(disk->first_minor)); + put_disk(disk); } bioset_exit(&d->bio_split); @@ -1251,6 +1274,9 @@ static void cached_dev_free(struct closure *cl) mutex_unlock(&bch_register_lock); + if (dc->sb_bio.bi_inline_vecs[0].bv_page) + put_page(bio_first_page_all(&dc->sb_bio)); + if (!IS_ERR_OR_NULL(dc->bdev)) blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -1954,7 +1980,7 @@ static int run_cache_set(struct cache_set *c) mutex_lock(&c->bucket_lock); for_each_cache(ca, c, i) - bch_prio_write(ca); + bch_prio_write(ca, true); mutex_unlock(&c->bucket_lock); err = "cannot allocate new UUID bucket"; @@ -2346,29 +2372,35 @@ static bool bch_is_open(struct block_device *bdev) static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, const char *buffer, size_t size) { - ssize_t ret = -EINVAL; - const char *err = "cannot allocate memory"; + const char *err; char *path = NULL; - struct cache_sb *sb = NULL; + struct cache_sb *sb; struct block_device *bdev = NULL; - struct page *sb_page = NULL; + struct page *sb_page; + ssize_t ret; + ret = -EBUSY; + err = "failed to reference bcache module"; if (!try_module_get(THIS_MODULE)) - return -EBUSY; + goto out; /* For latest state of bcache_is_reboot */ smp_mb(); + err = "bcache is in reboot"; if (bcache_is_reboot) - return -EBUSY; + goto out_module_put; + ret = -ENOMEM; + err = "cannot allocate memory"; path = kstrndup(buffer, size, GFP_KERNEL); if (!path) - goto err; + goto out_module_put; sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); if (!sb) - goto err; + goto out_free_path; + ret = -EINVAL; err = "failed to open device"; bdev = blkdev_get_by_path(strim(path), 
FMODE_READ|FMODE_WRITE|FMODE_EXCL, @@ -2385,57 +2417,69 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!IS_ERR(bdev)) bdput(bdev); if (attr == &ksysfs_register_quiet) - goto quiet_out; + goto done; } - goto err; + goto out_free_sb; } err = "failed to set blocksize"; if (set_blocksize(bdev, 4096)) - goto err_close; + goto out_blkdev_put; err = read_super(sb, bdev, &sb_page); if (err) - goto err_close; + goto out_blkdev_put; err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); if (!dc) - goto err_close; + goto out_put_sb_page; mutex_lock(&bch_register_lock); ret = register_bdev(sb, sb_page, bdev, dc); mutex_unlock(&bch_register_lock); /* blkdev_put() will be called in cached_dev_free() */ - if (ret < 0) - goto err; + if (ret < 0) { + bdev = NULL; + goto out_put_sb_page; + } } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) - goto err_close; + goto out_put_sb_page; /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(sb, sb_page, bdev, ca) != 0) - goto err; + if (register_cache(sb, sb_page, bdev, ca) != 0) { + bdev = NULL; + goto out_put_sb_page; + } } -quiet_out: - ret = size; -out: - if (sb_page) - put_page(sb_page); + + put_page(sb_page); +done: kfree(sb); kfree(path); module_put(THIS_MODULE); - return ret; + return size; -err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -err: - pr_info("error %s: %s", path, err); - goto out; +out_put_sb_page: + put_page(sb_page); +out_blkdev_put: + if (bdev) + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); +out_free_sb: + kfree(sb); +out_free_path: + kfree(path); + path = NULL; +out_module_put: + module_put(THIS_MODULE); +out: + pr_info("error %s: %s", path?path:"", err); + return ret; } diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 627dcea0f5b6..7f0fb4b5755a 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -27,6 +27,12 @@ static const char * const bch_cache_modes[] = { NULL }; +static const char * const bch_reada_cache_policies[] = { + "all", + "meta-only", + NULL +}; + /* Default is 0 ("auto") */ static const char * const bch_stop_on_failure_modes[] = { "auto", @@ -100,6 +106,7 @@ rw_attribute(congested_write_threshold_us); rw_attribute(sequential_cutoff); rw_attribute(data_csum); rw_attribute(cache_mode); +rw_attribute(readahead_cache_policy); rw_attribute(stop_when_cache_set_failed); rw_attribute(writeback_metadata); rw_attribute(writeback_running); @@ -167,6 +174,11 @@ SHOW(__bch_cached_dev) bch_cache_modes, BDEV_CACHE_MODE(&dc->sb)); + if (attr == &sysfs_readahead_cache_policy) + return bch_snprint_string_list(buf, PAGE_SIZE, + bch_reada_cache_policies, + dc->cache_readahead_policy); + if (attr == &sysfs_stop_when_cache_set_failed) return bch_snprint_string_list(buf, PAGE_SIZE, bch_stop_on_failure_modes, @@ -352,6 +364,15 @@ STORE(__cached_dev) } } + if (attr == &sysfs_readahead_cache_policy) { + v = __sysfs_match_string(bch_reada_cache_policies, -1, buf); + if (v < 0) + return v; + + if ((unsigned int) v != dc->cache_readahead_policy) + dc->cache_readahead_policy = v; + } + if (attr == &sysfs_stop_when_cache_set_failed) { v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); if (v < 0) @@ -466,6 +487,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_data_csum, #endif &sysfs_cache_mode, + &sysfs_readahead_cache_policy, &sysfs_stop_when_cache_set_failed, &sysfs_writeback_metadata, &sysfs_writeback_running, 
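For reference, the decision table wired up by the check_should_bypass() and sysfs changes above can be modeled in a few lines of userspace C. The REQ_* values below are stand-ins (the kernel defines them elsewhere); only the policy names come from the patch, so treat this as a sketch of the policy, not the driver code:

#include <stdbool.h>
#include <stdio.h>

#define REQ_RAHEAD     (1u << 0)  /* stand-in values, not the kernel's */
#define REQ_BACKGROUND (1u << 1)
#define REQ_META       (1u << 2)
#define REQ_PRIO       (1u << 3)

enum { BCH_CACHE_READA_ALL, BCH_CACHE_READA_META_ONLY };

/* Mirrors the new bypass rule: only read-ahead/background data IO is
 * bypassed, and only under the "meta-only" policy. */
static bool reada_should_bypass(unsigned int bi_opf, unsigned int policy)
{
	if (!(bi_opf & (REQ_RAHEAD | REQ_BACKGROUND)))
		return false;  /* not read-ahead: the driver's other bypass checks decide */
	if (bi_opf & (REQ_META | REQ_PRIO))
		return false;  /* metadata read-ahead is always cached */
	return policy != BCH_CACHE_READA_ALL;
}

int main(void)
{
	printf("%d\n", reada_should_bypass(REQ_RAHEAD, BCH_CACHE_READA_META_ONLY));            /* 1: plain data, meta-only -> bypass */
	printf("%d\n", reada_should_bypass(REQ_RAHEAD | REQ_META, BCH_CACHE_READA_META_ONLY)); /* 0: metadata is cached */
	printf("%d\n", reada_should_bypass(REQ_RAHEAD, BCH_CACHE_READA_ALL));                  /* 0: "all" caches data read-ahead too */
	return 0;
}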
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index c82578af56a5..2ea0360108e1 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -20,8 +20,13 @@ struct dm_bio_details { struct gendisk *bi_disk; u8 bi_partno; + int __bi_remaining; unsigned long bi_flags; struct bvec_iter bi_iter; + bio_end_io_t *bi_end_io; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload *bi_integrity; +#endif }; static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) @@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) bd->bi_partno = bio->bi_partno; bd->bi_flags = bio->bi_flags; bd->bi_iter = bio->bi_iter; + bd->__bi_remaining = atomic_read(&bio->__bi_remaining); + bd->bi_end_io = bio->bi_end_io; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + bd->bi_integrity = bio_integrity(bio); +#endif } static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) @@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) bio->bi_partno = bd->bi_partno; bio->bi_flags = bd->bi_flags; bio->bi_iter = bd->bi_iter; + atomic_set(&bio->__bi_remaining, bd->__bi_remaining); + bio->bi_end_io = bd->bi_end_io; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + bio->bi_integrity = bd->bi_integrity; +#endif } #endif diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 8346e6d1816c..f595e9867cbe 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2867,8 +2867,8 @@ static void cache_postsuspend(struct dm_target *ti) prevent_background_work(cache); BUG_ON(atomic_read(&cache->nr_io_migrations)); - cancel_delayed_work(&cache->waker); - flush_workqueue(cache->wq); + cancel_delayed_work_sync(&cache->waker); + drain_workqueue(cache->wq); WARN_ON(cache->tracker.in_flight); /* diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c index 6bc8c1d1c351..54e4fdd607e1 100644 --- a/drivers/md/dm-clone-metadata.c +++ b/drivers/md/dm-clone-metadata.c @@ -67,23 +67,34 @@ struct superblock_disk { * To save constantly doing look ups on disk we keep an in core copy of the * on-disk bitmap, the region_map. * - * To further reduce metadata I/O overhead we use a second bitmap, the dmap - * (dirty bitmap), which tracks the dirty words, i.e. longs, of the region_map. + * In order to track which regions are hydrated during a metadata transaction, + * we use a second set of bitmaps, the dmap (dirty bitmap), which includes two + * bitmaps, namely dirty_regions and dirty_words. The dirty_regions bitmap + * tracks the regions that got hydrated during the current metadata + * transaction. The dirty_words bitmap tracks the dirty words, i.e. longs, of + * the dirty_regions bitmap. + * + * This allows us to precisely track the regions that were hydrated during the + * current metadata transaction and update the metadata accordingly, when we + * commit the current transaction. This is important because dm-clone should + * only commit the metadata of regions that were properly flushed to the + * destination device beforehand. Otherwise, in case of a crash, we could end + * up with a corrupted dm-clone device. * * When a region finishes hydrating dm-clone calls * dm_clone_set_region_hydrated(), or for discard requests * dm_clone_cond_set_range(), which sets the corresponding bits in region_map * and dmap. * - * During a metadata commit we scan the dmap for dirty region_map words (longs) - * and update accordingly the on-disk metadata. 
Thus, we don't have to flush to - * disk the whole region_map. We can just flush the dirty region_map words. + * During a metadata commit we scan dmap->dirty_words and dmap->dirty_regions + * and update the on-disk metadata accordingly. Thus, we don't have to flush to + * disk the whole region_map. We can just flush the dirty region_map bits. * - * We use a dirty bitmap, which is smaller than the original region_map, to - * reduce the amount of memory accesses during a metadata commit. As dm-bitset - * accesses the on-disk bitmap in 64-bit word granularity, there is no - * significant benefit in tracking the dirty region_map bits with a smaller - * granularity. + * We use the helper dmap->dirty_words bitmap, which is smaller than the + * original region_map, to reduce the amount of memory accesses during a + * metadata commit. Moreover, as dm-bitset also accesses the on-disk bitmap in + * 64-bit word granularity, the dirty_words bitmap helps us avoid useless disk + * accesses. * * We could update directly the on-disk bitmap, when dm-clone calls either * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), buts this @@ -92,12 +103,13 @@ struct superblock_disk { * e.g., in a hooked overwrite bio's completion routine, and further reduce the * I/O completion latency. * - * We maintain two dirty bitmaps. During a metadata commit we atomically swap - * the currently used dmap with the unused one. This allows the metadata update - * functions to run concurrently with an ongoing commit. + * We maintain two dirty bitmap sets. During a metadata commit we atomically + * swap the currently used dmap with the unused one. This allows the metadata + * update functions to run concurrently with an ongoing commit. */ struct dirty_map { unsigned long *dirty_words; + unsigned long *dirty_regions; unsigned int changed; }; @@ -115,6 +127,9 @@ struct dm_clone_metadata { struct dirty_map dmap[2]; struct dirty_map *current_dmap; + /* Protected by lock */ + struct dirty_map *committing_dmap; + /* * In core copy of the on-disk bitmap to save constantly doing look ups * on disk. 
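The dirty_words/dirty_regions pairing described in the comment block above lends itself to a short userspace model. The sizes, helper names, and printf-based "flush" below are invented for the sketch; only the bookkeeping scheme (mark the region bit plus its containing word bit on update, walk just the dirty words at commit) follows the patch:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define NR_REGIONS 1000
#define NR_WORDS ((NR_REGIONS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long region_map[NR_WORDS];    /* in-core hydration state */
static unsigned long dirty_regions[NR_WORDS]; /* regions hydrated this transaction */
static unsigned long dirty_words[(NR_WORDS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void set_bit_ul(unsigned long nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void set_region_hydrated(unsigned long region)
{
	set_bit_ul(region / BITS_PER_LONG, dirty_words); /* containing word is dirty */
	set_bit_ul(region, dirty_regions);               /* precise per-transaction bit */
	set_bit_ul(region, region_map);
}

/* Commit walk: visit only words flagged in dirty_words, flush only the
 * bits set during this transaction, then clear them. (The kernel swaps
 * in a second, clean dmap instead of resetting in place.) */
static void flush_dmap(void)
{
	for (unsigned long w = 0; w < NR_WORDS; w++) {
		if (!(dirty_words[w / BITS_PER_LONG] & (1UL << (w % BITS_PER_LONG))))
			continue;
		printf("flushing word %lu: %#lx\n", w, dirty_regions[w]);
		dirty_regions[w] = 0;
	}
}

int main(void)
{
	set_region_hydrated(3);
	set_region_hydrated(130);
	flush_dmap();
	return 0;
}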
@@ -461,34 +476,53 @@ static size_t bitmap_size(unsigned long nr_bits) return BITS_TO_LONGS(nr_bits) * sizeof(long); } +static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words, + unsigned long nr_regions) +{ + dmap->changed = 0; + + dmap->dirty_words = kvzalloc(bitmap_size(nr_words), GFP_KERNEL); + if (!dmap->dirty_words) + return -ENOMEM; + + dmap->dirty_regions = kvzalloc(bitmap_size(nr_regions), GFP_KERNEL); + if (!dmap->dirty_regions) { + kvfree(dmap->dirty_words); + return -ENOMEM; + } + + return 0; +} + +static void __dirty_map_exit(struct dirty_map *dmap) +{ + kvfree(dmap->dirty_words); + kvfree(dmap->dirty_regions); +} + static int dirty_map_init(struct dm_clone_metadata *cmd) { - cmd->dmap[0].changed = 0; - cmd->dmap[0].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL); - - if (!cmd->dmap[0].dirty_words) { + if (__dirty_map_init(&cmd->dmap[0], cmd->nr_words, cmd->nr_regions)) { DMERR("Failed to allocate dirty bitmap"); return -ENOMEM; } - cmd->dmap[1].changed = 0; - cmd->dmap[1].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL); - - if (!cmd->dmap[1].dirty_words) { + if (__dirty_map_init(&cmd->dmap[1], cmd->nr_words, cmd->nr_regions)) { DMERR("Failed to allocate dirty bitmap"); - kvfree(cmd->dmap[0].dirty_words); + __dirty_map_exit(&cmd->dmap[0]); return -ENOMEM; } cmd->current_dmap = &cmd->dmap[0]; + cmd->committing_dmap = NULL; return 0; } static void dirty_map_exit(struct dm_clone_metadata *cmd) { - kvfree(cmd->dmap[0].dirty_words); - kvfree(cmd->dmap[1].dirty_words); + __dirty_map_exit(&cmd->dmap[0]); + __dirty_map_exit(&cmd->dmap[1]); } static int __load_bitset_in_core(struct dm_clone_metadata *cmd) @@ -633,21 +667,23 @@ unsigned long dm_clone_find_next_unhydrated_region(struct dm_clone_metadata *cmd return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start); } -static int __update_metadata_word(struct dm_clone_metadata *cmd, unsigned long word) +static int __update_metadata_word(struct dm_clone_metadata *cmd, + unsigned long *dirty_regions, + unsigned long word) { int r; unsigned long index = word * BITS_PER_LONG; unsigned long max_index = min(cmd->nr_regions, (word + 1) * BITS_PER_LONG); while (index < max_index) { - if (test_bit(index, cmd->region_map)) { + if (test_bit(index, dirty_regions)) { r = dm_bitset_set_bit(&cmd->bitset_info, cmd->bitset_root, index, &cmd->bitset_root); - if (r) { DMERR("dm_bitset_set_bit failed"); return r; } + __clear_bit(index, dirty_regions); } index++; } @@ -721,7 +757,7 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) if (word == cmd->nr_words) break; - r = __update_metadata_word(cmd, word); + r = __update_metadata_word(cmd, dmap->dirty_regions, word); if (r) return r; @@ -743,16 +779,18 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) return 0; } -int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) +int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd) { - int r = -EPERM; + int r = 0; unsigned long flags; struct dirty_map *dmap, *next_dmap; down_write(&cmd->lock); - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { + r = -EPERM; goto out; + } /* Get current dirty bitmap */ dmap = cmd->current_dmap; @@ -764,7 +802,7 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) * The last commit failed, so we don't have a clean dirty-bitmap to * use. 
*/ - if (WARN_ON(next_dmap->changed)) { + if (WARN_ON(next_dmap->changed || cmd->committing_dmap)) { r = -EINVAL; goto out; } @@ -774,11 +812,33 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) cmd->current_dmap = next_dmap; spin_unlock_irqrestore(&cmd->bitmap_lock, flags); - /* - * No one is accessing the old dirty bitmap anymore, so we can flush - * it. - */ - r = __flush_dmap(cmd, dmap); + /* Set old dirty bitmap as currently committing */ + cmd->committing_dmap = dmap; +out: + up_write(&cmd->lock); + + return r; +} + +int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) +{ + int r = -EPERM; + + down_write(&cmd->lock); + + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) + goto out; + + if (WARN_ON(!cmd->committing_dmap)) { + r = -EINVAL; + goto out; + } + + r = __flush_dmap(cmd, cmd->committing_dmap); + if (!r) { + /* Clear committing dmap */ + cmd->committing_dmap = NULL; + } out: up_write(&cmd->lock); @@ -803,6 +863,7 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re dmap = cmd->current_dmap; __set_bit(word, dmap->dirty_words); + __set_bit(region_nr, dmap->dirty_regions); __set_bit(region_nr, cmd->region_map); dmap->changed = 1; @@ -831,6 +892,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, if (!test_bit(region_nr, cmd->region_map)) { word = region_nr / BITS_PER_LONG; __set_bit(word, dmap->dirty_words); + __set_bit(region_nr, dmap->dirty_regions); __set_bit(region_nr, cmd->region_map); dmap->changed = 1; } diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h index 434bff08508b..c7848c49aef8 100644 --- a/drivers/md/dm-clone-metadata.h +++ b/drivers/md/dm-clone-metadata.h @@ -73,7 +73,23 @@ void dm_clone_metadata_close(struct dm_clone_metadata *cmd); /* * Commit dm-clone metadata to disk. + * + * We use a two phase commit: + * + * 1. dm_clone_metadata_pre_commit(): Prepare the current transaction for + * committing. After this is called, all subsequent metadata updates, done + * through either dm_clone_set_region_hydrated() or + * dm_clone_cond_set_range(), will be part of the **next** transaction. + * + * 2. dm_clone_metadata_commit(): Actually commit the current transaction to + * disk and start a new transaction. + * + * This allows dm-clone to flush the destination device after step (1) to + * ensure that all freshly hydrated regions, for which we are updating the + * metadata, are properly written to non-volatile storage and won't be lost in + * case of a crash. */ +int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd); int dm_clone_metadata_commit(struct dm_clone_metadata *cmd); /* @@ -110,6 +126,7 @@ int dm_clone_metadata_abort(struct dm_clone_metadata *cmd); * Switches metadata to a read only mode. Once read-only mode has been entered * the following functions will return -EPERM: * + * dm_clone_metadata_pre_commit() * dm_clone_metadata_commit() * dm_clone_set_region_hydrated() * dm_clone_cond_set_range() diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 4ca8f1977222..e6e5d24a79f5 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -86,6 +86,12 @@ struct clone { struct dm_clone_metadata *cmd; + /* + * bio used to flush the destination device, before committing the + * metadata. 
+ */ + struct bio flush_bio; + /* Region hydration hash table */ struct hash_table_bucket *ht; @@ -1106,10 +1112,13 @@ static bool need_commit_due_to_time(struct clone *clone) /* * A non-zero return indicates read-only or fail mode. */ -static int commit_metadata(struct clone *clone) +static int commit_metadata(struct clone *clone, bool *dest_dev_flushed) { int r = 0; + if (dest_dev_flushed) + *dest_dev_flushed = false; + mutex_lock(&clone->commit_lock); if (!dm_clone_changed_this_transaction(clone->cmd)) @@ -1120,8 +1129,26 @@ static int commit_metadata(struct clone *clone) goto out; } - r = dm_clone_metadata_commit(clone->cmd); + r = dm_clone_metadata_pre_commit(clone->cmd); + if (unlikely(r)) { + __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r); + goto out; + } + bio_reset(&clone->flush_bio); + bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev); + clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + r = submit_bio_wait(&clone->flush_bio); + if (unlikely(r)) { + __metadata_operation_failed(clone, "flush destination device", r); + goto out; + } + + if (dest_dev_flushed) + *dest_dev_flushed = true; + + r = dm_clone_metadata_commit(clone->cmd); if (unlikely(r)) { __metadata_operation_failed(clone, "dm_clone_metadata_commit", r); goto out; @@ -1194,6 +1221,7 @@ static void process_deferred_flush_bios(struct clone *clone) { struct bio *bio; unsigned long flags; + bool dest_dev_flushed; struct bio_list bios = BIO_EMPTY_LIST; struct bio_list bio_completions = BIO_EMPTY_LIST; @@ -1213,7 +1241,7 @@ static void process_deferred_flush_bios(struct clone *clone) !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone))) return; - if (commit_metadata(clone)) { + if (commit_metadata(clone, &dest_dev_flushed)) { bio_list_merge(&bios, &bio_completions); while ((bio = bio_list_pop(&bios))) @@ -1227,8 +1255,17 @@ static void process_deferred_flush_bios(struct clone *clone) while ((bio = bio_list_pop(&bio_completions))) bio_endio(bio); - while ((bio = bio_list_pop(&bios))) - generic_make_request(bio); + while ((bio = bio_list_pop(&bios))) { + if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) { + /* We just flushed the destination device as part of + * the metadata commit, so there is no reason to send + * another flush. 
+ */ + bio_endio(bio); + } else { + generic_make_request(bio); + } + } } static void do_worker(struct work_struct *work) @@ -1400,7 +1437,7 @@ static void clone_status(struct dm_target *ti, status_type_t type, /* Commit to ensure statistics aren't out-of-date */ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) - (void) commit_metadata(clone); + (void) commit_metadata(clone, NULL); r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks); @@ -1834,6 +1871,7 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) bio_list_init(&clone->deferred_flush_completions); clone->hydration_offset = 0; atomic_set(&clone->hydrations_in_flight, 0); + bio_init(&clone->flush_bio, NULL, 0); clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); if (!clone->wq) { @@ -1907,6 +1945,7 @@ static void clone_dtr(struct dm_target *ti) struct clone *clone = ti->private; mutex_destroy(&clone->commit_lock); + bio_uninit(&clone->flush_bio); for (i = 0; i < clone->nr_ctr_args; i++) kfree(clone->ctr_args[i]); @@ -1961,7 +2000,7 @@ static void clone_postsuspend(struct dm_target *ti) wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight)); flush_workqueue(clone->wq); - (void) commit_metadata(clone); + (void) commit_metadata(clone, NULL); } static void clone_resume(struct dm_target *ti) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f87f6495652f..492bbe0584d9 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -331,8 +331,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); - int log = ilog2(bs); + unsigned bs; + int log; + + if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) + bs = crypto_aead_blocksize(any_tfm_aead(cc)); + else + bs = crypto_skcipher_blocksize(any_tfm(cc)); + log = ilog2(bs); /* we need to calculate how far we must shift the sector count * to get the cipher block count, we use this shift in _gen */ @@ -717,7 +723,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, struct crypto_wait wait; int err; - req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS); + req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); if (!req) return -ENOMEM; @@ -2700,21 +2706,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io/%s", - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, - 1, devname); + cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) - cc->crypt_queue = alloc_workqueue("kcryptd/%s", - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, + cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1, devname); else cc->crypt_queue = alloc_workqueue("kcryptd/%s", - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(), devname); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index dab4446fe7d8..145bc2e7eaf0 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -6,6 +6,8 @@ * This file is released under the GPL. 
*/ +#include "dm-bio-record.h" + #include #include #include @@ -199,17 +201,19 @@ struct dm_integrity_c { __u8 log2_blocks_per_bitmap_bit; unsigned char mode; - int suspending; int failed; struct crypto_shash *internal_hash; + struct dm_target *ti; + /* these variables are locked with endio_wait.lock */ struct rb_root in_progress; struct list_head wait_list; wait_queue_head_t endio_wait; struct workqueue_struct *wait_wq; + struct workqueue_struct *offload_wq; unsigned char commit_seq; commit_id_t commit_ids[N_COMMIT_IDS]; @@ -290,11 +294,7 @@ struct dm_integrity_io { struct completion *completion; - struct gendisk *orig_bi_disk; - u8 orig_bi_partno; - bio_end_io_t *orig_bi_end_io; - struct bio_integrity_payload *orig_bi_integrity; - struct bvec_iter orig_bi_iter; + struct dm_bio_details bio_details; }; struct journal_completion { @@ -1434,7 +1434,7 @@ static void dec_in_flight(struct dm_integrity_io *dio) dio->range.logical_sector += dio->range.n_sectors; bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->wait_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); return; } do_endio_flush(ic, dio); @@ -1445,14 +1445,9 @@ static void integrity_end_io(struct bio *bio) { struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); - bio->bi_iter = dio->orig_bi_iter; - bio->bi_disk = dio->orig_bi_disk; - bio->bi_partno = dio->orig_bi_partno; - if (dio->orig_bi_integrity) { - bio->bi_integrity = dio->orig_bi_integrity; + dm_bio_restore(&dio->bio_details, bio); + if (bio->bi_integrity) bio->bi_opf |= REQ_INTEGRITY; - } - bio->bi_end_io = dio->orig_bi_end_io; if (dio->completion) complete(dio->completion); @@ -1537,7 +1532,7 @@ static void integrity_metadata(struct work_struct *w) } } - __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) { + __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { unsigned pos; char *mem, *checksums_ptr; @@ -1581,7 +1576,7 @@ again: if (likely(checksums != checksums_onstack)) kfree(checksums); } else { - struct bio_integrity_payload *bip = dio->orig_bi_integrity; + struct bio_integrity_payload *bip = dio->bio_details.bi_integrity; if (bip) { struct bio_vec biv; @@ -1860,7 +1855,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map if (need_sync_io && from_map) { INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->metadata_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); return; } @@ -2000,20 +1995,13 @@ offload_to_thread: } else dio->completion = NULL; - dio->orig_bi_iter = bio->bi_iter; - - dio->orig_bi_disk = bio->bi_disk; - dio->orig_bi_partno = bio->bi_partno; + dm_bio_record(&dio->bio_details, bio); bio_set_dev(bio, ic->dev->bdev); - - dio->orig_bi_integrity = bio_integrity(bio); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; - - dio->orig_bi_end_io = bio->bi_end_io; bio->bi_end_io = integrity_end_io; - bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; + generic_make_request(bio); if (need_sync_io) { @@ -2310,7 +2298,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (READ_ONCE(ic->suspending) && !ic->meta_dev) + if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2371,7 +2359,7 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if (unlikely(READ_ONCE(ic->suspending))) + if (unlikely(dm_suspended(ic->ti))) goto 
unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); @@ -2496,7 +2484,7 @@ static void bitmap_block_work(struct work_struct *w) dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { remove_range(ic, &dio->range); INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->wait_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); } else { block_bitmap_op(ic, ic->journal, dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_SET); @@ -2519,7 +2507,7 @@ static void bitmap_block_work(struct work_struct *w) remove_range(ic, &dio->range); INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->wait_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); } queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); @@ -2799,8 +2787,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti) del_timer_sync(&ic->autocommit_timer); - WRITE_ONCE(ic->suspending, 1); - if (ic->recalc_wq) drain_workqueue(ic->recalc_wq); @@ -2829,8 +2815,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti) #endif } - WRITE_ONCE(ic->suspending, 0); - BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); ic->journal_uptodate = true; @@ -2883,17 +2867,24 @@ static void dm_integrity_resume(struct dm_target *ti) } else { replay_journal(ic); if (ic->mode == 'B') { - int mode; ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); - mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR; - block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode); - block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode); - block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode); + block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); + block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { + block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector), + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); + block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); + block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); + } rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); } @@ -2961,7 +2952,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, DMEMIT(" meta_device:%s", ic->meta_dev->name); if (ic->sectors_per_block != 1) DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); - if (ic->recalculate_flag) + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) DMEMIT(" recalculate"); DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); @@ -3607,6 +3598,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = ic; ti->per_io_data_size = sizeof(struct dm_integrity_io); + ic->ti = ti; ic->in_progress = RB_ROOT; 
INIT_LIST_HEAD(&ic->wait_list); @@ -3818,6 +3810,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, + METADATA_WORKQUEUE_MAX_ACTIVE); + if (!ic->offload_wq) { + ti->error = "Cannot allocate workqueue"; + r = -ENOMEM; + goto bad; + } + ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); if (!ic->commit_wq) { ti->error = "Cannot allocate workqueue"; @@ -4122,6 +4122,8 @@ static void dm_integrity_dtr(struct dm_target *ti) destroy_workqueue(ic->metadata_wq); if (ic->wait_wq) destroy_workqueue(ic->wait_wq); + if (ic->offload_wq) + destroy_workqueue(ic->offload_wq); if (ic->commit_wq) destroy_workqueue(ic->commit_wq); if (ic->writer_wq) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index dbcc1e41cd57..e0c32793c248 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -599,45 +599,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) return pgpath; } -static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio) -{ - struct pgpath *pgpath; - unsigned long flags; - - /* Do we need to select a new pgpath? */ - /* - * FIXME: currently only switching path if no path (due to failure, etc) - * - which negates the point of using a path selector - */ - pgpath = READ_ONCE(m->current_pgpath); - if (!pgpath) - pgpath = choose_pgpath(m, bio->bi_iter.bi_size); - - if (!pgpath) { - if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { - /* Queue for the daemon to resubmit */ - spin_lock_irqsave(&m->lock, flags); - bio_list_add(&m->queued_bios, bio); - spin_unlock_irqrestore(&m->lock, flags); - queue_work(kmultipathd, &m->process_queued_bios); - - return ERR_PTR(-EAGAIN); - } - return NULL; - } - - return pgpath; -} - static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio) { - struct pgpath *pgpath; - - if (!m->hw_handler_name) - pgpath = __map_bio_fast(m, bio); - else - pgpath = __map_bio(m, bio); + struct pgpath *pgpath = __map_bio(m, bio); if (IS_ERR(pgpath)) return DM_MAPIO_SUBMITTED; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3c50c4e4da8f..963d3774c93e 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -17,7 +17,7 @@ #include #define DM_MSG_PREFIX "persistent snapshot" -#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ +#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */ #define DM_PREFETCH_CHUNKS 12 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 4c68a7b93d5e..4cd8868f8004 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -188,6 +188,15 @@ struct dm_pool_metadata { unsigned long flags; sector_t data_block_size; + /* + * Pre-commit callback. + * + * This allows the thin provisioning target to run a callback before + * the metadata are committed. + */ + dm_pool_pre_commit_fn pre_commit_fn; + void *pre_commit_context; + /* * We reserve a section of the metadata for commit overhead. * All reported space does *not* include this. @@ -378,16 +387,15 @@ static int subtree_equal(void *context, const void *value1_le, const void *value * Variant that is used for in-core only changes or code that * shouldn't put the pool in service on its own (e.g. commit). 
*/ -static inline void __pmd_write_lock(struct dm_pool_metadata *pmd) +static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd) __acquires(pmd->root_lock) { down_write(&pmd->root_lock); } -#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd)) static inline void pmd_write_lock(struct dm_pool_metadata *pmd) { - __pmd_write_lock(pmd); + pmd_write_lock_in_core(pmd); if (unlikely(!pmd->in_service)) pmd->in_service = true; } @@ -822,10 +830,19 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) * We need to know if the thin_disk_superblock exceeds a 512-byte sector. */ BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512); + BUG_ON(!rwsem_is_locked(&pmd->root_lock)); if (unlikely(!pmd->in_service)) return 0; + if (pmd->pre_commit_fn) { + r = pmd->pre_commit_fn(pmd->pre_commit_context); + if (r < 0) { + DMERR("pre-commit callback failed"); + return r; + } + } + r = __write_changed_details(pmd); if (r < 0) return r; @@ -892,6 +909,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, pmd->in_service = false; pmd->bdev = bdev; pmd->data_block_size = data_block_size; + pmd->pre_commit_fn = NULL; + pmd->pre_commit_context = NULL; r = __create_persistent_data_objects(pmd, format_device); if (r) { @@ -934,12 +953,14 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) return -EBUSY; } + pmd_write_lock_in_core(pmd); if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { r = __commit_transaction(pmd); if (r < 0) DMWARN("%s: __commit_transaction() failed, error = %d", __func__, r); } + pmd_write_unlock(pmd); if (!pmd->fail_io) __destroy_persistent_data_objects(pmd); @@ -1822,7 +1843,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd) * Care is taken to not have commit be what * triggers putting the thin-pool in-service. 
*/ - __pmd_write_lock(pmd); + pmd_write_lock_in_core(pmd); if (pmd->fail_io) goto out; @@ -2044,6 +2065,16 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, return r; } +void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd, + dm_pool_pre_commit_fn fn, + void *context) +{ + pmd_write_lock_in_core(pmd); + pmd->pre_commit_fn = fn; + pmd->pre_commit_context = context; + pmd_write_unlock(pmd); +} + int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) { int r = -EINVAL; diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index f6be0d733c20..7ef56bd2a7e3 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -230,6 +230,13 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); */ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd); +/* Pre-commit callback */ +typedef int (*dm_pool_pre_commit_fn)(void *context); + +void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd, + dm_pool_pre_commit_fn fn, + void *context); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fcd887703f95..1b2c98b43519 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -231,6 +231,7 @@ struct pool { struct dm_target *ti; /* Only set if a pool target is bound */ struct mapped_device *pool_md; + struct block_device *data_dev; struct block_device *md_dev; struct dm_pool_metadata *pmd; @@ -328,6 +329,7 @@ struct pool_c { dm_block_t low_water_blocks; struct pool_features requested_pf; /* Features requested during table load */ struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */ + struct bio flush_bio; }; /* @@ -2392,8 +2394,16 @@ static void process_deferred_bios(struct pool *pool) while ((bio = bio_list_pop(&bio_completions))) bio_endio(bio); - while ((bio = bio_list_pop(&bios))) - generic_make_request(bio); + while ((bio = bio_list_pop(&bios))) { + /* + * The data device was flushed as part of metadata commit, + * so complete redundant flushes immediately. 
+ */ + if (bio->bi_opf & REQ_PREFLUSH) + bio_endio(bio); + else + generic_make_request(bio); + } } static void do_worker(struct work_struct *ws) @@ -2936,6 +2946,7 @@ static struct kmem_cache *_new_mapping_cache; static struct pool *pool_create(struct mapped_device *pool_md, struct block_device *metadata_dev, + struct block_device *data_dev, unsigned long block_size, int read_only, char **error) { @@ -3043,6 +3054,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, pool->last_commit_jiffies = jiffies; pool->pool_md = pool_md; pool->md_dev = metadata_dev; + pool->data_dev = data_dev; __pool_table_insert(pool); return pool; @@ -3084,6 +3096,7 @@ static void __pool_dec(struct pool *pool) static struct pool *__pool_find(struct mapped_device *pool_md, struct block_device *metadata_dev, + struct block_device *data_dev, unsigned long block_size, int read_only, char **error, int *created) { @@ -3094,19 +3107,23 @@ static struct pool *__pool_find(struct mapped_device *pool_md, *error = "metadata device already in use by a pool"; return ERR_PTR(-EBUSY); } + if (pool->data_dev != data_dev) { + *error = "data device already in use by a pool"; + return ERR_PTR(-EBUSY); + } __pool_inc(pool); } else { pool = __pool_table_lookup(pool_md); if (pool) { - if (pool->md_dev != metadata_dev) { + if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) { *error = "different pool cannot replace a pool"; return ERR_PTR(-EINVAL); } __pool_inc(pool); } else { - pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); + pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error); *created = 1; } } @@ -3127,6 +3144,7 @@ static void pool_dtr(struct dm_target *ti) __pool_dec(pt->pool); dm_put_device(ti, pt->metadata_dev); dm_put_device(ti, pt->data_dev); + bio_uninit(&pt->flush_bio); kfree(pt); mutex_unlock(&dm_thin_pool_table.mutex); @@ -3192,6 +3210,29 @@ static void metadata_low_callback(void *context) dm_table_event(pool->ti->table); } +/* + * We need to flush the data device **before** committing the metadata. + * + * This ensures that the data blocks of any newly inserted mappings are + * properly written to non-volatile storage and won't be lost in case of a + * crash. + * + * Failure to do so can result in data corruption in the case of internal or + * external snapshots and in the case of newly provisioned blocks, when block + * zeroing is enabled. 
+ */ +static int metadata_pre_commit_callback(void *context) +{ + struct pool_c *pt = context; + struct bio *flush_bio = &pt->flush_bio; + + bio_reset(flush_bio); + bio_set_dev(flush_bio, pt->data_dev->bdev); + flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + return submit_bio_wait(flush_bio); +} + static sector_t get_dev_size(struct block_device *bdev) { return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; @@ -3335,7 +3376,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out; } - pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, + pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); if (IS_ERR(pool)) { r = PTR_ERR(pool); @@ -3360,6 +3401,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) pt->data_dev = data_dev; pt->low_water_blocks = low_water_blocks; pt->adjusted_pf = pt->requested_pf = pf; + bio_init(&pt->flush_bio, NULL, 0); ti->num_flush_bios = 1; /* @@ -3549,6 +3591,9 @@ static int pool_preresume(struct dm_target *ti) if (r) return r; + dm_pool_register_pre_commit_callback(pool->pmd, + metadata_pre_commit_callback, pt); + r = maybe_resize_data_dev(ti, &need_commit1); if (r) return r; @@ -4077,7 +4122,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 21, 0}, + .version = {1, 22, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -4456,7 +4501,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type thin_target = { .name = "thin", - .version = {1, 21, 0}, + .version = {1, 22, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index d06b8aa41e26..184dabce1bad 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -442,7 +442,13 @@ static void writecache_notify_io(unsigned long error, void *context) complete(&endio->c); } -static void ssd_commit_flushed(struct dm_writecache *wc) +static void writecache_wait_for_ios(struct dm_writecache *wc, int direction) +{ + wait_event(wc->bio_in_progress_wait[direction], + !atomic_read(&wc->bio_in_progress[direction])); +} + +static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) { struct dm_io_region region; struct dm_io_request req; @@ -488,17 +494,20 @@ static void ssd_commit_flushed(struct dm_writecache *wc) writecache_notify_io(0, &endio); wait_for_completion_io(&endio.c); + if (wait_for_ios) + writecache_wait_for_ios(wc, WRITE); + writecache_disk_flush(wc, wc->ssd_dev); memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size); } -static void writecache_commit_flushed(struct dm_writecache *wc) +static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) { if (WC_MODE_PMEM(wc)) wmb(); else - ssd_commit_flushed(wc); + ssd_commit_flushed(wc, wait_for_ios); } static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) @@ -522,12 +531,6 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) writecache_error(wc, r, "error flushing metadata: %d", r); } -static void writecache_wait_for_ios(struct dm_writecache *wc, int direction) -{ - wait_event(wc->bio_in_progress_wait[direction], - !atomic_read(&wc->bio_in_progress[direction])); -} - #define WFE_RETURN_FOLLOWING 1 #define WFE_LOWEST_SEQ 2 @@ -622,6 +625,12 @@ static void 
writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry wc->freelist_size++; } +static inline void writecache_verify_watermark(struct dm_writecache *wc) +{ + if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) + queue_work(wc->writeback_wq, &wc->writeback_work); +} + static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc) { struct wc_entry *e; @@ -643,8 +652,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc) list_del(&e->lru); } wc->freelist_size--; - if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) - queue_work(wc->writeback_wq, &wc->writeback_work); + + writecache_verify_watermark(wc); return e; } @@ -724,15 +733,12 @@ static void writecache_flush(struct dm_writecache *wc) e = e2; cond_resched(); } - writecache_commit_flushed(wc); - - if (!WC_MODE_PMEM(wc)) - writecache_wait_for_ios(wc, WRITE); + writecache_commit_flushed(wc, true); wc->seq_count++; pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count)); writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); wc->overwrote_committed = false; @@ -756,7 +762,7 @@ static void writecache_flush(struct dm_writecache *wc) } if (need_flush_after_free) - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); } static void writecache_flush_work(struct work_struct *work) @@ -809,7 +815,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ } if (discarded_something) - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); } static bool writecache_wait_for_writeback(struct dm_writecache *wc) @@ -838,7 +844,7 @@ static void writecache_suspend(struct dm_target *ti) } wc_unlock(wc); - flush_workqueue(wc->writeback_wq); + drain_workqueue(wc->writeback_wq); wc_lock(wc); if (flush_on_suspend) @@ -958,9 +964,11 @@ erase_this: if (need_flush) { writecache_flush_all_metadata(wc); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); } + writecache_verify_watermark(wc); + wc_unlock(wc); } @@ -1218,7 +1226,8 @@ bio_copy: } } while (bio->bi_iter.bi_size); - if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) + if (unlikely(bio->bi_opf & REQ_FUA || + wc->uncommitted_blocks >= wc->autocommit_blocks)) writecache_flush(wc); else writecache_schedule_autocommit(wc); @@ -1341,7 +1350,7 @@ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head * wc->writeback_size--; n_walked++; if (unlikely(n_walked >= ENDIO_LATENCY)) { - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); wc_unlock(wc); wc_lock(wc); n_walked = 0; @@ -1422,7 +1431,7 @@ pop_from_list: writecache_wait_for_ios(wc, READ); } - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); wc_unlock(wc); } @@ -1765,10 +1774,10 @@ static int init_memory(struct dm_writecache *wc) write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); writecache_flush_all_metadata(wc); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC)); writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); return 0; } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 595a73110e17..5205cf9bbfd9 100644 --- a/drivers/md/dm-zoned-metadata.c +++ 
b/drivers/md/dm-zoned-metadata.c @@ -134,6 +134,7 @@ struct dmz_metadata { sector_t zone_bitmap_size; unsigned int zone_nr_bitmap_blocks; + unsigned int zone_bits_per_mblk; unsigned int nr_bitmap_blocks; unsigned int nr_map_blocks; @@ -554,6 +555,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, TASK_UNINTERRUPTIBLE); if (test_bit(DMZ_META_ERROR, &mblk->state)) { dmz_release_mblock(zmd, mblk); + dmz_check_bdev(zmd->dev); return ERR_PTR(-EIO); } @@ -625,6 +627,8 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, ret = submit_bio_wait(bio); bio_put(bio); + if (ret) + dmz_check_bdev(zmd->dev); return ret; } @@ -691,6 +695,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, TASK_UNINTERRUPTIBLE); if (test_bit(DMZ_META_ERROR, &mblk->state)) { clear_bit(DMZ_META_ERROR, &mblk->state); + dmz_check_bdev(zmd->dev); ret = -EIO; } nr_mblks_submitted--; @@ -768,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) /* If there are no dirty metadata blocks, just flush the device cache */ if (list_empty(&write_list)) { ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); - goto out; + goto err; } /* @@ -778,7 +783,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ ret = dmz_log_dirty_mblocks(zmd, &write_list); if (ret) - goto out; + goto err; /* * The log is on disk. It is now safe to update in place @@ -786,11 +791,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary); if (ret) - goto out; + goto err; ret = dmz_write_sb(zmd, zmd->mblk_primary); if (ret) - goto out; + goto err; while (!list_empty(&write_list)) { mblk = list_first_entry(&write_list, struct dmz_mblock, link); @@ -805,16 +810,20 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) zmd->sb_gen++; out: - if (ret && !list_empty(&write_list)) { - spin_lock(&zmd->mblk_lock); - list_splice(&write_list, &zmd->mblk_dirty_list); - spin_unlock(&zmd->mblk_lock); - } - dmz_unlock_flush(zmd); up_write(&zmd->mblk_sem); return ret; + +err: + if (!list_empty(&write_list)) { + spin_lock(&zmd->mblk_lock); + list_splice(&write_list, &zmd->mblk_dirty_list); + spin_unlock(&zmd->mblk_lock); + } + if (!dmz_check_bdev(zmd->dev)) + ret = -EIO; + goto out; } /* @@ -1159,7 +1168,10 @@ static int dmz_init_zones(struct dmz_metadata *zmd) /* Init */ zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3; - zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT; + zmd->zone_nr_bitmap_blocks = + max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT); + zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks, + DMZ_BLOCK_SIZE_BITS); /* Allocate zone array */ zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL); @@ -1244,6 +1256,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) if (ret) { dmz_dev_err(zmd->dev, "Get zone %u report failed", dmz_id(zmd, zone)); + dmz_check_bdev(zmd->dev); return ret; } @@ -1982,7 +1995,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone, dmz_release_mblock(zmd, to_mblk); dmz_release_mblock(zmd, from_mblk); - chunk_block += DMZ_BLOCK_SIZE_BITS; + chunk_block += zmd->zone_bits_per_mblk; } to_zone->weight = from_zone->weight; @@ -2043,7 +2056,7 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, /* Set bits */ bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); count = 
dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits); if (count) { @@ -2122,7 +2135,7 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, /* Clear bits */ bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); count = dmz_clear_bits((unsigned long *)mblk->data, bit, nr_bits); @@ -2182,6 +2195,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, { struct dmz_mblock *mblk; unsigned int bit, set_bit, nr_bits; + unsigned int zone_bits = zmd->zone_bits_per_mblk; unsigned long *bitmap; int n = 0; @@ -2196,15 +2210,15 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, /* Get offset */ bitmap = (unsigned long *) mblk->data; bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zone_bits - bit); if (set) - set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit); + set_bit = find_next_bit(bitmap, zone_bits, bit); else - set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit); + set_bit = find_next_zero_bit(bitmap, zone_bits, bit); dmz_release_mblock(zmd, mblk); n += set_bit - bit; - if (set_bit < DMZ_BLOCK_SIZE_BITS) + if (set_bit < zone_bits) break; nr_blocks -= nr_bits; @@ -2307,7 +2321,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone) /* Count bits in this block */ bitmap = mblk->data; bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); n += dmz_count_bits(bitmap, bit, nr_bits); dmz_release_mblock(zmd, mblk); diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index d240d7ca8a8a..e7ace908a9b7 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -82,6 +82,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", dmz_id(zmd, zone), (unsigned long long)wp_block, (unsigned long long)block, nr_blocks, ret); + dmz_check_bdev(zrc->dev); return ret; } @@ -489,12 +490,7 @@ static void dmz_reclaim_work(struct work_struct *work) ret = dmz_do_reclaim(zrc); if (ret) { dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret); - if (ret == -EIO) - /* - * LLD might be performing some error handling sequence - * at the underlying device. To not interfere, do not - * attempt to schedule the next reclaim run immediately. - */ + if (!dmz_check_bdev(zrc->dev)) return; } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index d3bcc4197f5d..03267609b515 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -80,6 +80,8 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status) if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK) bio->bi_status = status; + if (bio->bi_status != BLK_STS_OK) + bioctx->target->dev->flags |= DMZ_CHECK_BDEV; if (refcount_dec_and_test(&bioctx->ref)) { struct dm_zone *zone = bioctx->zone; @@ -531,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) /* Get the BIO chunk work. 
If one is not active yet, create one */ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); - if (!cw) { - + if (cw) { + dmz_get_chunk_work(cw); + } else { /* Create a new chunk work */ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); if (unlikely(!cw)) { @@ -541,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) } INIT_WORK(&cw->work, dmz_chunk_work); - refcount_set(&cw->refcount, 0); + refcount_set(&cw->refcount, 1); cw->target = dmz; cw->chunk = chunk; bio_list_init(&cw->bio_list); @@ -554,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) } bio_list_add(&cw->bio_list, bio); - dmz_get_chunk_work(cw); dmz_reclaim_bio_acc(dmz->reclaim); if (queue_work(dmz->chunk_wq, &cw->work)) @@ -565,31 +567,51 @@ out: } /* - * Check the backing device availability. If it's on the way out, + * Check if the backing device is being removed. If it's on the way out, * start failing I/O. Reclaim and metadata components also call this * function to cleanly abort operation in the event of such failure. */ bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev) { - struct gendisk *disk; + if (dmz_dev->flags & DMZ_BDEV_DYING) + return true; - if (!(dmz_dev->flags & DMZ_BDEV_DYING)) { - disk = dmz_dev->bdev->bd_disk; - if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { - dmz_dev_warn(dmz_dev, "Backing device queue dying"); - dmz_dev->flags |= DMZ_BDEV_DYING; - } else if (disk->fops->check_events) { - if (disk->fops->check_events(disk, 0) & - DISK_EVENT_MEDIA_CHANGE) { - dmz_dev_warn(dmz_dev, "Backing device offline"); - dmz_dev->flags |= DMZ_BDEV_DYING; - } - } + if (dmz_dev->flags & DMZ_CHECK_BDEV) + return !dmz_check_bdev(dmz_dev); + + if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { + dmz_dev_warn(dmz_dev, "Backing device queue dying"); + dmz_dev->flags |= DMZ_BDEV_DYING; } return dmz_dev->flags & DMZ_BDEV_DYING; } +/* + * Check the backing device availability. This detects such events as + * backing device going offline due to errors, media removals, etc. + * This check is less efficient than dmz_bdev_is_dying() and should + * only be performed as a part of error handling. + */ +bool dmz_check_bdev(struct dmz_dev *dmz_dev) +{ + struct gendisk *disk; + + dmz_dev->flags &= ~DMZ_CHECK_BDEV; + + if (dmz_bdev_is_dying(dmz_dev)) + return false; + + disk = dmz_dev->bdev->bd_disk; + if (disk->fops->check_events && + disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) { + dmz_dev_warn(dmz_dev, "Backing device offline"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } + + return !(dmz_dev->flags & DMZ_BDEV_DYING); +} + /* * Process a new BIO. */ @@ -902,8 +924,8 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { struct dmz_target *dmz = ti->private; - if (dmz_bdev_is_dying(dmz->dev)) - return -ENODEV; + if (!dmz_check_bdev(dmz->dev)) + return -EIO; *bdev = dmz->dev->bdev; diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h index d8e70b0ade35..5b5e493d479c 100644 --- a/drivers/md/dm-zoned.h +++ b/drivers/md/dm-zoned.h @@ -72,6 +72,7 @@ struct dmz_dev { /* Device flags. */ #define DMZ_BDEV_DYING (1 << 0) +#define DMZ_CHECK_BDEV (2 << 0) /* * Zone descriptor. 
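The DMZ_CHECK_BDEV flag added above turns the expensive device probe into a deferred, one-shot check: I/O completion merely marks the device, and the probe runs on the next dmz_bdev_is_dying() call instead of on every bio. A rough userspace model of that state machine follows, with an invented struct and the check_events() probe reduced to a boolean; it is a sketch of the flag handling, not the driver code (the real dmz_check_bdev() also re-runs dmz_bdev_is_dying() after clearing the flag):

#include <stdbool.h>
#include <stdio.h>

#define DMZ_BDEV_DYING (1 << 0)
#define DMZ_CHECK_BDEV (2 << 0)  /* value 2 (bit 1), spelled as in the patch */

struct dmz_dev_model {   /* invented stand-in for struct dmz_dev */
	unsigned int flags;
	bool media_gone; /* stand-in for a failed check_events() probe */
};

/* Slow path: clear the request flag, poll the "disk", cache the verdict. */
static bool check_bdev(struct dmz_dev_model *dev)
{
	dev->flags &= ~DMZ_CHECK_BDEV;
	if (dev->media_gone)
		dev->flags |= DMZ_BDEV_DYING;
	return !(dev->flags & DMZ_BDEV_DYING);
}

/* Fast path: cached flags only, unless an error flagged a re-check. */
static bool bdev_is_dying(struct dmz_dev_model *dev)
{
	if (dev->flags & DMZ_BDEV_DYING)
		return true;
	if (dev->flags & DMZ_CHECK_BDEV)
		return !check_bdev(dev);
	return false;
}

int main(void)
{
	struct dmz_dev_model dev = { .flags = 0, .media_gone = false };
	printf("%d\n", bdev_is_dying(&dev)); /* 0: healthy, no probe */
	dev.flags |= DMZ_CHECK_BDEV;         /* an I/O error was observed */
	dev.media_gone = true;
	printf("%d\n", bdev_is_dying(&dev)); /* 1: deferred probe finds it gone */
	return 0;
}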
@@ -255,5 +256,6 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc); * Functions defined in dm-zoned-target.c */ bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev); +bool dmz_check_bdev(struct dmz_dev *dmz_dev); #endif /* DM_ZONED_H */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1a5e328c443a..ed612dbc972d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -25,6 +25,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "core" @@ -668,11 +669,18 @@ static void end_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->orig_bio; + struct blkcg *blkcg = bio_blkcg(bio); unsigned long duration = jiffies - io->start_time; + int rw = bio_data_dir(bio), cpu; generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0, io->start_time); + cpu = part_stat_lock(); + blkcg_part_stat_add(blkcg, cpu, &dm_disk(md)->part0, nsecs[rw], + jiffies_to_nsecs(duration)); + part_stat_unlock(); + if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), @@ -1760,8 +1768,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, * won't be imposed. */ if (current->bio_list) { - blk_queue_split(md->queue, &bio); - if (!is_abnormal_io(bio)) + if (is_abnormal_io(bio)) + blk_queue_split(md->queue, &bio); + else dm_queue_split(md, ti, &bio); } @@ -1775,11 +1784,19 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) { struct mapped_device *md = q->queuedata; blk_qc_t ret = BLK_QC_T_NONE; + struct blkcg *blkcg = bio_blkcg(bio); int srcu_idx; struct dm_table *map; + int rw = bio_data_dir(bio), cpu; map = dm_get_live_table(md, &srcu_idx); + cpu = part_stat_lock(); + blkcg_part_stat_inc(blkcg, cpu, &dm_disk(md)->part0, ios[rw]); + blkcg_part_stat_add(blkcg, cpu, &dm_disk(md)->part0, sectors[rw], + bio_sectors(bio)); + part_stat_unlock(); + /* if we're suspended, we have to queue this io for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { dm_put_live_table(md, srcu_idx); @@ -1809,7 +1826,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits) * With request-based DM we only need to check the * top-level queue for congestion. */ - r = md->queue->backing_dev_info->wb.state & bdi_bits; + struct backing_dev_info *bdi = md->queue->backing_dev_info; + r = bdi->wb.congested->state & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) @@ -1875,14 +1893,6 @@ static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); -static void dm_init_normal_md_queue(struct mapped_device *md) -{ - /* - * Initialize aspects of queue that aren't relevant for blk-mq - */ - md->queue->backing_dev_info->congested_fn = dm_any_congested; -} - static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) @@ -1970,7 +1980,12 @@ static struct mapped_device *alloc_dev(int minor) if (!md->queue) goto bad; md->queue->queuedata = md; - md->queue->backing_dev_info->congested_data = md; + /* + * default to bio-based required ->make_request_fn until DM + * table is loaded and md->type established. If request-based + * table is loaded: blk-mq will override accordingly. 
+ */ + blk_queue_make_request(md->queue, dm_make_request); md->disk = alloc_disk_node(1, md->numa_node_id); if (!md->disk) goto bad; @@ -2264,6 +2279,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_get_queue_limits); +static void dm_init_congested_fn(struct mapped_device *md) +{ + md->queue->backing_dev_info->congested_data = md; + md->queue->backing_dev_info->congested_fn = dm_any_congested; +} + /* * Setup the DM device's queue based on md's type */ @@ -2280,12 +2301,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) DMERR("Cannot initialize queue for request-based dm-mq mapped device"); return r; } + dm_init_congested_fn(md); break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_NVME_BIO_BASED: - dm_init_normal_md_queue(md); - blk_queue_make_request(md->queue, dm_make_request); + dm_init_congested_fn(md); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2384,6 +2405,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); + set_bit(DMF_SUSPENDED, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index b092c7b5282f..3ad18246fcb3 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2139,6 +2139,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), sizeof(bitmap_super_t)); + spin_lock_irq(&bitmap->counts.lock); md_bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; @@ -2154,7 +2155,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, blocks = min(old_counts.chunks << old_counts.chunkshift, chunks << chunkshift); - spin_lock_irq(&bitmap->counts.lock); /* For cluster raid, need to pre-allocate bitmap */ if (mddev_is_clustered(bitmap->mddev)) { unsigned long page; diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index c766c559d36d..26c75c0199fa 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -244,10 +244,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) sector_t start_sector, end_sector, data_offset; sector_t bio_sector = bio->bi_iter.bi_sector; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } tmp_dev = which_dev(mddev, bio_sector); start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 6780938d2991..152f9e65a226 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -104,10 +104,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) struct multipath_bh * mp_bh; struct multipath_info *multipath; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } mp_bh = mempool_alloc(&conf->pool, GFP_NOIO); diff --git a/drivers/md/md.c b/drivers/md/md.c index 1be7abeb24fd..dc32437ebb56 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -60,6 +60,7 @@ #include <linux/raid/md_u.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/blk-cgroup.h> #include <trace/events/block.h> #include "md.h" @@ -374,7 +375,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) const int rw = bio_data_dir(bio);
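The md-linear and md-multipath hunks above switch to a single caller idiom for the now-bool md_flush_request(): a true return means the flush machinery consumed the bio (ended it, or will end it in another context), while false means the flush already completed and the caller still owns the data portion. A sketch of that contract with stub types, not the kernel signatures:

#include <stdbool.h>
#include <stdio.h>

struct bio { int preflush; };

/* Returns true if the bio was consumed by flush handling. */
static bool flush_request(struct bio *bio)
{
	/* pretend an identical flush was already in flight: bio queued */
	(void)bio;
	return true;
}

static bool make_request(struct bio *bio)
{
	if (bio->preflush && flush_request(bio))
		return true;	/* finished, or being finished elsewhere */

	/* flush done but data still pending: handle the I/O portion */
	printf("submitting data portion\n");
	return true;
}

int main(void)
{
	struct bio b = { .preflush = 1 };
	return !make_request(&b);
}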
const int sgrp = op_stat_group(bio_op(bio)); struct mddev *mddev = q->queuedata; - unsigned int sectors; + struct blkcg *blkcg = bio_blkcg(bio); + unsigned int sectors, cpu; if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { bio_io_error(bio); @@ -404,9 +406,12 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) md_handle_request(mddev, bio); - part_stat_lock(); + cpu = part_stat_lock(); part_stat_inc(&mddev->gendisk->part0, ios[sgrp]); part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors); + blkcg_part_stat_inc(blkcg, cpu, &mddev->gendisk->part0, ios[rw]); + blkcg_part_stat_add(blkcg, cpu, &mddev->gendisk->part0, sectors[rw], + sectors); part_stat_unlock(); return BLK_QC_T_NONE; @@ -550,7 +555,13 @@ static void md_submit_flush_data(struct work_struct *ws) } } -void md_flush_request(struct mddev *mddev, struct bio *bio) +/* + * Manages consolidation of flushes and submitting any flushes needed for + * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is + * being finished in another context. Returns false if the flushing is + * complete but still needs the I/O portion of the bio to be processed. + */ +bool md_flush_request(struct mddev *mddev, struct bio *bio) { ktime_t start = ktime_get_boottime(); spin_lock_irq(&mddev->lock); @@ -575,9 +586,10 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) bio_endio(bio); else { bio->bi_opf &= ~REQ_PREFLUSH; - mddev->pers->make_request(mddev, bio); + return false; } } + return true; } EXPORT_SYMBOL(md_flush_request); @@ -1098,6 +1110,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; mdp_super_t *sb; int ret; + bool spare_disk = true; /* * Calculate the position of the superblock (512byte sectors), @@ -1148,8 +1161,19 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor else rdev->desc_nr = sb->this_disk.number; + /* not spare disk, or LEVEL_MULTIPATH */ + if (sb->level == LEVEL_MULTIPATH || + (rdev->desc_nr >= 0 && + rdev->desc_nr < MD_SB_DISKS && + sb->disks[rdev->desc_nr].state & + ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))) + spare_disk = false; + if (!refdev) { - ret = 1; + if (!spare_disk) + ret = 1; + else + ret = 0; } else { __u64 ev1, ev2; mdp_super_t *refsb = page_address(refdev->sb_page); @@ -1165,7 +1189,8 @@ } ev1 = md_event(sb); ev2 = md_event(refsb); - if (ev1 > ev2) + + if (!spare_disk && ev1 > ev2) ret = 1; else ret = 0; @@ -1525,6 +1550,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ sector_t sectors; char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; int bmask; + bool spare_disk = true; /* * Calculate the position of the superblock in 512byte sectors.
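super_90_load() above (and super_1_load() below) stop letting spare disks win the superblock event-count comparison, and analyze_sbs() below now fails outright when no non-spare candidate remains. The intent, reduced to a toy freshest-member election with hypothetical types:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rdev { uint64_t events; bool spare; };

/* Only active/in-sync members may be elected "freshest". */
static const struct rdev *pick_freshest(const struct rdev *devs, size_t n)
{
	const struct rdev *best = NULL;

	for (size_t i = 0; i < n; i++) {
		if (devs[i].spare)
			continue;
		if (!best || devs[i].events > best->events)
			best = &devs[i];
	}
	return best;	/* NULL: no valid disk, mirrors the new -EINVAL */
}

int main(void)
{
	struct rdev devs[] = {
		{ .events = 100, .spare = false },
		{ .events = 999, .spare = true },  /* stale spare must not win */
	};

	assert(pick_freshest(devs, 2) == &devs[0]);
	return 0;
}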
@@ -1658,8 +1684,19 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ sb->level != 0) return -EINVAL; + /* not spare disk, or LEVEL_MULTIPATH */ + if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) || + (rdev->desc_nr >= 0 && + rdev->desc_nr < le32_to_cpu(sb->max_dev) && + (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || + le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))) + spare_disk = false; + if (!refdev) { - ret = 1; + if (!spare_disk) + ret = 1; + else + ret = 0; } else { __u64 ev1, ev2; struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); @@ -1676,7 +1713,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ ev1 = le64_to_cpu(sb->events); ev2 = le64_to_cpu(refsb->events); - if (ev1 > ev2) + if (!spare_disk && ev1 > ev2) ret = 1; else ret = 0; @@ -3597,7 +3634,7 @@ abort_free: * Check a full RAID array for plausibility */ -static void analyze_sbs(struct mddev *mddev) +static int analyze_sbs(struct mddev *mddev) { int i; struct md_rdev *rdev, *freshest, *tmp; @@ -3618,6 +3655,12 @@ static void analyze_sbs(struct mddev *mddev) md_kick_rdev_from_array(rdev); } + /* Cannot find a valid fresh disk */ + if (!freshest) { + pr_warn("md: cannot find a valid disk\n"); + return -EINVAL; + } + super_types[mddev->major_version]. validate_super(mddev, freshest); @@ -3652,6 +3695,8 @@ static void analyze_sbs(struct mddev *mddev) clear_bit(In_sync, &rdev->flags); } } + + return 0; } /* Read a fixed-point number. @@ -5570,7 +5615,9 @@ int md_run(struct mddev *mddev) if (!mddev->raid_disks) { if (!mddev->persistent) return -EINVAL; - analyze_sbs(mddev); + err = analyze_sbs(mddev); + if (err) + return -EINVAL; } if (mddev->level != LEVEL_NONE) diff --git a/drivers/md/md.h b/drivers/md/md.h index c5e3ff398b59..5f86f8adb0a4 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -550,7 +550,7 @@ struct md_personality int level; struct list_head list; struct module *owner; - bool (*make_request)(struct mddev *mddev, struct bio *bio); + bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio); /* * start up works that do NOT require md_thread. tasks that * requires md_thread should go into start() @@ -703,7 +703,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); extern int mddev_congested(struct mddev *mddev, int bits); -extern void md_flush_request(struct mddev *mddev, struct bio *bio); +extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); extern int md_super_wait(struct mddev *mddev); diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 21ea537bd55e..eff04fa23dfa 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, struct btree_node *right = r->n; uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t nr_right = le32_to_cpu(right->header.nr_entries); - unsigned threshold = 2 * merge_threshold(left) + 1; + /* + * Ensure the number of entries in each child will be greater + * than or equal to (max_entries / 3 + 1), so no matter which + * child is used for removal, the number will still be not + * less than (max_entries / 3). 
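The __rebalance2() comment above is easy to verify numerically: with merge_threshold = max_entries / 3, the new threshold 2 * (merge_threshold + 1) guarantees both that a merge result still fits in one node and that a rebalance leaves each child strictly above the merge threshold. A quick check, using an assumed node capacity of 126 entries:

#include <assert.h>

int main(void)
{
	unsigned int max_entries = 126;			/* example capacity */
	unsigned int merge_threshold = max_entries / 3;	/* 42 */
	unsigned int threshold = 2 * (merge_threshold + 1);	/* 86 */

	/* below the threshold: both children merge into a single node */
	assert(threshold - 1 <= max_entries);
	/* at the threshold: an even split leaves each child >= 43 > 42 */
	assert(threshold / 2 >= merge_threshold + 1);
	return 0;
}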
+ */ + unsigned int threshold = 2 * (merge_threshold(left) + 1); if (nr_left + nr_right < threshold) { /* diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index bd68f6fef694..d8b4125e338c 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -380,6 +380,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, return -ENOSPC; } +int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, + dm_block_t begin, dm_block_t end, dm_block_t *b) +{ + int r; + uint32_t count; + + do { + r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b); + if (r) + break; + + /* double check this block wasn't used in the old transaction */ + if (*b >= old_ll->nr_blocks) + count = 0; + else { + r = sm_ll_lookup(old_ll, *b, &count); + if (r) + break; + + if (count) + begin = *b + 1; + } + } while (count); + + return r; +} + static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, int (*mutator)(void *context, uint32_t old, uint32_t *new), void *context, enum allocation_event *ev) diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h index b3078d5eda0c..8de63ce39bdd 100644 --- a/drivers/md/persistent-data/dm-space-map-common.h +++ b/drivers/md/persistent-data/dm-space-map-common.h @@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result); int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result); int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, dm_block_t end, dm_block_t *result); +int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, + dm_block_t begin, dm_block_t end, dm_block_t *result); int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev); int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev); int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev); diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index 32adf6b4a9c7..bf4c5e2ccb6f 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) enum allocation_event ev; struct sm_disk *smd = container_of(sm, struct sm_disk, sm); - /* FIXME: we should loop round a couple of times */ - r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b); + /* + * Any block we allocate has to be free in both the old and current ll. + */ + r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); if (r) return r; diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 25328582cc48..9e3c64ec2026 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) enum allocation_event ev; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); - r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b); + /* + * Any block we allocate has to be free in both the old and current ll. 
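sm_ll_find_common_free_block() above is the heart of the space-map fix: a block is only handed out if it is free in the current transaction and was also free at the last commit, which preserves the ability to roll back to the committed state if the transaction never completes. The same search, modeled over two plain bitmaps (helper name invented):

#include <stdbool.h>
#include <stddef.h>

/* returns the first index >= begin that is clear in both maps, or n */
static size_t find_common_free(const bool *old_used, const bool *new_used,
			       size_t n, size_t begin)
{
	for (size_t b = begin; b < n; b++) {
		if (new_used[b])
			continue;	/* busy in the current transaction */
		if (old_used[b])
			continue;	/* was busy when last committed */
		return b;
	}
	return n;	/* no block: -ENOSPC in the kernel version */
}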
+ */ + r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); if (r) return r; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 1e772287b1c8..322386ff5d22 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) char b[BDEVNAME_SIZE]; char b2[BDEVNAME_SIZE]; struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); - unsigned short blksize = 512; + unsigned blksize = 512; *private_conf = ERR_PTR(-ENOMEM); if (!conf) @@ -575,10 +575,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) unsigned chunk_sects; unsigned sectors; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { raid0_handle_discard(mddev, bio); @@ -615,7 +614,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) tmp_dev = map_sector(mddev, zone, sector, §or); break; default: - WARN("md/raid0:%s: Invalid layout\n", mdname(mddev)); + WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev)); bio_io_error(bio); return true; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 0466ee2453b4..c7137f50bd1d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1567,10 +1567,9 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio) { sector_t sectors; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } /* * There is a limit to the maximum size, but @@ -2782,7 +2781,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, write_targets++; } } - if (bio->bi_end_io) { + if (rdev && bio->bi_end_io) { atomic_inc(&rdev->nr_pending); bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio_set_dev(bio, rdev->bdev); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 299c7b1c9718..ec136e44aef7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: while (--j >= 0) - resync_free_pages(&rps[j * 2]); + resync_free_pages(&rps[j]); j = 0; out_free_bio: @@ -1525,10 +1525,9 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) int chunk_sects = chunk_mask + 1; int sectors = bio_sectors(bio); - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } if (!md_write_start(mddev, bio)) return false; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 223e97ab27e6..36cd7c2fbf40 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5592,8 +5592,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) if (ret == 0) return true; if (ret == -ENODEV) { - md_flush_request(mddev, bi); - return true; + if (md_flush_request(mddev, bi)) + return true; } /* ret == -EAGAIN, fallback */ /* @@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) do_flush = false; } - if (!sh->batch_head) + if (!sh->batch_head || sh == sh->batch_head) set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); if ((!sh->batch_head || sh == sh->batch_head) && diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 
5ef7daeb8cbd..b14c09cd9593 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -378,7 +378,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status) } else { list_del_init(&data->list); if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) - data->adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + data->adap->transmit_queue_sz--; } if (data->msg.tx_status & CEC_TX_STATUS_OK) { @@ -430,6 +431,14 @@ static void cec_flush(struct cec_adapter *adap) * need to do anything special in that case. */ } + /* + * If something went wrong and this counter isn't what it should + * be, then this will reset it back to 0. Warn if it is not 0, + * since it indicates a bug, either in this framework or in a + * CEC driver. + */ + if (WARN_ON(adap->transmit_queue_sz)) + adap->transmit_queue_sz = 0; } /* @@ -454,7 +463,7 @@ int cec_thread_func(void *_adap) bool timeout = false; u8 attempts; - if (adap->transmitting) { + if (adap->transmit_in_progress) { int err; /* @@ -489,7 +498,7 @@ int cec_thread_func(void *_adap) goto unlock; } - if (adap->transmitting && timeout) { + if (adap->transmit_in_progress && timeout) { /* * If we timeout, then log that. Normally this does * not happen and it is an indication of a faulty CEC @@ -498,14 +507,18 @@ int cec_thread_func(void *_adap) * so much traffic on the bus that the adapter was * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). */ - pr_warn("cec-%s: message %*ph timed out\n", adap->name, - adap->transmitting->msg.len, - adap->transmitting->msg.msg); + if (adap->transmitting) { + pr_warn("cec-%s: message %*ph timed out\n", adap->name, + adap->transmitting->msg.len, + adap->transmitting->msg.msg); + /* Just give up on this. */ + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_TIMEOUT); + } else { + pr_warn("cec-%s: transmit timed out\n", adap->name); + } adap->transmit_in_progress = false; adap->tx_timeouts++; - /* Just give up on this. */ - cec_data_cancel(adap->transmitting, - CEC_TX_STATUS_TIMEOUT); goto unlock; } @@ -520,7 +533,8 @@ int cec_thread_func(void *_adap) data = list_first_entry(&adap->transmit_queue, struct cec_data, list); list_del_init(&data->list); - adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + adap->transmit_queue_sz--; /* Make this the current transmitting message */ adap->transmitting = data; @@ -1083,11 +1097,11 @@ void cec_received_msg_ts(struct cec_adapter *adap, valid_la = false; else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) valid_la = false; - else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4)) + else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) valid_la = false; else if (cec_msg_is_broadcast(msg) && - adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 && - !(dir_fl & BCAST2_0)) + adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && + !(dir_fl & BCAST1_4)) valid_la = false; } if (valid_la && min_len) { diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 7eee1812bba3..fcffcc31d168 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -1113,6 +1113,7 @@ comment "SDR tuner chips" config SDR_MAX2175 tristate "Maxim 2175 RF to Bits tuner" depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C + select REGMAP_I2C help Support for Maxim 2175 tuner. It is an advanced analog/digital radio receiver with RF-to-Bits front-end designed for SDR solutions. 
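The cec-adap.c hunks above wrap every transmit_queue_sz decrement in a WARN_ON(!sz) guard and reset the counter in cec_flush(): an unsigned counter that underflows wraps to a huge value and silently corrupts queue bookkeeping, whereas a loud one-time warning pinpoints the buggy path. The guard, isolated:

#include <stdio.h>

static unsigned int transmit_queue_sz;

static void queue_sz_dec(void)
{
	/* kernel form: if (!WARN_ON(!adap->transmit_queue_sz)) sz--; */
	if (transmit_queue_sz == 0) {
		fprintf(stderr, "bug: transmit_queue_sz underflow\n");
		return;
	}
	transmit_queue_sz--;
}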
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c index 925c171e7797..7a49651f4d1f 100644 --- a/drivers/media/i2c/ad5820.c +++ b/drivers/media/i2c/ad5820.c @@ -309,6 +309,7 @@ static int ad5820_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops); coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; coil->subdev.internal_ops = &ad5820_internal_ops; + coil->subdev.entity.function = MEDIA_ENT_F_LENS; strscpy(coil->subdev.name, "ad5820 focus", sizeof(coil->subdev.name)); ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL); diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h index 5042f9e94aee..fccb388ce179 100644 --- a/drivers/media/i2c/adv748x/adv748x.h +++ b/drivers/media/i2c/adv748x/adv748x.h @@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page, #define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r) #define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v) -#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v) +#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v)) #define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r) -#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m) +#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m)) #define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v) #define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r) @@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page, #define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r) #define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v) -#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v) +#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v)) #define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r) #define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v) -#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v) +#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v)) #define tx_read(t, r) adv748x_read(t->state, t->page, r) #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v) diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c index 4b9b98cf6674..5bd3ae82992f 100644 --- a/drivers/media/i2c/mt9v032.c +++ b/drivers/media/i2c/mt9v032.c @@ -428,10 +428,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) { + struct mt9v032 *mt9v032 = to_mt9v032(subdev); + if (code->index > 0) return -EINVAL; - code->code = MEDIA_BUS_FMT_SGRBG10_1X10; + code->code = mt9v032->format.code; return 0; } @@ -439,7 +441,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_frame_size_enum *fse) { - if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) + struct mt9v032 *mt9v032 = to_mt9v032(subdev); + + if (fse->index >= 3) + return -EINVAL; + if (mt9v032->format.code != fse->code) return -EINVAL; fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index); diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index f4ded0669ff9..e1ff38009cf0 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = { { REG_TIMING_YINC, 0x11 }, { 
REG_TIMING_VERT_FORMAT, 0x80 }, { REG_TIMING_HORIZ_FORMAT, 0x00 }, + { 0x370a, 0x12 }, { 0x3a03, 0xe8 }, { 0x3a09, 0x6f }, { 0x3a0b, 0x5d }, { 0x3a15, 0x9a }, + { REG_VFIFO_READ_START_H, 0x00 }, + { REG_VFIFO_READ_START_L, 0x80 }, + { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; @@ -1201,11 +1205,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on) goto unlock; } - ov2659_set_pixel_clock(ov2659); - ov2659_set_frame_size(ov2659); - ov2659_set_format(ov2659); - ov2659_set_streaming(ov2659, 1); - ov2659->streaming = on; + ret = ov2659_set_pixel_clock(ov2659); + if (!ret) + ret = ov2659_set_frame_size(ov2659); + if (!ret) + ret = ov2659_set_format(ov2659); + if (!ret) { + ov2659_set_streaming(ov2659, 1); + ov2659->streaming = on; + } unlock: mutex_unlock(&ov2659->lock); diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 500d9bbff10b..a398ea81e422 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -874,7 +874,7 @@ static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor, * We have reached the maximum allowed PLL1 output, * increase sysdiv. */ - if (!rate) + if (!_rate) break; /* @@ -1611,6 +1611,11 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr, !(mode->hact == 640 && mode->vact == 480)) return NULL; + /* 2592x1944 only works at 15fps max */ + if ((mode->hact == 2592 && mode->vact == 1944) && + fr > OV5640_15_FPS) + return NULL; + return mode; } diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c index 5b9af5e5b7f1..af482620f94a 100644 --- a/drivers/media/i2c/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -130,6 +130,7 @@ #define CLKRC_24MHz 0xc0 #define CLKRC_DIV_MASK 0x3f #define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1) +#define DEF_CLKRC 0x00 #define COMA_RESET BIT(7) #define COMA_QCIF BIT(5) @@ -200,7 +201,6 @@ struct ov6650 { unsigned long pclk_max; /* from resolution and format */ struct v4l2_fract tpf; /* as requested with s_frame_interval */ u32 code; - enum v4l2_colorspace colorspace; }; @@ -213,6 +213,17 @@ static u32 ov6650_codes[] = { MEDIA_BUS_FMT_Y8_1X8, }; +static const struct v4l2_mbus_framefmt ov6650_def_fmt = { + .width = W_CIF, + .height = H_CIF, + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .colorspace = V4L2_COLORSPACE_SRGB, + .field = V4L2_FIELD_NONE, + .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT, + .quantization = V4L2_QUANTIZATION_DEFAULT, + .xfer_func = V4L2_XFER_FUNC_DEFAULT, +}; + /* read a register */ static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val) { @@ -465,38 +476,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd, { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); - struct v4l2_rect rect = sel->r; int ret; if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; - v4l_bound_align_image(&rect.width, 2, W_CIF, 1, - &rect.height, 2, H_CIF, 1, 0); - v4l_bound_align_image(&rect.left, DEF_HSTRT << 1, - (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1, - &rect.top, DEF_VSTRT << 1, - (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1, - 0); + v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1, + &sel->r.height, 2, H_CIF, 1, 0); + v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1, + (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1, + &sel->r.top, DEF_VSTRT << 1, + (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height, + 1, 0); - ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1); + ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1); if (!ret) { - 
priv->rect.left = rect.left; + priv->rect.width += priv->rect.left - sel->r.left; + priv->rect.left = sel->r.left; ret = ov6650_reg_write(client, REG_HSTOP, - (rect.left + rect.width) >> 1); + (sel->r.left + sel->r.width) >> 1); } if (!ret) { - priv->rect.width = rect.width; - ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1); + priv->rect.width = sel->r.width; + ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1); } if (!ret) { - priv->rect.top = rect.top; + priv->rect.height += priv->rect.top - sel->r.top; + priv->rect.top = sel->r.top; ret = ov6650_reg_write(client, REG_VSTOP, - (rect.top + rect.height) >> 1); + (sel->r.top + sel->r.height) >> 1); } if (!ret) - priv->rect.height = rect.height; + priv->rect.height = sel->r.height; return ret; } @@ -512,12 +524,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd, if (format->pad) return -EINVAL; - mf->width = priv->rect.width >> priv->half_scale; - mf->height = priv->rect.height >> priv->half_scale; - mf->code = priv->code; - mf->colorspace = priv->colorspace; - mf->field = V4L2_FIELD_NONE; + /* initialize response with default media bus frame format */ + *mf = ov6650_def_fmt; + /* update media bus format code and frame size */ + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { + mf->width = cfg->try_fmt.width; + mf->height = cfg->try_fmt.height; + mf->code = cfg->try_fmt.code; + + } else { + mf->width = priv->rect.width >> priv->half_scale; + mf->height = priv->rect.height >> priv->half_scale; + mf->code = priv->code; + } return 0; } @@ -610,7 +630,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); return -EINVAL; } - priv->code = code; if (code == MEDIA_BUS_FMT_Y8_1X8 || code == MEDIA_BUS_FMT_SBGGR8_1X8) { @@ -623,11 +642,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) priv->pclk_max = 8000000; } - if (code == MEDIA_BUS_FMT_SBGGR8_1X8) - priv->colorspace = V4L2_COLORSPACE_SRGB; - else if (code != 0) - priv->colorspace = V4L2_COLORSPACE_JPEG; - if (half_scale) { dev_dbg(&client->dev, "max resolution: QCIF\n"); coma_set |= COMA_QCIF; @@ -636,7 +650,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) dev_dbg(&client->dev, "max resolution: CIF\n"); coma_mask |= COMA_QCIF; } - priv->half_scale = half_scale; clkrc = CLKRC_12MHz; mclk = 12000000; @@ -654,14 +667,14 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask); if (!ret) ret = ov6650_reg_write(client, REG_CLKRC, clkrc); - if (!ret) - ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask); - if (!ret) { - mf->colorspace = priv->colorspace; - mf->width = priv->rect.width >> half_scale; - mf->height = priv->rect.height >> half_scale; + priv->half_scale = half_scale; + + ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask); } + if (!ret) + priv->code = code; + return ret; } @@ -680,8 +693,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, v4l_bound_align_image(&mf->width, 2, W_CIF, 1, &mf->height, 2, H_CIF, 1, 0); - mf->field = V4L2_FIELD_NONE; - switch (mf->code) { case MEDIA_BUS_FMT_Y10_1X10: mf->code = MEDIA_BUS_FMT_Y8_1X8; @@ -691,20 +702,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, case MEDIA_BUS_FMT_YUYV8_2X8: case MEDIA_BUS_FMT_VYUY8_2X8: case MEDIA_BUS_FMT_UYVY8_2X8: - mf->colorspace = V4L2_COLORSPACE_JPEG; break; default: mf->code = MEDIA_BUS_FMT_SBGGR8_1X8; /* fall through */ case 
MEDIA_BUS_FMT_SBGGR8_1X8: - mf->colorspace = V4L2_COLORSPACE_SRGB; break; } - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return ov6650_s_fmt(sd, mf); - cfg->try_fmt = *mf; + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { + /* store media bus format code and frame size in pad config */ + cfg->try_fmt.width = mf->width; + cfg->try_fmt.height = mf->height; + cfg->try_fmt.code = mf->code; + /* return default mbus frame format updated with pad config */ + *mf = ov6650_def_fmt; + mf->width = cfg->try_fmt.width; + mf->height = cfg->try_fmt.height; + mf->code = cfg->try_fmt.code; + + } else { + /* apply new media bus format code and frame size */ + int ret = ov6650_s_fmt(sd, mf); + + if (ret) + return ret; + + /* return default format updated with active size and code */ + *mf = ov6650_def_fmt; + mf->width = priv->rect.width >> priv->half_scale; + mf->height = priv->rect.height >> priv->half_scale; + mf->code = priv->code; + } return 0; } @@ -754,19 +784,17 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd, else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK)) div = GET_CLKRC_DIV(CLKRC_DIV_MASK); - /* - * Keep result to be used as tpf limit - * for subsequent clock divider calculations - */ - priv->tpf.numerator = div; - priv->tpf.denominator = FRAME_RATE_MAX; + tpf->numerator = div; + tpf->denominator = FRAME_RATE_MAX; - clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max); + clkrc = to_clkrc(tpf, priv->pclk_limit, priv->pclk_max); ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK); if (!ret) { - tpf->numerator = GET_CLKRC_DIV(clkrc); - tpf->denominator = FRAME_RATE_MAX; + priv->tpf.numerator = GET_CLKRC_DIV(clkrc); + priv->tpf.denominator = FRAME_RATE_MAX; + + *tpf = priv->tpf; } return ret; @@ -849,6 +877,11 @@ static int ov6650_video_probe(struct v4l2_subdev *sd) ret = ov6650_reset(client); if (!ret) ret = ov6650_prog_dflt(client); + if (!ret) { + struct v4l2_mbus_framefmt mf = ov6650_def_fmt; + + ret = ov6650_s_fmt(sd, &mf); + } if (!ret) ret = v4l2_ctrl_handler_setup(&priv->hdl); @@ -989,8 +1022,10 @@ static int ov6650_probe(struct i2c_client *client, V4L2_CID_GAMMA, 0, 0xff, 1, 0x12); priv->subdev.ctrl_handler = &priv->hdl; - if (priv->hdl.error) - return priv->hdl.error; + if (priv->hdl.error) { + ret = priv->hdl.error; + goto ectlhdlfree; + } v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true); v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true); @@ -1001,15 +1036,18 @@ static int ov6650_probe(struct i2c_client *client, priv->rect.top = DEF_VSTRT << 1; priv->rect.width = W_CIF; priv->rect.height = H_CIF; - priv->half_scale = false; - priv->code = MEDIA_BUS_FMT_YUYV8_2X8; - priv->colorspace = V4L2_COLORSPACE_JPEG; + + /* Hardware default frame interval */ + priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC); + priv->tpf.denominator = FRAME_RATE_MAX; priv->subdev.internal_ops = &ov6650_internal_ops; ret = v4l2_async_register_subdev(&priv->subdev); - if (ret) - v4l2_ctrl_handler_free(&priv->hdl); + if (!ret) + return 0; +ectlhdlfree: + v4l2_ctrl_handler_free(&priv->hdl); return ret; } diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 9adf8e034e7d..42805dfbffeb 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -3101,19 +3101,23 @@ static int smiapp_probe(struct i2c_client *client) if (rval < 0) goto out_media_entity_cleanup; - rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd); - if (rval < 0) - goto out_media_entity_cleanup; - 
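The smiapp-core.c hunk that continues below moves v4l2_async_register_subdev_sensor_common() to after runtime PM is enabled: once the subdev is registered it is visible to the rest of the media graph, so everything its callbacks rely on must already be live, and the new out_disable_runtime_pm label unwinds in exact reverse order. The general probe shape, sketched with placeholder steps:

#include <stdbool.h>

/* hypothetical stand-ins for the real probe steps */
static bool enable_power_management(void) { return true; }
static void disable_power_management(void) { }
static bool publish_device(void) { return false; /* simulate failure */ }
static void cleanup_entity(void) { }

static int probe(void)
{
	if (!enable_power_management())
		goto err_entity;

	/* publish last: callbacks may fire as soon as this succeeds */
	if (!publish_device())
		goto err_pm;

	return 0;

err_pm:
	disable_power_management();	/* reverse order of setup */
err_entity:
	cleanup_entity();
	return -1;
}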
pm_runtime_set_active(&client->dev); pm_runtime_get_noresume(&client->dev); pm_runtime_enable(&client->dev); + + rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd); + if (rval < 0) + goto out_disable_runtime_pm; + pm_runtime_set_autosuspend_delay(&client->dev, 1000); pm_runtime_use_autosuspend(&client->dev); pm_runtime_put_autosuspend(&client->dev); return 0; +out_disable_runtime_pm: + pm_runtime_disable(&client->dev); + out_media_entity_cleanup: media_entity_cleanup(&sensor->src->sd.entity); diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c index 81285b8d5cfb..003ba22334cd 100644 --- a/drivers/media/i2c/st-mipid02.c +++ b/drivers/media/i2c/st-mipid02.c @@ -971,6 +971,11 @@ static int mipid02_probe(struct i2c_client *client) bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(bridge->reset_gpio)) { + dev_err(dev, "failed to get reset GPIO\n"); + return PTR_ERR(bridge->reset_gpio); + } + ret = mipid02_get_regulators(bridge); if (ret) { dev_err(dev, "failed to get regulators %d", ret); diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index 7c429ce98bae..668770e9f609 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink, return -EINVAL; for (i = 0; i < entity->num_pads; i++) { - if (entity->pads[i].flags == MEDIA_PAD_FL_SINK) + if (entity->pads[i].flags & MEDIA_PAD_FL_SINK) pad_is_sink = true; - else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE) + else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE) pad_is_sink = false; else continue; /* This is an error! */ diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 8644205d3cd3..8e5a2c580821 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -801,6 +801,25 @@ struct cx23885_board cx23885_boards[] = { .name = "Hauppauge WinTV-Starburst2", .portb = CX23885_MPEG_DVB, }, + [CX23885_BOARD_AVERMEDIA_CE310B] = { + .name = "AVerMedia CE310B", + .porta = CX23885_ANALOG_VIDEO, + .force_bff = 1, + .input = {{ + .type = CX23885_VMUX_COMPOSITE1, + .vmux = CX25840_VIN1_CH1 | + CX25840_NONE_CH2 | + CX25840_NONE0_CH3, + .amux = CX25840_AUDIO7, + }, { + .type = CX23885_VMUX_SVIDEO, + .vmux = CX25840_VIN8_CH1 | + CX25840_NONE_CH2 | + CX25840_VIN7_CH3 | + CX25840_SVIDEO_ON, + .amux = CX25840_AUDIO7, + } }, + }, }; const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards); @@ -1124,6 +1143,10 @@ struct cx23885_subid cx23885_subids[] = { .subvendor = 0x0070, .subdevice = 0xf02a, .card = CX23885_BOARD_HAUPPAUGE_STARBURST2, + }, { + .subvendor = 0x1461, + .subdevice = 0x3100, + .card = CX23885_BOARD_AVERMEDIA_CE310B, }, }; const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids); @@ -2348,6 +2371,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_DVBSKY_T982: case CX23885_BOARD_VIEWCAST_260E: case CX23885_BOARD_VIEWCAST_460E: + case CX23885_BOARD_AVERMEDIA_CE310B: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, "cx25840", 0x88 >> 1, NULL); diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index 8098b15493de..7fc408ee4934 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -257,7 +257,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input) (dev->board == 
CX23885_BOARD_MYGICA_X8507) || (dev->board == CX23885_BOARD_AVERMEDIA_HC81R) || (dev->board == CX23885_BOARD_VIEWCAST_260E) || - (dev->board == CX23885_BOARD_VIEWCAST_460E)) { + (dev->board == CX23885_BOARD_VIEWCAST_460E) || + (dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) { /* Configure audio routing */ v4l2_subdev_call(dev->sd_cx25840, audio, s_routing, INPUT(input)->amux, 0, 0); diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index a95a2e4c6a0d..c472498e57c4 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -101,6 +101,7 @@ #define CX23885_BOARD_HAUPPAUGE_STARBURST2 59 #define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60 #define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61 +#define CX23885_BOARD_AVERMEDIA_CE310B 62 #define GPIO_0 0x00000001 #define GPIO_1 0x00000002 diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index dcc0f02aeb70..b8abcd550604 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1277,7 +1277,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, core = cx88_core_get(dev->pci); if (!core) { err = -EINVAL; - goto fail_free; + goto fail_disable; } dev->core = core; @@ -1323,7 +1323,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, cc->step, cc->default_value); if (!vc) { err = core->audio_hdl.error; - goto fail_core; + goto fail_irq; } vc->priv = (void *)cc; } @@ -1337,7 +1337,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, cc->step, cc->default_value); if (!vc) { err = core->video_hdl.error; - goto fail_core; + goto fail_irq; } vc->priv = (void *)cc; if (vc->id == V4L2_CID_CHROMA_AGC) @@ -1509,11 +1509,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev, fail_unreg: cx8800_unregister_video(dev); - free_irq(pci_dev->irq, dev); mutex_unlock(&core->lock); +fail_irq: + free_irq(pci_dev->irq, dev); fail_core: core->v4ldev = NULL; cx88_core_put(core, dev->pci); +fail_disable: + pci_disable_device(pci_dev); fail_free: kfree(dev); return err; diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index 2b42ba1f5949..e13dbf27a9c2 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -1830,6 +1830,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id) if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD)) return -ENODATA; + /* if trying to set the same std then nothing to do */ + if (vpfe_standards[vpfe->std_index].std_id == std_id) + return 0; + /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index eb12f3793062..4eaaf39b9223 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -606,6 +606,16 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg) aspeed_video_start_frame(video); } + /* + * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these + * are disabled in the VE_INTERRUPT_CTRL register so clear them to + * prevent unnecessary interrupt calls. + */ + if (sts & VE_INTERRUPT_CAPTURE_COMPLETE) + sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE; + if (sts & VE_INTERRUPT_FRAME_COMPLETE) + sts &= ~VE_INTERRUPT_FRAME_COMPLETE; + return sts ? 
IRQ_NONE : IRQ_HANDLED; } @@ -741,6 +751,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) } set_bit(VIDEO_RES_DETECT, &video->flags); + aspeed_video_update(video, VE_CTRL, + VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0); aspeed_video_enable_mode_detect(video); rc = wait_event_interruptible_timeout(video->wait, @@ -1646,7 +1658,8 @@ static int aspeed_video_probe(struct platform_device *pdev) { int rc; struct resource *res; - struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL); + struct aspeed_video *video = + devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL); if (!video) return -ENOMEM; diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c index 31ace114eda1..be9ec59774d6 100644 --- a/drivers/media/platform/cadence/cdns-csi2rx.c +++ b/drivers/media/platform/cadence/cdns-csi2rx.c @@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx) */ for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) { unsigned int idx = find_first_zero_bit(&lanes_used, - sizeof(lanes_used)); + csi2rx->max_lanes); set_bit(idx, &lanes_used); reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1); } diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 73222c0615c0..834f11fe9dc2 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -1084,16 +1084,16 @@ static int coda_decoder_cmd(struct file *file, void *fh, switch (dc->cmd) { case V4L2_DEC_CMD_START: - mutex_lock(&ctx->bitstream_mutex); mutex_lock(&dev->coda_mutex); + mutex_lock(&ctx->bitstream_mutex); coda_bitstream_flush(ctx); - mutex_unlock(&dev->coda_mutex); dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); vb2_clear_last_buffer_dequeued(dst_vq); ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG; coda_fill_bitstream(ctx, NULL); mutex_unlock(&ctx->bitstream_mutex); + mutex_unlock(&dev->coda_mutex); break; case V4L2_DEC_CMD_STOP: stream_end = false; diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index 378cc302e1f8..d2cbcdca0463 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -313,7 +313,7 @@ static int isp_video_release(struct file *file) ivc->streaming = 0; } - vb2_fop_release(file); + _vb2_fop_release(file, NULL); if (v4l2_fh_is_singular_file(file)) { fimc_pipeline_call(&ivc->ve, close); diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index a838189d4490..9aaf3b8060d5 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1457,12 +1457,12 @@ static int fimc_md_probe(struct platform_device *pdev) ret = v4l2_device_register(dev, &fmd->v4l2_dev); if (ret < 0) { v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret); - return ret; + goto err_md; } ret = fimc_md_get_clocks(fmd); if (ret) - goto err_md; + goto err_v4l2dev; ret = fimc_md_get_pinctrl(fmd); if (ret < 0) { @@ -1519,9 +1519,10 @@ err_m_ent: fimc_md_unregister_entities(fmd); err_clk: fimc_md_put_clocks(fmd); +err_v4l2dev: + v4l2_device_unregister(&fmd->v4l2_dev); err_md: media_device_cleanup(&fmd->media_dev); - v4l2_device_unregister(&fmd->v4l2_dev); return ret; } diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c index 3b39e875292e..3d8fe854feb0 100644 --- 
a/drivers/media/platform/meson/ao-cec-g12a.c +++ b/drivers/media/platform/meson/ao-cec-g12a.c @@ -662,34 +662,27 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev) if (IS_ERR(ao_cec->adap)) return PTR_ERR(ao_cec->adap); - ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, - ao_cec->adap); - if (!ao_cec->notify) { - ret = -ENOMEM; - goto out_probe_adapter; - } - ao_cec->adap->owner = THIS_MODULE; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) { ret = PTR_ERR(base); - goto out_probe_notify; + goto out_probe_adapter; } ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base, &meson_ao_cec_g12a_regmap_conf); if (IS_ERR(ao_cec->regmap)) { ret = PTR_ERR(ao_cec->regmap); - goto out_probe_notify; + goto out_probe_adapter; } ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec, &meson_ao_cec_g12a_cec_regmap_conf); if (IS_ERR(ao_cec->regmap_cec)) { ret = PTR_ERR(ao_cec->regmap_cec); - goto out_probe_notify; + goto out_probe_adapter; } irq = platform_get_irq(pdev, 0); @@ -699,45 +692,52 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev) 0, NULL, ao_cec); if (ret) { dev_err(&pdev->dev, "irq request failed\n"); - goto out_probe_notify; + goto out_probe_adapter; } ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin"); if (IS_ERR(ao_cec->oscin)) { dev_err(&pdev->dev, "oscin clock request failed\n"); ret = PTR_ERR(ao_cec->oscin); - goto out_probe_notify; + goto out_probe_adapter; } ret = meson_ao_cec_g12a_setup_clk(ao_cec); if (ret) - goto out_probe_notify; + goto out_probe_adapter; ret = clk_prepare_enable(ao_cec->core); if (ret) { dev_err(&pdev->dev, "core clock enable failed\n"); - goto out_probe_notify; + goto out_probe_adapter; } device_reset_optional(&pdev->dev); platform_set_drvdata(pdev, ao_cec); + ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, + ao_cec->adap); + if (!ao_cec->notify) { + ret = -ENOMEM; + goto out_probe_core_clk; + } + ret = cec_register_adapter(ao_cec->adap, &pdev->dev); if (ret < 0) - goto out_probe_core_clk; + goto out_probe_notify; /* Setup Hardware */ regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET); return 0; -out_probe_core_clk: - clk_disable_unprepare(ao_cec->core); - out_probe_notify: cec_notifier_cec_adap_unregister(ao_cec->notify); +out_probe_core_clk: + clk_disable_unprepare(ao_cec->core); + out_probe_adapter: cec_delete_adapter(ao_cec->adap); diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c index 64ed549bf012..03600e8b3ef0 100644 --- a/drivers/media/platform/meson/ao-cec.c +++ b/drivers/media/platform/meson/ao-cec.c @@ -624,20 +624,13 @@ static int meson_ao_cec_probe(struct platform_device *pdev) if (IS_ERR(ao_cec->adap)) return PTR_ERR(ao_cec->adap); - ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, - ao_cec->adap); - if (!ao_cec->notify) { - ret = -ENOMEM; - goto out_probe_adapter; - } - ao_cec->adap->owner = THIS_MODULE; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ao_cec->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ao_cec->base)) { ret = PTR_ERR(ao_cec->base); - goto out_probe_notify; + goto out_probe_adapter; } irq = platform_get_irq(pdev, 0); @@ -647,20 +640,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev) 0, NULL, ao_cec); if (ret) { dev_err(&pdev->dev, "irq request failed\n"); - goto out_probe_notify; + goto out_probe_adapter; } ao_cec->core = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(ao_cec->core)) { 
dev_err(&pdev->dev, "core clock request failed\n"); ret = PTR_ERR(ao_cec->core); - goto out_probe_notify; + goto out_probe_adapter; } ret = clk_prepare_enable(ao_cec->core); if (ret) { dev_err(&pdev->dev, "core clock enable failed\n"); - goto out_probe_notify; + goto out_probe_adapter; } ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE); @@ -674,9 +667,16 @@ static int meson_ao_cec_probe(struct platform_device *pdev) ao_cec->pdev = pdev; platform_set_drvdata(pdev, ao_cec); + ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, + ao_cec->adap); + if (!ao_cec->notify) { + ret = -ENOMEM; + goto out_probe_clk; + } + ret = cec_register_adapter(ao_cec->adap, &pdev->dev); if (ret < 0) - goto out_probe_clk; + goto out_probe_notify; /* Setup Hardware */ writel_relaxed(CEC_GEN_CNTL_RESET, @@ -684,12 +684,12 @@ static int meson_ao_cec_probe(struct platform_device *pdev) return 0; -out_probe_clk: - clk_disable_unprepare(ao_cec->core); - out_probe_notify: cec_notifier_cec_adap_unregister(ao_cec->notify); +out_probe_clk: + clk_disable_unprepare(ao_cec->core); + out_probe_adapter: cec_delete_adapter(ao_cec->adap); diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index e6eff512a8a1..84e982f259a0 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -427,10 +427,11 @@ static const struct venus_resources msm8916_res = { }; static const struct freq_tbl msm8996_freq_table[] = { - { 1944000, 490000000 }, /* 4k UHD @ 60 */ - { 972000, 320000000 }, /* 4k UHD @ 30 */ - { 489600, 150000000 }, /* 1080p @ 60 */ - { 244800, 75000000 }, /* 1080p @ 30 */ + { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */ + { 972000, 520000000 }, /* 4k UHD @ 30 */ + { 489600, 346666667 }, /* 1080p @ 60 */ + { 244800, 150000000 }, /* 1080p @ 30 */ + { 108000, 75000000 }, /* 720p @ 30 */ }; static const struct reg_val msm8996_reg_preset[] = { diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index 7129a2aea09a..0d8855014ab3 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -1472,6 +1472,7 @@ static int venus_suspend_3xx(struct venus_core *core) { struct venus_hfi_device *hdev = to_hfi_priv(core); struct device *dev = core->dev; + u32 ctrl_status; bool val; int ret; @@ -1487,6 +1488,10 @@ static int venus_suspend_3xx(struct venus_core *core) return -EINVAL; } + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY) + goto power_off; + /* * Power collapse sequence for Venus 3xx and 4xx versions: * 1. 
Check for ARM9 and video core to be idle by checking WFI bit @@ -1511,6 +1516,7 @@ static int venus_suspend_3xx(struct venus_core *core) if (ret) return ret; +power_off: mutex_lock(&hdev->lock); ret = venus_power_off(hdev); diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 7f4660555ddb..59ae7a1e63bc 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -1412,9 +1412,6 @@ static const struct v4l2_file_operations vdec_fops = { .unlocked_ioctl = video_ioctl2, .poll = v4l2_m2m_fop_poll, .mmap = v4l2_m2m_fop_mmap, -#ifdef CONFIG_COMPAT - .compat_ioctl32 = v4l2_compat_ioctl32, -#endif }; static int vdec_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 1b7fb2d5887c..30028ceb548b 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -1235,9 +1235,6 @@ static const struct v4l2_file_operations venc_fops = { .unlocked_ioctl = video_ioctl2, .poll = v4l2_m2m_fop_poll, .mmap = v4l2_m2m_fop_mmap, -#ifdef CONFIG_COMPAT - .compat_ioctl32 = v4l2_compat_ioctl32, -#endif }; static int venc_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c index cbc1c07f0a96..ec2796413e26 100644 --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c @@ -208,6 +208,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which, ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format); if (ret < 0 && ret != -ENOIOCTLCMD) goto done; + ret = 0; v4l2_fill_pix_format(pix, &format.format); @@ -242,7 +243,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which, done: v4l2_subdev_free_pad_config(pad_cfg); - return 0; + return ret; } static int rvin_querycap(struct file *file, void *priv, diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c index 608e5217ccd5..0f267a237b42 100644 --- a/drivers/media/platform/rcar_drif.c +++ b/drivers/media/platform/rcar_drif.c @@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv, { struct rcar_drif_sdr *sdr = video_drvdata(file); + memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved)); f->fmt.sdr.pixelformat = sdr->fmt->pixelformat; f->fmt.sdr.buffersize = sdr->fmt->buffersize; diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c index 9cd60fe1867c..a86b6e8f9196 100644 --- a/drivers/media/platform/seco-cec/seco-cec.c +++ b/drivers/media/platform/seco-cec/seco-cec.c @@ -675,6 +675,7 @@ err_notifier: err_delete_adapter: cec_delete_adapter(secocec->cec_adap); err: + release_region(BRA_SMB_BASE_ADDR, 7); dev_err(dev, "%s device probe failed\n", dev_name(dev)); return ret; diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c index 4372abbb5950..a74e9fd65238 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-hw.c +++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c @@ -14,8 +14,8 @@ #define MAX_SRC_WIDTH 2048 /* Reset & boot poll config */ -#define POLL_RST_MAX 50 -#define POLL_RST_DELAY_MS 20 +#define POLL_RST_MAX 500 +#define POLL_RST_DELAY_MS 2 enum bdisp_target_plan { BDISP_RGB, @@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp) for (i = 0; i < POLL_RST_MAX; i++) { if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE) break; - 
msleep(POLL_RST_DELAY_MS); + udelay(POLL_RST_DELAY_MS * 1000); } if (i == POLL_RST_MAX) dev_err(bdisp->dev, "Reset timeout\n"); diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index e90f1ba30574..675b5f2b4c2e 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -651,8 +651,7 @@ static int bdisp_release(struct file *file) dev_dbg(bdisp->dev, "%s\n", __func__); - if (mutex_lock_interruptible(&bdisp->lock)) - return -ERESTARTSYS; + mutex_lock(&bdisp->lock); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c index f36dc6258900..b8b07c1de2a8 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -155,6 +156,27 @@ static int sun4i_csi_probe(struct platform_device *pdev) subdev = &csi->subdev; vdev = &csi->vdev; + /* + * On Allwinner SoCs, some high memory bandwidth devices do DMA + * directly over the memory bus (called MBUS), instead of the + * system bus. The memory bus has a different addressing scheme + * without the DRAM starting offset. + * + * In some cases this can be described by an interconnect in + * the device tree. In other cases where the hardware is not + * fully understood and the interconnect is left out of the + * device tree, fall back to a default offset. + */ + if (of_find_property(csi->dev->of_node, "interconnects", NULL)) { + ret = of_dma_configure(csi->dev, csi->dev->of_node, true); + if (ret) + return ret; + } else { +#ifdef PHYS_PFN_OFFSET + csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET; +#endif + } + csi->mdev.dev = csi->dev; strscpy(csi->mdev.model, "Allwinner Video Capture Device", sizeof(csi->mdev.model)); diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h index 001c8bde006c..88d39b3554c4 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h @@ -22,8 +22,8 @@ #define CSI_CFG_INPUT_FMT(fmt) ((fmt) << 20) #define CSI_CFG_OUTPUT_FMT(fmt) ((fmt) << 16) #define CSI_CFG_YUV_DATA_SEQ(seq) ((seq) << 8) -#define CSI_CFG_VSYNC_POL(pol) ((pol) << 2) -#define CSI_CFG_HSYNC_POL(pol) ((pol) << 1) +#define CSI_CFG_VREF_POL(pol) ((pol) << 2) +#define CSI_CFG_HREF_POL(pol) ((pol) << 1) #define CSI_CFG_PCLK_POL(pol) ((pol) << 0) #define CSI_CPT_CTRL_REG 0x08 diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c index d6979e11a67b..78fa1c535ac6 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c @@ -228,7 +228,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count) struct sun4i_csi *csi = vb2_get_drv_priv(vq); struct v4l2_fwnode_bus_parallel *bus = &csi->bus; const struct sun4i_csi_format *csi_fmt; - unsigned long hsync_pol, pclk_pol, vsync_pol; + unsigned long href_pol, pclk_pol, vref_pol; unsigned long flags; unsigned int i; int ret; @@ -278,13 +278,21 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count) writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height), csi->regs + CSI_WIN_CTRL_H_REG); - hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH); - pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH); -
vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH); + /* + * This hardware uses [HV]REF instead of [HV]SYNC. Based on the + * provided timing diagrams in the manual, positive polarity + * equals active high [HV]REF. + * + * When the back porch is 0, [HV]REF is more or less equivalent + * to [HV]SYNC inverted. + */ + href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW); + vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW); + pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING); writel(CSI_CFG_INPUT_FMT(csi_fmt->input) | CSI_CFG_OUTPUT_FMT(csi_fmt->output) | - CSI_CFG_VSYNC_POL(vsync_pol) | - CSI_CFG_HSYNC_POL(hsync_pol) | + CSI_CFG_VREF_POL(vref_pol) | + CSI_CFG_HREF_POL(href_pol) | CSI_CFG_PCLK_POL(pclk_pol), csi->regs + CSI_CFG_REG); diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h index 28bc94129348..9bacfd603250 100644 --- a/drivers/media/platform/ti-vpe/vpdma.h +++ b/drivers/media/platform/ti-vpe/vpdma.h @@ -57,6 +57,7 @@ struct vpdma_data_format { * line stride of source and dest * buffers should be 16 byte aligned */ +#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */ #define VPDMA_DTD_DESC_SIZE 32 /* 8 words */ #define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */ diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index 60b575bb44c4..8b14ba4a3d9e 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -338,20 +338,25 @@ enum { }; /* find our format description corresponding to the passed v4l2_format */ -static struct vpe_fmt *find_format(struct v4l2_format *f) +static struct vpe_fmt *__find_format(u32 fourcc) { struct vpe_fmt *fmt; unsigned int k; for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { fmt = &vpe_formats[k]; - if (fmt->fourcc == f->fmt.pix.pixelformat) + if (fmt->fourcc == fourcc) return fmt; } return NULL; } +static struct vpe_fmt *find_format(struct v4l2_format *f) +{ + return __find_format(f->fmt.pix.pixelformat); +} + /* * there is one vpe_dev structure in the driver, it is shared by * all instances. @@ -1013,11 +1018,14 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port) dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; + u32 stride; if (port == VPE_PORT_MV_OUT) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; q_data = &ctx->q_data[Q_DATA_SRC]; + stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3, + VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? p_data->vb_part : 0; @@ -1044,6 +1052,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port) } /* Apply the offset */ dma_addr += offset; + stride = q_data->bytesperline[VPE_LUMA]; } if (q_data->flags & Q_DATA_FRAME_1D) @@ -1055,7 +1064,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port) MAX_W, MAX_H); vpdma_add_out_dtd(&ctx->desc_list, q_data->width, - q_data->bytesperline[VPE_LUMA], &q_data->c_rect, + stride, &q_data->c_rect, vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1, MAX_OUT_HEIGHT_REG1, p_data->channel, flags); } @@ -1074,10 +1083,13 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port) dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; + u32 stride; if (port == VPE_PORT_MV_IN) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; + stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3, + VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? 
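The VPE motion-vector buffers in the hunks above are allocated by the driver itself, so their stride cannot come from the user-visible bytesperline; it is recomputed from the width. A sketch of that arithmetic (mv_stride() is illustrative, not a driver symbol):

/* depth is in bits per pixel, so convert to bytes before aligning */
static inline u32 mv_stride(u32 width, u32 depth_bits)
{
        return ALIGN((width * depth_bits) >> 3, VPDMA_STRIDE_ALIGN);
}

/* e.g. 1920 pixels at a hypothetical 4 bits each: 1920 * 4 / 8 = 960 bytes, already 16-byte aligned */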
p_data->vb_part : 0; @@ -1104,6 +1116,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port) } /* Apply the offset */ dma_addr += offset; + stride = q_data->bytesperline[VPE_LUMA]; if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) { /* @@ -1139,10 +1152,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port) if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12) frame_height /= 2; - vpdma_add_in_dtd(&ctx->desc_list, q_data->width, - q_data->bytesperline[VPE_LUMA], &q_data->c_rect, - vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width, - frame_height, 0, 0); + vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride, + &q_data->c_rect, vpdma_fmt, dma_addr, + p_data->channel, field, flags, frame_width, + frame_height, 0, 0); } /* @@ -1391,9 +1404,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) /* the previous dst mv buffer becomes the next src mv buffer */ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector; - if (ctx->aborting) - goto finished; - s_vb = ctx->src_vbs[0]; d_vb = ctx->dst_vb; @@ -1404,6 +1414,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) d_vb->timecode = s_vb->timecode; d_vb->sequence = ctx->sequence; + s_vb->sequence = ctx->sequence; d_q_data = &ctx->q_data[Q_DATA_DST]; if (d_q_data->flags & Q_IS_INTERLACED) { @@ -1457,6 +1468,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) ctx->src_vbs[0] = NULL; ctx->dst_vb = NULL; + if (ctx->aborting) + goto finished; + ctx->bufs_completed++; if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) { device_run(ctx); @@ -1566,9 +1580,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, unsigned int stride = 0; if (!fmt || !(fmt->types & type)) { - vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n", + vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n", pix->pixelformat); - return -EINVAL; + fmt = __find_format(V4L2_PIX_FMT_YUYV); } if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE @@ -1615,7 +1629,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, &pix->height, MIN_H, MAX_H, H_ALIGN, S_ALIGN); - if (!pix->num_planes) + if (!pix->num_planes || pix->num_planes > 2) pix->num_planes = fmt->coplanar ? 
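Note the policy change in __vpe_try_fmt above: an unknown fourcc no longer returns -EINVAL. TRY_FMT is expected to adjust the request to something supported, so the driver now logs at debug level and substitutes a format guaranteed to be in vpe_formats. Condensed:

/* TRY_FMT adjusts rather than rejects: coerce unknown fourccs */
fmt = find_format(f);
if (!fmt || !(fmt->types & type))
        fmt = __find_format(V4L2_PIX_FMT_YUYV); /* always present in vpe_formats */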
2 : 1; else if (pix->num_planes > 1 && !fmt->coplanar) pix->num_planes = 1; @@ -1654,6 +1668,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, if (stride > plane_fmt->bytesperline) plane_fmt->bytesperline = stride; + plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline, + stride, + VPDMA_MAX_STRIDE); + plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline, VPDMA_STRIDE_ALIGN); @@ -2274,7 +2292,7 @@ static int vpe_open(struct file *file) v4l2_ctrl_handler_setup(hdl); s_q_data = &ctx->q_data[Q_DATA_SRC]; - s_q_data->fmt = &vpe_formats[2]; + s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV); s_q_data->width = 1920; s_q_data->height = 1080; s_q_data->nplanes = 1; @@ -2352,6 +2370,12 @@ static int vpe_release(struct file *file) mutex_lock(&dev->dev_mutex); free_mv_buffers(ctx); + + vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v); + vpdma_free_desc_list(&ctx->desc_list); vpdma_free_desc_buf(&ctx->mmr_adb); diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c index 3c93d9232c3c..b6e39fbd8ad5 100644 --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c @@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = { { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, - { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, + { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV}, { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB}, }; @@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf, case V4L2_PIX_FMT_RGB32: case V4L2_PIX_FMT_XRGB32: case V4L2_PIX_FMT_HSV32: - rf->cr = rf->luma + 1; - rf->cb = rf->cr + 2; - rf->luma += 2; - break; - case V4L2_PIX_FMT_BGR32: - case V4L2_PIX_FMT_XBGR32: - rf->cb = rf->luma; - rf->cr = rf->cb + 2; - rf->luma++; - break; case V4L2_PIX_FMT_ARGB32: rf->alpha = rf->luma; rf->cr = rf->luma + 1; rf->cb = rf->cr + 2; rf->luma += 2; break; + case V4L2_PIX_FMT_BGR32: + case 
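The clamp added above bounds the user-supplied stride on both sides before alignment. Because VPDMA_MAX_STRIDE (65520, i.e. 16 * 4095) is itself a multiple of VPDMA_STRIDE_ALIGN, the final ALIGN() cannot push the result back over the limit. As a sketch, with vpe_fix_stride() a hypothetical helper:

static u32 vpe_fix_stride(u32 requested, u32 min_stride)
{
        u32 stride = clamp_t(u32, requested, min_stride, VPDMA_MAX_STRIDE);

        /* 65520 is 16-byte aligned, so rounding up stays in range */
        return ALIGN(stride, VPDMA_STRIDE_ALIGN);
}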
V4L2_PIX_FMT_XBGR32: case V4L2_PIX_FMT_ABGR32: rf->cb = rf->luma; rf->cr = rf->cb + 2; @@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf, rf->alpha = rf->cr + 1; break; case V4L2_PIX_FMT_BGRX32: - rf->cb = rf->luma + 1; - rf->cr = rf->cb + 2; - rf->luma += 2; - break; case V4L2_PIX_FMT_BGRA32: rf->alpha = rf->luma; rf->cb = rf->luma + 1; @@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf, rf->luma += 2; break; case V4L2_PIX_FMT_RGBX32: - rf->cr = rf->luma; - rf->cb = rf->cr + 2; - rf->luma++; - break; case V4L2_PIX_FMT_RGBA32: rf->alpha = rf->luma + 3; rf->cr = rf->luma; diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 0ee143ae0f6b..82350097503e 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -2139,6 +2139,9 @@ static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev) v4l2_m2m_release(dev->stateful_enc.m2m_dev); v4l2_m2m_release(dev->stateful_dec.m2m_dev); v4l2_m2m_release(dev->stateless_dec.m2m_dev); +#ifdef CONFIG_MEDIA_CONTROLLER + media_device_cleanup(&dev->mdev); +#endif kfree(dev); } @@ -2250,7 +2253,6 @@ static int vicodec_remove(struct platform_device *pdev) v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev); v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev); v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev); - media_device_cleanup(&dev->mdev); #endif video_unregister_device(&dev->stateful_enc.vfd); diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index acd3bd48c7e2..8d6b09623d88 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -1073,6 +1073,9 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count) if (!q_data) return -EINVAL; + if (V4L2_TYPE_IS_OUTPUT(q->type)) + ctx->aborting = 0; + q_data->sequence = 0; return 0; } @@ -1272,6 +1275,9 @@ static void vim2m_device_release(struct video_device *vdev) v4l2_device_unregister(&dev->v4l2_dev); v4l2_m2m_release(dev->m2m_dev); +#ifdef CONFIG_MEDIA_CONTROLLER + media_device_cleanup(&dev->mdev); +#endif kfree(dev); } @@ -1343,6 +1349,7 @@ static int vim2m_probe(struct platform_device *pdev) if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); + dev->m2m_dev = NULL; goto error_dev; } @@ -1395,7 +1402,6 @@ static int vim2m_remove(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); v4l2_m2m_unregister_media_controller(dev->m2m_dev); - media_device_cleanup(&dev->mdev); #endif video_unregister_device(&dev->vfd); diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c index 7e1ae0b12f1e..a3120f4f7a90 100644 --- a/drivers/media/platform/vimc/vimc-common.c +++ b/drivers/media/platform/vimc/vimc-common.c @@ -375,7 +375,7 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved, { int ret; - /* Allocate the pads */ + /* Allocate the pads. 
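The vimc comment being extended here states an ownership rule that the hunks below implement: pads allocated in vimc_ent_sd_register() are freed in each entity's subdev release op, after the last reference is dropped, not in vimc_ent_sd_unregister(). The shape of each release op, with xxx_ standing in for deb/sca/sen:

static void xxx_release(struct v4l2_subdev *sd)
{
        struct xxx_device *xdev = container_of(sd, struct xxx_device, sd);

        vimc_pads_cleanup(xdev->ved.pads);      /* moved out of unregister */
        kfree(xdev);
}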
Should be released from the sd_int_op release */ ved->pads = vimc_pads_init(num_pads, pads_flag); if (IS_ERR(ved->pads)) return PTR_ERR(ved->pads); @@ -424,7 +424,6 @@ EXPORT_SYMBOL_GPL(vimc_ent_sd_register); void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd) { media_entity_cleanup(ved->ent); - vimc_pads_cleanup(ved->pads); v4l2_device_unregister_subdev(sd); } EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister); diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index b72b8385067b..baafd9d7fb2c 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -484,6 +484,7 @@ static void vimc_deb_release(struct v4l2_subdev *sd) struct vimc_deb_device *vdeb = container_of(sd, struct vimc_deb_device, sd); + vimc_pads_cleanup(vdeb->ved.pads); kfree(vdeb); } diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index 49ab8d9dd9c9..c0d9f43d5777 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -343,6 +343,7 @@ static void vimc_sca_release(struct v4l2_subdev *sd) struct vimc_sca_device *vsca = container_of(sd, struct vimc_sca_device, sd); + vimc_pads_cleanup(vsca->ved.pads); kfree(vsca); } diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index 6c53b9fc1617..420573e5f6d6 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -25,7 +25,6 @@ struct vimc_sen_device { struct v4l2_subdev sd; struct device *dev; struct tpg_data tpg; - struct task_struct *kthread_sen; u8 *frame; /* The active format */ struct v4l2_mbus_framefmt mbus_format; @@ -208,10 +207,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) const struct vimc_pix_map *vpix; unsigned int frame_size; - if (vsen->kthread_sen) - /* tpg is already executing */ - return 0; - /* Calculate the frame size */ vpix = vimc_pix_map_by_code(vsen->mbus_format.code); frame_size = vsen->mbus_format.width * vpix->bpp * @@ -297,6 +292,7 @@ static void vimc_sen_release(struct v4l2_subdev *sd) v4l2_ctrl_handler_free(&vsen->hdl); tpg_free(&vsen->tpg); + vimc_pads_cleanup(vsen->ved.pads); kfree(vsen); } diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index 53315c8dd2bb..f6a5cdbd74e7 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -616,6 +616,9 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev) vivid_free_controls(dev); v4l2_device_unregister(&dev->v4l2_dev); +#ifdef CONFIG_MEDIA_CONTROLLER + media_device_cleanup(&dev->mdev); +#endif vfree(dev->scaled_line); vfree(dev->blended_line); vfree(dev->edid); @@ -1580,7 +1583,6 @@ static int vivid_remove(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); - media_device_cleanup(&dev->mdev); #endif if (dev->has_vid_cap) { diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index 003319d7816d..31f78d6a05a4 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -796,7 +796,11 @@ static int vivid_thread_vid_cap(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if 
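The trylock conversion above pairs with the change in vivid_stop_generating_vid_cap() below, where the unlock/relock dance around kthread_stop() is removed: the stop path now holds dev->mutex while waiting for the thread, so the thread must never block on that mutex. The resulting loop shape, as a sketch:

for (;;) {
        if (kthread_should_stop())
                break;

        if (!mutex_trylock(&dev->mutex)) {
                /* owner may be sleeping in kthread_stop(); yield a tick */
                schedule_timeout_uninterruptible(1);
                continue;
        }

        /* ... produce one frame ... */
        mutex_unlock(&dev->mutex);
}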
(dev->cap_seq_resync) { dev->jiffies_vid_cap = cur_jiffies; @@ -956,8 +960,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) /* shutdown control thread */ vivid_grab_controls(dev, false); - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_cap); dev->kthread_vid_cap = NULL; - mutex_lock(&dev->mutex); } diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c index ce5bcda2348c..1e165a6a2207 100644 --- a/drivers/media/platform/vivid/vivid-kthread-out.c +++ b/drivers/media/platform/vivid/vivid-kthread-out.c @@ -143,7 +143,11 @@ static int vivid_thread_vid_out(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->out_seq_resync) { dev->jiffies_vid_out = cur_jiffies; @@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) /* shutdown control thread */ vivid_grab_controls(dev, false); - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_out); dev->kthread_vid_out = NULL; - mutex_lock(&dev->mutex); } diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c index 9acc709b0740..2b7522e16efc 100644 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c @@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->sdr_cap_seq_resync) { dev->jiffies_sdr_cap = cur_jiffies; @@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) } /* shutdown control thread */ - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_sdr_cap); dev->kthread_sdr_cap = NULL; - mutex_lock(&dev->mutex); } static void sdr_cap_buf_request_complete(struct vb2_buffer *vb) diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index 8cbaa0c998ed..2d030732feac 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) if (vb2_is_streaming(&dev->vb_vid_out_q)) dev->can_loop_video = vivid_vid_can_loop(dev); - if (dev->kthread_vid_cap) - return 0; - dev->vid_cap_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); for (i = 0; i < VIDEO_MAX_FRAME; i++) diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index 148b663a6075..a0364ac497f9 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) if (vb2_is_streaming(&dev->vb_vid_cap_q)) dev->can_loop_video = vivid_vid_can_loop(dev); - if (dev->kthread_vid_out) - return 0; - dev->vid_out_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); if (dev->start_streaming_error) { diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index 104ac41c6f96..112376873167 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1148,8 +1148,7 @@ static int wl1273_fm_fops_release(struct file *file) if (radio->rds_users > 0) { radio->rds_users--; if (radio->rds_users == 0) { - if 
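The wl1273 hunk that continues below makes the same correction as the bdisp release fix earlier: a file release handler cannot be restarted, so taking the lock with mutex_lock_interruptible() and returning on a pending signal would skip the teardown and leak the open count. Sketch of the rule, with xxx_ names hypothetical:

static int xxx_release(struct file *file)
{
        struct xxx_device *dev = video_drvdata(file);

        mutex_lock(&dev->lock); /* never *_interruptible() in release paths */
        /* ... tear down per-open state ... */
        mutex_unlock(&dev->lock);

        return 0;
}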
(mutex_lock_interruptible(&core->lock)) - return -EINTR; + mutex_lock(&core->lock); radio->irq_flags &= ~WL1273_RDS_EVENT; diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 7541698a0be1..f491420d7b53 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -482,6 +482,8 @@ static int si470x_i2c_remove(struct i2c_client *client) if (radio->gpio_reset) gpiod_set_value(radio->gpio_reset, 0); + v4l2_ctrl_handler_free(&radio->hdl); + v4l2_device_unregister(&radio->v4l2_dev); return 0; } diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index 872d6441e512..a7deca1fefb7 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -413,7 +413,7 @@ static int iguanair_probe(struct usb_interface *intf, int ret, pipein, pipeout; struct usb_host_interface *idesc; - idesc = intf->altsetting; + idesc = intf->cur_altsetting; if (idesc->desc.bNumEndpoints < 2) return -ENODEV; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 37a850421fbb..c683a244b9fa 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1598,8 +1598,7 @@ static void imon_incoming_packet(struct imon_context *ictx, spin_unlock_irqrestore(&ictx->kc_lock, flags); /* send touchscreen events through input subsystem if touchpad data */ - if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && - buf[7] == 0x86) { + if (ictx->touch && len == 8 && buf[7] == 0x86) { imon_touch_event(ictx, buf); return; diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 3fc9829a9233..f9616158bcf4 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd) datasize = 4; break; case MCE_CMD_G_REVISION: - datasize = 2; + datasize = 4; break; case MCE_RSP_EQWAKESUPPORT: case MCE_RSP_GETWAKESOURCE: @@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, char *inout; u8 cmd, subcmd, *data; struct device *dev = ir->dev; - int start, skip = 0; u32 carrier, period; - /* skip meaningless 0xb1 0x60 header bytes on orig receiver */ - if (ir->flags.microsoft_gen1 && !out && !offset) - skip = 2; - - if (len <= skip) + if (offset < 0 || offset >= buf_len) return; dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", @@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, inout = out ? "Request" : "Got"; - start = offset + skip; - cmd = buf[start] & 0xff; - subcmd = buf[start + 1] & 0xff; - data = buf + start + 2; + cmd = buf[offset]; + subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0; + data = &buf[offset] + 2; + /* Trace meaningless 0xb1 0x60 header bytes on original receiver */ + if (ir->flags.microsoft_gen1 && !out && !offset) { + dev_dbg(dev, "MCE gen 1 header"); + return; + } + + /* Trace IR data header or trailer */ + if (cmd != MCE_CMD_PORT_IR && + (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) { + if (cmd == MCE_IRDATA_TRAILER) + dev_dbg(dev, "End of raw IR data"); + else + dev_dbg(dev, "Raw IR data, %d pulse/space samples", + cmd & MCE_PACKET_LENGTH_MASK); + return; + } + + /* Unexpected end of buffer? 
*/ + if (offset + len > buf_len) + return; + + /* Decode MCE command/response */ switch (cmd) { case MCE_CMD_NULL: if (subcmd == MCE_CMD_NULL) @@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, dev_dbg(dev, "Get hw/sw rev?"); else dev_dbg(dev, "hw/sw rev %*ph", - 4, &buf[start + 2]); + 4, &buf[offset + 2]); break; case MCE_CMD_RESUME: dev_dbg(dev, "Device resume requested"); @@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, default: break; } - - if (cmd == MCE_IRDATA_TRAILER) - dev_dbg(dev, "End of raw IR data"); - else if ((cmd != MCE_CMD_PORT_IR) && - ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA)) - dev_dbg(dev, "Raw IR data, %d pulse/space samples", - cmd & MCE_PACKET_LENGTH_MASK); #endif } @@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) } /* + * Handle PORT_SYS/IR command response received from the MCE device. + * + * Assumes single response with all its data (not truncated) + * in buf_in[]. The response itself determines its total length + * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[]. + * * We don't do anything but print debug spew for many of the command bits * we receive from the hardware, but some of them are useful information * we want to store so that we can use them. */ -static void mceusb_handle_command(struct mceusb_dev *ir, int index) +static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in) { + u8 cmd = buf_in[0]; + u8 subcmd = buf_in[1]; + u8 *hi = &buf_in[2]; /* read only when required */ + u8 *lo = &buf_in[3]; /* read only when required */ struct ir_raw_event rawir = {}; - u8 hi = ir->buf_in[index + 1] & 0xff; - u8 lo = ir->buf_in[index + 2] & 0xff; u32 carrier_cycles; u32 cycles_fix; - switch (ir->buf_in[index]) { - /* the one and only 5-byte return value command */ - case MCE_RSP_GETPORTSTATUS: - if ((ir->buf_in[index + 4] & 0xff) == 0x00) - ir->txports_cabled |= 1 << hi; - break; + if (cmd == MCE_CMD_PORT_SYS) { + switch (subcmd) { + /* the one and only 5-byte return value command */ + case MCE_RSP_GETPORTSTATUS: + if (buf_in[5] == 0) + ir->txports_cabled |= 1 << *hi; + break; + /* 1-byte return value commands */ + case MCE_RSP_EQEMVER: + ir->emver = *hi; + break; + + /* No return value commands */ + case MCE_RSP_CMD_ILLEGAL: + ir->need_reset = true; + break; + + default: + break; + } + + return; + } + + if (cmd != MCE_CMD_PORT_IR) + return; + + switch (subcmd) { /* 2-byte return value commands */ case MCE_RSP_EQIRTIMEOUT: - ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT); + ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT); break; case MCE_RSP_EQIRNUMPORTS: - ir->num_txports = hi; - ir->num_rxports = lo; + ir->num_txports = *hi; + ir->num_rxports = *lo; break; case MCE_RSP_EQIRRXCFCNT: /* @@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) */ if (ir->carrier_report_enabled && ir->learning_active && ir->pulse_tunit > 0) { - carrier_cycles = (hi << 8 | lo); + carrier_cycles = (*hi << 8 | *lo); /* * Adjust carrier cycle count by adding * 1 missed count per pulse "on" @@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) break; /* 1-byte return value commands */ - case MCE_RSP_EQEMVER: - ir->emver = hi; - break; case MCE_RSP_EQIRTXPORTS: - ir->tx_mask = hi; + ir->tx_mask = *hi; break; case MCE_RSP_EQIRRXPORTEN: - ir->learning_active = ((hi & 0x02) == 0x02); - if (ir->rxports_active != hi) { + 
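The new header comment above pins down the contract: mceusb_handle_command() may index up to buf_in[mceusb_cmd_datasize() + 1], so its caller has to prove that many bytes really sit in the current rx buffer. The guard added in the parser below, condensed with the index math spelled out:

/* cmd byte at i - 1, subcmd at i, then ir->rem data bytes */
if (i + ir->rem < buf_len)
        mceusb_handle_command(ir, &ir->buf_in[i - 1]);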
ir->learning_active = ((*hi & 0x02) == 0x02); + if (ir->rxports_active != *hi) { dev_info(ir->dev, "%s-range (0x%x) receiver active", - ir->learning_active ? "short" : "long", hi); - ir->rxports_active = hi; + ir->learning_active ? "short" : "long", *hi); + ir->rxports_active = *hi; } break; + + /* No return value commands */ case MCE_RSP_CMD_ILLEGAL: case MCE_RSP_TX_TIMEOUT: ir->need_reset = true; break; + + default: break; } @@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]); mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1, ir->rem + 2, false); - mceusb_handle_command(ir, i); + if (i + ir->rem < buf_len) + mceusb_handle_command(ir, &ir->buf_in[i - 1]); ir->parser_state = CMD_DATA; break; case PARSE_IRDATA: @@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) ir->rem--; break; case CMD_HEADER: - /* decode mce packets of the form (84),AA,BB,CC,DD */ - /* IR data packets can span USB messages - rem */ ir->cmd = ir->buf_in[i]; if ((ir->cmd == MCE_CMD_PORT_IR) || ((ir->cmd & MCE_PORT_MASK) != MCE_COMMAND_IRDATA)) { + /* + * got PORT_SYS, PORT_IR, or unknown + * command response prefix + */ ir->parser_state = SUBCMD; continue; } + /* + * got IR data prefix (0x80 + num_bytes) + * decode MCE packets of the form {0x83, AA, BB, CC} + * IR data packets can span USB messages + */ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK); mceusb_dev_printdata(ir, ir->buf_in, buf_len, i, ir->rem + 1, false); @@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) if (ir->parser_state != CMD_HEADER && !ir->rem) ir->parser_state = CMD_HEADER; } + + /* + * Accept IR data spanning multiple rx buffers. + * Reject MCE command response spanning multiple rx buffers. + */ + if (ir->parser_state != PARSE_IRDATA || !ir->rem) + ir->parser_state = CMD_HEADER; + if (event) { dev_dbg(ir->dev, "processed IR data"); ir_raw_event_handle(ir->rc); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 13da4c5c7d17..6f80c251f641 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev) set_bit(MSC_SCAN, dev->input_dev->mscbit); /* Pointer/mouse events */ + set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit); set_bit(EV_REL, dev->input_dev->evbit); set_bit(REL_X, dev->input_dev->relbit); set_bit(REL_Y, dev->input_dev->relbit); @@ -1890,23 +1891,28 @@ int rc_register_device(struct rc_dev *dev) dev->registered = true; - if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { - rc = rc_setup_rx_device(dev); - if (rc) - goto out_dev; - } - - /* Ensure that the lirc kfifo is setup before we start the thread */ + /* + * once the input device is registered in rc_setup_rx_device, + * userspace can open the input device and rc_open() will be called + * as a result. This results in driver code being allowed to submit + * keycodes with rc_keydown, so lirc must be registered first.
+ */ if (dev->allowed_protocols != RC_PROTO_BIT_CEC) { rc = ir_lirc_register(dev); if (rc < 0) - goto out_rx; + goto out_dev; + } + + if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { + rc = rc_setup_rx_device(dev); + if (rc) + goto out_lirc; } if (dev->driver_type == RC_DRIVER_IR_RAW) { rc = ir_raw_event_register(dev); if (rc < 0) - goto out_lirc; + goto out_rx; } dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor, @@ -1914,11 +1920,11 @@ int rc_register_device(struct rc_dev *dev) return 0; +out_rx: + rc_free_rx_device(dev); out_lirc: if (dev->allowed_protocols != RC_PROTO_BIT_CEC) ir_lirc_unregister(dev); -out_rx: - rc_free_rx_device(dev); out_dev: device_del(&dev->dev); out_rx_free: diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 1826ff825c2e..198ddfb8d2b1 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, mutex_unlock(&fc_usb->data_mutex); - return 0; + return ret; } /* actual bus specific access functions, @@ -504,7 +504,16 @@ urb_error: static int flexcop_usb_init(struct flexcop_usb *fc_usb) { /* use the alternate setting with the largest buffer */ - usb_set_interface(fc_usb->udev,0,1); + int ret = usb_set_interface(fc_usb->udev, 0, 1); + + if (ret) { + err("set interface failed."); + return ret; + } + + if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + switch (fc_usb->udev->speed) { case USB_SPEED_LOW: err("cannot handle USB speed because it is too slow."); diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c index 617a306f6815..dc380c0c9536 100644 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c @@ -792,6 +792,9 @@ static const struct usb_device_id dvbsky_id_table[] = { { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C, &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C", RC_MAP_TOTAL_MEDIA_IN_HAND_02) }, + { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C_LITE, + &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C Lite", + NULL) }, { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C2, &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C v2", RC_MAP_TOTAL_MEDIA_IN_HAND_02) }, diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index 02697d86e8c1..89b4b5d84cdf 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c @@ -554,7 +554,7 @@ static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply, u8 *buf, int size) { u16 checksum; - int act_len, i, ret; + int act_len = 0, i, ret; memset(buf, 0, size); buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); @@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev, else if (reply == 0x02) *cold = 0; else - return -EIO; - deb_info("Identify state cold = %d\n", *cold); + ret = -EIO; + if (!ret) + deb_info("Identify state cold = %d\n", *cold); err: kfree(buf); diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index f02fa0a67aa4..fac19ec46089 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d) { u8 ircode[4]; - cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); + if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0) + return 0; if (ircode[2] || ircode[3]) rc_keydown(d->rc_dev, RC_PROTO_NEC,
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index e53c58ab6488..ef62dd6c5ae4 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -818,7 +818,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */ - if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1) + if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1) return -ENODEV; purb = usb_alloc_urb(0, GFP_KERNEL); @@ -838,7 +838,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) * Some devices like the Hauppauge NovaTD model 52009 use an interrupt * endpoint, while others use a bulk one. */ - e = &intf->altsetting[0].endpoint[rc_ep].desc; + e = &intf->cur_altsetting->endpoint[rc_ep].desc; if (usb_endpoint_dir_in(e)) { if (usb_endpoint_xfer_bulk(e)) { pipe = usb_rcvbulkpipe(d->udev, rc_ep); diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c index dd5bb230cec1..99a39339d45d 100644 --- a/drivers/media/usb/dvb-usb/digitv.c +++ b/drivers/media/usb/dvb-usb/digitv.c @@ -230,18 +230,22 @@ static struct rc_map_table rc_map_digitv_table[] = { static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { - int i; + int ret, i; u8 key[5]; u8 b[4] = { 0 }; *event = 0; *state = REMOTE_NO_KEY_PRESSED; - digitv_ctrl_msg(d,USB_READ_REMOTE,0,NULL,0,&key[1],4); + ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, &key[1], 4); + if (ret) + return ret; /* Tell the device we've read the remote. Not sure how necessary this is, but the Nebula SDK does it. */ - digitv_ctrl_msg(d,USB_WRITE_REMOTE,0,b,4,NULL,0); + ret = digitv_ctrl_msg(d, USB_WRITE_REMOTE, 0, b, 4, NULL, 0); + if (ret) + return ret; /* if something is inside the buffer, simulate key press */ if (key[1] != 0) diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c index c1b4e94a37f8..2aabf90d8697 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c @@ -12,7 +12,7 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen, int delay_ms) { - int actlen,ret = -ENOMEM; + int actlen = 0, ret = -ENOMEM; if (!d || wbuf == NULL || wlen == 0) return -EINVAL; diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c index 80c1cf05384b..2baf57216d19 100644 --- a/drivers/media/usb/dvb-usb/vp7045.c +++ b/drivers/media/usb/dvb-usb/vp7045.c @@ -96,10 +96,14 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff) static int vp7045_rc_query(struct dvb_usb_device *d) { + int ret; u8 key; - vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20); - deb_rc("remote query key: %x %d\n",key,key); + ret = vp7045_usb_op(d, RC_VAL_READ, NULL, 0, &key, 1, 20); + if (ret) + return ret; + + deb_rc("remote query key: %x\n", key); if (key != 0x44) { /* @@ -115,15 +119,18 @@ static int vp7045_rc_query(struct dvb_usb_device *d) static int vp7045_read_eeprom(struct dvb_usb_device *d,u8 *buf, int len, int offset) { - int i = 0; - u8 v,br[2]; + int i, ret; + u8 v, br[2]; for (i=0; i < len; i++) { v = offset + i; - vp7045_usb_op(d,GET_EE_VALUE,&v,1,br,2,5); + ret = vp7045_usb_op(d, GET_EE_VALUE, &v, 1, br, 2, 5); + if (ret) + return ret; + buf[i] = br[1]; } - deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ",offset, i); - debug_dump(buf,i,deb_info); + deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ", 
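The vp7045 hunks above, like the digitv and af9005 ones before them, are all one fix: check the USB transfer result before trusting the receive buffer, and zero-initialise out-parameters such as act_len so a short transfer cannot leave stack garbage behind. The RC poll reduced to the pattern:

int ret;
u8 key = 0;

ret = vp7045_usb_op(d, RC_VAL_READ, NULL, 0, &key, 1, 20);
if (ret)
        return ret;     /* never decode a key that was not read */

deb_rc("remote query key: %x\n", key);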
offset, i); + debug_dump(buf, i, deb_info); return 0; } diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 4add2b12d330..c1b307bbe540 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -1461,7 +1461,7 @@ int gspca_dev_probe2(struct usb_interface *intf, pr_err("couldn't kzalloc gspca struct\n"); return -ENOMEM; } - gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL); + gspca_dev->usb_buf = kzalloc(USB_BUF_SZ, GFP_KERNEL); if (!gspca_dev->usb_buf) { pr_err("out of memory\n"); ret = -ENOMEM; diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index f417dfc0b872..0afe70a3f9a2 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -3477,6 +3477,11 @@ static void ov511_mode_init_regs(struct sd *sd) return; } + if (alt->desc.bNumEndpoints < 1) { + sd->gspca_dev.usb_err = -ENODEV; + return; + } + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5); @@ -3603,6 +3608,11 @@ static void ov518_mode_init_regs(struct sd *sd) return; } + if (alt->desc.bNumEndpoints < 1) { + sd->gspca_dev.usb_err = -ENODEV; + return; + } + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2); diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 79653d409951..95673fc0a99c 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -282,6 +282,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev) return -EIO; } + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size); if (err < 0) @@ -306,11 +309,21 @@ out: static int stv06xx_isoc_init(struct gspca_dev *gspca_dev) { + struct usb_interface_cache *intfc; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; + intfc = gspca_dev->dev->actconfig->intf_cache[0]; + + if (intfc->num_altsetting < 2) + return -ENODEV; + + alt = &intfc->altsetting[1]; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]); @@ -323,6 +336,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev) struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; + /* + * Existence of altsetting and endpoint was verified in + * stv06xx_isoc_init() + */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode]; diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c index 6d1007715ff7..ae382b3b5f7f 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c @@ -185,6 +185,10 @@ static int pb0100_start(struct sd *sd) alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) return -ENODEV; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); /* If we don't have enough bandwidth use a lower framerate */ diff --git a/drivers/media/usb/gspca/xirlink_cit.c 
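Every gspca hunk above, and the xirlink_cit one that follows, is the same hardening: endpoint counts come from device-supplied descriptors, so alt->endpoint[0] (or intf_cache altsetting[1]) may simply not exist on a malicious or broken device. The canonical check before touching wMaxPacketSize:

alt = usb_altnum_to_altsetting(intf, gspca_dev->alt);
if (!alt)
        return -ENODEV;

if (alt->desc.bNumEndpoints < 1)        /* nothing to read a size from */
        return -ENODEV;

packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);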
b/drivers/media/usb/gspca/xirlink_cit.c index 934a90bd78c2..c579b100f066 100644 --- a/drivers/media/usb/gspca/xirlink_cit.c +++ b/drivers/media/usb/gspca/xirlink_cit.c @@ -1442,6 +1442,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev) return -EIO; } + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); } @@ -2626,6 +2629,7 @@ static int sd_start(struct gspca_dev *gspca_dev) static int sd_isoc_init(struct gspca_dev *gspca_dev) { + struct usb_interface_cache *intfc; struct usb_host_interface *alt; int max_packet_size; @@ -2641,8 +2645,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) break; } + intfc = gspca_dev->dev->actconfig->intf_cache[0]; + + if (intfc->num_altsetting < 2) + return -ENODEV; + + alt = &intfc->altsetting[1]; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size); return 0; @@ -2665,6 +2678,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev) break; } + /* + * Existence of altsetting and endpoint was verified in sd_isoc_init() + */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); if (packet_size <= min_packet_size) diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index ac88ade94cda..59609556d969 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -116,6 +116,7 @@ struct pulse8 { unsigned int vers; struct completion cmd_done; struct work_struct work; + u8 work_result; struct delayed_work ping_eeprom_work; struct cec_msg rx_msg; u8 data[DATA_SIZE]; @@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work) { struct pulse8 *pulse8 = container_of(work, struct pulse8, work); + u8 result = pulse8->work_result; - switch (pulse8->data[0] & 0x3f) { + pulse8->work_result = 0; + switch (result & 0x3f) { case MSGCODE_FRAME_DATA: cec_received_msg(pulse8->adap, &pulse8->rx_msg); break; @@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, pulse8->escape = false; } else if (data == MSGEND) { struct cec_msg *msg = &pulse8->rx_msg; + u8 msgcode = pulse8->buf[0]; if (debug) dev_info(pulse8->dev, "received: %*ph\n", pulse8->idx, pulse8->buf); - pulse8->data[0] = pulse8->buf[0]; - switch (pulse8->buf[0] & 0x3f) { + switch (msgcode & 0x3f) { case MSGCODE_FRAME_START: msg->len = 1; msg->msg[0] = pulse8->buf[1]; @@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, if (msg->len == CEC_MAX_MSG_SIZE) break; msg->msg[msg->len++] = pulse8->buf[1]; - if (pulse8->buf[0] & MSGCODE_FRAME_EOM) + if (msgcode & MSGCODE_FRAME_EOM) { + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); + break; + } break; case MSGCODE_TRANSMIT_SUCCEEDED: case MSGCODE_TRANSMIT_FAILED_LINE: case MSGCODE_TRANSMIT_FAILED_ACK: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); break; case MSGCODE_HIGH_ERROR: diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index a34717eba409..eaa08c7999d4 100644 --- 
a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -898,8 +898,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp) pvr2_v4l2_dev_disassociate_parent(vp->dev_video); pvr2_v4l2_dev_disassociate_parent(vp->dev_radio); if (!list_empty(&vp->dev_video->devbase.fh_list) || - !list_empty(&vp->dev_radio->devbase.fh_list)) + (vp->dev_radio && + !list_empty(&vp->dev_radio->devbase.fh_list))) { + pvr2_trace(PVR2_TRACE_STRUCT, + "pvr2_v4l2 internal_check exit-empty id=%p", vp); return; + } pvr2_v4l2_destroy_no_lock(vp); } @@ -935,7 +939,8 @@ static int pvr2_v4l2_release(struct file *file) kfree(fhp); if (vp->channel.mc_head->disconnect_flag && list_empty(&vp->dev_video->devbase.fh_list) && - list_empty(&vp->dev_radio->devbase.fh_list)) { + (!vp->dev_radio || + list_empty(&vp->dev_radio->devbase.fh_list))) { pvr2_v4l2_destroy_no_lock(vp); } return 0; diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index 5095c380b2c1..ee9c656d121f 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, index, NULL, 0, 0); + value, index, NULL, 0, USB_CTRL_GET_TIMEOUT); if (ret < 0) return ret; } diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 3d9284a09ee5..b249f037900c 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -800,7 +800,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) ret = usb_control_msg(usbtv->udev, usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, USBTV_BASE + 0x0244, (void *)data, 3, 0); + 0, USBTV_BASE + 0x0244, (void *)data, 3, + USB_CTRL_GET_TIMEOUT); if (ret < 0) goto error; } @@ -851,7 +852,7 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, index, (void *)data, size, 0); + 0, index, (void *)data, size, USB_CTRL_SET_TIMEOUT); error: if (ret < 0) diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index cdc66adda755..93d36aab824f 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file) if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; + if (usbvision->remove_pending) { + err_code = -ENODEV; + goto unlock; + } if (usbvision->user) { err_code = -EBUSY; } else { @@ -377,6 +381,7 @@ unlock: static int usbvision_v4l2_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); + int r; PDEBUG(DBG_IO, "close"); @@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file) usbvision_scratch_free(usbvision); usbvision->user--; + r = usbvision->remove_pending; mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->remove_pending) { + if (r) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); return 0; @@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv, { struct usb_usbvision *usbvision = video_drvdata(file); + if (!usbvision->dev) + return -ENODEV; + strscpy(vc->driver, "USBVision", 
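The local variables r and u introduced in the usbvision hunks here are snapshots: remove_pending and user are only stable while v4l2_lock is held, so each is copied under the lock and every decision taken after the unlock uses the copy. Reduced to the pattern:

int r;

mutex_lock(&usbvision->v4l2_lock);
/* ... drop this open's resources ... */
r = usbvision->remove_pending;  /* snapshot while still locked */
mutex_unlock(&usbvision->v4l2_lock);

if (r)  /* disconnect raced with us: finish the deferred teardown */
        usbvision_release(usbvision);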
sizeof(vc->driver)); strscpy(vc->card, usbvision_device_data[usbvision->dev_model].model_string, @@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file) if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; + + if (usbvision->remove_pending) { + err_code = -ENODEV; + goto out; + } err_code = v4l2_fh_open(file); if (err_code) goto out; @@ -1093,21 +1107,24 @@ out: static int usbvision_radio_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); + int r; PDEBUG(DBG_IO, ""); mutex_lock(&usbvision->v4l2_lock); /* Set packet size to 0 */ usbvision->iface_alt = 0; - usb_set_interface(usbvision->dev, usbvision->iface, - usbvision->iface_alt); + if (usbvision->dev) + usb_set_interface(usbvision->dev, usbvision->iface, + usbvision->iface_alt); usbvision_audio_off(usbvision); usbvision->radio = 0; usbvision->user--; + r = usbvision->remove_pending; mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->remove_pending) { + if (r) { printk(KERN_INFO "%s: Final disconnect\n", __func__); v4l2_fh_release(file); usbvision_release(usbvision); @@ -1539,6 +1556,7 @@ err_usb: static void usbvision_disconnect(struct usb_interface *intf) { struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); + int u; PDEBUG(DBG_PROBE, ""); @@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf) v4l2_device_disconnect(&usbvision->v4l2_dev); usbvision_i2c_unregister(usbvision); usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ + u = usbvision->user; usb_put_dev(usbvision->dev); usbvision->dev = NULL; /* USB device is no more */ mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->user) { + if (u) { printk(KERN_INFO "%s: In use, disconnect pending\n", __func__); wake_up_interruptible(&usbvision->wait_frame); diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 66ee168ddc7e..99883550375e 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -497,6 +497,22 @@ static int uvc_parse_format(struct uvc_device *dev, } } + /* Some devices report bpp that doesn't match the format. */ + if (dev->quirks & UVC_QUIRK_FORCE_BPP) { + const struct v4l2_format_info *info = + v4l2_format_info(format->fcc); + + if (info) { + unsigned int div = info->hdiv * info->vdiv; + + n = info->bpp[0] * div; + for (i = 1; i < info->comp_planes; i++) + n += info->bpp[i]; + + format->bpp = DIV_ROUND_UP(8 * n, div); + } + } + if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) { ftype = UVC_VS_FRAME_UNCOMPRESSED; } else { @@ -1493,6 +1509,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain, break; if (forward == prev) continue; + if (forward->chain.next || forward->chain.prev) { + uvc_trace(UVC_TRACE_DESCR, "Found reference to " + "entity %d already in chain.\n", forward->id); + return -EINVAL; + } switch (UVC_ENTITY_TYPE(forward)) { case UVC_VC_EXTENSION_UNIT: @@ -1574,6 +1595,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain, return -1; } + if (term->chain.next || term->chain.prev) { + uvc_trace(UVC_TRACE_DESCR, "Found reference to " + "entity %d already in chain.\n", + term->id); + return -EINVAL; + } + if (uvc_trace_param & UVC_TRACE_PROBE) printk(KERN_CONT " %d", term->id); @@ -2151,6 +2179,20 @@ static int uvc_probe(struct usb_interface *intf, sizeof(dev->name) - len); } + /* Initialize the media device. 
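A worked value makes the FORCE_BPP arithmetic above concrete. For a 4:2:0 format such as NV12, v4l2_format_info() reports hdiv = vdiv = 2 and per-component bpp of {1, 2}; the quirk then recovers the familiar 12 bits per pixel (illustration only, not driver code):

const struct v4l2_format_info *info = v4l2_format_info(V4L2_PIX_FMT_NV12);
unsigned int div = info->hdiv * info->vdiv;     /* 2 * 2 = 4 */
unsigned int n = info->bpp[0] * div;            /* 1 * 4 = 4 */
unsigned int i;

for (i = 1; i < info->comp_planes; i++)
        n += info->bpp[i];                      /* 4 + 2 = 6 */

/* DIV_ROUND_UP(8 * 6, 4) == 12, the expected bpp for NV12 */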
*/ +#ifdef CONFIG_MEDIA_CONTROLLER + dev->mdev.dev = &intf->dev; + strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); + if (udev->serial) + strscpy(dev->mdev.serial, udev->serial, + sizeof(dev->mdev.serial)); + usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); + dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); + media_device_init(&dev->mdev); + + dev->vdev.mdev = &dev->mdev; +#endif + /* Parse the Video Class control descriptor. */ if (uvc_parse_control(dev) < 0) { uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC " @@ -2171,19 +2213,7 @@ static int uvc_probe(struct usb_interface *intf, "linux-uvc-devel mailing list.\n"); } - /* Initialize the media device and register the V4L2 device. */ -#ifdef CONFIG_MEDIA_CONTROLLER - dev->mdev.dev = &intf->dev; - strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); - if (udev->serial) - strscpy(dev->mdev.serial, udev->serial, - sizeof(dev->mdev.serial)); - usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); - dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - media_device_init(&dev->mdev); - - dev->vdev.mdev = &dev->mdev; -#endif + /* Register the V4L2 device. */ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) goto error; @@ -2860,6 +2890,15 @@ static const struct usb_device_id uvc_ids[] = { .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 }, + /* GEO Semiconductor GC6500 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x29fe, + .idProduct = 0x4d53, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) }, /* Intel RealSense D4M */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index c7c1baa90dea..24e3d8c647e7 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -198,6 +198,7 @@ #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400 #define UVC_QUIRK_FORCE_Y8 0x00000800 +#define UVC_QUIRK_FORCE_BPP 0x00001000 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index e1eaf1135c7f..7ad6db8dd9f6 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -1183,36 +1183,38 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar u32 aux_space; int compatible_arg = 1; long err = 0; + unsigned int ncmd; /* * 1. When struct size is different, converts the command. 
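The point of the mechanical *32 renames that follow: previously the translated native value was written back into cmd, so the later conversion switches matched native constants and could not tell a translated command from one passed natively by a 32-bit process. Keeping the original in cmd and dispatching on ncmd separates the two command namespaces. In miniature:

unsigned int ncmd;

switch (cmd) {
case VIDIOC_G_FMT32:
        ncmd = VIDIOC_G_FMT;    /* translated value, used only for dispatch */
        break;
default:
        ncmd = cmd;
        break;
}

/* conversion switches keep matching the 32-bit cmd ... */
err = native_ioctl(file, ncmd, (unsigned long)new_p64);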
*/ switch (cmd) { - case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break; - case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break; - case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break; - case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break; - case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break; - case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break; - case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break; - case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break; - case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break; - case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break; - case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break; - case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break; - case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break; - case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break; - case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break; - case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break; - case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break; - case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break; - case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break; - case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break; - case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break; - case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break; - case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break; - case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break; - case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break; + case VIDIOC_G_FMT32: ncmd = VIDIOC_G_FMT; break; + case VIDIOC_S_FMT32: ncmd = VIDIOC_S_FMT; break; + case VIDIOC_QUERYBUF32: ncmd = VIDIOC_QUERYBUF; break; + case VIDIOC_G_FBUF32: ncmd = VIDIOC_G_FBUF; break; + case VIDIOC_S_FBUF32: ncmd = VIDIOC_S_FBUF; break; + case VIDIOC_QBUF32: ncmd = VIDIOC_QBUF; break; + case VIDIOC_DQBUF32: ncmd = VIDIOC_DQBUF; break; + case VIDIOC_ENUMSTD32: ncmd = VIDIOC_ENUMSTD; break; + case VIDIOC_ENUMINPUT32: ncmd = VIDIOC_ENUMINPUT; break; + case VIDIOC_TRY_FMT32: ncmd = VIDIOC_TRY_FMT; break; + case VIDIOC_G_EXT_CTRLS32: ncmd = VIDIOC_G_EXT_CTRLS; break; + case VIDIOC_S_EXT_CTRLS32: ncmd = VIDIOC_S_EXT_CTRLS; break; + case VIDIOC_TRY_EXT_CTRLS32: ncmd = VIDIOC_TRY_EXT_CTRLS; break; + case VIDIOC_DQEVENT32: ncmd = VIDIOC_DQEVENT; break; + case VIDIOC_OVERLAY32: ncmd = VIDIOC_OVERLAY; break; + case VIDIOC_STREAMON32: ncmd = VIDIOC_STREAMON; break; + case VIDIOC_STREAMOFF32: ncmd = VIDIOC_STREAMOFF; break; + case VIDIOC_G_INPUT32: ncmd = VIDIOC_G_INPUT; break; + case VIDIOC_S_INPUT32: ncmd = VIDIOC_S_INPUT; break; + case VIDIOC_G_OUTPUT32: ncmd = VIDIOC_G_OUTPUT; break; + case VIDIOC_S_OUTPUT32: ncmd = VIDIOC_S_OUTPUT; break; + case VIDIOC_CREATE_BUFS32: ncmd = VIDIOC_CREATE_BUFS; break; + case VIDIOC_PREPARE_BUF32: ncmd = VIDIOC_PREPARE_BUF; break; + case VIDIOC_G_EDID32: ncmd = VIDIOC_G_EDID; break; + case VIDIOC_S_EDID32: ncmd = VIDIOC_S_EDID; break; + default: ncmd = cmd; break; } /* @@ -1221,11 +1223,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar * argument into it. 
*/ switch (cmd) { - case VIDIOC_OVERLAY: - case VIDIOC_STREAMON: - case VIDIOC_STREAMOFF: - case VIDIOC_S_INPUT: - case VIDIOC_S_OUTPUT: + case VIDIOC_OVERLAY32: + case VIDIOC_STREAMON32: + case VIDIOC_STREAMOFF32: + case VIDIOC_S_INPUT32: + case VIDIOC_S_OUTPUT32: err = alloc_userspace(sizeof(unsigned int), 0, &new_p64); if (!err && assign_in_user((unsigned int __user *)new_p64, (compat_uint_t __user *)p32)) @@ -1233,23 +1235,23 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; - case VIDIOC_G_INPUT: - case VIDIOC_G_OUTPUT: + case VIDIOC_G_INPUT32: + case VIDIOC_G_OUTPUT32: err = alloc_userspace(sizeof(unsigned int), 0, &new_p64); compatible_arg = 0; break; - case VIDIOC_G_EDID: - case VIDIOC_S_EDID: + case VIDIOC_G_EDID32: + case VIDIOC_S_EDID32: err = alloc_userspace(sizeof(struct v4l2_edid), 0, &new_p64); if (!err) err = get_v4l2_edid32(new_p64, p32); compatible_arg = 0; break; - case VIDIOC_G_FMT: - case VIDIOC_S_FMT: - case VIDIOC_TRY_FMT: + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: err = bufsize_v4l2_format(p32, &aux_space); if (!err) err = alloc_userspace(sizeof(struct v4l2_format), @@ -1262,7 +1264,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; - case VIDIOC_CREATE_BUFS: + case VIDIOC_CREATE_BUFS32: err = bufsize_v4l2_create(p32, &aux_space); if (!err) err = alloc_userspace(sizeof(struct v4l2_create_buffers), @@ -1275,10 +1277,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; - case VIDIOC_PREPARE_BUF: - case VIDIOC_QUERYBUF: - case VIDIOC_QBUF: - case VIDIOC_DQBUF: + case VIDIOC_PREPARE_BUF32: + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: err = bufsize_v4l2_buffer(p32, &aux_space); if (!err) err = alloc_userspace(sizeof(struct v4l2_buffer), @@ -1291,7 +1293,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; - case VIDIOC_S_FBUF: + case VIDIOC_S_FBUF32: err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, &new_p64); if (!err) @@ -1299,13 +1301,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; - case VIDIOC_G_FBUF: + case VIDIOC_G_FBUF32: err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, &new_p64); compatible_arg = 0; break; - case VIDIOC_ENUMSTD: + case VIDIOC_ENUMSTD32: err = alloc_userspace(sizeof(struct v4l2_standard), 0, &new_p64); if (!err) @@ -1313,16 +1315,16 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; - case VIDIOC_ENUMINPUT: + case VIDIOC_ENUMINPUT32: err = alloc_userspace(sizeof(struct v4l2_input), 0, &new_p64); if (!err) err = get_v4l2_input32(new_p64, p32); compatible_arg = 0; break; - case VIDIOC_G_EXT_CTRLS: - case VIDIOC_S_EXT_CTRLS: - case VIDIOC_TRY_EXT_CTRLS: + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: err = bufsize_v4l2_ext_controls(p32, &aux_space); if (!err) err = alloc_userspace(sizeof(struct v4l2_ext_controls), @@ -1334,7 +1336,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar } compatible_arg = 0; break; - case VIDIOC_DQEVENT: + case VIDIOC_DQEVENT32: err = alloc_userspace(sizeof(struct v4l2_event), 0, &new_p64); compatible_arg = 0; break; @@ -1352,9 +1354,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long 
ar * Otherwise, it will pass the newly allocated @new_p64 argument. */ if (compatible_arg) - err = native_ioctl(file, cmd, (unsigned long)p32); + err = native_ioctl(file, ncmd, (unsigned long)p32); else - err = native_ioctl(file, cmd, (unsigned long)new_p64); + err = native_ioctl(file, ncmd, (unsigned long)new_p64); if (err == -ENOTTY) return err; @@ -1370,13 +1372,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar * the blocks to maximum allowed value. */ switch (cmd) { - case VIDIOC_G_EXT_CTRLS: - case VIDIOC_S_EXT_CTRLS: - case VIDIOC_TRY_EXT_CTRLS: + case VIDIOC_G_EXT_CTRLS32: + case VIDIOC_S_EXT_CTRLS32: + case VIDIOC_TRY_EXT_CTRLS32: if (put_v4l2_ext_controls32(file, new_p64, p32)) err = -EFAULT; break; - case VIDIOC_S_EDID: + case VIDIOC_S_EDID32: if (put_v4l2_edid32(new_p64, p32)) err = -EFAULT; break; @@ -1389,49 +1391,49 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar * the original 32 bits structure. */ switch (cmd) { - case VIDIOC_S_INPUT: - case VIDIOC_S_OUTPUT: - case VIDIOC_G_INPUT: - case VIDIOC_G_OUTPUT: + case VIDIOC_S_INPUT32: + case VIDIOC_S_OUTPUT32: + case VIDIOC_G_INPUT32: + case VIDIOC_G_OUTPUT32: if (assign_in_user((compat_uint_t __user *)p32, ((unsigned int __user *)new_p64))) err = -EFAULT; break; - case VIDIOC_G_FBUF: + case VIDIOC_G_FBUF32: err = put_v4l2_framebuffer32(new_p64, p32); break; - case VIDIOC_DQEVENT: + case VIDIOC_DQEVENT32: err = put_v4l2_event32(new_p64, p32); break; - case VIDIOC_G_EDID: + case VIDIOC_G_EDID32: err = put_v4l2_edid32(new_p64, p32); break; - case VIDIOC_G_FMT: - case VIDIOC_S_FMT: - case VIDIOC_TRY_FMT: + case VIDIOC_G_FMT32: + case VIDIOC_S_FMT32: + case VIDIOC_TRY_FMT32: err = put_v4l2_format32(new_p64, p32); break; - case VIDIOC_CREATE_BUFS: + case VIDIOC_CREATE_BUFS32: err = put_v4l2_create32(new_p64, p32); break; - case VIDIOC_PREPARE_BUF: - case VIDIOC_QUERYBUF: - case VIDIOC_QBUF: - case VIDIOC_DQBUF: + case VIDIOC_PREPARE_BUF32: + case VIDIOC_QUERYBUF32: + case VIDIOC_QBUF32: + case VIDIOC_DQBUF32: err = put_v4l2_buffer32(new_p64, p32); break; - case VIDIOC_ENUMSTD: + case VIDIOC_ENUMSTD32: err = put_v4l2_standard32(new_p64, p32); break; - case VIDIOC_ENUMINPUT: + case VIDIOC_ENUMINPUT32: err = put_v4l2_input32(new_p64, p32); break; } diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 1d8f38824631..cd84dbbf6a89 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -3144,6 +3144,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj) struct v4l2_ctrl_handler *prev_hdl = NULL; struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL; + mutex_lock(main_hdl->lock); if (list_empty(&main_hdl->requests_queued)) goto queue; @@ -3175,18 +3176,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj) queue: list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); hdl->request_is_queued = true; + mutex_unlock(main_hdl->lock); } static void v4l2_ctrl_request_unbind(struct media_request_object *obj) { struct v4l2_ctrl_handler *hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; list_del_init(&hdl->requests); + mutex_lock(main_hdl->lock); if (hdl->request_is_queued) { list_del_init(&hdl->requests_queued); hdl->request_is_queued = false; } + mutex_unlock(main_hdl->lock); } static void v4l2_ctrl_request_release(struct media_request_object *obj) @@ -4128,9 +4133,11 @@ void 
v4l2_ctrl_request_complete(struct media_request *req, v4l2_ctrl_unlock(ctrl); } + mutex_lock(main_hdl->lock); WARN_ON(!hdl->request_is_queued); list_del_init(&hdl->requests_queued); hdl->request_is_queued = false; + mutex_unlock(main_hdl->lock); media_request_object_complete(obj); media_request_object_put(obj); } diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 63d6b147b21e..41da73ce2e98 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -179,6 +179,7 @@ static void v4l2_subdev_release(struct v4l2_subdev *sd) if (sd->internal_ops && sd->internal_ops->release) sd->internal_ops->release(sd); + sd->devnode = NULL; module_put(owner); } diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 51b912743f0f..58868d7129eb 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1466,10 +1466,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, return ret; } +static void v4l_pix_format_touch(struct v4l2_pix_format *p) +{ + /* + * The v4l2_pix_format structure contains fields that make no sense for + * touch. Set them to default values in this case. + */ + + p->field = V4L2_FIELD_NONE; + p->colorspace = V4L2_COLORSPACE_RAW; + p->flags = 0; + p->ycbcr_enc = 0; + p->quantization = 0; + p->xfer_func = 0; +} + static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); int ret = check_fmt(file, p->type); if (ret) @@ -1507,6 +1523,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); @@ -1544,21 +1562,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, return -EINVAL; } -static void v4l_pix_format_touch(struct v4l2_pix_format *p) -{ - /* - * The v4l2_pix_format structure contains fields that make no sense for - * touch. Set them to default values in this case. 
- */ - - p->field = V4L2_FIELD_NONE; - p->colorspace = V4L2_COLORSPACE_RAW; - p->flags = 0; - p->ycbcr_enc = 0; - p->quantization = 0; - p->xfer_func = 0; -} - static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { @@ -1602,12 +1605,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, case V4L2_BUF_TYPE_VBI_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) break; - CLEAR_AFTER_FIELD(p, fmt.vbi); + CLEAR_AFTER_FIELD(p, fmt.vbi.flags); return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) break; - CLEAR_AFTER_FIELD(p, fmt.sliced); + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size); return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_vid_out)) @@ -1633,22 +1636,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, case V4L2_BUF_TYPE_VBI_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_vbi_out)) break; - CLEAR_AFTER_FIELD(p, fmt.vbi); + CLEAR_AFTER_FIELD(p, fmt.vbi.flags); return ops->vidioc_s_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) break; - CLEAR_AFTER_FIELD(p, fmt.sliced); + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size); return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) break; - CLEAR_AFTER_FIELD(p, fmt.sdr); + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize); return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_sdr_out)) break; - CLEAR_AFTER_FIELD(p, fmt.sdr); + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize); return ops->vidioc_s_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_meta_cap)) @@ -1704,12 +1707,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, case V4L2_BUF_TYPE_VBI_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) break; - CLEAR_AFTER_FIELD(p, fmt.vbi); + CLEAR_AFTER_FIELD(p, fmt.vbi.flags); return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) break; - CLEAR_AFTER_FIELD(p, fmt.sliced); + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size); return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_vid_out)) @@ -1735,22 +1738,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, case V4L2_BUF_TYPE_VBI_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_vbi_out)) break; - CLEAR_AFTER_FIELD(p, fmt.vbi); + CLEAR_AFTER_FIELD(p, fmt.vbi.flags); return ops->vidioc_try_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) break; - CLEAR_AFTER_FIELD(p, fmt.sliced); + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size); return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) break; - CLEAR_AFTER_FIELD(p, fmt.sdr); + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize); return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_sdr_out)) break; - CLEAR_AFTER_FIELD(p, fmt.sdr); + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize); return ops->vidioc_try_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_meta_cap)) diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c 
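The v4l_s_fmt() and v4l_try_fmt() hunks above narrow each CLEAR_AFTER_FIELD() argument from a whole union member (fmt.vbi, fmt.sliced, fmt.sdr) to the last field userspace is allowed to fill in, so the reserved tail inside each sub-structure is zeroed as well instead of reaching drivers uninitialized. A minimal sketch of the macro's semantics, assuming the usual offsetof()-based definition from v4l2-ioctl.c (paraphrased here, not copied from the tree):

/*
 * Zero every byte of *(p) that follows the named field. With fmt.vbi,
 * only bytes past the entire vbi member were cleared, leaving
 * vbi.reserved[] untouched; with fmt.vbi.flags the reserved tail of
 * the vbi structure itself is cleared as well.
 */
#define CLEAR_AFTER_FIELD(p, field)					\
	memset((u8 *)(p) + offsetof(typeof(*(p)), field) +		\
	       sizeof((p)->field), 0,					\
	       sizeof(*(p)) - offsetof(typeof(*(p)), field) -		\
	       sizeof((p)->field))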
index 19937dd3c6f6..3d6a6306cec7 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -809,12 +809,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, goto err_rel_entity1; /* Connect the three entities */ - ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1, + ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rel_entity2; - ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0, + ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rm_links0; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 66a6c6c236a7..28262190c3ab 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -349,8 +349,11 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma) BUG_ON(dma->sglen); if (dma->pages) { - for (i = 0; i < dma->nr_pages; i++) + for (i = 0; i < dma->nr_pages; i++) { + if (dma->direction == DMA_FROM_DEVICE) + set_page_dirty_lock(dma->pages[i]); put_page(dma->pages[i]); + } kfree(dma->pages); dma->pages = NULL; } diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 439d7d886873..a113e811faab 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev) static const struct dev_pm_ops smi_larb_pm_ops = { SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL) + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver mtk_smi_larb_driver = { @@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev) static const struct dev_pm_ops smi_common_pm_ops = { SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL) + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver mtk_smi_common_driver = { diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index f9ac22413000..1074b882c57c 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -100,19 +100,19 @@ struct buflist { * Function prototypes. Called from OS entry point mptctl_ioctl. * arg contents specific to function. 
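The v4l2-mem2mem fix near the top of this hunk swaps which pads of the processing entity the two immutable links attach to. media_create_pad_link() takes (source entity, source pad, sink entity, sink pad, flags), and the corrected link directions imply the usual convention for the m2m "proc" entity of pad 0 = sink, pad 1 = source, so frames must enter on pad 0 and leave on pad 1. A rough sketch of the corrected wiring, using the names from the driver:

/* frames flow into the processing entity's sink pad (0) ... */
ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			    MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
/* ... and out of its source pad (1) into the sink entity's pad 0 */
ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			    MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);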
*/ -static int mptctl_fw_download(unsigned long arg); -static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); -static int mptctl_gettargetinfo(unsigned long arg); -static int mptctl_readtest(unsigned long arg); -static int mptctl_mpt_command(unsigned long arg); -static int mptctl_eventquery(unsigned long arg); -static int mptctl_eventenable(unsigned long arg); -static int mptctl_eventreport(unsigned long arg); -static int mptctl_replace_fw(unsigned long arg); +static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg); -static int mptctl_do_reset(unsigned long arg); -static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); -static int mptctl_hp_targetinfo(unsigned long arg); +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); @@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); /* * Private function calls. */ -static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); -static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); +static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr); +static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, @@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * by TM and FW reloads. 
*/ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { - return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); + return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { - return mptctl_gettargetinfo(arg); + return mptctl_gettargetinfo(iocp, arg); } else if (cmd == MPTTEST) { - return mptctl_readtest(arg); + return mptctl_readtest(iocp, arg); } else if (cmd == MPTEVENTQUERY) { - return mptctl_eventquery(arg); + return mptctl_eventquery(iocp, arg); } else if (cmd == MPTEVENTENABLE) { - return mptctl_eventenable(arg); + return mptctl_eventenable(iocp, arg); } else if (cmd == MPTEVENTREPORT) { - return mptctl_eventreport(arg); + return mptctl_eventreport(iocp, arg); } else if (cmd == MPTFWREPLACE) { - return mptctl_replace_fw(arg); + return mptctl_replace_fw(iocp, arg); } /* All of these commands require an interrupt or @@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; if (cmd == MPTFWDOWNLOAD) - ret = mptctl_fw_download(arg); + ret = mptctl_fw_download(iocp, arg); else if (cmd == MPTCOMMAND) - ret = mptctl_mpt_command(arg); + ret = mptctl_mpt_command(iocp, arg); else if (cmd == MPTHARDRESET) - ret = mptctl_do_reset(arg); + ret = mptctl_do_reset(iocp, arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) - ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); + ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) - ret = mptctl_hp_targetinfo(arg); + ret = mptctl_hp_targetinfo(iocp, arg); else ret = -EINVAL; @@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } -static int mptctl_do_reset(unsigned long arg) +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; - MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " @@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg) return -EFAULT; } - if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", - __FILE__, __LINE__, krinfo.hdr.iocnum); - return -ENODEV; /* (-6) No such device or address */ - } - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); @@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_fw_download(unsigned long arg) +mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; @@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg) return -EFAULT; } - return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) +mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; @@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) pFWDownloadReply_t ReplyMsg = NULL; unsigned long 
timeleft; - if (mpt_verify_adapter(ioc, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", - ioc); - return -ENODEV; /* (-6) No such device or address */ - } else { - - /* Valid device. Get a message frame and construct the FW download message. - */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; - } + /* Valid device. Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); @@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", - iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; @@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE * -ENODEV if no such device/adapter */ static int -mptctl_getiocinfo (unsigned long arg, unsigned int data_size) +mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; - int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; @@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) return PTR_ERR(karg); } - if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - kfree(karg); - return -ENODEV; - } - /* Verify the data transfer size is correct. 
*/ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " @@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) * -ENODEV if no such device/adapter */ static int -mptctl_gettargetinfo (unsigned long arg) +mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; - MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; - int iocnum; int numDevices = 0; int lun; int maxWordsLeft; @@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes @@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_readtest (unsigned long arg) +mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_test __user *uarg = (void __user *) arg; struct mpt_ioctl_test karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - " @@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n", ioc->name)); /* Fill in the data and return the structure to the calling @@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_eventquery (unsigned long arg) +mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " @@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; @@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventenable (unsigned long arg) +mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " @@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", - 
__FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { @@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventreport (unsigned long arg) +mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; - MPT_ADAPTER *ioc; - int iocnum; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { @@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); @@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_replace_fw (unsigned long arg) +mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg; struct mpt_ioctl_replace_fw karg; - MPT_ADAPTER *ioc; - int iocnum; int newFwSize; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { @@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n", ioc->name)); /* If caching FW, Free the old FW image @@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg) * -ENOMEM if memory allocation error */ static int -mptctl_mpt_command (unsigned long arg) +mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; - MPT_ADAPTER *ioc; - int iocnum; int rc; @@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - - rc = mptctl_do_mpt_command (karg, &uarg->MF); + rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF); return rc; } @@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg) * -EPERM if SCSI I/O and target is untagged */ static int -mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) +mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr) { - MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; @@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ - int iocnum, flagsLength; + int flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; @@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, 
&ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); @@ -2418,17 +2321,15 @@ done_free_mem: * -ENOMEM if memory allocation error */ static int -mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) +mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; - int iocnum; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; @@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); @@ -2659,15 +2554,13 @@ retry_wait: * -ENOMEM if memory allocation error */ static int -mptctl_hp_targetinfo(unsigned long arg) +mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg) { hp_target_info_t __user *uarg = (void __user *) arg; SCSIDevicePage0_t *pg0_alloc; SCSIDevicePage3_t *pg3_alloc; - MPT_ADAPTER *ioc; MPT_SCSI_HOST *hd = NULL; hp_target_info_t karg; - int iocnum; int data_sz; dma_addr_t page_dma; CONFIGPARMS cfg; @@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } if (karg.hdr.id >= MPT_MAX_FC_DEVICES) return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", @@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); - ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); @@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, /* Pass new structure to do_mpt_command */ - ret = mptctl_do_mpt_command (karg, &uarg->MF); + ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index ae24d3ea68ea..43169f25da1f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -758,6 +758,7 @@ config MFD_MAX77650 depends on OF || COMPILE_TEST select MFD_CORE select REGMAP_I2C + select REGMAP_IRQ help Say Y here to add support for Maxim Semiconductor MAX77650 and MAX77651 Power Management ICs. 
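Taken together, the mptctl conversion above replaces a per-handler mpt_verify_adapter() lookup, driven by an iocnum re-read from userspace in every handler, with a single resolution in __mptctl_ioctl() whose verified MPT_ADAPTER pointer is then passed down. The apparent motivation is closing the double-fetch window where userspace could change hdr.iocnum between fetches and steer a handler at a different adapter than the one validated. A condensed sketch of the single-fetch pattern, assuming (as in mptctl.h) that every ioctl argument structure starts with an mpt_ioctl_header carrying iocnum:

	mpt_ioctl_header khdr;
	MPT_ADAPTER *iocp = NULL;

	/* fetch the header exactly once; all later code uses this copy */
	if (copy_from_user(&khdr, (void __user *)arg, sizeof(khdr)))
		return -EFAULT;
	if (mpt_verify_adapter(khdr.iocnum, &iocp) < 0 || iocp == NULL)
		return -ENODEV;

	/* handlers now receive the verified adapter, never an iocnum */
	return mptctl_eventquery(iocp, arg);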
This is the core multifunction diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index a4aaadaa0cb0..aa59496e4376 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -126,7 +126,7 @@ static const struct regmap_range axp288_writeable_ranges[] = { static const struct regmap_range axp288_volatile_ranges[] = { regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON), regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL), - regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT), + regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT), regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL), regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L), regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL), diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c index e69626867c26..9143de7b77b8 100644 --- a/drivers/mfd/da9062-core.c +++ b/drivers/mfd/da9062-core.c @@ -248,7 +248,7 @@ static const struct mfd_cell da9062_devs[] = { .name = "da9062-watchdog", .num_resources = ARRAY_SIZE(da9062_wdt_resources), .resources = da9062_wdt_resources, - .of_compatible = "dlg,da9062-wdt", + .of_compatible = "dlg,da9062-watchdog", }, { .name = "da9062-thermal", diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 381593fbe50f..7841c11411d0 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -722,6 +722,8 @@ static int dln2_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { struct usb_host_interface *hostif = interface->cur_altsetting; + struct usb_endpoint_descriptor *epin; + struct usb_endpoint_descriptor *epout; struct device *dev = &interface->dev; struct dln2_dev *dln2; int ret; @@ -731,12 +733,19 @@ static int dln2_probe(struct usb_interface *interface, hostif->desc.bNumEndpoints < 2) return -ENODEV; + epin = &hostif->endpoint[0].desc; + epout = &hostif->endpoint[1].desc; + if (!usb_endpoint_is_bulk_out(epout)) + return -ENODEV; + if (!usb_endpoint_is_bulk_in(epin)) + return -ENODEV; + dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL); if (!dln2) return -ENOMEM; - dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress; - dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress; + dln2->ep_out = epout->bEndpointAddress; + dln2->ep_in = epin->bEndpointAddress; dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dln2->interface = interface; usb_set_intfdata(interface, dln2); diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 9355db29d2f9..b33030e3385c 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -122,13 +122,25 @@ static const struct intel_lpss_platform_info apl_i2c_info = { .properties = apl_i2c_properties, }; +static struct property_entry glk_i2c_properties[] = { + PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 313), + PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171), + PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 290), + { }, +}; + +static const struct intel_lpss_platform_info glk_i2c_info = { + .clk_rate = 133000000, + .properties = glk_i2c_properties, +}; + static const struct intel_lpss_platform_info cnl_i2c_info = { .clk_rate = 216000000, .properties = spt_i2c_properties, }; static const struct pci_device_id intel_lpss_pci_ids[] = { - /* CML */ + /* CML-LP */ { PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info }, @@ -141,6 +153,17 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x02ea), 
(kernel_ulong_t)&cnl_i2c_info }, { PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info }, { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info }, + /* CML-H */ + { PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x06e8), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&spt_info }, /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info }, @@ -174,14 +197,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ - { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&glk_i2c_info }, + { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&glk_i2c_info }, { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info }, diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c index da5cd9c92a59..ead2e79036a9 100644 --- a/drivers/mfd/rn5t618.c +++ b/drivers/mfd/rn5t618.c @@ -26,6 +26,7 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg) case RN5T618_WATCHDOGCNT: case RN5T618_DCIRQ: case RN5T618_ILIMDATAH ... RN5T618_AIN0DATAL: + case RN5T618_ADCCNT3: case RN5T618_IR_ADC1 ... RN5T618_IR_ADC3: case RN5T618_IR_GPR: case RN5T618_IR_GPF: diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 25e5f24b3fec..5bdf57472314 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -2112,8 +2112,8 @@ exit_done: return status; } -static int altera_get_note(u8 *p, s32 program_size, - s32 *offset, char *key, char *value, int length) +static int altera_get_note(u8 *p, s32 program_size, s32 *offset, + char *key, char *value, int keylen, int vallen) /* * Gets key and value of NOTE fields in the JBC file. 
* Can be called in two modes: if offset pointer is NULL, @@ -2170,7 +2170,7 @@ static int altera_get_note(u8 *p, s32 program_size, &p[note_table + (8 * i) + 4])]; if (value != NULL) - strlcpy(value, value_ptr, length); + strlcpy(value, value_ptr, vallen); } } @@ -2189,13 +2189,13 @@ static int altera_get_note(u8 *p, s32 program_size, strlcpy(key, &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i)])], - length); + keylen); if (value != NULL) strlcpy(value, &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i) + 4])], - length); + vallen); *offset = i + 1; } @@ -2449,7 +2449,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw) __func__, (format_version == 2) ? "Jam STAPL" : "pre-standardized Jam 1.1"); while (altera_get_note((u8 *)fw->data, fw->size, - &offset, key, value, 256) == 0) + &offset, key, value, 32, 256) == 0) printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n", __func__, key, value); } diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 4feed296a327..3a9467aaa435 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -394,7 +394,8 @@ static const struct pcr_ops rts522a_pcr_ops = { void rts522a_init_params(struct rtsx_pcr *pcr) { rts5227_init_params(pcr); - + pcr->ops = &rts522a_pcr_ops; + pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11); pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; pcr->option.ocp_en = 1; diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index db936e4d6e56..1a81cda948c1 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -618,6 +618,7 @@ static const struct pcr_ops rts524a_pcr_ops = { void rts524a_init_params(struct rtsx_pcr *pcr) { rts5249_init_params(pcr); + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11); pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; pcr->option.ltr_l1off_snooze_sspwrgate = LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; @@ -733,6 +734,7 @@ static const struct pcr_ops rts525a_pcr_ops = { void rts525a_init_params(struct rtsx_pcr *pcr) { rts5249_init_params(pcr); + pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11); pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; pcr->option.ltr_l1off_snooze_sspwrgate = LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 40a6d199f2ea..c9327bf92d16 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -663,7 +663,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; pcr->aspm_en = ASPM_L1_EN; - pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16); + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11); pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); pcr->ic_version = rts5260_get_ic_version(pcr); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 6d27ccfe0680..3c2d405bc79b 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) cdev = &edev->component[i]; if (cdev->dev == dev) { enclosure_remove_links(cdev); - device_del(&cdev->cdev); put_device(dev); cdev->dev = NULL; - return device_add(&cdev->cdev); + return 0; } } return -ENODEV; diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 1b1a794d639d..842f2210dc7e 100644 --- a/drivers/misc/fastrpc.c +++ 
b/drivers/misc/fastrpc.c @@ -1430,8 +1430,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) return -ENOMEM; data->miscdev.minor = MISC_DYNAMIC_MINOR; - data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s", - domains[domain_id]); + data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s", + domains[domain_id]); data->miscdev.fops = &fastrpc_fops; err = misc_register(&data->miscdev); if (err) diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index a9ac045dcfde..447f307ef4d6 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -777,8 +777,9 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) memset(args, 0, sizeof(*args)); if (rc < 0) { - dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n", - rc, seq); + dev_err_ratelimited(hdev->dev, + "Error %ld on waiting for CS handle %llu\n", + rc, seq); if (rc == -ERESTARTSYS) { args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED; rc = -EINTR; diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c index 17db7b3dfb4c..2df6fb87e7ff 100644 --- a/drivers/misc/habanalabs/context.c +++ b/drivers/misc/habanalabs/context.c @@ -176,7 +176,7 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq) spin_lock(&ctx->cs_lock); if (seq >= ctx->cs_sequence) { - dev_notice(hdev->dev, + dev_notice_ratelimited(hdev->dev, "Can't wait on seq %llu because current CS is at seq %llu\n", seq, ctx->cs_sequence); spin_unlock(&ctx->cs_lock); diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 459fee70a597..a7a4fed4d899 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -600,7 +600,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) goto out; } - hdev->asic_funcs->halt_coresight(hdev); + if (!hdev->hard_reset_pending) + hdev->asic_funcs->halt_coresight(hdev); + hdev->in_debug = 0; goto out; @@ -1185,6 +1187,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { dev_info(hdev->dev, "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->halt_engines(hdev, true); hdev->asic_funcs->hw_fini(hdev, true); } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 6fba14b81f90..13b39fd97429 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -869,6 +869,11 @@ void goya_init_dma_qmans(struct hl_device *hdev) */ static void goya_disable_external_queues(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return; + WREG32(mmDMA_QM_0_GLBL_CFG0, 0); WREG32(mmDMA_QM_1_GLBL_CFG0, 0); WREG32(mmDMA_QM_2_GLBL_CFG0, 0); @@ -930,6 +935,11 @@ static int goya_stop_external_queues(struct hl_device *hdev) { int rc, retval = 0; + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return retval; + rc = goya_stop_queue(hdev, mmDMA_QM_0_GLBL_CFG1, mmDMA_QM_0_CP_STS, @@ -1719,9 +1729,18 @@ void goya_init_tpc_qmans(struct hl_device *hdev) */ static void goya_disable_internal_queues(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + goto disable_tpc; + WREG32(mmMME_QM_GLBL_CFG0, 0); WREG32(mmMME_CMDQ_GLBL_CFG0, 0); +disable_tpc: + if 
(!(goya->hw_cap_initialized & HW_CAP_TPC)) + return; + WREG32(mmTPC0_QM_GLBL_CFG0, 0); WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0); @@ -1757,8 +1776,12 @@ static void goya_disable_internal_queues(struct hl_device *hdev) */ static int goya_stop_internal_queues(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; int rc, retval = 0; + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + goto stop_tpc; + /* * Each queue (QMAN) is a separate H/W logic. That means that each * QMAN can be stopped independently and failure to stop one does NOT @@ -1785,6 +1808,10 @@ static int goya_stop_internal_queues(struct hl_device *hdev) retval = -EIO; } +stop_tpc: + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return retval; + rc = goya_stop_queue(hdev, mmTPC0_QM_GLBL_CFG1, mmTPC0_QM_CP_STS, @@ -1950,6 +1977,11 @@ static int goya_stop_internal_queues(struct hl_device *hdev) static void goya_dma_stall(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return; + WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT); WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT); @@ -1959,6 +1991,11 @@ static void goya_dma_stall(struct hl_device *hdev) static void goya_tpc_stall(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return; + WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT); WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT); @@ -1971,6 +2008,11 @@ static void goya_tpc_stall(struct hl_device *hdev) static void goya_mme_stall(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + return; + WREG32(mmMME_STALL, 0xFFFFFFFF); } @@ -2171,7 +2213,7 @@ static int goya_push_linux_to_device(struct hl_device *hdev) static int goya_pldm_init_cpu(struct hl_device *hdev) { - u32 val, unit_rst_val; + u32 unit_rst_val; int rc; /* Must initialize SRAM scrambler before pushing u-boot to SRAM */ @@ -2179,14 +2221,14 @@ static int goya_pldm_init_cpu(struct hl_device *hdev) /* Put ARM cores into reset */ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); /* Reset the CA53 MACRO */ unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); rc = goya_push_uboot_to_device(hdev); if (rc) @@ -2207,7 +2249,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev) /* Release ARM core 0 from reset */ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_CORE0_DEASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); return 0; } @@ -2475,13 +2517,12 @@ err: static int goya_hw_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 val; int rc; dev_info(hdev->dev, "Starting initialization of H/W\n"); /* Perform read from the device to make sure device is up */ - val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); /* * Let's mark in the H/W 
that we have reached this point. We check @@ -2533,7 +2574,7 @@ static int goya_hw_init(struct hl_device *hdev) goto disable_queues; /* Perform read from the device to flush all MSI-X configuration */ - val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); return 0; @@ -4625,8 +4666,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, rc = goya_send_job_on_qman0(hdev, job); - hl_cb_put(job->patched_cb); - hl_debugfs_remove_job(hdev, job); kfree(job); cb->cs_cnt--; diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 365fb0cb8dff..22566b75ca50 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -965,17 +965,19 @@ init_page_pack_err: * * @ctx : current context * @vaddr : device virtual address to unmap + * @ctx_free : true if in context free flow, false otherwise. * * This function does the following: * - Unmap the physical pages related to the given virtual address * - return the device virtual block to the virtual block list */ -static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) +static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free) { struct hl_device *hdev = ctx->hdev; struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; struct hl_vm_hash_node *hnode = NULL; struct hl_userptr *userptr = NULL; + struct hl_va_range *va_range; enum vm_type_t *vm_type; u64 next_vaddr, i; u32 page_size; @@ -1003,6 +1005,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) if (*vm_type == VM_TYPE_USERPTR) { is_userptr = true; + va_range = &ctx->host_va_range; userptr = hnode->ptr; rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack); @@ -1014,6 +1017,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) } } else if (*vm_type == VM_TYPE_PHYS_PACK) { is_userptr = false; + va_range = &ctx->dram_va_range; phys_pg_pack = hnode->ptr; } else { dev_warn(hdev->dev, @@ -1052,12 +1056,18 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) mutex_unlock(&ctx->mmu_lock); - if (add_va_block(hdev, - is_userptr ? 
&ctx->host_va_range : &ctx->dram_va_range, - vaddr, - vaddr + phys_pg_pack->total_size - 1)) - dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n", - vaddr); + /* + * No point in maintaining the free VA block list if the context is + * closing as the list will be freed anyway + */ + if (!ctx_free) { + rc = add_va_block(hdev, va_range, vaddr, + vaddr + phys_pg_pack->total_size - 1); + if (rc) + dev_warn(hdev->dev, + "add va block failed for vaddr: 0x%llx\n", + vaddr); + } atomic_dec(&phys_pg_pack->mapping_cnt); kfree(hnode); @@ -1189,8 +1199,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) break; case HL_MEM_OP_UNMAP: - rc = unmap_device_va(ctx, - args->in.unmap.device_virt_addr); + rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr, + false); break; default: @@ -1620,7 +1630,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) dev_dbg(hdev->dev, "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n", hnode->vaddr, ctx->asid); - unmap_device_va(ctx, hnode->vaddr); + unmap_device_va(ctx, hnode->vaddr, true); } spin_lock(&vm->idr_lock); diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 7284a22b1a09..4d5a512769e9 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -274,7 +274,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void) void lkdtm_UNSET_SMEP(void) { -#ifdef CONFIG_X86_64 +#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) #define MOV_CR4_DEPTH 64 void (*direct_write_cr4)(unsigned long val); unsigned char *insn; diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 985bd4fd3328..53bb394ccba6 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -873,15 +873,16 @@ static const struct device_type mei_cl_device_type = { /** * mei_cl_bus_set_name - set device name for me client device + * - + * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb * * @cldev: me client device */ static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) { - dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X", - cldev->name, - mei_me_cl_uuid(cldev->me_cl), - mei_me_cl_ver(cldev->me_cl)); + dev_set_name(&cldev->dev, "%s-%pUl", + dev_name(cldev->bus->dev), + mei_me_cl_uuid(cldev->me_cl)); } /** diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index c681f6fab342..a9793ea6933b 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -758,11 +758,38 @@ static const struct component_master_ops mei_component_master_ops = { .unbind = mei_component_master_unbind, }; +/** + * mei_hdcp_component_match - compare function for matching mei hdcp. + * + * The function checks if the driver is i915, the subcomponent is HDCP + * and the grand parent of hdcp and the parent of i915 are the same + * PCH device. 
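+ *
+ * For example, with i915 at 0000:00:02.0 and the MEI controller at
+ * 0000:00:16.0 (illustrative names, not taken from the driver),
+ * dev->parent for the i915 device and data->parent->parent for the
+ * hdcp client device both resolve to the shared PCH root bus device,
+ * so the compare succeeds.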
+ * + * @dev: master device + * @subcomponent: subcomponent to match (I915_COMPONENT_HDCP) + * @data: compare data (mei hdcp device) + * + * Return: + * * 1 - if components match + * * 0 - otherwise + */ static int mei_hdcp_component_match(struct device *dev, int subcomponent, void *data) { - return !strcmp(dev->driver->name, "i915") && - subcomponent == I915_COMPONENT_HDCP; + struct device *base = data; + + if (strcmp(dev->driver->name, "i915") || + subcomponent != I915_COMPONENT_HDCP) + return 0; + + base = base->parent; + if (!base) + return 0; + + base = base->parent; + dev = dev->parent; + + return (base && dev && dev == base); } static int mei_hdcp_probe(struct mei_cl_device *cldev, @@ -786,7 +813,7 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev, master_match = NULL; component_match_add_typed(&cldev->dev, &master_match, - mei_hdcp_component_match, comp_master); + mei_hdcp_component_match, &cldev->dev); if (IS_ERR_OR_NULL(master_match)) { ret = -ENOMEM; goto err_exit; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index c09f8bb49495..e56dc4754064 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -82,6 +82,13 @@ #define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */ #define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */ +#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */ + +#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */ +#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */ + +#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */ + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ #define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3dca63eddaa0..75ab2ffbf235 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, @@ -106,6 +109,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index 994563a078eb..de8a66b9d76b 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -10,18 +10,17 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, int pasid; struct ocxl_context *ctx; - *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL); - if (!*context) + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) return -ENOMEM; - ctx = *context; - ctx->afu = afu; mutex_lock(&afu->contexts_lock); pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base, afu->pasid_base + afu->pasid_max, GFP_KERNEL); if (pasid < 0) { mutex_unlock(&afu->contexts_lock); + kfree(ctx); return pasid; } afu->pasid_count++; @@ -43,6 +42,7 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, * duration of the life of the context */ ocxl_afu_get(afu); + *context = ctx; return 0; } EXPORT_SYMBOL_GPL(ocxl_context_alloc); diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 
2870c25da166..4d1b44de1492 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -18,18 +18,15 @@ static struct class *ocxl_class; static struct mutex minors_idr_lock; static struct idr minors_idr; -static struct ocxl_file_info *find_file_info(dev_t devno) +static struct ocxl_file_info *find_and_get_file_info(dev_t devno) { struct ocxl_file_info *info; - /* - * We don't declare an RCU critical section here, as our AFU - * is protected by a reference counter on the device. By the time the - * info reference is removed from the idr, the ref count of - * the device is already at 0, so no user API will access that AFU and - * this function can't return it. - */ + mutex_lock(&minors_idr_lock); info = idr_find(&minors_idr, MINOR(devno)); + if (info) + get_device(&info->dev); + mutex_unlock(&minors_idr_lock); return info; } @@ -58,14 +55,16 @@ static int afu_open(struct inode *inode, struct file *file) pr_debug("%s for device %x\n", __func__, inode->i_rdev); - info = find_file_info(inode->i_rdev); + info = find_and_get_file_info(inode->i_rdev); if (!info) return -ENODEV; rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping); - if (rc) + if (rc) { + put_device(&info->dev); return rc; - + } + put_device(&info->dev); file->private_data = ctx; return 0; } @@ -487,7 +486,6 @@ static void info_release(struct device *dev) { struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev); - free_minor(info); ocxl_afu_put(info->afu); kfree(info); } @@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu) ocxl_file_make_invisible(info); ocxl_sysfs_unregister_afu(info); + free_minor(info); device_unregister(&info->dev); } diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 6e208a060a58..1154f0435b0a 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -98,6 +98,7 @@ struct pci_endpoint_test { struct completion irq_raised; int last_irq; int num_irqs; + int irq_type; /* mutex to protect the ioctls */ struct mutex mutex; struct miscdevice miscdev; @@ -157,6 +158,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test) struct pci_dev *pdev = test->pdev; pci_free_irq_vectors(pdev); + test->irq_type = IRQ_TYPE_UNDEFINED; } static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, @@ -191,6 +193,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, irq = 0; res = false; } + + test->irq_type = type; test->num_irqs = irq; return res; @@ -330,6 +334,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size) dma_addr_t orig_dst_phys_addr; size_t offset; size_t alignment = test->alignment; + int irq_type = test->irq_type; u32 src_crc32; u32 dst_crc32; @@ -426,6 +431,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size) dma_addr_t orig_phys_addr; size_t offset; size_t alignment = test->alignment; + int irq_type = test->irq_type; u32 crc32; if (size > SIZE_MAX - alignment) @@ -494,6 +500,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size) dma_addr_t orig_phys_addr; size_t offset; size_t alignment = test->alignment; + int irq_type = test->irq_type; u32 crc32; if (size > SIZE_MAX - alignment) @@ -555,7 +562,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, return false; } - if (irq_type == req_irq_type) + if (test->irq_type == req_irq_type) return true; pci_endpoint_test_release_irq(test); @@ -567,12 +574,10 @@ 
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, if (!pci_endpoint_test_request_irq(test)) goto err; - irq_type = req_irq_type; return true; err: pci_endpoint_test_free_irq_vectors(test); - irq_type = IRQ_TYPE_UNDEFINED; return false; } @@ -633,7 +638,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, { int err; int id; - char name[20]; + char name[24]; enum pci_barno bar; void __iomem *base; struct device *dev = &pdev->dev; @@ -652,6 +657,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test->test_reg_bar = 0; test->alignment = 0; test->pdev = pdev; + test->irq_type = IRQ_TYPE_UNDEFINED; if (no_msi) irq_type = IRQ_TYPE_LEGACY; diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 11835969e982..48ba7e02bed7 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -1025,25 +1025,25 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd, } #endif -static unsigned int xsdfec_poll(struct file *file, poll_table *wait) +static __poll_t xsdfec_poll(struct file *file, poll_table *wait) { - unsigned int mask = 0; + __poll_t mask = 0; struct xsdfec_dev *xsdfec; xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev); if (!xsdfec) - return POLLNVAL | POLLHUP; + return EPOLLNVAL | EPOLLHUP; poll_wait(file, &xsdfec->waitq, wait); /* XSDFEC ISR detected an error */ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); if (xsdfec->state_updated) - mask |= POLLIN | POLLPRI; + mask |= EPOLLIN | EPOLLPRI; if (xsdfec->stats_updated) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); return mask; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 2c71a434c915..95b41c0891d0 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -408,38 +408,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, - u32 retries_max) -{ - int err; - u32 retry_count = 0; - - if (!status || !retries_max) - return -EINVAL; - - do { - err = __mmc_send_status(card, status, 5); - if (err) - break; - - if (!R1_STATUS(*status) && - (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) - break; /* RPMB programming operation complete */ - - /* - * Rechedule to give the MMC device a chance to continue - * processing the previous command without being polled too - * frequently. - */ - usleep_range(1000, 5000); - } while (++retry_count < retries_max); - - if (retry_count == retries_max) - err = -EPERM; - - return err; -} - static int ioctl_do_sanitize(struct mmc_card *card) { int err; @@ -468,6 +436,58 @@ out: return err; } +static inline bool mmc_blk_in_tran_state(u32 status) +{ + /* + * Some cards mishandle the status bits, so make sure to check both the + * busy indication and the card state. 
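+ * A mishandling card may e.g. keep reporting R1_STATE_PRG while already + * asserting R1_READY_FOR_DATA (an assumed illustration, not the behaviour + * of one specific card).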
+ */ + return status & R1_READY_FOR_DATA && + (R1_CURRENT_STATE(status) == R1_STATE_TRAN); +} + +static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, + u32 *resp_errs) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + int err = 0; + u32 status; + + do { + bool done = time_after(jiffies, timeout); + + err = __mmc_send_status(card, &status, 5); + if (err) { + dev_err(mmc_dev(card->host), + "error %d requesting status\n", err); + return err; + } + + /* Accumulate any response error bits seen */ + if (resp_errs) + *resp_errs |= status; + + /* + * Timeout if the device never becomes ready for data and never + * leaves the program state. + */ + if (done) { + dev_err(mmc_dev(card->host), + "Card stuck in wrong state! %s status: %#x\n", + __func__, status); + return -ETIMEDOUT; + } + + /* + * Some cards mishandle the status bits, + * so make sure to check both the busy + * indication and the card state. + */ + } while (!mmc_blk_in_tran_state(status)); + + return err; +} + static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_blk_ioc_data *idata) { @@ -477,7 +497,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct scatterlist sg; int err; unsigned int target_part; - u32 status = 0; if (!card || !md || !idata) return -EINVAL; @@ -611,16 +630,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (idata->rpmb) { + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) { /* - * Ensure RPMB command has completed by polling CMD13 + * Ensure RPMB/R1B command has completed by polling CMD13 * "Send Status". */ - err = ioctl_rpmb_card_status_poll(card, &status, 5); - if (err) - dev_err(mmc_dev(card->host), - "%s: Card Status=0x%08X, error %d\n", - __func__, status, err); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL); } return err; @@ -970,58 +985,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, return ms; } -static inline bool mmc_blk_in_tran_state(u32 status) -{ - /* - * Some cards mishandle the status bits, so make sure to check both the - * busy indication and the card state. - */ - return status & R1_READY_FOR_DATA && - (R1_CURRENT_STATE(status) == R1_STATE_TRAN); -} - -static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, - struct request *req, u32 *resp_errs) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - int err = 0; - u32 status; - - do { - bool done = time_after(jiffies, timeout); - - err = __mmc_send_status(card, &status, 5); - if (err) { - pr_err("%s: error %d requesting status\n", - req->rq_disk->disk_name, err); - return err; - } - - /* Accumulate any response error bits seen */ - if (resp_errs) - *resp_errs |= status; - - /* - * Timeout if the device never becomes ready for data and never - * leaves the program state. - */ - if (done) { - pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n", - mmc_hostname(card->host), - req->rq_disk->disk_name, __func__, status); - return -ETIMEDOUT; - } - - /* - * Some cards mishandle the status bits, - * so make sure to check both the busy - * indication and the card state. 
- */ - } while (!mmc_blk_in_tran_state(status)); - return err; -} - static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { @@ -1671,7 +1634,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) mmc_blk_send_stop(card, timeout); - err = card_busy_detect(card, timeout, req, NULL); + err = card_busy_detect(card, timeout, NULL); mmc_retune_release(card->host); @@ -1895,7 +1858,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) return 0; - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status); /* * Do not assume data transferred correctly if there are any error bits diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 221127324709..26644b7ec13e 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1469,8 +1469,7 @@ void mmc_detach_bus(struct mmc_host *host) mmc_bus_put(host); } -static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, - bool cd_irq) +void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq) { /* * If the device is configured as wakeup, we prevent a new sleep for @@ -1733,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, * the erase operation does not exceed the max_busy_timeout, we should * use R1B response. Or we need to prevent the host from doing hw busy * detection, which is done by converting to a R1 response instead. + * Note, some hosts require R1B, which also means they are on their own + * when it comes to dealing with the busy timeout. */ - if (card->host->max_busy_timeout && + if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) && + card->host->max_busy_timeout && busy_timeout > card->host->max_busy_timeout) { cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; } else { @@ -2129,7 +2131,7 @@ int mmc_hw_reset(struct mmc_host *host) ret = host->bus_ops->hw_reset(host); mmc_bus_put(host); - if (ret) + if (ret < 0) pr_warn("%s: tried to HW reset card, got error %d\n", mmc_hostname(host), ret); @@ -2297,11 +2299,8 @@ void mmc_rescan(struct work_struct *work) mmc_bus_get(host); - /* - * if there is a _removable_ card registered, check whether it is - * still present - */ - if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host)) + /* Verify that a registered card is still functional, else remove it.
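+ * Also non-removable cards must be checked here: mmc_sdio_hw_reset() + * marks such a card removed and schedules this rescan, so detect() has + * to run for the card to be torn down and re-probed.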
*/ + if (host->bus_ops && !host->bus_dead) host->bus_ops->detect(host); host->detect_change = 0; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 328c78dbee66..575ac0257af2 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -70,6 +70,8 @@ void mmc_rescan(struct work_struct *work); void mmc_start_host(struct mmc_host *host); void mmc_stop_host(struct mmc_host *host); +void _mmc_detect_change(struct mmc_host *host, unsigned long delay, + bool cd_irq); int _mmc_detect_card_removed(struct mmc_host *host); int mmc_detect_card_removed(struct mmc_host *host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 105b7a7c0251..b3484def0a8b 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -176,7 +176,6 @@ int mmc_of_parse(struct mmc_host *host) u32 bus_width, drv_type, cd_debounce_delay_ms; int ret; bool cd_cap_invert, cd_gpio_invert = false; - bool ro_cap_invert, ro_gpio_invert = false; if (!dev || !dev_fwnode(dev)) return 0; @@ -255,9 +254,11 @@ int mmc_of_parse(struct mmc_host *host) } /* Parse Write Protection */ - ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); + if (device_property_read_bool(dev, "wp-inverted")) + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(host, "wp", 0, 0, NULL); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -266,10 +267,6 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "disable-wp")) host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; - /* See the comment on CD inversion above */ - if (ro_cap_invert ^ ro_gpio_invert) - host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (device_property_read_bool(dev, "cap-sd-highspeed")) host->caps |= MMC_CAP_SD_HIGHSPEED; if (device_property_read_bool(dev, "cap-mmc-highspeed")) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c8804895595f..b7159e243323 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1911,9 +1911,12 @@ static int mmc_sleep(struct mmc_host *host) * If the max_busy_timeout of the host is specified, validate it against * the sleep cmd timeout. A failure means we need to prevent the host * from doing hw busy detection, which is done by converting to a R1 - * response instead of a R1B. + * response instead of a R1B. Note, some hosts requires R1B, which also + * means they are on their own when it comes to deal with the busy + * timeout. */ - if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) { + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout && + (timeout_ms > host->max_busy_timeout)) { cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; } else { cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 09113b9ad679..18a7afb2a5b2 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -538,10 +538,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, * If the cmd timeout and the max_busy_timeout of the host are both * specified, let's validate them. A failure means we need to prevent * the host from doing hw busy detection, which is done by converting - * to a R1 response instead of a R1B. + * to a R1 response instead of a R1B. Note, some hosts requires R1B, + * which also means they are on their own when it comes to deal with the + * busy timeout. 
*/ - if (timeout_ms && host->max_busy_timeout && - (timeout_ms > host->max_busy_timeout)) + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms && + host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_SWITCH; diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 2d2d9ea8be4f..3dba15bccce2 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = { END_FIXUP }; + static const struct mmc_fixup sdio_fixup_methods[] = { + SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251, + add_quirk, MMC_QUIRK_NONSTD_FUNC_IF), + + SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251, + add_quirk, MMC_QUIRK_DISABLE_CD), + SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, add_quirk, MMC_QUIRK_NONSTD_FUNC_IF), diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 26cabd53ddc5..ebb387aa5158 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -1048,9 +1048,35 @@ static int mmc_sdio_runtime_resume(struct mmc_host *host) return ret; } +/* + * SDIO HW reset + * + * Returns 0 if the HW reset was executed synchronously, returns 1 if the HW + * reset was asynchronously scheduled, else a negative error code. + */ static int mmc_sdio_hw_reset(struct mmc_host *host) { - mmc_power_cycle(host, host->card->ocr); + struct mmc_card *card = host->card; + + /* + * In case the card is shared among multiple func drivers, reset the + * card through a rescan work. In this way it will be removed and + * re-detected, thus all func drivers become informed about it. + */ + if (atomic_read(&card->sdio_funcs_probed) > 1) { + if (mmc_card_removed(card)) + return 1; + host->rescan_entered = 0; + mmc_card_set_removed(card); + _mmc_detect_change(host, 0, false); + return 1; + } + + /* + * If only a single func driver has been probed, let's skip the heavy + * hotplug dance above and execute the reset immediately. + */ + mmc_power_cycle(host, card->ocr); return mmc_sdio_reinit_card(host); } diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2963e6542958..3cc928282af7 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -138,6 +138,8 @@ static int sdio_bus_probe(struct device *dev) if (ret) return ret; + atomic_inc(&func->card->sdio_funcs_probed); + /* Unbound SDIO functions are always suspended. * During probe, the function is set active and the usage count * is incremented. If the driver supports runtime PM, @@ -153,7 +155,10 @@ static int sdio_bus_probe(struct device *dev) /* Set the default block size so the driver is sure it's something * sensible.
*/ sdio_claim_host(func); - ret = sdio_set_block_size(func, 0); + if (mmc_card_removed(func->card)) + ret = -ENOMEDIUM; + else + ret = sdio_set_block_size(func, 0); sdio_release_host(func); if (ret) goto disable_runtimepm; @@ -165,6 +170,7 @@ static int sdio_bus_probe(struct device *dev) return 0; disable_runtimepm: + atomic_dec(&func->card->sdio_funcs_probed); if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) pm_runtime_put_noidle(dev); dev_pm_domain_detach(dev, false); @@ -181,6 +187,7 @@ static int sdio_bus_remove(struct device *dev) pm_runtime_get_sync(dev); drv->remove(func); + atomic_dec(&func->card->sdio_funcs_probed); if (func->irq_handler) { pr_warn("WARNING: driver %s did not remove its interrupt handler!\n", diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index da2596c5fa28..582ec3d720f6 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -241,6 +241,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, return ret; } + if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); + if (gpio_invert) *gpio_invert = !gpiod_is_active_low(desc); diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 66e354d51ee9..7083d8ddd495 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host) * SPI protocol. Another is that when chipselect is released while * the card returns BUSY status, the clock must issue several cycles * with chipselect high before the card will stop driving its output. + * + * SPI_CS_HIGH means "asserted" here. In some cases like when using + * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically + * inverted by gpiolib, so if we want to make sure it is driven high + * we should toggle the default with an XOR, as we do here. */ - host->spi->mode |= SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Just warn; most cards work without it. */ dev_warn(&host->spi->dev, "can't change chip-select polarity\n"); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; } else { mmc_spi_readbytes(host, 18); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Wot, we can't get the same setup we had before?
*/ dev_err(&host->spi->dev, diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 189e42674d85..010fe29a4888 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -228,6 +228,7 @@ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ +#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */ #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ @@ -1881,6 +1882,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) /* select EMMC50 PAD CMD tune */ sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); + sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || mmc->ios.timing == MMC_TIMING_UHS_SDR104) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 952fa4063ff8..d0df054b0b47 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1512,6 +1512,36 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card) if (mmc_pdata(host)->init_card) mmc_pdata(host)->init_card(card); + else if (card->type == MMC_TYPE_SDIO || + card->type == MMC_TYPE_SD_COMBO) { + struct device_node *np = mmc_dev(mmc)->of_node; + + /* + * REVISIT: should be moved to sdio core and made more + * general e.g. by expanding the DT bindings of child nodes + * to provide a mechanism to provide this information: + * Documentation/devicetree/bindings/mmc/mmc-card.txt + */ + + np = of_get_compatible_child(np, "ti,wl1251"); + if (np) { + /* + * We have TI wl1251 attached to MMC3. Pass this + * information to the SDIO core because it can't be + * probed by normal methods. + */ + + dev_info(host->dev, "found wl1251\n"); + card->quirks |= MMC_QUIRK_NONSTD_SDIO; + card->cccr.wide_bus = 1; + card->cis.vendor = 0x104c; + card->cis.device = 0x9066; + card->cis.blksize = 512; + card->cis.max_dtr = 24000000; + card->ocr = 0x80; + of_node_put(np); + } + } } static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable) diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 024acc1b0a2e..b2bbcb09a49e 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev) goto out; } + if (!host->pdata->gpio_card_ro_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; } - if (!ret) { + if (!ret) host->use_ro_gpio = true; - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? - 0 : MMC_CAP2_RO_ACTIVE_HIGH; - } if (host->pdata->init) host->pdata->init(dev, pxamci_detect_irq, mmc); diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index bd50935dc37d..11087976ab19 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -606,19 +606,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point, bool rx) { struct rtsx_pcr *pcr = host->pcr; - + u16 SD_VP_CTL = 0; dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", __func__, rx ? 
"RX" : "TX", sample_point); rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK); - if (rx) + if (rx) { + SD_VP_CTL = SD_VPRX_CTL; rtsx_pci_write_register(pcr, SD_VPRX_CTL, PHASE_SELECT_MASK, sample_point); - else + } else { + SD_VP_CTL = SD_VPTX_CTL; rtsx_pci_write_register(pcr, SD_VPTX_CTL, PHASE_SELECT_MASK, sample_point); - rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); - rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, + } + rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0); + rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index ae0ec27dd7cc..5f2e9696ee4d 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "sdhci-pltfm.h" @@ -235,6 +236,11 @@ static const struct sdhci_ops sdhci_cdns_ops = { .set_uhs_signaling = sdhci_cdns_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = { .ops = &sdhci_cdns_ops, }; @@ -334,6 +340,7 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc, static int sdhci_cdns_probe(struct platform_device *pdev) { struct sdhci_host *host; + const struct sdhci_pltfm_data *data; struct sdhci_pltfm_host *pltfm_host; struct sdhci_cdns_priv *priv; struct clk *clk; @@ -350,8 +357,12 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (ret) return ret; + data = of_device_get_match_data(dev); + if (!data) + data = &sdhci_cdns_pltfm_data; + nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node); - host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, + host = sdhci_pltfm_init(pdev, data, struct_size(priv, phy_params, nr_phy_params)); if (IS_ERR(host)) { ret = PTR_ERR(host); @@ -431,7 +442,10 @@ static const struct dev_pm_ops sdhci_cdns_pm_ops = { }; static const struct of_device_id sdhci_cdns_match[] = { - { .compatible = "socionext,uniphier-sd4hc" }, + { + .compatible = "socionext,uniphier-sd4hc", + .data = &sdhci_cdns_uniphier_pltfm_data, + }, { .compatible = "cdns,sd4hc" }, { /* sentinel */ } }; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1c988d6a2433..dccb4df46512 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); return err; } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index 57b582bf73d9..9289bb4d633e 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -51,6 +51,11 @@ #define ESDHC_CLOCK_HCKEN 0x00000002 #define ESDHC_CLOCK_IPGEN 0x00000001 +/* System Control 2 Register */ +#define ESDHC_SYSTEM_CONTROL_2 0x3c +#define ESDHC_SMPCLKSEL 0x00800000 +#define ESDHC_EXTN 0x00400000 + /* Host Controller Capabilities 
Register 2 */ #define ESDHC_CAPABILITIES_1 0x114 @@ -59,7 +64,16 @@ #define ESDHC_HS400_WNDW_ADJUST 0x00000040 #define ESDHC_HS400_MODE 0x00000010 #define ESDHC_TB_EN 0x00000004 +#define ESDHC_TB_MODE_MASK 0x00000003 +#define ESDHC_TB_MODE_SW 0x00000003 +#define ESDHC_TB_MODE_3 0x00000002 + +#define ESDHC_TBSTAT 0x124 + #define ESDHC_TBPTR 0x128 +#define ESDHC_WNDW_STRT_PTR_SHIFT 8 +#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8) +#define ESDHC_WNDW_END_PTR_MASK 0x7f /* SD Clock Control Register */ #define ESDHC_SDCLKCTL 0x144 diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index b75c82d8d6c1..3d0bb5e2e09b 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -99,7 +99,7 @@ #define CORE_PWRSAVE_DLL BIT(3) -#define DDR_CONFIG_POR_VAL 0x80040853 +#define DDR_CONFIG_POR_VAL 0x80040873 #define INVALID_TUNING_PHASE -1 @@ -148,8 +148,9 @@ struct sdhci_msm_offset { u32 core_ddr_200_cfg; u32 core_vendor_spec3; u32 core_dll_config_2; + u32 core_dll_config_3; + u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */ u32 core_ddr_config; - u32 core_ddr_config_2; }; static const struct sdhci_msm_offset sdhci_msm_v5_offset = { @@ -177,8 +178,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = { .core_ddr_200_cfg = 0x224, .core_vendor_spec3 = 0x250, .core_dll_config_2 = 0x254, - .core_ddr_config = 0x258, - .core_ddr_config_2 = 0x25c, + .core_dll_config_3 = 0x258, + .core_ddr_config = 0x25c, }; static const struct sdhci_msm_offset sdhci_msm_mci_offset = { @@ -207,8 +208,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = { .core_ddr_200_cfg = 0x184, .core_vendor_spec3 = 0x1b0, .core_dll_config_2 = 0x1b4, - .core_ddr_config = 0x1b8, - .core_ddr_config_2 = 0x1bc, + .core_ddr_config_old = 0x1b8, + .core_ddr_config = 0x1bc, }; struct sdhci_msm_variant_ops { @@ -253,6 +254,7 @@ struct sdhci_msm_host { const struct sdhci_msm_offset *offset; bool use_cdr; u32 transfer_mode; + bool updated_ddr_cfg; }; static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) @@ -924,8 +926,10 @@ out: static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) { struct mmc_host *mmc = host->mmc; - u32 dll_status, config; + u32 dll_status, config, ddr_cfg_offset; int ret; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); @@ -938,8 +942,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) * bootloaders. In the future, if this changes, then the desired * values will need to be programmed appropriately. */ - writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + - msm_offset->core_ddr_config); + if (msm_host->updated_ddr_cfg) + ddr_cfg_offset = msm_offset->core_ddr_config; + else + ddr_cfg_offset = msm_offset->core_ddr_config_old; + writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset); if (mmc->ios.enhanced_strobe) { config = readl_relaxed(host->ioaddr + @@ -1899,6 +1906,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_offset->core_vendor_spec_capabilities0); } + if (core_major == 1 && core_minor >= 0x49) + msm_host->updated_ddr_cfg = true; + /* * Power on reset state may trigger power irq if previous status of * PWRCTL was either BUS_ON or IO_HIGH_V. 
So before enabling pwr irq diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 0ae986c42bc8..881f8138e7de 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -118,7 +118,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) { sdhci_reset(host, mask); - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) + || mmc_gpio_get_cd(host->mmc) >= 0) sdhci_at91_set_force_card_detect(host); } @@ -324,19 +325,22 @@ static int sdhci_at91_probe(struct platform_device *pdev) priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); if (IS_ERR(priv->mainck)) { dev_err(&pdev->dev, "failed to get baseclk\n"); - return PTR_ERR(priv->mainck); + ret = PTR_ERR(priv->mainck); + goto sdhci_pltfm_free; } priv->hclock = devm_clk_get(&pdev->dev, "hclock"); if (IS_ERR(priv->hclock)) { dev_err(&pdev->dev, "failed to get hclock\n"); - return PTR_ERR(priv->hclock); + ret = PTR_ERR(priv->hclock); + goto sdhci_pltfm_free; } priv->gck = devm_clk_get(&pdev->dev, "multclk"); if (IS_ERR(priv->gck)) { dev_err(&pdev->dev, "failed to get multclk\n"); - return PTR_ERR(priv->gck); + ret = PTR_ERR(priv->gck); + goto sdhci_pltfm_free; } ret = sdhci_at91_set_clks_presets(&pdev->dev); @@ -394,8 +398,11 @@ static int sdhci_at91_probe(struct platform_device *pdev) * detection procedure using the SDMCC_CD signal is bypassed. * This bit is reset when a software reset for all command is performed * so we need to implement our own reset function to set back this bit. + * + * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line. */ - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) + || mmc_gpio_get_cd(host->mmc) >= 0) sdhci_at91_set_force_card_detect(host); pm_runtime_put_autosuspend(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 1d1953dfc54b..fcfb50f84c8b 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -77,8 +77,11 @@ struct sdhci_esdhc { bool quirk_incorrect_hostver; bool quirk_limited_clk_division; bool quirk_unreliable_pulse_detection; - bool quirk_fixup_tuning; + bool quirk_tuning_erratum_type1; + bool quirk_tuning_erratum_type2; bool quirk_ignore_data_inhibit; + bool quirk_delay_before_data_reset; + bool in_sw_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; u32 div_ratio; @@ -408,6 +411,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg) static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); int base = reg & ~0x3; u32 value; u32 ret; @@ -416,10 +421,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) ret = esdhc_writew_fixup(host, reg, val, value); if (reg != SDHCI_TRANSFER_MODE) iowrite32be(ret, host->ioaddr + base); + + /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set + * 1us later after ESDHC_EXTN is set. 
+ */ + if (base == ESDHC_SYSTEM_CONTROL_2) { + if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) && + esdhc->in_sw_tuning) { + udelay(1); + ret |= ESDHC_SMPCLKSEL; + iowrite32be(ret, host->ioaddr + base); + } + } } static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); int base = reg & ~0x3; u32 value; u32 ret; @@ -428,6 +447,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) ret = esdhc_writew_fixup(host, reg, val, value); if (reg != SDHCI_TRANSFER_MODE) iowrite32(ret, host->ioaddr + base); + + /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set + * 1us later after ESDHC_EXTN is set. + */ + if (base == ESDHC_SYSTEM_CONTROL_2) { + if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) && + esdhc->in_sw_tuning) { + udelay(1); + ret |= ESDHC_SMPCLKSEL; + iowrite32(ret, host->ioaddr + base); + } + } } static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg) @@ -705,14 +736,16 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); u32 val; + if (esdhc->quirk_delay_before_data_reset && + (mask & SDHCI_RESET_DATA) && + (host->flags & SDHCI_REQ_USE_DMA)) + mdelay(5); + sdhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) - mdelay(5); - if (mask & SDHCI_RESET_ALL) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; @@ -796,16 +829,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, } } -static struct soc_device_attribute soc_fixup_tuning[] = { +static struct soc_device_attribute soc_tuning_erratum_type1[] = { + { .family = "QorIQ T1023", .revision = "1.0", }, { .family = "QorIQ T1040", .revision = "1.0", }, { .family = "QorIQ T2080", .revision = "1.0", }, - { .family = "QorIQ T1023", .revision = "1.0", }, { .family = "QorIQ LS1021A", .revision = "1.0", }, - { .family = "QorIQ LS1080A", .revision = "1.0", }, - { .family = "QorIQ LS2080A", .revision = "1.0", }, + { }, +}; + +static struct soc_device_attribute soc_tuning_erratum_type2[] = { { .family = "QorIQ LS1012A", .revision = "1.0", }, { .family = "QorIQ LS1043A", .revision = "1.*", }, { .family = "QorIQ LS1046A", .revision = "1.0", }, + { .family = "QorIQ LS1080A", .revision = "1.0", }, + { .family = "QorIQ LS2080A", .revision = "1.0", }, + { .family = "QorIQ LA1575A", .revision = "1.0", }, { }, }; @@ -829,15 +867,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable) esdhc_clock_enable(host, true); } +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 tbstat_15_8, tbstat_7_0; + u32 val; + + if (esdhc->quirk_tuning_erratum_type1) { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + return; + } + + /* Write TBCTL[11:8]=4'h8 */ + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~(0xf << 8); + val |= 8 << 8; + sdhci_writel(host, val, ESDHC_TBCTL); + + mdelay(1); + + /* Read TBCTL[31:0] register and rewrite again */ + val = sdhci_readl(host, ESDHC_TBCTL); + sdhci_writel(host, val, ESDHC_TBCTL); + + mdelay(1); + + /* Read the TBSTAT[31:0] register twice */ + val = sdhci_readl(host, ESDHC_TBSTAT); + val = sdhci_readl(host, ESDHC_TBSTAT); + + 
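/* Assumption: the first TBSTAT read may return a stale value, so it is + * read back-to-back and only the second result is used below. */ +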
/* Reset data lines by setting ESDHCCTL[RSTD] */ + sdhci_reset(host, SDHCI_RESET_DATA); + /* Write 32'hFFFF_FFFF to IRQSTAT register */ + sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS); + + /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio + * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio, + * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio + * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio. + */ + tbstat_7_0 = val & 0xff; + tbstat_15_8 = (val >> 8) & 0xff; + + if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) { + *window_start = 8 * esdhc->div_ratio; + *window_end = 4 * esdhc->div_ratio; + } else { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + } +} + +static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode, + u8 window_start, u8 window_end) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u32 val; + int ret; + + /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */ + val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) & + ESDHC_WNDW_STRT_PTR_MASK; + val |= window_end & ESDHC_WNDW_END_PTR_MASK; + sdhci_writel(host, val, ESDHC_TBPTR); + + /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */ + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_MODE_MASK; + val |= ESDHC_TB_MODE_SW; + sdhci_writel(host, val, ESDHC_TBCTL); + + esdhc->in_sw_tuning = true; + ret = sdhci_execute_tuning(mmc, opcode); + esdhc->in_sw_tuning = false; + return ret; +} + static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 window_start, window_end; + int ret, retries = 1; bool hs400_tuning; unsigned int clk; u32 val; - int ret; /* For tuning mode, the sd clock divisor value * must be larger than 3 according to reference manual. @@ -846,39 +966,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (host->clock > clk) esdhc_of_set_clock(host, clk); - if (esdhc->quirk_limited_clk_division && - host->flags & SDHCI_HS400_TUNING) - esdhc_of_set_clock(host, host->clock); - esdhc_tuning_block_enable(host, true); hs400_tuning = host->flags & SDHCI_HS400_TUNING; - ret = sdhci_execute_tuning(mmc, opcode); - if (hs400_tuning) { + do { + if (esdhc->quirk_limited_clk_division && + hs400_tuning) + esdhc_of_set_clock(host, host->clock); + + /* Do HW tuning */ + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_MODE_MASK; + val |= ESDHC_TB_MODE_3; + sdhci_writel(host, val, ESDHC_TBCTL); + + ret = sdhci_execute_tuning(mmc, opcode); + if (ret) + break; + + /* If HW tuning fails and triggers erratum, + * try workaround. + */ + ret = host->tuning_err; + if (ret == -EAGAIN && + (esdhc->quirk_tuning_erratum_type1 || + esdhc->quirk_tuning_erratum_type2)) { + /* Recover HS400 tuning flag */ + if (hs400_tuning) + host->flags |= SDHCI_HS400_TUNING; + pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n", + mmc_hostname(mmc)); + /* Do SW tuning */ + esdhc_prepare_sw_tuning(host, &window_start, + &window_end); + ret = esdhc_execute_sw_tuning(mmc, opcode, + window_start, + window_end); + if (ret) + break; + + /* Retry both HW/SW tuning with reduced clock. 
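+ * (one divider step down: max_clk / (esdhc->div_ratio + 1), as computed + * below).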
*/ + ret = host->tuning_err; + if (ret == -EAGAIN && retries) { + /* Recover HS400 tuning flag */ + if (hs400_tuning) + host->flags |= SDHCI_HS400_TUNING; + + clk = host->max_clk / (esdhc->div_ratio + 1); + esdhc_of_set_clock(host, clk); + pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n", + mmc_hostname(mmc)); + } else { + break; + } + } else { + break; + } + } while (retries--); + + if (ret) { + esdhc_tuning_block_enable(host, false); + } else if (hs400_tuning) { val = sdhci_readl(host, ESDHC_SDTIMNGCTL); val |= ESDHC_FLW_CTL_BG; sdhci_writel(host, val, ESDHC_SDTIMNGCTL); } - if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) { - - /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and - * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO - */ - val = sdhci_readl(host, ESDHC_TBPTR); - val = (val & ~((0x7f << 8) | 0x7f)) | - (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8); - sdhci_writel(host, val, ESDHC_TBPTR); - - /* program the software tuning mode by setting - * TBCTL[TB_MODE]=2'h3 - */ - val = sdhci_readl(host, ESDHC_TBCTL); - val |= 0x3; - sdhci_writel(host, val, ESDHC_TBCTL); - sdhci_execute_tuning(mmc, opcode); - } return ret; } @@ -1049,6 +1203,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) if (match) esdhc->clk_fixup = match->data; np = pdev->dev.of_node; + + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) + esdhc->quirk_delay_before_data_reset = true; + clk = of_clk_get(np, 0); if (!IS_ERR(clk)) { /* @@ -1114,10 +1272,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); esdhc = sdhci_pltfm_priv(pltfm_host); - if (soc_device_match(soc_fixup_tuning)) - esdhc->quirk_fixup_tuning = true; + if (soc_device_match(soc_tuning_erratum_type1)) + esdhc->quirk_tuning_erratum_type1 = true; else - esdhc->quirk_fixup_tuning = false; + esdhc->quirk_tuning_erratum_type1 = false; + + if (soc_device_match(soc_tuning_erratum_type2)) + esdhc->quirk_tuning_erratum_type2 = true; + else + esdhc->quirk_tuning_erratum_type2 = false; if (esdhc->vendor_ver == VENDOR_V_22) host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; @@ -1126,8 +1289,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) { - host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST; - host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; } if (of_device_is_compatible(np, "fsl,p5040-esdhc") || diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 083e7e053c95..d3135249b2e4 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1134,6 +1134,9 @@ static int sdhci_omap_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning; host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq; + /* An R1B response is required to properly manage HW busy detection.
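+ * With MMC_CAP_NEED_RSP_BUSY set, the mmc core keeps the R1B response for + * sleep, erase and __mmc_switch() even when the operation timeout exceeds + * the host's max_busy_timeout (see the core changes above).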
*/ + mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + ret = sdhci_setup_host(host); if (ret) goto err_put_sync; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index eaffa85bc728..5091e2c1c0e5 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -21,11 +21,13 @@ #include #include #include +#include #include #include #include #include #include +#include #ifdef CONFIG_X86 #include @@ -782,11 +784,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) return 0; } +static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + dmi_match(DMI_BIOS_VENDOR, "LENOVO"); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); - slot->host->mmc->caps2 |= MMC_CAP2_CQE; + if (!glk_broken_cqhci(slot)) + slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, @@ -1590,11 +1599,59 @@ static int amd_probe(struct sdhci_pci_chip *chip) return 0; } +static u32 sdhci_read_present_state(struct sdhci_host *host) +{ + return sdhci_readl(host, SDHCI_PRESENT_STATE); +} + +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct pci_dev *pdev = slot->chip->pdev; + u32 present_state; + + /* + * SDHC 0x7906 requires a hard reset to clear all internal state. + * Otherwise it can get into a bad state where the DATA lines are always + * read as zeros. + */ + if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) { + pci_clear_master(pdev); + + pci_save_state(pdev); + + pci_set_power_state(pdev, PCI_D3cold); + pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc), + pdev->current_state); + pci_set_power_state(pdev, PCI_D0); + + pci_restore_state(pdev); + + /* + * SDHCI_RESET_ALL says the card detect logic should not be + * reset, but since we need to reset the entire controller + * we should wait until the card detect logic has stabilized. + * + * This normally takes about 40ms. 
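+ * The poll below re-checks SDHCI_CD_STABLE every 10 ms and gives up + * after 100 ms.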
+ */ + readx_poll_timeout( + sdhci_read_present_state, + host, + present_state, + present_state & SDHCI_CD_STABLE, + 10000, + 100000 + ); + } + + return sdhci_reset(host, mask); +} + static const struct sdhci_ops amd_sdhci_pci_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = amd_sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; @@ -1673,6 +1730,8 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd), SDHCI_PCI_DEVICE(O2, 8120, o2), SDHCI_PCI_DEVICE(O2, 8220, o2), SDHCI_PCI_DEVICE(O2, 8221, o2), diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 5eea8d70a85d..ce15a05f23d4 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode) return 0; } +static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot) +{ + int ret; + + ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + pr_warn("%s: enable PCI MSI failed, error=%d\n", + mmc_hostname(slot->host->mmc), ret); + return; + } + + slot->host->irq = pci_irq_vector(slot->chip->pdev, 0); +} + static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot) { struct sdhci_host *host = slot->host; + gli_pcie_enable_msi(slot); slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO; sdhci_enable_v4_mode(host); @@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot) { struct sdhci_host *host = slot->host; + gli_pcie_enable_msi(slot); slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO; sdhci_enable_v4_mode(host); diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 558202fe64c6..981bbbe63aff 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -55,6 +55,8 @@ #define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4 #define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5 #define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5 +#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4 +#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8 #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000 #define PCI_DEVICE_ID_VIA_95D0 0x95d0 diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 7bc950520fd9..a25c3a4d3f6c 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; - if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; } @@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev) if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) host->mmc->caps |= MMC_CAP_1_8V_DDR; + /* An R1B response is required to properly manage HW busy detection.
*/ + host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + tegra_sdhci_parse_dt(host); tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index b056400e34b1..4478b94d4791 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1871,9 +1871,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) ctrl_2 |= SDHCI_CTRL_UHS_SDR104; else if (timing == MMC_TIMING_UHS_SDR12) ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (timing == MMC_TIMING_SD_HS || - timing == MMC_TIMING_MMC_HS || - timing == MMC_TIMING_UHS_SDR25) + else if (timing == MMC_TIMING_UHS_SDR25) ctrl_2 |= SDHCI_CTRL_UHS_SDR25; else if (timing == MMC_TIMING_UHS_SDR50) ctrl_2 |= SDHCI_CTRL_UHS_SDR50; @@ -2408,8 +2406,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_send_tuning(host, opcode); if (!host->tuning_done) { - pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", - mmc_hostname(host->mmc)); + pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); sdhci_abort_tuning(host, opcode); return -ETIMEDOUT; } @@ -3758,6 +3756,9 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_hostname(mmc), host->version); } + if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) + mmc->caps2 &= ~MMC_CAP2_CQE; + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; else if (!(host->caps & SDHCI_CAN_DO_SDMA)) @@ -3901,11 +3902,13 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { - if (host->clk_mul) { - mmc->f_min = (host->max_clk * host->clk_mul) / 1024; + if (host->clk_mul) max_clk = host->max_clk * host->clk_mul; - } else - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + /* + * Divided Clock Mode minimum clock rate is always less than + * Programmable Clock Mode minimum clock rate. + */ + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 0ed3e0eaef5f..fe83ece6965b 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -409,6 +409,8 @@ struct sdhci_host { #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) /* Controller reports inverted write-protect state */ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) +/* Controller has unusable command queue engine */ +#define SDHCI_QUIRK_BROKEN_CQE (1<<17) /* Controller does not like fast PIO transfers */ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) /* Controller does not have a LED */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index bb90757ecace..4cbb764c9822 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -236,6 +236,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) writeb(val, host->ioaddr + reg); } +static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = sdhci_execute_tuning(mmc, opcode); + + if (err) + return err; + /* + * Tuning data remains in the buffer after tuning. 
+ * Do a command and data reset to get rid of it + */ + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + + return 0; +} + static struct sdhci_ops sdhci_am654_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -249,8 +265,7 @@ static struct sdhci_ops sdhci_am654_ops = { static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -272,8 +287,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -295,8 +309,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -480,6 +493,8 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_put; } + host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; + ret = sdhci_am654_init(host); if (ret) goto pm_runtime_put; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 9b6e1001e77c..dec5a99f52cf 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1184,7 +1184,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) if (ret == -EPROBE_DEFER) return ret; - mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; mmc->max_segs = pdata->max_segs ? 
: 32; mmc->max_blk_size = TMIO_MAX_BLK_SIZE; diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index cf8c8be40a9c..a4f2d8cdca12 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -123,19 +123,23 @@ static int cfi_use_status_reg(struct cfi_private *cfi) (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; } -static void cfi_check_err_status(struct map_info *map, struct flchip *chip, - unsigned long adr) +static int cfi_check_err_status(struct map_info *map, struct flchip *chip, + unsigned long adr) { struct cfi_private *cfi = map->fldrv_priv; map_word status; if (!cfi_use_status_reg(cfi)) - return; + return 0; cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); status = map_read(map, adr); + /* The error bits are invalid while the chip's busy */ + if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB))) + return 0; + if (map_word_bitsset(map, status, CMD(0x3a))) { unsigned long chipstatus = MERGESTATUS(status); @@ -151,7 +155,12 @@ static void cfi_check_err_status(struct map_info *map, struct flchip *chip, if (chipstatus & CFI_SR_SLSB) pr_err("%s sector write protected, status %lx\n", map->name, chipstatus); + + /* Erase/Program status bits are set on the operation failure */ + if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB)) + return 1; } + return 0; } /* #define DEBUG_CFI_FEATURES */ @@ -848,20 +857,16 @@ static int __xipram chip_good(struct map_info *map, struct flchip *chip, if (cfi_use_status_reg(cfi)) { map_word ready = CMD(CFI_SR_DRB); - map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB); + /* * For chips that support status register, check device - * ready bit and Erase/Program status bit to know if - * operation succeeded. + * ready bit */ cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); curd = map_read(map, addr); - if (map_word_andequal(map, curd, ready, ready)) - return !map_word_bitsset(map, curd, err); - - return 0; + return map_word_andequal(map, curd, ready, ready); } oldd = map_read(map, addr); @@ -1699,8 +1704,11 @@ static int __xipram do_write_oneword_once(struct map_info *map, break; } - if (chip_good(map, chip, adr, datum)) + if (chip_good(map, chip, adr, datum)) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); @@ -1773,7 +1781,6 @@ static int __xipram do_write_oneword_retry(struct map_info *map, ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); if (ret) { /* reset on all failures. */ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -1970,12 +1977,17 @@ static int __xipram do_write_buffer_wait(struct map_info *map, */ if (time_after(jiffies, timeo) && !chip_good(map, chip, adr, datum)) { + pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", + __func__, adr); ret = -EIO; break; } - if (chip_good(map, chip, adr, datum)) + if (chip_good(map, chip, adr, datum)) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } /* Latency issues. 
Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); @@ -2071,12 +2083,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, chip->word_write_time); ret = do_write_buffer_wait(map, chip, adr, datum); - if (ret) { - cfi_check_err_status(map, chip, adr); + if (ret) do_write_buffer_reset(map, chip, cfi); - pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", - __func__, adr); - } xip_enable(map, chip, adr); @@ -2271,9 +2279,9 @@ retry: udelay(1); } - if (!chip_good(map, chip, adr, datum)) { + if (!chip_good(map, chip, adr, datum) || + cfi_check_err_status(map, chip, adr)) { /* reset on all failures. */ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2467,8 +2475,11 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->erase_suspended = 0; } - if (chip_good(map, chip, adr, map_word_ff(map))) + if (chip_good(map, chip, adr, map_word_ff(map))) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } if (time_after(jiffies, timeo)) { printk(KERN_WARNING "MTD %s(): software timeout\n", @@ -2483,7 +2494,6 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) /* Did we succeed? */ if (ret) { /* reset on all failures. */ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2564,8 +2574,11 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->erase_suspended = 0; } - if (chip_good(map, chip, adr, map_word_ff(map))) + if (chip_good(map, chip, adr, map_word_ff(map))) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } if (time_after(jiffies, timeo)) { printk(KERN_WARNING "MTD %s(): software timeout\n", @@ -2580,7 +2593,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, /* Did we succeed? */ if (ret) { /* reset on all failures. 
*/ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c index b20d02b4f830..77c872fd3d83 100644 --- a/drivers/mtd/devices/mchp23k256.c +++ b/drivers/mtd/devices/mchp23k256.c @@ -64,15 +64,17 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len, struct spi_transfer transfer[2] = {}; struct spi_message message; unsigned char command[MAX_CMD_SIZE]; - int ret; + int ret, cmd_len; spi_message_init(&message); + cmd_len = mchp23k256_cmdsz(flash); + command[0] = MCHP23K256_CMD_WRITE; mchp23k256_addr2cmd(flash, to, command); transfer[0].tx_buf = command; - transfer[0].len = mchp23k256_cmdsz(flash); + transfer[0].len = cmd_len; spi_message_add_tail(&transfer[0], &message); transfer[1].tx_buf = buf; @@ -88,8 +90,8 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len, if (ret) return ret; - if (retlen && message.actual_length > sizeof(command)) - *retlen += message.actual_length - sizeof(command); + if (retlen && message.actual_length > cmd_len) + *retlen += message.actual_length - cmd_len; return 0; } @@ -101,16 +103,18 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len, struct spi_transfer transfer[2] = {}; struct spi_message message; unsigned char command[MAX_CMD_SIZE]; - int ret; + int ret, cmd_len; spi_message_init(&message); + cmd_len = mchp23k256_cmdsz(flash); + memset(&transfer, 0, sizeof(transfer)); command[0] = MCHP23K256_CMD_READ; mchp23k256_addr2cmd(flash, from, command); transfer[0].tx_buf = command; - transfer[0].len = mchp23k256_cmdsz(flash); + transfer[0].len = cmd_len; spi_message_add_tail(&transfer[0], &message); transfer[1].rx_buf = buf; @@ -126,8 +130,8 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len, if (ret) return ret; - if (retlen && message.actual_length > sizeof(command)) - *retlen += message.actual_length - sizeof(command); + if (retlen && message.actual_length > cmd_len) + *retlen += message.actual_length - cmd_len; return 0; } diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 986f81d2f93e..47ad0766affa 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -592,6 +592,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, return 0; } +/* + * The purpose of this function is to ensure a memcpy_toio() with byte writes + * only. Its structure is inspired by the ARM implementation of _memcpy_toio() + * which also does single byte writes but cannot be used here as this is just an + * implementation detail and not part of the API. Not to mention the comment + * stating that _memcpy_toio() should be optimized.
+ */ +static void spear_smi_memcpy_toio_b(volatile void __iomem *dest, + const void *src, size_t len) +{ + const unsigned char *from = src; + + while (len) { + len--; + writeb(*from, dest); + from++; + dest++; + } +} + static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, void __iomem *dest, const void *src, size_t len) { @@ -614,7 +634,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, ctrlreg1 = readl(dev->io_base + SMI_CR1); writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1); - memcpy_toio(dest, src, len); + /* + * In Write Burst mode (WB_MODE), the spec states that writes must be: + * - incremental + * - of the same size + * The ARM implementation of memcpy_toio() will optimize the number of + * I/O accesses by using as many 4-byte writes as possible, surrounded by + * 2-byte/1-byte accesses if: + * - the destination is not 4-byte aligned + * - the length is not a multiple of 4 bytes. + * Avoid this alternation of write access sizes by using our own 'byte + * access' helper if at least one of the two conditions above is true. + */ + if (IS_ALIGNED(len, sizeof(u32)) && + IS_ALIGNED((uintptr_t)dest, sizeof(u32))) + memcpy_toio(dest, src, len); + else + spear_smi_memcpy_toio_b(dest, src, len); writel(ctrlreg1, dev->io_base + SMI_CR1); diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c index edf94ee54ec7..71a632b815aa 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/omap2.c @@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; - tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0); + tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!tx) { dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); return -EIO; diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 77bd32a683e1..9e81cd982dd3 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, stats = mtd->ecc_stats; - /* Read-while-load method */ + /* Read-while-load method */ - /* Do first load to bufferRAM */ - if (read < len) { - if (!onenand_check_bufferram(mtd, from)) { + /* Do first load to bufferRAM */ + if (read < len) { + if (!onenand_check_bufferram(mtd, from)) { this->command(mtd, ONENAND_CMD_READ, from, writesize); - ret = this->wait(mtd, FL_READING); - onenand_update_bufferram(mtd, from, !ret); + ret = this->wait(mtd, FL_READING); + onenand_update_bufferram(mtd, from, !ret); if (mtd_is_eccerr(ret)) ret = 0; - } - } + } + } thislen = min_t(int, writesize, len - read); column = from & (writesize - 1); if (column + thislen > writesize) thislen = writesize - column; - while (!ret) { - /* If there is more to load then start next load */ - from += thislen; - if (read + thislen < len) { + while (!ret) { + /* If there is more to load then start next load */ + from += thislen; + if (read + thislen < len) { this->command(mtd, ONENAND_CMD_READ, from, writesize); - /* - * Chip boundary handling in DDP - * Now we issued chip 1 read and pointed chip 1 * bufferram so we have to point chip 0 bufferram.
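In the omap2-onenand change above, dmaengine_prep_dma_memcpy() now passes DMA_CTRL_ACK | DMA_PREP_INTERRUPT instead of 0: DMA_PREP_INTERRUPT asks the engine to raise the completion callback the driver waits for, and DMA_CTRL_ACK marks the descriptor as reusable by the engine once completed. A generic sketch of that dmaengine sequence; the callback names are placeholders, not driver code:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, count,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	tx->callback = transfer_done;		/* hypothetical completion hook */
	tx->callback_param = &done;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* start, then wait for the callback */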
- */ - if (ONENAND_IS_DDP(this) && - unlikely(from == (this->chipsize >> 1))) { - this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2); - boundary = 1; - } else - boundary = 0; - ONENAND_SET_PREV_BUFFERRAM(this); - } - /* While load is going, read from last bufferRAM */ - this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); + */ + if (ONENAND_IS_DDP(this) && + unlikely(from == (this->chipsize >> 1))) { + this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2); + boundary = 1; + } else + boundary = 0; + ONENAND_SET_PREV_BUFFERRAM(this); + } + /* While load is going, read from last bufferRAM */ + this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); /* Read oob area if needed */ if (oobbuf) { @@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, oobcolumn = 0; } - /* See if we are done */ - read += thislen; - if (read == len) - break; - /* Set up for next read from bufferRAM */ - if (unlikely(boundary)) - this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2); - ONENAND_SET_NEXT_BUFFERRAM(this); - buf += thislen; + /* See if we are done */ + read += thislen; + if (read == len) + break; + /* Set up for next read from bufferRAM */ + if (unlikely(boundary)) + this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2); + ONENAND_SET_NEXT_BUFFERRAM(this); + buf += thislen; thislen = min_t(int, writesize, len - read); - column = 0; - cond_resched(); - /* Now wait for load */ - ret = this->wait(mtd, FL_READING); - onenand_update_bufferram(mtd, from, !ret); + column = 0; + cond_resched(); + /* Now wait for load */ + ret = this->wait(mtd, FL_READING); + onenand_update_bufferram(mtd, from, !ret); if (mtd_is_eccerr(ret)) ret = 0; - } + } /* * Return success, if no ECC failures, else -EBADMSG diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 334fe3130285..b9d5d55a5edb 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this) struct resources *r = &this->resources; int ret; + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) + return ret; + ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; @@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this) */ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - return 0; err_out: + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); return ret; } @@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev) return ret; } + /* Set flag to get timing setup restored for next exec_op */ + if (this->hw.clk_rate) + this->hw.must_apply_timings = true; + /* re-init the BCH registers */ ret = bch_set_geometry(this); if (ret) { diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 5c2c30a7dffa..f64e3b6605c6 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -292,12 +292,16 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page) struct mtd_info *mtd = nand_to_mtd(chip); int last_page = ((mtd->erasesize - mtd->writesize) >> chip->page_shift) & chip->pagemask; + unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE + | NAND_BBM_LASTPAGE; + if (page == 0 && !(chip->options & bbm_flags)) + return 0; if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) return 0; - else if (page <= 1 && 
chip->options & NAND_BBM_SECONDPAGE) + if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) return 1; - else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) + if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) return last_page; return -EINVAL; diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 8ca9fad6e6ad..56654030ec7f 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -446,8 +446,10 @@ static int micron_nand_init(struct nand_chip *chip) if (ret) goto err_free_manuf_data; + chip->options |= NAND_BBM_FIRSTPAGE; + if (mtd->writesize == 2048) - chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE; + chip->options |= NAND_BBM_SECONDPAGE; ondie = micron_supports_on_die_ecc(chip); diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 8cc852dc7d54..5c06e0b4d4ef 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,6 +37,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) +#define FMC2_TIMEOUT_US 1000 #define FMC2_TIMEOUT_MS 1000 /* Timings */ @@ -53,6 +54,8 @@ #define FMC2_PMEM 0x88 #define FMC2_PATT 0x8c #define FMC2_HECCR 0x94 +#define FMC2_ISR 0x184 +#define FMC2_ICR 0x188 #define FMC2_CSQCR 0x200 #define FMC2_CSQCFGR1 0x204 #define FMC2_CSQCFGR2 0x208 @@ -118,6 +121,12 @@ #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a +/* Register: FMC2_ISR */ +#define FMC2_ISR_IHLF BIT(1) + +/* Register: FMC2_ICR */ +#define FMC2_ICR_CIHLF BIT(1) + /* Register: FMC2_CSQCR */ #define FMC2_CSQCR_CSQSTART BIT(0) @@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, stm32_fmc2_set_buswidth_16(fmc2, true); } +static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_sdr_timings *timings; + u32 isr, sr; + + /* Check that there are no pending requests to the NAND flash */ + if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + FMC2_TIMEOUT_US)) + dev_warn(fmc2->dev, "Waitrdy timeout\n"); + + /* Wait tWB before R/B# signal is low */ + timings = nand_get_sdr_timings(&chip->data_interface); + ndelay(PSEC_TO_NSEC(timings->tWB_max)); + + /* R/B# signal is low, clear high level flag */ + writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + + /* Wait until R/B# signal is high */ + return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + isr, isr & FMC2_ISR_IHLF, + 5, 1000 * timeout_ms); +} + static int stm32_fmc2_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) @@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, break; case NAND_OP_WAITRDY_INSTR: - ret = nand_soft_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + ret = stm32_fmc2_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); break; } } diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c index e5ea6127ab5a..671a61845bd5 100644 --- a/drivers/mtd/parsers/sharpslpart.c +++ b/drivers/mtd/parsers/sharpslpart.c @@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob) static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl) { - unsigned int block_num, log_num, phymax; + unsigned int block_num, phymax; + int i, ret, log_num; loff_t block_adr; u8 *oob; - int i, ret; oob =
kzalloc(mtd->oobsize, GFP_KERNEL); if (!oob) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 7acf4a93b592..f417fb680cd8 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -2310,15 +2310,16 @@ static const struct flash_info spi_nor_ids[] = { { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024, + SECT_4K | USE_FSR | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | + SPI_NOR_QUAD_READ) }, { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, - { "mt25qu512a (n25q512a)", INFO(0x20bb20, 0, 64 * 1024, 1024, - SECT_4K | USE_FSR | SPI_NOR_DUAL_READ | - SPI_NOR_QUAD_READ | - SPI_NOR_4B_OPCODES) }, { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, /* Micron */ @@ -2544,7 +2545,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct spi_nor *nor = mtd_to_spi_nor(mtd); - int ret; + ssize_t ret; dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); @@ -2865,7 +2866,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) */ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf) { - int ret; + ssize_t ret; while (len) { ret = spi_nor_read_data(nor, addr, len, buf); @@ -4544,9 +4545,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor) static void spansion_post_sfdp_fixups(struct spi_nor *nor) { - struct mtd_info *mtd = &nor->mtd; - - if (mtd->size <= SZ_16M) + if (nor->params.size <= SZ_16M) return; nor->flags |= SNOR_F_4B_OPCODES; diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 30621c67721a..604772fc4a96 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -64,7 +64,7 @@ static int self_check_seen(struct ubi_device *ubi, unsigned long *seen) return 0; for (pnum = 0; pnum < ubi->peb_count; pnum++) { - if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) { + if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) { ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum); ret = -EINVAL; } @@ -1137,7 +1137,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, struct rb_node *tmp_rb; int ret, i, j, free_peb_count, used_peb_count, vol_count; int scrub_peb_count, erase_peb_count; - unsigned long *seen_pebs = NULL; + unsigned long *seen_pebs; fm_raw = ubi->fm_buf; memset(ubi->fm_buf, 0, ubi->fm_size); @@ -1151,7 +1151,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID); if (!dvbuf) { ret = -ENOMEM; - goto out_kfree; + goto out_free_avbuf; } avhdr = ubi_get_vid_hdr(avbuf); @@ -1160,7 +1160,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, seen_pebs = init_seen(ubi); if (IS_ERR(seen_pebs)) { ret = PTR_ERR(seen_pebs); - goto out_kfree; + goto out_free_dvbuf; } spin_lock(&ubi->volumes_lock); @@ -1328,7 +1328,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, ret = 
ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf); if (ret) { ubi_err(ubi, "unable to write vid_hdr to fastmap SB!"); - goto out_kfree; + goto out_free_seen; } for (i = 0; i < new_fm->used_blocks; i++) { @@ -1350,7 +1350,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, if (ret) { ubi_err(ubi, "unable to write vid_hdr to PEB %i!", new_fm->e[i]->pnum); - goto out_kfree; + goto out_free_seen; } } @@ -1360,7 +1360,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, if (ret) { ubi_err(ubi, "unable to write fastmap to PEB %i!", new_fm->e[i]->pnum); - goto out_kfree; + goto out_free_seen; } } @@ -1370,10 +1370,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi, ret = self_check_seen(ubi, seen_pebs); dbg_bld("fastmap written!"); -out_kfree: - ubi_free_vid_buf(avbuf); - ubi_free_vid_buf(dvbuf); +out_free_seen: free_seen(seen_pebs); +out_free_dvbuf: + ubi_free_vid_buf(dvbuf); +out_free_avbuf: + ubi_free_vid_buf(avbuf); + out: return ret; } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index df1c7989e13d..df3cd2589bcf 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -106,6 +106,7 @@ config NET_FC config IFB tristate "Intermediate Functional Block support" depends on NET_CLS_ACT + select NET_REDIRECT ---help--- This is an intermediate driver that allows sharing of resources. diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index e3b25f310936..73485520b138 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -122,6 +122,8 @@ static void ad_marker_response_received(struct bond_marker *marker, static void ad_update_actor_keys(struct port *port, bool reset); +int bond_8023ad_up; + /* ================= api to bonding and kernel code ================== */ /** @@ -1204,6 +1206,13 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) __record_pdu(lacpdu, port); port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)); port->actor_oper_port_state &= ~AD_STATE_EXPIRED; + + if ((last_state != AD_RX_CURRENT) && + port->slave && + port->slave->bond && + (port->slave->bond->params.broadcast_arp || + port->slave->bond->params.broadcast_nd)) + port->slave->bond->send_peer_notif++; break; default: break; @@ -2343,6 +2352,22 @@ void bond_3ad_state_machine_handler(struct work_struct *work) port->sm_vars &= ~AD_PORT_BEGIN; } + if (bond_8023ad_up > 0 && + time_after_eq(jiffies, bond->params.last_na + + msecs_to_jiffies(5 * 1000))) { + bond->send_peer_notif++; + bond->params.last_na = jiffies; + bond_8023ad_up--; + } + + if (bond->params.periodic_na && + (bond->params.periodic_na_interval >= 1) && + time_after_eq(jiffies, bond->params.last_na + + msecs_to_jiffies(bond->params.periodic_na_interval * 1000))) { + bond->send_peer_notif++; + bond->params.last_na = jiffies; + } + re_arm: bond_for_each_slave_rcu(bond, slave, iter) { if (slave->should_notify) { diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 4f2e6910c623..c81698550e5a 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -50,11 +50,6 @@ struct arp_pkt { }; #pragma pack() -static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) -{ - return (struct arp_pkt *)skb_network_header(skb); -} - /* Forward declaration */ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], bool strict_match); @@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
spin_unlock(&bond->mode_lock); } -static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) +static struct slave *rlb_choose_channel(struct sk_buff *skb, + struct bonding *bond, + const struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct arp_pkt *arp = arp_pkt(skb); struct slave *assigned_slave, *curr_active_slave; struct rlb_client_info *client_info; u32 hash_index = 0; @@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon */ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) { - struct arp_pkt *arp = arp_pkt(skb); struct slave *tx_slave = NULL; + struct arp_pkt *arp; + + if (!pskb_network_may_pull(skb, sizeof(*arp))) + return NULL; + arp = (struct arp_pkt *)skb_network_header(skb); /* Don't modify or load balance ARPs that do not originate locally * (e.g., arrive via a bridge). @@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) if (arp->op_code == htons(ARPOP_REPLY)) { /* the arp must be sent on the selected rx channel */ - tx_slave = rlb_choose_channel(skb, bond); + tx_slave = rlb_choose_channel(skb, bond, arp); if (tx_slave) bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr, tx_slave->dev->addr_len); @@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) * When the arp reply is received the entry will be updated * with the correct unicast address of the client. */ - tx_slave = rlb_choose_channel(skb, bond); + tx_slave = rlb_choose_channel(skb, bond, arp); /* The ARP reply packets must be delayed so that * they can cancel out the influence of the ARP request. @@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) bool do_tx_balance = true; u32 hash_index = 0; const u8 *hash_start = NULL; - struct ipv6hdr *ip6hdr; skb_reset_mac_header(skb); eth_data = eth_hdr(skb); switch (ntohs(skb->protocol)) { case ETH_P_IP: { - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; if (is_broadcast_ether_addr(eth_data->h_dest) || - iph->daddr == ip_bcast || - iph->protocol == IPPROTO_IGMP) { + !pskb_network_may_pull(skb, sizeof(*iph))) { + do_tx_balance = false; + break; + } + iph = ip_hdr(skb); + if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { do_tx_balance = false; break; } hash_start = (char *)&(iph->daddr); hash_size = sizeof(iph->daddr); - } break; - case ETH_P_IPV6: + break; + } + case ETH_P_IPV6: { + const struct ipv6hdr *ip6hdr; + /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ @@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - /* Additianally, DAD probes should not be tx-balanced as that + if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) { + do_tx_balance = false; + break; + } + /* Additionally, DAD probes should not be tx-balanced as that * will lead to false positives for duplicate addresses and * prevent address configuration from working.
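The bond_alb changes above replace the bare arp_pkt()/ip_hdr() casts with pskb_network_may_pull() guards: nothing guarantees that the network header is linear in the skb before it is dereferenced, particularly for frames that arrived through a bridge. The pattern as a minimal sketch:

	struct arp_pkt *arp;

	/* make sure sizeof(*arp) bytes starting at the network header are
	 * linear (this may reallocate the head), and only then cast */
	if (!pskb_network_may_pull(skb, sizeof(*arp)))
		return NULL;
	arp = (struct arp_pkt *)skb_network_header(skb);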
*/ @@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - hash_start = (char *)&(ipv6_hdr(skb)->daddr); - hash_size = sizeof(ipv6_hdr(skb)->daddr); + hash_start = (char *)&ip6hdr->daddr; + hash_size = sizeof(ip6hdr->daddr); break; - case ETH_P_IPX: - if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { + } + case ETH_P_IPX: { + const struct ipxhdr *ipxhdr; + + if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) { + do_tx_balance = false; + break; + } + ipxhdr = (struct ipxhdr *)skb_network_header(skb); + + if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { /* something is wrong with this packet */ do_tx_balance = false; break; } - if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) { + if (ipxhdr->ipx_type != IPX_TYPE_NCP) { /* The only protocol worth balancing in * this family since it has an "ARP" like * mechanism @@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } + eth_data = eth_hdr(skb); hash_start = (char *)eth_data->h_dest; hash_size = ETH_ALEN; break; + } case ETH_P_ARP: do_tx_balance = false; if (bond_info->rlb_enabled) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 62f65573eb04..a7059e018c47 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -76,6 +76,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -789,7 +792,19 @@ static bool bond_should_notify_peers(struct bonding *bond) struct slave *slave; rcu_read_lock(); - slave = rcu_dereference(bond->curr_active_slave); + + if ((bond->params.broadcast_arp || bond->params.broadcast_nd) && + bond->params.mode == BOND_MODE_8023AD) + /* + * For BOND_MODE_8023AD, curr_active_slave could be NULL, + * so we use the first slave instead. + * BOND_MODE_ACTIVEBACKUP also uses this function; + * in that case broadcast_arp and broadcast_nd can simply + * be left disabled.
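The slave selection above runs entirely under rcu_read_lock(), so both branches have to use RCU accessors. Sketched below, with use_first_slave standing in for the broadcast_arp/broadcast_nd mode test:

	struct slave *slave;

	rcu_read_lock();
	if (use_first_slave)				/* hypothetical predicate */
		slave = bond_first_slave_rcu(bond);
	else
		slave = rcu_dereference(bond->curr_active_slave);
	/* only rely on 'slave' inside the read-side critical section */
	rcu_read_unlock();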
+ */ + slave = bond_first_slave_rcu(bond); + else + slave = rcu_dereference(bond->curr_active_slave); + rcu_read_unlock(); netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", @@ -2225,9 +2240,6 @@ static void bond_miimon_commit(struct bonding *bond) } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); - } else if (slave != primary) { - /* prevent it from being the active one */ - bond_set_backup_slave(slave); } slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", @@ -2236,6 +2248,10 @@ static void bond_miimon_commit(struct bonding *bond) bond_miimon_link_change(bond, slave, BOND_LINK_UP); + /* notify twice */ + if (BOND_MODE(bond) == BOND_MODE_8023AD) + bond_8023ad_up = 2; + if (!bond->curr_active_slave || slave == primary) goto do_failover; @@ -2257,6 +2273,11 @@ static void bond_miimon_commit(struct bonding *bond) bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); + if ((bond->params.broadcast_arp || + bond->params.broadcast_nd) && + bond->params.mode == BOND_MODE_8023AD) + bond->send_peer_notif++; + if (slave == rcu_access_pointer(bond->curr_active_slave)) goto do_failover; @@ -3439,6 +3460,47 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } +#ifdef CONFIG_LOCKDEP +static int bond_get_lowest_level_rcu(struct net_device *dev) +{ + struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; + struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; + int cur = 0, max = 0; + + now = dev; + iter = &dev->adj_list.lower; + + while (1) { + next = NULL; + while (1) { + ldev = netdev_next_lower_dev_rcu(now, &iter); + if (!ldev) + break; + + next = ldev; + niter = &ldev->adj_list.lower; + dev_stack[cur] = now; + iter_stack[cur++] = iter; + if (max <= cur) + max = cur; + break; + } + + if (!next) { + if (!cur) + return max; + next = dev_stack[--cur]; + niter = iter_stack[cur]; + } + + now = next; + iter = niter; + } + + return max; +} +#endif + static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3446,11 +3508,17 @@ static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 temp; struct list_head *iter; struct slave *slave; + int nest_level = 0; - spin_lock(&bond->stats_lock); - memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); +#ifdef CONFIG_LOCKDEP + nest_level = bond_get_lowest_level_rcu(bond_dev); +#endif + + spin_lock_nested(&bond->stats_lock, nest_level); + memcpy(stats, &bond->bond_stats, sizeof(*stats)); + bond_for_each_slave_rcu(bond, slave, iter) { const struct rtnl_link_stats64 *new = dev_get_stats(slave->dev, &temp); @@ -3460,10 +3528,10 @@ static void bond_get_stats(struct net_device *bond_dev, /* save off the slave stats for the next run */ memcpy(&slave->slave_stats, new, sizeof(*new)); } - rcu_read_unlock(); memcpy(&bond->bond_stats, stats, sizeof(*stats)); spin_unlock(&bond->stats_lock); + rcu_read_unlock(); } static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) @@ -3553,6 +3621,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd case BOND_RELEASE_OLD: case SIOCBONDRELEASE: res = bond_release(bond_dev, slave_dev); + if (!res) + netdev_update_lockdep_key(slave_dev); break; case BOND_SETHWADDR_OLD: case SIOCBONDSETHWADDR: @@ -3612,32 +3682,35 @@ static int bond_neigh_init(struct neighbour *n) const struct net_device_ops *slave_ops; struct neigh_parms parms; struct slave *slave; - int ret; + int ret = 
0; - slave = bond_first_slave(bond); + rcu_read_lock(); + slave = bond_first_slave_rcu(bond); if (!slave) - return 0; + goto out; slave_ops = slave->dev->netdev_ops; if (!slave_ops->ndo_neigh_setup) - return 0; + goto out; - parms.neigh_setup = NULL; - parms.neigh_cleanup = NULL; - ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); - if (ret) - return ret; - - /* Assign slave's neigh_cleanup to neighbour in case cleanup is called - * after the last slave has been detached. Assumes that all slaves - * utilize the same neigh_cleanup (true at this writing as only user - * is ipoib). + /* TODO: find another way [1] to implement this. + * Passing a zeroed structure is fragile, + * but at least we do not pass garbage. + * + * [1] One way would be that ndo_neigh_setup() never touches + * struct neigh_parms, but propagates the new neigh_setup() + * back to ___neigh_create() / neigh_parms_alloc() */ - n->parms->neigh_cleanup = parms.neigh_cleanup; + memset(&parms, 0, sizeof(parms)); + ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); - if (!parms.neigh_setup) - return 0; + if (ret) + goto out; - return parms.neigh_setup(n); + if (parms.neigh_setup) + ret = parms.neigh_setup(n); +out: + rcu_read_unlock(); + return ret; } /* The bonding ndo_neigh_setup is called at init time before any @@ -4041,30 +4114,6 @@ out: return ret; } -/* Use this Xmit function for 3AD as well as XOR modes. The current - * usable slave array is formed in the control path. The xmit function - * just calculates hash and sends the packet out. - */ -static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct bonding *bond = netdev_priv(dev); - struct slave *slave; - struct bond_up_slave *slaves; - unsigned int count; - - slaves = rcu_dereference(bond->slave_arr); - count = slaves ? READ_ONCE(slaves->count) : 0; - if (likely(count)) { - slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; - bond_dev_queue_xmit(bond, skb, slave->dev); - } else { - bond_tx_drop(dev, skb); - } - - return NETDEV_TX_OK; -} - /* in broadcast mode, we send everything to all usable interfaces. */ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) @@ -4095,6 +4144,47 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, return NETDEV_TX_OK; } +/* Use this Xmit function for 3AD as well as XOR modes. The current + * usable slave array is formed in the control path. The xmit function + * just calculates hash and sends the packet out. + */ +static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bonding *bond = netdev_priv(dev); + struct slave *slave; + struct bond_up_slave *slaves; + unsigned int count; + + /* Broadcast arp or nd to all slaves. */ + if (bond->params.broadcast_arp && (ntohs(skb->protocol) == ETH_P_ARP)) + return bond_xmit_broadcast(skb, dev); + + if (bond->params.broadcast_nd && (ntohs(skb->protocol) == ETH_P_IPV6) && + pskb_may_pull(skb, + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr))) { + if (ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct icmp6hdr *icmph = icmp6_hdr(skb); + + if ((icmph->icmp6_type == + NDISC_NEIGHBOUR_SOLICITATION) || + (icmph->icmp6_type == + NDISC_NEIGHBOUR_ADVERTISEMENT)) + return bond_xmit_broadcast(skb, dev); + } + } + + slaves = rcu_dereference(bond->slave_arr); + count = slaves ?
READ_ONCE(slaves->count) : 0; + if (likely(count)) { + slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; + bond_dev_queue_xmit(bond, skb, slave->dev); + } else { + bond_tx_drop(dev, skb); + } + + return NETDEV_TX_OK; +} + /*------------------------- Device initialization ---------------------------*/ /* Lookup the slave that corresponds to a qid */ diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index ddb3916d3506..215c10923289 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1398,6 +1398,8 @@ static int bond_option_slaves_set(struct bonding *bond, case '-': slave_dbg(bond->dev, dev, "Releasing interface\n"); ret = bond_release(bond->dev, dev); + if (!ret) + netdev_update_lockdep_key(dev); break; default: diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index fd5c9cbe45b1..09718c763be5 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -189,6 +189,9 @@ static void bond_info_show_slave(struct seq_file *seq, seq_printf(seq, "Permanent HW addr: %*phC\n", slave->dev->addr_len, slave->perm_hwaddr); seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); + seq_printf(seq, "Slave active: %d\n", !slave->backup); + seq_printf(seq, "Slave sm_vars: 0x%x\n", + SLAVE_AD_INFO(slave)->port.sm_vars); if (BOND_MODE(bond) == BOND_MODE_8023AD) { const struct port *port = &SLAVE_AD_INFO(slave)->port; @@ -197,6 +200,8 @@ static void bond_info_show_slave(struct seq_file *seq, if (agg) { seq_printf(seq, "Aggregator ID: %d\n", agg->aggregator_identifier); + seq_printf(seq, "Aggregator active: %d\n", + agg->is_active); seq_printf(seq, "Actor Churn State: %s\n", bond_3ad_churn_desc(port->sm_churn_actor_state)); seq_printf(seq, "Partner Churn State: %s\n", diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 2d615a93685e..b3660647a1e7 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -720,6 +720,164 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d, static DEVICE_ATTR(ad_user_port_key, 0644, bonding_show_ad_user_port_key, bonding_sysfs_store_option); +static ssize_t bonding_show_broadcast_arp(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", bond->params.broadcast_arp); +} + +static ssize_t bonding_store_broadcast_arp(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int new_value, ret = count; + struct bonding *bond = to_bond(d); + + if (sscanf(buf, "%d", &new_value) != 1) { + pr_err("%s: no broadcast_arp value specified.\n", + bond->dev->name); + ret = -EINVAL; + goto out; + } + + if (new_value != 0 && new_value != 1) { + pr_err("%s: Invalid broadcast_arp value %d not in range 0-1; rejected.\n", + bond->dev->name, new_value); + ret = -EINVAL; + goto out; + } + pr_info("%s: Setting broadcast_arp to %d.\n", + bond->dev->name, new_value); + bond->params.broadcast_arp = new_value; + +out: + return ret; +} + +static DEVICE_ATTR(broadcast_arp, S_IRUGO | S_IWUSR, + bonding_show_broadcast_arp, + bonding_store_broadcast_arp); + +static ssize_t bonding_show_broadcast_nd(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", bond->params.broadcast_nd); +} + +static ssize_t bonding_store_broadcast_nd(struct device *d, + struct device_attribute *attr, + const char *buf, size_t 
count) +{ + int new_value, ret = count; + struct bonding *bond = to_bond(d); + + if (sscanf(buf, "%d", &new_value) != 1) { + pr_err("%s: no broadcast_nd value specified.\n", + bond->dev->name); + ret = -EINVAL; + goto out; + } + + if (new_value != 0 && new_value != 1) { + pr_err("%s: Invalid broadcast_nd value %d not in range 0-1; rejected.\n", + bond->dev->name, new_value); + ret = -EINVAL; + goto out; + } + pr_info("%s: Setting broadcast_nd to %d.\n", + bond->dev->name, new_value); + bond->params.broadcast_nd = new_value; + +out: + return ret; +} + +static DEVICE_ATTR(broadcast_nd, S_IRUGO | S_IWUSR, + bonding_show_broadcast_nd, + bonding_store_broadcast_nd); + +static ssize_t bonding_show_periodic_na(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", bond->params.periodic_na); +} + +static ssize_t bonding_store_periodic_na(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int new_value, ret = count; + struct bonding *bond = to_bond(d); + + if (sscanf(buf, "%d", &new_value) != 1) { + pr_err("%s: no periodic_na value specified.\n", + bond->dev->name); + ret = -EINVAL; + goto out; + } + + if (new_value != 0 && new_value != 1) { + pr_err("%s: Invalid periodic_na value %d not in range 0-1; rejected.\n", + bond->dev->name, new_value); + ret = -EINVAL; + goto out; + } + pr_info("%s: Setting periodic_na to %d.\n", + bond->dev->name, new_value); + bond->params.periodic_na = new_value; + +out: + return ret; +} + +static DEVICE_ATTR(periodic_na, S_IRUGO | S_IWUSR, + bonding_show_periodic_na, + bonding_store_periodic_na); + +static ssize_t bonding_show_periodic_na_interval(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", bond->params.periodic_na_interval); +} + +static ssize_t bonding_store_periodic_na_interval(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int new_value, ret = count; + struct bonding *bond = to_bond(d); + + if (sscanf(buf, "%d", &new_value) != 1) { + pr_err("%s: no periodic_na_interval value specified.\n", + bond->dev->name); + ret = -EINVAL; + goto out; + } + + if (new_value < 0) { + pr_err("%s: Invalid periodic_na_interval value %d; must be non-negative.\n", + bond->dev->name, new_value); + ret = -EINVAL; + goto out; + } + + pr_info("%s: Setting periodic_na_interval to %d.\n", + bond->dev->name, new_value); + bond->params.periodic_na_interval = new_value; + +out: + return ret; +} + +static DEVICE_ATTR(periodic_na_interval, S_IRUGO | S_IWUSR, + bonding_show_periodic_na_interval, + bonding_store_periodic_na_interval); + static struct attribute *per_bond_attrs[] = { &dev_attr_slaves.attr, &dev_attr_mode.attr, @@ -757,6 +915,10 @@ static struct attribute *per_bond_attrs[] = { &dev_attr_ad_actor_sys_prio.attr, &dev_attr_ad_actor_system.attr, &dev_attr_ad_user_port_key.attr, + &dev_attr_broadcast_arp.attr, + &dev_attr_broadcast_nd.attr, + &dev_attr_periodic_na.attr, + &dev_attr_periodic_na_interval.attr, NULL, }; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 1c88c361938c..3a33fb503400 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -884,6 +884,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) }, [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, }; static int can_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 57f9a2f51085..e5c207ad3c77 100644 --- 
a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv (&priv->regs->mb[bank][priv->mb_size * mb_index]); } +static int flexcan_low_power_enter_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_low_power_exit_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; +} + static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) { struct flexcan_regs __iomem *regs = priv->regs; @@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int ackval; u32 reg_mcr; reg_mcr = priv->read(&regs->mcr); @@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); - /* get stop acknowledgment */ - if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, - ackval, ackval & (1 << priv->stm.ack_bit), - 0, FLEXCAN_TIMEOUT_US)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int ackval; u32 reg_mcr; /* remove stop request */ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 0); - /* get stop acknowledgment */ - if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, - ackval, !(ackval & (1 << priv->stm.ack_bit)), - 0, FLEXCAN_TIMEOUT_US)) - return -ETIMEDOUT; reg_mcr = priv->read(&regs->mcr); reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, &regs->mcr); - return 0; + return flexcan_low_power_exit_ack(priv); } static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) @@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(&regs->mcr); reg &= ~FLEXCAN_MCR_MDIS; priv->write(reg, &regs->mcr); - while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_exit_ack(priv); } static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_MDIS; priv->write(reg, &regs->mcr); - while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static int flexcan_chip_freeze(struct flexcan_priv *priv) @@ -1703,6 +1704,9 @@ static
int __maybe_unused flexcan_resume(struct device *device) netif_start_queue(dev); if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); + err = flexcan_exit_stop_mode(priv); + if (err) + return err; } else { err = pm_runtime_force_resume(device); if (err) @@ -1748,14 +1752,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; - if (netif_running(dev) && device_may_wakeup(device)) { + if (netif_running(dev) && device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); - err = flexcan_exit_stop_mode(priv); - if (err) - return err; - } return 0; } diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index 3db619209fe1..b233756345f8 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -164,6 +164,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) } } +static int tcan4x5x_reset(struct tcan4x5x_priv *priv) +{ + int ret = 0; + + if (priv->reset_gpio) { + gpiod_set_value(priv->reset_gpio, 1); + + /* tpulse_width minimum 30us */ + usleep_range(30, 100); + gpiod_set_value(priv->reset_gpio, 0); + } else { + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_SW_RESET); + if (ret) + return ret; + } + + usleep_range(700, 1000); + + return ret; +} + static int regmap_spi_gather_write(void *context, const void *reg, size_t reg_len, const void *val, size_t val_len) @@ -341,6 +363,7 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) static int tcan4x5x_parse_config(struct m_can_classdev *cdev) { struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + int ret; tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", GPIOD_OUT_HIGH); @@ -354,6 +377,10 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev) if (IS_ERR(tcan4x5x->reset_gpio)) tcan4x5x->reset_gpio = NULL; + ret = tcan4x5x_reset(tcan4x5x); + if (ret) + return ret; + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, "device-state", GPIOD_IN); @@ -443,6 +470,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi) tcan4x5x_power_enable(priv->power, 1); + ret = tcan4x5x_init(mcan_class); + if (ret) + goto out_power; + ret = m_can_class_register(mcan_class); if (ret) goto out_power; diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 8caf7af0dee2..99101d7027a8 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - int npackets = 0; - int ret = 1; + int work_done = 0; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; - while (npackets < quota) { + while (work_done < quota) { canrflg = in_8(&regs->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; @@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) stats->rx_packets++; stats->rx_bytes += frame->can_dlc; - npackets++; + work_done++; netif_receive_skb(skb); } - if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { - napi_complete(&priv->napi); - clear_bit(F_RX_PROGRESS, &priv->flags); - if (priv->can.state < CAN_STATE_BUS_OFF) - out_8(&regs->canrier, priv->shadow_canrier); - ret = 0; + if (work_done < quota) { + if (likely(napi_complete_done(&priv->napi, work_done))) { + clear_bit(F_RX_PROGRESS, &priv->flags); + if
(priv->can.state < CAN_STATE_BUS_OFF) + out_8(&regs->canrier, priv->shadow_canrier); + } } - return ret; + return work_done; } static irqreturn_t mscan_isr(int irq, void *dev_id) diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 0a9f42e5fedf..4dfa459ef5c7 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; - cf.can_id = 0; + memset(&cf, 0, sizeof(cf)); switch (*cmd) { case 'r': @@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl) else return; - *(u64 *) (&cf.data) = 0; /* clear payload */ - /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf.can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf.can_dlc; i++) { @@ -344,9 +342,16 @@ static void slcan_transmit(struct work_struct *work) */ static void slcan_write_wakeup(struct tty_struct *tty) { - struct slcan *sl = tty->disc_data; + struct slcan *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } /* Send a can_frame to a TTY queue. */ @@ -617,7 +622,11 @@ err_free_chan: sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); + slc_free_netdev(sl->dev); + /* do not call free_netdev before rtnl_unlock */ + rtnl_unlock(); free_netdev(sl->dev); + return err; err_exit: rtnl_unlock(); @@ -643,10 +652,11 @@ static void slcan_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* Flush network side */ diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 2f74f6704c12..a4b4b742c80c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, hconf, sizeof(*hconf), 1000); @@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, dconf, sizeof(*dconf), 1000); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 5fc0be564274..7ab87a758754 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *ep; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ep = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 07d2f3aa2c02..1b9957f12459 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, struct kvaser_cmd *cmd; int err; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; @@ -1140,7 +1140,7 @@ static int
kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *endpoint; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 04aac3bb54ef..81e942f713e6 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -792,7 +792,7 @@ resubmit: up); usb_anchor_urb(urb, &up->rx_urbs); - ret = usb_submit_urb(urb, GFP_KERNEL); + ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { netdev_err(up->netdev, diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 7c482b2d78d2..2be846ee627d 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -60,6 +60,8 @@ enum xcan_reg { XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ + XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */ + XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) @@ -1803,6 +1805,11 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) { + priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000); + priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000); + } + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, hw_tx_max, priv->tx_max); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index f6232ce8481f..685e12b05a7c 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -77,6 +77,7 @@ config NET_DSA_REALTEK_SMI config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 + select REGMAP ---help--- This enables support for the SMSC/Microchip LAN9303 3 port ethernet switch chips. diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index cc3536315eff..d618650533b6 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) * frames should be flooded or not. 
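Two allocation conventions show up in the USB-CAN hunks above: command buffers that are copied out to the device are zero-allocated so struct padding cannot leak stale kernel memory onto the wire, and the URB resubmission in ucan's completion path uses GFP_ATOMIC because that context must not sleep. A hedged sketch of both:

	struct kvaser_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);	/* padding bytes zeroed too */
	if (!cmd)
		return -ENOMEM;
	/* ... fill the used fields, send the command, kfree(cmd) ... */

	/* in URB completion (atomic) context, GFP_KERNEL could sleep: */
	ret = usb_submit_urb(urb, GFP_ATOMIC);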
*/ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); - mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN; + mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); } @@ -526,6 +526,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) cpu_port = ds->ports[port].cpu_dp->index; + b53_br_egress_floods(ds, port, true, true); + if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); if (ret) @@ -641,6 +643,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); b53_brcm_hdr_setup(dev->ds, port); + + b53_br_egress_floods(dev->ds, port, true, true); } static void b53_enable_mib(struct b53_device *dev) @@ -676,7 +680,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false, ds->vlan_filtering); + b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, @@ -1349,6 +1353,9 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_get_vlan_entry(dev, vid, vl); + if (vid == 0 && vid == b53_default_pvid(dev)) + untagged = true; + vl->members |= BIT(port); if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag |= BIT(port); @@ -1766,19 +1773,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port, struct b53_device *dev = ds->priv; u16 uc, mc; - b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc); + b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); if (unicast) uc |= BIT(port); else uc &= ~BIT(port); - b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc); + b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); - b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc); + b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); if (multicast) mc |= BIT(port); else mc &= ~BIT(port); - b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc); + b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); + + b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); + if (multicast) + mc |= BIT(port); + else + mc &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); return 0; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 69fc13046ac7..9502db66092e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -69,6 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); + reg &= ~GMII_SPEED_UP_2G; core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ @@ -458,7 +459,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->slave_mii_bus->parent = ds->dev->parent; priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - err = of_mdiobus_register(priv->slave_mii_bus, dn); + err = mdiobus_register(priv->slave_mii_bus); if (err && dn) of_node_put(dn); @@ -1052,6 +1053,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) const struct bcm_sf2_of_data *data; struct b53_platform_data *pdata; struct dsa_switch_ops *ops; + struct device_node *ports; struct bcm_sf2_priv *priv; struct b53_device *dev; struct dsa_switch *ds; @@ -1114,7 +1116,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); - bcm_sf2_identify_ports(priv, dn->child); + ports = of_find_node_by_name(dn, "ports"); + if (ports) { + bcm_sf2_identify_ports(priv, ports); 
+ of_node_put(ports); + } priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index d264776a95a3..471837cf0b21 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, return -EINVAL; } - ip_frag = be32_to_cpu(fs->m_ext.data[0]); + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); /* Locate the first rule available */ if (fs->location == RX_CLS_LOC_ANY) @@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, if (rule->fs.flow_type != fs->flow_type || rule->fs.ring_cookie != fs->ring_cookie || - rule->fs.m_ext.data[0] != fs->m_ext.data[0]) + rule->fs.h_ext.data[0] != fs->h_ext.data[0]) continue; switch (fs->flow_type & ~FLOW_EXT) { @@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, return -EINVAL; } - ip_frag = be32_to_cpu(fs->m_ext.data[0]); + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); layout = &udf_tcpip6_layout; slice_num = bcm_sf2_get_slice_number(layout, 0); diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index c5f64959a184..1142768969c2 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = { module_spi_driver(ksz9477_spi_driver); +MODULE_ALIAS("spi:ksz9477"); +MODULE_ALIAS("spi:ksz9897"); +MODULE_ALIAS("spi:ksz9893"); +MODULE_ALIAS("spi:ksz9563"); +MODULE_ALIAS("spi:ksz8563"); +MODULE_ALIAS("spi:ksz9567"); MODULE_AUTHOR("Woojung Huh "); MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1d8d36de4d20..8071c3fa3fb7 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds) static void mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable) { - u32 mask = PMCR_TX_EN | PMCR_RX_EN; + u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK; if (enable) mt7530_set(priv, MT7530_PMCR_P(port), mask); @@ -1353,6 +1353,9 @@ mt7530_setup(struct dsa_switch *ds) continue; phy_node = of_parse_phandle(mac_np, "phy-handle", 0); + if (!phy_node) + continue; + if (phy_node->parent == priv->dev->of_node->parent) { interface = of_get_phy_mode(mac_np); id = of_mdio_parse_addr(ds->dev, phy_node); @@ -1439,7 +1442,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port, mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 | PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN); mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN | - PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK; + PMCR_BACKPR_EN | PMCR_FORCE_MODE; /* Are we connected to external phy */ if (port == 5 && dsa_is_user_port(ds, 5)) diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 25ec4c0ac589..8a903624fdd7 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -332,6 +332,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) { u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST; + /* Use the default high priority for management frames sent to + * the CPU. 
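The monitor/management control register touched below is a pointer/data pair (the DATA_MASK in global1.h covers its low byte), so the new MGMTPRI priority bits are OR-ed into the data half before the single monitor write; condensed from the hunk, same names:

	u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;

	/* keep the default high priority for frames directed at the CPU */
	port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
	return mv88e6390_g1_monitor_write(chip, ptr, port);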
+ */ + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; + return mv88e6390_g1_monitor_write(chip, ptr, port); } diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 0870fcc8bfc8..0ae96a1e919b 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -210,6 +210,7 @@ #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 +#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff /* Offset 0x1C: Global Control 2 */ diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index bdbb72fc20ed..6240976679e1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1083,6 +1083,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) { int err, irq, virq; + chip->g2_irq.masked = ~0; + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked); + mv88e6xxx_reg_unlock(chip); + if (err) + return err; + chip->g2_irq.domain = irq_domain_add_simple( chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip); if (!chip->g2_irq.domain) @@ -1092,7 +1099,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) irq_create_mapping(chip->g2_irq.domain, irq); chip->g2_irq.chip = mv88e6xxx_g2_irq_chip; - chip->g2_irq.masked = ~0; chip->device_irq = irq_find_mapping(chip->g1_irq.domain, MV88E6XXX_G1_STS_IRQ_DEVICE); diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 15ef81654b67..330c41e87171 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) } static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, - phy_interface_t mode) + phy_interface_t mode, bool force) { u8 lane; u16 cmode; @@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } - /* cmode doesn't change, nothing to do for us */ - if (cmode == chip->ports[port].cmode) + /* cmode doesn't change, nothing to do for us unless forced */ + if (cmode == chip->ports[port].cmode && !force) return 0; lane = mv88e6xxx_serdes_get_lane(chip, port); @@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (port != 9 && port != 10) return -EOPNOTSUPP; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, @@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, break; } - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, @@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, true); } int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 7687ddcae159..3b51e87a3714 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -594,15 +594,15 @@ 
static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int i; for (i = 0; i < SJA1105_NUM_PORTS; i++) { - if (ports->role == XMII_MAC) + if (ports[i].role == XMII_MAC) continue; - if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || - ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || + ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) priv->rgmii_rx_delay[i] = true; - if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || - ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || + ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) priv->rgmii_tx_delay[i] = true; if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && @@ -619,7 +619,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, struct device *dev = &priv->spidev->dev; struct device_node *child; - for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { struct device_node *phy_node; int phy_mode; u32 index; @@ -1389,6 +1389,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv) int speed_mbps[SJA1105_NUM_PORTS]; int rc, i; + mutex_lock(&priv->mgmt_lock); + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; /* Back up the dynamic link speed changed by sja1105_adjust_port_config @@ -1420,6 +1422,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv) goto out; } out: + mutex_unlock(&priv->mgmt_lock); + return rc; } @@ -1556,8 +1560,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) if (enabled) { /* Enable VLAN filtering. */ - tpid = ETH_P_8021AD; - tpid2 = ETH_P_8021Q; + tpid = ETH_P_8021Q; + tpid2 = ETH_P_8021AD; } else { /* Disable VLAN filtering. */ tpid = ETH_P_SJA1105; @@ -1566,9 +1570,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; - /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ - general_params->tpid = tpid; /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ + general_params->tpid = tpid; + /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ general_params->tpid2 = tpid2; /* When VLAN filtering is on, we need to at least be able to * decode management traffic through the "backup plan". diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index 0d03e13e9909..63d2311817c4 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, return size; } +/* TPID and TPID2 are intentionally reversed so that semantic + * compatibility with E/T is kept. 
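+ * (Concretely: after the swap below, bits 74:59 of the P/Q/R/S entry
+ * carry tpid2 and bits 57:42 carry tpid, so ->tpid keeps meaning the
+ * C-tag EtherType and ->tpid2 the S-tag EtherType on both families.)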
+ */
 static size_t
 sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
					  enum packing_op op)
@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
	sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
	sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
	sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
-	sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
+	sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
	sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
-	sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
+	sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
	sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
	sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
	sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ea62604fdf8c..48de4bee209e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					   u16 command_id, bool capture)
 {
+	if (unlikely(!queue->comp_ctx)) {
+		pr_err("Completion context is NULL\n");
+		return NULL;
+	}
+
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
@@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
				      feature_ver);
 }

+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_feature_rss_flow_hash_control *hash_key =
+		(ena_dev->rss).hash_key;
+
+	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
+	/* The key is stored in the device as an array of u32, and the API
+	 * likewise requires the key to be passed in this
+	 * format.
Thus the size of our array should be divided by 4 + */ + hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32); +} + +int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev) +{ + return ena_dev->rss.hash_func; +} + static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_flow_hash_control *hash_key; + struct ena_admin_get_feat_resp get_resp; + int rc; + + hash_key = (ena_dev->rss).hash_key; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + ena_dev->rss.hash_key_dma_addr, + sizeof(ena_dev->rss.hash_key), 0); + if (unlikely(rc)) { + hash_key = NULL; + return -EOPNOTSUPP; + } rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), @@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) return 0; } -static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) -{ - u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; - struct ena_rss *rss = &ena_dev->rss; - u8 idx; - u16 i; - - for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) - dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; - - for (i = 0; i < 1 << rss->tbl_log_size; i++) { - if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) - return -EINVAL; - idx = (u8)rss->rss_ind_tbl[i].cq_idx; - - if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) - return -EINVAL; - - rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; - } - - return 0; -} - static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 intr_delay_resolution) { @@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, switch (func) { case ENA_ADMIN_TOEPLITZ: - if (key_len > sizeof(hash_key->key)) { - pr_err("key len (%hu) is bigger than the max supported (%zu)\n", - key_len, sizeof(hash_key->key)); - return -EINVAL; + if (key) { + if (key_len != sizeof(hash_key->key)) { + pr_err("key len (%hu) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); + return -EINVAL; + } + memcpy(hash_key->key, key, key_len); + rss->hash_init_val = init_val; + hash_key->keys_num = key_len >> 2; } - - memcpy(hash_key->key, key, key_len); - rss->hash_init_val = init_val; - hash_key->keys_num = key_len >> 2; break; case ENA_ADMIN_CRC32: rss->hash_init_val = init_val; @@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (unlikely(rc)) return rc; - rss->hash_func = get_resp.u.flow_hash_func.selected_func; + /* ffs() returns 1 in case the lsb is set */ + rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); + if (rss->hash_func) + rss->hash_func--; + if (func) *func = rss->hash_func; @@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) if (!ind_tbl) return 0; - rc = ena_com_ind_tbl_convert_from_device(ena_dev); - if (unlikely(rc)) - return rc; - for (i = 0; i < (1 << rss->tbl_log_size); i++) ind_tbl[i] = rss->host_rss_ind_tbl[i]; @@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) if (unlikely(rc)) goto err_indr_tbl; + /* The following function might return unsupported in case the + * device doesn't support setting the key / hash function. We can safely + * ignore this error and have indirection table support only. 
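+ * (E.g. a device that rejects ENA_ADMIN_RSS_HASH_FUNCTION makes
+ * ena_com_hash_key_allocate() return -EOPNOTSUPP; rss_init then skips
+ * ena_com_hash_key_fill_default_key() and carries on with only the
+ * indirection table configured.)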
+ */ rc = ena_com_hash_key_allocate(ena_dev); - if (unlikely(rc)) + if (unlikely(rc) && rc != -EOPNOTSUPP) goto err_hash_key; + else if (rc != -EOPNOTSUPP) + ena_com_hash_key_fill_default_key(ena_dev); rc = ena_com_hash_ctrl_init(ena_dev); if (unlikely(rc)) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 7c941eba0bc9..469f298199a7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -44,6 +44,7 @@ #include #include #include +#include #include "ena_common_defs.h" #include "ena_admin_defs.h" @@ -72,7 +73,7 @@ /*****************************************************************************/ /* ENA adaptive interrupt moderation settings */ -#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 @@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); */ void ena_com_rss_destroy(struct ena_com_dev *ena_dev); +/* ena_com_get_current_hash_function - Get RSS hash function + * @ena_dev: ENA communication layer struct + * + * Return the current hash function. + * @return: 0 or one of the ena_admin_hash_functions values. + */ +int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev); + /* ena_com_fill_hash_function - Fill RSS hash function * @ena_dev: ENA communication layer struct * @func: The hash function (Toeplitz or crc) diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 16553d92fad2..ae631b8770fc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev, ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * ena_dev->intr_delay_resolution; - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) - coalesce->rx_coalesce_usecs = - ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) - * ena_dev->intr_delay_resolution; + coalesce->rx_coalesce_usecs = + ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) + * ena_dev->intr_delay_resolution; coalesce->use_adaptive_rx_coalesce = ena_com_get_adaptive_moderation_enabled(ena_dev); @@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_tx_rings_intr_moderation(adapter); - if (coalesce->use_adaptive_rx_coalesce) { - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) - ena_com_enable_adaptive_moderation(ena_dev); - return 0; - } - rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, coalesce->rx_coalesce_usecs); if (rc) @@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_rx_rings_intr_moderation(adapter); - if (!coalesce->use_adaptive_rx_coalesce) { - if (ena_com_get_adaptive_moderation_enabled(ena_dev)) - ena_com_disable_adaptive_moderation(ena_dev); - } + if (coalesce->use_adaptive_rx_coalesce && + !ena_com_get_adaptive_moderation_enabled(ena_dev)) + ena_com_enable_adaptive_moderation(ena_dev); + + if (!coalesce->use_adaptive_rx_coalesce && + ena_com_get_adaptive_moderation_enabled(ena_dev)) + ena_com_disable_adaptive_moderation(ena_dev); return 0; } @@ -640,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev) return ENA_HASH_KEY_SIZE; } +static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int i, rc; + + 
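+	/* A NULL indir is valid here: the caller may only be interested
+	 * in the key/hash function, mirroring the early return in
+	 * ena_com_indirect_table_get(). */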
+	if (!indir)
+		return 0;
+
+	rc = ena_com_indirect_table_get(ena_dev, indir);
+	if (rc)
+		return rc;
+
+	/* Our internal representation of the indices is: even indices
+	 * for Tx and odd indices for Rx. We need to convert the Rx
+	 * indices to be consecutive
+	 */
+	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
+		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
+
+	return rc;
+}
+
 static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			 u8 *hfunc)
 {
@@ -648,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
	u8 func;
	int rc;

-	rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+	rc = ena_indirection_table_get(adapter, indir);
	if (rc)
		return rc;

+	/* We call this function to check if the device
+	 * supports getting/setting the hash function.
+	 */
	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+
+	if (rc) {
+		if (rc == -EOPNOTSUPP) {
+			key = NULL;
+			hfunc = NULL;
+			rc = 0;
+		}
+
+		return rc;
+	}
+
	if (rc)
		return rc;

@@ -661,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
		func = ETH_RSS_HASH_TOP;
		break;
	case ENA_ADMIN_CRC32:
-		func = ETH_RSS_HASH_XOR;
+		func = ETH_RSS_HASH_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev,
@@ -704,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
	}

	switch (hfunc) {
+	case ETH_RSS_HASH_NO_CHANGE:
+		func = ena_com_get_current_hash_function(ena_dev);
+		break;
	case ETH_RSS_HASH_TOP:
		func = ENA_ADMIN_TOEPLITZ;
		break;
-	case ETH_RSS_HASH_XOR:
+	case ETH_RSS_HASH_CRC32:
		func = ENA_ADMIN_CRC32;
		break;
	default:
@@ -809,6 +844,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
	.get_channels		= ena_get_channels,
	.get_tunable		= ena_get_tunable,
	.set_tunable		= ena_set_tunable,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };

 void ena_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c487d2a7d6dd..26325f7b3c1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

-	u32 tx_work_done;
-	u32 rx_work_done;
+	int tx_work_done;
+	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;
@@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
-	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+	/* On netpoll the budget is zero and the handler should only clean the
+	 * tx completions.
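+	 * (netpoll's poll_one_napi() invokes ->poll with budget == 0 for
+	 * exactly this case, so rx_work_done keeps its initial 0 and only
+	 * ena_clean_tx_irq() runs.)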
+	 */
+	if (likely(budget))
+		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
@@ -3031,8 +3035,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

-	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
-					   adapter->keep_alive_timeout);
+	keep_alive_expired = adapter->last_keep_alive_jiffies +
+			     adapter->keep_alive_timeout;
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
@@ -3134,7 +3138,7 @@ static void ena_timer_service(struct timer_list *t)
	}

	/* Reset the timer */
-	mod_timer(&adapter->timer_service, jiffies + HZ);
+	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 }

 static int ena_calc_io_queue_num(struct pci_dev *pdev,
@@ -3648,13 +3652,15 @@ err_disable_device:

 /*****************************************************************************/

-/* ena_remove - Device Removal Routine
+/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
  * @pdev: PCI device information struct
+ * @shutdown: Is it a shutdown operation? If false, it is a removal
  *
- * ena_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
+ * __ena_shutoff is a helper routine that does the real work on shutdown and
+ * removal paths; the difference between those paths is whether to detach or
+ * to unregister the netdevice.
  */
-static void ena_remove(struct pci_dev *pdev)
+static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
 {
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
@@ -3673,13 +3679,17 @@ static void ena_remove(struct pci_dev *pdev)

	cancel_work_sync(&adapter->reset_task);

-	rtnl_lock();
+	rtnl_lock(); /* lock released inside the if-else block below */
	ena_destroy_device(adapter, true);
-	rtnl_unlock();
-
-	unregister_netdev(netdev);
-
-	free_netdev(netdev);
+	if (shutdown) {
+		netif_device_detach(netdev);
+		dev_close(netdev);
+		rtnl_unlock();
+	} else {
+		rtnl_unlock();
+		unregister_netdev(netdev);
+		free_netdev(netdev);
+	}

	ena_com_rss_destroy(ena_dev);

@@ -3694,6 +3704,30 @@ static void ena_remove(struct pci_dev *pdev)
	vfree(ena_dev);
 }

+/* ena_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+
+static void ena_remove(struct pci_dev *pdev)
+{
+	__ena_shutoff(pdev, false);
+}
+
+/* ena_shutdown - Device Shutdown Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_shutdown is called by the PCI subsystem to alert the driver that
+ * a shutdown/reboot (or kexec) is happening and the device must be disabled.
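+ *
+ * (Reached through the pci_driver.shutdown hook registered below; kexec
+ * relies on this path to quiesce DMA before the next kernel takes over.)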
+ */
+
+static void ena_shutdown(struct pci_dev *pdev)
+{
+	__ena_shutoff(pdev, true);
+}
+
 #ifdef CONFIG_PM
 /* ena_suspend - PM suspend callback
  * @pdev: PCI device information struct
@@ -3743,6 +3777,7 @@ static struct pci_driver ena_pci_driver = {
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
+	.shutdown	= ena_shutdown,
 #ifdef CONFIG_PM
	.suspend    = ena_suspend,
	.resume     = ena_resume,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 72ee51a82ec7..dc02950a96b8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -127,6 +127,8 @@

 #define ENA_IO_TXQ_IDX(q)	(2 * (q))
 #define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
+#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
+#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)

 #define ENA_MGMNT_IRQ_IDX		0
 #define ENA_IO_IRQ_FIRST_IDX		1
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d8612131c55e..cc8031ae9aa3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2020,7 +2020,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
-				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
+				  XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
	if (!ndev)
		return -ENOMEM;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index aee827f07c16..01af0f028693 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -158,7 +158,7 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
	}

	if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
-	    (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+	    (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
		       aq_nic->active_vlans))) {
		netdev_err(aq_nic->ndev,
			   "ethtool: unknown vlan-id specified");
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 137c1de4c6ec..12949f1ec1ea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -467,8 +467,10 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				     dx_buff->len,
				     DMA_TO_DEVICE);

-	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
+	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
+		ret = 0;
		goto exit;
+	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
@@ -598,10 +600,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
-		if (err >= 0) {
-			++ring->stats.tx.packets;
-			ring->stats.tx.bytes += skb->len;
-		}
	} else {
		err = NETDEV_TX_BUSY;
	}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 76bdbe1596d6..03821b46a8cb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -243,9 +243,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
			}
		}

-		if (unlikely(buff->is_eop))
-			dev_kfree_skb_any(buff->skb);
+		if (unlikely(buff->is_eop)) {
+			++self->stats.tx.packets;
+			self->stats.tx.bytes += buff->skb->len;
+
+			dev_kfree_skb_any(buff->skb);
+		}
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
diff
--git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 52646855495e..873f9865f0d1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -22,6 +22,7 @@ #define HW_ATL_MIF_ADDR 0x0208U #define HW_ATL_MIF_VAL 0x020CU +#define HW_ATL_MPI_RPC_ADDR 0x0334U #define HW_ATL_RPC_CONTROL_ADR 0x0338U #define HW_ATL_RPC_STATE_ADR 0x033CU @@ -48,15 +49,14 @@ #define FORCE_FLASHLESS 0 static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); - static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); - static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self); static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self); static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self); static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self); static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self); +static u32 aq_fw1x_rpc_get(struct aq_hw_s *self); int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) { @@ -413,6 +413,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, self, self->mbox_addr, self->mbox_addr != 0U, 1000U, 10000U); + err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self, + self->rpc_addr, + self->rpc_addr != 0U, + 1000U, 100000U); return err; } @@ -469,6 +473,12 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, self, fw.val, sw.tid == fw.tid, 1000U, 100000U); + if (err < 0) + goto err_exit; + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; if (fw.len == 0xFFFFU) { err = hw_atl_utils_fw_rpc_call(self, sw.len); @@ -950,6 +960,11 @@ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self) return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR); } +static u32 aq_fw1x_rpc_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR); +} + const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, .deinit = hw_atl_fw1x_deinit, diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 97ab0dd25552..1a7710c399d7 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1519,8 +1519,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) int ethaddr_bytes = ETH_ALEN; memset(ppattern + offset, 0xff, magicsync); - for (j = 0; j < magicsync; j++) - set_bit(len++, (unsigned long *) pmask); + for (j = 0; j < magicsync; j++) { + pmask[len >> 3] |= BIT(len & 7); + len++; + } for (j = 0; j < B44_MAX_PATTERNS; j++) { if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) @@ -1532,7 +1534,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) for (k = 0; k< ethaddr_bytes; k++) { ppattern[offset + magicsync + (j * ETH_ALEN) + k] = macaddr[k]; - set_bit(len++, (unsigned long *) pmask); + pmask[len >> 3] |= BIT(len & 7); + len++; } } return len - 1; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a977a459bd20..ad86a186ddc5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, return -ENOSPC; index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); - if (index > RXCHK_BRCM_TAG_MAX) + if (index >= RXCHK_BRCM_TAG_MAX) return -ENOSPC; /* Location is the classification ID, and index is the 
position @@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; - priv->ring_map[q + port * num_tx_queues] = ring; + priv->ring_map[qp + port * num_tx_queues] = ring; qp++; } @@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, struct net_device *slave_dev; unsigned int num_tx_queues; struct net_device *dev; - unsigned int q, port; + unsigned int q, qp, port; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); if (priv->netdev != info->master) @@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, continue; ring->inspect = false; - priv->ring_map[q + port * num_tx_queues] = NULL; + qp = ring->switch_queue; + priv->ring_map[qp + port * num_tx_queues] = NULL; } return 0; @@ -2727,6 +2728,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) umac_reset(priv); + /* Disable the UniMAC RX/TX */ + umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); + /* We may have been suspended and never received a WOL event that * would turn off MPD detection, take care of that now */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 8b08cb18e363..3f63ffd7561b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) for (i = 0; i < E1H_FUNC_MAX / 2; i++) { u32 func_config = MF_CFG_RD(bp, - func_mf_config[BP_PORT(bp) + 2 * i]. + func_mf_config[BP_PATH(bp) + 2 * i]. config); func_num += ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 192ff8d5da32..cff64e43bdd8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp) */ static void bnx2x_parity_recover(struct bnx2x *bp) { - bool global = false; u32 error_recovered, error_unrecovered; - bool is_parity; + bool is_parity, global = false; +#ifdef CONFIG_BNX2X_SRIOV + int vf_idx; + for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + if (vf) + vf->state = VF_LOST; + } +#endif DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 0edbb0a76847..5097a44686b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) /* send the ramrod on all the queues of the PF */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + int tx_idx; /* Set the appropriate Queue object */ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure Tx switching\n"); - return rc; + for (tx_idx = FIRST_TX_COS_INDEX; + tx_idx < fp->max_cos; tx_idx++) { + q_params.params.update.cid_index = tx_idx; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure Tx switching\n"); + return rc; + } } } diff --git 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b6ebd92ec565..3a716c015415 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -139,6 +139,7 @@ struct bnx2x_virtf {
 #define VF_ACQUIRED	1	/* VF acquired, but not initialized */
 #define VF_ENABLED	2	/* VF Enabled */
 #define VF_RESET	3	/* VF FLR'd, pending cleanup */
+#define VF_LOST		4	/* Recovery while VFs are loaded */

	bool flr_clnup_stage;	/* true during flr cleanup */
	bool malicious;		/* true if FW indicated so, until FLR */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0752b7fa4d9c..ea0e9394f898 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 {
	int i;

+	if (vf->state == VF_LOST) {
+		/* Just ack the FW and return if VFs are lost
+		 * in case of parity error. VFs are supposed to time out
+		 * while waiting for the PF response.
+		 */
+		DP(BNX2X_MSG_IOV,
+		   "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+		return;
+	}
+
	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04ec909e06df..6862594b49ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -942,6 +942,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
+	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1767,8 +1768,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			rc = -EIO;
			if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
-				netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
-				bnxt_sched_reset(bp, rxr);
+				bnapi->cp_ring.rx_buf_errors++;
+				if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+					netdev_warn(bp->dev, "RX buffer error %x\n",
+						    rx_err);
+					bnxt_sched_reset(bp, rxr);
+				}
			}
			goto next_rx_no_len;
		}
@@ -1991,6 +1996,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);

+		if (!bp->fw_health)
+			goto async_event_process_exit;
+
		bp->fw_reset_timestamp = jiffies;
		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
		if (!bp->fw_reset_min_dsecs)
@@ -4434,8 +4442,9 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
			    FUNC_DRV_RGTR_REQ_ENABLES_VER);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
-	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
-		FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
	req.flags = cpu_to_le32(flags);
@@ -6170,7 +6179,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
	val = clamp_t(u16, tmr, 1,
		      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
-	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
	req->enables |=
		cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
 }
@@ -6854,12 +6863,12 @@ skip_rdma:
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
-	if (rc)
+	if (rc) {
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
-	else
-		ctx->flags |= BNXT_CTX_FLAG_INITED;
-
+		return rc;
+	}
+	ctx->flags |= BNXT_CTX_FLAG_INITED;
	return 0;
 }
@@ -7092,14 +7101,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto err_recovery_out;
-	if (!fw_health) {
-		fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
-		bp->fw_health = fw_health;
-		if (!fw_health) {
-			rc = -ENOMEM;
-			goto err_recovery_out;
-		}
-	}
	fw_health->flags = le32_to_cpu(resp->flags);
	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
@@ -7386,14 +7387,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
		pri2cos = &resp2->pri0_cos_queue_id;
		for (i = 0; i < 8; i++) {
			u8 queue_id = pri2cos[i];
+			u8 queue_idx;

+			/* Per port queue IDs start from 0, 10, 20, etc */
+			queue_idx = queue_id % 10;
+			if (queue_idx > BNXT_MAX_QUEUE) {
+				bp->pri2cos_valid = false;
+				goto qstats_done;
+			}
			for (j = 0; j < bp->max_q; j++) {
				if (bp->q_ids[j] == queue_id)
-					bp->pri2cos[i] = j;
+					bp->pri2cos_idx[i] = queue_idx;
			}
		}
		bp->pri2cos_valid = 1;
	}
+qstats_done:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
 }
@@ -7873,7 +7882,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
	int tcs, i;

	tcs = netdev_get_num_tc(dev);
-	if (tcs > 1) {
+	if (tcs) {
		int i, off, count;

		for (i = 0; i < tcs; i++) {
@@ -8762,6 +8771,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
	}
	if (resc_reinit || fw_reset) {
		if (fw_reset) {
+			bnxt_free_ctx_mem(bp);
+			kfree(bp->ctx);
+			bp->ctx = NULL;
			rc = bnxt_fw_init_one(bp);
			if (rc) {
				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9270,10 +9282,6 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
	bnxt_debug_dev_exit(bp);
	bnxt_disable_napi(bp);
	del_timer_sync(&bp->timer);
-	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
-	    pci_is_enabled(bp->pdev))
-		pci_disable_device(bp->pdev);
-
	bnxt_free_skbs(bp);

	/* Save ring stats before shutdown */
@@ -9950,8 +9958,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 val;

-	if (!fw_health || !fw_health->enabled ||
-	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return;

	if (fw_health->tmr_counter) {
@@ -10050,8 +10057,15 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
 {
	__bnxt_close_nic(bp, true, false);
	bnxt_ulp_irq_stop(bp);
+	/* When firmware is in a fatal state, disable the PCI device to
+	 * prevent any potential bad DMAs before freeing kernel memory.
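+	 * (The second pci_disable_device() further below is guarded by
+	 * pci_is_enabled(), so the fatal-path disable here is not repeated.)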
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + pci_disable_device(bp->pdev); bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); + if (pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; @@ -10412,6 +10426,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static void bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) { + netdev_warn(bp->dev, "Failed to allocate fw_health\n"); + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + } +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -10458,6 +10489,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", rc); + bnxt_alloc_fw_health(bp); rc = bnxt_hwrm_error_recovery_qcfg(bp); if (rc) netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", @@ -10543,6 +10575,12 @@ static int bnxt_fw_init_one(struct bnxt *bp) rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); if (rc) return rc; + + /* In case fw capabilities have changed, destroy the unneeded + * reporters and create newly capable ones. + */ + bnxt_dl_fw_reporters_destroy(bp, false); + bnxt_dl_fw_reporters_create(bp); bnxt_fw_init_one_p3(bp); return 0; } @@ -10676,8 +10714,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; case BNXT_FW_RESET_STATE_ENABLE_DEV: - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && - bp->fw_health) { + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { u32 val; val = bnxt_fw_health_readl(bp, @@ -10862,13 +10899,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) struct bnxt *bp = netdev_priv(dev); if (netif_running(dev)) - bnxt_close_nic(bp, false, false); + bnxt_close_nic(bp, true, false); dev->mtu = new_mtu; bnxt_set_ring_params(bp); if (netif_running(dev)) - return bnxt_open_nic(bp, false, false); + return bnxt_open_nic(bp, true, false); return 0; } @@ -10966,11 +11003,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; - if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && - keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && - keys1->ports.ports == keys2->ports.ports && - keys1->basic.ip_proto == keys2->basic.ip_proto && - keys1->basic.n_proto == keys2->basic.n_proto && + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + return false; + } else { + if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src)) || + memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst))) + return false; + } + + if (keys1->ports.ports == keys2->ports.ports && keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) @@ -11262,7 +11311,7 @@ int bnxt_get_port_parent_id(struct net_device *dev, return 
-EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
-	if (!BNXT_PF(bp))
+	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->switch_id);
@@ -11318,13 +11367,13 @@ static void bnxt_remove_one(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

-	if (BNXT_PF(bp)) {
+	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);
-		bnxt_dl_unregister(bp);
-	}

+	bnxt_dl_fw_reporters_destroy(bp, true);
	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
+	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_cancel_sp_work(bp);
	bp->sp_event = 0;
@@ -11337,6 +11386,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
+	kfree(bp->fw_health);
+	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
@@ -11552,6 +11603,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
+	if (rc) {
+		bp->tx_nr_rings = 0;
+		bp->rx_nr_rings = 0;
+	}
	return rc;
 }
@@ -11652,6 +11707,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[4]);
+	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
 }
@@ -11668,6 +11724,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (version_printed++ == 0)
		pr_info("%s", version);

+	/* Clear any pending DMA transactions from the crashed kernel
+	 * while loading the driver in the capture kernel.
+	 */
+	if (is_kdump_kernel()) {
+		pci_clear_master(pdev);
+		pcie_flr(pdev);
+	}
+
	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
@@ -11763,9 +11827,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
-		rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
-		if (rc)
-			goto init_err_pci_clean;
+		bnxt_pcie_dsn_get(bp, bp->switch_id);
	}

	/* MTU range: 60 - FW defined max */
@@ -11812,12 +11874,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		bnxt_init_tc(bp);
	}

+	bnxt_dl_register(bp);
+
	rc = register_netdev(dev);
	if (rc)
-		goto init_err_cleanup_tc;
+		goto init_err_cleanup;

	if (BNXT_PF(bp))
-		bnxt_dl_register(bp);
+		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
+	bnxt_dl_fw_reporters_create(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
@@ -11826,19 +11891,20 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	return 0;

-init_err_cleanup_tc:
+init_err_cleanup:
+	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
-	bnxt_free_ctx_mem(bp);
-	kfree(bp->ctx);
-	bp->ctx = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
+	bnxt_free_ctx_mem(bp);
+	kfree(bp->ctx);
+	bp->ctx = NULL;

init_err_free:
	free_netdev(dev);
@@ -11862,10 +11928,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
		dev_close(dev);

	bnxt_ulp_shutdown(bp);
+	bnxt_clear_int_mode(bp);
+	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
-		bnxt_clear_int_mode(bp);
-		pci_disable_device(pdev);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index
d333589811a5..cda7ba31095a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -927,6 +927,7 @@ struct bnxt_cp_ring_info { dma_addr_t hw_stats_map; u32 hw_stats_ctx_id; u64 rx_l4_csum_errors; + u64 rx_buf_errors; u64 missed_irqs; struct bnxt_ring_struct cp_ring_struct; @@ -1509,6 +1510,7 @@ struct bnxt { #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_MULTI_HOST 0x100000 + #define BNXT_FLAG_DSN_VALID 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 @@ -1657,6 +1659,7 @@ struct bnxt { #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000 #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 + #define BNXT_FW_CAP_HOT_RESET 0x00200000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; @@ -1685,7 +1688,7 @@ struct bnxt { u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; u16 hw_ring_stats_size; - u8 pri2cos[8]; + u8 pri2cos_idx[8]; u8 pri2cos_valid; u16 hwrm_max_req_len; @@ -1902,9 +1905,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type) case HWRM_CFA_ENCAP_RECORD_FREE: case HWRM_CFA_DECAP_FILTER_ALLOC: case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_ALLOC: - case HWRM_CFA_NTUPLE_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_CFG: case HWRM_CFA_EM_FLOW_ALLOC: case HWRM_CFA_EM_FLOW_FREE: case HWRM_CFA_EM_FLOW_CFG: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index fb6f30d0d1d0..b1511bcffb1b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct bnxt *bp = netdev_priv(dev); struct ieee_ets *my_ets = bp->ieee_ets; + int rc; ets->ets_cap = bp->max_tc; if (!my_ets) { - int rc; - if (bp->dcbx_cap & DCB_CAP_DCBX_HOST) return 0; my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL); if (!my_ets) - return 0; + return -ENOMEM; rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets); if (rc) - return 0; + goto error; rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets); if (rc) - return 0; + goto error; + + /* cache result */ + bp->ieee_ets = my_ets; } ets->cbs = my_ets->cbs; @@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); return 0; +error: + kfree(my_ets); + return rc; } static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 7151244f8c7d..2d817ba0602c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -19,11 +19,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { struct bnxt *bp = devlink_health_reporter_priv(reporter); - struct bnxt_fw_health *health = bp->fw_health; u32 val, health_status; int rc; - if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return 0; val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); @@ -103,21 +102,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { .recover = bnxt_fw_fatal_recover, }; 
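/* A condensed sketch of the contract the reworked create/destroy pair
 * below implements (a summary of the following hunks, not additional API):
 *
 *   bnxt_dl_fw_reporters_create(bp)
 *     - creates fw_reset_reporter only when BNXT_FW_CAP_HOT_RESET is set
 *     - creates fw_reporter and fw_fatal_reporter only when
 *       BNXT_FW_CAP_ERROR_RECOVERY is set, and may be called repeatedly
 *
 *   bnxt_dl_fw_reporters_destroy(bp, all)
 *     - all == true: unconditional teardown (driver remove)
 *     - all == false: drop only reporters whose capability bit was lost,
 *       e.g. after a firmware reset changed fw_cap
 */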
-static void bnxt_dl_fw_reporters_create(struct bnxt *bp) +void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - health->fw_reporter = - devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, - 0, false, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; - } + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) + goto err_recovery; health->fw_reset_reporter = devlink_health_reporter_create(bp->dl, @@ -127,8 +120,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_reset_reporter)); health->fw_reset_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; } +err_recovery: + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + if (!health->fw_reporter) { + health->fw_reporter = + devlink_health_reporter_create(bp->dl, + &bnxt_dl_fw_reporter_ops, + 0, false, bp); + if (IS_ERR(health->fw_reporter)) { + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", + PTR_ERR(health->fw_reporter)); + health->fw_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return; + } + } + + if (health->fw_fatal_reporter) + return; + health->fw_fatal_reporter = devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_fatal_reporter_ops, @@ -137,24 +152,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_fatal_reporter)); health->fw_fatal_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; } } -static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - if (health->fw_reporter) - devlink_health_reporter_destroy(health->fw_reporter); - - if (health->fw_reset_reporter) + if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && + health->fw_reset_reporter) { devlink_health_reporter_destroy(health->fw_reset_reporter); + health->fw_reset_reporter = NULL; + } - if (health->fw_fatal_reporter) + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all) + return; + + if (health->fw_reporter) { + devlink_health_reporter_destroy(health->fw_reporter); + health->fw_reporter = NULL; + } + + if (health->fw_fatal_reporter) { devlink_health_reporter_destroy(health->fw_fatal_reporter); + health->fw_fatal_reporter = NULL; + } } void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) @@ -162,9 +188,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) struct bnxt_fw_health *fw_health = bp->fw_health; struct bnxt_fw_reporter_ctx fw_reporter_ctx; - if (!fw_health) - return; - fw_reporter_ctx.sp_event = event; switch (event) { case BNXT_FW_RESET_NOTIFY_SP_EVENT: @@ -203,6 +226,8 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +static const struct devlink_ops bnxt_vf_dl_ops; + enum bnxt_dl_param_id { BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, @@ -311,10 +336,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } else { rc = hwrm_send_message_silent(bp, msg, msg_len, HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { bnxt_copy_from_nvm_data(val, 
data, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); + } else { + struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; + + if (resp->cmd_err == + NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) + rc = -EOPNOTSUPP; + } } dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); if (rc == -EACCES) @@ -409,7 +441,10 @@ int bnxt_dl_register(struct bnxt *bp) return -ENOTSUPP; } - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (BNXT_PF(bp)) + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + else + dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); if (!dl) { netdev_warn(bp->dev, "devlink_alloc failed"); return -ENOMEM; @@ -428,6 +463,9 @@ int bnxt_dl_register(struct bnxt *bp) goto err_dl_free; } + if (!BNXT_PF(bp)) + return 0; + rc = devlink_params_register(dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); if (rc) { @@ -444,7 +482,6 @@ int bnxt_dl_register(struct bnxt *bp) netdev_err(bp->dev, "devlink_port_register failed"); goto err_dl_param_unreg; } - devlink_port_type_eth_set(&bp->dl_port, bp->dev); rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, ARRAY_SIZE(bnxt_dl_port_params)); @@ -455,8 +492,6 @@ int bnxt_dl_register(struct bnxt *bp) devlink_params_publish(dl); - bnxt_dl_fw_reporters_create(bp); - return 0; err_dl_port_unreg: @@ -479,12 +514,14 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; - bnxt_dl_fw_reporters_destroy(bp); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - devlink_port_unregister(&bp->dl_port); - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); + if (BNXT_PF(bp)) { + devlink_port_params_unregister(&bp->dl_port, + bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + devlink_port_unregister(&bp->dl_port); + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + } devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 2f4fd0a7d04b..689c47ab2155 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -57,6 +57,8 @@ struct bnxt_dl_nvm_param { }; void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); +void bnxt_dl_fw_reporters_create(struct bnxt *bp); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 51c140476717..fb1ab58da9fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = { static const char * const bnxt_ring_sw_stats_str[] = { "rx_l4_csum_errors", + "rx_buf_errors", "missed_irqs", }; @@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (k = 0; k < stat_fields; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; + buf[j++] = cpr->rx_buf_errors; buf[j++] = cpr->missed_irqs; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += @@ -587,25 +589,25 @@ skip_ring_stats: if (bp->pri2cos_valid) { for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_bytes_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { 
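			/* as in the sibling loops, pri2cos_idx[] supplies the
			 * per-port queue index (0..7) computed in
			 * bnxt_hwrm_port_qstats_ext(), not the global queue id */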
long n = bnxt_rx_pkts_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_bytes_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_pkts_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); } @@ -2003,8 +2005,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev, struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; const struct firmware *fw; - int rc, hwrm_err = 0; u32 item_len; + int rc = 0; u16 index; bnxt_hwrm_fw_set_time(bp); @@ -2048,15 +2050,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev, memcpy(kmem, fw->data, fw->size); modify.host_src_addr = cpu_to_le64(dma_handle); - hwrm_err = hwrm_send_message(bp, &modify, - sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + rc = hwrm_send_message(bp, &modify, sizeof(modify), + FLASH_PACKAGE_TIMEOUT); dma_free_coherent(&bp->pdev->dev, fw->size, kmem, dma_handle); } } release_firmware(fw); - if (rc || hwrm_err) + if (rc) goto err_exit; if ((install_type & 0xffff) == 0) @@ -2065,20 +2066,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev, install.install_type = cpu_to_le32(install_type); mutex_lock(&bp->hwrm_cmd_lock); - hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) { + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (rc) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; if (resp->error_code && error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - hwrm_err = _hwrm_send_message(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); } - if (hwrm_err) + if (rc) goto flash_pkg_exit; } @@ -2090,7 +2090,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev, flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); err_exit: - if (hwrm_err == -EACCES) + if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } @@ -3062,8 +3062,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, } } - if (info->dest_buf) - memcpy(info->dest_buf + off, dma_buf, len); + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) @@ -3117,7 +3124,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, - void *buf, u32 offset) + void *buf, u32 buf_len, u32 offset) { struct hwrm_dbg_coredump_retrieve_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; @@ -3132,8 +3139,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, data_len); - if (buf) + if (buf) { info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); if (!rc) @@ -3223,14 +3233,17 @@ 
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) { u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; struct coredump_segment_record *seg_record = NULL; - u32 offset = 0, seg_hdr_len, seg_record_len; struct bnxt_coredump_segment_hdr seg_hdr; struct bnxt_coredump coredump = {NULL}; time64_t start_time; u16 start_utc; int rc = 0, i; + if (buf) + buf_len = *dump_len; + start_time = ktime_get_real_seconds(); start_utc = sys_tz.tz_minuteswest * 60; seg_hdr_len = sizeof(seg_hdr); @@ -3263,6 +3276,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) u32 duration = 0, seg_len = 0; unsigned long start, end; + if (buf && ((offset + seg_hdr_len) > + BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto err; + } + start = jiffies; rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); @@ -3275,9 +3294,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) /* Write segment data into the buffer */ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, - &seg_len, buf, + &seg_len, buf, buf_len, offset + seg_hdr_len); - if (rc) + if (rc && rc == -ENOBUFS) + goto err; + else if (rc) netdev_err(bp->dev, "Failed to retrieve coredump for seg = %d\n", seg_record->segment_id); @@ -3307,7 +3328,8 @@ err: rc); kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); - + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer"); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index b5b65b3f8534..3998f6e809a9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -31,6 +31,8 @@ struct bnxt_coredump { u16 total_segs; }; +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) + struct bnxt_hwrm_dbg_dma_info { void *dest_buf; int dest_buf_size; @@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info { u16 seq_off; u16 data_len_off; u16 segs; + u32 seg_start; + u32 buf_len; }; struct hwrm_dbg_cmn_input { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index b2c160947fc8..30816ec4fa91 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc; int max_idx, max_cp_rings; int avail_msix, idx; + int total_vecs; int rc = 0; ASSERT_RTNL(); @@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } edev->ulp_tbl[ulp_id].msix_base = idx; edev->ulp_tbl[ulp_id].msix_requested = avail_msix; - if (bp->total_irqs < (idx + avail_msix)) { + hw_resc = &bp->hw_resc; + total_vecs = idx + avail_msix; + if (bp->total_irqs < total_vecs || + (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) { if (netif_running(dev)) { bnxt_close_nic(bp, true, false); rc = bnxt_open_nic(bp, true, false); @@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } if (BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int resv_msix; resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index f9bf7d7250ab..b010b34cdaf8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp) struct net_device *dev; int rc, i; + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) + return -ENODEV; + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); if (!bp->vf_reps) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1de51811fcb4..8f909d57501f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI */ - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1e1b774e1953..234c13ebbc41 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt { /* Max length of transmit frame must be a multiple of 8 bytes */ #define MACB_TX_LEN_ALIGN 8 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) -#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) +/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a + * false amba_error in TX path from the DMA assuming there is not enough + * space in the SRAM (16KB) even when there is. + */ +#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0) #define GEM_MTU_MIN_SIZE ETH_MIN_MTU #define MACB_NETIF_LSO NETIF_F_TSO @@ -1664,16 +1668,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb, /* Validate LSO compatibility */ - /* there is only one buffer */ - if (!skb_is_nonlinear(skb)) + /* there is only one buffer or protocol is not UDP */ + if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) return features; /* length of header */ hdrlen = skb_transport_offset(skb); - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - hdrlen += tcp_hdrlen(skb); - /* For LSO: + /* For UFO only: * When software supplies two or more payload buffers all payload buffers * apart from the last must be a multiple of 8 bytes in size. 
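Note: the 8-byte rule quoted just above is easy to check in isolation. The sketch below is plain userspace C, not driver code; MACB_TX_LEN_ALIGN is the only name taken from the driver, and the fragment-length array is a hypothetical stand-in for the skb frags.

/* Minimal model of the UFO fragment-alignment rule: every payload buffer
 * except the last must be a multiple of 8 bytes (MACB_TX_LEN_ALIGN).
 */
#include <stdbool.h>
#include <stddef.h>

#define MACB_TX_LEN_ALIGN 8

static bool ufo_frags_aligned(const size_t *frag_len, size_t nr_frags)
{
	size_t i;

	/* all buffers but the last must be 8-byte multiples */
	for (i = 0; i + 1 < nr_frags; i++)
		if (frag_len[i] % MACB_TX_LEN_ALIGN)
			return false;
	return true;
}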
*/ @@ -3688,6 +3690,10 @@ static int at91ether_open(struct net_device *dev) u32 ctl; int ret; + ret = pm_runtime_get_sync(&lp->pdev->dev); + if (ret < 0) + return ret; + /* Clear internal statistics */ ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); @@ -3748,7 +3754,7 @@ static int at91ether_close(struct net_device *dev) q->rx_buffers, q->rx_buffers_dma); q->rx_buffers = NULL; - return 0; + return pm_runtime_put(&lp->pdev->dev); } /* Transmit packet */ @@ -4027,7 +4033,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, mgmt->rate = 0; mgmt->hw.init = &init; - *tx_clk = clk_register(NULL, &mgmt->hw); + *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); if (IS_ERR(*tx_clk)) return PTR_ERR(*tx_clk); @@ -4361,7 +4367,6 @@ err_out_free_netdev: err_disable_clocks: clk_disable_unprepare(tx_clk); - clk_unregister(tx_clk); clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); clk_disable_unprepare(rx_clk); @@ -4392,11 +4397,11 @@ static int macb_remove(struct platform_device *pdev) mdiobus_free(bp->mii_bus); unregister_netdev(dev); + tasklet_kill(&bp->hresp_err_tasklet); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { clk_disable_unprepare(bp->tx_clk); - clk_unregister(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); clk_disable_unprepare(bp->rx_clk); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index acb016834f04..76ff42ec3ae5 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -410,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) lmac = &bgx->lmac[lmacid]; cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); - if (enable) + if (enable) { cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; - else + + /* enable TX FIFO Underflow interrupt */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S, + GMI_TXX_INT_UNDFLW); + } else { cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); + + /* Disable TX FIFO Underflow interrupt */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C, + GMI_TXX_INT_UNDFLW); + } bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); if (bgx->is_rgx) @@ -1115,7 +1124,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) phy_interface_mode(lmac->lmac_type))) return -ENODEV; - phy_start_aneg(lmac->phydev); + phy_start(lmac->phydev); return 0; } @@ -1535,6 +1544,48 @@ static int bgx_init_phy(struct bgx *bgx) return bgx_init_of_phy(bgx); } +static irqreturn_t bgx_intr_handler(int irq, void *data) +{ + struct bgx *bgx = (struct bgx *)data; + u64 status, val; + int lmac; + + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { + status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT); + if (status & GMI_TXX_INT_UNDFLW) { + pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n", + bgx->bgx_id, lmac); + val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG); + val &= ~CMR_EN; + bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); + val |= CMR_EN; + bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); + } + /* clear interrupts */ + bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status); + } + + return IRQ_HANDLED; +} + +static void bgx_register_intr(struct pci_dev *pdev) +{ + struct bgx *bgx = pci_get_drvdata(pdev); + int ret; + + ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET, + BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + pci_err(pdev, "Req for #%d msix vectors failed\n", + 
BGX_LMAC_VEC_OFFSET); + return; + } + ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL, + bgx, "BGX%d", bgx->bgx_id); + if (ret) + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); +} + static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; @@ -1550,7 +1601,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, bgx); - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); @@ -1604,6 +1655,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) bgx_init_hw(bgx); + bgx_register_intr(pdev); + /* Enable all LMACs */ for (lmac = 0; lmac < bgx->lmac_count; lmac++) { err = bgx_lmac_enable(bgx, lmac); @@ -1620,6 +1673,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_enable: bgx_vnic[bgx->bgx_id] = NULL; + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); err_release_regions: pci_release_regions(pdev); err_disable_device: @@ -1637,6 +1691,8 @@ static void bgx_remove(struct pci_dev *pdev) for (lmac = 0; lmac < bgx->lmac_count; lmac++) bgx_lmac_disable(bgx, lmac); + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); + bgx_vnic[bgx->bgx_id] = NULL; pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 25888706bdcd..cdea49392185 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -180,6 +180,15 @@ #define BGX_GMP_GMI_TXX_BURST 0x38228 #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 +#define BGX_GMP_GMI_TXX_INT 0x38500 +#define BGX_GMP_GMI_TXX_INT_W1S 0x38508 +#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510 +#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518 +#define GMI_TXX_INT_PTP_LOST BIT_ULL(4) +#define GMI_TXX_INT_LATE_COL BIT_ULL(3) +#define GMI_TXX_INT_XSDEF BIT_ULL(2) +#define GMI_TXX_INT_XSCOL BIT_ULL(1) +#define GMI_TXX_INT_UNDFLW BIT_ULL(0) #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ #define BGX_MSIX_VEC_0_29_CTL 0x400008 diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 58f89f6a040f..97ff8608f0ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (!is_offload(adapter)) return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; if (!(adapter->flags & FULL_INIT_DONE)) return -EIO; /* need the memory controllers */ if (copy_from_user(&t, useraddr, sizeof(t))) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1fbb640e896a..4025a683fa7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -503,6 +503,7 @@ struct link_config { enum cc_pause requested_fc; /* flow control user has requested */ enum cc_pause fc; /* actual link flow control */ + enum cc_pause advertised_fc; /* actual advertised flow control */ enum cc_fec requested_fec; /* Forward Error Correction: */ enum cc_fec fec; /* requested and actual in use */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index ae6a47dd7dc9..2ce96cc1bad4 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos) static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos) { v = seq_tab_get_idx(seq->private, *pos + 1); - if (v) - ++*pos; + ++(*pos); return v; } @@ -2996,6 +2995,9 @@ static int sge_queue_entries(const struct adapter *adap) int tot_uld_entries = 0; int i; + if (!is_uld(adap)) + goto lld_only; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_TX_MAX; i++) tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i); @@ -3006,6 +3008,7 @@ static int sge_queue_entries(const struct adapter *adap) } mutex_unlock(&uld_mutex); +lld_only: return DIV_ROUND_UP(adap->sge.ethqsets, 4) + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 76538f4cd595..f537be9cb315 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -793,8 +793,8 @@ static void get_pauseparam(struct net_device *dev, struct port_info *p = netdev_priv(dev); epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; + epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0; } static int set_pauseparam(struct net_device *dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 38024877751c..069a51847885 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3032,7 +3032,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) return ret; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - pi->xact_addr_filt = ret; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 1a407d3c1d67..e6fe2870137b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -682,8 +682,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = l2t_get_idx(seq, *pos); - if (v) - ++*pos; + ++(*pos); return v; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 928bfea5457b..3a45ac8f0e01 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1324,8 +1324,9 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, int maxreclaim) { + unsigned int reclaimed, hw_cidx; struct sge_txq *q = &eq->q; - unsigned int reclaimed; + int hw_in_use; if (!q->in_use || !__netif_tx_trylock(eq->txq)) return 0; @@ -1333,12 +1334,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, /* Reclaim pending completed TX Descriptors. */ reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); + hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); + hw_in_use = q->pidx - hw_cidx; + if (hw_in_use < 0) + hw_in_use += q->size; + /* If the TX Queue is currently stopped and there's now more than half * the queue available, restart it. 
Otherwise bail out since the rest * of what we want do here is with the possibility of shipping any * currently buffered Coalesced TX Work Request. */ - if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) { + if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { netif_tx_wake_queue(eq->txq); eq->q.restarts++; } @@ -1469,16 +1475,7 @@ out_free: dev_kfree_skb_any(skb); * has opened up. */ eth_txq_stop(q); - - /* If we're using the SGE Doorbell Queue Timer facility, we - * don't need to ask the Firmware to send us Egress Queue CIDX - * Updates: the Hardware will do this automatically. And - * since we send the Ingress Queue CIDX Updates to the - * corresponding Ethernet Response Queue, we'll get them very - * quickly. - */ - if (!q->dbqt) - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1792,16 +1789,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, * has opened up. */ eth_txq_stop(txq); - - /* If we're using the SGE Doorbell Queue Timer facility, we - * don't need to ask the Firmware to send us Egress Queue CIDX - * Updates: the Hardware will do this automatically. And - * since we send the Ingress Queue CIDX Updates to the - * corresponding Ethernet Response Queue, we'll get them very - * quickly. - */ - if (!txq->dbqt) - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* Start filling in our Work Request. Note that we do _not_ handle @@ -2924,26 +2912,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq, } txq = &s->ethtxq[pi->first_qset + rspq->idx]; - - /* We've got the Hardware Consumer Index Update in the Egress Update - * message. If we're using the SGE Doorbell Queue Timer mechanism, - * these Egress Update messages will be our sole CIDX Updates we get - * since we don't want to chew up PCIe bandwidth for both Ingress - * Messages and Status Page writes. However, The code which manages - * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value - * stored in the Status Page at the end of the TX Queue. It's easiest - * to simply copy the CIDX Update value from the Egress Update message - * to the Status Page. Also note that no Endian issues need to be - * considered here since both are Big Endian and we're just copying - * bytes consistently ... 
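Note: the occupancy test introduced in t4_sge_eth_txq_egress_update() above reduces to modular arithmetic on the producer and hardware consumer indices. A hedged userspace sketch follows; pidx, hw_cidx, and q_size are stand-ins for the sge_txq fields.

/* hw_in_use is the distance from the hardware consumer index to the
 * software producer index, with wraparound at the ring size. The queue is
 * restarted only when fewer than half the descriptors are still in flight.
 */
#include <stdbool.h>

static bool should_restart_txq(unsigned int pidx, unsigned int hw_cidx,
			       unsigned int q_size)
{
	int hw_in_use = (int)pidx - (int)hw_cidx;

	if (hw_in_use < 0)
		hw_in_use += q_size;	/* producer wrapped past consumer */

	return hw_in_use < (int)(q_size / 2);
}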
- */ - if (txq->dbqt) { - struct cpl_sge_egr_update *egr; - - egr = (struct cpl_sge_egr_update *)rsp; - WRITE_ONCE(txq->q.stat->cidx, egr->cidx); - } - t4_sge_eth_txq_egress_update(adapter, txq, -1); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f2a7824da42b..3f6813daf3c1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) if (cc_pause & PAUSE_TX) fw_pause |= FW_PORT_CAP32_802_3_PAUSE; else - fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR | + FW_PORT_CAP32_802_3_PAUSE; } else if (cc_pause & PAUSE_TX) { fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; } @@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { const struct fw_port_cmd *cmd = (const void *)rpl; - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); - struct adapter *adapter = pi->adapter; - struct link_config *lc = &pi->link_cfg; - int link_ok, linkdnrc; - enum fw_port_type port_type; - enum fw_port_module_type mod_type; - unsigned int speed, fc, fec; fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + struct link_config *lc = &pi->link_cfg; + struct adapter *adapter = pi->adapter; + unsigned int speed, fc, fec, adv_fc; + enum fw_port_module_type mod_type; + int action, link_ok, linkdnrc; + enum fw_port_type port_type; /* Extract the various fields from the Port Information message. */ + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); switch (action) { case FW_PORT_ACTION_GET_PORT_INFO: { u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); @@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } fec = fwcap_to_cc_fec(acaps); + adv_fc = fwcap_to_cc_pause(acaps); fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); @@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc || fec != lc->fec) { /* something changed */ + fc != lc->fc || adv_fc != lc->advertised_fc || + fec != lc->fec) { + /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; dev_warn_ratelimited(adapter->pdev_dev, @@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } lc->link_ok = link_ok; lc->speed = speed; + lc->advertised_fc = adv_fc; lc->fc = fc; lc->fec = fec; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index f6fc0875d5b0..f4d41f968afa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev, struct port_info *pi = netdev_priv(dev); pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; - pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; + pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0; + pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0; } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index ccca67cf4487..57cfd10a99ec 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -135,6 +135,7 @@ struct link_config { enum cc_pause requested_fc; /* flow control user has requested */ enum cc_pause fc; /* actual link flow control */ + enum cc_pause advertised_fc; /* actual advertised flow control */ enum cc_fec auto_fec; /* Forward Error Correction: */ enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 8a389d617a23..9d49ff211cc1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc) static void t4vf_handle_get_port_info(struct port_info *pi, const struct fw_port_cmd *cmd) { - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); - struct adapter *adapter = pi->adapter; - struct link_config *lc = &pi->link_cfg; - int link_ok, linkdnrc; - enum fw_port_type port_type; - enum fw_port_module_type mod_type; - unsigned int speed, fc, fec; fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + struct link_config *lc = &pi->link_cfg; + struct adapter *adapter = pi->adapter; + unsigned int speed, fc, fec, adv_fc; + enum fw_port_module_type mod_type; + int action, link_ok, linkdnrc; + enum fw_port_type port_type; /* Extract the various fields from the Port Information message. */ + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); switch (action) { case FW_PORT_ACTION_GET_PORT_INFO: { u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); @@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } fec = fwcap_to_cc_fec(acaps); + adv_fc = fwcap_to_cc_pause(acaps); fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); @@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc || fec != lc->fec) { /* something changed */ + fc != lc->fc || adv_fc != lc->advertised_fc || + fec != lc->fec) { + /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; dev_warn_ratelimited(adapter->pdev_dev, @@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } lc->link_ok = link_ok; lc->speed = speed; + lc->advertised_fc = adv_fc; lc->fc = fc; lc->fec = fec; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index acb2856936d2..6e2ab10ad2e6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev) napi_disable(&enic->napi[i]); netif_carrier_off(netdev); - netif_tx_disable(netdev); if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) for (i = 0; i < enic->wq_count; i++) napi_disable(&enic->napi[enic_cq_wq(enic, i)]); + netif_tx_disable(netdev); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_del_station_addr(enic); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index a8f4c69252ff..2814b96751b4 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev) if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "TX queue base is not aligned\n"); + 
dma_free_coherent(geth->dev, len * sizeof(*desc_ring), + desc_ring, port->txq_dma_base); kfree(skb_tab); return -ENOMEM; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index cce90b5925d9..70060c51854f 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1405,6 +1405,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) ether_addr_copy(pdata->dev_addr, mac_addr); + else if (PTR_ERR(mac_addr) == -EPROBE_DEFER) + return ERR_CAST(mac_addr); return pdata; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 0efdbd1a4a6f..32d470d4122a 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -2214,15 +2214,16 @@ static int __init dmfe_init_module(void) if (cr6set) dmfe_cr6_user_set = cr6set; - switch(mode) { - case DMFE_10MHF: + switch (mode) { + case DMFE_10MHF: case DMFE_100MHF: case DMFE_10MFD: case DMFE_100MFD: case DMFE_1M_HPNA: dmfe_media_mode = mode; break; - default:dmfe_media_mode = DMFE_AUTO; + default: + dmfe_media_mode = DMFE_AUTO; break; } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index b1f30b194300..117ffe08800d 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1809,8 +1809,8 @@ static int __init uli526x_init_module(void) if (cr6set) uli526x_cr6_user_set = cr6set; - switch (mode) { - case ULI526X_10MHF: + switch (mode) { + case ULI526X_10MHF: case ULI526X_100MHF: case ULI526X_10MFD: case ULI526X_100MFD: diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index b4b82b9c5cd6..00c4beb760c3 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1600,13 +1600,15 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) * Skb freeing is not handled here. * * This function may be called on error paths in the Tx function, so guard - * against cases when not all fd relevant fields were filled in. + * against cases when not all fd relevant fields were filled in. To avoid + * reading the invalid transmission timestamp for the error paths set ts to + * false. * * Return the skb backpointer, since for S/G frames the buffer containing it * gets freed here. 
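Note: the ts convention described in this comment can be summarized as: unmap first, then read hardware-provided info, and only when the caller knows the frame actually completed transmission. A minimal sketch of that gating, using a hypothetical fd model rather than the real qm_fd:

/* Only the Tx confirmation path passes ts = true; every error path passes
 * ts = false and skips the (invalid) hardware timestamp.
 */
#include <stdbool.h>

struct fd_model { bool hw_fields_valid; };

static void cleanup_tx_fd_model(const struct fd_model *fd, bool ts)
{
	/* 1. always undo the DMA mapping first (omitted in this model) */

	/* 2. read HW-provided info only after unmapping, and only when the
	 *    caller says the frame actually completed transmission
	 */
	if (ts && fd->hw_fields_valid) {
		/* fetch and report the timestamp here */
	}
}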
*/ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, - const struct qm_fd *fd) + const struct qm_fd *fd, bool ts) { const enum dma_data_direction dma_dir = DMA_TO_DEVICE; struct device *dev = priv->net_dev->dev.parent; @@ -1620,18 +1622,6 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, skbh = (struct sk_buff **)phys_to_virt(addr); skb = *skbh; - if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - - if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, - &ns)) { - shhwtstamps.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(skb, &shhwtstamps); - } else { - dev_warn(dev, "fman_port_get_tstamp failed!\n"); - } - } - if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; dma_unmap_single(dev, addr, @@ -1654,14 +1644,29 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, dma_unmap_page(dev, qm_sg_addr(&sgt[i]), qm_sg_entry_get_len(&sgt[i]), dma_dir); } - - /* Free the page frag that we allocated on Tx */ - skb_free_frag(phys_to_virt(addr)); } else { dma_unmap_single(dev, addr, skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); } + /* DMA unmapping is required before accessing the HW provided info */ + if (ts && priv->tx_tstamp && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, + &ns)) { + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } else { + dev_warn(dev, "fman_port_get_tstamp failed!\n"); + } + } + + if (qm_fd_get_format(fd) == qm_fd_sg) + /* Free the page frag that we allocated on Tx */ + skb_free_frag(phys_to_virt(addr)); + return skb; } @@ -2114,7 +2119,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) return NETDEV_TX_OK; - dpaa_cleanup_tx_fd(priv, &fd); + dpaa_cleanup_tx_fd(priv, &fd, false); skb_to_fd_failed: enomem: percpu_stats->tx_errors++; @@ -2160,7 +2165,7 @@ static void dpaa_tx_error(struct net_device *net_dev, percpu_priv->stats.tx_errors++; - skb = dpaa_cleanup_tx_fd(priv, fd); + skb = dpaa_cleanup_tx_fd(priv, fd, false); dev_kfree_skb(skb); } @@ -2200,7 +2205,7 @@ static void dpaa_tx_conf(struct net_device *net_dev, percpu_priv->tx_confirm++; - skb = dpaa_cleanup_tx_fd(priv, fd); + skb = dpaa_cleanup_tx_fd(priv, fd, true); consume_skb(skb); } @@ -2430,7 +2435,7 @@ static void egress_ern(struct qman_portal *portal, percpu_priv->stats.tx_fifo_errors++; count_ern(percpu_priv, msg); - skb = dpaa_cleanup_tx_fd(priv, fd); + skb = dpaa_cleanup_tx_fd(priv, fd, false); dev_kfree_skb_any(skb); } @@ -2478,6 +2483,9 @@ static void dpaa_adjust_link(struct net_device *net_dev) mac_dev->adjust_link(mac_dev); } +/* The Aquantia PHYs are capable of performing rate adaptation */ +#define PHY_VEND_AQUANTIA 0x03a1b400 + static int dpaa_phy_init(struct net_device *net_dev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; @@ -2496,9 +2504,14 @@ static int dpaa_phy_init(struct net_device *net_dev) return -ENODEV; } - /* Remove any features not supported by the controller */ - ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support); - linkmode_and(phy_dev->supported, phy_dev->supported, mask); + /* Unless the PHY is capable of rate adaptation */ + if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || + ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) { + /* remove any 
features not supported by the controller */ + ethtool_convert_legacy_u32_to_link_mode(mask, + mac_dev->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, mask); + } phy_support_asym_pause(phy_dev); @@ -2757,9 +2770,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); - return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, - DPAA_FD_DATA_ALIGNMENT) : - headroom; + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); } static int dpaa_eth_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 0aa1c34019bb..dc9a6c36cac0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -216,7 +216,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, if (err == -EINVAL) /* Older firmware versions don't support all pages */ memset(&dpni_stats, 0, sizeof(dpni_stats)); - else + else if (err) netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); num_cnt = dpni_stats_page_size[j] / sizeof(u64); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index a9503aea527f..6437fe6b9abf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -160,10 +160,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) irq = mc_dev->irqs[0]; ptp_qoriq->irq = irq->msi_desc->irq; - err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL, - dpaa2_ptp_irq_handler_thread, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(dev), ptp_qoriq); + err = request_threaded_irq(ptp_qoriq->irq, NULL, + dpaa2_ptp_irq_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), ptp_qoriq); if (err < 0) { dev_err(dev, "devm_request_threaded_irq(): %d\n", err); goto err_free_mc_irq; @@ -173,18 +173,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) DPRTC_IRQ_INDEX, 1); if (err < 0) { dev_err(dev, "dprtc_set_irq_enable(): %d\n", err); - goto err_free_mc_irq; + goto err_free_threaded_irq; } err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps); if (err) - goto err_free_mc_irq; + goto err_free_threaded_irq; dpaa2_phc_index = ptp_qoriq->phc_index; dev_set_drvdata(dev, ptp_qoriq); return 0; +err_free_threaded_irq: + free_irq(ptp_qoriq->irq, ptp_qoriq); err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4bb30761abfc..3fc8a66e4f41 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + struct device *dev = &fep->pdev->dev; u32 *buf = (u32 *)regbuf; u32 i, off; + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return; regs->version = fec_enet_register_version; @@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev, off >>= 2; buf[off] = readl(&theregs[off]); } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } static int fec_enet_get_ts_info(struct net_device *ndev, @@ -2520,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) return -EINVAL; } - cycle = 
fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); if (cycle > 0xFFFF) { dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); return -EINVAL; } - cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); if (cycle > 0xFFFF) { - dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); + dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index 0139cb9042ec..34150182cc35 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -8,3 +8,31 @@ config FSL_FMAN help Freescale Data-Path Acceleration Architecture Frame Manager (FMan) support + +config DPAA_ERRATUM_A050385 + bool + depends on ARM64 && FSL_DPAA + default y + help + DPAA FMan erratum A050385 software workaround implementation: + align buffers, data start, SG fragment length to avoid FMan DMA + splits. + FMAN DMA read or writes under heavy traffic load may cause FMAN + internal resource leak thus stopping further packet processing. + The FMAN internal queue can overflow when FMAN splits single + read or write transactions into multiple smaller transactions + such that more than 17 AXI transactions are in flight from FMAN + to interconnect. When the FMAN internal queue overflows, it can + stall further packet processing. The issue can occur with any + one of the following three conditions: + 1. FMAN AXI transaction crosses 4K address boundary (Errata + A010022) + 2. FMAN DMA address for an AXI transaction is not 16 byte + aligned, i.e. the last 4 bits of an address are non-zero + 3. Scatter Gather (SG) frames have more than one SG buffer in + the SG list and any one of the buffers, except the last + buffer in the SG list has data size that is not a multiple + of 16 bytes, i.e., other than 16, 32, 48, 64, etc. + With any one of the above three conditions present, there is + likelihood of stalled FMAN packet processing, especially under + stress with multiple ports injecting line-rate traffic. diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 210749bf1eac..4c2fa13a7dd7 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1,5 +1,6 @@ /* * Copyright 2008-2015 Freescale Semiconductor Inc. 
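Note: the fec_enet_set_coalesce() fix above validates the *requested* values (ec->rx_coalesce_usecs, ec->tx_coalesce_usecs) rather than the previously stored ones. A userspace model of that check; the clock-rate conversion below is an illustrative stand-in for fec_enet_us_to_itr_clock(), not the driver's formula.

/* Reject any coalesce setting whose ITR cycle count exceeds the 16-bit
 * hardware limit (0xFFFF).
 */
#include <stdbool.h>
#include <stdint.h>

#define ITR_MAX_CYCLES 0xFFFFu

static bool coalesce_usecs_valid(uint32_t usecs, uint32_t itr_clk_hz)
{
	uint64_t cycles = (uint64_t)usecs * itr_clk_hz / 1000000u;

	return cycles <= ITR_MAX_CYCLES;	/* must fit the 16-bit field */
}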
+ * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -566,6 +567,10 @@ struct fman_cfg { u32 qmi_def_tnums_thresh; }; +#ifdef CONFIG_DPAA_ERRATUM_A050385 +static bool fman_has_err_a050385; +#endif + static irqreturn_t fman_exceptions(struct fman *fman, enum fman_exceptions exception) { @@ -2514,6 +2519,14 @@ struct fman *fman_bind(struct device *fm_dev) } EXPORT_SYMBOL(fman_bind); +#ifdef CONFIG_DPAA_ERRATUM_A050385 +bool fman_has_errata_a050385(void) +{ + return fman_has_err_a050385; +} +EXPORT_SYMBOL(fman_has_errata_a050385); +#endif + static irqreturn_t fman_err_irq(int irq, void *handle) { struct fman *fman = (struct fman *)handle; @@ -2841,6 +2854,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_free; } +#ifdef CONFIG_DPAA_ERRATUM_A050385 + fman_has_err_a050385 = + of_property_read_bool(fm_node, "fsl,erratum-a050385"); +#endif + return fman; fman_node_put: diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index 935c317fa696..f2ede1360f03 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -1,5 +1,6 @@ /* * Copyright 2008-2015 Freescale Semiconductor Inc. + * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -398,6 +399,10 @@ u16 fman_get_max_frm(void); int fman_get_rx_extra_headroom(void); +#ifdef CONFIG_DPAA_ERRATUM_A050385 +bool fman_has_errata_a050385(void); +#endif + struct fman *fman_bind(struct device *dev); #endif /* __FM_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 41c6fa200e74..e1901874c19f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -110,7 +110,7 @@ do { \ /* Interface Mode Register (IF_MODE) */ #define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */ -#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */ +#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */ #define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */ #define IF_MODE_RGMII 0x00000004 #define IF_MODE_RGMII_AUTO 0x00008000 @@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, tmp = 0; switch (phy_if) { case PHY_INTERFACE_MODE_XGMII: - tmp |= IF_MODE_XGMII; + tmp |= IF_MODE_10G; break; default: tmp |= IF_MODE_GMII; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 51ad86417cb1..2580bcd85025 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2204,13 +2204,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) skb_dirtytx = tx_queue->skb_dirtytx; while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { + bool do_tstamp; + + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en; frags = skb_shinfo(skb)->nr_frags; /* When time stamping, one additional TxBD must be freed. * Also, we need to dma_unmap_single() the TxPAL. 
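Note: the gianfar BD accounting implied by this comment, with do_tstamp now derived from SKBTX_HW_TSTAMP and hwts_tx_en rather than from SKBTX_IN_PROGRESS (which other code paths can also set), sketches as the userspace model below; the inputs are stand-ins for the skb and priv fields.

/* A time-stamped frame consumes one extra TxBD (plus a TxPAL to unmap),
 * so the cleanup loop must free frags + 2 descriptors instead of frags + 1.
 */
#include <stdbool.h>

static unsigned int gfar_nr_txbds_model(unsigned int frags,
					bool skb_hw_tstamp_flag,
					bool hwts_tx_en)
{
	bool do_tstamp = skb_hw_tstamp_flag && hwts_tx_en;

	return do_tstamp ? frags + 2 : frags + 1;
}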
*/ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + if (unlikely(do_tstamp)) nr_txbds = frags + 2; else nr_txbds = frags + 1; @@ -2224,7 +2228,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) (lstatus & BD_LENGTH_MASK)) break; - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + if (unlikely(do_tstamp)) { next = next_txbd(bdp, base, tx_ring_size); buflen = be16_to_cpu(next->length) + GMAC_FCB_LEN + GMAC_TXPAL_LEN; @@ -2234,7 +2238,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), buflen, DMA_TO_DEVICE); - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + if (unlikely(do_tstamp)) { struct skb_shared_hwtstamps shhwtstamps; u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL); diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index e03b30c60dcf..c82c85ef5fb3 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -49,6 +49,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a011043; }; static u32 xgmac_read32(void __iomem *regs, @@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; /* Return all Fs if nothing was there */ - if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && + !priv->has_a011043) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); @@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, "little-endian"); + priv->has_a011043 = of_property_read_bool(pdev->dev.of_node, + "fsl,erratum-a011043"); + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index aca95f64bde8..9b7a8db9860f 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, } qpl->id = id; - qpl->num_entries = pages; + qpl->num_entries = 0; qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); /* caller handles clean up */ if (!qpl->pages) @@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, /* caller handles clean up */ if (err) return -ENOMEM; + qpl->num_entries++; } priv->num_registered_pages += pages; diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index edec61dfc868..9f52e72ff641 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, rx->cnt = cnt; rx->fill_cnt += work_done; - /* restock desc ring slots */ - dma_wmb(); /* Ensure descs are visible before ringing doorbell */ gve_rx_write_doorbell(priv, rx); return gve_rx_work_pending(rx); } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index f4889431f9b7..d0244feb0301 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct 
net_device *dev) * may have added descriptors without ringing the doorbell. */ - /* Ensure tx descs from a prior gve_tx are visible before - * ringing doorbell. - */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } @@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) return NETDEV_TX_OK; - /* Ensure tx descs are visible before ringing doorbell */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 4606a7e4a6d1..2ffe035e96d6 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); hip04_set_xmit_desc(priv, phys); - priv->tx_head = TX_NEXT(tx_head); count++; netdev_sent_queue(ndev, skb->len); + priv->tx_head = TX_NEXT(tx_head); stats->tx_bytes += skb->len; stats->tx_packets++; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 14ab20491fd0..eb69e5c81a4d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { - netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } @@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) container_of(napi, struct hns_nic_ring_data, napi); struct hnae_ring *ring = ring_data->ring; -try_again: clean_complete += ring_data->poll_one( ring_data, budget - clean_complete, ring_data->ex_process); @@ -1066,7 +1064,7 @@ try_again: napi_complete(napi); ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); } else { - goto try_again; + return budget; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 616cad0faa21..403e0f089f2a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -54,6 +54,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting"); #define HNS3_INNER_VLAN_TAG 1 #define HNS3_OUTER_VLAN_TAG 2 +#define HNS3_MIN_TX_LEN 33U + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -1329,6 +1331,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) int ret; int i; + /* Hardware can only handle short frames above 32 bytes */ + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) + return NETDEV_TX_OK; + /* Prefetch the data used later */ prefetch(skb->data); @@ -1590,7 +1596,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; + kinfo->dcb_ops->setup_tc(h, tc ? 
tc : 1, prio_tc) : -EOPNOTSUPP; } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -1692,6 +1698,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) time_after(jiffies, (trans_start + ndev->watchdog_timeo))) { timeout_queue = i; + netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", + q->state, + jiffies_to_msecs(jiffies - trans_start)); break; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c052bb33b3d3..d4652dea4569 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2417,10 +2417,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) { + struct hclge_mac *mac = &hdev->hw.mac; int ret; duplex = hclge_check_speed_dup(duplex, speed); - if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex) return 0; ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); @@ -6003,6 +6005,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle, static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, struct hclge_fd_rule_tuples *tuples) { +#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 +#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 + tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); tuples->ip_proto = fkeys->basic.ip_proto; tuples->dst_port = be16_to_cpu(fkeys->ports.dst); @@ -6011,12 +6016,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); } else { - memcpy(tuples->src_ip, - fkeys->addrs.v6addrs.src.in6_u.u6_addr32, - sizeof(tuples->src_ip)); - memcpy(tuples->dst_ip, - fkeys->addrs.v6addrs.dst.in6_u.u6_addr32, - sizeof(tuples->dst_ip)); + int i; + + for (i = 0; i < IPV6_SIZE; i++) { + tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); + tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); + } } } @@ -9437,12 +9442,22 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to reinit manager table, ret = %d\n", ret); + return ret; + } + ret = hclge_init_fd_config(hdev); if (ret) { dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); return ret; } + /* Log and clear the hw errors those already occurred */ + hclge_handle_all_hns_hw_errors(ae_dev); + /* Re-enable the hw error interrupts because * the interrupts get disabled on global reset. 
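Note: earlier in this hunk, hclge_fd_get_flow_tuples() switches from a raw memcpy() to a per-word be32_to_cpu() loop for IPv6 addresses, since the flow-dissector words are big-endian on the wire. A userspace model of that conversion; ntohl() stands in for be32_to_cpu(), and IPV6_SIZE is the driver's 4-word constant.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */

#define IPV6_SIZE 4

static void copy_ip6_tuple(uint32_t dst[IPV6_SIZE],
			   const uint32_t src_be[IPV6_SIZE])
{
	int i;

	/* word-wise byte swap instead of a raw memcpy() */
	for (i = 0; i < IPV6_SIZE; i++)
		dst[i] = ntohl(src_be[i]);
}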
*/ diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index fe88ab88cacc..14b9e1eacd11 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,7 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_HINIC) += hinic.o -hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ - hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \ - hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \ - hinic_common.o hinic_ethtool.o +hinic-y := hinic_nic_cfg.o hinic_nic_io.o hinic_nic_dbg.o \ + hinic_hwif.o hinic_msix_attr.o hinic_eqs.o \ + hinic_mbox.o hinic_api_cmd.o hinic_mgmt.o \ + hinic_wq.o hinic_cmdq.o hinic_hwdev.o hinic_cfg.o \ + ossl_knl_linux.o \ + hinic_sml_counter.o hinic_sml_lt.o \ + hinic_main.o hinic_lld.o hinic_qp.o \ + hinic_rx.o hinic_tx.o hinic_dbgtool_knl.o\ + hinic_nictool.o hinic_sriov.o hinic_ethtool.o\ + hinic_dcb.o hinic_multi_host_mgmt.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c new file mode 100644 index 000000000000..3328b0714b4b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c @@ -0,0 +1,1180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CMD_CELL_WB_ADDR_SIZE 8 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 100000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL + +#define API_CHAIN_RESP_ALIGNMENT 64ULL + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)id) << 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 checksum = 0; + u8 *val = data; + + for (idx = 0; idx < 7; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hinic_api_cmd_chain *chain) +{ + enum hinic_api_cmd_chain_type chain_type = chain->chain_type; + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; + + hinic_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + + return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR), + HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HINIC_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + HINIC_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, "Chain hw current pi: 0x%x\n", val); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + * Return: 0 - success, negative - failure + */ +static int chain_busy(struct hinic_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + struct hinic_api_cmd_cell_ctxt *ctxt; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC_API_CMD_MULTI_READ: + 
case HINIC_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status && + !HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %d, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + * @cmd_size: the command size + * Return: cell_data_size + */ +static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, u16 cmd_size) +{ + u16 cell_data_size = 0; + + switch (type) { + case HINIC_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + */ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 chksum; + + ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HINIC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HINIC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + */ +static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, + enum hinic_node_id dest, + const void *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, + cell_ctxt->saved_prod_idx); + cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | + HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HINIC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HINIC_API_CMD_POLL_WRITE: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HINIC_API_CMD_DESC_SET(NOT_TRIGGER, 
RESP_AEQE_EN) |
+			HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+		break;
+	case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+	case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+		priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+		cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+			HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+			HINIC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) |
+			HINIC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) |
+			HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+		break;
+	default:
+		sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n",
+			chain->chain_type);
+		return;
+	}
+
+	cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
+		      HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
+
+	cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
+					     XOR_CHKSUM);
+
+	/* The data in the HW should be in Big Endian Format */
+	cell->desc = cpu_to_be64(cell->desc);
+
+	memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
+}
+
+/**
+ * prepare_cell - prepare cell ctrl and cmd in the current producer cell
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ */
+static void prepare_cell(struct hinic_api_cmd_chain *chain,
+			 enum hinic_node_id dest,
+			 void *cmd, u16 cmd_size)
+{
+	struct hinic_api_cmd_cell *curr_node;
+	u16 cell_size;
+
+	curr_node = chain->curr_node;
+
+	cell_size = get_cell_data_size(chain->chain_type, cmd_size);
+
+	prepare_cell_ctrl(&curr_node->ctrl, cell_size);
+	prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size);
+}
+
+static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
+{
+	chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
+}
+
+static void issue_api_cmd(struct hinic_api_cmd_chain *chain)
+{
+	set_prod_idx(chain);
+}
+
+/**
+ * api_cmd_status_update - update the status of the chain
+ * @chain: chain to update
+ */
+static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
+{
+	struct hinic_api_cmd_status *wb_status;
+	enum hinic_api_cmd_chain_type chain_type;
+	u64 status_header;
+	u32 buf_desc;
+
+	wb_status = chain->wb_status;
+
+	buf_desc = be32_to_cpu(wb_status->buf_desc);
+	if (HINIC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR))
+		return;
+
+	status_header = be64_to_cpu(wb_status->header);
+	chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
+	if (chain_type >= HINIC_API_CMD_MAX)
+		return;
+
+	if (chain_type != chain->chain_type)
+		return;
+
+	chain->cons_idx = HINIC_API_CMD_STATUS_GET(buf_desc, CONS_IDX);
+}
+
+/**
+ * wait_for_status_poll - wait for write to mgmt command to complete
+ * @chain: the chain of the command
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
+{
+	int err = -ETIMEDOUT;
+	u32 cnt = 0;
+
+	while (cnt < API_CMD_STATUS_TIMEOUT &&
+	       chain->hwdev->chip_present_flag) {
+		api_cmd_status_update(chain);
+
+		/* a sync API CMD may start only after the previous one finished */
+		if (chain->cons_idx == chain->prod_idx) {
+			err = 0;
+			break;
+		}
+
+		usleep_range(50, 100);
+		cnt++;
+	}
+
+	return err;
+}
+
+static void copy_resp_data(struct hinic_api_cmd_cell_ctxt *ctxt, void *ack,
+			   u16 ack_size)
+{
+	struct hinic_api_cmd_resp_fmt *resp = ctxt->resp;
+
+	memcpy(ack, &resp->resp_data, ack_size);
+	ctxt->status = 0;
+}
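+/*
+ * The response header becomes valid once its low byte equals
+ * HINIC_API_CMD_RESP_HEAD_VALID_CODE (0xFF); see
+ * HINIC_API_CMD_RESP_HEADER_VALID() in hinic_api_cmd.h. The poll loop
+ * below re-reads the header until that pattern appears or the
+ * POLLING_COMPLETION_TIMEOUT_DEFAULT budget is exhausted.
+ */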
+/**
+ * wait_for_resp_polling - poll for response data of a read API CMD command
+ * @ctxt: pointer to api cmd cell ctxt
+ *
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_resp_polling(struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+	u64 resp_header;
+	int ret = -ETIMEDOUT;
+	u32 cnt = 0;
+
+	while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) {
+		resp_header = be64_to_cpu(ctxt->resp->header);
+
+		rmb(); /* read the latest header */
+
+		if (HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+			ret = 0;
+			break;
+		}
+		usleep_range(100, 1000);
+		cnt++;
+	}
+
+	if (ret)
+		pr_err("Wait for api chain response timeout\n");
+
+	return ret;
+}
+
+/**
+ * wait_for_api_cmd_completion - wait for command to complete
+ * @chain: chain for the command
+ * @ctxt: pointer to api cmd cell ctxt
+ * @ack: pointer to ack message
+ * @ack_size: the size of ack message
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain,
+				       struct hinic_api_cmd_cell_ctxt *ctxt,
+				       void *ack, u16 ack_size)
+{
+	void *dev = chain->hwdev->dev_hdl;
+	int err = 0;
+
+	switch (chain->chain_type) {
+	case HINIC_API_CMD_POLL_READ:
+		err = wait_for_resp_polling(ctxt);
+		if (!err)
+			copy_resp_data(ctxt, ack, ack_size);
+		break;
+	case HINIC_API_CMD_POLL_WRITE:
+	case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+		err = wait_for_status_poll(chain);
+		if (err) {
+			sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n",
+				chain->chain_type);
+			break;
+		}
+		break;
+	case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+		/* No need to wait */
+		break;
+	default:
+		sdk_err(dev, "Unknown API CMD Chain type: %d\n",
+			chain->chain_type);
+		err = -EINVAL;
+		break;
+	}
+
+	if (err)
+		dump_api_chain_reg(chain);
+
+	return err;
+}
+
+static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain,
+				       struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+	ctxt->status = 1;
+	ctxt->saved_prod_idx = chain->prod_idx;
+	if (ctxt->resp) {
+		ctxt->resp->header = 0;
+
+		/* make sure "header" was cleared */
+		wmb();
+	}
+}
+
+/**
+ * api_cmd - API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ * @ack: the buffer for ack
+ * @ack_size: the size of ack
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd(struct hinic_api_cmd_chain *chain,
+		   enum hinic_node_id dest,
+		   void *cmd, u16 cmd_size, void *ack, u16 ack_size)
+{
+	struct hinic_api_cmd_cell_ctxt *ctxt;
+
+	if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+		spin_lock(&chain->async_lock);
+	else
+		down(&chain->sem);
+	ctxt = &chain->cell_ctxt[chain->prod_idx];
+	if (chain_busy(chain)) {
+		if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+			spin_unlock(&chain->async_lock);
+		else
+			up(&chain->sem);
+		return -EBUSY;
+	}
+	update_api_cmd_ctxt(chain, ctxt);
+
+	prepare_cell(chain, dest, cmd, cmd_size);
+
+	cmd_chain_prod_idx_inc(chain);
+
+	wmb(); /* make sure the cell is written before the doorbell */
+
+	issue_api_cmd(chain);
+
+	/* prod_idx has been advanced; point curr_node at the next cell */
+	chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr;
+	if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+		spin_unlock(&chain->async_lock);
+	else
+		up(&chain->sem);
+
+	return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size);
+}
+
+/**
+ * hinic_api_cmd_write - Write API CMD command
+ * @chain: chain for write command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @size: the command size
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+			enum hinic_node_id dest, void *cmd, u16 size)
+{
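+	/*
+	 * Usage sketch (illustrative only, not part of the driver): a
+	 * caller that owns a write chain would typically do
+	 *
+	 *	err = hinic_api_cmd_write(chain, dest_node, cmd_buf, cmd_len);
+	 *
+	 * where dest_node is whatever enum hinic_node_id value the caller
+	 * targets; a write carries no response payload, so no ack buffer
+	 * is passed down.
+	 */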
+ return api_cmd(chain, dest, cmd, size, NULL, 0); +} + +int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 size, void *ack, u16 ack_size) +{ + return api_cmd(chain, dest, cmd, size, ack, ack_size); +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @cmd_chain: the API CMD specific chain to restart + */ +static int api_cmd_hw_restart(struct hinic_api_cmd_chain *cmd_chain) +{ + struct hinic_hwif *hwif = cmd_chain->hwdev->hwif; + u32 reg_addr, val; + int err; + u32 cnt = 0; + + /* Read Modify Write */ + reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = hinic_hwif_read_reg(hwif, reg_addr); + + val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hinic_hwif_write_reg(hwif, reg_addr, val); + + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hinic_hwif_read_reg(hwif, reg_addr); + + if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + */ +static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 size; + + /* Read Modify Write */ + reg_addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); + + ctrl = hinic_hwif_read_reg(hwif, reg_addr); + + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + HINIC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); + + hinic_hwif_write_reg(hwif, reg_addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set status address for + */ +static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set the number of cells for + */ +static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + */ +static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD 
specific chain to wait for + * Return: 0 - success, negative - failure + */ +static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + u32 hw_cons_idx; + u32 cnt = 0; + int err; + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hinic_hwif_read_reg(hwif, addr); + hw_cons_idx = HINIC_API_CMD_STATUS_GET(val, CONS_IDX); + + /* wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + */ +static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, ctrl; + + addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hinic_hwif_read_reg(hwif, addr); + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * Return: 0 - success, negative - failure + */ +static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + if (api_cmd_hw_restart(chain)) { + sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); + return -EBUSY; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + + return wait_for_ready_chain(chain); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * Return: 0 - success, negative - failure + */ +static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, u32 cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + void *dev = chain->hwdev->dev_hdl; + void *buf_vaddr; + u64 buf_paddr; + int err = 0; + + buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + + chain->buf_size_align * cell_idx); + buf_paddr = chain->buf_paddr_base + + chain->buf_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = buf_vaddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HINIC_API_CMD_POLL_READ: + cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + return err; +} + +static void alloc_resp_buf(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, u32 cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * cell_idx); + resp_paddr = chain->rsp_paddr_base + + chain->rsp_size_align * cell_idx; + + cell_ctxt = 
&chain->cell_ctxt[cell_idx]; + + cell_ctxt->resp = resp_vaddr; + cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); +} + +static int hinic_alloc_api_cmd_cell_buf(struct hinic_api_cmd_chain *chain, + u32 cell_idx, + struct hinic_api_cmd_cell *node) +{ + void *dev = chain->hwdev->dev_hdl; + int err; + + /* For read chain, we should allocate buffer for the response data */ + if (chain->chain_type == HINIC_API_CMD_MULTI_READ || + chain->chain_type == HINIC_API_CMD_POLL_READ) + alloc_resp_buf(chain, node, cell_idx); + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_POLL_READ: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + sdk_err(dev, "Failed to allocate cmd buffer\n"); + goto alloc_cmd_buf_err; + } + break; + /* For api command write and api command read, the data section + * is directly inserted in the cell, so no need to allocate. + */ + case HINIC_API_CMD_MULTI_READ: + chain->cell_ctxt[cell_idx].api_cmd_vaddr = + &node->read.hw_cmd_paddr; + break; + default: + sdk_err(dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto alloc_cmd_buf_err; + } + + return 0; + +alloc_cmd_buf_err: + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, u32 cell_idx, + struct hinic_api_cmd_cell *pre_node, + struct hinic_api_cmd_cell **node_vaddr) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_api_cmd_cell *node; + void *cell_vaddr; + u64 cell_paddr; + int err; + + cell_vaddr = (void *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * cell_idx); + cell_paddr = chain->cell_paddr_base + + chain->cell_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = cell_vaddr; + node = cell_ctxt->cell_vaddr; + + if (!pre_node) { + chain->head_node = cell_vaddr; + chain->head_cell_paddr = cell_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); + } + + /* Driver software should make sure that there is an empty API + * command cell at the end the chain + */ + node->next_cell_paddr = 0; + + err = hinic_alloc_api_cmd_cell_buf(chain, cell_idx, node); + if (err) + return err; + + *node_vaddr = node; + + return 0; +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; + void *dev = chain->hwdev->dev_hdl; + u32 cell_idx; + int err; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + sdk_err(dev, "Failed to create API CMD cell\n"); + return err; + } + + pre_node = node; + } + + if (!node) + return -EFAULT; + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD 
specific chain to initialize + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + */ +static int api_chain_init(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_chain_attr *attr) +{ + void *dev = chain->hwdev->dev_hdl; + size_t cell_ctxt_size; + size_t cells_buf_size; + int err; + + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_init(&chain->async_lock); + else + sema_init(&chain->sem, 1); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + if (!cell_ctxt_size) { + sdk_err(dev, "Api chain cell size cannot be zero\n"); + err = -EINVAL; + goto alloc_cell_ctxt_err; + } + + chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) { + sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); + err = -ENOMEM; + goto alloc_cell_ctxt_err; + } + + chain->wb_status = dma_zalloc_coherent(dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); + if (!chain->wb_status) { + sdk_err(dev, "Failed to allocate DMA wb status\n"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + chain->cell_size_align = ALIGN((u64)chain->cell_size, + API_CMD_NODE_ALIGN_SIZE); + chain->rsp_size_align = ALIGN((u64)chain->rsp_size, + API_CHAIN_RESP_ALIGNMENT); + chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); + + cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + + chain->buf_size_align) * chain->num_cells; + + err = hinic_dma_zalloc_coherent_align(dev, cells_buf_size, + API_CMD_NODE_ALIGN_SIZE, + GFP_KERNEL, + &chain->cells_addr); + if (err) { + sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); + goto alloc_cells_buf_err; + } + + chain->cell_vaddr_base = chain->cells_addr.align_vaddr; + chain->cell_paddr_base = chain->cells_addr.align_paddr; + + chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * chain->num_cells); + chain->rsp_paddr_base = chain->cell_paddr_base + + chain->cell_size_align * chain->num_cells; + + chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * chain->num_cells); + chain->buf_paddr_base = chain->rsp_paddr_base + + chain->rsp_size_align * chain->num_cells; + + return 0; + +alloc_cells_buf_err: + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + +alloc_wb_status_err: + kfree(chain->cell_ctxt); + +alloc_cell_ctxt_err: + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_deinit(&chain->async_lock); + else + sema_deinit(&chain->sem); + return err; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + */ +static void api_chain_free(struct hinic_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + + hinic_dma_free_coherent_align(dev, &chain->cells_addr); + + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); + + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_deinit(&chain->async_lock); + else + sema_deinit(&chain->sem); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + */ 
+static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain, + struct hinic_api_cmd_chain_attr *attr) +{ + struct hinic_hwdev *hwdev = attr->hwdev; + struct hinic_api_cmd_chain *chain; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); + return -EINVAL; + } + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + chain->hwdev = hwdev; + + err = api_chain_init(chain, attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); + goto chain_init_err; + } + + err = api_cmd_create_cells(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); + goto create_cells_err; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); + goto chain_hw_init_err; + } + + *cmd_chain = chain; + return 0; + +chain_hw_init_err: +create_cells_err: + api_chain_free(chain); + +chain_init_err: + kfree(chain); + return err; +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + */ +static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) +{ + api_chain_free(chain); + kfree(chain); +} + +/** + * hinic_api_cmd_init - Initialize all the API CMD chains + * @hwdev: the pointer to hw device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + */ +int hinic_api_cmd_init(struct hinic_hwdev *hwdev, + struct hinic_api_cmd_chain **chain) +{ + void *dev = hwdev->dev_hdl; + struct hinic_api_cmd_chain_attr attr; + enum hinic_api_cmd_chain_type chain_type, i; + int err; + + attr.hwdev = hwdev; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; + + chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for (; chain_type < HINIC_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + sdk_err(dev, "Failed to create chain %d\n", chain_type); + goto create_chain_err; + } + } + + return 0; + +create_chain_err: + i = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); + + return err; +} + +/** + * hinic_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that will be freed + */ +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) +{ + enum hinic_api_cmd_chain_type chain_type; + + chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + + for (; chain_type < HINIC_API_CMD_MAX; chain_type++) + api_cmd_destroy_chain(chain[chain_type]); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h new file mode 100644 index 000000000000..0839708ccb3d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_API_CMD_H_ +#define HINIC_API_CMD_H_ + +#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HINIC_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define HINIC_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 +#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1U +#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HINIC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HINIC_API_CMD_DESC_DEST_MASK 0x1FU +#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU +#define HINIC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU + +#define HINIC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ + HINIC_API_CMD_DESC_##member##_SHIFT) + +#define HINIC_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU + +#define HINIC_API_CMD_STATUS_VALID_CODE 0xFF + +#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_HEADER_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U + +#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \ + << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define 
HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +#define HINIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HINIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HINIC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & HINIC_API_CMD_RESP_HEAD_VALID_MASK) == \ + HINIC_API_CMD_RESP_HEAD_VALID_CODE) + +#define HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define HINIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU + +#define HINIC_API_CMD_RESP_HEAD_ERR_CODE 0x1 +#define HINIC_API_CMD_RESP_HEAD_ERR(val) \ + ((((val) >> HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_STATUS_MASK) == \ + HINIC_API_CMD_RESP_HEAD_ERR_CODE) + +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFF + +#define HINIC_API_CMD_RESP_RESERVED 3 +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + (u16)(((val) >> HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK) + +#define HINIC_API_CMD_STATUS_HEAD_VALID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEAD_VALID_SHIFT 0 + +#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 + +#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU +#define HINIC_API_CMD_STATUS_FSM_SHIFT 24 + +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HINIC_API_CMD_STATUS_CHAIN_ID(val) \ + (((val) >> HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT) & \ + HINIC_API_CMD_STATUS_HEAD_VALID_MASK) + +#define HINIC_API_CMD_STATUS_CONS_IDX(val) \ + ((val) & HINIC_API_CMD_STATUS_CONS_IDX_MASK) + +#define HINIC_API_CMD_STATUS_CHKSUM_ERR(val) \ + (((val) >> HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ + HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK) + +#define HINIC_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_##member##_MASK) + +enum hinic_api_cmd_chain_type { + /* write command with completion notification */ + HINIC_API_CMD_WRITE = 0, + /* read command with completion notification */ + HINIC_API_CMD_READ = 1, + /* write to mgmt cpu command with completion */ + HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + HINIC_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + HINIC_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + HINIC_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + HINIC_API_CMD_MAX, +}; + +struct hinic_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hinic_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct 
hinic_api_cmd_resp_fmt { + u64 header; + u64 rsvd[3]; + u64 resp_data; +}; + +struct hinic_api_cmd_cell_ctxt { + struct hinic_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct hinic_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; +}; + +struct hinic_api_cmd_chain_attr { + struct hinic_hwdev *hwdev; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hinic_api_cmd_chain { + struct hinic_hwdev *hwdev; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduling */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hinic_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic_api_cmd_cell *head_node; + + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_api_cmd_cell *curr_node; + + struct hinic_dma_addr_align cells_addr; + + u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + u64 buf_size_align; +}; + +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, void *cmd, u16 size); + +int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, void *cmd, u16 size, + void *ack, u16 ack_size); + +int hinic_api_cmd_init(struct hinic_hwdev *hwdev, + struct hinic_api_cmd_chain **chain); + +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c new file mode 100644 index 000000000000..b99fa7688ed0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c @@ -0,0 +1,2496 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_mbox.h" +#include "hinic_cfg.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_multi_host_mgmt.h" + +uint g_rdma_mtts_num; +uint g_rdma_qps_num; +uint g_rdma_mpts_num; +uint g_vfs_num; +module_param(g_rdma_mtts_num, uint, 0444); +MODULE_PARM_DESC(g_rdma_mtts_num, "number of roce used mtts, use default value when pass 0"); +module_param(g_rdma_qps_num, uint, 0444); +MODULE_PARM_DESC(g_rdma_qps_num, "number of roce used qps, use default value when pass 0"); +module_param(g_rdma_mpts_num, uint, 0444); +MODULE_PARM_DESC(g_rdma_mpts_num, "number of roce used mpts, use default value when pass 0"); +module_param(g_vfs_num, uint, 0444); +MODULE_PARM_DESC(g_vfs_num, "number of used vfs, use default value when pass 0 "); + +uint intr_mode; + +uint timer_enable = 1; +uint bloomfilter_enable; +uint g_test_qpc_num; +uint g_test_qpc_resvd_num; +uint g_test_pagesize_reorder; +uint g_test_xid_alloc_mode = 1; +uint g_test_gpa_check_enable = 1; +uint g_test_qpc_alloc_mode = 2; +uint g_test_scqc_alloc_mode = 2; +uint g_test_max_conn; +uint g_test_max_cache_conn; +uint g_test_scqc_num; +uint g_test_mpt_num; +uint g_test_mpt_resvd; +uint g_test_scq_resvd; +uint g_test_hash_num; +uint g_test_reorder_num; + + +static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt) +{ + cfg_mgmt->svc_cap.timer_en = (u8)timer_enable; + cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable; + cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num; + cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num; + cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder; + cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode; + cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable; + cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode; + cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode; + cfg_mgmt->svc_cap.test_max_conn_num = g_test_max_conn; + cfg_mgmt->svc_cap.test_max_cache_conn_num = g_test_max_cache_conn; + cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num; + cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num; + cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd; + cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd; + cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num; + cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num; +} + + +int hinic_sync_time(void *hwdev, u64 time) +{ + struct hinic_sync_time_info time_info = {0}; + u16 out_size = sizeof(time_info); + int err; + + time_info.mstime = time; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size, + 0); + if (err || time_info.status || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, time_info.status, out_size); + return -EFAULT; + } + + return err; +} + +static void parse_sf_en_cap(struct service_cap *cap, + struct hinic_dev_cap *dev_cap, enum func_type type) +{ + struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; + + if (type == TYPE_PPF) { + /* For PPF's SF EN flag, we assign it in get_dynamic_res_cap(). + * we only save its VF's flag. 
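+		 * Here, a PF reads its own flag from sf_en_pf, while a VF
+		 * inherits the value its parent PF/PPF exported (see the
+		 * branches below).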
+		 */
+		attr->sf_en_vf = dev_cap->sf_en_vf;
+	} else if (type == TYPE_PF) {
+		if (dev_cap->sf_en_pf)
+			cap->sf_en = true;
+		else
+			cap->sf_en = false;
+
+		attr->sf_en_vf = dev_cap->sf_en_vf;
+	} else {
+		/* VF gets SF_EN_VF from PPF/PF */
+		if (dev_cap->sf_en_vf)
+			cap->sf_en = true;
+		else
+			cap->sf_en = false;
+
+		attr->sf_en_vf = 0;
+	}
+}
+
+static void parse_pub_res_cap(struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct dev_sf_svc_attr *attr = &cap->sf_svc_attr;
+
+	cap->svc_type = dev_cap->svc_cap_en;
+	cap->chip_svc_type = cap->svc_type;
+
+	if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT)
+		attr->ft_en = true;
+	else
+		attr->ft_en = false;
+
+	if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT)
+		attr->rdma_en = true;
+	else
+		attr->rdma_en = false;
+
+	cap->host_id = dev_cap->host_id;
+	cap->ep_id = dev_cap->ep_id;
+
+	cap->max_cos_id = dev_cap->max_cos_id;
+	cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap;
+	cap->er_id = dev_cap->er_id;
+	cap->port_id = dev_cap->port_id;
+	cap->force_up = dev_cap->force_up;
+
+	parse_sf_en_cap(cap, dev_cap, type);
+
+	/* PF/PPF */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		cap->max_vf = dev_cap->max_vf;
+		cap->pf_num = dev_cap->pf_num;
+		cap->pf_id_start = dev_cap->pf_id_start;
+		cap->vf_num = dev_cap->vf_num;
+		cap->vf_id_start = dev_cap->vf_id_start;
+
+		/* FC needs the max queue number, but that info lives in the
+		 * l2nic cap; duplicate it in the public cap so FC can read
+		 * the correct max queue number from here.
+		 */
+		cap->max_sqs = dev_cap->nic_max_sq + 1;
+		cap->max_rqs = dev_cap->nic_max_rq + 1;
+	} else {
+		cap->max_vf = 0;
+		cap->max_sqs = dev_cap->nic_max_sq;
+		cap->max_rqs = dev_cap->nic_max_rq;
+	}
+
+	cap->host_total_function = dev_cap->host_total_func;
+	cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
+	cap->max_connect_num = dev_cap->max_conn_num;
+	cap->max_stick2cache_num = dev_cap->max_stick2cache_num;
+	cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr;
+	cap->bfilter_len = dev_cap->bfilter_len;
+	cap->hash_bucket_num = dev_cap->hash_bucket_num;
+	cap->dev_ver_info.cfg_file_ver = dev_cap->cfg_file_ver;
+	cap->net_port_mode = dev_cap->net_port_mode;
+
+	/* FC does not use VF */
+	if (cap->net_port_mode == CFG_NET_MODE_FC)
+		cap->max_vf = 0;
+
+	pr_info("Get public resource capability, svc_cap_en: 0x%x\n",
+		dev_cap->svc_cap_en);
+	pr_info("Host_id=0x%x, ep_id=0x%x, max_cos_id=0x%x, cos_bitmap=0x%x, er_id=0x%x, port_id=0x%x\n",
+		cap->host_id, cap->ep_id,
+		cap->max_cos_id, cap->cos_valid_bitmap,
+		cap->er_id, cap->port_id);
+	pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x, net_port_mode=0x%x, max_vf=0x%x\n",
+		cap->host_total_function, cap->host_oq_id_mask_val,
+		cap->net_port_mode, cap->max_vf);
+
+	pr_info("Pf_num=0x%x, pf_id_start=0x%x, vf_num=0x%x, vf_id_start=0x%x\n",
+		cap->pf_num, cap->pf_id_start,
+		cap->vf_num, cap->vf_id_start);
+
+	/* Check parameters from firmware */
+	if (cap->max_sqs > HINIC_CFG_MAX_QP ||
+	    cap->max_rqs > HINIC_CFG_MAX_QP) {
+		pr_info("Number of qps exceeds the limit [1-%d]: sq: %d, rq: %d\n",
+			HINIC_CFG_MAX_QP, cap->max_sqs, cap->max_rqs);
+		cap->max_sqs = HINIC_CFG_MAX_QP;
+		cap->max_rqs = HINIC_CFG_MAX_QP;
+	}
+}
+
+static void parse_dynamic_share_res_cap(struct service_cap *cap,
+					struct hinic_dev_cap *dev_cap,
+					enum func_type type)
+{
+	struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap;
+
+	shared_cap->host_pctxs = dev_cap->host_pctx_num;
+
+	if (dev_cap->host_sf_en)
+		cap->sf_en = true;
+	else
+		cap->sf_en = false;
+
+	shared_cap->host_cctxs = dev_cap->host_ccxt_num;
+	shared_cap->host_scqs = dev_cap->host_scq_num;
+	shared_cap->host_srqs = dev_cap->host_srq_num;
+	shared_cap->host_mpts = dev_cap->host_mpt_num;
+
+	pr_info("Dynamic share resource capability, host_pctxs=0x%x, host_cctxs=0x%x, host_scqs=0x%x, host_srqs=0x%x, host_mpts=0x%x\n",
+		shared_cap->host_pctxs,
+		shared_cap->host_cctxs,
+		shared_cap->host_scqs,
+		shared_cap->host_srqs,
+		shared_cap->host_mpts);
+}
+
+static void parse_l2nic_res_cap(struct service_cap *cap,
+				struct hinic_dev_cap *dev_cap,
+				enum func_type type)
+{
+	struct nic_service_cap *nic_cap = &cap->nic_cap;
+
+	/* PF/PPF */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		nic_cap->max_sqs = dev_cap->nic_max_sq + 1;
+		nic_cap->max_rqs = dev_cap->nic_max_rq + 1;
+		nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1;
+		nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1;
+		nic_cap->max_queue_allowed = 0;
+		nic_cap->dynamic_qp = 0;
+	} else {
+		nic_cap->max_sqs = dev_cap->nic_max_sq;
+		nic_cap->max_rqs = dev_cap->nic_max_rq;
+		nic_cap->vf_max_sqs = 0;
+		nic_cap->vf_max_rqs = 0;
+		nic_cap->max_queue_allowed = dev_cap->max_queue_allowed;
+		nic_cap->dynamic_qp = dev_cap->ovs_dq_en;
+	}
+
+	if (dev_cap->nic_lro_en)
+		nic_cap->lro_en = true;
+	else
+		nic_cap->lro_en = false;
+
+	nic_cap->lro_sz = dev_cap->nic_lro_sz;
+	nic_cap->tso_sz = dev_cap->nic_tso_sz;
+
+	pr_info("L2nic resource capability, max_sqs=0x%x, max_rqs=0x%x, vf_max_sqs=0x%x, vf_max_rqs=0x%x, max_queue_allowed=0x%x\n",
+		nic_cap->max_sqs,
+		nic_cap->max_rqs,
+		nic_cap->vf_max_sqs,
+		nic_cap->vf_max_rqs,
+		nic_cap->max_queue_allowed);
+
+	/* Check parameters from firmware */
+	if (nic_cap->max_sqs > HINIC_CFG_MAX_QP ||
+	    nic_cap->max_rqs > HINIC_CFG_MAX_QP) {
+		pr_info("Number of qps exceeds the limit [1-%d]: sq: %d, rq: %d\n",
+			HINIC_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs);
+		nic_cap->max_sqs = HINIC_CFG_MAX_QP;
+		nic_cap->max_rqs = HINIC_CFG_MAX_QP;
+	}
+}
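+/*
+ * Note on the +1 above: the firmware fields nic_max_sq/nic_max_rq appear
+ * to be encoded as (count - 1) for PF/PPF, hence the +1 when parsing,
+ * while the values a VF receives from its PF are presumably already the
+ * real counts and are passed through as-is.
+ */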
+static void parse_roce_res_cap(struct service_cap *cap,
+			       struct hinic_dev_cap *dev_cap,
+			       enum func_type type)
+{
+	struct dev_roce_svc_own_cap *roce_cap =
+		&cap->rdma_cap.dev_rdma_cap.roce_own_cap;
+
+	roce_cap->max_qps = dev_cap->roce_max_qp;
+	roce_cap->max_cqs = dev_cap->roce_max_cq;
+	roce_cap->max_srqs = dev_cap->roce_max_srq;
+	roce_cap->max_mpts = dev_cap->roce_max_mpt;
+	roce_cap->num_cos = dev_cap->max_cos_id + 1;
+
+	/* PF/PPF */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		roce_cap->vf_max_qps = dev_cap->roce_vf_max_qp;
+		roce_cap->vf_max_cqs = dev_cap->roce_vf_max_cq;
+		roce_cap->vf_max_srqs = dev_cap->roce_vf_max_srq;
+		roce_cap->vf_max_mpts = dev_cap->roce_vf_max_mpt;
+	} else {
+		roce_cap->vf_max_qps = 0;
+		roce_cap->vf_max_cqs = 0;
+		roce_cap->vf_max_srqs = 0;
+		roce_cap->vf_max_mpts = 0;
+	}
+
+	roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start;
+	roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end;
+	roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size;
+
+	roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start;
+	roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end;
+	roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size;
+
+	roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start;
+	roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end;
+	roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size;
+
+	pr_info("Get roce resource capability\n");
+	pr_info("Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_mpts=0x%x\n",
+		roce_cap->max_qps, roce_cap->max_cqs,
+		roce_cap->max_srqs, roce_cap->max_mpts);
+
+	pr_info("Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_srqs=0x%x, vf_max_mpts=0x%x\n",
+		roce_cap->vf_max_qps, roce_cap->vf_max_cqs,
+		roce_cap->vf_max_srqs, roce_cap->vf_max_mpts);
+
+	pr_info("Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n",
+		roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end,
+		roce_cap->cmtt_cl_sz);
+
+	pr_info("Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n",
+		roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end,
+		roce_cap->dmtt_cl_sz);
+
+	pr_info("Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n",
+		roce_cap->wqe_cl_start, roce_cap->wqe_cl_end,
+		roce_cap->wqe_cl_sz);
+
+	/* Fall back to conservative defaults if firmware reports zero */
+	if (roce_cap->max_qps == 0) {
+		roce_cap->max_qps = 1024;
+		roce_cap->max_cqs = 2048;
+		roce_cap->max_srqs = 1024;
+		roce_cap->max_mpts = 1024;
+
+		if (type == TYPE_PF || type == TYPE_PPF) {
+			roce_cap->vf_max_qps = 512;
+			roce_cap->vf_max_cqs = 1024;
+			roce_cap->vf_max_srqs = 512;
+			roce_cap->vf_max_mpts = 512;
+		}
+	}
+}
+
+static void parse_iwarp_res_cap(struct service_cap *cap,
+				struct hinic_dev_cap *dev_cap,
+				enum func_type type)
+{
+	struct dev_iwarp_svc_own_cap *iwarp_cap =
+		&cap->rdma_cap.dev_rdma_cap.iwarp_own_cap;
+
+	iwarp_cap->max_qps = dev_cap->iwarp_max_qp;
+	iwarp_cap->max_cqs = dev_cap->iwarp_max_cq;
+	iwarp_cap->max_mpts = dev_cap->iwarp_max_mpt;
+	iwarp_cap->num_cos = dev_cap->max_cos_id + 1;
+
+	/* PF/PPF */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		iwarp_cap->vf_max_qps = dev_cap->iwarp_vf_max_qp;
+		iwarp_cap->vf_max_cqs = dev_cap->iwarp_vf_max_cq;
+		iwarp_cap->vf_max_mpts = dev_cap->iwarp_vf_max_mpt;
+	} else {
+		iwarp_cap->vf_max_qps = 0;
+		iwarp_cap->vf_max_cqs = 0;
+		iwarp_cap->vf_max_mpts = 0;
+	}
+
+	iwarp_cap->cmtt_cl_start = dev_cap->iwarp_cmtt_cl_start;
+	iwarp_cap->cmtt_cl_end = dev_cap->iwarp_cmtt_cl_end;
+	iwarp_cap->cmtt_cl_sz = dev_cap->iwarp_cmtt_cl_size;
+
+	iwarp_cap->dmtt_cl_start = dev_cap->iwarp_dmtt_cl_start;
+	iwarp_cap->dmtt_cl_end = dev_cap->iwarp_dmtt_cl_end;
+	iwarp_cap->dmtt_cl_sz = dev_cap->iwarp_dmtt_cl_size;
+
+	iwarp_cap->wqe_cl_start = dev_cap->iwarp_wqe_cl_start;
+	iwarp_cap->wqe_cl_end = dev_cap->iwarp_wqe_cl_end;
+	iwarp_cap->wqe_cl_sz = dev_cap->iwarp_wqe_cl_size;
+
+	pr_info("Get iwarp resource capability\n");
+	pr_info("Max_qps=0x%x, max_cqs=0x%x, max_mpts=0x%x\n",
+		iwarp_cap->max_qps, iwarp_cap->max_cqs,
+		iwarp_cap->max_mpts);
+	pr_info("Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_mpts=0x%x\n",
+		iwarp_cap->vf_max_qps, iwarp_cap->vf_max_cqs,
+		iwarp_cap->vf_max_mpts);
+
+	pr_info("Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n",
+		iwarp_cap->cmtt_cl_start, iwarp_cap->cmtt_cl_end,
+		iwarp_cap->cmtt_cl_sz);
+
+	pr_info("Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n",
+		iwarp_cap->dmtt_cl_start, iwarp_cap->dmtt_cl_end,
+		iwarp_cap->dmtt_cl_sz);
+
+	pr_info("Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n",
+		iwarp_cap->wqe_cl_start, iwarp_cap->wqe_cl_end,
+		iwarp_cap->wqe_cl_sz);
+
+	/* Fall back to conservative defaults if firmware reports zero */
+	if (iwarp_cap->max_qps == 0) {
+		iwarp_cap->max_qps = 8;
+		iwarp_cap->max_cqs = 16;
+		iwarp_cap->max_mpts = 8;
+
+		if (type == TYPE_PF || type == TYPE_PPF) {
+			iwarp_cap->vf_max_qps = 8;
+			iwarp_cap->vf_max_cqs = 16;
+			iwarp_cap->vf_max_mpts = 8;
+		}
+	}
+}
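+/*
+ * The remaining parse_*_res_cap() helpers below follow the same pattern:
+ * copy the firmware-reported limits for one service into its service_cap
+ * sub-structure and log them. FCoE/TOE/FC are only invoked for PF/PPF;
+ * see parse_dev_cap() at the end of this group.
+ */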
+static void parse_fcoe_res_cap(struct service_cap *cap,
+			       struct hinic_dev_cap *dev_cap,
+			       enum func_type type)
+{
+	struct dev_fcoe_svc_cap *fcoe_cap = &cap->fcoe_cap.dev_fcoe_cap;
+
+	fcoe_cap->max_qps = dev_cap->fcoe_max_qp;
+	fcoe_cap->max_cqs = dev_cap->fcoe_max_cq;
+	fcoe_cap->max_srqs = dev_cap->fcoe_max_srq;
+	fcoe_cap->max_cctxs = dev_cap->fcoe_max_cctx;
+	fcoe_cap->cctxs_id_start = dev_cap->fcoe_cctx_id_start;
+	fcoe_cap->vp_id_start = dev_cap->fcoe_vp_id_start;
+	fcoe_cap->vp_id_end = dev_cap->fcoe_vp_id_end;
+
+	pr_info("Get fcoe resource capability\n");
+	pr_info("Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_cctxs=0x%x, cctxs_id_start=0x%x\n",
+		fcoe_cap->max_qps, fcoe_cap->max_cqs, fcoe_cap->max_srqs,
+		fcoe_cap->max_cctxs, fcoe_cap->cctxs_id_start);
+	pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
+		fcoe_cap->vp_id_start, fcoe_cap->vp_id_end);
+}
+
+static void parse_toe_res_cap(struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap;
+
+	toe_cap->max_pctxs = dev_cap->toe_max_pctx;
+	toe_cap->max_cqs = dev_cap->toe_max_cq;
+	toe_cap->max_srqs = dev_cap->toe_max_srq;
+	toe_cap->srq_id_start = dev_cap->toe_srq_id_start;
+	toe_cap->num_cos = dev_cap->max_cos_id + 1;
+
+	pr_info("Get toe resource capability, max_pctxs=0x%x, max_cqs=0x%x, max_srqs=0x%x, srq_id_start=0x%x\n",
+		toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs,
+		toe_cap->srq_id_start);
+}
+
+static void parse_fc_res_cap(struct service_cap *cap,
+			     struct hinic_dev_cap *dev_cap,
+			     enum func_type type)
+{
+	struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap;
+
+	fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx;
+	fc_cap->scq_num = dev_cap->fc_max_scq;
+	fc_cap->srq_num = dev_cap->fc_max_srq;
+	fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx;
+	fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start;
+	fc_cap->vp_id_start = dev_cap->fc_vp_id_start;
+	fc_cap->vp_id_end = dev_cap->fc_vp_id_end;
+
+	pr_info("Get fc resource capability\n");
+	pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x, child_qpc_id_start=0x%x\n",
+		fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num,
+		fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start);
+	pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
+		fc_cap->vp_id_start, fc_cap->vp_id_end);
+}
+
+static void parse_ovs_res_cap(struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct ovs_service_cap *ovs_cap = &cap->ovs_cap;
+
+	ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc;
+	ovs_cap->dev_ovs_cap.max_cqs = 0;
+
+	if (type == TYPE_PF || type == TYPE_PPF)
+		ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->ovs_dq_en;
+
+	pr_info("Get ovs resource capability, max_qpc: 0x%x\n",
+		ovs_cap->dev_ovs_cap.max_pctxs);
+}
+
+static void parse_acl_res_cap(struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct acl_service_cap *acl_cap = &cap->acl_cap;
+
+	acl_cap->dev_acl_cap.max_pctxs = 1024 * 1024;
+	acl_cap->dev_acl_cap.max_cqs = 8;
+}
+
+static void parse_dev_cap(struct hinic_hwdev *dev,
+			  struct hinic_dev_cap *dev_cap, enum func_type type)
+{
+	struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+
+	/* Public resource */
+	parse_pub_res_cap(cap, dev_cap, type);
+
+	/* PPF managed dynamic resource */
+	if (type == TYPE_PPF)
+		parse_dynamic_share_res_cap(cap, dev_cap, type);
+
+	/* L2 NIC resource */
+	if (IS_NIC_TYPE(dev))
+		parse_l2nic_res_cap(cap, dev_cap, type);
+
+	/* FCoE/IOE/TOE/FC without virtualization */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		if (IS_FC_TYPE(dev))
+			parse_fc_res_cap(cap, dev_cap, type);
+
+		if (IS_FCOE_TYPE(dev))
+			parse_fcoe_res_cap(cap, dev_cap, type);
+
+		if (IS_TOE_TYPE(dev))
+			parse_toe_res_cap(cap, dev_cap, type);
+	}
+
+	/* RoCE resource */
+	if (IS_ROCE_TYPE(dev))
+		parse_roce_res_cap(cap, dev_cap, type);
+
+	/* iWARP resource */
+	if (IS_IWARP_TYPE(dev))
+		parse_iwarp_res_cap(cap, dev_cap, type);
+
+	if 
(IS_OVS_TYPE(dev)) + parse_ovs_res_cap(cap, dev_cap, type); + + if (IS_ACL_TYPE(dev)) + parse_acl_res_cap(cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type) +{ + struct hinic_dev_cap dev_cap = {0}; + u16 out_len = sizeof(dev_cap); + int err; + + dev_cap.version = HINIC_CMD_VER_FUNC_ID; + err = hinic_global_func_id_get(dev, &dev_cap.func_id); + if (err) + return err; + + sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n", + dev_cap.func_id); + + err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP, + &dev_cap, sizeof(dev_cap), + &dev_cap, &out_len, 0); + if (err || dev_cap.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type) +{ + struct hinic_dev_cap dev_cap = {0}; + u16 in_len, out_len; + int err; + + in_len = sizeof(dev_cap); + out_len = in_len; + + err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP, + &dev_cap, in_len, &dev_cap, &out_len, 0); + if (err || dev_cap.status || !out_len) { + sdk_err(dev->dev_hdl, "Failed to get capability from PF, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static int get_dev_cap(struct hinic_hwdev *dev) +{ + int err; + enum func_type type = HINIC_FUNC_TYPE(dev); + + switch (type) { + case TYPE_PF: + case TYPE_PPF: + err = get_cap_from_fw(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n"); + return err; + } + break; + case TYPE_VF: + err = get_cap_from_pf(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get VF capability\n"); + return err; + } + break; + default: + sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n", + type); + return -EINVAL; + } + + return 0; +} + +static void nic_param_fix(struct hinic_hwdev *dev) +{ + struct nic_service_cap *nic_cap = &dev->cfg_mgmt->svc_cap.nic_cap; + + if ((hinic_func_type(dev) == TYPE_VF) && (nic_cap->max_queue_allowed != 0)) { + nic_cap->max_rqs = nic_cap->max_queue_allowed; + nic_cap->max_sqs = nic_cap->max_queue_allowed; + } + +} + +static void rdma_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + struct dev_roce_svc_own_cap *roce_cap = + &rdma_cap->dev_rdma_cap.roce_own_cap; + struct dev_iwarp_svc_own_cap *iwarp_cap = + &rdma_cap->dev_rdma_cap.iwarp_own_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_rdmarc = LOG_RDMARC_SEG; + rdma_cap->reserved_qps = RDMA_RSVD_QPS; + rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; + + /* RoCE */ + if (IS_ROCE_TYPE(dev)) { + roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ; + roce_cap->max_wqes = ROCE_MAX_WQES; + roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; + roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; + roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; + roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; + roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; + roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; + roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; + roce_cap->reserved_srqs = ROCE_RSVD_SRQS; + roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; + roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; + roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; + } else { + iwarp_cap->qpc_entry_sz = 
IWARP_QPC_ENTRY_SZ; + iwarp_cap->max_wqes = IWARP_MAX_WQES; + iwarp_cap->max_rq_sg = IWARP_MAX_RQ_SGE; + iwarp_cap->max_sq_inline_data_sz = IWARP_MAX_SQ_INLINE_DATA_SZ; + iwarp_cap->max_rq_desc_sz = IWARP_MAX_RQ_DESC_SZ; + iwarp_cap->max_irq_depth = IWARP_MAX_IRQ_DEPTH; + iwarp_cap->irq_entry_size = IWARP_IRQ_ENTRY_SZ; + iwarp_cap->max_orq_depth = IWARP_MAX_ORQ_DEPTH; + iwarp_cap->orq_entry_size = IWARP_ORQ_ENTRY_SZ; + iwarp_cap->max_rtoq_depth = IWARP_MAX_RTOQ_DEPTH; + iwarp_cap->rtoq_entry_size = IWARP_RTOQ_ENTRY_SZ; + iwarp_cap->max_ackq_depth = IWARP_MAX_ACKQ_DEPTH; + iwarp_cap->ackq_entry_size = IWARP_ACKQ_ENTRY_SZ; + iwarp_cap->max_msg_sz = IWARP_MAX_MSG_SZ; + } + + rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; + rdma_cap->wqebb_size = WQEBB_SZ; + rdma_cap->max_cqes = RDMA_MAX_CQES; + rdma_cap->reserved_cqs = RDMA_RSVD_CQS; + rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; + rdma_cap->cqe_size = RDMA_CQE_SZ; + rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + + /* 2^8 - 1 + * +------------------------+-----------+ + * | 4B | 1M(20b) | Key(8b) | + * +------------------------+-----------+ + * key = 8bit key + 24bit index, + * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit, + * we use original 8bits directly for simpilification + */ + rdma_cap->max_fmr_maps = 255; + rdma_cap->num_mtts = g_rdma_mtts_num > 0 ? + g_rdma_mtts_num : RDMA_NUM_MTTS; + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; + rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; + rdma_cap->num_ports = RDMA_NUM_PORTS; + rdma_cap->db_page_size = DB_PAGE_SZ; + rdma_cap->direct_wqe_size = DWQE_SZ; + rdma_cap->num_pds = NUM_PD; + rdma_cap->reserved_pds = RSVD_PD; + rdma_cap->max_xrcds = MAX_XRCDS; + rdma_cap->reserved_xrcds = RSVD_XRCDS; + rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; + rdma_cap->gid_entry_sz = GID_ENTRY_SZ; + rdma_cap->reserved_lkey = RSVD_LKEY; + rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; + rdma_cap->page_size_cap = PAGE_SZ_CAP; + rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | + RDMA_BMME_FLAG_REMOTE_INV | + RDMA_BMME_FLAG_FAST_REG_WR | + RDMA_DEV_CAP_FLAG_XRC | + RDMA_DEV_CAP_FLAG_MEM_WINDOW | + RDMA_BMME_FLAG_TYPE_2_WIN | + RDMA_BMME_FLAG_WIN_TYPE_2B | + RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_cap->max_frpl_len = MAX_FRPL_LEN; + rdma_cap->max_pkeys = MAX_PKEYS; +} + +static void fcoe_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fcoe_service_cap *fcoe_cap = &cap->fcoe_cap; + + fcoe_cap->qpc_basic_size = FCOE_PCTX_SZ; + fcoe_cap->childc_basic_size = FCOE_CCTX_SZ; + fcoe_cap->sqe_size = FCOE_SQE_SZ; + + fcoe_cap->scqc_basic_size = FCOE_SCQC_SZ; + fcoe_cap->scqe_size = FCOE_SCQE_SZ; + + fcoe_cap->srqc_size = FCOE_SRQC_SZ; + fcoe_cap->srqe_size = FCOE_SRQE_SZ; +} + +static void toe_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct toe_service_cap *toe_cap = &cap->toe_cap; + + toe_cap->pctx_sz = TOE_PCTX_SZ; + toe_cap->scqc_sz = TOE_CQC_SZ; +} + +static void fc_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void 
+ +static void ovs_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->pctx_sz = OVS_PCTX_SZ; + ovs_cap->scqc_sz = OVS_SCQC_SZ; +} + +static void acl_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct acl_service_cap *acl_cap = &cap->acl_cap; + + acl_cap->pctx_sz = ACL_PCTX_SZ; + acl_cap->scqc_sz = ACL_SCQC_SZ; +} + +static void init_service_param(struct hinic_hwdev *dev) +{ + if (IS_NIC_TYPE(dev)) + nic_param_fix(dev); + + if (IS_RDMA_TYPE(dev)) + rdma_param_fix(dev); + + if (IS_FCOE_TYPE(dev)) + fcoe_param_fix(dev); + + if (IS_TOE_TYPE(dev)) + toe_param_fix(dev); + + if (IS_FC_TYPE(dev)) + fc_param_fix(dev); + + if (IS_OVS_TYPE(dev)) + ovs_param_fix(dev); + + if (IS_ACL_TYPE(dev)) + acl_param_fix(dev); +} + +static void cfg_get_eq_num(struct hinic_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); + + if (!num_ceq) { + sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for (i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + + if (!hwdev || !ver) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + + memcpy(ver, &cfg_mgmt->svc_cap.dev_ver_info, sizeof(*ver)); + + return 0; +} +EXPORT_SYMBOL(hinic_dev_ver_info); + +int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_eq *eq; + int eqn = -EINVAL; + + if (!hwdev || vector < 0) + return -EINVAL; + + if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) { + sdk_err(dev->dev_hdl, + "Service type: %d, only RDMA service could get eqn by vector\n", + type); + return -EINVAL; + } + + cfg_mgmt = dev->cfg_mgmt; + vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; + + eq = cfg_mgmt->eq_info.eq; + if ((eq[vector].type == SERVICE_T_ROCE || + eq[vector].type == SERVICE_T_IWARP) && + eq[vector].free == CFG_BUSY) + eqn = eq[vector].eqn; + + return eqn; +} +EXPORT_SYMBOL(hinic_vector_to_eqn); +
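For callers, hinic_vector_to_eqn() folds an arbitrary completion-vector index into the RDMA CEQ range and only answers for EQs that a busy RDMA service owns. A minimal usage sketch, assuming an RDMA service module that already holds a valid hwdev (the surrounding code is hypothetical):

	int eqn = hinic_vector_to_eqn(hwdev, SERVICE_T_ROCE, vector);

	if (eqn < 0)
		return -EINVAL;	/* vector does not map to a busy RDMA CEQ */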
+ +static int cfg_init_interrupt(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + + if (!intr_num) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); + return -EFAULT; + } + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_num; + + /* Production requires that VFs support MSI-X only */ + if (HINIC_FUNC_TYPE(dev) == TYPE_VF) + cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; + else + cfg_mgmt->svc_cap.interrupt_type = intr_mode; + mutex_init(&irq_info->irq_mutex); + return 0; +} + +static int cfg_enable_interrupt(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + + void *pcidev = dev->pcidev_hdl; + struct irq_alloc_info_st *irq_info; + struct msix_entry *entry; + u16 i = 0; + int actual_irq; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + if (!nreq) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = pci_enable_msix_range(pcidev, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n"); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Requested %d msix vectors successfully\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16: the entry index the driver specified */ + irq_info[i].info.msix_entry_idx = entry[i].entry; + /* u32: the interrupt vector the kernel allocated */ + irq_info[i].info.irq_id = entry[i].vector; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupported interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + u16 free_num_irq; + int i, j; + + if (!hwdev || !irq_info_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + free_num_irq = irq_info->num_irq_remain; + + mutex_lock(&irq_info->irq_mutex); + + if (num > free_num_irq) { + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, + "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %d irq resources in cfg mgmt\n", + free_num_irq); + num = free_num_irq; + } + + *act_num = 0; + + for (i = 0; i < num; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free == CFG_FREE) { + if (irq_info->num_irq_remain == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = + alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic_alloc_irqs); +
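hinic_alloc_irqs() may legitimately hand back fewer vectors than requested, so callers must consume act_num rather than the requested count and release through hinic_free_irq() (defined next). A sketch of the expected pairing; SERVICE_T_NIC is assumed from the enum's naming and everything around the two calls is hypothetical:

	struct irq_info irqs[4];
	u16 granted = 0;
	int err, i;

	err = hinic_alloc_irqs(hwdev, SERVICE_T_NIC, 4, irqs, &granted);
	if (err)
		return err;

	/* ... request_irq() on irqs[0..granted-1].irq_id ... */

	for (i = 0; i < granted; i++)
		hinic_free_irq(hwdev, SERVICE_T_NIC, irqs[i].irq_id);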
+ +void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + int i; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free == CFG_BUSY) { + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Found target, but out of range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %d doesn't need to be freed\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(hinic_free_irq); + +int hinic_vector_to_irq(void *hwdev, enum hinic_service_type type, int vector) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct irq_alloc_info_st *irq_info; + int irq = -EINVAL; + + if (!hwdev) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) { + sdk_err(dev->dev_hdl, + "Service type: %u, only RDMA service could get irq by vector\n", + type); + return -EINVAL; + } + + /* Currently RDMA CEQs are 2 - 31; this may change in the future */ + vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE); + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + if (irq_info[vector].type == SERVICE_T_ROCE || + irq_info[vector].type == SERVICE_T_IWARP) + if (irq_info[vector].free == CFG_BUSY) + irq = (int)irq_info[vector].info.irq_id; + + return irq; +} +EXPORT_SYMBOL(hinic_vector_to_irq); + +int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int num, + int *ceq_id_array, int *act_num) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_eq_info *eq; + int free_ceq; + int i, j; + + if (!hwdev || !ceq_id_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + free_ceq = eq->num_ceq_remain; + + mutex_lock(&eq->eq_mutex); + + if (num > free_ceq) { + if (free_ceq <= 0) { + sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); + mutex_unlock(&eq->eq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %d ceq resources in cfg mgmt\n", + free_ceq); + } + + *act_num = 0; + + num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE); + for (i = 0; i < num; i++) { + if (eq->num_ceq_remain == 0) { + sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", + *act_num, num); + mutex_unlock(&eq->eq_mutex); + return 0; + } + + for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { + if (eq->eq[j].free == CFG_FREE) { + eq->eq[j].type = type; + eq->eq[j].free = CFG_BUSY; + eq->num_ceq_remain--; + ceq_id_array[i] = eq->eq[j].eqn; + (*act_num)++; + break; + } + } + } + + mutex_unlock(&eq->eq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic_alloc_ceqs); +
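The CEQ pool behaves the same way as the IRQ pool: a short allocation is reported through act_num, not as an error. A usage sketch under the same assumptions (hypothetical RDMA caller; hinic_free_ceq() is defined next):

	int ceqs[2], granted = 0;

	if (hinic_alloc_ceqs(hwdev, SERVICE_T_ROCE, 2, ceqs, &granted))
		return -ENOMEM;

	/* ... attach completion handling to ceqs[0..granted-1] ... */

	while (granted--)
		hinic_free_ceq(hwdev, SERVICE_T_ROCE, ceqs[granted]);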
+ +void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_eq_info *eq; + u8 num_ceq; + u8 i = 0; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + num_ceq = eq->num_ceq; + + mutex_lock(&eq->eq_mutex); + + for (i = 0; i < num_ceq; i++) { + if (ceq_id == eq->eq[i].eqn && + type == cfg_mgmt->eq_info.eq[i].type) { + if (eq->eq[i].free == CFG_BUSY) { + eq->eq[i].free = CFG_FREE; + eq->num_ceq_remain++; + if (eq->num_ceq_remain > num_ceq) + eq->num_ceq_remain %= num_ceq; + + mutex_unlock(&eq->eq_mutex); + return; + } + } + } + + if (i >= num_ceq) + sdk_warn(dev->dev_hdl, "Ceq %d doesn't need to be freed\n", ceq_id); + + mutex_unlock(&eq->eq_mutex); +} +EXPORT_SYMBOL(hinic_free_ceq); + +static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_dev_cap *dev_cap = buf_out; + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct nic_service_cap *nic_cap = &cap->nic_cap; + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + struct dev_iwarp_svc_own_cap *iwarp_cap = + &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap; + struct dev_ovs_svc_cap *ovs_cap = &cap->ovs_cap.dev_ovs_cap; + struct hinic_dev_cap dev_cap_tmp = {0}; + u16 out_len = 0; + u16 func_id; + int err; + + memset(dev_cap, 0, sizeof(*dev_cap)); + + if (cap->sf_svc_attr.ft_en) + dev_cap->sf_svc_attr |= SF_SVC_FT_BIT; + else + dev_cap->sf_svc_attr &= ~SF_SVC_FT_BIT; + + if (cap->sf_svc_attr.rdma_en) + dev_cap->sf_svc_attr |= SF_SVC_RDMA_BIT; + else + dev_cap->sf_svc_attr &= ~SF_SVC_RDMA_BIT; + + dev_cap->sf_en_vf = cap->sf_svc_attr.sf_en_vf; + + dev_cap->host_id = cap->host_id; + dev_cap->ep_id = cap->ep_id; + dev_cap->intr_type = cap->interrupt_type; + dev_cap->max_cos_id = cap->max_cos_id; + dev_cap->er_id = cap->er_id; + dev_cap->port_id = cap->port_id; + dev_cap->max_vf = cap->max_vf; + dev_cap->svc_cap_en = cap->chip_svc_type; + dev_cap->host_total_func = cap->host_total_function; + dev_cap->host_oq_id_mask_val = cap->host_oq_id_mask_val; + dev_cap->net_port_mode = cap->net_port_mode; + + /* Parameters below are left uninitialized because NIC and RoCE do not + * use them: + * max_connect_num + * max_stick2cache_num + * bfilter_start_addr + * bfilter_len + * hash_bucket_num + * cfg_file_ver + */ + + /* NIC VF resources */ + dev_cap->nic_max_sq = nic_cap->vf_max_sqs; + dev_cap->nic_max_rq = nic_cap->vf_max_rqs; + + /* ROCE VF resources */ + dev_cap->roce_max_qp = roce_cap->vf_max_qps; + dev_cap->roce_max_cq = roce_cap->vf_max_cqs; + dev_cap->roce_max_srq = roce_cap->vf_max_srqs; + dev_cap->roce_max_mpt = roce_cap->vf_max_mpts; + + dev_cap->roce_cmtt_cl_start = roce_cap->cmtt_cl_start; + dev_cap->roce_cmtt_cl_end = roce_cap->cmtt_cl_end; + dev_cap->roce_cmtt_cl_size = roce_cap->cmtt_cl_sz; + + dev_cap->roce_dmtt_cl_start = roce_cap->dmtt_cl_start; + dev_cap->roce_dmtt_cl_end = roce_cap->dmtt_cl_end; + dev_cap->roce_dmtt_cl_size = roce_cap->dmtt_cl_sz; + + dev_cap->roce_wqe_cl_start = roce_cap->wqe_cl_start; + dev_cap->roce_wqe_cl_end = roce_cap->wqe_cl_end; + dev_cap->roce_wqe_cl_size = roce_cap->wqe_cl_sz; + + /* Iwarp VF resources */ + dev_cap->iwarp_max_qp = iwarp_cap->vf_max_qps; + dev_cap->iwarp_max_cq = iwarp_cap->vf_max_cqs; + dev_cap->iwarp_max_mpt = iwarp_cap->vf_max_mpts; + + /* OVS VF resources */ + dev_cap->ovs_max_qpc = ovs_cap->max_pctxs; + dev_cap->ovs_dq_en = ovs_cap->dynamic_qp_en; + + *out_size = sizeof(*dev_cap); + + out_len = sizeof(dev_cap_tmp); + /* fixed qnum in ovs mode */ + func_id = vf_id + hinic_glb_pf_vf_offset(hwdev); + dev_cap_tmp.func_id = func_id; + err = hinic_pf_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_FUNC_CAP, + &dev_cap_tmp, sizeof(dev_cap_tmp), + &dev_cap_tmp, &out_len, 0); + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) { + sdk_err(dev->dev_hdl, + "Get func_id: %u capability from FW failed, err: %d, status: 0x%x, out_size: 0x%x\n", + func_id, err, dev_cap_tmp.status, out_len); + return -EFAULT; + } else if (err) { + return err; + } + + dev_cap->nic_max_sq
= dev_cap_tmp.nic_max_sq + 1; + dev_cap->nic_max_rq = dev_cap_tmp.nic_max_rq + 1; + dev_cap->max_queue_allowed = dev_cap_tmp.max_queue_allowed; + + sdk_info(dev->dev_hdl, "func_id(%u) %s qnum %u max_queue_allowed %u\n", + func_id, (ovs_cap->dynamic_qp_en ? "dynamic" : "fixed"), + dev_cap->nic_max_sq, dev_cap->max_queue_allowed); + + return 0; +} + +static int cfg_mbx_ppf_proc_msg(void *hwdev, u16 pf_id, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic_hwdev *dev = hwdev; + + sdk_info(dev->dev_hdl, "PPF received another PF's cfgmgmt cmd %d mbox msg\n", + cmd); + + return hinic_ppf_process_mbox_msg(hwdev, pf_id, vf_id, HINIC_MOD_CFGM, + cmd, buf_in, in_size, buf_out, + out_size); +} + +static int cfg_mbx_vf_proc_msg(void *hwdev, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *dev = hwdev; + + *out_size = 0; + sdk_err(dev->dev_hdl, "VF msg callback not supported\n"); + + return -EOPNOTSUPP; +} + +static int cfg_mbx_init(struct hinic_hwdev *dev, struct cfg_mgmt_info *cfg_mgmt) +{ + int err; + enum func_type type = dev->hwif->attr.func_type; + + if (type == TYPE_PF) { + err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_pf_proc_vf_msg); + if (err) { + sdk_err(dev->dev_hdl, + "PF: Register PF mailbox callback failed\n"); + return err; + } + } else if (type == TYPE_PPF) { + err = hinic_register_ppf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_ppf_proc_msg); + if (err) { + sdk_err(dev->dev_hdl, + "PPF: Register PPF mailbox callback failed\n"); + return err; + } + + err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_pf_proc_vf_msg); + if (err) { + sdk_err(dev->dev_hdl, + "PPF: Register PF mailbox callback failed\n"); + hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM); + return err; + } + } else if (type == TYPE_VF) { + err = hinic_register_vf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_vf_proc_msg); + if (err) { + sdk_err(dev->dev_hdl, + "VF: Register VF mailbox callback failed\n"); + return err; + } + } else { + sdk_err(dev->dev_hdl, "Invalid func_type: %d, not supported\n", + type); + return -EINVAL; + } + + return 0; +} + +static void cfg_mbx_cleanup(struct hinic_hwdev *dev) +{ + hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM); + hinic_unregister_pf_mbox_cb(dev, HINIC_MOD_CFGM); + hinic_unregister_vf_mbox_cb(dev, HINIC_MOD_CFGM); +} + +int init_cfg_mgmt(struct hinic_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex)); + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + mutex_deinit(&cfg_mgmt->eq_info.eq_mutex); + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + return err; +}
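init_cfg_mgmt() and free_cfg_mgmt() (defined next) bracket the capability handshake: the EQ/IRQ bookkeeping must exist before init_capability() can size the services, and teardown runs in reverse. A sketch of the intended pairing on the caller side, mirroring the order hinic_init_hwdev() uses later in this file:

	err = init_cfg_mgmt(hwdev);
	if (err)
		return err;

	err = init_capability(hwdev);
	if (err) {
		free_cfg_mgmt(hwdev);
		return err;
	}

	/* ... device lifetime ... */

	free_capability(hwdev);
	free_cfg_mgmt(hwdev);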
+ +void free_cfg_mgmt(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + /* check whether all allocated resources were recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irqs and event queues, please check\n"); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + pci_disable_msix(dev->pcidev_hdl); + break; + + case INTR_TYPE_MSI: + pci_disable_msi(dev->pcidev_hdl); + break; + + case INTR_TYPE_INT: + default: + break; + } + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex)); + + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + mutex_deinit(&cfg_mgmt->eq_info.eq_mutex); + + kfree(cfg_mgmt); +} + +int init_capability(struct hinic_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + set_cfg_test_param(cfg_mgmt); + + err = cfg_mbx_init(dev, cfg_mgmt); + if (err) { + sdk_err(dev->dev_hdl, "Configure mailbox init failed, err: %d\n", + err); + return err; + } + + cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; + cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; + + err = get_dev_cap(dev); + if (err) { + cfg_mbx_cleanup(dev); + return err; + } + + init_service_param(dev); + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +void free_capability(struct hinic_hwdev *dev) +{ + cfg_mbx_cleanup(dev); + sdk_info(dev->dev_hdl, "Free capability success\n"); +} + +/* 0 - MSIx, 1 - MSI, 2 - INTx */ +enum intr_type hinic_intr_type(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return INTR_TYPE_NONE; + + return dev->cfg_mgmt->svc_cap.interrupt_type; +} +EXPORT_SYMBOL(hinic_intr_type); + +bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_NIC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_nic); + +bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_ROCE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_roce); + +bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FCOE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fcoe_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_fcoe); + +/* Only the PPF supports it, the PF does not */ +bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_TOE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_toe); + +bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_IWARP_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_iwarp); + +bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap,
&dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_fc); + +bool hinic_support_fic(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FIC_TYPE(dev)) + return false; + + return true; +} +EXPORT_SYMBOL(hinic_support_fic); + +bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_OVS_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_ovs); + +bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_ACL_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.acl_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_acl); + +bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_RDMA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_rdma); + +bool hinic_support_ft(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FT_TYPE(dev)) + return false; + + return true; +} +EXPORT_SYMBOL(hinic_support_ft); + +bool hinic_support_dynamic_q(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.nic_cap.dynamic_qp ? true : false; +} + +bool hinic_func_for_mgmt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0) + return false; + else + return true; +} + +bool hinic_func_for_hwpt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_HWPT_TYPE(dev)) + return true; + else + return false; +} + +bool hinic_func_for_pt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.force_up) + return true; + else + return false; +} + +int cfg_set_func_sf_en(void *hwdev, u32 enbits, u32 enmask) +{ + struct hinic_hwdev *dev = hwdev; + struct nic_misc_func_sf_enbits *func_sf_enbits; + u16 out_size = sizeof(*func_sf_enbits); + u16 glb_func_idx; + u16 api_info_len; + int err; + + api_info_len = sizeof(struct nic_misc_func_sf_enbits); + func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL); + if (!func_sf_enbits) { + sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n"); + return -ENOMEM; + } + + err = hinic_global_func_id_get(dev, &glb_func_idx); + if (err) { + kfree(func_sf_enbits); + return err; + } + + func_sf_enbits->stateful_enbits = enbits; + func_sf_enbits->stateful_enmask = enmask; + func_sf_enbits->function_id = glb_func_idx; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_MISC_SET_FUNC_SF_ENBITS, + (void *)func_sf_enbits, api_info_len, + (void *)func_sf_enbits, &out_size, + VSW_UP_CFG_TIMEOUT); + if (err || !out_size || func_sf_enbits->status) { + sdk_err(dev->dev_hdl, + "Failed to set stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n", + err, func_sf_enbits->status, out_size); + kfree(func_sf_enbits); + return -EFAULT; + } + + kfree(func_sf_enbits); + return 0; +} + +int cfg_get_func_sf_en(void *hwdev, u32 *enbits) +{ + struct nic_misc_func_sf_enbits *func_sf_enbits; + struct hinic_hwdev *dev = hwdev; + u16 
out_size = sizeof(*func_sf_enbits); + u16 glb_func_idx; + u16 api_info_len; + int err; + + api_info_len = sizeof(struct nic_misc_func_sf_enbits); + func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL); + if (!func_sf_enbits) { + sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n"); + return -ENOMEM; + } + + err = hinic_global_func_id_get(dev, &glb_func_idx); + if (err) { + kfree(func_sf_enbits); + return err; + } + + func_sf_enbits->function_id = glb_func_idx; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_MISC_GET_FUNC_SF_ENBITS, + (void *)func_sf_enbits, api_info_len, + (void *)func_sf_enbits, &out_size, + VSW_UP_CFG_TIMEOUT); + if (err || !out_size || func_sf_enbits->status) { + sdk_err(dev->dev_hdl, "Failed to get stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n", + err, func_sf_enbits->status, out_size); + kfree(func_sf_enbits); + return -EFAULT; + } + + *enbits = func_sf_enbits->stateful_enbits; + + kfree(func_sf_enbits); + return 0; +} + +int hinic_set_toe_enable(void *hwdev, bool enable) +{ + u32 enbits; + u32 enmask; + + if (!hwdev) + return -EINVAL; + + enbits = VSW_SET_STATEFUL_BITS_TOE((u16)enable); + enmask = VSW_SET_STATEFUL_BITS_TOE(0x1U); + + return cfg_set_func_sf_en(hwdev, enbits, enmask); +} +EXPORT_SYMBOL(hinic_set_toe_enable); + +bool hinic_get_toe_enable(void *hwdev) +{ + int err; + u32 enbits; + + if (!hwdev) + return false; + + err = cfg_get_func_sf_en(hwdev, &enbits); + if (err) + return false; + + return VSW_GET_STATEFUL_BITS_TOE(enbits); +} +EXPORT_SYMBOL(hinic_get_toe_enable); + +int hinic_set_fcoe_enable(void *hwdev, bool enable) +{ + u32 enbits; + u32 enmask; + + if (!hwdev) + return -EINVAL; + + enbits = VSW_SET_STATEFUL_BITS_FCOE((u16)enable); + enmask = VSW_SET_STATEFUL_BITS_FCOE(0x1U); + + return cfg_set_func_sf_en(hwdev, enbits, enmask); +} +EXPORT_SYMBOL(hinic_set_fcoe_enable); + +bool hinic_get_fcoe_enable(void *hwdev) +{ + int err; + u32 enbits; + + if (!hwdev) + return false; + + err = cfg_get_func_sf_en(hwdev, &enbits); + if (err) + return false; + + return VSW_GET_STATEFUL_BITS_FCOE(enbits); +} +EXPORT_SYMBOL(hinic_get_fcoe_enable); + +bool hinic_get_stateful_enable(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.sf_en; +} +EXPORT_SYMBOL(hinic_get_stateful_enable); + +u8 hinic_host_oq_id_mask(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} +EXPORT_SYMBOL(hinic_host_oq_id_mask); + +u8 hinic_host_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_id; +} +EXPORT_SYMBOL(hinic_host_id); + +u16 hinic_host_total_func(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host total function number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_total_function; +} +EXPORT_SYMBOL(hinic_host_total_func); + +u16 hinic_func_max_qnum(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_sqs; +} +EXPORT_SYMBOL(hinic_func_max_qnum); + +u16 hinic_func_max_nic_qnum(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue 
number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} +EXPORT_SYMBOL(hinic_func_max_nic_qnum); + +u8 hinic_ep_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting ep id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.ep_id; +} +EXPORT_SYMBOL(hinic_ep_id); + +u8 hinic_er_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting er id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.er_id; +} +EXPORT_SYMBOL(hinic_er_id); + +u8 hinic_physical_port_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} +EXPORT_SYMBOL(hinic_physical_port_id); + +u8 hinic_func_max_vf(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max vf number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_vf; +} +EXPORT_SYMBOL(hinic_func_max_vf); + +u8 hinic_max_num_cos(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max cos number\n"); + return 0; + } + return (u8)(dev->cfg_mgmt->svc_cap.max_cos_id + 1); +} +EXPORT_SYMBOL(hinic_max_num_cos); + +u8 hinic_cos_valid_bitmap(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); + return 0; + } + return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap); +} +EXPORT_SYMBOL(hinic_cos_valid_bitmap); + +u8 hinic_net_port_mode(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting net port mode\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.net_port_mode; +} +EXPORT_SYMBOL(hinic_net_port_mode); + + +bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev || state >= HINIC_HWDEV_MAX_INVAL_INITED) + return false; + + return !!test_bit(state, &dev->func_state); +} + +static int hinic_os_dep_init(struct hinic_hwdev *hwdev) +{ + + hwdev->workq = create_singlethread_workqueue(HINIC_HW_WQ_NAME); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n"); + return -EFAULT; + } + + sema_init(&hwdev->recover_sem, 1); + sema_init(&hwdev->fault_list_sem, 1); + + INIT_WORK(&hwdev->fault_work, hinic_fault_work_handler); + + return 0; +} + +static void hinic_os_dep_deinit(struct hinic_hwdev *hwdev) +{ + + destroy_work(&hwdev->fault_work); + + destroy_workqueue(hwdev->workq); + + down(&hwdev->fault_list_sem); + + + up(&hwdev->fault_list_sem); + + sema_deinit(&hwdev->fault_list_sem); + sema_deinit(&hwdev->recover_sem); +} + +void hinic_ppf_hwdev_unreg(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + down(&dev->ppf_sem); + dev->ppf_hwdev = NULL; + up(&dev->ppf_sem); + + sdk_info(dev->dev_hdl, "Unregister PPF hwdev\n"); +} + +void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + down(&dev->ppf_sem); + dev->ppf_hwdev = ppf_hwdev; + up(&dev->ppf_sem); + + sdk_info(dev->dev_hdl, "Register PPF hwdev\n"); +} + +static int __vf_func_init(struct hinic_hwdev *hwdev) +{ + int err; + + err = hinic_vf_mbox_random_id_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init vf mbox random id\n"); + return err; + } + err = 
hinic_vf_func_init(hwdev); + if (err) + nic_err(hwdev->dev_hdl, "Failed to init nic mbox\n"); + + return err; +} + +static int __hilink_phy_init(struct hinic_hwdev *hwdev) +{ + int err; + + if (!HINIC_IS_VF(hwdev)) { + err = hinic_phy_init_status_judge(hwdev); + if (err) { + sdk_info(hwdev->dev_hdl, "Phy init failed\n"); + return err; + } + + if (hinic_support_nic(hwdev, NULL)) + hinic_hilink_info_show(hwdev); + } + + return 0; +}
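hinic_init_hwdev() (defined next) deliberately returns a tri-state result, so probe code has to distinguish a partial bring-up from a hard failure. A hypothetical caller sketch; the init_para setup and the retry bookkeeping are assumptions, only the return-value contract comes from the function's comment:

	err = hinic_init_hwdev(&init_para);
	if (err < 0) {
		/* nothing usable was brought up */
		return err;
	} else if (err > 0) {
		/* bitmap of the stages that did initialize: record it and
		 * retry the rest later (retry mechanism is caller-defined)
		 */
		adapter->init_state = err;
	}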
+ +/* Return: + * 0: all success + * >0: partial success + * <0: all failed + */ +int hinic_init_hwdev(struct hinic_init_para *para) +{ + struct hinic_hwdev *hwdev; + int err; + + if (!(*para->hwdev)) { + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = para->adapter_hdl; + hwdev->pcidev_hdl = para->pcidev_hdl; + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + hwdev->ppf_hwdev = para->ppf_hwdev; + sema_init(&hwdev->ppf_sem, 1); + sema_init(&hwdev->func_sem, 1); + hwdev->func_ref = 0; + + hwdev->chip_fault_stats = vzalloc(HINIC_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + err = hinic_init_hwif(hwdev, para->cfg_reg_base, + para->intr_reg_base, + para->db_base_phy, para->db_base, + para->dwqe_mapping); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + } else { + hwdev = *para->hwdev; + } + + /* detect slave host according to BAR reg */ + detect_host_mode_pre(hwdev); + + if (IS_BMGW_SLAVE_HOST(hwdev) && + (!hinic_get_master_host_mbox_enable(hwdev))) { + set_bit(HINIC_HWDEV_NONE_INITED, &hwdev->func_state); + sdk_info(hwdev->dev_hdl, "Master host not ready, init hwdev later\n"); + return (1 << HINIC_HWDEV_ALL_INITED); + } + + err = hinic_os_dep_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n"); + goto os_dep_init_err; + } + + hinic_set_chip_present(hwdev); + hinic_init_heartbeat(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = hinic_init_comm_ch(hwdev); + if (err) { + if (!(hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK)) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } else { + sdk_err(hwdev->dev_hdl, "Init communication channel partially failed\n"); + return hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK; + } + } + + err = init_capability(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + goto init_cap_err; + } + + if (hwdev->board_info.service_mode == HINIC_WORK_MODE_OVS_SP) + hwdev->feature_cap &= ~HINIC_FUNC_SUPP_RATE_LIMIT; + + if (hwdev->cfg_mgmt->svc_cap.force_up) + hwdev->feature_cap |= HINIC_FUNC_FORCE_LINK_UP; + + err = __vf_func_init(hwdev); + if (err) + goto vf_func_init_err; + + err = __hilink_phy_init(hwdev); + if (err) + goto hilink_phy_init_err; + + set_bit(HINIC_HWDEV_ALL_INITED, &hwdev->func_state); + + sdk_info(hwdev->dev_hdl, "Init hwdev success\n"); + + return 0; + +hilink_phy_init_err: + + hinic_vf_func_free(hwdev); +vf_func_init_err: + free_capability(hwdev); +init_cap_err: + return (hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + hinic_destroy_heartbeat(hwdev); + hinic_os_dep_deinit(hwdev); + +os_dep_init_err: + hinic_free_hwif(hwdev); + +init_hwif_err: + vfree(hwdev->chip_fault_stats); + +alloc_chip_fault_stats_err: + sema_deinit(&hwdev->func_sem); + sema_deinit(&hwdev->ppf_sem); + kfree(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +/** + * hinic_set_vf_dev_cap - Set max queue num for VF + * @hwdev: the HW device for VF + */ +int hinic_set_vf_dev_cap(void *hwdev) +{ + int err; + struct hinic_hwdev *dev; + enum func_type type; + + if (!hwdev) + return -EFAULT; + + dev = (struct hinic_hwdev *)hwdev; + type = HINIC_FUNC_TYPE(dev); + if (type != TYPE_VF) + return -EPERM; + + err = get_dev_cap(dev); + if (err) + return err; + + nic_param_fix(dev); + + return 0; +} + +void hinic_free_hwdev(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + enum hinic_hwdev_init_state state = HINIC_HWDEV_ALL_INITED; + int flag = 0; + + if (!hwdev) + return; + + if (test_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state)) { + clear_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state); + + /* BM slave functions do not need to exec rx_tx_flush */ + if (dev->func_mode != FUNC_MOD_MULTI_BM_SLAVE) + hinic_func_rx_tx_flush(hwdev); + + hinic_vf_func_free(hwdev); + + free_capability(dev); + } + while (state > HINIC_HWDEV_NONE_INITED) { + if (test_bit(state, &dev->func_state)) { + flag = 1; + break; + } + state--; + } + if (flag) { + hinic_uninit_comm_ch(dev); + free_cfg_mgmt(dev); + hinic_destroy_heartbeat(dev); + hinic_os_dep_deinit(dev); + } + clear_bit(HINIC_HWDEV_NONE_INITED, &dev->func_state); + hinic_free_hwif(dev); + vfree(dev->chip_fault_stats); + sema_deinit(&dev->func_sem); + sema_deinit(&dev->ppf_sem); + kfree(dev); +} + +void hinic_set_api_stop(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + dev->chip_present_flag = HINIC_CHIP_ABSENT; + sdk_info(dev->dev_hdl, "Set card absent\n"); + hinic_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} + +void hinic_shutdown_hwdev(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (IS_SLAVE_HOST(dev)) + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false); +} + +u32 hinic_func_pf_num(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf number capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_num; +} + +u64 hinic_get_func_feature_cap(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function feature capability\n"); + return 0; + } + + return dev->feature_cap; +} + +enum hinic_func_mode hinic_get_func_mode(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function mode\n"); + return 0; + } + + return dev->func_mode; +} +EXPORT_SYMBOL(hinic_get_func_mode); + +enum hinic_service_mode hinic_get_service_mode(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting service mode\n"); + return HINIC_WORK_MODE_INVALID; + } + + return dev->board_info.service_mode; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h new file mode 100644 index 000000000000..728e676e11c1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h @@ -0,0 +1,525 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __CFG_MGT_H__ +#define __CFG_MGT_H__ + +#include "hinic_ctx_def.h" + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* start position for CEQs allocation, Max number of CEQs is 32 */ +enum { + CFG_RDMA_CEQ_BASE = 0 +}; + +enum { + CFG_NET_MODE_ETH = 0, /* Eth */ + CFG_NET_MODE_FIC = 1, /* FIC */ + CFG_NET_MODE_FC = 2 /* FC */ +}; + +enum { + SF_SVC_FT_BIT = (1 << 0), + SF_SVC_RDMA_BIT = (1 << 1), +}; + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +/* number of PFs and VFs */ +#define HOST_PF_NUM 4 +#define HOST_VF_NUM 0 +#define HOST_OQID_MASK_VAL 2 + +/* L2NIC */ +#define L2NIC_SQ_DEPTH (4 * K_UNIT) +#define L2NIC_RQ_DEPTH (4 * K_UNIT) + +#define HINIC_CFG_MAX_QP 64 + +/* RDMA */ +#define RDMA_RSVD_QPS 2 +#define ROCE_MAX_WQES (16 * K_UNIT - 1) +#define IWARP_MAX_WQES (8 * K_UNIT) + +#define RDMA_MAX_SQ_SGE 8 + +#define ROCE_MAX_RQ_SGE 8 +#define IWARP_MAX_RQ_SGE 2 + +#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT) + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */ +#define ROCE_MAX_SQ_INLINE_DATA_SZ 192 + +#define IWARP_MAX_SQ_INLINE_DATA_SZ 108 + +#define ROCE_MAX_RQ_DESC_SZ 128 +#define IWARP_MAX_RQ_DESC_SZ 64 + +#define IWARP_MAX_IRQ_DEPTH 1024 +#define IWARP_IRQ_ENTRY_SZ 64 + +#define IWARP_MAX_ORQ_DEPTH 1024 +#define IWARP_ORQ_ENTRY_SZ 32 + +#define IWARP_MAX_RTOQ_DEPTH 1024 +#define IWARP_RTOQ_ENTRY_SZ 32 + +#define IWARP_MAX_ACKQ_DEPTH 1024 +#define IWARP_ACKQ_ENTRY_SZ 16 + +#define ROCE_QPC_ENTRY_SZ 512 +#define IWARP_QPC_ENTRY_SZ 1024 + +#define WQEBB_SZ 64 + +#define ROCE_RDMARC_ENTRY_SZ 32 +#define ROCE_MAX_QP_INIT_RDMA 128 +#define ROCE_MAX_QP_DEST_RDMA 128 + +#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) +#define ROCE_RSVD_SRQS 0 +#define ROCE_MAX_SRQ_SGE 7 +#define ROCE_SRQC_ENTERY_SZ 64 + +#define RDMA_MAX_CQES (64 * K_UNIT - 1) +#define RDMA_RSVD_CQS 0 + +#define RDMA_CQC_ENTRY_SZ 128 + +#define RDMA_CQE_SZ 32 +#define RDMA_RSVD_MRWS 128 +#define RDMA_MPT_ENTRY_SZ 64 +#define RDMA_NUM_MTTS (1 * G_UNIT) +#define LOG_MTT_SEG 5 +#define MTT_ENTRY_SZ 8 +#define LOG_RDMARC_SEG 3 + +#define LOCAL_ACK_DELAY 15 +#define RDMA_NUM_PORTS 1 +#define ROCE_MAX_MSG_SZ (2 * G_UNIT) +#define IWARP_MAX_MSG_SZ (1 * G_UNIT) + +#define DB_PAGE_SZ (4 * K_UNIT) +#define DWQE_SZ 256 + +#define NUM_PD (128 * K_UNIT) +#define RSVD_PD 0 + +#define MAX_XRCDS (64 * K_UNIT) +#define RSVD_XRCDS 0 + +#define MAX_GID_PER_PORT 16 +#define GID_ENTRY_SZ 32 +#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) +#define NUM_COMP_VECTORS 32 +#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 13) | (1UL << 14) | \ + (1UL << 16) | (1UL << 18) | (1UL << 20) | \ + (1UL << 22)) +#define ROCE_MODE 1 + +#define MAX_FRPL_LEN 511 +#define MAX_PKEYS 1 + +/* FCoE */ +#define FCOE_PCTX_SZ 256 +#define FCOE_CCTX_SZ 256 +#define FCOE_SQE_SZ 128 +#define FCOE_SCQC_SZ 64 +#define FCOE_SCQE_SZ 64 +#define FCOE_SRQC_SZ 64 +#define FCOE_SRQE_SZ 32 + +/* ToE */ +#define TOE_PCTX_SZ 1024 +#define TOE_CQC_SZ 64 + +/* IoE */ +#define IOE_PCTX_SZ 512 + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* OVS */ +#define OVS_PCTX_SZ 256 +#define OVS_SCQC_SZ 64 + +/* ACL */ +#define ACL_PCTX_SZ 512 
+#define ACL_SCQC_SZ 64 + +struct dev_sf_svc_attr { + bool ft_en; /* business enable flag (not including RDMA) */ + bool ft_pf_en; /* In FPGA test, whether the VF resource is in the PF, + * 0 - VF, 1 - PF; the VF doesn't need this bit. + */ + bool rdma_en; + bool rdma_pf_en;/* In FPGA test, whether the VF RDMA resource is in the PF, + * 0 - VF, 1 - PF; the VF doesn't need this bit. + */ + u8 sf_en_vf; /* SF_EN for PPF/PF's VF */ +}; + +struct host_shared_resource_cap { + u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */ + u32 host_cctxs; /* Child Context: max 8K */ + u32 host_scqs; /* shared CQ; the chip interface module uses 1 SCQ, + * TOE/IOE/FCoE each use 1 SCQ, + * RoCE/IWARP use multiple SCQs, + * so at least 6 SCQs are needed + */ + u32 host_srqs; /* SRQ number: 256K */ + u32 host_mpts; /* MR number:1M */ +};
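The bucket-count convention documented on the hash_bucket_num field of struct service_cap (defined next) exists so the value can double as an AND mask: when hash_bucket_num + 1 is a power of two, no modulo is needed. A minimal illustration, assuming a 4K-bucket table (hash_bucket_num == 4095):

	/* hash_value is whatever the flow hash produced */
	u16 bucket = (u16)(hash_value & cap->hash_bucket_num);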
+ +/* device capability */ +struct service_cap { + struct dev_sf_svc_attr sf_svc_attr; + enum cfg_svc_type_en svc_type; /* user input service type */ + enum cfg_svc_type_en chip_svc_type; /* HW supported service type */ + + /* Host global resources */ + u16 host_total_function; + u8 host_oq_id_mask_val; + u8 host_id; + u8 ep_id; + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + u8 intr_chip_en; + u8 max_cos_id; /* PF/VF's max cos id */ + u8 cos_valid_bitmap; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + u8 max_vf; /* max VF number that PF supported */ + u8 force_up; + bool sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable */ + u16 max_sqs; + u16 max_rqs; + + /* For test */ + u32 test_qpc_num; + u32 test_qpc_resvd_num; + u32 test_page_size_reorder; + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + u8 test_qpc_alloc_mode; + u8 test_scqc_alloc_mode; + + u32 test_max_conn_num; + u32 test_max_cache_conn_num; + u32 test_scqc_num; + u32 test_mpt_num; + u32 test_scq_resvd_num; + u32 test_mpt_recvd_num; + u32 test_hash_num; + u32 test_reorder_num; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum number of connections which can be held in cache memory, max 1K */ + u16 max_stick2cache_num; + /* Starting address in cache memory for the bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + /* Length for the bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be a power of 2. + * The maximum memory size of the bloom filter is 4M + */ + u16 bfilter_len; + /* The size of hash bucket tables, aligned on 64 entries. + * Used to AND (&) the hash value. Bucket size + 1 must be a power of 2. + * The maximum number of hash buckets is 4M + */ + u16 hash_bucket_num; + u8 net_port_mode; /* 0:ETH,1:FIC,2:4FC */ + + u32 pf_num; + u32 pf_id_start; + u32 vf_num; /* max numbers of vf in current host */ + u32 vf_id_start; + + struct host_shared_resource_cap shared_res_cap; /* shared capability */ + struct dev_version_info dev_ver_info; /* version */ + struct nic_service_cap nic_cap; /* NIC capability */ + struct rdma_service_cap rdma_cap; /* RDMA capability */ + struct fcoe_service_cap fcoe_cap; /* FCoE capability */ + struct toe_service_cap toe_cap; /* ToE capability */ + struct fc_service_cap fc_cap; /* FC capability */ + struct ovs_service_cap ovs_cap; /* OVS capability */ + struct acl_service_cap acl_cap; /* ACL capability */ +}; + +struct cfg_eq { + enum hinic_service_type type; + int eqn; + int free; /* 1 - allocated, 0 - freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + + u8 num_ceq; + u8 num_ceq_remain; + + /* mutex used for allocating EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum hinic_service_type type; + int free; /* 1 - allocated, 0 - freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocating IRQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct hinic_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +enum cfg_sub_cmd { + /* PPF(PF) <-> FW */ + HINIC_CFG_NIC_CAP = 0, + CFG_FW_VERSION, + CFG_UCODE_VERSION, + HINIC_CFG_FUNC_CAP, + HINIC_CFG_MBOX_CAP = 6, +}; + +struct hinic_dev_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + /* Public resource */ + u8 sf_svc_attr; + u8 host_id; + u8 sf_en_pf; + u8 sf_en_vf; + + u8 ep_id; + u8 intr_type; + u8 max_cos_id; + u8 er_id; + u8 port_id; + u8 max_vf; + u16 svc_cap_en; + u16 host_total_func; + u8 host_oq_id_mask_val; + u8 max_vf_cos_id; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + u8 cfg_file_ver; + u8 net_port_mode; + u8 valid_cos_bitmap; /* each bit indicates a valid cos */ + u8 force_up; + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + /* shared resource */ + u32 host_pctx_num; + u8 host_sf_en; + u8 rsvd2[3]; + u32 host_ccxt_num; + u32 host_scq_num; + u32 host_srq_num; + u32 host_mpt_num; + + /* l2nic */ + u16 nic_max_sq; + u16 nic_max_rq; + u16 nic_vf_max_sq; + u16 nic_vf_max_rq; + u8 nic_lro_en; + u8 nic_lro_sz; + u8 nic_tso_sz; + u8 max_queue_allowed; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + + u32 roce_vf_max_qp; + u32 roce_vf_max_cq; + u32 roce_vf_max_srq; + u32 roce_vf_max_mpt; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + + /* IWARP */ + u32 iwarp_max_qp; + u32 iwarp_max_cq; + u32 iwarp_max_mpt; + + u32 iwarp_vf_max_qp; + u32 iwarp_vf_max_cq; + u32 iwarp_vf_max_mpt; + + u32 iwarp_cmtt_cl_start; + u32 iwarp_cmtt_cl_end; + u32 iwarp_cmtt_cl_size; + + u32 iwarp_dmtt_cl_start; + u32 iwarp_dmtt_cl_end; + u32 iwarp_dmtt_cl_size; + + u32 iwarp_wqe_cl_start; + u32 iwarp_wqe_cl_end; + u32 iwarp_wqe_cl_size; + + /* FCoE */ + u32 fcoe_max_qp; + u32
fcoe_max_cq; + u32 fcoe_max_srq; + + u32 fcoe_max_cctx; + u32 fcoe_cctx_id_start; + + u8 fcoe_vp_id_start; + u8 fcoe_vp_id_end; + u8 rsvd4[2]; + + /* OVS */ + u32 ovs_max_qpc; + u8 ovs_dq_en; + u8 rsvd5[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u32 toe_max_srq; + u32 toe_srq_id_start; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u16 func_id; +}; + +#define VSW_UP_CFG_TIMEOUT (0xFF00000) + +#define VSW_SET_STATEFUL_BITS_TOE(flag) \ + ((flag) << VSW_STATEFUL_TOE_EN) +#define VSW_SET_STATEFUL_BITS_FCOE(flag) \ + ((flag) << VSW_STATEFUL_FCOE_EN) +#define VSW_SET_STATEFUL_BITS_IWARP(flag) \ + ((flag) << VSW_STATEFUL_IWARP_EN) +#define VSW_SET_STATEFUL_BITS_ROCE(flag) \ + ((flag) << VSW_STATEFUL_ROCE_EN) + +#define VSW_GET_STATEFUL_BITS_TOE(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_TOE_EN) & 0x1U)) +#define VSW_GET_STATEFUL_BITS_FCOE(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_FCOE_EN) & 0x1U)) +#define VSW_GET_STATEFUL_BITS_IWARP(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_IWARP_EN) & 0x1U)) +#define VSW_GET_STATEFUL_BITS_ROCE(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_ROCE_EN) & 0x1U)) + +enum tag_vsw_major_cmd { + VSW_MAJOR_MISC = 10, /* 0~9 reserved for driver */ + VSW_MAJOR_L2SWITCH, + VSW_MAJOR_L2MULTICAST, + VSW_MAJOR_QOS, + VSW_MAJOR_PKTSUPS, + VSW_MAJOR_VLANFILTER, + VSW_MAJOR_MACFILTER, + VSW_MAJOR_IPFILTER, + VSW_MAJOR_VLANMAPPING, + VSW_MAJOR_ETHTRUNK, + VSW_MAJOR_MIRROR, + VSW_MAJOR_DFX, + VSW_MAJOR_ACL, +}; + +enum tag_vsw_minor_misc_cmd { + VSW_MINOR_MISC_INIT_FUNC = 0, + VSW_MINOR_MISC_SET_FUNC_SF_ENBITS, + VSW_MINOR_MISC_GET_FUNC_SF_ENBITS, + VSW_MINOR_MISC_CMD_MAX, +}; + +/* vswitch eth-trunk sub-command */ +enum tag_nic_stateful_enbits { + VSW_STATEFUL_TOE_EN = 0, + VSW_STATEFUL_FCOE_EN = 1, + VSW_STATEFUL_IWARP_EN = 2, + VSW_STATEFUL_ROCE_EN = 3, +}; + +/* function stateful enable parameters */ +struct nic_misc_func_sf_enbits { + u8 status; + u8 version; + u8 rsvd0[6]; + u32 function_id; + u32 stateful_enbits; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */ + u32 stateful_enmask; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */ +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c new file mode 100644 index 000000000000..0d8145f3113c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c @@ -0,0 +1,1562 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_hwif.h" +#include "hinic_nic_io.h" +#include "hinic_eqs.h" +#include "hinic_wq.h" +#include "hinic_cmdq.h" + +#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27 + +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U +#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU + +#define CMDQ_DB_INFO_SET(val, member) \ + ((val & CMDQ_DB_INFO_##member##_MASK) \ + << CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + (((val) & CMDQ_CTRL_##member##_MASK) \ + << CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) \ + & CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + (((val) & CMDQ_WQE_HEADER_##member##_MASK) \ + << CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \ + & CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 56 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0x1F +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 + +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> 
CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define SAVED_DATA_ARM_SHIFT 31 + +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) \ + << SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK \ + << SAVED_DATA_##member##_SHIFT))) + +#define WQE_ERRCODE_VAL_SHIFT 20 + +#define WQE_ERRCODE_VAL_MASK 0xF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ + WQE_ERRCODE_##member##_MASK) + +#define CEQE_CMDQ_TYPE_SHIFT 0 + +#define CEQE_CMDQ_TYPE_MASK 0x7 + +#define CEQE_CMDQ_GET(val, member) \ + (((val) >> CEQE_CMDQ_##member##_SHIFT) & CEQE_CMDQ_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)db_base + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 + +#define COMPLETE_LEN 3 + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 + +#define CMDQ_WQ_PAGE_SIZE 4096 + +#define WQE_NUM_WQEBBS(wqe_size, wq) \ + ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size)) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hinic_cmdqs, cmdq[0]) + +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_COMPLETE_CMPT_CODE 11 + +#define HINIC_GET_CMDQ_FREE_WQEBBS(cmdq_wq) \ + atomic_read(&cmdq_wq->delta) + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +bool hinic_cmdq_idle(struct hinic_cmdq *cmdq) +{ + struct hinic_wq *wq = cmdq->wq; + + return (atomic_read(&wq->delta) == wq->q_depth ? 
true : false); +} + +struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev) +{ + struct hinic_cmdqs *cmdqs; + struct hinic_cmd_buf *cmd_buf; + void *dev; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + dev = ((struct hinic_hwdev *)hwdev)->dev_hdl; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) + return NULL; + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_pci_buf_err; + } + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} +EXPORT_SYMBOL(hinic_alloc_cmd_buf); + +void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf) +{ + struct hinic_cmdqs *cmdqs; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf\n"); + return; + } + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} +EXPORT_SYMBOL(hinic_free_cmd_buf); + +static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type) +{ + int wqe_size = 0; + + switch (wqe_type) { + case WQE_LCMD_TYPE: + wqe_size = WQE_LCMD_SIZE; + break; + case WQE_SCMD_TYPE: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static int cmdq_get_wqe_size(enum bufdesc_len len) +{ + int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static void cmdq_set_completion(struct hinic_cmdq_completion *complete, + struct hinic_cmd_buf *buf_out) +{ + struct hinic_sge_resp *sge_resp = &complete->sge_resp; + + hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, + HINIC_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe, + struct hinic_cmd_buf *buf_in) +{ + hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_inline_wqe_data(struct hinic_cmdq_inline_wqe *wqe, + const void *buf_in, u32 in_size) +{ + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + +static void cmdq_fill_db(struct hinic_cmdq_db *db, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hinic_cmdq *cmdq, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hinic_cmdq_db db; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = cpu_to_be32(db.db_info); + + wmb(); /* write all before the doorbell */ + writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, const void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + 
enum bufdesc_len buf_len) +{ + struct hinic_ctrl *ctrl; + enum ctrl_sect_len ctrl_len; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, buf_out); + } + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + case ASYNC_CMD: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_set_inline_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + void *buf_in, u16 in_size, + struct hinic_cmd_buf *buf_out, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_scmd->completion, buf_out); + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + default: + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); +} + +static void cmdq_update_cmd_status(struct hinic_cmdq *cmdq, u16 prod_idx, + struct hinic_cmdq_wqe *wqe) +{ + struct hinic_cmdq_cmd_info *cmd_info; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + u32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (cmd_info->errcode) { + status_info = 
be32_to_cpu(wqe_lcmd->status.status_info);
+		*cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL);
+	}
+
+	if (cmd_info->direct_resp &&
+	    cmd_info->cmd_type == HINIC_CMD_TYPE_DIRECT_RESP)
+		*cmd_info->direct_resp =
+			cpu_to_be64(wqe_lcmd->completion.direct_resp);
+}
+
+static int hinic_cmdq_sync_timeout_check(struct hinic_cmdq *cmdq,
+					 struct hinic_cmdq_wqe *wqe, u16 pi,
+					 enum hinic_mod_type mod, u8 cmd)
+{
+	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+	struct hinic_ctrl *ctrl;
+	u32 ctrl_info;
+
+	wqe_lcmd = &wqe->wqe_lcmd;
+	ctrl = &wqe_lcmd->ctrl;
+	ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+	if (!WQE_COMPLETED(ctrl_info)) {
+		sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check: busy bit not set, mod: %u, cmd: 0x%x\n",
+			 mod, cmd);
+		return -EFAULT;
+	}
+
+	cmdq_update_cmd_status(cmdq, pi, wqe);
+
+	sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeeded, mod: %u, cmd: 0x%x\n",
+		 mod, cmd);
+	return 0;
+}
+
+static void __clear_cmd_info(struct hinic_cmdq_cmd_info *cmd_info,
+			     const int *errcode, struct completion *done,
+			     u64 *out_param)
+{
+	if (cmd_info->errcode == errcode)
+		cmd_info->errcode = NULL;
+
+	if (cmd_info->done == done)
+		cmd_info->done = NULL;
+
+	if (cmd_info->direct_resp == out_param)
+		cmd_info->direct_resp = NULL;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
+				     enum hinic_ack_type ack_type,
+				     enum hinic_mod_type mod, u8 cmd,
+				     struct hinic_cmd_buf *buf_in,
+				     u64 *out_param, u32 timeout)
+{
+	struct hinic_wq *wq = cmdq->wq;
+	struct hinic_cmdq_wqe *curr_wqe, wqe;
+	struct hinic_cmdq_cmd_info *cmd_info;
+	struct completion done;
+	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+	int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+	int cmpt_code = CMDQ_SEND_CMPT_CODE;
+	ulong timeo;
+	u64 curr_msg_id;
+	int err;
+
+	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+	/* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	/* reserve a wqebb for setting the arm bit in case frequent cmdq
+	 * commands fill the cmdq up
+	 */
+	if (HINIC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) {
+		spin_unlock_bh(&cmdq->cmdq_lock);
+		return -EBUSY;
+	}
+
+	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+	if (!curr_wqe) {
+		spin_unlock_bh(&cmdq->cmdq_lock);
+		sdk_err(cmdq->hwdev->dev_hdl, "Cannot get available wqebb, mod: %u, cmd: 0x%x\n",
+			mod, cmd);
+		return -EBUSY;
+	}
+
+	memset(&wqe, 0, sizeof(wqe));
+
+	wrapped = cmdq->wrapped;
+
+	next_prod_idx = curr_prod_idx + num_wqebbs;
+	if (next_prod_idx >= wq->q_depth) {
+		cmdq->wrapped = !cmdq->wrapped;
+		next_prod_idx -= wq->q_depth;
+	}
+
+	cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+	init_completion(&done);
+
+	cmd_info->done = &done;
+	cmd_info->errcode = &errcode;
+	cmd_info->direct_resp = out_param;
+	cmd_info->cmpt_code = &cmpt_code;
+
+	cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
+			  wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+	/* The data that is written to HW should be in Big Endian Format */
+	hinic_cpu_to_be32(&wqe, wqe_size);
+
+	/* CMDQ WQE is not shadow, therefore wqe will be written to wq */
+	cmdq_wqe_fill(curr_wqe, &wqe);
+
+	cmd_info->cmd_type = HINIC_CMD_TYPE_DIRECT_RESP;
+
+	(cmd_info->cmdq_msg_id)++;
+	curr_msg_id = cmd_info->cmdq_msg_id;
+
+	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+
+	timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+	if (!wait_for_completion_timeout(&done, timeo)) {
+		spin_lock_bh(&cmdq->cmdq_lock);
+
+		if (cmd_info->cmpt_code == &cmpt_code)
+			cmd_info->cmpt_code = NULL;
+
+		if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+			sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n");
+			spin_unlock_bh(&cmdq->cmdq_lock);
+			goto timeout_check_ok;
+		}
+
+		if (curr_msg_id == cmd_info->cmdq_msg_id) {
+			err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe,
+							    curr_prod_idx,
+							    mod, cmd);
+			if (err)
+				cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT;
+			else
+				cmd_info->cmd_type =
+						HINIC_CMD_TYPE_FAKE_TIMEOUT;
+		} else {
+			err = -ETIMEDOUT;
+			sdk_err(cmdq->hwdev->dev_hdl,
+				"Cmdq sync command current msg id mismatches cmd_info msg id, mod: %u, cmd: 0x%x\n",
+				mod, cmd);
+		}
+
+		__clear_cmd_info(cmd_info, &errcode, &done, out_param);
+
+		spin_unlock_bh(&cmdq->cmdq_lock);
+
+		if (!err)
+			goto timeout_check_ok;
+
+		sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n",
+			curr_prod_idx);
+		hinic_dump_ceq_info(cmdq->hwdev);
+		destory_completion(&done);
+		return -ETIMEDOUT;
+	}
+
+timeout_check_ok:
+	destory_completion(&done);
+	smp_rmb();	/* read error code after completion */
+
+	if (errcode > 1)
+		return errcode;
+
+	return 0;
+}
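+/*
+ * Timeout handling above: the sender re-takes cmdq_lock and inspects
+ * cmpt_code to arbitrate with the CEQ handler. If the handler completed the
+ * command in the window between the wait expiring and the lock being
+ * re-acquired, cmpt_code holds CMDQ_COMPLETE_CMPT_CODE and the command is
+ * treated as successful rather than timed out.
+ */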
+static int cmdq_sync_cmd_detail_resp(struct hinic_cmdq *cmdq,
+				     enum hinic_ack_type ack_type,
+				     enum hinic_mod_type mod, u8 cmd,
+				     struct hinic_cmd_buf *buf_in,
+				     struct hinic_cmd_buf *buf_out,
+				     u32 timeout)
+{
+	struct hinic_wq *wq = cmdq->wq;
+	struct hinic_cmdq_wqe *curr_wqe, wqe;
+	struct hinic_cmdq_cmd_info *cmd_info;
+	struct completion done;
+	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+	int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+	int cmpt_code = CMDQ_SEND_CMPT_CODE;
+	ulong timeo;
+	u64 curr_msg_id;
+	int err;
+
+	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+	/* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	/* reserve a wqebb for setting the arm bit in case frequent cmdq
+	 * commands fill the cmdq up
+	 */
+	if (HINIC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) {
+		spin_unlock_bh(&cmdq->cmdq_lock);
+		return -EBUSY;
+	}
+
+	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+	if (!curr_wqe) {
+		spin_unlock_bh(&cmdq->cmdq_lock);
+		sdk_err(cmdq->hwdev->dev_hdl, "Cannot get available wqebb, mod: %u, cmd: 0x%x\n",
+			mod, cmd);
+		return -EBUSY;
+	}
+
+	memset(&wqe, 0, sizeof(wqe));
+
+	wrapped = cmdq->wrapped;
+
+	next_prod_idx = curr_prod_idx + num_wqebbs;
+	if (next_prod_idx >= wq->q_depth) {
+		cmdq->wrapped = !cmdq->wrapped;
+		next_prod_idx -= wq->q_depth;
+	}
+
+	cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+	init_completion(&done);
+
+	cmd_info->done = &done;
+	cmd_info->errcode = &errcode;
+	cmd_info->cmpt_code = &cmpt_code;
+
+	cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out,
+			  wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+	hinic_cpu_to_be32(&wqe, wqe_size);
+
+	cmdq_wqe_fill(curr_wqe, &wqe);
+
+	cmd_info->cmd_type = HINIC_CMD_TYPE_SGE_RESP;
+
+	(cmd_info->cmdq_msg_id)++;
+	curr_msg_id = cmd_info->cmdq_msg_id;
+
+	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+
+	timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+	if (!wait_for_completion_timeout(&done, timeo)) {
+		spin_lock_bh(&cmdq->cmdq_lock);
+
+		if (cmd_info->cmpt_code == &cmpt_code)
+			cmd_info->cmpt_code = NULL;
+
+		if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+			sdk_info(cmdq->hwdev->dev_hdl, "Cmdq detail sync command has been completed\n");
+			spin_unlock_bh(&cmdq->cmdq_lock);
+			goto timeout_check_ok;
+		}
+
+		if (curr_msg_id == cmd_info->cmdq_msg_id) {
+			err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe,
+							    curr_prod_idx,
+							    mod, cmd);
+			if (err)
+				cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT;
+			else
+				cmd_info->cmd_type =
+						HINIC_CMD_TYPE_FAKE_TIMEOUT;
+		} else {
+			err = -ETIMEDOUT;
+			sdk_err(cmdq->hwdev->dev_hdl,
+				"Cmdq sync command current msg id mismatches cmd_info msg id, mod: %u, cmd: 0x%x\n",
+				mod, cmd);
+		}
+
+		if (cmd_info->errcode == &errcode)
+			cmd_info->errcode = NULL;
+
+		if (cmd_info->done == &done)
+			cmd_info->done = NULL;
+
+		spin_unlock_bh(&cmdq->cmdq_lock);
+
+		if (!err)
+			goto timeout_check_ok;
+
+		sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n",
+			curr_prod_idx);
+		hinic_dump_ceq_info(cmdq->hwdev);
+		destory_completion(&done);
+		return -ETIMEDOUT;
+	}
+
+timeout_check_ok:
+	destory_completion(&done);
+	smp_rmb();	/* read error code after completion */
+
+	if (errcode > 1)
+		return errcode;
+
+	return 0;
+}
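+/*
+ * Note: the SGE-response variant above differs from the direct-response one
+ * only in the completion section of the WQE: when buf_out is supplied it
+ * carries an SGE describing buf_out, so the device writes the full response
+ * into host memory instead of into the 64-bit direct_resp field.
+ */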
+static int cmdq_async_cmd(struct hinic_cmdq *cmdq, enum hinic_ack_type ack_type,
+			  enum hinic_mod_type mod, u8 cmd,
+			  struct hinic_cmd_buf *buf_in)
+{
+	struct hinic_wq *wq = cmdq->wq;
+	int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+	struct hinic_cmdq_wqe *curr_wqe, wqe;
+	int wrapped;
+
+	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+	if (!curr_wqe) {
+		spin_unlock_bh(&cmdq->cmdq_lock);
+		return -EBUSY;
+	}
+
+	memset(&wqe, 0, sizeof(wqe));
+
+	wrapped = cmdq->wrapped;
+	next_prod_idx = curr_prod_idx + num_wqebbs;
+	if (next_prod_idx >= cmdq->wq->q_depth) {
+		cmdq->wrapped = !cmdq->wrapped;
+		next_prod_idx -= cmdq->wq->q_depth;
+	}
+
+	cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped,
+			  ack_type, mod, cmd, curr_prod_idx);
+
+	/* The data that is written to HW should be in Big Endian Format */
+	hinic_cpu_to_be32(&wqe, wqe_size);
+
+	cmdq_wqe_fill(curr_wqe, &wqe);
+
+	cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_ASYNC;
+
+	cmdq_set_db(cmdq, HINIC_CMDQ_ASYNC, next_prod_idx);
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+
+	return 0;
+}
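+/*
+ * Note: on the asynchronous path above, ownership of buf_in passes to the
+ * driver. Its address is stashed in saved_async_buf inside the WQE and the
+ * buffer is freed later by cmdq_async_cmd_handler() once the CEQ reports
+ * completion, or by hinic_cmdq_flush_cmd() on teardown.
+ */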
+static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, u16 in_size)
+{
+	struct hinic_wq *wq = cmdq->wq;
+	struct hinic_cmdq_wqe *curr_wqe, wqe;
+	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+	int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE);
+
+	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+	/* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+	if (!curr_wqe) {
+		spin_unlock_bh(&cmdq->cmdq_lock);
+		sdk_err(cmdq->hwdev->dev_hdl, "Cannot get available wqebb for setting arm\n");
+		return -EBUSY;
+	}
+
+	memset(&wqe, 0, sizeof(wqe));
+
+	wrapped = cmdq->wrapped;
+
+	next_prod_idx = curr_prod_idx + num_wqebbs;
+	if (next_prod_idx >= wq->q_depth) {
+		cmdq->wrapped = !cmdq->wrapped;
+		next_prod_idx -= wq->q_depth;
+	}
+
+	cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL,
+			    wrapped, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_COMM,
+			    CMDQ_SET_ARM_CMD, curr_prod_idx);
+
+	/* The data that is written to HW should be in Big Endian Format */
+	hinic_cpu_to_be32(&wqe, wqe_size);
+
+	/* cmdq wqe is not shadow, therefore wqe will be written to wq */
+	cmdq_wqe_fill(curr_wqe, &wqe);
+
+	cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_SET_ARM;
+
+	cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx);
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+
+	return 0;
+}
+
+static int cmdq_params_valid(void *hwdev, struct hinic_cmd_buf *buf_in)
+{
+	if (!buf_in || !hwdev) {
+		pr_err("Invalid CMDQ buffer addr\n");
+		return -EINVAL;
+	}
+
+	if (!buf_in->size || buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
+		pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define WAIT_CMDQ_ENABLE_TIMEOUT	300
+
+static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
+{
+	unsigned long end;
+
+	end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
+	do {
+		if (cmdqs->status & HINIC_CMDQ_ENABLE)
+			return 0;
+	} while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag &&
+		 !cmdqs->disable_flag);
+
+	cmdqs->disable_flag = 1;
+
+	return -EBUSY;
+}
+
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+			   enum hinic_mod_type mod, u8 cmd,
+			   struct hinic_cmd_buf *buf_in, u64 *out_param,
+			   u32 timeout)
+{
+	struct hinic_cmdqs *cmdqs;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err) {
+		pr_err("Invalid CMDQ parameters\n");
+		return err;
+	}
+
+	cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+	    !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+		return -EPERM;
+
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+
+	err = hinic_func_own_get(hwdev);
+	if (err)
+		return err;
+
+	err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type,
+					mod, cmd, buf_in, out_param, timeout);
+	hinic_func_own_free(hwdev);
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+		return -ETIMEDOUT;
+	else
+		return err;
+}
+EXPORT_SYMBOL(hinic_cmdq_direct_resp);
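+/*
+ * Usage sketch for the wrapper above (illustrative only; "req", "mod" and
+ * "cmd" are placeholders rather than part of this driver). The request must
+ * fit in HINIC_CMDQ_MAX_DATA_SIZE bytes:
+ *
+ *	struct hinic_cmd_buf *buf = hinic_alloc_cmd_buf(hwdev);
+ *	u64 resp = 0;
+ *	int err;
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	memcpy(buf->buf, &req, sizeof(req));
+ *	buf->size = sizeof(req);
+ *	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, mod, cmd,
+ *				     buf, &resp, 0);
+ *	hinic_free_cmd_buf(hwdev, buf);
+ */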
+int hinic_cmdq_detail_resp(void *hwdev,
+			   enum hinic_ack_type ack_type,
+			   enum hinic_mod_type mod, u8 cmd,
+			   struct hinic_cmd_buf *buf_in,
+			   struct hinic_cmd_buf *buf_out,
+			   u32 timeout)
+{
+	struct hinic_cmdqs *cmdqs;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err)
+		return err;
+
+	cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+	    !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+		return -EPERM;
+
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+
+	err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type,
+					mod, cmd, buf_in, buf_out, timeout);
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+		return -ETIMEDOUT;
+	else
+		return err;
+}
+EXPORT_SYMBOL(hinic_cmdq_detail_resp);
+
+int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type,
+		     enum hinic_mod_type mod, u8 cmd,
+		     struct hinic_cmd_buf *buf_in)
+{
+	struct hinic_cmdqs *cmdqs;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err)
+		return err;
+
+	cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+	    !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+		return -EPERM;
+
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+
+	return cmdq_async_cmd(&cmdqs->cmdq[HINIC_CMDQ_ASYNC], ack_type, mod,
+			      cmd, buf_in);
+}
+EXPORT_SYMBOL(hinic_cmdq_async);
+
+int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id)
+{
+	struct hinic_cmdqs *cmdqs;
+	struct hinic_cmdq *cmdq;
+	struct hinic_cmdq_arm_bit arm_bit;
+	enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
+	u16 in_size;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+	    !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+		return -EPERM;
+
+	cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+	if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+		return -EBUSY;
+
+	if (q_type == HINIC_SET_ARM_CMDQ) {
+		if (q_id >= HINIC_MAX_CMDQ_TYPES)
+			return -EFAULT;
+
+		cmdq_type = q_id;
+	}
+	/* sq is using interrupt now, so we only need to set arm bit for cmdq,
+	 * remove comment below if need to set sq arm bit
+	 * else
+	 *	cmdq_type = HINIC_CMDQ_SYNC;
+	 */
+
+	cmdq = &cmdqs->cmdq[cmdq_type];
+
+	arm_bit.q_type = q_type;
+	arm_bit.q_id = q_id;
+	in_size = sizeof(arm_bit);
+
+	err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl,
+			"Failed to set arm for q_type: %d, qid %d\n",
+			q_type, q_id);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_arm_bit);
+
+static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
+				   struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+	struct hinic_cmdq_inline_wqe *inline_wqe;
+	struct hinic_cmdq_wqe_scmd *wqe_scmd;
+	struct hinic_ctrl *ctrl;
+	u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
+	int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+	int wqe_size = cmdq_get_wqe_size(buf_len);
+	u16 num_wqebbs;
+
+	if (wqe_size == WQE_LCMD_SIZE) {
+		wqe_lcmd = &wqe->wqe_lcmd;
+		ctrl = &wqe_lcmd->ctrl;
+	} else {
+		inline_wqe = &wqe->inline_wqe;
+		wqe_scmd = &inline_wqe->wqe_scmd;
+		ctrl = &wqe_scmd->ctrl;
+	}
+
+	/* clear HW busy bit */
+	ctrl->ctrl_info = 0;
+	cmdq->cmd_infos[ci].cmd_type = HINIC_CMD_TYPE_NONE;
+
+	wmb();	/* verify wqe is clear */
+
+	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
+	hinic_put_wqe(cmdq->wq, num_wqebbs);
+}
+
+static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq,
+				  struct hinic_cmdq_wqe *wqe, u16 cons_idx)
+{
+	u16 prod_idx = cons_idx;
+
+	spin_lock(&cmdq->cmdq_lock);
+
+	cmdq_update_cmd_status(cmdq, prod_idx, wqe);
+
+	if (cmdq->cmd_infos[prod_idx].cmpt_code) {
+		*(cmdq->cmd_infos[prod_idx].cmpt_code) =
+						CMDQ_COMPLETE_CMPT_CODE;
+		cmdq->cmd_infos[prod_idx].cmpt_code = NULL;
+	}
+
+	/* make sure cmpt_code operation before done operation */
+	smp_rmb();
+
+	if (cmdq->cmd_infos[prod_idx].done) {
+		complete(cmdq->cmd_infos[prod_idx].done);
+		cmdq->cmd_infos[prod_idx].done = NULL;
+	}
+
+	spin_unlock(&cmdq->cmdq_lock);
+
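+	/* With the waiter signalled and cmdq_lock dropped, clear the HW busy
+	 * bit and return the WQEBBs to the work queue so the slot can be
+	 * reused by subsequent commands.
+	 */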
+	clear_wqe_complete_bit(cmdq, wqe, cons_idx);
+}
+
+static void cmdq_async_cmd_handler(struct hinic_hwdev *hwdev,
+				   struct hinic_cmdq *cmdq,
+				   struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+	u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+	int addr_sz = sizeof(u64);
+
+	hinic_be32_to_cpu((void *)&buf, addr_sz);
+	if (buf)
+		hinic_free_cmd_buf(hwdev, (struct hinic_cmd_buf *)buf);
+
+	clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
+				struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+	struct hinic_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe;
+	struct hinic_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd;
+	struct hinic_ctrl *ctrl = &wqe_scmd->ctrl;
+	u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+	if (!WQE_COMPLETED(ctrl_info))
+		return -EBUSY;
+
+	clear_wqe_complete_bit(cmdq, wqe, ci);
+
+	return 0;
+}
+
+#define HINIC_CMDQ_WQE_HEAD_LEN		32
+static void hinic_dump_cmdq_wqe_head(struct hinic_hwdev *hwdev,
+				     struct hinic_cmdq_wqe *wqe)
+{
+	u32 i;
+	u32 *data = (u32 *)wqe;
+
+	for (i = 0; i < (HINIC_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) {
+		sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+			 data[i], data[i + 1], data[i + 2],
+			 data[i + 3]);
+	}
+}
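+/*
+ * Note on the handler below: it drains completed WQEs for the cmdq selected
+ * by the CEQE, dispatches them to the sync or async completion path, and
+ * finally re-arms the CEQ via hinic_set_arm_bit() unless the last event it
+ * processed was itself an arm command.
+ */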
+void hinic_cmdq_ceq_handler(void *handle, u32 ceqe_data)
+{
+	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)handle)->cmdqs;
+	enum hinic_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE);
+	struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
+	struct hinic_hwdev *hwdev = cmdqs->hwdev;
+	struct hinic_cmdq_wqe *wqe;
+	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+	struct hinic_ctrl *ctrl;
+	struct hinic_cmdq_cmd_info *cmd_info;
+	u32 ctrl_info;
+	u16 ci;
+	int set_arm = 1;
+
+	while ((wqe = hinic_read_wqe(cmdq->wq, 1, &ci)) != NULL) {
+		cmd_info = &cmdq->cmd_infos[ci];
+
+		if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
+			set_arm = 1;
+			break;
+		} else if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT ||
+			   cmd_info->cmd_type == HINIC_CMD_TYPE_FAKE_TIMEOUT) {
+			if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT) {
+				sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n",
+					 cmdq_type, ci);
+				hinic_dump_cmdq_wqe_head(hwdev, wqe);
+			}
+
+			set_arm = 1;
+			clear_wqe_complete_bit(cmdq, wqe, ci);
+		} else if (cmd_info->cmd_type == HINIC_CMD_TYPE_SET_ARM) {
+			/* the arm bit has already been set by this command */
+			set_arm = 0;
+
+			if (cmdq_arm_ceq_handler(cmdq, wqe, ci))
+				break;
+		} else {
+			set_arm = 1;
+
+			/* only the arm command uses an scmd wqe; all other
+			 * commands use lcmd wqes
+			 */
+			wqe_lcmd = &wqe->wqe_lcmd;
+			ctrl = &wqe_lcmd->ctrl;
+			ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+			if (!WQE_COMPLETED(ctrl_info))
+				break;
+
+			/* This memory barrier is needed to keep us from
+			 * reading any other fields out of the cmdq wqe until
+			 * we have verified the command has been processed and
+			 * written back.
+			 */
+			dma_rmb();
+
+			if (cmdq_type == HINIC_CMDQ_ASYNC)
+				cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
+			else
+				cmdq_sync_cmd_handler(cmdq, wqe, ci);
+		}
+	}
+
+	if (set_arm)
+		hinic_set_arm_bit(hwdev, HINIC_SET_ARM_CMDQ, cmdq_type);
+}
+
+static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
+				 struct hinic_cmdq_pages *cmdq_pages,
+				 struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+	struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
+	struct hinic_hwdev *hwdev = cmdqs->hwdev;
+	struct hinic_wq *wq = cmdq->wq;
+	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+	u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
+	u16 start_ci = (u16)wq->cons_idx;
+
+	/* The data in the HW is in Big Endian Format */
+	wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
+
+	pfn = CMDQ_PFN(wq_first_page_paddr);
+
+	ctxt_info->curr_wqe_page_pfn =
+		CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+		CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
+		CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+	/* if only one page is used, use 0-level CLA */
+	if (cmdq->wq->num_q_pages != 1) {
+		cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr;
+		pfn = CMDQ_PFN(cmdq_first_block_paddr);
+	}
+
+	ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+				  CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+
+	cmdq_ctxt->func_idx = hinic_global_func_id_hw(hwdev);
+	cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
+}
+
+bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+			      struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+	u64 curr_pg_pfn, wq_block_pfn;
+
+	if (cmdq_ctxt->ppf_idx != hinic_ppf_idx(hwdev) ||
+	    cmdq_ctxt->cmdq_id > HINIC_MAX_CMDQ_TYPES)
+		return false;
+
+	curr_pg_pfn = CMDQ_CTXT_PAGE_INFO_GET(ctxt_info->curr_wqe_page_pfn,
+					      CURR_WQE_PAGE_PFN);
+	wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_GET(ctxt_info->wq_block_pfn,
+						WQ_BLOCK_PFN);
+	/* VF must use 0-level CLA */
+	if (curr_pg_pfn != wq_block_pfn)
+		return false;
+
+	return true;
+}
+
+static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
+		     struct hinic_wq *wq, enum hinic_cmdq_type q_type)
+{
+	void __iomem *db_base;
+	int err = 0;
+
+	cmdq->wq = wq;
+	cmdq->cmdq_type = q_type;
+	cmdq->wrapped = 1;
+	cmdq->hwdev = hwdev;
+
+	spin_lock_init(&cmdq->cmdq_lock);
+
+	cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos),
+				  GFP_KERNEL);
+	if (!cmdq->cmd_infos) {
+		err = -ENOMEM;
+		goto cmd_infos_err;
+	}
+
+	err = hinic_alloc_db_addr(hwdev, &db_base, NULL);
+	if (err)
+		goto alloc_db_err;
+
+	cmdq->db_base = (u8 *)db_base;
+	return 0;
+
+alloc_db_err:
+	kfree(cmdq->cmd_infos);
+
+cmd_infos_err:
+	spin_lock_deinit(&cmdq->cmdq_lock);
+
+	return err;
+}
+
+static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
+{
+	hinic_free_db_addr(hwdev, cmdq->db_base, NULL);
+	kfree(cmdq->cmd_infos);
+	spin_lock_deinit(&cmdq->cmdq_lock);
+}
+
+int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+	struct hinic_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out = {0};
+	enum hinic_cmdq_type cmdq_type;
+	u16 in_size;
+	u16 out_size = sizeof(*cmdq_ctxt);
+	int err;
+
+	cmdq_type = HINIC_CMDQ_SYNC;
+	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+		cmdq_ctxt->func_idx = hinic_global_func_id_hw(hwdev);
+		in_size = sizeof(*cmdq_ctxt);
+		err =
hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_CMDQ_CTXT_SET, + cmdq_ctxt, in_size, + &cmdq_ctxt_out, &out_size, 0); + if (err || !out_size || cmdq_ctxt_out.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt_out.status, out_size); + return -EFAULT; + } + } + + cmdqs->status |= HINIC_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev, + struct hinic_cmdq *cmdq) +{ + struct hinic_cmdq_wqe *wqe; + struct hinic_cmdq_cmd_info *cmdq_info; + u16 ci, wqe_left, i; + u64 buf; + + spin_lock_bh(&cmdq->cmdq_lock); + wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta); + ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx); + for (i = 0; i < wqe_left; i++, ci++) { + ci = MASKED_WQE_IDX(cmdq->wq, ci); + cmdq_info = &cmdq->cmd_infos[ci]; + + if (cmdq_info->cmd_type == HINIC_CMD_TYPE_SET_ARM) + continue; + + if (cmdq->cmdq_type == HINIC_CMDQ_ASYNC) { + wqe = hinic_get_wqebb_addr(cmdq->wq, ci); + buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + wqe->wqe_lcmd.buf_desc.saved_async_buf = 0; + + hinic_be32_to_cpu((void *)&buf, sizeof(u64)); + if (buf) + hinic_free_cmd_buf(hwdev, + (struct hinic_cmd_buf *)buf); + } else { + if (cmdq_info->done) { + complete(cmdq_info->done); + cmdq_info->done = NULL; + cmdq_info->cmpt_code = NULL; + cmdq_info->direct_resp = NULL; + cmdq_info->errcode = NULL; + } + } + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic_cmdq_type cmdq_type; + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq); + } + + return hinic_set_cmdq_ctxts(hwdev); +} + +int hinic_cmdqs_init(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs; + struct hinic_cmdq_ctxt *cmdq_ctxt; + enum hinic_cmdq_type type, cmdq_type; + size_t saved_wqs_size; + u32 max_wqe_size; + int err; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + + saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); + cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n"); + err = -ENOMEM; + goto alloc_wqs_err; + } + + cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev->dev_hdl, + HINIC_CMDQ_BUF_SIZE, + HINIC_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + err = -ENOMEM; + goto pool_create_err; + } + + max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE); + err = hinic_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + hwdev->dev_hdl, HINIC_MAX_CMDQ_TYPES, + hwdev->wq_page_size, CMDQ_WQEBB_SIZE, + HINIC_CMDQ_DEPTH, max_wqe_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n"); + goto cmdq_alloc_err; + } + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, + &cmdqs->saved_wqs[cmdq_type], cmdq_type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", + cmdq_type); + goto init_cmdq_err; + } + + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq_pages, 
cmdq_ctxt); + } + + err = hinic_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + type = HINIC_CMDQ_SYNC; + for (; type < cmdq_type; type++) + free_cmdq(hwdev, &cmdqs->cmdq[type]); + + hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + +cmdq_alloc_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + +pool_create_err: + kfree(cmdqs->saved_wqs); + +alloc_wqs_err: + kfree(cmdqs); + + return err; +} + +void hinic_cmdqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC; + + cmdqs->status &= ~HINIC_CMDQ_ENABLE; + + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); + } + + hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs->saved_wqs); + + kfree(cmdqs); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h new file mode 100644 index 000000000000..67c7abb2bea9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_CMDQ_H_ +#define HINIC_CMDQ_H_ + +#define HINIC_DB_OFF 0x00000800 + +#define HINIC_SCMD_DATA_LEN 16 + +#define HINIC_CMDQ_DEPTH 4096 + +#define HINIC_CMDQ_BUF_SIZE 2048U +#define HINIC_CMDQ_BUF_HW_RSVD 8 +#define HINIC_CMDQ_MAX_DATA_SIZE \ + (HINIC_CMDQ_BUF_SIZE - HINIC_CMDQ_BUF_HW_RSVD) + +enum hinic_cmdq_type { + HINIC_CMDQ_SYNC, + HINIC_CMDQ_ASYNC, + HINIC_MAX_CMDQ_TYPES, +}; + +enum hinic_db_src_type { + HINIC_DB_SRC_CMDQ_TYPE, + HINIC_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hinic_cmdq_db_type { + HINIC_DB_SQ_RQ_TYPE, + HINIC_DB_CMDQ_TYPE, +}; + +/* CMDQ WQE CTRLS */ +struct hinic_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC_SCMD_DATA_LEN]; +}; + +struct hinic_lcmd_bufdesc { + struct hinic_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hinic_cmdq_db { + u32 db_info; + u32 rsvd; +}; + +struct hinic_status { + u32 status_info; +}; + +struct hinic_ctrl { + u32 ctrl_info; +}; + +struct hinic_sge_resp { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_cmdq_completion { + /* HW Format */ + union { + struct hinic_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic_cmdq_wqe_scmd { + struct hinic_cmdq_header header; + struct hinic_cmdq_db db; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_scmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_wqe_lcmd { + struct hinic_cmdq_header header; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_lcmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_inline_wqe { + struct hinic_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic_cmdq_wqe { + /* HW Format */ + union { + struct hinic_cmdq_inline_wqe inline_wqe; + struct hinic_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hinic_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +struct hinic_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct hinic_cmdq_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 cmdq_id; + u8 ppf_idx; + + u8 rsvd1[4]; + + struct hinic_cmdq_ctxt_info ctxt_info; +}; + +enum hinic_cmdq_status { + HINIC_CMDQ_ENABLE = BIT(0), +}; + +enum hinic_cmdq_cmd_type { + HINIC_CMD_TYPE_NONE, + HINIC_CMD_TYPE_SET_ARM, + HINIC_CMD_TYPE_DIRECT_RESP, + HINIC_CMD_TYPE_SGE_RESP, + HINIC_CMD_TYPE_ASYNC, + HINIC_CMD_TYPE_TIMEOUT, + HINIC_CMD_TYPE_FAKE_TIMEOUT, +}; + +struct hinic_cmdq_cmd_info { + enum hinic_cmdq_cmd_type cmd_type; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; +}; + +struct hinic_cmdq { + struct hinic_wq *wq; + + enum hinic_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + /* doorbell area */ + u8 __iomem *db_base; + + struct hinic_cmdq_ctxt cmdq_ctxt; + + struct hinic_cmdq_cmd_info *cmd_infos; + + struct hinic_hwdev *hwdev; +}; + +struct hinic_cmdqs { + struct hinic_hwdev *hwdev; + + struct pci_pool *cmd_buf_pool; + + struct hinic_wq *saved_wqs; + + struct hinic_cmdq_pages cmdq_pages; + struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; + + u32 status; + u32 disable_flag; +}; + +void hinic_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); + +int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev); + +bool hinic_cmdq_idle(struct hinic_cmdq *cmdq); + +int hinic_cmdqs_init(struct hinic_hwdev *hwdev); + +void hinic_cmdqs_free(struct hinic_hwdev *hwdev); + +bool hinic_cmdq_check_vf_ctxt(struct 
hinic_hwdev *hwdev, + struct hinic_cmdq_ctxt *cmdq_ctxt); + +void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev, + struct hinic_cmdq *cmdq); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c deleted file mode 100644 index 8e9b4a6c88c2..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include - -#include "hinic_common.h" - -/** - * hinic_cpu_to_be32 - convert data to big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - **/ -void hinic_cpu_to_be32(void *data, int len) -{ - u32 *mem = data; - int i; - - len = len / sizeof(u32); - - for (i = 0; i < len; i++) { - *mem = cpu_to_be32(*mem); - mem++; - } -} - -/** - * hinic_be32_to_cpu - convert data from big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - **/ -void hinic_be32_to_cpu(void *data, int len) -{ - u32 *mem = data; - int i; - - len = len / sizeof(u32); - - for (i = 0; i < len; i++) { - *mem = be32_to_cpu(*mem); - mem++; - } -} - -/** - * hinic_set_sge - set dma area in scatter gather entry - * @sge: scatter gather entry - * @addr: dma address - * @len: length of relevant data in the dma address - **/ -void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len) -{ - sge->hi_addr = upper_32_bits(addr); - sge->lo_addr = lower_32_bits(addr); - sge->len = len; -} - -/** - * hinic_sge_to_dma - get dma address from scatter gather entry - * @sge: scatter gather entry - * - * Return dma address of sg entry - **/ -dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge) -{ - return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h deleted file mode 100644 index a0de9d9644c6..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_COMMON_H -#define HINIC_COMMON_H - -#include - -#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) -#define LOWER_8_BITS(data) ((data) & 0xFF) - -struct hinic_sge { - u32 hi_addr; - u32 lo_addr; - u32 len; -}; - -void hinic_cpu_to_be32(void *data, int len); - -void hinic_be32_to_cpu(void *data, int len); - -void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len); - -dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_csr.h new file mode 100644 index 000000000000..b126ff0459ce --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_csr.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_CSR_H +#define HINIC_CSR_H + +#define HINIC_CSR_GLOBAL_BASE_ADDR 0x4000 + +/* HW interface registers */ +#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8 +#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 +#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 + +#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100 +#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104 +#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108 +#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C + +#define HINIC_CSR_DMA_ATTR_TBL_BASE 0xC80 + +#define HINIC_ELECTION_BASE 0x200 + +#define HINIC_CSR_DMA_ATTR_TBL_STRIDE 0x4 +#define HINIC_CSR_DMA_ATTR_TBL_ADDR(idx) \ + (HINIC_CSR_DMA_ATTR_TBL_BASE \ + + (idx) * HINIC_CSR_DMA_ATTR_TBL_STRIDE) + +#define HINIC_PPF_ELECTION_STRIDE 0x4 +#define HINIC_CSR_MAX_PORTS 4 +#define HINIC_CSR_PPF_ELECTION_ADDR \ + (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE) + +#define HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE + \ + HINIC_CSR_MAX_PORTS * HINIC_PPF_ELECTION_STRIDE) + +/* MSI-X registers */ +#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 +#define HINIC_CSR_MSIX_CNT_BASE 0x2004 + +#define HINIC_CSR_MSIX_STRIDE 0x8 + +#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \ + (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ + (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +/* EQ registers */ +#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 +#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 + +#define HINIC_EQ_MTT_OFF_STRIDE 0x40 + +#define HINIC_CSR_AEQ_MTT_OFF(id) \ + (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_CEQ_MTT_OFF(id) \ + (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)((type == HINIC_AEQ) ? \ + HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \ + HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) + +#define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)((type == HINIC_AEQ) ? 
\ + HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \ + HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) + +#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HINIC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08 +#define HINIC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C + +#define HINIC_EQ_OFF_STRIDE 0x80 + +#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000 +#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004 +#define HINIC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008 +#define HINIC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C + +/* For multi-host mgmt + * CEQ_CTRL_0_ADDR: bit26~29: uP write vf mode is normal(0x0),bmgw(0x1), + * vmgw(0x2) + */ +#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \ + (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \ + (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ + (HINIC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ + (HINIC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +/* API CMD registers */ +#define HINIC_CSR_API_CMD_BASE 0xF000 + +#define HINIC_CSR_API_CMD_STRIDE 0x100 + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +/* VF control registers in pf */ +#define HINIC_PF_CSR_VF_FLUSH_BASE 0x1F400 +#define HINIC_PF_CSR_VF_FLUSH_STRIDE 0x4 + +#define HINIC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C + +#define HINIC_ICPL_RESERVD_ADDR 0x9204 + +#define HINIC_PF_CSR_VF_FLUSH_OFF(idx) \ + (HINIC_PF_CSR_VF_FLUSH_BASE + (idx) * HINIC_PF_CSR_VF_FLUSH_STRIDE) + +#define HINIC_IPSU_CHANNEL_NUM 7 +#define HINIC_IPSU_CHANNEL0_ADDR 0x404 +#define HINIC_IPSU_CHANNEL_OFFSET 0x14 +#define HINIC_IPSU_DIP_OFFSET 13 +#define HINIC_IPSU_SIP_OFFSET 14 +#define HINIC_IPSU_DIP_SIP_MASK \ + ((0x1 << HINIC_IPSU_SIP_OFFSET) | (0x1 << HINIC_IPSU_DIP_OFFSET)) + +#define HINIC_IPSURX_VXLAN_DPORT_ADDR 0x6d4 + +/* For multi-host mgmt + * 0x75C0: bit0~3: uP write, host mode is bmwg or normal host + * bit4~7: master host ppf write when function initializing + * bit8~23: only for slave host PXE + * 0x75C4: slave host status + * 
bit0~7: host 0~7 functions status + */ +#define HINIC_HOST_MODE_ADDR 0x75C0 +#define HINIC_MULT_HOST_SLAVE_STATUS_ADDR 0x75C4 + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h new file mode 100644 index 000000000000..37f521047909 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __HINIC_CTX_DEF_H__ +#define __HINIC_CTX_DEF_H__ + +#ifdef __cplusplus + #if __cplusplus +extern "C"{ + #endif +#endif /* __cplusplus */ + +#define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask) + +#define HINIC_CEQE_QN_MASK 0x3FFU + +#define HINIC_Q_CTXT_MAX 42 + +#define HINIC_RQ_CQ_MAX 128 + +#define MAX_WQE_SIZE(max_sge, wqebb_size) \ + ((max_sge <= 2) ? (wqebb_size) : \ + ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size))) + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define HINIC_CI_Q_ADDR_SIZE (64) + +#define CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define HINIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + \ + (q_id) * HINIC_CI_Q_ADDR_SIZE) + +#define HINIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + \ + (q_id) * HINIC_CI_Q_ADDR_SIZE) + +#define Q_CTXT_SIZE 48 +#define TSO_LRO_CTXT_SIZE 240 + +#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \ + + (q_id) * Q_CTXT_SIZE) + +#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \ + + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE) + +#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \ + + (num_sqs) * sizeof(struct hinic_sq_ctxt))) + +#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \ + + (num_rqs) * sizeof(struct hinic_rq_ctxt))) + +#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT 8 +#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 +#define SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 +#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT 31 + +#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK 0x1FU +#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FFU +#define SQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define SQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U + +#define SQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \ + SQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << SQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define SQ_CTXT_CI_IDX_SHIFT 11 +#define SQ_CTXT_CI_OWNER_SHIFT 23 + +#define SQ_CTXT_CI_IDX_MASK 0xFFFU +#define SQ_CTXT_CI_OWNER_MASK 0x1U + +#define SQ_CTXT_CI_SET(val, member) (((val) & \ + SQ_CTXT_CI_##member##_MASK) \ + << SQ_CTXT_CI_##member##_SHIFT) + +#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define SQ_CTXT_WQ_PAGE_PI_SHIFT 20 + +#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SQ_CTXT_WQ_PAGE_PI_MASK 0xFFFU + +#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \ + SQ_CTXT_WQ_PAGE_##member##_MASK) \ + << SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define 
SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define SQ_CTXT_PREF_CI_SHIFT 20 + +#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SQ_CTXT_PREF_CI_MASK 0xFFFU + +#define SQ_CTXT_PREF_SET(val, member) (((val) & \ + SQ_CTXT_PREF_##member##_MASK) \ + << SQ_CTXT_PREF_##member##_SHIFT) + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \ + SQ_CTXT_WQ_BLOCK_##member##_MASK) \ + << SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 +#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT 1 + +#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define RQ_CTXT_CEQ_ATTR_OWNER_MASK 0x1U + +#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \ + RQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define RQ_CTXT_PI_IDX_SHIFT 0 +#define RQ_CTXT_PI_INTR_SHIFT 22 +#define RQ_CTXT_PI_CEQ_ARM_SHIFT 31 + +#define RQ_CTXT_PI_IDX_MASK 0xFFFU +#define RQ_CTXT_PI_INTR_MASK 0x3FFU +#define RQ_CTXT_PI_CEQ_ARM_MASK 0x1U + +#define RQ_CTXT_PI_SET(val, member) (((val) & \ + RQ_CTXT_PI_##member##_MASK) << \ + RQ_CTXT_PI_##member##_SHIFT) + +#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define RQ_CTXT_WQ_PAGE_CI_SHIFT 20 + +#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define RQ_CTXT_WQ_PAGE_CI_MASK 0xFFFU + +#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \ + RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define RQ_CTXT_PREF_CI_SHIFT 20 + +#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define RQ_CTXT_PREF_CI_MASK 0xFFFU + +#define RQ_CTXT_PREF_SET(val, member) (((val) & \ + RQ_CTXT_PREF_##member##_MASK) << \ + RQ_CTXT_PREF_##member##_SHIFT) + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \ + RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) + +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + +enum sq_cflag { + CFLAG_DATA_PATH = 0, +}; + +enum hinic_qp_ctxt_type { + HINIC_QP_CTXT_TYPE_SQ, + HINIC_QP_CTXT_TYPE_RQ, +}; + +/* service type relates define */ +enum cfg_svc_type_en { + CFG_SVC_NIC_BIT0 = (1 << 0), + CFG_SVC_ROCE_BIT1 = (1 << 1), + CFG_SVC_FCOE_BIT2 = (1 << 2), + CFG_SVC_TOE_BIT3 = (1 << 3), + CFG_SVC_IWARP_BIT4 = (1 << 4), + CFG_SVC_FC_BIT5 = (1 << 5), + + CFG_SVC_FIC_BIT6 = (1 << 6), + CFG_SVC_OVS_BIT7 = (1 << 7), + CFG_SVC_ACL_BIT8 = (1 << 8), /* used for 8PF ovs in up */ + CFG_SVC_IOE_BIT9 = (1 << 9), + CFG_SVC_HWPT_BIT10 = (1 << 10), + + CFG_SVC_FT_EN = (CFG_SVC_FCOE_BIT2 | CFG_SVC_TOE_BIT3 | + CFG_SVC_FC_BIT5 | CFG_SVC_IOE_BIT9), + CFG_SVC_RDMA_EN = (CFG_SVC_ROCE_BIT1 | CFG_SVC_IWARP_BIT4) +}; + +#define IS_NIC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0) +#define IS_ROCE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & 
CFG_SVC_ROCE_BIT1) +#define IS_FCOE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FCOE_BIT2) +#define IS_TOE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_TOE_BIT3) +#define IS_IWARP_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IWARP_BIT4) +#define IS_FC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FC_BIT5) +#define IS_FIC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FIC_BIT6) +#define IS_OVS_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_OVS_BIT7) +#define IS_ACL_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ACL_BIT8) +#define IS_IOE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IOE_BIT9) +#define IS_FT_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FT_EN) +#define IS_RDMA_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_RDMA_EN) +#define IS_HWPT_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_HWPT_BIT10) + +#ifdef __cplusplus + #if __cplusplus +} + #endif +#endif /* __cplusplus */ +#endif /* __HINIC_CTX_DEF_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbg.h b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h new file mode 100644 index 000000000000..0f6797606788 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_NIC_DBG_H_ +#define HINIC_NIC_DBG_H_ + +u16 hinic_dbg_get_qp_num(void *hwdev); + +void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id); + +void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id); + +void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id); + +u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id); + +u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id); + +u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id); + +void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id); + +u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id); + +u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id); + +int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr, + u64 *phy_addr, u32 *pg_idx); + +u16 hinic_dbg_get_global_qpn(const void *hwdev); + +int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size); + +int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size); + +int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data); + +int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask); + +int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value); + +int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value); + +int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value); + +int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value); + +int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 value); + +int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value1, u64 *value2); + +int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 value1, u64 value2); + +int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); + +int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); + +int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val); + +int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size); + +u16 hinic_dbg_clear_hw_stats(void *hwdev); + +void hinic_get_chip_fault_stats(const void *hwdev, + u8 *chip_fault_stats, int offset); + +int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c new file mode 100644 index 000000000000..72ee5c00b940 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_nic_dev.h" +#include "hinic_lld.h" +#include "hinic_dbgtool_knl.h" + +struct ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +#define DBGTOOL_MSG_MAX_SIZE 2048ULL +#define HINIC_SELF_CMD_UP2PF_FFM 0x26 + +void *g_card_node_array[MAX_CARD_NUM] = {0}; +void *g_card_vir_addr[MAX_CARD_NUM] = {0}; +u64 g_card_phy_addr[MAX_CARD_NUM] = {0}; +/* lock for g_card_vir_addr */ +struct mutex g_addr_lock; +int card_id; + +/* dbgtool character device name, class name, dev path */ +#define CHR_DEV_DBGTOOL "dbgtool_chr_dev" +#define CLASS_DBGTOOL "dbgtool_class" +#define DBGTOOL_DEV_PATH "/dev/dbgtool_chr_dev" + +struct dbgtool_k_glb_info { + struct semaphore dbgtool_sem; + struct ffm_record_info *ffm; +}; + +dev_t dbgtool_dev_id; /* device id */ +struct cdev dbgtool_chr_dev; /* struct of char device */ + +struct class *dbgtool_d_class; /* struct of char class */ + +int g_dbgtool_init_flag; +int g_dbgtool_ref_cnt; + +static int dbgtool_knl_open(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static int dbgtool_knl_release(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static ssize_t dbgtool_knl_read(struct file *pfile, + char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static ssize_t dbgtool_knl_write(struct file *pfile, + const char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static bool is_valid_phy_addr(u64 offset) +{ + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + if (offset == g_card_phy_addr[i]) + return true; + } + + return false; +} + +int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vmsize = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { + pr_err("Map size = %lu is bigger than allocated\n", vmsize); + return -EAGAIN; + } + + if (offset && !is_valid_phy_addr((u64)offset) && + !hinic_is_valid_bar_addr((u64)offset)) { + pr_err("Offset is invalid\n"); + return -EAGAIN; + } + + /* old versions of the tool set vma->vm_pgoff to 0 */ + phy_addr = offset ? 
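+ /* non-zero offset: the user passed a card or BAR physical address validated above; zero: fall back to the current card's pages allocated in dbgtool_knl_pf_dev_info_get() */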
offset : g_card_phy_addr[card_id]; + if (!phy_addr) { + pr_err("Card_id = %d physical address is 0\n", card_id); + return -EAGAIN; + } + + if (remap_pfn_range(vma, vma->vm_start, + (phy_addr >> PAGE_SHIFT), + vmsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/** + * dbgtool_knl_api_cmd_read - used for read operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_api_cmd_read(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_rd.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtain the chipif pointer according to pf_id */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id); + return -EFAULT; + } + + /* alloc cmd and ack memory */ + size = para->param.api_rd.size; + if (para->param.api_rd.size == 0) { + pr_err("Read cmd size invalid\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc read cmd mem fail\n"); + return -ENOMEM; + } + + ack_size = para->param.api_rd.ack_size; + if (para->param.api_rd.ack_size == 0) { + pr_err("Read cmd ack size is 0\n"); + ret = -EINVAL; + goto alloc_ack_mem_fail; + } + + ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL); + if (!ack) { + pr_err("Alloc read ack mem fail\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto copy_user_cmd_fail; + } + /* Invoke the api cmd interface to read the content */ + ret = hinic_api_cmd_read_ack(hwdev, para->param.api_rd.dest, + cmd, size, ack, ack_size); + if (ret) { + pr_err("Api send single cmd ack fail!\n"); + goto api_rd_fail; + } + + /* Copy the contents of the ack to the user state */ + if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) { + pr_err("Copy ack to user fail\n"); + ret = -EFAULT; + } +api_rd_fail: +copy_user_cmd_fail: + kfree(ack); +alloc_ack_mem_fail: + kfree(cmd); + return ret; +} + +/** + * dbgtool_knl_api_cmd_write - used for write operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_api_cmd_write(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_wr.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining chipif pointer according to pf_id */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null\n", pf_id); + return -EFAULT; + } + + /* alloc cmd memory */ + size = para->param.api_wr.size; + if (para->param.api_wr.size == 0) { + pr_err("Write cmd size invalid\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc write cmd mem fail\n"); + return -ENOMEM; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto copy_user_cmd_fail; + } + + /* api cmd interface is invoked to write the content */ + ret = hinic_api_cmd_write_nack(hwdev, 
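+ /* "nack" write path: no ack payload is expected back, unlike hinic_api_cmd_read_ack() above */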
para->param.api_wr.dest, + cmd, size); + if (ret) + pr_err("Api send single cmd nack fail\n"); + +copy_user_cmd_fail: + kfree(cmd); + return ret; +} + +void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx, + void **g_func_handle_array) +{ + u32 func_idx; + struct hinic_hwdev *hwdev; + + if (!dev_info) { + pr_err("Params error!\n"); + return; + } + + /* pf at most 16 */ + for (func_idx = 0; func_idx < 16; func_idx++) { + hwdev = (struct hinic_hwdev *)g_func_handle_array[func_idx]; + + dev_info[func_idx].phy_addr = g_card_phy_addr[card_idx]; + + if (!hwdev) { + dev_info[func_idx].bar0_size = 0; + dev_info[func_idx].bus = 0; + dev_info[func_idx].slot = 0; + dev_info[func_idx].func = 0; + } else { + dev_info[func_idx].bar0_size = + pci_resource_len + (((struct pci_dev *)hwdev->pcidev_hdl), 0); + dev_info[func_idx].bus = + ((struct pci_dev *) + hwdev->pcidev_hdl)->bus->number; + dev_info[func_idx].slot = + PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + dev_info[func_idx].func = + PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + } + } +} + +/** + * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para, + void **g_func_handle_array) +{ + struct pf_dev_info dev_info[16] = { {0} }; + unsigned char *tmp; + int i; + + mutex_lock(&g_addr_lock); + if (!g_card_vir_addr[card_id]) { + g_card_vir_addr[card_id] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_card_vir_addr[card_id]) { + pr_err("Alloc dbgtool api chain fail!\n"); + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + memset(g_card_vir_addr[card_id], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_card_phy_addr[card_id] = + virt_to_phys(g_card_vir_addr[card_id]); + if (!g_card_phy_addr[card_id]) { + pr_err("phy addr for card %d is 0\n", card_id); + free_pages((unsigned long)g_card_vir_addr[card_id], + DBGTOOL_PAGE_ORDER); + g_card_vir_addr[card_id] = NULL; + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + tmp = g_card_vir_addr[card_id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_addr_lock); + + chipif_get_all_pf_dev_info(dev_info, card_id, g_func_handle_array); + + /* Copy the dev_info to user mode */ + if (copy_to_user(para->param.dev_info, dev_info, + (unsigned int)sizeof(dev_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_rd - Read ffm information + * @para: the dbgtool parameter + * @dbgtool_info: the dbgtool info + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + /* Copy the ffm_info to user mode */ + if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, + (unsigned int)sizeof(struct ffm_record_info))) { + pr_err("Copy ffm_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_clr - Clear FFM information + * @para: unused + * @dbgtool_info: the dbgtool info + */ +void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + dbgtool_info->ffm->ffm_num = 0; +} + +/** + * dbgtool_knl_msg_to_up - After receiving dbgtool command sends a message to uP + * @para: the dbgtool parameter + * @g_func_handle_array: global function 
handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_msg_to_up(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + void *buf_in; + void *buf_out; + u16 out_size; + u8 pf_id; + + if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("User data(%d) more than 2KB\n", + para->param.msg2up.in_size); + return -EFAULT; + } + + pf_id = para->param.msg2up.pf_id; + /* pf at most 16 */ + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id); + return -EFAULT; + } + + if (!g_func_handle_array[pf_id]) { + pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id); + return -EFAULT; + } + + /* alloc buf_in and buf_out memory, apply for 2K */ + buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL); + if (!buf_in) { + pr_err("Alloc buf_in mem fail\n"); + return -ENOMEM; + } + + buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL); + if (!buf_out) { + pr_err("Alloc buf_out mem fail\n"); + ret = -ENOMEM; + goto alloc_buf_out_mem_fail; + } + + /* copy buf_in from the user state */ + if (copy_from_user(buf_in, para->param.msg2up.buf_in, + (unsigned long)para->param.msg2up.in_size)) { + pr_err("Copy buf_in from user fail\n"); + ret = -EFAULT; + goto copy_user_buf_in_fail; + } + + out_size = DBGTOOL_MSG_MAX_SIZE; + /* Invoke the pf2up communication interface */ + ret = hinic_msg_to_mgmt_sync(g_func_handle_array[pf_id], + para->param.msg2up.mod, + para->param.msg2up.cmd, + buf_in, + para->param.msg2up.in_size, + buf_out, + &out_size, + 0); + if (ret) + goto msg_2_up_fail; + + /* Copy the out_size and buf_out content to user mode */ + if (copy_to_user(para->param.msg2up.out_size, &out_size, + (unsigned int)sizeof(out_size))) { + pr_err("Copy out_size to user fail\n"); + ret = -EFAULT; + goto copy_out_size_fail; + } + + if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) { + pr_err("Copy buf_out to user fail\n"); + ret = -EFAULT; + } + +copy_out_size_fail: +msg_2_up_fail: +copy_user_buf_in_fail: + kfree(buf_out); +alloc_buf_out_mem_fail: + kfree(buf_in); + return ret; +} + +long dbgtool_knl_free_mem(int id) +{ + unsigned char *tmp; + int i; + + mutex_lock(&g_addr_lock); + + if (!g_card_vir_addr[id]) { + mutex_unlock(&g_addr_lock); + return 0; + } + + tmp = g_card_vir_addr[id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + + free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER); + g_card_vir_addr[id] = NULL; + g_card_phy_addr[id] = 0; + + mutex_unlock(&g_addr_lock); + + return 0; +} + +/** + * dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry + * @pfile: the pointer to file + * @cmd: the command type + * @arg: the pointer to the user-space dbgtool_param + */ +long dbgtool_knl_unlocked_ioctl(struct file *pfile, + unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + unsigned int real_cmd; + struct dbgtool_param param; + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *card_info = NULL; + int i; + + (void)memset(&param, 0, sizeof(param)); + + if (copy_from_user(&param, (void *)arg, sizeof(param))) { + pr_err("Copy param from user fail\n"); + return -EFAULT; + } + + param.chip_name[IFNAMSIZ - 1] = '\0'; + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Can't find this card %s\n", param.chip_name); + return -EFAULT; + } + + card_id = i; + + dbgtool_info = (struct dbgtool_k_glb_info 
*)card_info->dbgtool_info; + + down(&dbgtool_info->dbgtool_sem); + + real_cmd = _IOC_NR(cmd); + + switch (real_cmd) { + case DBGTOOL_CMD_API_RD: + ret = dbgtool_knl_api_cmd_read(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_API_WR: + ret = dbgtool_knl_api_cmd_write(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FFM_RD: + ret = dbgtool_knl_ffm_info_rd(&param, dbgtool_info); + break; + case DBGTOOL_CMD_FFM_CLR: + dbgtool_knl_ffm_info_clr(&param, dbgtool_info); + break; + case DBGTOOL_CMD_PF_DEV_INFO_GET: + ret = dbgtool_knl_pf_dev_info_get(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_MSG_2_UP: + ret = dbgtool_knl_msg_to_up(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FREE_MEM: + ret = dbgtool_knl_free_mem(i); + break; + default: + pr_err("Dbgtool cmd(0x%x) not supported\n", real_cmd); + ret = -EFAULT; + } + + up(&dbgtool_info->dbgtool_sem); + return ret; +} + +/** + * ffm_intr_msg_record - record the FFM interrupt information reported by up + * @handle: the function handle + * @buf_in: the pointer to input buffer + * @in_size: the size of input buffer + * @buf_out: the pointer to output buffer + * @out_size: the pointer to output buffer size + */ +void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct ffm_intr_info *intr; + u32 ffm_idx; + struct timex txc; + struct rtc_time rctm; + struct card_node *card_info = NULL; + bool flag = false; + int i, j; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + + for (j = 0; j < MAX_FUNCTION_NUM; j++) { + if (handle == card_info->func_handle_array[j]) { + flag = true; + break; + } + } + + if (flag) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Id(%d) can't find this card\n", i); + return; + } + + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + if (!dbgtool_info) { + pr_err("Dbgtool info is null\n"); + return; + } + + intr = (struct ffm_intr_info *)buf_in; + + if (!dbgtool_info->ffm) + return; + + ffm_idx = dbgtool_info->ffm->ffm_num; + if (ffm_idx < FFM_RECORD_NUM_MAX) { + pr_info("%s: recv intr, ffm_idx: %d\n", __func__, ffm_idx); + + dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id; + dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level; + dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr = + intr->err_csr_addr; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_value = + intr->err_csr_value; + + /* Obtain the current UTC time */ + do_gettimeofday(&txc.time); + + /* Convert the epoch time to broken-down time, shifted to UTC+8 */ + rtc_time_to_tm((unsigned long)txc.time.tv_sec + + 60 * 60 * 8, &rctm); + + /* tm_year starts from 1900; 0->1900, 1->1901, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].year = + (u16)(rctm.tm_year + 1900); + /* tm_mon starts from 0, 0 indicates January, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1; + dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday; + dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour; + dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min; + dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec; + + dbgtool_info->ffm->ffm_num++; + } +} + +static const struct file_operations dbgtool_file_operations = { + .owner = THIS_MODULE, + .open = dbgtool_knl_open, + .release = dbgtool_knl_release, + .read = dbgtool_knl_read, + .write = dbgtool_knl_write, + .unlocked_ioctl = dbgtool_knl_unlocked_ioctl, + .mmap = hinic_mem_mmap, +}; + +/** + * dbgtool_knl_init - 
dbgtool character device init + * @vhwdev: the pointer to hardware device + * @chip_node: the pointer to card node + * Return: 0 - success, negative - failure + */ +int dbgtool_knl_init(void *vhwdev, void *chip_node) +{ + int ret = 0; + int id; + struct dbgtool_k_glb_info *dbgtool_info; + struct device *pdevice; + struct card_node *chip_info = (struct card_node *)chip_node; + struct hinic_hwdev *hwdev = vhwdev; + + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + if (ret) { + pr_err("Failed to create sysfs file\n"); + return ret; + } + + chip_info->func_handle_array[hinic_global_func_id(hwdev)] = hwdev; + + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, HINIC_SELF_CMD_UP2PF_FFM, + ffm_intr_msg_record); + + if (chip_info->dbgtool_info) { + chip_info->func_num++; + return 0; + } + + dbgtool_info = (struct dbgtool_k_glb_info *) + kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL); + if (!dbgtool_info) { + pr_err("Failed to allocate dbgtool_info\n"); + ret = -ENOMEM; + goto dbgtool_info_fail; + } + chip_info->dbgtool_info = dbgtool_info; + + /* FFM init */ + dbgtool_info->ffm = (struct ffm_record_info *) + kzalloc(sizeof(struct ffm_record_info), + GFP_KERNEL); + if (!dbgtool_info->ffm) { + pr_err("Failed to allocate ffm record info\n"); + ret = -ENOMEM; + goto dbgtool_info_ffm_fail; + } + + sema_init(&dbgtool_info->dbgtool_sem, 1); + + ret = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id); + if (ret <= 0) { + pr_err("Failed to get hinic id\n"); + goto sscanf_chdev_fail; + } + + g_card_node_array[id] = chip_info; + chip_info->func_num++; + + if (g_dbgtool_init_flag) { + g_dbgtool_ref_cnt++; + /* already initialized */ + return 0; + } + + /* alloc device id */ + ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL); + if (ret) { + pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret); + goto alloc_chdev_fail; + } + + /* init device */ + cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations); + + /* add device */ + ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1); + if (ret) { + pr_err("Add dbgtool dev fail, ret=0x%x\n", ret); + goto cdev_add_fail; + } + + dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL); + if (IS_ERR(dbgtool_d_class)) { + pr_err("Create dbgtool class fail\n"); + ret = -EFAULT; + goto cls_create_fail; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(dbgtool_d_class, NULL, + dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL); + if (IS_ERR(pdevice)) { + pr_err("Create dbgtool device fail\n"); + ret = -EFAULT; + goto dev_create_fail; + } + g_dbgtool_init_flag = 1; + g_dbgtool_ref_cnt = 1; + mutex_init(&g_addr_lock); + + return 0; + +dev_create_fail: + class_destroy(dbgtool_d_class); +cls_create_fail: + cdev_del(&(dbgtool_chr_dev)); +cdev_add_fail: + unregister_chrdev_region(dbgtool_dev_id, 1); +alloc_chdev_fail: + g_card_node_array[id] = NULL; +sscanf_chdev_fail: + kfree(dbgtool_info->ffm); +dbgtool_info_ffm_fail: + kfree(dbgtool_info); + dbgtool_info = NULL; + chip_info->dbgtool_info = NULL; +dbgtool_info_fail: + hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM); + chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL; + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + return ret; +} + +/** + * dbgtool_knl_deinit - dbgtool character device deinit + * @vhwdev: the pointer to hardware device + * 
@chip_node: the pointer to card node + */ +void dbgtool_knl_deinit(void *vhwdev, void *chip_node) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *chip_info = (struct card_node *)chip_node; + int id; + int err; + struct hinic_hwdev *hwdev = vhwdev; + + if (hinic_func_type(hwdev) == TYPE_VF) + return; + + hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM); + + chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL; + + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + + chip_info->func_num--; + if (chip_info->func_num) + return; + + err = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id); + if (err <= 0) + pr_err("Failed to get hinic id\n"); + + g_card_node_array[id] = NULL; + + dbgtool_info = chip_info->dbgtool_info; + /* FFM deinit */ + kfree(dbgtool_info->ffm); + dbgtool_info->ffm = NULL; + + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + + (void)dbgtool_knl_free_mem(id); + + if (g_dbgtool_init_flag) { + if ((--g_dbgtool_ref_cnt)) + return; + } + + if (!dbgtool_d_class) + return; + + device_destroy(dbgtool_d_class, dbgtool_dev_id); + class_destroy(dbgtool_d_class); + dbgtool_d_class = NULL; + + cdev_del(&(dbgtool_chr_dev)); + unregister_chrdev_region(dbgtool_dev_id, 1); + + g_dbgtool_init_flag = 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h new file mode 100644 index 000000000000..6e131612e0d0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef __DBGTOOL_KNL_H__ +#define __DBGTOOL_KNL_H__ + +#define DBG_TOOL_MAGIC 'w' + +/* dbgtool command type */ +/* You can add the required dbgtool commands here; through these + * commands the tool can invoke all x86 kernel mode driver interfaces + */ +enum dbgtool_cmd { + DBGTOOL_CMD_API_RD = 0, + DBGTOOL_CMD_API_WR, + + DBGTOOL_CMD_FFM_RD, + DBGTOOL_CMD_FFM_CLR, + + DBGTOOL_CMD_PF_DEV_INFO_GET, + + DBGTOOL_CMD_MSG_2_UP, + + DBGTOOL_CMD_FREE_MEM, + DBGTOOL_CMD_NUM +}; + +struct api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +/* At most FFM_RECORD_NUM_MAX interrupts will be recorded in the FFM */ +#define FFM_RECORD_NUM_MAX 64 + +struct ffm_intr_tm_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + + u8 sec; /* second */ + u8 min; /* minute */ + u8 hour; /* hour */ + u8 mday; /* day */ + u8 mon; /* month */ + u16 year; /* year */ +}; + +struct ffm_record_info { + u32 ffm_num; + struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; +}; + +struct msg_2_up { + u8 pf_id; /* which pf sends messages to the up */ + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct dbgtool_param { + union { + struct api_cmd_rd api_rd; + struct api_cmd_wr api_wr; + struct pf_dev_info *dev_info; + struct ffm_record_info *ffm_rd; + struct msg_2_up msg2up; + } param; + char chip_name[16]; +}; + +#ifndef MAX_CARD_NUM +#define MAX_CARD_NUM 64 +#endif +#define DBGTOOL_PAGE_ORDER 10 + +int dbgtool_knl_init(void *vhwdev, void *chip_node); +void dbgtool_knl_deinit(void *vhwdev, void *chip_node); +int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma); +void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id, + void **g_func_handle_array); +long dbgtool_knl_free_mem(int id); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.c b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c new file mode 100644 index 000000000000..789126214b69 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c @@ -0,0 +1,1805 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_lld.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_dcb.h" + +#define DCB_HW_CFG_CHG 0 +#define DCB_HW_CFG_NO_CHG 1 +#define DCB_HW_CFG_ERR 2 + +#define DCB_CFG_CHG_PG_TX 0x1 +#define DCB_CFG_CHG_PG_RX 0x2 +#define DCB_CFG_CHG_PFC 0x4 +#define DCB_CFG_CHG_UP_COS 0x8 + +u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up) +{ + struct hinic_tc_cfg *tc_cfg = &dcb_cfg->tc_cfg[0]; + u8 tc = dcb_cfg->pg_tcs; + + if (!tc) + return 0; + + for (tc--; tc; tc--) { + if (BIT(up) & tc_cfg[tc].path[dir].up_map) + break; + } + + return tc; +} + +#define UP_MAPPING(prio) ((u8)(1U << ((HINIC_DCB_UP_MAX - 1) - (prio)))) + +void hinic_dcb_config_init(struct hinic_nic_dev *nic_dev, + struct hinic_dcb_config *dcb_cfg) +{ + struct hinic_tc_cfg *tc; + int i; + + memset(dcb_cfg->tc_cfg, 0, sizeof(dcb_cfg->tc_cfg)); + tc = &dcb_cfg->tc_cfg[0]; + /* All TCs map to PG0 */ + for (i = 0; i < dcb_cfg->pg_tcs; i++) { + tc = &dcb_cfg->tc_cfg[i]; + tc->path[HINIC_DCB_CFG_TX].pg_id = 0; + tc->path[HINIC_DCB_CFG_TX].bw_pct = 100; + tc->path[HINIC_DCB_CFG_TX].up_map = UP_MAPPING(i); + tc->path[HINIC_DCB_CFG_RX].pg_id = 0; + tc->path[HINIC_DCB_CFG_RX].bw_pct = 100; + tc->path[HINIC_DCB_CFG_RX].up_map = UP_MAPPING(i); + + tc->pfc_en = false; + } + + for (; i < HINIC_DCB_UP_MAX; i++) { + tc->path[HINIC_DCB_CFG_TX].up_map |= UP_MAPPING(i); + tc->path[HINIC_DCB_CFG_RX].up_map |= UP_MAPPING(i); + } + + memset(dcb_cfg->bw_pct, 0, sizeof(dcb_cfg->bw_pct)); + /* Use PG0 by default; PG0's bw is 100% */ + dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][0] = 100; + dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][0] = 100; + dcb_cfg->pfc_state = false; +} + +void hinic_init_ieee_settings(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_ets *ets = &nic_dev->hinic_ieee_ets_default; + struct ieee_pfc *pfc = &nic_dev->hinic_ieee_pfc; + struct hinic_tc_attr *tc_attr; + u8 i; + + memset(ets, 0x0, sizeof(struct ieee_ets)); + memset(&nic_dev->hinic_ieee_ets, 0x0, sizeof(struct ieee_ets)); + ets->ets_cap = dcb_cfg->pg_tcs; + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + tc_attr = &dcb_cfg->tc_cfg[i].path[HINIC_DCB_CFG_TX]; + ets->tc_tsa[i] = tc_attr->prio_type ? 
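+ /* non-zero prio_type selects strict priority; zero selects ETS (DWRR) */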
+ IEEE8021Q_TSA_STRICT : IEEE8021Q_TSA_ETS; + ets->tc_tx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i]; + ets->tc_rx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i]; + ets->prio_tc[i] = hinic_dcb_get_tc(dcb_cfg, + HINIC_DCB_CFG_TX, i); + } + memcpy(&nic_dev->hinic_ieee_ets, ets, sizeof(struct ieee_ets)); + + memset(pfc, 0x0, sizeof(struct ieee_pfc)); + pfc->pfc_cap = dcb_cfg->pfc_tcs; + for (i = 0; i < dcb_cfg->pfc_tcs; i++) { + if (dcb_cfg->tc_cfg[i].pfc_en) + pfc->pfc_en |= (u8)BIT(i); + } +} + +static int hinic_set_up_cos_map(struct hinic_nic_dev *nic_dev, + u8 num_cos, u8 *cos_up) +{ + u8 up_valid_bitmap, up_cos[HINIC_DCB_UP_MAX] = {0}; + u8 i; + + up_valid_bitmap = 0; + for (i = 0; i < num_cos; i++) { + if (cos_up[i] >= HINIC_DCB_UP_MAX) { + hinic_info(nic_dev, drv, "Invalid up %d mapping to cos %d\n", + cos_up[i], i); + return -EFAULT; + } + + if (i > 0 && cos_up[i] >= cos_up[i - 1]) { + hinic_info(nic_dev, drv, + "Invalid priority order, should be descending cos[%d]=%d, cos[%d]=%d\n", + i, cos_up[i], i - 1, cos_up[i - 1]); + return -EINVAL; + } + + up_valid_bitmap |= (u8)BIT(cos_up[i]); + if (i == (num_cos - 1)) + up_cos[cos_up[i]] = nic_dev->default_cos_id; + else + up_cos[cos_up[i]] = i; /* reverse up and cos */ + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (up_valid_bitmap & (u8)BIT(i)) + continue; + + up_cos[i] = nic_dev->default_cos_id; + } + + nic_dev->up_valid_bitmap = up_valid_bitmap; + memcpy(nic_dev->up_cos, up_cos, sizeof(up_cos)); + + return hinic_sq_cos_mapping(nic_dev->netdev); +} + +static int hinic_init_up_cos_map(struct hinic_nic_dev *nic_dev, u8 num_cos) +{ + u8 default_map[HINIC_DCB_COS_MAX] = {0}; + bool setted = false; + u8 max_cos, cos_id, up; + int err; + + max_cos = hinic_max_num_cos(nic_dev->hwdev); + if (!max_cos || ((max_cos - 1) < nic_dev->default_cos_id)) { + hinic_err(nic_dev, drv, "Max_cos is %d, default cos id %d\n", + max_cos, nic_dev->default_cos_id); + return -EFAULT; + } + + err = hinic_get_chip_cos_up_map(nic_dev->pdev, &setted, default_map); + if (err) { + hinic_err(nic_dev, drv, "Get chip cos_up map failed\n"); + return -EFAULT; + } + + if (!setted) { + /* Use (max_cos-1)~0 as default user priority and mapping + * to cos0~(max_cos-1) + */ + up = nic_dev->max_cos - 1; + for (cos_id = 0; cos_id < nic_dev->max_cos; cos_id++, up--) + default_map[cos_id] = up; + } + + return hinic_set_up_cos_map(nic_dev, num_cos, default_map); +} + +int hinic_dcb_init(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + u8 num_cos, support_cos = 0, default_cos = 0; + u8 i, cos_valid_bitmap; + int err; + + if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) + return 0; + + cos_valid_bitmap = hinic_cos_valid_bitmap(nic_dev->hwdev); + if (!cos_valid_bitmap) { + hinic_err(nic_dev, drv, "None cos supported\n"); + return -EFAULT; + } + + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + if (cos_valid_bitmap & BIT(i)) { + support_cos++; + default_cos = i; /* Find max cos id as default cos */ + } + } + + hinic_info(nic_dev, drv, "Support num cos %d, default cos %d\n", + support_cos, default_cos); + + num_cos = (u8)(1U << ilog2(support_cos)); + if (num_cos != support_cos) + hinic_info(nic_dev, drv, "Adjust num_cos from %d to %d\n", + support_cos, num_cos); + + nic_dev->dcbx_cap = 0; + nic_dev->max_cos = num_cos; + nic_dev->default_cos_id = default_cos; + dcb_cfg->pfc_tcs = nic_dev->max_cos; + dcb_cfg->pg_tcs = nic_dev->max_cos; + err = hinic_init_up_cos_map(nic_dev, num_cos); + if (err) { + hinic_info(nic_dev, drv, "Initialize up_cos 
mapping failed\n"); + return -EFAULT; + } + + hinic_dcb_config_init(nic_dev, dcb_cfg); + + nic_dev->dcb_changes = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX | + DCB_CFG_CHG_PG_RX | DCB_CFG_CHG_UP_COS; + nic_dev->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + + memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->dcb_cfg, + sizeof(nic_dev->tmp_dcb_cfg)); + memcpy(&nic_dev->save_dcb_cfg, &nic_dev->dcb_cfg, + sizeof(nic_dev->save_dcb_cfg)); + + hinic_init_ieee_settings(nic_dev); + + sema_init(&nic_dev->dcb_sem, 1); + + return 0; +} + +void hinic_set_prio_tc_map(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 prio, tc; + + for (prio = 0; prio < HINIC_DCB_UP_MAX; prio++) { + tc = nic_dev->up_cos[prio]; + if (tc == nic_dev->default_cos_id) + tc = nic_dev->max_cos - 1; + + netdev_set_prio_tc_map(netdev, prio, tc); + } +} + +int hinic_setup_tc(struct net_device *netdev, u8 tc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (!FUNC_SUPPORT_DCB(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, + "Current function doesn't support DCB\n"); + return -EOPNOTSUPP; + } + + if (tc > nic_dev->dcb_cfg.pg_tcs) { + nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %d, max tc: %d\n", + tc, nic_dev->dcb_cfg.pg_tcs); + return -EINVAL; + } + + if (netif_running(netdev)) { + err = hinic_close(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to close device\n"); + return -EFAULT; + } + } + + if (tc) { + /* TODO: verify if num_tc should be power of 2 */ + if (tc & (tc - 1)) { + nicif_err(nic_dev, drv, netdev, + "Invalid num_tc: %d, must be power of 2\n", + tc); + return -EINVAL; + } + + netdev_set_num_tc(netdev, tc); + hinic_set_prio_tc_map(nic_dev); + + set_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + } else { + netdev_reset_tc(netdev); + + clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + } + + hinic_sq_cos_mapping(netdev); + + if (netif_running(netdev)) { + err = hinic_open(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to open device\n"); + return -EFAULT; + } + } else { + hinic_update_num_qps(netdev); + } + + hinic_configure_dcb(netdev); + + return 0; +} + +u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (wr_flag) { + if ((nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs) && *dcb_en) { + netif_err(nic_dev, drv, netdev, + "max_qps:%d is less than %d\n", + nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs); + return 1; + } + if (*dcb_en) + set_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + else + clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + /* hinic_setup_tc need get the nic_mutex lock again */ + mutex_unlock(&nic_dev->nic_mutex); + /* kill the rtnl assert warning */ + rtnl_lock(); + err = hinic_setup_tc(netdev, + *dcb_en ? nic_dev->dcb_cfg.pg_tcs : 0); + rtnl_unlock(); + mutex_lock(&nic_dev->nic_mutex); + + if (!err) + netif_info(nic_dev, drv, netdev, "%s DCB\n", + *dcb_en ? 
"Enable" : "Disable"); + } else { + *dcb_en = (u8)test_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + } + + return !!err; +} + +static u8 hinic_dcbnl_get_state(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags); +} + +static u8 hinic_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 curr_state = !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + int err = 0; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (state == curr_state) + return 0; + + if ((nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs) && state) { + netif_err(nic_dev, drv, netdev, + "max_qps:%d is less than %d\n", + nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs); + return 1; + } + + err = hinic_setup_tc(netdev, state ? nic_dev->dcb_cfg.pg_tcs : 0); + if (!err) + netif_info(nic_dev, drv, netdev, "%s DCB\n", + state ? "Enable" : "Disable"); + + return !!err; +} + +static void hinic_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + err = hinic_get_default_mac(nic_dev->hwdev, perm_addr); + if (err) + nicif_err(nic_dev, drv, netdev, "Failed to get default mac\n"); +} + +void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg; + struct hinic_tc_cfg *tc_conf = nic_dev->dcb_cfg.tc_cfg; + u8 i, tc_tmp, j; + + if (flag) { + /*need to clear first */ + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + cfg[i].path[HINIC_DCB_CFG_TX].up_map = 0; + cfg[i].path[HINIC_DCB_CFG_RX].up_map = 0; + } + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + tc_tmp = tc[i]; + cfg[tc_tmp].path[HINIC_DCB_CFG_TX].up_map |= (u8)BIT(i); + cfg[tc_tmp].path[HINIC_DCB_CFG_RX].up_map |= (u8)BIT(i); + cfg[tc_tmp].path[HINIC_DCB_CFG_TX].pg_id = (u8)tc_tmp; + cfg[tc_tmp].path[HINIC_DCB_CFG_RX].pg_id = (u8)tc_tmp; + } + } else { + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + for (j = 0; j < HINIC_DCB_TC_MAX; j++) { + if (tc_conf[i].path[HINIC_DCB_CFG_TX].up_map & + (u8)BIT(j)) { + tc[j] = i; + } + } + } + } +} + +void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev, + u8 percent[], bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int i; + + if (flag) { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i] = + percent[i]; + nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i] = + percent[i]; + } + } else { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) + percent[i] = + nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i]; + } +} + +static void hinic_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 pg_id, u8 bw_pct, + u8 up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].prio_type = prio; + if (pg_id != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].pg_id = pg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].bw_pct = bw_pct; + /* if all priority mapping to the same tc, + * up_map is 0xFF, and it's a valid value + */ + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].up_map = up_map; +} + +static void hinic_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct 
hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + nic_dev->tmp_dcb_cfg.bw_pct[0][bwg_id] = bw_pct; +} + +static void hinic_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 pg_id, u8 bw_pct, + u8 up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].prio_type = prio; + if (pg_id != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].pg_id = pg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].bw_pct = bw_pct; + + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].up_map = up_map; +} + +static void hinic_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + nic_dev->tmp_dcb_cfg.bw_pct[1][bwg_id] = bw_pct; +} + +static void hinic_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *pg_id, u8 *bw_pct, + u8 *up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + *prio = nic_dev->dcb_cfg.tc_cfg[tc].path[0].prio_type; + *pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[0].pg_id; + *bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[0].bw_pct; + *up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[0].up_map; +} + +static void hinic_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + *bw_pct = nic_dev->dcb_cfg.bw_pct[0][bwg_id]; +} + +static void hinic_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *pg_id, u8 *bw_pct, + u8 *up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + *prio = nic_dev->dcb_cfg.tc_cfg[tc].path[1].prio_type; + *pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[1].pg_id; + *bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[1].bw_pct; + *up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[1].up_map; +} + +static void hinic_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + *bw_pct = nic_dev->dcb_cfg.bw_pct[1][bwg_id]; +} + +void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 i; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en = !!(setting & BIT(i)); + if (nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en != + nic_dev->dcb_cfg.tc_cfg[i].pfc_en) { + nic_dev->tmp_dcb_cfg.pfc_state = true; + } + } +} + +void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev, + u8 *setting, bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg; + struct hinic_tc_cfg *conf = nic_dev->dcb_cfg.tc_cfg; + u8 i; + + if (flag) { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + cfg[i].path[HINIC_DCB_CFG_TX].prio_type = + !!(*setting & BIT(i)) ? 2 : 0; + cfg[i].path[HINIC_DCB_CFG_RX].prio_type = + !!(*setting & BIT(i)) ? 
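+ /* assumed encoding: 2 = strict priority, 0 = DWRR; prio_type is only tested as a boolean when applied (see __set_hw_ets) */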
2 : 0; + } + } else { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + *setting = *setting | + (u8)((u32)(!!(conf[i].path[0].prio_type)) << i); + } + } +} + +void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev, + u8 *value, bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag) + nic_dev->tmp_dcb_cfg.pfc_state = !!(*value); + else + *value = nic_dev->tmp_dcb_cfg.pfc_state; +} + +void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev, + u8 *value, bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag) { + if (*value) + set_bit(HINIC_ETS_ENABLE, &nic_dev->flags); + else + clear_bit(HINIC_ETS_ENABLE, &nic_dev->flags); + } else { + *value = (u8)test_bit(HINIC_ETS_ENABLE, &nic_dev->flags); + } +} + +static void hinic_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, + u8 setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en = !!setting; + if (nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en != + nic_dev->dcb_cfg.tc_cfg[prio].pfc_en) + nic_dev->tmp_dcb_cfg.pfc_state = true; +} + +void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 i; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + *setting = *setting | + (u8)((u32)(nic_dev->dcb_cfg.tc_cfg[i].pfc_en) << i); + } +} + +void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + *tc_num = nic_dev->max_cos; +} + +static void hinic_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (prio > HINIC_DCB_TC_MAX - 1) + return; + + *setting = nic_dev->dcb_cfg.tc_cfg[prio].pfc_en; +} + +static u8 hinic_dcbnl_getcap(struct net_device *netdev, int cap_id, + u8 *dcb_cap) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (cap_id) { + case DCB_CAP_ATTR_PG: + *dcb_cap = true; + break; + case DCB_CAP_ATTR_PFC: + *dcb_cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *dcb_cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *dcb_cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *dcb_cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *dcb_cap = true; + break; + case DCB_CAP_ATTR_BCN: + *dcb_cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *dcb_cap = nic_dev->dcbx_cap; + break; + default: + *dcb_cap = false; + break; + } + + return 0; +} + +static u8 hinic_sync_tc_cfg(struct hinic_tc_cfg *tc_dst, + struct hinic_tc_cfg *tc_src, int dir) +{ + u8 tc_dir_change = (dir == HINIC_DCB_CFG_TX) ? 
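+ /* flag recording which direction's PG configuration changed */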
+ DCB_CFG_CHG_PG_TX : DCB_CFG_CHG_PG_RX; + u8 changes = 0; + + if (tc_dst->path[dir].prio_type != tc_src->path[dir].prio_type) { + tc_dst->path[dir].prio_type = tc_src->path[dir].prio_type; + changes |= tc_dir_change; + } + + if (tc_dst->path[dir].pg_id != tc_src->path[dir].pg_id) { + tc_dst->path[dir].pg_id = tc_src->path[dir].pg_id; + changes |= tc_dir_change; + } + + if (tc_dst->path[dir].bw_pct != tc_src->path[dir].bw_pct) { + tc_dst->path[dir].bw_pct = tc_src->path[dir].bw_pct; + changes |= tc_dir_change; + } + + if (tc_dst->path[dir].up_map != tc_src->path[dir].up_map) { + tc_dst->path[dir].up_map = tc_src->path[dir].up_map; + changes |= (tc_dir_change | DCB_CFG_CHG_PFC); + } + + return changes; +} + +static u8 hinic_sync_dcb_cfg(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct hinic_dcb_config *tmp_dcb_cfg = &nic_dev->tmp_dcb_cfg; + struct hinic_tc_cfg *tc_dst, *tc_src; + u8 changes = 0; + int i; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + tc_src = &tmp_dcb_cfg->tc_cfg[i]; + tc_dst = &dcb_cfg->tc_cfg[i]; + + changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_TX); + changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_RX); + } + + for (i = 0; i < HINIC_DCB_PG_MAX; i++) { + if (dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] != + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]) { + dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] = + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]; + changes |= DCB_CFG_CHG_PG_TX; + } + + if (dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] != + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]) { + dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] = + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]; + changes |= DCB_CFG_CHG_PG_RX; + } + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (dcb_cfg->tc_cfg[i].pfc_en != + tmp_dcb_cfg->tc_cfg[i].pfc_en) { + dcb_cfg->tc_cfg[i].pfc_en = + tmp_dcb_cfg->tc_cfg[i].pfc_en; + changes |= DCB_CFG_CHG_PFC; + } + } + + if (dcb_cfg->pfc_state != tmp_dcb_cfg->pfc_state) { + dcb_cfg->pfc_state = tmp_dcb_cfg->pfc_state; + changes |= DCB_CFG_CHG_PFC; + } + + return changes; +} + +static void hinic_dcb_get_pfc_map(struct hinic_nic_dev *nic_dev, + struct hinic_dcb_config *dcb_cfg, u8 *pfc_map) +{ + u8 i, up; + u8 pfc_en = 0, outof_range_pfc = 0; + + for (i = 0; i < dcb_cfg->pfc_tcs; i++) { + up = (HINIC_DCB_UP_MAX - 1) - i; + if (dcb_cfg->tc_cfg[up].pfc_en) + *pfc_map |= (u8)BIT(up); + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + up = (HINIC_DCB_UP_MAX - 1) - i; + if (dcb_cfg->tc_cfg[up].pfc_en) + pfc_en |= (u8)BIT(up); + } + + *pfc_map = pfc_en & nic_dev->up_valid_bitmap; + outof_range_pfc = pfc_en & (~nic_dev->up_valid_bitmap); + + if (outof_range_pfc) + hinic_info(nic_dev, drv, + "PFC setting out of range, 0x%x will be ignored\n", + outof_range_pfc); +} + +static bool is_cos_in_use(u8 cos, u8 up_valid_bitmap, u8 *up_cos) +{ + u32 i; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(up_valid_bitmap & BIT(i))) + continue; + + if (cos == up_cos[i]) + return true; + } + + return false; +} + +static void hinic_dcb_adjust_up_bw(struct hinic_nic_dev *nic_dev, u8 *up_pgid, + u8 *up_bw) +{ + u8 tmp_cos, pg_id; + u16 bw_all; + u8 bw_remain, cos_cnt; + + for (pg_id = 0; pg_id < HINIC_DCB_PG_MAX; pg_id++) { + bw_all = 0; + cos_cnt = 0; + /* Find all up mapping to the same pg */ + for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) { + if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap, + nic_dev->up_cos)) + continue; + + if (up_pgid[tmp_cos] == pg_id) { + bw_all += up_bw[tmp_cos]; + cos_cnt++; + } + } + + if (bw_all <= 100 || 
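+ /* nothing to rescale: group total within 100 percent, or group empty */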
!cos_cnt) + continue; + + /* Calculate the up percent within the bandwidth group. The sum of + * percentages for priorities in the same priority group + * must be 100 + */ + bw_remain = 100 % cos_cnt; + for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) { + if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap, + nic_dev->up_cos)) + continue; + + if (up_pgid[tmp_cos] == pg_id) { + up_bw[tmp_cos] = + (u8)(100 * up_bw[tmp_cos] / bw_all + + (u8)!!bw_remain); + if (bw_remain) + bw_remain--; + } + } + } +} + +static void hinic_dcb_dump_configuration(struct hinic_nic_dev *nic_dev, + u8 *up_tc, u8 *up_pgid, u8 *up_bw, + u8 *pg_bw, u8 *up_strict, u8 *bw_pct) +{ + u8 i; + u8 cos; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(nic_dev->up_valid_bitmap & BIT(i))) + continue; + + cos = nic_dev->up_cos[i]; + hinic_info(nic_dev, drv, + "up: %d, cos: %d, tc: %d, pgid: %d, bw: %d, tsa: %d\n", + i, cos, up_tc[cos], up_pgid[cos], up_bw[cos], + up_strict[cos]); + } + + for (i = 0; i < HINIC_DCB_PG_MAX; i++) + hinic_info(nic_dev, drv, "pgid: %d, bw: %d\n", i, pg_bw[i]); +} + +/* Ucode thread timeout is 210ms, so wait longer than 210ms */ +#define HINIC_WAIT_PORT_IO_STOP 250 + +static int hinic_stop_port_traffic_flow(struct hinic_nic_dev *nic_dev) +{ + int err = 0; + + down(&nic_dev->dcb_sem); + + if (nic_dev->disable_port_cnt++ != 0) + goto out; + + err = hinic_force_port_disable(nic_dev); + if (err) { + hinic_err(nic_dev, drv, "Failed to disable port\n"); + goto set_port_err; + } + + err = hinic_set_port_funcs_state(nic_dev->hwdev, false); + if (err) { + hinic_err(nic_dev, drv, + "Failed to disable all functions in port\n"); + goto set_port_funcs_err; + } + + hinic_info(nic_dev, drv, "Stop port traffic flow\n"); + + goto out; + +set_port_funcs_err: + hinic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev)); + +set_port_err: +out: + if (err) + nic_dev->disable_port_cnt--; + + up(&nic_dev->dcb_sem); + + return err; +} + +static int hinic_start_port_traffic_flow(struct hinic_nic_dev *nic_dev) +{ + int err; + + down(&nic_dev->dcb_sem); + + nic_dev->disable_port_cnt--; + if (nic_dev->disable_port_cnt > 0) { + up(&nic_dev->dcb_sem); + return 0; + } + + nic_dev->disable_port_cnt = 0; + up(&nic_dev->dcb_sem); + + err = hinic_force_set_port_state(nic_dev, + !!netif_running(nic_dev->netdev)); + if (err) + hinic_err(nic_dev, drv, "Failed to set port state\n"); + + err = hinic_set_port_funcs_state(nic_dev->hwdev, true); + if (err) + hinic_err(nic_dev, drv, + "Failed to enable all functions in port\n"); + + hinic_info(nic_dev, drv, "Start port traffic flow\n"); + + return err; +} + +static int __set_hw_cos_up_map(struct hinic_nic_dev *nic_dev) +{ + u8 cos, cos_valid_bitmap, cos_up_map[HINIC_DCB_COS_MAX] = {0}; + u8 i; + int err; + + cos_valid_bitmap = 0; + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(nic_dev->up_valid_bitmap & BIT(i))) + continue; + + cos = nic_dev->up_cos[i]; + cos_up_map[cos] = i; + cos_valid_bitmap |= (u8)BIT(cos); + } + + err = hinic_dcb_set_cos_up_map(nic_dev->hwdev, cos_valid_bitmap, + cos_up_map); + if (err) { + hinic_info(nic_dev, drv, "Set cos_up map failed\n"); + return err; + } + + return 0; +} + +static int __set_hw_ets(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + struct hinic_tc_attr *tc_attr; + u8 up_tc[HINIC_DCB_UP_MAX] = {0}; + u8 up_pgid[HINIC_DCB_UP_MAX] = {0}; + u8 up_bw[HINIC_DCB_UP_MAX] = {0}; + u8 pg_bw[HINIC_DCB_UP_MAX] = {0}; + u8 up_strict[HINIC_DCB_UP_MAX] = {0}; + 
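+ /* per-UP scratch tables, filled below and handed to hinic_dcb_set_ets() */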
u8 i, tc, cos; + int err; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(nic_dev->up_valid_bitmap & BIT(i))) + continue; + + cos = nic_dev->up_cos[i]; + if ((nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + up_tc[cos] = my_ets->prio_tc[i]; + up_pgid[cos] = my_ets->prio_tc[i]; + up_bw[cos] = 100; + up_strict[cos] = + (my_ets->tc_tsa[cos] == IEEE8021Q_TSA_STRICT) ? + HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR; + + } else { + tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i); + tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX]; + up_tc[cos] = tc; + up_pgid[cos] = tc_attr->pg_id; + up_bw[cos] = tc_attr->bw_pct; + up_strict[cos] = tc_attr->prio_type ? + HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR; + } + } + + hinic_dcb_adjust_up_bw(nic_dev, up_pgid, up_bw); + + if (nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { + for (i = 0; i < HINIC_DCB_PG_MAX; i++) + pg_bw[i] = my_ets->tc_tx_bw[i]; + } else { + for (i = 0; i < HINIC_DCB_PG_MAX; i++) + pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]; + } + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + hinic_dcb_dump_configuration(nic_dev, up_tc, up_pgid, + up_bw, pg_bw, up_strict, + pg_bw); + + err = hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, up_pgid, + up_bw, up_strict); + if (err) { + hinic_err(nic_dev, drv, "Failed to set ets with mode:%d\n", + nic_dev->dcbx_cap); + return err; + } + + hinic_info(nic_dev, drv, "Set ets to hw done with mode:%d\n", + nic_dev->dcbx_cap); + + return 0; +} + +u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 state = DCB_HW_CFG_CHG; + int err; + + nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev); + if (!nic_dev->dcb_changes) + return DCB_HW_CFG_CHG; + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return DCB_HW_CFG_ERR; + /* wait for all traffic flows to stop */ + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { + err = __set_hw_cos_up_map(nic_dev); + if (err) { + hinic_info(nic_dev, drv, + "Set cos_up map to hardware failed\n"); + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); + } + + if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) { + err = __set_hw_ets(nic_dev); + if (err) { + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= + (~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)); + } + +out: + hinic_start_port_traffic_flow(nic_dev); + + return state; +} + +static int hinic_dcbnl_set_df_ieee_cfg(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ieee_ets *ets_default = &nic_dev->hinic_ieee_ets_default; + struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + struct ieee_pfc pfc = {0}; + int err1 = 0; + int err2 = 0; + u8 flag = 0; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return 0; + + if (memcmp(my_ets, ets_default, sizeof(struct ieee_ets))) + flag |= (u8)BIT(0); + + if (my_pfc->pfc_en) + flag |= (u8)BIT(1); + if (!flag) + return 0; + + err1 = hinic_stop_port_traffic_flow(nic_dev); + if (err1) + return err1; + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + if (flag & BIT(0)) { + memcpy(my_ets, ets_default, sizeof(struct ieee_ets)); + err1 = __set_hw_ets(nic_dev); + } + if (flag & BIT(1)) { + my_pfc->pfc_en = 0; + err2 = hinic_dcb_set_pfc(nic_dev->hwdev, false, pfc.pfc_en); + if (err2) + nicif_err(nic_dev, drv, netdev, "Failed to set 
pfc\n"); + } + + hinic_start_port_traffic_flow(nic_dev); + + return (err1 || err2) ? -EINVAL : 0; +} + +u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + u8 state = DCB_HW_CFG_CHG; + int err; + + nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev); + if (!nic_dev->dcb_changes) + return DCB_HW_CFG_CHG; + + if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { + u8 pfc_map = 0; + + hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map); + err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state, + pfc_map); + if (err) { + hinic_info(nic_dev, drv, "Failed to %s PFC\n", + dcb_cfg->pfc_state ? "enable" : "disable"); + state = DCB_HW_CFG_ERR; + goto out; + } + + if (dcb_cfg->pfc_state) + hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", + pfc_map); + else + hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); + } +out: + + return state; +} + +u8 hinic_dcbnl_set_all(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + u8 state = DCB_HW_CFG_CHG; + int err; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return DCB_HW_CFG_ERR; + + nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev); + if (!nic_dev->dcb_changes) + return DCB_HW_CFG_NO_CHG; + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return DCB_HW_CFG_ERR; + /* wait all traffic flow stopped */ + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { + err = __set_hw_cos_up_map(nic_dev); + if (err) { + hinic_info(nic_dev, drv, + "Set cos_up map to hardware failed\n"); + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); + } + + if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) { + err = __set_hw_ets(nic_dev); + if (err) { + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= + (~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)); + } + + if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { + u8 pfc_map = 0; + + hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map); + err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state, + pfc_map); + if (err) { + hinic_info(nic_dev, drv, "Failed to %s PFC\n", + dcb_cfg->pfc_state ? 
"enable" : "disable"); + state = DCB_HW_CFG_ERR; + goto out; + } + + if (dcb_cfg->pfc_state) + hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", + pfc_map); + else + hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); + } + +out: + hinic_start_port_traffic_flow(nic_dev); + + return state; +} + +static int hinic_dcbnl_ieee_get_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + + ets->ets_cap = my_ets->ets_cap; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + + return 0; +} + +static int hinic_dcbnl_ieee_set_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + struct ieee_ets back_ets; + int err, i; + u8 max_tc = 0; + u16 total_bw = 0; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!memcmp(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)) && + !memcmp(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)) && + !memcmp(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)) && + !memcmp(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa))) + return 0; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) + total_bw += ets->tc_tx_bw[i]; + if (!total_bw) + return -EINVAL; + + for (i = 0; i < dcb_cfg->pg_tcs; i++) { + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + } + if (max_tc) + max_tc++; + + if (max_tc > dcb_cfg->pg_tcs) + return -EINVAL; + + max_tc = max_tc ? 
dcb_cfg->pg_tcs : 0; + memcpy(&back_ets, my_ets, sizeof(struct ieee_ets)); + memcpy(my_ets->tc_tx_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(my_ets->tc_rx_bw, ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(my_ets->prio_tc, ets->prio_tc, sizeof(ets->prio_tc)); + memcpy(my_ets->tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + if (max_tc != netdev_get_num_tc(netdev)) { + err = hinic_setup_tc(netdev, max_tc); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to setup tc with max_tc:%d, err:%d\n", + max_tc, err); + memcpy(my_ets, &back_ets, sizeof(struct ieee_ets)); + return err; + } + } + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return err; + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + err = __set_hw_ets(nic_dev); + + hinic_start_port_traffic_flow(nic_dev); + + return err; +} + +static int hinic_dcbnl_ieee_get_pfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc; + + pfc->pfc_en = my_pfc->pfc_en; + pfc->pfc_cap = my_pfc->pfc_cap; + + return 0; +} + +static int hinic_dcbnl_ieee_set_pfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + int err, i; + u8 pfc_map, max_tc; + u8 outof_range_pfc = 0; + bool pfc_en; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (my_pfc->pfc_en == pfc->pfc_en) + return 0; + + pfc_map = pfc->pfc_en & nic_dev->up_valid_bitmap; + outof_range_pfc = pfc->pfc_en & (~nic_dev->up_valid_bitmap); + if (outof_range_pfc) + netif_info(nic_dev, drv, netdev, + "pfc setting out of range, 0x%x will be ignored\n", + outof_range_pfc); + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return err; + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + pfc_en = pfc_map ? true : false; + max_tc = 0; + for (i = 0; i < dcb_cfg->pg_tcs; i++) { + if (my_ets->prio_tc[i] > max_tc) + max_tc = my_ets->prio_tc[i]; + } + pfc_en = max_tc ? 
pfc_en : false; + + err = hinic_dcb_set_pfc(nic_dev->hwdev, pfc_en, pfc_map); + if (err) { + hinic_info(nic_dev, drv, + "Failed to set pfc to hw with pfc_map:0x%x err:%d\n", + pfc_map, err); + hinic_start_port_traffic_flow(nic_dev); + return err; + } + + hinic_start_port_traffic_flow(nic_dev); + my_pfc->pfc_en = pfc->pfc_en; + hinic_info(nic_dev, drv, + "Set pfc successfully with pfc_map:0x%x, pfc_en:%d\n", + pfc_map, pfc_en); + + return 0; +} + +#ifdef NUMTCS_RETURNS_U8 +static u8 hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#else +static int hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + + if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + return -EINVAL; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = dcb_cfg->pg_tcs; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = dcb_cfg->pfc_tcs; + break; + default: + return -EINVAL; + } + + return 0; +} + +#ifdef NUMTCS_RETURNS_U8 +static u8 hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#else +static int hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#endif +{ + return -EINVAL; +} + +static u8 hinic_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return (u8)nic_dev->dcb_cfg.pfc_state; +} + +static void hinic_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->tmp_dcb_cfg.pfc_state = !!state; +} + +static u8 hinic_dcbnl_getdcbx(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->dcbx_cap; +} + +static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + ((mode & DCB_CAP_DCBX_LLD_MANAGED) && + (!(mode & DCB_CAP_DCBX_HOST)))) { + nicif_info(nic_dev, drv, netdev, + "Set dcbx failed with invalid mode:%d\n", mode); + return 1; + } + + if (nic_dev->dcbx_cap == mode) + return 0; + nic_dev->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX | + DCB_CFG_CHG_PG_RX; + nic_dev->dcb_changes |= mask; + hinic_dcbnl_set_all(netdev); + } else if (mode & DCB_CAP_DCBX_VER_IEEE) { + if (netdev_get_num_tc(netdev)) { + err = hinic_setup_tc(netdev, 0); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to setup tc with mode:%d\n", + mode); + return 1; + } + } + + hinic_dcbnl_set_df_ieee_cfg(netdev); + hinic_force_port_relink(nic_dev->hwdev); + } else { + err = hinic_setup_tc(netdev, 0); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to setup tc with mode:%d\n", mode); + return 1; + } + } + nicif_info(nic_dev, drv, netdev, "Change dcbx mode to 0x%x\n", mode); + + return 0; +} + +const struct dcbnl_rtnl_ops hinic_dcbnl_ops = { + /* IEEE 802.1Qaz std */ + .ieee_getets = hinic_dcbnl_ieee_get_ets, + .ieee_setets = hinic_dcbnl_ieee_set_ets, + .ieee_getpfc = hinic_dcbnl_ieee_get_pfc, + .ieee_setpfc = hinic_dcbnl_ieee_set_pfc, + + /* CEE std */ + .getstate = hinic_dcbnl_get_state, + .setstate = hinic_dcbnl_set_state, + .getpermhwaddr = hinic_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = hinic_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = hinic_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = hinic_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = hinic_dcbnl_set_pg_bwg_cfg_rx, + 
.getpgtccfgtx = hinic_dcbnl_get_pg_tc_cfg_tx,
+	.getpgbwgcfgtx = hinic_dcbnl_get_pg_bwg_cfg_tx,
+	.getpgtccfgrx = hinic_dcbnl_get_pg_tc_cfg_rx,
+	.getpgbwgcfgrx = hinic_dcbnl_get_pg_bwg_cfg_rx,
+	.setpfccfg = hinic_dcbnl_set_pfc_cfg,
+	.getpfccfg = hinic_dcbnl_get_pfc_cfg,
+	.setall = hinic_dcbnl_set_all,
+	.getcap = hinic_dcbnl_getcap,
+	.getnumtcs = hinic_dcbnl_getnumtcs,
+	.setnumtcs = hinic_dcbnl_setnumtcs,
+	.getpfcstate = hinic_dcbnl_getpfcstate,
+	.setpfcstate = hinic_dcbnl_setpfcstate,
+
+	/* DCBX configuration */
+	.getdcbx = hinic_dcbnl_getdcbx,
+	.setdcbx = hinic_dcbnl_setdcbx,
+};
+
+int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u8 state;
+
+	hinic_dcb_config_init(nic_dev, &nic_dev->tmp_dcb_cfg);
+	state = hinic_dcbnl_set_all(netdev);
+	if (state == DCB_HW_CFG_ERR)
+		return -EFAULT;
+
+	if (state == DCB_HW_CFG_CHG)
+		hinic_info(nic_dev, drv,
+			   "Reset hardware DCB configuration done\n");
+
+	return 0;
+}
+
+void hinic_configure_dcb(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	int err;
+
+	if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+		memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->save_dcb_cfg,
+		       sizeof(nic_dev->tmp_dcb_cfg));
+		hinic_dcbnl_set_all(netdev);
+	} else {
+		memcpy(&nic_dev->save_dcb_cfg, &nic_dev->tmp_dcb_cfg,
+		       sizeof(nic_dev->save_dcb_cfg));
+		err = hinic_dcb_reset_hw_config(nic_dev);
+		if (err)
+			nicif_warn(nic_dev, drv, netdev,
+				   "Failed to reset hw dcb configuration\n");
+	}
+}
+
+static bool __is_cos_up_map_change(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+	u8 cos, up;
+
+	for (cos = 0; cos < nic_dev->max_cos; cos++) {
+		up = cos_up[cos];
+		if (BIT(up) != (nic_dev->up_valid_bitmap & BIT(up)))
+			return true;
+	}
+
+	return false;
+}
+
+int __set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+	struct net_device *netdev;
+	u8 state;
+	int err = 0;
+
+	if (!nic_dev || !cos_up)
+		return -EINVAL;
+
+	netdev = nic_dev->netdev;
+
+	if (test_and_set_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Cos_up map setting is in progress, please try again later\n");
+		return -EFAULT;
+	}
+
+	nicif_info(nic_dev, drv, netdev, "Set cos2up:%d%d%d%d%d%d%d%d\n",
+		   cos_up[0], cos_up[1], cos_up[2], cos_up[3],
+		   cos_up[4], cos_up[5], cos_up[6], cos_up[7]);
+
+	if (!__is_cos_up_map_change(nic_dev, cos_up)) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Same mapping, no need to change anything\n");
+		err = 0;
+		goto out;
+	}
+
+	err = hinic_set_up_cos_map(nic_dev, nic_dev->max_cos, cos_up);
+	if (err) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	nic_dev->dcb_changes = DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX |
+			       DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS;
+
+	if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+		/* Change map in kernel */
+		hinic_set_prio_tc_map(nic_dev);
+
+		state = hinic_dcbnl_set_all(netdev);
+		if (state == DCB_HW_CFG_ERR) {
+			nicif_err(nic_dev, drv, netdev,
+				  "Reconfig dcb to hw failed\n");
+			err = -EFAULT;
+		}
+	}
+
+out:
+	clear_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags);
+
+	return err;
+}
+
+int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos)
+{
+	if (!nic_dev || !num_cos)
+		return -EINVAL;
+
+	*num_cos = nic_dev->max_cos;
+
+	return 0;
+}
+
+int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *num_cos,
+			 u8 *cos_up)
+{
+	u8 up, cos;
+
+	if (!nic_dev || !cos_up)
+		return -EINVAL;
+
+	for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
+		for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
+			if (!(nic_dev->up_valid_bitmap & BIT(up)))
+				continue;
+
+			if (nic_dev->up_cos[up] == cos ||
+			    nic_dev->up_cos[up] == nic_dev->default_cos_id)
+				cos_up[cos] = up;
+		}
+	}
+
+	*num_cos = nic_dev->max_cos;
+
+	return 0;
+}
+
+static int __stop_port_flow(void *uld_array[], u32 num_dev)
+{
+	struct hinic_nic_dev *tmp_dev;
+	u32 i, idx;
+	int err;
+
+	for (idx = 0; idx < num_dev; idx++) {
+		tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+		err = hinic_stop_port_traffic_flow(tmp_dev);
+		if (err) {
+			nicif_err(tmp_dev, drv, tmp_dev->netdev,
+				  "Stop port traffic flow failed\n");
+			goto stop_port_err;
+		}
+	}
+
+	/* wait for all traffic flows to stop */
+	msleep(HINIC_WAIT_PORT_IO_STOP);
+
+	return 0;
+
+stop_port_err:
+	for (i = 0; i < idx; i++) {
+		tmp_dev = (struct hinic_nic_dev *)uld_array[i];
+		hinic_start_port_traffic_flow(tmp_dev);
+	}
+
+	return err;
+}
+
+static void __start_port_flow(void *uld_array[], u32 num_dev)
+{
+	struct hinic_nic_dev *tmp_dev;
+	u32 idx;
+
+	for (idx = 0; idx < num_dev; idx++) {
+		tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+		hinic_start_port_traffic_flow(tmp_dev);
+	}
+}
+
+/* for the hinicadm tool, need to change all ports of the chip */
+int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+	void *uld_array[HINIC_MAX_PF_NUM];
+	struct hinic_nic_dev *tmp_dev;
+	u8 num_cos, old_cos_up[HINIC_DCB_COS_MAX] = {0};
+	u32 i, idx, num_dev = 0;
+	int err, rollback_err;
+
+	/* Save the old map, in case the set fails */
+	err = hinic_get_cos_up_map(nic_dev, &num_cos, old_cos_up);
+	if (err || !num_cos) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Get old cos_up map failed\n");
+		return -EFAULT;
+	}
+
+	if (!memcmp(cos_up, old_cos_up, sizeof(u8) * num_cos)) {
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Same cos2up map, no need to change anything\n");
+		return 0;
+	}
+
+	/* Get all PFs of this chip */
+	err = hinic_get_pf_uld_array(nic_dev->pdev, &num_dev, uld_array);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Get all pf private handle failed\n");
+		return -EFAULT;
+	}
+
+	err = __stop_port_flow(uld_array, num_dev);
+	if (err)
+		return -EFAULT;
+
+	for (idx = 0; idx < num_dev; idx++) {
+		tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+		err = __set_cos_up_map(tmp_dev, cos_up);
+		if (err) {
+			nicif_err(tmp_dev, drv, tmp_dev->netdev,
+				  "Set cos_up map to hw failed\n");
+			goto set_err;
+		}
+	}
+
+	__start_port_flow(uld_array, num_dev);
+
+	hinic_set_chip_cos_up_map(nic_dev->pdev, cos_up);
+
+	return 0;
+
+set_err:
+	/* undo all settings */
+	for (i = 0; i < idx; i++) {
+		tmp_dev = (struct hinic_nic_dev *)uld_array[i];
+		rollback_err = __set_cos_up_map(tmp_dev, old_cos_up);
+		if (rollback_err)
+			nicif_err(tmp_dev, drv, tmp_dev->netdev,
+				  "Undo cos_up map to hw failed\n");
+	}
+
+	__start_port_flow(uld_array, num_dev);
+
+	return err;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.h b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
new file mode 100644
index 000000000000..52b8a2f499a0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_DCB_H_ +#define HINIC_DCB_H_ + +#define HINIC_DCB_CFG_TX 0 +#define HINIC_DCB_CFG_RX 1 + +/* IEEE8021QAZ Transmission selection algorithm identifiers */ +#define IEEE8021Q_TSA_STRICT 0x0 +#define IEEE8021Q_TSA_CBSHAPER 0x1 +#define IEEE8021Q_TSA_ETS 0x2 +#define IEEE8021Q_TSA_VENDOR 0xFF + +enum HINIC_DCB_FLAGS { + HINIC_DCB_UP_COS_SETTING, + HINIC_DCB_TRAFFIC_STOPPED, +}; + +extern const struct dcbnl_rtnl_ops hinic_dcbnl_ops; + +u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up); + +int hinic_dcb_init(struct hinic_nic_dev *nic_dev); + +int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev); + +int hinic_setup_tc(struct net_device *netdev, u8 tc); + +void hinic_configure_dcb(struct net_device *netdev); + +int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up); + +int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos); + +int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, + u8 *num_cos, u8 *cos_up); +u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag); +void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev, + u8 *value, bool flag); +void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting); +void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting); +u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev); +void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num); +void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag); +void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev, + u8 percent[], bool flag); +void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev, + u8 *value, bool flag); +void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev, + u8 *setting, bool flag); +u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h deleted file mode 100644 index a209b14160cc..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ /dev/null @@ -1,83 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_DEV_H -#define HINIC_DEV_H - -#include -#include -#include -#include -#include - -#include "hinic_hw_dev.h" -#include "hinic_tx.h" -#include "hinic_rx.h" - -#define HINIC_DRV_NAME "hinic" - -enum hinic_flags { - HINIC_LINK_UP = BIT(0), - HINIC_INTF_UP = BIT(1), - HINIC_RSS_ENABLE = BIT(2), -}; - -struct hinic_rx_mode_work { - struct work_struct work; - u32 rx_mode; -}; - -struct hinic_rss_type { - u8 tcp_ipv6_ext; - u8 ipv6_ext; - u8 tcp_ipv6; - u8 ipv6; - u8 tcp_ipv4; - u8 ipv4; - u8 udp_ipv6; - u8 udp_ipv4; -}; - -enum hinic_rss_hash_type { - HINIC_RSS_HASH_ENGINE_TYPE_XOR, - HINIC_RSS_HASH_ENGINE_TYPE_TOEP, - HINIC_RSS_HASH_ENGINE_TYPE_MAX, -}; - -struct hinic_dev { - struct net_device *netdev; - struct hinic_hwdev *hwdev; - - u32 msg_enable; - unsigned int tx_weight; - unsigned int rx_weight; - u16 num_qps; - u16 max_qps; - - unsigned int flags; - - struct semaphore mgmt_lock; - unsigned long *vlan_bitmap; - - struct hinic_rx_mode_work rx_mode_work; - struct workqueue_struct *workq; - - struct hinic_txq *txqs; - 
struct hinic_rxq *rxqs; - - struct hinic_txq_stats tx_stats; - struct hinic_rxq_stats rx_stats; - - u8 rss_tmpl_idx; - u8 rss_hash_engine; - u16 num_rss; - u16 rss_limit; - struct hinic_rss_type rss_type; - u8 *rss_hkey_user; - s32 *rss_indir_user; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h new file mode 100644 index 000000000000..57c09d6dd8fc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __HINIC_DFX_DEF_H__ +#define __HINIC_DFX_DEF_H__ + +#ifdef __cplusplus + #if __cplusplus +extern "C"{ + #endif +#endif /* __cplusplus */ + +enum module_name { + SEND_TO_NIC_DRIVER = 1, + SEND_TO_HW_DRIVER, + SEND_TO_UCODE, + SEND_TO_UP, + SEND_TO_SM, + + HINICADM_OVS_DRIVER = 6, + HINICADM_ROCE_DRIVER, + HINICADM_TOE_DRIVER, + HINICADM_IWAP_DRIVER, + HINICADM_FC_DRIVER, + HINICADM_FCOE_DRIVER, +}; + +enum driver_cmd_type { + TX_INFO = 1, + Q_NUM, + TX_WQE_INFO, + TX_MAPPING, + RX_INFO, + RX_WQE_INFO, + RX_CQE_INFO, + UPRINT_FUNC_EN, + UPRINT_FUNC_RESET, + UPRINT_SET_PATH, + UPRINT_GET_STATISTICS, + FUNC_TYPE, + GET_FUNC_IDX, + GET_INTER_NUM, + CLOSE_TX_STREAM, + GET_DRV_VERSION, + CLEAR_FUNC_STASTIC, + GET_HW_STATS, + CLEAR_HW_STATS, + GET_SELF_TEST_RES, + GET_CHIP_FAULT_STATS, + GET_NUM_COS, + SET_COS_UP_MAP, + GET_COS_UP_MAP, + GET_CHIP_ID, + GET_SINGLE_CARD_INFO, + GET_FIRMWARE_ACTIVE_STATUS, + ROCE_DFX_FUNC, + GET_DEVICE_ID, + GET_PF_DEV_INFO, + CMD_FREE_MEM, + GET_LOOPBACK_MODE = 32, + SET_LOOPBACK_MODE, + SET_LINK_MODE, + SET_PF_BW_LIMIT, + GET_PF_BW_LIMIT, + ROCE_CMD, + GET_POLL_WEIGHT, + SET_POLL_WEIGHT, + GET_HOMOLOGUE, + SET_HOMOLOGUE, + GET_SSET_COUNT, + GET_SSET_ITEMS, + IS_DRV_IN_VM, + LRO_ADPT_MGMT, + SET_INTER_COAL_PARAM, + GET_INTER_COAL_PARAM, + GET_CHIP_INFO, + GET_NIC_STATS_LEN, + GET_NIC_STATS_STRING, + GET_NIC_STATS_INFO, + GET_PF_ID, + SET_DCB_CFG, + SET_PFC_PRIORITY, + GET_PFC_INFO, + SET_PFC_CONTROL, + SET_ETS, + GET_ETS_INFO, + GET_SUPPORT_UP, + GET_SUPPORT_TC, + + RSS_CFG = 0x40, + RSS_INDIR, + PORT_ID, + + GET_WIN_STAT = 0x60, + WIN_CSR_READ = 0x61, + WIN_CSR_WRITE = 0x62, + WIN_API_CMD_RD = 0x63, + + VM_COMPAT_TEST = 0xFF +}; + +enum hinic_nic_link_mode { + HINIC_LINK_MODE_AUTO = 0, + HINIC_LINK_MODE_UP, + HINIC_LINK_MODE_DOWN, + HINIC_LINK_MODE_MAX +}; + +enum api_chain_cmd_type { + API_CSR_READ, + API_CSR_WRITE +}; + +enum sm_cmd_type { + SM_CTR_RD32 = 1, + SM_CTR_RD64_PAIR, + SM_CTR_RD64 +}; + +enum hinic_show_set { + HINIC_SHOW_SSET_IO_STATS = 1, +}; + +#define HINIC_SHOW_ITEM_LEN 32 +struct hinic_show_item { + char name[HINIC_SHOW_ITEM_LEN]; + u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ + u8 rsvd[7]; + u64 value; +}; + +#define UP_UPDATEFW_TIME_OUT_VAL 20000U +#define UCODE_COMP_TIME_OUT_VAL 0xFF00000 +#define NIC_TOOL_MAGIC 'x' + +#ifdef __cplusplus + #if __cplusplus +} + #endif +#endif /* __cplusplus */ +#endif /* 
__HINIC_DFX_DEF_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c new file mode 100644 index 000000000000..39bf58aa2369 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c @@ -0,0 +1,1434 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_hwif.h" +#include "hinic_csr.h" +#include "hinic_eqs.h" + +#define HINIC_EQS_WQ_NAME "hinic_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_FUNC_BUSY_SHIFT 10 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_QPS_NUM_SHIFT 22 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_FUNC_BUSY_MASK 0x1U +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define AEQ_CTRL_0_QPS_NUM_MASK 0xFFU +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_GET(val, member) \ + (((val) >> AEQ_CTRL_0_##member##_SHIFT) & \ + AEQ_CTRL_0_##member##_MASK) + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK \ + << AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_FUNC_OWN_SHIFT 21 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_FUNC_OWN_MASK 0x1U +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_GET(val, member) \ + (((val) >> AEQ_CTRL_1_##member##_SHIFT) & \ + AEQ_CTRL_1_##member##_MASK) + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK \ + << AEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC_EQ_PROD_IDX_MASK 0xFFFFF +#define HINIC_TASK_PROCESS_EQE_LIMIT 1024 +#define HINIC_EQ_UPDATE_CI_STEP 64 + +static uint g_aeq_len = HINIC_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(HINIC_MIN_AEQ_LEN) + " - " __stringify(HINIC_MAX_AEQ_LEN)); + +static uint g_ceq_len = HINIC_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(HINIC_MIN_CEQ_LEN) + " - " __stringify(HINIC_MAX_CEQ_LEN)); + +static uint g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +#define 
CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 + +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK \ + << EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) + +#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? 
\ + HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \ + (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *)\ + GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ + ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_DMA_ATTR_DEFAULT 0 + +#define CEQ_LMT_KICK_DEFAULT 0 + +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define EQ_WRAPPED_SHIFT 20 + +#define EQ_VALID_SHIFT 31 + +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) + +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) + +#define EQ_MIN_PAGE_SIZE 0x1000U +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) + +static irqreturn_t aeq_interrupt(int irq, void *data); +static irqreturn_t ceq_interrupt(int irq, void *data); + +/** + * hinic_qps_num_set - set the number of queues that are actually opened + * @hwdev: the pointer to hw device + * @num_qps: number of queue + */ +void hinic_qps_num_set(void *hwdev, u32 num_qps) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + u32 addr, val, ctrl; + + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0); + val = hinic_hwif_read_reg(hwif, addr); + val = AEQ_CTRL_0_CLEAR(val, QPS_NUM); + ctrl = AEQ_CTRL_0_SET(num_qps, QPS_NUM); + val |= ctrl; + hinic_hwif_write_reg(hwif, addr, val); +} + +u32 hinic_func_busy_state_get(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0); + val = hinic_hwif_read_reg(hwif, addr); + return AEQ_CTRL_0_GET(val, FUNC_BUSY); +} + +void hinic_func_busy_state_set(struct hinic_hwdev *hwdev, u32 cfg) +{ + struct hinic_hwif *hwif = hwdev->hwif; + u32 addr, val, ctrl; + + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0); + val = hinic_hwif_read_reg(hwif, addr); + val = AEQ_CTRL_0_CLEAR(val, FUNC_BUSY); + ctrl = AEQ_CTRL_0_SET(cfg, FUNC_BUSY); + val |= ctrl; + hinic_hwif_write_reg(hwif, addr, val); +} + +u32 hinic_func_own_bit_get(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(0); + val = hinic_hwif_read_reg(hwif, addr); + return AEQ_CTRL_1_GET(val, FUNC_OWN); +} + +void hinic_func_own_bit_set(struct hinic_hwdev *hwdev, u32 cfg) +{ + struct hinic_hwif *hwif = hwdev->hwif; + u32 addr, val, ctrl; + + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(0); + val = hinic_hwif_read_reg(hwif, addr); + val = AEQ_CTRL_1_CLEAR(val, FUNC_OWN); + ctrl = AEQ_CTRL_1_SET(cfg, FUNC_OWN); + val |= ctrl; + hinic_hwif_write_reg(hwif, addr, val); +} + +static void ceq_tasklet(ulong eq_tasklet); + +static u8 
eq_cons_idx_checksum_set(u32 val) +{ + u8 checksum = 0; + u8 idx; + + for (idx = 0; idx < 32; idx += 4) + checksum ^= ((val >> idx) & 0xF); + + return checksum & 0xF; +} + +/** + * hinic_aeq_register_hw_cb - register aeq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + * @hw_cb: callback function + * Return: 0 - success, negative - failure + */ +int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event, + hinic_aeq_hwe_cb hwe_cb) +{ + struct hinic_aeqs *aeqs; + + if (!hwdev || !hwe_cb || event >= HINIC_MAX_AEQ_EVENTS) + return -EINVAL; + + aeqs = ((struct hinic_hwdev *)hwdev)->aeqs; + + aeqs->aeq_hwe_cb[event] = hwe_cb; + + set_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + return 0; +} +EXPORT_SYMBOL(hinic_aeq_register_hw_cb); + +/** + * hinic_aeq_unregister_hw_cb - unregister the aeq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + */ +void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event) +{ + struct hinic_aeqs *aeqs; + + if (!hwdev || event >= HINIC_MAX_AEQ_EVENTS) + return; + + aeqs = ((struct hinic_hwdev *)hwdev)->aeqs; + + clear_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + while (test_bit(HINIC_AEQ_HW_CB_RUNNING, &aeqs->aeq_hw_cb_state[event])) + usleep_range(900, 1000); + + aeqs->aeq_hwe_cb[event] = NULL; +} +EXPORT_SYMBOL(hinic_aeq_unregister_hw_cb); + +/** + * hinic_aeq_register_sw_cb - register aeq callback for sw event + * @hwdev: pointer to hw device + * @event: soft event for the handler + * @sw_cb: callback function + * Return: 0 - success, negative - failure + */ +int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event, + hinic_aeq_swe_cb aeq_swe_cb) +{ + struct hinic_aeqs *aeqs; + + if (!hwdev || !aeq_swe_cb || event >= HINIC_MAX_AEQ_SW_EVENTS) + return -EINVAL; + + aeqs = ((struct hinic_hwdev *)hwdev)->aeqs; + + aeqs->aeq_swe_cb[event] = aeq_swe_cb; + + set_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + return 0; +} +EXPORT_SYMBOL(hinic_aeq_register_swe_cb); + +/** + * hinic_aeq_unregister_sw_cb - unregister the aeq callback for sw event + * @hwdev: pointer to hw device + * @event: soft event for the handler + */ +void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event) +{ + struct hinic_aeqs *aeqs; + + if (!hwdev || event >= HINIC_MAX_AEQ_SW_EVENTS) + return; + + aeqs = ((struct hinic_hwdev *)hwdev)->aeqs; + + clear_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + while (test_bit(HINIC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event])) + usleep_range(900, 1000); + + aeqs->aeq_swe_cb[event] = NULL; +} +EXPORT_SYMBOL(hinic_aeq_unregister_swe_cb); + +/** + * hinic_ceq_register_sw_cb - register ceq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event, + hinic_ceq_event_cb callback) +{ + struct hinic_ceqs *ceqs; + + if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS) + return -EINVAL; + + ceqs = ((struct hinic_hwdev *)hwdev)->ceqs; + + ceqs->ceq_cb[event] = callback; + + set_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + return 0; +} +EXPORT_SYMBOL(hinic_ceq_register_cb); + +/** + * hinic_ceq_unregister_cb - unregister ceq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + */ +void hinic_ceq_unregister_cb(void *hwdev, enum 
hinic_ceq_event event)
+{
+	struct hinic_ceqs *ceqs;
+
+	if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
+		return;
+
+	ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+	clear_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+	while (test_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
+		usleep_range(900, 1000);
+
+	ceqs->ceq_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_ceq_unregister_cb);
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: arm state value
+ */
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+	u32 eq_wrap_ci, val;
+	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+	eq_wrap_ci = EQ_CONS_IDX(eq);
+
+	/* other fields are reserved, set to 0 */
+	val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+	      EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+
+	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * ceq_event_handler - handler for the ceq events
+ * @ceqs: eqs part of the chip
+ * @ceqe: ceq element of the event
+ */
+static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
+{
+	struct hinic_hwdev *hwdev = ceqs->hwdev;
+	enum hinic_ceq_event event = CEQE_TYPE(ceqe);
+	u32 ceqe_data = CEQE_DATA(ceqe);
+
+	if (event >= HINIC_MAX_CEQ_EVENTS) {
+		sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe data: 0x%x\n",
+			event, ceqe_data);
+		return;
+	}
+
+	set_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+
+	if (ceqs->ceq_cb[event] &&
+	    test_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
+		ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+	clear_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+}
+
+/**
+ * aeq_irq_handler - handler for the aeq event
+ * @eq: the async event queue of the event
+ */
+static bool aeq_irq_handler(struct hinic_eq *eq)
+{
+	struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
+	struct hinic_aeq_elem *aeqe_pos;
+	enum hinic_aeq_type event;
+	enum hinic_aeq_sw_type sw_event;
+	enum hinic_ucode_event_type ucode_event;
+	u64 aeqe_data;
+	u32 aeqe_desc;
+	u32 i, eqe_cnt = 0;
+	u8 size;
+	u8 lev;
+
+	for (i = 0; i < HINIC_TASK_PROCESS_EQE_LIMIT; i++) {
+		aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+
+		/* Data in HW is in big-endian format */
+		aeqe_desc = be32_to_cpu(aeqe_pos->desc);
+
+		/* HW updates the wrapped bit when it adds an eq element */
+		if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
+			return false;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the AEQ element until we have
+		 * verified the descriptor has been written back.
+		 */
+		dma_rmb();
+
+		event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+		if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
+			ucode_event = event;
+			/* SW event uses only the first 8B */
+			sw_event = ucode_event >= HINIC_NIC_FATAL_ERROR_MAX ?
+ HINIC_STATEFULL_EVENT : + HINIC_STATELESS_EVENT; + aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data)); + set_bit(HINIC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); + if (aeqs->aeq_swe_cb[sw_event] && + test_bit(HINIC_AEQ_SW_CB_REG, + &aeqs->aeq_sw_cb_state[sw_event])) { + lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev, + ucode_event, + aeqe_data); + hinic_swe_fault_handler(aeqs->hwdev, lev, + ucode_event, aeqe_data); + } + clear_bit(HINIC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); + } else { + if (event < HINIC_MAX_AEQ_EVENTS) { + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + set_bit(HINIC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + if (aeqs->aeq_hwe_cb[event] && + test_bit(HINIC_AEQ_HW_CB_REG, + &aeqs->aeq_hw_cb_state[event])) + aeqs->aeq_hwe_cb[event](aeqs->hwdev, + aeqe_pos->aeqe_data, size); + clear_bit(HINIC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + } else { + sdk_warn(eq->hwdev->dev_hdl, + "Unknown aeq hw event %d\n", event); + } + } + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * ceq_irq_handler - handler for the ceq event + * @eq: the completion event queue of the event + * Return: true - success, false - failure + */ +static bool ceq_irq_handler(struct hinic_eq *eq) +{ + struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe, eqe_cnt = 0; + u32 i; + + for (i = 0; i < g_num_ceqe_in_tasklet; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + ceqe = be32_to_cpu(ceqe); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return false; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED); + } + } + + return true; +} + +static void reschedule_eq_handler(struct hinic_eq *eq) +{ + if (eq->type == HINIC_AEQ) { + struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); + struct workqueue_struct *workq = aeqs->workq; + struct hinic_eq_work *aeq_work = &eq->aeq_work; + + queue_work(workq, &aeq_work->work); + } else { + tasklet_schedule(&eq->ceq_tasklet); + } +} + +/** + * eq_irq_handler - handler for the eq event + * @data: the event queue of the event + * Return: true - success, false - failure + */ +static bool eq_irq_handler(void *data) +{ + struct hinic_eq *eq = (struct hinic_eq *)data; + bool uncompleted; + + if (eq->type == HINIC_AEQ) + uncompleted = aeq_irq_handler(eq); + else + uncompleted = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, uncompleted ? 
HINIC_EQ_NOT_ARMED : HINIC_EQ_ARMED); + + return uncompleted; +} + + + +/** + * eq_irq_work - eq work for the event + * @work: the work that is associated with the eq + */ +static void eq_irq_work(struct work_struct *work) +{ + struct hinic_eq_work *aeq_work = + container_of(work, struct hinic_eq_work, work); + + if (eq_irq_handler(aeq_work->data)) + reschedule_eq_handler(aeq_work->data); +} + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the async event queue of the event + */ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hinic_eq *aeq = (struct hinic_eq *)data; + struct hinic_hwdev *hwdev = aeq->hwdev; + + struct hinic_aeqs *aeqs = aeq_to_aeqs(aeq); + struct workqueue_struct *workq = aeqs->workq; + struct hinic_eq_work *aeq_work; + + /* clear resend timer cnt register */ + hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + aeq_work = &aeq->aeq_work; + aeq_work->data = aeq; + + queue_work(workq, &aeq_work->work); + + return IRQ_HANDLED; +} + +/** + * ceq_tasklet - ceq tasklet for the event + * @ceq_data: data that will be used by the tasklet(ceq) + */ +static void ceq_tasklet(ulong ceq_data) +{ + struct hinic_ceq_tasklet_data *ceq_tasklet_data = + (struct hinic_ceq_tasklet_data *)ceq_data; + struct hinic_eq *eq = (struct hinic_eq *)ceq_tasklet_data->data; + + eq->soft_intr_jif = jiffies; + + if (eq_irq_handler(ceq_tasklet_data->data)) + reschedule_eq_handler(ceq_tasklet_data->data); +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the completion event queue of the event + */ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hinic_eq *ceq = (struct hinic_eq *)data; + struct hinic_ceq_tasklet_data *ceq_tasklet_data; + + ceq->hard_intr_jif = jiffies; + + /* clear resend timer counters */ + hinic_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + ceq_tasklet_data = &ceq->ceq_tasklet_data; + ceq_tasklet_data->data = data; + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +struct hinic_ceq_ctrl_reg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 q_id; + u32 ctrl0; + u32 ctrl1; +}; + +static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct hinic_ceq_ctrl_reg ceq_ctrl = {0}; + u16 in_size = sizeof(ceq_ctrl); + u16 out_size = sizeof(ceq_ctrl); + int err; + + err = hinic_global_func_id_get(hwdev, &ceq_ctrl.func_id); + if (err) + return err; + + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, + &ceq_ctrl, in_size, + &ceq_ctrl, &out_size, 0); + if (err || !out_size || ceq_ctrl.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + * Return: 0 - success, negative - failure + */ +static int set_eq_ctrls(struct hinic_eq *eq) +{ + enum hinic_eq_type type = eq->type; + struct hinic_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif); + int err; + + if (type == HINIC_AEQ) { + /* set ctrl0 */ + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = 
hinic_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + if (HINIC_IS_VF(eq->hwdev)) { + val = AEQ_CTRL_0_CLEAR(val, FUNC_BUSY) & + AEQ_CTRL_1_CLEAR(val, FUNC_OWN); + } + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); + val |= ctrl0; + + hinic_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl1); + + } else { + ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) | + CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + /* set ceq ctrl reg through mgmt cpu */ + err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); + if (err) + return err; + } + + return 0; +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + */ +static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + u32 i; + u32 *ceqe; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + */ +static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + struct hinic_aeq_elem *aeqe; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + */ +static int alloc_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwdev->hwif; + u32 init_val; + u64 dma_addr_size, virt_addr_size; + u16 pg_num, i; + u32 reg; + int err; + u8 flag = 0; + + dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr); + virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr); + + eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr) + return -ENOMEM; + + eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + err = -ENOMEM; + goto virt_addr_alloc_err; + } + + eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr_for_free) { + err = -ENOMEM; + goto dma_addr_free_alloc_err; + } + + eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr_for_free) { + err = -ENOMEM; + goto virt_addr_free_alloc_err; + } + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) { + eq->virt_addr_for_free[pg_num] = dma_zalloc_coherent + (eq->hwdev->dev_hdl, eq->page_size, + &eq->dma_addr_for_free[pg_num], GFP_KERNEL); + if (!eq->virt_addr_for_free[pg_num]) { + err = -ENOMEM; + goto dma_alloc_err; + } + + eq->dma_addr[pg_num] = 
eq->dma_addr_for_free[pg_num]; + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num]; + if (eq->dma_addr_for_free[pg_num] & (eq->page_size - 1)) { + sdk_info(eq->hwdev->dev_hdl, + "Address is not aligned to %u-bytes as hardware required\n", + eq->page_size); + sdk_info(eq->hwdev->dev_hdl, "Change eq's page size %u\n", + ((eq->page_size) >> 1)); + eq->dma_addr[pg_num] = ALIGN + (eq->dma_addr_for_free[pg_num], + (u64)((eq->page_size) >> 1)); + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] + + ((u64)eq->dma_addr[pg_num] + - (u64)eq->dma_addr_for_free[pg_num]); + flag = 1; + } + reg = HINIC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hinic_hwif_write_reg(hwif, reg, + upper_32_bits(eq->dma_addr[pg_num])); + + reg = HINIC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hinic_hwif_write_reg(hwif, reg, + lower_32_bits(eq->dma_addr[pg_num])); + } + + if (flag) { + eq->page_size = eq->page_size >> 1; + eq->eq_len = eq->eq_len >> 1; + } + + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); + err = -EINVAL; + goto dma_alloc_err; + } + init_val = EQ_WRAPPED(eq); + + if (eq->type == HINIC_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_num; i++) + dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size, + eq->virt_addr_for_free[i], + eq->dma_addr_for_free[i]); + kfree(eq->virt_addr_for_free); +virt_addr_free_alloc_err: + kfree(eq->dma_addr_for_free); +dma_addr_free_alloc_err: + kfree(eq->virt_addr); +virt_addr_alloc_err: + kfree(eq->dma_addr); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + */ +static void free_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwdev *hwdev = eq->hwdev; + u16 pg_num; + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) + dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size, + eq->virt_addr_for_free[pg_num], + eq->dma_addr_for_free[pg_num]); + + kfree(eq->virt_addr_for_free); + kfree(eq->dma_addr_for_free); + kfree(eq->virt_addr); + kfree(eq->dma_addr); +} + +static inline u32 get_page_size(struct hinic_eq *eq) +{ + u32 total_size; + u16 count, n = 0; + + total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE); + + if (total_size <= (HINIC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE)) + return EQ_MIN_PAGE_SIZE; + + count = (u16)(ALIGN((total_size / HINIC_EQ_MAX_PAGES), + EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE); + + if (!(count & (count - 1))) + return EQ_MIN_PAGE_SIZE * count; + + while (count) { + count >>= 1; + n++; + } + + return EQ_MIN_PAGE_SIZE << n; +} + +/** + * init_eq - initialize eq + * @eq: the event queue + * @hwdev: the pointer to hw device + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @type: the type of the event queue, ceq or aeq + * @entry: msix entry associated with the event queue + * Return: 0 - Success, Negative - failure + */ +static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id, + u32 q_len, enum hinic_eq_type type, struct irq_info *entry) +{ + int err = 0; + + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = type; + eq->eq_len = q_len; + + /* clear eq_len to force eqe drop in hardware */ + if (eq->type == HINIC_AEQ) + hinic_hwif_write_reg(eq->hwdev->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + else + set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + + eq->cons_idx = 0; + eq->wrapped = 0; + + eq->elem_size = 
(type == HINIC_AEQ) ?
+			HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
+
+	eq->page_size = get_page_size(eq);
+	eq->orig_page_size = eq->page_size;
+	eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);
+	if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+		sdk_err(hwdev->dev_hdl, "Number of pages:%d is too many for eq\n",
+			eq->num_pages);
+		return -EINVAL;
+	}
+
+	err = alloc_eq_pages(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+		return err;
+	}
+
+	eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
+	eq->eq_irq.irq_id = entry->irq_id;
+
+	err = set_eq_ctrls(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n");
+		goto init_eq_ctrls_err;
+	}
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+	if (type == HINIC_AEQ) {
+		struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+		INIT_WORK(&aeq_work->work, eq_irq_work);
+	} else {
+		tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
+			     (ulong)(&eq->ceq_tasklet_data));
+	}
+
+	if (type == HINIC_AEQ) {
+		err = snprintf(eq->irq_name, sizeof(eq->irq_name),
+			       "hinic_aeq%d@pci:%s", eq->q_id,
+			       pci_name(hwdev->pcidev_hdl));
+		if (err <= 0 || err >= (int)sizeof(eq->irq_name)) {
+			pr_err("Failed to snprintf irq_name, function returned(%d), dest_len(%d)\n",
+			       err, (int)sizeof(eq->irq_name));
+			err = -EINVAL;
+			goto req_irq_err;
+		}
+
+		err = request_irq(entry->irq_id, aeq_interrupt, 0UL,
+				  eq->irq_name, eq);
+	} else {
+		err = snprintf(eq->irq_name, sizeof(eq->irq_name),
+			       "hinic_ceq%d@pci:%s", eq->q_id,
+			       pci_name(hwdev->pcidev_hdl));
+		if (err <= 0 || err >= (int)sizeof(eq->irq_name)) {
+			pr_err("Failed to snprintf irq_name, function returned(%d), dest_len(%d)\n",
+			       err, (int)sizeof(eq->irq_name));
+			err = -EINVAL;
+			goto req_irq_err;
+		}
+
+		err = request_irq(entry->irq_id, ceq_interrupt, 0UL,
+				  eq->irq_name, eq);
+	}
+
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n",
+			err);
+		goto req_irq_err;
+	}
+
+	hinic_set_msix_state(hwdev, entry->msix_entry_idx, HINIC_MSIX_ENABLE);
+
+	return 0;
+
+init_eq_ctrls_err:
+req_irq_err:
+	free_eq_pages(eq);
+	return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq: the event queue
+ */
+static void remove_eq(struct hinic_eq *eq)
+{
+	struct irq_info *entry = &eq->eq_irq;
+
+	hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+			     HINIC_MSIX_DISABLE);
+	synchronize_irq(entry->irq_id);
+
+	free_irq(entry->irq_id, eq);
+
+	if (eq->type == HINIC_AEQ) {
+		struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+		cancel_work_sync(&aeq_work->work);
+
+		/* clear eq_len to keep hw from accessing host memory */
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	} else {
+		tasklet_kill(&eq->ceq_tasklet);
+
+		set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+	}
+
+	/* update cons_idx to avoid invalid interrupt */
+	eq->cons_idx = hinic_hwif_read_reg(eq->hwdev->hwif,
+					   EQ_PROD_IDX_REG_ADDR(eq));
+	set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+
+	free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to hw device
+ * @num_aeqs: number of AEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ */
+int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+		    struct irq_info *msix_entries)
+{
+	struct hinic_aeqs *aeqs;
+	int err;
+	u16 i, q_id;
+	u32 aeq_len;
+
+	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+	if (!aeqs)
+		return -ENOMEM;
+
+	hwdev->aeqs = aeqs;
+	aeqs->hwdev = hwdev;
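+	/* AEQ interrupts only queue work; the single-threaded workqueue
+	 * created below serializes event handling across all AEQs.
+	 */
+	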
aeqs->num_aeqs = num_aeqs; + + aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); + if (!aeqs->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); + err = -ENOMEM; + goto create_work_err; + } + + if (g_aeq_len < HINIC_MIN_AEQ_LEN || g_aeq_len > HINIC_MAX_AEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range, resetting to %d\n", + g_aeq_len, HINIC_DEFAULT_AEQ_LEN); + g_aeq_len = HINIC_DEFAULT_AEQ_LEN; + } + + if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF && + hwdev->hwif->chip_mode != CHIP_MODE_NORMAL) + aeq_len = HINIC_VMGW_DEFAULT_AEQ_LEN; + else + aeq_len = g_aeq_len; + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, aeq_len, + HINIC_AEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n", + q_id); + goto init_aeq_err; + } + } + + return 0; + +init_aeq_err: + for (i = 0; i < q_id; i++) + remove_eq(&aeqs->aeq[i]); + + destroy_workqueue(aeqs->workq); + +create_work_err: + kfree(aeqs); + + return err; +} + +/** + * hinic_aeqs_free - free all the aeqs + * @hwdev: the pointer to hw device + */ +void hinic_aeqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + enum hinic_aeq_type aeq_event = HINIC_HW_INTER_INT; + enum hinic_aeq_sw_type sw_aeq_event = HINIC_STATELESS_EVENT; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + for (; sw_aeq_event < HINIC_MAX_AEQ_SW_EVENTS; sw_aeq_event++) + hinic_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (; aeq_event < HINIC_MAX_AEQ_EVENTS; aeq_event++) + hinic_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +/** + * hinic_ceqs_init - init all the ceqs + * @hwdev: the pointer to hw device + * @num_ceqs: number of CEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + */ +int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries) +{ + struct hinic_ceqs *ceqs; + int err; + u16 i, q_id; + u32 ceq_len; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + if (g_ceq_len < HINIC_MIN_CEQ_LEN || g_ceq_len > HINIC_MAX_CEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range, resetting to %d\n", + g_ceq_len, HINIC_DEFAULT_CEQ_LEN); + g_ceq_len = HINIC_DEFAULT_CEQ_LEN; + } + + if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF && + hwdev->hwif->chip_mode != CHIP_MODE_NORMAL) + ceq_len = HINIC_VMGW_DEFAULT_CEQ_LEN; + else + ceq_len = g_ceq_len; + + if (!g_num_ceqe_in_tasklet) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", + HINIC_TASK_PROCESS_EQE_LIMIT); + g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT; + } + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, ceq_len, + HINIC_CEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n", + q_id); + goto init_ceq_err; + } + } + + return 0; + +init_ceq_err: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + kfree(ceqs); + + return err; +} + +/** + * hinic_ceqs_free - free all the ceqs + * @hwdev: the pointer to hw device + */ +void hinic_ceqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_ceqs *ceqs = hwdev->ceqs; + enum hinic_ceq_event ceq_event = HINIC_CMDQ; + u16 q_id; + + for (q_id = 0; q_id < 
ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); + + for (; ceq_event < HINIC_MAX_CEQ_EVENTS; ceq_event++) + hinic_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} + +void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hinic_ceqs *ceqs = hwdev->ceqs; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + ceqs->ceq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = ceqs->num_ceqs; +} + + +void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + aeqs->aeq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = aeqs->num_aeqs; +} + + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_aeq_elem *aeqe_pos; + struct hinic_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n", + q_id, ci, pi, work_busy(&eq->aeq_work.work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc)); + } +} + +void hinic_dump_ceq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) { + eq = &hwdev->ceqs->ceq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n", + q_id, ci, eq->cons_idx, pi, + tasklet_state(&eq->ceq_tasklet), + eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq)))); + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->hard_intr_jif)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->soft_intr_jif)); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h new file mode 100644 index 000000000000..4c3932eb973f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_EQS_H +#define HINIC_EQS_H + +#include + +#define HINIC_EQ_PAGE_SIZE 0x00001000 + +#define HINIC_MAX_AEQS 3 +#define HINIC_MAX_CEQS 32 + +#define HINIC_EQ_MAX_PAGES 8 + +#define HINIC_AEQE_SIZE 64 +#define HINIC_CEQE_SIZE 4 + +#define HINIC_AEQE_DESC_SIZE 4 +#define HINIC_AEQE_DATA_SIZE \ + (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) + +#define HINIC_DEFAULT_AEQ_LEN 0x10000 +#define HINIC_DEFAULT_CEQ_LEN 0x10000 + +#define HINIC_VMGW_DEFAULT_AEQ_LEN 128 +#define HINIC_VMGW_DEFAULT_CEQ_LEN 1024 + +#define HINIC_MIN_AEQ_LEN 64 +#define HINIC_MAX_AEQ_LEN (512 * 1024) +#define HINIC_MIN_CEQ_LEN 64 +#define HINIC_MAX_CEQ_LEN (1024 * 1024) + +#define HINIC_CEQ_ID_CMDQ 0 + +#define EQ_IRQ_NAME_LEN 64 + +enum hinic_eq_type { + HINIC_AEQ, + HINIC_CEQ +}; + +enum hinic_eq_intr_mode { + HINIC_INTR_MODE_ARMED, + HINIC_INTR_MODE_ALWAYS, +}; + +enum hinic_eq_ci_arm_state { + HINIC_EQ_NOT_ARMED, + HINIC_EQ_ARMED, +}; + +struct hinic_eq_work { + struct work_struct work; + void *data; +}; + +struct hinic_ceq_tasklet_data { + void *data; +}; + +struct hinic_eq { + struct hinic_hwdev *hwdev; + u16 q_id; + enum hinic_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + dma_addr_t *dma_addr; + u8 **virt_addr; + dma_addr_t *dma_addr_for_free; + u8 **virt_addr_for_free; + + struct hinic_eq_work aeq_work; + struct tasklet_struct ceq_tasklet; + struct hinic_ceq_tasklet_data ceq_tasklet_data; + + u64 hard_intr_jif; + u64 soft_intr_jif; +}; + +struct hinic_aeq_elem { + u8 aeqe_data[HINIC_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum hinic_aeq_cb_state { + HINIC_AEQ_HW_CB_REG = 0, + HINIC_AEQ_HW_CB_RUNNING, + HINIC_AEQ_SW_CB_REG, + HINIC_AEQ_SW_CB_RUNNING, +}; + +struct hinic_aeqs { + struct hinic_hwdev *hwdev; + + hinic_aeq_hwe_cb aeq_hwe_cb[HINIC_MAX_AEQ_EVENTS]; + hinic_aeq_swe_cb aeq_swe_cb[HINIC_MAX_AEQ_SW_EVENTS]; + unsigned long aeq_hw_cb_state[HINIC_MAX_AEQ_EVENTS]; + unsigned long aeq_sw_cb_state[HINIC_MAX_AEQ_SW_EVENTS]; + + struct hinic_eq aeq[HINIC_MAX_AEQS]; + u16 num_aeqs; + + struct workqueue_struct *workq; +}; + +enum hinic_ceq_cb_state { + HINIC_CEQ_CB_REG = 0, + HINIC_CEQ_CB_RUNNING, +}; + +struct hinic_ceqs { + struct hinic_hwdev *hwdev; + + hinic_ceq_event_cb ceq_cb[HINIC_MAX_CEQ_EVENTS]; + void *ceq_data[HINIC_MAX_CEQ_EVENTS]; + unsigned long ceq_cb_state[HINIC_MAX_CEQ_EVENTS]; + + struct hinic_eq ceq[HINIC_MAX_CEQS]; + u16 num_ceqs; +}; + +enum hinic_msg_pipe_state { + PIPE_STATE_IDLE, + PIPE_STATE_BUSY, + PIPE_STATE_SUSPEND, +}; + +#define PIPE_CYCLE_MAX 10000 + +u32 hinic_func_busy_state_get(struct hinic_hwdev *hwdev); + +void hinic_func_busy_state_set(struct hinic_hwdev *hwdev, u32 cfg); + +u32 hinic_func_own_bit_get(struct hinic_hwdev *hwdev); + +void hinic_func_own_bit_set(struct hinic_hwdev *hwdev, u32 cfg); + +int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries); + +void hinic_aeqs_free(struct hinic_hwdev *hwdev); + +int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries); + +void hinic_ceqs_free(struct hinic_hwdev *hwdev); + +void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic_dump_ceq_info(struct hinic_hwdev *hwdev); + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev); +
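/*
 * The geometry fields of struct hinic_eq above (eq_len, elem_size, page_size,
 * num_pages, num_elem_in_pg) are presumably related as sketched below; the
 * helpers are hypothetical illustrations, not driver API. The driver also
 * bounds num_pages by HINIC_EQ_MAX_PAGES, presumably by growing page_size
 * beyond HINIC_EQ_PAGE_SIZE (hence the separate orig_page_size field).
 */
#include <stdio.h>

#define HINIC_EQ_PAGE_SIZE 0x00001000
#define HINIC_AEQE_SIZE 64

static unsigned int eq_elems_per_page(unsigned int page_size,
				      unsigned int elem_size)
{
	return page_size / elem_size;
}

static unsigned int eq_num_pages(unsigned int eq_len, unsigned int elem_size,
				 unsigned int page_size)
{
	return (eq_len * elem_size + page_size - 1) / page_size;
}

int main(void)
{
	/* A 4096-entry AEQ of 64-byte elements: 64 pages, 64 elements each. */
	printf("%u pages, %u elems/page\n",
	       eq_num_pages(4096, HINIC_AEQE_SIZE, HINIC_EQ_PAGE_SIZE),
	       eq_elems_per_page(HINIC_EQ_PAGE_SIZE, HINIC_AEQE_SIZE));
	return 0;
}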
+#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 60ec48fe4144..6dd95e68ef7d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -1,10 +1,11 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Huawei HiNIC PCI Express Linux driver +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or @@ -12,6 +13,7 @@ * for more details. * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt #include #include @@ -26,427 +28,120 @@ #include #include -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" -#include "hinic_port.h" +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_dfx_def.h" #include "hinic_tx.h" #include "hinic_rx.h" -#include "hinic_dev.h" +#include "hinic_qp.h" -static void set_link_speed(struct ethtool_link_ksettings *link_ksettings, - enum hinic_speed speed) -{ - switch (speed) { - case HINIC_SPEED_10MB_LINK: - link_ksettings->base.speed = SPEED_10; - break; +#ifndef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) \ + ((netdev)->ethtool_ops = (ops)) +#endif - case HINIC_SPEED_100MB_LINK: - link_ksettings->base.speed = SPEED_100; - break; +struct hinic_stats { + char name[ETH_GSTRING_LEN]; + u32 size; + int offset; +}; - case HINIC_SPEED_1000MB_LINK: - link_ksettings->base.speed = SPEED_1000; - break; +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0]))) - case HINIC_SPEED_10GB_LINK: - link_ksettings->base.speed = SPEED_10000; - break; - - case HINIC_SPEED_25GB_LINK: - link_ksettings->base.speed = SPEED_25000; - break; - - case HINIC_SPEED_40GB_LINK: - link_ksettings->base.speed = SPEED_40000; - break; - - case HINIC_SPEED_100GB_LINK: - link_ksettings->base.speed = SPEED_100000; - break; - - default: - link_ksettings->base.speed = SPEED_UNKNOWN; - break; - } +#define HINIC_NETDEV_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct rtnl_link_stats64, _stat_item), \ + .offset = offsetof(struct rtnl_link_stats64, _stat_item) \ } -static int hinic_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings - *link_ksettings) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - enum hinic_port_link_state link_state; - struct hinic_port_cap port_cap; - int err; +static struct hinic_stats hinic_netdev_stats[] = { + HINIC_NETDEV_STAT(rx_packets), + HINIC_NETDEV_STAT(tx_packets), + HINIC_NETDEV_STAT(rx_bytes), + HINIC_NETDEV_STAT(tx_bytes), + HINIC_NETDEV_STAT(rx_errors), + HINIC_NETDEV_STAT(tx_errors), + HINIC_NETDEV_STAT(rx_dropped), + HINIC_NETDEV_STAT(tx_dropped), + HINIC_NETDEV_STAT(multicast), + HINIC_NETDEV_STAT(collisions), + HINIC_NETDEV_STAT(rx_length_errors), + HINIC_NETDEV_STAT(rx_over_errors), + HINIC_NETDEV_STAT(rx_crc_errors), + HINIC_NETDEV_STAT(rx_frame_errors), + HINIC_NETDEV_STAT(rx_fifo_errors), + 
HINIC_NETDEV_STAT(rx_missed_errors), + HINIC_NETDEV_STAT(tx_aborted_errors), + HINIC_NETDEV_STAT(tx_carrier_errors), + HINIC_NETDEV_STAT(tx_fifo_errors), + HINIC_NETDEV_STAT(tx_heartbeat_errors), +}; - ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - ethtool_link_ksettings_add_link_mode(link_ksettings, supported, - Autoneg); - - link_ksettings->base.speed = SPEED_UNKNOWN; - link_ksettings->base.autoneg = AUTONEG_DISABLE; - link_ksettings->base.duplex = DUPLEX_UNKNOWN; - - err = hinic_port_get_cap(nic_dev, &port_cap); - if (err) - return err; - - err = hinic_port_link_state(nic_dev, &link_state); - if (err) - return err; - - if (link_state != HINIC_LINK_STATE_UP) - return err; - - set_link_speed(link_ksettings, port_cap.speed); - - if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED)) - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, Autoneg); - - if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE) - link_ksettings->base.autoneg = AUTONEG_ENABLE; - - link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ? - DUPLEX_FULL : DUPLEX_HALF; - return 0; +#define HINIC_NIC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_nic_stats, _stat_item), \ + .offset = offsetof(struct hinic_nic_stats, _stat_item) \ } -static void hinic_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *info) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - int err; +static struct hinic_stats hinic_nic_dev_stats[] = { + HINIC_NIC_STAT(netdev_tx_timeout), +}; - strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); +static struct hinic_stats hinic_nic_dev_stats_extern[] = { + HINIC_NIC_STAT(tx_carrier_off_drop), + HINIC_NIC_STAT(tx_invalid_qid), +}; - err = hinic_get_mgmt_version(nic_dev, mgmt_ver); - if (err) - return; - - snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); +#define HINIC_RXQ_STAT(_stat_item) { \ + .name = "rxq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \ + .offset = offsetof(struct hinic_rxq_stats, _stat_item) \ } -static void hinic_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - ring->rx_max_pending = HINIC_RQ_DEPTH; - ring->tx_max_pending = HINIC_SQ_DEPTH; - ring->rx_pending = HINIC_RQ_DEPTH; - ring->tx_pending = HINIC_SQ_DEPTH; +#define HINIC_TXQ_STAT(_stat_item) { \ + .name = "txq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \ + .offset = offsetof(struct hinic_txq_stats, _stat_item) \ } -static void hinic_get_channels(struct net_device *netdev, - struct ethtool_channels *channels) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; +static struct hinic_stats hinic_rx_queue_stats[] = { + HINIC_RXQ_STAT(packets), + HINIC_RXQ_STAT(bytes), + HINIC_RXQ_STAT(errors), + HINIC_RXQ_STAT(csum_errors), + HINIC_RXQ_STAT(other_errors), + HINIC_RXQ_STAT(dropped), +}; - channels->max_rx = hwdev->nic_cap.max_qps; - channels->max_tx = hwdev->nic_cap.max_qps; - channels->max_other = 0; - channels->max_combined = 0; - channels->rx_count = hinic_hwdev_num_qps(hwdev); - channels->tx_count = hinic_hwdev_num_qps(hwdev); - channels->other_count = 0; - channels->combined_count = 0; -} +static struct hinic_stats hinic_rx_queue_stats_extern[] = { + 
HINIC_RXQ_STAT(alloc_skb_err), +}; -static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev, - struct ethtool_rxnfc *cmd) -{ - struct hinic_rss_type rss_type = { 0 }; - int err; +static struct hinic_stats hinic_tx_queue_stats[] = { + HINIC_TXQ_STAT(packets), + HINIC_TXQ_STAT(bytes), + HINIC_TXQ_STAT(busy), + HINIC_TXQ_STAT(wake), + HINIC_TXQ_STAT(dropped), + HINIC_TXQ_STAT(big_frags_pkts), + HINIC_TXQ_STAT(big_udp_pkts), +}; - cmd->data = 0; - - if (!(nic_dev->flags & HINIC_RSS_ENABLE)) - return 0; - - err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx, - &rss_type); - if (err) - return err; - - cmd->data = RXH_IP_SRC | RXH_IP_DST; - switch (cmd->flow_type) { - case TCP_V4_FLOW: - if (rss_type.tcp_ipv4) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case TCP_V6_FLOW: - if (rss_type.tcp_ipv6) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V4_FLOW: - if (rss_type.udp_ipv4) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V6_FLOW: - if (rss_type.udp_ipv6) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case IPV4_FLOW: - case IPV6_FLOW: - break; - default: - cmd->data = 0; - return -EINVAL; - } - - return 0; -} - -static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, - struct hinic_rss_type *rss_type) -{ - u8 rss_l4_en = 0; - - switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - rss_l4_en = 0; - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - rss_l4_en = 1; - break; - default: - return -EINVAL; - } - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - rss_type->tcp_ipv4 = rss_l4_en; - break; - case TCP_V6_FLOW: - rss_type->tcp_ipv6 = rss_l4_en; - break; - case UDP_V4_FLOW: - rss_type->udp_ipv4 = rss_l4_en; - break; - case UDP_V6_FLOW: - rss_type->udp_ipv6 = rss_l4_en; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev, - struct ethtool_rxnfc *cmd) -{ - struct hinic_rss_type *rss_type = &nic_dev->rss_type; - int err; - - if (!(nic_dev->flags & HINIC_RSS_ENABLE)) { - cmd->data = 0; - return -EOPNOTSUPP; - } - - /* RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | - RXH_L4_B_2_3)) - return -EINVAL; - - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) - return -EINVAL; - - err = hinic_get_rss_type(nic_dev, - nic_dev->rss_tmpl_idx, rss_type); - if (err) - return -EFAULT; - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - case UDP_V4_FLOW: - case UDP_V6_FLOW: - err = set_l4_rss_hash_ops(cmd, rss_type); - if (err) - return err; - break; - case IPV4_FLOW: - rss_type->ipv4 = 1; - break; - case IPV6_FLOW: - rss_type->ipv6 = 1; - break; - default: - return -EINVAL; - } - - err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx, - *rss_type); - if (err) - return -EFAULT; - - return 0; -} - -static int __set_rss_rxfh(struct net_device *netdev, - const u32 *indir, const u8 *key) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int err; - - if (indir) { - if (!nic_dev->rss_indir_user) { - nic_dev->rss_indir_user = - kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, - GFP_KERNEL); - if (!nic_dev->rss_indir_user) - return -ENOMEM; - } - - memcpy(nic_dev->rss_indir_user, indir, - sizeof(u32) * HINIC_RSS_INDIR_SIZE); - - err = hinic_rss_set_indir_tbl(nic_dev, - nic_dev->rss_tmpl_idx, indir); - if (err) - return -EFAULT; - } - - if (key) { - if (!nic_dev->rss_hkey_user) 
{ - nic_dev->rss_hkey_user = - kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL); - - if (!nic_dev->rss_hkey_user) - return -ENOMEM; - } - - memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE); - - err = hinic_rss_set_template_tbl(nic_dev, - nic_dev->rss_tmpl_idx, key); - if (err) - return -EFAULT; - } - - return 0; -} - -static int hinic_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd, u32 *rule_locs) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = nic_dev->num_qps; - break; - case ETHTOOL_GRXFH: - err = hinic_get_rss_hash_opts(nic_dev, cmd); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - err = hinic_set_rss_hash_opts(nic_dev, cmd); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int hinic_get_rxfh(struct net_device *netdev, - u32 *indir, u8 *key, u8 *hfunc) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - u8 hash_engine_type = 0; - int err = 0; - - if (!(nic_dev->flags & HINIC_RSS_ENABLE)) - return -EOPNOTSUPP; - - if (hfunc) { - err = hinic_rss_get_hash_engine(nic_dev, - nic_dev->rss_tmpl_idx, - &hash_engine_type); - if (err) - return -EFAULT; - - *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; - } - - if (indir) { - err = hinic_rss_get_indir_tbl(nic_dev, - nic_dev->rss_tmpl_idx, indir); - if (err) - return -EFAULT; - } - - if (key) - err = hinic_rss_get_template_tbl(nic_dev, - nic_dev->rss_tmpl_idx, key); - - return err; -} - -static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - if (!(nic_dev->flags & HINIC_RSS_ENABLE)) - return -EOPNOTSUPP; - - if (hfunc != ETH_RSS_HASH_NO_CHANGE) { - if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) - return -EOPNOTSUPP; - - nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? 
- HINIC_RSS_HASH_ENGINE_TYPE_XOR : - HINIC_RSS_HASH_ENGINE_TYPE_TOEP; - err = hinic_rss_set_hash_engine - (nic_dev, nic_dev->rss_tmpl_idx, - nic_dev->rss_hash_engine); - if (err) - return -EFAULT; - } - - err = __set_rss_rxfh(netdev, indir, key); - - return err; -} - -static u32 hinic_get_rxfh_key_size(struct net_device *netdev) -{ - return HINIC_RSS_KEY_SIZE; -} - -static u32 hinic_get_rxfh_indir_size(struct net_device *netdev) -{ - return HINIC_RSS_INDIR_SIZE; -} - -#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0]))) +static struct hinic_stats hinic_tx_queue_stats_extern[] = { + HINIC_TXQ_STAT(ufo_pkt_unsupport), + HINIC_TXQ_STAT(ufo_linearize_err), + HINIC_TXQ_STAT(ufo_alloc_skb_err), + HINIC_TXQ_STAT(skb_pad_err), + HINIC_TXQ_STAT(frag_len_overflow), + HINIC_TXQ_STAT(offload_cow_skb_err), + HINIC_TXQ_STAT(alloc_cpy_frag_err), + HINIC_TXQ_STAT(map_cpy_frag_err), + HINIC_TXQ_STAT(map_frag_err), + HINIC_TXQ_STAT(frag_size_err), +}; #define HINIC_FUNC_STAT(_stat_item) { \ .name = #_stat_item, \ @@ -569,36 +264,1304 @@ static struct hinic_stats hinic_port_stats[] = { HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), }; -#define HINIC_TXQ_STAT(_stat_item) { \ - .name = "txq%d_"#_stat_item, \ - .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \ - .offset = offsetof(struct hinic_txq_stats, _stat_item) \ +u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev) +{ + return ARRAY_LEN(hinic_nic_dev_stats) + + ARRAY_LEN(hinic_nic_dev_stats_extern) + + (ARRAY_LEN(hinic_tx_queue_stats) + + ARRAY_LEN(hinic_tx_queue_stats_extern) + + ARRAY_LEN(hinic_rx_queue_stats) + + ARRAY_LEN(hinic_rx_queue_stats_extern)) * nic_dev->max_qps; } -static struct hinic_stats hinic_tx_queue_stats[] = { - HINIC_TXQ_STAT(pkts), - HINIC_TXQ_STAT(bytes), - HINIC_TXQ_STAT(tx_busy), - HINIC_TXQ_STAT(tx_wake), - HINIC_TXQ_STAT(tx_dropped), - HINIC_TXQ_STAT(big_frags_pkts), -}; +#define GET_VALUE_OF_PTR(size, ptr) ( \ + (size) == sizeof(u64) ? *(u64 *)(ptr) : \ + (size) == sizeof(u32) ? *(u32 *)(ptr) : \ + (size) == sizeof(u16) ? 
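/*
 * The GET_VALUE_OF_PTR macro being defined at this point, together with the
 * {name, size, offset} hinic_stats descriptors declared earlier, implements
 * a generic "read a stat field of unknown width from a stats struct" pattern.
 * A self-contained sketch of the same idea in plain C; the struct,
 * descriptors, and helper are illustrative assumptions, not driver API.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats { uint64_t packets; uint32_t errors; };

struct stat_desc { const char *name; size_t size; size_t offset; };

static const struct stat_desc descs[] = {
	{ "packets", sizeof(uint64_t), offsetof(struct demo_stats, packets) },
	{ "errors",  sizeof(uint32_t), offsetof(struct demo_stats, errors)  },
};

static uint64_t read_stat(const void *base, const struct stat_desc *d)
{
	const char *p = (const char *)base + d->offset;

	/* Width-dispatched read, as GET_VALUE_OF_PTR does for u64/u32/u16/u8. */
	return d->size == sizeof(uint64_t) ? *(const uint64_t *)p
					   : *(const uint32_t *)p;
}

int main(void)
{
	struct demo_stats s = { .packets = 42, .errors = 3 };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s = %llu\n", descs[i].name,
		       (unsigned long long)read_stat(&s, &descs[i]));
	return 0;
}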
*(u16 *)(ptr) : *(u8 *)(ptr) \ +) -#define HINIC_RXQ_STAT(_stat_item) { \ - .name = "rxq%d_"#_stat_item, \ - .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \ - .offset = offsetof(struct hinic_rxq_stats, _stat_item) \ +#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + HINIC_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + item_idx++; \ + } \ +} while (0) + +#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) do { \ + int j, err; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + HINIC_SHOW_ITEM_LEN); \ + err = snprintf((items)[item_idx].name, HINIC_SHOW_ITEM_LEN,\ + (array)[j].name, (qid)); \ + if (err <= 0 || err >= HINIC_SHOW_ITEM_LEN) \ + pr_err("Failed snprintf: func_ret(%d), dest_len(%d)\n",\ + err, HINIC_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + item_idx++; \ + } \ +} while (0) + +void hinic_get_io_stats(struct hinic_nic_dev *nic_dev, + struct hinic_show_item *items) +{ + int item_idx = 0; + u16 qid; + + DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats, &nic_dev->stats); + DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats_extern, + &nic_dev->stats); + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats, + &nic_dev->txqs[qid].txq_stats, qid); + QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats_extern, + &nic_dev->txqs[qid].txq_stats, qid); + } + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats, + &nic_dev->rxqs[qid].rxq_stats, qid); + QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats_extern, + &nic_dev->rxqs[qid].rxq_stats, qid); + } } -static struct hinic_stats hinic_rx_queue_stats[] = { - HINIC_RXQ_STAT(pkts), - HINIC_RXQ_STAT(bytes), - HINIC_RXQ_STAT(errors), - HINIC_RXQ_STAT(csum_errors), - HINIC_RXQ_STAT(other_errors), +#define LP_DEFAULT_TIME (5) /* seconds */ +#define LP_PKT_LEN (1514) +#define OBJ_STR_MAX_LEN (32) +#define SET_LINK_STR_MAX_LEN (128) + +#define PORT_DOWN_ERR_IDX 0 +enum diag_test_index { + INTERNAL_LP_TEST = 0, + EXTERNAL_LP_TEST = 1, + DIAG_TEST_MAX = 2, }; -static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) +static char hinic_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +struct hw2ethtool_link_mode { + u32 supported; + u32 advertised; + enum ethtool_link_mode_bit_indices link_mode_bit; + u32 speed; + enum hinic_link_mode hw_link_mode; +}; + +static struct hw2ethtool_link_mode + hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = { + { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + .speed = SPEED_10000, + .hw_link_mode = HINIC_10GE_BASE_KR, + }, + { + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + .speed = SPEED_40000, + .hw_link_mode = HINIC_40GE_BASE_KR4, + }, + { + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .link_mode_bit = 
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + .speed = SPEED_40000, + .hw_link_mode = HINIC_40GE_BASE_CR4, + }, + { + .supported = SUPPORTED_100000baseKR4_Full, + .advertised = ADVERTISED_100000baseKR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = SPEED_100000, + .hw_link_mode = HINIC_100GE_BASE_KR4, + }, + { + .supported = SUPPORTED_100000baseCR4_Full, + .advertised = ADVERTISED_100000baseCR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = SPEED_100000, + .hw_link_mode = HINIC_100GE_BASE_CR4, + }, + { + .supported = SUPPORTED_25000baseKR_Full, + .advertised = ADVERTISED_25000baseKR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_KR_S, + }, + { + .supported = SUPPORTED_25000baseCR_Full, + .advertised = ADVERTISED_25000baseCR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_CR_S, + }, + { + .supported = SUPPORTED_25000baseKR_Full, + .advertised = ADVERTISED_25000baseKR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_KR, + }, + { + .supported = SUPPORTED_25000baseCR_Full, + .advertised = ADVERTISED_25000baseCR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_CR, + }, + { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = SPEED_1000, + .hw_link_mode = HINIC_GE_BASE_KX, + }, +}; + +u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = { + SPEED_10, SPEED_100, + SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, + SPEED_100000 +}; + +static int hinic_ethtool_to_hw_speed_level(u32 speed) +{ + int i; + + for (i = 0; i < LINK_SPEED_LEVELS; i++) { + if (hw_to_ethtool_speed[i] == speed) + break; + } + + return i; +} + +static int hinic_get_link_mode_index(enum hinic_link_mode link_mode) +{ + int i = 0; + + for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) { + if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode) + break; + } + + return i; +} + +static int hinic_is_support_speed(enum hinic_link_mode supported_link, + u32 speed) +{ + enum hinic_link_mode link_mode; + int idx; + + for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) { + if (!(supported_link & ((u32)1 << link_mode))) + continue; + + idx = hinic_get_link_mode_index(link_mode); + if (idx >= HINIC_LINK_MODE_NUMBERS) + continue; + + if (hw_to_ethtool_link_mode_table[idx].speed == speed) + return 1; + } + + return 0; +} + +#define GET_SUPPORTED_MODE 0 +#define GET_ADVERTISED_MODE 1 + +struct cmd_link_settings { + u64 supported; + u64 advertising; + + u32 speed; + u8 duplex; + u8 port; + u8 autoneg; +}; + +#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \ + ((ecmd)->supported |= \ + (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit)) +#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \ + ((ecmd)->advertising |= \ + (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit)) + +#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ + ((ecmd)->supported |= SUPPORTED_##mode) +#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ + ((ecmd)->advertising |= ADVERTISED_##mode) +#define ETHTOOL_TEST_LINK_MODE_SUPPORTED(ecmd, mode) \ + ((ecmd)->supported & SUPPORTED_##mode) + +static void hinic_link_port_type(struct cmd_link_settings *link_settings, +
enum hinic_port_type port_type) +{ + switch (port_type) { + case HINIC_PORT_ELEC: + case HINIC_PORT_TP: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); + link_settings->port = PORT_TP; + break; + + case HINIC_PORT_AOC: + case HINIC_PORT_FIBRE: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_FIBRE; + break; + + case HINIC_PORT_COPPER: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_DA; + break; + + case HINIC_PORT_BACKPLANE: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); + link_settings->port = PORT_NONE; + break; + + default: + link_settings->port = PORT_OTHER; + break; + } +} + +static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings, + enum hinic_link_mode hw_link_mode, + u32 name) +{ + enum hinic_link_mode link_mode; + int idx = 0; + + for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) { + if (hw_link_mode & ((u32)1 << link_mode)) { + idx = hinic_get_link_mode_index(link_mode); + if (idx >= HINIC_LINK_MODE_NUMBERS) + continue; + + if (name == GET_SUPPORTED_MODE) + ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE + (link_settings, idx); + else + ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE + (link_settings, idx); + } + } +} + +static int hinic_link_speed_set(struct hinic_nic_dev *nic_dev, + struct cmd_link_settings *link_settings, + struct nic_port_info *port_info) +{ + struct net_device *netdev = nic_dev->netdev; + enum hinic_link_mode supported_link = 0, advertised_link = 0; + u8 link_state = 0; + int err; + + err = hinic_get_link_mode(nic_dev->hwdev, + &supported_link, &advertised_link); + if (err || supported_link == HINIC_SUPPORTED_UNKNOWN || + advertised_link == HINIC_SUPPORTED_UNKNOWN) { + nicif_err(nic_dev, drv, netdev, "Failed to get supported link modes\n"); + return err; + } + + hinic_add_ethtool_link_mode(link_settings, supported_link, + GET_SUPPORTED_MODE); + hinic_add_ethtool_link_mode(link_settings, advertised_link, + GET_ADVERTISED_MODE); + + err = hinic_get_link_state(nic_dev->hwdev, &link_state); + if (!err && link_state) { + link_settings->speed = port_info->speed < LINK_SPEED_LEVELS ? 
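/*
 * hinic_add_ethtool_link_mode() above walks a hardware link-mode bitmap and
 * sets the matching ethtool link-mode bit via the translation table. A
 * compilable sketch of that translation; table contents and helper names are
 * illustrative assumptions, not the driver's full table.
 */
#include <stdint.h>
#include <stdio.h>

struct mode_map { unsigned int hw_bit; unsigned int ethtool_bit; };

static const struct mode_map map[] = {
	{ 0, 23 },	/* e.g. HINIC_10GE_BASE_KR -> 10000baseKR/Full */
	{ 1, 24 },	/* e.g. HINIC_40GE_BASE_KR4 -> 40000baseKR4/Full */
};

static uint64_t hw_to_ethtool(uint32_t hw_bitmap)
{
	uint64_t out = 0;
	size_t i;

	/* For every hardware bit that is set, raise the mapped ethtool bit. */
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (hw_bitmap & (1U << map[i].hw_bit))
			out |= 1ULL << map[i].ethtool_bit;
	return out;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)hw_to_ethtool(0x3));
	return 0;
}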
+ hw_to_ethtool_speed[port_info->speed] : + (u32)SPEED_UNKNOWN; + + link_settings->duplex = port_info->duplex; + } else { + link_settings->speed = (u32)SPEED_UNKNOWN; + link_settings->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int get_link_settings(struct net_device *netdev, + struct cmd_link_settings *link_settings) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + struct nic_pause_config nic_pause = {0}; + int err; + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get port info\n"); + return err; + } + + err = hinic_link_speed_set(nic_dev, link_settings, &port_info); + if (err) + return err; + + hinic_link_port_type(link_settings, port_info.port_type); + + link_settings->autoneg = port_info.autoneg_state; + if (port_info.autoneg_cap) + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg); + if (port_info.autoneg_state) + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg); + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get pauseparam from hw\n"); + return err; + } + + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause); + if (nic_pause.rx_pause && nic_pause.tx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + } else if (nic_pause.tx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, + Asym_Pause); + } else if (nic_pause.rx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, + Asym_Pause); + } + } + + return 0; +} + +#ifndef HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY +static int hinic_get_settings(struct net_device *netdev, + struct ethtool_cmd *ep) +{ + struct cmd_link_settings settings = {0}; + int err; + + err = get_link_settings(netdev, &settings); + if (err) + return err; + + ep->supported = settings.supported & ((u32)~0); + ep->advertising = settings.advertising & ((u32)~0); + + ep->autoneg = settings.autoneg; + ethtool_cmd_speed_set(ep, settings.speed); + ep->duplex = settings.duplex; + ep->port = settings.port; + ep->transceiver = XCVR_INTERNAL; + + return 0; +} +#endif + +#ifdef ETHTOOL_GLINKSETTINGS +static int hinic_get_link_ksettings( + struct net_device *netdev, + struct ethtool_link_ksettings *link_settings) +{ + struct cmd_link_settings settings = {0}; + struct ethtool_link_settings *base = &link_settings->base; + int err; + + ethtool_link_ksettings_zero_link_mode(link_settings, supported); + ethtool_link_ksettings_zero_link_mode(link_settings, advertising); + + err = get_link_settings(netdev, &settings); + if (err) + return err; + + bitmap_copy(link_settings->link_modes.supported, + (unsigned long *)&settings.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(link_settings->link_modes.advertising, + (unsigned long *)&settings.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + base->autoneg = settings.autoneg; + base->speed = settings.speed; + base->duplex = settings.duplex; + base->port = settings.port; + + return 0; +} +#endif + +static int hinic_is_speed_legal(struct hinic_nic_dev *nic_dev, u32 speed) +{ + struct net_device *netdev = nic_dev->netdev; + enum hinic_link_mode supported_link = 0, advertised_link = 0; + enum nic_speed_level speed_level = 0; + int err; + + err = hinic_get_link_mode(nic_dev->hwdev, + &supported_link, &advertised_link); + if (err || supported_link == HINIC_SUPPORTED_UNKNOWN || + advertised_link == 
HINIC_SUPPORTED_UNKNOWN) { + nicif_err(nic_dev, drv, netdev, + "Failed to get supported link modes\n"); + return -EAGAIN; + } + + speed_level = hinic_ethtool_to_hw_speed_level(speed); + if (speed_level >= LINK_SPEED_LEVELS || + !hinic_is_support_speed(supported_link, speed)) { + nicif_err(nic_dev, drv, netdev, + "Not supported speed: %d\n", speed); + return -EINVAL; + } + + return 0; +} + +static int hinic_set_settings_to_hw(struct hinic_nic_dev *nic_dev, + u32 set_settings, u8 autoneg, u32 speed) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_link_ksettings settings = {0}; + enum nic_speed_level speed_level = 0; + char set_link_str[SET_LINK_STR_MAX_LEN] = {0}; + int err = 0; + + err = snprintf(set_link_str, sizeof(set_link_str), "%s", + (set_settings & HILINK_LINK_SET_AUTONEG) ? + (autoneg ? "autoneg enable " : "autoneg disable ") : ""); + if (err < 0 || err >= SET_LINK_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf link state, function return(%d) and dest_len(%d)\n", + err, SET_LINK_STR_MAX_LEN); + return -EFAULT; + } + if (set_settings & HILINK_LINK_SET_SPEED) { + speed_level = hinic_ethtool_to_hw_speed_level(speed); + err = snprintf(set_link_str + strlen(set_link_str), + sizeof(set_link_str) - strlen(set_link_str), + "speed %d ", speed); + if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", + err, SET_LINK_STR_MAX_LEN); + return -EFAULT; + } + } + + settings.valid_bitmap = set_settings; + settings.autoneg = autoneg; + settings.speed = speed_level; + + err = hinic_set_link_settings(nic_dev->hwdev, &settings); + if (err != HINIC_MGMT_CMD_UNSUPPORTED) { + if (err) + nicif_err(nic_dev, drv, netdev, "Set %sfailed\n", + set_link_str); + else + nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n", + set_link_str); + + return err; + } + + if (set_settings & HILINK_LINK_SET_AUTONEG) { + err = hinic_set_autoneg(nic_dev->hwdev, + (autoneg == AUTONEG_ENABLE)); + if (err) + nicif_err(nic_dev, drv, netdev, "%s autoneg failed\n", + (autoneg == AUTONEG_ENABLE) ? + "Enable" : "Disable"); + else + nicif_info(nic_dev, drv, netdev, "%s autoneg success\n", + (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable"); + } + + if (!err && (set_settings & HILINK_LINK_SET_SPEED)) { + err = hinic_set_speed(nic_dev->hwdev, speed_level); + if (err) + nicif_err(nic_dev, drv, netdev, "Set speed %d failed\n", + speed); + else + nicif_info(nic_dev, drv, netdev, "Set speed %d success\n", + speed); + } + + return err; +} + +static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + u32 set_settings = 0; + int err = 0; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Not support set link settings\n"); + return -EOPNOTSUPP; + } + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get current settings\n"); + return -EAGAIN; + } + + /* Alwayse set autonegation */ + if (port_info.autoneg_cap) + set_settings |= HILINK_LINK_SET_AUTONEG; + + if (autoneg == AUTONEG_ENABLE) { + if (!port_info.autoneg_cap) { + nicif_err(nic_dev, drv, netdev, "Not support autoneg\n"); + return -EOPNOTSUPP; + } + } else if (speed != (u32)SPEED_UNKNOWN) { + /* Set speed only when autoneg is disable */ + err = hinic_is_speed_legal(nic_dev, speed); + if (err) + return err; + + set_settings |= HILINK_LINK_SET_SPEED; + } else { + nicif_err(nic_dev, drv, netdev, "Need to set speed when autoneg is off\n"); + return -EOPNOTSUPP; + } + + if (set_settings) + err = hinic_set_settings_to_hw(nic_dev, set_settings, + autoneg, speed); + else + nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n"); + + return err; +} + +#ifndef HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY +static int hinic_set_settings(struct net_device *netdev, + struct ethtool_cmd *link_settings) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, link_settings->autoneg, + ethtool_cmd_speed(link_settings)); +} +#endif + +#ifdef ETHTOOL_GLINKSETTINGS +static int hinic_set_link_ksettings( + struct net_device *netdev, + const struct ethtool_link_ksettings *link_settings) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, link_settings->base.autoneg, + link_settings->base.speed); +} +#endif + +static void hinic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; + int err; + + strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, HINIC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + + err = hinic_get_mgmt_version(nic_dev->hwdev, mgmt_ver); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n"); + return; + } + + err = snprintf(info->fw_version, sizeof(info->fw_version), + "%s", mgmt_ver); + if (err <= 0 || err >= (int)sizeof(info->fw_version)) + nicif_err(nic_dev, drv, netdev, + "Failed snprintf fw_version, function return(%d) and dest_len(%d)\n", + err, (int)sizeof(info->fw_version)); +} + +static u32 hinic_get_msglevel(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void hinic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->msg_enable = data; + + nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data); +} + +static int 
hinic_nway_reset(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + int err; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Current function doesn't support restarting autoneg\n"); + return -EOPNOTSUPP; + } + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Get autonegotiation state failed\n"); + return -EFAULT; + } + + if (!port_info.autoneg_state) { + nicif_err(nic_dev, drv, netdev, + "Autonegotiation is off, restarting it is not supported\n"); + return -EINVAL; + } + + err = hinic_set_autoneg(nic_dev->hwdev, true); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Restart autonegotiation failed\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Restarted autonegotiation successfully\n"); + + return 0; +} + +static void hinic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH; + ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH; + ring->rx_pending = nic_dev->rxqs[0].q_depth; + ring->tx_pending = nic_dev->txqs[0].q_depth; +} + +static void hinic_update_qp_depth(struct hinic_nic_dev *nic_dev, + u16 sq_depth, u16 rq_depth) +{ + u16 i; + + nic_dev->sq_depth = sq_depth; + nic_dev->rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qps; i++) { + nic_dev->txqs[i].q_depth = sq_depth; + nic_dev->txqs[i].q_mask = sq_depth - 1; + nic_dev->rxqs[i].q_depth = rq_depth; + nic_dev->rxqs[i].q_mask = rq_depth - 1; + } +} + +static int hinic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 new_sq_depth, new_rq_depth; + int err; + + if (ring->rx_jumbo_pending || ring->rx_mini_pending) { + nicif_err(nic_dev, drv, netdev, + "Unsupported rx_jumbo_pending/rx_mini_pending\n"); + return -EINVAL; + } + + if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH || + ring->tx_pending < HINIC_MIN_QUEUE_DEPTH || + ring->rx_pending > HINIC_MAX_QUEUE_DEPTH || + ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, + "Queue depth out of range [%d-%d]\n", + HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH); + return -EINVAL; + } + + new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending)); + new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending)); + + if (new_sq_depth == nic_dev->sq_depth && + new_rq_depth == nic_dev->rq_depth) + return 0; + + if (test_bit(HINIC_BP_ENABLE, &nic_dev->flags) && + new_rq_depth <= nic_dev->bp_upper_thd) { + nicif_err(nic_dev, drv, netdev, + "BP is enabled, rq_depth must be larger than the upper threshold: %d\n", + nic_dev->bp_upper_thd); + return -EINVAL; + } + + nicif_info(nic_dev, drv, netdev, + "Change Tx/Rx ring depth from %d/%d to %d/%d\n", + nic_dev->sq_depth, nic_dev->rq_depth, + new_sq_depth, new_rq_depth); + + if (!netif_running(netdev)) { + hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); + } else { + nicif_info(nic_dev, drv, netdev, "Restarting netdev\n"); + err = hinic_close(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } + + hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); + + err = hinic_open(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + return -EFAULT; + } + } + + return 0; +} + +static u16 hinic_max_channels(struct hinic_nic_dev *nic_dev) +{ + u8 tcs =
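/*
 * hinic_set_ringparam() above rounds the requested ring depth down to a
 * power of two via 1U << ilog2(n). A userspace equivalent of that rounding
 * (helper name hypothetical):
 */
#include <stdio.h>

static unsigned int round_down_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p * 2 <= v)
		p *= 2;
	return p;
}

int main(void)
{
	printf("%u -> %u\n", 1000u, round_down_pow2(1000));	/* 512 */
	printf("%u -> %u\n", 4096u, round_down_pow2(4096));	/* 4096 */
	return 0;
}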
(u8)netdev_get_num_tc(nic_dev->netdev); + + return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps; +} + +static u16 hinic_curr_channels(struct hinic_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) + return nic_dev->num_rss ? nic_dev->num_rss : 1; + else + return min_t(u16, hinic_max_channels(nic_dev), + nic_dev->rss_limit); +} + +static void hinic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = hinic_max_channels(nic_dev); + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = hinic_curr_channels(nic_dev); +} + +void hinic_update_num_qps(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 num_qps; + u8 tcs; + + /* change num_qps to change counter in ethtool -S */ + tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + num_qps = (u16)(nic_dev->rss_limit * (tcs ? tcs : 1)); + nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_qps); +} + +static int hinic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int count = channels->combined_count; + int err; + + if (!count) { + nicif_err(nic_dev, drv, netdev, + "Unsupported combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count || channels->rx_count || channels->other_count) { + nicif_err(nic_dev, drv, netdev, + "Setting rx/tx/other count not supported\n"); + return -EINVAL; + } + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "This function doesn't support RSS, only 1 queue pair is supported\n"); + return -EOPNOTSUPP; + } + + if (count > hinic_max_channels(nic_dev)) { + nicif_err(nic_dev, drv, netdev, + "Combined count %d exceeds limit %d\n", + count, hinic_max_channels(nic_dev)); + return -EINVAL; + } + + nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n", + nic_dev->rss_limit, count); + nic_dev->rss_limit = (u16)count; + + if (netif_running(netdev)) { + nicif_info(nic_dev, drv, netdev, "Restarting netdev\n"); + err = hinic_close(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } + /* Discard user configured rss */ + hinic_set_default_rss_indir(netdev); + + err = hinic_open(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + return -EFAULT; + } + } else { + /* Discard user configured rss */ + hinic_set_default_rss_indir(netdev); + + hinic_update_num_qps(netdev); + } + + return 0; +} + +static int hinic_get_sset_count(struct net_device *netdev, int sset) +{ + int count = 0, q_num = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return ARRAY_LEN(hinic_test_strings); + case ETH_SS_STATS: + q_num = nic_dev->num_qps; + count = ARRAY_LEN(hinic_netdev_stats) + + ARRAY_LEN(hinic_nic_dev_stats) + + (ARRAY_LEN(hinic_tx_queue_stats) + + ARRAY_LEN(hinic_rx_queue_stats)) * q_num; + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(hinic_function_stats); + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) + count += ARRAY_LEN(hinic_port_stats); + + return count; + default: + return -EOPNOTSUPP; + } +} + +#define COALESCE_ALL_QUEUE 0xFFFF +#define COALESCE_MAX_PENDING_LIMIT (255 *
COALESCE_PENDING_LIMIT_UNIT) +#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) +#define COALESCE_PENDING_LIMIT_UNIT 8 +#define COALESCE_TIMER_CFG_UNIT 9 + +static int __hinic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_intr_coal_info *interrupt_info; + + if (queue == COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + interrupt_info = &nic_dev->intr_coalesce[0]; + } else { + if (queue >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %d\n", queue); + return -EINVAL; + } + interrupt_info = &nic_dev->intr_coalesce[queue]; + } + + /* coalesce timer is in units of 9 us */ + coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + /* coalesced frames are in units of 8 */ + coal->rx_max_coalesced_frames = interrupt_info->pending_limt * + COALESCE_PENDING_LIMIT_UNIT; + + /* tx/rx use the same interrupt */ + coal->tx_coalesce_usecs = coal->rx_coalesce_usecs; + coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames; + coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; + + coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high; + coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_high = + interrupt_info->rx_pending_limt_high * + COALESCE_PENDING_LIMIT_UNIT; + + coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low; + coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_low = + interrupt_info->rx_pending_limt_low * + COALESCE_PENDING_LIMIT_UNIT; + + return 0; +} + +static int set_queue_coalesce(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_intr_coal_info *coal) +{ + struct hinic_intr_coal_info *intr_coal; + struct nic_interrupt_info interrupt_info = {0}; + struct net_device *netdev = nic_dev->netdev; + int err; + + intr_coal = &nic_dev->intr_coalesce[q_id]; + if ((intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg) || + (intr_coal->pending_limt != coal->pending_limt)) + intr_coal->user_set_intr_coal_flag = 1; + + intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; + intr_coal->pending_limt = coal->pending_limt; + intr_coal->pkt_rate_low = coal->pkt_rate_low; + intr_coal->rx_usecs_low = coal->rx_usecs_low; + intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low; + intr_coal->pkt_rate_high = coal->pkt_rate_high; + intr_coal->rx_usecs_high = coal->rx_usecs_high; + intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high; + + /* netdev not running or qp not in use, + * no need to set coalesce to hw + */ + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) || + q_id >= nic_dev->num_qps || nic_dev->adaptive_rx_coal) + return 0; + + interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx; + interrupt_info.lli_set = 0; + interrupt_info.interrupt_coalesc_set = 1; + interrupt_info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; + interrupt_info.pending_limt = intr_coal->pending_limt; + interrupt_info.resend_timer_cfg = intr_coal->resend_timer_cfg; + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = + intr_coal->coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt; + err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info); + if (err) + nicif_warn(nic_dev, drv, netdev, + "Failed to set queue%d coalesce\n", q_id); + + return err; +} + +static int is_coalesce_legal(struct net_device
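/*
 * Sketch of the unit conversions used in __hinic_get_coalesce() above:
 * ethtool expresses coalescing in microseconds and frames, while the hardware
 * registers count 9 us ticks and 8-frame blocks (COALESCE_TIMER_CFG_UNIT and
 * COALESCE_PENDING_LIMIT_UNIT). Values and variable names are illustrative.
 */
#include <stdio.h>

#define COALESCE_TIMER_CFG_UNIT		9
#define COALESCE_PENDING_LIMIT_UNIT	8

int main(void)
{
	unsigned int rx_usecs = 50, rx_frames = 20;

	/* driver -> hardware: integer division rounds down to the unit */
	unsigned char timer_cfg = rx_usecs / COALESCE_TIMER_CFG_UNIT;	  /* 5 */
	unsigned char pending = rx_frames / COALESCE_PENDING_LIMIT_UNIT; /* 2 */

	/* hardware -> driver: multiply back when reporting the settings */
	printf("%u us -> cfg %u -> %u us\n", rx_usecs, timer_cfg,
	       timer_cfg * COALESCE_TIMER_CFG_UNIT);
	printf("%u frames -> limit %u -> %u frames\n", rx_frames, pending,
	       pending * COALESCE_PENDING_LIMIT_UNIT);
	return 0;
}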
*netdev, + const struct ethtool_coalesce *coal) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ethtool_coalesce tmp_coal = {0}; + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + tmp_coal.cmd = coal->cmd; + tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; + tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames; + tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs; + tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames; + tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + tmp_coal.pkt_rate_low = coal->pkt_rate_low; + tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; + tmp_coal.rx_max_coalesced_frames_low = + coal->rx_max_coalesced_frames_low; + + tmp_coal.pkt_rate_high = coal->pkt_rate_high; + tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; + tmp_coal.rx_max_coalesced_frames_high = + coal->rx_max_coalesced_frames_high; + + if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) { + nicif_err(nic_dev, drv, netdev, + "Only support to change rx/tx-usecs and rx/tx-frames\n"); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames out of range[%d-%d]\n", 0, + COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >= + coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesce_usecs_high(%u) must be greater than coalesce_usecs_low(%u) after dividing by the %d usecs unit\n", + coal->rx_coalesce_usecs_high, + coal->rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >= + coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesced_frames_high(%u) must be greater than coalesced_frames_low(%u) after dividing by the %d frames unit\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + return -EOPNOTSUPP; + } + + if (coal->pkt_rate_low >= coal->pkt_rate_high) { + nicif_err(nic_dev, drv, netdev, +
"pkt_rate_high(%u) must more than pkt_rate_low(%u)\n", + coal->pkt_rate_high, + coal->pkt_rate_low); + return -EOPNOTSUPP; + } + + return 0; +} + +#define CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if (coal->item % (unit)) \ + nicif_warn(nic_dev, drv, netdev, \ + "%s in %d units, change to %d\n", \ + #item, (unit), ALIGN_DOWN(coal->item, unit));\ +} while (0) + +#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ +do { \ + if ((coal->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %d %s\n", \ + #item, (ori_val) * (unit), \ + ALIGN_DOWN(coal->item, unit), (obj_str)); \ +} while (0) + +#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if (coal->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), coal->item, (obj_str)); \ +} while (0) + +static int __hinic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_intr_coal_info intr_coal = {0}; + struct hinic_intr_coal_info *ori_intr_coal; + char obj_str[OBJ_STR_MAX_LEN] = {0}; + u16 i; + int err = 0; + + err = is_coalesce_legal(netdev, coal); + if (err) + return err; + + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, + COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + + if (queue == COALESCE_ALL_QUEUE) { + ori_intr_coal = &nic_dev->intr_coalesce[0]; + err = snprintf(obj_str, sizeof(obj_str), "for netdev"); + if (err <= 0 || err >= OBJ_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed snprintf string, function return(%d) and dest_len(%d)\n", + err, OBJ_STR_MAX_LEN); + return -EFAULT; + } + } else { + ori_intr_coal = &nic_dev->intr_coalesce[queue]; + err = snprintf(obj_str, sizeof(obj_str), "for queue %d", queue); + if (err <= 0 || err >= OBJ_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed snprintf string, function return(%d) and dest_len(%d)\n", + err, OBJ_STR_MAX_LEN); + return -EFAULT; + } + } + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->coalesce_timer_cfg, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->pending_limt, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, + ori_intr_coal->pkt_rate_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, + COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_high, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, + ori_intr_coal->pkt_rate_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_low, obj_str); + + intr_coal.coalesce_timer_cfg = + (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + intr_coal.pending_limt = 
(u8)(coal->rx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + + nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + + intr_coal.pkt_rate_high = coal->pkt_rate_high; + intr_coal.rx_usecs_high = + (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); + intr_coal.rx_pending_limt_high = + (u8)(coal->rx_max_coalesced_frames_high / + COALESCE_PENDING_LIMIT_UNIT); + + intr_coal.pkt_rate_low = coal->pkt_rate_low; + intr_coal.rx_usecs_low = + (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); + intr_coal.rx_pending_limt_low = + (u8)(coal->rx_max_coalesced_frames_low / + COALESCE_PENDING_LIMIT_UNIT); + + /* coalesce timer or pending set to zero will disable coalesce */ + if (!nic_dev->adaptive_rx_coal && + (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + if (queue == COALESCE_ALL_QUEUE) { + for (i = 0; i < nic_dev->max_qps; i++) + set_queue_coalesce(nic_dev, i, &intr_coal); + } else { + if (queue >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %d\n", queue); + return -EINVAL; + } + set_queue_coalesce(nic_dev, queue, &intr_coal); + } + + return 0; +} + +static int hinic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int hinic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +#if defined(ETHTOOL_PERQUEUE) && defined(ETHTOOL_GCOALESCE) +static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return __hinic_get_coalesce(netdev, coal, queue); +} + +static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return __hinic_set_coalesce(netdev, coal, queue); +} +#endif + +static void get_drv_queue_stats(struct hinic_nic_dev *nic_dev, u64 *data) { struct hinic_txq_stats txq_stats; struct hinic_rxq_stats rxq_stats; @@ -611,7 +1574,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) { - p = (char *)&txq_stats + + p = (char *)(&txq_stats) + hinic_tx_queue_stats[j].offset; data[i] = (hinic_tx_queue_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; @@ -624,7 +1587,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) { - p = (char *)&rxq_stats + + p = (char *)(&rxq_stats) + hinic_rx_queue_stats[j].offset; data[i] = (hinic_rx_queue_stats[j].size == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; @@ -635,90 +1598,122 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) static void hinic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_vport_stats vport_stats = {0}; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; +#else + const struct net_device_stats *net_stats; +#endif struct hinic_phy_port_stats *port_stats; + struct hinic_nic_stats *nic_stats; + struct hinic_vport_stats vport_stats = {0}; u16 i = 0, j = 0; char *p; int err; - err = hinic_get_vport_stats(nic_dev, &vport_stats); - if (err) - netif_err(nic_dev, drv, netdev, - "Failed to get vport stats from firmware\n"); - - for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) { - p = (char *)&vport_stats + hinic_function_stats[j].offset; - data[i] = (hinic_function_stats[j].size == +#ifdef HAVE_NDO_GET_STATS64 + net_stats = dev_get_stats(netdev, &temp); +#else + net_stats = dev_get_stats(netdev); +#endif + for (j = 0; j < ARRAY_LEN(hinic_netdev_stats); j++, i++) { + p = (char *)(net_stats) + hinic_netdev_stats[j].offset; + data[i] = (hinic_netdev_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) { - memset(&data[i], 0, - ARRAY_LEN(hinic_port_stats) * sizeof(*data)); - i += ARRAY_LEN(hinic_port_stats); - goto get_drv_stats; - } - - err = hinic_get_phy_port_stats(nic_dev, port_stats); - if (err) - netif_err(nic_dev, drv, netdev, - "Failed to get port stats from firmware\n"); - - for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) { - p = (char *)port_stats + hinic_port_stats[j].offset; - data[i] = (hinic_port_stats[j].size == + nic_stats = &nic_dev->stats; + for (j = 0; j < ARRAY_LEN(hinic_nic_dev_stats); j++, i++) { + p = (char *)(nic_stats) + hinic_nic_dev_stats[j].offset; + data[i] = (hinic_nic_dev_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - kfree(port_stats); + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to get function stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) { + p = (char *)(&vport_stats) + + hinic_function_stats[j].offset; + data[i] = (hinic_function_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) { + nicif_err(nic_dev, drv, netdev, + "Failed to malloc port stats\n"); + memset(&data[i], 0, + ARRAY_LEN(hinic_port_stats) * sizeof(*data)); + i += ARRAY_LEN(hinic_port_stats); + goto get_drv_stats; + } + + err = hinic_get_phy_port_stats(nic_dev->hwdev, port_stats); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to get port stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) { + p = (char *)(port_stats) + hinic_port_stats[j].offset; + data[i] = (hinic_port_stats[j].size == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + kfree(port_stats); + } get_drv_stats: get_drv_queue_stats(nic_dev, data + i); } -static int hinic_get_sset_count(struct net_device *netdev, int sset) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int count, q_num; - - switch (sset) { - case ETH_SS_STATS: - q_num = nic_dev->num_qps; - count = ARRAY_LEN(hinic_function_stats) + - (ARRAY_LEN(hinic_tx_queue_stats) + - ARRAY_LEN(hinic_rx_queue_stats)) * q_num; - - count += ARRAY_LEN(hinic_port_stats); - - return count; - default: - return -EOPNOTSUPP; - } -} - static void hinic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 i = 0, j = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); char *p = (char *)data; - u16 i, j; switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings)); + return; case ETH_SS_STATS: - for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) { - memcpy(p, hinic_function_stats[i].name, + for (i = 0; i < ARRAY_LEN(hinic_netdev_stats); i++) { + memcpy(p, hinic_netdev_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) { - memcpy(p, hinic_port_stats[i].name, - ETH_GSTRING_LEN); + for (i = 0; i < ARRAY_LEN(hinic_nic_dev_stats); i++) { + memcpy(p, hinic_nic_dev_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) { + memcpy(p, hinic_function_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) { + memcpy(p, hinic_port_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < nic_dev->num_qps; i++) { for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) { sprintf(p, hinic_tx_queue_stats[j].name, i); @@ -735,28 +1730,999 @@ static void hinic_get_strings(struct net_device *netdev, return; default: + nicif_err(nic_dev, drv, netdev, + "Invalid string set %d.", stringset); return; } } +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int hinic_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 port; + int err; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Current function don't support to set LED status\n"); + return -EOPNOTSUPP; + } + + port = hinic_physical_port_id(nic_dev->hwdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = hinic_set_led_status(nic_dev->hwdev, port, + HINIC_LED_TYPE_LINK, + HINIC_LED_MODE_FORCE_2HZ); + if (err) + nicif_err(nic_dev, drv, netdev, + "Set LED blinking in 2HZ failed\n"); + else + nicif_info(nic_dev, drv, netdev, + "Set LED blinking in 2HZ success\n"); + break; + + case ETHTOOL_ID_INACTIVE: + err = hinic_reset_led_status(nic_dev->hwdev, port); + if (err) + nicif_err(nic_dev, drv, netdev, + "Reset LED to original status failed\n"); + else + nicif_info(nic_dev, drv, netdev, + "Reset LED to original status success\n"); + + break; + + default: + return -EOPNOTSUPP; + } + + if (err) + return -EFAULT; + else + return 0; +} +#else +/* TODO: implement */ +static int hinic_phys_id(struct net_device *netdev, u32 data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nicif_err(nic_dev, drv, netdev, "Not support to set phys id\n"); + + return -EOPNOTSUPP; +} +#endif + 
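The stats paths above (get_drv_queue_stats() and hinic_get_ethtool_stats()) share one descriptor-table idiom: each table entry records a counter's name plus its byte offset and width inside the source structure, and a single generic loop flattens any stats struct into the u64 array that ethtool expects. The following self-contained sketch shows the idiom in isolation; the demo_* names are illustrative only, not driver code.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct demo_stats {
        uint64_t packets;
        uint64_t bytes;
        uint32_t drops;         /* narrower counters are handled via .size */
};

struct demo_stats_desc {
        const char *name;
        size_t offset;
        size_t size;
};

#define DEMO_STAT(field) \
        { #field, offsetof(struct demo_stats, field), \
          sizeof(((struct demo_stats *)0)->field) }

static const struct demo_stats_desc demo_desc[] = {
        DEMO_STAT(packets),
        DEMO_STAT(bytes),
        DEMO_STAT(drops),
};

int main(void)
{
        struct demo_stats stats = { 1000, 64000, 3 };
        uint64_t data[3];
        size_t i;

        for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++) {
                const char *p = (const char *)&stats + demo_desc[i].offset;

                /* same width-dependent dereference as the driver loops */
                data[i] = (demo_desc[i].size == sizeof(uint64_t)) ?
                          *(const uint64_t *)p : *(const uint32_t *)p;
                printf("%s = %llu\n", demo_desc[i].name,
                       (unsigned long long)data[i]);
        }
        return 0;
}

Storing the width in the descriptor is what lets 32-bit and 64-bit counters share one table, which is why every driver loop above tests .size == sizeof(u64) before dereferencing.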
+static void hinic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + int err; + + err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get pauseparam from hw\n"); + } else { + pause->autoneg = nic_pause.auto_neg; + pause->rx_pause = nic_pause.rx_pause; + pause->tx_pause = nic_pause.tx_pause; + } +} + +static int hinic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + struct nic_port_info port_info = {0}; + int err; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Setting pause parameters is not supported on this function\n"); + return -EOPNOTSUPP; + } + + if (test_bit(HINIC_BP_ENABLE, &nic_dev->flags) && pause->tx_pause && + nic_dev->rq_depth <= nic_dev->bp_upper_thd) { + nicif_err(nic_dev, drv, netdev, + "Cannot enable tx pause: rq depth must be greater than bp upper threshold: %d\n", + nic_dev->bp_upper_thd); + return -EINVAL; + } + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get auto-negotiation state\n"); + return -EFAULT; + } + + if (pause->autoneg != port_info.autoneg_state) { + nicif_err(nic_dev, drv, netdev, + "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + return -EOPNOTSUPP; + } + + nic_pause.auto_neg = pause->autoneg; + nic_pause.rx_pause = pause->rx_pause; + nic_pause.tx_pause = pause->tx_pause; + + err = hinic_set_pause_info(nic_dev->hwdev, nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n", + pause->tx_pause ? "on" : "off", + pause->rx_pause ?
"on" : "off"); + + return 0; +} + +static int hinic_run_lp_test(struct hinic_nic_dev *nic_dev, u32 test_time) +{ + u32 i; + u8 j; + u32 cnt = test_time * 5; + struct sk_buff *skb = NULL; + struct sk_buff *skb_tmp = NULL; + u8 *test_data = NULL; + u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; + struct net_device *netdev = nic_dev->netdev; + + skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); + if (!skb_tmp) { + nicif_err(nic_dev, drv, netdev, + "Alloc xmit skb template failed for loopback test\n"); + return -ENOMEM; + } + + test_data = __skb_put(skb_tmp, LP_PKT_LEN); + + memset(test_data, 0xFF, (2 * ETH_ALEN)); + test_data[ETH_ALEN] = 0xFE; + test_data[2 * ETH_ALEN] = 0x08; + test_data[2 * ETH_ALEN + 1] = 0x0; + + for (i = ETH_HLEN; i < LP_PKT_LEN; i++) + test_data[i] = i & 0xFF; + + skb_tmp->queue_mapping = 0; + skb_tmp->ip_summed = CHECKSUM_COMPLETE; + skb_tmp->dev = netdev; + + for (i = 0; i < cnt; i++) { + nic_dev->lb_test_rx_idx = 0; + memset(lb_test_rx_buf, 0, (LP_PKT_CNT * LP_PKT_LEN)); + + for (j = 0; j < LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Copy skb failed for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[LP_PKT_LEN - 1] = j; + + if (hinic_lb_xmit_frame(skb, netdev)) { + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Xmit pkt failed for loopback test\n"); + return -EBUSY; + } + } + + /* wait till all pkt received to rx buffer */ + msleep(200); + + for (j = 0; j < LP_PKT_CNT; j++) { + if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)), + skb_tmp->data, (LP_PKT_LEN - 1)) || + (*(lb_test_rx_buf + ((j * LP_PKT_LEN) + + (LP_PKT_LEN - 1))) != j)) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * LP_PKT_CNT)), + (LP_PKT_LEN - 1), + *(lb_test_rx_buf + + (((j * LP_PKT_LEN) + + (LP_PKT_LEN - 1))))); + return -EIO; + } + } + } + + dev_kfree_skb_any(skb_tmp); + nicif_info(nic_dev, drv, netdev, "Loopback test succeed.\n"); + return 0; +} + +void hinic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data, u32 test_time) +{ + int err = 0; + u8 link_status = 0; + u8 *lb_test_rx_buf = NULL; + struct ethtool_test test = {0}; + enum diag_test_index test_index = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + memset(data, 0, (DIAG_TEST_MAX * sizeof(u64))); + + /* Do not support loopback test when netdev is closed. 
*/ + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "Do not support loopback test when netdev is closed.\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[PORT_DOWN_ERR_IDX] = 1; + return; + } + + test.flags = eth_test->flags; + + if (test_time == 0) + test_time = LP_DEFAULT_TIME; + + netif_carrier_off(netdev); + + if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) { + test_index = INTERNAL_LP_TEST; + + if (hinic_set_loopback_mode(nic_dev->hwdev, true)) { + nicif_err(nic_dev, drv, netdev, + "Failed to set port loopback mode before loopback test\n"); + err = 1; + goto resume_link; + } + } else { + test_index = EXTERNAL_LP_TEST; + } + + lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); + if (!lb_test_rx_buf) { + nicif_err(nic_dev, drv, netdev, + "Failed to alloc rx buffer for loopback test.\n"); + err = 1; + } else { + nic_dev->lb_test_rx_buf = lb_test_rx_buf; + nic_dev->lb_pkt_len = LP_PKT_LEN; + set_bit(HINIC_LP_TEST, &nic_dev->flags); + + if (hinic_run_lp_test(nic_dev, test_time)) + err = 1; + + clear_bit(HINIC_LP_TEST, &nic_dev->flags); + msleep(100); + vfree(lb_test_rx_buf); + nic_dev->lb_test_rx_buf = NULL; + } + + if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (hinic_set_loopback_mode(nic_dev->hwdev, false)) { + nicif_err(nic_dev, drv, netdev, + "Failed to cancel port loopback mode after loopback test.\n"); + err = 1; + + goto resume_link; + } + } + +resume_link: + if (err) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_index] = 1; + } + + err = hinic_get_link_state(nic_dev->hwdev, &link_status); + if (!err && link_status) + netif_carrier_on(netdev); +} + +static void hinic_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Current function don't support self test\n"); + return; + } + + hinic_lp_test(netdev, eth_test, data, 0); +} + +#ifdef ETHTOOL_GMODULEEEPROM +static int hinic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_type; + u8 sfp_type_ext; + int err; + + err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); + if (err) + return err; + + switch (sfp_type) { + case MODULE_TYPE_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case MODULE_TYPE_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case MODULE_TYPE_QSFP_PLUS: + if (sfp_type_ext >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case MODULE_TYPE_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + nicif_warn(nic_dev, drv, netdev, + "Optical module unknown: 0x%x\n", sfp_type); + return -EINVAL; + } + + return 0; +} + +static int hinic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + u16 len; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + + err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len); + if (err) + return err; + + 
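/* The length check above guarantees ee->offset + ee->len fits within sfp_data, so this window copy cannot overrun. */ +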
memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} +#endif /* ETHTOOL_GMODULEEEPROM */ + +static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, + struct nic_rss_type *rss_type) +{ + u8 rss_l4_en = 0; + + switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_l4_en = 0; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_l4_en = 1; + break; + default: + return -EINVAL; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int hinic_set_rss_hash_opts(struct hinic_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type *rss_type = &nic_dev->rss_type; + int err; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS is disable, not support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | + RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) + return -EINVAL; + + err = hinic_get_rss_type(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); + return -EFAULT; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + err = set_l4_rss_hash_ops(cmd, rss_type); + if (err) + return err; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupported flow type\n"); + return -EINVAL; + } + + err = hinic_set_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx, + *rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options success\n"); + + return 0; +} + +static int hinic_get_rss_hash_opts(struct hinic_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type rss_type = {0}; + int err; + + cmd->data = 0; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + return 0; + + err = hinic_get_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx, + &rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get rss type\n"); + return err; + } + + cmd->data = RXH_IP_SRC | RXH_IP_DST; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (rss_type.tcp_ipv4) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (rss_type.tcp_ipv6) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (rss_type.udp_ipv4) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (rss_type.udp_ipv6) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupported flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +#if (KERNEL_VERSION(3, 4, 24) > LINUX_VERSION_CODE) + static int hinic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, void *rule_locs) +#else + 
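/* kernels 3.4.24 and newer type rule_locs as u32 * rather than void * */ +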
static int hinic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->num_qps; + break; + case ETHTOOL_GRXFH: + err = hinic_get_rss_hash_opts(nic_dev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = hinic_set_rss_hash_opts(nic_dev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +static u32 hinic_get_rxfh_indir_size(struct net_device *netdev) +{ + return HINIC_RSS_INDIR_SIZE; +} +#endif + +static int __set_rss_rxfh(struct net_device *netdev, + const u32 *indir, const u8 *key) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err, i; + + if (indir) { + if (!nic_dev->rss_indir_user) { + nic_dev->rss_indir_user = + kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, + GFP_KERNEL); + if (!nic_dev->rss_indir_user) { + nicif_err(nic_dev, drv, netdev, + "Failed to alloc memory for rss_indir_usr\n"); + return -ENOMEM; + } + } + + memcpy(nic_dev->rss_indir_user, indir, + sizeof(u32) * HINIC_RSS_INDIR_SIZE); + + err = hinic_rss_set_indir_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, indir); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to set rss indir table\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Change rss indir success\n"); + } + + if (key) { + if (!nic_dev->rss_hkey_user) { + /* We request double spaces for the hash key, + * the second one holds the key of Big Edian + * format. + */ + nic_dev->rss_hkey_user = + kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL); + + if (!nic_dev->rss_hkey_user) { + nicif_err(nic_dev, drv, netdev, + "Failed to alloc memory for rss_hkey_user\n"); + return -ENOMEM; + } + + /* The second space is for big edian hash key */ + nic_dev->rss_hkey_user_be = + (u32 *)(nic_dev->rss_hkey_user + + HINIC_RSS_KEY_SIZE); + } + + memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE); + + /* make a copy of the key, and convert it to Big Endian */ + memcpy(nic_dev->rss_hkey_user_be, key, HINIC_RSS_KEY_SIZE); + for (i = 0; i < HINIC_RSS_KEY_SIZE / 4; i++) + nic_dev->rss_hkey_user_be[i] = + cpu_to_be32(nic_dev->rss_hkey_user_be[i]); + + err = hinic_rss_set_template_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, key); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Change rss key success\n"); + } + + return 0; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +static u32 hinic_get_rxfh_key_size(struct net_device *netdev) +{ + return HINIC_RSS_KEY_SIZE; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int hinic_get_rxfh(struct net_device *netdev, + u32 *indir, u8 *key, u8 *hfunc) +#else +static int hinic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + return -EOPNOTSUPP; + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) { + u8 hash_engine_type = 0; + + err = hinic_rss_get_hash_engine(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, + &hash_engine_type); + if (err) + return -EFAULT; + + *hfunc = hash_engine_type ? 
ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; + } +#endif + + if (indir) { + err = hinic_rss_get_indir_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, indir); + if (err) + return -EFAULT; + } + + if (key) + err = hinic_rss_get_template_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, key); + + return err; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int hinic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Not support to set rss parameters when rss is disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "Not support to set indir when DCB is enabled\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, + "Not support to set hfunc type except TOP and XOR\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + HINIC_RSS_HASH_ENGINE_TYPE_XOR : + HINIC_RSS_HASH_ENGINE_TYPE_TOEP; + err = hinic_rss_set_hash_engine + (nic_dev->hwdev, nic_dev->rss_tmpl_idx, + nic_dev->rss_hash_engine); + if (err) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, + "Change hfunc to RSS_HASH_%s success\n", + (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); + } +#endif + + err = __set_rss_rxfh(netdev, indir, key); + + return err; +} + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +static int hinic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *indir1) +#else +static int hinic_get_rxfh_indir(struct net_device *netdev, u32 *indir) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + u32 *indir; + + /* In a low version kernel(eg:suse 11.2), call the interface twice. + * First call to get the size value, + * and second call to get the rxfh indir according to the size value. 
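+ * A size of zero therefore marks the size query, which is answered without reading the table.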
+ */ + if (indir1->size == 0) { + indir1->size = HINIC_RSS_INDIR_SIZE; + return 0; + } + + if (indir1->size < HINIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get rss indir, hinic rss size(%d) is more than system rss size(%u).\n", + HINIC_RSS_INDIR_SIZE, indir1->size); + return -EINVAL; + } + + indir = indir1->ring_index; +#endif + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + return -EOPNOTSUPP; + + if (indir) + err = hinic_rss_get_indir_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, indir); + + return err; +} + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +static int hinic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *indir1) +#else +static int hinic_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + const u32 *indir; + + if (indir1->size != HINIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set rss indir, hinic rss size(%d) is more than system rss size(%u).\n", + HINIC_RSS_INDIR_SIZE, indir1->size); + return -EINVAL; + } + + indir = indir1->ring_index; +#endif + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Not support to set rss indir when rss is disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "Not support to set indir when DCB is enabled\n"); + return -EOPNOTSUPP; + } + + return __set_rss_rxfh(netdev, indir, NULL); +} +#endif /* defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) */ + static const struct ethtool_ops hinic_ethtool_ops = { +#ifdef ETHTOOL_GLINKSETTINGS .get_link_ksettings = hinic_get_link_ksettings, - .get_drvinfo = hinic_get_drvinfo, + .set_link_ksettings = hinic_set_link_ksettings, +#endif +#ifndef HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY + .get_settings = hinic_get_settings, + .set_settings = hinic_set_settings, +#endif + .get_drvinfo = hinic_get_drvinfo, + .get_msglevel = hinic_get_msglevel, + .set_msglevel = hinic_set_msglevel, + .nway_reset = hinic_nway_reset, .get_link = ethtool_op_get_link, .get_ringparam = hinic_get_ringparam, - .get_channels = hinic_get_channels, - .get_rxnfc = hinic_get_rxnfc, - .set_rxnfc = hinic_set_rxnfc, - .get_rxfh_key_size = hinic_get_rxfh_key_size, - .get_rxfh_indir_size = hinic_get_rxfh_indir_size, - .get_rxfh = hinic_get_rxfh, - .set_rxfh = hinic_set_rxfh, + .set_ringparam = hinic_set_ringparam, + .get_pauseparam = hinic_get_pauseparam, + .set_pauseparam = hinic_set_pauseparam, .get_sset_count = hinic_get_sset_count, + .get_coalesce = hinic_get_coalesce, + .set_coalesce = hinic_set_coalesce, +#if defined(ETHTOOL_PERQUEUE) && defined(ETHTOOL_GCOALESCE) + .get_per_queue_coalesce = hinic_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic_set_per_queue_coalesce, +#endif .get_ethtool_stats = hinic_get_ethtool_stats, .get_strings = hinic_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = hinic_set_phys_id, +#else + .phys_id = hinic_phys_id, +#endif +#endif + .self_test = hinic_diag_test, + .get_rxnfc = hinic_get_rxnfc, + .set_rxnfc = hinic_set_rxnfc, + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, +#ifdef ETHTOOL_GMODULEEEPROM + .get_module_info = hinic_get_module_info, + .get_module_eeprom = hinic_get_module_eeprom, +#endif +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE + 
.get_rxfh_indir_size = hinic_get_rxfh_indir_size, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, +#else + .get_rxfh_indir = hinic_get_rxfh_indir, + .set_rxfh_indir = hinic_set_rxfh_indir, +#endif +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ }; +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext hinic_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .set_phys_id = hinic_set_phys_id, + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, +#ifdef ETHTOOL_GMODULEEEPROM + .get_module_info = hinic_get_module_info, + .get_module_eeprom = hinic_get_module_eeprom, +#endif + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic_get_rxfh_indir_size, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, +#else + .get_rxfh_indir = hinic_get_rxfh_indir, + .set_rxfh_indir = hinic_set_rxfh_indir, +#endif +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +static const struct ethtool_ops hinicvf_ethtool_ops = { +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = hinic_get_link_ksettings, +#else + .get_settings = hinic_get_settings, +#endif + .get_drvinfo = hinic_get_drvinfo, + .get_msglevel = hinic_get_msglevel, + .set_msglevel = hinic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic_get_ringparam, + .set_ringparam = hinic_set_ringparam, + .get_sset_count = hinic_get_sset_count, + .get_coalesce = hinic_get_coalesce, + .set_coalesce = hinic_set_coalesce, +#if defined(ETHTOOL_PERQUEUE) && defined(ETHTOOL_GCOALESCE) + .get_per_queue_coalesce = hinic_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic_set_per_queue_coalesce, +#endif + .get_ethtool_stats = hinic_get_ethtool_stats, + .get_strings = hinic_get_strings, + .get_rxnfc = hinic_get_rxnfc, + .set_rxnfc = hinic_set_rxnfc, + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic_get_rxfh_indir_size, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, +#else + .get_rxfh_indir = hinic_get_rxfh_indir, + .set_rxfh_indir = hinic_set_rxfh_indir, +#endif +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext hinicvf_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic_get_rxfh_indir_size, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, +#else + .get_rxfh_indir = hinic_get_rxfh_indir, + .set_rxfh_indir = hinic_set_rxfh_indir, +#endif +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + void hinic_set_ethtool_ops(struct net_device *netdev) { - netdev->ethtool_ops = &hinic_ethtool_ops; + SET_ETHTOOL_OPS(netdev, &hinic_ethtool_ops); +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &hinic_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} + +void hinicvf_set_ethtool_ops(struct 
net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &hinicvf_ethtool_ops); +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &hinicvf_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw.h b/drivers/net/ethernet/huawei/hinic/hinic_hw.h new file mode 100644 index 000000000000..aa40c9c57d9c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw.h @@ -0,0 +1,809 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_H_ +#define HINIC_HW_H_ + +#ifndef __BIG_ENDIAN__ +#define __BIG_ENDIAN__ 0x4321 +#endif + +#ifndef __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN__ 0x1234 +#endif + +#ifdef __BYTE_ORDER__ +#undef __BYTE_ORDER__ +#endif +/* X86 */ +#define __BYTE_ORDER__ __LITTLE_ENDIAN__ + +enum hinic_mod_type { + HINIC_MOD_COMM = 0, /* HW communication module */ + HINIC_MOD_L2NIC = 1, /* L2NIC module */ + HINIC_MOD_ROCE = 2, + HINIC_MOD_IWARP = 3, + HINIC_MOD_TOE = 4, + HINIC_MOD_FLR = 5, + HINIC_MOD_FCOE = 6, + HINIC_MOD_CFGM = 7, /* Configuration module */ + HINIC_MOD_CQM = 8, + HINIC_MOD_VSWITCH = 9, + HINIC_MOD_FC = 10, + HINIC_MOD_OVS = 11, + HINIC_MOD_FIC = 12, + HINIC_MOD_MIGRATE = 13, + HINIC_MOD_HILINK = 14, + HINIC_MOD_HW_MAX = 16, /* hardware max module id */ + + /* Software module id, for PF/VF and multi-host */ + HINIC_MOD_SW_FUNC = 17, + HINIC_MOD_MAX, +}; + +struct hinic_cmd_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; +}; + +enum hinic_ack_type { + HINIC_ACK_TYPE_CMDQ, + HINIC_ACK_TYPE_SHARE_CQN, + HINIC_ACK_TYPE_APP_CQN, + + HINIC_MOD_ACK_MAX = 15, + +}; + +#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF + +int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/* for pxe, ovs */ +int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/* PF/VF send msg to uP by api cmd, and return immediately */ +int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); + +int hinic_mbox_to_vf(void *hwdev, enum hinic_mod_type mod, + u16 vf_id, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +int hinic_api_cmd_write_nack(void *hwdev, u8 dest, + void *cmd, u16 size); + +int hinic_api_cmd_read_ack(void *hwdev, u8 dest, + void *cmd, u16 size, void *ack, u16 ack_size); +/* PF/VF send cmd to ucode by cmdq, and return if success. + * timeout=0, use default timeout. + */ +int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + u64 *out_param, u32 timeout); +/* 1. whether need the timeout parameter + * 2. out_param indicates the status of the microcode processing command + */ + +/* PF/VF send cmd to ucode by cmdq, and return detailed result. + * timeout=0, use default timeout. 
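+ * buf_in/buf_out are hinic_cmd_buf buffers obtained from hinic_alloc_cmd_buf().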
+ */ +int hinic_cmdq_detail_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, u32 timeout); + +/* PF/VF send cmd to ucode by cmdq, and return immediately */ +int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in); + +int hinic_ppf_tmr_start(void *hwdev); +int hinic_ppf_tmr_stop(void *hwdev); + +/* CLP */ +int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + + +enum hinic_ceq_event { + HINIC_NON_L2NIC_SCQ, + HINIC_NON_L2NIC_ECQ, + HINIC_NON_L2NIC_NO_CQ_EQ, + HINIC_CMDQ, + HINIC_L2NIC_SQ, + HINIC_L2NIC_RQ, + HINIC_MAX_CEQ_EVENTS, +}; + +typedef void (*hinic_ceq_event_cb)(void *handle, u32 ceqe_data); +int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event, + hinic_ceq_event_cb callback); +void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event); + +enum hinic_aeq_type { + HINIC_HW_INTER_INT = 0, + HINIC_MBX_FROM_FUNC = 1, + HINIC_MSG_FROM_MGMT_CPU = 2, + HINIC_API_RSP = 3, + HINIC_API_CHAIN_STS = 4, + HINIC_MBX_SEND_RSLT = 5, + HINIC_MAX_AEQ_EVENTS +}; + +enum hinic_aeq_sw_type { + HINIC_STATELESS_EVENT = 0, + HINIC_STATEFULL_EVENT = 1, + HINIC_MAX_AEQ_SW_EVENTS +}; + +typedef void (*hinic_aeq_hwe_cb)(void *handle, u8 *data, u8 size); +int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event, + hinic_aeq_hwe_cb hwe_cb); +void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event); + +typedef u8 (*hinic_aeq_swe_cb)(void *handle, u8 event, u64 data); +int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event, + hinic_aeq_swe_cb aeq_swe_cb); +void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event); + +typedef void (*hinic_mgmt_msg_cb)(void *hwdev, void *pri_handle, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +int hinic_register_mgmt_msg_cb(void *hwdev, + enum hinic_mod_type mod, void *pri_handle, + hinic_mgmt_msg_cb callback); +void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod); + +struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev); +void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *buf); + +int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); +void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); +int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base); +void hinic_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base); + +struct nic_interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limt; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +int hinic_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info); +int hinic_set_interrupt_cfg_direct(void *hwdev, + struct nic_interrupt_info *interrupt_info); +int hinic_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info); + +/* The driver code implementation interface */ +void hinic_misx_intr_clear_resend_bit(void *hwdev, + u16 msix_idx, u8 clear_resend_en); + +struct hinic_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr); + +int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 
sq_depth, int rx_buf_sz); +int hinic_clean_root_ctxt(void *hwdev); +void hinic_record_pcie_error(void *hwdev); + +int hinic_func_rx_tx_flush(void *hwdev); + +int hinic_func_tmr_bitmap_set(void *hwdev, bool enable); + +struct hinic_init_para { + /* Record hinic_pcidev or NDIS_Adapter pointer address */ + void *adapter_hdl; + /* Record pcidev or Handler pointer address + * for example: ioremap interface input parameter + */ + void *pcidev_hdl; + /* Record pcidev->dev or Handler pointer address which used to + * dma address application or dev_err print the parameter + */ + void *dev_hdl; + + void *cfg_reg_base; /* Configure virtual address, bar0/1 */ + /* interrupt configuration register address, bar2/3 */ + void *intr_reg_base; + u64 db_base_phy; + void *db_base; /* the doorbell address, bar4/5 higher 4M space */ + void *dwqe_mapping; /* direct wqe 4M, follow the doorbell address */ + void **hwdev; + void *chip_node; + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. + */ + void *ppf_hwdev; +}; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +#define MAX_FUNCTION_NUM 512 +#define HINIC_MAX_PF_NUM 16 +#define HINIC_MAX_COS 8 +#define INIT_FAILED 0 +#define INIT_SUCCESS 1 +#define MAX_DRV_BUF_SIZE 4096 + +struct hinic_cmd_get_light_module_abs { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsv[2]; +}; + +#define MODULE_TYPE_SFP 0x3 +#define MODULE_TYPE_QSFP28 0x11 +#define MODULE_TYPE_QSFP 0x0C +#define MODULE_TYPE_QSFP_PLUS 0x0D + +#define SFP_INFO_MAX_SIZE 512 +struct hinic_cmd_get_sfp_qsfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 out_len; + u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE]; +}; + +#define STD_SFP_INFO_MAX_SIZE 640 +struct hinic_cmd_get_std_sfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 eeprom_len; + u32 rsvd; + u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; +}; + +#define HINIC_MAX_PORT_ID 4 + +struct hinic_port_routine_cmd { + bool up_send_sfp_info; + bool up_send_sfp_abs; + + struct hinic_cmd_get_sfp_qsfp_info sfp_info; + struct hinic_cmd_get_light_module_abs abs; +}; + +struct card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + void *log_info; + void *dbgtool_info; + void *func_handle_array[MAX_FUNCTION_NUM]; + unsigned char dp_bus_num; + u8 func_num; + struct attribute dbgtool_attr_file; + + bool cos_up_setted; + u8 cos_up[HINIC_MAX_COS]; + bool ppf_state; + u8 pf_bus_num[HINIC_MAX_PF_NUM]; + bool disable_vf_load[HINIC_MAX_PF_NUM]; + u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM]; + u32 vf_mbx_rand_id[MAX_FUNCTION_NUM]; + struct hinic_port_routine_cmd rt_cmd[HINIC_MAX_PORT_ID]; + + /* mutex used for copy sfp info */ + struct mutex sfp_mutex; +}; + +enum hinic_hwdev_init_state { + HINIC_HWDEV_NONE_INITED = 0, + HINIC_HWDEV_CLP_INITED, + HINIC_HWDEV_AEQ_INITED, + HINIC_HWDEV_MGMT_INITED, + HINIC_HWDEV_MBOX_INITED, + HINIC_HWDEV_CMDQ_INITED, + HINIC_HWDEV_COMM_CH_INITED, + HINIC_HWDEV_ALL_INITED, + HINIC_HWDEV_MAX_INVAL_INITED +}; + +enum hinic_func_mode { + /* single host */ + FUNC_MOD_NORMAL_HOST, + /* multi host, baremate, sdi side */ + FUNC_MOD_MULTI_BM_MASTER, + /* multi host, baremate, host side */ + FUNC_MOD_MULTI_BM_SLAVE, + /* multi host, vm mode, sdi side */ + FUNC_MOD_MULTI_VM_MASTER, + /* multi host, vm mode, host side */ + FUNC_MOD_MULTI_VM_SLAVE, +}; + +enum hinic_func_cap { + /* send message to mgmt cpu directly */ + 
HINIC_FUNC_MGMT = 1 << 0, + /* setting port attribute, pause/speed etc. */ + HINIC_FUNC_PORT = 1 << 1, + /* Enable SR-IOV in default */ + HINIC_FUNC_SRIOV_EN_DFLT = 1 << 2, + /* Can't change VF num */ + HINIC_FUNC_SRIOV_NUM_FIX = 1 << 3, + /* Fcorce pf/vf link up */ + HINIC_FUNC_FORCE_LINK_UP = 1 << 4, + /* Support rate limit */ + HINIC_FUNC_SUPP_RATE_LIMIT = 1 << 5, + HINIC_FUNC_SUPP_DFX_REG = 1 << 6, + /* Support promisc/multicast/all-multi */ + HINIC_FUNC_SUPP_RX_MODE = 1 << 7, + /* Set vf mac and vlan by ip link */ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8, + /* Support set mac by ifconfig */ + HINIC_FUNC_SUPP_CHANGE_MAC = 1 << 9, + /* OVS don't support SCTP_CRC/HW_VLAN/LRO */ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10, + /* OVS don't support encap-tso/encap-csum */ + HINIC_FUNC_SUPP_ENCAP_TSO_CSUM = 1 << 11, +}; + +#define FUNC_SUPPORT_MGMT(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_MGMT)) +#define FUNC_SUPPORT_PORT_SETTING(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_PORT)) +#define FUNC_SUPPORT_DCB(hwdev) \ + (FUNC_SUPPORT_PORT_SETTING(hwdev)) +#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SRIOV_EN_DFLT)) +#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SRIOV_NUM_FIX)) +#define FUNC_SUPPORT_RX_MODE(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_RX_MODE)) +#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_RATE_LIMIT)) +#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN)) +#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_CHANGE_MAC)) +#define FUNC_FORCE_LINK_UP(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_FORCE_LINK_UP)) +#define FUNC_SUPPORT_SCTP_CRC(hwdev) \ + (!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_HW_VLAN(hwdev) \ + (!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_LRO(hwdev) \ + (!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_ENCAP_TSO_CSUM(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_ENCAP_TSO_CSUM)) + +int hinic_init_hwdev(struct hinic_init_para *para); +int hinic_set_vf_dev_cap(void *hwdev); +void hinic_free_hwdev(void *hwdev); +void hinic_shutdown_hwdev(void *hwdev); +void hinic_set_api_stop(void *hwdev); + +void hinic_ppf_hwdev_unreg(void *hwdev); +void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev); + +void hinic_qps_num_set(void *hwdev, u32 num_qps); + + +bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state); +enum hinic_func_mode hinic_get_func_mode(void *hwdev); +u64 hinic_get_func_feature_cap(void *hwdev); + +enum hinic_service_mode { + HINIC_WORK_MODE_OVS = 0, + HINIC_WORK_MODE_UNKNOWN, + HINIC_WORK_MODE_NIC, + HINIC_WORK_MODE_OVS_SP = 7, + HINIC_WORK_MODE_OVS_DQ, + HINIC_WORK_MODE_INVALID = 0xFF, +}; + +enum hinic_service_mode hinic_get_service_mode(void *hwdev); + +#define WORK_AS_OVS_MODE(mode) \ + (((mode) == HINIC_WORK_MODE_OVS) || \ + ((mode) == HINIC_WORK_MODE_OVS_SP) || \ + ((mode) == HINIC_WORK_MODE_OVS_DQ)) + +int hinic_slq_init(void *dev, int num_wqs); +void hinic_slq_uninit(void *dev); +int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, + u16 page_size, u64 *cla_addr, void **handle); 
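The FUNC_SUPPORT_* helpers above all reduce to one bitmask idiom: hinic_get_func_feature_cap() returns a u64 of capability bits, positive features are tested with !!(cap & bit), and the OVS offload restrictions are tested inverted with !(cap & bit). A minimal standalone sketch of the same pattern follows; the DEMO_* names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* two sample capability bits, mirroring HINIC_FUNC_PORT and
 * HINIC_FUNC_OFFLOAD_OVS_UNSUPP above
 */
#define DEMO_CAP_PORT           (1ULL << 1)
#define DEMO_CAP_OVS_NO_OFFLOAD (1ULL << 10)

/* positive capability: feature present when the bit is set */
#define DEMO_SUPPORT_PORT(cap)  (!!((cap) & DEMO_CAP_PORT))
/* restriction bit: feature available only while the bit is clear */
#define DEMO_SUPPORT_LRO(cap)   (!((cap) & DEMO_CAP_OVS_NO_OFFLOAD))

int main(void)
{
        uint64_t cap = DEMO_CAP_PORT | DEMO_CAP_OVS_NO_OFFLOAD;

        /* prints "port: 1, lro: 0": port setting allowed, LRO masked off */
        printf("port: %d, lro: %d\n",
               DEMO_SUPPORT_PORT(cap), DEMO_SUPPORT_LRO(cap));
        return 0;
}

The double negation normalizes the masked value to 0 or 1, so the macros can be used directly in comparisons and format strings.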
+void hinic_slq_free(void *dev, void *handle); +u64 hinic_slq_get_addr(void *handle, u16 index); +u64 hinic_slq_get_first_pageaddr(void *handle); + +typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc); + +void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd); + +int hinic_micro_log_path_set(void *hwdev, u8 *log_path); +int hinic_micro_log_func_en(void *hwdev, u8 is_en); + +/* defined by chip */ +enum hinic_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_PHY_FAULT, + FAULT_TYPE_MAX, +}; + +/* defined by chip */ +enum hinic_fault_err_level { + /* default err_level=FAULT_LEVEL_FATAL if + * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT || + * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT || + * FAULT_TYPE_UCODE + * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP + */ + FAULT_LEVEL_FATAL, + FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX +}; + +enum hinic_fault_source_type { + /* same as FAULT_TYPE_CHIP */ + HINIC_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as FAULT_TYPE_UCODE */ + HINIC_FAULT_SRC_HW_MGMT_UCODE, + /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as FAULT_TYPE_REG_RD_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as FAULT_TYPE_REG_WR_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + HINIC_FAULT_SRC_SW_MGMT_UCODE, + HINIC_FAULT_SRC_MGMT_WATCHDOG, + HINIC_FAULT_SRC_MGMT_RESET = 8, + HINIC_FAULT_SRC_HW_PHY_FAULT, + HINIC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20, + HINIC_FAULT_SRC_TYPE_MAX, +}; + +struct hinic_fault_sw_mgmt { + u8 event_id; + u64 event_data; +}; + +union hinic_fault_hw_mgmt { + u32 val[4]; + /* valid only type==FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum hinic_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only type==FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct hinic_fault_event { + /* enum hinic_fault_type */ + u8 type; + u8 rsvd0[3]; + union hinic_fault_hw_mgmt event; +}; + +struct hinic_fault_recover_info { + u8 fault_src; /* enum hinic_fault_source_type */ + u8 fault_lev; /* enum hinic_fault_err_level */ + u8 rsvd0[2]; + union { + union hinic_fault_hw_mgmt hw_mgmt; + struct hinic_fault_sw_mgmt sw_mgmt; + u32 mgmt_rsvd[4]; + u32 host_rsvd[4]; + } fault_data; +}; + +struct hinic_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + 
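The fault types above follow a classic tagged-union layout: hinic_fault_event.type selects which member of union hinic_fault_hw_mgmt may be read, exactly as the "valid only type==..." comments state. Below is a compact sketch of how a consumer dispatches on such an event; the demo_* types are abridged stand-ins, not the real structures.

#include <stdint.h>
#include <stdio.h>

enum demo_fault_type { DEMO_FAULT_CHIP, DEMO_FAULT_UCODE };

union demo_fault_data {
        struct { uint8_t node_id; uint16_t err_type; } chip;   /* DEMO_FAULT_CHIP only */
        struct { uint8_t cause_id; uint32_t epc; } ucode;      /* DEMO_FAULT_UCODE only */
};

struct demo_fault_event {
        uint8_t type;                   /* enum demo_fault_type */
        union demo_fault_data event;
};

static void demo_handle_fault(const struct demo_fault_event *f)
{
        /* the tag decides which view of the union is valid */
        switch (f->type) {
        case DEMO_FAULT_CHIP:
                printf("chip fault, node %u\n", f->event.chip.node_id);
                break;
        case DEMO_FAULT_UCODE:
                printf("ucode fault at epc 0x%x\n",
                       (unsigned int)f->event.ucode.epc);
                break;
        default:
                printf("unknown fault type %u\n", f->type);
                break;
        }
}

int main(void)
{
        struct demo_fault_event ev = { .type = DEMO_FAULT_UCODE };

        ev.event.ucode.epc = 0x1234;
        demo_handle_fault(&ev);
        return 0;
}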
+enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +enum port_module_event_type { + HINIC_PORT_MODULE_CABLE_PLUGGED, + HINIC_PORT_MODULE_CABLE_UNPLUGGED, + HINIC_PORT_MODULE_LINK_ERR, + HINIC_PORT_MODULE_MAX_EVENT, +}; + +struct hinic_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +struct hinic_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +struct hinic_mctp_host_info { + u8 major_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 data_len; + void *data; +}; + +/* multi host mgmt event sub cmd */ +enum hinic_mhost_even_type { + HINIC_MHOST_NIC_STATE_CHANGE = 1, +}; + +struct hinic_mhost_nic_func_state { + u8 status; + + u8 enable; + u16 func_idx; +}; + +struct hinic_multi_host_mgmt_event { + u16 sub_cmd; + u16 rsvd[3]; + + void *data; +}; + +enum hinic_event_type { + HINIC_EVENT_LINK_DOWN = 0, + HINIC_EVENT_LINK_UP = 1, + HINIC_EVENT_HEART_LOST = 2, + HINIC_EVENT_FAULT = 3, + HINIC_EVENT_NOTIFY_VF_DCB_STATE = 4, + HINIC_EVENT_DCB_STATE_CHANGE = 5, + HINIC_EVENT_FMW_ACT_NTC = 6, + HINIC_EVENT_PORT_MODULE_EVENT = 7, + HINIC_EVENT_MCTP_GET_HOST_INFO, + HINIC_EVENT_MULTI_HOST_MGMT, + HINIC_EVENT_INIT_MIGRATE_PF, +}; + +struct hinic_event_info { + enum hinic_event_type type; + union { + struct hinic_event_link_info link_info; + struct hinic_fault_event info; + struct hinic_dcb_state dcb_state; + struct hinic_port_module_event module_event; + u8 vf_default_cos; + struct hinic_mctp_host_info mctp_info; + struct hinic_multi_host_mgmt_event mhost_mgmt; + }; +}; + +enum hinic_ucode_event_type { + HINIC_INTERNAL_TSO_FATAL_ERROR = 0x0, + HINIC_INTERNAL_LRO_FATAL_ERROR = 0x1, + HINIC_INTERNAL_TX_FATAL_ERROR = 0x2, + HINIC_INTERNAL_RX_FATAL_ERROR = 0x3, + HINIC_INTERNAL_OTHER_FATAL_ERROR = 0x4, + HINIC_NIC_FATAL_ERROR_MAX = 0x8, +}; + +typedef void (*hinic_event_handler)(void *handle, + struct hinic_event_info *event); + +typedef void (*hinic_fault_recover_handler)(void *pri_handle, + struct hinic_fault_recover_info info); +/* only register once */ +void hinic_event_register(void *dev, void *pri_handle, + hinic_event_handler callback); +void hinic_event_unregister(void *dev); + +void hinic_detect_hw_present(void *hwdev); + +void hinic_set_chip_absent(void *hwdev); + +int hinic_get_chip_present_flag(void *hwdev); + +void hinic_set_pcie_order_cfg(void *handle); + +int hinic_get_mgmt_channel_status(void *handle); + +enum hinic_led_mode { + HINIC_LED_MODE_ON, + HINIC_LED_MODE_OFF, + HINIC_LED_MODE_FORCE_1HZ, + HINIC_LED_MODE_FORCE_2HZ, + HINIC_LED_MODE_FORCE_4HZ, + HINIC_LED_MODE_1HZ, + HINIC_LED_MODE_2HZ, + HINIC_LED_MODE_4HZ, + HINIC_LED_MODE_INVALID, +}; + +enum hinic_led_type { + HINIC_LED_TYPE_LINK, + HINIC_LED_TYPE_LOW_SPEED, + HINIC_LED_TYPE_HIGH_SPEED, + HINIC_LED_TYPE_INVALID, +}; + +int hinic_reset_led_status(void *hwdev, u8 port); +int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type, + enum hinic_led_mode mode); + +struct hinic_board_info { + u32 board_type; + u32 port_num; + u32 port_speed; + u32 pcie_width; + u32 host_num; + u32 pf_num; + u32 vf_total_num; + u32 tile_num; + u32 qcm_num; + u32 core_num; + u32 work_mode; + u32 service_mode; + u32 pcie_mode; + u32 cfg_addr; + u32 boot_sel; + u32 board_id; +}; + +int hinic_get_board_info(void *hwdev, struct hinic_board_info *info); +bool hinic_get_ppf_status(void *hwdev); + +struct hw_pf_info { + u16 glb_func_idx; + u16 glb_pf_vf_offset; + u8 p2p_idx; + u8 itf_idx; + u16 max_vfs; + u16 max_queue_num; 
+ u16 ovs_q_vf_num[9]; + u32 resv; +}; + +struct hinic_hw_pf_infos { + u8 num_pfs; + u8 rsvd1[3]; + + struct hw_pf_info infos[16]; +}; + +int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos); +int hinic_set_ip_check(void *hwdev, bool ip_check_ctl); +int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); +int hinic_mbox_ppf_to_vf(void *hwdev, enum hinic_mod_type mod, u16 func_id, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_get_card_present_state(void *hwdev, bool *card_present_state); + +void hinic_migrate_report(void *dev); +int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port); +bool is_multi_vm_slave(void *hwdev); +bool is_multi_bm_slave(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c deleted file mode 100644 index 583fd24c29cf..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ /dev/null @@ -1,969 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_csr.h" -#include "hinic_hw_if.h" -#include "hinic_hw_api_cmd.h" - -#define API_CHAIN_NUM_CELLS 32 - -#define API_CMD_CELL_SIZE_SHIFT 6 -#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT)) - -#define API_CMD_CELL_SIZE(cell_size) \ - (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \ - (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN) - -#define API_CMD_CELL_SIZE_VAL(size) \ - ilog2((size) >> API_CMD_CELL_SIZE_SHIFT) - -#define API_CMD_BUF_SIZE 2048 - -/* Sizes of the members in hinic_api_cmd_cell */ -#define API_CMD_CELL_DESC_SIZE 8 -#define API_CMD_CELL_DATA_ADDR_SIZE 8 - -#define API_CMD_CELL_ALIGNMENT 8 - -#define API_CMD_TIMEOUT 1000 - -#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) - -#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3) -#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2) - -#define RD_DMA_ATTR_DEFAULT 0 -#define WR_DMA_ATTR_DEFAULT 0 - -enum api_cmd_data_format { - SGE_DATA = 1, /* cell data is passed by hw address */ -}; - -enum api_cmd_type { - API_CMD_WRITE = 0, -}; - -enum api_cmd_bypass { - NO_BYPASS = 0, - BYPASS = 1, -}; - -enum api_cmd_xor_chk_level { - XOR_CHK_DIS = 0, - - XOR_CHK_ALL = 3, -}; - -static u8 xor_chksum_set(void *data) -{ - int idx; - u8 *val, checksum = 0; - - val = data; - - for (idx = 0; idx < 7; idx++) - checksum ^= val[idx]; - - return checksum; -} - -static void set_prod_idx(struct hinic_api_cmd_chain *chain) -{ - enum hinic_api_cmd_chain_type chain_type = chain->chain_type; - struct hinic_hwif *hwif = chain->hwif; - u32 addr, prod_idx; - - addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); - prod_idx = hinic_hwif_read_reg(hwif, addr); - - prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX); - - prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX); - - hinic_hwif_write_reg(hwif, addr, prod_idx); -} - -static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) -{ - u32 addr, val; - - addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); - val = hinic_hwif_read_reg(chain->hwif, addr); - - return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); -} - -/** - * chain_busy - check if the chain is still processing last requests - 
* @chain: chain to check - * - * Return 0 - Success, negative - Failure - **/ -static int chain_busy(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - u32 prod_idx; - - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - chain->cons_idx = get_hw_cons_idx(chain); - prod_idx = chain->prod_idx; - - /* check for a space for a new command */ - if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) { - dev_err(&pdev->dev, "API CMD chain %d is busy\n", - chain->chain_type); - return -EBUSY; - } - break; - - default: - dev_err(&pdev->dev, "Unknown API CMD Chain type\n"); - break; - } - - return 0; -} - -/** - * get_cell_data_size - get the data size of a specific cell type - * @type: chain type - * - * Return the data(Desc + Address) size in the cell - **/ -static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type) -{ - u8 cell_data_size = 0; - - switch (type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + - API_CMD_CELL_DATA_ADDR_SIZE, - API_CMD_CELL_ALIGNMENT); - break; - default: - break; - } - - return cell_data_size; -} - -/** - * prepare_cell_ctrl - prepare the ctrl of the cell for the command - * @cell_ctrl: the control of the cell to set the control value into it - * @data_size: the size of the data in the cell - **/ -static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size) -{ - u8 chksum; - u64 ctrl; - - ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) | - HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) | - HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR); - - chksum = xor_chksum_set(&ctrl); - - ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); - - /* The data in the HW should be in Big Endian Format */ - *cell_ctrl = cpu_to_be64(ctrl); -} - -/** - * prepare_api_cmd - prepare API CMD command - * @chain: chain for the command - * @dest: destination node on the card that will receive the command - * @cmd: command data - * @cmd_size: the command size - **/ -static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, - void *cmd, u16 cmd_size) -{ - struct hinic_api_cmd_cell *cell = chain->curr_node; - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - - cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; - - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) | - HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) | - HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS); - break; - - default: - dev_err(&pdev->dev, "unknown Chain type\n"); - return; - } - - cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | - HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); - - cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), - XOR_CHKSUM); - - /* The data in the HW should be in Big Endian Format */ - cell->desc = cpu_to_be64(cell->desc); - - memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); -} - -/** - * prepare_cell - prepare cell ctrl and cmd in the current cell - * @chain: chain for the command - * @dest: destination node on the card that will receive the command - * @cmd: command data - * @cmd_size: the command size - * - * Return 0 - Success, negative - Failure - **/ -static void prepare_cell(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, - void *cmd, u16 cmd_size) -{ - struct hinic_api_cmd_cell *curr_node = 
chain->curr_node; - u16 data_size = get_cell_data_size(chain->chain_type); - - prepare_cell_ctrl(&curr_node->ctrl, data_size); - prepare_api_cmd(chain, dest, cmd, cmd_size); -} - -static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) -{ - chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); -} - -/** - * api_cmd_status_update - update the status in the chain struct - * @chain: chain to update - **/ -static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) -{ - enum hinic_api_cmd_chain_type chain_type; - struct hinic_api_cmd_status *wb_status; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - u64 status_header; - u32 status; - - wb_status = chain->wb_status; - status_header = be64_to_cpu(wb_status->header); - - status = be32_to_cpu(wb_status->status); - if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) { - dev_err(&pdev->dev, "API CMD status: Xor check error\n"); - return; - } - - chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); - if (chain_type >= HINIC_API_CMD_MAX) { - dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type); - return; - } - - chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX); -} - -/** - * wait_for_status_poll - wait for write to api cmd command to complete - * @chain: the chain of the command - * - * Return 0 - Success, negative - Failure - **/ -static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) -{ - int err = -ETIMEDOUT; - unsigned long end; - - end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); - do { - api_cmd_status_update(chain); - - /* wait for CI to be updated - sign for completion */ - if (chain->cons_idx == chain->prod_idx) { - err = 0; - break; - } - - msleep(20); - } while (time_before(jiffies, end)); - - return err; -} - -/** - * wait_for_api_cmd_completion - wait for command to complete - * @chain: chain for the command - * - * Return 0 - Success, negative - Failure - **/ -static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - err = wait_for_status_poll(chain); - if (err) { - dev_err(&pdev->dev, "API CMD Poll status timeout\n"); - break; - } - break; - - default: - dev_err(&pdev->dev, "unknown API CMD Chain type\n"); - err = -EINVAL; - break; - } - - return err; -} - -/** - * api_cmd - API CMD command - * @chain: chain for the command - * @dest: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * - * Return 0 - Success, negative - Failure - **/ -static int api_cmd(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, u8 *cmd, u16 cmd_size) -{ - struct hinic_api_cmd_cell_ctxt *ctxt; - int err; - - down(&chain->sem); - if (chain_busy(chain)) { - up(&chain->sem); - return -EBUSY; - } - - prepare_cell(chain, dest, cmd, cmd_size); - cmd_chain_prod_idx_inc(chain); - - wmb(); /* inc pi before issue the command */ - - set_prod_idx(chain); /* issue the command */ - - ctxt = &chain->cell_ctxt[chain->prod_idx]; - - chain->curr_node = ctxt->cell_vaddr; - - err = wait_for_api_cmd_completion(chain); - - up(&chain->sem); - return err; -} - -/** - * hinic_api_cmd_write - Write API CMD command - * @chain: chain for write command - * @dest: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * - * Return 0 - Success, negative - Failure - **/ 
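A note on the submission flow in api_cmd() above: the cell is filled in coherent memory, the software producer index is advanced with a power-of-two mask, and only then is the hardware PI register written, with wmb() keeping the doorbell from overtaking the cell contents. Below is a minimal sketch of that ordering pattern only; demo_ring and demo_write_pi() are invented for illustration (kernel headers <linux/types.h> and <asm/barrier.h> assumed), and none of it is driver code.

struct demo_ring {
	u64 *cells;	/* cell array in DMA-coherent memory */
	u32 prod_idx;	/* software producer index */
	u32 num_cells;	/* must be a power of two */
};

void demo_write_pi(u32 pi);	/* hypothetical MMIO doorbell write */

static void demo_submit(struct demo_ring *ring, u64 cell)
{
	/* 1. fill the cell while the device still ignores this slot */
	ring->cells[ring->prod_idx] = cell;

	/* 2. advance the software PI, wrapping with a mask (see MASKED_IDX) */
	ring->prod_idx = (ring->prod_idx + 1) & (ring->num_cells - 1);

	/* 3. order steps 1-2 before the doorbell write in step 4 */
	wmb();

	/* 4. publish the new PI to the device, as set_prod_idx() does */
	demo_write_pi(ring->prod_idx);
}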
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, u8 *cmd, u16 size) -{ - /* Verify the chain type */ - if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU) - return api_cmd(chain, dest, cmd, size); - - return -EINVAL; -} - -/** - * api_cmd_hw_restart - restart the chain in the HW - * @chain: the API CMD specific chain to restart - * - * Return 0 - Success, negative - Failure - **/ -static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - int err = -ETIMEDOUT; - unsigned long end; - u32 reg_addr, val; - - /* Read Modify Write */ - reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type); - val = hinic_hwif_read_reg(hwif, reg_addr); - - val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); - val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART); - - hinic_hwif_write_reg(hwif, reg_addr, val); - - end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); - do { - val = hinic_hwif_read_reg(hwif, reg_addr); - - if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { - err = 0; - break; - } - - msleep(20); - } while (time_before(jiffies, end)); - - return err; -} - -/** - * api_cmd_ctrl_init - set the control register of a chain - * @chain: the API CMD specific chain to set control register for - **/ -static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - u32 addr, ctrl; - u16 cell_size; - - /* Read Modify Write */ - addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - - cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size); - - ctrl = hinic_hwif_read_reg(hwif, addr); - - ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - - ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) | - HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) | - HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE); - - hinic_hwif_write_reg(hwif, addr, ctrl); -} - -/** - * api_cmd_set_status_addr - set the status address of a chain in the HW - * @chain: the API CMD specific chain to set in HW status address for - **/ -static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - u32 addr, val; - - addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); - val = upper_32_bits(chain->wb_status_paddr); - hinic_hwif_write_reg(hwif, addr, val); - - addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); - val = lower_32_bits(chain->wb_status_paddr); - hinic_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_set_num_cells - set the number cells of a chain in the HW - * @chain: the API CMD specific chain to set in HW the number of cells for - **/ -static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - u32 addr, val; - - addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); - val = chain->num_cells; - hinic_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_head_init - set the head of a chain in the HW - * @chain: the API CMD specific chain to set in HW the head for - **/ -static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - u32 addr, val; - - addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); - val = upper_32_bits(chain->head_cell_paddr); - hinic_hwif_write_reg(hwif, 
addr, val); - - addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); - val = lower_32_bits(chain->head_cell_paddr); - hinic_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_chain_hw_clean - clean the HW - * @chain: the API CMD specific chain - **/ -static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - u32 addr, ctrl; - - addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - - ctrl = hinic_hwif_read_reg(hwif, addr); - ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - - hinic_hwif_write_reg(hwif, addr, ctrl); -} - -/** - * api_cmd_chain_hw_init - initialize the chain in the HW - * @chain: the API CMD specific chain to initialize in HW - * - * Return 0 - Success, negative - Failure - **/ -static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - api_cmd_chain_hw_clean(chain); - - api_cmd_set_status_addr(chain); - - err = api_cmd_hw_restart(chain); - if (err) { - dev_err(&pdev->dev, "Failed to restart API CMD HW\n"); - return err; - } - - api_cmd_ctrl_init(chain); - api_cmd_set_num_cells(chain); - api_cmd_head_init(chain); - return 0; -} - -/** - * free_cmd_buf - free the dma buffer of API CMD command - * @chain: the API CMD specific chain of the cmd - * @cell_idx: the cell index of the cmd - **/ -static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx) -{ - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE, - cell_ctxt->api_cmd_vaddr, - cell_ctxt->api_cmd_paddr); -} - -/** - * alloc_cmd_buf - allocate a dma buffer for API CMD command - * @chain: the API CMD specific chain for the cmd - * @cell: the cell in the HW for the cmd - * @cell_idx: the index of the cell - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, - struct hinic_api_cmd_cell *cell, int cell_idx) -{ - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - dma_addr_t cmd_paddr; - u8 *cmd_vaddr; - int err = 0; - - cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, - &cmd_paddr, GFP_KERNEL); - if (!cmd_vaddr) { - dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); - return -ENOMEM; - } - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - cell_ctxt->api_cmd_vaddr = cmd_vaddr; - cell_ctxt->api_cmd_paddr = cmd_paddr; - - /* set the cmd DMA address in the cell */ - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - /* The data in the HW should be in Big Endian Format */ - cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr); - break; - - default: - dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); - free_cmd_buf(chain, cell_idx); - err = -EINVAL; - break; - } - - return err; -} - -/** - * api_cmd_create_cell - create API CMD cell for specific chain - * @chain: the API CMD specific chain to create its cell - * @cell_idx: the index of the cell to create - * @pre_node: previous cell - * @node_vaddr: the returned virt addr of the cell - * - * Return 0 - Success, negative - Failure - 
**/ -static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, - int cell_idx, - struct hinic_api_cmd_cell *pre_node, - struct hinic_api_cmd_cell **node_vaddr) -{ - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_api_cmd_cell *node; - dma_addr_t node_paddr; - int err; - - node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, - GFP_KERNEL); - if (!node) { - dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); - return -ENOMEM; - } - - node->read.hw_wb_resp_paddr = 0; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - cell_ctxt->cell_vaddr = node; - cell_ctxt->cell_paddr = node_paddr; - - if (!pre_node) { - chain->head_cell_paddr = node_paddr; - chain->head_node = node; - } else { - /* The data in the HW should be in Big Endian Format */ - pre_node->next_cell_paddr = cpu_to_be64(node_paddr); - } - - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - err = alloc_cmd_buf(chain, node, cell_idx); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmd buffer\n"); - goto err_alloc_cmd_buf; - } - break; - - default: - dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); - err = -EINVAL; - goto err_alloc_cmd_buf; - } - - *node_vaddr = node; - return 0; - -err_alloc_cmd_buf: - dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr); - return err; -} - -/** - * api_cmd_destroy_cell - destroy API CMD cell of specific chain - * @chain: the API CMD specific chain to destroy its cell - * @cell_idx: the cell to destroy - **/ -static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, - int cell_idx) -{ - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_api_cmd_cell *node; - dma_addr_t node_paddr; - size_t node_size; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - node = cell_ctxt->cell_vaddr; - node_paddr = cell_ctxt->cell_paddr; - node_size = chain->cell_size; - - if (cell_ctxt->api_cmd_vaddr) { - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - free_cmd_buf(chain, cell_idx); - break; - default: - dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); - break; - } - - dma_free_coherent(&pdev->dev, node_size, node, - node_paddr); - } -} - -/** - * api_cmd_destroy_cells - destroy API CMD cells of specific chain - * @chain: the API CMD specific chain to destroy its cells - * @num_cells: number of cells to destroy - **/ -static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, - int num_cells) -{ - int cell_idx; - - for (cell_idx = 0; cell_idx < num_cells; cell_idx++) - api_cmd_destroy_cell(chain, cell_idx); -} - -/** - * api_cmd_create_cells - create API CMD cells for specific chain - * @chain: the API CMD specific chain - * - * Return 0 - Success, negative - Failure - **/ -static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) -{ - struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - int err, cell_idx; - - for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { - err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); - if (err) { - dev_err(&pdev->dev, "Failed to create API CMD cell\n"); - goto err_create_cell; - } - - pre_node = node; - } - - /* set the Final node to point on the start */ - node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); - - /* set the current node to be the head */ - chain->curr_node = 
chain->head_node; - return 0; - -err_create_cell: - api_cmd_destroy_cells(chain, cell_idx); - return err; -} - -/** - * api_chain_init - initialize API CMD specific chain - * @chain: the API CMD specific chain to initialize - * @attr: attributes to set in the chain - * - * Return 0 - Success, negative - Failure - **/ -static int api_chain_init(struct hinic_api_cmd_chain *chain, - struct hinic_api_cmd_chain_attr *attr) -{ - struct hinic_hwif *hwif = attr->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t cell_ctxt_size; - - chain->hwif = hwif; - chain->chain_type = attr->chain_type; - chain->num_cells = attr->num_cells; - chain->cell_size = attr->cell_size; - - chain->prod_idx = 0; - chain->cons_idx = 0; - - sema_init(&chain->sem, 1); - - cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); - if (!chain->cell_ctxt) - return -ENOMEM; - - chain->wb_status = dma_alloc_coherent(&pdev->dev, - sizeof(*chain->wb_status), - &chain->wb_status_paddr, - GFP_KERNEL); - if (!chain->wb_status) { - dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * api_chain_free - free API CMD specific chain - * @chain: the API CMD specific chain to free - **/ -static void api_chain_free(struct hinic_api_cmd_chain *chain) -{ - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - - dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status), - chain->wb_status, chain->wb_status_paddr); -} - -/** - * api_cmd_create_chain - create API CMD specific chain - * @attr: attributes to set the chain - * - * Return the created chain - **/ -static struct hinic_api_cmd_chain * - api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr) -{ - struct hinic_hwif *hwif = attr->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_api_cmd_chain *chain; - int err; - - if (attr->num_cells & (attr->num_cells - 1)) { - dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n"); - return ERR_PTR(-EINVAL); - } - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); - if (!chain) - return ERR_PTR(-ENOMEM); - - err = api_chain_init(chain, attr); - if (err) { - dev_err(&pdev->dev, "Failed to initialize chain\n"); - return ERR_PTR(err); - } - - err = api_cmd_create_cells(chain); - if (err) { - dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n"); - goto err_create_cells; - } - - err = api_cmd_chain_hw_init(chain); - if (err) { - dev_err(&pdev->dev, "Failed to initialize chain HW\n"); - goto err_chain_hw_init; - } - - return chain; - -err_chain_hw_init: - api_cmd_destroy_cells(chain, chain->num_cells); - -err_create_cells: - api_chain_free(chain); - return ERR_PTR(err); -} - -/** - * api_cmd_destroy_chain - destroy API CMD specific chain - * @chain: the API CMD specific chain to destroy - **/ -static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) -{ - api_cmd_chain_hw_clean(chain); - api_cmd_destroy_cells(chain, chain->num_cells); - api_chain_free(chain); -} - -/** - * hinic_api_cmd_init - Initialize all the API CMD chains - * @chain: the API CMD chains that are initialized - * @hwif: the hardware interface of a pci function device - * - * Return 0 - Success, negative - Failure - **/ -int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, - struct hinic_hwif *hwif) -{ - enum hinic_api_cmd_chain_type type, chain_type; - struct hinic_api_cmd_chain_attr attr; - struct pci_dev *pdev = hwif->pdev; - size_t hw_cell_sz; - int err; - - 
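	/* Editorial aside, not driver code: the power-of-two restriction that
	 * api_cmd_create_chain() enforces above exists because MASKED_IDX()
	 * wraps ring indices with a bitwise AND instead of a modulo:
	 *
	 *   MASKED_IDX(chain, idx) == (idx) & ((chain)->num_cells - 1)
	 *
	 * With num_cells == 32 (API_CHAIN_NUM_CELLS), index 33 masks to 1, so
	 * the producer and consumer indices wrap around the chain for free;
	 * with a non-power-of-two size the mask would alias the wrong slots.
	 */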
hw_cell_sz = sizeof(struct hinic_api_cmd_cell); - - attr.hwif = hwif; - attr.num_cells = API_CHAIN_NUM_CELLS; - attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz); - - chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; - for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { - attr.chain_type = chain_type; - - if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) - continue; - - chain[chain_type] = api_cmd_create_chain(&attr); - if (IS_ERR(chain[chain_type])) { - dev_err(&pdev->dev, "Failed to create chain %d\n", - chain_type); - err = PTR_ERR(chain[chain_type]); - goto err_create_chain; - } - } - - return 0; - -err_create_chain: - type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; - for ( ; type < chain_type; type++) { - if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) - continue; - - api_cmd_destroy_chain(chain[type]); - } - - return err; -} - -/** - * hinic_api_cmd_free - free the API CMD chains - * @chain: the API CMD chains that are freed - **/ -void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) -{ - enum hinic_api_cmd_chain_type chain_type; - - chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; - for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { - if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) - continue; - - api_cmd_destroy_chain(chain[chain_type]); - } -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h deleted file mode 100644 index 0ba00fd828df..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h +++ /dev/null @@ -1,199 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_API_CMD_H -#define HINIC_HW_API_CMD_H - -#include -#include - -#include "hinic_hw_if.h" - -#define HINIC_API_CMD_PI_IDX_SHIFT 0 - -#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF - -#define HINIC_API_CMD_PI_SET(val, member) \ - (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \ - HINIC_API_CMD_PI_##member##_SHIFT) - -#define HINIC_API_CMD_PI_CLEAR(val, member) \ - ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \ - << HINIC_API_CMD_PI_##member##_SHIFT))) - -#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 - -#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1 - -#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ - (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ - HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) - -#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ - (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ - HINIC_API_CMD_CHAIN_REQ_##member##_MASK) - -#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ - ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \ - << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) - -#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 -#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 -#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 -#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 - -#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1 -#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1 -#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3 -#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3 - -#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ - HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) - -#define 
HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ - ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ - << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) - -#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0 -#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16 -#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24 -#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 - -#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F -#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F -#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F -#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF - -#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ - ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ - HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) - -#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 -#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 -#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 -#define HINIC_API_CMD_DESC_DEST_SHIFT 32 -#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 -#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 - -#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1 -#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1 -#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1 -#define HINIC_API_CMD_DESC_DEST_MASK 0x1F -#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF -#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF - -#define HINIC_API_CMD_DESC_SET(val, member) \ - ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ - HINIC_API_CMD_DESC_##member##_SHIFT) - -#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 - -#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF - -#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ - (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ - HINIC_API_CMD_STATUS_HEADER_##member##_MASK) - -#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 -#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 - -#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF -#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3 - -#define HINIC_API_CMD_STATUS_GET(val, member) \ - (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ - HINIC_API_CMD_STATUS_##member##_MASK) - -enum hinic_api_cmd_chain_type { - HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, - - HINIC_API_CMD_MAX, -}; - -struct hinic_api_cmd_chain_attr { - struct hinic_hwif *hwif; - enum hinic_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 cell_size; -}; - -struct hinic_api_cmd_status { - u64 header; - u32 status; - u32 rsvd0; - u32 rsvd1; - u32 rsvd2; - u64 rsvd3; -}; - -/* HW struct */ -struct hinic_api_cmd_cell { - u64 ctrl; - - /* address is 64 bit in HW struct */ - u64 next_cell_paddr; - - u64 desc; - - /* HW struct */ - union { - struct { - u64 hw_cmd_paddr; - } write; - - struct { - u64 hw_wb_resp_paddr; - u64 hw_cmd_paddr; - } read; - }; -}; - -struct hinic_api_cmd_cell_ctxt { - dma_addr_t cell_paddr; - struct hinic_api_cmd_cell *cell_vaddr; - - dma_addr_t api_cmd_paddr; - u8 *api_cmd_vaddr; -}; - -struct hinic_api_cmd_chain { - struct hinic_hwif *hwif; - enum hinic_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 cell_size; - - /* HW members in 24 bit format */ - u32 prod_idx; - u32 cons_idx; - - struct semaphore sem; - - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - - dma_addr_t wb_status_paddr; - struct hinic_api_cmd_status *wb_status; - - dma_addr_t head_cell_paddr; - struct hinic_api_cmd_cell *head_node; - struct hinic_api_cmd_cell *curr_node; -}; - -int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, u8 *cmd, u16 size); - -int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, - struct hinic_hwif *hwif); - 
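The header above builds every register-field accessor from one token-pasting pattern: a _SHIFT and _MASK constant pair per field, plus SET/GET/CLEAR macros that paste the field name between a fixed prefix and suffix. A small self-contained illustration of the same idiom follows; the DEMO_STATUS names and field values are invented for the demo and compile as plain userspace C.

#include <stdint.h>
#include <stdio.h>

#define DEMO_STATUS_CONS_IDX_SHIFT 0
#define DEMO_STATUS_CONS_IDX_MASK  0xFFFFFF

/* mask the value, then shift it into the field's position */
#define DEMO_STATUS_SET(val, member) \
	(((uint32_t)(val) & DEMO_STATUS_##member##_MASK) << \
	 DEMO_STATUS_##member##_SHIFT)

/* shift the field back down, then mask off neighboring fields */
#define DEMO_STATUS_GET(val, member) \
	(((val) >> DEMO_STATUS_##member##_SHIFT) & \
	 DEMO_STATUS_##member##_MASK)

int main(void)
{
	uint32_t reg = DEMO_STATUS_SET(0x1234, CONS_IDX);

	/* prints 1234: the field survives a SET/GET round trip */
	printf("%x\n", DEMO_STATUS_GET(reg, CONS_IDX));
	return 0;
}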
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c deleted file mode 100644 index eb53c15b13f3..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ /dev/null @@ -1,938 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" -#include "hinic_hw_io.h" -#include "hinic_hw_dev.h" - -#define CMDQ_CEQE_TYPE_SHIFT 0 - -#define CMDQ_CEQE_TYPE_MASK 0x7 - -#define CMDQ_CEQE_GET(val, member) \ - (((val) >> CMDQ_CEQE_##member##_SHIFT) \ - & CMDQ_CEQE_##member##_MASK) - -#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20 - -#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF - -#define CMDQ_WQE_ERRCODE_GET(val, member) \ - (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \ - & CMDQ_WQE_ERRCODE_##member##_MASK) - -#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) - -#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi)) - -#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) - -#define CMDQ_WQE_COMPLETED(ctrl_info) \ - HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) - -#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) - -#define CMDQ_DB_OFF SZ_2K - -#define CMDQ_WQEBB_SIZE 64 -#define CMDQ_WQE_SIZE 64 -#define CMDQ_DEPTH SZ_4K - -#define CMDQ_WQ_PAGE_SIZE SZ_4K - -#define WQE_LCMD_SIZE 64 -#define WQE_SCMD_SIZE 64 - -#define COMPLETE_LEN 3 - -#define CMDQ_TIMEOUT 1000 - -#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) - -#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ - struct hinic_cmdqs, cmdq[0]) - -#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \ - struct hinic_func_to_io, \ - cmdqs) - -enum cmdq_wqe_type { - WQE_LCMD_TYPE = 0, - WQE_SCMD_TYPE = 1, -}; - -enum completion_format { - COMPLETE_DIRECT = 0, - COMPLETE_SGE = 1, -}; - -enum data_format { - DATA_SGE = 0, - DATA_DIRECT = 1, -}; - -enum bufdesc_len { - BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */ - BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */ -}; - -enum ctrl_sect_len { - CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */ - CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */ -}; - -enum cmdq_scmd_type { - CMDQ_SET_ARM_CMD = 2, -}; - -enum cmdq_cmd_type { - CMDQ_CMD_SYNC_DIRECT_RESP = 0, - CMDQ_CMD_SYNC_SGE_RESP = 1, -}; - -enum completion_request { - NO_CEQ = 0, - CEQ_SET = 1, -}; - -/** - * hinic_alloc_cmdq_buf - alloc buffer for sending command - * @cmdqs: the cmdqs - * @cmdq_buf: the buffer returned in this struct - * - * Return 0 - Success, negative - Failure - **/ -int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf) -{ - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL, - &cmdq_buf->dma_addr); - if (!cmdq_buf->buf) { - dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * hinic_free_cmdq_buf - free buffer - * @cmdqs: the cmdqs - * @cmdq_buf: the buffer to free that is in this 
struct - **/ -void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf) -{ - dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); -} - -static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) -{ - unsigned int wqe_size = 0; - - switch (len) { - case BUFDESC_LCMD_LEN: - wqe_size = WQE_LCMD_SIZE; - break; - case BUFDESC_SCMD_LEN: - wqe_size = WQE_SCMD_SIZE; - break; - } - - return wqe_size; -} - -static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion, - struct hinic_cmdq_buf *buf_out) -{ - struct hinic_sge_resp *sge_resp = &completion->sge_resp; - - hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); -} - -static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, - enum hinic_cmd_ack_type ack_type, - enum hinic_mod_type mod, u8 cmd, u16 prod_idx, - enum completion_format complete_format, - enum data_format data_format, - enum bufdesc_len buf_len) -{ - struct hinic_cmdq_wqe_lcmd *wqe_lcmd; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - enum ctrl_sect_len ctrl_len; - struct hinic_ctrl *ctrl; - u32 saved_data; - - if (data_format == DATA_SGE) { - wqe_lcmd = &wqe->wqe_lcmd; - - wqe_lcmd->status.status_info = 0; - ctrl = &wqe_lcmd->ctrl; - ctrl_len = CTRL_SECT_LEN; - } else { - wqe_scmd = &wqe->direct_wqe.wqe_scmd; - - wqe_scmd->status.status_info = 0; - ctrl = &wqe_scmd->ctrl; - ctrl_len = CTRL_DIRECT_SECT_LEN; - } - - ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) | - HINIC_CMDQ_CTRL_SET(cmd, CMD) | - HINIC_CMDQ_CTRL_SET(mod, MOD) | - HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE); - - CMDQ_WQE_HEADER(wqe)->header_info = - HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | - HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | - HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | - HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | - HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | - HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | - HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED); - - saved_data = CMDQ_WQE_HEADER(wqe)->saved_data; - saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM); - - if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM)) - CMDQ_WQE_HEADER(wqe)->saved_data |= - HINIC_SAVED_DATA_SET(1, ARM); - else - CMDQ_WQE_HEADER(wqe)->saved_data = saved_data; -} - -static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd, - struct hinic_cmdq_buf *buf_in) -{ - hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size); -} - -static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, - void *buf_in, u32 in_size) -{ - struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; - - wqe_scmd->buf_desc.buf_len = in_size; - memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); -} - -static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - struct hinic_cmdq_buf *buf_in, - struct hinic_cmdq_buf *buf_out, int wrapped, - enum hinic_cmd_ack_type ack_type, - enum hinic_mod_type mod, u8 cmd, u16 prod_idx) -{ - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; - enum completion_format complete_format; - - switch (cmd_type) { - case CMDQ_CMD_SYNC_SGE_RESP: - complete_format = COMPLETE_SGE; - cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out); - break; - case CMDQ_CMD_SYNC_DIRECT_RESP: - complete_format = COMPLETE_DIRECT; - wqe_lcmd->completion.direct_resp = 0; - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, - prod_idx, complete_format, DATA_SGE, - BUFDESC_LCMD_LEN); - - 
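	/* Editorial aside, not driver code: a long-command (lcmd) WQE is built
	 * in two steps. cmdq_prepare_wqe_ctrl() above fills the header and
	 * control words (PI, command, module, ack type, buffer-descriptor
	 * length), and the call below only points the WQE's scatter-gather
	 * entry at the caller's DMA buffer, so the payload is never copied
	 * into the WQE. The short-command path in cmdq_set_direct_wqe() does
	 * the opposite and memcpy()s the payload inline via
	 * cmdq_set_direct_wqe_data().
	 */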
cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); -} - -static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - void *buf_in, u16 in_size, - struct hinic_cmdq_buf *buf_out, int wrapped, - enum hinic_cmd_ack_type ack_type, - enum hinic_mod_type mod, u8 cmd, u16 prod_idx) -{ - struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; - enum completion_format complete_format; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - - wqe_scmd = &direct_wqe->wqe_scmd; - - switch (cmd_type) { - case CMDQ_CMD_SYNC_SGE_RESP: - complete_format = COMPLETE_SGE; - cmdq_set_sge_completion(&wqe_scmd->completion, buf_out); - break; - case CMDQ_CMD_SYNC_DIRECT_RESP: - complete_format = COMPLETE_DIRECT; - wqe_scmd->completion.direct_resp = 0; - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, - complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); - - cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size); -} - -static void cmdq_wqe_fill(void *dst, void *src) -{ - memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST, - CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); - - wmb(); /* The first 8 bytes should be written last */ - - *(u64 *)dst = *(u64 *)src; -} - -static void cmdq_fill_db(u32 *db_info, - enum hinic_cmdq_type cmdq_type, u16 prod_idx) -{ - *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | - HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) | - HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | - HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE); -} - -static void cmdq_set_db(struct hinic_cmdq *cmdq, - enum hinic_cmdq_type cmdq_type, u16 prod_idx) -{ - u32 db_info; - - cmdq_fill_db(&db_info, cmdq_type, prod_idx); - - /* The data that is written to HW should be in Big Endian Format */ - db_info = cpu_to_be32(db_info); - - wmb(); /* write all before the doorbell */ - - writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); -} - -static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, - enum hinic_mod_type mod, u8 cmd, - struct hinic_cmdq_buf *buf_in, - u64 *resp) -{ - struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; - u16 curr_prod_idx, next_prod_idx; - int errcode, wrapped, num_wqebbs; - struct hinic_wq *wq = cmdq->wq; - struct hinic_hw_wqe *hw_wqe; - struct completion done; - - /* Keep doorbell index correct. bh - for tasklet(ceq). 
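The CEQ completion path (cmdq_ceq_handler -> cmdq_sync_cmd_handler) takes this same cmdq_lock from tasklet context, so a plain spin_lock() here could deadlock if the softirq ran on this CPU while the lock was held; hence the _bh variants below.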
*/ - spin_lock_bh(&cmdq->cmdq_lock); - - /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ - hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx); - if (IS_ERR(hw_wqe)) { - spin_unlock_bh(&cmdq->cmdq_lock); - return -EBUSY; - } - - curr_cmdq_wqe = &hw_wqe->cmdq_wqe; - - wrapped = cmdq->wrapped; - - num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; - next_prod_idx = curr_prod_idx + num_wqebbs; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= wq->q_depth; - } - - cmdq->errcode[curr_prod_idx] = &errcode; - - init_completion(&done); - cmdq->done[curr_prod_idx] = &done; - - cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL, - wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd, - curr_prod_idx); - - /* The data that is written to HW should be in Big Endian Format */ - hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE); - - /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); - - cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); - - spin_unlock_bh(&cmdq->cmdq_lock); - - if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { - spin_lock_bh(&cmdq->cmdq_lock); - - if (cmdq->errcode[curr_prod_idx] == &errcode) - cmdq->errcode[curr_prod_idx] = NULL; - - if (cmdq->done[curr_prod_idx] == &done) - cmdq->done[curr_prod_idx] = NULL; - - spin_unlock_bh(&cmdq->cmdq_lock); - - return -ETIMEDOUT; - } - - smp_rmb(); /* read error code after completion */ - - if (resp) { - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd; - - *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp); - } - - if (errcode != 0) - return -EFAULT; - - return 0; -} - -static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, - u16 in_size) -{ - struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; - u16 curr_prod_idx, next_prod_idx; - struct hinic_wq *wq = cmdq->wq; - struct hinic_hw_wqe *hw_wqe; - int wrapped, num_wqebbs; - - /* Keep doorbell index correct */ - spin_lock(&cmdq->cmdq_lock); - - /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ - hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx); - if (IS_ERR(hw_wqe)) { - spin_unlock(&cmdq->cmdq_lock); - return -EBUSY; - } - - curr_cmdq_wqe = &hw_wqe->cmdq_wqe; - - wrapped = cmdq->wrapped; - - num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; - next_prod_idx = curr_prod_idx + num_wqebbs; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= wq->q_depth; - } - - cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, - in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ, - HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx); - - /* The data that is written to HW should be in Big Endian Format */ - hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE); - - /* cmdq wqe is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); - - cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); - - spin_unlock(&cmdq->cmdq_lock); - return 0; -} - -static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in) -{ - if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) - return -EINVAL; - - return 0; -} - -/** - * hinic_cmdq_direct_resp - send command with direct data as resp - * @cmdqs: the cmdqs - * @mod: module on the card that will handle the command - * @cmd: the command - * @buf_in: the buffer for the command - * @resp: the response to return - * - * Return 0 - Success, negative - Failure - **/ -int hinic_cmdq_direct_resp(struct 
hinic_cmdqs *cmdqs, - enum hinic_mod_type mod, u8 cmd, - struct hinic_cmdq_buf *buf_in, u64 *resp) -{ - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - err = cmdq_params_valid(buf_in); - if (err) { - dev_err(&pdev->dev, "Invalid CMDQ parameters\n"); - return err; - } - - return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], - mod, cmd, buf_in, resp); -} - -/** - * hinic_set_arm_bit - set arm bit for enable interrupt again - * @cmdqs: the cmdqs - * @q_type: type of queue to set the arm bit for - * @q_id: the queue number - * - * Return 0 - Success, negative - Failure - **/ -int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, - enum hinic_set_arm_qtype q_type, u32 q_id) -{ - struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_arm_bit arm_bit; - int err; - - arm_bit.q_type = q_type; - arm_bit.q_id = q_id; - - err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit)); - if (err) { - dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); - return err; - } - - return 0; -} - -static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, - struct hinic_cmdq_wqe *wqe) -{ - u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info); - unsigned int bufdesc_len, wqe_size; - struct hinic_ctrl *ctrl; - - bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); - wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); - if (wqe_size == WQE_LCMD_SIZE) { - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; - - ctrl = &wqe_lcmd->ctrl; - } else { - struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - - wqe_scmd = &direct_wqe->wqe_scmd; - ctrl = &wqe_scmd->ctrl; - } - - /* clear HW busy bit */ - ctrl->ctrl_info = 0; - - wmb(); /* verify wqe is clear */ -} - -/** - * cmdq_arm_ceq_handler - cmdq completion event handler for arm command - * @cmdq: the cmdq of the arm command - * @wqe: the wqe of the arm command - * - * Return 0 - Success, negative - Failure - **/ -static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, - struct hinic_cmdq_wqe *wqe) -{ - struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - struct hinic_ctrl *ctrl; - u32 ctrl_info; - - wqe_scmd = &direct_wqe->wqe_scmd; - ctrl = &wqe_scmd->ctrl; - ctrl_info = be32_to_cpu(ctrl->ctrl_info); - - /* HW should toggle the HW BUSY BIT */ - if (!CMDQ_WQE_COMPLETED(ctrl_info)) - return -EBUSY; - - clear_wqe_complete_bit(cmdq, wqe); - - hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE); - return 0; -} - -static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, - int errcode) -{ - if (cmdq->errcode[prod_idx]) - *cmdq->errcode[prod_idx] = errcode; -} - -/** - * cmdq_arm_ceq_handler - cmdq completion event handler for sync command - * @cmdq: the cmdq of the command - * @cons_idx: the consumer index to update the error code for - * @errcode: the error code - **/ -static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx, - int errcode) -{ - u16 prod_idx = cons_idx; - - spin_lock(&cmdq->cmdq_lock); - cmdq_update_errcode(cmdq, prod_idx, errcode); - - wmb(); /* write all before update for the command request */ - - if (cmdq->done[prod_idx]) - complete(cmdq->done[prod_idx]); - spin_unlock(&cmdq->cmdq_lock); -} - -static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, - struct hinic_cmdq_wqe *cmdq_wqe) -{ - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd; - struct 
hinic_status *status = &wqe_lcmd->status; - struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl; - int errcode; - - if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) - return -EBUSY; - - errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); - - cmdq_sync_cmd_handler(cmdq, ci, errcode); - - clear_wqe_complete_bit(cmdq, cmdq_wqe); - hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE); - return 0; -} - -/** - * cmdq_ceq_handler - cmdq completion event handler - * @handle: private data for the handler(cmdqs) - * @ceqe_data: ceq element data - **/ -static void cmdq_ceq_handler(void *handle, u32 ceqe_data) -{ - enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE); - struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle; - struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; - struct hinic_cmdq_header *header; - struct hinic_hw_wqe *hw_wqe; - int err, set_arm = 0; - u32 saved_data; - u16 ci; - - /* Read the smallest wqe size for getting wqe size */ - while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) { - if (IS_ERR(hw_wqe)) - break; - - header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe); - saved_data = be32_to_cpu(header->saved_data); - - if (HINIC_SAVED_DATA_GET(saved_data, ARM)) { - /* arm_bit was set until here */ - set_arm = 0; - - if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe)) - break; - } else { - set_arm = 1; - - hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci); - if (IS_ERR(hw_wqe)) - break; - - if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe)) - break; - } - } - - if (set_arm) { - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type); - if (err) - dev_err(&pdev->dev, "Failed to set arm for CMDQ\n"); - } -} - -/** - * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq - * @cmdq_ctxt: cmdq ctxt to initialize - * @cmdq: the cmdq - * @cmdq_pages: the memory of the queue - **/ -static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt, - struct hinic_cmdq *cmdq, - struct hinic_cmdq_pages *cmdq_pages) -{ - struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; - u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; - struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); - struct hinic_wq *wq = cmdq->wq; - - /* The data in the HW is in Big Endian Format */ - wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); - - pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size); - - ctxt_info->curr_wqe_page_pfn = - HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED); - - /* block PFN - Read Modify Write */ - cmdq_first_block_paddr = cmdq_pages->page_paddr; - - pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size); - - ctxt_info->wq_block_pfn = - HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) | - HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI); - - cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif); - cmdq_ctxt->cmdq_type = cmdq->cmdq_type; -} - -/** - * init_cmdq - initialize cmdq - * @cmdq: the cmdq - * @wq: the wq attaced to the cmdq - * @q_type: the cmdq type of the cmdq - * @db_area: doorbell area for the cmdq - * - * Return 0 - Success, negative - Failure - **/ -static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq, - enum hinic_cmdq_type q_type, void __iomem *db_area) -{ - int err; - - cmdq->wq = wq; - cmdq->cmdq_type = 
q_type; - cmdq->wrapped = 1; - - spin_lock_init(&cmdq->cmdq_lock); - - cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); - if (!cmdq->done) - return -ENOMEM; - - cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode), - wq->q_depth)); - if (!cmdq->errcode) { - err = -ENOMEM; - goto err_errcode; - } - - cmdq->db_base = db_area + CMDQ_DB_OFF; - return 0; - -err_errcode: - vfree(cmdq->done); - return err; -} - -/** - * free_cmdq - Free cmdq - * @cmdq: the cmdq to free - **/ -static void free_cmdq(struct hinic_cmdq *cmdq) -{ - vfree(cmdq->errcode); - vfree(cmdq->done); -} - -/** - * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq - * @hwdev: the NIC HW device - * @cmdqs: cmdqs to write the ctxts for - * &db_area: db_area for all the cmdqs - * - * Return 0 - Success, negative - Failure - **/ -static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, - struct hinic_cmdqs *cmdqs, void __iomem **db_area) -{ - struct hinic_hwif *hwif = hwdev->hwif; - enum hinic_cmdq_type type, cmdq_type; - struct hinic_cmdq_ctxt *cmdq_ctxts; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - size_t cmdq_ctxts_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI function type\n"); - return -EINVAL; - } - - cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); - cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); - if (!cmdq_ctxts) - return -ENOMEM; - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmdq_type = HINIC_CMDQ_SYNC; - for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { - err = init_cmdq(&cmdqs->cmdq[cmdq_type], - &cmdqs->saved_wqs[cmdq_type], cmdq_type, - db_area[cmdq_type]); - if (err) { - dev_err(&pdev->dev, "Failed to initialize cmdq\n"); - goto err_init_cmdq; - } - - cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type], - &cmdqs->cmdq[cmdq_type], - &cmdqs->cmdq_pages); - } - - /* Write the CMDQ ctxts */ - cmdq_type = HINIC_CMDQ_SYNC; - for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_CMDQ_CTXT_SET, - &cmdq_ctxts[cmdq_type], - sizeof(cmdq_ctxts[cmdq_type]), - NULL, NULL, HINIC_MGMT_MSG_SYNC); - if (err) { - dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n", - cmdq_type); - goto err_write_cmdq_ctxt; - } - } - - devm_kfree(&pdev->dev, cmdq_ctxts); - return 0; - -err_write_cmdq_ctxt: - cmdq_type = HINIC_MAX_CMDQ_TYPES; - -err_init_cmdq: - for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++) - free_cmdq(&cmdqs->cmdq[type]); - - devm_kfree(&pdev->dev, cmdq_ctxts); - return err; -} - -/** - * hinic_init_cmdqs - init all cmdqs - * @cmdqs: cmdqs to init - * @hwif: HW interface for accessing cmdqs - * @db_area: doorbell areas for all the cmdqs - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, - void __iomem **db_area) -{ - struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); - struct pci_dev *pdev = hwif->pdev; - struct hinic_hwdev *hwdev; - size_t saved_wqs_size; - u16 max_wqe_size; - int err; - - cmdqs->hwif = hwif; - cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev, - HINIC_CMDQ_BUF_SIZE, - HINIC_CMDQ_BUF_SIZE, 0); - if (!cmdqs->cmdq_buf_pool) - return -ENOMEM; - - saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); - cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); - if (!cmdqs->saved_wqs) { - err = -ENOMEM; - goto 
err_saved_wqs; - } - - max_wqe_size = WQE_LCMD_SIZE; - err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif, - HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE, - CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size); - if (err) { - dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n"); - goto err_cmdq_wqs; - } - - hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io); - err = init_cmdqs_ctxt(hwdev, cmdqs, db_area); - if (err) { - dev_err(&pdev->dev, "Failed to write cmdq ctxt\n"); - goto err_cmdq_ctxt; - } - - hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs, - cmdq_ceq_handler); - return 0; - -err_cmdq_ctxt: - hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, - HINIC_MAX_CMDQ_TYPES); - -err_cmdq_wqs: - devm_kfree(&pdev->dev, cmdqs->saved_wqs); - -err_saved_wqs: - dma_pool_destroy(cmdqs->cmdq_buf_pool); - return err; -} - -/** - * hinic_free_cmdqs - free all cmdqs - * @cmdqs: cmdqs to free - **/ -void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) -{ - struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - enum hinic_cmdq_type cmdq_type; - - hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); - - cmdq_type = HINIC_CMDQ_SYNC; - for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) - free_cmdq(&cmdqs->cmdq[cmdq_type]); - - hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, - HINIC_MAX_CMDQ_TYPES); - - devm_kfree(&pdev->dev, cmdqs->saved_wqs); - - dma_pool_destroy(cmdqs->cmdq_buf_pool); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h deleted file mode 100644 index 7a434b653faa..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ /dev/null @@ -1,178 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_CMDQ_H -#define HINIC_CMDQ_H - -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_wq.h" - -#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 -#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56 -#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61 -#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62 -#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63 - -#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF -#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F -#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1 -#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1 -#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1 - -#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \ - (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ - << HINIC_CMDQ_CTXT_##member##_SHIFT) - -#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ - ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ - << HINIC_CMDQ_CTXT_##member##_SHIFT))) - -#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 -#define HINIC_CMDQ_CTXT_CI_SHIFT 52 - -#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF -#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF - -#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ - (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ - << HINIC_CMDQ_CTXT_##member##_SHIFT) - -#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \ - ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ - << HINIC_CMDQ_CTXT_##member##_SHIFT))) - -#define HINIC_SAVED_DATA_ARM_SHIFT 31 - -#define HINIC_SAVED_DATA_ARM_MASK 0x1 - -#define HINIC_SAVED_DATA_SET(val, member) \ - (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \ - << HINIC_SAVED_DATA_##member##_SHIFT) - 
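Stepping back to hinic_init_cmdqs() in hinic_hw_cmdq.c above: it follows the usual kernel pattern of pairing each allocation with a goto label, so a failure unwinds, in reverse order, exactly what was already set up. Below is a stripped-down sketch of that idiom; demo_dev and every helper are invented, and only the unwind structure mirrors the real function.

struct demo_dev;

int alloc_pool(struct demo_dev *dev);	/* stands in for dma_pool_create() */
int alloc_wqs(struct demo_dev *dev);	/* stands in for hinic_wqs_cmdq_alloc() */
int write_ctxts(struct demo_dev *dev);	/* stands in for init_cmdqs_ctxt() */
void free_wqs(struct demo_dev *dev);
void free_pool(struct demo_dev *dev);

static int demo_init(struct demo_dev *dev)
{
	int err;

	err = alloc_pool(dev);
	if (err)
		return err;	/* nothing to unwind yet */

	err = alloc_wqs(dev);
	if (err)
		goto err_wqs;

	err = write_ctxts(dev);
	if (err)
		goto err_ctxt;

	return 0;

err_ctxt:			/* each label frees only what preceded it */
	free_wqs(dev);
err_wqs:
	free_pool(dev);
	return err;
}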
-#define HINIC_SAVED_DATA_GET(val, member) \ - (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \ - & HINIC_SAVED_DATA_##member##_MASK) - -#define HINIC_SAVED_DATA_CLEAR(val, member) \ - ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \ - << HINIC_SAVED_DATA_##member##_SHIFT))) - -#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 -#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23 -#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 -#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27 - -#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF -#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1 -#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7 -#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F - -#define HINIC_CMDQ_DB_INFO_SET(val, member) \ - (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \ - << HINIC_CMDQ_DB_INFO_##member##_SHIFT) - -#define HINIC_CMDQ_BUF_SIZE 2048 - -#define HINIC_CMDQ_BUF_HW_RSVD 8 -#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \ - HINIC_CMDQ_BUF_HW_RSVD) - -enum hinic_cmdq_type { - HINIC_CMDQ_SYNC, - - HINIC_MAX_CMDQ_TYPES, -}; - -enum hinic_set_arm_qtype { - HINIC_SET_ARM_CMDQ, -}; - -enum hinic_cmd_ack_type { - HINIC_CMD_ACK_TYPE_CMDQ, -}; - -struct hinic_cmdq_buf { - void *buf; - dma_addr_t dma_addr; - size_t size; -}; - -struct hinic_cmdq_arm_bit { - u32 q_type; - u32 q_id; -}; - -struct hinic_cmdq_ctxt_info { - u64 curr_wqe_page_pfn; - u64 wq_block_pfn; -}; - -struct hinic_cmdq_ctxt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 cmdq_type; - u8 rsvd1[1]; - - u8 rsvd2[4]; - - struct hinic_cmdq_ctxt_info ctxt_info; -}; - -struct hinic_cmdq { - struct hinic_wq *wq; - - enum hinic_cmdq_type cmdq_type; - int wrapped; - - /* Lock for keeping the doorbell order */ - spinlock_t cmdq_lock; - - struct completion **done; - int **errcode; - - /* doorbell area */ - void __iomem *db_base; -}; - -struct hinic_cmdqs { - struct hinic_hwif *hwif; - - struct dma_pool *cmdq_buf_pool; - - struct hinic_wq *saved_wqs; - - struct hinic_cmdq_pages cmdq_pages; - - struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; -}; - -int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf); - -void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf); - -int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, - enum hinic_mod_type mod, u8 cmd, - struct hinic_cmdq_buf *buf_in, u64 *out_param); - -int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, - enum hinic_set_arm_qtype q_type, u32 q_id); - -int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, - void __iomem **db_area); - -void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h deleted file mode 100644 index cdec1d0a3962..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_CSR_H -#define HINIC_HW_CSR_H - -/* HW interface registers */ -#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 -#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 - -#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 -#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 - -#define HINIC_DMA_ATTR_BASE 0xC80 -#define HINIC_ELECTION_BASE 0x4200 - -#define HINIC_DMA_ATTR_STRIDE 0x4 -#define HINIC_CSR_DMA_ATTR_ADDR(idx) \ - (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE) - -#define HINIC_PPF_ELECTION_STRIDE 0x4 -#define HINIC_CSR_MAX_PORTS 4 - -#define 
HINIC_CSR_PPF_ELECTION_ADDR(idx) \ - (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE) - -/* API CMD registers */ -#define HINIC_CSR_API_CMD_BASE 0xF000 - -#define HINIC_CSR_API_CMD_STRIDE 0x100 - -#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -/* MSI-X registers */ -#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 -#define HINIC_CSR_MSIX_CNT_BASE 0x2004 - -#define HINIC_CSR_MSIX_STRIDE 0x8 - -#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \ - (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) - -#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ - (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) - -/* EQ registers */ -#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 -#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 - -#define HINIC_EQ_MTT_OFF_STRIDE 0x40 - -#define HINIC_CSR_AEQ_MTT_OFF(id) \ - (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) - -#define HINIC_CSR_CEQ_MTT_OFF(id) \ - (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) - -#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 - -#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) - -#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) - -#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 -#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 -#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08 -#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C - -#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000 -#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004 -#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008 -#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C - -#define HINIC_EQ_OFF_STRIDE 0x80 - -#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ - (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ - (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ - (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ - (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \ - (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \ 
- (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ - (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ - (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c deleted file mode 100644 index 6f2cf569a283..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ /dev/null @@ -1,1020 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_qp_ctxt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" -#include "hinic_hw_dev.h" - -#define IO_STATUS_TIMEOUT 100 -#define OUTBOUND_STATE_TIMEOUT 100 -#define DB_STATE_TIMEOUT 100 - -#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ - (2 * (max_qps) + (num_aeqs) + (num_ceqs)) - -#define ADDR_IN_4BYTES(addr) ((addr) >> 2) - -enum intr_type { - INTR_MSIX_TYPE, -}; - -enum io_status { - IO_STOPPED = 0, - IO_RUNNING = 1, -}; - -enum hw_ioctxt_set_cmdq_depth { - HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT, -}; - -/* HW struct */ -struct hinic_dev_cap { - u8 status; - u8 version; - u8 rsvd0[6]; - - u8 rsvd1[5]; - u8 intr_type; - u8 rsvd2[66]; - u16 max_sqs; - u16 max_rqs; - u8 rsvd3[208]; -}; - -/** - * get_capability - convert device capabilities to NIC capabilities - * @hwdev: the HW device to set and convert device capabilities for - * @dev_cap: device capabilities from FW - * - * Return 0 - Success, negative - Failure - **/ -static int get_capability(struct hinic_hwdev *hwdev, - struct hinic_dev_cap *dev_cap) -{ - struct hinic_cap *nic_cap = &hwdev->nic_cap; - int num_aeqs, num_ceqs, num_irqs; - - if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif)) - return -EINVAL; - - if (dev_cap->intr_type != INTR_MSIX_TYPE) - return -EFAULT; - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); - num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); - - /* Each QP has its own (SQ + RQ) interrupts */ - nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2; - - if (nic_cap->num_qps > HINIC_Q_CTXT_MAX) - nic_cap->num_qps = HINIC_Q_CTXT_MAX; - - nic_cap->max_qps = dev_cap->max_sqs + 1; - if (nic_cap->max_qps != (dev_cap->max_rqs + 1)) - return -EFAULT; - - if (nic_cap->num_qps > nic_cap->max_qps) - nic_cap->num_qps = nic_cap->max_qps; - - return 0; -} - -/** - * get_cap_from_fw - get device capabilities from FW - * @pfhwdev: the PF HW device to get capabilities for - * - * Return 0 - Success, negative - Failure - **/ -static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev) -{ - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_dev_cap dev_cap; - u16 in_len, out_len; - int err; - - in_len = 0; - out_len = sizeof(dev_cap); - - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM, - HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap, - &out_len, HINIC_MGMT_MSG_SYNC); - if (err) { - dev_err(&pdev->dev, "Failed to get capability from FW\n"); - return err; - } - - return get_capability(hwdev, &dev_cap); -} - -/** - * get_dev_cap - get device capabilities - * @hwdev: the NIC HW device to get 
capabilities for - * - * Return 0 - Success, negative - Failure - **/ -static int get_dev_cap(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - int err; - - switch (HINIC_FUNC_TYPE(hwif)) { - case HINIC_PPF: - case HINIC_PF: - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - err = get_cap_from_fw(pfhwdev); - if (err) { - dev_err(&pdev->dev, "Failed to get capability from FW\n"); - return err; - } - break; - - default: - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - return 0; -} - -/** - * init_msix - enable the msix and save the entries - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -static int init_msix(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - int nr_irqs, num_aeqs, num_ceqs; - size_t msix_entries_size; - int i, err; - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); - nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs); - if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) - nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); - - msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); - hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, - GFP_KERNEL); - if (!hwdev->msix_entries) - return -ENOMEM; - - for (i = 0; i < nr_irqs; i++) - hwdev->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs); - if (err) { - dev_err(&pdev->dev, "Failed to enable pci msix\n"); - return err; - } - - return 0; -} - -/** - * disable_msix - disable the msix - * @hwdev: the NIC HW device - **/ -static void disable_msix(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - - pci_disable_msix(pdev); -} - -/** - * hinic_port_msg_cmd - send port msg to mgmt - * @hwdev: the NIC HW device - * @cmd: the port command - * @buf_in: input buffer - * @in_size: input size - * @buf_out: output buffer - * @out_size: returned output size - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd, - buf_in, in_size, buf_out, out_size, - HINIC_MGMT_MSG_SYNC); -} - -/** - * init_fw_ctxt- Init Firmware tables before network mgmt and io operations - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -static int init_fw_ctxt(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmd_fw_ctxt fw_ctxt; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, - &fw_ctxt, sizeof(fw_ctxt), - &fw_ctxt, &out_size); - if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) { - dev_err(&pdev->dev, "Failed to init FW ctxt, 
ret = %d\n", - fw_ctxt.status); - return -EFAULT; - } - - return 0; -} - -/** - * set_hw_ioctxt - set the shape of the IO queues in FW - * @hwdev: the NIC HW device - * @rq_depth: rq depth - * @sq_depth: sq depth - * - * Return 0 - Success, negative - Failure - **/ -static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth, - unsigned int sq_depth) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_cmd_hw_ioctxt hw_ioctxt; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; - hw_ioctxt.cmdq_depth = 0; - - hw_ioctxt.lro_en = 1; - - hw_ioctxt.rq_depth = ilog2(rq_depth); - - hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX; - - hw_ioctxt.sq_depth = ilog2(sq_depth); - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_HWCTXT_SET, - &hw_ioctxt, sizeof(hw_ioctxt), NULL, - NULL, HINIC_MGMT_MSG_SYNC); -} - -static int wait_for_outbound_state(struct hinic_hwdev *hwdev) -{ - enum hinic_outbound_state outbound_state; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - unsigned long end; - - end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT); - do { - outbound_state = hinic_outbound_state_get(hwif); - - if (outbound_state == HINIC_OUTBOUND_ENABLE) - return 0; - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n"); - return -EFAULT; -} - -static int wait_for_db_state(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - enum hinic_db_state db_state; - unsigned long end; - - end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT); - do { - db_state = hinic_db_state_get(hwif); - - if (db_state == HINIC_DB_ENABLE) - return 0; - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for DB - Timeout\n"); - return -EFAULT; -} - -static int wait_for_io_stopped(struct hinic_hwdev *hwdev) -{ - struct hinic_cmd_io_status cmd_io_status; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - unsigned long end; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); - do { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_STATUS_GET, - &cmd_io_status, sizeof(cmd_io_status), - &cmd_io_status, &out_size, - HINIC_MGMT_MSG_SYNC); - if ((err) || (out_size != sizeof(cmd_io_status))) { - dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", - err); - return err; - } - - if (cmd_io_status.status == IO_STOPPED) { - dev_info(&pdev->dev, "IO stopped\n"); - return 0; - } - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); - return -ETIMEDOUT; -} - -/** - * clear_io_resource - set the IO resources as not active in the NIC - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -static int clear_io_resources(struct 
hinic_hwdev *hwdev) -{ - struct hinic_cmd_clear_io_res cmd_clear_io_res; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - err = wait_for_io_stopped(hwdev); - if (err) { - dev_err(&pdev->dev, "IO has not stopped yet\n"); - return err; - } - - cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res, - sizeof(cmd_clear_io_res), NULL, NULL, - HINIC_MGMT_MSG_SYNC); - if (err) { - dev_err(&pdev->dev, "Failed to clear IO resources\n"); - return err; - } - - return 0; -} - -/** - * set_resources_state - set the state of the resources in the NIC - * @hwdev: the NIC HW device - * @state: the state to set - * - * Return 0 - Success, negative - Failure - **/ -static int set_resources_state(struct hinic_hwdev *hwdev, - enum hinic_res_state state) -{ - struct hinic_cmd_set_res_state res_state; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - res_state.state = state; - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, - HINIC_MOD_COMM, - HINIC_COMM_CMD_RES_STATE_SET, - &res_state, sizeof(res_state), NULL, - NULL, HINIC_MGMT_MSG_SYNC); -} - -/** - * get_base_qpn - get the first qp number - * @hwdev: the NIC HW device - * @base_qpn: returned qp number - * - * Return 0 - Success, negative - Failure - **/ -static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn) -{ - struct hinic_cmd_base_qpn cmd_base_qpn; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN, - &cmd_base_qpn, sizeof(cmd_base_qpn), - &cmd_base_qpn, &out_size); - if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) { - dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n", - cmd_base_qpn.status); - return -EFAULT; - } - - *base_qpn = cmd_base_qpn.qpn; - return 0; -} - -/** - * hinic_hwdev_ifup - Preparing the HW for passing IO - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_ifup(struct hinic_hwdev *hwdev) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_cap *nic_cap = &hwdev->nic_cap; - struct hinic_hwif *hwif = hwdev->hwif; - int err, num_aeqs, num_ceqs, num_qps; - struct msix_entry *ceq_msix_entries; - struct msix_entry *sq_msix_entries; - struct msix_entry *rq_msix_entries; - struct pci_dev *pdev = hwif->pdev; - u16 base_qpn; - - err = get_base_qpn(hwdev, &base_qpn); - if (err) { - dev_err(&pdev->dev, "Failed to get global base qp number\n"); - return err; - } - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); - - ceq_msix_entries = &hwdev->msix_entries[num_aeqs]; - - err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs, - ceq_msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to init IO channel\n"); - return err; - } - - 
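The statements that follow carve the single MSI-X vector table into contiguous regions: AEQ vectors first, then CEQ vectors, then one SQ and one RQ vector per QP. A compact sketch of that bookkeeping, with made-up counts (the driver reads the real ones from the HW capabilities):

#include <stdio.h>

int main(void)
{
	/* Illustrative counts only. */
	int num_aeqs = 4, num_ceqs = 2, num_qps = 8;

	int ceq_base = num_aeqs;			/* ceq_msix_entries */
	int sq_base  = num_aeqs + num_ceqs;		/* sq_msix_entries */
	int rq_base  = num_aeqs + num_ceqs + num_qps;	/* rq_msix_entries */
	int total    = num_aeqs + num_ceqs + 2 * num_qps;

	printf("aeq[0..%d) ceq[%d..%d) sq[%d..%d) rq[%d..%d), %d vectors\n",
	       num_aeqs, ceq_base, sq_base, sq_base, rq_base,
	       rq_base, total, total);
	return 0;
}

This layout is also why get_capability() earlier in the patch divides the leftover IRQ budget by two when deriving num_qps, and why MAX_IRQS() counts 2 * max_qps + num_aeqs + num_ceqs.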
num_qps = nic_cap->num_qps; - sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs]; - rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps]; - - err = hinic_io_create_qps(func_to_io, base_qpn, num_qps, - sq_msix_entries, rq_msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to create QPs\n"); - goto err_create_qps; - } - - err = wait_for_db_state(hwdev); - if (err) { - dev_warn(&pdev->dev, "db - disabled, try again\n"); - hinic_db_state_set(hwif, HINIC_DB_ENABLE); - } - - err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH); - if (err) { - dev_err(&pdev->dev, "Failed to set HW IO ctxt\n"); - goto err_hw_ioctxt; - } - - return 0; - -err_hw_ioctxt: - hinic_io_destroy_qps(func_to_io, num_qps); - -err_create_qps: - hinic_io_free(func_to_io); - return err; -} - -/** - * hinic_hwdev_ifdown - Closing the HW for passing IO - * @hwdev: the NIC HW device - * - **/ -void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_cap *nic_cap = &hwdev->nic_cap; - - clear_io_resources(hwdev); - - hinic_io_destroy_qps(func_to_io, nic_cap->num_qps); - hinic_io_free(func_to_io); -} - -/** - * hinic_hwdev_cb_register - register callback handler for MGMT events - * @hwdev: the NIC HW device - * @cmd: the mgmt event - * @handle: private data for the handler - * @handler: event handler - **/ -void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd, void *handle, - void (*handler)(void *handle, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - struct hinic_nic_cb *nic_cb; - u8 cmd_cb; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; - nic_cb = &pfhwdev->nic_cb[cmd_cb]; - - nic_cb->handler = handler; - nic_cb->handle = handle; - nic_cb->cb_state = HINIC_CB_ENABLED; -} - -/** - * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events - * @hwdev: the NIC HW device - * @cmd: the mgmt event - **/ -void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - struct hinic_nic_cb *nic_cb; - u8 cmd_cb; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; - nic_cb = &pfhwdev->nic_cb[cmd_cb]; - - nic_cb->cb_state &= ~HINIC_CB_ENABLED; - - while (nic_cb->cb_state & HINIC_CB_RUNNING) - schedule(); - - nic_cb->handler = NULL; -} - -/** - * nic_mgmt_msg_handler - nic mgmt event handler - * @handle: private data for the handler - * @buf_in: input buffer - * @in_size: input size - * @buf_out: output buffer - * @out_size: returned output size - **/ -static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct hinic_pfhwdev *pfhwdev = handle; - enum hinic_cb_state cb_state; - struct hinic_nic_cb *nic_cb; - struct hinic_hwdev *hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u8 cmd_cb; - - hwdev = &pfhwdev->hwdev; - hwif = hwdev->hwif; - pdev = hwif->pdev; - - if ((cmd < 
HINIC_MGMT_MSG_CMD_BASE) || - (cmd >= HINIC_MGMT_MSG_CMD_MAX)) { - dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd); - return; - } - - cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; - - nic_cb = &pfhwdev->nic_cb[cmd_cb]; - - cb_state = cmpxchg(&nic_cb->cb_state, - HINIC_CB_ENABLED, - HINIC_CB_ENABLED | HINIC_CB_RUNNING); - - if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler)) - nic_cb->handler(nic_cb->handle, buf_in, - in_size, buf_out, out_size); - else - dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd); - - nic_cb->cb_state &= ~HINIC_CB_RUNNING; -} - -/** - * init_pfhwdev - Initialize the extended components of PF - * @pfhwdev: the HW device for PF - * - * Return 0 - success, negative - failure - **/ -static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) -{ - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif); - if (err) { - dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n"); - return err; - } - - hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, - pfhwdev, nic_mgmt_msg_handler); - - hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); - return 0; -} - -/** - * free_pfhwdev - Free the extended components of PF - * @pfhwdev: the HW device for PF - **/ -static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) -{ - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - - hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); - - hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC); - - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); -} - -/** - * hinic_init_hwdev - Initialize the NIC HW - * @pdev: the NIC pci device - * - * Return initialized NIC HW device - * - * Initialize the NIC HW device and return a pointer to it - **/ -struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) -{ - struct hinic_pfhwdev *pfhwdev; - struct hinic_hwdev *hwdev; - struct hinic_hwif *hwif; - int err, num_aeqs; - - hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL); - if (!hwif) - return ERR_PTR(-ENOMEM); - - err = hinic_init_hwif(hwif, pdev); - if (err) { - dev_err(&pdev->dev, "Failed to init HW interface\n"); - return ERR_PTR(err); - } - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - err = -EFAULT; - goto err_func_type; - } - - pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL); - if (!pfhwdev) { - err = -ENOMEM; - goto err_pfhwdev_alloc; - } - - hwdev = &pfhwdev->hwdev; - hwdev->hwif = hwif; - - err = init_msix(hwdev); - if (err) { - dev_err(&pdev->dev, "Failed to init msix\n"); - goto err_init_msix; - } - - err = wait_for_outbound_state(hwdev); - if (err) { - dev_warn(&pdev->dev, "outbound - disabled, try again\n"); - hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE); - } - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); - - err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs, - HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, - hwdev->msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to init async event queues\n"); - goto err_aeqs_init; - } - - err = init_pfhwdev(pfhwdev); - if (err) { - dev_err(&pdev->dev, "Failed to init PF HW device\n"); - goto err_init_pfhwdev; - } - - err = get_dev_cap(hwdev); - if (err) { - dev_err(&pdev->dev, "Failed to get device capabilities\n"); - goto err_dev_cap; - } - - err = init_fw_ctxt(hwdev); - if (err) { - dev_err(&pdev->dev, "Failed to init function table\n"); - goto err_init_fw_ctxt; - } - - err = 
set_resources_state(hwdev, HINIC_RES_ACTIVE); - if (err) { - dev_err(&pdev->dev, "Failed to set resources state\n"); - goto err_resources_state; - } - - return hwdev; - -err_resources_state: -err_init_fw_ctxt: -err_dev_cap: - free_pfhwdev(pfhwdev); - -err_init_pfhwdev: - hinic_aeqs_free(&hwdev->aeqs); - -err_aeqs_init: - disable_msix(hwdev); - -err_init_msix: -err_pfhwdev_alloc: -err_func_type: - hinic_free_hwif(hwif); - return ERR_PTR(err); -} - -/** - * hinic_free_hwdev - Free the NIC HW device - * @hwdev: the NIC HW device - **/ -void hinic_free_hwdev(struct hinic_hwdev *hwdev) -{ - struct hinic_pfhwdev *pfhwdev = container_of(hwdev, - struct hinic_pfhwdev, - hwdev); - - set_resources_state(hwdev, HINIC_RES_CLEAN); - - free_pfhwdev(pfhwdev); - - hinic_aeqs_free(&hwdev->aeqs); - - disable_msix(hwdev); - - hinic_free_hwif(hwdev->hwif); -} - -int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev) -{ - struct hinic_cap *nic_cap = &hwdev->nic_cap; - - return nic_cap->max_qps; -} - -/** - * hinic_hwdev_num_qps - return the number QPs available for use - * @hwdev: the NIC HW device - * - * Return number QPs available for use - **/ -int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) -{ - struct hinic_cap *nic_cap = &hwdev->nic_cap; - - return nic_cap->num_qps; -} - -/** - * hinic_hwdev_get_sq - get SQ - * @hwdev: the NIC HW device - * @i: the position of the SQ - * - * Return: the SQ in the i position - **/ -struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_qp *qp = &func_to_io->qps[i]; - - if (i >= hinic_hwdev_num_qps(hwdev)) - return NULL; - - return &qp->sq; -} - -/** - * hinic_hwdev_get_sq - get RQ - * @hwdev: the NIC HW device - * @i: the position of the RQ - * - * Return: the RQ in the i position - **/ -struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_qp *qp = &func_to_io->qps[i]; - - if (i >= hinic_hwdev_num_qps(hwdev)) - return NULL; - - return &qp->rq; -} - -/** - * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry - * @hwdev: the NIC HW device - * @msix_index: msix_index - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index) -{ - return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index); -} - -/** - * hinic_hwdev_msix_set - set message attribute for msix entry - * @hwdev: the NIC HW device - * @msix_index: msix_index - * @pending_limit: the maximum pending interrupt events (unit 8) - * @coalesc_timer: coalesc period for interrupt (unit 8 us) - * @lli_timer: replenishing period for low latency credit (unit 8 us) - * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) - * @resend_timer: maximum wait for resending msix (unit coalesc period) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer_cfg, u8 lli_credit_limit, - u8 resend_timer) -{ - return hinic_msix_attr_set(hwdev->hwif, msix_index, - pending_limit, coalesc_timer, - lli_timer_cfg, lli_credit_limit, - resend_timer); -} - -/** - * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq - * @hwdev: the NIC HW device - * @sq: send queue - * @pending_limit: the maximum pending update ci events (unit 8) - * @coalesc_timer: coalesc period for update ci (unit 8 us) - * - * Return 0 - 
Success, negative - Failure - **/ -int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, - u8 pending_limit, u8 coalesc_timer) -{ - struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - struct hinic_cmd_hw_ci hw_ci; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - hw_ci.dma_attr_off = 0; - hw_ci.pending_limit = pending_limit; - hw_ci.coalesc_timer = coalesc_timer; - - hw_ci.msix_en = 1; - hw_ci.msix_entry_idx = sq->msix_entry; - - hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - hw_ci.sq_id = qp->q_id; - - hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr); - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, - HINIC_MOD_COMM, - HINIC_COMM_CMD_SQ_HI_CI_SET, - &hw_ci, sizeof(hw_ci), NULL, - NULL, HINIC_MGMT_MSG_SYNC); -} - -/** - * hinic_hwdev_set_msix_state- set msix state - * @hwdev: the NIC HW device - * @msix_index: IRQ corresponding index number - * @flag: msix state - * - **/ -void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, - enum hinic_msix_state flag) -{ - hinic_set_msix_state(hwdev->hwif, msix_index, flag); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h deleted file mode 100644 index b069045de416..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ /dev/null @@ -1,291 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_DEV_H -#define HINIC_HW_DEV_H - -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" - -#define HINIC_MAX_QPS 32 - -#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \ - HINIC_MGMT_MSG_CMD_BASE) - -struct hinic_cap { - u16 max_qps; - u16 num_qps; -}; - -enum hinic_port_cmd { - HINIC_PORT_CMD_CHANGE_MTU = 2, - - HINIC_PORT_CMD_ADD_VLAN = 3, - HINIC_PORT_CMD_DEL_VLAN = 4, - - HINIC_PORT_CMD_SET_MAC = 9, - HINIC_PORT_CMD_GET_MAC = 10, - HINIC_PORT_CMD_DEL_MAC = 11, - - HINIC_PORT_CMD_SET_RX_MODE = 12, - - HINIC_PORT_CMD_GET_LINK_STATE = 24, - - HINIC_PORT_CMD_SET_LRO = 25, - - HINIC_PORT_CMD_SET_RX_CSUM = 26, - - HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 27, - - HINIC_PORT_CMD_GET_PORT_STATISTICS = 28, - - HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 29, - - HINIC_PORT_CMD_GET_VPORT_STAT = 30, - - HINIC_PORT_CMD_CLEAN_VPORT_STAT = 31, - - HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 37, - - HINIC_PORT_CMD_SET_PORT_STATE = 41, - - HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 43, - - HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 44, - - HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 45, - - HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 46, - - HINIC_PORT_CMD_GET_RSS_CTX_TBL = 47, - - HINIC_PORT_CMD_SET_RSS_CTX_TBL = 48, - - HINIC_PORT_CMD_RSS_TEMP_MGR = 49, - - HINIC_PORT_CMD_RSS_CFG = 66, - - HINIC_PORT_CMD_FWCTXT_INIT = 69, - - HINIC_PORT_CMD_GET_MGMT_VERSION = 88, - - HINIC_PORT_CMD_SET_FUNC_STATE = 93, - - HINIC_PORT_CMD_GET_GLOBAL_QPN = 102, - - HINIC_PORT_CMD_SET_TSO = 112, - - HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115, - - HINIC_PORT_CMD_GET_CAP = 170, - - HINIC_PORT_CMD_SET_LRO_TIMER = 244, -}; - -enum hinic_ucode_cmd { - HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0, - 
HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, - HINIC_UCODE_CMD_ARM_SQ, - HINIC_UCODE_CMD_ARM_RQ, - HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, - HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, - HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE, - HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE, - HINIC_UCODE_CMD_SET_IQ_ENABLE, - HINIC_UCODE_CMD_SET_RQ_FLUSH = 10 -}; - -#define NIC_RSS_CMD_TEMP_ALLOC 0x01 -#define NIC_RSS_CMD_TEMP_FREE 0x02 - -enum hinic_mgmt_msg_cmd { - HINIC_MGMT_MSG_CMD_BASE = 160, - - HINIC_MGMT_MSG_CMD_LINK_STATUS = 160, - - HINIC_MGMT_MSG_CMD_MAX, -}; - -enum hinic_cb_state { - HINIC_CB_ENABLED = BIT(0), - HINIC_CB_RUNNING = BIT(1), -}; - -enum hinic_res_state { - HINIC_RES_CLEAN = 0, - HINIC_RES_ACTIVE = 1, -}; - -struct hinic_cmd_fw_ctxt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rx_buf_sz; - - u32 rsvd1; -}; - -struct hinic_cmd_hw_ioctxt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - - u16 rsvd1; - - u8 set_cmdq_depth; - u8 cmdq_depth; - - u8 lro_en; - u8 rsvd3; - u8 rsvd4; - u8 rsvd5; - - u16 rq_depth; - u16 rx_buf_sz_idx; - u16 sq_depth; -}; - -struct hinic_cmd_io_status { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 rsvd1; - u8 rsvd2; - u32 io_status; -}; - -struct hinic_cmd_clear_io_res { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 rsvd1; - u8 rsvd2; -}; - -struct hinic_cmd_set_res_state { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 state; - u8 rsvd1; - u32 rsvd2; -}; - -struct hinic_cmd_base_qpn { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 qpn; -}; - -struct hinic_cmd_hw_ci { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - - u8 dma_attr_off; - u8 pending_limit; - u8 coalesc_timer; - - u8 msix_en; - u16 msix_entry_idx; - - u32 sq_id; - u32 rsvd1; - u64 ci_addr; -}; - -struct hinic_hwdev { - struct hinic_hwif *hwif; - struct msix_entry *msix_entries; - - struct hinic_aeqs aeqs; - struct hinic_func_to_io func_to_io; - - struct hinic_cap nic_cap; -}; - -struct hinic_nic_cb { - void (*handler)(void *handle, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size); - - void *handle; - unsigned long cb_state; -}; - -struct hinic_pfhwdev { - struct hinic_hwdev hwdev; - - struct hinic_pf_to_mgmt pf_to_mgmt; - - struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD]; -}; - -void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd, void *handle, - void (*handler)(void *handle, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)); - -void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd); - -int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size); - -int hinic_hwdev_ifup(struct hinic_hwdev *hwdev); - -void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev); - -struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); - -void hinic_free_hwdev(struct hinic_hwdev *hwdev); - -int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev); - -int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev); - -struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i); - -struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i); - -int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index); - -int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer_cfg, u8 lli_credit_limit, - u8 resend_timer); - -int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct 
hinic_sq *sq, - u8 pending_limit, u8 coalesc_timer); - -void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, - enum hinic_msix_state flag); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c deleted file mode 100644 index 79243b626ddb..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ /dev/null @@ -1,877 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_csr.h" -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" - -#define HINIC_EQS_WQ_NAME "hinic_eqs" - -#define GET_EQ_NUM_PAGES(eq, pg_size) \ - (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size)) - -#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) - -#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ - HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) - -#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ - HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) - -#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ - HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num)) - -#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ - HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num)) - -#define GET_EQ_ELEMENT(eq, idx) \ - ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ - (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) - -#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ - GET_EQ_ELEMENT(eq, idx)) - -#define GET_CEQ_ELEM(eq, idx) ((u32 *) \ - GET_EQ_ELEMENT(eq, idx)) - -#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) - -#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) - -#define PAGE_IN_4K(page_size) ((page_size) >> 12) -#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) - -#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) -#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq))) - -#define EQ_MAX_PAGES 8 - -#define CEQE_TYPE_SHIFT 23 -#define CEQE_TYPE_MASK 0x7 - -#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \ - CEQE_TYPE_MASK) - -#define CEQE_DATA_MASK 0x3FFFFFF -#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK) - -#define aeq_to_aeqs(eq) \ - container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) - -#define ceq_to_ceqs(eq) \ - container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) - -#define work_to_aeq_work(work) \ - container_of(work, struct hinic_eq_work, work) - -#define DMA_ATTR_AEQ_DEFAULT 0 -#define DMA_ATTR_CEQ_DEFAULT 0 - -/* No coalescence */ -#define THRESH_CEQ_DEFAULT 0 - -enum eq_int_mode { - EQ_INT_MODE_ARMED, - EQ_INT_MODE_ALWAYS -}; - -enum eq_arm_state { - EQ_NOT_ARMED, - EQ_ARMED -}; - -/** - * hinic_aeq_register_hw_cb - register AEQ callback for specific event - * @aeqs: pointer to Async eqs of the chip - * @event: aeq event to register callback for it - * @handle: private data will be used by the callback - * @hw_handler: callback function - **/ -void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event, void *handle, - void (*hwe_handler)(void *handle, void *data, 
- u8 size)) -{ - struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; - - hwe_cb->hwe_handler = hwe_handler; - hwe_cb->handle = handle; - hwe_cb->hwe_state = HINIC_EQE_ENABLED; -} - -/** - * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event - * @aeqs: pointer to Async eqs of the chip - * @event: aeq event to unregister callback for it - **/ -void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event) -{ - struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; - - hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED; - - while (hwe_cb->hwe_state & HINIC_EQE_RUNNING) - schedule(); - - hwe_cb->hwe_handler = NULL; -} - -/** - * hinic_ceq_register_cb - register CEQ callback for specific event - * @ceqs: pointer to Completion eqs part of the chip - * @event: ceq event to register callback for it - * @handle: private data will be used by the callback - * @handler: callback function - **/ -void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event, void *handle, - void (*handler)(void *handle, u32 ceqe_data)) -{ - struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; - - ceq_cb->handler = handler; - ceq_cb->handle = handle; - ceq_cb->ceqe_state = HINIC_EQE_ENABLED; -} - -/** - * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event - * @ceqs: pointer to Completion eqs part of the chip - * @event: ceq event to unregister callback for it - **/ -void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event) -{ - struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; - - ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED; - - while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING) - schedule(); - - ceq_cb->handler = NULL; -} - -static u8 eq_cons_idx_checksum_set(u32 val) -{ - u8 checksum = 0; - int idx; - - for (idx = 0; idx < 32; idx += 4) - checksum ^= ((val >> idx) & 0xF); - - return (checksum & 0xF); -} - -/** - * eq_update_ci - update the HW cons idx of event queue - * @eq: the event queue to update the cons idx for - **/ -static void eq_update_ci(struct hinic_eq *eq) -{ - u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); - - /* Read Modify Write */ - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_EQ_CI_CLEAR(val, IDX) & - HINIC_EQ_CI_CLEAR(val, WRAPPED) & - HINIC_EQ_CI_CLEAR(val, INT_ARMED) & - HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM); - - val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | - HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | - HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); - - val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); - - hinic_hwif_write_reg(eq->hwif, addr, val); -} - -/** - * aeq_irq_handler - handler for the AEQ event - * @eq: the Async Event Queue that received the event - **/ -static void aeq_irq_handler(struct hinic_eq *eq) -{ - struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); - struct hinic_hwif *hwif = aeqs->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_aeq_elem *aeqe_curr; - struct hinic_hw_event_cb *hwe_cb; - enum hinic_aeq_type event; - unsigned long eqe_state; - u32 aeqe_desc; - int i, size; - - for (i = 0; i < eq->q_len; i++) { - aeqe_curr = GET_CURR_AEQ_ELEM(eq); - - /* Data in HW is in Big endian Format */ - aeqe_desc = be32_to_cpu(aeqe_curr->desc); - - /* HW toggles the wrapped bit, when it adds eq element */ - if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) - break; - - event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); - if (event >= HINIC_MAX_AEQ_EVENTS) { - dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); - return; - } - - if 
(!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { - hwe_cb = &aeqs->hwe_cb[event]; - - size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE); - - eqe_state = cmpxchg(&hwe_cb->hwe_state, - HINIC_EQE_ENABLED, - HINIC_EQE_ENABLED | - HINIC_EQE_RUNNING); - if ((eqe_state == HINIC_EQE_ENABLED) && - (hwe_cb->hwe_handler)) - hwe_cb->hwe_handler(hwe_cb->handle, - aeqe_curr->data, size); - else - dev_err(&pdev->dev, "Unhandled AEQ Event %d\n", - event); - - hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING; - } - - eq->cons_idx++; - - if (eq->cons_idx == eq->q_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - } -} - -/** - * ceq_event_handler - handler for the ceq events - * @ceqs: ceqs part of the chip - * @ceqe: ceq element that describes the event - **/ -static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe) -{ - struct hinic_hwif *hwif = ceqs->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_ceq_cb *ceq_cb; - enum hinic_ceq_type event; - unsigned long eqe_state; - - event = CEQE_TYPE(ceqe); - if (event >= HINIC_MAX_CEQ_EVENTS) { - dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event); - return; - } - - ceq_cb = &ceqs->ceq_cb[event]; - - eqe_state = cmpxchg(&ceq_cb->ceqe_state, - HINIC_EQE_ENABLED, - HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); - - if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler)) - ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe)); - else - dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event); - - ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING; -} - -/** - * ceq_irq_handler - handler for the CEQ event - * @eq: the Completion Event Queue that received the event - **/ -static void ceq_irq_handler(struct hinic_eq *eq) -{ - struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); - u32 ceqe; - int i; - - for (i = 0; i < eq->q_len; i++) { - ceqe = *(GET_CURR_CEQ_ELEM(eq)); - - /* Data in HW is in Big endian Format */ - ceqe = be32_to_cpu(ceqe); - - /* HW toggles the wrapped bit, when it adds eq element event */ - if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) - break; - - ceq_event_handler(ceqs, ceqe); - - eq->cons_idx++; - - if (eq->cons_idx == eq->q_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - } -} - -/** - * eq_irq_handler - handler for the EQ event - * @data: the Event Queue that received the event - **/ -static void eq_irq_handler(void *data) -{ - struct hinic_eq *eq = data; - - if (eq->type == HINIC_AEQ) - aeq_irq_handler(eq); - else if (eq->type == HINIC_CEQ) - ceq_irq_handler(eq); - - eq_update_ci(eq); -} - -/** - * eq_irq_work - the work of the EQ that received the event - * @work: the work struct that is associated with the EQ - **/ -static void eq_irq_work(struct work_struct *work) -{ - struct hinic_eq_work *aeq_work = work_to_aeq_work(work); - struct hinic_eq *aeq; - - aeq = aeq_work->data; - eq_irq_handler(aeq); -} - -/** - * ceq_tasklet - the tasklet of the EQ that received the event - * @ceq_data: the eq - **/ -static void ceq_tasklet(unsigned long ceq_data) -{ - struct hinic_eq *ceq = (struct hinic_eq *)ceq_data; - - eq_irq_handler(ceq); -} - -/** - * aeq_interrupt - aeq interrupt handler - * @irq: irq number - * @data: the Async Event Queue that collected the event - **/ -static irqreturn_t aeq_interrupt(int irq, void *data) -{ - struct hinic_eq_work *aeq_work; - struct hinic_eq *aeq = data; - struct hinic_aeqs *aeqs; - - /* clear resend timer cnt register */ - hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry); - - aeq_work = &aeq->aeq_work; - aeq_work->data = aeq; - - aeqs = aeq_to_aeqs(aeq); - queue_work(aeqs->workq, 
&aeq_work->work); - - return IRQ_HANDLED; -} - -/** - * ceq_interrupt - ceq interrupt handler - * @irq: irq number - * @data: the Completion Event Queue that collected the event - **/ -static irqreturn_t ceq_interrupt(int irq, void *data) -{ - struct hinic_eq *ceq = data; - - /* clear resend timer cnt register */ - hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry); - - tasklet_schedule(&ceq->ceq_tasklet); - - return IRQ_HANDLED; -} - -static void set_ctrl0(struct hinic_eq *eq) -{ - struct msix_entry *msix_entry = &eq->msix_entry; - enum hinic_eq_type type = eq->type; - u32 addr, val, ctrl0; - - if (type == HINIC_AEQ) { - /* RMW Ctrl0 */ - addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) & - HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE); - - ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) | - HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) | - HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), - PCI_INTF_IDX) | - HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE); - - val |= ctrl0; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } else if (type == HINIC_CEQ) { - /* RMW Ctrl0 */ - addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) & - HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) & - HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE); - - ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) | - HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) | - HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) | - HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), - PCI_INTF_IDX) | - HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE); - - val |= ctrl0; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } -} - -static void set_ctrl1(struct hinic_eq *eq) -{ - enum hinic_eq_type type = eq->type; - u32 page_size_val, elem_size; - u32 addr, val, ctrl1; - - if (type == HINIC_AEQ) { - /* RMW Ctrl1 */ - addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); - - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) & - HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) & - HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE); - - ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) | - HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | - HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - - val |= ctrl1; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } else if (type == HINIC_CEQ) { - /* RMW Ctrl1 */ - addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); - - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) & - HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE); - - ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) | - HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - - val |= ctrl1; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } -} - -/** - * set_eq_ctrls - setting eq's ctrl registers - * @eq: the Event Queue for setting - **/ -static void set_eq_ctrls(struct hinic_eq *eq) -{ - set_ctrl0(eq); - set_ctrl1(eq); -} - -/** - * aeq_elements_init - initialize all the elements in the aeq - * @eq: the Async Event Queue - * @init_val: value to initialize the elements with it - **/ -static void aeq_elements_init(struct hinic_eq *eq, 
u32 init_val) -{ - struct hinic_aeq_elem *aeqe; - int i; - - for (i = 0; i < eq->q_len; i++) { - aeqe = GET_AEQ_ELEM(eq, i); - aeqe->desc = cpu_to_be32(init_val); - } - - wmb(); /* Write the initilzation values */ -} - -/** - * ceq_elements_init - Initialize all the elements in the ceq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) -{ - u32 *ceqe; - int i; - - for (i = 0; i < eq->q_len; i++) { - ceqe = GET_CEQ_ELEM(eq, i); - *(ceqe) = cpu_to_be32(init_val); - } - - wmb(); /* Write the initilzation values */ -} - -/** - * alloc_eq_pages - allocate the pages for the queue - * @eq: the event queue - * - * Return 0 - Success, Negative - Failure - **/ -static int alloc_eq_pages(struct hinic_eq *eq) -{ - struct hinic_hwif *hwif = eq->hwif; - struct pci_dev *pdev = hwif->pdev; - u32 init_val, addr, val; - size_t addr_size; - int err, pg; - - addr_size = eq->num_pages * sizeof(*eq->dma_addr); - eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); - if (!eq->dma_addr) - return -ENOMEM; - - addr_size = eq->num_pages * sizeof(*eq->virt_addr); - eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); - if (!eq->virt_addr) { - err = -ENOMEM; - goto err_virt_addr_alloc; - } - - for (pg = 0; pg < eq->num_pages; pg++) { - eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, - eq->page_size, - &eq->dma_addr[pg], - GFP_KERNEL); - if (!eq->virt_addr[pg]) { - err = -ENOMEM; - goto err_dma_alloc; - } - - addr = EQ_HI_PHYS_ADDR_REG(eq, pg); - val = upper_32_bits(eq->dma_addr[pg]); - - hinic_hwif_write_reg(hwif, addr, val); - - addr = EQ_LO_PHYS_ADDR_REG(eq, pg); - val = lower_32_bits(eq->dma_addr[pg]); - - hinic_hwif_write_reg(hwif, addr, val); - } - - init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED); - - if (eq->type == HINIC_AEQ) - aeq_elements_init(eq, init_val); - else if (eq->type == HINIC_CEQ) - ceq_elements_init(eq, init_val); - - return 0; - -err_dma_alloc: - while (--pg >= 0) - dma_free_coherent(&pdev->dev, eq->page_size, - eq->virt_addr[pg], - eq->dma_addr[pg]); - - devm_kfree(&pdev->dev, eq->virt_addr); - -err_virt_addr_alloc: - devm_kfree(&pdev->dev, eq->dma_addr); - return err; -} - -/** - * free_eq_pages - free the pages of the queue - * @eq: the Event Queue - **/ -static void free_eq_pages(struct hinic_eq *eq) -{ - struct hinic_hwif *hwif = eq->hwif; - struct pci_dev *pdev = hwif->pdev; - int pg; - - for (pg = 0; pg < eq->num_pages; pg++) - dma_free_coherent(&pdev->dev, eq->page_size, - eq->virt_addr[pg], - eq->dma_addr[pg]); - - devm_kfree(&pdev->dev, eq->virt_addr); - devm_kfree(&pdev->dev, eq->dma_addr); -} - -/** - * init_eq - initialize Event Queue - * @eq: the event queue - * @hwif: the HW interface of a PCI function device - * @type: the type of the event queue, aeq or ceq - * @q_id: Queue id number - * @q_len: the number of EQ elements - * @page_size: the page size of the pages in the event queue - * @entry: msix entry associated with the event queue - * - * Return 0 - Success, Negative - Failure - **/ -static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, - enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, - struct msix_entry entry) -{ - struct pci_dev *pdev = hwif->pdev; - int err; - - eq->hwif = hwif; - eq->type = type; - eq->q_id = q_id; - eq->q_len = q_len; - eq->page_size = page_size; - - /* Clear PI and CI, also clear the ARM bit */ - hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); - hinic_hwif_write_reg(eq->hwif, 
EQ_PROD_IDX_REG_ADDR(eq), 0); - - eq->cons_idx = 0; - eq->wrapped = 0; - - if (type == HINIC_AEQ) { - eq->elem_size = HINIC_AEQE_SIZE; - } else if (type == HINIC_CEQ) { - eq->elem_size = HINIC_CEQE_SIZE; - } else { - dev_err(&pdev->dev, "Invalid EQ type\n"); - return -EINVAL; - } - - eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); - eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size); - - eq->msix_entry = entry; - - if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { - dev_err(&pdev->dev, "num elements in eq page != power of 2\n"); - return -EINVAL; - } - - if (eq->num_pages > EQ_MAX_PAGES) { - dev_err(&pdev->dev, "too many pages for eq\n"); - return -EINVAL; - } - - set_eq_ctrls(eq); - eq_update_ci(eq); - - err = alloc_eq_pages(eq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate pages for eq\n"); - return err; - } - - if (type == HINIC_AEQ) { - struct hinic_eq_work *aeq_work = &eq->aeq_work; - - INIT_WORK(&aeq_work->work, eq_irq_work); - } else if (type == HINIC_CEQ) { - tasklet_init(&eq->ceq_tasklet, ceq_tasklet, - (unsigned long)eq); - } - - /* set the attributes of the msix entry */ - hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry, - HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT, - HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT, - HINIC_EQ_MSIX_LLI_TIMER_DEFAULT, - HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT, - HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT); - - if (type == HINIC_AEQ) - err = request_irq(entry.vector, aeq_interrupt, 0, - "hinic_aeq", eq); - else if (type == HINIC_CEQ) - err = request_irq(entry.vector, ceq_interrupt, 0, - "hinic_ceq", eq); - - if (err) { - dev_err(&pdev->dev, "Failed to request irq for the EQ\n"); - goto err_req_irq; - } - - return 0; - -err_req_irq: - free_eq_pages(eq); - return err; -} - -/** - * remove_eq - remove Event Queue - * @eq: the event queue - **/ -static void remove_eq(struct hinic_eq *eq) -{ - struct msix_entry *entry = &eq->msix_entry; - - free_irq(entry->vector, eq); - - if (eq->type == HINIC_AEQ) { - struct hinic_eq_work *aeq_work = &eq->aeq_work; - - cancel_work_sync(&aeq_work->work); - } else if (eq->type == HINIC_CEQ) { - tasklet_kill(&eq->ceq_tasklet); - } - - free_eq_pages(eq); -} - -/** - * hinic_aeqs_init - initialize all the aeqs - * @aeqs: pointer to Async eqs of the chip - * @hwif: the HW interface of a PCI function device - * @num_aeqs: number of AEQs - * @q_len: number of EQ elements - * @page_size: the page size of the pages in the event queue - * @msix_entries: msix entries associated with the event queues - * - * Return 0 - Success, negative - Failure - **/ -int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, - int num_aeqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries) -{ - struct pci_dev *pdev = hwif->pdev; - int err, i, q_id; - - aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); - if (!aeqs->workq) - return -ENOMEM; - - aeqs->hwif = hwif; - aeqs->num_aeqs = num_aeqs; - - for (q_id = 0; q_id < num_aeqs; q_id++) { - err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len, - page_size, msix_entries[q_id]); - if (err) { - dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id); - goto err_init_aeq; - } - } - - return 0; - -err_init_aeq: - for (i = 0; i < q_id; i++) - remove_eq(&aeqs->aeq[i]); - - destroy_workqueue(aeqs->workq); - return err; -} - -/** - * hinic_aeqs_free - free all the aeqs - * @aeqs: pointer to Async eqs of the chip - **/ -void hinic_aeqs_free(struct hinic_aeqs *aeqs) -{ - int q_id; - - for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++) - remove_eq(&aeqs->aeq[q_id]); - - 
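An aside on the power-of-two check init_eq() performs above: GET_EQ_ELEMENT() locates an entry with (idx) & (num_elem_in_pg - 1), which only equals idx modulo the page capacity when that capacity is a power of two. A hedged userspace sketch of the invariant (the element count is illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int n)
{
	return n && !(n & (n - 1));	/* classic n & (n - 1) test */
}

int main(void)
{
	unsigned int num_elem_in_pg = 64;	/* must be a power of two */
	unsigned int idx;

	if (!is_pow2(num_elem_in_pg))
		return 1;	/* init_eq() fails with -EINVAL here */

	/* With a power-of-two capacity, "idx % capacity" reduces to a
	 * mask, which is what GET_EQ_ELEMENT() relies on when it splits
	 * a consumer index into a page number and an in-page slot. */
	for (idx = 62; idx < 66; idx++)
		printf("idx %u -> slot %u\n", idx,
		       idx & (num_elem_in_pg - 1));
	return 0;
}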
destroy_workqueue(aeqs->workq); -} - -/** - * hinic_ceqs_init - init all the ceqs - * @ceqs: ceqs part of the chip - * @hwif: the hardware interface of a pci function device - * @num_ceqs: number of CEQs - * @q_len: number of EQ elements - * @page_size: the page size of the event queue - * @msix_entries: msix entries associated with the event queues - * - * Return 0 - Success, Negative - Failure - **/ -int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, - int num_ceqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries) -{ - struct pci_dev *pdev = hwif->pdev; - int i, q_id, err; - - ceqs->hwif = hwif; - ceqs->num_ceqs = num_ceqs; - - for (q_id = 0; q_id < num_ceqs; q_id++) { - err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len, - page_size, msix_entries[q_id]); - if (err) { - dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id); - goto err_init_ceq; - } - } - - return 0; - -err_init_ceq: - for (i = 0; i < q_id; i++) - remove_eq(&ceqs->ceq[i]); - - return err; -} - -/** - * hinic_ceqs_free - free all the ceqs - * @ceqs: ceqs part of the chip - **/ -void hinic_ceqs_free(struct hinic_ceqs *ceqs) -{ - int q_id; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) - remove_eq(&ceqs->ceq[q_id]); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h deleted file mode 100644 index d35f2068ee0c..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h +++ /dev/null @@ -1,256 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_EQS_H -#define HINIC_HW_EQS_H - -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" - -#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0 -#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 -#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31 - -#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF -#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F -#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 -#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1 - -#define HINIC_AEQ_CTRL_0_SET(val, member) \ - (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \ - HINIC_AEQ_CTRL_0_##member##_SHIFT) - -#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \ - ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \ - << HINIC_AEQ_CTRL_0_##member##_SHIFT))) - -#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0 -#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 -#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF -#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3 -#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF - -#define HINIC_AEQ_CTRL_1_SET(val, member) \ - (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \ - HINIC_AEQ_CTRL_1_##member##_SHIFT) - -#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \ - ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \ - << HINIC_AEQ_CTRL_1_##member##_SHIFT))) - -#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20 -#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 -#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF -#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F -#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF -#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 -#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1 - -#define HINIC_CEQ_CTRL_0_SET(val, member) \ - (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \ - 
HINIC_CEQ_CTRL_0_##member##_SHIFT) - -#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \ - ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \ - << HINIC_CEQ_CTRL_0_##member##_SHIFT))) - -#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0 -#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF -#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF - -#define HINIC_CEQ_CTRL_1_SET(val, member) \ - (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \ - HINIC_CEQ_CTRL_1_##member##_SHIFT) - -#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \ - ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \ - << HINIC_CEQ_CTRL_1_##member##_SHIFT))) - -#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0 -#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7 -#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8 -#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31 - -#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F -#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1 -#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF -#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1 - -#define HINIC_EQ_ELEM_DESC_SET(val, member) \ - (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \ - HINIC_EQ_ELEM_DESC_##member##_SHIFT) - -#define HINIC_EQ_ELEM_DESC_GET(val, member) \ - (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \ - HINIC_EQ_ELEM_DESC_##member##_MASK) - -#define HINIC_EQ_CI_IDX_SHIFT 0 -#define HINIC_EQ_CI_WRAPPED_SHIFT 20 -#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24 -#define HINIC_EQ_CI_INT_ARMED_SHIFT 31 - -#define HINIC_EQ_CI_IDX_MASK 0xFFFFF -#define HINIC_EQ_CI_WRAPPED_MASK 0x1 -#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF -#define HINIC_EQ_CI_INT_ARMED_MASK 0x1 - -#define HINIC_EQ_CI_SET(val, member) \ - (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \ - HINIC_EQ_CI_##member##_SHIFT) - -#define HINIC_EQ_CI_CLEAR(val, member) \ - ((val) & (~(HINIC_EQ_CI_##member##_MASK \ - << HINIC_EQ_CI_##member##_SHIFT))) - -#define HINIC_MAX_AEQS 4 -#define HINIC_MAX_CEQS 32 - -#define HINIC_AEQE_SIZE 64 -#define HINIC_CEQE_SIZE 4 - -#define HINIC_AEQE_DESC_SIZE 4 -#define HINIC_AEQE_DATA_SIZE \ - (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) - -#define HINIC_DEFAULT_AEQ_LEN 64 -#define HINIC_DEFAULT_CEQ_LEN 1024 - -#define HINIC_EQ_PAGE_SIZE SZ_4K - -#define HINIC_CEQ_ID_CMDQ 0 - -enum hinic_eq_type { - HINIC_AEQ, - HINIC_CEQ, -}; - -enum hinic_aeq_type { - HINIC_MSG_FROM_MGMT_CPU = 2, - - HINIC_MAX_AEQ_EVENTS, -}; - -enum hinic_ceq_type { - HINIC_CEQ_CMDQ = 3, - - HINIC_MAX_CEQ_EVENTS, -}; - -enum hinic_eqe_state { - HINIC_EQE_ENABLED = BIT(0), - HINIC_EQE_RUNNING = BIT(1), -}; - -struct hinic_aeq_elem { - u8 data[HINIC_AEQE_DATA_SIZE]; - u32 desc; -}; - -struct hinic_eq_work { - struct work_struct work; - void *data; -}; - -struct hinic_eq { - struct hinic_hwif *hwif; - - enum hinic_eq_type type; - int q_id; - u32 q_len; - u32 page_size; - - u32 cons_idx; - int wrapped; - - size_t elem_size; - int num_pages; - int num_elem_in_pg; - - struct msix_entry msix_entry; - - dma_addr_t *dma_addr; - void **virt_addr; - - struct hinic_eq_work aeq_work; - - struct tasklet_struct ceq_tasklet; -}; - -struct hinic_hw_event_cb { - void (*hwe_handler)(void *handle, void *data, u8 size); - void *handle; - unsigned long hwe_state; -}; - -struct hinic_aeqs { - struct hinic_hwif *hwif; - - struct hinic_eq aeq[HINIC_MAX_AEQS]; - int num_aeqs; - - struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS]; - - struct workqueue_struct *workq; -}; - -struct hinic_ceq_cb { - void (*handler)(void *handle, u32 ceqe_data); - void *handle; - enum hinic_eqe_state ceqe_state; -}; - -struct hinic_ceqs { - struct hinic_hwif *hwif; - - struct hinic_eq 
ceq[HINIC_MAX_CEQS]; - int num_ceqs; - - struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS]; -}; - -void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event, void *handle, - void (*hwe_handler)(void *handle, void *data, - u8 size)); - -void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event); - -void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event, void *handle, - void (*ceq_cb)(void *handle, u32 ceqe_data)); - -void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event); - -int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, - int num_aeqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries); - -void hinic_aeqs_free(struct hinic_aeqs *aeqs); - -int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, - int num_ceqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries); - -void hinic_ceqs_free(struct hinic_ceqs *ceqs); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c deleted file mode 100644 index 07bbfbf68577..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ /dev/null @@ -1,370 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_csr.h" -#include "hinic_hw_if.h" - -#define PCIE_ATTR_ENTRY 0 - -#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) - -/** - * hinic_msix_attr_set - set message attribute for msix entry - * @hwif: the HW interface of a pci function device - * @msix_index: msix_index - * @pending_limit: the maximum pending interrupt events (unit 8) - * @coalesc_timer: coalesc period for interrupt (unit 8 us) - * @lli_timer: replenishing period for low latency credit (unit 8 us) - * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) - * @resend_timer: maximum wait for resending msix (unit coalesc period) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer, u8 lli_credit_limit, - u8 resend_timer) -{ - u32 msix_ctrl, addr; - - if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) - return -EINVAL; - - msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | - HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | - HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) | - HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | - HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); - - addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); - - hinic_hwif_write_reg(hwif, addr, msix_ctrl); - return 0; -} - -/** - * hinic_msix_attr_get - get message attribute of msix entry - * @hwif: the HW interface of a pci function device - * @msix_index: msix_index - * @pending_limit: the maximum pending interrupt events (unit 8) - * @coalesc_timer: coalesc period for interrupt (unit 8 us) - * @lli_timer: replenishing period for low latency credit (unit 8 us) - * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) - * @resend_timer: maximum wait for resending msix (unit coalesc period) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, - u8 *pending_limit, u8 *coalesc_timer, - u8 *lli_timer, u8 *lli_credit_limit, - u8 *resend_timer) -{ - u32 addr, val; - - if 
(!VALID_MSIX_IDX(&hwif->attr, msix_index)) - return -EINVAL; - - addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); - val = hinic_hwif_read_reg(hwif, addr); - - *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); - *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER); - *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER); - *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT); - *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER); - return 0; -} - -/** - * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry - * @hwif: the HW interface of a pci function device - * @msix_index: msix_index - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index) -{ - u32 msix_ctrl, addr; - - if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) - return -EINVAL; - - msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER); - addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); - - hinic_hwif_write_reg(hwif, addr, msix_ctrl); - return 0; -} - -/** - * hinic_set_pf_action - set action on pf channel - * @hwif: the HW interface of a pci function device - * @action: action on pf channel - * - * Return 0 - Success, negative - Failure - **/ -void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action) -{ - u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); - - attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION); - attr5 |= HINIC_FA5_SET(action, PF_ACTION); - - hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5); -} - -enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - return HINIC_FA4_GET(attr4, OUTBOUND_STATE); -} - -void hinic_outbound_state_set(struct hinic_hwif *hwif, - enum hinic_outbound_state outbound_state) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE); - attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE); - - hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); -} - -enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - return HINIC_FA4_GET(attr4, DB_STATE); -} - -void hinic_db_state_set(struct hinic_hwif *hwif, - enum hinic_db_state db_state) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE); - attr4 |= HINIC_FA4_SET(db_state, DB_STATE); - - hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); -} - -void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx, - enum hinic_msix_state flag) -{ - u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + - HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; - u32 mask_bits; - - mask_bits = readl(hwif->intr_regs_base + offset); - mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; - - if (flag) - mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; - - writel(mask_bits, hwif->intr_regs_base + offset); -} - -/** - * hwif_ready - test if the HW is ready for use - * @hwif: the HW interface of a pci function device - * - * Return 0 - Success, negative - Failure - **/ -static int hwif_ready(struct hinic_hwif *hwif) -{ - struct pci_dev *pdev = hwif->pdev; - u32 addr, attr1; - - addr = HINIC_CSR_FUNC_ATTR1_ADDR; - attr1 = hinic_hwif_read_reg(hwif, addr); - - if (!HINIC_FA1_GET(attr1, INIT_STATUS)) { - dev_err(&pdev->dev, "hwif status is not ready\n"); - return -EFAULT; - } - - return 0; -} - -/** - * set_hwif_attr - set the attributes in the 
relevant members in hwif
- * @hwif: the HW interface of a pci function device
- * @attr0: the first attribute that was read from the hw
- * @attr1: the second attribute that was read from the hw
- **/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
-{
- hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
- hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
- hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX);
- hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE);
-
- hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC));
- hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
- hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
- hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
-}
-
-/**
- * read_hwif_attr - read the attributes and set members in hwif
- * @hwif: the HW interface of a pci function device
- **/
-static void read_hwif_attr(struct hinic_hwif *hwif)
-{
- u32 addr, attr0, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- set_hwif_attr(hwif, attr0, attr1);
-}
-
-/**
- * set_ppf - try to set hwif as ppf and set the type of hwif in this case
- * @hwif: the HW interface of a pci function device
- **/
-static void set_ppf(struct hinic_hwif *hwif)
-{
- struct hinic_func_attr *attr = &hwif->attr;
- u32 addr, val, ppf_election;
-
- /* Read Modify Write */
- addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif));
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
-
- ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX);
-
- val |= ppf_election;
- hinic_hwif_write_reg(hwif, addr, val);
-
- /* check PPF */
- val = hinic_hwif_read_reg(hwif, addr);
-
- attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
- if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif))
- attr->func_type = HINIC_PPF;
-}
-
-/**
- * set_dma_attr - set the dma attributes in the HW
- * @hwif: the HW interface of a pci function device
- * @entry_idx: the entry index in the dma table
- * @st: PCIE TLP steering tag
- * @at: PCIE TLP AT field
- * @ph: PCIE TLP Processing Hint field
- * @no_snooping: PCIE TLP No snooping
- * @tph_en: PCIE TLP Processing Hint Enable
- **/
-static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
- u8 st, u8 at, u8 ph,
- enum hinic_pcie_nosnoop no_snooping,
- enum hinic_pcie_tph tph_en)
-{
- u32 addr, val, dma_attr_entry;
-
- /* Read Modify Write */
- addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx);
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_DMA_ATTR_CLEAR(val, ST) &
- HINIC_DMA_ATTR_CLEAR(val, AT) &
- HINIC_DMA_ATTR_CLEAR(val, PH) &
- HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) &
- HINIC_DMA_ATTR_CLEAR(val, TPH_EN);
-
- dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) |
- HINIC_DMA_ATTR_SET(at, AT) |
- HINIC_DMA_ATTR_SET(ph, PH) |
- HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) |
- HINIC_DMA_ATTR_SET(tph_en, TPH_EN);
-
- val |= dma_attr_entry;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * dma_attr_init - initialize the default dma attributes
- * @hwif: the HW interface of a pci function device
- **/
-static void dma_attr_init(struct hinic_hwif *hwif)
-{
- set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE,
- HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE,
- HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
-}
-
-/**
- * hinic_init_hwif - initialize the hw interface
- * @hwif: the HW interface of a pci function
device
- * @pdev: the pci device for accessing PCI resources
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
-{
- int err;
-
- hwif->pdev = pdev;
-
- hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR);
- if (!hwif->cfg_regs_bar) {
- dev_err(&pdev->dev, "Failed to map configuration regs\n");
- return -ENOMEM;
- }
-
- hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR);
- if (!hwif->intr_regs_base) {
- dev_err(&pdev->dev, "Failed to map interrupt regs\n");
- err = -ENOMEM;
- goto err_map_intr_bar;
- }
-
- err = hwif_ready(hwif);
- if (err) {
- dev_err(&pdev->dev, "HW interface is not ready\n");
- goto err_hwif_ready;
- }
-
- read_hwif_attr(hwif);
-
- if (HINIC_IS_PF(hwif))
- set_ppf(hwif);
-
- /* No transactions before DMA is initialized */
- dma_attr_init(hwif);
- return 0;
-
-err_hwif_ready:
- iounmap(hwif->intr_regs_base);
-
-err_map_intr_bar:
- iounmap(hwif->cfg_regs_bar);
-
- return err;
-}
-
-/**
- * hinic_free_hwif - free the HW interface
- * @hwif: the HW interface of a pci function device
- **/
-void hinic_free_hwif(struct hinic_hwif *hwif)
-{
- iounmap(hwif->intr_regs_base);
- iounmap(hwif->cfg_regs_bar);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
deleted file mode 100644
index 517794509eb2..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_IF_H
-#define HINIC_HW_IF_H
-
-#include
-#include
-#include
-#include
-
-#define HINIC_DMA_ATTR_ST_SHIFT 0
-#define HINIC_DMA_ATTR_AT_SHIFT 8
-#define HINIC_DMA_ATTR_PH_SHIFT 10
-#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12
-#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13
-
-#define HINIC_DMA_ATTR_ST_MASK 0xFF
-#define HINIC_DMA_ATTR_AT_MASK 0x3
-#define HINIC_DMA_ATTR_PH_MASK 0x3
-#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1
-#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1
-
-#define HINIC_DMA_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \
- HINIC_DMA_ATTR_##member##_SHIFT)
-
-#define HINIC_DMA_ATTR_CLEAR(val, member) \
- ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \
- << HINIC_DMA_ATTR_##member##_SHIFT)))
-
-#define HINIC_FA0_FUNC_IDX_SHIFT 0
-#define HINIC_FA0_PF_IDX_SHIFT 10
-#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
-/* reserved members - off 16 */
-#define HINIC_FA0_FUNC_TYPE_SHIFT 24
-
-#define HINIC_FA0_FUNC_IDX_MASK 0x3FF
-#define HINIC_FA0_PF_IDX_MASK 0xF
-#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_FA0_FUNC_TYPE_MASK 0x1
-
-#define HINIC_FA0_GET(val, member) \
- (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
-
-#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8
-/* reserved members - off 10 */
-#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12
-/* reserved members - off 15 */
-#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
-/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
-
-#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
-#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
-#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
-
-#define HINIC_FA1_GET(val, member) \
- (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
-
-#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
-#define
HINIC_FA4_DB_STATE_SHIFT 1 - -#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1 -#define HINIC_FA4_DB_STATE_MASK 0x1 - -#define HINIC_FA4_GET(val, member) \ - (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK) - -#define HINIC_FA4_SET(val, member) \ - ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT) - -#define HINIC_FA4_CLEAR(val, member) \ - ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT))) - -#define HINIC_FA5_PF_ACTION_SHIFT 0 -#define HINIC_FA5_PF_ACTION_MASK 0xFFFF - -#define HINIC_FA5_SET(val, member) \ - (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT) - -#define HINIC_FA5_CLEAR(val, member) \ - ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT))) - -#define HINIC_PPF_ELECTION_IDX_SHIFT 0 -#define HINIC_PPF_ELECTION_IDX_MASK 0x1F - -#define HINIC_PPF_ELECTION_SET(val, member) \ - (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \ - HINIC_PPF_ELECTION_##member##_SHIFT) - -#define HINIC_PPF_ELECTION_GET(val, member) \ - (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ - HINIC_PPF_ELECTION_##member##_MASK) - -#define HINIC_PPF_ELECTION_CLEAR(val, member) \ - ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ - << HINIC_PPF_ELECTION_##member##_SHIFT))) - -#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0 -#define HINIC_MSIX_COALESC_TIMER_SHIFT 8 -#define HINIC_MSIX_LLI_TIMER_SHIFT 16 -#define HINIC_MSIX_LLI_CREDIT_SHIFT 24 -#define HINIC_MSIX_RESEND_TIMER_SHIFT 29 - -#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF -#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF -#define HINIC_MSIX_LLI_TIMER_MASK 0xFF -#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F -#define HINIC_MSIX_RESEND_TIMER_MASK 0x7 - -#define HINIC_MSIX_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_MSIX_##member##_MASK) << \ - HINIC_MSIX_##member##_SHIFT) - -#define HINIC_MSIX_ATTR_GET(val, member) \ - (((val) >> HINIC_MSIX_##member##_SHIFT) & \ - HINIC_MSIX_##member##_MASK) - -#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 - -#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1 - -#define HINIC_MSIX_CNT_SET(val, member) \ - (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \ - HINIC_MSIX_CNT_##member##_SHIFT) - -#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) -#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) -#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) -#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) -#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) -#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx) - -#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) -#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) -#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF) - -#define HINIC_PCI_CFG_REGS_BAR 0 -#define HINIC_PCI_INTR_REGS_BAR 2 -#define HINIC_PCI_DB_BAR 4 - -#define HINIC_PCIE_ST_DISABLE 0 -#define HINIC_PCIE_AT_DISABLE 0 -#define HINIC_PCIE_PH_DISABLE 0 - -#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */ -#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */ -#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */ -#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */ -#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */ - -#define HINIC_PCI_MSIX_ENTRY_SIZE 16 -#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 -#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 - -enum hinic_pcie_nosnoop { - HINIC_PCIE_SNOOP = 0, - HINIC_PCIE_NO_SNOOP = 1, -}; - -enum hinic_pcie_tph { - HINIC_PCIE_TPH_DISABLE = 0, - HINIC_PCIE_TPH_ENABLE = 1, -}; - -enum hinic_func_type { - 
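
/* Editor's note: all of the *_SET/*_GET/*_CLEAR families above follow the
 * same mask-and-shift pattern. A worked example using the MSI-X attribute
 * fields defined above (values chosen purely for illustration):
 *
 *	u32 ctrl = HINIC_MSIX_ATTR_SET(0xFF, COALESC_TIMER);
 *	// expands to (0xFF & 0xFF) << 8 == 0x0000FF00
 *	u8 timer = HINIC_MSIX_ATTR_GET(ctrl, COALESC_TIMER);	// == 0xFF
 *
 * CLEAR returns the register value with just that one field zeroed, which
 * is why hinic_hw_if.c updates fields with the read-modify-write idiom:
 *
 *	val = HINIC_FA4_CLEAR(val, DB_STATE);
 *	val |= HINIC_FA4_SET(db_state, DB_STATE);
 */
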
HINIC_PF = 0, - HINIC_PPF = 2, -}; - -enum hinic_mod_type { - HINIC_MOD_COMM = 0, /* HW communication module */ - HINIC_MOD_L2NIC = 1, /* L2NIC module */ - HINIC_MOD_CFGM = 7, /* Configuration module */ - - HINIC_MOD_MAX = 15 -}; - -enum hinic_node_id { - HINIC_NODE_ID_MGMT = 21, -}; - -enum hinic_pf_action { - HINIC_PF_MGMT_INIT = 0x0, - - HINIC_PF_MGMT_ACTIVE = 0x11, -}; - -enum hinic_outbound_state { - HINIC_OUTBOUND_ENABLE = 0, - HINIC_OUTBOUND_DISABLE = 1, -}; - -enum hinic_db_state { - HINIC_DB_ENABLE = 0, - HINIC_DB_DISABLE = 1, -}; - -enum hinic_msix_state { - HINIC_MSIX_ENABLE, - HINIC_MSIX_DISABLE, -}; - -struct hinic_func_attr { - u16 func_idx; - u8 pf_idx; - u8 pci_intf_idx; - - enum hinic_func_type func_type; - - u8 ppf_idx; - - u16 num_irqs; - u8 num_aeqs; - u8 num_ceqs; - - u8 num_dma_attr; -}; - -struct hinic_hwif { - struct pci_dev *pdev; - void __iomem *cfg_regs_bar; - void __iomem *intr_regs_base; - - struct hinic_func_attr attr; -}; - -static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) -{ - return be32_to_cpu(readl(hwif->cfg_regs_bar + reg)); -} - -static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, - u32 val) -{ - writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg); -} - -int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer_cfg, u8 lli_credit_limit, - u8 resend_timer); - -int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, - u8 *pending_limit, u8 *coalesc_timer_cfg, - u8 *lli_timer, u8 *lli_credit_limit, - u8 *resend_timer); - -void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx, - enum hinic_msix_state flag); - -int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); - -void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action); - -enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif); - -void hinic_outbound_state_set(struct hinic_hwif *hwif, - enum hinic_outbound_state outbound_state); - -enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif); - -void hinic_db_state_set(struct hinic_hwif *hwif, - enum hinic_db_state db_state); - -int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); - -void hinic_free_hwif(struct hinic_hwif *hwif); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c deleted file mode 100644 index d66f86fa3f46..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ /dev/null @@ -1,584 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" -#include "hinic_hw_qp_ctxt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" - -#define CI_Q_ADDR_SIZE sizeof(u32) - -#define CI_ADDR(base_addr, q_id) ((base_addr) + \ - (q_id) * CI_Q_ADDR_SIZE) - -#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE) - -#define DB_IDX(db, db_base) \ - (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE) - -enum io_cmd { - IO_CMD_MODIFY_QUEUE_CTXT = 0, - IO_CMD_CLEAN_QUEUE_CTXT, -}; - -static void init_db_area_idx(struct hinic_free_db_area *free_db_area) -{ - int i; - - for (i = 0; i < HINIC_DB_MAX_AREAS; i++) - 
free_db_area->db_idx[i] = i; - - free_db_area->alloc_pos = 0; - free_db_area->return_pos = HINIC_DB_MAX_AREAS; - - free_db_area->num_free = HINIC_DB_MAX_AREAS; - - sema_init(&free_db_area->idx_lock, 1); -} - -static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io) -{ - struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; - int pos, idx; - - down(&free_db_area->idx_lock); - - free_db_area->num_free--; - - if (free_db_area->num_free < 0) { - free_db_area->num_free++; - up(&free_db_area->idx_lock); - return ERR_PTR(-ENOMEM); - } - - pos = free_db_area->alloc_pos++; - pos &= HINIC_DB_MAX_AREAS - 1; - - idx = free_db_area->db_idx[pos]; - - free_db_area->db_idx[pos] = -1; - - up(&free_db_area->idx_lock); - - return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE; -} - -static void return_db_area(struct hinic_func_to_io *func_to_io, - void __iomem *db_base) -{ - struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; - int pos, idx = DB_IDX(db_base, func_to_io->db_base); - - down(&free_db_area->idx_lock); - - pos = free_db_area->return_pos++; - pos &= HINIC_DB_MAX_AREAS - 1; - - free_db_area->db_idx[pos] = idx; - - free_db_area->num_free++; - - up(&free_db_area->idx_lock); -} - -static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, - u16 num_sqs) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct hinic_sq_ctxt_block *sq_ctxt_block; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_buf cmdq_buf; - struct hinic_sq_ctxt *sq_ctxt; - struct hinic_qp *qp; - u64 out_param; - int err, i; - - err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); - return err; - } - - sq_ctxt_block = cmdq_buf.buf; - sq_ctxt = sq_ctxt_block->sq_ctxt; - - hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ, - num_sqs, func_to_io->max_qps); - for (i = 0; i < num_sqs; i++) { - qp = &func_to_io->qps[i]; - - hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq, - base_qpn + qp->q_id); - } - - cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, - &out_param); - if ((err) || (out_param != 0)) { - dev_err(&pdev->dev, "Failed to set SQ ctxts\n"); - err = -EFAULT; - } - - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - return err; -} - -static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, - u16 num_rqs) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct hinic_rq_ctxt_block *rq_ctxt_block; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_buf cmdq_buf; - struct hinic_rq_ctxt *rq_ctxt; - struct hinic_qp *qp; - u64 out_param; - int err, i; - - err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); - return err; - } - - rq_ctxt_block = cmdq_buf.buf; - rq_ctxt = rq_ctxt_block->rq_ctxt; - - hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ, - num_rqs, func_to_io->max_qps); - for (i = 0; i < num_rqs; i++) { - qp = &func_to_io->qps[i]; - - hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq, - base_qpn + qp->q_id); - } - - cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, - &out_param); - if ((err) || (out_param != 0)) { - dev_err(&pdev->dev, "Failed to set RQ ctxts\n"); - err = -EFAULT; - } - - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - return err; 
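
/* Editor's note: get_db_area()/return_db_area() above hand out doorbell
 * pages from a fixed ring of free page indices. A condensed sketch of the
 * allocation path (locking elided, names as in the driver):
 *
 *	if (--free_db_area->num_free < 0) {
 *		free_db_area->num_free++;
 *		return ERR_PTR(-ENOMEM);
 *	}
 *	pos = free_db_area->alloc_pos++ & (HINIC_DB_MAX_AREAS - 1);
 *	idx = free_db_area->db_idx[pos];
 *	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
 *
 * The index masking only works because HINIC_DB_MAX_AREAS (HINIC_DB_SIZE /
 * HINIC_DB_PAGE_SIZE = 4M / 4K = 1024, see hinic_hw_io.h below) is a
 * power of two.
 */
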
-} - -/** - * write_qp_ctxts - write the qp ctxt to HW - * @func_to_io: func to io channel that holds the IO components - * @base_qpn: first qp number - * @num_qps: number of qps to write - * - * Return 0 - Success, negative - Failure - **/ -static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, - u16 num_qps) -{ - return (write_sq_ctxts(func_to_io, base_qpn, num_qps) || - write_rq_ctxts(func_to_io, base_qpn, num_qps)); -} - -static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io, - enum hinic_qp_ctxt_type ctxt_type) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct hinic_clean_queue_ctxt *ctxt_block; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_buf cmdq_buf; - u64 out_param = 0; - int err; - - err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); - return err; - } - - ctxt_block = cmdq_buf.buf; - ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps; - ctxt_block->cmdq_hdr.queue_type = ctxt_type; - ctxt_block->cmdq_hdr.addr_offset = 0; - - /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */ - ctxt_block->ctxt_size = 0x3; - - hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); - - cmdq_buf.size = sizeof(*ctxt_block); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - IO_CMD_CLEAN_QUEUE_CTXT, - &cmdq_buf, &out_param); - - if (err || out_param) { - dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - } - - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - - return err; -} - -static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io) -{ - /* clean LRO/TSO context space */ - return (hinic_clean_queue_offload_ctxt(func_to_io, - HINIC_QP_CTXT_TYPE_SQ) || - hinic_clean_queue_offload_ctxt(func_to_io, - HINIC_QP_CTXT_TYPE_RQ)); -} - -/** - * init_qp - Initialize a Queue Pair - * @func_to_io: func to io channel that holds the IO components - * @qp: pointer to the qp to initialize - * @q_id: the id of the qp - * @sq_msix_entry: msix entry for sq - * @rq_msix_entry: msix entry for rq - * - * Return 0 - Success, negative - Failure - **/ -static int init_qp(struct hinic_func_to_io *func_to_io, - struct hinic_qp *qp, int q_id, - struct msix_entry *sq_msix_entry, - struct msix_entry *rq_msix_entry) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct pci_dev *pdev = hwif->pdev; - void __iomem *db_base; - int err; - - qp->q_id = q_id; - - err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], - HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE, - HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE); - if (err) { - dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n"); - return err; - } - - err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], - HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE, - HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE); - if (err) { - dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n"); - goto err_rq_alloc; - } - - db_base = get_db_area(func_to_io); - if (IS_ERR(db_base)) { - dev_err(&pdev->dev, "Failed to get DB area for SQ\n"); - err = PTR_ERR(db_base); - goto err_get_db; - } - - func_to_io->sq_db[q_id] = db_base; - - err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id], - sq_msix_entry, - CI_ADDR(func_to_io->ci_addr_base, q_id), - CI_ADDR(func_to_io->ci_dma_base, q_id), db_base); - if (err) { - dev_err(&pdev->dev, "Failed to init SQ\n"); - goto err_sq_init; - } - - err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id], - 
rq_msix_entry); - if (err) { - dev_err(&pdev->dev, "Failed to init RQ\n"); - goto err_rq_init; - } - - return 0; - -err_rq_init: - hinic_clean_sq(&qp->sq); - -err_sq_init: - return_db_area(func_to_io, db_base); - -err_get_db: - hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); - -err_rq_alloc: - hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); - return err; -} - -/** - * destroy_qp - Clean the resources of a Queue Pair - * @func_to_io: func to io channel that holds the IO components - * @qp: pointer to the qp to clean - **/ -static void destroy_qp(struct hinic_func_to_io *func_to_io, - struct hinic_qp *qp) -{ - int q_id = qp->q_id; - - hinic_clean_rq(&qp->rq); - hinic_clean_sq(&qp->sq); - - return_db_area(func_to_io, func_to_io->sq_db[q_id]); - - hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); - hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); -} - -/** - * hinic_io_create_qps - Create Queue Pairs - * @func_to_io: func to io channel that holds the IO components - * @base_qpn: base qp number - * @num_qps: number queue pairs to create - * @sq_msix_entry: msix entries for sq - * @rq_msix_entry: msix entries for rq - * - * Return 0 - Success, negative - Failure - **/ -int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, - u16 base_qpn, int num_qps, - struct msix_entry *sq_msix_entries, - struct msix_entry *rq_msix_entries) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t qps_size, wq_size, db_size; - void *ci_addr_base; - int i, j, err; - - qps_size = num_qps * sizeof(*func_to_io->qps); - func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL); - if (!func_to_io->qps) - return -ENOMEM; - - wq_size = num_qps * sizeof(*func_to_io->sq_wq); - func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); - if (!func_to_io->sq_wq) { - err = -ENOMEM; - goto err_sq_wq; - } - - wq_size = num_qps * sizeof(*func_to_io->rq_wq); - func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); - if (!func_to_io->rq_wq) { - err = -ENOMEM; - goto err_rq_wq; - } - - db_size = num_qps * sizeof(*func_to_io->sq_db); - func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL); - if (!func_to_io->sq_db) { - err = -ENOMEM; - goto err_sq_db; - } - - ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), - &func_to_io->ci_dma_base, - GFP_KERNEL); - if (!ci_addr_base) { - dev_err(&pdev->dev, "Failed to allocate CI area\n"); - err = -ENOMEM; - goto err_ci_base; - } - - func_to_io->ci_addr_base = ci_addr_base; - - for (i = 0; i < num_qps; i++) { - err = init_qp(func_to_io, &func_to_io->qps[i], i, - &sq_msix_entries[i], &rq_msix_entries[i]); - if (err) { - dev_err(&pdev->dev, "Failed to create QP %d\n", i); - goto err_init_qp; - } - } - - err = write_qp_ctxts(func_to_io, base_qpn, num_qps); - if (err) { - dev_err(&pdev->dev, "Failed to init QP ctxts\n"); - goto err_write_qp_ctxts; - } - - err = hinic_clean_qp_offload_ctxt(func_to_io); - if (err) { - dev_err(&pdev->dev, "Failed to clean QP contexts space\n"); - goto err_write_qp_ctxts; - } - - return 0; - -err_write_qp_ctxts: -err_init_qp: - for (j = 0; j < i; j++) - destroy_qp(func_to_io, &func_to_io->qps[j]); - - dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), - func_to_io->ci_addr_base, func_to_io->ci_dma_base); - -err_ci_base: - devm_kfree(&pdev->dev, func_to_io->sq_db); - -err_sq_db: - devm_kfree(&pdev->dev, func_to_io->rq_wq); - -err_rq_wq: - devm_kfree(&pdev->dev, func_to_io->sq_wq); - -err_sq_wq: - devm_kfree(&pdev->dev, 
func_to_io->qps); - return err; -} - -/** - * hinic_io_destroy_qps - Destroy the IO Queue Pairs - * @func_to_io: func to io channel that holds the IO components - * @num_qps: number queue pairs to destroy - **/ -void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t ci_table_size; - int i; - - ci_table_size = CI_TABLE_SIZE(num_qps); - - for (i = 0; i < num_qps; i++) - destroy_qp(func_to_io, &func_to_io->qps[i]); - - dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base, - func_to_io->ci_dma_base); - - devm_kfree(&pdev->dev, func_to_io->sq_db); - - devm_kfree(&pdev->dev, func_to_io->rq_wq); - devm_kfree(&pdev->dev, func_to_io->sq_wq); - - devm_kfree(&pdev->dev, func_to_io->qps); -} - -/** - * hinic_io_init - Initialize the IO components - * @func_to_io: func to io channel that holds the IO components - * @hwif: HW interface for accessing IO - * @max_qps: maximum QPs in HW - * @num_ceqs: number completion event queues - * @ceq_msix_entries: msix entries for ceqs - * - * Return 0 - Success, negative - Failure - **/ -int hinic_io_init(struct hinic_func_to_io *func_to_io, - struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, - struct msix_entry *ceq_msix_entries) -{ - struct pci_dev *pdev = hwif->pdev; - enum hinic_cmdq_type cmdq, type; - void __iomem *db_area; - int err; - - func_to_io->hwif = hwif; - func_to_io->qps = NULL; - func_to_io->max_qps = max_qps; - - err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs, - HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE, - ceq_msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to init CEQs\n"); - return err; - } - - err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); - if (err) { - dev_err(&pdev->dev, "Failed to allocate WQS for IO\n"); - goto err_wqs_alloc; - } - - func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR); - if (!func_to_io->db_base) { - dev_err(&pdev->dev, "Failed to remap IO DB area\n"); - err = -ENOMEM; - goto err_db_ioremap; - } - - init_db_area_idx(&func_to_io->free_db_area); - - for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { - db_area = get_db_area(func_to_io); - if (IS_ERR(db_area)) { - dev_err(&pdev->dev, "Failed to get cmdq db area\n"); - err = PTR_ERR(db_area); - goto err_db_area; - } - - func_to_io->cmdq_db_area[cmdq] = db_area; - } - - err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif, - func_to_io->cmdq_db_area); - if (err) { - dev_err(&pdev->dev, "Failed to initialize cmdqs\n"); - goto err_init_cmdqs; - } - - return 0; - -err_init_cmdqs: -err_db_area: - for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) - return_db_area(func_to_io, func_to_io->cmdq_db_area[type]); - - iounmap(func_to_io->db_base); - -err_db_ioremap: - hinic_wqs_free(&func_to_io->wqs); - -err_wqs_alloc: - hinic_ceqs_free(&func_to_io->ceqs); - return err; -} - -/** - * hinic_io_free - Free the IO components - * @func_to_io: func to io channel that holds the IO components - **/ -void hinic_io_free(struct hinic_func_to_io *func_to_io) -{ - enum hinic_cmdq_type cmdq; - - hinic_free_cmdqs(&func_to_io->cmdqs); - - for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) - return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); - - iounmap(func_to_io->db_base); - hinic_wqs_free(&func_to_io->wqs); - hinic_ceqs_free(&func_to_io->ceqs); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h deleted file mode 100644 index 
cac2b722e7dc..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_IO_H -#define HINIC_HW_IO_H - -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" -#include "hinic_hw_qp.h" - -#define HINIC_DB_PAGE_SIZE SZ_4K -#define HINIC_DB_SIZE SZ_4M - -#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE) - -enum hinic_db_type { - HINIC_DB_CMDQ_TYPE, - HINIC_DB_SQ_TYPE, -}; - -enum hinic_io_path { - HINIC_CTRL_PATH, - HINIC_DATA_PATH, -}; - -struct hinic_free_db_area { - int db_idx[HINIC_DB_MAX_AREAS]; - - int alloc_pos; - int return_pos; - - int num_free; - - /* Lock for getting db area */ - struct semaphore idx_lock; -}; - -struct hinic_func_to_io { - struct hinic_hwif *hwif; - - struct hinic_ceqs ceqs; - - struct hinic_wqs wqs; - - struct hinic_wq *sq_wq; - struct hinic_wq *rq_wq; - - struct hinic_qp *qps; - u16 max_qps; - - void __iomem **sq_db; - void __iomem *db_base; - - void *ci_addr_base; - dma_addr_t ci_dma_base; - - struct hinic_free_db_area free_db_area; - - void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES]; - - struct hinic_cmdqs cmdqs; -}; - -int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, - u16 base_qpn, int num_qps, - struct msix_entry *sq_msix_entries, - struct msix_entry *rq_msix_entries); - -void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, - int num_qps); - -int hinic_io_init(struct hinic_func_to_io *func_to_io, - struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, - struct msix_entry *ceq_msix_entries); - -void hinic_io_free(struct hinic_func_to_io *func_to_io); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c deleted file mode 100644 index c1a6be6bf6a8..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ /dev/null @@ -1,588 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_api_cmd.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_dev.h" - -#define SYNC_MSG_ID_MASK 0x1FF - -#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) - -#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ - ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \ - SYNC_MSG_ID_MASK)) - -#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN) - -#define MGMT_MSG_LEN_MIN 20 -#define MGMT_MSG_LEN_STEP 16 -#define MGMT_MSG_RSVD_FOR_DEV 8 - -#define SEGMENT_LEN 48 - -#define MAX_PF_MGMT_BUF_SIZE 2048 - -/* Data should be SEG LEN size aligned */ -#define MAX_MSG_LEN 2016 - -#define MSG_NOT_RESP 0xFFFF - -#define MGMT_MSG_TIMEOUT 1000 - -#define mgmt_to_pfhwdev(pf_mgmt) \ - container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) - -enum msg_segment_type { - NOT_LAST_SEGMENT = 0, - LAST_SEGMENT = 1, -}; - -enum mgmt_direction_type { - MGMT_DIRECT_SEND = 0, - MGMT_RESP = 1, -}; - -enum msg_ack_type { - MSG_ACK = 0, - MSG_NO_ACK = 1, -}; - -/** - * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that this handler will handle 
its messages - * @handle: private data for the callback - * @callback: the handler that will handle messages - **/ -void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, - void *handle, - void (*callback)(void *handle, - u8 cmd, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)) -{ - struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; - - mgmt_cb->cb = callback; - mgmt_cb->handle = handle; - mgmt_cb->state = HINIC_MGMT_CB_ENABLED; -} - -/** - * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that this handler handles its messages - **/ -void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod) -{ - struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; - - mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED; - - while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING) - schedule(); - - mgmt_cb->cb = NULL; -} - -/** - * prepare_header - prepare the header of the message - * @pf_to_mgmt: PF to MGMT channel - * @msg_len: the length of the message - * @mod: module in the chip that will get the message - * @ack_type: ask for response - * @direction: the direction of the message - * @cmd: command of the message - * @msg_id: message id - * - * Return the prepared header value - **/ -static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt, - u16 msg_len, enum hinic_mod_type mod, - enum msg_ack_type ack_type, - enum mgmt_direction_type direction, - u16 cmd, u16 msg_id) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - - return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | - HINIC_MSG_HEADER_SET(mod, MODULE) | - HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) | - HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | - HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | - HINIC_MSG_HEADER_SET(0, SEQID) | - HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | - HINIC_MSG_HEADER_SET(direction, DIRECTION) | - HINIC_MSG_HEADER_SET(cmd, CMD) | - HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) | - HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) | - HINIC_MSG_HEADER_SET(msg_id, MSG_ID); -} - -/** - * prepare_mgmt_cmd - prepare the mgmt command - * @mgmt_cmd: pointer to the command to prepare - * @header: pointer of the header for the message - * @msg: the data of the message - * @msg_len: the length of the message - **/ -static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len) -{ - memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); - - mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; - memcpy(mgmt_cmd, header, sizeof(*header)); - - mgmt_cmd += sizeof(*header); - memcpy(mgmt_cmd, msg, msg_len); -} - -/** - * mgmt_msg_len - calculate the total message length - * @msg_data_len: the length of the message data - * - * Return the total message length - **/ -static u16 mgmt_msg_len(u16 msg_data_len) -{ - /* RSVD + HEADER_SIZE + DATA_LEN */ - u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len; - - if (msg_len > MGMT_MSG_LEN_MIN) - msg_len = MGMT_MSG_LEN_MIN + - ALIGN((msg_len - MGMT_MSG_LEN_MIN), - MGMT_MSG_LEN_STEP); - else - msg_len = MGMT_MSG_LEN_MIN; - - return msg_len; -} - -/** - * send_msg_to_mgmt - send message to mgmt by API CMD - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @data: the msg data - * @data_len: the msg data length - * @ack_type: ask for response - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - 
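 *
 * (Editor's note: prepare_header() above packs these parameters into the
 * single 64-bit header whose HINIC_MSG_HEADER_* fields are defined in
 * hinic_hw_mgmt.h. As an illustration, a 16-byte direct-send message is
 * built roughly as
 *
 *	header = HINIC_MSG_HEADER_SET(16, MSG_LEN) |
 *		 HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
 *		 HINIC_MSG_HEADER_SET(MGMT_DIRECT_SEND, DIRECTION) |
 *		 HINIC_MSG_HEADER_SET(msg_id, MSG_ID) | ...;
 *
 * with the remaining fields exactly as prepare_header() sets them, and
 * mgmt_msg_len() then sizes the API CMD write as 8 reserved bytes + the
 * 8-byte header + data, padded per the MGMT_MSG_LEN_MIN/_STEP rules.)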
* - * Return 0 - Success, negative - Failure - **/ -static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - u8 *data, u16 data_len, - enum msg_ack_type ack_type, - enum mgmt_direction_type direction, - u16 resp_msg_id) -{ - struct hinic_api_cmd_chain *chain; - u64 header; - u16 msg_id; - - msg_id = SYNC_MSG_ID(pf_to_mgmt); - - if (direction == MGMT_RESP) { - header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, - direction, cmd, resp_msg_id); - } else { - SYNC_MSG_ID_INC(pf_to_mgmt); - header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, - direction, cmd, msg_id); - } - - prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len); - - chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU]; - return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT, - pf_to_mgmt->sync_msg_buf, - mgmt_msg_len(data_len)); -} - -/** - * msg_to_mgmt_sync - send sync message to mgmt - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @buf_in: the msg data - * @in_size: the msg data length - * @buf_out: response - * @out_size: response length - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * - * Return 0 - Success, negative - Failure - **/ -static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - u8 *buf_in, u16 in_size, - u8 *buf_out, u16 *out_size, - enum mgmt_direction_type direction, - u16 resp_msg_id) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_recv_msg *recv_msg; - struct completion *recv_done; - u16 msg_id; - int err; - - /* Lock the sync_msg_buf */ - down(&pf_to_mgmt->sync_msg_lock); - - recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; - recv_done = &recv_msg->recv_done; - - if (resp_msg_id == MSG_NOT_RESP) - msg_id = SYNC_MSG_ID(pf_to_mgmt); - else - msg_id = resp_msg_id; - - init_completion(recv_done); - - err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, - MSG_ACK, direction, resp_msg_id); - if (err) { - dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n"); - goto unlock_sync_msg; - } - - if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { - dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); - err = -ETIMEDOUT; - goto unlock_sync_msg; - } - - smp_rmb(); /* verify reading after completion */ - - if (recv_msg->msg_id != msg_id) { - dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id); - err = -EFAULT; - goto unlock_sync_msg; - } - - if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) { - memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); - *out_size = recv_msg->msg_len; - } - -unlock_sync_msg: - up(&pf_to_mgmt->sync_msg_lock); - return err; -} - -/** - * msg_to_mgmt_async - send message to mgmt without response - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @buf_in: the msg data - * @in_size: the msg data length - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * - * Return 0 - Success, negative - Failure - **/ -static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - u8 *buf_in, u16 in_size, - enum mgmt_direction_type direction, - u16 resp_msg_id) -{ - int err; - - /* Lock the sync_msg_buf */ - down(&pf_to_mgmt->sync_msg_lock); - - err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, 
in_size, - MSG_NO_ACK, direction, resp_msg_id); - - up(&pf_to_mgmt->sync_msg_lock); - return err; -} - -/** - * hinic_msg_to_mgmt - send message to mgmt - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @buf_in: the msg data - * @in_size: the msg data length - * @buf_out: response - * @out_size: returned response length - * @sync: sync msg or async msg - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size, - enum hinic_mgmt_msg_type sync) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - - if (sync != HINIC_MGMT_MSG_SYNC) { - dev_err(&pdev->dev, "Invalid MGMT msg type\n"); - return -EINVAL; - } - - if (!MSG_SZ_IS_VALID(in_size)) { - dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n"); - return -EINVAL; - } - - return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, - buf_out, out_size, MGMT_DIRECT_SEND, - MSG_NOT_RESP); -} - -/** - * mgmt_recv_msg_handler - handler for message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - u8 *buf_out = recv_msg->buf_out; - struct hinic_mgmt_cb *mgmt_cb; - unsigned long cb_state; - u16 out_size = 0; - - if (recv_msg->mod >= HINIC_MOD_MAX) { - dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", - recv_msg->mod); - return; - } - - mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; - - cb_state = cmpxchg(&mgmt_cb->state, - HINIC_MGMT_CB_ENABLED, - HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); - - if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) - mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, - recv_msg->msg, recv_msg->msg_len, - buf_out, &out_size); - else - dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n", - recv_msg->mod); - - mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; - - if (!recv_msg->async_mgmt_to_pf) - /* MGMT sent sync msg, send the response */ - msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, - buf_out, out_size, MGMT_RESP, - recv_msg->msg_id); -} - -/** - * mgmt_resp_msg_handler - handler for a response message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) -{ - wmb(); /* verify writing all, before reading */ - - complete(&recv_msg->recv_done); -} - -/** - * recv_mgmt_msg_handler - handler for a message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @header: the header of the message - * @recv_msg: received message details - **/ -static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - u64 *header, struct hinic_recv_msg *recv_msg) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - int seq_id, seg_len; - u8 *msg_body; - - seq_id = HINIC_MSG_HEADER_GET(*header, SEQID); - seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN); - - if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) { - dev_err(&pdev->dev, "recv big mgmt msg\n"); - return; - } - - msg_body = (u8 *)header + sizeof(*header); - memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len); - - if (!HINIC_MSG_HEADER_GET(*header, LAST)) - return; - - 
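
 /* Editor's note: management messages arrive in SEGMENT_LEN (48 byte)
  * pieces, each copied above into recv_msg->msg at offset
  * seq_id * SEGMENT_LEN; a 100-byte message, for example, arrives as
  * seq_id 0..2 carrying 48, 48 and 4 bytes. The seq_id bound checked
  * earlier allows at most MAX_MSG_LEN / SEGMENT_LEN = 2016 / 48 = 42
  * segments. Only when the LAST bit is set does the code below latch the
  * metadata and dispatch to the response/receive handler.
  */
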
recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD); - recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE); - recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header, - ASYNC_MGMT_TO_PF); - recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN); - recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID); - - if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP) - mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); - else - mgmt_recv_msg_handler(pf_to_mgmt, recv_msg); -} - -/** - * mgmt_msg_aeqe_handler - handler for a mgmt message event - * @handle: PF to MGMT channel - * @data: the header of the message - * @size: unused - **/ -static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size) -{ - struct hinic_pf_to_mgmt *pf_to_mgmt = handle; - struct hinic_recv_msg *recv_msg; - u64 *header = (u64 *)data; - - recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) == - MGMT_DIRECT_SEND ? - &pf_to_mgmt->recv_msg_from_mgmt : - &pf_to_mgmt->recv_resp_msg_from_mgmt; - - recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); -} - -/** - * alloc_recv_msg - allocate receive message memory - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: pointer that will hold the allocated data - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - - recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, - GFP_KERNEL); - if (!recv_msg->msg) - return -ENOMEM; - - recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, - GFP_KERNEL); - if (!recv_msg->buf_out) - return -ENOMEM; - - return 0; -} - -/** - * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - err = alloc_recv_msg(pf_to_mgmt, - &pf_to_mgmt->recv_msg_from_mgmt); - if (err) { - dev_err(&pdev->dev, "Failed to allocate recv msg\n"); - return err; - } - - err = alloc_recv_msg(pf_to_mgmt, - &pf_to_mgmt->recv_resp_msg_from_mgmt); - if (err) { - dev_err(&pdev->dev, "Failed to allocate resp recv msg\n"); - return err; - } - - pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev, - MAX_PF_MGMT_BUF_SIZE, - GFP_KERNEL); - if (!pf_to_mgmt->sync_msg_buf) - return -ENOMEM; - - return 0; -} - -/** - * hinic_pf_to_mgmt_init - initialize PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * @hwif: HW interface the PF to MGMT will use for accessing HW - * - * Return 0 - Success, negative - Failure - **/ -int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_hwif *hwif) -{ - struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - struct pci_dev *pdev = hwif->pdev; - int err; - - pf_to_mgmt->hwif = hwif; - - sema_init(&pf_to_mgmt->sync_msg_lock, 1); - pf_to_mgmt->sync_msg_id = 0; - - err = alloc_msg_buf(pf_to_mgmt); - if (err) { - dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); - return err; - } - - err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); - if (err) { - dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); - return err; - } - - hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU, - pf_to_mgmt, - mgmt_msg_aeqe_handler); - return 0; -} - -/** - * hinic_pf_to_mgmt_free - free PF to MGMT 
channel - * @pf_to_mgmt: PF to MGMT channel - **/ -void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) -{ - struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - - hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); - hinic_api_cmd_free(pf_to_mgmt->cmd_chain); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index 182fba17b643..05bf0ff644ad 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -2,143 +2,568 @@ /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * */ -#ifndef HINIC_HW_MGMT_H -#define HINIC_HW_MGMT_H +#ifndef HINIC_HW_MGMT_H_ +#define HINIC_HW_MGMT_H_ -#include -#include -#include -#include +/* show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum hinic_service_type { + SERVICE_T_NIC = 0, + SERVICE_T_OVS, + SERVICE_T_ROCE, + SERVICE_T_TOE, + SERVICE_T_IWARP, + SERVICE_T_FC, + SERVICE_T_FCOE, + SERVICE_T_MIGRATE, + SERVICE_T_PT, + SERVICE_T_HWPT, + SERVICE_T_MAX, -#include "hinic_hw_if.h" -#include "hinic_hw_api_cmd.h" - -#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 -#define HINIC_MSG_HEADER_MODULE_SHIFT 11 -#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 -#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 -#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 -#define HINIC_MSG_HEADER_SEQID_SHIFT 24 -#define HINIC_MSG_HEADER_LAST_SHIFT 30 -#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 -#define HINIC_MSG_HEADER_CMD_SHIFT 32 -#define HINIC_MSG_HEADER_ZEROS_SHIFT 40 -#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48 -#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50 -#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 - -#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF -#define HINIC_MSG_HEADER_MODULE_MASK 0x1F -#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F -#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 -#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 -#define HINIC_MSG_HEADER_SEQID_MASK 0x3F -#define HINIC_MSG_HEADER_LAST_MASK 0x1 -#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 -#define HINIC_MSG_HEADER_CMD_MASK 0xFF -#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF -#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3 -#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF -#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF - -#define HINIC_MSG_HEADER_SET(val, member) \ - ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ - HINIC_MSG_HEADER_##member##_SHIFT) - -#define HINIC_MSG_HEADER_GET(val, member) \ - (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ - HINIC_MSG_HEADER_##member##_MASK) - -enum hinic_mgmt_msg_type { - HINIC_MGMT_MSG_SYNC = 1, + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_CQM = (1 << 16), }; -enum hinic_cfg_cmd { - HINIC_CFG_NIC_CAP = 0, +/* NIC service capability + * 1, The chip supports NIC RQ is 1K + * 2, PF/VF RQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 RQ + * enable the VMDq: 
+/* NIC service capability + * 1. The chip supports up to 1K NIC RQs. + * 2. PF/VF RQ limits: + * RSS disabled: + * VMDq disabled: each PF/VF has at most 8 RQs + * VMDq enabled: each PF/VF has at most 1K RQs + * RSS enabled: + * VMDq disabled: each PF has at most 64 RQs, each VF at most 32 RQs + * VMDq enabled: each PF/VF has at most 1K RQs + * + * 3. The chip supports up to 1K NIC SQs. + * 4. PF/VF SQ limits: + * RSS disabled: + * VMDq disabled: each PF/VF has at most 8 SQs + * VMDq enabled: each PF/VF has at most 1K SQs + * RSS enabled: + * VMDq disabled: each PF has at most 64 SQs, each VF at most 32 SQs + * VMDq enabled: each PF/VF has at most 1K SQs + */ +struct nic_service_cap { + /* PF resources */ + u16 max_sqs; + u16 max_rqs; + + /* VF resources; a VF obtains them from its parent PF + * through the mailbox mechanism + */ + u16 vf_max_sqs; + u16 vf_max_rqs; + bool lro_en; /* LRO feature enable bit */ + u8 lro_sz; /* LRO context space: n*16B */ + u8 tso_sz; /* TSO context space: n*16B */ + + u16 max_queue_allowed; + u16 dynamic_qp; /* supports dynamic queues */ }; -enum hinic_comm_cmd { - HINIC_COMM_CMD_IO_STATUS_GET = 0x3, +struct dev_roce_svc_own_cap { + u32 max_qps; + u32 max_cqs; + u32 max_srqs; + u32 max_mpts; - HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10, - HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11, + u32 vf_max_qps; + u32 vf_max_cqs; + u32 vf_max_srqs; + u32 vf_max_mpts; - HINIC_COMM_CMD_HWCTXT_SET = 0x12, - HINIC_COMM_CMD_HWCTXT_GET = 0x13, + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_sz; - HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14, + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; - HINIC_COMM_CMD_RES_STATE_SET = 0x24, + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_sz; - HINIC_COMM_CMD_IO_RES_CLEAR = 0x29, + u32 qpc_entry_sz; + u32 max_wqes; + u32 max_rq_sg; + u32 max_sq_inline_data_sz; + u32 max_rq_desc_sz; - HINIC_COMM_CMD_MAX = 0x32, + u32 rdmarc_entry_sz; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqes; + u32 reserved_srqs; + u32 max_srq_sge; + u32 srqc_entry_sz; + + u32 max_msg_sz; /* Maximum message size: 2GB */ + + u8 num_cos; }; -enum hinic_mgmt_cb_state { - HINIC_MGMT_CB_ENABLED = BIT(0), - HINIC_MGMT_CB_RUNNING = BIT(1), +struct dev_iwarp_svc_own_cap { + u32 max_qps; + u32 max_cqs; + u32 max_mpts; + + u32 vf_max_qps; + u32 vf_max_cqs; + u32 vf_max_mpts; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_sz; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_sz; + + u32 max_rq_sg; + u32 max_sq_inline_data_sz; + u32 max_rq_desc_sz; + + u32 max_irq_depth; + u32 irq_entry_size; /* 64B */ + u32 max_orq_depth; + u32 orq_entry_size; /* 32B */ + u32 max_rtoq_depth; + u32 rtoq_entry_size; /* 32B */ + u32 max_ackq_depth; + u32 ackq_entry_size; /* 16B */ + + u32 max_msg_sz; /* Maximum message size: 1GB */ + + u32 max_wqes; /* 8K */ + u32 qpc_entry_sz; /* 1K */ + + /* true: CQM uses static allocation; + * false: CQM uses dynamic allocation. + * Currently this applies only to the QPC + */ + bool alloc_flag; + + u8 num_cos; +};
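To illustrate how a service driver is expected to consume one of these per-service structures, here is a hypothetical sketch built on the hinic_support_nic() query declared later in this header; the helper name and the clamping policy are assumptions:

static u16 hinic_nic_qp_quota(void *hwdev, u16 requested)
{
	struct nic_service_cap cap = { 0 };

	if (!hinic_support_nic(hwdev, &cap))
		return 0;	/* no NIC service on this function */

	/* a QP needs an SQ/RQ pair, so honour the smaller PF limit */
	return min3(requested, cap.max_sqs, cap.max_rqs);
}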
-struct hinic_recv_msg { - u8 *msg; - u8 *buf_out; - - struct completion recv_done; - - u16 cmd; - enum hinic_mod_type mod; - int async_mgmt_to_pf; - - u16 msg_len; - u16 msg_id; +/* RDMA service capability structure */ +struct dev_rdma_svc_cap { + /* RoCE-specific parameter structure */ + struct dev_roce_svc_own_cap roce_own_cap; + /* iWARP-specific parameter structure */ + struct dev_iwarp_svc_own_cap iwarp_own_cap; +}; -struct hinic_mgmt_cb { - void (*cb)(void *handle, u8 cmd, - void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); +/* RDMA service capability flags */ +enum { + RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), - void *handle; - unsigned long state; + RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + RDMA_DEV_CAP_FLAG_APM = (1 << 9), };
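These flags are reported back through the flags field of rdma_service_cap below. A hypothetical capability test might look as follows; pairing fast-register WRs with local invalidate is an illustrative assumption, not a documented requirement:

static bool hinic_rdma_supports_frmr(const struct rdma_service_cap *cap)
{
	/* fast-register MRs are normally used together with local invalidate */
	return (cap->flags & RDMA_BMME_FLAG_FAST_REG_WR) &&
	       (cap->flags & RDMA_BMME_FLAG_LOCAL_INV);
}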
-struct hinic_pf_to_mgmt { - struct hinic_hwif *hwif; +/* RDMA services */ +struct rdma_service_cap { + struct dev_rdma_svc_cap dev_rdma_cap; - struct semaphore sync_msg_lock; - u16 sync_msg_id; - u8 *sync_msg_buf; + u8 log_mtt; /* 1. the number of MTT PAs must be an integer power of 2; + * 2. stored as a logarithm; each MTT entry can + * hold 1, 2, 4, 8 or 16 PAs + */ + u8 log_rdmarc; /* 1. the number of RDMArc PAs must be an integer power of 2; + * 2. stored as a logarithm; each RDMArc entry can + * hold 1, 2, 4, 8 or 16 PAs + */ - struct hinic_recv_msg recv_resp_msg_from_mgmt; - struct hinic_recv_msg recv_msg_from_mgmt; + u32 reserved_qps; /* Number of reserved QPs */ + u32 max_sq_sg; /* Maximum number of SQ SGEs (8) */ + u32 max_sq_desc_sz; /* Maximum SQ WQE size (1024B); the maximum + * inline size is 960B (944B aligned up to 960B), + * then 960B => WQEBB alignment => 1024B + */ + u32 wqebb_size; /* Currently 64B and 128B are supported; + * defined as 64 bytes + */ - struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + u32 max_cqes; /* Maximum CQ depth (64K-1) */ + u32 reserved_cqs; /* Number of reserved CQs */ + u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of a CQE (32B) */ - struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; + u32 reserved_mrws; /* Number of reserved MRs/MR windows */ + u32 mpt_entry_sz; /* MPT table size (64B) */ + u32 max_fmr_maps; /* Maximum number of FMR maps, + * (1 << (32-ilog2(num_mpt)))-1 + */ + + u32 num_mtts; /* Number of MTT tables (4M); + * actually the number of MTT segs + */ + /* Number of MTT tables per MTT seg (3) */ + u32 log_mtt_seg; + u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA (64 bits) */ + u32 log_rdmarc_seg; /* Number of tables per RDMArc seg (3) */ + + /* Timeout. Formula: Tr = 4.096us * 2^local_ca_ack_delay; + * valid range [Tr, 4Tr] + */ + u32 local_ca_ack_delay; + u32 num_ports; /* Number of physical ports */ + + u32 db_page_size; /* Size of the DB page (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 num_pds; /* Maximum number of PDs (128K) */ + u32 reserved_pds; /* Number of reserved PDs */ + u32 max_xrcds; /* Maximum number of XRCDs (64K) */ + u32 reserved_xrcds; /* Number of reserved XRCDs */ + + u32 max_gid_per_port; /* Number of GIDs per port (16) */ + u32 gid_entry_sz; /* The RoCE v2 GID table entry is 32B, + * a compatible extension of RoCE v1 + */ + + u32 reserved_lkey; /* local_dma_lkey */ + u32 num_comp_vectors; /* Number of completion vectors (32) */ + u32 page_size_cap; /* Supported page sizes: 4K, 8K, 64K, 256K, 1M, 4M */ + + u32 flags; /* RDMA capability flags, see the enum above */ + u32 max_frpl_len; /* Maximum number of pages per FRMR registration */ + u32 max_pkeys; /* Number of supported pkey groups */ }; -void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, - void *handle, - void (*callback)(void *handle, - u8 cmd, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)); +/* PF/VF FCoE service resource structure */ +struct dev_fcoe_svc_cap { + /* PF resources */ + u32 max_qps; + u32 max_cqs; + u32 max_srqs; -void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod); + /* Child contexts (task IO); + * at most 8K for FCoE/IOE services + */ + u32 max_cctxs; + u32 cctxs_id_start; -int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size, - enum hinic_mgmt_msg_type sync); + u8 vp_id_start; + u8 vp_id_end; +}; -int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_hwif *hwif); +/* FCoE services */ +struct fcoe_service_cap { + struct dev_fcoe_svc_cap dev_fcoe_cap; -void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt); + /* SQ */ + u32 qpc_basic_size; + u32 childc_basic_size; + u32 sqe_size; + /* SCQ */ + u32 scqc_basic_size; + u32 scqe_size; + + /* SRQ */ + u32 srqc_size; + u32 srqe_size; +}; + +/* PF/VF ToE service resource structure */ +struct dev_toe_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent contexts: at most 1M */ + u32 max_cqs; + u32 max_srqs; + u32 srq_id_start; + + u8 num_cos; +}; + +/* ToE services */ +struct toe_service_cap { + struct dev_toe_svc_cap dev_toe_cap; + + bool alloc_flag; + u32 pctx_sz; /* 1KB */ + u32 scqc_sz; /* 64B */ +}; + +/* PF FC service resource structure */ +struct dev_fc_svc_cap { + /* PF parent QPCs */ + u32 max_parent_qpc_num; /* maximum number is 2048 */ + + /* PF child QPCs */ + u32 max_child_qpc_num; /* maximum number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQs */ + u32 scq_num; /* 16 */ + + /* PF SRQs */ + u32 srq_num; /* Number of SRQs is 2 */ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services */ +struct fc_service_cap { + struct dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B (in linked-list mode) */ + + /* SCQ */ + u32 scqc_size; /* Size of the context (32B) */ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of the SRQ context (64B) */ + u32 srqe_size; /* 32B */ +};
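The ACK-delay comment in rdma_service_cap above is a worked formula; assuming a hypothetical helper, the base timeout in nanoseconds follows directly from local_ca_ack_delay:

static u64 hinic_roce_base_ack_timeout_ns(const struct rdma_service_cap *cap)
{
	/* Tr = 4.096us * 2^local_ca_ack_delay, and 4.096us = 4096ns */
	return 4096ULL << cap->local_ca_ack_delay;
}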
+ +/* PF OVS service resource structure */ +struct dev_ovs_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent contexts: at most 1M */ + u32 max_cqs; + u8 dynamic_qp_en; + + /* VF resources */ + u32 vf_max_pctxs; /* Parent contexts: at most 1M */ + u32 vf_max_cqs; +}; + +/* OVS services */ +struct ovs_service_cap { + struct dev_ovs_svc_cap dev_ovs_cap; + + bool alloc_flag; + u32 pctx_sz; /* 512B */ + u32 scqc_sz; /* 64B */ +}; + +/* PF ACL service resource structure */ +struct dev_acl_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent contexts: at most 1M */ + u32 max_cqs; + + /* VF resources */ + u32 vf_max_pctxs; /* Parent contexts: at most 1M */ + u32 vf_max_cqs; +}; + +/* ACL services */ +struct acl_service_cap { + struct dev_acl_svc_cap dev_acl_cap; + + bool alloc_flag; + u32 pctx_sz; /* 512B */ + u32 scqc_sz; /* 64B */ +}; + +enum hinic_chip_mode { + CHIP_MODE_NORMAL, + CHIP_MODE_BMGW, + CHIP_MODE_VMGW, +}; + +bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap); +bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap); +bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap); +/* Supported on the PPF only, not on PFs */ +bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap); +bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap); +bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap); +bool hinic_support_fic(void *hwdev); +bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap); +bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap); +bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap); +bool hinic_support_ft(void *hwdev); +bool hinic_func_for_mgmt(void *hwdev); +bool hinic_support_dynamic_q(void *hwdev); + +int hinic_set_toe_enable(void *hwdev, bool enable); +bool hinic_get_toe_enable(void *hwdev); +int hinic_set_fcoe_enable(void *hwdev, bool enable); +bool hinic_get_fcoe_enable(void *hwdev); +bool hinic_get_stateful_enable(void *hwdev); + +/* Service interface for obtaining the public fields of service_cap */ +/* Obtain service_cap.host_oq_id_mask_val */ +u8 hinic_host_oq_id_mask(void *hwdev); +u8 hinic_host_id(void *hwdev); /* Obtain service_cap.host_id */ +/* Obtain service_cap.host_total_function */ +u16 hinic_host_total_func(void *hwdev); +/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs */ +u16 hinic_func_max_nic_qnum(void *hwdev); +/* Obtain service_cap.dev_cap.max_sqs */ +u16 hinic_func_max_qnum(void *hwdev); +u8 hinic_ep_id(void *hwdev); /* Obtain service_cap.ep_id */ +u8 hinic_er_id(void *hwdev); /* Obtain service_cap.er_id */ +u8 hinic_physical_port_id(void *hwdev); /* Obtain service_cap.port_id */ +u8 hinic_func_max_vf(void *hwdev); /* Obtain service_cap.max_vf */ +u32 hinic_func_pf_num(void *hwdev); /* Obtain service_cap.pf_num */ +u8 hinic_max_num_cos(void *hwdev); +u8 hinic_cos_valid_bitmap(void *hwdev); +u8 hinic_net_port_mode(void *hwdev); /* Obtain service_cap.net_port_mode */
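Since the stateful services must be enabled before use, a consumer would typically combine the capability query with the enable/readback pair declared above. A hypothetical sketch, with the error policy assumed:

static int hinic_try_enable_toe(void *hwdev)
{
	struct toe_service_cap cap;
	int err;

	if (!hinic_support_toe(hwdev, &cap))
		return -EOPNOTSUPP;	/* PPF only, see the comment above */

	err = hinic_set_toe_enable(hwdev, true);
	if (err)
		return err;

	/* read the state back instead of trusting the setter */
	return hinic_get_toe_enable(hwdev) ? 0 : -EIO;
}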
+/* The following information is obtained from the BAR space + * recorded by the SDK layer; these are the parameter query + * interfaces provided to services. + */ +/* func_attr.glb_func_idx, global function index */ +u16 hinic_global_func_id(void *hwdev); +/* func_attr.intr_num, number of MSI-X table entries in the function */ +u16 hinic_intr_num(void *hwdev); +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, + /* PXE and OVS need single-threaded processing; + * synchronous messages must use the poll-wait interface + */ +}; + +enum intr_type hinic_intr_type(void *hwdev); + +u8 hinic_pf_id_of_vf(void *hwdev); /* func_attr.p2p_idx, the PF this VF belongs to */ +u8 hinic_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, PCIe interface index */ +u8 hinic_vf_in_pf(void *hwdev); /* func_attr.vf_in_pf, the VF offset within its PF */ +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, + TYPE_UNKNOWN, +}; + +/* func_attr.func_type, 0-PF 1-VF 2-PPF */ +enum func_type hinic_func_type(void *hwdev); + +u8 hinic_ceq_num(void *hwdev); /* func_attr.ceq_num, number of CEQs in the function */ +/* func_attr.dma_attr_entry_num, number of DMA attribute entries */ +u8 hinic_dma_attr_entry_num(void *hwdev); +/* func_attr.glb_pf_vf_offset; PF use only */ +u16 hinic_glb_pf_vf_offset(void *hwdev); +/* func_attr.mpf_idx, global function index of the MPF; + * valid only on a PF + */ +u8 hinic_mpf_idx(void *hwdev); +u8 hinic_ppf_idx(void *hwdev); + +enum hinic_msix_state { + HINIC_MSIX_ENABLE, + HINIC_MSIX_DISABLE, +}; + +void hinic_set_msix_state(void *hwdev, u16 msix_idx, + enum hinic_msix_state flag); + +/* Version information structure */ +struct dev_version_info { + u8 up_ver; /* uP version, read directly from the uP, + * not taken from the configuration file + */ + u8 ucode_ver; /* Microcode version, read from the + * microcode through the CMDq + */ + u8 cfg_file_ver; /* uP configuration file version */ + u8 sdk_ver; /* SDK driver version */ + u8 hw_ver; /* Hardware version */ +}; + +/* Obtain service_cap.dev_version_info */ +int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver); + +int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector); + +/* IRQ information structure */ +struct irq_info { + u16 msix_entry_idx; /* index of the corresponding MSI-X entry */ + u32 irq_id; /* IRQ number assigned by the OS */ +}; + +int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 req_num, + struct irq_info *irq_info_array, u16 *resp_num); +void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id); +int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int req_num, + int *ceq_id_array, int *resp_num); +void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id); +int hinic_sync_time(void *hwdev, u64 time); + +struct hinic_micro_log_info { + int (*init)(void *hwdev); + void (*deinit)(void *hwdev); +}; + +int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info); +void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info); + +void hinic_disable_mgmt_msg_report(void *hwdev); +void hinic_set_func_deinit_flag(void *hwdev); +void hinic_flush_mgmt_workq(void *hwdev); + +enum func_nic_state { + HINIC_FUNC_NIC_DEL, + HINIC_FUNC_NIC_ADD, +}; + +struct hinic_func_nic_state { + u8 state; + u8 rsvd0; + u16 func_idx; + + u8 rsvd1[16]; +}; + +int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state); +int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en); +bool hinic_get_master_host_mbox_enable(void *hwdev); +bool hinic_get_slave_host_enable(void *hwdev, u8 host_id);
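As a usage illustration for the IRQ interface above: a service requests a batch of vectors, may be granted fewer (reported through resp_num), and releases each vector individually. The sketch below is hypothetical; the vector count and the immediate release are only for demonstration:

static int hinic_probe_nic_irqs(void *hwdev)
{
	struct irq_info irqs[4];
	u16 granted = 0;
	u16 i;
	int err;

	err = hinic_alloc_irqs(hwdev, SERVICE_T_NIC, ARRAY_SIZE(irqs),
			       irqs, &granted);
	if (err)
		return err;

	for (i = 0; i < granted; i++)
		pr_debug("msix entry %u -> irq %u\n",
			 irqs[i].msix_entry_idx, irqs[i].irq_id);

	/* vectors are returned one at a time */
	for (i = 0; i < granted; i++)
		hinic_free_irq(hwdev, SERVICE_T_NIC, irqs[i].irq_id);

	return 0;
}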
+int hinic_func_own_get(void *hwdev); +void hinic_func_own_free(void *hwdev); +int hinic_global_func_id_get(void *hwdev, u16 *func_id); +u16 hinic_pf_id_of_vf_hw(void *hwdev); +u16 hinic_global_func_id_hw(void *hwdev); +bool hinic_func_for_pt(void *hwdev); +bool hinic_func_for_hwpt(void *hwdev); +u32 hinic_get_db_size(void *cfg_reg_base, enum hinic_chip_mode *chip_mode); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c deleted file mode 100644 index be364b7a7019..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ /dev/null @@ -1,969 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp_ctxt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" - -#define SQ_DB_OFF SZ_2K - -/* The number of cache line to prefetch Until threshold state */ -#define WQ_PREFETCH_MAX 2 -/* The number of cache line to prefetch After threshold state */ -#define WQ_PREFETCH_MIN 1 -/* Threshold state */ -#define WQ_PREFETCH_THRESHOLD 256 - -/* sizes of the SQ/RQ ctxt */ -#define Q_CTXT_SIZE 48 -#define CTXT_RSVD 240 - -#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ - (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE) - -#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ - (((max_rqs) + (max_sqs)) * CTXT_RSVD + \ - (max_sqs + (q_id)) * Q_CTXT_SIZE) - -#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) -#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) -#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3) - -#define SQ_DB_PI_HI_SHIFT 8 -#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT) - -#define SQ_DB_PI_LOW_MASK 0xFF -#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK) - -#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) - -#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) -#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) - -enum sq_wqe_type { - SQ_NORMAL_WQE = 0, -}; - -enum rq_completion_fmt { - RQ_COMPLETE_SGE = 1 -}; - -void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, - enum hinic_qp_ctxt_type ctxt_type, - u16 num_queues, u16 max_queues) -{ - u16 max_sqs = max_queues; - u16 max_rqs = max_queues; - - qp_ctxt_hdr->num_queues = num_queues; - qp_ctxt_hdr->queue_type = ctxt_type; - - if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) - qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0); - else - qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0); - - qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); - - hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); -} - -void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, - struct hinic_sq *sq, u16 global_qid) -{ - u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; - u64 wq_page_addr, wq_page_pfn, wq_block_pfn; - u16 pi_start, ci_start; - struct hinic_wq *wq; - - wq = sq->wq; - ci_start = atomic_read(&wq->cons_idx); - pi_start = atomic_read(&wq->prod_idx); - - /* Read the first page paddr from the WQ page paddr ptrs */ - wq_page_addr = be64_to_cpu(*wq->block_vaddr); - - wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = 
lower_32_bits(wq_page_pfn); - - wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid, - GLOBAL_SQ_ID) | - HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN); - - sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) | - HINIC_SQ_CTXT_CI_SET(1, WRAPPED); - - sq_ctxt->wq_hi_pfn_pi = - HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | - HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI); - - sq_ctxt->wq_lo_pfn = wq_page_pfn_lo; - - sq_ctxt->pref_cache = - HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - sq_ctxt->pref_wrapped = 1; - - sq_ctxt->pref_wq_hi_pfn_ci = - HINIC_SQ_CTXT_PREF_SET(ci_start, CI) | - HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN); - - sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; - - sq_ctxt->wq_block_hi_pfn = - HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); - - sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; - - hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); -} - -void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, - struct hinic_rq *rq, u16 global_qid) -{ - u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; - u64 wq_page_addr, wq_page_pfn, wq_block_pfn; - u16 pi_start, ci_start; - struct hinic_wq *wq; - - wq = rq->wq; - ci_start = atomic_read(&wq->cons_idx); - pi_start = atomic_read(&wq->prod_idx); - - /* Read the first page paddr from the WQ page paddr ptrs */ - wq_page_addr = be64_to_cpu(*wq->block_vaddr); - - wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) | - HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED); - - rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) | - HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR); - - rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, - HI_PFN) | - HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI); - - rq_ctxt->wq_lo_pfn = wq_page_pfn_lo; - - rq_ctxt->pref_cache = - HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - rq_ctxt->pref_wrapped = 1; - - rq_ctxt->pref_wq_hi_pfn_ci = - HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) | - HINIC_RQ_CTXT_PREF_SET(ci_start, CI); - - rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; - - rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); - rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); - - rq_ctxt->wq_block_hi_pfn = - HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); - - rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; - - hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); -} - -/** - * alloc_sq_skb_arr - allocate sq array for saved skb - * @sq: HW Send Queue - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_sq_skb_arr(struct hinic_sq *sq) -{ - struct hinic_wq *wq = sq->wq; - size_t skb_arr_size; - - skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); - sq->saved_skb = vzalloc(skb_arr_size); - if (!sq->saved_skb) - return -ENOMEM; - - return 0; -} - -/** - * free_sq_skb_arr - free sq array for saved skb - * @sq: HW Send Queue - **/ -static void free_sq_skb_arr(struct 
hinic_sq *sq) -{ - vfree(sq->saved_skb); -} - -/** - * alloc_rq_skb_arr - allocate rq array for saved skb - * @rq: HW Receive Queue - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_rq_skb_arr(struct hinic_rq *rq) -{ - struct hinic_wq *wq = rq->wq; - size_t skb_arr_size; - - skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); - rq->saved_skb = vzalloc(skb_arr_size); - if (!rq->saved_skb) - return -ENOMEM; - - return 0; -} - -/** - * free_rq_skb_arr - free rq array for saved skb - * @rq: HW Receive Queue - **/ -static void free_rq_skb_arr(struct hinic_rq *rq) -{ - vfree(rq->saved_skb); -} - -/** - * hinic_init_sq - Initialize HW Send Queue - * @sq: HW Send Queue - * @hwif: HW Interface for accessing HW - * @wq: Work Queue for the data of the SQ - * @entry: msix entry for sq - * @ci_addr: address for reading the current HW consumer index - * @ci_dma_addr: dma address for reading the current HW consumer index - * @db_base: doorbell base address - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry, - void *ci_addr, dma_addr_t ci_dma_addr, - void __iomem *db_base) -{ - sq->hwif = hwif; - - sq->wq = wq; - - sq->irq = entry->vector; - sq->msix_entry = entry->entry; - - sq->hw_ci_addr = ci_addr; - sq->hw_ci_dma_addr = ci_dma_addr; - - sq->db_base = db_base + SQ_DB_OFF; - - return alloc_sq_skb_arr(sq); -} - -/** - * hinic_clean_sq - Clean HW Send Queue's Resources - * @sq: Send Queue - **/ -void hinic_clean_sq(struct hinic_sq *sq) -{ - free_sq_skb_arr(sq); -} - -/** - * alloc_rq_cqe - allocate rq completion queue elements - * @rq: HW Receive Queue - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_rq_cqe(struct hinic_rq *rq) -{ - struct hinic_hwif *hwif = rq->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t cqe_dma_size, cqe_size; - struct hinic_wq *wq = rq->wq; - int j, i; - - cqe_size = wq->q_depth * sizeof(*rq->cqe); - rq->cqe = vzalloc(cqe_size); - if (!rq->cqe) - return -ENOMEM; - - cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); - rq->cqe_dma = vzalloc(cqe_dma_size); - if (!rq->cqe_dma) - goto err_cqe_dma_arr_alloc; - - for (i = 0; i < wq->q_depth; i++) { - rq->cqe[i] = dma_alloc_coherent(&pdev->dev, - sizeof(*rq->cqe[i]), - &rq->cqe_dma[i], GFP_KERNEL); - if (!rq->cqe[i]) - goto err_cqe_alloc; - } - - return 0; - -err_cqe_alloc: - for (j = 0; j < i; j++) - dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], - rq->cqe_dma[j]); - - vfree(rq->cqe_dma); - -err_cqe_dma_arr_alloc: - vfree(rq->cqe); - return -ENOMEM; -} - -/** - * free_rq_cqe - free rq completion queue elements - * @rq: HW Receive Queue - **/ -static void free_rq_cqe(struct hinic_rq *rq) -{ - struct hinic_hwif *hwif = rq->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_wq *wq = rq->wq; - int i; - - for (i = 0; i < wq->q_depth; i++) - dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], - rq->cqe_dma[i]); - - vfree(rq->cqe_dma); - vfree(rq->cqe); -} - -/** - * hinic_init_rq - Initialize HW Receive Queue - * @rq: HW Receive Queue - * @hwif: HW Interface for accessing HW - * @wq: Work Queue for the data of the RQ - * @entry: msix entry for rq - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry) -{ - struct pci_dev *pdev = hwif->pdev; - size_t pi_size; - int err; - - rq->hwif = hwif; - - rq->wq = wq; - - rq->irq = entry->vector; - 
rq->msix_entry = entry->entry; - - rq->buf_sz = HINIC_RX_BUF_SZ; - - err = alloc_rq_skb_arr(rq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); - return err; - } - - err = alloc_rq_cqe(rq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); - goto err_alloc_rq_cqe; - } - - /* HW requirements: Must be at least 32 bit */ - pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); - rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, - &rq->pi_dma_addr, GFP_KERNEL); - if (!rq->pi_virt_addr) { - dev_err(&pdev->dev, "Failed to allocate PI address\n"); - err = -ENOMEM; - goto err_pi_virt; - } - - return 0; - -err_pi_virt: - free_rq_cqe(rq); - -err_alloc_rq_cqe: - free_rq_skb_arr(rq); - return err; -} - -/** - * hinic_clean_rq - Clean HW Receive Queue's Resources - * @rq: HW Receive Queue - **/ -void hinic_clean_rq(struct hinic_rq *rq) -{ - struct hinic_hwif *hwif = rq->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t pi_size; - - pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); - dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr, - rq->pi_dma_addr); - - free_rq_cqe(rq); - free_rq_skb_arr(rq); -} - -/** - * hinic_get_sq_free_wqebbs - return number of free wqebbs for use - * @sq: send queue - * - * Return number of free wqebbs - **/ -int hinic_get_sq_free_wqebbs(struct hinic_sq *sq) -{ - struct hinic_wq *wq = sq->wq; - - return atomic_read(&wq->delta) - 1; -} - -/** - * hinic_get_rq_free_wqebbs - return number of free wqebbs for use - * @rq: recv queue - * - * Return number of free wqebbs - **/ -int hinic_get_rq_free_wqebbs(struct hinic_rq *rq) -{ - struct hinic_wq *wq = rq->wq; - - return atomic_read(&wq->delta) - 1; -} - -static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx, - int nr_descs) -{ - u32 ctrl_size, task_size, bufdesc_size; - - ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); - task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); - bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc); - bufdesc_size = SIZE_8BYTES(bufdesc_size); - - ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | - HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) | - HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | - HINIC_SQ_CTRL_SET(ctrl_size, LEN); - - ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT, - QUEUE_INFO_MSS) | - HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC); -} - -static void sq_prepare_task(struct hinic_sq_task *task) -{ - task->pkt_info0 = 0; - task->pkt_info1 = 0; - task->pkt_info2 = 0; - - task->ufo_v6_identify = 0; - - task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE); - - task->zero_pad = 0; -} - -void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len) -{ - task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN); -} - -void hinic_task_set_outter_l3(struct hinic_sq_task *task, - enum hinic_l3_offload_type l3_type, - u32 network_len) -{ - task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) | - HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN); -} - -void hinic_task_set_inner_l3(struct hinic_sq_task *task, - enum hinic_l3_offload_type l3_type, - u32 network_len) -{ - task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE); - task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN); -} - -void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, - enum hinic_l4_tunnel_type l4_type, - u32 tunnel_len) -{ - task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) | - HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN); -} - -void 
hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info, - enum hinic_l4_offload_type l4_offload, - u32 l4_len, u32 offset) -{ - u32 tcp_udp_cs = 0, sctp = 0; - u32 mss = HINIC_MSS_DEFAULT; - - if (l4_offload == TCP_OFFLOAD_ENABLE || - l4_offload == UDP_OFFLOAD_ENABLE) - tcp_udp_cs = 1; - else if (l4_offload == SCTP_OFFLOAD_ENABLE) - sctp = 1; - - task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); - task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); - - *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) | - HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) | - HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP); - - *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); - *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS); -} - -void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info, - enum hinic_l4_offload_type l4_offload, - u32 l4_len, u32 offset, u32 ip_ident, u32 mss) -{ - u32 tso = 0, ufo = 0; - - if (l4_offload == TCP_OFFLOAD_ENABLE) - tso = 1; - else if (l4_offload == UDP_OFFLOAD_ENABLE) - ufo = 1; - - task->ufo_v6_identify = ip_ident; - - task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); - task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG); - task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); - - *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) | - HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) | - HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) | - HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS); - - /* set MSS value */ - *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); - *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS); -} - -/** - * hinic_sq_prepare_wqe - prepare wqe before insert to the queue - * @sq: send queue - * @prod_idx: pi value - * @sq_wqe: wqe to prepare - * @sges: sges for use by the wqe for send for buf addresses - * @nr_sges: number of sges - **/ -void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, - int nr_sges) -{ - int i; - - sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges); - - sq_prepare_task(&sq_wqe->task); - - for (i = 0; i < nr_sges; i++) - sq_wqe->buf_descs[i].sge = sges[i]; -} - -/** - * sq_prepare_db - prepare doorbell to write - * @sq: send queue - * @prod_idx: pi value for the doorbell - * @cos: cos of the doorbell - * - * Return db value - **/ -static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos) -{ - struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); - u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx)); - - /* Data should be written to HW in Big Endian Format */ - return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) | - HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) | - HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) | - HINIC_SQ_DB_INFO_SET(cos, COS) | - HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); -} - -/** - * hinic_sq_write_db- write doorbell - * @sq: send queue - * @prod_idx: pi value for the doorbell - * @wqe_size: wqe size - * @cos: cos of the wqe - **/ -void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, - unsigned int cos) -{ - struct hinic_wq *wq = sq->wq; - - /* increment prod_idx to the next */ - prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - - wmb(); /* Write all before the doorbell */ - - writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx)); -} - -/** - * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi - * @sq: sq to get wqe 
from - * @wqe_size: wqe size - * @prod_idx: returned pi - * - * Return wqe pointer - **/ -struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, - unsigned int wqe_size, u16 *prod_idx) -{ - struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size, - prod_idx); - - if (IS_ERR(hw_wqe)) - return NULL; - - return &hw_wqe->sq_wqe; -} - -/** - * hinic_sq_return_wqe - return the wqe to the sq - * @sq: send queue - * @wqe_size: the size of the wqe - **/ -void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size) -{ - hinic_return_wqe(sq->wq, wqe_size); -} - -/** - * hinic_sq_write_wqe - write the wqe to the sq - * @sq: send queue - * @prod_idx: pi of the wqe - * @sq_wqe: the wqe to write - * @skb: skb to save - * @wqe_size: the size of the wqe - **/ -void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *sq_wqe, - struct sk_buff *skb, unsigned int wqe_size) -{ - struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe; - - sq->saved_skb[prod_idx] = skb; - - /* The data in the HW should be in Big Endian Format */ - hinic_cpu_to_be32(sq_wqe, wqe_size); - - hinic_write_wqe(sq->wq, hw_wqe, wqe_size); -} - -/** - * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the - * wqe only have one wqebb - * @sq: send queue - * @skb: return skb that was saved - * @wqe_size: the wqe size ptr - * @cons_idx: consumer index of the wqe - * - * Return wqe in ci position - **/ -struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int *wqe_size, u16 *cons_idx) -{ - struct hinic_hw_wqe *hw_wqe; - struct hinic_sq_wqe *sq_wqe; - struct hinic_sq_ctrl *ctrl; - unsigned int buf_sect_len; - u32 ctrl_info; - - /* read the ctrl section for getting wqe size */ - hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx); - if (IS_ERR(hw_wqe)) - return NULL; - - *skb = sq->saved_skb[*cons_idx]; - - sq_wqe = &hw_wqe->sq_wqe; - ctrl = &sq_wqe->ctrl; - ctrl_info = be32_to_cpu(ctrl->ctrl_info); - buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN); - - *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); - *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len); - *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size); - - return &hw_wqe->sq_wqe; -} - -/** - * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci - * @sq: send queue - * @skb: return skb that was saved - * @wqe_size: the size of the wqe - * @cons_idx: consumer index of the wqe - * - * Return wqe in ci position - **/ -struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int wqe_size, u16 *cons_idx) -{ - struct hinic_hw_wqe *hw_wqe; - - hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx); - *skb = sq->saved_skb[*cons_idx]; - - return &hw_wqe->sq_wqe; -} - -/** - * hinic_sq_put_wqe - release the ci for new wqes - * @sq: send queue - * @wqe_size: the size of the wqe - **/ -void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size) -{ - hinic_put_wqe(sq->wq, wqe_size); -} - -/** - * hinic_sq_get_sges - get sges from the wqe - * @sq_wqe: wqe to get the sges from its buffer addresses - * @sges: returned sges - * @nr_sges: number sges to return - **/ -void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, - int nr_sges) -{ - int i; - - for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) { - sges[i] = sq_wqe->buf_descs[i].sge; - hinic_be32_to_cpu(&sges[i], sizeof(sges[i])); - } -} - -/** - * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi - * @rq: rq to get wqe 
from - * @wqe_size: wqe size - * @prod_idx: returned pi - * - * Return wqe pointer - **/ -struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, - unsigned int wqe_size, u16 *prod_idx) -{ - struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size, - prod_idx); - - if (IS_ERR(hw_wqe)) - return NULL; - - return &hw_wqe->rq_wqe; -} - -/** - * hinic_rq_write_wqe - write the wqe to the rq - * @rq: recv queue - * @prod_idx: pi of the wqe - * @rq_wqe: the wqe to write - * @skb: skb to save - **/ -void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb) -{ - struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe; - - rq->saved_skb[prod_idx] = skb; - - /* The data in the HW should be in Big Endian Format */ - hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe)); - - hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe)); -} - -/** - * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci - * @rq: recv queue - * @wqe_size: the size of the wqe - * @skb: return saved skb - * @cons_idx: consumer index of the wqe - * - * Return wqe in ci position - **/ -struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, u16 *cons_idx) -{ - struct hinic_hw_wqe *hw_wqe; - struct hinic_rq_cqe *cqe; - int rx_done; - u32 status; - - hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx); - if (IS_ERR(hw_wqe)) - return NULL; - - cqe = rq->cqe[*cons_idx]; - - status = be32_to_cpu(cqe->status); - - rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE); - if (!rx_done) - return NULL; - - *skb = rq->saved_skb[*cons_idx]; - - return &hw_wqe->rq_wqe; -} - -/** - * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position - * @rq: recv queue - * @wqe_size: the size of the wqe - * @skb: return saved skb - * @cons_idx: consumer index in the wq - * - * Return wqe in incremented ci position - **/ -struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, - u16 *cons_idx) -{ - struct hinic_wq *wq = rq->wq; - struct hinic_hw_wqe *hw_wqe; - unsigned int num_wqebbs; - - wqe_size = ALIGN(wqe_size, wq->wqebb_size); - num_wqebbs = wqe_size / wq->wqebb_size; - - *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs); - - *skb = rq->saved_skb[*cons_idx]; - - hw_wqe = hinic_read_wqe_direct(wq, *cons_idx); - - return &hw_wqe->rq_wqe; -} - -/** - * hinic_put_wqe - release the ci for new wqes - * @rq: recv queue - * @cons_idx: consumer index of the wqe - * @wqe_size: the size of the wqe - **/ -void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, - unsigned int wqe_size) -{ - struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; - u32 status = be32_to_cpu(cqe->status); - - status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE); - - /* Rx WQE size is 1 WQEBB, no wq shadow*/ - cqe->status = cpu_to_be32(status); - - wmb(); /* clear done flag */ - - hinic_put_wqe(rq->wq, wqe_size); -} - -/** - * hinic_rq_get_sge - get sge from the wqe - * @rq: recv queue - * @rq_wqe: wqe to get the sge from its buf address - * @cons_idx: consumer index - * @sge: returned sge - **/ -void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, - u16 cons_idx, struct hinic_sge *sge) -{ - struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; - u32 len = be32_to_cpu(cqe->len); - - sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr); - sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr); - sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN); -} - -/** - * hinic_rq_prepare_wqe - prepare wqe before insert to 
the queue - * @rq: recv queue - * @prod_idx: pi value - * @rq_wqe: the wqe - * @sge: sge for use by the wqe for recv buf address - **/ -void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge) -{ - struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; - struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; - struct hinic_rq_cqe *cqe = rq->cqe[prod_idx]; - struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; - dma_addr_t cqe_dma = rq->cqe_dma[prod_idx]; - - ctrl->ctrl_info = - HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | - HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), - COMPLETE_LEN) | - HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), - BUFDESC_SECT_LEN) | - HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); - - hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe)); - - buf_desc->hi_addr = sge->hi_addr; - buf_desc->lo_addr = sge->lo_addr; -} - -/** - * hinic_rq_update - update pi of the rq - * @rq: recv queue - * @prod_idx: pi value - **/ -void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx) -{ - *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1)); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h deleted file mode 100644 index f4a339b10b10..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ /dev/null @@ -1,223 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_QP_H -#define HINIC_HW_QP_H - -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp_ctxt.h" - -#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0 -#define HINIC_SQ_DB_INFO_QID_SHIFT 8 -#define HINIC_SQ_DB_INFO_PATH_SHIFT 23 -#define HINIC_SQ_DB_INFO_COS_SHIFT 24 -#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27 - -#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF -#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF -#define HINIC_SQ_DB_INFO_PATH_MASK 0x1 -#define HINIC_SQ_DB_INFO_COS_MASK 0x7 -#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F - -#define HINIC_SQ_DB_INFO_SET(val, member) \ - (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \ - << HINIC_SQ_DB_INFO_##member##_SHIFT) - -#define HINIC_SQ_WQEBB_SIZE 64 -#define HINIC_RQ_WQEBB_SIZE 32 - -#define HINIC_SQ_PAGE_SIZE SZ_4K -#define HINIC_RQ_PAGE_SIZE SZ_4K - -#define HINIC_SQ_DEPTH SZ_4K -#define HINIC_RQ_DEPTH SZ_4K - -/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */ -#define HINIC_RX_BUF_SZ 2048 -#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX - -#define HINIC_MIN_TX_WQE_SIZE(wq) \ - ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size) - -#define HINIC_MIN_TX_NUM_WQEBBS(sq) \ - (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size) - -enum hinic_rx_buf_sz_idx { - HINIC_RX_BUF_SZ_32_IDX, - HINIC_RX_BUF_SZ_64_IDX, - HINIC_RX_BUF_SZ_96_IDX, - HINIC_RX_BUF_SZ_128_IDX, - HINIC_RX_BUF_SZ_192_IDX, - HINIC_RX_BUF_SZ_256_IDX, - HINIC_RX_BUF_SZ_384_IDX, - HINIC_RX_BUF_SZ_512_IDX, - HINIC_RX_BUF_SZ_768_IDX, - HINIC_RX_BUF_SZ_1024_IDX, - HINIC_RX_BUF_SZ_1536_IDX, - HINIC_RX_BUF_SZ_2048_IDX, - HINIC_RX_BUF_SZ_3072_IDX, - HINIC_RX_BUF_SZ_4096_IDX, - HINIC_RX_BUF_SZ_8192_IDX, - HINIC_RX_BUF_SZ_16384_IDX, -}; - -struct hinic_sq { - struct hinic_hwif *hwif; - - struct hinic_wq *wq; - - u32 irq; - u16 msix_entry; - - void *hw_ci_addr; - dma_addr_t hw_ci_dma_addr; - - void __iomem *db_base; - - struct sk_buff 
**saved_skb; -}; - -struct hinic_rq { - struct hinic_hwif *hwif; - - struct hinic_wq *wq; - - u32 irq; - u16 msix_entry; - - size_t buf_sz; - - struct sk_buff **saved_skb; - - struct hinic_rq_cqe **cqe; - dma_addr_t *cqe_dma; - - u16 *pi_virt_addr; - dma_addr_t pi_dma_addr; -}; - -struct hinic_qp { - struct hinic_sq sq; - struct hinic_rq rq; - - u16 q_id; -}; - -void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, - enum hinic_qp_ctxt_type ctxt_type, - u16 num_queues, u16 max_queues); - -void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, - struct hinic_sq *sq, u16 global_qid); - -void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, - struct hinic_rq *rq, u16 global_qid); - -int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, - dma_addr_t ci_dma_addr, void __iomem *db_base); - -void hinic_clean_sq(struct hinic_sq *sq); - -int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry); - -void hinic_clean_rq(struct hinic_rq *rq); - -int hinic_get_sq_free_wqebbs(struct hinic_sq *sq); - -int hinic_get_rq_free_wqebbs(struct hinic_rq *rq); - -void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len); - -void hinic_task_set_outter_l3(struct hinic_sq_task *task, - enum hinic_l3_offload_type l3_type, - u32 network_len); - -void hinic_task_set_inner_l3(struct hinic_sq_task *task, - enum hinic_l3_offload_type l3_type, - u32 network_len); - -void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, - enum hinic_l4_tunnel_type l4_type, - u32 tunnel_len); - -void hinic_set_cs_inner_l4(struct hinic_sq_task *task, - u32 *queue_info, - enum hinic_l4_offload_type l4_offload, - u32 l4_len, u32 offset); - -void hinic_set_tso_inner_l4(struct hinic_sq_task *task, - u32 *queue_info, - enum hinic_l4_offload_type l4_offload, - u32 l4_len, - u32 offset, u32 ip_ident, u32 mss); - -void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *wqe, struct hinic_sge *sges, - int nr_sges); - -void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, - unsigned int cos); - -struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, - unsigned int wqe_size, u16 *prod_idx); - -void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size); - -void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *wqe, struct sk_buff *skb, - unsigned int wqe_size); - -struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int wqe_size, u16 *cons_idx); - -struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int *wqe_size, u16 *cons_idx); - -void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size); - -void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges, - int nr_sges); - -struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, - unsigned int wqe_size, u16 *prod_idx); - -void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *wqe, struct sk_buff *skb); - -struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, u16 *cons_idx); - -struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, - u16 *cons_idx); - -void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, - unsigned int wqe_size); - -void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe, - 
u16 cons_idx, struct hinic_sge *sge); - -void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *wqe, struct hinic_sge *sge); - -void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h deleted file mode 100644 index 00900a6640ad..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h +++ /dev/null @@ -1,210 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_QP_CTXT_H -#define HINIC_HW_QP_CTXT_H - -#include - -#include "hinic_hw_cmdq.h" - -#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 -#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 - -#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF -#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1 - -#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \ - << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT) - -#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11 -#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23 - -#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF -#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1 - -#define HINIC_SQ_CTXT_CI_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \ - << HINIC_SQ_CTXT_CI_##member##_SHIFT) - -#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20 - -#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF -#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF - -#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \ - << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF -#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF -#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F - -#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 -#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20 - -#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF -#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF - -#define HINIC_SQ_CTXT_PREF_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \ - << HINIC_SQ_CTXT_PREF_##member##_SHIFT) - -#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 - -#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF - -#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \ - << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 -#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1 - -#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1 -#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1 - -#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \ - << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT) - -#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0 -#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22 - -#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF -#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF - -#define HINIC_RQ_CTXT_PI_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \ - HINIC_RQ_CTXT_PI_##member##_SHIFT) - -#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20 - -#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF -#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF - -#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \ - 
(((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \ - HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF -#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF -#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F - -#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 -#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20 - -#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF -#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF - -#define HINIC_RQ_CTXT_PREF_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \ - HINIC_RQ_CTXT_PREF_##member##_SHIFT) - -#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 - -#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF - -#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ - HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \ - + (num_sqs) * sizeof(struct hinic_sq_ctxt)) - -#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \ - + (num_rqs) * sizeof(struct hinic_rq_ctxt)) - -#define HINIC_WQ_PAGE_PFN_SHIFT 12 -#define HINIC_WQ_BLOCK_PFN_SHIFT 9 - -#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT) -#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \ - HINIC_WQ_BLOCK_PFN_SHIFT) - -#define HINIC_Q_CTXT_MAX \ - ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \ - / sizeof(struct hinic_sq_ctxt)) - -enum hinic_qp_ctxt_type { - HINIC_QP_CTXT_TYPE_SQ, - HINIC_QP_CTXT_TYPE_RQ -}; - -struct hinic_qp_ctxt_header { - u16 num_queues; - u16 queue_type; - u32 addr_offset; -}; - -struct hinic_sq_ctxt { - u32 ceq_attr; - - u32 ci_wrapped; - - u32 wq_hi_pfn_pi; - u32 wq_lo_pfn; - - u32 pref_cache; - u32 pref_wrapped; - u32 pref_wq_hi_pfn_ci; - u32 pref_wq_lo_pfn; - - u32 rsvd0; - u32 rsvd1; - - u32 wq_block_hi_pfn; - u32 wq_block_lo_pfn; -}; - -struct hinic_rq_ctxt { - u32 ceq_attr; - - u32 pi_intr_attr; - - u32 wq_hi_pfn_ci; - u32 wq_lo_pfn; - - u32 pref_cache; - u32 pref_wrapped; - - u32 pref_wq_hi_pfn_ci; - u32 pref_wq_lo_pfn; - - u32 pi_paddr_hi; - u32 pi_paddr_lo; - - u32 wq_block_hi_pfn; - u32 wq_block_lo_pfn; -}; - -struct hinic_clean_queue_ctxt { - struct hinic_qp_ctxt_header cmdq_hdr; - u32 ctxt_size; -}; - -struct hinic_sq_ctxt_block { - struct hinic_qp_ctxt_header hdr; - struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; -}; - -struct hinic_rq_ctxt_block { - struct hinic_qp_ctxt_header hdr; - struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c deleted file mode 100644 index 03363216ff59..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ /dev/null @@ -1,900 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" - -#define WQS_BLOCKS_PER_PAGE 4 - -#define WQ_BLOCK_SIZE 4096 -#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) - -#define WQS_MAX_NUM_BLOCKS 128 -#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ - 
sizeof((wqs)->free_blocks[0])) - -#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size) - -#define WQ_PAGE_ADDR_SIZE sizeof(u64) -#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) - -#define CMDQ_BLOCK_SIZE 512 -#define CMDQ_PAGE_SIZE 4096 - -#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) - -#define WQ_BASE_VADDR(wqs, wq) \ - ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \ - + (wq)->block_idx * WQ_BLOCK_SIZE) - -#define WQ_BASE_PADDR(wqs, wq) \ - ((wqs)->page_paddr[(wq)->page_idx] \ - + (wq)->block_idx * WQ_BLOCK_SIZE) - -#define WQ_BASE_ADDR(wqs, wq) \ - ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \ - + (wq)->block_idx * WQ_BLOCK_SIZE) - -#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ - ((void *)((cmdq_pages)->page_vaddr) \ - + (wq)->block_idx * CMDQ_BLOCK_SIZE) - -#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ - ((cmdq_pages)->page_paddr \ - + (wq)->block_idx * CMDQ_BLOCK_SIZE) - -#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ - ((void *)((cmdq_pages)->shadow_page_vaddr) \ - + (wq)->block_idx * CMDQ_BLOCK_SIZE) - -#define WQ_PAGE_ADDR(wq, idx) \ - ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)]) - -#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) - -#define WQE_IN_RANGE(wqe, start, end) \ - (((unsigned long)(wqe) >= (unsigned long)(start)) && \ - ((unsigned long)(wqe) < (unsigned long)(end))) - -#define WQE_SHADOW_PAGE(wq, wqe) \ - (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ - / (wq)->max_wqe_size) - -static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx) -{ - return (((idx) & ((wq)->num_wqebbs_per_page - 1)) - << (wq)->wqebb_size_shift); -} - -static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx) -{ - return (((idx) >> ((wq)->wqebbs_per_page_shift)) - & ((wq)->num_q_pages - 1)); -} -/** - * queue_alloc_page - allocate page for Queue - * @hwif: HW interface for allocating DMA - * @vaddr: virtual address will be returned in this address - * @paddr: physical address will be returned in this address - * @shadow_vaddr: VM area will be return here for holding WQ page addresses - * @page_sz: page size of each WQ page - * - * Return 0 - Success, negative - Failure - **/ -static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, - void ***shadow_vaddr, size_t page_sz) -{ - struct pci_dev *pdev = hwif->pdev; - dma_addr_t dma_addr; - - *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, - GFP_KERNEL); - if (!*vaddr) { - dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); - return -ENOMEM; - } - - *paddr = (u64)dma_addr; - - /* use vzalloc for big mem */ - *shadow_vaddr = vzalloc(page_sz); - if (!*shadow_vaddr) - goto err_shadow_vaddr; - - return 0; - -err_shadow_vaddr: - dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr); - return -ENOMEM; -} - -/** - * wqs_allocate_page - allocate page for WQ set - * @wqs: Work Queue Set - * @page_idx: the page index of the page will be allocated - * - * Return 0 - Success, negative - Failure - **/ -static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx) -{ - return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx], - &wqs->page_paddr[page_idx], - &wqs->shadow_page_vaddr[page_idx], - WQS_PAGE_SIZE); -} - -/** - * wqs_free_page - free page of WQ set - * @wqs: Work Queue Set - * @page_idx: the page index of the page will be freed - **/ -static void wqs_free_page(struct hinic_wqs *wqs, int page_idx) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE, - wqs->page_vaddr[page_idx], - 
(dma_addr_t)wqs->page_paddr[page_idx]); - vfree(wqs->shadow_page_vaddr[page_idx]); -} - -/** - * cmdq_allocate_page - allocate page for cmdq - * @cmdq_pages: the pages of the cmdq queue struct to hold the page - * - * Return 0 - Success, negative - Failure - **/ -static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) -{ - return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr, - &cmdq_pages->page_paddr, - &cmdq_pages->shadow_page_vaddr, - CMDQ_PAGE_SIZE); -} - -/** - * cmdq_free_page - free page from cmdq - * @cmdq_pages: the pages of the cmdq queue struct that hold the page - * - * Return 0 - Success, negative - Failure - **/ -static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) -{ - struct hinic_hwif *hwif = cmdq_pages->hwif; - struct pci_dev *pdev = hwif->pdev; - - dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE, - cmdq_pages->page_vaddr, - (dma_addr_t)cmdq_pages->page_paddr); - vfree(cmdq_pages->shadow_page_vaddr); -} - -static int alloc_page_arrays(struct hinic_wqs *wqs) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t size; - - size = wqs->num_pages * sizeof(*wqs->page_paddr); - wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wqs->page_paddr) - return -ENOMEM; - - size = wqs->num_pages * sizeof(*wqs->page_vaddr); - wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wqs->page_vaddr) - goto err_page_vaddr; - - size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); - wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wqs->shadow_page_vaddr) - goto err_page_shadow_vaddr; - - return 0; - -err_page_shadow_vaddr: - devm_kfree(&pdev->dev, wqs->page_vaddr); - -err_page_vaddr: - devm_kfree(&pdev->dev, wqs->page_paddr); - return -ENOMEM; -} - -static void free_page_arrays(struct hinic_wqs *wqs) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - devm_kfree(&pdev->dev, wqs->shadow_page_vaddr); - devm_kfree(&pdev->dev, wqs->page_vaddr); - devm_kfree(&pdev->dev, wqs->page_paddr); -} - -static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx, - int *block_idx) -{ - int pos; - - down(&wqs->alloc_blocks_lock); - - wqs->num_free_blks--; - - if (wqs->num_free_blks < 0) { - wqs->num_free_blks++; - up(&wqs->alloc_blocks_lock); - return -ENOMEM; - } - - pos = wqs->alloc_blk_pos++; - pos &= WQS_MAX_NUM_BLOCKS - 1; - - *page_idx = wqs->free_blocks[pos].page_idx; - *block_idx = wqs->free_blocks[pos].block_idx; - - wqs->free_blocks[pos].page_idx = -1; - wqs->free_blocks[pos].block_idx = -1; - - up(&wqs->alloc_blocks_lock); - return 0; -} - -static void wqs_return_block(struct hinic_wqs *wqs, int page_idx, - int block_idx) -{ - int pos; - - down(&wqs->alloc_blocks_lock); - - pos = wqs->return_blk_pos++; - pos &= WQS_MAX_NUM_BLOCKS - 1; - - wqs->free_blocks[pos].page_idx = page_idx; - wqs->free_blocks[pos].block_idx = block_idx; - - wqs->num_free_blks++; - - up(&wqs->alloc_blocks_lock); -} - -static void init_wqs_blocks_arr(struct hinic_wqs *wqs) -{ - int page_idx, blk_idx, pos = 0; - - for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { - for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { - wqs->free_blocks[pos].page_idx = page_idx; - wqs->free_blocks[pos].block_idx = blk_idx; - pos++; - } - } - - wqs->alloc_blk_pos = 0; - wqs->return_blk_pos = pos; - wqs->num_free_blks = pos; - - sema_init(&wqs->alloc_blocks_lock, 1); -} - -/** - * hinic_wqs_alloc - allocate Work Queues set - * @wqs: Work Queue Set - * @max_wqs: maximum 
wqs to allocate - * @hwif: HW interface for use for the allocation - * - * Return 0 - Success, negative - Failure - **/ -int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs, - struct hinic_hwif *hwif) -{ - struct pci_dev *pdev = hwif->pdev; - int err, i, page_idx; - - max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE); - if (max_wqs > WQS_MAX_NUM_BLOCKS) { - dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs); - return -EINVAL; - } - - wqs->hwif = hwif; - wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE; - - if (alloc_page_arrays(wqs)) { - dev_err(&pdev->dev, - "Failed to allocate mem for page addresses\n"); - return -ENOMEM; - } - - for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { - err = wqs_allocate_page(wqs, page_idx); - if (err) { - dev_err(&pdev->dev, "Failed wq page allocation\n"); - goto err_wq_allocate_page; - } - } - - wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs), - GFP_KERNEL); - if (!wqs->free_blocks) { - err = -ENOMEM; - goto err_alloc_blocks; - } - - init_wqs_blocks_arr(wqs); - return 0; - -err_alloc_blocks: -err_wq_allocate_page: - for (i = 0; i < page_idx; i++) - wqs_free_page(wqs, i); - - free_page_arrays(wqs); - return err; -} - -/** - * hinic_wqs_free - free Work Queues set - * @wqs: Work Queue Set - **/ -void hinic_wqs_free(struct hinic_wqs *wqs) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - int page_idx; - - devm_kfree(&pdev->dev, wqs->free_blocks); - - for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) - wqs_free_page(wqs, page_idx); - - free_page_arrays(wqs); -} - -/** - * alloc_wqes_shadow - allocate WQE shadows for WQ - * @wq: WQ to allocate shadows for - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_wqes_shadow(struct hinic_wq *wq) -{ - struct hinic_hwif *hwif = wq->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t size; - - size = wq->num_q_pages * wq->max_wqe_size; - wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wq->shadow_wqe) - return -ENOMEM; - - size = wq->num_q_pages * sizeof(wq->prod_idx); - wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wq->shadow_idx) - goto err_shadow_idx; - - return 0; - -err_shadow_idx: - devm_kfree(&pdev->dev, wq->shadow_wqe); - return -ENOMEM; -} - -/** - * free_wqes_shadow - free WQE shadows of WQ - * @wq: WQ to free shadows from - **/ -static void free_wqes_shadow(struct hinic_wq *wq) -{ - struct hinic_hwif *hwif = wq->hwif; - struct pci_dev *pdev = hwif->pdev; - - devm_kfree(&pdev->dev, wq->shadow_idx); - devm_kfree(&pdev->dev, wq->shadow_wqe); -} - -/** - * free_wq_pages - free pages of WQ - * @hwif: HW interface for releasing dma addresses - * @wq: WQ to free pages from - * @num_q_pages: number pages to free - **/ -static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, - int num_q_pages) -{ - struct pci_dev *pdev = hwif->pdev; - int i; - - for (i = 0; i < num_q_pages; i++) { - void **vaddr = &wq->shadow_block_vaddr[i]; - u64 *paddr = &wq->block_vaddr[i]; - dma_addr_t dma_addr; - - dma_addr = (dma_addr_t)be64_to_cpu(*paddr); - dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, - dma_addr); - } - - free_wqes_shadow(wq); -} - -/** - * alloc_wq_pages - alloc pages for WQ - * @hwif: HW interface for allocating dma addresses - * @wq: WQ to allocate pages for - * @max_pages: maximum pages allowed - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, - int max_pages) -{ - struct pci_dev *pdev = 
hwif->pdev; - int i, err, num_q_pages; - - num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; - if (num_q_pages > max_pages) { - dev_err(&pdev->dev, "Number wq pages exceeds the limit\n"); - return -EINVAL; - } - - if (num_q_pages & (num_q_pages - 1)) { - dev_err(&pdev->dev, "Number wq pages must be power of 2\n"); - return -EINVAL; - } - - wq->num_q_pages = num_q_pages; - - err = alloc_wqes_shadow(wq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate wqe shadow\n"); - return err; - } - - for (i = 0; i < num_q_pages; i++) { - void **vaddr = &wq->shadow_block_vaddr[i]; - u64 *paddr = &wq->block_vaddr[i]; - dma_addr_t dma_addr; - - *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, - &dma_addr, GFP_KERNEL); - if (!*vaddr) { - dev_err(&pdev->dev, "Failed to allocate wq page\n"); - goto err_alloc_wq_pages; - } - - /* HW uses Big Endian Format */ - *paddr = cpu_to_be64(dma_addr); - } - - return 0; - -err_alloc_wq_pages: - free_wq_pages(wq, hwif, i); - return -ENOMEM; -} - -/** - * hinic_wq_allocate - Allocate the WQ resources from the WQS - * @wqs: WQ set from which to allocate the WQ resources - * @wq: WQ to allocate resources for it from the WQ set - * @wqebb_size: Work Queue Block Byte Size - * @wq_page_size: the page size in the Work Queue - * @q_depth: number of wqebbs in WQ - * @max_wqe_size: maximum WQE size that will be used in the WQ - * - * Return 0 - Success, negative - Failure - **/ -int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, - u16 wqebb_size, u16 wq_page_size, u16 q_depth, - u16 max_wqe_size) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 num_wqebbs_per_page; - u16 wqebb_size_shift; - int err; - - if (!is_power_of_2(wqebb_size)) { - dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); - return -EINVAL; - } - - if (wq_page_size == 0) { - dev_err(&pdev->dev, "wq_page_size must be > 0\n"); - return -EINVAL; - } - - if (q_depth & (q_depth - 1)) { - dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); - return -EINVAL; - } - - wqebb_size_shift = ilog2(wqebb_size); - num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) - >> wqebb_size_shift; - - if (!is_power_of_2(num_wqebbs_per_page)) { - dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); - return -EINVAL; - } - - wq->hwif = hwif; - - err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); - if (err) { - dev_err(&pdev->dev, "Failed to get free wqs next block\n"); - return err; - } - - wq->wqebb_size = wqebb_size; - wq->wq_page_size = wq_page_size; - wq->q_depth = q_depth; - wq->max_wqe_size = max_wqe_size; - wq->num_wqebbs_per_page = num_wqebbs_per_page; - wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); - wq->wqebb_size_shift = wqebb_size_shift; - wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); - wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); - wq->block_paddr = WQ_BASE_PADDR(wqs, wq); - - err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES); - if (err) { - dev_err(&pdev->dev, "Failed to allocate wq pages\n"); - goto err_alloc_wq_pages; - } - - atomic_set(&wq->cons_idx, 0); - atomic_set(&wq->prod_idx, 0); - atomic_set(&wq->delta, q_depth); - wq->mask = q_depth - 1; - - return 0; - -err_alloc_wq_pages: - wqs_return_block(wqs, wq->page_idx, wq->block_idx); - return err; -} - -/** - * hinic_wq_free - Free the WQ resources to the WQS - * @wqs: WQ set to free the WQ resources to it - * @wq: WQ to free its resources to the WQ set resources - **/ -void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) -{ - 
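hinic_wq_allocate() above insists that wqebb_size, q_depth, and the derived wqebbs-per-page count are all powers of two; that is what lets the hot-path index math later in the file use shifts and masks instead of divides and modulo. A minimal, self-contained userspace sketch of that derivation (values and names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wqebb_size = 64;	/* must be a power of 2 */
	uint16_t wq_page_size = 4096;	/* bytes per WQ page */
	uint16_t q_depth = 1024;	/* must be a power of 2 */
	unsigned int wqebb_shift, wqebbs_per_page;
	uint16_t mask, idx;

	/* the same invariants hinic_wq_allocate() rejects violations of */
	assert((wqebb_size & (wqebb_size - 1)) == 0);
	assert((q_depth & (q_depth - 1)) == 0);

	wqebb_shift = (unsigned int)__builtin_ctz(wqebb_size);	/* ilog2() */
	wqebbs_per_page = wq_page_size >> wqebb_shift;
	mask = q_depth - 1;

	/* masking replaces "% q_depth" for ring wraparound */
	idx = (uint16_t)((q_depth + 5) & mask);
	printf("shift=%u per_page=%u wrapped_idx=%u\n",
	       wqebb_shift, wqebbs_per_page, idx);	/* 6 64 5 */
	return 0;
}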
free_wq_pages(wq, wqs->hwif, wq->num_q_pages); - - wqs_return_block(wqs, wq->page_idx, wq->block_idx); -} - -/** - * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs - * @cmdq_pages: will hold the pages of the cmdq - * @wq: returned wqs - * @hwif: HW interface - * @cmdq_blocks: number of cmdq blocks/wq to allocate - * @wqebb_size: Work Queue Block Byte Size - * @wq_page_size: the page size in the Work Queue - * @q_depth: number of wqebbs in WQ - * @max_wqe_size: maximum WQE size that will be used in the WQ - * - * Return 0 - Success, negative - Failure - **/ -int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, struct hinic_hwif *hwif, - int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, - u16 q_depth, u16 max_wqe_size) -{ - struct pci_dev *pdev = hwif->pdev; - u16 num_wqebbs_per_page_shift; - u16 num_wqebbs_per_page; - u16 wqebb_size_shift; - int i, j, err = -ENOMEM; - - if (!is_power_of_2(wqebb_size)) { - dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); - return -EINVAL; - } - - if (wq_page_size == 0) { - dev_err(&pdev->dev, "wq_page_size must be > 0\n"); - return -EINVAL; - } - - if (q_depth & (q_depth - 1)) { - dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); - return -EINVAL; - } - - wqebb_size_shift = ilog2(wqebb_size); - num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) - >> wqebb_size_shift; - - if (!is_power_of_2(num_wqebbs_per_page)) { - dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); - return -EINVAL; - } - - cmdq_pages->hwif = hwif; - - err = cmdq_allocate_page(cmdq_pages); - if (err) { - dev_err(&pdev->dev, "Failed to allocate CMDQ page\n"); - return err; - } - num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); - - for (i = 0; i < cmdq_blocks; i++) { - wq[i].hwif = hwif; - wq[i].page_idx = 0; - wq[i].block_idx = i; - - wq[i].wqebb_size = wqebb_size; - wq[i].wq_page_size = wq_page_size; - wq[i].q_depth = q_depth; - wq[i].max_wqe_size = max_wqe_size; - wq[i].num_wqebbs_per_page = num_wqebbs_per_page; - wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift; - wq[i].wqebb_size_shift = wqebb_size_shift; - wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); - wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); - wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); - - err = alloc_wq_pages(&wq[i], cmdq_pages->hwif, - CMDQ_WQ_MAX_PAGES); - if (err) { - dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n"); - goto err_cmdq_block; - } - - atomic_set(&wq[i].cons_idx, 0); - atomic_set(&wq[i].prod_idx, 0); - atomic_set(&wq[i].delta, q_depth); - wq[i].mask = q_depth - 1; - } - - return 0; - -err_cmdq_block: - for (j = 0; j < i; j++) - free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages); - - cmdq_free_page(cmdq_pages); - return err; -} - -/** - * hinic_wqs_cmdq_free - Free wqs from cmdqs - * @cmdq_pages: hold the pages of the cmdq - * @wq: wqs to free - * @cmdq_blocks: number of wqs to free - **/ -void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, int cmdq_blocks) -{ - int i; - - for (i = 0; i < cmdq_blocks; i++) - free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages); - - cmdq_free_page(cmdq_pages); -} - -static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, - int num_wqebbs, u16 idx) -{ - void *wqebb_addr; - int i; - - for (i = 0; i < num_wqebbs; i++, idx++) { - idx = MASKED_WQE_IDX(wq, idx); - wqebb_addr = WQ_PAGE_ADDR(wq, idx) + - WQE_PAGE_OFF(wq, idx); - - memcpy(shadow_addr, wqebb_addr, wq->wqebb_size); - - shadow_addr += 
wq->wqebb_size; - } -} - -static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, - int num_wqebbs, u16 idx) -{ - void *wqebb_addr; - int i; - - for (i = 0; i < num_wqebbs; i++, idx++) { - idx = MASKED_WQE_IDX(wq, idx); - wqebb_addr = WQ_PAGE_ADDR(wq, idx) + - WQE_PAGE_OFF(wq, idx); - - memcpy(wqebb_addr, shadow_addr, wq->wqebb_size); - shadow_addr += wq->wqebb_size; - } -} - -/** - * hinic_get_wqe - get wqe ptr in the current pi and update the pi - * @wq: wq to get wqe from - * @wqe_size: wqe size - * @prod_idx: returned pi - * - * Return wqe pointer - **/ -struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *prod_idx) -{ - int curr_pg, end_pg, num_wqebbs; - u16 curr_prod_idx, end_prod_idx; - - *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); - - num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; - - if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { - atomic_add(num_wqebbs, &wq->delta); - return ERR_PTR(-EBUSY); - } - - end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx); - - end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx); - curr_prod_idx = end_prod_idx - num_wqebbs; - curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); - - /* end prod index points to the next wqebb, therefore minus 1 */ - end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1); - - curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); - end_pg = WQE_PAGE_NUM(wq, end_prod_idx); - - *prod_idx = curr_prod_idx; - - if (curr_pg != end_pg) { - void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; - - copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); - - wq->shadow_idx[curr_pg] = *prod_idx; - return shadow_addr; - } - - return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); -} - -/** - * hinic_return_wqe - return the wqe when transmit failed - * @wq: wq to return wqe - * @wqe_size: wqe size - **/ -void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) -{ - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - - atomic_sub(num_wqebbs, &wq->prod_idx); - - atomic_add(num_wqebbs, &wq->delta); -} - -/** - * hinic_put_wqe - return the wqe place to use for a new wqe - * @wq: wq to return wqe - * @wqe_size: wqe size - **/ -void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) -{ - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) - >> wq->wqebb_size_shift; - - atomic_add(num_wqebbs, &wq->cons_idx); - - atomic_add(num_wqebbs, &wq->delta); -} - -/** - * hinic_read_wqe - read wqe ptr in the current ci - * @wq: wq to get read from - * @wqe_size: wqe size - * @cons_idx: returned ci - * - * Return wqe pointer - **/ -struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *cons_idx) -{ - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) - >> wq->wqebb_size_shift; - u16 curr_cons_idx, end_cons_idx; - int curr_pg, end_pg; - - if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) - return ERR_PTR(-EBUSY); - - curr_cons_idx = atomic_read(&wq->cons_idx); - - curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); - end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); - - curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); - end_pg = WQE_PAGE_NUM(wq, end_cons_idx); - - *cons_idx = curr_cons_idx; - - if (curr_pg != end_pg) { - void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; - - copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); - return shadow_addr; - } - - return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); -} - -/** - * 
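hinic_get_wqe()/hinic_read_wqe() above fall back to a contiguous shadow buffer whenever a multi-wqebb WQE straddles a page boundary (curr_pg != end_pg); copy_wqe_to_shadow() is just a masked, page-indexed gather loop. A standalone userspace sketch of that gather, with hypothetical sizes standing in for the wqebb size and page geometry:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SZ  64	/* bytes per entry (hypothetical wqebb) */
#define PER_PAGE  4	/* entries per page, power of 2 */
#define NUM_PAGES 2	/* pages in the ring, power of 2 */
#define DEPTH     (PER_PAGE * NUM_PAGES)

/* Gather "num" ring entries starting at "idx" into a flat shadow buffer,
 * wrapping with a mask exactly as MASKED_WQE_IDX() does.
 */
static void gather_to_shadow(uint8_t pages[NUM_PAGES][PER_PAGE * ENTRY_SZ],
			     uint8_t *shadow, int num, unsigned int idx)
{
	int i;

	for (i = 0; i < num; i++, idx++) {
		idx &= DEPTH - 1;
		memcpy(shadow + (size_t)i * ENTRY_SZ,
		       &pages[idx / PER_PAGE][(idx % PER_PAGE) * ENTRY_SZ],
		       ENTRY_SZ);
	}
}

int main(void)
{
	uint8_t pages[NUM_PAGES][PER_PAGE * ENTRY_SZ] = {{0}};
	uint8_t shadow[2 * ENTRY_SZ];

	pages[0][3 * ENTRY_SZ] = 0xAA;	/* entry 3, last one on page 0 */
	pages[1][0] = 0xBB;		/* entry 4, first one on page 1 */
	gather_to_shadow(pages, shadow, 2, 3);	/* spans the page boundary */
	printf("%02x %02x\n", shadow[0], shadow[ENTRY_SZ]);	/* aa bb */
	return 0;
}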
hinic_read_wqe_direct - read wqe directly from ci position - * @wq: wq - * @cons_idx: ci position - * - * Return wqe - **/ -struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) -{ - return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx); -} - -/** - * wqe_shadow - check if a wqe is shadow - * @wq: wq of the wqe - * @wqe: the wqe for shadow checking - * - * Return true - shadow, false - Not shadow - **/ -static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe) -{ - size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; - - return WQE_IN_RANGE(wqe, wq->shadow_wqe, - &wq->shadow_wqe[wqe_shadow_size]); -} - -/** - * hinic_write_wqe - write the wqe to the wq - * @wq: wq to write wqe to - * @wqe: wqe to write - * @wqe_size: wqe size - **/ -void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, - unsigned int wqe_size) -{ - int curr_pg, num_wqebbs; - void *shadow_addr; - u16 prod_idx; - - if (wqe_shadow(wq, wqe)) { - curr_pg = WQE_SHADOW_PAGE(wq, wqe); - - prod_idx = wq->shadow_idx[curr_pg]; - num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; - - copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx); - } -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h deleted file mode 100644 index 811eef744140..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ /dev/null @@ -1,111 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_WQ_H -#define HINIC_HW_WQ_H - -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" - -struct hinic_free_block { - int page_idx; - int block_idx; -}; - -struct hinic_wq { - struct hinic_hwif *hwif; - - int page_idx; - int block_idx; - - u16 wqebb_size; - u16 wq_page_size; - u16 q_depth; - u16 max_wqe_size; - u16 num_wqebbs_per_page; - u16 wqebbs_per_page_shift; - u16 wqebb_size_shift; - /* The addresses are 64 bit in the HW */ - u64 block_paddr; - void **shadow_block_vaddr; - u64 *block_vaddr; - - int num_q_pages; - u8 *shadow_wqe; - u16 *shadow_idx; - - atomic_t cons_idx; - atomic_t prod_idx; - atomic_t delta; - u16 mask; -}; - -struct hinic_wqs { - struct hinic_hwif *hwif; - int num_pages; - - /* The addresses are 64 bit in the HW */ - u64 *page_paddr; - u64 **page_vaddr; - void ***shadow_page_vaddr; - - struct hinic_free_block *free_blocks; - int alloc_blk_pos; - int return_blk_pos; - int num_free_blks; - - /* Lock for getting a free block from the WQ set */ - struct semaphore alloc_blocks_lock; -}; - -struct hinic_cmdq_pages { - /* The addresses are 64 bit in the HW */ - u64 page_paddr; - u64 *page_vaddr; - void **shadow_page_vaddr; - - struct hinic_hwif *hwif; -}; - -int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, struct hinic_hwif *hwif, - int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, - u16 q_depth, u16 max_wqe_size); - -void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, int cmdq_blocks); - -int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, - struct hinic_hwif *hwif); - -void hinic_wqs_free(struct hinic_wqs *wqs); - -int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, - u16 wqebb_size, u16 wq_page_size, u16 q_depth, - u16 max_wqe_size); - -void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq 
*wq); - -struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *prod_idx); - -void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size); - -void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); - -struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *cons_idx); - -struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx); - -void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, - unsigned int wqe_size); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h deleted file mode 100644 index f4b6d2c1061f..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h +++ /dev/null @@ -1,455 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_HW_WQE_H -#define HINIC_HW_WQE_H - -#include "hinic_common.h" - -#define HINIC_CMDQ_CTRL_PI_SHIFT 0 -#define HINIC_CMDQ_CTRL_CMD_SHIFT 16 -#define HINIC_CMDQ_CTRL_MOD_SHIFT 24 -#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29 -#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 - -#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF -#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF -#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F -#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3 -#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1 - -#define HINIC_CMDQ_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \ - << HINIC_CMDQ_CTRL_##member##_SHIFT) - -#define HINIC_CMDQ_CTRL_GET(val, member) \ - (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \ - & HINIC_CMDQ_CTRL_##member##_MASK) - -#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 -#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 -#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 -#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31 - -#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1 -#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3 -#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3 -#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1 - -#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \ - (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \ - << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) - -#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \ - (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \ - & HINIC_CMDQ_WQE_HEADER_##member##_MASK) - -#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 -#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 -#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 -#define HINIC_SQ_CTRL_LEN_SHIFT 29 - -#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF -#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F -#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1 -#define HINIC_SQ_CTRL_LEN_MASK 0x3 - -#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 -#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 -#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 -#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 -#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 -#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 -#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 -#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 - -#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFF -#define 
HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1 -#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1 -#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1 -#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF -#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1 -#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK 0x1 -#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7 - -#define HINIC_SQ_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \ - << HINIC_SQ_CTRL_##member##_SHIFT) - -#define HINIC_SQ_CTRL_GET(val, member) \ - (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \ - & HINIC_SQ_CTRL_##member##_MASK) - -#define HINIC_SQ_CTRL_CLEAR(val, member) \ - ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \ - << HINIC_SQ_CTRL_##member##_SHIFT))) - -#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 -#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8 -#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 -#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 -#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15 -#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 - -#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3 -#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3 -#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1 -#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1 -#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF - -#define HINIC_SQ_TASK_INFO0_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \ - HINIC_SQ_TASK_INFO0_##member##_SHIFT) - -/* 8 bits reserved */ -#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8 -#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16 -#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24 - -/* 8 bits reserved */ -#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF -#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFF - -#define HINIC_SQ_TASK_INFO1_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \ - HINIC_SQ_TASK_INFO1_##member##_SHIFT) - -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0 -#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8 -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24 -/* 8 bits reserved */ - -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3 -/* 8 bits reserved */ - -#define HINIC_SQ_TASK_INFO2_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \ - HINIC_SQ_TASK_INFO2_##member##_SHIFT) - -/* 31 bits reserved */ -#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31 - -/* 31 bits reserved */ -#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1 - -#define HINIC_SQ_TASK_INFO4_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \ - HINIC_SQ_TASK_INFO4_##member##_SHIFT) - -#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 - -#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1 - -#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 - -#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU - -#define HINIC_RQ_CQE_STATUS_GET(val, member) \ - (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \ - HINIC_RQ_CQE_STATUS_##member##_MASK) - -#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \ - ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \ - HINIC_RQ_CQE_STATUS_##member##_SHIFT))) - -#define 
HINIC_RQ_CQE_SGE_LEN_SHIFT 16 - -#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF - -#define HINIC_RQ_CQE_SGE_GET(val, member) \ - (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \ - HINIC_RQ_CQE_SGE_##member##_MASK) - -#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 -#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 -#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27 -#define HINIC_RQ_CTRL_LEN_SHIFT 29 - -#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF -#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1 -#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3 -#define HINIC_RQ_CTRL_LEN_MASK 0x3 - -#define HINIC_RQ_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \ - HINIC_RQ_CTRL_##member##_SHIFT) - -#define HINIC_SQ_WQE_SIZE(nr_sges) \ - (sizeof(struct hinic_sq_ctrl) + \ - sizeof(struct hinic_sq_task) + \ - (nr_sges) * sizeof(struct hinic_sq_bufdesc)) - -#define HINIC_SCMD_DATA_LEN 16 - -#define HINIC_MAX_SQ_BUFDESCS 17 - -#define HINIC_SQ_WQE_MAX_SIZE 320 -#define HINIC_RQ_WQE_SIZE 32 - -#define HINIC_MSS_DEFAULT 0x3E00 -#define HINIC_MSS_MIN 0x50 - -#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 -#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU - -#define RQ_CQE_STATUS_GET(val, member) (((val) >> \ - RQ_CQE_STATUS_##member##_SHIFT) & \ - RQ_CQE_STATUS_##member##_MASK) - -#define HINIC_GET_RX_NUM_LRO(status) \ - RQ_CQE_STATUS_GET(status, NUM_LRO) - -#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 -#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU -#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 -#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U - -#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \ - RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ - RQ_CQE_OFFOLAD_TYPE_##member##_MASK) - -#define HINIC_GET_RX_PKT_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) - -#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) - -#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU -#define RQ_CQE_SGE_VLAN_SHIFT 0 - -#define RQ_CQE_SGE_GET(val, member) (((val) >> \ - RQ_CQE_SGE_##member##_SHIFT) & \ - RQ_CQE_SGE_##member##_MASK) - -#define HINIC_GET_RX_VLAN_TAG(vlan_len) \ - RQ_CQE_SGE_GET(vlan_len, VLAN) - -#define HINIC_RSS_TYPE_VALID_SHIFT 23 -#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 -#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25 -#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26 -#define HINIC_RSS_TYPE_IPV6_SHIFT 27 -#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28 -#define HINIC_RSS_TYPE_IPV4_SHIFT 29 -#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30 -#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31 - -#define HINIC_RSS_TYPE_SET(val, member) \ - (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT) - -#define HINIC_RSS_TYPE_GET(val, member) \ - (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1) - -enum hinic_l4offload_type { - HINIC_L4_OFF_DISABLE = 0, - HINIC_TCP_OFFLOAD_ENABLE = 1, - HINIC_SCTP_OFFLOAD_ENABLE = 2, - HINIC_UDP_OFFLOAD_ENABLE = 3, -}; - -enum hinic_vlan_offload { - HINIC_VLAN_OFF_DISABLE = 0, - HINIC_VLAN_OFF_ENABLE = 1, -}; - -enum hinic_pkt_parsed { - HINIC_PKT_NOT_PARSED = 0, - HINIC_PKT_PARSED = 1, -}; - -enum hinic_l3_offload_type { - L3TYPE_UNKNOWN = 0, - IPV6_PKT = 1, - IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, - IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, -}; - -enum hinic_l4_offload_type { - OFFLOAD_DISABLE = 0, - TCP_OFFLOAD_ENABLE = 1, - SCTP_OFFLOAD_ENABLE = 2, - UDP_OFFLOAD_ENABLE = 3, -}; - -enum hinic_l4_tunnel_type { - NOT_TUNNEL, - TUNNEL_UDP_NO_CSUM, - TUNNEL_UDP_CSUM, -}; - -enum hinic_outer_l3type { - HINIC_OUTER_L3TYPE_UNKNOWN = 0, - HINIC_OUTER_L3TYPE_IPV6 = 1, - 
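Every accessor in this header follows the same shape: mask the value to the field width and shift it to the field position (SET), invert the two steps to decode (GET), and zero the field in place (CLEAR). A small self-contained illustration of the pattern (the DEMO_* names are made up for the example):

#include <inttypes.h>
#include <stdio.h>

#define DEMO_FIELD_SHIFT	16
#define DEMO_FIELD_MASK		0xFFu

#define DEMO_FIELD_SET(val)	(((uint32_t)(val) & DEMO_FIELD_MASK) << DEMO_FIELD_SHIFT)
#define DEMO_FIELD_GET(reg)	(((reg) >> DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK)
#define DEMO_FIELD_CLEAR(reg)	((reg) & ~(DEMO_FIELD_MASK << DEMO_FIELD_SHIFT))

int main(void)
{
	uint32_t reg = 0xFFFFFFFFu;

	/* clear the 8-bit field at bit 16, then write 0x3C into it */
	reg = DEMO_FIELD_CLEAR(reg) | DEMO_FIELD_SET(0x3C);
	printf("reg=0x%08" PRIx32 " field=0x%02" PRIx32 "\n",
	       reg, DEMO_FIELD_GET(reg));	/* reg=0xff3cffff field=0x3c */
	return 0;
}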
HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2, - HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3, -}; - -enum hinic_media_type { - HINIC_MEDIA_UNKNOWN = 0, -}; - -enum hinic_l2type { - HINIC_L2TYPE_ETH = 0, -}; - -enum hinc_tunnel_l4type { - HINIC_TUNNEL_L4TYPE_UNKNOWN = 0, -}; - -struct hinic_cmdq_header { - u32 header_info; - u32 saved_data; -}; - -struct hinic_status { - u32 status_info; -}; - -struct hinic_ctrl { - u32 ctrl_info; -}; - -struct hinic_sge_resp { - struct hinic_sge sge; - u32 rsvd; -}; - -struct hinic_cmdq_completion { - /* HW Format */ - union { - struct hinic_sge_resp sge_resp; - u64 direct_resp; - }; -}; - -struct hinic_scmd_bufdesc { - u32 buf_len; - u32 rsvd; - u8 data[HINIC_SCMD_DATA_LEN]; -}; - -struct hinic_lcmd_bufdesc { - struct hinic_sge sge; - u32 rsvd1; - u64 rsvd2; - u64 rsvd3; -}; - -struct hinic_cmdq_wqe_scmd { - struct hinic_cmdq_header header; - u64 rsvd; - struct hinic_status status; - struct hinic_ctrl ctrl; - struct hinic_cmdq_completion completion; - struct hinic_scmd_bufdesc buf_desc; -}; - -struct hinic_cmdq_wqe_lcmd { - struct hinic_cmdq_header header; - struct hinic_status status; - struct hinic_ctrl ctrl; - struct hinic_cmdq_completion completion; - struct hinic_lcmd_bufdesc buf_desc; -}; - -struct hinic_cmdq_direct_wqe { - struct hinic_cmdq_wqe_scmd wqe_scmd; -}; - -struct hinic_cmdq_wqe { - /* HW Format */ - union { - struct hinic_cmdq_direct_wqe direct_wqe; - struct hinic_cmdq_wqe_lcmd wqe_lcmd; - }; -}; - -struct hinic_sq_ctrl { - u32 ctrl_info; - u32 queue_info; -}; - -struct hinic_sq_task { - u32 pkt_info0; - u32 pkt_info1; - u32 pkt_info2; - u32 ufo_v6_identify; - u32 pkt_info4; - u32 zero_pad; -}; - -struct hinic_sq_bufdesc { - struct hinic_sge sge; - u32 rsvd; -}; - -struct hinic_sq_wqe { - struct hinic_sq_ctrl ctrl; - struct hinic_sq_task task; - struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS]; -}; - -struct hinic_rq_cqe { - u32 status; - u32 len; - - u32 offload_type; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; - u32 rsvd6; - u32 rsvd7; -}; - -struct hinic_rq_ctrl { - u32 ctrl_info; -}; - -struct hinic_rq_cqe_sect { - struct hinic_sge sge; - u32 rsvd; -}; - -struct hinic_rq_bufdesc { - u32 hi_addr; - u32 lo_addr; -}; - -struct hinic_rq_wqe { - struct hinic_rq_ctrl ctrl; - u32 rsvd; - struct hinic_rq_cqe_sect cqe_sect; - struct hinic_rq_bufdesc buf_desc; -}; - -struct hinic_hw_wqe { - /* HW Format */ - union { - struct hinic_cmdq_wqe cmdq_wqe; - struct hinic_sq_wqe sq_wqe; - struct hinic_rq_wqe rq_wqe; - }; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c new file mode 100644 index 000000000000..e37946268d0b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c @@ -0,0 +1,5355 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
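HINIC_SQ_WQE_SIZE() above composes a send WQE as a fixed ctrl+task prefix plus one buffer descriptor per SGE. With the declared layouts (and assuming the compiler adds no padding, which holds for these all-u32 structs) that is 32 + 16 * nr_sges bytes, so even HINIC_MAX_SQ_BUFDESCS (17) descriptors come to 304 bytes, inside the 320-byte HINIC_SQ_WQE_MAX_SIZE cap. A quick standalone check of that arithmetic, using mirror structs rather than the driver's:

#include <assert.h>
#include <stdint.h>

/* Mirror layouts for size arithmetic only; not the driver's structs. */
struct demo_sq_ctrl	{ uint32_t ctrl_info, queue_info; };
struct demo_sq_task	{ uint32_t info[6]; };
struct demo_sq_bufdesc	{ uint32_t hi_addr, lo_addr, len, rsvd; };

#define DEMO_SQ_WQE_SIZE(nr_sges) \
	(sizeof(struct demo_sq_ctrl) + sizeof(struct demo_sq_task) + \
	 (nr_sges) * sizeof(struct demo_sq_bufdesc))

int main(void)
{
	assert(DEMO_SQ_WQE_SIZE(1) == 48);	/* 32-byte prefix + one SGE */
	assert(DEMO_SQ_WQE_SIZE(17) == 304);	/* max SGEs, under the 320-byte cap */
	return 0;
}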
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM] " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_msix_attr.h" +#include "hinic_nic_io.h" +#include "hinic_eqs.h" +#include "hinic_api_cmd.h" +#include "hinic_mgmt.h" +#include "hinic_mbox.h" +#include "hinic_wq.h" +#include "hinic_cmdq.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_multi_host_mgmt.h" + + +#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 + +#define HINIC_WAIT_IO_STATUS_TIMEOUT 100 + +#define HINIC_FLR_TIMEOUT 1000 + +#define HINIC_HT_GPA_PAGE_SIZE 4096UL + +#define HINIC_PPF_HT_GPA_SET_RETRY_TIMES 10 + +#define HINIC_OK_FLAG_OK 0 + +#define HINIC_OK_FLAG_FAILED 1 + +#define HINIC_GET_SFP_INFO_REAL_TIME 0x1 + +#define HINIC_GLB_SO_RO_CFG_SHIFT 0x0 +#define HINIC_GLB_SO_RO_CFG_MASK 0x1 +#define HINIC_DISABLE_ORDER 0 +#define HINIC_GLB_DMA_SO_RO_GET(val, member) \ + (((val) >> HINIC_GLB_##member##_SHIFT) & HINIC_GLB_##member##_MASK) + +#define HINIC_GLB_DMA_SO_R0_CLEAR(val, member) \ + ((val) & (~(HINIC_GLB_##member##_MASK << HINIC_GLB_##member##_SHIFT))) + +#define HINIC_GLB_DMA_SO_R0_SET(val, member) \ + (((val) & HINIC_GLB_##member##_MASK) << HINIC_GLB_##member##_SHIFT) + +#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0 +#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1 +#define HINIC_ACTIVE_STATUS_MASK 0x80000000 +#define HINIC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF + +#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK) + +#define HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \ + ((val) & (~(HINIC_##member##_MASK << HINIC_##member##_SHIFT))) + +#define HINIC_SET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) & HINIC_##member##_MASK) << HINIC_##member##_SHIFT) + +#define HINIC_BOARD_IS_PHY(hwdev) \ + ((hwdev)->board_info.board_type == 4 && \ + (hwdev)->board_info.board_id == 24) + +struct comm_info_ht_gpa_set { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 rsvd1; + u32 rsvd2; + + u64 page_pa0; + u64 page_pa1; +}; + +struct comm_info_eqm_fix { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 chunk_num; + u32 search_gpa_num; +}; + +struct comm_info_eqm_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 ppf_id; + u32 page_size; + u32 valid; +}; + +struct comm_info_eqm_search_gpa { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 start_idx; + u32 num; + u32 resv0; + u32 resv1; + u64 gpa_hi52[0]; +}; + +struct hinic_cons_idx_attr { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 sq_id; + u64 ci_addr; +}; + +struct hinic_clear_doorbell { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hinic_clear_resource { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hinic_msix_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 msix_index; + u8 pending_cnt; + u8 coalesct_timer_cnt; + u8 lli_tmier_cnt; + u8 lli_credit_cnt; + u8 resend_timer_cnt; + u8 rsvd1[3]; +}; + +enum func_tmr_bitmap_status { + FUNC_TMR_BITMAP_DISABLE, + FUNC_TMR_BITMAP_ENABLE, +}; + +struct
hinic_func_tmr_bitmap_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 op_id; /* 0:start; 1:stop */ + u8 ppf_idx; + u32 rsvd1; +}; + +struct hinic_ppf_tmr_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 ppf_idx; + u8 op_id; /* 0: stop timer; 1:start timer */ + u8 rsvd1[2]; + u32 rsvd2; +}; + +struct hinic_cmd_set_res_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + +int hinic_hw_rx_buf_size[] = { + HINIC_RX_BUF_SIZE_32B, + HINIC_RX_BUF_SIZE_64B, + HINIC_RX_BUF_SIZE_96B, + HINIC_RX_BUF_SIZE_128B, + HINIC_RX_BUF_SIZE_192B, + HINIC_RX_BUF_SIZE_256B, + HINIC_RX_BUF_SIZE_384B, + HINIC_RX_BUF_SIZE_512B, + HINIC_RX_BUF_SIZE_768B, + HINIC_RX_BUF_SIZE_1K, + HINIC_RX_BUF_SIZE_1_5K, + HINIC_RX_BUF_SIZE_2K, + HINIC_RX_BUF_SIZE_3K, + HINIC_RX_BUF_SIZE_4K, + HINIC_RX_BUF_SIZE_8K, + HINIC_RX_BUF_SIZE_16K, +}; + +/* vf-pf dma attr table */ +struct hinic_vf_dma_attr_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 func_dma_entry_num; + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u8 resv1[3]; +}; + +struct hinic_led_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port; + u8 type; + u8 mode; + u8 reset; +}; + +struct hinic_comm_board_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_board_info info; + + u32 rsvd1[4]; +}; + +#define PHY_DOING_INIT_TIMEOUT (15 * 1000) + +struct hinic_phy_init_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 init_status; + u8 rsvd1[3]; +}; + +enum phy_init_status_type { + PHY_INIT_DOING = 0, + PHY_INIT_SUCCESS = 1, + PHY_INIT_FAIL = 2, + PHY_NONSUPPORT = 3, +}; + +struct hinic_update_active { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 update_flag; + u32 update_status; +}; + +enum hinic_bios_cfg_op_code { + HINIC_BIOS_CFG_GET = 0, + HINIC_BIOS_CFG_PF_BW_LIMIT = 0x1 << 6, +}; + +struct hinic_bios_cfg_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 op_code; + u32 signature; + + u8 rsvd1[12]; + u32 pf_bw_limit; + u8 rsvd2[5]; + + u8 func_valid; + u8 func_idx; + u8 rsvd3; +}; + +struct hinic_mgmt_watchdog_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 curr_time_h; + u32 curr_time_l; + u32 task_id; + u32 rsv; + + u32 reg[13]; + u32 pc; + u32 lr; + u32 cpsr; + + u32 stack_top; + u32 stack_bottom; + u32 sp; + u32 curr_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 data[1024]; +}; + +struct hinic_fmw_act_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 rsvd1[5]; +}; + +struct hinic_ppf_state { + u8 status; + u8 version; + u8 rsvd0[6]; + u8 ppf_state; + u8 rsvd1[3]; +}; + +#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +struct hinic_wq_page_size { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + + u32 rsvd1; +}; + +#define MAX_PCIE_DFX_BUF_SIZE (1024) + +struct hinic_pcie_dfx_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + int len; + u32 rsvd; +}; + +struct hinic_pcie_dfx_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 host_id; + u8 last; + u8 rsvd[2]; + u32 offset; + + u8 data[MAX_PCIE_DFX_BUF_SIZE]; +}; + +struct hinic_hw_pf_infos_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_hw_pf_infos infos; +}; + +enum hinic_sdi_mode_ops { + HINIC_SDI_INFO_SET = 1U << 0, /* 1-save, 0-read */ + HINIC_SDI_INFO_MODE = 1U << 1, +}; + +struct hinic_sdi_mode_info { + u8 status; + u8 version; + u8 rsvd0[6]; + 
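All of the management command buffers in this file open with the same 8-byte preamble — status, version, six reserved bytes — which the firmware echoes back with status filled in; callers then check out_size and status after each synchronous call. A sketch of that convention (the struct and helper below are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdint.h>

/* The shared reply preamble every mgmt command struct above starts with. */
struct demo_msg_head {
	uint8_t status;		/* 0 on success, error code otherwise */
	uint8_t version;
	uint8_t rsvd0[6];
};

struct demo_cmd {
	struct demo_msg_head head;
	uint16_t func_idx;
	uint8_t payload[6];
};

/* Typical post-call validation: reply sized as expected and status clear. */
static bool demo_reply_ok(const struct demo_cmd *reply, uint16_t out_size)
{
	return out_size >= sizeof(*reply) && reply->head.status == 0;
}

int main(void)
{
	struct demo_cmd reply = { .head = { .status = 0 } };

	return demo_reply_ok(&reply, sizeof(reply)) ? 0 : 1;
}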
+ /* Op-Code: + * Bit0: 0 - read configuration, 1 - write configuration + * Bit1: 0 - ignored, 1 - get/set SDI Mode + */ + u32 opcode; + u32 signature; + u16 cur_sdi_mode; + u16 cfg_sdi_mode; + + u32 rsvd1[29]; +}; + +struct hinic_reg_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 reg_addr; + u32 val_length; + + u32 data[2]; +}; + +#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \ + HINIC_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \ + << HINIC_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define HINIC_PCIE_ST_DISABLE 0 +#define HINIC_PCIE_AT_DISABLE 0 +#define HINIC_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +struct hinic_cmd_fault_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_fault_event event; +}; + +#define HEARTBEAT_DRV_MAGIC_ACK 0x5A5A5A5A + +struct hinic_heartbeat_support { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 ppf_id; + u8 pf_issupport; + u8 mgmt_issupport; + u8 rsvd1[5]; +}; + +struct hinic_heartbeat_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 mgmt_init_state; + u8 rsvd1[3]; + u32 heart; /* increased every event */ + u32 drv_heart; +}; + +static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out); +static void hinic_set_mgmt_channel_status(void *handle, bool state); +static inline void __set_heartbeat_ehd_detect_delay(struct hinic_hwdev *hwdev, + u32 delay_ms); + +#define HINIC_QUEUE_MIN_DEPTH 6 +#define HINIC_QUEUE_MAX_DEPTH 12 +#define HINIC_MAX_RX_BUFFER_SIZE 15 + +#define CAP_INFO_MAC_LEN 512 +#define VENDOR_MAX_LEN 17 + +static bool check_root_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_root_ctxt *root_ctxt; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + root_ctxt = (struct hinic_root_ctxt *)buf_in; + + if (root_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) + return false; + + if (root_ctxt->set_cmdq_depth) { + if (root_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH && + root_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH) + return true; + + return false; + } + + if (root_ctxt->rq_depth >= HINIC_QUEUE_MIN_DEPTH && + root_ctxt->rq_depth <= HINIC_QUEUE_MAX_DEPTH && + root_ctxt->sq_depth >= HINIC_QUEUE_MIN_DEPTH && + root_ctxt->sq_depth <= HINIC_QUEUE_MAX_DEPTH && + root_ctxt->rx_buf_sz <= HINIC_MAX_RX_BUFFER_SIZE) + return true; + + if (!root_ctxt->rq_depth && !root_ctxt->sq_depth && + !root_ctxt->rx_buf_sz) + return true; + + return false; +} + +static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + return hinic_cmdq_check_vf_ctxt(hwdev, buf_in); +} + +static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_wq_page_size *page_size_info = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + if 
(page_size_info->ppf_idx != hinic_ppf_idx(hwdev)) + return false; + + if (((1U << page_size_info->page_size) * 0x1000) != + HINIC_DEFAULT_WQ_PAGE_SIZE) + return false; + + return true; +} + +static bool __mbox_check_tmr_bitmap(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_func_tmr_bitmap_op *bitmap_op = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + if (bitmap_op->op_id == FUNC_TMR_BITMAP_ENABLE) { + if (!hinic_get_ppf_status(hwdev)) { + sdk_err(hwdev->dev_hdl, "PPF timer is not initialized, can't enable timer bitmap for function %d\n", + func_idx); + return false; + } + } + + if (bitmap_op->ppf_idx != hinic_ppf_idx(hwdev)) + return false; + + return true; +} + +struct vf_cmd_check_handle hw_cmd_support_vf[] = { + {HINIC_MGMT_CMD_START_FLR, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt}, + {HINIC_MGMT_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt}, + {HINIC_MGMT_CMD_VAT_SET, check_root_ctxt}, + {HINIC_MGMT_CMD_VAT_GET, check_root_ctxt}, + {HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B}, + + {HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B}, + + {HINIC_MGMT_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_PAGESIZE_SET, check_set_wq_page_size}, + {HINIC_MGMT_CMD_PAGESIZE_GET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_GET_PPF_STATE, NULL}, + {HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET, __mbox_check_tmr_bitmap}, + {HINIC_MGMT_CMD_GET_BOARD_INFO, NULL}, + {HINIC_MGMT_CMD_GET_SDI_MODE, NULL}, +}; + +struct hinic_mgmt_status_log { + u8 status; + const char *log; +}; + +struct hinic_mgmt_status_log mgmt_status_log[] = { + {HINIC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"}, + {HINIC_MGMT_STATUS_ERR_FAILED, "Operation failed"}, + {HINIC_MGMT_STATUS_ERR_PORT, "Invalid port"}, + {HINIC_MGMT_STATUS_ERR_TIMEOUT, "Operation timed out"}, + {HINIC_MGMT_STATUS_ERR_NOMATCH, "Version mismatch"}, + {HINIC_MGMT_STATUS_ERR_EXIST, "Entry exists"}, + {HINIC_MGMT_STATUS_ERR_NOMEM, "Out of memory"}, + {HINIC_MGMT_STATUS_ERR_INIT, "Feature not initialized"}, + {HINIC_MGMT_STATUS_ERR_FAULT, "Invalid address"}, + {HINIC_MGMT_STATUS_ERR_PERM, "Operation not permitted"}, + {HINIC_MGMT_STATUS_ERR_EMPTY, "Table empty"}, + {HINIC_MGMT_STATUS_ERR_FULL, "Table full"}, + {HINIC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"}, + {HINIC_MGMT_STATUS_ERR_BUSY, "Device or resource busy"}, + {HINIC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation"}, + {HINIC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"}, + {HINIC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"}, + {HINIC_MGMT_STATUS_ERR_CRC, "CRC check failed"}, + {HINIC_MGMT_STATUS_ERR_NXIO, "No such device or address"}, + {HINIC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback failed"}, + {HINIC_MGMT_STATUS_ERR_LEN, "Length too short or too long"}, + {HINIC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"}, +}; + +static void __print_status_info(struct hinic_hwdev *dev, + enum hinic_mod_type mod, u8 cmd, int index) +{ + if (mod == HINIC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd,
mgmt_status_log[index].log); + } else if (mod == HINIC_MOD_L2NIC || + mod == HINIC_MOD_HILINK) { + if (HINIC_IS_VF(dev) && (cmd == HINIC_PORT_CMD_SET_MAC || cmd == + HINIC_PORT_CMD_DEL_MAC || cmd == + HINIC_PORT_CMD_UPDATE_MAC) && + (mgmt_status_log[index].status == HINIC_PF_SET_VF_ALREADY)) + return; + + nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } +} + +static bool hinic_status_need_special_handle(enum hinic_mod_type mod, + u8 cmd, u8 status) +{ + if (mod == HINIC_MOD_L2NIC) { + /* optical module isn't plugged in */ + if (((cmd == HINIC_PORT_CMD_GET_STD_SFP_INFO) || + (cmd == HINIC_PORT_CMD_GET_SFP_INFO)) && + (status == HINIC_MGMT_STATUS_ERR_NXIO)) + return true; + + if ((cmd == HINIC_PORT_CMD_SET_MAC || + cmd == HINIC_PORT_CMD_UPDATE_MAC) && + status == HINIC_MGMT_STATUS_ERR_EXIST) + return true; + } + + return false; +} + +static void hinic_print_status_info(void *hwdev, enum hinic_mod_type mod, + u8 cmd, const void *buf_out) +{ + struct hinic_hwdev *dev = hwdev; + int i, size; + u8 status; + + if (!buf_out) + return; + + if (mod != HINIC_MOD_COMM && mod != HINIC_MOD_L2NIC && + mod != HINIC_MOD_HILINK) + return; + + status = *(u8 *)buf_out; + + if (!status) + return; + + if (hinic_status_need_special_handle(mod, cmd, status)) + return; + + size = sizeof(mgmt_status_log) / sizeof(mgmt_status_log[0]); + for (i = 0; i < size; i++) { + if (status == mgmt_status_log[i].status) { + __print_status_info(dev, mod, cmd, i); + return; + } + } + + if (mod == HINIC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } else if (mod == HINIC_MOD_L2NIC || mod == HINIC_MOD_HILINK) { + nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } +} + +void hinic_set_chip_present(void *hwdev) +{ + ((struct hinic_hwdev *)hwdev)->chip_present_flag = HINIC_CHIP_PRESENT; +} + +void hinic_set_chip_absent(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + sdk_err(dev->dev_hdl, "Card not present\n"); + dev->chip_present_flag = HINIC_CHIP_ABSENT; +} + +int hinic_get_chip_present_flag(void *hwdev) +{ + int flag; + + if (!hwdev) + return -EINVAL; + flag = ((struct hinic_hwdev *)hwdev)->chip_present_flag; + return flag; +} +EXPORT_SYMBOL(hinic_get_chip_present_flag); + +static void hinic_set_fast_recycle_status(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + sdk_err(dev->dev_hdl, "Enter fast recycle status\n"); + dev->chip_present_flag = HINIC_CHIP_ABSENT; +} + +void hinic_force_complete_all(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + struct hinic_recv_msg *recv_resp_msg; + + set_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state); + + if (hinic_func_type(dev) != TYPE_VF && + hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_MGMT_INITED)) { + recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt; + if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_resp_msg->recv_done); + dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT; + } + } + + /* only flush sync cmdq to avoid blocking remove */ + if (hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_CMDQ_INITED)) + hinic_cmdq_flush_cmd(hwdev, + &(dev->cmdqs->cmdq[HINIC_CMDQ_SYNC])); + + clear_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state); +} + +void hinic_detect_hw_present(void *hwdev) +{ + u32 addr, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr); + if 
(attr1 == HINIC_PCIE_LINK_DOWN) { + hinic_set_chip_absent(hwdev); + hinic_force_complete_all(hwdev); + } +} + +void hinic_record_pcie_error(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +static int __func_send_mbox(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + int err; + + if (hinic_func_type(hwdev) == TYPE_VF) + err = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size, timeout); + else if (NEED_MBOX_FORWARD(hwdev)) + err = hinic_mbox_to_host_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, + timeout); + else + err = -EFAULT; + + return err; +} + +static int __pf_to_mgmt_pre_handle(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd) +{ + if (hinic_get_mgmt_channel_status(hwdev)) { + if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC || + mod == HINIC_MOD_CFGM || mod == HINIC_MOD_HILINK) + return HINIC_DEV_BUSY_ACTIVE_FW; + else + return -EBUSY; + } + + /* Mark the channel invalid; sending other cmds is not allowed */ + if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_ACTIVATE_FW) { + hinic_set_mgmt_channel_status(hwdev, true); + /* stop enhanced heartbeat detection temporarily; it will + * restart on the firmware active event once mgmt is reset + */ + __set_heartbeat_ehd_detect_delay(hwdev, + HINIC_DEV_ACTIVE_FW_TIMEOUT); + } + + return 0; +} + +static void __pf_to_mgmt_after_handle(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + int sw_status, void *mgmt_status) +{ + /* if activating the fw failed, set the channel valid again */ + if (mod == HINIC_MOD_COMM && + cmd == HINIC_MGMT_CMD_ACTIVATE_FW) { + if (sw_status) + hinic_set_mgmt_channel_status(hwdev, false); + else + hinic_enable_mgmt_channel(hwdev, mgmt_status); + } +} + +int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!((struct hinic_hwdev *)hwdev)->chip_present_flag) + return -EPERM; + + if (NEED_MBOX_FORWARD(dev)) { + if (!hinic_is_hwdev_mod_inited(hwdev, + HINIC_HWDEV_MBOX_INITED)) { + return -EPERM; + } + + err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } else { + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) + return -EPERM; + + if (in_size > HINIC_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd); + if (err) + return err; + + err = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + __pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out); + } + + return err; +} + +static bool is_sfp_info_cmd_cached(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cmd_get_sfp_qsfp_info *sfp_info = NULL; + struct hinic_port_routine_cmd *rt_cmd = NULL; + struct card_node *chip_node = hwdev->chip_node; + + sfp_info = buf_in; + if (sfp_info->port_id >= HINIC_MAX_PORT_ID || + *out_size < sizeof(*sfp_info)) + return false; + + if (sfp_info->version == HINIC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->sfp_info, sizeof(*sfp_info)); + mutex_unlock(&chip_node->sfp_mutex); + +
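__pf_to_mgmt_pre_handle()/__pf_to_mgmt_after_handle() above implement a simple gate around firmware activation: before an ACTIVATE_FW command the management channel is flagged busy, so a whitelisted set of modules gets the retryable HINIC_DEV_BUSY_ACTIVE_FW status and everyone else gets -EBUSY, and the gate reopens either on a send failure or once the firmware acks. A compressed sketch of that state machine (the flag and helpers are stand-ins, not the driver's):

#include <errno.h>
#include <stdbool.h>

static bool mgmt_busy;	/* stands in for the per-device channel flag */

static int demo_pre_handle(bool is_activate_fw)
{
	if (mgmt_busy)
		return -EBUSY;		/* gated by an earlier activate */
	if (is_activate_fw)
		mgmt_busy = true;	/* close the gate before sending */
	return 0;
}

static void demo_post_handle(bool is_activate_fw, int send_err)
{
	/* reopen the gate only if the activate never reached firmware;
	 * on success the firmware-ack path re-enables the channel
	 */
	if (is_activate_fw && send_err)
		mgmt_busy = false;
}

int main(void)
{
	int err = demo_pre_handle(true);

	demo_post_handle(true, err);
	return err;
}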
return true; +} + +static bool is_sfp_abs_cmd_cached(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cmd_get_light_module_abs *abs = NULL; + struct hinic_port_routine_cmd *rt_cmd = NULL; + struct card_node *chip_node = hwdev->chip_node; + + abs = buf_in; + if (abs->port_id >= HINIC_MAX_PORT_ID || + *out_size < sizeof(*abs)) + return false; + + if (abs->version == HINIC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->abs, sizeof(*abs)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool driver_processed_cmd(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct card_node *chip_node = hwdev->chip_node; + + if (mod == HINIC_MOD_L2NIC) { + if (cmd == HINIC_PORT_CMD_GET_SFP_INFO && + chip_node->rt_cmd->up_send_sfp_info) { + return is_sfp_info_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } else if (cmd == HINIC_PORT_CMD_GET_SFP_ABS && + chip_node->rt_cmd->up_send_sfp_abs) { + return is_sfp_abs_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } + } + + return false; +} + +int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_hwdev *dev = hwdev; + unsigned long end; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + end = jiffies + msecs_to_jiffies(HINIC_DEV_ACTIVE_FW_TIMEOUT); + if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) { + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED)) + return -EPERM; + do { + if (!hinic_get_chip_present_flag(hwdev)) + break; + + err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) { + hinic_print_status_info(hwdev, mod, cmd, + buf_out); + return err; + } + + msleep(1000); + } while (time_before(jiffies, end)); + + err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } else { + if (driver_processed_cmd(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size)) + return 0; + + do { + if (!hinic_get_mgmt_channel_status(hwdev) || + !hinic_get_chip_present_flag(hwdev)) + break; + + msleep(1000); + } while (time_before(jiffies, end)); + err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, + timeout); + } + + hinic_print_status_info(hwdev, mod, cmd, buf_out); + + return err; +} +EXPORT_SYMBOL(hinic_msg_to_mgmt_sync); + +/* PF/VF sends a msg to the uP by api cmd and returns immediately */ +int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) || + !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) || + hinic_get_mgmt_channel_status(hwdev)) + return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Mailbox doesn't support async cmds\n"); + } else { + err = hinic_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} +EXPORT_SYMBOL(hinic_msg_to_mgmt_async); + +int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size)
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			     void *buf_in, u16 in_size)
+{
+	struct hinic_hwdev *dev = hwdev;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!(dev->chip_present_flag))
+		return -EPERM;
+
+	if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) {
+		if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED))
+			return -EPERM;
+
+		if (hinic_func_type(hwdev) == TYPE_VF)
+			err = hinic_mbox_to_pf_no_ack(hwdev, mod, cmd, buf_in,
+						      in_size);
+		else
+			err = hinic_mbox_to_host_no_ack(hwdev, mod, cmd, buf_in,
+							in_size);
+	} else {
+		err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size);
+	}
+
+	return err;
+}
+
+int hinic_mbox_to_vf(void *hwdev,
+		     enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = __hinic_mbox_to_vf(hwdev, mod, vf_id, cmd, buf_in, in_size,
+				 buf_out, out_size, timeout);
+	if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) {
+		/* VF already in error condition */
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "VF%d not initialized, disconnect it\n",
+			 vf_id);
+		hinic_unregister_vf_msg_handler(hwdev, vf_id);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_mbox_to_vf);
+
+int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+		      void *buf_in, u16 in_size,
+		      void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *dev = hwdev;
+	int err;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (!dev->chip_present_flag)
+		return -EPERM;
+
+	if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev))
+		return -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CLP_INITED))
+		return -EPERM;
+
+	err = hinic_pf_clp_to_mgmt(dev, mod, cmd, buf_in,
+				   in_size, buf_out, out_size);
+
+	return err;
+}
+
+/**
+ * hinic_cpu_to_be32 - convert data to big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4B
+ */
+void hinic_cpu_to_be32(void *data, int len)
+{
+	int i, chunk_sz = sizeof(u32);
+	u32 *mem = data;
+
+	if (!data)
+		return;
+
+	len = len / chunk_sz;
+
+	for (i = 0; i < len; i++) {
+		*mem = cpu_to_be32(*mem);
+		mem++;
+	}
+}
+EXPORT_SYMBOL(hinic_cpu_to_be32);
+
+/**
+ * hinic_be32_to_cpu - convert data from big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert
+ */
+void hinic_be32_to_cpu(void *data, int len)
+{
+	int i, chunk_sz = sizeof(u32);
+	u32 *mem = data;
+
+	if (!data)
+		return;
+
+	len = len / chunk_sz;
+
+	for (i = 0; i < len; i++) {
+		*mem = be32_to_cpu(*mem);
+		mem++;
+	}
+}
+EXPORT_SYMBOL(hinic_be32_to_cpu);
+
+/**
+ * hinic_set_sge - set dma area in scatter gather entry
+ * @sge: scatter gather entry
+ * @addr: dma address
+ * @len: length of relevant data in the dma address
+ */
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
+{
+	sge->hi_addr = upper_32_bits(addr);
+	sge->lo_addr = lower_32_bits(addr);
+	sge->len = len;
+}
+
+/**
+ * hinic_sge_to_dma - get dma address from scatter gather entry
+ * @sge: scatter gather entry
+ *
+ * Return dma address of sg entry
+ */
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
+{
+	return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
+}
+
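+/* Usage sketch for the sge helpers above (illustrative only; "dev", "dma"
+ * and "len" are assumed to come from a prior dma_map_single() call):
+ *
+ *	struct hinic_sge sge;
+ *
+ *	hinic_set_sge(&sge, dma, len);
+ *	... hand the sge to hardware ...
+ *	dma_unmap_single(dev, hinic_sge_to_dma(&sge), sge.len,
+ *			 DMA_TO_DEVICE);
+ */
+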
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
+{
+	struct hinic_cons_idx_attr cons_idx_attr = {0};
+	u16 out_size = sizeof(cons_idx_attr);
+	int err;
+
+	if (!hwdev || !attr)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &cons_idx_attr.func_idx);
+	if (err)
+		return err;
+
+	cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+	cons_idx_attr.pending_limit = attr->pending_limit;
+	cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+	if (attr->intr_en) {
+		cons_idx_attr.intr_en = attr->intr_en;
+		cons_idx_attr.intr_idx = attr->intr_idx;
+	}
+
+	cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+	cons_idx_attr.sq_id = q_id;
+
+	cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
+				     &cons_idx_attr, sizeof(cons_idx_attr),
+				     &cons_idx_attr, &out_size, 0);
+	if (err || !out_size || cons_idx_attr.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cons_idx_attr.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_ci_table);
+
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+	struct hinic_root_ctxt root_ctxt = {0};
+	u16 out_size = sizeof(root_ctxt);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx);
+	if (err)
+		return err;
+
+	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+	root_ctxt.set_cmdq_depth = 1;
+	root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_VAT_SET,
+				     &root_ctxt, sizeof(root_ctxt),
+				     &root_ctxt, &out_size, 0);
+	if (err || !out_size || root_ctxt.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, root_ctxt.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static u16 get_hw_rx_buf_size(int rx_buf_sz)
+{
+	u16 num_hw_types =
+		sizeof(hinic_hw_rx_buf_size) /
+		sizeof(hinic_hw_rx_buf_size[0]);
+	u16 i;
+
+	for (i = 0; i < num_hw_types; i++) {
+		if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
+			return i;
+	}
+
+	pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz);
+
+	return DEFAULT_RX_BUF_SIZE;	/* default 2K */
+}
+
+int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
+{
+	struct hinic_root_ctxt root_ctxt = {0};
+	u16 out_size = sizeof(root_ctxt);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx);
+	if (err)
+		return err;
+
+	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+	root_ctxt.set_cmdq_depth = 0;
+	root_ctxt.cmdq_depth = 0;
+
+	root_ctxt.lro_en = 1;
+
+	root_ctxt.rq_depth = (u16)ilog2(rq_depth);
+	root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
+	root_ctxt.sq_depth = (u16)ilog2(sq_depth);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_VAT_SET,
+				     &root_ctxt, sizeof(root_ctxt),
+				     &root_ctxt, &out_size, 0);
+	if (err || !out_size || root_ctxt.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, root_ctxt.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_root_ctxt);
+
+int hinic_clean_root_ctxt(void *hwdev)
+{
+	struct hinic_root_ctxt root_ctxt = {0};
+	u16 out_size = sizeof(root_ctxt);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx);
+	if (err)
+		return err;
+
+	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_VAT_SET,
+				     &root_ctxt, sizeof(root_ctxt),
+				     &root_ctxt, &out_size, 0);
+	if (err || !out_size || root_ctxt.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to clean root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, root_ctxt.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_clean_root_ctxt);
+
+static int wait_for_flr_finish(struct hinic_hwif *hwif)
+{
+	u32 cnt = 0;
+	enum hinic_pf_status status;
+
+	while (cnt < HINIC_FLR_TIMEOUT) {
+		status = hinic_get_pf_status(hwif);
+		if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) {
+			hinic_set_pf_status(hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+			return 0;
+		}
+
+		usleep_range(9900, 10000);
+		cnt++;
+	}
+
+	return -EFAULT;
+}
+
+#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT	5000
+
+static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
+{
+	enum hinic_cmdq_type cmdq_type;
+	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+	u32 cnt = 0;
+	int err = 0;
+
+	if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+		return 0;
+
+	cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+	while (cnt < HINIC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) {
+		err = 0;
+		cmdq_type = HINIC_CMDQ_SYNC;
+		for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+			if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
+				err = -EBUSY;
+				break;
+			}
+		}
+
+		if (!err)
+			return 0;
+
+		usleep_range(500, 1000);
+		cnt++;
+	}
+
+	cmdq_type = HINIC_CMDQ_SYNC;
+	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+		if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+			sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type);
+	}
+
+	cmdqs->status |= HINIC_CMDQ_ENABLE;
+
+	return err;
+}
+
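+/* The VF flush below runs in four steps: stop the cmdqs, notify the PF
+ * with HINIC_MGMT_CMD_START_FLR, wait for the PF to toggle the doorbell
+ * flush csr (disabled, then enabled again), and finally re-initialize
+ * the cmdq contexts.
+ */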
+static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+	struct hinic_clear_resource clr_res = {0};
+	int err;
+
+	err = wait_cmdq_stop(hwdev);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Cmdq is still working, please check whether the CMDQ timeout value is reasonable\n");
+
+	err = hinic_global_func_id_get(hwdev, &clr_res.func_idx);
+	if (err)
+		return err;
+
+	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM,
+				      HINIC_MGMT_CMD_START_FLR, &clr_res,
+				      sizeof(clr_res));
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Failed to send flush notification\n");
+
+	/* The PF first disables the VF doorbell flush csr. After the PF
+	 * finishes flushing the VF resources, it sets the csr back to
+	 * enabled.
+	 */
+	err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Wait doorbell flush disable timeout\n");
+	err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Wait doorbell flush enable timeout\n");
+
+	err = hinic_reinit_cmdq_ctxts(hwdev);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+
+	return 0;
+}
+
+static void hinic_pf_set_vf_db_flush(struct hinic_hwdev *hwdev, u16 vf_id,
+				     enum hinic_doorbell_ctrl val)
+{
+	u32 addr, vf_attr4;
+
+	addr = HINIC_PF_CSR_VF_FLUSH_OFF(vf_id);
+	vf_attr4 = hinic_hwif_read_reg(hwdev->hwif, addr);
+	vf_attr4 = HINIC_AF4_CLEAR(vf_attr4, DOORBELL_CTRL);
+	vf_attr4 |= HINIC_AF4_SET(val, DOORBELL_CTRL);
+	hinic_hwif_write_reg(hwdev->hwif, addr, vf_attr4);
+}
+
+static int hinic_vf_rx_tx_flush_in_pf(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+	struct hinic_clear_doorbell clear_db = {0};
+	struct hinic_clear_resource clr_res = {0};
+	u16 glb_vf_func_id;
+	u16 out_size;
+	int err;
+	int ret = 0;
+
+	/* disable vf doorbell flush csr */
+	hinic_pf_set_vf_db_flush(hwdev, vf_id, DISABLE_DOORBELL);
+
+	/* doorbell flush */
+	out_size = sizeof(clear_db);
+	glb_vf_func_id = HINIC_HWIF_GLOBAL_VF_OFFSET(hwdev->hwif) + vf_id;
+	clear_db.func_idx = glb_vf_func_id;
+	clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+				     sizeof(clear_db), &clear_db, &out_size, 0);
+	if (err || !out_size || clear_db.status) {
+		sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n",
+			 err, clear_db.status, out_size);
+		if (err)
+			ret = err;
+		else
+			ret = -EFAULT;
+	}
+
+	/* wait for ucode to stop I/O */
+	msleep(100);
+
+	/* notify up to begin vf flush */
+	out_size = sizeof(clr_res);
+	clr_res.func_idx = glb_vf_func_id;
+	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_START_FLR, &clr_res,
+				     sizeof(clr_res), &clr_res, &out_size, 0);
+	if (err || !out_size || clr_res.status) {
+		sdk_warn(hwdev->dev_hdl, "Failed to start flr, err: %d, status: 0x%x, out_size: 0x%x\n",
+			 err, clr_res.status, out_size);
+		ret = err ? err : (-EFAULT);
+	}
+	/* enable vf doorbell flush csr */
+	hinic_pf_set_vf_db_flush(hwdev, vf_id, ENABLE_DOORBELL);
+
+	return ret;
+}
+
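+/* The PF flush below mirrors the VF path, but the PF drives the doorbell
+ * and FLR state machine directly instead of asking another function to
+ * do it via the mailbox.
+ */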
+static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	struct hinic_clear_doorbell clear_db = {0};
+	struct hinic_clear_resource clr_res = {0};
+	u16 out_size, func_id;
+	int err;
+	int ret = 0;
+
+	/* wait for ucode to stop I/O */
+	msleep(100);
+
+	err = wait_cmdq_stop(hwdev);
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check whether the CMDQ timeout value is reasonable\n");
+		ret = err;
+	}
+
+	hinic_disable_doorbell(hwif);
+
+	out_size = sizeof(clear_db);
+	func_id = hinic_global_func_id_hw(hwdev);
+	clear_db.func_idx = func_id;
+	clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+				     sizeof(clear_db), &clear_db, &out_size, 0);
+	if (err || !out_size || clear_db.status) {
+		sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n",
+			 err, clear_db.status, out_size);
+		ret = err ? err : (-EFAULT);
+	}
+
+	hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
+
+	clr_res.func_idx = func_id;
+	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+	err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
+				       HINIC_MGMT_CMD_START_FLR, &clr_res,
+				       sizeof(clr_res));
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "Failed to send flush notification\n");
+		ret = err;
+	}
+
+	err = wait_for_flr_finish(hwif);
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n");
+		ret = err;
+	}
+
+	hinic_enable_doorbell(hwif);
+
+	err = hinic_reinit_cmdq_ctxts(hwdev);
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+		ret = err;
+	}
+
+	return ret;
+}
+
+int hinic_func_rx_tx_flush(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!dev->chip_present_flag)
+		return 0;
+
+	if (HINIC_FUNC_TYPE(dev) == TYPE_VF)
+		return hinic_vf_rx_tx_flush(dev);
+	else
+		return hinic_pf_rx_tx_flush(dev);
+}
+EXPORT_SYMBOL(hinic_func_rx_tx_flush);
+
+int hinic_get_interrupt_cfg(void *hwdev,
+			    struct nic_interrupt_info *interrupt_info)
+{
+	struct hinic_hwdev *nic_hwdev = hwdev;
+	struct hinic_msix_config msix_cfg = {0};
+	u16 out_size = sizeof(msix_cfg);
+	int err;
+
+	if (!hwdev || !interrupt_info)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &msix_cfg.func_id);
+	if (err)
+		return err;
+
+	msix_cfg.msix_index = interrupt_info->msix_index;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+				     &msix_cfg, sizeof(msix_cfg),
+				     &msix_cfg, &out_size, 0);
+	if (err || !out_size || msix_cfg.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, msix_cfg.status, out_size);
+		return -EINVAL;
+	}
+
+	interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+	interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
+	interrupt_info->pending_limt = msix_cfg.pending_cnt;
+	interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
+	interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_interrupt_cfg);
+
+int hinic_set_interrupt_cfg_direct(void *hwdev,
+				   struct nic_interrupt_info *interrupt_info)
+{
+	struct hinic_hwdev *nic_hwdev = hwdev;
+	struct hinic_msix_config msix_cfg = {0};
+	u16 out_size = sizeof(msix_cfg);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &msix_cfg.func_id);
+	if (err)
+		return err;
+
+	msix_cfg.msix_index = (u16)interrupt_info->msix_index;
+	msix_cfg.lli_credit_cnt = interrupt_info->lli_credit_limit;
+	msix_cfg.lli_tmier_cnt = interrupt_info->lli_timer_cfg;
+	msix_cfg.pending_cnt = interrupt_info->pending_limt;
+	msix_cfg.coalesct_timer_cnt = interrupt_info->coalesc_timer_cfg;
+	msix_cfg.resend_timer_cnt = interrupt_info->resend_timer_cfg;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+				     &msix_cfg, sizeof(msix_cfg),
+				     &msix_cfg, &out_size, 0);
+	if (err || !out_size || msix_cfg.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, msix_cfg.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_set_interrupt_cfg(void *hwdev,
+			    struct nic_interrupt_info interrupt_info)
+{
+	struct nic_interrupt_info temp_info;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	temp_info.msix_index = interrupt_info.msix_index;
+
+	err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+	if (err)
+		return -EINVAL;
+
+
if (!interrupt_info.lli_set) { + interrupt_info.lli_credit_limit = temp_info.lli_credit_limit; + interrupt_info.lli_timer_cfg = temp_info.lli_timer_cfg; + } + + if (!interrupt_info.interrupt_coalesc_set) { + interrupt_info.pending_limt = temp_info.pending_limt; + interrupt_info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; + interrupt_info.resend_timer_cfg = temp_info.resend_timer_cfg; + } + + return hinic_set_interrupt_cfg_direct(hwdev, &interrupt_info); +} +EXPORT_SYMBOL(hinic_set_interrupt_cfg); + +void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hinic_hwif *hwif; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + msix_ctrl = HINIC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_idx); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); +} +EXPORT_SYMBOL(hinic_misx_intr_clear_resend_bit); + +static int init_aeqs_msix_attr(struct hinic_hwdev *hwdev) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + struct nic_interrupt_info info = {0}; + struct hinic_eq *eq; + int q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic_set_interrupt_cfg_direct(hwdev, &info); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", + q_id); + return -EFAULT; + } + } + + hinic_set_mbox_seg_ack_mod(hwdev, HINIC_MBOX_SEND_MSG_INT); + + return 0; +} + +static int init_ceqs_msix_attr(struct hinic_hwdev *hwdev) +{ + struct hinic_ceqs *ceqs = hwdev->ceqs; + struct nic_interrupt_info info = {0}; + struct hinic_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic_set_interrupt_cfg(hwdev, info); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +/** + * set_pf_dma_attr_entry - set the dma attributes for entry + * @hwdev: the pointer to hw device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + */ +static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx); + + val = hinic_hwif_read_reg(hwdev->hwif, addr); + val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN); + + dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) | + HINIC_DMA_ATTR_ENTRY_SET(at, AT) | + HINIC_DMA_ATTR_ENTRY_SET(ph, PH) | + HINIC_DMA_ATTR_ENTRY_SET(no_snooping, 
NO_SNOOPING) |
+			 HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);
+
+	val |= dma_attr_entry;
+	hinic_hwif_write_reg(hwdev->hwif, addr, val);
+}
+
+static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx,
+				 u8 st, u8 at, u8 ph,
+				 enum hinic_pcie_nosnoop no_snooping,
+				 enum hinic_pcie_tph tph_en)
+{
+	struct hinic_vf_dma_attr_table attr = {0};
+	u16 out_size = sizeof(attr);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &attr.func_idx);
+	if (err)
+		return err;
+
+	attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev);
+	attr.entry_idx = entry_idx;
+	attr.st = st;
+	attr.at = at;
+	attr.ph = ph;
+	attr.no_snooping = no_snooping;
+	attr.tph_en = tph_en;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_DMA_ATTR_SET, &attr,
+				     sizeof(attr), &attr, &out_size, 0);
+	if (err || !out_size || attr.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set dma attribute, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, attr.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * dma_attr_table_init - initialize the default dma attributes
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ */
+static int dma_attr_table_init(struct hinic_hwdev *hwdev)
+{
+	int err = 0;
+
+	if (HINIC_IS_VF(hwdev))
+		err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+					    HINIC_PCIE_ST_DISABLE,
+					    HINIC_PCIE_AT_DISABLE,
+					    HINIC_PCIE_PH_DISABLE,
+					    HINIC_PCIE_SNOOP,
+					    HINIC_PCIE_TPH_DISABLE);
+	else
+		set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+				      HINIC_PCIE_ST_DISABLE,
+				      HINIC_PCIE_AT_DISABLE,
+				      HINIC_PCIE_PH_DISABLE,
+				      HINIC_PCIE_SNOOP,
+				      HINIC_PCIE_TPH_DISABLE);
+
+	return err;
+}
+
+static int resources_state_set(struct hinic_hwdev *hwdev,
+			       enum hinic_res_state state)
+{
+	struct hinic_cmd_set_res_state res_state = {0};
+	u16 out_size = sizeof(res_state);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &res_state.func_idx);
+	if (err)
+		return err;
+
+	res_state.state = state;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_RES_STATE_SET,
+				     &res_state, sizeof(res_state),
+				     &res_state, &out_size, 0);
+	if (err || !out_size || res_state.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, res_state.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_sync_heartbeat_status(struct hinic_hwdev *hwdev,
+				enum heartbeat_support_state pf_state,
+				enum heartbeat_support_state *mgmt_state)
+{
+	struct hinic_heartbeat_support hb_support = {0};
+	u16 out_size = sizeof(hb_support);
+	int err;
+
+	hb_support.ppf_id = hinic_ppf_idx(hwdev);
+	hb_support.pf_issupport = pf_state;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_HEARTBEAT_SUPPORTED,
+				     &hb_support, sizeof(hb_support),
+				     &hb_support, &out_size, 0);
+	if ((hb_support.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     hb_support.status) || err || !out_size) {
+		sdk_err(hwdev->dev_hdl, "Failed to synchronize heartbeat status, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, hb_support.status, out_size);
+		return -EFAULT;
+	}
+
+	if (!hb_support.status)
+		*mgmt_state = hb_support.mgmt_issupport;
+
+	return hb_support.status;
+}
+
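+/* Note: a status of HINIC_MGMT_CMD_UNSUPPORTED from the firmware is not
+ * treated as a failure above; it is passed back to the caller so that
+ * firmware without the heartbeat enhancement can still be driven.
+ */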
+static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd,
+				  void *buf_in, u16 in_size, void *buf_out,
+				  u16 *out_size)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = pri_handle;
+	u8 cmd_idx;
+	u32 *mem;
+	u16 i;
+
+	for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) {
+		if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+			if (!pf_to_mgmt->proc.info[cmd_idx].proc) {
+				sdk_warn(pf_to_mgmt->hwdev->dev_hdl,
+					 "PF recv up comm msg handler is null, cmd: 0x%x\n",
+					 cmd);
+			} else {
+				pf_to_mgmt->proc.info[cmd_idx].proc(hwdev,
+					buf_in, in_size, buf_out, out_size);
+			}
+
+			return;
+		}
+	}
+
+	sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n",
+		 cmd);
+
+	mem = buf_in;
+	for (i = 0; i < (in_size / sizeof(u32)); i++) {
+		pr_info("0x%x\n", *mem);
+		mem++;
+	}
+
+	*out_size = 0;
+}
+
+static int hinic_vf_get_ppf_init_state(void *handle, void *buf_out,
+				       u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+	struct hinic_ppf_state *ppf_state = buf_out;
+	struct card_node *chip_node = hwdev->chip_node;
+
+	ppf_state->ppf_state = (u8)chip_node->ppf_state;
+
+	*out_size = sizeof(*ppf_state);
+
+	return 0;
+}
+
+int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode)
+{
+	struct hinic_sdi_mode_info sdi_mode = {0};
+	u16 out_size = sizeof(sdi_mode);
+	int err;
+
+	sdi_mode.opcode = HINIC_SDI_INFO_MODE & (~HINIC_SDI_INFO_SET);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_GET_SDI_MODE, &sdi_mode,
+				     sizeof(sdi_mode), &sdi_mode, &out_size, 0);
+	if ((sdi_mode.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     sdi_mode.status) || err || !out_size) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get sdi mode info, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, sdi_mode.status, out_size);
+		return -EFAULT;
+	}
+
+	*cur_mode = sdi_mode.cur_sdi_mode;
+
+	return sdi_mode.status;
+}
+
+int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+			 u16 in_size, void *buf_out, u16 *out_size)
+{
+	int err = 0;
+	u8 size = sizeof(hw_cmd_support_vf) / sizeof(hw_cmd_support_vf[0]);
+
+	if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
+					buf_in, in_size, size)) {
+		sdk_err(((struct hinic_hwdev *)handle)->dev_hdl,
+			"PF received VF(%d) common cmd(0x%x), mbox len(0x%x) is invalid\n",
+			vf_id + hinic_glb_pf_vf_offset(handle), cmd, in_size);
+		err = HINIC_MBOX_VF_CMD_ERROR;
+		return err;
+	}
+
+	if (cmd == HINIC_MGMT_CMD_START_FLR) {
+		*out_size = 0;
+		err = hinic_vf_rx_tx_flush_in_pf(handle, vf_id);
+	} else if (cmd == HINIC_MGMT_CMD_GET_PPF_STATE) {
+		err = hinic_vf_get_ppf_init_state(handle, buf_out, out_size);
+	} else {
+		err = hinic_pf_msg_to_mgmt_sync(handle, HINIC_MOD_COMM, cmd,
+						buf_in, in_size, buf_out,
+						out_size, 0U);
+		if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+		    err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+			sdk_err(((struct hinic_hwdev *)handle)->dev_hdl,
+				"PF mbox common callback handler err: %d\n",
+				err);
+	}
+
+	return err;
+}
+
+static int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
+{
+	struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} };
+	u16 num_aeqs, resp_num_irq = 0, i;
+	int err;
+
+	num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
+	if (num_aeqs > HINIC_MAX_AEQS) {
+		sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+			 HINIC_MAX_AEQS);
+		num_aeqs = HINIC_MAX_AEQS;
+	}
+	err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs,
+			       &resp_num_irq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n",
+			num_aeqs);
+		return err;
+	}
+
+	if (resp_num_irq < num_aeqs) {
+		sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+			 resp_num_irq);
+		num_aeqs = resp_num_irq;
+	}
+
+	err = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n");
+		goto aeqs_init_err;
+	}
+
+	set_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+	return 0;
+
+aeqs_init_err:
+	for (i = 0; i < num_aeqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+
+	return err;
+}
+
+static void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
+{
+	struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} };
+	u16 num_irqs, i;
+
+	clear_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+	hinic_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs);
+	hinic_aeqs_free(hwdev);
+	for (i = 0; i < num_irqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+}
+
+static int hinic_comm_ceqs_init(struct hinic_hwdev *hwdev)
+{
+	struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} };
+	u16 num_ceqs, resp_num_irq = 0, i;
+	int err;
+
+	num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
+	if (num_ceqs > HINIC_MAX_CEQS) {
+		sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+			 HINIC_MAX_CEQS);
+		num_ceqs = HINIC_MAX_CEQS;
+	}
+
+	err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs,
+			       &resp_num_irq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n",
+			num_ceqs);
+		return err;
+	}
+
+	if (resp_num_irq < num_ceqs) {
+		sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+			 resp_num_irq);
+		num_ceqs = resp_num_irq;
+	}
+
+	err = hinic_ceqs_init(hwdev, num_ceqs, ceq_irqs);
+	if (err) {
+		sdk_err(hwdev->dev_hdl,
+			"Failed to init ceqs, err:%d\n", err);
+		goto ceqs_init_err;
+	}
+
+	return 0;
+
+ceqs_init_err:
+	for (i = 0; i < num_ceqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+
+	return err;
+}
+
+static void hinic_comm_ceqs_free(struct hinic_hwdev *hwdev)
+{
+	struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} };
+	u16 num_irqs;
+	int i;
+
+	hinic_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs);
+	hinic_ceqs_free(hwdev);
+	for (i = 0; i < num_irqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+}
+
+static int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	err = hinic_func_to_func_init(hwdev);
+	if (err)
+		return err;
+
+	hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_FROM_FUNC,
+				 hinic_mbox_func_aeqe_handler);
+	hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_SEND_RSLT,
+				 hinic_mbox_self_aeqe_handler);
+
+	if (!HINIC_IS_VF(hwdev)) {
+		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+					  comm_pf_mbox_handler);
+		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
+					  sw_func_pf_mbox_handler);
+	} else {
+		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+					  vf_to_pf_handler);
+	}
+
+	set_bit(HINIC_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+	return 0;
+}
+
+static void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+	hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_FROM_FUNC);
+	hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_SEND_RSLT);
+
+	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
+
+	hinic_func_to_func_free(hwdev);
+}
+
+static int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return 0; /* VFs do not support sending msgs to mgmt directly */
+
+	err = hinic_pf_to_mgmt_init(hwdev);
+	if (err)
+		return err;
+
+	hinic_aeq_register_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU,
+				 hinic_mgmt_msg_aeqe_handler);
+
+	hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_COMM,
+				   hwdev->pf_to_mgmt, comm_mgmt_msg_handler);
+
+	set_bit(HINIC_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+	return 0;
+}
+
+static void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return; /* VFs do not support sending msgs to mgmt directly */
+
+	hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_COMM);
+
+	hinic_aeq_unregister_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU);
+
+	hinic_pf_to_mgmt_free(hwdev);
+}
+
+static int hinic_comm_clp_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return 0;
+
+	err = hinic_clp_pf_to_mgmt_init(hwdev);
+	if (err)
+		return err;
+
+	set_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state);
+
+	return 0;
+}
+
+static void hinic_comm_clp_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return;
+
+	clear_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state);
+	hinic_clp_pf_to_mgmt_free(hwdev);
+}
+
+static int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	err = hinic_cmdqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+		return err;
+	}
+
+	hinic_ceq_register_cb(hwdev, HINIC_CMDQ, hinic_cmdq_ceq_handler);
+
+	err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n");
+		goto set_cmdq_depth_err;
+	}
+
+	return 0;
+
+set_cmdq_depth_err:
+	hinic_cmdqs_free(hwdev);
+
+	return err;
+}
+
+static void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+	hinic_ceq_unregister_cb(hwdev, HINIC_CMDQ);
+	hinic_cmdqs_free(hwdev);
+}
+
+static inline void __set_heartbeat_ehd_detect_delay(struct hinic_hwdev *hwdev,
+						    u32 delay_ms)
+{
+	hwdev->heartbeat_ehd.start_detect_jiffies =
+		jiffies + msecs_to_jiffies(delay_ms);
+}
+
+static int hinic_sync_mgmt_func_state(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+
+	err = resources_state_set(hwdev, HINIC_RES_ACTIVE);
+	if (err) {
+		sdk_err(hwdev->dev_hdl,
+			"Failed to set function resources state\n");
+		goto resources_state_set_err;
+	}
+
+	hwdev->heartbeat_ehd.en = false;
+	if (HINIC_FUNC_TYPE(hwdev) == TYPE_PPF) {
+		/* heartbeat synchronization must happen after setting
+		 * the pf active status
+		 */
+		hinic_comm_recv_mgmt_self_cmd_reg(
+			hwdev, HINIC_MGMT_CMD_HEARTBEAT_EVENT,
+			mgmt_heartbeat_event_handler);
+	}
+
+	return 0;
+
+resources_state_set_err:
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+	return err;
+}
+
+static void hinic_unsync_mgmt_func_state(struct hinic_hwdev *hwdev)
+{
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+	hwdev->heartbeat_ehd.en = false;
+	if (HINIC_FUNC_TYPE(hwdev) == TYPE_PPF) {
+		hinic_comm_recv_up_self_cmd_unreg(
+			hwdev, HINIC_MGMT_CMD_HEARTBEAT_EVENT);
+	}
+
+	resources_state_set(hwdev, HINIC_RES_CLEAN);
+}
+
+int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag)
+{
+	struct hinic_l2nic_reset l2nic_reset = {0};
+	u16 out_size = sizeof(l2nic_reset);
+	int err = 0;
+
+	err = hinic_set_vport_enable(hwdev, false);
+	if (err)
+		return err;
+
+	msleep(100);
+
+	sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag);
+
+	err = hinic_global_func_id_get(hwdev, &l2nic_reset.func_id);
+	if (err)
+		return err;
+
+	l2nic_reset.reset_flag = reset_flag;
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_L2NIC_RESET, &l2nic_reset,
+				     sizeof(l2nic_reset), &l2nic_reset,
+				     &out_size, 0);
+	if (err || !out_size || l2nic_reset.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, l2nic_reset.status, out_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_l2nic_reset_base);
+
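+/* Illustrative caller sketch (the reset_flag semantics are
+ * firmware-defined; 0 is the plain reset used by hinic_l2nic_reset()
+ * below):
+ *
+ *	err = hinic_l2nic_reset_base(hwdev, 0);
+ *	if (err)
+ *		return err;
+ */
+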
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+	return hinic_l2nic_reset_base(hwdev, 0);
+}
+
+static int __get_func_misc_info(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	err = hinic_get_board_info(hwdev, &hwdev->board_info);
+	if (err) {
+		/* For the pf/vf of slave host, return error */
+		if (hinic_pcie_itf_id(hwdev))
+			return err;
+
+		/* VF can't get board info in early version */
+		if (!HINIC_IS_VF(hwdev)) {
+			sdk_err(hwdev->dev_hdl, "Get board info failed\n");
+			return err;
+		}
+
+		memset(&hwdev->board_info, 0xff,
+		       sizeof(struct hinic_board_info));
+	}
+
+	err = hinic_get_mgmt_version(hwdev, hwdev->mgmt_ver);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Get mgmt cpu version failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* initialize communication channel */
+int hinic_init_comm_ch(struct hinic_hwdev *hwdev)
+{
+	int err;
+	u16 func_id;
+
+	if (IS_BMGW_SLAVE_HOST(hwdev) &&
+	    (!hinic_get_master_host_mbox_enable(hwdev))) {
+		sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
+		return -EFAULT;
+	}
+
+	err = hinic_comm_clp_to_mgmt_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init clp\n");
+		return err;
+	}
+
+	err = hinic_comm_aeqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n");
+		goto aeqs_init_err;
+	}
+
+	err = hinic_comm_pf_to_mgmt_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init msg\n");
+		goto msg_init_err;
+	}
+
+	err = hinic_comm_func_to_func_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n");
+		goto func_to_func_init_err;
+	}
+
+	err = init_aeqs_msix_attr(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n");
+		goto aeqs_msix_attr_init_err;
+	}
+
+	err = __get_func_misc_info(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to get function misc information\n");
+		goto get_func_info_err;
+	}
+
+	/* detect master host chip mode according to board type and host id */
+	err = rectify_host_mode(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to rectify host mode\n");
+		goto rectify_mode_err;
+	}
+
+	err = hinic_l2nic_reset(hwdev);
+	if (err)
+		goto l2nic_reset_err;
+
+	if (IS_MULTI_HOST(hwdev)) {
+		err = hinic_multi_host_mgmt_init(hwdev);
+		if (err)
+			goto multi_host_mgmt_init_err;
+	}
+
+	dma_attr_table_init(hwdev);
+
+	err = hinic_comm_ceqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n");
+		goto ceqs_init_err;
+	}
+
+	err = init_ceqs_msix_attr(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n");
+		goto init_eqs_msix_err;
+	}
+
+	/* set default wq page_size */
+	hwdev->wq_page_size = HINIC_DEFAULT_WQ_PAGE_SIZE;
+
+	err = hinic_global_func_id_get(hwdev, &func_id);
+	if (err)
+		goto get_func_id_err;
+
+	err = hinic_set_wq_page_size(hwdev, func_id, hwdev->wq_page_size);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n");
+		goto init_wq_pg_size_err;
+	}
+
+	err = hinic_comm_cmdqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+		goto cmdq_init_err;
+	}
+
+	set_bit(HINIC_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+	err = hinic_sync_mgmt_func_state(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n");
+		goto sync_mgmt_func_err;
+	}
+
+	err = hinic_aeq_register_swe_cb(hwdev, HINIC_STATELESS_EVENT,
+					hinic_nic_sw_aeqe_handler);
+	if (err) {
+		sdk_err(hwdev->dev_hdl,
+			"Failed to register ucode aeqe handler\n");
+		goto register_ucode_aeqe_err;
+	}
+
+	set_bit(HINIC_HWDEV_COMM_CH_INITED, &hwdev->func_state);
+
+	return 0;
+
+register_ucode_aeqe_err:
+	hinic_unsync_mgmt_func_state(hwdev);
+sync_mgmt_func_err:
+	return err;
+
+cmdq_init_err:
+	if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF)
+		hinic_set_wq_page_size(hwdev, func_id, HINIC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
+get_func_id_err:
+init_eqs_msix_err:
+	hinic_comm_ceqs_free(hwdev);
+
+ceqs_init_err:
+	if (IS_MULTI_HOST(hwdev))
+		hinic_multi_host_mgmt_free(hwdev);
+multi_host_mgmt_init_err:
+l2nic_reset_err:
+rectify_mode_err:
+get_func_info_err:
+aeqs_msix_attr_init_err:
+func_to_func_init_err:
+	return err;
+
+msg_init_err:
+	hinic_comm_aeqs_free(hwdev);
+
+aeqs_init_err:
+	hinic_comm_clp_to_mgmt_free(hwdev);
+
+	return err;
+}
+
+static void __uninit_comm_module(struct hinic_hwdev *hwdev,
+				 enum hinic_hwdev_init_state init_state)
+{
+	u16 func_id;
+
+	switch (init_state) {
+	case HINIC_HWDEV_COMM_CH_INITED:
+		hinic_aeq_unregister_swe_cb(hwdev,
+					    HINIC_STATELESS_EVENT);
+		hinic_unsync_mgmt_func_state(hwdev);
+		break;
+	case HINIC_HWDEV_CMDQ_INITED:
+		hinic_comm_cmdqs_free(hwdev);
+		/* A VF may only set a wq page size of 256K; any other
+		 * value is rejected by the PF, and the PF sets every
+		 * VF's page size back to 4K when SR-IOV is disabled.
+		 */
+		if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF) {
+			func_id = hinic_global_func_id_hw(hwdev);
+			hinic_set_wq_page_size(hwdev, func_id,
+					       HINIC_HW_WQ_PAGE_SIZE);
+		}
+
+		hinic_comm_ceqs_free(hwdev);
+
+		if (IS_MULTI_HOST(hwdev))
+			hinic_multi_host_mgmt_free(hwdev);
+		break;
+	case HINIC_HWDEV_MBOX_INITED:
+		hinic_comm_func_to_func_free(hwdev);
+		break;
+	case HINIC_HWDEV_MGMT_INITED:
+		hinic_comm_pf_to_mgmt_free(hwdev);
+		break;
+	case HINIC_HWDEV_AEQ_INITED:
+		hinic_comm_aeqs_free(hwdev);
+		break;
+	case HINIC_HWDEV_CLP_INITED:
+		hinic_comm_clp_to_mgmt_free(hwdev);
+		break;
+	default:
+		break;
+	}
+}
+
+#define HINIC_FUNC_STATE_BUSY_TIMEOUT	300
+void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev)
+{
+	enum hinic_hwdev_init_state init_state = HINIC_HWDEV_COMM_CH_INITED;
+	int cnt;
+
+	while (init_state > HINIC_HWDEV_NONE_INITED) {
+		if (!test_bit(init_state, &hwdev->func_state)) {
+			init_state--;
+			continue;
+		}
+		clear_bit(init_state, &hwdev->func_state);
+
+		cnt = 0;
+		while (test_bit(HINIC_HWDEV_STATE_BUSY, &hwdev->func_state) &&
+		       cnt++ <= HINIC_FUNC_STATE_BUSY_TIMEOUT)
+			usleep_range(900, 1000);
+
+		__uninit_comm_module(hwdev, init_state);
+
+		init_state--;
+	}
+}
+
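+/* Teardown in hinic_uninit_comm_ch() above walks the init-state bits
+ * downwards from HINIC_HWDEV_COMM_CH_INITED, so modules are released in
+ * the reverse order of hinic_init_comm_ch() and a partially initialized
+ * device unwinds cleanly.
+ */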
"Failed to alloc wq\n"); + kfree(wq); + return -EFAULT; + } + + *cla_addr = wq->block_paddr; + *handle = wq; + + return 0; +} +EXPORT_SYMBOL(hinic_slq_alloc); + +void hinic_slq_free(void *dev, void *handle) +{ + struct hinic_hwdev *hwdev = dev; + + if (!hwdev || !handle) + return; + + hinic_wq_free(hwdev->wqs, handle); + kfree(handle); +} +EXPORT_SYMBOL(hinic_slq_free); + +u64 hinic_slq_get_addr(void *handle, u16 index) +{ + if (!handle) + return 0; /* NULL of wqe addr */ + + return (u64)hinic_get_wqebb_addr(handle, index); +} +EXPORT_SYMBOL(hinic_slq_get_addr); + +u64 hinic_slq_get_first_pageaddr(void *handle) +{ + struct hinic_wq *wq = handle; + + if (!handle) + return 0; /* NULL of wqe addr */ + + return hinic_get_first_wqe_page_addr(wq); +} +EXPORT_SYMBOL(hinic_slq_get_first_pageaddr); + +int hinic_func_tmr_bitmap_set(void *hwdev, bool en) +{ + struct hinic_func_tmr_bitmap_op bitmap_op = {0}; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &bitmap_op.func_idx); + if (err) + return err; + + bitmap_op.ppf_idx = hinic_ppf_idx(hwdev); + if (en) + bitmap_op.op_id = FUNC_TMR_BITMAP_ENABLE; + else + bitmap_op.op_id = FUNC_TMR_BITMAP_DISABLE; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size, 0); + if (err || !out_size || bitmap_op.status) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.status, out_size); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_func_tmr_bitmap_set); + +int ppf_ht_gpa_set(struct hinic_hwdev *hwdev, struct hinic_page_addr *pg0, + struct hinic_page_addr *pg1) +{ + struct comm_info_ht_gpa_set ht_gpa_set = {0}; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; + sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); + ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PPF_HT_GPA_SET, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size, 0); + if (ret || !out_size || ht_gpa_set.status) { + sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.status, out_size); + return -EFAULT; + } + + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int hinic_ppf_ht_gpa_init(struct hinic_hwdev *hwdev) +{ + int ret; + int i; + int j; + int size; + + struct hinic_page_addr page_addr0[HINIC_PPF_HT_GPA_SET_RETRY_TIMES]; + struct hinic_page_addr page_addr1[HINIC_PPF_HT_GPA_SET_RETRY_TIMES]; + + size = HINIC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); + memset(page_addr0, 0, size); + memset(page_addr1, 0, size); + + for (i = 0; i < HINIC_PPF_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ppf_ht_gpa_set(hwdev, 
+int hinic_ppf_ht_gpa_init(struct hinic_hwdev *hwdev)
+{
+	int ret;
+	int i;
+	int j;
+	int size;
+
+	struct hinic_page_addr page_addr0[HINIC_PPF_HT_GPA_SET_RETRY_TIMES];
+	struct hinic_page_addr page_addr1[HINIC_PPF_HT_GPA_SET_RETRY_TIMES];
+
+	size = HINIC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]);
+	memset(page_addr0, 0, size);
+	memset(page_addr1, 0, size);
+
+	for (i = 0; i < HINIC_PPF_HT_GPA_SET_RETRY_TIMES; i++) {
+		ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]);
+		if (!ret)
+			break;
+	}
+
+	for (j = 0; j < i; j++) {
+		if (page_addr0[j].virt_addr) {
+			dma_free_coherent(hwdev->dev_hdl,
+					  HINIC_HT_GPA_PAGE_SIZE,
+					  page_addr0[j].virt_addr,
+					  page_addr0[j].phys_addr);
+			page_addr0[j].virt_addr = NULL;
+		}
+		if (page_addr1[j].virt_addr) {
+			dma_free_coherent(hwdev->dev_hdl,
+					  HINIC_HT_GPA_PAGE_SIZE,
+					  page_addr1[j].virt_addr,
+					  page_addr1[j].phys_addr);
+			page_addr1[j].virt_addr = NULL;
+		}
+	}
+
+	if (i >= HINIC_PPF_HT_GPA_SET_RETRY_TIMES) {
+		sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n",
+			i);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void hinic_ppf_ht_gpa_deinit(struct hinic_hwdev *hwdev)
+{
+	if (hwdev->page_pa0.virt_addr) {
+		dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE,
+				  hwdev->page_pa0.virt_addr,
+				  hwdev->page_pa0.phys_addr);
+		hwdev->page_pa0.virt_addr = NULL;
+	}
+
+	if (hwdev->page_pa1.virt_addr) {
+		dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE,
+				  hwdev->page_pa1.virt_addr,
+				  hwdev->page_pa1.phys_addr);
+		hwdev->page_pa1.virt_addr = NULL;
+	}
+}
+
+static int set_ppf_tmr_status(struct hinic_hwdev *hwdev,
+			      enum ppf_tmr_status status)
+{
+	struct hinic_ppf_tmr_op op = {0};
+	u16 out_size = sizeof(op);
+	int err = 0;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) != TYPE_PPF)
+		return -EFAULT;
+
+	if (status == HINIC_PPF_TMR_FLAG_START) {
+		err = hinic_ppf_ht_gpa_init(hwdev);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed\n");
+			return -EFAULT;
+		}
+	} else {
+		hinic_ppf_ht_gpa_deinit(hwdev);
+	}
+
+	op.op_id = status;
+	op.ppf_idx = hinic_ppf_idx(hwdev);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_PPF_TMR_SET, &op,
+				     sizeof(op), &op, &out_size, 0);
+	if (err || !out_size || op.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, op.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_ppf_tmr_start(void *hwdev)
+{
+	if (!hwdev) {
+		pr_err("Hwdev pointer is NULL for starting ppf timer\n");
+		return -EINVAL;
+	}
+
+	return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START);
+}
+EXPORT_SYMBOL(hinic_ppf_tmr_start);
+
+int hinic_ppf_tmr_stop(void *hwdev)
+{
+	if (!hwdev) {
+		pr_err("Hwdev pointer is NULL for stopping ppf timer\n");
+		return -EINVAL;
+	}
+
+	return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_STOP);
+}
+EXPORT_SYMBOL(hinic_ppf_tmr_stop);
+
+int mqm_eqm_try_alloc_mem(struct hinic_hwdev *hwdev, u32 page_size,
+			  u32 page_num)
+{
+	struct hinic_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr;
+	u32 valid_num = 0;
+	u32 flag = 1;
+	u32 i = 0;
+
+	for (i = 0; i < page_num; i++) {
+		page_addr->virt_addr =
+			dma_zalloc_coherent(hwdev->dev_hdl, page_size,
+					    &page_addr->phys_addr, GFP_KERNEL);
+		if (!page_addr->virt_addr) {
+			flag = 0;
+			break;
+		}
+		valid_num++;
+		page_addr++;
+	}
+
+	if (flag == 1) {
+		hwdev->mqm_att.page_size = page_size;
+		hwdev->mqm_att.page_num = page_num;
+	} else {
+		page_addr = hwdev->mqm_att.brm_srch_page_addr;
+		for (i = 0; i < valid_num; i++) {
+			dma_free_coherent(hwdev->dev_hdl, page_size,
+					  page_addr->virt_addr,
+					  page_addr->phys_addr);
+			page_addr++;
+		}
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int mqm_eqm_alloc_page_mem(struct hinic_hwdev *hwdev)
+{
+	int ret = 0;
+
+	/* apply for 64KB page, number is chunk_num/16 */
+	ret = mqm_eqm_try_alloc_mem(hwdev, 64 * 1024,
+				    hwdev->mqm_att.chunk_num >> 4);
+	if (!ret)
+		return 0;
+
+	/* apply for 8KB page, number is chunk_num/2 */
+	ret = mqm_eqm_try_alloc_mem(hwdev, 8 * 1024,
+				    hwdev->mqm_att.chunk_num >> 1);
+	if (!ret)
+		return 0;
+
+	/* apply for 4KB page, number is chunk_num */
+	ret = mqm_eqm_try_alloc_mem(hwdev, 4 * 1024,
+				    hwdev->mqm_att.chunk_num);
+	if (!ret)
+		return 0;
+
+	return ret;
+}
+
+void mqm_eqm_free_page_mem(struct hinic_hwdev *hwdev)
+{
+	u32 i;
+	struct hinic_page_addr *page_addr;
+	u32 page_size;
+
+	page_size = hwdev->mqm_att.page_size;
+	page_addr = hwdev->mqm_att.brm_srch_page_addr;
+
+	for (i = 0; i < hwdev->mqm_att.page_num; i++) {
+		dma_free_coherent(hwdev->dev_hdl, page_size,
+				  page_addr->virt_addr, page_addr->phys_addr);
+		page_addr++;
+	}
+}
+
+int mqm_eqm_set_cfg_2_hw(struct hinic_hwdev *hwdev, u32 valid)
+{
+	struct comm_info_eqm_cfg info_eqm_cfg = {0};
+	u16 out_size = sizeof(info_eqm_cfg);
+	int err;
+
+	info_eqm_cfg.ppf_id = hinic_global_func_id_hw(hwdev);
+	info_eqm_cfg.page_size = hwdev->mqm_att.page_size;
+	info_eqm_cfg.valid = valid;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MQM_CFG_INFO_SET,
+				     &info_eqm_cfg, sizeof(info_eqm_cfg),
+				     &info_eqm_cfg, &out_size, 0);
+	if (err || !out_size || info_eqm_cfg.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set eqm cfg, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, info_eqm_cfg.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+#define EQM_DATA_BUF_SIZE	1024
+
+int mqm_eqm_set_page_2_hw(struct hinic_hwdev *hwdev)
+{
+	struct comm_info_eqm_search_gpa *info;
+	struct hinic_page_addr *page_addr;
+	void *send_buf;
+	u16 send_buf_size;
+	u32 i;
+	u64 *gpa_hi52;
+	u64 gpa;
+	u32 num;
+	u32 start_idx;
+	int err = 0;
+	u32 valid_page_num;
+	u16 out_size;
+
+	send_buf_size = sizeof(struct comm_info_eqm_search_gpa) +
+			EQM_DATA_BUF_SIZE;
+	send_buf = kzalloc(send_buf_size, GFP_KERNEL);
+	if (!send_buf) {
+		sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\n");
+		return -EFAULT;
+	}
+
+	page_addr = hwdev->mqm_att.brm_srch_page_addr;
+	info = (struct comm_info_eqm_search_gpa *)send_buf;
+	valid_page_num = 0;
+
+	gpa_hi52 = info->gpa_hi52;
+	num = 0;
+	start_idx = 0;
+	for (i = 0; i < hwdev->mqm_att.page_num; i++) {
+		gpa = page_addr->phys_addr >> 12;
+		gpa_hi52[num] = gpa;
+		num++;
+		if (num == 128) {
+			info->num = num;
+			info->start_idx = start_idx;
+			out_size = send_buf_size;
+			err = hinic_msg_to_mgmt_sync(
+				hwdev, HINIC_MOD_COMM,
+				HINIC_MGMT_CMD_MQM_SRCH_GPA_SET,
+				info, (u16)send_buf_size, info,
+				&out_size, 0);
+			if (err || !out_size || info->status) {
+				sdk_err(hwdev->dev_hdl, "Set mqm srch gpa failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+					err, info->status, out_size);
+				err = -EFAULT;
+				goto set_page_2_hw_end;
+			}
+
+			gpa_hi52 = info->gpa_hi52;
+			num = 0;
+			start_idx = i + 1;
+		}
+		page_addr++;
+		valid_page_num++;
+	}
+
+	if (valid_page_num & 0x7f) {
+		info->num = num;
+		info->start_idx = start_idx;
+		out_size = send_buf_size;
+		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+					     HINIC_MGMT_CMD_MQM_SRCH_GPA_SET,
+					     info, (u16)send_buf_size,
+					     info, &out_size, 0);
+		if (err || !out_size || info->status) {
+			sdk_err(hwdev->dev_hdl, "Set mqm srch gpa failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+				err, info->status, out_size);
+			err = -EFAULT;
+			goto set_page_2_hw_end;
+		}
+	}
+
+set_page_2_hw_end:
+	kfree(send_buf);
+	return err;
+}
+
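+/* In mqm_eqm_set_page_2_hw() above, GPAs are pushed to hardware in
+ * batches of 128 entries; the trailing send after the loop flushes any
+ * remainder (the valid_page_num & 0x7f check).
+ */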
+int mqm_eqm_init(struct hinic_hwdev *hwdev)
+{
+	struct comm_info_eqm_fix info_eqm_fix = {0};
+	u16 len = sizeof(info_eqm_fix);
+	int ret;
+
+	if (hwdev->hwif->attr.func_type != TYPE_PPF)
+		return 0;
+
+	ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MQM_FIX_INFO_GET,
+				     &info_eqm_fix, sizeof(info_eqm_fix),
+				     &info_eqm_fix, &len, 0);
+	if (ret || !len || info_eqm_fix.status) {
+		sdk_err(hwdev->dev_hdl, "Get mqm fix info failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+			ret, info_eqm_fix.status, len);
+		return -EFAULT;
+	}
+	if (!(info_eqm_fix.chunk_num))
+		return 0;
+
+	hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num;
+	hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num;
+	hwdev->mqm_att.page_size = 0;
+	hwdev->mqm_att.page_num = 0;
+
+	hwdev->mqm_att.brm_srch_page_addr =
+		kcalloc(hwdev->mqm_att.chunk_num,
+			sizeof(struct hinic_page_addr), GFP_KERNEL);
+	if (!(hwdev->mqm_att.brm_srch_page_addr)) {
+		sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\n");
+		return -EFAULT;
+	}
+
+	ret = mqm_eqm_alloc_page_mem(hwdev);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\n");
+		goto err_page;
+	}
+
+	ret = mqm_eqm_set_page_2_hw(hwdev);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Set page to hw failed\n");
+		goto err_ecmd;
+	}
+
+	ret = mqm_eqm_set_cfg_2_hw(hwdev, 1);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Set cfg to hw failed\n");
+		goto err_ecmd;
+	}
+
+	return 0;
+
+err_ecmd:
+	mqm_eqm_free_page_mem(hwdev);
+
+err_page:
+	kfree(hwdev->mqm_att.brm_srch_page_addr);
+
+	return ret;
+}
+
+void mqm_eqm_deinit(struct hinic_hwdev *hwdev)
+{
+	int ret;
+
+	if (hwdev->hwif->attr.func_type != TYPE_PPF)
+		return;
+
+	if (!(hwdev->mqm_att.chunk_num))
+		return;
+
+	mqm_eqm_free_page_mem(hwdev);
+	kfree(hwdev->mqm_att.brm_srch_page_addr);
+
+	ret = mqm_eqm_set_cfg_2_hw(hwdev, 0);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip failed, err: %d\n",
+			ret);
+		return;
+	}
+
+	hwdev->mqm_att.chunk_num = 0;
+	hwdev->mqm_att.search_gpa_num = 0;
+	hwdev->mqm_att.page_num = 0;
+	hwdev->mqm_att.page_size = 0;
+}
+
+int hinic_ppf_ext_db_init(void *dev)
+{
+	struct hinic_hwdev *hwdev = dev;
+	int ret;
+
+	if (!dev)
+		return -EINVAL;
+
+	ret = mqm_eqm_init(hwdev);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "MQM eqm init failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_ppf_ext_db_init);
+
+int hinic_ppf_ext_db_deinit(void *dev)
+{
+	struct hinic_hwdev *hwdev = dev;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (hwdev->hwif->attr.func_type != TYPE_PPF)
+		return -EFAULT;
+
+	mqm_eqm_deinit(hwdev);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_ppf_ext_db_deinit);
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+			   u32 page_size)
+{
+	struct hinic_wq_page_size page_size_info = {0};
+	u16 out_size = sizeof(page_size_info);
+	int err;
+
+	page_size_info.func_idx = func_idx;
+	page_size_info.ppf_idx = hinic_ppf_idx(hwdev);
+	page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_PAGESIZE_SET,
+				     &page_size_info, sizeof(page_size_info),
+				     &page_size_info, &out_size, 0);
+	if (err || !out_size || page_size_info.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, page_size_info.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+enum hinic_event_cmd {
+	/* hilink event */
+	HINIC_EVENT_LINK_STATUS_CHANGE = 1,
+	HINIC_EVENT_LINK_ERR,
+	HINIC_EVENT_CABLE_PLUG,
+	HINIC_EVENT_HILINK_INFO,
+	/* reserved for hilink */
+
+	/* driver event, pf & vf communicate */
+	HINIC_EVENT_HEARTBEAT_LOST = 31,
+	HINIC_EVENT_SET_VF_COS,
+
+	/* mgmt event */
+	HINIC_EVENT_MGMT_FAULT = 61,
+	HINIC_EVENT_MGMT_WATCHDOG,
+	HINIC_EVENT_MGMT_FMW_ACT_NTC,
+
HINIC_EVENT_MGMT_RESET, + HINIC_EVENT_MGMT_PCIE_DFX, + HINIC_EVENT_MCTP_HOST_INFO, + HINIC_EVENT_MGMT_HEARTBEAT_EHD, + HINIC_EVENT_SFP_INFO_REPORT, + HINIC_EVENT_SFP_ABS_REPORT, + + HINIC_EVENT_MAX_TYPE, +}; + +struct hinic_event_convert { + u8 mod; + u8 cmd; + + enum hinic_event_cmd event; +}; + +static struct hinic_event_convert __event_convert[] = { + /* hilink event */ + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_LINK_STATUS_REPORT, + .event = HINIC_EVENT_LINK_STATUS_CHANGE, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_LINK_ERR_EVENT, + .event = HINIC_EVENT_LINK_ERR, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_CABLE_PLUG_EVENT, + .event = HINIC_EVENT_CABLE_PLUG, + }, + { + .mod = HINIC_MOD_HILINK, + .cmd = HINIC_HILINK_CMD_GET_LINK_INFO, + .event = HINIC_EVENT_HILINK_INFO, + }, + + /* driver triggered event */ + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_MGMT_CMD_HEART_LOST_REPORT, + .event = HINIC_EVENT_HEARTBEAT_LOST, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_SET_VF_COS, + .event = HINIC_EVENT_SET_VF_COS, + }, + + /* mgmt event */ + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_FAULT_REPORT, + .event = HINIC_EVENT_MGMT_FAULT, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_WATCHDOG_INFO, + .event = HINIC_EVENT_MGMT_WATCHDOG, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_FMW_ACT_NTC, + .event = HINIC_EVENT_MGMT_FMW_ACT_NTC, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_MGMT_RESET, + .event = HINIC_EVENT_MGMT_RESET, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_PCIE_DFX_NTC, + .event = HINIC_EVENT_MGMT_PCIE_DFX, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_GET_HOST_INFO, + .event = HINIC_EVENT_MCTP_HOST_INFO, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_HEARTBEAT_EVENT, + .event = HINIC_EVENT_MGMT_HEARTBEAT_EHD, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_GET_SFP_INFO, + .event = HINIC_EVENT_SFP_INFO_REPORT, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_GET_SFP_ABS, + .event = HINIC_EVENT_SFP_ABS_REPORT, + }, +}; + +static enum hinic_event_cmd __get_event_type(u8 mod, u8 cmd) +{ + int idx; + int arr_size = sizeof(__event_convert) / sizeof(__event_convert[0]); + + for (idx = 0; idx < arr_size; idx++) { + if (__event_convert[idx].mod == mod && + __event_convert[idx].cmd == cmd) + return __event_convert[idx].event; + } + + return HINIC_EVENT_MAX_TYPE; +} + +bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd) +{ + if ((mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_GET_HOST_INFO) || + (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_HEARTBEAT_EVENT)) + return false; + + if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC || + mod == HINIC_MOD_HILINK) + return true; + + return false; +} + +#define FAULT_SHOW_STR_LEN 16 +static void fault_report_show(struct hinic_hwdev *hwdev, + struct hinic_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault"}; + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "flr", "general", "suggestion"}; + char type_str[FAULT_SHOW_STR_LEN + 1]; + char level_str[FAULT_SHOW_STR_LEN + 1]; + u8 level; + u32 pos, base; + struct hinic_fault_event_stats *fault; + u8 node_id; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d.\n", + hinic_global_func_id(hwdev)); + + memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); + if (event->type < 
FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN); + else + strncpy(type_str, "Unknown", FAULT_SHOW_STR_LEN); + + sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type, type_str); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0], event->event.val[1], event->event.val[2], + event->event.val[3]); + + fault = &hwdev->hw_stats.fault_event_stats; + + switch (event->type) { + case FAULT_TYPE_CHIP: + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + + if (level == FAULT_LEVEL_SERIOUS_FLR) { + sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n", + level, level_str, event->event.chip.func_id); + atomic_inc(&fault->fault_type_stat[event->type]); + } + sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); + + node_id = event->event.chip.node_id; + atomic_inc(&fault->chip_fault_stats[node_id][level]); + + base = event->event.chip.node_id * FAULT_LEVEL_MAX * + HINIC_CHIP_ERROR_TYPE_MAX; + pos = base + HINIC_CHIP_ERROR_TYPE_MAX * level + + event->event.chip.err_type; + if (pos < HINIC_CHIP_FAULT_SIZE) + hwdev->chip_fault_stats[pos]++; + break; + case FAULT_TYPE_UCODE: + atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, + event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n", + event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, + event->event.phy_fault.port_id, + event->event.phy_fault.dev_ad, + event->event.phy_fault.csr_addr, + event->event.phy_fault.op_data); + break; + default: + break; + } +} + +static void hinic_refresh_history_fault(struct hinic_hwdev *hwdev, + struct hinic_fault_recover_info *info) +{ + if (!hwdev->history_fault_flag) { + hwdev->history_fault_flag = true; + memcpy(&hwdev->history_fault, info, + sizeof(struct hinic_fault_recover_info)); + } else { + if (hwdev->history_fault.fault_lev >= info->fault_lev) + memcpy(&hwdev->history_fault, info, + sizeof(struct hinic_fault_recover_info)); + } +} + +void hinic_migrate_report(void *dev) +{ + struct hinic_hwdev *hwdev = (struct hinic_hwdev *)dev; + struct hinic_event_info event_info = {0}; + + if (!dev) + return; + + event_info.type = HINIC_EVENT_INIT_MIGRATE_PF; + if (hwdev->event_callback) + 
hwdev->event_callback(hwdev->event_pri_handle, &event_info); +} +EXPORT_SYMBOL(hinic_migrate_report); + +static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_cmd_fault_event *fault_event; + struct hinic_event_info event_info; + struct hinic_fault_info_node *fault_node; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld.\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (hwdev->event_callback) { + event_info.type = HINIC_EVENT_FAULT; + memcpy(&event_info.info, &fault_event->event, + sizeof(event_info.info)); + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) + fault_node->info.fault_src = fault_event->event.type; + else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) + fault_node->info.fault_src = HINIC_FAULT_SRC_HW_PHY_FAULT; + + if (fault_node->info.fault_src == HINIC_FAULT_SRC_HW_MGMT_CHIP) + fault_node->info.fault_lev = + fault_event->event.event.chip.err_level; + else + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + + memcpy(&fault_node->info.fault_data.hw_mgmt, &fault_event->event.event, + sizeof(union hinic_fault_hw_mgmt)); + hinic_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); + + queue_work(hwdev->workq, &hwdev->fault_work); +} + +static void heartbeat_lost_event_handler(struct hinic_hwdev *hwdev) +{ + struct hinic_fault_info_node *fault_node; + struct hinic_event_info event_info = {0}; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + hinic_global_func_id(hwdev)); + + if (hwdev->event_callback) { + event_info.type = HINIC_EVENT_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HINIC_FAULT_SRC_HOST_HEARTBEAT_LOST; + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + hinic_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); + + queue_work(hwdev->workq, &hwdev->fault_work); +} + +static void link_status_event_handler(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_port_link_status *link_status, *ret_link_status; + struct hinic_event_info event_info = {0}; + struct hinic_event_link_info *link_info = &event_info.link_info; + struct nic_port_info port_info = {0}; + int err; + + /* Ignore link change event */ + if (FUNC_FORCE_LINK_UP(hwdev)) + return; + + link_status = buf_in; + sdk_info(hwdev->dev_hdl, "Link status report received, func_id: %d, status: %d\n", + hinic_global_func_id(hwdev), link_status->link); + + if (link_status->link) + atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); + + /* link event reported only after set vport enable */ + if 
(hinic_func_type(hwdev) != TYPE_VF && + link_status->link == HINIC_EVENT_LINK_UP) { + err = hinic_get_port_info(hwdev, &port_info); + if (err) { + nic_warn(hwdev->dev_hdl, "Failed to get port info\n"); + } else { + link_info->valid = 1; + link_info->port_type = port_info.port_type; + link_info->autoneg_cap = port_info.autoneg_cap; + link_info->autoneg_state = port_info.autoneg_state; + link_info->duplex = port_info.duplex; + link_info->speed = port_info.speed; + hinic_refresh_nic_cfg(hwdev, &port_info); + } + } + + if (!hwdev->event_callback) + return; + + event_info.type = link_status->link ? + HINIC_EVENT_LINK_UP : HINIC_EVENT_LINK_DOWN; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + if (hinic_func_type(hwdev) != TYPE_VF) { + hinic_notify_all_vfs_link_changed(hwdev, link_status->link); + ret_link_status = buf_out; + ret_link_status->status = 0; + *out_size = sizeof(*ret_link_status); + } +} + +static void module_status_event(struct hinic_hwdev *hwdev, + enum hinic_event_cmd cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_cable_plug_event *plug_event; + struct hinic_link_err_event *link_err; + struct hinic_event_info event_info = {0}; + struct hinic_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + event_info.type = HINIC_EVENT_PORT_MODULE_EVENT; + + if (cmd == HINIC_EVENT_CABLE_PLUG) { + plug_event = buf_in; + + if (plug_event->port_id < HINIC_MAX_PORT_ID) { + rt_cmd = &chip_node->rt_cmd[plug_event->port_id]; + mutex_lock(&chip_node->sfp_mutex); + rt_cmd->up_send_sfp_abs = false; + rt_cmd->up_send_sfp_info = false; + mutex_unlock(&chip_node->sfp_mutex); + } + + event_info.module_event.type = plug_event->plugged ? + HINIC_PORT_MODULE_CABLE_PLUGGED : + HINIC_PORT_MODULE_CABLE_UNPLUGGED; + + *out_size = sizeof(*plug_event); + plug_event = buf_out; + plug_event->status = 0; + } else if (cmd == HINIC_EVENT_LINK_ERR) { + link_err = buf_in; + + event_info.module_event.type = HINIC_PORT_MODULE_LINK_ERR; + event_info.module_event.err_type = link_err->err_type; + + *out_size = sizeof(*link_err); + link_err = buf_out; + link_err->status = 0; + } else { + sdk_warn(hwdev->dev_hdl, "Unknown module event: %d\n", cmd); + return; + } + + if (!hwdev->event_callback) + return; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); +} + +void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev, + struct hinic_dcb_state *dcb_state) +{ + struct hinic_event_info event_info = {0}; + + sdk_info(hwdev->dev_hdl, "DCB %s, default cos %d, up2cos %d%d%d%d%d%d%d%d\n", + dcb_state->dcb_on ? 
"on" : "off", dcb_state->default_cos, + dcb_state->up_cos[0], dcb_state->up_cos[1], + dcb_state->up_cos[2], dcb_state->up_cos[3], + dcb_state->up_cos[4], dcb_state->up_cos[5], + dcb_state->up_cos[6], dcb_state->up_cos[7]); + + /* Saved in sdk for statefull module */ + hinic_save_dcb_state(hwdev, dcb_state); + + if (!hwdev->event_callback) + return; + + event_info.type = HINIC_EVENT_DCB_STATE_CHANGE; + memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state)); + hwdev->event_callback(hwdev->event_pri_handle, &event_info); +} + +static void sw_watchdog_timeout_info_show(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_mgmt_watchdog_info *watchdog_info; + u32 *dump_addr, *reg, stack_len, i, j; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld.\n", + in_size, sizeof(*watchdog_info)); + return; + } + + watchdog_info = buf_in; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x\n", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x\n", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, + watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n", + watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + for (i = 0; i < 3; i++) { + reg = watchdog_info->reg + (u64)(u32)(4 * i); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *(reg), *(reg + 1), *(reg + 2), *(reg + 3)); + } + + sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]); + + if (watchdog_info->stack_actlen <= 1024) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = 1024; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / 16); i++) { + dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16))); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 1), *(dump_addr + 2), + *(dump_addr + 3)); + } + + for (j = 0; j < ((stack_len % 16) / 4); j++) { + dump_addr = (u32 *)(watchdog_info->data + + ((u64)(u32)(i * 16 + j * 4))); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->status = 0; +} + +static void mgmt_watchdog_timeout_event_handler(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_fault_info_node *fault_node; + + sw_watchdog_timeout_info_show(hwdev, buf_in, in_size, + buf_out, out_size); + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HINIC_FAULT_SRC_MGMT_WATCHDOG; + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + hinic_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); + + queue_work(hwdev->workq, &hwdev->fault_work); +} + +static void port_sfp_info_event(struct hinic_hwdev *hwdev, void *buf_in, + u16 
+				u16 in_size, void *buf_out, u16 *out_size)
+{
+	struct hinic_cmd_get_sfp_qsfp_info *sfp_info = buf_in;
+	struct hinic_port_routine_cmd *rt_cmd;
+	struct card_node *chip_node = hwdev->chip_node;
+
+	if (in_size != sizeof(*sfp_info)) {
+		sdk_err(hwdev->dev_hdl, "Invalid sfp info cmd, length: %d, should be %ld\n",
+			in_size, sizeof(*sfp_info));
+		return;
+	}
+
+	if (sfp_info->port_id >= HINIC_MAX_PORT_ID) {
+		sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n",
+			sfp_info->port_id, HINIC_MAX_PORT_ID - 1);
+		return;
+	}
+
+	rt_cmd = &chip_node->rt_cmd[sfp_info->port_id];
+	mutex_lock(&chip_node->sfp_mutex);
+	memcpy(&rt_cmd->sfp_info, sfp_info, sizeof(rt_cmd->sfp_info));
+	rt_cmd->up_send_sfp_info = true;
+	mutex_unlock(&chip_node->sfp_mutex);
+}
+
+static void port_sfp_abs_event(struct hinic_hwdev *hwdev, void *buf_in,
+			       u16 in_size, void *buf_out, u16 *out_size)
+{
+	struct hinic_cmd_get_light_module_abs *sfp_abs = buf_in;
+	struct hinic_port_routine_cmd *rt_cmd;
+	struct card_node *chip_node = hwdev->chip_node;
+
+	if (in_size != sizeof(*sfp_abs)) {
+		sdk_err(hwdev->dev_hdl, "Invalid sfp absent cmd, length: %d, should be %ld\n",
+			in_size, sizeof(*sfp_abs));
+		return;
+	}
+
+	if (sfp_abs->port_id >= HINIC_MAX_PORT_ID) {
+		sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n",
+			sfp_abs->port_id, HINIC_MAX_PORT_ID - 1);
+		return;
+	}
+
+	rt_cmd = &chip_node->rt_cmd[sfp_abs->port_id];
+	mutex_lock(&chip_node->sfp_mutex);
+	memcpy(&rt_cmd->abs, sfp_abs, sizeof(rt_cmd->abs));
+	rt_cmd->up_send_sfp_abs = true;
+	mutex_unlock(&chip_node->sfp_mutex);
+}
+
+static void mgmt_reset_event_handler(struct hinic_hwdev *hwdev)
+{
+	sdk_info(hwdev->dev_hdl, "Mgmt is reset\n");
+
+	/* mgmt reset only occurs on hot update or mgmt deadloop.
+	 * If mgmt deadloops, it reports an event with mod=0, cmd=0x56
+	 * and reports a fault to the os, so the mgmt reset event
+	 * doesn't need to report a fault itself.
+	 */
+}
+
+static void hinic_fmw_act_ntc_handler(struct hinic_hwdev *hwdev,
+				      void *buf_in, u16 in_size,
+				      void *buf_out, u16 *out_size)
+{
+	struct hinic_event_info event_info = {0};
+	struct hinic_fmw_act_ntc *notice_info;
+
+	if (in_size != sizeof(*notice_info)) {
+		sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld.\n",
+			in_size, sizeof(*notice_info));
+		return;
+	}
+
+	/* mgmt is activated now, restart heartbeat enhanced detection */
+	__set_heartbeat_ehd_detect_delay(hwdev, 0);
+
+	if (!hwdev->event_callback)
+		return;
+
+	event_info.type = HINIC_EVENT_FMW_ACT_NTC;
+	hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+	*out_size = sizeof(*notice_info);
+	notice_info = buf_out;
+	notice_info->status = 0;
+}
+
+static void hinic_pcie_dfx_event_handler(struct hinic_hwdev *hwdev,
+					 void *buf_in, u16 in_size,
+					 void *buf_out, u16 *out_size)
+{
+	struct hinic_pcie_dfx_ntc *notice_info = buf_in;
+	struct hinic_pcie_dfx_info *dfx_info;
+	u16 size = 0;
+	u16 cnt = 0;
+	u32 num = 0;
+	u32 i, j;
+	int err;
+	u32 *reg;
+
+	if (in_size != sizeof(*notice_info)) {
+		sdk_err(hwdev->dev_hdl, "Invalid mgmt pcie dfx notice, length: %d, should be %ld.\n",
+			in_size, sizeof(*notice_info));
+		return;
+	}
+
+	dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL);
+	if (!dfx_info) {
+		sdk_err(hwdev->dev_hdl, "Malloc dfx_info memory failed\n");
+		return;
+	}
+
+	((struct hinic_pcie_dfx_ntc *)buf_out)->status = 0;
+	*out_size = sizeof(*notice_info);
+	num = (u32)(notice_info->len / 1024);
+	sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len);
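+	/* The notice only carries the total data length; the DFX data
+	 * itself is fetched from mgmt below in MAX_PCIE_DFX_BUF_SIZE
+	 * chunks, with 'last' set on the final chunk, and dumped as
+	 * rows of eight 32-bit registers.
+	 */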
sdk_info(hwdev->dev_hdl, "PCIE DFX:\n"); + dfx_info->host_id = 0; + for (i = 0; i < num; i++) { + dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE; + if (i == (num - 1)) + dfx_info->last = 1; + size = sizeof(*dfx_info); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PCIE_DFX_GET, + dfx_info, sizeof(*dfx_info), + dfx_info, &size, 0); + if (err || dfx_info->status || !size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n", + err, dfx_info->status, size); + kfree(dfx_info); + return; + } + + reg = (u32 *)dfx_info->data; + for (j = 0; j < 256; j = j + 8) { + sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + cnt, reg[j], reg[(u32)(j + 1)], + reg[(u32)(j + 2)], reg[(u32)(j + 3)], + reg[(u32)(j + 4)], reg[(u32)(j + 5)], + reg[(u32)(j + 6)], reg[(u32)(j + 7)]); + cnt = cnt + 32; + } + memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE); + } + kfree(dfx_info); +} + +struct hinic_mctp_get_host_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 huawei_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 actual_len; + + u8 data[1024]; +}; + +static void hinic_mctp_get_host_info_event_handler(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_event_info event_info = {0}; + struct hinic_mctp_get_host_info *mctp_out, *mctp_in; + struct hinic_mctp_host_info *host_info; + + if (in_size != sizeof(*mctp_in)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be %ld\n", + in_size, sizeof(*mctp_in)); + return; + } + + *out_size = sizeof(*mctp_out); + mctp_out = buf_out; + mctp_out->status = 0; + + if (!hwdev->event_callback) { + mctp_out->status = HINIC_MGMT_STATUS_ERR_INIT; + return; + } + + mctp_in = buf_in; + host_info = &event_info.mctp_info; + host_info->major_cmd = mctp_in->huawei_cmd; + host_info->sub_cmd = mctp_in->sub_cmd; + host_info->data = mctp_out->data; + + event_info.type = HINIC_EVENT_MCTP_GET_HOST_INFO; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + mctp_out->actual_len = host_info->data_len; +} + +char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", "NO-FEC"}; + +char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = { + "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC", + "Back plane", "BaseT" +}; + +static void __print_cable_info(struct hinic_hwdev *hwdev, + struct hinic_link_info *info) +{ + char tmp_str[CAP_INFO_MAC_LEN] = {0}; + char tmp_vendor[VENDOR_MAX_LEN] = {0}; + char *port_type = "Unknown port type"; + int i; + int err = 0; + + if (info->cable_absent) { + sdk_info(hwdev->dev_hdl, "Cable unpresent\n"); + return; + } + + if (info->port_type < LINK_PORT_MAX_TYPE) + port_type = __hw_to_char_port_type[info->port_type]; + else + sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n", + info->port_type); + if (info->port_type == LINK_PORT_FIBRE) { + if (info->port_sub_type == FIBRE_SUBTYPE_SR) + port_type = "Fibre-SR"; + else if (info->port_sub_type == FIBRE_SUBTYPE_LR) + port_type = "Fibre-LR"; + } + + for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, + sizeof(info->vendor_name)); + err = snprintf(tmp_str, sizeof(tmp_str), + "Vendor: %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, port_type, info->cable_length, + info->cable_max_speed); + if (err <= 0 || err >= CAP_INFO_MAC_LEN) { + sdk_err(hwdev->dev_hdl, 
+ "Failed snprintf cable vendor info, function return(%d) and dest_len(%d)\n", + err, CAP_INFO_MAC_LEN); + return; + } + + if (info->port_type == LINK_PORT_FIBRE || + info->port_type == LINK_PORT_AOC) { + err = snprintf(tmp_str, sizeof(tmp_str), + "%s, %s, Temperature: %u", tmp_str, + info->sfp_type ? "SFP" : "QSFP", + info->cable_temp); + if (err <= 0 || err >= CAP_INFO_MAC_LEN) { + sdk_err(hwdev->dev_hdl, + "Failed snprintf cable Temp, function return(%d) and dest_len(%d)\n", + err, CAP_INFO_MAC_LEN); + return; + } + + if (info->sfp_type) { + err = snprintf(tmp_str, sizeof(tmp_str), + "%s, rx power: %uuW, tx power: %uuW", + tmp_str, info->power[0], info->power[1]); + } else { + err = snprintf(tmp_str, sizeof(tmp_str), + "%s, rx power: %uuw %uuW %uuW %uuW", + tmp_str, info->power[0], info->power[1], + info->power[2], info->power[3]); + } + if (err <= 0 || err >= CAP_INFO_MAC_LEN) { + sdk_err(hwdev->dev_hdl, + "Failed snprintf power info, function return(%d) and dest_len(%d)\n", + err, CAP_INFO_MAC_LEN); + return; + } + } + + sdk_info(hwdev->dev_hdl, "Cable information: %s\n", + tmp_str); +} + +static void __hi30_lane_info(struct hinic_hwdev *hwdev, + struct hilink_lane *lane) +{ + struct hi30_ffe_data *ffe_data; + struct hi30_ctle_data *ctle_data; + + ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe; + ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle; + + sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d\n", + (ffe_data->PRE1 & 0x10) ? "-" : "", + (int)(ffe_data->PRE1 & 0xf), + (ffe_data->PRE2 & 0x10) ? "-" : "", + (int)(ffe_data->PRE2 & 0xf), + (int)ffe_data->MAIN, + (ffe_data->POST1 & 0x10) ? "-" : "", + (int)(ffe_data->POST1 & 0xf), + (ffe_data->POST2 & 0x10) ? "-" : "", + (int)(ffe_data->POST2 & 0xf)); + sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n", + ctle_data->ctlebst[0], ctle_data->ctlebst[1], + ctle_data->ctlebst[2], ctle_data->ctlecmband[0], + ctle_data->ctlecmband[1], ctle_data->ctlecmband[2], + ctle_data->ctlermband[0], ctle_data->ctlermband[1], + ctle_data->ctlermband[2], ctle_data->ctleza[0], + ctle_data->ctleza[1], ctle_data->ctleza[2]); +} + +static void __print_hi30_status(struct hinic_hwdev *hwdev, + struct hinic_link_info *info) +{ + struct hilink_lane *lane; + int lane_used_num = 0, i; + + for (i = 0; i < HILINK_MAX_LANE; i++) { + lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane)); + if (!lane->lane_used) + continue; + + __hi30_lane_info(hwdev, lane); + lane_used_num++; + } + + /* in new firmware, all lane info setted in lane2 */ + if (lane_used_num) + return; + + /* compatible old firmware */ + __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1); +} + +static void __print_link_info(struct hinic_hwdev *hwdev, + struct hinic_link_info *info, + enum hilink_info_print_event type) +{ + char *fec = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = __hw_to_char_fec[info->fec]; + else + sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n", + info->fec); + + if (type == HILINK_EVENT_LINK_UP || !info->an_state) { + sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg %s\n", + info->speed, fec, info->an_state ? "on" : "off"); + } else { + sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n", + info->an_state ? 
"on" : "off"); + } +} + +static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = { + "", "link up", "link down", "cable plugged" +}; + +void print_hilink_info(struct hinic_hwdev *hwdev, + enum hilink_info_print_event type, + struct hinic_link_info *info) +{ + __print_cable_info(hwdev, info); + + __print_link_info(hwdev, info, type); + + __print_hi30_status(hwdev, info); + + if (type == HILINK_EVENT_LINK_UP) + return; + + if (type == HILINK_EVENT_CABLE_PLUGGED) { + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n", + info->alos, info->rx_los); + return; + } + + sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n", + info->pma_status == 1 ? "off" : "on", + info->mac_tx_en ? "enable" : "disable", + info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg, + info->pma_signal_ok_reg, info->rf_lf_status_reg); + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n", + info->alos, info->rx_los, info->pcs_err_blk_cnt_reg, + info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt); +} + +static int hinic_print_hilink_info(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hilink_link_info *hilink_info = buf_in; + struct hinic_link_info *info; + enum hilink_info_print_event type; + + if (in_size != sizeof(*hilink_info)) { + sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be %ld\n", + in_size, sizeof(*hilink_info)); + return -EINVAL; + } + + ((struct hinic_hilink_link_info *)buf_out)->status = 0; + *out_size = sizeof(*hilink_info); + + info = &hilink_info->info; + type = hilink_info->info_type; + + if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) { + sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n", + type); + return -EINVAL; + } + + sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n", + hilink_info_report_type[type]); + + print_hilink_info(hwdev, type, info); + + return 0; +} + +int hinic_hilink_info_show(struct hinic_hwdev *hwdev) +{ + struct hinic_link_info hilink_info = { {0} }; + int err; + + err = hinic_get_hilink_link_info(hwdev, &hilink_info); + if (err) { + if (err == HINIC_MGMT_CMD_UNSUPPORTED) + sdk_info(hwdev->dev_hdl, "Unsupport to get hilink info\n"); + return err; + } + + if (hilink_info.cable_absent) { + sdk_info(hwdev->dev_hdl, "Cable unpresent\n"); + return 0; + } + + sdk_info(hwdev->dev_hdl, "Current state of hilink info:\n"); + print_hilink_info(hwdev, HILINK_EVENT_MAX_TYPE, &hilink_info); + + return 0; +} + +static void mgmt_heartbeat_enhanced_event(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_heartbeat_event *hb_event = buf_in; + struct hinic_heartbeat_event *hb_event_out = buf_out; + struct hinic_hwdev *dev = hwdev; + + if (in_size != sizeof(*hb_event)) { + sdk_err(dev->dev_hdl, "Invalid data size from mgmt for heartbeat event: %d\n", + in_size); + return; + } + + if (dev->heartbeat_ehd.last_heartbeat != hb_event->heart) { + dev->heartbeat_ehd.last_update_jiffies = jiffies; + dev->heartbeat_ehd.last_heartbeat = hb_event->heart; + } + + hb_event_out->drv_heart = HEARTBEAT_DRV_MAGIC_ACK; + + hb_event_out->status = 0; + *out_size = sizeof(*hb_event_out); +} + +/* public process for this event: + * pf link change event + * pf heart lost event ,TBD + * pf fault report event + * vf link change event + * vf heart lost event, TBD + * vf fault 
+static void _event_handler(struct hinic_hwdev *hwdev, enum hinic_event_cmd cmd,
+			   void *buf_in, u16 in_size,
+			   void *buf_out, u16 *out_size)
+{
+	struct hinic_vf_dcb_state *vf_dcb;
+
+	if (!hwdev)
+		return;
+
+	*out_size = 0;
+
+	switch (cmd) {
+	case HINIC_EVENT_LINK_STATUS_CHANGE:
+		link_status_event_handler(hwdev, buf_in, in_size, buf_out,
+					  out_size);
+		break;
+
+	case HINIC_EVENT_CABLE_PLUG:
+	case HINIC_EVENT_LINK_ERR:
+		module_status_event(hwdev, cmd, buf_in, in_size, buf_out,
+				    out_size);
+		break;
+
+	case HINIC_EVENT_HILINK_INFO:
+		hinic_print_hilink_info(hwdev, buf_in, in_size, buf_out,
+					out_size);
+		break;
+
+	case HINIC_EVENT_MGMT_FAULT:
+		fault_event_handler(hwdev, buf_in, in_size, buf_out, out_size);
+		break;
+
+	case HINIC_EVENT_HEARTBEAT_LOST:
+		heartbeat_lost_event_handler(hwdev);
+		break;
+
+	case HINIC_EVENT_SET_VF_COS:
+		vf_dcb = buf_in;
+		if (!vf_dcb)
+			break;
+
+		hinic_notify_dcb_state_event(hwdev, &vf_dcb->state);
+
+		break;
+
+	case HINIC_EVENT_MGMT_WATCHDOG:
+		mgmt_watchdog_timeout_event_handler(hwdev, buf_in, in_size,
+						    buf_out, out_size);
+		break;
+
+	case HINIC_EVENT_MGMT_RESET:
+		mgmt_reset_event_handler(hwdev);
+		break;
+
+	case HINIC_EVENT_MGMT_FMW_ACT_NTC:
+		hinic_fmw_act_ntc_handler(hwdev, buf_in, in_size, buf_out,
+					  out_size);
+
+		break;
+
+	case HINIC_EVENT_MGMT_PCIE_DFX:
+		hinic_pcie_dfx_event_handler(hwdev, buf_in, in_size, buf_out,
+					     out_size);
+		break;
+
+	case HINIC_EVENT_MCTP_HOST_INFO:
+		hinic_mctp_get_host_info_event_handler(hwdev, buf_in, in_size,
+						       buf_out, out_size);
+		break;
+
+	case HINIC_EVENT_MGMT_HEARTBEAT_EHD:
+		mgmt_heartbeat_enhanced_event(hwdev, buf_in, in_size,
+					      buf_out, out_size);
+		break;
+
+	case HINIC_EVENT_SFP_INFO_REPORT:
+		port_sfp_info_event(hwdev, buf_in, in_size, buf_out, out_size);
+		break;
+
+	case HINIC_EVENT_SFP_ABS_REPORT:
+		port_sfp_abs_event(hwdev, buf_in, in_size, buf_out, out_size);
+		break;
+
+	default:
+		sdk_warn(hwdev->dev_hdl, "Unsupported event %d to process\n",
+			 cmd);
+		break;
+	}
+}
+
+/* vf link change event
+ * vf fault report event, TBD
+ */
+static int vf_nic_event_handler(void *hwdev, u8 cmd, void *buf_in,
+				u16 in_size, void *buf_out, u16 *out_size)
+{
+	enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd);
+
+	if (type == HINIC_EVENT_MAX_TYPE) {
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			 "Unsupported L2NIC event: cmd %d\n", cmd);
+		*out_size = 0;
+		return -EINVAL;
+	}
+
+	_event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+
+	return 0;
+}
+
+static int vf_comm_event_handler(void *hwdev, u8 cmd, void *buf_in,
+				 u16 in_size, void *buf_out, u16 *out_size)
+{
+	enum hinic_event_cmd type = __get_event_type(HINIC_MOD_COMM, cmd);
+
+	if (type == HINIC_EVENT_MAX_TYPE) {
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			 "Unsupported COMM event: cmd %d\n", cmd);
+		*out_size = 0;
+		return -EFAULT;
+	}
+
+	_event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+
+	return 0;
+}
+
+/* pf link change event */
+static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+				 void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd);
+
+	if (type == HINIC_EVENT_MAX_TYPE) {
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			 "Unsupported L2NIC event: cmd %d\n", cmd);
+		*out_size = 0;
+		return;
+	}
+
+	_event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+}
+
+static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+				    void *buf_in, u16 in_size,
+				    void *buf_out, u16 *out_size)
+{
+	enum hinic_event_cmd type = __get_event_type(HINIC_MOD_HILINK, cmd);
+
+	if (type == HINIC_EVENT_MAX_TYPE) {
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			 "Unsupported HILINK event: cmd %d\n", cmd);
+		*out_size = 0;
+		return;
+	}
+
+	_event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+}
+
+/* pf fault report event */
+void pf_fault_event_handler(void *hwdev,
+			    void *buf_in, u16 in_size,
+			    void *buf_out, u16 *out_size)
+{
+	_event_handler(hwdev, HINIC_EVENT_MGMT_FAULT, buf_in,
+		       in_size, buf_out, out_size);
+}
+
+void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	_event_handler(hwdev, HINIC_EVENT_MGMT_WATCHDOG, buf_in,
+		       in_size, buf_out, out_size);
+}
+
+void mgmt_fmw_act_event_handler(void *hwdev, void *buf_in, u16 in_size,
+				void *buf_out, u16 *out_size)
+{
+	_event_handler(hwdev, HINIC_EVENT_MGMT_FMW_ACT_NTC, buf_in,
+		       in_size, buf_out, out_size);
+}
+
+void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	_event_handler(hwdev, HINIC_EVENT_MGMT_PCIE_DFX, buf_in,
+		       in_size, buf_out, out_size);
+}
+
+void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	_event_handler(hwdev, HINIC_EVENT_MCTP_HOST_INFO, buf_in,
+		       in_size, buf_out, out_size);
+}
+
+void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size,
+				  void *buf_out, u16 *out_size)
+{
+	_event_handler(hwdev, HINIC_EVENT_MGMT_HEARTBEAT_EHD, buf_in,
+		       in_size, buf_out, out_size);
+}
+
+static void pf_event_register(struct hinic_hwdev *hwdev)
+{
+	if (hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) {
+		hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC,
+					   hwdev, pf_nic_event_handler);
+		hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK,
+					   hwdev,
+					   pf_hilink_event_handler);
+		hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+						  HINIC_MGMT_CMD_FAULT_REPORT,
+						  pf_fault_event_handler);
+
+		hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+						  HINIC_MGMT_CMD_WATCHDOG_INFO,
+						  mgmt_watchdog_event_handler);
+
+		hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+						  HINIC_MGMT_CMD_FMW_ACT_NTC,
+						  mgmt_fmw_act_event_handler);
+		hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+						  HINIC_MGMT_CMD_PCIE_DFX_NTC,
+						  mgmt_pcie_dfx_event_handler);
+		hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+						  HINIC_MGMT_CMD_GET_HOST_INFO,
+						  mgmt_get_mctp_event_handler);
+	}
+}
+
+void hinic_event_register(void *dev, void *pri_handle,
+			  hinic_event_handler callback)
+{
+	struct hinic_hwdev *hwdev = dev;
+
+	if (!dev) {
+		pr_err("Hwdev pointer is NULL for registering event\n");
+		return;
+	}
+
+	hwdev->event_callback = callback;
+	hwdev->event_pri_handle = pri_handle;
+
+	if (hinic_func_type(hwdev) != TYPE_VF) {
+		pf_event_register(hwdev);
+	} else {
+		hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+					  vf_nic_event_handler);
+		hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_COMM,
+					  vf_comm_event_handler);
+	}
+}
+
+void hinic_event_unregister(void *dev)
+{
+	struct hinic_hwdev *hwdev = dev;
+
+	hwdev->event_callback = NULL;
+	hwdev->event_pri_handle = NULL;
+
+	if (hinic_func_type(hwdev) != TYPE_VF) {
+		hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC);
+		hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK);
+		hinic_comm_recv_up_self_cmd_unreg(hwdev,
+						  HINIC_MGMT_CMD_FAULT_REPORT);
+		hinic_comm_recv_up_self_cmd_unreg(hwdev,
+						  HINIC_MGMT_CMD_WATCHDOG_INFO);
+		hinic_comm_recv_up_self_cmd_unreg(hwdev,
+						  HINIC_MGMT_CMD_FMW_ACT_NTC);
+		hinic_comm_recv_up_self_cmd_unreg(hwdev,
+						  HINIC_MGMT_CMD_PCIE_DFX_NTC);
+		hinic_comm_recv_up_self_cmd_unreg(hwdev,
+						  HINIC_MGMT_CMD_GET_HOST_INFO);
+	} else {
+		hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+		hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_COMM);
+	}
+}
+
+/* 0 - heartbeat lost, 1 - normal */
+static u8 hinic_get_heartbeat_status(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	u32 attr1;
+
+	/* surprise remove should be set 1 */
+	if (!hinic_get_chip_present_flag(hwdev))
+		return 1;
+
+	attr1 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR1_ADDR);
+	if (attr1 == HINIC_PCIE_LINK_DOWN) {
+		sdk_err(hwdev->dev_hdl, "Detected pcie link down\n");
+		hinic_set_chip_absent(hwdev);
+		hinic_force_complete_all(hwdev);
+		/* should notify chiperr to pangea when detecting pcie link down */
+		return 1;
+	}
+
+	return HINIC_AF1_GET(attr1, MGMT_INIT_STATUS);
+}
+
+static void hinic_heartbeat_event_handler(struct work_struct *work)
+{
+	struct hinic_hwdev *hwdev =
+		container_of(work, struct hinic_hwdev, timer_work);
+	u16 out = 0;
+
+	_event_handler(hwdev, HINIC_EVENT_HEARTBEAT_LOST,
+		       NULL, 0, &out, &out);
+}
+
+static bool __detect_heartbeat_ehd_lost(struct hinic_hwdev *hwdev)
+{
+	struct hinic_heartbeat_enhanced *hb_ehd = &hwdev->heartbeat_ehd;
+	u64 update_time;
+	bool hb_ehd_lost = false;
+
+	if (!hb_ehd->en)
+		return false;
+
+	if (time_after(jiffies, hb_ehd->start_detect_jiffies)) {
+		update_time = jiffies_to_msecs(jiffies -
+					       hb_ehd->last_update_jiffies);
+		if (update_time > HINIC_HEARBEAT_ENHANCED_LOST) {
+			sdk_warn(hwdev->dev_hdl, "Heartbeat enhanced lost for %d milliseconds\n",
+				 (u32)update_time);
+			hb_ehd_lost = true;
+		}
+	} else {
+		/* mgmt may not report the heartbeat enhanced event and won't
+		 * update last_update_jiffies
+		 */
+		hb_ehd->last_update_jiffies = jiffies;
+	}
+
+	return hb_ehd_lost;
+}
+
+#ifdef HAVE_TIMER_SETUP
+static void hinic_heartbeat_timer_handler(struct timer_list *t)
+#else
+static void hinic_heartbeat_timer_handler(unsigned long data)
+#endif
+{
+#ifdef HAVE_TIMER_SETUP
+	struct hinic_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer);
+#else
+	struct hinic_hwdev *hwdev = (struct hinic_hwdev *)data;
+#endif
+
+	if (__detect_heartbeat_ehd_lost(hwdev) ||
+	    !hinic_get_heartbeat_status(hwdev)) {
+		hwdev->heartbeat_lost = 1;
+		stop_timer(&hwdev->heartbeat_timer);
+		queue_work(hwdev->workq, &hwdev->timer_work);
+	} else {
+		mod_timer(&hwdev->heartbeat_timer,
+			  jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_PERIOD));
+	}
+}
+
+void hinic_init_heartbeat(struct hinic_hwdev *hwdev)
+{
+#ifdef HAVE_TIMER_SETUP
+	timer_setup(&hwdev->heartbeat_timer, hinic_heartbeat_timer_handler, 0);
+#else
+	initialize_timer(hwdev->adapter_hdl, &hwdev->heartbeat_timer);
+	hwdev->heartbeat_timer.data = (unsigned long)hwdev;
+	hwdev->heartbeat_timer.function = hinic_heartbeat_timer_handler;
+#endif
+	hwdev->heartbeat_timer.expires =
+		jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_START_EXPIRE);
+
+	add_to_timer(&hwdev->heartbeat_timer, HINIC_HEARTBEAT_PERIOD);
+
+	INIT_WORK(&hwdev->timer_work, hinic_heartbeat_event_handler);
+}
+
+void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev)
+{
+	destroy_work(&hwdev->timer_work);
+	stop_timer(&hwdev->heartbeat_timer);
+	delete_timer(&hwdev->heartbeat_timer);
+}
+
+u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data)
+{
+	struct hinic_hwdev *hwdev = (struct hinic_hwdev *)handle;
+	u8 event_level = FAULT_LEVEL_MAX;
+
+	switch (event) {
+	case HINIC_INTERNAL_TSO_FATAL_ERROR:
+	case
HINIC_INTERNAL_LRO_FATAL_ERROR: + case HINIC_INTERNAL_TX_FATAL_ERROR: + case HINIC_INTERNAL_RX_FATAL_ERROR: + case HINIC_INTERNAL_OTHER_FATAL_ERROR: + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]); + sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n", + event, data); + event_level = FAULT_LEVEL_FATAL; + break; + default: + sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process.\n", + event); + } + + return event_level; +} + +struct hinic_fast_recycled_mode { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 fast_recycled_mode; /* 1: enable fast recycle, available + * in dpdk mode, + * 0: normal mode, available in kernel + * nic mode + */ + u8 rsvd1; +}; + +int hinic_enable_fast_recycle(void *hwdev, bool enable) +{ + struct hinic_fast_recycled_mode fast_recycled_mode = {0}; + u16 out_size = sizeof(fast_recycled_mode); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &fast_recycled_mode.func_id); + if (err) + return err; + + fast_recycled_mode.fast_recycled_mode = enable ? 1 : 0; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, + &fast_recycled_mode, + sizeof(fast_recycled_mode), + &fast_recycled_mode, &out_size, 0); + if (err || fast_recycled_mode.status || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set recycle mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, fast_recycled_mode.status, out_size); + return -EFAULT; + } + + return 0; +} + +void hinic_set_pcie_order_cfg(void *handle) +{ + struct hinic_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return; + + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_GLB_DMA_SO_RO_REPLACE_ADDR); + + if (HINIC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) { + val = HINIC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG); + val |= HINIC_GLB_DMA_SO_R0_SET(HINIC_DISABLE_ORDER, SO_RO_CFG); + hinic_hwif_write_reg(hwdev->hwif, + HINIC_GLB_DMA_SO_RO_REPLACE_ADDR, val); + } +} + +int _set_led_status(struct hinic_hwdev *hwdev, u8 port, + enum hinic_led_type type, + enum hinic_led_mode mode, u8 reset) +{ + struct hinic_led_info led_info = {0}; + u16 out_size = sizeof(led_info); + int err; + + led_info.port = port; + led_info.reset = reset; + + led_info.type = type; + led_info.mode = mode; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SET_LED_STATUS, + &led_info, sizeof(led_info), + &led_info, &out_size, 0); + if (err || led_info.status || !out_size) { + sdk_err(hwdev->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", + err, led_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type, + enum hinic_led_mode mode) +{ + int err; + + if (!hwdev) + return -EFAULT; + + err = _set_led_status(hwdev, port, type, mode, 0); + if (err) + return err; + + return 0; +} + +int hinic_reset_led_status(void *hwdev, u8 port) +{ + int err; + + if (!hwdev) + return -EFAULT; + + err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID, + HINIC_LED_MODE_INVALID, 1); + if (err) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to reset led status\n"); + return err; + } + + return 0; +} + +int hinic_get_board_info(void *hwdev, struct hinic_board_info *info) +{ + struct hinic_comm_board_info board_info = {0}; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_BOARD_INFO, + 
&board_info, sizeof(board_info),
+				     &board_info, &out_size, 0);
+	if (err || board_info.status || !out_size) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, board_info.status, out_size);
+		return -EFAULT;
+	}
+
+	memcpy(info, &board_info.info, sizeof(*info));
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_board_info);
+
+int hinic_get_phy_init_status(void *hwdev,
+			      enum phy_init_status_type *init_status)
+{
+	struct hinic_phy_init_status phy_info = {0};
+	u16 out_size = sizeof(phy_info);
+	int err;
+
+	if (!hwdev || !init_status)
+		return -EINVAL;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_GET_PHY_INIT_STATUS,
+				     &phy_info, sizeof(phy_info),
+				     &phy_info, &out_size, 0);
+	if ((phy_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     phy_info.status) || err || !out_size) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, phy_info.status, out_size);
+		return -EFAULT;
+	}
+
+	*init_status = phy_info.init_status;
+
+	return phy_info.status;
+}
+
+int hinic_phy_init_status_judge(void *hwdev)
+{
+	enum phy_init_status_type init_status;
+	int ret;
+	unsigned long end;
+
+	/* It's not a phy, so there is no phy status to check */
+	if (!HINIC_BOARD_IS_PHY((struct hinic_hwdev *)hwdev))
+		return 0;
+
+	end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT);
+	do {
+		ret = hinic_get_phy_init_status(hwdev, &init_status);
+		if (ret == HINIC_MGMT_CMD_UNSUPPORTED)
+			return 0;
+		else if (ret)
+			return -EFAULT;
+
+		switch (init_status) {
+		case PHY_INIT_SUCCESS:
+			sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+				 "Phy init succeeded\n");
+			return 0;
+		case PHY_NONSUPPORT:
+			sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+				 "Phy init not supported\n");
+			return 0;
+		case PHY_INIT_FAIL:
+			sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+				"Phy init failed\n");
+			return -EIO;
+		case PHY_INIT_DOING:
+			msleep(250);
+			break;
+		default:
+			sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+				"Phy init status is invalid, init_status: %d\n",
+				init_status);
+			return -EINVAL;
+		}
+	} while (time_before(jiffies, end));
+
+	sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+		"Phy init timed out\n");
+
+	return -ETIMEDOUT;
+}
+
+static void hinic_set_mgmt_channel_status(void *handle, bool state)
+{
+	struct hinic_hwdev *hwdev = handle;
+	u32 val;
+
+	if (!hwdev || hinic_func_type(hwdev) == TYPE_VF ||
+	    !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+		return;
+
+	val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR);
+	val = HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+	val |= HINIC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS);
+
+	hinic_hwif_write_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR, val);
+}
+
+int hinic_get_mgmt_channel_status(void *handle)
+{
+	struct hinic_hwdev *hwdev = handle;
+	u32 val;
+
+	if (!hwdev)
+		return true;
+
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+		return false;
+
+	val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR);
+
+	return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+}
+
+static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_update_active *active_info = buf_out;
+
+	if (!active_info || hinic_func_type(hwdev) == TYPE_VF ||
+	    !(dev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+		return;
+
+	if ((!active_info->status) &&
+	    (active_info->update_status & HINIC_ACTIVE_STATUS_MASK)) {
+		active_info->update_status &= HINIC_ACTIVE_STATUS_CLEAR;
+		return;
+	}
+
+	hinic_set_mgmt_channel_status(hwdev, false);
+}
+
+int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_bios_cfg_cmd cfg = {0};
+	u16 out_size = sizeof(cfg);
+	u16 func_id;
+	int err;
+
+	if (!hwdev || !pf_bw_limit)
+		return -EINVAL;
+
+	if (HINIC_FUNC_TYPE(dev) == TYPE_VF ||
+	    !FUNC_SUPPORT_RATE_LIMIT(hwdev))
+		return 0;
+
+	err = hinic_global_func_id_get(hwdev, &func_id);
+	if (err)
+		return err;
+
+	cfg.func_valid = 1;
+	cfg.func_idx = (u8)func_id;
+
+	cfg.op_code = HINIC_BIOS_CFG_GET | HINIC_BIOS_CFG_PF_BW_LIMIT;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT,
+				     &cfg, sizeof(cfg),
+				     &cfg, &out_size, 0);
+	if (err || cfg.status || !out_size) {
+		sdk_err(dev->dev_hdl, "Failed to get bios pf bandwidth limit, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, cfg.status, out_size);
+		return -EIO;
+	}
+
+	/* Check whether the data is valid */
+	if (cfg.signature != 0x19e51822) {
+		sdk_err(dev->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n",
+			cfg.signature);
+		return -EINVAL;
+	}
+
+	if (cfg.pf_bw_limit > 100) {
+		sdk_err(dev->dev_hdl, "Invalid bios cfg pf bandwidth limit: %d\n",
+			cfg.pf_bw_limit);
+		return -EINVAL;
+	}
+
+	*pf_bw_limit = cfg.pf_bw_limit;
+
+	return 0;
+}
+
+bool hinic_get_ppf_status(void *hwdev)
+{
+	struct hinic_ppf_state ppf_state = {0};
+	struct hinic_hwdev *dev = hwdev;
+	struct card_node *chip_node;
+	u16 out_size = sizeof(ppf_state);
+	int err;
+
+	if (!hwdev)
+		return false;
+
+	chip_node = (struct card_node *)dev->chip_node;
+
+	if (!HINIC_IS_VF(dev))
+		return chip_node->ppf_state;
+
+	err = hinic_mbox_to_pf(hwdev, HINIC_MOD_COMM,
+			       HINIC_MGMT_CMD_GET_PPF_STATE,
+			       &ppf_state, sizeof(ppf_state),
+			       &ppf_state, &out_size, 0);
+	if (err || ppf_state.status || !out_size) {
+		sdk_err(dev->dev_hdl, "Failed to get ppf state, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ppf_state.status, out_size);
+		return false;
+	}
+
+	return (bool)ppf_state.ppf_state;
+}
+
+#define HINIC_RED_REG_TIME_OUT 3000
+
+int hinic_read_reg(void *hwdev, u32 reg_addr, u32 *val)
+{
+	struct hinic_reg_info reg_info = {0};
+	u16 out_size = sizeof(reg_info);
+	int err;
+
+	if (!hwdev || !val)
+		return -EINVAL;
+
+	reg_info.reg_addr = reg_addr;
+	reg_info.val_length = sizeof(u32);
+
+	err = hinic_pf_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+					HINIC_MGMT_CMD_REG_READ,
+					&reg_info, sizeof(reg_info),
+					&reg_info, &out_size,
+					HINIC_RED_REG_TIME_OUT);
+	if (reg_info.status || err || !out_size) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to read reg, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, reg_info.status, out_size);
+		return -EFAULT;
+	}
+
+	*val = reg_info.data[0];
+
+	return 0;
+}
+
+static void hinic_exec_recover_cb(struct hinic_hwdev *hwdev,
+				  struct hinic_fault_recover_info *info)
+{
+	sdk_info(hwdev->dev_hdl, "Enter %s\n", __func__);
+
+	if (!hinic_get_chip_present_flag(hwdev)) {
+		sdk_err(hwdev->dev_hdl, "Device surprise removed, abort recover\n");
+		return;
+	}
+
+	if (info->fault_lev >= FAULT_LEVEL_MAX) {
+		sdk_err(hwdev->dev_hdl, "Invalid fault level\n");
+		return;
+	}
+
+	down(&hwdev->recover_sem);
+	if (hwdev->recover_cb) {
+		if (info->fault_lev <= FAULT_LEVEL_SERIOUS_FLR)
+			hinic_set_fast_recycle_status(hwdev);
+
+		hwdev->recover_cb(hwdev->recover_pri_hd, *info);
+	}
+	up(&hwdev->recover_sem);
+}
+
+void hinic_fault_work_handler(struct work_struct *work)
+{
+	struct hinic_hwdev *hwdev =
+		container_of(work, struct hinic_hwdev, fault_work);
+
+	down(&hwdev->fault_list_sem);
+	up(&hwdev->fault_list_sem);
+}
+
+void hinic_swe_fault_handler(struct hinic_hwdev *hwdev, u8 level,
+			     u8 event, u64 val)
+{
+	struct hinic_fault_info_node *fault_node;
+
+	if (level < FAULT_LEVEL_MAX) {
+		fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+		if (!fault_node) {
+			sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+			return;
+		}
+
+		fault_node->info.fault_src = HINIC_FAULT_SRC_SW_MGMT_UCODE;
+		fault_node->info.fault_lev = level;
+		fault_node->info.fault_data.sw_mgmt.event_id = event;
+		fault_node->info.fault_data.sw_mgmt.event_data = val;
+		hinic_refresh_history_fault(hwdev, &fault_node->info);
+
+		down(&hwdev->fault_list_sem);
+		kfree(fault_node);
+		up(&hwdev->fault_list_sem);
+
+		queue_work(hwdev->workq, &hwdev->fault_work);
+	}
+}
+
+int hinic_register_fault_recover(void *hwdev, void *pri_handle,
+				 hinic_fault_recover_handler cb)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	if (!hwdev || !pri_handle || !cb) {
+		pr_err("Invalid input parameters when registering fault recover handler\n");
+		return -EINVAL;
+	}
+
+	down(&dev->recover_sem);
+	dev->recover_pri_hd = pri_handle;
+	dev->recover_cb = cb;
+	up(&dev->recover_sem);
+
+	if (dev->history_fault_flag)
+		hinic_exec_recover_cb(dev, &dev->history_fault);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_register_fault_recover);
+
+int hinic_unregister_fault_recover(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	if (!hwdev) {
+		pr_err("Invalid input parameters when unregistering fault recover handler\n");
+		return -EINVAL;
+	}
+
+	down(&dev->recover_sem);
+	dev->recover_pri_hd = NULL;
+	dev->recover_cb = NULL;
+	up(&dev->recover_sem);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_unregister_fault_recover);
+
+void hinic_set_func_deinit_flag(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	set_bit(HINIC_HWDEV_FUNC_DEINIT, &dev->func_state);
+}
+
+int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos)
+{
+	struct hinic_hw_pf_infos_cmd pf_infos = {0};
+	u16 out_size = sizeof(pf_infos);
+	int err;
+
+	if (!hwdev || !infos)
+		return -EINVAL;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_GET_HW_PF_INFOS,
+				     &pf_infos, sizeof(pf_infos),
+				     &pf_infos, &out_size, 0);
+	if ((pf_infos.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     pf_infos.status) || err || !out_size) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get hw pf information, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, pf_infos.status, out_size);
+		return -EFAULT;
+	}
+
+	if (!pf_infos.status)
+		memcpy(infos, &pf_infos.infos, sizeof(*infos));
+
+	return pf_infos.status;
+}
+EXPORT_SYMBOL(hinic_get_hw_pf_infos);
+
+int hinic_set_ip_check(void *hwdev, bool ip_check_ctl)
+{
+	u32 val = 0;
+	int ret;
+	int i;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) == TYPE_VF)
+		return 0;
+
+	for (i = 0; i <= HINIC_IPSU_CHANNEL_NUM; i++) {
+		ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
+					 (HINIC_IPSU_CHANNEL0_ADDR +
+					  i * HINIC_IPSU_CHANNEL_OFFSET), &val);
+		if (ret)
+			return ret;
+
+		val = be32_to_cpu(val);
+		if (ip_check_ctl)
+			val |= HINIC_IPSU_DIP_SIP_MASK;
+		else
+			val &= (~HINIC_IPSU_DIP_SIP_MASK);
+
+		val = cpu_to_be32(val);
+		ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU,
+					 (HINIC_IPSU_CHANNEL0_ADDR +
+					  i * HINIC_IPSU_CHANNEL_OFFSET), val);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+int hinic_get_card_present_state(void *hwdev, bool *card_present_state)
+{
+	u32 addr, attr1;
+
+	if (!hwdev || !card_present_state)
+		return -EINVAL;
+
+	addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+	attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr);
+	if (attr1 == HINIC_PCIE_LINK_DOWN) {
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "Card is not present\n");
+		*card_present_state = false;
+	} else {
+		*card_present_state = true;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_card_present_state);
+
+void hinic_disable_mgmt_msg_report(void *hwdev)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+
+	hinic_set_pf_status(hw_dev->hwif, HINIC_PF_STATUS_INIT);
+}
+
+int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port)
+{
+	u32 val = 0;
+	int ret;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) == TYPE_VF)
+		return 0;
+
+	ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
+				 HINIC_IPSURX_VXLAN_DPORT_ADDR, &val);
+	if (ret)
+		return ret;
+
+	nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+		 "Update VxLAN UDP dest port: cur port: %u, new port: %u\n",
+		 be32_to_cpu(val), udp_port);
+
+	if (be32_to_cpu(val) == udp_port)
+		return 0;
+
+	udp_port = cpu_to_be32(udp_port);
+	ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU,
+				 HINIC_IPSURX_VXLAN_DPORT_ADDR, udp_port);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
new file mode 100644
index 000000000000..a8d3434a5d1d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ * + */ + +#ifndef HINIC_HWDEV_H_ +#define HINIC_HWDEV_H_ + +#include "hinic_port_cmd.h" + +/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */ +#define HINIC_DEFAULT_WQ_PAGE_SIZE 0x40000 +#define HINIC_HW_WQ_PAGE_SIZE 0x1000 + +#define HINIC_MSG_TO_MGMT_MAX_LEN 2016 + +#define HINIC_MGMT_STATUS_ERR_OK 0 /* Ok */ +#define HINIC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */ +#define HINIC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */ +#define HINIC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */ +#define HINIC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */ +#define HINIC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */ +#define HINIC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */ +#define HINIC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */ +#define HINIC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */ +#define HINIC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */ +#define HINIC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */ +#define HINIC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */ +#define HINIC_MGMT_STATUS_ERR_FULL 12 /* Table full */ +#define HINIC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */ +#define HINIC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */ +#define HINIC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */ +#define HINIC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */ +#define HINIC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */ +#define HINIC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */ +#define HINIC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */ +#define HINIC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */ +#define HINIC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */ +#define HINIC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported */ + +#define HINIC_CHIP_PRESENT 1 +#define HINIC_CHIP_ABSENT 0 + +struct cfg_mgmt_info; +struct rdma_comp_resource; + +struct hinic_hwif; +struct hinic_nic_io; +struct hinic_wqs; +struct hinic_aeqs; +struct hinic_ceqs; +struct hinic_mbox_func_to_func; +struct hinic_msg_pf_to_mgmt; +struct hinic_cmdqs; +struct hinic_multi_host_mgmt; + +struct hinic_root_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +struct hinic_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + struct hinic_page_addr *brm_srch_page_addr; +}; + +#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF + +#define HINIC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000) +#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE + +#define HINIC_HW_WQ_NAME "hinic_hardware" +#define HINIC_HEARTBEAT_PERIOD 1000 +#define HINIC_HEARTBEAT_START_EXPIRE 5000 + +#define HINIC_CHIP_ERROR_TYPE_MAX 1024 +#define HINIC_CHIP_FAULT_SIZE \ + (HINIC_NODE_ID_MAX * FAULT_LEVEL_MAX * HINIC_CHIP_ERROR_TYPE_MAX) + +enum hinic_node_id { + HINIC_NODE_ID_IPSU = 4, + HINIC_NODE_ID_MGMT_HOST = 21, /* Host CPU send API to uP */ + HINIC_NODE_ID_MAX = 22 +}; + +#define HINIC_HWDEV_INIT_MODES_MASK ((1UL << HINIC_HWDEV_ALL_INITED) - 1) + +enum hinic_hwdev_func_state { + HINIC_HWDEV_FUNC_INITED = HINIC_HWDEV_ALL_INITED, + + HINIC_HWDEV_FUNC_DEINIT, + + HINIC_HWDEV_STATE_BUSY = 31, +}; + +struct hinic_cqm_stats { + atomic_t cqm_cmd_alloc_cnt; + atomic_t cqm_cmd_free_cnt; + atomic_t cqm_send_cmd_box_cnt; + atomic_t cqm_send_cmd_imm_cnt; + atomic_t cqm_db_addr_alloc_cnt; + atomic_t 
cqm_db_addr_free_cnt; + + atomic_t cqm_fc_srq_create_cnt; + atomic_t cqm_srq_create_cnt; + atomic_t cqm_rq_create_cnt; + + atomic_t cqm_qpc_mpt_create_cnt; + atomic_t cqm_nonrdma_queue_create_cnt; + atomic_t cqm_rdma_queue_create_cnt; + atomic_t cqm_rdma_table_create_cnt; + + atomic_t cqm_qpc_mpt_delete_cnt; + atomic_t cqm_nonrdma_queue_delete_cnt; + atomic_t cqm_rdma_queue_delete_cnt; + atomic_t cqm_rdma_table_delete_cnt; + + atomic_t cqm_func_timer_clear_cnt; + atomic_t cqm_func_hash_buf_clear_cnt; + + atomic_t cqm_scq_callback_cnt; + atomic_t cqm_ecq_callback_cnt; + atomic_t cqm_nocq_callback_cnt; + atomic_t cqm_aeq_callback_cnt[112]; +}; + +struct hinic_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct hinic_fault_event_stats { + atomic_t chip_fault_stats[HINIC_NODE_ID_MAX][FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct hinic_hw_stats { + atomic_t heart_lost_stats; + atomic_t nic_ucode_event_stats[HINIC_NIC_FATAL_ERROR_MAX]; + struct hinic_cqm_stats cqm_stats; + struct hinic_link_event_stats link_event_stats; + struct hinic_fault_event_stats fault_event_stats; +}; + +struct hinic_fault_info_node { + struct list_head list; + struct hinic_hwdev *hwdev; + struct hinic_fault_recover_info info; +}; + +enum heartbeat_support_state { + HEARTBEAT_NOT_SUPPORT = 0, + HEARTBEAT_SUPPORT, +}; + +/* 25s for max 5 heartbeat event lost */ +#define HINIC_HEARBEAT_ENHANCED_LOST 25000 +struct hinic_heartbeat_enhanced { + bool en; /* enable enhanced heartbeat or not */ + + unsigned long last_update_jiffies; + u32 last_heartbeat; + + unsigned long start_detect_jiffies; +}; + +#define HINIC_NORMAL_HOST_CAP (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \ + HINIC_FUNC_SUPP_RATE_LIMIT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \ + HINIC_FUNC_SUPP_CHANGE_MAC | \ + HINIC_FUNC_SUPP_ENCAP_TSO_CSUM) +#define HINIC_MULTI_BM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \ + HINIC_FUNC_SUPP_CHANGE_MAC) +#define HINIC_MULTI_BM_SLAVE (HINIC_FUNC_SRIOV_EN_DFLT | \ + HINIC_FUNC_SRIOV_NUM_FIX | \ + HINIC_FUNC_FORCE_LINK_UP | \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP) +#define HINIC_MULTI_VM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \ + HINIC_FUNC_SUPP_CHANGE_MAC) +#define HINIC_MULTI_VM_SLAVE (HINIC_FUNC_MGMT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SRIOV_EN_DFLT | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_CHANGE_MAC | \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP) + +#define MULTI_HOST_CHIP_MODE_SHIFT 0 +#define MULTI_HOST_MASTER_MBX_STS_SHIFT 0x4 +#define MULTI_HOST_PRIV_DATA_SHIFT 0x8 + +#define MULTI_HOST_CHIP_MODE_MASK 0xF +#define MULTI_HOST_MASTER_MBX_STS_MASK 0xF +#define MULTI_HOST_PRIV_DATA_MASK 0xFFFF + +#define MULTI_HOST_REG_SET(val, member) \ + (((val) & MULTI_HOST_##member##_MASK) \ + << MULTI_HOST_##member##_SHIFT) +#define MULTI_HOST_REG_GET(val, member) \ + (((val) >> MULTI_HOST_##member##_SHIFT) \ + & MULTI_HOST_##member##_MASK) +#define MULTI_HOST_REG_CLEAR(val, member) \ + ((val) & (~(MULTI_HOST_##member##_MASK \ + << MULTI_HOST_##member##_SHIFT))) + +#define HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE 12 + +/* new version of roce qp not limited by power of 2 */ +#define HINIC_CMD_VER_ROCE_QP 1 +/* new version for add function id in multi-host */ +#define HINIC_CMD_VER_FUNC_ID 2 + 
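+/* Illustrative sketch (not driver code): packing and unpacking the
+ * multi-host control word with the MULTI_HOST_REG_* helpers above.
+ * The mode value 0x1 and the private data 0xABCD are hypothetical.
+ *
+ *	u32 reg = 0;
+ *
+ *	reg |= MULTI_HOST_REG_SET(0x1, CHIP_MODE);	(bits 0-3)
+ *	reg |= MULTI_HOST_REG_SET(0xABCD, PRIV_DATA);	(bits 8-23)
+ *	if (MULTI_HOST_REG_GET(reg, CHIP_MODE) == 0x1)
+ *		reg = MULTI_HOST_REG_CLEAR(reg, PRIV_DATA);
+ */
+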
+struct hinic_hwdev {
+	void *adapter_hdl;	/* pointer to hinic_pcidev or NDIS_Adapter */
+	void *pcidev_hdl;	/* pointer to pcidev or Handler */
+	void *dev_hdl;		/* pointer to pcidev->dev or Handler, for
+				 * sdk_err() or dma_alloc()
+				 */
+	u32 wq_page_size;
+
+	void *cqm_hdl;
+	void *chip_node;
+
+	struct hinic_hwif *hwif;	/* include void __iomem *bar */
+	struct hinic_nic_io *nic_io;
+	struct cfg_mgmt_info *cfg_mgmt;
+	struct rdma_comp_resource *rdma_comp_res;
+	struct hinic_wqs *wqs;		/* for FC slq */
+	struct mqm_addr_trans_tbl_info mqm_att;
+
+	struct hinic_aeqs *aeqs;
+	struct hinic_ceqs *ceqs;
+
+	struct hinic_mbox_func_to_func *func_to_func;
+
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+
+	struct hinic_cmdqs *cmdqs;
+
+	struct hinic_page_addr page_pa0;
+	struct hinic_page_addr page_pa1;
+
+	hinic_event_handler event_callback;
+	void *event_pri_handle;
+
+	struct semaphore recover_sem;
+	bool collect_log_flag;
+	bool history_fault_flag;
+	struct hinic_fault_recover_info history_fault;
+	void *recover_pri_hd;
+	hinic_fault_recover_handler recover_cb;
+
+	struct work_struct fault_work;
+	struct semaphore fault_list_sem;
+
+	struct work_struct timer_work;
+	struct workqueue_struct *workq;
+	struct timer_list heartbeat_timer;
+	/* true means heartbeat lost, false means heartbeat restored */
+	u32 heartbeat_lost;
+	int chip_present_flag;
+	struct hinic_heartbeat_enhanced heartbeat_ehd;
+	struct hinic_hw_stats hw_stats;
+	u8 *chip_fault_stats;
+
+	u32 statufull_ref_cnt;
+	ulong func_state;
+
+	u64 feature_cap;	/* enum hinic_func_cap */
+	enum hinic_func_mode func_mode;
+
+	struct hinic_multi_host_mgmt *mhost_mgmt;
+
+	/* In bmgw x86 host, the driver can't send messages to the mgmt cpu
+	 * directly; it needs to transmit them through the PPF mailbox to the
+	 * bmgw ARM host.
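+	 * The ppf_sem field below presumably serializes users of this
+	 * forwarding path, and ppf_hwdev presumably caches the PPF device
+	 * that performs it.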
+ */ + struct semaphore ppf_sem; + void *ppf_hwdev; + + struct semaphore func_sem; + int func_ref; + struct hinic_board_info board_info; +#define MGMT_VERSION_MAX_LEN 32 + u8 mgmt_ver[MGMT_VERSION_MAX_LEN]; + u64 fw_support_func_flag; +}; + +int hinic_init_comm_ch(struct hinic_hwdev *hwdev); + +void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev); + +int hinic_ppf_ext_db_init(void *dev); + +int hinic_ppf_ext_db_deinit(void *dev); + +enum hinic_set_arm_type { + HINIC_SET_ARM_CMDQ, + HINIC_SET_ARM_SQ, + HINIC_SET_ARM_TYPE_NUM, +}; + +int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id); + +void hinic_set_chip_present(void *hwdev); +void hinic_force_complete_all(void *hwdev); + +void hinic_init_heartbeat(struct hinic_hwdev *hwdev); +void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev); + +u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data); + + +int hinic_enable_fast_recycle(void *hwdev, bool enable); +int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag); + +enum l2nic_resource_type { + RES_TYPE_NIC_FUNC = 0, + RES_TYPE_FLUSH_BIT, + RES_TYPE_PF_BW_CFG, + RES_TYPE_MQM, + RES_TYPE_SMF, + RES_TYPE_CMDQ_ROOTCTX, + RES_TYPE_SQ_CI_TABLE, + RES_TYPE_CEQ, + RES_TYPE_MBOX, + RES_TYPE_AEQ, +}; + +void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev, + struct hinic_dcb_state *dcb_state); + +int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +int hinic_pf_send_clp_cmd(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit); + +void hinic_fault_work_handler(struct work_struct *work); +void hinic_swe_fault_handler(struct hinic_hwdev *hwdev, u8 level, + u8 event, u64 val); + +bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd); + +int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, + u32 page_size); + +int hinic_phy_init_status_judge(void *hwdev); + +int hinic_hilink_info_show(struct hinic_hwdev *hwdev); +extern int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); +extern int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); + +int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +#define HINIC_SDI_MODE_UNKNOWN 0 +#define HINIC_SDI_MODE_BM 1 +#define HINIC_SDI_MODE_VM 2 +#define HINIC_SDI_MODE_MAX 3 +int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode); + + +void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.c b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c new file mode 100644 index 000000000000..5c43a78ca0bf --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c @@ -0,0 +1,995 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" + +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_eqs.h" + +#define WAIT_HWIF_READY_TIMEOUT 10000 + +#define HINIC_SELFTEST_RESULT 0x883C + +/* For UEFI driver, this function can only read BAR0 */ +u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_base + reg)); +} + +/* For UEFI driver, this function can only write BAR0 */ +void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_base + reg); +} + +/** + * hwif_ready - test if the HW initialization passed + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + */ +static int hwif_ready(struct hinic_hwdev *hwdev) +{ + u32 addr, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HINIC_PCIE_LINK_DOWN) + return -EBUSY; + + if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS)) + return -EBUSY; + + if (HINIC_IS_VF(hwdev)) { + if (!HINIC_AF1_GET(attr1, PF_INIT_STATUS)) + return -EBUSY; + } + + return 0; +} + +static int wait_hwif_ready(struct hinic_hwdev *hwdev) +{ + ulong timeout = 0; + + do { + if (!hwif_ready(hwdev)) + return 0; + + usleep_range(999, 1000); + timeout++; + } while (timeout <= WAIT_HWIF_READY_TIMEOUT); + + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + return -EBUSY; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + */ +static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2) +{ + hwif->attr.func_global_idx = HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HINIC_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HINIC_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HINIC_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HINIC_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HINIC_AF1_GET(attr1, PPF_IDX); + + hwif->attr.num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HINIC_AF1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HINIC_AF1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC)); + + hwif->attr.global_vf_id_of_pf = HINIC_AF2_GET(attr2, + GLOBAL_VF_ID_OF_PF); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + */ +static void get_hwif_attr(struct hinic_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2; + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR2_ADDR; + attr2 = hinic_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, attr1, attr2); +} + +void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status) +{ + u32 attr5 = HINIC_AF5_SET(status, PF_STATUS); + u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR; + + if (hwif->attr.func_type == TYPE_VF) + return; + + hinic_hwif_write_reg(hwif, addr, attr5); +} + +enum 
hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif) +{ + u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); + + return HINIC_AF5_GET(attr5, PF_STATUS); +} + +enum hinic_doorbell_ctrl hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_AF4_GET(attr4, DOORBELL_CTRL); +} + +enum hinic_outbound_ctrl hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_AF4_GET(attr4, OUTBOUND_CTRL); +} + +void hinic_enable_doorbell(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_disable_doorbell(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_enable_outbound(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL); + attr4 |= HINIC_AF4_SET(ENABLE_OUTBOUND, OUTBOUND_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_disable_outbound(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL); + attr4 |= HINIC_AF4_SET(DISABLE_OUTBOUND, OUTBOUND_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + */ +static void set_ppf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_PPF_ELECTION_ADDR; + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hinic_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = hinic_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + */ +static void get_mpf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = hinic_hwif_read_reg(hwif, addr); + attr->mpf_idx = HINIC_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + */ +static void set_mpf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = hinic_hwif_read_reg(hwif, addr); + + val = HINIC_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = HINIC_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + 
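+	/* read-modify-write: replace only the IDX field and preserve the
+	 * remaining bits of the election register
+	 */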
hinic_hwif_write_reg(hwif, addr, val); +} + +static void init_db_area_idx(struct hinic_hwif *hwif) +{ + struct hinic_free_db_area *free_db_area; + u32 db_max_areas; + u32 i; + + free_db_area = &hwif->free_db_area; + db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE; + + for (i = 0; i < db_max_areas; i++) + free_db_area->db_idx[i] = i; + + free_db_area->num_free = db_max_areas; + + spin_lock_init(&free_db_area->idx_lock); +} + +static int get_db_idx(struct hinic_hwif *hwif, u32 *idx) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE; + u32 pos; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + +retry: + if (free_db_area->num_free == 0) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + + free_db_area->num_free--; + + pos = free_db_area->alloc_pos++; + pos &= db_max_areas - 1; + + pg_idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = 0xFFFFFFFF; + + /* pg_idx out of range */ + if (pg_idx >= db_max_areas) + goto retry; + + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hinic_hwif *hwif, u32 idx) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE; + u32 pos; + + if (idx >= db_max_areas) + return; + + spin_lock(&free_db_area->idx_lock); + + pos = free_db_area->return_pos++; + pos &= db_max_areas - 1; + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + spin_unlock(&free_db_area->idx_lock); +} + +void hinic_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base) +{ + struct hinic_hwif *hwif; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base); + +#if defined(__aarch64__) + /* No need to unmap */ +#else + if (dwqe_base && (hwif->chip_mode == CHIP_MODE_NORMAL)) + io_mapping_unmap(dwqe_base); +#endif + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic_free_db_addr); + +int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) +{ + struct hinic_hwif *hwif; + u64 offset; + u32 idx; + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * HINIC_DB_PAGE_SIZE; + + if (!dwqe_base || hwif->chip_mode != CHIP_MODE_NORMAL) + return 0; + + offset = ((u64)idx) << PAGE_SHIFT; + +#if defined(__aarch64__) + *dwqe_base = hwif->dwqe_mapping + offset; +#else +#ifdef HAVE_IO_MAP_WC_SIZE + *dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset, + HINIC_DB_PAGE_SIZE); +#else + *dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset); +#endif /* end of HAVE_IO_MAP_WC_SIZE */ +#endif /* end of "defined(__aarch64__)" */ + + if (!(*dwqe_base)) { + hinic_free_db_addr(hwdev, *db_base, NULL); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_alloc_db_addr); + +void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) +{ + struct hinic_hwif *hwif; + u32 idx; + + if (!hwdev) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base_phy); + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic_free_db_phy_addr); + +int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) +{ + struct hinic_hwif *hwif; + u32 idx; + int err; + + if (!hwdev || !db_base || !dwqe_base) + return -EINVAL; + + hwif = ((struct hinic_hwdev 
*)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base_phy + idx * HINIC_DB_PAGE_SIZE; + + if (hwif->chip_mode == CHIP_MODE_NORMAL) + *dwqe_base = *db_base + HINIC_DB_DWQE_SIZE; + + return 0; +} +EXPORT_SYMBOL(hinic_alloc_db_phy_addr); + +void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag) +{ + struct hinic_hwif *hwif; + u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + if (!hwdev) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + mask_bits = readl(hwif->intr_regs_base + offset); + mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + writel(mask_bits, hwif->intr_regs_base + offset); +} +EXPORT_SYMBOL(hinic_set_msix_state); + +static void disable_all_msix(struct hinic_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE); +} + +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states) +{ + enum hinic_doorbell_ctrl db_ctrl; + u32 cnt = 0; + + if (!hwif) + return -EFAULT; + + while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + if (db_ctrl == states) + return 0; + + usleep_range(900, 1000); + cnt++; + } + + return -EFAULT; +} +EXPORT_SYMBOL(wait_until_doorbell_flush_states); + +static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif) +{ + enum hinic_doorbell_ctrl db_ctrl; + enum hinic_outbound_ctrl outbound_ctrl; + u32 cnt = 0; + + while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hinic_get_outbound_ctrl_status(hwif); + + if (outbound_ctrl == ENABLE_OUTBOUND && + db_ctrl == ENABLE_DOORBELL) + return 0; + + usleep_range(900, 1000); + cnt++; + } + + return -EFAULT; +} + +static void __print_selftest_reg(struct hinic_hwdev *hwdev) +{ + u32 addr, attr0, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HINIC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "PCIE is link down\n"); + return; + } + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwdev->hwif, addr); + if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF && + !HINIC_AF0_GET(attr0, PCI_INTF_IDX)) + sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n", + hinic_hwif_read_reg(hwdev->hwif, + HINIC_SELFTEST_RESULT)); +} + +/** + * hinic_init_hwif - initialize the hw interface + * @hwdev: the pointer to hw device + * @cfg_reg_base: configuration base address + * Return: 0 - success, negative - failure + */ +int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping) +{ + struct hinic_hwif *hwif; + int err; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->pdev = hwdev->pcidev_hdl; + + hwif->cfg_regs_base = cfg_reg_base; + hwif->intr_regs_base = intr_reg_base; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->dwqe_mapping = dwqe_mapping; + + hwif->db_size = hinic_get_db_size(cfg_reg_base, &hwif->chip_mode); + + sdk_info(hwdev->dev_hdl, "Doorbell size: 0x%x, chip mode: %d\n", + hwif->db_size, hwif->chip_mode); + + init_db_area_idx(hwif); + + err = wait_hwif_ready(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Chip 
status is not ready\n");
+		__print_selftest_reg(hwdev);
+		goto hwif_ready_err;
+	}
+
+	get_hwif_attr(hwif);
+
+	err = wait_until_doorbell_and_outbound_enabled(hwif);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n");
+		goto hwif_ready_err;
+	}
+
+	if (!HINIC_IS_VF(hwdev)) {
+		set_ppf(hwif);
+
+		if (HINIC_IS_PPF(hwdev))
+			set_mpf(hwif);
+
+		get_mpf(hwif);
+	}
+
+	disable_all_msix(hwdev);
+	/* stop the mgmt cpu from reporting any event */
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+	pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n",
+		hwif->attr.func_global_idx, hwif->attr.func_type,
+		hwif->attr.pci_intf_idx, hwif->attr.ppf_idx,
+		hwif->attr.mpf_idx);
+
+	return 0;
+
+hwif_ready_err:
+	spin_lock_deinit(&hwif->free_db_area.idx_lock);
+	kfree(hwif);
+
+	return err;
+}
+
+/**
+ * hinic_free_hwif - free the hw interface
+ * @hwdev: the pointer to hw device
+ */
+void hinic_free_hwif(struct hinic_hwdev *hwdev)
+{
+	spin_lock_deinit(&hwdev->hwif->free_db_area.idx_lock);
+	kfree(hwdev->hwif);
+}
+
+int hinic_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+				    unsigned flag,
+				    struct hinic_dma_addr_align *mem_align)
+{
+	void *vaddr, *align_vaddr;
+	dma_addr_t paddr, align_paddr;
+	u64 real_size = size;
+
+	vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag);
+	if (!vaddr)
+		return -ENOMEM;
+
+	align_paddr = ALIGN(paddr, align);
+	/* already aligned */
+	if (align_paddr == paddr) {
+		align_vaddr = vaddr;
+		goto out;
+	}
+
+	dma_free_coherent(dev_hdl, real_size, vaddr, paddr);
+
+	/* realloc memory for align */
+	real_size = size + align;
+	vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag);
+	if (!vaddr)
+		return -ENOMEM;
+
+	align_paddr = ALIGN(paddr, align);
+	align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr));
+
+out:
+	mem_align->real_size = (u32)real_size;
+	mem_align->ori_vaddr = vaddr;
+	mem_align->ori_paddr = paddr;
+	mem_align->align_vaddr = align_vaddr;
+	mem_align->align_paddr = align_paddr;
+
+	return 0;
+}
+
+void hinic_dma_free_coherent_align(void *dev_hdl,
+				   struct hinic_dma_addr_align *mem_align)
+{
+	dma_free_coherent(dev_hdl, mem_align->real_size,
+			  mem_align->ori_vaddr, mem_align->ori_paddr);
+}
+
+u16 hinic_global_func_id(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.func_global_idx;
+}
+EXPORT_SYMBOL(hinic_global_func_id);
+
+/**
+ * hinic_global_func_id_hw - get the function id from the register, used by
+ * the sriov hot migration process
+ * @hwdev: the pointer to hw device
+ */
+u16 hinic_global_func_id_hw(void *hwdev)
+{
+	u32 addr, attr0;
+	struct hinic_hwdev *dev;
+
+	dev = (struct hinic_hwdev *)hwdev;
+	addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+	attr0 = hinic_hwif_read_reg(dev->hwif, addr);
+
+	return HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+}
+
+static int func_busy_state_check(struct hinic_hwdev *hwdev)
+{
+	u32 func_state;
+	int cycle;
+
+	/* set BUSY before src vm suspend and clear it before dst vm resume */
+	cycle = PIPE_CYCLE_MAX;
+	func_state = hinic_func_busy_state_get(hwdev);
+	while (func_state && cycle) {
+		usleep_range(800, 1000);
+		cycle--;
+		if (!cycle) {
+			sdk_err(hwdev->dev_hdl, "busy_state suspend timeout\n");
+			return -ETIMEDOUT;
+		}
+
+		func_state = hinic_func_busy_state_get(hwdev);
+	}
+
+	return 0;
+}
+
+int hinic_func_own_get(void *hwdev)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	u32 func_state;
+	int err;
+
+	if (!HINIC_IS_VF(dev))
+		return 0;
+
+restart:
+	down(&dev->func_sem);
+
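+	/* func_sem protects func_ref and the ownership bit as one unit; if
+	 * the function is busy, ownership is dropped and the acquisition is
+	 * retried once the busy state clears (see below)
+	 */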
dev->func_ref++; + hinic_func_own_bit_set(dev, 1); + + func_state = hinic_func_busy_state_get(hwdev); + if (func_state) { + dev->func_ref--; + if (dev->func_ref == 0) + hinic_func_own_bit_set(dev, 0); + + up(&dev->func_sem); + err = func_busy_state_check(dev); + if (err) + return err; + goto restart; + } + + up(&dev->func_sem); + return 0; +} + +void hinic_func_own_free(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + + if (!HINIC_IS_VF(dev)) + return; + + down(&dev->func_sem); + dev->func_ref--; + if (dev->func_ref == 0) + hinic_func_own_bit_set(dev, 0); + + up(&dev->func_sem); +} + +/** + * get function id, used by sriov hot migratition process. + * @hwdev: the pointer to hw device + * @func_id: function id + */ +int hinic_global_func_id_get(void *hwdev, u16 *func_id) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + int err; + + /* only vf get func_id from chip reg for sriov migrate */ + if (!HINIC_IS_VF(dev)) { + *func_id = hinic_global_func_id(hwdev); + return 0; + } + + err = func_busy_state_check(dev); + if (err) + return err; + + *func_id = hinic_global_func_id_hw(dev); + return 0; +} + +u16 hinic_intr_num(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.num_irqs; +} +EXPORT_SYMBOL(hinic_intr_num); + +u8 hinic_pf_id_of_vf(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.port_to_port_idx; +} +EXPORT_SYMBOL(hinic_pf_id_of_vf); + +u16 hinic_pf_id_of_vf_hw(void *hwdev) +{ + u32 addr, attr0; + struct hinic_hwdev *dev; + + dev = (struct hinic_hwdev *)hwdev; + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(dev->hwif, addr); + + return HINIC_AF0_GET(attr0, P2P_IDX); +} + +u8 hinic_pcie_itf_id(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.pci_intf_idx; +} +EXPORT_SYMBOL(hinic_pcie_itf_id); + +u8 hinic_vf_in_pf(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.vf_in_pf; +} +EXPORT_SYMBOL(hinic_vf_in_pf); + +enum func_type hinic_func_type(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} +EXPORT_SYMBOL(hinic_func_type); + +u8 hinic_ceq_num(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.num_ceqs; +} +EXPORT_SYMBOL(hinic_ceq_num); + +u8 hinic_dma_attr_entry_num(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.num_dma_attr; +} +EXPORT_SYMBOL(hinic_dma_attr_entry_num); + +u16 hinic_glb_pf_vf_offset(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.global_vf_id_of_pf; +} +EXPORT_SYMBOL(hinic_glb_pf_vf_offset); + +u8 hinic_mpf_idx(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.mpf_idx; +} +EXPORT_SYMBOL(hinic_mpf_idx); + +u8 hinic_ppf_idx(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} 
+EXPORT_SYMBOL(hinic_ppf_idx); + +#define CEQ_CTRL_0_CHIP_MODE_SHIFT 26 +#define CEQ_CTRL_0_CHIP_MODE_MASK 0xFU +#define CEQ_CTRL_0_GET(val, member) \ + (((val) >> CEQ_CTRL_0_##member##_SHIFT) & \ + CEQ_CTRL_0_##member##_MASK) + +/** + * hinic_get_db_size - get db size ceq ctrl: bit26~29: uP write vf mode is + * normal(0x0), bmgw(0x1) or vmgw(0x2) and normal mode db size is 512k, + * bmgw or vmgw mode db size is 256k + * @cfg_reg_base: pointer to cfg_reg_base + * @chip_mode: pointer to chip_mode + */ +u32 hinic_get_db_size(void *cfg_reg_base, enum hinic_chip_mode *chip_mode) +{ + u32 attr0, ctrl0; + + attr0 = be32_to_cpu(readl((u8 __iomem *)cfg_reg_base + + HINIC_CSR_FUNC_ATTR0_ADDR)); + + /* PF is always normal mode & db size is 512K */ + if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF) { + *chip_mode = CHIP_MODE_NORMAL; + return HINIC_DB_DWQE_SIZE; + } + + ctrl0 = be32_to_cpu(readl((u8 __iomem *)cfg_reg_base + + HINIC_CSR_CEQ_CTRL_0_ADDR(0))); + + *chip_mode = CEQ_CTRL_0_GET(ctrl0, CHIP_MODE); + + switch (*chip_mode) { + case CHIP_MODE_VMGW: + case CHIP_MODE_BMGW: + return HINIC_GW_VF_DB_SIZE; + default: + return HINIC_DB_DWQE_SIZE; + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.h b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h new file mode 100644 index 000000000000..428eefeb33ce --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HWIF_H +#define HINIC_HWIF_H + +#include "hinic_hwdev.h" + +#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 + +struct hinic_free_db_area { + u32 db_idx[HINIC_DB_MAX_AREAS]; + + u32 num_free; + + u32 alloc_pos; + u32 return_pos; + + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +struct hinic_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u8 num_dma_attr; /* max: 2 ^ 6 */ + + u16 global_vf_id_of_pf; +}; + +struct hinic_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u64 db_base_phy; + u8 __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + struct hinic_free_db_area free_db_area; + + struct hinic_func_attr attr; + + void *pdev; + enum hinic_chip_mode chip_mode; + u32 db_size; +}; + +struct hinic_dma_addr_align { + u32 real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg); + +void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val); + +void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status); + +enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif); + +enum hinic_doorbell_ctrl + hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif); + +enum hinic_outbound_ctrl + hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif); + +void hinic_enable_doorbell(struct hinic_hwif *hwif); + +void hinic_disable_doorbell(struct hinic_hwif *hwif); + +void hinic_enable_outbound(struct hinic_hwif *hwif); + +void hinic_disable_outbound(struct hinic_hwif *hwif); + +int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping); + +void hinic_free_hwif(struct hinic_hwdev *hwdev); + +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states); + +int hinic_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned flag, + struct hinic_dma_addr_align *mem_align); + +void hinic_dma_free_coherent_align(void *dev_hdl, + struct hinic_dma_addr_align *mem_align); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c new file mode 100644 index 000000000000..8adfc32fd7ac --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c @@ -0,0 +1,2850 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_lld.h" +#include "hinic_pci_id_tbl.h" +#include "hinic_nic_dev.h" +#include "hinic_sriov.h" +#include "hinic_dbgtool_knl.h" +#include "hinic_nictool.h" + +#define HINIC_PCI_CFG_REG_BAR 0 +#define HINIC_PCI_INTR_REG_BAR 2 +#define HINIC_PCI_DB_BAR 4 +#define HINIC_PCI_VENDOR_ID 0x19e5 + +#define SELF_TEST_BAR_ADDR_OFFSET 0x883c + +#define HINIC_SECOND_BASE (1000) +#define HINIC_SYNC_YEAR_OFFSET (1900) +#define HINIC_SYNC_MONTH_OFFSET (1) +#define HINIC_MINUTE_BASE (60) +#define HINIC_WAIT_TOOL_CNT_TIMEOUT 10000 +#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000 + +#define HINIC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver" +#define HINICVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver" + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC_DRV_DESC); +MODULE_VERSION(HINIC_DRV_VERSION); +MODULE_LICENSE("GPL"); + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) +static DEVICE_ATTR(sriov_numvfs, 0664, + hinic_sriov_numvfs_show, hinic_sriov_numvfs_store); +static DEVICE_ATTR(sriov_totalvfs, 0444, + hinic_sriov_totalvfs_show, NULL); +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +static struct attribute *hinic_attributes[] = { +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) + &dev_attr_sriov_numvfs.attr, + &dev_attr_sriov_totalvfs.attr, +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + NULL +}; + +static const struct attribute_group hinic_attr_group = { + .attrs = hinic_attributes, +}; + +#ifdef CONFIG_PCI_IOV +static bool disable_vf_load; +module_param(disable_vf_load, bool, 0444); +MODULE_PARM_DESC(disable_vf_load, + "Disable virtual functions probe or not - default is false"); +#endif /* CONFIG_PCI_IOV */ + +enum { + HINIC_FUNC_IN_REMOVE = BIT(0), + HINIC_FUNC_PRB_ERR = BIT(1), + HINIC_FUNC_PRB_DELAY = BIT(2), +}; + +/* Structure pcidev private */ +struct hinic_pcidev { + struct pci_dev *pcidev; + void *hwdev; + struct card_node *chip_node; + struct hinic_lld_dev lld_dev; + /* Record the service object address, + * such as hinic_dev and toe_dev, fc_dev + */ + void *uld_dev[SERVICE_T_MAX]; + /* Record the service object name */ + char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; + /* It is a the global variable for driver to manage + * all function device linked list + */ + struct list_head node; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + u64 db_base_phy; + void __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + struct hinic_sriov_info sriov_info; + + u32 init_state; + /* setted when uld driver processing event */ + unsigned long state; + struct pci_device_id id; + + unsigned long flag; + + struct work_struct slave_nic_work; + struct workqueue_struct *slave_nic_init_workq; + struct delayed_work slave_nic_init_dwork; + enum hinic_chip_mode chip_mode; + bool nic_cur_enable; + bool nic_des_enable; +}; + +#define HINIC_EVENT_PROCESS_TIMEOUT 10000 + +#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 
1 : 0)
+#define SET_BIT(num, n)		((num) | (1UL << (n)))
+#define CLEAR_BIT(num, n)	((num) & (~(1UL << (n))))
+
+#define MAX_CARD_ID 64
+static u64 card_bit_map;
+LIST_HEAD(g_hinic_chip_list);
+struct hinic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} };
+static const char *s_uld_name[SERVICE_T_MAX] = {
+	"nic", "ovs", "roce", "toe", "iwarp", "fc", "fcoe", "migrate"};
+
+enum hinic_lld_status {
+	HINIC_NODE_CHANGE = BIT(0),
+};
+
+struct hinic_lld_lock {
+	/* lock for chip list */
+	struct mutex lld_mutex;
+	unsigned long status;
+	atomic_t dev_ref_cnt;
+};
+
+struct hinic_lld_lock g_lld_lock;
+
+#define WAIT_LLD_DEV_HOLD_TIMEOUT	(10 * 60 * 1000) /* 10 minutes */
+#define WAIT_LLD_DEV_NODE_CHANGED	(10 * 60 * 1000) /* 10 minutes */
+#define WAIT_LLD_DEV_REF_CNT_EMPTY	(2 * 60 * 1000)	 /* 2 minutes */
+
+/* nodes in chip_node are about to be changed; tools and drivers must not
+ * get a node while this is in progress
+ */
+static void lld_lock_chip_node(void)
+{
+	u32 loop_cnt;
+
+	mutex_lock(&g_lld_lock.lld_mutex);
+
+	loop_cnt = 0;
+	while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) {
+		if (!test_and_set_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
+			break;
+
+		loop_cnt++;
+
+		if (loop_cnt % 10000 == 0)
+			pr_warn("Wait for lld node change complete for %us\n",
+				loop_cnt / 1000);
+
+		usleep_range(900, 1000);
+	}
+
+	if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED)
+		pr_warn("Wait for lld node change complete timeout when try to get lld lock\n");
+
+	loop_cnt = 0;
+	while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) {
+		if (!atomic_read(&g_lld_lock.dev_ref_cnt))
+			break;
+
+		loop_cnt++;
+
+		if (loop_cnt % 10000 == 0)
+			pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
+				loop_cnt / 1000,
+				atomic_read(&g_lld_lock.dev_ref_cnt));
+
+		usleep_range(900, 1000);
+	}
+
+	if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY)
+		pr_warn("Wait for lld dev unused timeout\n");
+
+	mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+static void lld_unlock_chip_node(void)
+{
+	clear_bit(HINIC_NODE_CHANGE, &g_lld_lock.status);
+}
+
+/* When tools or other drivers want to get a node of chip_node, use this
+ * function to prevent the node from being freed
+ */
+static void lld_dev_hold(void)
+{
+	u32 loop_cnt = 0;
+
+	/* ensure no chip node is being changed */
+	mutex_lock(&g_lld_lock.lld_mutex);
+
+	while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) {
+		if (!test_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
+			break;
+
+		loop_cnt++;
+
+		if (loop_cnt % 10000 == 0)
+			pr_warn("Wait lld node change complete for %us\n",
+				loop_cnt / 1000);
+
+		usleep_range(900, 1000);
+	}
+
+	if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT)
+		pr_warn("Wait lld node change complete timeout when try to hold lld dev\n");
+
+	atomic_inc(&g_lld_lock.dev_ref_cnt);
+
+	mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+static void lld_dev_put(void)
+{
+	atomic_dec(&g_lld_lock.dev_ref_cnt);
+}
+
+static void hinic_lld_lock_init(void)
+{
+	mutex_init(&g_lld_lock.lld_mutex);
+	atomic_set(&g_lld_lock.dev_ref_cnt, 0);
+}
+
+static atomic_t tool_used_cnt;
+
+void hinic_tool_cnt_inc(void)
+{
+	atomic_inc(&tool_used_cnt);
+}
+
+void hinic_tool_cnt_dec(void)
+{
+	atomic_dec(&tool_used_cnt);
+}
+
+static int attach_uld(struct hinic_pcidev *dev, enum hinic_service_type type,
+		      struct hinic_uld_info *uld_info)
+{
+	void *uld_dev = NULL;
+	int err;
+	enum hinic_service_mode service_mode =
+		hinic_get_service_mode(dev->hwdev);
+
+	mutex_lock(&dev->pdev_mutex);
+
+	if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) {
+		sdk_err(&dev->pcidev->dev, "SDK init failed, cannot attach uld\n");
+		err = -EFAULT;
+		goto 
out_unlock; + } + + if (dev->uld_dev[type]) { + sdk_err(&dev->pcidev->dev, + "%s driver has attached to pcie device\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + if ((hinic_get_func_mode(dev->hwdev) == FUNC_MOD_NORMAL_HOST) && + (service_mode != HINIC_WORK_MODE_OVS_SP) && + type == SERVICE_T_OVS && !hinic_support_ovs(dev->hwdev, NULL)) { + sdk_warn(&dev->pcidev->dev, "Dev not support %s\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); + if (err || !uld_dev) { + sdk_err(&dev->pcidev->dev, + "Failed to add object for %s driver to pcie device\n", + s_uld_name[type]); + goto probe_failed; + } + + dev->uld_dev[type] = uld_dev; + mutex_unlock(&dev->pdev_mutex); + + sdk_info(&dev->pcidev->dev, + "Attach %s driver to pcie device succeed\n", s_uld_name[type]); + return 0; + +probe_failed: +out_unlock: + mutex_unlock(&dev->pdev_mutex); + + return err; +} + +static void detach_uld(struct hinic_pcidev *dev, enum hinic_service_type type) +{ + struct hinic_uld_info *uld_info = &g_uld_info[type]; + u32 cnt = 0; + + mutex_lock(&dev->pdev_mutex); + if (!dev->uld_dev[type]) { + mutex_unlock(&dev->pdev_mutex); + return; + } + + while (cnt < HINIC_EVENT_PROCESS_TIMEOUT) { + if (!test_and_set_bit(type, &dev->state)) + break; + usleep_range(900, 1000); + cnt++; + } + + uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); + dev->uld_dev[type] = NULL; + if (cnt < HINIC_EVENT_PROCESS_TIMEOUT) + clear_bit(type, &dev->state); + + sdk_info(&dev->pcidev->dev, + "Detach %s driver from pcie device succeed\n", + s_uld_name[type]); + mutex_unlock(&dev->pdev_mutex); +} + +static void attach_ulds(struct hinic_pcidev *dev) +{ + enum hinic_service_type type; + + for (type = SERVICE_T_OVS; type < SERVICE_T_MAX; type++) { + if (g_uld_info[type].probe) + attach_uld(dev, type, &g_uld_info[type]); + } +} + +static void detach_ulds(struct hinic_pcidev *dev) +{ + enum hinic_service_type type; + + for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { + if (g_uld_info[type].probe) + detach_uld(dev, type); + } +} + +int hinic_register_uld(enum hinic_service_type type, + struct hinic_uld_info *uld_info) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to register\n", + type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid information of %s driver to register\n", + s_uld_name[type]); + return -EINVAL; + } + + lld_dev_hold(); + + if (g_uld_info[type].probe) { + pr_err("%s driver has registered\n", s_uld_name[type]); + lld_dev_put(); + return -EINVAL; + } + + memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (attach_uld(dev, type, uld_info)) { + sdk_err(&dev->pcidev->dev, + "Attach %s driver to pcie device failed\n", + s_uld_name[type]); + continue; + } + } + } + + lld_dev_put(); + + pr_info("Register %s driver succeed\n", s_uld_name[type]); + return 0; +} +EXPORT_SYMBOL(hinic_register_uld); + +void hinic_unregister_uld(enum hinic_service_type type) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + struct hinic_uld_info *uld_info; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to unregister\n", + type); + return; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + 
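+		/* detach this uld from every function on every card before
+		 * clearing its g_uld_info entry below
+		 */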
list_for_each_entry(dev, &chip_node->func_list, node) { + detach_uld(dev, type); + } + } + + uld_info = &g_uld_info[type]; + memset(uld_info, 0, sizeof(*uld_info)); + lld_dev_put(); +} +EXPORT_SYMBOL(hinic_unregister_uld); + +static void hinic_sync_time_to_fmw(struct hinic_pcidev *pdev_pri) +{ + struct timeval tv = {0}; + struct rtc_time rt_time = {0}; + u64 tv_msec; + int err; + + do_gettimeofday(&tv); + + tv_msec = tv.tv_sec * HINIC_SECOND_BASE + + tv.tv_usec / HINIC_SECOND_BASE; + err = hinic_sync_time(pdev_pri->hwdev, tv_msec); + if (err) { + sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + } else { + rtc_time_to_tm(tv.tv_sec, &rt_time); + sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + rt_time.tm_year + HINIC_SYNC_YEAR_OFFSET, + rt_time.tm_mon + HINIC_SYNC_MONTH_OFFSET, + rt_time.tm_mday, rt_time.tm_hour, + rt_time.tm_min, rt_time.tm_sec); + } +} + +enum hinic_ver_incompat_mode { + /* New driver can't compat with old firmware */ + VER_INCOMP_NEW_DRV_OLD_FW, + /* New Firmware can't compat with old driver */ + VER_INCOMP_NEW_FW_OLD_DRV, +}; + +struct hinic_version_incompat { + char *version; + char *advise; + u32 incompat_mode; +}; + +struct hinic_version_incompat ver_incompat_table[] = { + { + .version = "1.2.2.0", + .advise = "Mechanism of cos changed", + .incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW), + }, + { + .version = "1.2.3.0", + .advise = "Driver get sevice mode from firmware", + .incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW), + }, +}; + +#define MAX_VER_FIELD_LEN 4 +#define MAX_VER_SPLIT_NUM 4 +static void __version_split(const char *str, int *split_num, + char rst[][MAX_VER_FIELD_LEN]) +{ + const char delim = '.'; + const char *src; + int cnt = 0; + u16 idx, end, token_len; + + idx = 0; + while (idx < strlen(str)) { + for (end = idx; end < strlen(str); end++) { + if (*(str + end) == delim) + break; /* find */ + } + + if (end != idx) { + token_len = min_t(u16, end - idx, + MAX_VER_FIELD_LEN - 1); + src = str + idx; + memcpy(rst[cnt], src, token_len); + if (++cnt >= MAX_VER_SPLIT_NUM) + break; + } + + idx = end + 1; /* skip delim */ + } + + *split_num = cnt; +} + +int hinic_version_cmp(char *ver1, char *ver2) +{ + char ver1_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} }; + char ver2_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} }; + int split1_num, split2_num; + int ver1_num, ver2_num; + int split; + + /* To compat older firmware version */ + if (ver1[0] == 'B') + return -1; + + if (ver2[0] == 'B') + return 1; + + __version_split(ver1, &split1_num, ver1_split); + __version_split(ver2, &split2_num, ver2_split); + + if (split1_num != MAX_VER_SPLIT_NUM || + split2_num != MAX_VER_SPLIT_NUM) { + pr_err("Invalid version %s or %s\n", ver1, ver2); + return 0; + } + + for (split = 0; split < MAX_VER_SPLIT_NUM; split++) { + ver1_num = local_atoi(ver1_split[split]); + ver2_num = local_atoi(ver2_split[split]); + + if (ver1_num > ver2_num) + return 1; + else if (ver1_num < ver2_num) + return -1; + } + + return 0; +} + +static int __version_mismatch(struct hinic_pcidev *pcidev, char *cur_fw_ver, + char *cur_drv_ver, + struct hinic_version_incompat *ver_incompat, + int start_entry) +{ + struct hinic_version_incompat *ver_incmp_tmp; + int fw_ver_comp; + int i, num_entry; + + fw_ver_comp = hinic_version_cmp(cur_fw_ver, ver_incompat->version); + if (fw_ver_comp <= 0) { + /* Check if new driver compatible with old fw */ + for (i = start_entry; i >= 0; i--) { + 
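+			/* walk back through older table entries: any entry
+			 * still newer than the running firmware marks a
+			 * new-driver/old-firmware break
+			 */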
ver_incmp_tmp = &ver_incompat_table[i];
+			if (hinic_version_cmp(cur_fw_ver,
+					      ver_incmp_tmp->version) >= 0)
+				break; /* no need to check further */
+
+			if (ver_incmp_tmp->incompat_mode &
+			    BIT(VER_INCOMP_NEW_DRV_OLD_FW)) {
+				sdk_err(&pcidev->pcidev->dev,
+					"Version incompatible: %s, please update firmware to %s, or use %s driver\n",
+					ver_incmp_tmp->advise,
+					cur_drv_ver, cur_fw_ver);
+				return -EINVAL;
+			}
+		}
+
+		goto compatible;
+	}
+
+	/* check if the old driver is compatible with the new firmware */
+	num_entry = (int)sizeof(ver_incompat_table) /
+		    (int)sizeof(ver_incompat_table[0]);
+	for (i = start_entry + 1; i < num_entry; i++) {
+		ver_incmp_tmp = &ver_incompat_table[i];
+
+		if (hinic_version_cmp(cur_fw_ver, ver_incmp_tmp->version) < 0)
+			break; /* no need to check further */
+
+		if (ver_incmp_tmp->incompat_mode &
+		    BIT(VER_INCOMP_NEW_FW_OLD_DRV)) {
+			sdk_err(&pcidev->pcidev->dev,
+				"Version incompatible: %s, please update driver to %s, or use %s firmware\n",
+				ver_incmp_tmp->advise,
+				cur_fw_ver, cur_drv_ver);
+			return -EINVAL;
+		}
+	}
+
+compatible:
+	if (hinic_version_cmp(cur_drv_ver, cur_fw_ver) < 0)
+		sdk_info(&pcidev->pcidev->dev,
+			 "Firmware is newer than driver, please update driver to %s\n",
+			 cur_fw_ver);
+	else
+		sdk_info(&pcidev->pcidev->dev,
+			 "Driver is newer than firmware, please update firmware to %s\n",
+			 cur_drv_ver);
+
+	return 0;
+}
+
+static void hinic_ignore_minor_version(char *version)
+{
+	char ver_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+	int max_ver_len, split_num = 0;
+	int err;
+
+	__version_split(version, &split_num, ver_split);
+	if (split_num != MAX_VER_SPLIT_NUM)
+		return;
+
+	max_ver_len = (int)strlen(version) + 1;
+	memset(version, 0, max_ver_len);
+
+	err = snprintf(version, max_ver_len, "%s.%s.%s.0",
+		       ver_split[0], ver_split[1], ver_split[2]);
+	if (err <= 0 || err >= max_ver_len)
+		pr_err("Failed snprintf version, function return(%d) and dest_len(%d)\n",
+		       err, max_ver_len);
+}
+
+static int hinic_detect_version_compatible(struct hinic_pcidev *pcidev)
+{
+	struct hinic_fw_version fw_ver = { {0} };
+	struct hinic_version_incompat *ver_incompat;
+	char drv_ver[MAX_VER_SPLIT_NUM * MAX_VER_FIELD_LEN] = {0};
+	int idx, num_entry, drv_ver_len;
+	int ver_mismatch;
+	int err;
+
+	err = hinic_get_fw_version(pcidev->hwdev, &fw_ver);
+	if (err) {
+		sdk_err(&pcidev->pcidev->dev,
+			"Failed to get firmware version\n");
+		return err;
+	}
+
+	drv_ver_len = min_t(int, (int)sizeof(drv_ver) - 1,
+			    (int)strlen(HINIC_DRV_VERSION));
+	memcpy(drv_ver, HINIC_DRV_VERSION, drv_ver_len);
+
+	sdk_info(&pcidev->pcidev->dev, "Version info: driver %s, firmware %s\n",
+		 drv_ver, fw_ver.mgmt_ver);
+
+	hinic_ignore_minor_version(fw_ver.mgmt_ver);
+	hinic_ignore_minor_version(drv_ver);
+	ver_mismatch = hinic_version_cmp(drv_ver, fw_ver.mgmt_ver);
+	if (!ver_mismatch)
+		return 0;
+
+	num_entry = (int)sizeof(ver_incompat_table) /
+		    (int)sizeof(ver_incompat_table[0]);
+	for (idx = num_entry - 1; idx >= 0; idx--) {
+		ver_incompat = &ver_incompat_table[idx];
+
+		if (hinic_version_cmp(drv_ver, ver_incompat->version) < 0)
+			continue;
+
+		/* Found an older version of the driver in the table */
+		return __version_mismatch(pcidev, fw_ver.mgmt_ver, drv_ver,
+					  ver_incompat, idx);
+	}
+
+	return 0;
+}
+
+struct mctp_hdr {
+	u16 resp_code;
+	u16 reason_code;
+	u32 manufacture_id;
+
+	u8 cmd_rsvd;
+	u8 major_cmd;
+	u8 sub_cmd;
+	u8 spc_field;
+};
+
+struct mctp_bdf_info {
+	struct mctp_hdr hdr; /* spc_field: pf index */
+	u8 rsvd;
+	u8 bus;
+	u8 device;
+	u8 function;
+};
+
+enum mctp_resp_code 
{ + /* COMMAND_COMPLETED = 0, */ + /* COMMAND_FAILED = 1, */ + /* COMMAND_UNAVALILABLE = 2, */ + COMMAND_UNSUPPORTED = 3, +}; + +static void __mctp_set_hdr(struct mctp_hdr *hdr, + struct hinic_mctp_host_info *mctp_info) +{ + u32 manufacture_id = 0x07DB; + + hdr->cmd_rsvd = 0; + hdr->major_cmd = mctp_info->major_cmd; + hdr->sub_cmd = mctp_info->sub_cmd; + hdr->manufacture_id = cpu_to_be32(manufacture_id); + hdr->resp_code = cpu_to_be16(hdr->resp_code); + hdr->reason_code = cpu_to_be16(hdr->reason_code); +} + +static void __mctp_get_bdf(struct hinic_pcidev *pci_adapter, + struct hinic_mctp_host_info *mctp_info) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + struct mctp_bdf_info *bdf_info = mctp_info->data; + + bdf_info->bus = pdev->bus->number; + bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */ + bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */ + + memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr)); + __mctp_set_hdr(&bdf_info->hdr, mctp_info); + bdf_info->hdr.spc_field = + (u8)hinic_global_func_id_hw(pci_adapter->hwdev); + + mctp_info->data_len = sizeof(*bdf_info); +} + +#define MCTP_MAJOR_CMD_PUBLIC 0x0 +#define MCTP_MAJOR_CMD_NIC 0x1 + +#define MCTP_PUBLIC_SUB_CMD_BDF 0x1 +#define MCTP_PUBLIC_SUB_CMD_DRV 0x4 + +#define MCTP_NIC_SUB_CMD_IP 0x1 + +static void __mctp_get_host_info(struct hinic_pcidev *dev, + struct hinic_mctp_host_info *mctp_info) +{ + struct mctp_hdr *hdr; + + switch ((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) { + case (MCTP_MAJOR_CMD_PUBLIC << 8 | MCTP_PUBLIC_SUB_CMD_BDF): + __mctp_get_bdf(dev, mctp_info); + break; + + default: + hdr = mctp_info->data; + hdr->reason_code = COMMAND_UNSUPPORTED; + __mctp_set_hdr(hdr, mctp_info); + mctp_info->data_len = sizeof(*hdr); + break; + } +} + +static bool __is_pcidev_match_chip_name(const char *ifname, + struct hinic_pcidev *dev, + struct card_node *chip_node, + enum func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (type == TYPE_UNKNOWN) { + if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED) + return false; + } else { + if (dev->init_state >= + HINIC_INIT_STATE_HW_PART_INITED && + hinic_func_type(dev->hwdev) != type) + return false; + } + + return true; + } + + return false; +} + +static struct hinic_pcidev *_get_pcidev_by_chip_name(char *ifname, + enum func_type type) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_chip_name(ifname, dev, chip_node, + type)) { + lld_dev_put(); + return dev; + } + } + } + + lld_dev_put(); + + return NULL; +} + +static struct hinic_pcidev *hinic_get_pcidev_by_chip_name(char *ifname) +{ + struct hinic_pcidev *dev, *dev_hw_init; + + /* find hw init device first */ + dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN); + if (dev_hw_init) { + if (hinic_func_type(dev_hw_init->hwdev) == TYPE_PPF) + return dev_hw_init; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF); + if (dev) { + if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state)) + return dev_hw_init; + + return dev; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PF); + if (dev) { + if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state)) + return dev_hw_init; + + return dev; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_VF); + if (dev) + return dev; + + return NULL; +} + +static bool 
__is_pcidev_match_dev_name(const char *ifname, + struct hinic_pcidev *dev, + enum hinic_service_type type) +{ + struct hinic_nic_dev *nic_dev; + enum hinic_service_type i; + + if (type == SERVICE_T_MAX) { + for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ)) + return true; + } + + nic_dev = dev->uld_dev[SERVICE_T_NIC]; + if (nic_dev) { + if (!strncmp(nic_dev->netdev->name, ifname, IFNAMSIZ)) + return true; + } + + return false; +} + +static struct hinic_pcidev * + hinic_get_pcidev_by_dev_name(char *ifname, enum hinic_service_type type) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_dev_name(ifname, dev, type)) { + lld_dev_put(); + return dev; + } + } + } + lld_dev_put(); + + return NULL; +} + +static struct hinic_pcidev *hinic_get_pcidev_by_ifname(char *ifname) +{ + struct hinic_pcidev *dev; + + /* support search hwdev by chip name, net device name, + * or fc device name + */ + /* Find pcidev by chip_name first */ + dev = hinic_get_pcidev_by_chip_name(ifname); + if (dev) + return dev; + + /* If ifname not a chip name, + * find pcidev by FC name or netdevice name + */ + return hinic_get_pcidev_by_dev_name(ifname, SERVICE_T_MAX); +} + +int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!hwdev || !ifname) + return -EINVAL; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->hwdev == hwdev) { + strncpy(ifname, chip_node->chip_name, + IFNAMSIZ - 1); + ifname[IFNAMSIZ - 1] = 0; + lld_dev_put(); + return 0; + } + } + } + lld_dev_put(); + + return -ENXIO; +} +EXPORT_SYMBOL(hinic_get_chip_name_by_hwdev); + +static struct card_node *hinic_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct hinic_pcidev *dev; + + if (!hwdev) + return NULL; + + lld_dev_hold(); + list_for_each_entry(node_tmp, &g_hinic_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + lld_dev_put(); + + return chip_node; +} + +int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]) +{ + struct hinic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node; + u32 cnt; + + if (!dev || !hinic_support_nic(dev->hwdev, NULL)) + return -EINVAL; + + lld_dev_hold(); + + cnt = 0; + chip_node = dev->chip_node; + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state < HINIC_INIT_STATE_NIC_INITED) + continue; + + if (HINIC_FUNC_IS_VF(dev->hwdev)) + continue; + + if (!hinic_support_nic(dev->hwdev, NULL)) + continue; + + array[cnt] = dev->uld_dev[SERVICE_T_NIC]; + cnt++; + } + lld_dev_put(); + + *dev_cnt = cnt; + + return 0; +} + +int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted, u8 *cos_up) +{ + struct hinic_pcidev *dev = 
pci_get_drvdata(pdev);
+	struct card_node *chip_node;
+
+	if (!dev)
+		return -EINVAL;
+
+	chip_node = dev->chip_node;
+	*is_setted = chip_node->cos_up_setted;
+	if (chip_node->cos_up_setted)
+		memcpy(cos_up, chip_node->cos_up, sizeof(chip_node->cos_up));
+
+	return 0;
+}
+
+int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up)
+{
+	struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+	struct card_node *chip_node;
+
+	if (!dev)
+		return -EINVAL;
+
+	chip_node = dev->chip_node;
+	chip_node->cos_up_setted = true;
+	memcpy(chip_node->cos_up, cos_up, sizeof(chip_node->cos_up));
+
+	return 0;
+}
+
+void *hinic_get_hwdev_by_ifname(char *ifname)
+{
+	struct hinic_pcidev *dev;
+
+	dev = hinic_get_pcidev_by_ifname(ifname);
+	if (dev)
+		return dev->hwdev;
+
+	return NULL;
+}
+
+void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type)
+{
+	struct hinic_pcidev *dev;
+
+	if (type >= SERVICE_T_MAX) {
+		pr_err("Invalid service type: %d\n", type);
+		return NULL;
+	}
+
+	dev = hinic_get_pcidev_by_dev_name(ifname, type);
+	if (dev)
+		return dev->uld_dev[type];
+
+	return NULL;
+}
+
+void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type)
+{
+	struct hinic_pcidev *dev;
+
+	/* Searching by chip name, net device name, or fc device name is
+	 * supported; look up the pcidev by chip name first.
+	 */
+	dev = hinic_get_pcidev_by_chip_name(ifname);
+	if (dev)
+		return dev->uld_dev[type];
+
+	return NULL;
+}
+
+/* NOTICE: nictool can't use this function, because it can't keep the tool
+ * context mutually exclusive with the remove context
+ */
+void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
+				enum hinic_service_type type)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!pdev)
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter)
+		return NULL;
+
+	chip_node = pci_adapter->chip_node;
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		/* can't test the HINIC_FUNC_IN_REMOVE bit in dev->flag,
+		 * because TOE will call this function when detaching the
+		 * toe driver
+		 */
+
+		if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
+			lld_dev_put();
+			return dev->uld_dev[type];
+		}
+	}
+	lld_dev_put();
+
+	return NULL;
+}
+EXPORT_SYMBOL(hinic_get_ppf_uld_by_pdev);
+
+void *hinic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!pdev)
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter)
+		return NULL;
+
+	chip_node = pci_adapter->chip_node;
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (dev->hwdev && hinic_func_type(dev->hwdev) == TYPE_PPF) {
+			lld_dev_put();
+			return dev->hwdev;
+		}
+	}
+	lld_dev_put();
+
+	return NULL;
+}
+
+void hinic_get_all_chip_id(void *id_info)
+{
+	struct nic_card_id *card_id = (struct nic_card_id *)id_info;
+	struct card_node *chip_node;
+	int i = 0;
+	int id, err;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%d", &id);
+		if (err <= 0)
+			pr_err("Failed to get hinic id\n");
+
+		card_id->id[i] = id;
+		i++;
+	}
+	lld_dev_put();
+	card_id->num = i;
+}
+
+static bool __is_func_valid(struct hinic_pcidev *dev)
+{
+	if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+		return false;
+
+	if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED)
+		return false;
+
+	if (HINIC_FUNC_IS_VF(dev->hwdev))
+		return false;
+
+	return true;
+}
+
+bool
hinic_is_valid_bar_addr(u64 offset) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (offset == pci_resource_start(dev->pcidev, 0)) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +void hinic_get_card_info(void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_info *info = (struct card_info *)bufin; + struct hinic_nic_dev *nic_dev; + struct hinic_pcidev *dev; + void *fun_hwdev; + u32 i = 0; + + info->pf_num = 0; + + chip_node = hinic_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!__is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (((hinic_support_fc(fun_hwdev, NULL)) || + (hinic_support_fcoe(fun_hwdev, NULL))) && + dev->uld_dev[SERVICE_T_FC]) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_FC); + strlcpy(info->pf[i].name, + dev->uld_dev_name[SERVICE_T_FC], IFNAMSIZ); + } + + if (hinic_support_nic(fun_hwdev, NULL)) { + nic_dev = dev->uld_dev[SERVICE_T_NIC]; + if (nic_dev) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC); + strlcpy(info->pf[i].name, + nic_dev->netdev->name, IFNAMSIZ); + } + } + + if ((hinic_support_ovs(fun_hwdev, NULL)) && + dev->uld_dev[SERVICE_T_OVS]) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_OVS); + + if ((hinic_support_roce(fun_hwdev, NULL)) && + dev->uld_dev[SERVICE_T_ROCE]) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_ROCE); + + if ((hinic_support_toe(fun_hwdev, NULL)) && + dev->uld_dev[SERVICE_T_TOE]) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_TOE); + + if (hinic_func_for_mgmt(fun_hwdev)) + strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + if (hinic_func_for_pt(fun_hwdev)) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_PT); + + if (hinic_func_for_hwpt(fun_hwdev)) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_HWPT); + + strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + lld_dev_put(); +} + +void hinic_get_card_func_info_by_card_name( + const char *chip_name, struct hinic_card_func_info *card_func) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + struct func_pdev_info *pdev_info; + + card_func->num_pf = 0; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + pdev_info = &card_func->pdev_info[card_func->num_pf]; + pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0); + pdev_info->bar0_phy_addr = + pci_resource_start(dev->pcidev, 0); + + card_func->num_pf++; + if (card_func->num_pf >= MAX_SIZE) + break; + } + } + + lld_dev_put(); +} + +int hinic_get_device_id(void *hwdev, u16 *dev_id) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + u16 vendor_id = 0; + u16 device_id = 0; + + chip_node = hinic_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + 
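+		/* Offsets 0 and 2 below are the standard vendor/device ID
+		 * registers; an equivalent form using the generic constants
+		 * from <uapi/linux/pci_regs.h> would be (illustrative only):
+		 *
+		 *	pci_read_config_word(dev->pcidev, PCI_VENDOR_ID,
+		 *			     &vendor_id);
+		 *	pci_read_config_word(dev->pcidev, PCI_DEVICE_ID,
+		 *			     &device_id);
+		 */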
pci_read_config_word(dev->pcidev, 0, &vendor_id);
+		if (vendor_id == HINIC_PCI_VENDOR_ID) {
+			pci_read_config_word(dev->pcidev, 2, &device_id);
+			break;
+		}
+	}
+	lld_dev_put();
+	*dev_id = device_id;
+
+	return 0;
+}
+
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid)
+{
+	struct card_node *chip_node = NULL;
+	struct hinic_pcidev *dev;
+
+	chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+	if (!chip_node)
+		return -ENODEV;
+
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (hinic_physical_port_id(dev->hwdev) == port_id) {
+			*pf_id = hinic_global_func_id(dev->hwdev);
+			*isvalid = 1;
+			break;
+		}
+	}
+	lld_dev_put();
+
+	return 0;
+}
+
+void get_fc_devname(char *devname)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
+				continue;
+
+			if (HINIC_FUNC_IS_VF(dev->hwdev))
+				continue;
+
+			if (dev->uld_dev[SERVICE_T_FC]) {
+				strlcpy(devname,
+					dev->uld_dev_name[SERVICE_T_FC],
+					IFNAMSIZ);
+				lld_dev_put();
+				return;
+			}
+		}
+	}
+	lld_dev_put();
+}
+
+enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+
+	if (dev)
+		return dev->init_state;
+
+	return HINIC_INIT_STATE_NONE;
+}
+
+enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname)
+{
+	struct hinic_pcidev *dev;
+
+	dev = hinic_get_pcidev_by_ifname(ifname);
+	if (dev)
+		return dev->init_state;
+
+	pr_err("Cannot find device %s\n", ifname);
+
+	return HINIC_INIT_STATE_NONE;
+}
+
+int hinic_get_self_test_result(char *ifname, u32 *result)
+{
+	struct hinic_pcidev *dev = NULL;
+
+	dev = hinic_get_pcidev_by_ifname(ifname);
+	if (!dev) {
+		pr_err("Get pcidev failed by ifname: %s\n", ifname);
+		return -EFAULT;
+	}
+
+	*result = be32_to_cpu(readl((u8 __iomem *)(dev->cfg_reg_base) +
+				    SELF_TEST_BAR_ADDR_OFFSET));
+	return 0;
+}
+
+struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct hinic_nic_dev *nic_dev;
+
+	if (!lld_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(lld_dev->pdev);
+	nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+	if (!nic_dev) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"There's no net device attached to the pci device\n");
+		return NULL;
+	}
+
+	return nic_dev->netdev;
+}
+EXPORT_SYMBOL(hinic_get_netdev_by_lld);
+
+void *hinic_get_hwdev_by_netdev(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev;
+
+	if (!netdev)
+		return NULL;
+
+	nic_dev = netdev_priv(netdev);
+	if (!nic_dev)
+		return NULL;
+
+	return nic_dev->hwdev;
+}
+EXPORT_SYMBOL(hinic_get_hwdev_by_netdev);
+
+struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct hinic_nic_dev *nic_dev;
+
+	if (!pdev)
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter || !hinic_support_nic(pci_adapter->hwdev, NULL))
+		return NULL;
+
+	nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+	if (!nic_dev) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"There's no net device attached to the pci device\n");
+		return NULL;
+	}
+
+	return nic_dev->netdev;
+}
+EXPORT_SYMBOL(hinic_get_netdev_by_pcidev);
+
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
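+	/* Unlike the lookup helpers above, drvdata is not checked here:
+	 * callers are expected to pass a pdev that hinic_probe() has
+	 * already accepted, e.g. (illustrative caller, not in this patch):
+	 *
+	 *	sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+	 *	if (test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state))
+	 *		...
+	 */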
+ + return &pci_adapter->sriov_info; +} + +bool hinic_is_in_host(void) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state > HINIC_INIT_STATE_PCI_INITED && + hinic_func_type(dev->hwdev) != TYPE_VF) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +int hinic_attach_nic(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return -EINVAL; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); +} +EXPORT_SYMBOL(hinic_attach_nic); + +void hinic_detach_nic(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + detach_uld(dev, SERVICE_T_NIC); +} +EXPORT_SYMBOL(hinic_detach_nic); + +int hinic_attach_roce(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return -EINVAL; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + return attach_uld(dev, SERVICE_T_ROCE, &g_uld_info[SERVICE_T_ROCE]); +} +EXPORT_SYMBOL(hinic_attach_roce); + +void hinic_detach_roce(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + detach_uld(dev, SERVICE_T_ROCE); +} +EXPORT_SYMBOL(hinic_detach_roce); + +static int __set_nic_rss_state(struct hinic_pcidev *dev, bool enable) +{ + void *nic_uld; + int err = 0; + + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + return 0; + + nic_uld = dev->uld_dev[SERVICE_T_NIC]; + if (!hinic_support_nic(dev->hwdev, NULL) || !nic_uld) + return 0; + + if (hinic_func_type(dev->hwdev) == TYPE_VF) + return 0; + + if (enable) + err = hinic_enable_func_rss(nic_uld); + else + err = hinic_disable_func_rss(nic_uld); + if (err) { + sdk_err(&dev->pcidev->dev, "Failed to %s rss\n", + enable ? 
"enable" : "disable"); + } + + return err; +} + +int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *adapter; + + if (!lld_dev) + return -EINVAL; + + adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev); + + return __set_nic_rss_state(adapter, false); +} +EXPORT_SYMBOL(hinic_disable_nic_rss); + +int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *adapter; + + if (!lld_dev) + return -EINVAL; + + adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev); + + return __set_nic_rss_state(adapter, true); +} +EXPORT_SYMBOL(hinic_enable_nic_rss); + +struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev) +{ + struct hinic_pcidev *adapter; + + if (!pdev) + return NULL; + + adapter = pci_get_drvdata(pdev); + + return &adapter->id; +} +EXPORT_SYMBOL(hinic_get_pci_device_id); + +static int __set_nic_func_state(struct hinic_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + u16 func_id; + int err; + bool enable_nic; + + err = hinic_global_func_id_get(pci_adapter->hwdev, &func_id); + if (err) + return err; + + err = hinic_get_func_nic_enable(pci_adapter->hwdev, func_id, + &enable_nic); + if (err) { + sdk_err(&pdev->dev, "Failed to get nic state\n"); + return err; + } + + if (enable_nic) { + if (is_multi_bm_slave(pci_adapter->hwdev)) + hinic_set_vf_dev_cap(pci_adapter->hwdev); + + err = attach_uld(pci_adapter, SERVICE_T_NIC, + &g_uld_info[SERVICE_T_NIC]); + if (err) { + sdk_err(&pdev->dev, "Failed to initialize NIC\n"); + return err; + } + + if (pci_adapter->init_state < HINIC_INIT_STATE_NIC_INITED) + pci_adapter->init_state = HINIC_INIT_STATE_NIC_INITED; + } else { + detach_uld(pci_adapter, SERVICE_T_NIC); + } + + return 0; +} + +int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev, u16 vf_func_id, + bool en) +{ + struct hinic_pcidev *dev, *des_dev; + struct hinic_nic_dev *uld_dev; + int err = -EFAULT; + + if (!lld_dev) + return -EINVAL; + + dev = pci_get_drvdata(lld_dev->pdev); + + if (!dev) + return -EFAULT; + /* find func_idx pci_adapter and disable or enable nic */ + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED && + !test_bit(HINIC_FUNC_PRB_ERR, + &des_dev->flag)) + continue; + + if (hinic_global_func_id(des_dev->hwdev) != vf_func_id) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED) { + break; + } + + sdk_info(&dev->pcidev->dev, "Receive event: %s vf%d nic\n", + en ? 
"enable" : "disable", vf_func_id); + + err = 0; + if (en) { + if (des_dev->uld_dev[SERVICE_T_NIC]) { + sdk_err(&des_dev->pcidev->dev, + "%s driver has attached to pcie device, cannot set VF max_queue_num\n", + s_uld_name[SERVICE_T_NIC]); + } else { + err = hinic_set_vf_dev_cap(des_dev->hwdev); + + if (err) { + sdk_err(&des_dev->pcidev->dev, + "%s driver Set VF max_queue_num failed, err=%d.\n", + s_uld_name[SERVICE_T_NIC], err); + + break; + } + } + + err = attach_uld(des_dev, SERVICE_T_NIC, + &g_uld_info[SERVICE_T_NIC]); + if (err) { + sdk_err(&des_dev->pcidev->dev, "Failed to initialize NIC\n"); + break; + } + + uld_dev = (struct hinic_nic_dev *) + (des_dev->uld_dev[SERVICE_T_NIC]); + uld_dev->in_vm = true; + uld_dev->is_vm_slave = + is_multi_vm_slave(uld_dev->hwdev); + uld_dev->is_bm_slave = + is_multi_bm_slave(uld_dev->hwdev); + if (des_dev->init_state < HINIC_INIT_STATE_NIC_INITED) + des_dev->init_state = + HINIC_INIT_STATE_NIC_INITED; + } else { + detach_uld(des_dev, SERVICE_T_NIC); + } + + break; + } + lld_dev_put(); + + return err; +} +EXPORT_SYMBOL(hinic_ovs_set_vf_nic_state); + +static void slave_host_mgmt_work(struct work_struct *work) +{ + struct hinic_pcidev *pci_adapter = + container_of(work, struct hinic_pcidev, slave_nic_work); + + __set_nic_func_state(pci_adapter); +} + +static void __multi_host_mgmt(struct hinic_pcidev *dev, + struct hinic_multi_host_mgmt_event *mhost_mgmt) +{ + struct hinic_pcidev *des_dev; + struct hinic_mhost_nic_func_state *nic_state = {0}; + + switch (mhost_mgmt->sub_cmd) { + case HINIC_MHOST_NIC_STATE_CHANGE: + nic_state = mhost_mgmt->data; + + nic_state->status = 0; + + /* find func_idx pci_adapter and disable or enable nic */ + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag)) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED && + !test_bit(HINIC_FUNC_PRB_ERR, + &des_dev->flag)) + continue; + + if (hinic_global_func_id_hw(des_dev->hwdev) != + nic_state->func_idx) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED) { + nic_state->status = + test_bit(HINIC_FUNC_PRB_ERR, + &des_dev->flag) ? 
1 : 0;
+				break;
+			}
+
+			sdk_info(&dev->pcidev->dev, "Receive nic state changed event, state: %d\n",
+				 nic_state->enable);
+
+			/* schedule_work */
+			schedule_work(&des_dev->slave_nic_work);
+
+			break;
+		}
+		lld_dev_put();
+
+		break;
+
+	default:
+		sdk_warn(&dev->pcidev->dev, "Received unknown multi-host mgmt event %d\n",
+			 mhost_mgmt->sub_cmd);
+		break;
+	}
+}
+
+void hinic_event_process(void *adapter, struct hinic_event_info *event)
+{
+	struct hinic_pcidev *dev = adapter;
+	enum hinic_service_type type;
+
+	if (event->type == HINIC_EVENT_FMW_ACT_NTC)
+		return hinic_sync_time_to_fmw(dev);
+	else if (event->type == HINIC_EVENT_MCTP_GET_HOST_INFO)
+		return __mctp_get_host_info(dev, &event->mctp_info);
+	else if (event->type == HINIC_EVENT_MULTI_HOST_MGMT)
+		return __multi_host_mgmt(dev, &event->mhost_mgmt);
+
+	for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) {
+		if (test_and_set_bit(type, &dev->state)) {
+			sdk_warn(&dev->pcidev->dev, "Event 0x%x can't be handled, %s is detaching\n",
+				 event->type, s_uld_name[type]);
+			continue;
+		}
+
+		if (g_uld_info[type].event)
+			g_uld_info[type].event(&dev->lld_dev,
+					       dev->uld_dev[type], event);
+		clear_bit(type, &dev->state);
+	}
+}
+
+static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter)
+{
+	u32 db_dwqe_size;
+	u64 dwqe_addr;
+
+	pci_adapter->cfg_reg_base =
+		pci_ioremap_bar(pdev, HINIC_PCI_CFG_REG_BAR);
+	if (!pci_adapter->cfg_reg_base) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to map configuration regs\n");
+		return -ENOMEM;
+	}
+
+	pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+						     HINIC_PCI_INTR_REG_BAR);
+	if (!pci_adapter->intr_reg_base) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to map interrupt regs\n");
+		goto map_intr_bar_err;
+	}
+
+	db_dwqe_size = hinic_get_db_size(pci_adapter->cfg_reg_base,
+					 &pci_adapter->chip_mode);
+
+	pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC_PCI_DB_BAR);
+	pci_adapter->db_base = ioremap(pci_adapter->db_base_phy,
+				       db_dwqe_size);
+	if (!pci_adapter->db_base) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to map doorbell regs\n");
+		goto map_db_err;
+	}
+
+	if (pci_adapter->chip_mode != CHIP_MODE_NORMAL)
+		return 0;
+
+	dwqe_addr = pci_adapter->db_base_phy + db_dwqe_size;
+
+#if defined(__aarch64__)
+	/* arm64 cannot use ioremap_wc() here */
+	pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, db_dwqe_size,
+					      __pgprot(PROT_DEVICE_nGnRnE));
+#else
+	pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr,
+							 db_dwqe_size);
+
+#endif /* end of "defined(__aarch64__)" */
+	if (!pci_adapter->dwqe_mapping) {
+		sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n");
+		goto mapping_dwqe_err;
+	}
+
+	return 0;
+
+mapping_dwqe_err:
+	iounmap(pci_adapter->db_base);
+
+map_db_err:
+	iounmap(pci_adapter->intr_reg_base);
+
+map_intr_bar_err:
+	iounmap(pci_adapter->cfg_reg_base);
+
+	return -ENOMEM;
+}
+
+static void unmapping_bar(struct hinic_pcidev *pci_adapter)
+{
+	if (pci_adapter->chip_mode == CHIP_MODE_NORMAL) {
+#if defined(__aarch64__)
+		iounmap(pci_adapter->dwqe_mapping);
+#else
+		io_mapping_free(pci_adapter->dwqe_mapping);
+#endif /* end of "defined(__aarch64__)" */
+	}
+
+	iounmap(pci_adapter->db_base);
+	iounmap(pci_adapter->intr_reg_base);
+	iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int alloc_chip_node(struct hinic_pcidev *pci_adapter)
+{
+	struct card_node *chip_node;
+	unsigned char i;
+	unsigned char parent_bus_number = 0;
+	int err;
+
+	if (!pci_is_root_bus(pci_adapter->pcidev->bus))
+		parent_bus_number =
pci_adapter->pcidev->bus->parent->number; + + if (parent_bus_number != 0) { + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node->dp_bus_num == parent_bus_number) { + pci_adapter->chip_node = chip_node; + return 0; + } + } + } else if (pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF || + pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF_HV) { + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node) { + pci_adapter->chip_node = chip_node; + return 0; + } + } + } + + for (i = 0; i < MAX_CARD_ID; i++) { + if (!FIND_BIT(card_bit_map, i)) { + card_bit_map = (u64)SET_BIT(card_bit_map, i); + break; + } + } + + if (i == MAX_CARD_ID) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc card id\n"); + return -EFAULT; + } + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc chip node\n"); + goto alloc_chip_err; + } + + chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL); + if (!(chip_node->dbgtool_attr_file.name)) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc dbgtool attr file name\n"); + goto alloc_dbgtool_attr_file_err; + } + + /* parent bus number */ + chip_node->dp_bus_num = parent_bus_number; + + err = snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", + HINIC_CHIP_NAME, i); + if (err <= 0 || err >= IFNAMSIZ) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed snprintf chip_name, function return(%d) and dest_len(%d)\n", + err, IFNAMSIZ); + goto alloc_dbgtool_attr_file_err; + } + + err = snprintf((char *)chip_node->dbgtool_attr_file.name, + IFNAMSIZ, "%s%d", HINIC_CHIP_NAME, i); + if (err <= 0 || err >= IFNAMSIZ) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed snprintf dbgtool_attr_file_name, function return(%d) and dest_len(%d)\n", + err, IFNAMSIZ); + goto alloc_dbgtool_attr_file_err; + } + + sdk_info(&pci_adapter->pcidev->dev, + "Add new chip %s to global list succeed\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, &g_hinic_chip_list); + + INIT_LIST_HEAD(&chip_node->func_list); + pci_adapter->chip_node = chip_node; + + mutex_init(&chip_node->sfp_mutex); + + return 0; + +alloc_dbgtool_attr_file_err: + kfree(chip_node); + +alloc_chip_err: + card_bit_map = CLEAR_BIT(card_bit_map, i); + return -ENOMEM; +} + +static void free_chip_node(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + u32 id; + int err; + + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&pci_adapter->pcidev->dev, + "Delete chip %s from global list succeed\n", + chip_node->chip_name); + err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%u", &id); + if (err <= 0) + sdk_err(&pci_adapter->pcidev->dev, "Failed to get hinic id\n"); + + card_bit_map = CLEAR_BIT(card_bit_map, id); + + kfree(chip_node->dbgtool_attr_file.name); + kfree(chip_node); + } +} + +static bool hinic_get_vf_load_state(struct pci_dev *pdev) +{ + unsigned char parent_bus_number; + struct card_node *chip_node; + u8 id; + + if (!pdev->is_virtfn) + return false; + + /* vf used in vm */ + if (pci_is_root_bus(pdev->bus)) + return disable_vf_load; + + parent_bus_number = pdev->bus->parent->number; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node->dp_bus_num == parent_bus_number) { + for (id = 0; id < HINIC_MAX_PF_NUM; id++) { + if (chip_node->pf_bus_num[id] == + pdev->bus->number) { + lld_dev_put(); + return chip_node->disable_vf_load[id]; + } + } + } + } + lld_dev_put(); + + 
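+	/* No owning PF recorded for this VF's parent bus: fall back to the
+	 * module parameter. The per-PF disable_vf_load[] and pf_bus_num[]
+	 * consulted above are filled in by hinic_set_vf_load_state() when
+	 * the PF itself is initialized.
+	 */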
return disable_vf_load;
+}
+
+static void hinic_set_vf_load_state(struct hinic_pcidev *pci_adapter,
+				    bool vf_load_state)
+{
+	struct card_node *chip_node;
+	u16 func_id;
+
+	if (hinic_func_type(pci_adapter->hwdev) == TYPE_VF)
+		return;
+
+	/* The VF on the BM slave side must be probed */
+	if (is_multi_bm_slave(pci_adapter->hwdev))
+		vf_load_state = false;
+
+	func_id = hinic_global_func_id_hw(pci_adapter->hwdev);
+
+	chip_node = pci_adapter->chip_node;
+	chip_node->disable_vf_load[func_id] = vf_load_state;
+	chip_node->pf_bus_num[func_id] = pci_adapter->pcidev->bus->number;
+
+	sdk_info(&pci_adapter->pcidev->dev, "Current function supports %s, %s vf load in host\n",
+		 (hinic_support_ovs(pci_adapter->hwdev, NULL) ? "ovs" : "nic"),
+		 (vf_load_state ? "disable" : "enable"));
+}
+
+int hinic_ovs_set_vf_load_state(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter;
+
+	if (!pdev) {
+		pr_err("pdev is null\n");
+		return -EINVAL;
+	}
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter) {
+		pr_err("pci_adapter is null\n");
+		return -EFAULT;
+	}
+
+	hinic_set_vf_load_state(pci_adapter, disable_vf_load);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_ovs_set_vf_load_state);
+
+static int hinic_config_deft_mrss(struct pci_dev *pdev)
+{
+	return 0;
+}
+
+static int hinic_config_pci_cto(struct pci_dev *pdev)
+{
+	return 0;
+}
+
+static int hinic_pci_init(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = NULL;
+	int err;
+
+	err = hinic_config_deft_mrss(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to configure Max Read Request Size\n");
+		return err;
+	}
+
+	err = hinic_config_pci_cto(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to configure Completion timeout\n");
+		return err;
+	}
+
+	pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+	if (!pci_adapter) {
+		sdk_err(&pdev->dev,
+			"Failed to alloc pci device adapter\n");
+		return -ENOMEM;
+	}
+	pci_adapter->pcidev = pdev;
+	mutex_init(&pci_adapter->pdev_mutex);
+
+	pci_set_drvdata(pdev, pci_adapter);
+
+#ifdef CONFIG_PCI_IOV
+	if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
+		sdk_info(&pdev->dev, "VFs are not bound to hinic\n");
+		return 0;
+	}
+#endif
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to enable PCI device\n");
+		goto pci_enable_err;
+	}
+
+	err = pci_request_regions(pdev, HINIC_DRV_NAME);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to request regions\n");
+		goto pci_regions_err;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to set DMA mask\n");
+			goto dma_mask_err;
+		}
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		sdk_warn(&pdev->dev,
+			 "Couldn't set 64-bit coherent DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			sdk_err(&pdev->dev,
+				"Failed to set coherent DMA mask\n");
+			goto dma_consistent_mask_err;
+		}
+	}
+
+	return 0;
+
+dma_consistent_mask_err:
+dma_mask_err:
+	pci_clear_master(pdev);
+	pci_disable_pcie_error_reporting(pdev);
+	pci_release_regions(pdev);
+
+pci_regions_err:
+	pci_disable_device(pdev);
+
+pci_enable_err:
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+
+	return err;
+}
+
+static void hinic_pci_deinit(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	pci_clear_master(pdev);
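+	/* Teardown is roughly the reverse of hinic_pci_init(); a strict
+	 * mirror would disable AER reporting before releasing the regions,
+	 * but the order below is harmless since the two are independent.
+	 */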
pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); +} + +static void hinic_notify_ppf_unreg(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + struct hinic_pcidev *dev; + + if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF) + return; + + lld_lock_chip_node(); + list_for_each_entry(dev, &chip_node->func_list, node) { + hinic_ppf_hwdev_unreg(dev->hwdev); + } + lld_unlock_chip_node(); +} + +static void hinic_notify_ppf_reg(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + struct hinic_pcidev *dev; + + if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF) + return; + + lld_lock_chip_node(); + list_for_each_entry(dev, &chip_node->func_list, node) { + hinic_ppf_hwdev_reg(dev->hwdev, pci_adapter->hwdev); + } + lld_unlock_chip_node(); +} + +#ifdef CONFIG_X86 +/** + * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma + * order register to zero + * @pci_adapter: pci adapter + */ +void cfg_order_reg(struct hinic_pcidev *pci_adapter) +{ + u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; + struct cpuinfo_x86 *cpuinfo; + u32 i; + + if (HINIC_FUNC_IS_VF(pci_adapter->hwdev)) + return; + + cpuinfo = &cpu_data(0); + for (i = 0; i < sizeof(cpu_model); i++) { + if (cpu_model[i] == cpuinfo->x86_model) + hinic_set_pcie_order_cfg(pci_adapter->hwdev); + } +} +#endif + +static int hinic_func_init(struct pci_dev *pdev, + struct hinic_pcidev *pci_adapter) +{ + struct hinic_init_para init_para; + bool vf_load_state; + int err; + + init_para.adapter_hdl = pci_adapter; + init_para.pcidev_hdl = pdev; + init_para.dev_hdl = &pdev->dev; + init_para.cfg_reg_base = pci_adapter->cfg_reg_base; + init_para.intr_reg_base = pci_adapter->intr_reg_base; + init_para.db_base = pci_adapter->db_base; + init_para.db_base_phy = pci_adapter->db_base_phy; + init_para.dwqe_mapping = pci_adapter->dwqe_mapping; + init_para.hwdev = &pci_adapter->hwdev; + init_para.chip_node = pci_adapter->chip_node; + init_para.ppf_hwdev = hinic_get_ppf_hwdev_by_pdev(pdev); + err = hinic_init_hwdev(&init_para); + if (err < 0) { + pci_adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); + return -EFAULT; + } else if (err > 0) { + if (err == (1 << HINIC_HWDEV_ALL_INITED) && + pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) { + pci_adapter->init_state = HINIC_INIT_STATE_HW_IF_INITED; + sdk_info(&pdev->dev, + "Initialize hardware device later\n"); + queue_delayed_work(pci_adapter->slave_nic_init_workq, + &pci_adapter->slave_nic_init_dwork, + HINIC_SLAVE_NIC_DELAY_TIME); + set_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + } else if (err != (1 << HINIC_HWDEV_ALL_INITED)) { + sdk_err(&pdev->dev, + "Initialize hardware device partitial failed\n"); + hinic_detect_version_compatible(pci_adapter); + hinic_notify_ppf_reg(pci_adapter); + pci_adapter->init_state = + HINIC_INIT_STATE_HW_PART_INITED; + } + return -EFAULT; + } + + hinic_notify_ppf_reg(pci_adapter); + pci_adapter->init_state = HINIC_INIT_STATE_HWDEV_INITED; + + vf_load_state = hinic_support_ovs(pci_adapter->hwdev, NULL) ? 
+ true : disable_vf_load; + + hinic_set_vf_load_state(pci_adapter, vf_load_state); + hinic_qps_num_set(pci_adapter->hwdev, 0); + + pci_adapter->lld_dev.pdev = pdev; + pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; + pci_adapter->sriov_info.pdev = pdev; + pci_adapter->sriov_info.hwdev = pci_adapter->hwdev; + + hinic_event_register(pci_adapter->hwdev, pci_adapter, + hinic_event_process); + + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) + hinic_sync_time_to_fmw(pci_adapter); + + /* dbgtool init */ + lld_lock_chip_node(); + err = dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node); + if (err) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + hinic_event_unregister(pci_adapter->hwdev); + return err; + } + lld_unlock_chip_node(); + + pci_adapter->init_state = HINIC_INIT_STATE_DBGTOOL_INITED; + + err = hinic_detect_version_compatible(pci_adapter); + if (err) + return err; + + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev) && + FUNC_ENABLE_SRIOV_IN_DEFAULT(pci_adapter->hwdev)) { + hinic_pci_sriov_enable(pdev, + hinic_func_max_vf(pci_adapter->hwdev)); + } + + /* NIC is base driver, probe firstly */ + err = __set_nic_func_state(pci_adapter); + if (err) + return err; + + attach_ulds(pci_adapter); + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) { + err = sysfs_create_group(&pdev->dev.kobj, &hinic_attr_group); + if (err) { + sdk_err(&pdev->dev, "Failed to create sysfs group\n"); + return -EFAULT; + } + } + +#ifdef CONFIG_X86 + cfg_order_reg(pci_adapter); +#endif + + sdk_info(&pdev->dev, "Pcie device probed\n"); + pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED; + + return 0; +} + +static void hinic_func_deinit(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. 
+ */ + hinic_disable_mgmt_msg_report(pci_adapter->hwdev); + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED) + hinic_flush_mgmt_workq(pci_adapter->hwdev); + + hinic_set_func_deinit_flag(pci_adapter->hwdev); + + if (pci_adapter->init_state >= HINIC_INIT_STATE_NIC_INITED) { + detach_ulds(pci_adapter); + detach_uld(pci_adapter, SERVICE_T_NIC); + } + + if (pci_adapter->init_state >= HINIC_INIT_STATE_DBGTOOL_INITED) { + lld_lock_chip_node(); + dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node); + lld_unlock_chip_node(); + hinic_event_unregister(pci_adapter->hwdev); + } + + hinic_notify_ppf_unreg(pci_adapter); + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) { + /* Remove the current node from node-list first, + * then it's safe to free hwdev + */ + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + hinic_free_hwdev(pci_adapter->hwdev); + } +} + +static void wait_tool_unused(void) +{ + u32 loop_cnt = 0; + + while (loop_cnt < HINIC_WAIT_TOOL_CNT_TIMEOUT) { + if (!atomic_read(&tool_used_cnt)) + return; + + usleep_range(9900, 10000); + loop_cnt++; + } +} + +static inline void wait_sriov_cfg_complete(struct hinic_pcidev *pci_adapter) +{ + struct hinic_sriov_info *sriov_info; + u32 loop_cnt = 0; + + sriov_info = &pci_adapter->sriov_info; + + set_bit(HINIC_FUNC_REMOVE, &sriov_info->state); + usleep_range(9900, 10000); + + while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) { + if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) && + !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) + return; + + usleep_range(9900, 10000); + loop_cnt++; + } +} + +static void hinic_remove(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + if (!pci_adapter) + return; + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) { + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); + return; + } +#endif + cancel_delayed_work_sync(&pci_adapter->slave_nic_init_dwork); + flush_workqueue(pci_adapter->slave_nic_init_workq); + destroy_workqueue(pci_adapter->slave_nic_init_workq); + + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) + hinic_detect_hw_present(pci_adapter->hwdev); + + switch (pci_adapter->init_state) { + case HINIC_INIT_STATE_ALL_INITED: + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) + sysfs_remove_group(&pdev->dev.kobj, &hinic_attr_group); + case HINIC_INIT_STATE_NIC_INITED: + /* Don't support hotplug when SR-IOV is enabled now. + * So disable SR-IOV capability as normal. 
+ */ + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) { + wait_sriov_cfg_complete(pci_adapter); + hinic_pci_sriov_disable(pdev); + } + case HINIC_INIT_STATE_DBGTOOL_INITED: + case HINIC_INIT_STATE_HWDEV_INITED: + case HINIC_INIT_STATE_HW_PART_INITED: + case HINIC_INIT_STATE_HW_IF_INITED: + case HINIC_INIT_STATE_PCI_INITED: + set_bit(HINIC_FUNC_IN_REMOVE, &pci_adapter->flag); + lld_lock_chip_node(); + cancel_work_sync(&pci_adapter->slave_nic_work); + lld_unlock_chip_node(); + + wait_tool_unused(); + + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) + hinic_func_deinit(pdev); + + lld_lock_chip_node(); + if (pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) + list_del(&pci_adapter->node); + nictool_k_uninit(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + hinic_pci_deinit(pdev); + + break; + + default: + break; + } + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +static void slave_host_init_delay_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic_pcidev *pci_adapter = container_of(delay, + struct hinic_pcidev, slave_nic_init_dwork); + struct pci_dev *pdev = pci_adapter->pcidev; + struct card_node *chip_node = pci_adapter->chip_node; + int found = 0; + struct hinic_pcidev *ppf_pcidev = NULL; + int err; + + if (!hinic_get_master_host_mbox_enable(pci_adapter->hwdev)) { + queue_delayed_work(pci_adapter->slave_nic_init_workq, + &pci_adapter->slave_nic_init_dwork, + HINIC_SLAVE_NIC_DELAY_TIME); + return; + } + if (hinic_func_type(pci_adapter->hwdev) == TYPE_PPF) { + err = hinic_func_init(pdev, pci_adapter); + clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + if (err) + set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + return; + } + + /* Make sure the PPF must be the first one */ + lld_dev_hold(); + list_for_each_entry(ppf_pcidev, &chip_node->func_list, node) { + if (ppf_pcidev && + hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) { + found = 1; + break; + } + } + lld_dev_put(); + if (found && ppf_pcidev->init_state == HINIC_INIT_STATE_ALL_INITED) { + err = hinic_func_init(pdev, pci_adapter); + clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + if (err) + set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + return; + } + queue_delayed_work(pci_adapter->slave_nic_init_workq, + &pci_adapter->slave_nic_init_dwork, + HINIC_SLAVE_NIC_DELAY_TIME); +} + +static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hinic_pcidev *pci_adapter; + int err; + + sdk_info(&pdev->dev, "Pcie device probe begin\n"); + + err = hinic_pci_init(pdev); + if (err) + return err; + +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) + return 0; +#endif + + pci_adapter = pci_get_drvdata(pdev); + clear_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + err = mapping_bar(pdev, pci_adapter); + if (err) { + sdk_err(&pdev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + pci_adapter->id = *id; + INIT_WORK(&pci_adapter->slave_nic_work, slave_host_mgmt_work); + pci_adapter->slave_nic_init_workq = + create_singlethread_workqueue(HINIC_SLAVE_NIC_DELAY); + if (!pci_adapter->slave_nic_init_workq) { + sdk_err(&pdev->dev, + "Failed to create work queue:%s\n", + HINIC_SLAVE_NIC_DELAY); + goto ceate_nic_delay_work_fail; + } + INIT_DELAYED_WORK(&pci_adapter->slave_nic_init_dwork, + slave_host_init_delay_work); + + /* if chip information of pcie function exist, + * add the function into chip + */ + 
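+	/* All functions of one physical card share a single card_node:
+	 * alloc_chip_node() matches on the parent (downstream port) bus
+	 * number, so this function either joins an existing "hinicX" node
+	 * or allocates a new one in g_hinic_chip_list.
+	 */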
lld_lock_chip_node(); + err = alloc_chip_node(pci_adapter); + if (err) { + sdk_err(&pdev->dev, + "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + + err = nictool_k_init(); + if (err) { + sdk_warn(&pdev->dev, "Failed to init nictool"); + goto init_nictool_err; + } + + list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); + + lld_unlock_chip_node(); + + pci_adapter->init_state = HINIC_INIT_STATE_PCI_INITED; + + err = hinic_func_init(pdev, pci_adapter); + if (err) + goto func_init_err; + + return 0; + +func_init_err: + if (!test_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag)) + set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + return 0; + +init_nictool_err: + free_chip_node(pci_adapter); + +alloc_chip_node_fail: + lld_unlock_chip_node(); +ceate_nic_delay_work_fail: + unmapping_bar(pci_adapter); +map_bar_failed: + hinic_pci_deinit(pdev); + + + sdk_err(&pdev->dev, "Pcie device probe failed\n"); + return err; +} + +static const struct pci_device_id hinic_pci_table[] = { + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PF), HINIC_BOARD_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF_HV), 0}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_SMTIO), HINIC_BOARD_PG_SM_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_100GE), + HINIC_BOARD_PG_100GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_TP_10GE), + HINIC_BOARD_PG_TP_10GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_40GE), HINIC_BOARD_40GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_100GE), HINIC_BOARD_100GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_25GE), HINIC_BOARD_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_MULTI_HOST), HINIC_BOARD_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_100GE), HINIC_BOARD_100GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_DUAL_25GE), HINIC_BOARD_25GE}, + {0, 0} +}; + +MODULE_DEVICE_TABLE(pci, hinic_pci_table); + +/** + * hinic_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + * + * Since we only need error detecting not error handling, so we + * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER + * driver that we don't need reset(error handling). + */ +static pci_ers_result_t hinic_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hinic_pcidev *pci_adapter; + + sdk_err(&pdev->dev, + "Uncorrectable error detected, log and cleanup error status: 0x%08x\n", + state); + + pci_cleanup_aer_uncorrect_error_status(pdev); + pci_adapter = pci_get_drvdata(pdev); + + if (pci_adapter) + hinic_record_pcie_error(pci_adapter->hwdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static void hinic_shutdown(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + sdk_info(&pdev->dev, "Shutdown device\n"); + + if (pci_adapter) + hinic_shutdown_hwdev(pci_adapter->hwdev); + + pci_disable_device(pdev); + + if (pci_adapter) + hinic_set_api_stop(pci_adapter->hwdev); +} + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh hinic_driver_rh = { + .sriov_configure = hinic_pci_sriov_configure, +}; +#endif + +/* Cause we only need error detecting not error handling, so only error_detected + * callback is enough. 
+ */ +static struct pci_error_handlers hinic_err_handler = { + .error_detected = hinic_io_error_detected, +}; + +static struct pci_driver hinic_driver = { + .name = HINIC_DRV_NAME, + .id_table = hinic_pci_table, + .probe = hinic_probe, + .remove = hinic_remove, + .shutdown = hinic_shutdown, + +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = hinic_pci_sriov_configure, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &hinic_driver_rh, +#endif + + .err_handler = &hinic_err_handler +}; + +static int __init hinic_lld_init(void) +{ + pr_info("%s - version %s\n", HINIC_DRV_DESC, HINIC_DRV_VERSION); + memset(g_uld_info, 0, sizeof(g_uld_info)); + atomic_set(&tool_used_cnt, 0); + + hinic_lld_lock_init(); + + + /* register nic driver information first, and add net device in + * nic_probe called by hinic_probe. + */ + hinic_register_uld(SERVICE_T_NIC, &nic_uld_info); + + return pci_register_driver(&hinic_driver); +} + +static void __exit hinic_lld_exit(void) +{ + + pci_unregister_driver(&hinic_driver); + + hinic_unregister_uld(SERVICE_T_NIC); + +} + +module_init(hinic_lld_init); +module_exit(hinic_lld_exit); +int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!micro_log_info || !micro_log_info->init || + !micro_log_info->deinit) { + pr_err("Invalid information of micro log info to register\n"); + return -EINVAL; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (hinic_func_type(dev->hwdev) == TYPE_PPF) { + if (micro_log_info->init(dev->hwdev)) { + sdk_err(&dev->pcidev->dev, + "micro log init failed\n"); + continue; + } + } + } + } + lld_dev_put(); + pr_info("Register micro log succeed\n"); + + return 0; +} +EXPORT_SYMBOL(hinic_register_micro_log); + +void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!micro_log_info) + return; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (hinic_func_type(dev->hwdev) == TYPE_PPF) + micro_log_info->deinit(dev->hwdev); + } + } + lld_dev_put(); + pr_info("Unregister micro log succeed\n"); +} +EXPORT_SYMBOL(hinic_unregister_micro_log); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.h b/drivers/net/ethernet/huawei/hinic/hinic_lld.h new file mode 100644 index 000000000000..03cdf009ff33 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ *
+ */
+
+#ifndef HINIC_LLD_H_
+#define HINIC_LLD_H_
+
+#define HINIC_SLAVE_NIC_DELAY "hinic_slave_nic_delay"
+#define HINIC_SLAVE_NIC_DELAY_TIME (5 * HZ)
+
+struct hinic_lld_dev {
+	struct pci_dev *pdev;
+	void *hwdev;
+};
+
+enum hinic_init_state {
+	HINIC_INIT_STATE_NONE,
+	HINIC_INIT_STATE_PCI_INITED,
+	HINIC_INIT_STATE_HW_IF_INITED,
+	HINIC_INIT_STATE_HW_PART_INITED,
+	HINIC_INIT_STATE_HWDEV_INITED,
+	HINIC_INIT_STATE_DBGTOOL_INITED,
+	HINIC_INIT_STATE_NIC_INITED,
+	HINIC_INIT_STATE_ALL_INITED,
+};
+
+struct hinic_uld_info {
+	/* uld_dev: probe should not return a null uld_dev even if the
+	 * function does not support the upper-layer driver's service
+	 * uld_dev_name: the NIC driver should copy the net device name,
+	 * the FC driver could copy the fc device name, other upper-layer
+	 * drivers don't need to copy anything
+	 */
+	int (*probe)(struct hinic_lld_dev *lld_dev,
+		     void **uld_dev, char *uld_dev_name);
+	void (*remove)(struct hinic_lld_dev *lld_dev, void *uld_dev);
+	int (*suspend)(struct hinic_lld_dev *lld_dev,
+		       void *uld_dev, pm_message_t state);
+	int (*resume)(struct hinic_lld_dev *lld_dev, void *uld_dev);
+	void (*event)(struct hinic_lld_dev *lld_dev, void *uld_dev,
+		      struct hinic_event_info *event);
+	int (*ioctl)(void *uld_dev, u32 cmd, void *buf_in,
+		     u32 in_size, void *buf_out, u32 *out_size);
+};
+
+/* Used for the ULD HiNIC PCIe driver registration interface,
+ * the original interface is service_register_interface
+ */
+int hinic_register_uld(enum hinic_service_type uld_type,
+		       struct hinic_uld_info *uld_info);
+
+/* Used for the ULD HiNIC PCIe driver unregistration interface,
+ * the original interface is service_unregister_interface
+ */
+void hinic_unregister_uld(enum hinic_service_type uld_type);
+
+void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
+				enum hinic_service_type type);
+
+/* used for TOE/IWARP */
+struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev);
+/* used for TOE/IWARP */
+void *hinic_get_hwdev_by_netdev(struct net_device *netdev);
+
+struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev);
+void *hinic_get_hwdev_by_ifname(char *ifname);
+int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname);
+void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type);
+void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type);
+
+int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]);
+int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up);
+int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted,
+			      u8 *cos_up);
+void hinic_get_all_chip_id(void *card_id);
+void hinic_get_card_info(void *hwdev, void *bufin);
+int hinic_get_device_id(void *hwdev, u16 *dev_id);
+void get_fc_devname(char *devname);
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid);
+
+void hinic_tool_cnt_inc(void);
+void hinic_tool_cnt_dec(void);
+
+struct hinic_sriov_info;
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev);
+
+int hinic_attach_nic(struct hinic_lld_dev *lld_dev);
+void hinic_detach_nic(struct hinic_lld_dev *lld_dev);
+
+int hinic_attach_roce(struct hinic_lld_dev *lld_dev);
+void hinic_detach_roce(struct hinic_lld_dev *lld_dev);
+
+int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev);
+int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev);
+
+int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev,
+			       u16 vf_func_id, bool en);
+
+int hinic_ovs_set_vf_load_state(struct pci_dev *pdev);
+
+int hinic_get_self_test_result(char *ifname, u32 *result);
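+/* Minimal registration sketch for an upper-layer driver using
+ * hinic_register_uld() above (illustrative only; the my_toe_* callbacks
+ * are placeholders, not part of this patch):
+ *
+ *	static struct hinic_uld_info my_toe_uld_info = {
+ *		.probe	= my_toe_probe,
+ *		.remove	= my_toe_remove,
+ *		.event	= my_toe_event,
+ *	};
+ *
+ *	err = hinic_register_uld(SERVICE_T_TOE, &my_toe_uld_info);
+ *	if (err)
+ *		return err;
+ *	...
+ *	hinic_unregister_uld(SERVICE_T_TOE);
+ */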
+enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname); +enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev); + +extern struct hinic_uld_info g_uld_info[SERVICE_T_MAX]; + +struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev); +bool hinic_is_in_host(void); + +bool hinic_is_valid_bar_addr(u64 offset); + + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 2411ad270c98..2c4e488de989 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -2,329 +2,2400 @@ /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt #include -#include -#include #include #include -#include +#include +#include #include +#include +#include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" -#include "hinic_port.h" +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_dbg.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" #include "hinic_tx.h" #include "hinic_rx.h" -#include "hinic_dev.h" +#include "hinic_qp.h" +#include "hinic_dcb.h" +#include "hinic_lld.h" +#include "hinic_sriov.h" +#include "hinic_pci_id_tbl.h" -MODULE_AUTHOR("Huawei Technologies CO., Ltd"); -MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); -MODULE_LICENSE("GPL"); +static u16 num_qps; +module_param(num_qps, ushort, 0444); +MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default unset)"); -static unsigned int tx_weight = 64; -module_param(tx_weight, uint, 0644); -MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)"); +static u16 ovs_num_qps = 16; +module_param(ovs_num_qps, ushort, 0444); +MODULE_PARM_DESC(ovs_num_qps, "Number of Queue Pairs in ovs mode (default=16)"); -static unsigned int rx_weight = 64; -module_param(rx_weight, uint, 0644); -MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); +#define DEFAULT_POLL_WEIGHT 64 +static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; +module_param(poll_weight, uint, 0444); +MODULE_PARM_DESC(poll_weight, "Number packets for NAPI budget (default=64)"); -#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 -#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200 -#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205 -#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210 +#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32 +#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 -#define HINIC_WQ_NAME "hinic_dev" +/* suit for sdi3.0 vm mode, change this define for test best performance */ +#define SDI_VM_PENDING_LIMT 2 +#define SDI_VM_COALESCE_TIMER_CFG 16 +#define SDI_VM_RX_PKT_RATE_HIGH 1000000 +#define SDI_VM_RX_PKT_RATE_LOW 30000 +#define SDI_VM_RX_USECS_HIGH 56 +#define SDI_VM_RX_PENDING_LIMT_HIGH 20 +#define SDI_VM_RX_USECS_LOW 
16 +#define SDI_VM_RX_PENDING_LIMT_LOW 2 -#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_IFUP | \ - NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) +/* if qp_coalesc_use_drv_params_switch !=0, use user setting params */ +static unsigned char qp_coalesc_use_drv_params_switch; +module_param(qp_coalesc_use_drv_params_switch, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_use_drv_params_switch, "QP MSI-X Interrupt coalescing parameter switch (default=0, not use drv parameter)"); -#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8 +static unsigned char qp_pending_limit = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(qp_pending_limit, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)"); -#define HINIC_LRO_RX_TIMER_DEFAULT 16 +static unsigned char qp_coalesc_timer_cfg = + HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; +module_param(qp_coalesc_timer_cfg, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=32)"); -#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) +/* For arm64 server, the best known configuration of lro max wqe number + * is 4 (8K), for x86_64 server, it is 8 (16K). You can also + * configure these values by hinicadm. + */ +static unsigned char set_max_wqe_num; +module_param(set_max_wqe_num, byte, 0444); +MODULE_PARM_DESC(set_max_wqe_num, "Set lro max wqe number, valid range is 1 - 32, default is 4(arm) / 8(x86)"); -#define work_to_rx_mode_work(work) \ - container_of(work, struct hinic_rx_mode_work, work) +#define DEFAULT_RX_BUFF_LEN 2 +u16 rx_buff = DEFAULT_RX_BUFF_LEN; +module_param(rx_buff, ushort, 0444); +MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n. 2 - 16, default is 2KB"); -#define rx_mode_work_to_nic_dev(rx_mode_work) \ - container_of(rx_mode_work, struct hinic_dev, rx_mode_work) +static u32 set_lro_timer; +module_param(set_lro_timer, uint, 0444); +MODULE_PARM_DESC(set_lro_timer, "Set lro timer in micro second, valid range is 1 - 1024, default is 16"); -static int change_mac_addr(struct net_device *netdev, const u8 *addr); +static unsigned char set_link_status_follow = HINIC_LINK_FOLLOW_STATUS_MAX; +module_param(set_link_status_follow, byte, 0444); +MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status. 0 - default, 1 - follow, 2 - separate, other - unset. 
(default unset)"); -static int set_features(struct hinic_dev *nic_dev, - netdev_features_t pre_features, - netdev_features_t features, bool force_change); -static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq) -{ - struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats; - struct hinic_rxq_stats rx_stats; +static unsigned int lro_replenish_thld = 256; +module_param(lro_replenish_thld, uint, 0444); +MODULE_PARM_DESC(lro_replenish_thld, "Number wqe for lro replenish buffer (default=256)"); - u64_stats_init(&rx_stats.syncp); - hinic_rxq_get_stats(rxq, &rx_stats); +static bool l2nic_interrupt_switch = true; +module_param(l2nic_interrupt_switch, bool, 0644); +MODULE_PARM_DESC(l2nic_interrupt_switch, "Control whether execute l2nic io interrupt switch or not, default is true"); - u64_stats_update_begin(&nic_rx_stats->syncp); - nic_rx_stats->bytes += rx_stats.bytes; - nic_rx_stats->pkts += rx_stats.pkts; - nic_rx_stats->errors += rx_stats.errors; - nic_rx_stats->csum_errors += rx_stats.csum_errors; - nic_rx_stats->other_errors += rx_stats.other_errors; - u64_stats_update_end(&nic_rx_stats->syncp); +static unsigned char lro_en_status = HINIC_LRO_STATUS_UNSET; +module_param(lro_en_status, byte, 0444); +MODULE_PARM_DESC(lro_en_status, "lro enable status. 0 - disable, 1 - enable, other - unset. (default unset)"); - hinic_rxq_clean_stats(rxq); -} +static unsigned char qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW; +module_param(qp_pending_limit_low, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit_low, "MSI-X adaptive low coalesce pending limit, range is 0 - 255"); -static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq) -{ - struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats; - struct hinic_txq_stats tx_stats; +static unsigned char qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW; +module_param(qp_coalesc_timer_low, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_low, "MSI-X adaptive low coalesce time, range is 0 - 255"); - u64_stats_init(&tx_stats.syncp); +static unsigned char qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH; +module_param(qp_pending_limit_high, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit_high, "MSI-X adaptive high coalesce pending limit, range is 0 - 255"); - hinic_txq_get_stats(txq, &tx_stats); +static unsigned char qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH; +module_param(qp_coalesc_timer_high, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_high, "MSI-X adaptive high coalesce time, range is 0 - 255"); - u64_stats_update_begin(&nic_tx_stats->syncp); - nic_tx_stats->bytes += tx_stats.bytes; - nic_tx_stats->pkts += tx_stats.pkts; - nic_tx_stats->tx_busy += tx_stats.tx_busy; - nic_tx_stats->tx_wake += tx_stats.tx_wake; - nic_tx_stats->tx_dropped += tx_stats.tx_dropped; - nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts; - u64_stats_update_end(&nic_tx_stats->syncp); +static unsigned int enable_bp = 0; - hinic_txq_clean_stats(txq); -} +static unsigned int bp_lower_thd = HINIC_RX_BP_LOWER_THD; +static unsigned int bp_upper_thd = HINIC_RX_BP_UPPER_THD; -static void update_nic_stats(struct hinic_dev *nic_dev) -{ - int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); +#define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq" - for (i = 0; i < num_qps; i++) - update_rx_stats(nic_dev, &nic_dev->rxqs[i]); +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) - for (i = 0; i < num_qps; i++) - update_tx_stats(nic_dev, &nic_dev->txqs[i]); -} +#define QID_MASKED(q_id, nic_dev) (q_id & ((nic_dev)->num_qps - 1)) -/** - * 
create_txqs - Create the Logical Tx Queues of specific NIC device - * @nic_dev: the specific NIC device - * - * Return 0 - Success, negative - Failure - **/ -static int create_txqs(struct hinic_dev *nic_dev) -{ - int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); - struct net_device *netdev = nic_dev->netdev; - size_t txq_size; +#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) - if (nic_dev->txqs) - return -EINVAL; +#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) - txq_size = num_txqs * sizeof(*nic_dev->txqs); - nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); - if (!nic_dev->txqs) - return -ENOMEM; +#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BITS_SIZE(nic_dev)) - for (i = 0; i < num_txqs; i++) { - struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i); +#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BYTE_SIZE(nic_dev)) - err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to init Txq\n"); - goto err_init_txq; - } - } +#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev)) +#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1)) - return 0; +enum hinic_rx_mod { + HINIC_RX_MODE_UC = 1 << 0, + HINIC_RX_MODE_MC = 1 << 1, + HINIC_RX_MODE_BC = 1 << 2, + HINIC_RX_MODE_MC_ALL = 1 << 3, + HINIC_RX_MODE_PROMISC = 1 << 4, +}; -err_init_txq: - for (j = 0; j < i; j++) - hinic_clean_txq(&nic_dev->txqs[j]); +enum hinic_rx_buff_len { + RX_BUFF_VALID_2KB = 2, + RX_BUFF_VALID_4KB = 4, + RX_BUFF_VALID_8KB = 8, + RX_BUFF_VALID_16KB = 16, +}; - devm_kfree(&netdev->dev, nic_dev->txqs); - return err; -} +#define HINIC_AVG_PKT_SMALL 256U +#define HINIC_MODERATONE_DELAY HZ +#define CONVERT_UNIT 1024 -/** - * free_txqs - Free the Logical Tx Queues of specific NIC device - * @nic_dev: the specific NIC device - **/ -static void free_txqs(struct hinic_dev *nic_dev) -{ - int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); - struct net_device *netdev = nic_dev->netdev; +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN +int hinic_netdev_event(struct notifier_block *notifier, + unsigned long event, void *ptr); - if (!nic_dev->txqs) - return; +/* used for netdev notifier register/unregister */ +DEFINE_MUTEX(g_hinic_netdev_notifiers_mutex); +static int hinic_netdev_notifiers_ref_cnt; +static struct notifier_block hinic_netdev_notifier = { + .notifier_call = hinic_netdev_event, +}; - for (i = 0; i < num_txqs; i++) - hinic_clean_txq(&nic_dev->txqs[i]); - - devm_kfree(&netdev->dev, nic_dev->txqs); - nic_dev->txqs = NULL; -} - -/** - * create_txqs - Create the Logical Rx Queues of specific NIC device - * @nic_dev: the specific NIC device - * - * Return 0 - Success, negative - Failure - **/ -static int create_rxqs(struct hinic_dev *nic_dev) -{ - int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); - struct net_device *netdev = nic_dev->netdev; - size_t rxq_size; - - if (nic_dev->rxqs) - return -EINVAL; - - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); - if (!nic_dev->rxqs) - return -ENOMEM; - - for (i = 0; i < num_rxqs; i++) { - struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i); - - err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to init rxq\n"); - goto err_init_rxq; - } - } - - return 0; - -err_init_rxq: - for (j = 0; j < i; j++) - hinic_clean_rxq(&nic_dev->rxqs[j]); - - 
devm_kfree(&netdev->dev, nic_dev->rxqs); - return err; -} - -/** - * free_txqs - Free the Logical Rx Queues of specific NIC device - * @nic_dev: the specific NIC device - **/ -static void free_rxqs(struct hinic_dev *nic_dev) -{ - int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); - struct net_device *netdev = nic_dev->netdev; - - if (!nic_dev->rxqs) - return; - - for (i = 0; i < num_rxqs; i++) - hinic_clean_rxq(&nic_dev->rxqs[i]); - - devm_kfree(&netdev->dev, nic_dev->rxqs); - nic_dev->rxqs = NULL; -} - -static int hinic_configure_max_qnum(struct hinic_dev *nic_dev) +static void hinic_register_notifier(struct hinic_nic_dev *nic_dev) { int err; - err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps); - if (err) + mutex_lock(&g_hinic_netdev_notifiers_mutex); + hinic_netdev_notifiers_ref_cnt++; + if (hinic_netdev_notifiers_ref_cnt == 1) { + err = register_netdevice_notifier(&hinic_netdev_notifier); + if (err) { + hinic_info(nic_dev, drv, "Register netdevice notifier failed, err: %d\n", + err); + hinic_netdev_notifiers_ref_cnt--; + } + } + mutex_unlock(&g_hinic_netdev_notifiers_mutex); +} + +static void hinic_unregister_notifier(struct hinic_nic_dev *nic_dev) +{ + mutex_lock(&g_hinic_netdev_notifiers_mutex); + if (hinic_netdev_notifiers_ref_cnt == 1) + unregister_netdevice_notifier(&hinic_netdev_notifier); + + if (hinic_netdev_notifiers_ref_cnt) + hinic_netdev_notifiers_ref_cnt--; + mutex_unlock(&g_hinic_netdev_notifiers_mutex); +} + +#define HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 2 +#define HINIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ + NETIF_F_ALL_TSO) + +int hinic_netdev_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct net_device *real_dev, *ret; + struct hinic_nic_dev *nic_dev; + u16 vlan_depth; + + if (!is_vlan_dev(ndev)) + return NOTIFY_DONE; + + dev_hold(ndev); + + switch (event) { + case NETDEV_REGISTER: + real_dev = vlan_dev_real_dev(ndev); + nic_dev = hinic_get_uld_dev_by_ifname(real_dev->name, + SERVICE_T_NIC); + if (!nic_dev) + goto out; + + vlan_depth = 1; + ret = vlan_dev_priv(ndev)->real_dev; + while (is_vlan_dev(ret)) { + ret = vlan_dev_priv(ret)->real_dev; + vlan_depth++; + } + + if (vlan_depth == HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + ndev->vlan_features &= (~HINIC_VLAN_CLEAR_OFFLOAD); + } else if (vlan_depth > HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(ndev, + get_netdev_hw_features(ndev) & + (~HINIC_VLAN_CLEAR_OFFLOAD)); +#else + ndev->hw_features &= (~HINIC_VLAN_CLEAR_OFFLOAD); +#endif +#endif + ndev->features &= (~HINIC_VLAN_CLEAR_OFFLOAD); + } + + break; + + default: + break; + }; + +out: + dev_put(ndev); + + return NOTIFY_DONE; +} +#endif + +void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) || + test_bit(HINIC_LP_TEST, &nic_dev->flags)) + return; + + if (status) { + if (netif_carrier_ok(netdev)) + return; + + nic_dev->link_status = status; + netif_carrier_on(netdev); + nicif_info(nic_dev, link, netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; + + nic_dev->link_status = status; + netif_carrier_off(netdev); + nicif_info(nic_dev, link, netdev, "Link is down\n"); + } +} + +static void hinic_heart_lost(struct hinic_nic_dev *nic_dev) +{ + nic_dev->heart_status = 
false;
+}
+
+static int hinic_setup_qps_resources(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int err;
+
+	err = hinic_setup_all_tx_resources(netdev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Failed to create Tx queues\n");
 		return err;
+	}
+
+	err = hinic_setup_all_rx_resources(netdev, nic_dev->qps_irq_info);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Failed to create Rx queues\n");
+		goto create_rxqs_err;
+	}
+
+	return 0;
+
+create_rxqs_err:
+	hinic_free_all_tx_resources(netdev);
+
+	return err;
+}
+
+static int hinic_configure(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int err;
+
+	/* rx rss init */
+	err = hinic_rx_configure(netdev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n");
+		return err;
+	}
 
 	return 0;
 }
 
-static int hinic_rss_init(struct hinic_dev *nic_dev)
+static void hinic_remove_configure(struct hinic_nic_dev *nic_dev)
 {
-	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
-	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
-	u32 *indir_tbl;
-	int err, i;
+	hinic_rx_remove_configure(nic_dev->netdev);
+}
 
-	indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
-	if (!indir_tbl)
+static void hinic_setup_dcb_qps(struct hinic_nic_dev *nic_dev, u16 max_qps)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u16 num_rss;
+	u8 num_tcs;
+	u8 i;
+
+	if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) ||
+	    !test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+		return;
+
+	num_tcs = (u8)netdev_get_num_tc(netdev);
+	/* For now, changing num_tcs is not supported */
+	if (num_tcs != nic_dev->max_cos || max_qps < num_tcs) {
+		nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %d or num_qps: %d, disable DCB\n",
+			  num_tcs, max_qps);
+		netdev_reset_tc(netdev);
+		clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+		/* if we can't enable rss or get enough num_qps,
+		 * we need to sync the default configuration to hw
+		 */
+		hinic_configure_dcb(netdev);
+	} else {
+		/* We bind sq with cos but not tc */
+		num_rss = (u16)(max_qps / nic_dev->max_cos);
+		num_rss = min_t(u16, num_rss, nic_dev->rss_limit);
+		for (i = 0; i < nic_dev->max_cos; i++)
+			netdev_set_tc_queue(netdev, i, num_rss,
+					    (u16)(num_rss * i));
+
+		nic_dev->num_rss = num_rss;
+		nic_dev->num_qps = (u16)(num_tcs * num_rss);
+	}
+}
+
+/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */
+static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u32 irq_size;
+	u16 resp_irq_num, i;
+	int err;
+
+	if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+		nic_dev->num_rss = nic_dev->rss_limit;
+		nic_dev->num_qps = nic_dev->rss_limit;
+	} else {
+		nic_dev->num_rss = 0;
+		nic_dev->num_qps = 1;
+	}
+
+	hinic_setup_dcb_qps(nic_dev, nic_dev->max_qps);
+
+	irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->num_qps;
+	if (!irq_size) {
+		nicif_err(nic_dev, drv, netdev, "Cannot allocate zero-size irq entries\n");
+		return -EINVAL;
+	}
+	nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL);
+	if (!nic_dev->qps_irq_info) {
+		nicif_err(nic_dev, drv, netdev, "Failed to alloc msix entries\n");
 		return -ENOMEM;
+	}
 
-	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
-	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
-		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
+	err = hinic_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->num_qps,
+			       nic_dev->qps_irq_info, &resp_irq_num);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to alloc irqs\n");
+		kfree(nic_dev->qps_irq_info);
+		return err;
+	}
 
-	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
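/*
 * A minimal sketch (not part of this patch) of the queue layout that
 * hinic_setup_dcb_qps() programs above: with DCB and RSS both enabled,
 * the queue pairs are carved into max_cos equal groups and each traffic
 * class is mapped onto one contiguous group via netdev_set_tc_queue().
 * All values here are illustrative; a real caller would first have set
 * the number of tcs with netdev_set_num_tc().
 */
static void example_dcb_queue_layout(struct net_device *netdev,
				     u16 max_qps, u8 max_cos, u16 rss_limit)
{
	u16 num_rss = min_t(u16, (u16)(max_qps / max_cos), rss_limit);
	u8 i;

	for (i = 0; i < max_cos; i++)
		/* tc i owns tx queues [i * num_rss, (i + 1) * num_rss) */
		netdev_set_tc_queue(netdev, i, num_rss, (u16)(num_rss * i));
}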
-	if (err)
-		goto out;
+	/* fewer irqs are available than the queue number, adjust it */
+	if (resp_irq_num < nic_dev->num_qps) {
+		nic_dev->num_qps = resp_irq_num;
+		nic_dev->num_rss = nic_dev->num_qps;
+		hinic_setup_dcb_qps(nic_dev, nic_dev->num_qps);
+		nicif_warn(nic_dev, drv, netdev,
+			   "Cannot get enough irqs, adjust num_qps to %d\n",
+			   nic_dev->num_qps);
+		/* after adjusting num_qps, free the remaining irqs */
+		for (i = nic_dev->num_qps; i < resp_irq_num; i++)
+			hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
+				       nic_dev->qps_irq_info[i].irq_id);
+	}
 
-	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
-	if (err)
-		goto out;
+	nicif_info(nic_dev, drv, netdev, "Finally num_qps: %d, num_rss: %d\n",
+		   nic_dev->num_qps, nic_dev->num_rss);
 
-	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
-	if (err)
-		goto out;
+	return 0;
+}
 
-	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
-					nic_dev->rss_hash_engine);
-	if (err)
-		goto out;
+static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
+{
+	u16 i;
 
-	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
-	if (err)
-		goto out;
+	for (i = 0; i < nic_dev->num_qps; i++)
+		hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
+			       nic_dev->qps_irq_info[i].irq_id);
+
+	kfree(nic_dev->qps_irq_info);
+}
+
+static int hinic_poll(struct napi_struct *napi, int budget)
+{
+	int tx_pkts, rx_pkts;
+	struct hinic_irq *irq_cfg = container_of(napi, struct hinic_irq, napi);
+	struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+	rx_pkts = hinic_rx_poll(irq_cfg->rxq, budget);
+
+	tx_pkts = hinic_tx_poll(irq_cfg->txq, budget);
+
+	if (tx_pkts >= budget || rx_pkts >= budget)
+		return budget;
+
+	set_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
+	rx_pkts += hinic_rx_poll(irq_cfg->rxq, budget - rx_pkts);
+	tx_pkts += hinic_tx_poll(irq_cfg->txq, budget - tx_pkts);
+	if (rx_pkts >= budget || tx_pkts >= budget) {
+		clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
+		return budget;
+	}
+
+	napi_complete(napi);
+
+	if (!test_and_set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag)) {
+		if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+			hinic_set_msix_state(nic_dev->hwdev,
+					     irq_cfg->msix_entry_idx,
+					     HINIC_MSIX_ENABLE);
+		else if (!nic_dev->in_vm &&
+			 (hinic_get_func_mode(nic_dev->hwdev) ==
+			  FUNC_MOD_NORMAL_HOST))
+			enable_irq(irq_cfg->irq_id);
+	}
+
+	return max(tx_pkts, rx_pkts);
+}
+
+static void qp_add_napi(struct hinic_irq *irq_cfg)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+	netif_napi_add(nic_dev->netdev, &irq_cfg->napi,
+		       hinic_poll, nic_dev->poll_weight);
+	napi_enable(&irq_cfg->napi);
+}
+
+static void qp_del_napi(struct hinic_irq *irq_cfg)
+{
+	napi_disable(&irq_cfg->napi);
+	netif_napi_del(&irq_cfg->napi);
+}
+
+static irqreturn_t qp_irq(int irq, void *data)
+{
+	struct hinic_irq *irq_cfg = (struct hinic_irq *)data;
+	struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+	u16 msix_entry_idx = irq_cfg->msix_entry_idx;
+
+	if (napi_schedule_prep(&irq_cfg->napi)) {
+		if (l2nic_interrupt_switch) {
+			/* Disable the interrupt until NAPI processing completes */
+			if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+				hinic_set_msix_state(nic_dev->hwdev,
+						     msix_entry_idx,
+						     HINIC_MSIX_DISABLE);
+			} else if (!nic_dev->in_vm &&
+				   (hinic_get_func_mode(nic_dev->hwdev) ==
+				    FUNC_MOD_NORMAL_HOST)) {
+				disable_irq_nosync(irq_cfg->irq_id);
+			}
+
+			clear_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
+		}
+
+		hinic_misx_intr_clear_resend_bit(nic_dev->hwdev,
+						 msix_entry_idx, 1);
+
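/*
 * A minimal sketch (not part of this patch) of the interrupt/NAPI
 * handshake implemented by qp_irq() and hinic_poll() above: the hard
 * irq handler masks its vector and schedules NAPI, and the poll routine
 * unmasks it only after napi_complete(), so the vector stays quiet
 * while the queue is still being polled. The driver masks the MSI-X
 * vector via hinic_set_msix_state() on PFs; this sketch uses the
 * generic disable_irq_nosync() path for brevity.
 */
static irqreturn_t example_qp_irq(int irq, void *data)
{
	struct napi_struct *napi = data;

	if (napi_schedule_prep(napi)) {
		disable_irq_nosync(irq);	/* mask until poll completes */
		__napi_schedule(napi);
	}

	return IRQ_HANDLED;
}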
clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag); + + __napi_schedule(&irq_cfg->napi); + } else if (!test_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag)) { + hinic_misx_intr_clear_resend_bit(nic_dev->hwdev, msix_entry_idx, + 1); + } + + return IRQ_HANDLED; +} + +static int hinic_request_irq(struct hinic_irq *irq_cfg, u16 q_id) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + struct nic_interrupt_info info = {0}; + int err; + + qp_add_napi(irq_cfg); + + info.msix_index = irq_cfg->msix_entry_idx; + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; + info.coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = + nic_dev->intr_coalesce[q_id].pending_limt; + err = hinic_set_interrupt_cfg(nic_dev->hwdev, info); + if (err) { + nicif_err(nic_dev, drv, irq_cfg->netdev, + "Failed to set RX interrupt coalescing attribute.\n"); + qp_del_napi(irq_cfg); + return err; + } + + err = request_irq(irq_cfg->irq_id, &qp_irq, 0, + irq_cfg->irq_name, irq_cfg); + if (err) { + nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n"); + qp_del_napi(irq_cfg); + return err; + } + + /* assign the mask for this irq */ + irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask); + + return 0; +} + +static int set_interrupt_moder(struct hinic_nic_dev *nic_dev, u16 q_id, + u8 coalesc_timer_cfg, u8 pending_limt) +{ + struct nic_interrupt_info interrupt_info = {0}; + int err; + + if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg && + pending_limt == nic_dev->rxqs[q_id].last_pending_limt) + return 0; + + /* netdev not running or qp not in using, + * don't need to set coalesce to hw + */ + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) || + q_id >= nic_dev->num_qps) + return 0; + + interrupt_info.lli_set = 0; + interrupt_info.interrupt_coalesc_set = 1; + interrupt_info.coalesc_timer_cfg = coalesc_timer_cfg; + interrupt_info.pending_limt = pending_limt; + interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx; + interrupt_info.resend_timer_cfg = + nic_dev->intr_coalesce[q_id].resend_timer_cfg; + + err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed modifying moderation for Queue: %d\n", q_id); + } else { + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = pending_limt; + } -out: - kfree(indir_tbl); return err; } -static void hinic_rss_deinit(struct hinic_dev *nic_dev) +static void __calc_coal_para(struct hinic_nic_dev *nic_dev, + struct hinic_intr_coal_info *q_coal, u64 rate, + u8 *coalesc_timer_cfg, u8 *pending_limt) { - hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx); + if (rate < q_coal->pkt_rate_low) { + *coalesc_timer_cfg = q_coal->rx_usecs_low; + *pending_limt = q_coal->rx_pending_limt_low; + } else if (rate > q_coal->pkt_rate_high) { + *coalesc_timer_cfg = q_coal->rx_usecs_high; + *pending_limt = q_coal->rx_pending_limt_high; + } else { + *coalesc_timer_cfg = + (u8)((rate - q_coal->pkt_rate_low) * + (q_coal->rx_usecs_high - + q_coal->rx_usecs_low) / + (q_coal->pkt_rate_high - + q_coal->pkt_rate_low) + + q_coal->rx_usecs_low); + if (nic_dev->in_vm) + *pending_limt = (u8)((rate - q_coal->pkt_rate_low) * + (q_coal->rx_pending_limt_high - + 
q_coal->rx_pending_limt_low) / + (q_coal->pkt_rate_high - + q_coal->pkt_rate_low) + + q_coal->rx_pending_limt_low); + else + *pending_limt = q_coal->rx_pending_limt_low; + } } -static void hinic_init_rss_parameters(struct hinic_dev *nic_dev) +static void update_queue_coal(struct hinic_nic_dev *nic_dev, u16 qid, + u64 rate, u64 avg_pkt_size, u64 tx_rate) { - nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR; - nic_dev->rss_type.tcp_ipv6_ext = 1; - nic_dev->rss_type.ipv6_ext = 1; - nic_dev->rss_type.tcp_ipv6 = 1; - nic_dev->rss_type.ipv6 = 1; - nic_dev->rss_type.tcp_ipv4 = 1; - nic_dev->rss_type.ipv4 = 1; - nic_dev->rss_type.udp_ipv6 = 1; - nic_dev->rss_type.udp_ipv4 = 1; + struct hinic_intr_coal_info *q_coal; + u8 coalesc_timer_cfg, pending_limt; + + q_coal = &nic_dev->intr_coalesce[qid]; + + if ((rate > HINIC_RX_RATE_THRESH && + avg_pkt_size > HINIC_AVG_PKT_SMALL) || + (nic_dev->in_vm && rate > HINIC_RX_RATE_THRESH)) { + __calc_coal_para(nic_dev, q_coal, rate, + &coalesc_timer_cfg, &pending_limt); + } else { + coalesc_timer_cfg = HINIC_LOWEST_LATENCY; + pending_limt = q_coal->rx_pending_limt_low; + } + + set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, + pending_limt); } -static void hinic_enable_rss(struct hinic_dev *nic_dev) +#define SDI_VM_PPS_3W 30000 +#define SDI_VM_PPS_5W 50000 + +#define SDI_VM_BPS_100MB 12500000 +#define SDI_VM_BPS_1GB 125000000 + +static void update_queue_coal_sdi_vm(struct hinic_nic_dev *nic_dev, + u16 qid, u64 rx_pps, u64 rx_bps, + u64 tx_pps, u64 tx_bps) +{ + struct hinic_intr_coal_info *q_coal = NULL; + u8 coalesc_timer_cfg, pending_limt; + + q_coal = &nic_dev->intr_coalesce[qid]; + if (qp_coalesc_use_drv_params_switch == 0) { + if (rx_pps < SDI_VM_PPS_3W && + tx_pps < SDI_VM_PPS_3W && + rx_bps < SDI_VM_BPS_100MB && + tx_bps < SDI_VM_BPS_100MB) { + set_interrupt_moder(nic_dev, qid, 0, 0); + } else if (tx_pps > SDI_VM_PPS_3W && + tx_pps < SDI_VM_PPS_5W && + tx_bps > SDI_VM_BPS_1GB) { + set_interrupt_moder(nic_dev, qid, 7, 7); + } else { + __calc_coal_para(nic_dev, q_coal, rx_pps, + &coalesc_timer_cfg, + &pending_limt); + set_interrupt_moder(nic_dev, qid, + coalesc_timer_cfg, + pending_limt); + } + } else { + __calc_coal_para(nic_dev, q_coal, rx_pps, + &coalesc_timer_cfg, + &pending_limt); + set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, + pending_limt); + } +} + +static void hinic_auto_moderation_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic_nic_dev *nic_dev = container_of(delay, + struct hinic_nic_dev, + moderation_task); + unsigned long period = (unsigned long)(jiffies - + nic_dev->last_moder_jiffies); + u64 rx_packets, rx_bytes, rx_pkt_diff, rate, avg_pkt_size; + u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate, rx_bps, tx_bps; + u16 qid; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) + return; + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + HINIC_MODERATONE_DELAY); + + if (!nic_dev->adaptive_rx_coal || !period) + return; + + for (qid = 0; qid < nic_dev->num_qps; qid++) { + rx_packets = nic_dev->rxqs[qid].rxq_stats.packets; + rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes; + tx_packets = nic_dev->txqs[qid].txq_stats.packets; + tx_bytes = nic_dev->txqs[qid].txq_stats.bytes; + + rx_pkt_diff = + rx_packets - nic_dev->rxqs[qid].last_moder_packets; + avg_pkt_size = rx_pkt_diff ? 
+ ((unsigned long)(rx_bytes - + nic_dev->rxqs[qid].last_moder_bytes)) / + rx_pkt_diff : 0; + + rate = rx_pkt_diff * HZ / period; + tx_pkt_diff = + tx_packets - nic_dev->txqs[qid].last_moder_packets; + tx_rate = tx_pkt_diff * HZ / period; + + rx_bps = (unsigned long)(rx_bytes - + nic_dev->rxqs[qid].last_moder_bytes) + * HZ / period; + tx_bps = (unsigned long)(tx_bytes - + nic_dev->txqs[qid].last_moder_bytes) + * HZ / period; + if ((nic_dev->is_vm_slave && nic_dev->in_vm) || + nic_dev->is_bm_slave) { + update_queue_coal_sdi_vm(nic_dev, qid, rate, rx_bps, + tx_rate, tx_bps); + } else { + update_queue_coal(nic_dev, qid, rate, avg_pkt_size, + tx_rate); + } + + nic_dev->rxqs[qid].last_moder_packets = rx_packets; + nic_dev->rxqs[qid].last_moder_bytes = rx_bytes; + nic_dev->txqs[qid].last_moder_packets = tx_packets; + nic_dev->txqs[qid].last_moder_bytes = tx_bytes; + } + + nic_dev->last_moder_jiffies = jiffies; +} + + +static void hinic_release_irq(struct hinic_irq *irq_cfg) +{ + irq_set_affinity_hint(irq_cfg->irq_id, NULL); + synchronize_irq(irq_cfg->irq_id); + free_irq(irq_cfg->irq_id, irq_cfg); + qp_del_napi(irq_cfg); +} + +static int hinic_qps_irq_init(struct hinic_nic_dev *nic_dev) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct irq_info *qp_irq_info; + struct hinic_irq *irq_cfg; + u16 q_id, i; + u32 local_cpu; + int err; + + nic_dev->irq_cfg = kcalloc(nic_dev->num_qps, sizeof(*nic_dev->irq_cfg), + GFP_KERNEL); + if (!nic_dev->irq_cfg) { + nic_err(&pdev->dev, "Failed to alloc irq cfg\n"); + return -ENOMEM; + } + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + qp_irq_info = &nic_dev->qps_irq_info[q_id]; + irq_cfg = &nic_dev->irq_cfg[q_id]; + + irq_cfg->irq_id = qp_irq_info->irq_id; + irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx; + irq_cfg->netdev = nic_dev->netdev; + irq_cfg->txq = &nic_dev->txqs[q_id]; + irq_cfg->rxq = &nic_dev->rxqs[q_id]; + nic_dev->rxqs[q_id].irq_cfg = irq_cfg; + + if (nic_dev->force_affinity) { + irq_cfg->affinity_mask = nic_dev->affinity_mask; + } else { + local_cpu = + cpumask_local_spread(q_id, + dev_to_node(&pdev->dev)); + cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask); + } + + err = snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name), + "%s_qp%d", nic_dev->netdev->name, q_id); + if (err <= 0 || err >= (int)sizeof(irq_cfg->irq_name)) { + nic_err(&pdev->dev, + "Failed snprintf irq_name, function return(%d) and dest_len(%d)\n", + err, (int)sizeof(irq_cfg->irq_name)); + goto req_tx_irq_err; + } + + set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag); + + err = hinic_request_irq(irq_cfg, q_id); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n"); + goto req_tx_irq_err; + } + + hinic_set_msix_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC_MSIX_ENABLE); + } + + INIT_DELAYED_WORK(&nic_dev->moderation_task, + hinic_auto_moderation_work); + + return 0; + +req_tx_irq_err: + for (i = 0; i < q_id; i++) { + hinic_set_msix_state(nic_dev->hwdev, + nic_dev->irq_cfg[i].msix_entry_idx, + HINIC_MSIX_DISABLE); + hinic_release_irq(&nic_dev->irq_cfg[i]); + } + + kfree(nic_dev->irq_cfg); + + return err; +} + +static void hinic_qps_irq_deinit(struct hinic_nic_dev *nic_dev) +{ + u16 q_id; + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + hinic_set_msix_state(nic_dev->hwdev, + nic_dev->irq_cfg[q_id].msix_entry_idx, + HINIC_MSIX_DISABLE); + hinic_release_irq(&nic_dev->irq_cfg[q_id]); + } + + kfree(nic_dev->irq_cfg); +} + +int hinic_force_port_disable(struct hinic_nic_dev *nic_dev) +{ + int err; + + 
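/*
 * A minimal sketch (not part of this patch) of the linear interpolation
 * that __calc_coal_para() applies between the low and high tuning
 * points when the packet rate falls between pkt_rate_low and
 * pkt_rate_high:
 *
 *   timer(rate) = usecs_low + (rate - rate_low) *
 *                 (usecs_high - usecs_low) / (rate_high - rate_low)
 *
 * clamped to usecs_low below rate_low and usecs_high above rate_high.
 */
static u8 example_interp_coal_timer(u64 rate, u64 rate_low, u64 rate_high,
				    u8 usecs_low, u8 usecs_high)
{
	if (rate <= rate_low)
		return usecs_low;
	if (rate >= rate_high)
		return usecs_high;

	return (u8)(usecs_low + div64_u64((rate - rate_low) *
					  (usecs_high - usecs_low),
					  rate_high - rate_low));
}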
down(&nic_dev->port_state_sem);
+
+	err = hinic_set_port_enable(nic_dev->hwdev, false);
+	if (!err)
+		nic_dev->force_port_disable = true;
+
+	up(&nic_dev->port_state_sem);
+
+	return err;
+}
+
+int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
+{
+	int err = 0;
+
+	down(&nic_dev->port_state_sem);
+
+	nic_dev->force_port_disable = false;
+	err = hinic_set_port_enable(nic_dev->hwdev, enable);
+
+	up(&nic_dev->port_state_sem);
+
+	return err;
+}
+
+int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
+{
+	int err;
+
+	down(&nic_dev->port_state_sem);
+
+	/* Do nothing when the port is forcibly disabled: it was brought
+	 * down through the force-disable path and must not be re-enabled
+	 * while force mode is in effect
+	 */
+	if (nic_dev->force_port_disable) {
+		up(&nic_dev->port_state_sem);
+		return 0;
+	}
+
+	err = hinic_set_port_enable(nic_dev->hwdev, enable);
+
+	up(&nic_dev->port_state_sem);
+
+	return err;
+}
+
+static void hinic_print_link_message(struct hinic_nic_dev *nic_dev,
+				     u8 link_status)
+{
+	if (nic_dev->link_status == link_status)
+		return;
+
+	nic_dev->link_status = link_status;
+
+	nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n",
+		   (link_status ? "up" : "down"));
+}
+
+int hinic_open(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 link_status = 0;
+	int err;
+
+	if (test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n");
+		return 0;
+	}
+
+	err = hinic_setup_num_qps(nic_dev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n");
+		return err;
+	}
+
+	err = hinic_create_qps(nic_dev->hwdev, nic_dev->num_qps,
+			       nic_dev->sq_depth, nic_dev->rq_depth,
+			       nic_dev->qps_irq_info, HINIC_MAX_SQ_BUFDESCS);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to create queue pairs\n");
+		goto create_qps_err;
+	}
+
+	err = hinic_setup_qps_resources(nic_dev);
+	if (err)
+		goto setup_qps_resources_err;
+
+	err = hinic_init_qp_ctxts(nic_dev->hwdev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to init qp ctxts\n");
+		goto init_qp_ctxts_err;
+	}
+
+	err = hinic_set_port_mtu(nic_dev->hwdev, netdev->mtu);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n");
+		goto mtu_err;
+	}
+
+	err = hinic_configure(nic_dev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to configure txrx\n");
+		goto cfg_err;
+	}
+
+	err = hinic_qps_irq_init(nic_dev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to init qps irqs\n");
+		goto qps_irqs_init_err;
+	}
+
+	err = hinic_set_vport_enable(nic_dev->hwdev, true);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n");
+		goto vport_enable_err;
+	}
+
+	err = hinic_maybe_set_port_state(nic_dev, true);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to enable port\n");
+		goto port_enable_err;
+	}
+
+	set_bit(HINIC_INTF_UP, &nic_dev->flags);
+
+	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
+	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
+	netif_tx_wake_all_queues(netdev);
+
+	queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
+			   HINIC_MODERATONE_DELAY);
+
+	err = hinic_get_link_state(nic_dev->hwdev, &link_status);
+	if (!err && link_status) {
+		hinic_update_pf_bw(nic_dev->hwdev);
+		netif_carrier_on(netdev);
+	}
+
+	hinic_print_link_message(nic_dev, link_status);
+
+	if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status);
+
+	nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n");
+
+	return 0;
+
+port_enable_err:
+	hinic_set_vport_enable(nic_dev->hwdev, false);
+
+vport_enable_err:
+	hinic_flush_sq_res(nic_dev->hwdev);
+	/* 100ms after the vport is disabled, no more packets will be
+	 * sent to the host
+	 */
+	msleep(100);
+	hinic_qps_irq_deinit(nic_dev);
+
+qps_irqs_init_err:
+	hinic_remove_configure(nic_dev);
+
+cfg_err:
+mtu_err:
+	hinic_free_qp_ctxts(nic_dev->hwdev);
+
+init_qp_ctxts_err:
+	hinic_free_all_rx_resources(netdev);
+	hinic_free_all_tx_resources(netdev);
+
+setup_qps_resources_err:
+	hinic_free_qps(nic_dev->hwdev);
+
+create_qps_err:
+	hinic_destroy_num_qps(nic_dev);
+
+	return err;
+}
+
+int hinic_close(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (!test_and_clear_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_info(nic_dev, drv, netdev, "Netdev already closed, do nothing\n");
+		return 0;
+	}
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	cancel_delayed_work_sync(&nic_dev->moderation_task);
+
+	if (hinic_get_chip_present_flag(nic_dev->hwdev)) {
+		if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+			hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+
+		hinic_maybe_set_port_state(nic_dev, false);
+
+		hinic_set_vport_enable(nic_dev->hwdev, false);
+
+		hinic_flush_txqs(netdev);
+		hinic_flush_sq_res(nic_dev->hwdev);
+		/* 100ms after the vport is disabled, no more packets
+		 * will be sent to the host
+		 */
+		msleep(100);
+	}
+
+	hinic_qps_irq_deinit(nic_dev);
+	hinic_remove_configure(nic_dev);
+
+	if (hinic_get_chip_present_flag(nic_dev->hwdev))
+		hinic_free_qp_ctxts(nic_dev->hwdev);
+
+	mutex_lock(&nic_dev->nic_mutex);
+	hinic_free_all_rx_resources(netdev);
+
+	hinic_free_all_tx_resources(netdev);
+
+	hinic_free_qps(nic_dev->hwdev);
+
+	hinic_destroy_num_qps(nic_dev);
+	mutex_unlock(&nic_dev->nic_mutex);
+
+	nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n");
+
+	return 0;
+}
+
+static inline u32 calc_toeplitz_rss(u32 sip, u32 dip, u32 sport, u32 dport,
+				    const u32 *rss_key)
+{
+	u32 i, port, rss = 0;
+
+	port = (sport << 16) | dport;
+
+	/* The key - SIP, DIP, SPORT, DPORT */
+	for (i = 0; i < 32; i++)
+		if (sip & ((u32)1 << (u32)(31 - i)))
+			rss ^= (rss_key[0] << i) |
+			       (u32)((u64)rss_key[1] >> (32 - i));
+
+	for (i = 0; i < 32; i++)
+		if (dip & ((u32)1 << (u32)(31 - i)))
+			rss ^= (rss_key[1] << i) |
+			       (u32)((u64)rss_key[2] >> (32 - i));
+
+	for (i = 0; i < 32; i++)
+		if (port & ((u32)1 << (u32)(31 - i)))
+			rss ^= (rss_key[2] << i) |
+			       (u32)((u64)rss_key[3] >> (32 - i));
+
+	return rss;
+}
+
+static u16 select_queue_by_toeplitz(struct net_device *dev,
+				    struct sk_buff *skb,
+				    unsigned int num_tx_queues)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(dev);
+	struct tcphdr *tcphdr;
+	struct iphdr *iphdr;
+	u32 hash = 0;
+
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= num_tx_queues))
+			hash -= num_tx_queues;
+		return (u16)hash;
+	}
+
+	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
+		iphdr = ip_hdr(skb);
+		if (iphdr->protocol == IPPROTO_UDP ||
+		    iphdr->protocol == IPPROTO_TCP) {
+			tcphdr = tcp_hdr(skb);
+			hash = calc_toeplitz_rss(ntohl(iphdr->daddr),
+						 ntohl(iphdr->saddr),
+						 ntohs(tcphdr->dest),
+						 ntohs(tcphdr->source),
+						 nic_dev->rss_hkey_user_be);
+		}
+	}
+
+	return (u16)nic_dev->rss_indir_user[hash & 0xFF];
+}
+
+#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY)
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+			      struct net_device *sb_dev)
+#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK)
+#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV)
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#else +static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#endif +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb) +#endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT; + + if (netdev_get_num_tc(netdev) || !nic_dev->rss_hkey_user_be) + goto fall_back; + + if ((nic_dev->rss_hash_engine == HINIC_RSS_HASH_ENGINE_TYPE_TOEP) && + test_bit(HINIC_SAME_RXTX, &nic_dev->flags)) + return select_queue_by_toeplitz(netdev, skb, + netdev->real_num_tx_queues); + +fall_back: +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) + return netdev_pick_tx(netdev, skb, NULL); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV + return fallback(netdev, skb, sb_dev); +#else + return fallback(netdev, skb); +#endif +#else + return skb_tx_hash(netdev, skb); +#endif +} + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void hinic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 + *hinic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif + +#else /* !HAVE_NDO_GET_STATS64 */ +static struct net_device_stats *hinic_get_stats(struct net_device *netdev) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); +#ifndef HAVE_NDO_GET_STATS64 +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *stats = &netdev->stats; +#else + struct net_device_stats *stats = &nic_dev->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +#endif /* HAVE_NDO_GET_STATS64 */ + struct hinic_txq_stats *txq_stats; + struct hinic_rxq_stats *rxq_stats; + struct hinic_txq *txq; + struct hinic_rxq *rxq; + u64 bytes, packets, dropped, errors; + unsigned int start; + int i; + + bytes = 0; + packets = 0; + dropped = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->txqs) + break; + + txq = &nic_dev->txqs[i]; + txq_stats = &txq->txq_stats; + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + bytes += txq_stats->bytes; + packets += txq_stats->packets; + dropped += txq_stats->dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + } + stats->tx_packets = packets; + stats->tx_bytes = bytes; + stats->tx_dropped = dropped; + + bytes = 0; + packets = 0; + errors = 0; + dropped = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->rxqs) + break; + + rxq = &nic_dev->rxqs[i]; + rxq_stats = &rxq->rxq_stats; + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + bytes += rxq_stats->bytes; + packets += rxq_stats->packets; + errors += rxq_stats->csum_errors + + rxq_stats->other_errors; + dropped += rxq_stats->dropped; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + } + stats->rx_packets = packets; + stats->rx_bytes = bytes; + stats->rx_errors = errors; + stats->rx_dropped = dropped; + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +static void hinic_tx_timeout(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 q_id = 0; + + 
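/*
 * A minimal sketch (not part of this patch) of the per-queue counter
 * read pattern used by hinic_get_stats64() above: readers retry the
 * snapshot while a writer holds the u64_stats sequence counter, which
 * keeps 64-bit counters consistent even on 32-bit hosts.
 */
static u64 example_read_rxq_bytes(struct hinic_rxq_stats *stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return bytes;
}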
HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout);
+	nicif_err(nic_dev, drv, netdev, "Tx timeout\n");
+
+	for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+		if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
+			continue;
+
+		nicif_info(nic_dev, drv, netdev,
+			   "txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
+			   q_id, hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id),
+			   hinic_get_sq_hw_ci(nic_dev->hwdev, q_id),
+			   hinic_get_sq_local_ci(nic_dev->hwdev, q_id),
+			   nic_dev->irq_cfg[q_id].napi.state);
+	}
+}
+
+static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u32 mtu = (u32)new_mtu;
+	int err = 0;
+
+	err = hinic_set_port_mtu(nic_dev->hwdev, mtu);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n",
+			  new_mtu);
+	} else {
+		nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %d to %d\n",
+			   netdev->mtu, new_mtu);
+		netdev->mtu = mtu;
+	}
+
+	return err;
+}
+
+static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct sockaddr *saddr = addr;
+	u16 func_id;
+	int err;
+
+	if (!FUNC_SUPPORT_CHANGE_MAC(nic_dev->hwdev)) {
+		nicif_warn(nic_dev, drv, netdev,
+			   "Current function doesn't support setting the mac address\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) {
+		nicif_info(nic_dev, drv, netdev,
+			   "Already using mac address %pM\n",
+			   saddr->sa_data);
+		return 0;
+	}
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		return err;
+
+	err = hinic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data,
+			       0, func_id);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, saddr->sa_data, ETH_ALEN);
+
+	nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n",
+		   saddr->sa_data);
+
+	/* TODO: on kernels newer than 4.7 the vlan devices' mac addresses
+	 * must be updated as well (the vlan mac address is not modified
+	 * here)
+	 */
+
+	return 0;
+}
+
+#if (KERNEL_VERSION(3, 3, 0) > LINUX_VERSION_CODE)
+static void
+#else
+static int
+#endif
+hinic_vlan_rx_add_vid(struct net_device *netdev,
+	#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE)
+		      __always_unused __be16 proto,
+	#endif
+		      u16 vid)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+	u16 func_id;
+	u32 col, line;
+	int err;
+
+	col = VID_COL(nic_dev, vid);
+	line = VID_LINE(nic_dev, vid);
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		goto end;
+
+	err = hinic_add_vlan(nic_dev->hwdev, vid, func_id);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to add vlan %d\n", vid);
+		goto end;
+	}
+
+	set_bit(col, &vlan_bitmap[line]);
+
+	nicif_info(nic_dev, drv, netdev, "Add vlan %d\n", vid);
+
+end:
+#if (KERNEL_VERSION(3, 3, 0) <= LINUX_VERSION_CODE)
+	return err;
+#else
+	return;
+#endif
+}
+
+#if (KERNEL_VERSION(3, 3, 0) > LINUX_VERSION_CODE)
+static void
+#else
+static int
+#endif
+hinic_vlan_rx_kill_vid(struct net_device *netdev,
+	#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE)
+		       __always_unused __be16 proto,
+	#endif
+		       u16 vid)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+	u16 func_id;
+	int err, col, line;
+
+	col = VID_COL(nic_dev, vid);
+	line = VID_LINE(nic_dev, vid);
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		goto end;
+
+	err =
hinic_del_vlan(nic_dev->hwdev, vid, func_id); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); + goto end; + } + + clear_bit(col, &vlan_bitmap[line]); + + nicif_info(nic_dev, drv, netdev, "Remove vlan %d\n", vid); + +end: +#if (KERNEL_VERSION(3, 3, 0) <= LINUX_VERSION_CODE) + return err; +#else + return; +#endif +} + +static int set_features(struct hinic_nic_dev *nic_dev, + netdev_features_t pre_features, + netdev_features_t features, bool force_change) +{ + netdev_features_t changed = force_change ? ~0 : pre_features ^ features; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + u8 rxvlan_changed = !!(changed & NETIF_F_HW_VLAN_CTAG_RX); + u8 rxvlan_en = !!(features & NETIF_F_HW_VLAN_CTAG_RX); +#else + u8 rxvlan_changed = !!(changed & NETIF_F_HW_VLAN_RX); + u8 rxvlan_en = !!(features & NETIF_F_HW_VLAN_RX); +#endif + u32 lro_timer, lro_buf_size; + int err = 0; + + + if (changed & NETIF_F_TSO) { + err = hinic_set_tx_tso(nic_dev->hwdev, + !!(features & NETIF_F_TSO)); + hinic_info(nic_dev, drv, "%s tso %s\n", + (features & NETIF_F_TSO) ? "Enable" : "Disable", + err ? "failed" : "success"); + } + + if (rxvlan_changed) { + err = hinic_set_rx_vlan_offload(nic_dev->hwdev, rxvlan_en); + hinic_info(nic_dev, drv, "%s rxvlan %s\n", + rxvlan_en ? "Enable" : "Disable", + err ? "failed" : "success"); + } + + if (changed & NETIF_F_RXCSUM) { + /* hw should always enable rx csum */ + u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; + + err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en); + hinic_info(nic_dev, drv, "%s rx csum %s\n", + (features & NETIF_F_RXCSUM) ? "Enable" : "Disable", + err ? "failed" : "success"); + } + + if (changed & NETIF_F_LRO) { + lro_timer = nic_dev->adaptive_cfg.lro.timer; + lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size; + + err = hinic_set_rx_lro_state(nic_dev->hwdev, + !!(features & NETIF_F_LRO), + lro_timer, + lro_buf_size / + nic_dev->rx_buff_len); + hinic_info(nic_dev, drv, "%s lro %s\n", + (features & NETIF_F_LRO) ? "Enable" : "Disable", + err ? 
"failed" : "success"); + } + + return err; +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int hinic_set_features(struct net_device *netdev, u32 features) +#else +static int hinic_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return set_features(nic_dev, nic_dev->netdev->features, + features, false); +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 hinic_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t hinic_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + /* If Rx checksum is disabled, then LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + +static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) +{ + int err; + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + if (FUNC_SUPPORT_DCB(nic_dev->hwdev)) { + err = hinic_dcb_reset_hw_config(nic_dev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n"); + return -EFAULT; + } + } + + if (FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + err = hinic_reset_port_link_cfg(nic_dev->hwdev); + if (err) + return -EFAULT; + } + + if (enable_bp) { + nic_dev->bp_upper_thd = (u16)bp_upper_thd; + nic_dev->bp_lower_thd = (u16)bp_lower_thd; + err = hinic_set_bp_thd(nic_dev->hwdev, + nic_dev->bp_lower_thd); + if (err) { + nic_err(&nic_dev->pdev->dev, + "Failed to set bp lower threshold\n"); + return -EFAULT; + } + + set_bit(HINIC_BP_ENABLE, &nic_dev->flags); + } else { + err = hinic_disable_fw_bp(nic_dev->hwdev); + if (err) + return -EFAULT; + + clear_bit(HINIC_BP_ENABLE, &nic_dev->flags); + } + + hinic_set_anti_attack(nic_dev->hwdev, true); + + if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + err = hinic_set_link_status_follow( + nic_dev->hwdev, set_link_status_follow); + if (err == HINIC_MGMT_CMD_UNSUPPORTED) + nic_warn(&nic_dev->pdev->dev, + "Current version of firmware don't support to set link status follow port status\n"); + } + } + + /* enable all hw features in netdev->features */ + return set_features(nic_dev, 0, nic_dev->netdev->features, true); +} + +#ifdef NETIF_F_HW_TC +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX +static int hinic_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return hinic_setup_tc(dev, mqprio->num_tc); +} +#endif /* TC_MQPRIO_HW_OFFLOAD_MAX */ + +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) +static int __hinic_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX) +static int __hinic_setup_tc(struct net_device *dev, __always_unused u32 handle, + u32 chain_index, __always_unused __be16 proto, + struct tc_to_netdev *tc) +#else +static int __hinic_setup_tc(struct net_device *dev, __always_unused u32 handle, + __always_unused __be16 proto, + struct tc_to_netdev *tc) +#endif +{ +#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + unsigned int type = tc->type; + +#ifdef HAVE_NDO_SETUP_TC_CHAIN_INDEX + if (chain_index) + return -EOPNOTSUPP; + +#endif +#endif + switch (type) { + case TC_SETUP_QDISC_MQPRIO: +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) + return hinic_setup_tc_mqprio(dev, type_data); +#elif defined(TC_MQPRIO_HW_OFFLOAD_MAX) + return hinic_setup_tc_mqprio(dev, tc->mqprio); +#else + return hinic_setup_tc(dev, tc->tc); +#endif + default: + return 
-EOPNOTSUPP; + } +} +#endif /* NETIF_F_HW_TC */ + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void hinic_netpoll(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 i; + + for (i = 0; i < nic_dev->num_qps; i++) + napi_schedule(&nic_dev->irq_cfg[i].napi); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int hinic_uc_sync(struct net_device *netdev, u8 *addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 func_id; + int err; + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + return err; + + err = hinic_set_mac(nic_dev->hwdev, addr, 0, func_id); + return err; +} + +static int hinic_uc_unsync(struct net_device *netdev, u8 *addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 func_id; + int err; + + /* The addr is in use */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + return err; + + err = hinic_del_mac(nic_dev->hwdev, addr, 0, func_id); + return err; +} + +static void hinic_clean_mac_list_filter(struct hinic_nic_dev *nic_dev) { struct net_device *netdev = nic_dev->netdev; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; + struct hinic_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) { + if (f->state == HINIC_MAC_HW_SYNCED) + hinic_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) { + if (f->state == HINIC_MAC_HW_SYNCED) + hinic_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } +} + +static struct hinic_mac_filter *hinic_find_mac(struct list_head *filter_list, + u8 *addr) +{ + struct hinic_mac_filter *f; + + list_for_each_entry(f, filter_list, list) { + if (ether_addr_equal(addr, f->addr)) + return f; + } + return NULL; +} + +static struct hinic_mac_filter + *hinic_add_filter(struct hinic_nic_dev *nic_dev, + struct list_head *mac_filter_list, u8 *addr) +{ + struct hinic_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto out; + + memcpy(f->addr, addr, ETH_ALEN); + + INIT_LIST_HEAD(&f->list); + list_add_tail(&f->list, mac_filter_list); + + f->state = HINIC_MAC_WAIT_HW_SYNC; + set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags); + +out: + return f; +} + +static void hinic_del_filter(struct hinic_nic_dev *nic_dev, + struct hinic_mac_filter *f) +{ + set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (f->state == HINIC_MAC_WAIT_HW_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&f->list); + kfree(f); + return; + } + + f->state = HINIC_MAC_WAIT_HW_UNSYNC; +} + +static struct hinic_mac_filter + *hinic_mac_filter_entry_clone(struct hinic_mac_filter *src) +{ + struct hinic_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + + *f = *src; + INIT_LIST_HEAD(&f->list); + + return f; +} + +static void hinic_undo_del_filter_entries(struct list_head *filter_list, + struct list_head *from) +{ + struct hinic_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + if (hinic_find_mac(filter_list, f->addr)) + continue; + + if (f->state == HINIC_MAC_HW_SYNCED) + f->state = HINIC_MAC_WAIT_HW_UNSYNC; + + list_move_tail(&f->list, filter_list); + } +} + +static void hinic_undo_add_filter_entries(struct list_head *filter_list, + struct list_head *from) +{ + struct hinic_mac_filter *f, *ftmp, *tmp; + + 
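/*
 * Not part of this patch: an orientation note on the filter entry state
 * machine handled by the helpers above and below.
 *
 *   hinic_add_filter():  entry enters HINIC_MAC_WAIT_HW_SYNC
 *   sync to hw ok:       HINIC_MAC_WAIT_HW_SYNC -> HINIC_MAC_HW_SYNCED
 *   hinic_del_filter():  HINIC_MAC_HW_SYNCED -> HINIC_MAC_WAIT_HW_UNSYNC
 *                        (entries never synced to hw are freed directly)
 *   unsync from hw:      HINIC_MAC_WAIT_HW_UNSYNC entries are removed
 *                        from hw and freed
 */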
list_for_each_entry_safe(f, ftmp, from, list) {
+		tmp = hinic_find_mac(filter_list, f->addr);
+		if (tmp && tmp->state == HINIC_MAC_HW_SYNCED)
+			tmp->state = HINIC_MAC_WAIT_HW_SYNC;
+	}
+}
+
+static void hinic_cleanup_filter_list(struct list_head *head)
+{
+	struct hinic_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, head, list) {
+		list_del(&f->list);
+		kfree(f);
+	}
+}
+
+static int hinic_mac_filter_sync_hw(struct hinic_nic_dev *nic_dev,
+				    struct list_head *del_list,
+				    struct list_head *add_list)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct hinic_mac_filter *f, *ftmp;
+	int err = 0, add_count = 0;
+
+	if (!list_empty(del_list)) {
+		list_for_each_entry_safe(f, ftmp, del_list, list) {
+			err = hinic_uc_unsync(netdev, f->addr);
+			if (err) { /* ignore errors when deleting a mac */
+				nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n");
+			}
+
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+
+	if (!list_empty(add_list)) {
+		list_for_each_entry_safe(f, ftmp, add_list, list) {
+			err = hinic_uc_sync(netdev, f->addr);
+			if (err) {
+				nic_err(&nic_dev->pdev->dev, "Failed to add mac\n");
+				return err;
+			}
+
+			add_count++;
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+
+	return add_count;
+}
+
+static int hinic_mac_filter_sync(struct hinic_nic_dev *nic_dev,
+				 struct list_head *mac_filter_list, bool uc)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct list_head tmp_del_list, tmp_add_list;
+	struct hinic_mac_filter *f, *ftmp, *fclone;
+	int err = 0, add_count = 0;
+
+	INIT_LIST_HEAD(&tmp_del_list);
+	INIT_LIST_HEAD(&tmp_add_list);
+
+	list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+		if (f->state != HINIC_MAC_WAIT_HW_UNSYNC)
+			continue;
+
+		f->state = HINIC_MAC_HW_UNSYNCED;
+		list_move_tail(&f->list, &tmp_del_list);
+	}
+
+	list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+		if (f->state != HINIC_MAC_WAIT_HW_SYNC)
+			continue;
+
+		fclone = hinic_mac_filter_entry_clone(f);
+		if (!fclone) {
+			err = -ENOMEM;
+			break;
+		}
+
+		f->state = HINIC_MAC_HW_SYNCED;
+		list_add_tail(&fclone->list, &tmp_add_list);
+	}
+
+	if (err) {
+		hinic_undo_del_filter_entries(mac_filter_list, &tmp_del_list);
+		hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+		nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n");
+		hinic_cleanup_filter_list(&tmp_del_list);
+		hinic_cleanup_filter_list(&tmp_add_list);
+		return -ENOMEM;
+	}
+
+	add_count =
+		hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+	if (list_empty(&tmp_add_list))
+		return add_count;
+
+	/* adding macs to hw failed, delete all macs in hw */
+	hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+	/* VFs don't support promisc mode,
+	 * so we can't delete any other uc mac
+	 */
+	if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) {
+		list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+			if (f->state != HINIC_MAC_HW_SYNCED)
+				continue;
+
+			fclone = hinic_mac_filter_entry_clone(f);
+			if (!fclone)
+				break;
+
+			f->state = HINIC_MAC_WAIT_HW_SYNC;
+			list_add_tail(&fclone->list, &tmp_del_list);
+		}
+	}
+
+	hinic_cleanup_filter_list(&tmp_add_list);
+	hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+
+	/* we need to enter promisc/allmulti mode */
+	return -ENOMEM;
+}
+
+static void hinic_mac_filter_sync_all(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int add_count;
+
+	if (test_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags)) {
+		clear_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+		add_count =
hinic_mac_filter_sync(nic_dev, + &nic_dev->uc_filter_list, + true); + if (add_count < 0 && !HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + set_bit(HINIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); + nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n"); + } else if (add_count) { + clear_bit(HINIC_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + } + + add_count = hinic_mac_filter_sync(nic_dev, + &nic_dev->mc_filter_list, + false); + if (add_count < 0) { + set_bit(HINIC_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n"); + } else if (add_count) { + clear_bit(HINIC_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + } + } +} + +#define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \ + HINIC_RX_MODE_BC) + +static void hinic_update_mac_filter(struct hinic_nic_dev *nic_dev, + struct netdev_hw_addr_list *src_list, + struct list_head *filter_list) +{ + struct netdev_hw_addr *ha; + struct hinic_mac_filter *f, *ftmp, *filter; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) { + filter = hinic_find_mac(filter_list, ha->addr); + if (!filter) + hinic_add_filter(nic_dev, filter_list, ha->addr); + else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC) + filter->state = HINIC_MAC_HW_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f, ftmp, filter_list, list) { + bool found = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) + if (ether_addr_equal(ha->addr, f->addr)) { + found = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (found) + continue; + + hinic_del_filter(nic_dev, f); + } +} + +#ifndef NETDEV_HW_ADDR_T_MULTICAST +static void hinic_update_mc_filter(struct hinic_nic_dev *nic_dev, + struct list_head *filter_list) +{ + struct dev_mc_list *ha; + struct hinic_mac_filter *f, *ftmp, *filter; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(ha, nic_dev->netdev) { + filter = hinic_find_mac(filter_list, ha->da_addr); + if (!filter) + hinic_add_filter(nic_dev, filter_list, ha->da_addr); + else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC) + filter->state = HINIC_MAC_HW_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f, ftmp, filter_list, list) { + bool found = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(ha, nic_dev->netdev) + if (ether_addr_equal(ha->da_addr, f->addr)) { + found = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (found) + continue; + + hinic_del_filter(nic_dev, f); + } +} +#endif + +static void __update_mac_filter(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + if (test_and_clear_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { + hinic_update_mac_filter(nic_dev, &netdev->uc, + &nic_dev->uc_filter_list); +#ifdef NETDEV_HW_ADDR_T_MULTICAST + hinic_update_mac_filter(nic_dev, &netdev->mc, + &nic_dev->mc_filter_list); +#else + hinic_update_mc_filter(nic_dev, &nic_dev->mc_filter_list); +#endif + } +} + +static void hinic_set_rx_mode_work(struct work_struct *work) +{ + struct hinic_nic_dev *nic_dev = + container_of(work, struct hinic_nic_dev, rx_mode_work); + struct net_device *netdev = nic_dev->netdev; + int promisc_en = 0, allmulti_en = 0; + int err = 0; + + 
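/*
 * A minimal sketch (not part of this patch) of why the rx-mode update
 * runs from a workqueue: .ndo_set_rx_mode is called in atomic context
 * (with netif_addr_lock_bh() held) and must not sleep, while the mgmt
 * channel commands issued by this work item may sleep, so the ndo only
 * marks state and queues the work, as hinic_nic_set_rx_mode() does
 * further below.
 */
static void example_defer_rx_mode(struct workqueue_struct *wq,
				  struct work_struct *rx_mode_work)
{
	/* atomic context: defer the sleeping hw configuration */
	queue_work(wq, rx_mode_work);
}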
__update_mac_filter(nic_dev); + + hinic_mac_filter_sync_all(nic_dev); + + /* VF don't support to enter promisc mode */ + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + promisc_en = !!(netdev->flags & IFF_PROMISC) || + test_bit(HINIC_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + } + + allmulti_en = !!(netdev->flags & IFF_ALLMULTI) || + test_bit(HINIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); + + if (promisc_en != + test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) || + allmulti_en != + test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) { + enum hinic_rx_mod rx_mod = HINIC_DEFAULT_RX_MODE; + + rx_mod |= (promisc_en ? HINIC_RX_MODE_PROMISC : 0); + rx_mod |= (allmulti_en ? HINIC_RX_MODE_MC_ALL : 0); + + /* FOR DEBUG */ + if (promisc_en != + test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, + "%s promisc mode\n", + promisc_en ? "Enter" : "Left"); + if (allmulti_en != + test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, + "%s all_multi mode\n", + allmulti_en ? "Enter" : "Left"); + + err = hinic_set_rx_mode(nic_dev->hwdev, rx_mod); + if (!err) { + promisc_en ? + set_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) : + clear_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state); + + allmulti_en ? + set_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) : + clear_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state); + } else { + nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n"); + } + } +} + +static void hinic_nic_set_rx_mode(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + set_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags); + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + } + + if (FUNC_SUPPORT_RX_MODE(nic_dev->hwdev)) + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +static const struct net_device_ops hinic_netdev_ops = { + .ndo_open = hinic_open, + .ndo_stop = hinic_close, + .ndo_start_xmit = hinic_xmit_frame, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = hinic_get_stats64, +#else + .ndo_get_stats = hinic_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = hinic_tx_timeout, + .ndo_select_queue = hinic_select_queue, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = hinic_change_mtu, +#else + .ndo_change_mtu = hinic_change_mtu, +#endif + .ndo_set_mac_address = hinic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, +#endif +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = hinic_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = hinic_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = hinic_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = hinic_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = hinic_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = hinic_ndo_get_vf_config, +#endif + +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC + .extended.ndo_setup_tc_rh = __hinic_setup_tc, +#else +#ifdef HAVE_SETUP_TC +#ifdef NETIF_F_HW_TC + .ndo_setup_tc = __hinic_setup_tc, +#else + .ndo_setup_tc = hinic_setup_tc, +#endif /* NETIF_F_HW_TC */ +#endif /* HAVE_SETUP_TC */ +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = hinic_nic_set_rx_mode, + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext hinic_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state, +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = hinic_fix_features, + .ndo_set_features = hinic_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +static const struct net_device_ops hinicvf_netdev_ops = { + .ndo_open = hinic_open, + .ndo_stop = hinic_close, + .ndo_start_xmit = hinic_xmit_frame, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = hinic_get_stats64, +#else + .ndo_get_stats = hinic_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = hinic_tx_timeout, + .ndo_select_queue = hinic_select_queue, + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = hinic_change_mtu, +#else + .ndo_change_mtu = hinic_change_mtu, +#endif + .ndo_set_mac_address = hinic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = hinic_nic_set_rx_mode, + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext hinicvf_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = hinic_fix_features, + .ndo_set_features = hinic_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +static void netdev_feature_init(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else + u32 hw_features; +#endif +#endif + + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | NETIF_F_RXCSUM; + + if (FUNC_SUPPORT_SCTP_CRC(nic_dev->hwdev)) + netdev->features |= NETIF_F_SCTP_CRC; + + netdev->vlan_features = netdev->features; + + if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev)) { +#ifdef HAVE_ENCAPSULATION_TSO + netdev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; +#endif /* HAVE_ENCAPSULATION_TSO */ + } + + if (FUNC_SUPPORT_HW_VLAN(nic_dev->hwdev)) { +#if defined(NETIF_F_HW_VLAN_CTAG_TX) + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; +#elif defined(NETIF_F_HW_VLAN_TX) + netdev->features |= NETIF_F_HW_VLAN_TX; +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; +#elif defined(NETIF_F_HW_VLAN_RX) + netdev->features |= NETIF_F_HW_VLAN_RX; +#endif + } + +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = get_netdev_hw_features(netdev); +#else + hw_features = netdev->hw_features; +#endif + hw_features |= netdev->features; +#endif + if (FUNC_SUPPORT_LRO(nic_dev->hwdev)) { + /* LRO is disable in default, only set hw features */ + hw_features |= NETIF_F_LRO; + + /* Enable LRO */ + if (nic_dev->adaptive_cfg.lro.enable && + !HINIC_FUNC_IS_VF(nic_dev->hwdev)) + netdev->features |= NETIF_F_LRO; + } + + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif + +/* Set after hw_features because this could not be part of hw_features */ +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_FILTER; +#endif + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif + + if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev)) { +#ifdef HAVE_ENCAPSULATION_CSUM + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SCTP_CRC | NETIF_F_SG; +#ifdef HAVE_ENCAPSULATION_TSO + netdev->hw_enc_features |= NETIF_F_TSO | NETIF_F_TSO6 + | 
NETIF_F_TSO_ECN + | NETIF_F_GSO_UDP_TUNNEL_CSUM + | NETIF_F_GSO_UDP_TUNNEL; +#endif /* HAVE_ENCAPSULATION_TSO */ +#endif /* HAVE_ENCAPSULATION_CSUM */ + } +} + +#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) { \ + if ((num_qps) > nic_dev->max_qps) \ + nic_warn(&nic_dev->pdev->dev, \ + "Module Parameter %s value %d is out of range, "\ + "Maximum value for the device: %d, using %d\n",\ + #num_qps, num_qps, nic_dev->max_qps, \ + nic_dev->max_qps); \ + if (!(num_qps) || (num_qps) > nic_dev->max_qps) \ + out_qps = nic_dev->max_qps; \ + else \ + out_qps = num_qps; \ +} + +static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev) +{ + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; int i, node, err = 0; u16 num_cpus = 0; + enum hinic_service_mode service_mode = + hinic_get_service_mode(nic_dev->hwdev); - nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev); + nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev); if (nic_dev->max_qps <= 1) { - nic_dev->flags &= ~HINIC_RSS_ENABLE; + clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags); nic_dev->rss_limit = nic_dev->max_qps; nic_dev->num_qps = nic_dev->max_qps; nic_dev->num_rss = nic_dev->max_qps; @@ -332,11 +2403,15 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev) return; } - err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx); + err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx); if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n"); - nic_dev->flags &= ~HINIC_RSS_ENABLE; + if (err == -ENOSPC) + nic_warn(&nic_dev->pdev->dev, + "Failed to alloc tmpl_idx for rss, table is full\n"); + else + nic_err(&nic_dev->pdev->dev, + "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n"); + clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags); nic_dev->max_qps = 1; nic_dev->rss_limit = nic_dev->max_qps; nic_dev->num_qps = nic_dev->max_qps; @@ -345,834 +2420,975 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev) return; } - nic_dev->flags |= HINIC_RSS_ENABLE; + set_bit(HINIC_RSS_ENABLE, &nic_dev->flags); - for (i = 0; i < num_online_cpus(); i++) { - node = cpu_to_node(i); - if (node == dev_to_node(&pdev->dev)) + nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev); + + MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, nic_dev->num_qps); + + /* To reduce memory footprint in ovs mode. + * VF can't get board info correctly with early pf driver. 
+ */ + if ((hinic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) && + service_mode == HINIC_WORK_MODE_OVS && + hinic_func_type(nic_dev->hwdev) != TYPE_VF) + MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps, + nic_dev->num_qps); + + for (i = 0; i < (int)num_online_cpus(); i++) { + node = (int)cpu_to_node(i); + if (node == dev_to_node(&nic_dev->pdev->dev)) num_cpus++; } if (!num_cpus) - num_cpus = num_online_cpus(); + num_cpus = (u16)num_online_cpus(); + + nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus); - nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus); nic_dev->rss_limit = nic_dev->num_qps; nic_dev->num_rss = nic_dev->num_qps; - hinic_init_rss_parameters(nic_dev); - err = hinic_rss_init(nic_dev); - if (err) - netif_err(nic_dev, drv, netdev, "Failed to init rss\n"); + hinic_init_rss_parameters(nic_dev->netdev); + hinic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc); } -static int hinic_open(struct net_device *netdev) +static int hinic_sw_init(struct hinic_nic_dev *adapter) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - enum hinic_port_link_state link_state; - int err, ret; - - if (!(nic_dev->flags & HINIC_INTF_UP)) { - err = hinic_hwdev_ifup(nic_dev->hwdev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed - HW interface up\n"); - return err; - } - } - - err = create_txqs(nic_dev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to create Tx queues\n"); - goto err_create_txqs; - } - - err = create_rxqs(nic_dev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to create Rx queues\n"); - goto err_create_rxqs; - } - - hinic_enable_rss(nic_dev); - - err = hinic_configure_max_qnum(nic_dev); - if (err) { - netif_err(nic_dev, drv, nic_dev->netdev, - "Failed to configure the maximum number of queues\n"); - goto err_port_state; - } - - netif_set_real_num_tx_queues(netdev, nic_dev->num_qps); - netif_set_real_num_rx_queues(netdev, nic_dev->num_qps); - - err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set port state\n"); - goto err_port_state; - } - - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set func port state\n"); - goto err_func_port_state; - } - - /* Wait up to 3 sec between port enable to link state */ - msleep(3000); - - down(&nic_dev->mgmt_lock); - - err = hinic_port_link_state(nic_dev, &link_state); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to get link state\n"); - goto err_port_link; - } - - if (link_state == HINIC_LINK_STATE_UP) - nic_dev->flags |= HINIC_LINK_UP; - - nic_dev->flags |= HINIC_INTF_UP; - - if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == - (HINIC_LINK_UP | HINIC_INTF_UP)) { - netif_info(nic_dev, drv, netdev, "link + intf UP\n"); - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - } - - up(&nic_dev->mgmt_lock); - - netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n"); - return 0; - -err_port_link: - up(&nic_dev->mgmt_lock); - ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); - if (ret) - netif_warn(nic_dev, drv, netdev, - "Failed to revert func port state\n"); - -err_func_port_state: - ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - if (ret) - netif_warn(nic_dev, drv, netdev, - "Failed to revert port state\n"); -err_port_state: - free_rxqs(nic_dev); - if (nic_dev->flags & HINIC_RSS_ENABLE) { - hinic_rss_deinit(nic_dev); - hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx); - } - 
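The MOD_PARA_VALIDATE_NUM_QPS macro used above clamps a module-parameter queue count: zero or an over-limit request falls back to the device maximum, with a warning in the out-of-range case. A stand-alone sketch of the same logic (names and types here are illustrative, not the driver's):

#include <stdio.h>

static unsigned short validate_num_qps(unsigned short requested,
                                       unsigned short max_qps)
{
        if (requested > max_qps)
                printf("num_qps %u out of range, device max %u, using %u\n",
                       requested, max_qps, max_qps);
        if (!requested || requested > max_qps)
                return max_qps;
        return requested;
}

int main(void)
{
        printf("%u\n", validate_num_qps(0, 16));   /* -> 16 */
        printf("%u\n", validate_num_qps(64, 16));  /* -> 16, with warning */
        printf("%u\n", validate_num_qps(8, 16));   /* -> 8 */
        return 0;
}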
-err_create_rxqs: - free_txqs(nic_dev); - -err_create_txqs: - if (!(nic_dev->flags & HINIC_INTF_UP)) - hinic_hwdev_ifdown(nic_dev->hwdev); - return err; -} - -static int hinic_close(struct net_device *netdev) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - unsigned int flags; - int err; - - down(&nic_dev->mgmt_lock); - - flags = nic_dev->flags; - nic_dev->flags &= ~HINIC_INTF_UP; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - update_nic_stats(nic_dev); - - up(&nic_dev->mgmt_lock); - - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set func port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); - return err; - } - - err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); - return err; - } - - if (nic_dev->flags & HINIC_RSS_ENABLE) { - hinic_rss_deinit(nic_dev); - hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx); - } - - free_rxqs(nic_dev); - free_txqs(nic_dev); - - if (flags & HINIC_INTF_UP) - hinic_hwdev_ifdown(nic_dev->hwdev); - - netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); - return 0; -} - -static int hinic_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int err; - - netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu); - - err = hinic_port_set_mtu(nic_dev, new_mtu); - if (err) - netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); - else - netdev->mtu = new_mtu; - - return err; -} - -/** - * change_mac_addr - change the main mac address of network device - * @netdev: network device - * @addr: mac address to set - * - * Return 0 - Success, negative - Failure - **/ -static int change_mac_addr(struct net_device *netdev, const u8 *addr) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 vid = 0; - int err; - - if (!is_valid_ether_addr(addr)) - return -EADDRNOTAVAIL; - - netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - down(&nic_dev->mgmt_lock); - - do { - err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to delete mac\n"); - break; - } - - err = hinic_port_add_mac(nic_dev, addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); - break; - } - - vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); - } while (vid != VLAN_N_VID); - - up(&nic_dev->mgmt_lock); - return err; -} - -static int hinic_set_mac_addr(struct net_device *netdev, void *addr) -{ - unsigned char new_mac[ETH_ALEN]; - struct sockaddr *saddr = addr; - int err; - - memcpy(new_mac, saddr->sa_data, ETH_ALEN); - - err = change_mac_addr(netdev, new_mac); - if (!err) - memcpy(netdev->dev_addr, new_mac, ETH_ALEN); - - return err; -} - -/** - * add_mac_addr - add mac address to network device - * @netdev: network device - * @addr: mac address to add - * - * Return 0 - Success, negative - Failure - **/ -static int add_mac_addr(struct net_device *netdev, const u8 *addr) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 vid = 0; - int err; - - netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - down(&nic_dev->mgmt_lock); - - do { - err = hinic_port_add_mac(nic_dev, addr, vid); - if (err) { - netif_err(nic_dev, 
drv, netdev, "Failed to add mac\n"); - break; - } - - vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); - } while (vid != VLAN_N_VID); - - up(&nic_dev->mgmt_lock); - return err; -} - -/** - * remove_mac_addr - remove mac address from network device - * @netdev: network device - * @addr: mac address to remove - * - * Return 0 - Success, negative - Failure - **/ -static int remove_mac_addr(struct net_device *netdev, const u8 *addr) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 vid = 0; - int err; - - if (!is_valid_ether_addr(addr)) - return -EADDRNOTAVAIL; - - netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - down(&nic_dev->mgmt_lock); - - do { - err = hinic_port_del_mac(nic_dev, addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to delete mac\n"); - break; - } - - vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); - } while (vid != VLAN_N_VID); - - up(&nic_dev->mgmt_lock); - return err; -} - -static int hinic_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int ret, err; - - netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid); - - down(&nic_dev->mgmt_lock); - - err = hinic_port_add_vlan(nic_dev, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to add vlan\n"); - goto err_vlan_add; - } - - err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to set mac\n"); - goto err_add_mac; - } - - bitmap_set(nic_dev->vlan_bitmap, vid, 1); - - up(&nic_dev->mgmt_lock); - return 0; - -err_add_mac: - ret = hinic_port_del_vlan(nic_dev, vid); - if (ret) - netif_err(nic_dev, drv, netdev, - "Failed to revert by removing vlan\n"); - -err_vlan_add: - up(&nic_dev->mgmt_lock); - return err; -} - -static int hinic_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int err; - - netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid); - - down(&nic_dev->mgmt_lock); - - err = hinic_port_del_vlan(nic_dev, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); - goto err_del_vlan; - } - - bitmap_clear(nic_dev->vlan_bitmap, vid, 1); - - up(&nic_dev->mgmt_lock); - return 0; - -err_del_vlan: - up(&nic_dev->mgmt_lock); - return err; -} - -static void set_rx_mode(struct work_struct *work) -{ - struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); - struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work); - - netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n"); - - hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode); - - __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); - __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); -} - -static void hinic_set_rx_mode(struct net_device *netdev) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_rx_mode_work *rx_mode_work; - u32 rx_mode; - - rx_mode_work = &nic_dev->rx_mode_work; - - rx_mode = HINIC_RX_MODE_UC | - HINIC_RX_MODE_MC | - HINIC_RX_MODE_BC; - - if (netdev->flags & IFF_PROMISC) - rx_mode |= HINIC_RX_MODE_PROMISC; - else if (netdev->flags & IFF_ALLMULTI) - rx_mode |= HINIC_RX_MODE_MC_ALL; - - rx_mode_work->rx_mode = rx_mode; - - queue_work(nic_dev->workq, &rx_mode_work->work); -} - -static void hinic_tx_timeout(struct net_device *netdev) -{ - struct hinic_dev 
*nic_dev = netdev_priv(netdev); - - netif_err(nic_dev, drv, netdev, "Tx timeout\n"); -} - -static void hinic_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_rxq_stats *nic_rx_stats; - struct hinic_txq_stats *nic_tx_stats; - - nic_rx_stats = &nic_dev->rx_stats; - nic_tx_stats = &nic_dev->tx_stats; - - down(&nic_dev->mgmt_lock); - - if (nic_dev->flags & HINIC_INTF_UP) - update_nic_stats(nic_dev); - - up(&nic_dev->mgmt_lock); - - stats->rx_bytes = nic_rx_stats->bytes; - stats->rx_packets = nic_rx_stats->pkts; - stats->rx_errors = nic_rx_stats->errors; - - stats->tx_bytes = nic_tx_stats->bytes; - stats->tx_packets = nic_tx_stats->pkts; - stats->tx_errors = nic_tx_stats->tx_dropped; -} - -static int hinic_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - - return set_features(nic_dev, nic_dev->netdev->features, - features, false); -} - -static netdev_features_t hinic_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - - /* If Rx checksum is disabled, then LRO should also be disabled */ - if (!(features & NETIF_F_RXCSUM)) { - netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n"); - features &= ~NETIF_F_LRO; - } - - return features; -} - -static const struct net_device_ops hinic_netdev_ops = { - .ndo_open = hinic_open, - .ndo_stop = hinic_close, - .ndo_change_mtu = hinic_change_mtu, - .ndo_set_mac_address = hinic_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, - .ndo_set_rx_mode = hinic_set_rx_mode, - .ndo_start_xmit = hinic_xmit_frame, - .ndo_tx_timeout = hinic_tx_timeout, - .ndo_get_stats64 = hinic_get_stats64, - .ndo_fix_features = hinic_fix_features, - .ndo_set_features = hinic_set_features, -}; - -static void netdev_features_init(struct net_device *netdev) -{ - netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_RXCSUM | NETIF_F_LRO | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - - netdev->vlan_features = netdev->hw_features; - - netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; -} - -/** - * link_status_event_handler - link event handler - * @handle: nic device for the handler - * @buf_in: input buffer - * @in_size: input size - * @buf_in: output buffer - * @out_size: returned output size - * - * Return 0 - Success, negative - Failure - **/ -static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct hinic_port_link_status *link_status, *ret_link_status; - struct hinic_dev *nic_dev = handle; - - link_status = buf_in; - - if (link_status->link == HINIC_LINK_STATE_UP) { - down(&nic_dev->mgmt_lock); - - nic_dev->flags |= HINIC_LINK_UP; - - if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == - (HINIC_LINK_UP | HINIC_INTF_UP)) { - netif_carrier_on(nic_dev->netdev); - netif_tx_wake_all_queues(nic_dev->netdev); - } - - up(&nic_dev->mgmt_lock); - - netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); - } else { - down(&nic_dev->mgmt_lock); - - nic_dev->flags &= ~HINIC_LINK_UP; - - netif_carrier_off(nic_dev->netdev); - netif_tx_disable(nic_dev->netdev); - - up(&nic_dev->mgmt_lock); - - netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n"); - 
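Both the old link handler shown here and the open path earlier key the carrier state off two flags at once: the port must report link up and the interface must have been opened. A minimal sketch of that gate (flag values are stand-ins for HINIC_LINK_UP and HINIC_INTF_UP):

#include <stdio.h>

#define LINK_UP (1U << 0)  /* stand-in for HINIC_LINK_UP */
#define INTF_UP (1U << 1)  /* stand-in for HINIC_INTF_UP */

/* Carrier is reported only when the link is up AND the interface is
 * open; either event alone is not enough. */
static int carrier_ok(unsigned int flags)
{
        return (flags & (LINK_UP | INTF_UP)) == (LINK_UP | INTF_UP);
}

int main(void)
{
        printf("%d %d %d\n", carrier_ok(LINK_UP),
               carrier_ok(INTF_UP), carrier_ok(LINK_UP | INTF_UP));
        return 0;
}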
} - - ret_link_status = buf_out; - ret_link_status->status = 0; - - *out_size = sizeof(*ret_link_status); -} - -static int set_features(struct hinic_dev *nic_dev, - netdev_features_t pre_features, - netdev_features_t features, bool force_change) -{ - netdev_features_t changed = force_change ? ~0 : pre_features ^ features; - u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; + struct net_device *netdev = adapter->netdev; + u16 func_id; int err = 0; - if (changed & NETIF_F_TSO) - err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? - HINIC_TSO_ENABLE : HINIC_TSO_DISABLE); + sema_init(&adapter->port_state_sem, 1); - if (changed & NETIF_F_RXCSUM) - err = hinic_set_rx_csum_offload(nic_dev, csum_en); - - if (changed & NETIF_F_LRO) { - err = hinic_set_rx_lro_state(nic_dev, - !!(features & NETIF_F_LRO), - HINIC_LRO_RX_TIMER_DEFAULT, - HINIC_LRO_MAX_WQE_NUM_DEFAULT); + err = hinic_dcb_init(adapter); + if (err) { + nic_err(&adapter->pdev->dev, "Failed to init dcb\n"); + return -EFAULT; } - if (changed & NETIF_F_HW_VLAN_CTAG_RX) - err = hinic_set_rx_vlan_offload(nic_dev, - !!(features & - NETIF_F_HW_VLAN_CTAG_RX)); - - return err; -} - -/** - * nic_dev_init - Initialize the NIC device - * @pdev: the NIC pci device - * - * Return 0 - Success, negative - Failure - **/ -static int nic_dev_init(struct pci_dev *pdev) -{ - struct hinic_rx_mode_work *rx_mode_work; - struct hinic_txq_stats *tx_stats; - struct hinic_rxq_stats *rx_stats; - struct hinic_dev *nic_dev; - struct net_device *netdev; - struct hinic_hwdev *hwdev; - int err, num_qps; - - hwdev = hinic_init_hwdev(pdev); - if (IS_ERR(hwdev)) { - dev_err(&pdev->dev, "Failed to initialize HW device\n"); - return PTR_ERR(hwdev); + if (HINIC_FUNC_IS_VF(adapter->hwdev)) { + err = hinic_sq_cos_mapping(netdev); + if (err) { + nic_err(&adapter->pdev->dev, "Failed to set sq_cos_mapping\n"); + return -EFAULT; + } } - num_qps = hinic_hwdev_num_qps(hwdev); - if (num_qps <= 0) { - dev_err(&pdev->dev, "Invalid number of QPS\n"); - err = -EINVAL; - goto err_num_qps; + adapter->sq_depth = HINIC_SQ_DEPTH; + adapter->rq_depth = HINIC_RQ_DEPTH; + + hinic_try_to_enable_rss(adapter); + + + err = hinic_get_default_mac(adapter->hwdev, netdev->dev_addr); + if (err) { + nic_err(&adapter->pdev->dev, "Failed to get MAC address\n"); + goto get_mac_err; } - netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps); - if (!netdev) { - dev_err(&pdev->dev, "Failed to allocate Ethernet device\n"); - err = -ENOMEM; - goto err_alloc_etherdev; + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!HINIC_FUNC_IS_VF(adapter->hwdev)) { + nic_err(&adapter->pdev->dev, "Invalid MAC address\n"); + err = -EIO; + goto err_mac; + } + + nic_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random\n", + netdev->dev_addr); + eth_hw_addr_random(netdev); } - hinic_set_ethtool_ops(netdev); - netdev->netdev_ops = &hinic_netdev_ops; - netdev->max_mtu = ETH_MAX_MTU; - - nic_dev = netdev_priv(netdev); - nic_dev->netdev = netdev; - nic_dev->hwdev = hwdev; - nic_dev->msg_enable = MSG_ENABLE_DEFAULT; - nic_dev->flags = 0; - nic_dev->txqs = NULL; - nic_dev->rxqs = NULL; - nic_dev->tx_weight = tx_weight; - nic_dev->rx_weight = rx_weight; - - sema_init(&nic_dev->mgmt_lock, 1); - - tx_stats = &nic_dev->tx_stats; - rx_stats = &nic_dev->rx_stats; - - u64_stats_init(&tx_stats->syncp); - u64_stats_init(&rx_stats->syncp); - - nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev, - VLAN_BITMAP_SIZE(nic_dev), - GFP_KERNEL); - if (!nic_dev->vlan_bitmap) { - err = -ENOMEM; - goto err_vlan_bitmap; - } - - nic_dev->workq = 
create_singlethread_workqueue(HINIC_WQ_NAME); - if (!nic_dev->workq) { - err = -ENOMEM; - goto err_workq; - } - - pci_set_drvdata(pdev, netdev); - - err = hinic_port_get_mac(nic_dev, netdev->dev_addr); + err = hinic_global_func_id_get(adapter->hwdev, &func_id); if (err) - dev_warn(&pdev->dev, "Failed to get mac address\n"); + goto func_id_err; - err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0); - if (err) { - dev_err(&pdev->dev, "Failed to add mac\n"); - goto err_add_mac; + err = hinic_set_mac(adapter->hwdev, netdev->dev_addr, 0, func_id); + /* When this is VF driver, we must consider that PF has already set VF + * MAC, and we can't consider this condition is error status during + * driver probe procedure. + */ + if (err && err != HINIC_PF_SET_VF_ALREADY) { + nic_err(&adapter->pdev->dev, "Failed to set default MAC\n"); + goto set_mac_err; } - err = hinic_port_set_mtu(nic_dev, netdev->mtu); - if (err) { - dev_err(&pdev->dev, "Failed to set mtu\n"); - goto err_set_mtu; - } - - rx_mode_work = &nic_dev->rx_mode_work; - INIT_WORK(&rx_mode_work->work, set_rx_mode); - - netdev_features_init(netdev); - - netif_carrier_off(netdev); - - hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, - nic_dev, link_status_event_handler); - - err = set_features(nic_dev, 0, nic_dev->netdev->features, true); - if (err) - goto err_set_features; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "Failed to register netdev\n"); - goto err_reg_netdev; - } + /* MTU range: 256 - 9600 */ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + netdev->min_mtu = HINIC_MIN_MTU_SIZE; + netdev->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE; +#endif +#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = HINIC_MIN_MTU_SIZE; + netdev->extended->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE; +#endif return 0; -err_reg_netdev: -err_set_features: - hinic_hwdev_cb_unregister(nic_dev->hwdev, - HINIC_MGMT_MSG_CMD_LINK_STATUS); - cancel_work_sync(&rx_mode_work->work); +set_mac_err: +func_id_err: +err_mac: +get_mac_err: + if (test_bit(HINIC_RSS_ENABLE, &adapter->flags)) + hinic_rss_template_free(adapter->hwdev, adapter->rss_tmpl_idx); -err_set_mtu: -err_add_mac: - pci_set_drvdata(pdev, NULL); - destroy_workqueue(nic_dev->workq); - -err_workq: -err_vlan_bitmap: - free_netdev(netdev); - -err_alloc_etherdev: -err_num_qps: - hinic_free_hwdev(hwdev); return err; } -static int hinic_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static void hinic_assign_netdev_ops(struct hinic_nic_dev *adapter) { - int err = pci_enable_device(pdev); + if (!HINIC_FUNC_IS_VF(adapter->hwdev)) { + adapter->netdev->netdev_ops = &hinic_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(adapter->netdev, &hinic_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + if (FUNC_SUPPORT_DCB(adapter->hwdev)) + adapter->netdev->dcbnl_ops = &hinic_dcbnl_ops; + hinic_set_ethtool_ops(adapter->netdev); + } else { + adapter->netdev->netdev_ops = &hinicvf_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(adapter->netdev, &hinicvf_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + hinicvf_set_ethtool_ops(adapter->netdev); + } + adapter->netdev->watchdog_timeo = 5 * HZ; +} +#define HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT 1 +#define HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER 1 +#define HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER 2 +#define HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER 3 
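+/* Per-board MSI-X coalescing defaults consumed by update_queue_coal_param() below: the *_PENDING_LIMIT values are completion-count thresholds and the *_COALESC_TIMER values timer settings (both in opaque device units); the ARM variants use a longer timer. */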
+#define HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DFT_PG_100GE_TXRX_MSIX_COALESC_TIMER 2 +#define HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER 3 + +static void update_queue_coal_param(struct hinic_nic_dev *nic_dev, + struct pci_device_id *id, u16 qid) +{ + struct hinic_intr_coal_info *info = NULL; + + info = &nic_dev->intr_coalesce[qid]; + if (!nic_dev->intr_coal_set_flag) { + switch (id->driver_data) { + case HINIC_BOARD_PG_TP_10GE: + info->pending_limt = + HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT; + info->coalesce_timer_cfg = + HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER; + break; + case HINIC_BOARD_PG_SM_25GE: + info->pending_limt = + HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT; +#if defined(__aarch64__) + info->coalesce_timer_cfg = + HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER; +#else + info->coalesce_timer_cfg = + HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER; +#endif + break; + case HINIC_BOARD_PG_100GE: + info->pending_limt = + HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT; +#if defined(__aarch64__) + info->coalesce_timer_cfg = + HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER; +#else + info->coalesce_timer_cfg = + HINIC_DFT_PG_100GE_TXRX_MSIX_COALESC_TIMER; +#endif + break; + default: + info->pending_limt = qp_pending_limit; + info->coalesce_timer_cfg = qp_coalesc_timer_cfg; + break; + } + } + + info->resend_timer_cfg = HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + info->pkt_rate_high = HINIC_RX_RATE_HIGH; + info->rx_usecs_high = qp_coalesc_timer_high; + info->rx_pending_limt_high = qp_pending_limit_high; + info->pkt_rate_low = HINIC_RX_RATE_LOW; + info->rx_usecs_low = qp_coalesc_timer_low; + info->rx_pending_limt_low = qp_pending_limit_low; + + if (nic_dev->in_vm) { + if (qp_pending_limit_high == HINIC_RX_PENDING_LIMIT_HIGH) + qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH_VM; + info->pkt_rate_low = HINIC_RX_RATE_LOW_VM; + info->rx_pending_limt_high = qp_pending_limit_high; + } + + /* suit for sdi3.0 vm mode vf drv or bm mode pf/vf drv */ + if ((nic_dev->is_vm_slave && nic_dev->in_vm) || + nic_dev->is_bm_slave) { + info->pkt_rate_high = SDI_VM_RX_PKT_RATE_HIGH; + info->pkt_rate_low = SDI_VM_RX_PKT_RATE_LOW; + + if (qp_coalesc_use_drv_params_switch == 0) { + /* if arm server, maybe need to change this value + * again + */ + info->pending_limt = SDI_VM_PENDING_LIMT; + info->coalesce_timer_cfg = SDI_VM_COALESCE_TIMER_CFG; + info->rx_usecs_high = SDI_VM_RX_USECS_HIGH; + info->rx_pending_limt_high = + SDI_VM_RX_PENDING_LIMT_HIGH; + info->rx_usecs_low = SDI_VM_RX_USECS_LOW; + info->rx_pending_limt_low = SDI_VM_RX_PENDING_LIMT_LOW; + } else { + info->rx_usecs_high = qp_coalesc_timer_high; + info->rx_pending_limt_high = qp_pending_limit_high; + info->rx_usecs_low = qp_coalesc_timer_low; + info->rx_pending_limt_low = qp_pending_limit_low; + } + } +} + +static void init_intr_coal_param(struct hinic_nic_dev *nic_dev) +{ + struct pci_device_id *id; + u16 i; + + id = hinic_get_pci_device_id(nic_dev->pdev); + switch (id->driver_data) { + case HINIC_BOARD_10GE: + case HINIC_BOARD_PG_TP_10GE: + nic_dev->his_link_speed = SPEED_10000; + break; + case HINIC_BOARD_25GE: + case HINIC_BOARD_PG_SM_25GE: + nic_dev->his_link_speed = SPEED_25000; + break; + case HINIC_BOARD_40GE: + nic_dev->his_link_speed = SPEED_40000; + break; + case HINIC_BOARD_100GE: + case HINIC_BOARD_PG_100GE: + nic_dev->his_link_speed = SPEED_100000; + break; + default: + break; + } + + for (i = 0; i < nic_dev->max_qps; i++) + update_queue_coal_param(nic_dev, id, i); +} + +static int hinic_init_intr_coalesce(struct 
hinic_nic_dev *nic_dev) +{ + u64 size; + + if ((qp_pending_limit != HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT) || + (qp_coalesc_timer_cfg != HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG)) + nic_dev->intr_coal_set_flag = 1; + else + nic_dev->intr_coal_set_flag = 0; + + size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps; + if (!size) { + nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n"); + return -EINVAL; + } + nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL); + if (!nic_dev->intr_coalesce) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n"); + return -ENOMEM; + } + + init_intr_coal_param(nic_dev); + + if (test_bit(HINIC_INTR_ADAPT, &nic_dev->flags)) + nic_dev->adaptive_rx_coal = 1; + else + nic_dev->adaptive_rx_coal = 0; + + return 0; +} + +static void hinic_free_intr_coalesce(struct hinic_nic_dev *nic_dev) +{ + kfree(nic_dev->intr_coalesce); +} + +static int hinic_alloc_qps(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err; + + err = hinic_alloc_txqs(netdev); if (err) { - dev_err(&pdev->dev, "Failed to enable PCI device\n"); + nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n"); return err; } - err = pci_request_regions(pdev, HINIC_DRV_NAME); + err = hinic_alloc_rxqs(netdev); if (err) { - dev_err(&pdev->dev, "Failed to request PCI regions\n"); - goto err_pci_regions; + nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n"); + goto alloc_rxqs_err; } - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = hinic_init_intr_coalesce(nic_dev); if (err) { - dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Failed to set DMA mask\n"); - goto err_dma_mask; - } + nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n"); + goto init_intr_err; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, - "Couldn't set 64-bit consistent DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent DMA mask\n"); - goto err_dma_consistent_mask; - } - } - - err = nic_dev_init(pdev); - if (err) { - dev_err(&pdev->dev, "Failed to initialize NIC device\n"); - goto err_nic_dev_init; - } - - dev_info(&pdev->dev, "HiNIC driver - probed\n"); return 0; -err_nic_dev_init: -err_dma_consistent_mask: -err_dma_mask: - pci_release_regions(pdev); +init_intr_err: + hinic_free_rxqs(netdev); + +alloc_rxqs_err: + hinic_free_txqs(netdev); -err_pci_regions: - pci_disable_device(pdev); return err; } -static void hinic_remove(struct pci_dev *pdev) +static void hinic_destroy_qps(struct hinic_nic_dev *nic_dev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_rx_mode_work *rx_mode_work; + hinic_free_intr_coalesce(nic_dev); + hinic_free_rxqs(nic_dev->netdev); + hinic_free_txqs(nic_dev->netdev); +} - unregister_netdev(netdev); +static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev) +{ + struct pci_dev *pdev = lld_dev->pdev; - hinic_hwdev_cb_unregister(nic_dev->hwdev, - HINIC_MGMT_MSG_CMD_LINK_STATUS); + if (bp_upper_thd < bp_lower_thd || bp_lower_thd == 0) { + nic_warn(&pdev->dev, "Module Parameter bp_upper_thd: %d, bp_lower_thd: %d is invalid, resetting to default\n", + bp_upper_thd, bp_lower_thd); + bp_lower_thd = HINIC_RX_BP_LOWER_THD; + bp_upper_thd = HINIC_RX_BP_UPPER_THD; + } - rx_mode_work = &nic_dev->rx_mode_work; - 
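The removed probe code nearby uses the legacy 64-then-32-bit DMA-mask fallback via pci_set_dma_mask()/pci_set_consistent_dma_mask(). On modern kernels the same pattern is usually written with dma_set_mask_and_coherent(), roughly as sketched below; this is a kernel-context sketch of the pattern, not the driver's code:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try a 64-bit DMA mask first; fall back to 32 bits if the platform
 * cannot do 64-bit DMA. dma_set_mask_and_coherent() covers both the
 * streaming and the coherent mask in one call. */
static int set_dma_masks(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        dev_err(&pdev->dev, "Failed to set DMA mask\n");
        }

        return err;
}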
cancel_work_sync(&rx_mode_work->work); + if (!poll_weight) { + nic_warn(&pdev->dev, "Module Parameter poll_weight cannot be 0, resetting to %d\n", + DEFAULT_POLL_WEIGHT); + poll_weight = DEFAULT_POLL_WEIGHT; + } - pci_set_drvdata(pdev, NULL); + /* check rx_buff value, default rx_buff is 2KB. + * Valid rx_buff values are 2KB/4KB/8KB/16KB. + */ + if ((rx_buff != RX_BUFF_VALID_2KB) && (rx_buff != RX_BUFF_VALID_4KB) && + (rx_buff != RX_BUFF_VALID_8KB) && (rx_buff != RX_BUFF_VALID_16KB)) { + nic_warn(&pdev->dev, "Module Parameter rx_buff value %d is out of range, must be 2^n. Valid range is 2 - 16, resetting to %dKB\n", + rx_buff, DEFAULT_RX_BUFF_LEN); + rx_buff = DEFAULT_RX_BUFF_LEN; + } + if (qp_coalesc_timer_high <= qp_coalesc_timer_low) { + nic_warn(&pdev->dev, "Module Parameter qp_coalesc_timer_high: %d, qp_coalesc_timer_low: %d is invalid, resetting to default\n", + qp_coalesc_timer_high, qp_coalesc_timer_low); + qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH; + qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW; + } + + if (qp_pending_limit_high <= qp_pending_limit_low) { + nic_warn(&pdev->dev, "Module Parameter qp_pending_limit_high: %d, qp_pending_limit_low: %d is invalid, resetting to default\n", + qp_pending_limit_high, qp_pending_limit_low); + qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH; + qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW; + } + + return 0; +} + +static void check_lro_module_param(struct hinic_nic_dev *nic_dev) +{ + struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro; + + /* Use module parameters first. */ + if (set_lro_timer != 0 && + set_lro_timer >= HINIC_LRO_RX_TIMER_LOWER && + set_lro_timer <= HINIC_LRO_RX_TIMER_UPPER) + lro->timer = set_lro_timer; + + /* Use module parameters first. */ + if (set_max_wqe_num != 0 && + set_max_wqe_num <= HINIC_LRO_MAX_WQE_NUM_UPPER && + set_max_wqe_num >= HINIC_LRO_MAX_WQE_NUM_LOWER) + lro->buffer_size = set_max_wqe_num * nic_dev->rx_buff_len; +} + +static void decide_rss_cfg(struct hinic_nic_dev *nic_dev) +{ + struct hinic_environment_info *info = &nic_dev->env_info; + + switch (info->cpu) { + case HINIC_CPU_ARM_GENERIC: + set_bit(HINIC_SAME_RXTX, &nic_dev->flags); + + break; + case HINIC_CPU_X86_GENERIC: + clear_bit(HINIC_SAME_RXTX, &nic_dev->flags); + + break; + + default: + clear_bit(HINIC_SAME_RXTX, &nic_dev->flags); + break; + } +}
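hinic_validate_parameters() above accepts only a handful of rx_buff sizes, so the check reduces to simple set membership plus a default fallback. A stand-alone sketch (the constants are stand-ins for the driver's RX_BUFF_VALID_* macros):

#include <stdio.h>

/* Only the listed power-of-two sizes (in KB) are accepted; anything
 * else falls back to the 2KB default, matching the driver's warning. */
static int validate_rx_buff(int rx_buff_kb)
{
        if (rx_buff_kb != 2 && rx_buff_kb != 4 &&
            rx_buff_kb != 8 && rx_buff_kb != 16) {
                printf("rx_buff %dKB invalid, resetting to 2KB\n", rx_buff_kb);
                return 2;
        }
        return rx_buff_kb;
}

int main(void)
{
        printf("%d\n", validate_rx_buff(4));   /* accepted */
        printf("%d\n", validate_rx_buff(3));   /* rejected -> 2 */
        printf("%d\n", validate_rx_buff(16));  /* accepted */
        return 0;
}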
+ +static void decide_lro_cfg(struct hinic_nic_dev *nic_dev) +{ + struct hinic_environment_info *info = &nic_dev->env_info; + struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro; + + if (lro_en_status < HINIC_LRO_STATUS_UNSET) { + lro->enable = lro_en_status; + } else { + /* LRO is enabled by default on Huawei OS */ + switch (info->os) { + case HINIC_OS_HUAWEI: + lro->enable = 1; + break; + case HINIC_OS_NON_HUAWEI: + lro->enable = 0; + break; + default: + lro->enable = 0; + break; + } + } + + switch (info->board) { + case HINIC_BOARD_25GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_25GE; + break; + case HINIC_BOARD_100GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_100GE; + break; + case HINIC_BOARD_PG_TP_10GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE; + break; + case HINIC_BOARD_PG_SM_25GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT; + break; + case HINIC_BOARD_PG_100GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE; + break; + default: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT; + break; + } + + /* Defaults by CPU type; module parameters, checked later, take precedence. */ + switch (info->cpu) { + case HINIC_CPU_ARM_GENERIC: + lro->buffer_size = + HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM * + nic_dev->rx_buff_len; + break; + case HINIC_CPU_X86_GENERIC: + lro->buffer_size = + HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 * + nic_dev->rx_buff_len; + break; + default: + lro->buffer_size = + HINIC_LRO_MAX_WQE_NUM_DEFAULT * + nic_dev->rx_buff_len; + break; + } + + /* LRO buffer_size needs adjusting according to the board type */ + switch (info->board) { + case HINIC_BOARD_PG_TP_10GE: + case HINIC_BOARD_PG_SM_25GE: + case HINIC_BOARD_PG_100GE: + lro->buffer_size = + HINIC_LRO_WQE_NUM_PANGEA_DEFAULT * nic_dev->rx_buff_len; + break; + default: + break; + } + + check_lro_module_param(nic_dev); + + nic_info(&nic_dev->pdev->dev, + "LRO default configuration: enable %u, timer %u, buffer size %u\n", + lro->enable, lro->timer, lro->buffer_size); +} + +static void decide_intr_cfg(struct hinic_nic_dev *nic_dev) +{ + struct pci_device_id *id; + + id = hinic_get_pci_device_id(nic_dev->pdev); + switch (id->driver_data) { + case HINIC_BOARD_PG_TP_10GE: + case HINIC_BOARD_PG_SM_25GE: + case HINIC_BOARD_PG_100GE: + clear_bit(HINIC_INTR_ADAPT, &nic_dev->flags); + break; + default: + set_bit(HINIC_INTR_ADAPT, &nic_dev->flags); + break; + } +} + +static void adaptive_configuration_init(struct hinic_nic_dev *nic_dev) +{ + struct pci_device_id *id; + + id = hinic_get_pci_device_id(nic_dev->pdev); + if (id) + nic_dev->env_info.board = id->driver_data; + else + nic_dev->env_info.board = HINIC_BOARD_UKNOWN; + + nic_dev->env_info.os = HINIC_OS_HUAWEI; + +#if defined(__aarch64__) + nic_dev->env_info.cpu = HINIC_CPU_ARM_GENERIC; +#elif defined(__x86_64__) + nic_dev->env_info.cpu = HINIC_CPU_X86_GENERIC; +#else + nic_dev->env_info.cpu = HINIC_CPU_UKNOWN; +#endif + + nic_info(&nic_dev->pdev->dev, + "Board type %u, OS type %u, CPU type %u\n", + nic_dev->env_info.board, nic_dev->env_info.os, + nic_dev->env_info.cpu); + + decide_lro_cfg(nic_dev); + decide_rss_cfg(nic_dev); + decide_intr_cfg(nic_dev); +} + +static int nic_probe(struct hinic_lld_dev *lld_dev, void **uld_dev, + char *uld_dev_name) +{ + struct pci_dev *pdev = lld_dev->pdev; + struct hinic_nic_dev *nic_dev; + struct net_device *netdev; + u16 max_qps; + u32 page_num; + int err; + + /* *uld_dev should never be NULL */ + *uld_dev = lld_dev; + + if (!hinic_support_nic(lld_dev->hwdev, NULL)) { + nic_info(&pdev->dev, "HW doesn't support nic\n"); + return 0; + } + + err = hinic_validate_parameters(lld_dev); + if (err) + return -EINVAL; + + max_qps = hinic_func_max_nic_qnum(lld_dev->hwdev); + netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); + if (!netdev) { + nic_err(&pdev->dev, "Failed to allocate ETH device\n"); + return -ENOMEM; + } + + + SET_NETDEV_DEV(netdev, &pdev->dev); + nic_dev = (struct hinic_nic_dev *)netdev_priv(netdev); + nic_dev->hwdev = lld_dev->hwdev; + nic_dev->pdev = pdev; + nic_dev->poll_weight = (int)poll_weight; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->heart_status = true; + nic_dev->in_vm = !hinic_is_in_host(); + nic_dev->is_vm_slave = is_multi_vm_slave(lld_dev->hwdev); + nic_dev->is_bm_slave = is_multi_bm_slave(lld_dev->hwdev); + nic_dev->lro_replenish_thld = lro_replenish_thld; + nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT); + page_num = (RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len) / PAGE_SIZE; + nic_dev->page_order = page_num > 0 ?
ilog2(page_num) : 0; + + + mutex_init(&nic_dev->nic_mutex); + + adaptive_configuration_init(nic_dev); + + nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); + if (!nic_dev->vlan_bitmap) { + nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n"); + err = -ENOMEM; + goto vlan_bitmap_err; + } + nic_dev->netdev = netdev; + hinic_assign_netdev_ops(nic_dev); + netdev_feature_init(netdev); + /* get nic cap from hw */ + hinic_support_nic(lld_dev->hwdev, &nic_dev->nic_cap); + + err = hinic_init_nic_hwdev(nic_dev->hwdev, nic_dev->rx_buff_len); + if (err) { + nic_err(&pdev->dev, "Failed to init nic hwdev\n"); + goto init_nic_hwdev_err; + } + + err = hinic_set_super_cqe_state(nic_dev->hwdev, true); + if (err) { + nic_err(&pdev->dev, "Failed to set super cqe\n"); + goto set_supper_cqe_err; + } + + err = hinic_sw_init(nic_dev); + if (err) + goto sw_init_err; + + err = hinic_alloc_qps(nic_dev); + if (err) { + nic_err(&pdev->dev, "Failed to alloc qps\n"); + goto alloc_qps_err; + } + + nic_dev->workq = create_singlethread_workqueue(HINIC_NIC_DEV_WQ_NAME); + if (!nic_dev->workq) { + nic_err(&pdev->dev, "Failed to initialize AEQ workqueue\n"); + err = -ENOMEM; + goto create_workq_err; + } + + INIT_LIST_HEAD(&nic_dev->uc_filter_list); + INIT_LIST_HEAD(&nic_dev->mc_filter_list); + INIT_WORK(&nic_dev->rx_mode_work, hinic_set_rx_mode_work); + + err = hinic_set_default_hw_feature(nic_dev); + if (err) + goto set_features_err; + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + hinic_register_notifier(nic_dev); +#endif + err = register_netdev(netdev); + if (err) { + nic_err(&pdev->dev, "Failed to register netdev\n"); + err = -ENOMEM; + goto netdev_err; + } + + netif_carrier_off(netdev); + + *uld_dev = nic_dev; + nicif_info(nic_dev, probe, netdev, "Register netdev succeed\n"); + + return 0; + +netdev_err: +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + hinic_unregister_notifier(nic_dev); +#endif + +set_features_err: destroy_workqueue(nic_dev->workq); - hinic_free_hwdev(nic_dev->hwdev); +create_workq_err: + hinic_destroy_qps(nic_dev); +alloc_qps_err: + hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic_global_func_id_hw(nic_dev->hwdev)); + +sw_init_err: + (void)hinic_set_super_cqe_state(nic_dev->hwdev, false); + +set_supper_cqe_err: + hinic_free_nic_hwdev(nic_dev->hwdev); + +init_nic_hwdev_err: + kfree(nic_dev->vlan_bitmap); + +vlan_bitmap_err: free_netdev(netdev); - pci_release_regions(pdev); - pci_disable_device(pdev); - - dev_info(&pdev->dev, "HiNIC driver - removed\n"); + return err; } -static void hinic_shutdown(struct pci_dev *pdev) +static void nic_remove(struct hinic_lld_dev *lld_dev, void *adapter) { - pci_disable_device(pdev); + struct hinic_nic_dev *nic_dev = adapter; + struct net_device *netdev; + + if (!nic_dev || !hinic_support_nic(lld_dev->hwdev, NULL)) + return; + + netdev = nic_dev->netdev; + + unregister_netdev(netdev); +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + hinic_unregister_notifier(nic_dev); + +#endif + cancel_work_sync(&nic_dev->rx_mode_work); + destroy_workqueue(nic_dev->workq); + + hinic_destroy_qps(nic_dev); + + hinic_clean_mac_list_filter(nic_dev); + hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic_global_func_id_hw(nic_dev->hwdev)); + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx); + + (void)hinic_set_super_cqe_state(nic_dev->hwdev, false); + + hinic_free_nic_hwdev(nic_dev->hwdev); + + kfree(nic_dev->vlan_bitmap); + + free_netdev(netdev); } -static const struct pci_device_id hinic_pci_table[] = { - { 
PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0}, - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0}, - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0}, - { 0, 0} -}; -MODULE_DEVICE_TABLE(pci, hinic_pci_table); +int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err, err_netdev = 0; -static struct pci_driver hinic_driver = { - .name = HINIC_DRV_NAME, - .id_table = hinic_pci_table, - .probe = hinic_probe, - .remove = hinic_remove, - .shutdown = hinic_shutdown, + nicif_info(nic_dev, drv, netdev, "Start to disable RSS\n"); + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "RSS not enabled, do nothing\n"); + return 0; + } + + if (netif_running(netdev)) { + err_netdev = hinic_close(netdev); + if (err_netdev) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } + } + + /* free rss template */ + err = hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to free RSS template\n"); + } else { + nicif_info(nic_dev, drv, netdev, "Succeeded in freeing RSS template\n"); + clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags); + } + + if (netif_running(netdev)) { + err_netdev = hinic_open(netdev); + if (err_netdev) + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + } + + return err ? err : err_netdev; +} + +int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err, err_netdev = 0; + + nicif_info(nic_dev, drv, netdev, "Start to enable RSS\n"); + + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "RSS already enabled, do nothing\n"); + return 0; + } + + if (netif_running(netdev)) { + err_netdev = hinic_close(netdev); + if (err_netdev) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } + } + + err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx); + if (err) { + if (err == -ENOSPC) + nicif_warn(nic_dev, drv, netdev, + "Failed to alloc RSS template, table is full\n"); + else + nicif_err(nic_dev, drv, netdev, + "Failed to alloc RSS template\n"); + } else { + set_bit(HINIC_RSS_ENABLE, &nic_dev->flags); + nicif_info(nic_dev, drv, netdev, "Succeeded in allocating RSS template\n"); + } + + if (netif_running(netdev)) { + err_netdev = hinic_open(netdev); + if (err_netdev) + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + } + + return err ? err : err_netdev; +} + +static const char *hinic_module_link_err[LINK_ERR_NUM] = { + "Unrecognized module", +}; -module_pci_driver(hinic_driver); +static void hinic_port_module_event_handler(struct hinic_nic_dev *nic_dev, + struct hinic_event_info *event) +{ + enum port_module_event_type type = event->module_event.type; + enum link_err_type err_type = event->module_event.err_type; + + switch (type) { + case HINIC_PORT_MODULE_CABLE_PLUGGED: + case HINIC_PORT_MODULE_CABLE_UNPLUGGED: + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable %s\n", + type == HINIC_PORT_MODULE_CABLE_PLUGGED ?
+ "plugged" : "unplugged"); + break; + case HINIC_PORT_MODULE_LINK_ERR: + if (err_type >= LINK_ERR_NUM) { + nicif_info(nic_dev, link, nic_dev->netdev, + "Link failed, Unknown error type: 0x%x\n", + err_type); + } else { + nicif_info(nic_dev, link, nic_dev->netdev, + "Link failed, error type: 0x%x: %s\n", + err_type, hinic_module_link_err[err_type]); + } + break; + default: + nicif_err(nic_dev, link, nic_dev->netdev, + "Unknown port module type %d\n", type); + break; + } +} + +static void hinic_intr_coalesc_change(struct hinic_nic_dev *nic_dev, + struct hinic_event_info *event) +{ + u32 hw_to_os_speed[LINK_SPEED_LEVELS] = {SPEED_10, SPEED_100, + SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, + SPEED_100000}; + u8 qid, coalesc_timer_cfg, pending_limt; + struct pci_device_id *id; + u32 speed; + int err; + + if (nic_dev->adaptive_rx_coal) + return; + + speed = hw_to_os_speed[event->link_info.speed]; + if (speed == nic_dev->his_link_speed) + return; + + id = hinic_get_pci_device_id(nic_dev->pdev); + switch (id->driver_data) { + case HINIC_BOARD_PG_TP_10GE: + return; + case HINIC_BOARD_PG_SM_25GE: + if (speed == SPEED_10000) { + pending_limt = + HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT; + coalesc_timer_cfg = + HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER; + } else if (speed == SPEED_25000) { + pending_limt = + HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT; +#if defined(__aarch64__) + coalesc_timer_cfg = + HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER; +#else + coalesc_timer_cfg = + HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER; +#endif + } else { + pending_limt = + HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT; + coalesc_timer_cfg = + HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER; + } + break; + case HINIC_BOARD_PG_100GE: + return; + default: + return; + } + + for (qid = 0; qid < nic_dev->num_qps; qid++) { + if (!nic_dev->intr_coalesce[qid].user_set_intr_coal_flag) { + err = set_interrupt_moder(nic_dev, qid, + coalesc_timer_cfg, + pending_limt); + if (!err) { + nic_dev->intr_coalesce[qid].pending_limt = + pending_limt; + nic_dev->intr_coalesce[qid].coalesce_timer_cfg = + coalesc_timer_cfg; + } + } + } + + nic_dev->his_link_speed = speed; +} + +void nic_event(struct hinic_lld_dev *lld_dev, void *adapter, + struct hinic_event_info *event) +{ + struct hinic_nic_dev *nic_dev = adapter; + struct net_device *netdev; + enum hinic_event_type type; + + if (!nic_dev || !event || !hinic_support_nic(lld_dev->hwdev, NULL)) + return; + + netdev = nic_dev->netdev; + type = event->type; + + switch (type) { + case HINIC_EVENT_LINK_DOWN: + hinic_link_status_change(nic_dev, false); + break; + case HINIC_EVENT_LINK_UP: + hinic_link_status_change(nic_dev, true); + hinic_intr_coalesc_change(nic_dev, event); + break; + case HINIC_EVENT_HEART_LOST: + hinic_heart_lost(nic_dev); + break; + case HINIC_EVENT_FAULT: + break; + case HINIC_EVENT_DCB_STATE_CHANGE: + if (nic_dev->default_cos_id == event->dcb_state.default_cos) + break; + + /* PF notify to vf, don't need to handle this event */ + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + break; + + nicif_info(nic_dev, drv, netdev, "Change default cos %d to %d\n", + nic_dev->default_cos_id, + event->dcb_state.default_cos); + + nic_dev->default_cos_id = event->dcb_state.default_cos; + hinic_set_sq_default_cos(netdev, nic_dev->default_cos_id); + break; + case HINIC_EVENT_PORT_MODULE_EVENT: + hinic_port_module_event_handler(nic_dev, event); + break; + default: + break; + } +} + +struct hinic_uld_info nic_uld_info = { + .probe = nic_probe, + .remove = nic_remove, + .suspend = NULL, + .resume = 
NULL, + .event = nic_event, + .ioctl = nic_ioctl, +}; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c new file mode 100644 index 000000000000..2683efc67e97 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c @@ -0,0 +1,1717 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_mbox.h" + +#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0 +#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 +#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define HINIC_MBOX_INT_WB_EN_SHIFT 28 + +#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF +#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F +#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define HINIC_MBOX_INT_WB_EN_MASK 0x1 + +#define HINIC_MBOX_INT_SET(val, field) \ + (((val) & HINIC_MBOX_INT_##field##_MASK) << \ + HINIC_MBOX_INT_##field##_SHIFT) + +enum hinic_mbox_tx_status { + TX_NOT_DONE = 1, +}; + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 +/* specifies the issue request for the message data. + * 0 - Tx request is done; + * 1 - Tx request is in process. 
+ */ +#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1 + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1 + +#define HINIC_MBOX_CTRL_SET(val, field) \ + (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \ + HINIC_MBOX_CTRL_##field##_SHIFT) + +#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MBOX_HEADER_MODULE_SHIFT 11 +#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MBOX_HEADER_SEQID_SHIFT 24 +#define HINIC_MBOX_HEADER_LAST_SHIFT 30 +/* specifies the mailbox message direction + * 0 - send + * 1 - receive + */ +#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MBOX_HEADER_CMD_SHIFT 32 +#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40 +#define HINIC_MBOX_HEADER_STATUS_SHIFT 48 +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54 + +#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F +#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F +#define HINIC_MBOX_HEADER_LAST_MASK 0x1 +#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MBOX_HEADER_CMD_MASK 0xFF +#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF +#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF + +#define HINIC_MBOX_HEADER_GET(val, field) \ + (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \ + HINIC_MBOX_HEADER_##field##_MASK) +#define HINIC_MBOX_HEADER_SET(val, field) \ + ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \ + HINIC_MBOX_HEADER_##field##_SHIFT) + +#define MBOX_SEGLEN_MASK \ + HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define HINIC_MBOX_SEG_LEN 48 +#define HINIC_MBOX_COMP_TIME 8000U +#define MBOX_MSG_POLLING_TIMEOUT 8000 + +#define HINIC_MBOX_DATA_SIZE 2040 + +#define MBOX_MAX_BUF_SZ 2048UL +#define MBOX_HEADER_SZ 8 + +#define MBOX_INFO_SZ 4 + +/* MBOX size is 64B, 8B for mbox_header, 4B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL + +/* mbox write back status is 16B, only first 4B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define SEQ_ID_START_VAL 0 +#define SEQ_ID_MAX_VAL 42 + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define HINIC_MGMT_RSP_AEQN 0 +#define HINIC_MBOX_RSP_AEQN 2 +#define HINIC_MBOX_RECV_AEQN 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF) + +#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xFF +#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) +#define MBOX_MSG_ID_INC(func_to_func) (MBOX_MSG_ID(func_to_func) = \ + (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) + +#define FUNC_ID_OFF_SET_8B 8 +#define FUNC_ID_OFF_SET_10B 10 + +/* max message counter wait to process 
for one function */
+#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
+
+enum hinic_hwif_direction_type {
+	HINIC_HWIF_DIRECT_SEND = 0,
+	HINIC_HWIF_RESPONSE = 1,
+};
+
+enum mbox_seg_type {
+	NOT_LAST_SEG,
+	LAST_SEG,
+};
+
+enum mbox_ordering_type {
+	STRONG_ORDER,
+};
+
+enum mbox_write_back_type {
+	WRITE_BACK = 1,
+};
+
+enum mbox_aeq_trig_type {
+	NOT_TRIGGER,
+	TRIGGER,
+};
+
+struct hinic_set_random_id {
+	u8 status;
+	u8 version;
+	u8 rsvd0[6];
+
+	u8 vf_in_pf;
+	u8 rsvd1;
+	u16 func_idx;
+	u32 random_id;
+};
+
+static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
+			  const void *buf_in, u16 in_size, u16 offset)
+{
+	u16 func_idx;
+
+	if (in_size < offset + sizeof(func_idx)) {
+		sdk_warn(hwdev->dev_hdl,
+			 "Received mailbox msg len %d is invalid, expected at least %d bytes\n",
+			 in_size, offset + (int)sizeof(func_idx));
+		return false;
+	}
+
+	func_idx = *((u16 *)((u8 *)buf_in + offset));
+
+	if (src_func_idx != func_idx) {
+		sdk_warn(hwdev->dev_hdl,
+			 "Received mailbox function id(0x%x) not equal to msg function id(0x%x)\n",
+			 src_func_idx, func_idx);
+		return false;
+	}
+
+	return true;
+}
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+				 void *buf_in, u16 in_size)
+{
+	return check_func_id(hwdev, func_idx, buf_in, in_size,
+			     FUNC_ID_OFF_SET_8B);
+}
+
+bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx,
+				  void *buf_in, u16 in_size)
+{
+	return check_func_id(hwdev, func_idx, buf_in, in_size,
+			     FUNC_ID_OFF_SET_10B);
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+			     enum hinic_mod_type mod, u16 cmd, void *msg,
+			     u16 msg_len, u16 dst_func,
+			     enum hinic_hwif_direction_type direction,
+			     enum hinic_mbox_ack_type ack_type,
+			     struct mbox_msg_info *msg_info);
+
+/**
+ * hinic_register_ppf_mbox_cb - register mbox callback for ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+			       enum hinic_mod_type mod,
+			       hinic_ppf_mbox_cb callback)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	if (mod >= HINIC_MOD_MAX)
+		return -EFAULT;
+
+	func_to_func->ppf_mbox_cb[mod] = callback;
+
+	set_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]);
+
+	return 0;
+}
+
+/**
+ * hinic_register_pf_mbox_cb - register mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+			      enum hinic_mod_type mod,
+			      hinic_pf_mbox_cb callback)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	if (mod >= HINIC_MOD_MAX)
+		return -EFAULT;
+
+	func_to_func->pf_mbox_cb[mod] = callback;
+
+	set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+	return 0;
+}
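+
+/*
+ * Illustrative usage sketch (not part of the driver itself): a PF-side
+ * module would typically register its handler once at init time, e.g.
+ *
+ *	err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ *					my_l2nic_pf_handler);
+ *
+ * where my_l2nic_pf_handler is a hypothetical callback matching the
+ * hinic_pf_mbox_cb typedef in hinic_mbox.h. The callback must stay valid
+ * until hinic_unregister_pf_mbox_cb() returns, since unregistration waits
+ * for any in-flight invocation (the *_CB_RUNNING bits) to finish.
+ */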
+
+/**
+ * hinic_register_vf_mbox_cb - register mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+			      enum hinic_mod_type mod,
+			      hinic_vf_mbox_cb callback)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	if (mod >= HINIC_MOD_MAX)
+		return -EFAULT;
+
+	func_to_func->vf_mbox_cb[mod] = callback;
+
+	set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+	return 0;
+}
+
+/**
+ * hinic_register_ppf_to_pf_mbox_cb - register mbox callback for pf from ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+				     enum hinic_mod_type mod,
+				     hinic_pf_recv_from_ppf_mbox_cb callback)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	if (mod >= HINIC_MOD_MAX)
+		return -EFAULT;
+
+	func_to_func->pf_recv_from_ppf_mbox_cb[mod] = callback;
+
+	set_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+		&func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+	return 0;
+}
+
+/**
+ * hinic_unregister_ppf_mbox_cb - unregister the mbox callback for ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+				  enum hinic_mod_type mod)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	clear_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC_PPF_MBOX_CB_RUNNING,
+			&func_to_func->ppf_mbox_cb_state[mod]))
+		usleep_range(900, 1000);
+
+	func_to_func->ppf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+				 enum hinic_mod_type mod)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
+			&func_to_func->pf_mbox_cb_state[mod]))
+		usleep_range(900, 1000);
+
+	func_to_func->pf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+				 enum hinic_mod_type mod)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
+			&func_to_func->vf_mbox_cb_state[mod]))
+		usleep_range(900, 1000);
+
+	func_to_func->vf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf
+ * from ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+					enum hinic_mod_type mod)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+	clear_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+		  &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+			&func_to_func->ppf_to_pf_mbox_cb_state[mod]))
+		usleep_range(900, 1000);
+
+	func_to_func->pf_recv_from_ppf_mbox_cb[mod] = NULL;
+}
+
+int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+		     u16 in_size, void *buf_out, u16 *out_size)
+{
+	struct hinic_mbox_func_to_func *func_to_func = handle;
+
+	sdk_warn(func_to_func->hwdev->dev_hdl, "VF commands are not supported yet\n");
+	return -EFAULT;
+}
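+
+/*
+ * Receive-side dispatch, summarizing the handlers below: on a VF the message
+ * goes to the registered vf_mbox_cb; on a PF/PPF the source function id
+ * decides the path (IS_PF_OR_PPF_SRC): PPF/PF sources are handled by the
+ * ppf or pf-from-ppf callback, and any other source is treated as a VF and
+ * handled by the pf mbox callback.
+ */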
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+				struct hinic_recv_mbox *recv_mbox,
+				void *buf_out, u16 *out_size)
+{
+	hinic_vf_mbox_cb cb;
+	int ret;
+
+	if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+		return -EINVAL;
+	}
+
+	set_bit(HINIC_VF_MBOX_CB_RUNNING,
+		&func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+	cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+	if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
+			   &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+		ret = cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+			 recv_mbox->mbox_len, buf_out, out_size);
+	} else {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n");
+		ret = -EINVAL;
+	}
+
+	clear_bit(HINIC_VF_MBOX_CB_RUNNING,
+		  &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+	return ret;
+}
+
+static int
+recv_pf_from_ppf_handler(struct hinic_mbox_func_to_func *func_to_func,
+			 struct hinic_recv_mbox *recv_mbox,
+			 void *buf_out, u16 *out_size)
+{
+	hinic_pf_recv_from_ppf_mbox_cb cb;
+	enum hinic_mod_type mod = recv_mbox->mod;
+	int ret;
+
+	if (mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 mod);
+		return -EINVAL;
+	}
+
+	set_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+		&func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+	cb = func_to_func->pf_recv_from_ppf_mbox_cb[mod];
+	if (cb && test_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+			   &func_to_func->ppf_to_pf_mbox_cb_state[mod])) {
+		ret = cb(func_to_func->hwdev, recv_mbox->cmd,
+			 recv_mbox->mbox, recv_mbox->mbox_len,
+			 buf_out, out_size);
+	} else {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n");
+		ret = -EINVAL;
+	}
+
+	clear_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+		  &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+	return ret;
+}
+
+static int recv_ppf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+				 struct hinic_recv_mbox *recv_mbox,
+				 u8 pf_id, void *buf_out, u16 *out_size)
+{
+	hinic_ppf_mbox_cb cb;
+	u16 vf_id = 0;
+	int ret;
+
+	if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+		return -EINVAL;
+	}
+
+	set_bit(HINIC_PPF_MBOX_CB_RUNNING,
+		&func_to_func->ppf_mbox_cb_state[recv_mbox->mod]);
+
+	cb = func_to_func->ppf_mbox_cb[recv_mbox->mod];
+	if (cb && test_bit(HINIC_PPF_MBOX_CB_REG,
+			   &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) {
+		ret = cb(func_to_func->hwdev, pf_id, vf_id, recv_mbox->cmd,
+			 recv_mbox->mbox, recv_mbox->mbox_len,
+			 buf_out, out_size);
+	} else {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %d\n",
+			 recv_mbox->mod);
+		ret = -EINVAL;
+	}
+
+	clear_bit(HINIC_PPF_MBOX_CB_RUNNING,
+		  &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]);
+
+	return ret;
+}
+
+static int
+recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+			     struct hinic_recv_mbox *recv_mbox,
+			     u16 src_func_idx, void *buf_out,
+			     u16 *out_size)
+{
+	hinic_pf_mbox_cb cb;
+	u16 vf_id = 0;
+	int ret;
+
+	if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+		return -EINVAL;
+	}
+
+	set_bit(HINIC_PF_MBOX_CB_RUNNING,
+		&func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+	cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
+	if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
+			   &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
+		vf_id = src_func_idx -
+			hinic_glb_pf_vf_offset(func_to_func->hwdev);
+		ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
+			 recv_mbox->mbox, recv_mbox->mbox_len,
+			 buf_out, out_size);
+	} else {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n",
+			 recv_mbox->mod);
+		ret = -EINVAL;
+	}
+
+	clear_bit(HINIC_PF_MBOX_CB_RUNNING,
+		  &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+	return ret;
+}
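+
+/*
+ * Note on the function id mapping (numbers are illustrative only):
+ * recv_pf_from_vf_mbox_handler() above derives the per-PF vf_id by
+ * subtracting hinic_glb_pf_vf_offset() from the source global function id;
+ * with a hypothetical offset of 16, global function 18 is vf_id 2.
+ * __hinic_mbox_to_vf() later applies the inverse mapping (offset + vf_id)
+ * when a PF addresses one of its VFs.
+ */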
sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u8 cmd, void *buf_in, u16 in_size, + u8 size) +{ + u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev); + int i; + + for (i = 0; i < size; i++) { + if (cmd == cmd_handle[i].cmd) { + if (cmd_handle[i].check_cmd) + return cmd_handle[i].check_cmd(hwdev, src_idx, + buf_in, in_size); + else + return true; + } + } + + return false; +} + +static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + u16 src_func_idx) +{ + struct hinic_hwdev *dev = func_to_func->hwdev; + struct mbox_msg_info msg_info = {0}; + u16 out_size = MBOX_MAX_BUF_SZ; + void *buf_out = recv_mbox->buf_out; + int err = 0; + + if (HINIC_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, + &out_size); + } else { /* pf/ppf process */ + + if (IS_PF_OR_PPF_SRC(src_func_idx)) { + if (HINIC_IS_PPF(dev)) { + err = recv_ppf_mbox_handler(func_to_func, + recv_mbox, + (u8)src_func_idx, + buf_out, &out_size); + if (err) + goto out; + } else { + err = recv_pf_from_ppf_handler(func_to_func, + recv_mbox, + buf_out, + &out_size); + if (err) + goto out; + } + /* The source is neither PF nor PPF, so it is from VF */ + } else { + err = recv_pf_from_vf_mbox_handler(func_to_func, + recv_mbox, + src_func_idx, + buf_out, &out_size); + } + } + +out: + if (recv_mbox->ack_type == MBOX_ACK) { + msg_info.msg_id = recv_mbox->msg_info.msg_id; + if (err == HINIC_DEV_BUSY_ACTIVE_FW || + err == HINIC_MBOX_PF_BUSY_ACTIVE_FW) + msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW; + else if (err == HINIC_MBOX_VF_CMD_ERROR) + msg_info.status = HINIC_MBOX_VF_CMD_ERROR; + else if (err) + msg_info.status = HINIC_MBOX_PF_SEND_ERR; + + /* if not data need to response, set out_size to 1 */ + if (!out_size || err) + out_size = MBOX_MSG_NO_DATA_LEN; + + send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd, + buf_out, out_size, src_func_idx, + HINIC_HWIF_RESPONSE, MBOX_ACK, + &msg_info); + } + + kfree(recv_mbox->buf_out); + kfree(recv_mbox->mbox); + kfree(recv_mbox); +} + +static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox, + u8 seq_id, u8 seg_len) +{ + if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN) + return false; + + if (seq_id == 0) { + recv_mbox->seq_id = seq_id; + } else { + if (seq_id != recv_mbox->seq_id + 1) + return false; + recv_mbox->seq_id = seq_id; + } + + return true; +} + +static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox) +{ + spin_lock(&func_to_func->mbox_lock); + if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) + complete(&recv_mbox->recv_done); + else + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", + func_to_func->send_msg_id, recv_mbox->msg_info.msg_id, + recv_mbox->msg_info.status); + spin_unlock(&func_to_func->mbox_lock); +} + +static void recv_func_mbox_work_handler(struct work_struct *work) +{ + struct hinic_mbox_work *mbox_work = + container_of(work, struct hinic_mbox_work, work); + struct hinic_recv_mbox *recv_mbox; + + recv_func_mbox_handler(mbox_work->func_to_func, 
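+
+/*
+ * Worked example of the segment bookkeeping used by recv_mbox_handler()
+ * below: messages travel in 48-byte segments (MBOX_SEG_LEN), so the largest
+ * message of HINIC_MBOX_DATA_SIZE (2040) bytes takes ceil(2040 / 48) = 43
+ * segments carrying sequence ids 0..42; that upper bound is exactly
+ * SEQ_ID_MAX_VAL, and seq_id is reset to SEQ_ID_MAX_VAL once a message
+ * completes or fails its sequence check.
+ */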
+
+static void recv_func_mbox_work_handler(struct work_struct *work)
+{
+	struct hinic_mbox_work *mbox_work =
+			container_of(work, struct hinic_mbox_work, work);
+	struct hinic_recv_mbox *recv_mbox;
+
+	recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
+			       mbox_work->src_func_idx);
+
+	recv_mbox =
+		&mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
+
+	atomic_dec(&recv_mbox->msg_cnt);
+
+	destroy_work(&mbox_work->work);
+
+	kfree(mbox_work);
+}
+
+static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+			      void *header, struct hinic_recv_mbox *recv_mbox)
+{
+	u64 mbox_header = *((u64 *)header);
+	void *mbox_body = MBOX_BODY_FROM_HDR(header);
+	struct hinic_recv_mbox *rcv_mbox_temp = NULL;
+	u16 src_func_idx;
+	struct hinic_mbox_work *mbox_work;
+	int pos;
+	u8 seq_id, seg_len;
+
+	seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+	seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+	src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+	if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Mailbox sequence and segment check failed, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+			src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+		recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+		return;
+	}
+
+	pos = seq_id * MBOX_SEG_LEN;
+	memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
+	       HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
+
+	if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+		return;
+
+	recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+	recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+	recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+	recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+	recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+	recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+	recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+
+	if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+	    HINIC_HWIF_RESPONSE) {
+		resp_mbox_handler(func_to_func, recv_mbox);
+		return;
+	}
+
+	if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Function %u has %d messages waiting to be processed, can't add to work queue\n",
+			 src_func_idx, atomic_read(&recv_mbox->msg_cnt));
+		return;
+	}
+
+	rcv_mbox_temp = kzalloc(sizeof(*rcv_mbox_temp), GFP_KERNEL);
+	if (!rcv_mbox_temp) {
+		sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox memory failed\n");
+		return;
+	}
+	memcpy(rcv_mbox_temp, recv_mbox, sizeof(*rcv_mbox_temp));
+
+	rcv_mbox_temp->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+	if (!rcv_mbox_temp->mbox) {
+		sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox message memory failed\n");
+		goto rcv_mbox_msg_err;
+	}
+	memcpy(rcv_mbox_temp->mbox, recv_mbox->mbox, MBOX_MAX_BUF_SZ);
+
+	rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+	if (!rcv_mbox_temp->buf_out) {
+		sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox out buffer memory failed\n");
+		goto rcv_mbox_buf_err;
+	}
+
+	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+	if (!mbox_work) {
+		sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory failed\n");
+		goto mbox_work_err;
+	}
+
+	mbox_work->func_to_func = func_to_func;
+	mbox_work->recv_mbox = rcv_mbox_temp;
+	mbox_work->src_func_idx = src_func_idx;
+
+	atomic_inc(&recv_mbox->msg_cnt);
+	INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
+	queue_work(func_to_func->workq, &mbox_work->work);
+
+	return;
+
+mbox_work_err:
+	kfree(rcv_mbox_temp->buf_out);
+
+rcv_mbox_buf_err:
+	kfree(rcv_mbox_temp->mbox);
+
+rcv_mbox_msg_err:
+	kfree(rcv_mbox_temp);
+}
+
+int set_vf_mbox_random_id(struct
hinic_hwdev *hwdev, u16 func_id) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + struct hinic_set_random_id rand_info = {0}; + u16 out_size = sizeof(rand_info); + int ret; + + rand_info.version = HINIC_CMD_VER_FUNC_ID; + rand_info.func_idx = func_id; + rand_info.vf_in_pf = (u8)(func_id - hinic_glb_pf_vf_offset(hwdev)); + get_random_bytes(&rand_info.random_id, sizeof(u32)); + + func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id; + + ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SET_VF_RANDOM_ID, + &rand_info, sizeof(rand_info), + &rand_info, &out_size, 0); + if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED && + rand_info.status) || !out_size || ret) { + sdk_err(hwdev->dev_hdl, "Failed to set vf random id, err: %d, status: 0x%x, out size: 0x%x\n", + ret, rand_info.status, out_size); + return -EINVAL; + } + + if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED) + return rand_info.status; + + func_to_func->vf_mbx_old_rand_id[func_id] = + func_to_func->vf_mbx_rand_id[func_id]; + + return 0; +} + +static void update_random_id_work_handler(struct work_struct *work) +{ + struct hinic_mbox_work *mbox_work = + container_of(work, struct hinic_mbox_work, work); + struct hinic_mbox_func_to_func *func_to_func = mbox_work->func_to_func; + u16 src = mbox_work->src_func_idx; + int err; + + err = set_vf_mbox_random_id(func_to_func->hwdev, src); + if (err) + sdk_warn(func_to_func->hwdev->dev_hdl, "Update vf id(0x%x) random id fail\n", + mbox_work->src_func_idx); + + destroy_work(&mbox_work->work); + + kfree(mbox_work); +} + +bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func, + u8 *header) +{ + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u64 mbox_header = *((u64 *)header); + struct hinic_mbox_work *mbox_work; + u32 random_id; + u16 offset, src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + int vf_in_pf; + + if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random) + return true; + + if (!HINIC_IS_PPF(hwdev)) { + offset = hinic_glb_pf_vf_offset(hwdev); + vf_in_pf = src - offset; + + if (vf_in_pf < 1 || vf_in_pf > hinic_func_max_vf(hwdev)) { + sdk_warn(hwdev->dev_hdl, + "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n", + src, (offset + 1), + (hinic_func_max_vf(hwdev) + offset)); + return false; + } + } + + random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN + + MBOX_HEADER_SZ)); + + if (random_id == func_to_func->vf_mbx_rand_id[src] || + random_id == func_to_func->vf_mbx_old_rand_id[src]) + return true; + + sdk_warn(hwdev->dev_hdl, + "Receive func_id(0x%x) mailbox random id(0x%x) mismatch with pf reserve(0x%x)\n", + src, random_id, func_to_func->vf_mbx_rand_id[src]); + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) { + sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); + return false; + } + + mbox_work->func_to_func = func_to_func; + mbox_work->src_func_idx = src; + + INIT_WORK(&mbox_work->work, update_random_id_work_handler); + queue_work(func_to_func->workq, &mbox_work->work); + + return false; +} + +void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_recv_mbox *recv_mbox; + u64 mbox_header = *((u64 *)header); + u64 src, dir; + + func_to_func = ((struct hinic_hwdev *)handle)->func_to_func; + + dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION); + src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (src >= HINIC_MAX_FUNCTIONS) { + 
sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox source function id:%u is invalid\n", (u32)src); + return; + } + + if (!check_vf_mbox_random_id(func_to_func, header)) + return; + + recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ? + &func_to_func->mbox_send[src] : + &func_to_func->mbox_resp[src]; + + recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox); +} + +void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_send_mbox *send_mbox; + + func_to_func = ((struct hinic_hwdev *)handle)->func_to_func; + send_mbox = &func_to_func->send_mbox; + + complete(&send_mbox->send_done); +} + +static void clear_mbox_status(struct hinic_send_mbox *mbox) +{ + *mbox->wb_status = 0; + + /* clear mailbox write back status */ + wmb(); +} + + +static void mbox_copy_header(struct hinic_hwdev *hwdev, + struct hinic_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) { + __raw_writel(*(data + i), mbox->data + i * sizeof(u32)); + } +} + +static void mbox_copy_send_data(struct hinic_hwdev *hwdev, + struct hinic_send_mbox *mbox, void *seg, + u16 seg_len) +{ + u32 *data = seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; + + /* The mbox message should be aligned in 4 bytes. */ + if (seg_len % chk_sz) { + memcpy(mbox_max_buf, seg, seg_len); + data = (u32 *)mbox_max_buf; + } + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) { + __raw_writel(*(data + i), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); + } +} + +static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func, + u16 dst_func, u16 dst_aeqn, u16 seg_len, + int poll) +{ + u32 mbox_int, mbox_ctrl; + + /* msg_len - the total mbox msg len */ + u16 rsp_aeq = (dst_aeqn == 0) ? 
0 : HINIC_MBOX_RSP_AEQN; + + mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) | + HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) | + HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ + + MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2, + TX_SIZE) | + HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); + + wmb(); /* writing the mbox int attributes */ + mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); + + if (poll) + mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); + else + mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +void dump_mox_reg(struct hinic_hwdev *hwdev) +{ + u32 val; + + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val); + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val); +} + +static u16 get_mbox_status(struct hinic_send_mbox *mbox) +{ + /* write back is 16B, but only use first 4B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rmb(); /* verify reading before check */ + + return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, + u64 header, u16 dst_func, void *seg, u16 seg_len, + int poll, void *msg_info) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn, wb_status = 0, errcode; + u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION); + struct completion *done = &send_mbox->send_done; + ulong jif; + u32 cnt = 0; + + if (num_aeqs >= 4) + dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ? + HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN; + else + dst_aeqn = 0; + + if (!poll) + init_completion(done); + + clear_mbox_status(send_mbox); + + mbox_copy_header(hwdev, send_mbox, &header); + + mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll); + + wmb(); /* writing the mbox msg attributes */ + + if (poll) { + while (cnt < MBOX_MSG_POLLING_TIMEOUT) { + wb_status = get_mbox_status(send_mbox); + if (MBOX_STATUS_FINISHED(wb_status)) + break; + + usleep_range(900, 1000); + cnt++; + } + + if (cnt == MBOX_MSG_POLLING_TIMEOUT) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", + wb_status); + dump_mox_reg(hwdev); + return -ETIMEDOUT; + } + } else { + jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME); + if (!wait_for_completion_timeout(done, jif)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout\n"); + dump_mox_reg(hwdev); + destory_completion(done); + return -ETIMEDOUT; + } + destory_completion(done); + + wb_status = get_mbox_status(send_mbox); + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %d error, wb status: 0x%x\n", + dst_func, wb_status); + errcode = MBOX_STATUS_ERRCODE(wb_status); + return errcode ? 
errcode : -EFAULT;
+	}
+
+	return 0;
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+			     enum hinic_mod_type mod, u16 cmd, void *msg,
+			     u16 msg_len, u16 dst_func,
+			     enum hinic_hwif_direction_type direction,
+			     enum hinic_mbox_ack_type ack_type,
+			     struct mbox_msg_info *msg_info)
+{
+	struct hinic_hwdev *hwdev = func_to_func->hwdev;
+	int err = 0;
+	u32 seq_id = 0;
+	u16 seg_len = MBOX_SEG_LEN;
+	u16 left = msg_len;
+	u8 *msg_seg = (u8 *)msg;
+	u64 header = 0;
+
+	down(&func_to_func->msg_send_sem);
+
+	header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+		 HINIC_MBOX_HEADER_SET(mod, MODULE) |
+		 HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+		 HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+		 HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+		 HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+		 HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+		 HINIC_MBOX_HEADER_SET(cmd, CMD) |
+		 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+		 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+		 /* the VF's offset to its associated PF */
+		 HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev),
+				       SRC_GLB_FUNC_IDX);
+
+	while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+		if (left <= HINIC_MBOX_SEG_LEN) {
+			header &= ~MBOX_SEGLEN_MASK;
+			header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+			header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+			seg_len = left;
+		}
+
+		err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+				    seg_len, func_to_func->send_ack_mod,
+				    msg_info);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n",
+				HINIC_MBOX_HEADER_GET(header, SEQID));
+			goto send_err;
+		}
+
+		left -= HINIC_MBOX_SEG_LEN;
+		msg_seg += HINIC_MBOX_SEG_LEN;
+
+		seq_id++;
+		header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+						  SEQID));
+		header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+	}
+
+send_err:
+	up(&func_to_func->msg_send_sem);
+
+	return err;
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+				   enum mbox_event_state event_flag)
+{
+	spin_lock(&func_to_func->mbox_lock);
+	func_to_func->event_flag = event_flag;
+	spin_unlock(&func_to_func->mbox_lock);
+}
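+
+/*
+ * Summary of the acknowledged send path implemented below: the caller holds
+ * mbox_send_sem, takes a fresh msg_id (MBOX_MSG_ID_INC), marks the event
+ * EVENT_START and then waits on the per-destination mbox_resp completion;
+ * resp_mbox_handler() completes it only when the response carries the
+ * matching msg_id.
+ */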
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+		       enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+		       void *buf_in, u16 in_size, void *buf_out,
+		       u16 *out_size, u32 timeout)
+{
+	/* use mbox_resp to hold the data responded from the other function */
+	struct hinic_recv_mbox *mbox_for_resp;
+	struct mbox_msg_info msg_info = {0};
+	ulong timeo;
+	int err;
+
+	if (!func_to_func->hwdev->chip_present_flag)
+		return -EPERM;
+
+	mbox_for_resp = &func_to_func->mbox_resp[dst_func];
+
+	down(&func_to_func->mbox_send_sem);
+
+	init_completion(&mbox_for_resp->recv_done);
+
+	msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
+
+	set_mbox_to_func_event(func_to_func, EVENT_START);
+
+	err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+				dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
+				&msg_info);
+	if (err) {
+		sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox failed, msg_id: %d\n",
+			msg_info.msg_id);
+		set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+		goto send_err;
+	}
+
+	timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
+	if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+		set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+		err = -ETIMEDOUT;
+		goto send_err;
+	}
+
+	set_mbox_to_func_event(func_to_func, EVENT_END);
+
+	if (mbox_for_resp->msg_info.status) {
+		err = mbox_for_resp->msg_info.status;
+		if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+			sdk_err(func_to_func->hwdev->dev_hdl, "Mbox response error(0x%x)\n",
+				mbox_for_resp->msg_info.status);
+		goto send_err;
+	}
+
+	if (buf_out && out_size) {
+		if (*out_size < mbox_for_resp->mbox_len) {
+			sdk_err(func_to_func->hwdev->dev_hdl,
+				"Invalid response mbox message length: %d for mod %d cmd %d, should be less than: %d\n",
+				mbox_for_resp->mbox_len, mod, cmd, *out_size);
+			err = -EFAULT;
+			goto send_err;
+		}
+
+		if (mbox_for_resp->mbox_len)
+			memcpy(buf_out, mbox_for_resp->mbox,
+			       mbox_for_resp->mbox_len);
+
+		*out_size = mbox_for_resp->mbox_len;
+	}
+
+send_err:
+	destory_completion(&mbox_for_resp->recv_done);
+	up(&func_to_func->mbox_send_sem);
+
+	return err;
+}
+
+static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
+				  void *buf_in, u16 in_size)
+{
+	if (!buf_in || !in_size)
+		return -EINVAL;
+
+	if (in_size > HINIC_MBOX_DATA_SIZE) {
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Mbox msg len(%d) exceeds limit(%d)\n",
+			in_size, HINIC_MBOX_DATA_SIZE);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id,
+		       enum hinic_mod_type mod, u8 cmd, void *buf_in,
+		       u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+	int err;
+
+	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+	if (err)
+		return err;
+
+	if (!HINIC_IS_PPF(hwdev)) {
+		sdk_err(hwdev->dev_hdl, "Params error, only the ppf can send messages to another host, func_type: %d\n",
+			hinic_func_type(hwdev));
+		return -EINVAL;
+	}
+
+	return hinic_mbox_to_func(func_to_func, mod, cmd, dest_host_ppf_id,
+				  buf_in, in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev,
+		      enum hinic_mod_type mod, u8 cmd, void *buf_in,
+		      u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+	int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+	if (err)
+		return err;
+
+	if (HINIC_IS_VF(hwdev) || HINIC_IS_PPF(hwdev)) {
+		sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+			hinic_func_type(hwdev));
+		return -EINVAL;
+	}
+
+	return hinic_mbox_to_func(func_to_func, mod, cmd, hinic_ppf_idx(hwdev),
+				  buf_in, in_size, buf_out, out_size, timeout);
+}
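+
+/*
+ * Note: unlike the PPF/host variants above, the VF-to-PF senders below
+ * additionally take function ownership (hinic_func_own_get()) around the
+ * send, serializing a VF's traffic towards its PF, and release it with
+ * hinic_func_own_free().
+ */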
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+		     enum hinic_mod_type mod, u8 cmd, void *buf_in,
+		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+	int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+	if (err)
+		return err;
+
+	if (!HINIC_IS_VF(hwdev)) {
+		sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+			hinic_func_type(hwdev));
+		return -EINVAL;
+	}
+
+	err = hinic_func_own_get(hwdev);
+	if (err)
+		return err;
+
+	/* port_to_port_idx indicates which PCIe interface the PF is
+	 * connected to
+	 */
+	err = hinic_mbox_to_func(func_to_func, mod, cmd,
+				 hinic_pf_id_of_vf_hw(hwdev), buf_in, in_size,
+				 buf_out, out_size, timeout);
+	hinic_func_own_free(hwdev);
+	return err;
+}
+
+int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx,
+			      enum hinic_mod_type mod, u8 cmd, void *buf_in,
+			      u16 in_size)
+{
+	struct mbox_msg_info msg_info = {0};
+	int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size);
+
+	if (err)
+		return err;
+
+	down(&hwdev->func_to_func->mbox_send_sem);
+
+	err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size,
+				func_idx, HINIC_HWIF_DIRECT_SEND, MBOX_NO_ACK,
+				&msg_info);
+	if (err)
+		sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n");
+
+	up(&hwdev->func_to_func->mbox_send_sem);
+
+	return err;
+}
+
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+			    u8 cmd, void *buf_in, u16 in_size)
+{
+	int err;
+
+	err = hinic_func_own_get(hwdev);
+	if (err)
+		return err;
+
+	err = hinic_mbox_to_func_no_ack(hwdev, hinic_pf_id_of_vf_hw(hwdev),
+					mod, cmd, buf_in, in_size);
+	hinic_func_own_free(hwdev);
+	return err;
+}
+
+int __hinic_mbox_to_vf(void *hwdev,
+		       enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+		       u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+	struct hinic_mbox_func_to_func *func_to_func;
+	int err;
+	u16 dst_func_idx;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func;
+	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+	if (err)
+		return err;
+
+	if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n",
+			hinic_func_type(hwdev));
+		return -EINVAL;
+	}
+
+	if (!vf_id) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"VF id(%d) error!\n", vf_id);
+		return -EINVAL;
+	}
+
+	/* the VF's global function id is this PF's vf offset plus vf_id */
+	dst_func_idx = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+
+	return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
+				  in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_ppf_to_vf(void *hwdev, enum hinic_mod_type mod,
+			 u16 func_id, u8 cmd, void *buf_in,
+			 u16 in_size, void *buf_out, u16 *out_size,
+			 u32 timeout)
+{
+	struct hinic_mbox_func_to_func *func_to_func;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func;
+	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+	if (err)
+		return err;
+
+	if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n",
+			hinic_func_type(hwdev));
+		return -EINVAL;
+	}
+
+	return hinic_mbox_to_func(func_to_func, mod, cmd, func_id, buf_in,
+				  in_size, buf_out, out_size, timeout);
+}
+EXPORT_SYMBOL(hinic_mbox_ppf_to_vf);
+
+int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev,
+			 enum hinic_mod_type mod, u16 dst_pf_id, u8 cmd,
+			 void *buf_in, u16 in_size, void *buf_out,
+			 u16 *out_size, u32 timeout)
+{
+	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+	int err;
+
+	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+	if (err)
+		return err;
+
+	if (!HINIC_IS_PPF(hwdev)) {
+		sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+			hinic_func_type(hwdev));
+		return -EINVAL;
+	}
+
+	if (hinic_ppf_idx(hwdev) == dst_pf_id) {
+		sdk_err(hwdev->dev_hdl,
+			"Params error, dst_pf_id(0x%x) is ppf\n", dst_pf_id);
+		return -EINVAL;
+	}
+
+	return hinic_mbox_to_func(func_to_func, mod, cmd, dst_pf_id, buf_in,
+				  in_size, buf_out, out_size, timeout);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+	int err;
+
+	mbox_info->seq_id =
SEQ_ID_MAX_VAL;
+
+	mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+	if (!mbox_info->mbox)
+		return -ENOMEM;
+
+	mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+	if (!mbox_info->buf_out) {
+		err = -ENOMEM;
+		goto alloc_buf_out_err;
+	}
+
+	atomic_set(&mbox_info->msg_cnt, 0);
+
+	return 0;
+
+alloc_buf_out_err:
+	kfree(mbox_info->mbox);
+
+	return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+	kfree(mbox_info->buf_out);
+	kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+	u16 func_idx, i;
+	int err;
+
+	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+		err = init_mbox_info(&mbox_info[func_idx]);
+		if (err) {
+			pr_err("Failed to init mbox info\n");
+			goto init_mbox_info_err;
+		}
+	}
+
+	return 0;
+
+init_mbox_info_err:
+	for (i = 0; i < func_idx; i++)
+		clean_mbox_info(&mbox_info[i]);
+
+	return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+	u16 func_idx;
+
+	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+		clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+	send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+	struct hinic_hwdev *hwdev = func_to_func->hwdev;
+	u32 addr_h, addr_l;
+
+	send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl,
+						  MBOX_WB_STATUS_LEN,
+						  &send_mbox->wb_paddr,
+						  GFP_KERNEL);
+	if (!send_mbox->wb_vaddr)
+		return -ENOMEM;
+
+	send_mbox->wb_status = send_mbox->wb_vaddr;
+
+	addr_h = upper_32_bits(send_mbox->wb_paddr);
+	addr_l = lower_32_bits(send_mbox->wb_paddr);
+
+	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+			     addr_h);
+	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+			     addr_l);
+
+	return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+	struct hinic_hwdev *hwdev = func_to_func->hwdev;
+
+	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+			     0);
+	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+			     0);
+
+	dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN,
+			  send_mbox->wb_vaddr,
+			  send_mbox->wb_paddr);
+}
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
+{
+	u8 vf_in_pf;
+	int err = 0;
+
+	if (hinic_func_type(hwdev) == TYPE_VF)
+		return 0;
+
+	for (vf_in_pf = 1; vf_in_pf <= hinic_func_max_vf(hwdev); vf_in_pf++) {
+		err = set_vf_mbox_random_id(
+			hwdev, (hinic_glb_pf_vf_offset(hwdev) + vf_in_pf));
+		if (err)
+			break;
+	}
+
+	if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+		hwdev->func_to_func->support_vf_random = false;
+		err = 0;
+		sdk_warn(hwdev->dev_hdl, "Mgmt does not support setting vf random id\n");
+	} else if (!err) {
+		hwdev->func_to_func->support_vf_random = true;
+		sdk_info(hwdev->dev_hdl, "PF set vf random id successfully\n");
+	}
+
+	return err;
+}
+
+void hinic_set_mbox_seg_ack_mod(struct hinic_hwdev *hwdev,
+				enum hinic_mbox_send_mod mod)
+{
+	if (!hwdev || !hwdev->func_to_func)
+		return;
+
+	hwdev->func_to_func->send_ack_mod = mod;
+}
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+	struct hinic_mbox_func_to_func *func_to_func;
+	struct card_node *chip_node;
+	int err;
+
+	func_to_func =
kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) + return -ENOMEM; + + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + chip_node = hwdev->chip_node; + func_to_func->vf_mbx_rand_id = chip_node->vf_mbx_rand_id; + func_to_func->vf_mbx_old_rand_id = chip_node->vf_mbx_old_rand_id; + sema_init(&func_to_func->mbox_send_sem, 1); + sema_init(&func_to_func->msg_send_sem, 1); + spin_lock_init(&func_to_func->mbox_lock); + func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME); + if (!func_to_func->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); + err = -ENOMEM; + goto create_mbox_workq_err; + } + + err = alloc_mbox_info(func_to_func->mbox_send); + if (err) { + sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_active fail\n"); + goto alloc_mbox_for_send_err; + } + + err = alloc_mbox_info(func_to_func->mbox_resp); + if (err) { + sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_passive fail\n"); + goto alloc_mbox_for_resp_err; + } + + err = alloc_mbox_wb_status(func_to_func); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); + goto alloc_wb_status_err; + } + + prepare_send_mbox(func_to_func); + + func_to_func->send_ack_mod = HINIC_MBOX_SEND_MSG_POLL; + + return 0; + +alloc_wb_status_err: + free_mbox_info(func_to_func->mbox_resp); + +alloc_mbox_for_resp_err: + free_mbox_info(func_to_func->mbox_send); + +alloc_mbox_for_send_err: + destroy_workqueue(func_to_func->workq); + +create_mbox_workq_err: + spin_lock_deinit(&func_to_func->mbox_lock); + sema_deinit(&func_to_func->msg_send_sem); + sema_deinit(&func_to_func->mbox_send_sem); + kfree(func_to_func); + + return err; +} + +void hinic_func_to_func_free(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + /* destroy workqueue before free related mbox resources in case of + * illegal resource access + */ + destroy_workqueue(func_to_func->workq); + + free_mbox_wb_status(func_to_func); + free_mbox_info(func_to_func->mbox_resp); + free_mbox_info(func_to_func->mbox_send); + spin_lock_deinit(&func_to_func->mbox_lock); + sema_deinit(&func_to_func->mbox_send_sem); + sema_deinit(&func_to_func->msg_send_sem); + + kfree(func_to_func); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h new file mode 100644 index 000000000000..b53008c166db --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ *
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+enum hinic_mbox_seg_errcode {
+	MBOX_ERRCODE_NO_ERRORS = 0,
+	/* VF sent the mailbox data to the wrong destination function */
+	MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100,
+	/* PPF sent the mailbox data to the wrong destination function */
+	MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200,
+	/* PF sent the mailbox data to the wrong destination function */
+	MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300,
+	/* The mailbox data size is set to all zero */
+	MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400,
+	/* The sender function attribute has not been learned by CPI hardware */
+	MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500,
+	/* The receiver function attr has not been learned by CPI hardware */
+	MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600,
+};
+
+enum hinic_mbox_ack_type {
+	MBOX_ACK,
+	MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+	u8 msg_id;
+	u8 status; /* only 6 bits can be used */
+};
+
+struct hinic_recv_mbox {
+	struct completion recv_done;
+	void *mbox;
+	u8 cmd;
+	enum hinic_mod_type mod;
+	u16 mbox_len;
+	void *buf_out;
+	enum hinic_mbox_ack_type ack_type;
+	struct mbox_msg_info msg_info;
+	u8 seq_id;
+	atomic_t msg_cnt;
+};
+
+struct hinic_send_mbox {
+	struct completion send_done;
+	u8 *data;
+
+	u64 *wb_status; /* write back status */
+	void *wb_vaddr;
+	dma_addr_t wb_paddr;
+};
+
+typedef int (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+				u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+				u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_ppf_mbox_cb)(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+				 void *buf_in, u16 in_size, void *buf_out,
+				 u16 *out_size);
+typedef int (*hinic_pf_recv_from_ppf_mbox_cb)(void *handle, u8 cmd,
+					      void *buf_in, u16 in_size,
+					      void *buf_out, u16 *out_size);
+
+enum mbox_event_state {
+	EVENT_START = 0,
+	EVENT_FAIL,
+	EVENT_TIMEOUT,
+	EVENT_END,
+};
+
+enum hinic_mbox_cb_state {
+	HINIC_VF_MBOX_CB_REG = 0,
+	HINIC_VF_MBOX_CB_RUNNING,
+	HINIC_PF_MBOX_CB_REG,
+	HINIC_PF_MBOX_CB_RUNNING,
+	HINIC_PPF_MBOX_CB_REG,
+	HINIC_PPF_MBOX_CB_RUNNING,
+	HINIC_PPF_TO_PF_MBOX_CB_REG,
+	HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+};
+
+enum hinic_mbox_send_mod {
+	HINIC_MBOX_SEND_MSG_INT,
+	HINIC_MBOX_SEND_MSG_POLL,
+};
+
+struct hinic_mbox_func_to_func {
+	struct hinic_hwdev *hwdev;
+
+	struct semaphore mbox_send_sem;
+	struct semaphore msg_send_sem;
+	struct hinic_send_mbox send_mbox;
+
+	struct workqueue_struct *workq;
+
+	struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+	struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+	hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+	hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+	hinic_ppf_mbox_cb ppf_mbox_cb[HINIC_MOD_MAX];
+	hinic_pf_recv_from_ppf_mbox_cb pf_recv_from_ppf_mbox_cb[HINIC_MOD_MAX];
+	unsigned long ppf_to_pf_mbox_cb_state[HINIC_MOD_MAX];
+	unsigned long ppf_mbox_cb_state[HINIC_MOD_MAX];
+	unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+	unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
+	u8 send_msg_id;
+	enum mbox_event_state event_flag;
+	/* lock for mbox event flag */
+	spinlock_t mbox_lock;
+
+	u32 *vf_mbx_old_rand_id;
+	u32 *vf_mbx_rand_id;
+	bool support_vf_random;
+	enum hinic_mbox_send_mod send_ack_mod;
+};
+
+struct hinic_mbox_work {
+	struct work_struct work;
+	u16
src_func_idx; + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_recv_mbox *recv_mbox; +}; + +struct vf_cmd_check_handle { + u8 cmd; + bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx, + void *buf_in, u16 in_size); +}; + +int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_ppf_mbox_cb callback); + +int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_pf_mbox_cb callback); + +int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_vf_mbox_cb callback); + +int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_pf_recv_from_ppf_mbox_cb callback); + +void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); + +void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size); + +int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev); + +bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size); + +bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size); + +bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u8 cmd, void *buf_in, u16 in_size, + u8 size); + +int hinic_func_to_func_init(struct hinic_hwdev *hwdev); + +void hinic_func_to_func_free(struct hinic_hwdev *hwdev); + +int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size); + +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size); + +int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u16 dst_pf_id, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); +int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, u16 dst_func, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int __hinic_mbox_to_vf(void *hwdev, + enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +void hinic_set_mbox_seg_ack_mod(struct hinic_hwdev *hwdev, + enum hinic_mbox_send_mod mod); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c new file mode 100644 index 000000000000..c544921e3294 --- /dev/null +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c @@ -0,0 +1,1452 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" + +#include "hinic_hwif.h" +#include "hinic_api_cmd.h" +#include "hinic_mgmt.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_eqs.h" + +#define BUF_OUT_DEFAULT_SIZE 1 +#define SEGMENT_LEN 48 + +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HINIC_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define MAX_PF_MGMT_BUF_SIZE 2048UL + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define MGMT_MSG_TIMEOUT 5000 /* millisecond */ + +#define SYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_FLAG 0x200 + +#define MSG_NO_RESP 0xFFFF + +#define MAX_MSG_SZ 2016 + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ) + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) + +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +/** + * hinic_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: pri handle function + * @callback: the handler for a sync message that will handle messages + * Return: 0 - success, negative - failure + */ +int hinic_register_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod, + void *pri_handle, hinic_mgmt_msg_cb callback) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + + if (mod >= HINIC_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic_register_mgmt_msg_cb); + +/** + * hinic_unregister_mgmt_msg_cb - unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + */ +void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + + if (!hwdev || mod >= HINIC_MOD_HW_MAX) 
+ return; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(HINIC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} +EXPORT_SYMBOL(hinic_unregister_mgmt_msg_cb); + +void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev || !proc) + return; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Register recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + pf_to_mgmt->proc.info[cmd_idx].cmd = cmd; + pf_to_mgmt->proc.info[cmd_idx].proc = proc; + + pf_to_mgmt->proc.cmd_num++; +} + +void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev) + return; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Unregister recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + pf_to_mgmt->proc.info[cmd_idx].cmd = 0; + pf_to_mgmt->proc.info[cmd_idx].proc = NULL; + pf_to_mgmt->proc.cmd_num--; + } + } +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + */ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: message ack type + * @direction: the direction of the original message + * @cmd: cmd type + * @msg_id: message id + */ +static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, enum hinic_mod_type mod, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + enum hinic_mgmt_cmd cmd, u32 msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HINIC_MSG_HEADER_SET(0, SEQID) | + HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct hinic_hwdev *hwdev, + u64 *header, u16 msg_len, + enum 
+static void clp_prepare_header(struct hinic_hwdev *hwdev,
+			       u64 *header, u16 msg_len,
+			       enum hinic_mod_type mod,
+			       enum hinic_msg_ack_type ack_type,
+			       enum hinic_msg_direction_type direction,
+			       enum hinic_mgmt_cmd cmd, u32 msg_id)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+
+	*header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+		  HINIC_MSG_HEADER_SET(mod, MODULE) |
+		  HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+		  HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+		  HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+		  HINIC_MSG_HEADER_SET(0, SEQID) |
+		  HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+		  HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+		  HINIC_MSG_HEADER_SET(cmd, CMD) |
+		  HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+		  HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+		  HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer to the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ */
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg,
+			     int msg_len)
+{
+	memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+	mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+	memcpy(mgmt_cmd, header, sizeof(*header));
+
+	mgmt_cmd += sizeof(*header);
+	memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * Return: 0 - success, negative - failure
+ */
+static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  enum hinic_mod_type mod, u8 cmd,
+				  void *msg, u16 msg_len,
+				  enum hinic_msg_direction_type direction,
+				  u16 resp_msg_id)
+{
+	void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+	struct hinic_api_cmd_chain *chain;
+	u64 header;
+	u16 cmd_size = mgmt_msg_len(msg_len);
+
+	if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev))
+		return -EFAULT;
+
+	if (cmd_size > MAX_MSG_SZ)
+		return -EINVAL;
+
+	if (direction == HINIC_MSG_RESPONSE)
+		prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+			       direction, cmd, resp_msg_id);
+	else
+		prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+			       direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+	prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+	return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+				   cmd_size);
+}
+
+int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod,
+			   u8 cmd, void *buf_in, u16 in_size)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+	int err;
+
+	pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+	/* Lock the async_msg_buf */
+	spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+	ASYNC_MSG_ID_INC(pf_to_mgmt);
+
+	err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size,
+				     HINIC_MSG_DIRECT_SEND, MSG_NO_RESP);
+	spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+
+	if (err) {
+		sdk_err(dev, "Failed to send async mgmt msg\n");
+		return err;
+	}
+
+	return 0;
+}
+
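+/* sync messages go through sync_msg_buf under sync_msg_lock and wait on a
+ * completion; async messages use async_msg_buf and a dedicated api cmd
+ * chain, so the two paths never share a buffer
+ */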
+/**
+ * send_msg_to_mgmt_sync - send sync message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * @ack_type: message ack type
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * Return: 0 - success, negative - failure
+ */
+static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				 enum hinic_mod_type mod, u8 cmd,
+				 void *msg, u16 msg_len,
+				 enum hinic_msg_ack_type ack_type,
+				 enum hinic_msg_direction_type direction,
+				 u16 resp_msg_id)
+{
+	void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+	struct hinic_api_cmd_chain *chain;
+	u64 header;
+	u16 cmd_size = mgmt_msg_len(msg_len);
+
+	if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev))
+		return -EFAULT;
+
+	if (cmd_size > MAX_MSG_SZ)
+		return -EINVAL;
+
+	if (direction == HINIC_MSG_RESPONSE)
+		prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+			       direction, cmd, resp_msg_id);
+	else
+		prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+			       direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt));
+
+	if (ack_type == HINIC_MSG_ACK)
+		pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START);
+
+	prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
+
+	return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+				   cmd_size);
+}
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			  void *buf_in, u16 in_size, void *buf_out,
+			  u16 *out_size, u32 timeout)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+	struct hinic_recv_msg *recv_msg;
+	struct hinic_msg_head *msg_head;
+	struct completion *recv_done;
+	ulong timeo;
+	int err;
+	ulong ret;
+
+	/* the number of aeqs is fixed at 3, so the response aeq id must be < 3 */
+	if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC) {
+		msg_head = buf_in;
+
+		if (msg_head->resp_aeq_num >= HINIC_MAX_AEQS)
+			msg_head->resp_aeq_num = 0;
+	}
+
+	pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+	/* Lock the sync_msg_buf */
+	down(&pf_to_mgmt->sync_msg_lock);
+	recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+	recv_done = &recv_msg->recv_done;
+
+	init_completion(recv_done);
+
+	err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+				    HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
+				    MSG_NO_RESP);
+	if (err) {
+		sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %d\n",
+			pf_to_mgmt->sync_msg_id);
+		pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL);
+		goto unlock_sync_msg;
+	}
+
+	timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+	ret = wait_for_completion_timeout(recv_done, timeo);
+	if (!ret) {
+		sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %d\n",
+			pf_to_mgmt->sync_msg_id);
+		hinic_dump_aeq_info((struct hinic_hwdev *)hwdev);
+		err = -ETIMEDOUT;
+		pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT);
+		goto unlock_sync_msg;
+	}
+	pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END);
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) {
+		destory_completion(recv_done);
+		up(&pf_to_mgmt->sync_msg_lock);
+		return -ETIMEDOUT;
+	}
+
+	if (buf_out && out_size) {
+		if (*out_size < recv_msg->msg_len) {
+			sdk_err(dev, "Invalid response message length: %d for mod %d cmd %d from mgmt, should be less than: %d\n",
+				recv_msg->msg_len, mod, cmd, *out_size);
+			err = -EFAULT;
+			goto unlock_sync_msg;
+		}
+
+		if (recv_msg->msg_len)
+			memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
+
+		*out_size = recv_msg->msg_len;
+	}
+
+unlock_sync_msg:
+	destory_completion(recv_done);
+	up(&pf_to_mgmt->sync_msg_lock);
+
+	return err;
+}
+
+static int __get_clp_reg(void *hwdev, enum clp_data_type data_type,
+			 enum clp_reg_type reg_type, u32 *reg_addr)
+{
+	struct hinic_hwdev *dev = hwdev;
+	u32 offset;
+
+	offset = HINIC_CLP_REG_GAP * hinic_pcie_itf_id(dev);
+
+	switch (reg_type) {
+	case HINIC_CLP_BA_HOST:
+		*reg_addr = (data_type == HINIC_CLP_REQ_HOST) ?
+			    HINIC_CLP_REG(REQ_SRAM_BA) :
+			    HINIC_CLP_REG(RSP_SRAM_BA);
+		break;
+
+	case HINIC_CLP_SIZE_HOST:
+		*reg_addr = HINIC_CLP_REG(SRAM_SIZE);
+		break;
+
+	case HINIC_CLP_LEN_HOST:
+		*reg_addr = (data_type == HINIC_CLP_REQ_HOST) ?
+			    HINIC_CLP_REG(REQ) : HINIC_CLP_REG(RSP);
+		break;
+
+	case HINIC_CLP_START_REQ_HOST:
+		*reg_addr = HINIC_CLP_REG(REQ);
+		break;
+
+	case HINIC_CLP_READY_RSP_HOST:
+		*reg_addr = HINIC_CLP_REG(RSP);
+		break;
+
+	default:
+		*reg_addr = 0;
+		break;
+	}
+	if (*reg_addr == 0)
+		return -EINVAL;
+
+	*reg_addr += offset;
+
+	return 0;
+}
+
+static int hinic_read_clp_reg(struct hinic_hwdev *hwdev,
+			      enum clp_data_type data_type,
+			      enum clp_reg_type reg_type, u32 *read_value)
+{
+	int err;
+	u32 reg_addr, reg_value;
+
+	if (data_type == HINIC_CLP_REQ_HOST &&
+	    reg_type == HINIC_CLP_READY_RSP_HOST)
+		return -EINVAL;
+	if (data_type == HINIC_CLP_RSP_HOST &&
+	    reg_type == HINIC_CLP_START_REQ_HOST)
+		return -EINVAL;
+
+	err = __get_clp_reg(hwdev, data_type, reg_type, &reg_addr);
+	if (err)
+		return err;
+
+	reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+
+	switch (reg_type) {
+	case HINIC_CLP_BA_HOST:
+		reg_value = ((reg_value >>
+			      HINIC_CLP_OFFSET(SRAM_BASE)) &
+			      HINIC_CLP_MASK(SRAM_BASE));
+		break;
+
+	case HINIC_CLP_SIZE_HOST:
+		reg_value = ((reg_value >>
+			      HINIC_CLP_OFFSET(SRAM_SIZE)) &
+			      HINIC_CLP_MASK(SRAM_SIZE));
+		break;
+
+	case HINIC_CLP_LEN_HOST:
+		reg_value = ((reg_value >> HINIC_CLP_OFFSET(LEN)) &
+			      HINIC_CLP_MASK(LEN));
+		break;
+
+	case HINIC_CLP_START_REQ_HOST:
+		reg_value = ((reg_value >> HINIC_CLP_OFFSET(START)) &
+			      HINIC_CLP_MASK(START));
+		break;
+
+	case HINIC_CLP_READY_RSP_HOST:
+		reg_value = ((reg_value >> HINIC_CLP_OFFSET(READY)) &
+			      HINIC_CLP_MASK(READY));
+		break;
+
+	default:
+		break;
+	}
+
+	*read_value = reg_value;
+	return 0;
+}
+
+static int __check_reg_value(enum clp_reg_type reg_type, u32 value)
+{
+	if (reg_type == HINIC_CLP_BA_HOST &&
+	    value > HINIC_CLP_SRAM_BASE_REG_MAX)
+		return -EINVAL;
+
+	if (reg_type == HINIC_CLP_SIZE_HOST &&
+	    value > HINIC_CLP_SRAM_SIZE_REG_MAX)
+		return -EINVAL;
+
+	if (reg_type == HINIC_CLP_LEN_HOST &&
+	    value > HINIC_CLP_LEN_REG_MAX)
+		return -EINVAL;
+
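+	/* START and READY are single-bit flags, so anything above
+	 * HINIC_CLP_START_OR_READY_REG_MAX (0x1) is invalid
+	 */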
+	if (reg_type == HINIC_CLP_START_REQ_HOST &&
+	    value > HINIC_CLP_START_OR_READY_REG_MAX)
+		return -EINVAL;
+
+	if (reg_type == HINIC_CLP_READY_RSP_HOST &&
+	    value > HINIC_CLP_START_OR_READY_REG_MAX)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void hinic_write_clp_reg(struct hinic_hwdev *hwdev,
+				enum clp_data_type data_type,
+				enum clp_reg_type reg_type, u32 value)
+{
+	u32 reg_addr, reg_value;
+
+	if (data_type == HINIC_CLP_REQ_HOST &&
+	    reg_type == HINIC_CLP_READY_RSP_HOST)
+		return;
+	if (data_type == HINIC_CLP_RSP_HOST &&
+	    reg_type == HINIC_CLP_START_REQ_HOST)
+		return;
+
+	if (__check_reg_value(reg_type, value))
+		return;
+
+	if (__get_clp_reg(hwdev, data_type, reg_type, &reg_addr))
+		return;
+
+	reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+
+	switch (reg_type) {
+	case HINIC_CLP_LEN_HOST:
+		reg_value = reg_value &
+			    (~(HINIC_CLP_MASK(LEN) << HINIC_CLP_OFFSET(LEN)));
+		reg_value = reg_value | (value << HINIC_CLP_OFFSET(LEN));
+		break;
+
+	case HINIC_CLP_START_REQ_HOST:
+		reg_value = reg_value &
+			    (~(HINIC_CLP_MASK(START) <<
+			    HINIC_CLP_OFFSET(START)));
+		reg_value = reg_value | (value << HINIC_CLP_OFFSET(START));
+		break;
+
+	case HINIC_CLP_READY_RSP_HOST:
+		reg_value = reg_value &
+			    (~(HINIC_CLP_MASK(READY) <<
+			    HINIC_CLP_OFFSET(READY)));
+		reg_value = reg_value | (value << HINIC_CLP_OFFSET(READY));
+		break;
+
+	default:
+		return;
+	}
+
+	hinic_hwif_write_reg(hwdev->hwif, reg_addr, reg_value);
+}
+
+static int hinic_read_clp_data(struct hinic_hwdev *hwdev,
+			       void *buf_out, u16 *out_size)
+{
+	int err;
+	u32 reg = HINIC_CLP_DATA(RSP);
+	u32 ready, delay_cnt;
+	u32 *ptr = (u32 *)buf_out;
+	u32 temp_out_size = 0;
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+				 HINIC_CLP_READY_RSP_HOST, &ready);
+	if (err)
+		return err;
+
+	delay_cnt = 0;
+	while (ready == 0) {
+		usleep_range(9000, 10000);
+		delay_cnt++;
+		err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+					 HINIC_CLP_READY_RSP_HOST, &ready);
+		if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX) {
+			sdk_err(hwdev->dev_hdl, "timeout with delay_cnt:%d\n",
+				delay_cnt);
+			return -EINVAL;
+		}
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+				 HINIC_CLP_LEN_HOST, &temp_out_size);
+	if (err)
+		return err;
+
+	if (temp_out_size > HINIC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) {
+		sdk_err(hwdev->dev_hdl, "invalid temp_out_size:%d\n",
+			temp_out_size);
+		return -EINVAL;
+	}
+
+	*out_size = (u16)(temp_out_size & 0xffff);
+	for (; temp_out_size > 0; temp_out_size--) {
+		*ptr = hinic_hwif_read_reg(hwdev->hwif, reg);
+		ptr++;
+		reg = reg + 4;
+	}
+
+	hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+			    HINIC_CLP_READY_RSP_HOST, (u32)0x0);
+	hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+			    HINIC_CLP_LEN_HOST, (u32)0x0);
+
+	return 0;
+}
+
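+/* hinic_write_clp_data - wait until the previous request is consumed
+ * (start bit cleared by the mgmt cpu), then program the request length,
+ * raise the start bit and copy the request words into the CLP request SRAM
+ */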
+static int hinic_write_clp_data(struct hinic_hwdev *hwdev,
+				void *buf_in, u16 in_size)
+{
+	int err;
+	u32 reg = HINIC_CLP_DATA(REQ);
+	u32 start = 1;
+	u32 delay_cnt = 0;
+	u32 *ptr = (u32 *)buf_in;
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+				 HINIC_CLP_START_REQ_HOST, &start);
+	if (err)
+		return err;
+
+	while (start == 1) {
+		usleep_range(9000, 10000);
+		delay_cnt++;
+		err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+					 HINIC_CLP_START_REQ_HOST, &start);
+		if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX)
+			return -EINVAL;
+	}
+
+	hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+			    HINIC_CLP_LEN_HOST, in_size);
+	hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+			    HINIC_CLP_START_REQ_HOST, (u32)0x1);
+
+	for (; in_size > 0; in_size--) {
+		hinic_hwif_write_reg(hwdev->hwif, reg, *ptr);
+		ptr++;
+		reg = reg + 4;
+	}
+
+	return 0;
+}
+
+static int hinic_check_clp_init_status(struct hinic_hwdev *hwdev)
+{
+	int err;
+	u32 reg_value = 0;
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+				 HINIC_CLP_BA_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong req ba value:0x%x\n", reg_value);
+		return -EINVAL;
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+				 HINIC_CLP_BA_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong rsp ba value:0x%x\n", reg_value);
+		return -EINVAL;
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+				 HINIC_CLP_SIZE_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong req size\n");
+		return -EINVAL;
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+				 HINIC_CLP_SIZE_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong rsp size\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void hinic_clear_clp_data(struct hinic_hwdev *hwdev,
+				 enum clp_data_type data_type)
+{
+	u32 reg = (data_type == HINIC_CLP_REQ_HOST) ?
+		  HINIC_CLP_DATA(REQ) : HINIC_CLP_DATA(RSP);
+	u32 count = HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST;
+
+	for (; count > 0; count--) {
+		hinic_hwif_write_reg(hwdev->hwif, reg, 0x0);
+		reg = reg + 4;
+	}
+}
+
+int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			 const void *buf_in, u16 in_size,
+			 void *buf_out, u16 *out_size)
+{
+	struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+	struct hinic_hwdev *dev = hwdev;
+	u64 header;
+	u16 real_size;
+	u8 *clp_msg_buf;
+	int err;
+
+	clp_pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->clp_pf_to_mgmt;
+	clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+	/* 4 bytes alignment */
+	if (in_size % HINIC_CLP_DATA_UNIT_HOST)
+		real_size = (in_size + (u16)sizeof(header) +
+			     HINIC_CLP_DATA_UNIT_HOST);
+	else
+		real_size = in_size + (u16)sizeof(header);
+	real_size = real_size / HINIC_CLP_DATA_UNIT_HOST;
+
+	if (real_size >
+	    (HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST)) {
+		sdk_err(dev->dev_hdl, "Invalid real_size:%d\n", real_size);
+		return -EINVAL;
+	}
+	down(&clp_pf_to_mgmt->clp_msg_lock);
+
+	err = hinic_check_clp_init_status(dev);
+	if (err) {
+		sdk_err(dev->dev_hdl, "Check clp init status failed\n");
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return err;
+	}
+
+	hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST);
+	hinic_write_clp_reg(dev, HINIC_CLP_RSP_HOST,
+			    HINIC_CLP_READY_RSP_HOST, 0x0);
+
+	/* Send request */
+	memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST);
+	clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0);
+
+	memcpy(clp_msg_buf, &header, sizeof(header));
+	clp_msg_buf += sizeof(header);
+	memcpy(clp_msg_buf, buf_in, in_size);
+
+	clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+	hinic_clear_clp_data(dev, HINIC_CLP_REQ_HOST);
+	err = hinic_write_clp_data(hwdev,
+				   clp_pf_to_mgmt->clp_msg_buf, real_size);
+	if (err) {
+		sdk_err(dev->dev_hdl, "Send clp request failed\n");
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+
+	/* Get response */
+	clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+	memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST);
+	err = hinic_read_clp_data(hwdev, clp_msg_buf, &real_size);
+	hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST);
+	if (err) {
+		sdk_err(dev->dev_hdl, "Read clp response failed\n");
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+
+	real_size = (u16)((real_size * HINIC_CLP_DATA_UNIT_HOST) & 0xffff);
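+	/* real_size is now in bytes: the response must be larger than the
+	 * 8-byte header and still fit in the CLP buffer
+	 */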
+	if ((real_size <= sizeof(header)) ||
+	    (real_size > HINIC_CLP_INPUT_BUFFER_LEN_HOST)) {
+		sdk_err(dev->dev_hdl, "Invalid response size:%d\n", real_size);
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+	real_size = real_size - sizeof(header);
+	if (real_size != *out_size) {
+		sdk_err(dev->dev_hdl, "Invalid real_size:%d, out_size:%d\n",
+			real_size, *out_size);
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+
+	memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size);
+	up(&clp_pf_to_mgmt->clp_msg_lock);
+
+	return 0;
+}
+
+int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+				void *buf_in, u16 in_size, void *buf_out,
+				u16 *out_size, u32 timeout)
+{
+	return 0;
+}
+
+/* This function is only used by txrx flush */
+int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			    void *buf_in, u16 in_size)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+	int err = -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) {
+		sdk_err(dev, "Mgmt module not initialized\n");
+		return -EINVAL;
+	}
+
+	pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+	if (!MSG_SZ_IS_VALID(in_size)) {
+		sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n",
+			in_size);
+		return -EINVAL;
+	}
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+		return -EPERM;
+
+	/* Lock the sync_msg_buf */
+	down(&pf_to_mgmt->sync_msg_lock);
+
+	err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+				    HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
+				    MSG_NO_RESP);
+
+	up(&pf_to_mgmt->sync_msg_lock);
+
+	return err;
+}
+
+/* api cmd write and read bypass use polling by default; to use the aeq
+ * interrupt instead, set wb_trigger_aeqe to 1
+ */
+int hinic_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	struct hinic_api_cmd_chain *chain;
+
+	if (!hwdev || !size || !cmd)
+		return -EINVAL;
+
+	if (size > MAX_PF_MGMT_BUF_SIZE)
+		return -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+	    hinic_get_mgmt_channel_status(hwdev))
+		return -EPERM;
+
+	pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_WRITE];
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+		return -EPERM;
+
+	return hinic_api_cmd_write(chain, dest, cmd, size);
+}
+EXPORT_SYMBOL(hinic_api_cmd_write_nack);
+
+int hinic_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack,
+			   u16 ack_size)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	struct hinic_api_cmd_chain *chain;
+
+	if (!hwdev || !cmd || (ack_size && !ack))
+		return -EINVAL;
+
+	if (size > MAX_PF_MGMT_BUF_SIZE)
+		return -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+	    hinic_get_mgmt_channel_status(hwdev))
+		return -EPERM;
+
+	pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_READ];
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+		return -EPERM;
+
+	return hinic_api_cmd_read(chain, dest, cmd, size, ack, ack_size);
+}
+EXPORT_SYMBOL(hinic_api_cmd_read_ack);
+
+static void __send_mgmt_ack(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+			    enum hinic_mod_type mod, u8 cmd, void *buf_in,
+			    u16 in_size, u16 msg_id)
+{
+	u16 buf_size;
+
+	if (!in_size)
+		buf_size = BUF_OUT_DEFAULT_SIZE;
+	else
+		buf_size = in_size;
+
+	spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+	/* MGMT sent sync msg, send the response */
+	send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd,
+			       buf_in, buf_size, HINIC_MSG_RESPONSE,
+			       msg_id);
+	spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+}
+
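+/* acks for mgmt-to-host commands are sent back over the async channel
+ * (async_msg_buf), so receive processing does not contend with a caller
+ * holding the sync channel
+ */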
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: the buffer of recv msg
+ * @in_size: the size of buffer
+ * @msg_id: message id
+ * @need_resp: the flag of need resp
+ */
+static void mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  enum hinic_mod_type mod, u8 cmd, void *buf_in,
+				  u16 in_size, u16 msg_id, int need_resp)
+{
+	void *dev = pf_to_mgmt->hwdev->dev_hdl;
+	void *buf_out = pf_to_mgmt->mgmt_ack_buf;
+	enum hinic_mod_type tmp_mod = mod;
+	bool ack_first = false;
+	u16 out_size = 0;
+
+	memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+	if (mod >= HINIC_MOD_HW_MAX) {
+		sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n",
+			 mod);
+		goto resp;
+	}
+
+	set_bit(HINIC_MGMT_MSG_CB_RUNNING,
+		&pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+	if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] ||
+	    !test_bit(HINIC_MGMT_MSG_CB_REG,
+		      &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) {
+		sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n",
+			 mod);
+		clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+			  &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+		goto resp;
+	}
+
+	ack_first = hinic_mgmt_event_ack_first(mod, cmd);
+	if (ack_first && need_resp) {
+		/* send the ack to mgmt first to avoid a command timeout in
+		 * mgmt (100ms in mgmt);
+		 * mgmt to host commands don't need any response data from
+		 * the host, just an ack from the host
+		 */
+		__send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id);
+	}
+
+	pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev,
+					      pf_to_mgmt->recv_mgmt_msg_data[tmp_mod],
+					      cmd, buf_in, in_size,
+					      buf_out, &out_size);
+
+	clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+		  &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+resp:
+	if (!ack_first && need_resp)
+		__send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size,
+				msg_id);
+}
+
+/**
+ * mgmt_resp_msg_handler - handler for response message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ */
+static void mgmt_resp_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  struct hinic_recv_msg *recv_msg)
+{
+	void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+	/* async msg: nobody is waiting for the response, just drop it */
+	if (recv_msg->msg_id & ASYNC_MSG_FLAG)
+		return;
+
+	spin_lock(&pf_to_mgmt->sync_event_lock);
+	if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id &&
+	    pf_to_mgmt->event_flag == SEND_EVENT_START) {
+		complete(&recv_msg->recv_done);
+	} else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+		sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) mismatch, event state=%d\n",
+			pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+			pf_to_mgmt->event_flag);
+	} else {
+		sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n",
+			pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+			pf_to_mgmt->event_flag);
+	}
+	spin_unlock(&pf_to_mgmt->sync_event_lock);
+}
+
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+	struct hinic_mgmt_msg_handle_work *mgmt_work =
+		container_of(work, struct hinic_mgmt_msg_handle_work, work);
+
+	mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod,
+			      mgmt_work->cmd, mgmt_work->msg,
+			      mgmt_work->msg_len, mgmt_work->msg_id,
+			      !mgmt_work->async_mgmt_to_pf);
+
+	destroy_work(&mgmt_work->work);
+
+	kfree(mgmt_work->msg);
+	kfree(mgmt_work);
+}
+
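+/* mgmt messages arrive in segments of at most SEGMENT_LEN bytes; seq_id
+ * must start at 0 and increase by exactly 1 per segment, otherwise the
+ * partially reassembled message is dropped
+ */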
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+					  u8 seq_id, u8 seg_len)
+{
+	if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN)
+		return false;
+
+	if (seq_id == 0) {
+		recv_msg->seq_id = seq_id;
+	} else {
+		if (seq_id != recv_msg->seq_id + 1)
+			return false;
+		recv_msg->seq_id = seq_id;
+	}
+
+	return true;
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ */
+static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  u8 *header, struct hinic_recv_msg *recv_msg)
+{
+	struct hinic_mgmt_msg_handle_work *mgmt_work;
+	u64 mbox_header = *((u64 *)header);
+	void *msg_body = header + sizeof(mbox_header);
+	u8 seq_id, seq_len;
+	u32 offset;
+	u64 dir;
+
+	/* Don't need to get anything from hw when cmd is async */
+	dir = HINIC_MSG_HEADER_GET(mbox_header, DIRECTION);
+	if (dir == HINIC_MSG_RESPONSE &&
+	    HINIC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG)
+		return;
+
+	seq_len = HINIC_MSG_HEADER_GET(mbox_header, SEG_LEN);
+	seq_id = HINIC_MSG_HEADER_GET(mbox_header, SEQID);
+
+	if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+		sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+			"Mgmt msg sequence id and segment length check failed, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n",
+			recv_msg->seq_id, seq_id, seq_len);
+		/* set seq_id to invalid seq_id */
+		recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+		return;
+	}
+
+	offset = seq_id * SEGMENT_LEN;
+	memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len);
+
+	if (!HINIC_MSG_HEADER_GET(mbox_header, LAST))
+		return;
+
+	recv_msg->cmd = HINIC_MSG_HEADER_GET(mbox_header, CMD);
+	recv_msg->mod = HINIC_MSG_HEADER_GET(mbox_header, MODULE);
+	recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(mbox_header,
+							  ASYNC_MGMT_TO_PF);
+	recv_msg->msg_len = HINIC_MSG_HEADER_GET(mbox_header, MSG_LEN);
+	recv_msg->msg_id = HINIC_MSG_HEADER_GET(mbox_header, MSG_ID);
+	recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
+	if (HINIC_MSG_HEADER_GET(mbox_header, DIRECTION) ==
+	    HINIC_MSG_RESPONSE) {
+		mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+		return;
+	}
+
+	mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+	if (!mgmt_work) {
+		sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt work memory failed\n");
+		return;
+	}
+
+	if (recv_msg->msg_len) {
+		mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+		if (!mgmt_work->msg) {
+			sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt msg memory failed\n");
+			kfree(mgmt_work);
+			return;
+		}
+	}
+
+	mgmt_work->pf_to_mgmt = pf_to_mgmt;
+	mgmt_work->msg_len = recv_msg->msg_len;
+	memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+	mgmt_work->msg_id = recv_msg->msg_id;
+	mgmt_work->mod = recv_msg->mod;
+	mgmt_work->cmd = recv_msg->cmd;
+	mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+	INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+	queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to hw device
+ * @header: the header of the message
+ * @size: unused
+ */
+void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	struct hinic_recv_msg *recv_msg;
+	bool is_send_dir = false;
+
+	pf_to_mgmt = dev->pf_to_mgmt;
+
+	is_send_dir = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+		       HINIC_MSG_DIRECT_SEND) ? true : false;
+
+	recv_msg = is_send_dir ?
&pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + */ +static int alloc_recv_msg(struct hinic_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + */ +static void free_recv_msg(struct hinic_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + */ +static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + err = -ENOMEM; + goto sync_msg_buf_err; + } + + pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) { + err = -ENOMEM; + goto ack_msg_buf_err; + } + + return 0; + +ack_msg_buf_err: + kfree(pf_to_mgmt->sync_msg_buf); + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + */ +static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->mgmt_ack_buf); + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * hinic_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + */ +int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = hwdev->dev_hdl; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) + return -ENOMEM; + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_event_lock); + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue(HINIC_MGMT_WQ_NAME); + if (!pf_to_mgmt->workq) { + sdk_err(dev, "Failed to initialize MGMT workqueue\n"); + err = -ENOMEM; + goto create_mgmt_workq_err; + } + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate msg buffers\n"); + goto alloc_msg_buf_err; + } + + err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + sdk_err(dev, "Failed to init the api cmd chains\n"); 
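+		/* unwind: free the msg buffers and destroy the workqueue */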
+ goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + destroy_workqueue(pf_to_mgmt->workq); + +create_mgmt_workq_err: + spin_lock_deinit(&pf_to_mgmt->sync_event_lock); + spin_lock_deinit(&pf_to_mgmt->async_msg_lock); + sema_deinit(&pf_to_mgmt->sync_msg_lock); + kfree(pf_to_mgmt); + + return err; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to hw device + */ +void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + /* destroy workqueue before free related pf_to_mgmt resources in case of + * illegal resource access + */ + destroy_workqueue(pf_to_mgmt->workq); + hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + free_msg_buf(pf_to_mgmt); + spin_lock_deinit(&pf_to_mgmt->sync_event_lock); + spin_lock_deinit(&pf_to_mgmt->async_msg_lock); + sema_deinit(&pf_to_mgmt->sync_msg_lock); + kfree(pf_to_mgmt); +} + +void hinic_flush_mgmt_workq(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + + flush_workqueue(dev->aeqs->workq); + + if (hinic_func_type(dev) != TYPE_VF && + hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) + flush_workqueue(dev->pf_to_mgmt->workq); +} + +int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + clp_pf_to_mgmt->clp_msg_buf = kzalloc(HINIC_CLP_INPUT_BUFFER_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + sema_deinit(&clp_pf_to_mgmt->clp_msg_lock); + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h new file mode 100644 index 000000000000..430e9cf57e6b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_MGMT_H_ +#define HINIC_MGMT_H_ + +#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MSG_HEADER_MODULE_SHIFT 11 +#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HINIC_MSG_HEADER_SEQID_SHIFT 24 +#define HINIC_MSG_HEADER_LAST_SHIFT 30 +#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MSG_HEADER_CMD_SHIFT 32 +#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48 +#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50 +#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HINIC_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC_MSG_HEADER_LAST_MASK 0x1 +#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MSG_HEADER_CMD_MASK 0xFF +#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3 +#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF +#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HINIC_MSG_HEADER_GET(val, member) \ + (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ + HINIC_MSG_HEADER_##member##_MASK) + +#define HINIC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ + HINIC_MSG_HEADER_##member##_SHIFT) + +#define HINIC_MGMT_WQ_NAME "hinic_mgmt" + +enum clp_data_type { + HINIC_CLP_REQ_HOST = 0, + HINIC_CLP_RSP_HOST = 1 +}; + +enum clp_reg_type { + HINIC_CLP_BA_HOST = 0, + HINIC_CLP_SIZE_HOST = 1, + HINIC_CLP_LEN_HOST = 2, + HINIC_CLP_START_REQ_HOST = 3, + HINIC_CLP_READY_RSP_HOST = 4 +}; + +#define HINIC_CLP_REG_GAP 0x20 +#define HINIC_CLP_INPUT_BUFFER_LEN_HOST 4096UL +#define HINIC_CLP_DATA_UNIT_HOST 4UL + +#define HINIC_BAR01_GLOABAL_CTL_OFFSET 0x4000 +#define HINIC_BAR01_CLP_OFFSET 0x5000 + +#define HINIC_CLP_SRAM_SIZE_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x220) +#define HINIC_CLP_REQ_SRAM_BA_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x224) +#define HINIC_CLP_RSP_SRAM_BA_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x228) +#define HINIC_CLP_REQ_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x22c) +#define HINIC_CLP_RSP_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x230) +#define HINIC_CLP_REG(member) (HINIC_CLP_##member##_REG) + +#define HINIC_CLP_REQ_DATA (HINIC_BAR01_CLP_OFFSET) +#define HINIC_CLP_RSP_DATA (HINIC_BAR01_CLP_OFFSET + 0x1000) +#define HINIC_CLP_DATA(member) (HINIC_CLP_##member##_DATA) + +#define HINIC_CLP_SRAM_SIZE_OFFSET 16 +#define HINIC_CLP_SRAM_BASE_OFFSET 0 +#define HINIC_CLP_LEN_OFFSET 0 +#define HINIC_CLP_START_OFFSET 31 +#define HINIC_CLP_READY_OFFSET 31 +#define HINIC_CLP_OFFSET(member) (HINIC_CLP_##member##_OFFSET) + +#define HINIC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL +#define HINIC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL +#define HINIC_CLP_LEN_BIT_LEN 0x7ffUL +#define HINIC_CLP_START_BIT_LEN 0x1UL +#define HINIC_CLP_READY_BIT_LEN 0x1UL +#define HINIC_CLP_MASK(member) (HINIC_CLP_##member##_BIT_LEN) + +#define HINIC_CLP_DELAY_CNT_MAX 200UL +#define HINIC_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define HINIC_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define HINIC_CLP_LEN_REG_MAX 0x3ff +#define HINIC_CLP_START_OR_READY_REG_MAX 0x1 + +enum hinic_msg_direction_type { + HINIC_MSG_DIRECT_SEND = 0, + HINIC_MSG_RESPONSE = 1 +}; + +enum hinic_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hinic_mgmt_msg_type { + ASYNC_MGMT_MSG = 0, + SYNC_MGMT_MSG = 1, +}; + +enum hinic_msg_ack_type { + HINIC_MSG_ACK = 0, + HINIC_MSG_NO_ACK = 1, +}; + +struct 
hinic_recv_msg { + void *msg; + + struct completion recv_done; + + u16 msg_len; + enum hinic_mod_type mod; + u8 cmd; + u8 seq_id; + u16 msg_id; + int async_mgmt_to_pf; +}; + +#define HINIC_COMM_SELF_CMD_MAX 8 + +struct comm_up_self_msg_sub_info { + u8 cmd; + comm_up_self_msg_proc proc; +}; + +struct comm_up_self_msg_info { + u8 cmd_num; + struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum hinic_mgmt_msg_cb_state { + HINIC_MGMT_MSG_CB_REG = 0, + HINIC_MGMT_MSG_CB_RUNNING, +}; + +struct hinic_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct hinic_msg_pf_to_mgmt { + struct hinic_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct hinic_recv_msg recv_msg_from_mgmt; + struct hinic_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + + hinic_mgmt_msg_cb recv_mgmt_msg_cb[HINIC_MOD_HW_MAX]; + void *recv_mgmt_msg_data[HINIC_MOD_HW_MAX]; + unsigned long mgmt_msg_cb_state[HINIC_MOD_HW_MAX]; + + void (*async_msg_cb[HINIC_MOD_HW_MAX])(void *handle, + enum hinic_mgmt_cmd cmd, + void *priv_data, u32 msg_id, + void *buf_out, u32 out_size); + + void *async_msg_cb_data[HINIC_MOD_HW_MAX]; + + struct comm_up_self_msg_info proc; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; +}; + +struct hinic_mgmt_msg_handle_work { + struct work_struct work; + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + + int async_mgmt_to_pf; +}; + +int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); + +void hinic_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); + +int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev); + +void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev); + +int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size); + +int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev); +void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h new file mode 100644 index 000000000000..9ba26adfcaea --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h @@ -0,0 +1,989 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_MGMT_INTERFACE_H +#define HINIC_MGMT_INTERFACE_H + +#include +#include + +#include "hinic_port_cmd.h" + +/* up to driver event */ +#define HINIC_PORT_CMD_MGMT_RESET 0x0 + +struct hinic_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +struct hinic_register_vf { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +struct hinic_tx_rate_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 tx_rate; +}; + +struct hinic_tx_rate_cfg_max_min { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 min_rate; + u32 max_rate; + u8 rsvd2[8]; +}; + +struct hinic_port_mac_get { + u16 func_id; + u8 mac[ETH_ALEN]; + int ret; +}; + +struct hinic_function_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rx_wqe_buf_size; + u32 mtu; +}; + +struct hinic_cmd_qpn { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 base_qpn; +}; + +struct hinic_port_mac_set { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct hinic_port_mac_update { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct hinic_vport_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hinic_port_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 state; + u8 rsvd1; + u16 func_id; +}; + +struct hinic_spoofchk_set { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 state; + u8 rsvd1; + u16 func_id; +}; + +struct hinic_mtu { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_vlan_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; +}; + +struct hinic_speed_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 speed; +}; + +struct hinic_link_mode_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u16 supported; /* 0xFFFF represent Invalid value */ + u16 advertised; +}; + +struct hinic_set_autoneg_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 enable; /* 1: enable , 0: disable */ +}; + +struct hinic_set_link_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 enable; +}; + +struct hinic_get_link { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 link_status; + u8 rsvd1; +}; + +struct hinic_link_status_report { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 link_status; + u8 port_id; +}; + +struct hinic_port_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 resv2[3]; +}; + +struct hinic_tso_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 tso_en; + u8 resv2[3]; +}; + +struct hinic_lro_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 resv2[13]; +}; + +struct hinic_lro_timer { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 type; /* 0: set timer value, 1: get timer value */ + u8 
enable; /* when set lro time, enable should be 1 */ + u16 rsvd1; + u32 timer; +}; + +struct hinic_checksum_offload { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 rx_csum_offload; +}; + +struct hinic_vlan_offload { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 vlan_rx_offload; + u8 rsvd1[5]; +}; + +struct hinic_vlan_filter { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 rsvd1[2]; + u32 vlan_filter_ctrl; +}; + +struct hinic_pause_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct hinic_rx_mode_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct nic_rss_indirect_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u8 entry[HINIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u32 ctx; +}; + +struct hinic_rss_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 rss_en; + u8 template_id; + u8 rq_priority_number; + u8 rsvd1[3]; + u8 prio_tc[HINIC_DCB_UP_MAX]; +}; + +struct hinic_rss_template_mgmt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 cmd; + u8 template_id; + u8 rsvd1[4]; +}; + +struct hinic_rss_indir_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 indir[HINIC_RSS_INDIR_SIZE]; +}; + +struct hinic_rss_template_key { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 key[HINIC_RSS_KEY_SIZE]; +}; + +struct hinic_rss_engine_type { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct hinic_rss_context_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u32 context; +}; + +struct hinic_up_ets_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 rsvd1[3]; + u8 up_tc[HINIC_DCB_UP_MAX]; + u8 pg_bw[HINIC_DCB_PG_MAX]; + u8 pgid[HINIC_DCB_UP_MAX]; + u8 up_bw[HINIC_DCB_UP_MAX]; + u8 prio[HINIC_DCB_PG_MAX]; +}; + +struct hinic_set_pfc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd1[4]; +}; + +struct hinic_set_micro_pfc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 micro_pfc_en; + u8 rsvd1; + u8 cfg_rq_max; + u8 cfg_rq_depth; + u16 rq_sm_thd; +}; + +struct hinic_cos_up_map { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + /* every bit indicate index of map is valid or not */ + u8 cos_valid_mask; + u16 rsvd1; + + /* user priority in cos(index:cos, value: up) */ + u8 map[HINIC_DCB_COS_MAX]; +}; + +struct hinic_set_rq_iq_mapping { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 map[HINIC_MAX_NUM_RQ]; + u32 num_rqs; + u32 rq_depth; +}; + +#define HINIC_PFC_SET_FUNC_THD 0 +#define HINIC_PFC_SET_GLB_THD 1 +struct hinic_pfc_thd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 op_type; + u16 func_thd; + u16 glb_thd; +}; + +/* set iq enable to ucode */ +struct hinic_cmd_enable_iq { + u8 rq_depth; + u8 num_rq; + u16 glb_rq_id; + + u16 q_id; + u16 lower_thd; + + u16 force_en; /* 1: force unlock, 0: depend on condition */ + u16 prod_idx; +}; + +/* set iq enable to mgmt cpu */ +struct hinic_cmd_enable_iq_mgmt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 rq_depth; + u8 num_rq; + u16 glb_rq_id; + + u16 q_id; + u16 
lower_thd; + + u16 force_en; /* 1: force unlock, 0: depend on condition */ + u16 prod_idx; +}; + +struct hinic_port_link_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 link; + u8 port_id; +}; + +struct hinic_cable_plug_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +struct hinic_link_err_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 err_type; + u8 port_id; +}; + +struct hinic_sync_time_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u64 mstime; +}; + +#define HINIC_PORT_STATS_VERSION 0 + +struct hinic_port_stats_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_port_stats { + u8 status; + u8 version; + u8 rsvd[6]; + + struct hinic_phy_port_stats stats; +}; + +struct hinic_cmd_vport_stats { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_vport_stats stats; +}; + +struct hinic_vf_vlan_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; + u8 qos; + u8 rsvd1[7]; +}; + +struct hinic_port_ipsu_mac { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 index; + u16 func_id; + u16 vlan_id; + u8 mac[ETH_ALEN]; +}; + +/* get or set loopback mode, need to modify by base API */ +#define HINIC_INTERNAL_LP_MODE 5 +#define LOOP_MODE_MIN 1 +#define LOOP_MODE_MAX 6 + +struct hinic_port_loopback { + u8 status; + u8 version; + u8 rsvd[6]; + + u32 mode; + u32 en; +}; + +#define HINIC_COMPILE_TIME_LEN 20 +struct hinic_version_info { + u8 status; + u8 version; + u8 rsvd[6]; + + u8 ver[HINIC_FW_VERSION_NAME]; + u8 time[HINIC_COMPILE_TIME_LEN]; +}; + +#define ANTI_ATTACK_DEFAULT_CIR 500000 +#define ANTI_ATTACK_DEFAULT_XIR 600000 +#define ANTI_ATTACK_DEFAULT_CBS 10000000 +#define ANTI_ATTACK_DEFAULT_XBS 12000000 +/* set physical port Anti-Attack rate */ +struct hinic_port_anti_attack_rate { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */ + u32 cir; /* Committed Information Rate */ + u32 xir; /* eXtended Information Rate */ + u32 cbs; /* Committed Burst Size */ + u32 xbs; /* eXtended Burst Size */ +}; + +struct hinic_clear_sq_resource { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_l2nic_reset { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 reset_flag; +}; + +struct hinic_super_cqe { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 super_cqe_en; +}; + +struct hinic_capture_info { + u8 status; + u8 version; + u8 rsvd[6]; + + u32 op_type; + u32 func_id; + u32 is_en_trx; + u32 offset_cos; + u32 data_vlan; +}; + +struct hinic_port_rt_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 pf_id; + u8 enable; + u8 rsvd1[6]; +}; + +struct fw_support_func { + u8 status; + u8 version; + u8 rsvd0[6]; + + u64 flag; + u64 rsvd; +}; + +struct hinic_vf_dcb_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_dcb_state state; +}; + +struct hinic_port_funcs_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; /* pf_id */ + u8 drop_en; + u8 rsvd1; +}; + +struct hinic_reset_link_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_force_pkt_drop { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port; + u8 rsvd1[3]; +}; + +struct hinic_set_link_follow { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 
follow_status; + u8 rsvd2[3]; +}; + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz); + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn); + +int hinic_get_fw_support_func(void *hwdev); + +int hinic_vf_func_init(struct hinic_hwdev *hwdev); + +void hinic_vf_func_free(struct hinic_hwdev *hwdev); + +void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id); + +int hinic_set_port_routine_cmd_report(void *hwdev, bool enable); + +int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info); + +int hinic_save_dcb_state(struct hinic_hwdev *hwdev, + struct hinic_dcb_state *dcb_state); + +void hinic_clear_vf_infos(void *hwdev, u16 vf_id); + +/* OVS module interface, for BMGW cpath command */ +enum hinic_hiovs_cmd { + OVS_SET_CPATH_VLAN = 39, + OVS_GET_CPATH_VLAN = 40, + OVS_DEL_CPATH_VLAN = 43, +}; + +struct cmd_cpath_vlan { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 vlan_id; + u16 pf_id; +}; + +/* HILINK module interface */ + +/* cmd of mgmt CPU message for HILINK module */ +enum hinic_hilink_cmd { + HINIC_HILINK_CMD_GET_LINK_INFO = 0x3, + HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +enum hinic_link_port_type { + LINK_PORT_FIBRE = 1, + LINK_PORT_ELECTRIC, + LINK_PORT_COPPER, + LINK_PORT_AOC, + LINK_PORT_BACKPLANE, + LINK_PORT_BASET, + LINK_PORT_MAX_TYPE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +enum hilink_fec_type { + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_MAX_TYPE, +}; + +struct hi30_ffe_data { + u8 PRE2; + u8 PRE1; + u8 POST1; + u8 POST2; + u8 MAIN; +}; + +struct hi30_ctle_data { + u8 ctlebst[3]; + u8 ctlecmband[3]; + u8 ctlermband[3]; + u8 ctleza[3]; + u8 ctlesqh[3]; + u8 ctleactgn[3]; + u8 ctlepassgn; +}; + +struct hi30_dfe_data { + u8 fix_tap1_cen; + u8 fix_tap1_edg; + u8 dfefxtap[6]; + u8 dfefloattap[6]; +}; + +struct hilink_sfp_power { + u32 rx_power; + u32 tx_power; + u32 rsvd; + u32 is_invalid; +}; + +#define HILINK_MAX_LANE 4 + +struct hilink_lane { + u8 lane_used; + u8 hi30_ffe[5]; + u8 hi30_ctle[19]; + u8 hi30_dfe[14]; + u8 rsvd4; +}; + +struct hinic_link_info { + u8 vendor_name[16]; + /* port type: + * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane; + * 6 - baseT; 0xffff - unknown + * + * port subtype: + * Only when port_type is fiber: + * 1 - SR; 2 - LR + */ + u32 port_type; + u32 port_sub_type; + u32 cable_length; + u8 cable_temp; + u8 cable_max_speed;/* 1(G)/10(G)/25(G)... */ + u8 sfp_type; /* 0 - qsfp; 1 - sfp */ + u8 rsvd0; + u32 power[4]; /* uW; if is sfp, only power[2] is valid */ + + u8 an_state; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u16 speed; /* 1(G)/10(G)/25(G)... 
*/ + + u8 cable_absent; /* 0 - cable present; 1 - cable unpresent */ + u8 alos; /* 0 - yes; 1 - no */ + u8 rx_los; /* 0 - yes; 1 - no */ + u8 pma_status; + u32 pma_dbg_info_reg; /* pma debug info: */ + u32 pma_signal_ok_reg; /* signal ok: */ + + u32 pcs_err_blk_cnt_reg; /* error block counter: */ + u32 rf_lf_status_reg; /* RF/LF status: */ + u8 pcs_link_reg; /* pcs link: */ + u8 mac_link_reg; /* mac link: */ + u8 mac_tx_en; + u8 mac_rx_en; + u32 pcs_err_cnt; + + /* struct hinic_hilink_lane: 40 bytes */ + u8 lane1[40]; /* 25GE lane in old firmware */ + + u8 rsvd1[266]; /* hilink machine state */ + + u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */ + + u8 rsvd2[2]; +}; + +struct hinic_hilink_link_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 port_id; + u8 info_type; /* 1: link up 2: link down 3 cable plugged */ + u8 rsvd1; + + struct hinic_link_info info; + + u8 rsvd2[352]; +}; + +struct hinic_link_ksettings_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + + u32 valid_bitmap; + u32 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u8 rsvd2[18]; /* reserved for duplex, port, etc. */ +}; + +enum hinic_tx_promsic { + HINIC_TX_PROMISC_ENABLE = 0, + HINIC_TX_PROMISC_DISABLE = 1, +}; + +struct hinic_promsic_info { + u8 status; + u8 version; + u8 rsvd0[6]; + u16 func_id; + u8 cfg; + u8 rsvd1; +}; + + +struct hinic_netq_cfg_msg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 netq_en; + u8 rsvd; +}; + +/* add/del rxq filter msg */ +struct hinic_rq_filter_msg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 qid; + u8 filter_type; + u8 qflag;/*0:stdq, 1:defq, 2: netq*/ + + u8 mac[6]; + struct { + u8 inner_mac[6]; + u32 vni; + } vxlan; +}; + +int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c new file mode 100644 index 000000000000..943f09f0db4a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_csr.h" +#include "hinic_msix_attr.h" + +#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) + +/** + * hinic_msix_attr_set - set message attribute of msix entry + * @hwif: the hardware interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix message + * (unit coalesc period) + * Return: 0 - success, negative - failure + */ +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer) +{ + u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | + HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | + HINIC_MSIX_ATTR_SET(lli_timer_cfg, LLI_TIMER) | + HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | + HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + + return 0; +} + +/** + * hinic_msix_attr_get - get message attribute of msix entry + * @hwif: the hardware interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer_cfg: coalesc period for interrupt (unit 8 us) + * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer_cfg: maximum wait for resending msix message + * (unit coalesc period) + * Return: 0 - success, negative - failure + */ +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer_cfg, + u8 *lli_timer_cfg, u8 *lli_credit_limit, + u8 *resend_timer_cfg) +{ + u32 addr, val; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + val = hinic_hwif_read_reg(hwif, addr); + + *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); + *coalesc_timer_cfg = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER); + *lli_timer_cfg = HINIC_MSIX_ATTR_GET(val, LLI_TIMER); + *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT); + *resend_timer_cfg = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER); + + return 0; +} + +/** + * hinic_msix_attr_cnt_set - set message attribute counters of msix entry + * @hwif: the hardware interface of a pci function device + * @msix_index: msix_index + * @lli_timer_cnt: replenishing period for low latency interrupt (unit 8 us) + * @lli_credit_cnt: maximum credits for low latency msix messages (unit 8) + * @coalesc_timer_cnt: coalesc period for interrupt (unit 8 us) + * @pending_cnt: the maximum pending interrupt events (unit 8) + * @resend_timer_cnt: maximum wait for resending msix message + * (unit coalesc period) + * Return: 0 - success, negative - failure + */ +int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index, + u8 lli_timer_cnt, u8 lli_credit_cnt, + u8 coalesc_timer_cnt, u8 pending_cnt, + u8 resend_timer_cnt) +{ + 
u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_CNT_SET(lli_timer_cnt, LLI_TIMER) | + HINIC_MSIX_CNT_SET(lli_credit_cnt, LLI_CREDIT) | + HINIC_MSIX_CNT_SET(coalesc_timer_cnt, COALESC_TIMER) | + HINIC_MSIX_CNT_SET(pending_cnt, PENDING) | + HINIC_MSIX_CNT_SET(resend_timer_cnt, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h new file mode 100644 index 000000000000..dd09bd696db3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_MSIX_ATTR_H_ +#define HINIC_MSIX_ATTR_H_ + +#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0 +#define HINIC_MSIX_COALESC_TIMER_SHIFT 8 +#define HINIC_MSIX_LLI_TIMER_SHIFT 16 +#define HINIC_MSIX_LLI_CREDIT_SHIFT 24 +#define HINIC_MSIX_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFFU +#define HINIC_MSIX_COALESC_TIMER_MASK 0xFFU +#define HINIC_MSIX_LLI_TIMER_MASK 0xFFU +#define HINIC_MSIX_LLI_CREDIT_MASK 0x1FU +#define HINIC_MSIX_RESEND_TIMER_MASK 0x7U + +#define HINIC_MSIX_ATTR_GET(val, member) \ + (((val) >> HINIC_MSIX_##member##_SHIFT) \ + & HINIC_MSIX_##member##_MASK) + +#define HINIC_MSIX_ATTR_SET(val, member) \ + (((val) & HINIC_MSIX_##member##_MASK) \ + << HINIC_MSIX_##member##_SHIFT) + +#define HINIC_MSIX_CNT_LLI_TIMER_SHIFT 0 +#define HINIC_MSIX_CNT_LLI_CREDIT_SHIFT 8 +#define HINIC_MSIX_CNT_COALESC_TIMER_SHIFT 8 +#define HINIC_MSIX_CNT_PENDING_SHIFT 8 +#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_CNT_LLI_TIMER_MASK 0xFFU +#define HINIC_MSIX_CNT_LLI_CREDIT_MASK 0xFFU +#define HINIC_MSIX_CNT_COALESC_TIMER_MASK 0xFFU +#define HINIC_MSIX_CNT_PENDING_MASK 0x1FU +#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HINIC_MSIX_CNT_SET(val, member) \ + (((val) & HINIC_MSIX_CNT_##member##_MASK) << \ + HINIC_MSIX_CNT_##member##_SHIFT) + +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); + +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer_cfg, + u8 *lli_timer_cfg, u8 *lli_credit_limit, + u8 *resend_timer_cfg); + +int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c new file mode 100644 index 000000000000..6ab8418a8dfd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c @@ -0,0 +1,978 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * 
Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_nic_io.h" +#include "hinic_api_cmd.h" +#include "hinic_mgmt.h" +#include "hinic_mbox.h" +#include "hinic_nic_cfg.h" +#include "hinic_hwif.h" +#include "hinic_mgmt_interface.h" +#include "hinic_multi_host_mgmt.h" + +#define SLAVE_HOST_STATUS_CLEAR(host_id, val) \ + ((val) & (~(1U << (host_id)))) +#define SLAVE_HOST_STATUS_SET(host_id, enable) \ + (((u8)(enable) & 1U) << (host_id)) +#define SLAVE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id)))) + +#define MULTI_HOST_PPF_GET(host_id, val) (((val) >> ((host_id) * 4 + 16)) & 0xf) + +static inline u8 get_master_host_ppf_idx(struct hinic_hwdev *hwdev) +{ + u32 reg_val; + + reg_val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_MULT_HOST_SLAVE_STATUS_ADDR); + /* master host sets host_id to 0 */ + return MULTI_HOST_PPF_GET(0, reg_val); +} + +void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable) +{ + u32 reg_val; + + if (HINIC_FUNC_TYPE(hwdev) != TYPE_PPF) + return; + + reg_val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_MULT_HOST_SLAVE_STATUS_ADDR); + + reg_val = SLAVE_HOST_STATUS_CLEAR(host_id, reg_val); + reg_val |= SLAVE_HOST_STATUS_SET(host_id, enable); + hinic_hwif_write_reg(hwdev->hwif, HINIC_MULT_HOST_SLAVE_STATUS_ADDR, + reg_val); + + sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n", + host_id, enable, reg_val); +} + +bool hinic_get_slave_host_enable(void *hwdev, u8 host_id) +{ + u32 reg_val; + struct hinic_hwdev *dev = hwdev; + + if (HINIC_FUNC_TYPE(dev) != TYPE_PPF) + return false; + + reg_val = hinic_hwif_read_reg(dev->hwif, + HINIC_MULT_HOST_SLAVE_STATUS_ADDR); + + return SLAVE_HOST_STATUS_GET(host_id, reg_val); +} +EXPORT_SYMBOL(hinic_get_slave_host_enable); + +void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable) +{ + u32 reg_val; + + if (!IS_MASTER_HOST(hwdev) || HINIC_FUNC_TYPE(hwdev) != TYPE_PPF) + return; + + reg_val = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR); + reg_val = MULTI_HOST_REG_CLEAR(reg_val, MASTER_MBX_STS); + reg_val |= MULTI_HOST_REG_SET((u8)enable, MASTER_MBX_STS); + hinic_hwif_write_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR, reg_val); + + sdk_info(hwdev->dev_hdl, "multi-host status %d, reg value: 0x%x\n", + enable, reg_val); +} + +bool hinic_get_master_host_mbox_enable(void *hwdev) +{ + u32 reg_val; + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_SLAVE_HOST(dev) || HINIC_FUNC_TYPE(dev) == TYPE_VF) + return true; + + reg_val = hinic_hwif_read_reg(dev->hwif, HINIC_HOST_MODE_ADDR); + + return !!MULTI_HOST_REG_GET(reg_val, MASTER_MBX_STS); +} + +void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode) +{ + switch (mode) { + case FUNC_MOD_MULTI_BM_MASTER: + sdk_info(hwdev->dev_hdl, "Detect 
multi-host BM master host\n");
+		hwdev->func_mode = FUNC_MOD_MULTI_BM_MASTER;
+		hwdev->feature_cap = HINIC_MULTI_BM_MASTER;
+		break;
+	case FUNC_MOD_MULTI_BM_SLAVE:
+		sdk_info(hwdev->dev_hdl, "Detect multi-host BM slave host\n");
+		hwdev->func_mode = FUNC_MOD_MULTI_BM_SLAVE;
+		hwdev->feature_cap = HINIC_MULTI_BM_SLAVE;
+		break;
+	case FUNC_MOD_MULTI_VM_MASTER:
+		sdk_info(hwdev->dev_hdl, "Detect multi-host VM master host\n");
+		hwdev->func_mode = FUNC_MOD_MULTI_VM_MASTER;
+		hwdev->feature_cap = HINIC_MULTI_VM_MASTER;
+		break;
+	case FUNC_MOD_MULTI_VM_SLAVE:
+		sdk_info(hwdev->dev_hdl, "Detect multi-host VM slave host\n");
+		hwdev->func_mode = FUNC_MOD_MULTI_VM_SLAVE;
+		hwdev->feature_cap = HINIC_MULTI_VM_SLAVE;
+		break;
+	default:
+		hwdev->func_mode = FUNC_MOD_NORMAL_HOST;
+		hwdev->feature_cap = HINIC_NORMAL_HOST_CAP;
+		break;
+	}
+}
+
+bool is_multi_vm_slave(void *hwdev)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+
+	if (!hwdev)
+		return false;
+
+	return hw_dev->func_mode == FUNC_MOD_MULTI_VM_SLAVE;
+}
+
+bool is_multi_bm_slave(void *hwdev)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+
+	if (!hwdev)
+		return false;
+
+	return hw_dev->func_mode == FUNC_MOD_MULTI_BM_SLAVE;
+}
+
+int rectify_host_mode(struct hinic_hwdev *hwdev)
+{
+	u16 cur_sdi_mode;
+	int err;
+
+	if (hwdev->board_info.board_type !=
+	    HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE)
+		return 0;
+
+	sdk_info(hwdev->dev_hdl, "Rectify host mode, host_id: %d\n",
+		 hinic_pcie_itf_id(hwdev));
+
+	err = hinic_get_sdi_mode(hwdev, &cur_sdi_mode);
+	if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+		cur_sdi_mode = HINIC_SDI_MODE_BM;
+	else if (err)
+		return err;
+
+	switch (cur_sdi_mode) {
+	case HINIC_SDI_MODE_BM:
+		if (hinic_pcie_itf_id(hwdev) == 0)
+			set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_MASTER);
+		else
+			set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE);
+		break;
+	case HINIC_SDI_MODE_VM:
+		if (hinic_pcie_itf_id(hwdev) == 0)
+			set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_MASTER);
+		else
+			set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE);
+		break;
+	default:
+		sdk_warn(hwdev->dev_hdl, "Unknown sdi mode %d\n", cur_sdi_mode);
+		break;
+	}
+
+	return 0;
+}
+
+void detect_host_mode_pre(struct hinic_hwdev *hwdev)
+{
+	enum hinic_chip_mode chip_mode;
+
+	/* Any PF can write the HOST_MODE REG, so it can't be trusted for
+	 * host0; host0 gets its chip mode from the mgmt cpu instead.
+	 * VFs have no right to read the HOST_MODE REG, so their mode is
+	 * detected from the board info.
+	 */
+	if (hinic_pcie_itf_id(hwdev) == 0 ||
+	    HINIC_FUNC_TYPE(hwdev) == TYPE_VF) {
+		set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST);
+		return;
+	}
+
+	chip_mode = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+	switch (MULTI_HOST_REG_GET(chip_mode, CHIP_MODE)) {
+	case CHIP_MODE_VMGW:
+		set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE);
+		/* mbox is not initialized yet, so disable the slave host */
+		set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+		break;
+	case CHIP_MODE_BMGW:
+		set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE);
+		/* mbox is not initialized yet, so disable the slave host */
+		set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+		break;
+
+	default:
+		set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST);
+		break;
+	}
+}
+
+int __mbox_to_host(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+		   u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+		   u16 *out_size, u32 timeout,
+		   enum hinic_mbox_ack_type ack_type)
+{
+	struct hinic_hwdev *mbox_hwdev = hwdev;
+	u8 dst_host_func_idx;
+	int err;
+
+	if (!IS_MULTI_HOST(hwdev) || HINIC_IS_VF(hwdev))
+		
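+		/* cross-host mailboxes exist only on multi-host chips,
+		 * and a VF goes through its own PF rather than messaging
+		 * the peer host directly, so reject both cases up front
+		 */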
return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_PF) { + down(&hwdev->ppf_sem); + mbox_hwdev = hwdev->ppf_hwdev; + if (!mbox_hwdev) { + err = -EINVAL; + goto release_lock; + } + + if (!hinic_is_hwdev_mod_inited(mbox_hwdev, + HINIC_HWDEV_MBOX_INITED)) { + err = -EPERM; + goto release_lock; + } + } + + if (!mbox_hwdev->chip_present_flag) { + err = -EPERM; + goto release_lock; + } + + if (!hinic_get_master_host_mbox_enable(hwdev)) { + sdk_err(hwdev->dev_hdl, "Master host not initialized\n"); + err = -EFAULT; + goto release_lock; + } + + if (!mbox_hwdev->mhost_mgmt) { + /* send to master host in default */ + dst_host_func_idx = get_master_host_ppf_idx(hwdev); + } else { + dst_host_func_idx = IS_MASTER_HOST(hwdev) ? + mbox_hwdev->mhost_mgmt->shost_ppf_idx : + mbox_hwdev->mhost_mgmt->mhost_ppf_idx; + } + + if (ack_type == MBOX_ACK) + err = hinic_mbox_to_host(mbox_hwdev, dst_host_func_idx, + mod, cmd, buf_in, in_size, + buf_out, out_size, + timeout); + else + err = hinic_mbox_to_func_no_ack(mbox_hwdev, dst_host_func_idx, + mod, cmd, buf_in, in_size); + +release_lock: + if (hinic_func_type(hwdev) == TYPE_PF) + up(&hwdev->ppf_sem); + + return err; +} + +int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + return __mbox_to_host((struct hinic_hwdev *)hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, timeout, MBOX_ACK); +} +EXPORT_SYMBOL(hinic_mbox_to_host_sync); + +int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size) +{ + return __mbox_to_host(hwdev, mod, cmd, buf_in, in_size, NULL, NULL, + 0, MBOX_NO_ACK); +} + +static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev, + u16 glb_func_idx, u8 *en); + +int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + struct hinic_slave_func_nic_state *nic_state, *out_state; + int err; + + switch (cmd) { + case HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE: + nic_state = buf_in; + out_state = buf_out; + *out_size = sizeof(*nic_state); + + /* find nic state in ppf func_nic_en bitmap */ + err = __get_func_nic_state_from_pf(hwdev, nic_state->func_idx, + &out_state->enable); + if (err) + out_state->status = 1; + else + out_state->status = 0; + + break; + default: + break; + } + + return 0; +} + +static int __master_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + struct register_slave_host *slave_host, *out_shost; + int err = 0; + + if (!mhost_mgmt) + return -ENXIO; + + switch (cmd) { + case HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER: + slave_host = buf_in; + out_shost = buf_out; + *out_size = sizeof(*slave_host); + mhost_mgmt->shost_registered = true; + mhost_mgmt->shost_host_idx = slave_host->host_id; + mhost_mgmt->shost_ppf_idx = slave_host->ppf_idx; + + bitmap_copy((ulong *)out_shost->funcs_nic_en, + mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS); + sdk_info(hwdev->dev_hdl, "slave host register ppf, host_id: %d, ppf_idx: %d\n", + slave_host->host_id, slave_host->ppf_idx); + + out_shost->status = 0; + break; + case HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER: + slave_host = buf_in; + mhost_mgmt->shost_registered = false; + sdk_info(hwdev->dev_hdl, "slave host unregister ppf, host_id: %d, ppf_idx: %d\n", + 
slave_host->host_id, slave_host->ppf_idx); + + *out_size = sizeof(*slave_host); + ((struct register_slave_host *)buf_out)->status = 0; + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int __event_set_func_nic_state(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_event_info event_info = {0}; + struct hinic_mhost_nic_func_state nic_state = {0}; + struct hinic_slave_func_nic_state *out_state, *func_nic_state = buf_in; + + event_info.type = HINIC_EVENT_MULTI_HOST_MGMT; + event_info.mhost_mgmt.sub_cmd = HINIC_MHOST_NIC_STATE_CHANGE; + event_info.mhost_mgmt.data = &nic_state; + + nic_state.func_idx = func_nic_state->func_idx; + nic_state.enable = func_nic_state->enable; + + if (!hwdev->event_callback) + return -EFAULT; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + *out_size = sizeof(*out_state); + out_state = buf_out; + out_state->status = nic_state.status; + + return nic_state.status; +} + +static int multi_host_event_handler(struct hinic_hwdev *hwdev, + u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + int err; + + switch (cmd) { + case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE: + err = __event_set_func_nic_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int sw_fwd_msg_to_vf(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_host_fwd_head *fwd_head; + u16 fwd_head_len; + void *msg; + int err; + + fwd_head = buf_in; + fwd_head_len = sizeof(struct hinic_host_fwd_head); + msg = (void *)((u8 *)buf_in + fwd_head_len); + err = hinic_mbox_ppf_to_vf(hwdev, fwd_head->mod, + fwd_head->dst_glb_func_idx, fwd_head->cmd, + msg, (in_size - fwd_head_len), + buf_out, out_size, 0); + if (err) + nic_err(hwdev->dev_hdl, + "Fwd msg to func %u failed, err: %d\n", + fwd_head->dst_glb_func_idx, err); + + return err; +} + +static int __slave_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + struct hinic_slave_func_nic_state *nic_state; + int err = 0; + + if (!mhost_mgmt) + return -ENXIO; + + switch (cmd) { + case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE: + nic_state = buf_in; + + *out_size = sizeof(*nic_state); + ((struct hinic_slave_func_nic_state *)buf_out)->status = 0; + + sdk_info(hwdev->dev_hdl, "slave func %d %s nic\n", + nic_state->func_idx, + nic_state->enable ? 
"register" : "unregister"); + + if (nic_state->enable) + set_bit(nic_state->func_idx, mhost_mgmt->func_nic_en); + else + clear_bit(nic_state->func_idx, mhost_mgmt->func_nic_en); + + multi_host_event_handler(hwdev, cmd, buf_in, in_size, buf_out, + out_size); + + break; + + case HINIC_SW_CMD_SEND_MSG_TO_VF: + err = sw_fwd_msg_to_vf(hwdev, buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_SW_CMD_MIGRATE_READY: + hinic_migrate_report(hwdev); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +int sw_func_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + int err; + + if (IS_MASTER_HOST(hwdev)) + err = __master_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in, + in_size, buf_out, out_size); + else if (IS_SLAVE_HOST(hwdev)) + err = __slave_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in, + in_size, buf_out, out_size); + else + err = -EINVAL; + + if (err) + sdk_err(hwdev->dev_hdl, "PPF process sw funcs cmd %d failed, err: %d\n", + cmd, err); + + return err; +} + +int __ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + int err; + + if (IS_SLAVE_HOST(hwdev)) { + err = hinic_mbox_to_host_sync(hwdev, mod, cmd, + buf_in, in_size, + buf_out, out_size, 0); + if (err) + sdk_err(hwdev->dev_hdl, "send to mpf failed, err: %d\n", + err); + } else if (IS_MASTER_HOST(hwdev)) { + if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_START_FLR) + err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size); + else + err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size, 0U); + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + sdk_err(hwdev->dev_hdl, "PF mbox common callback handler err: %d\n", + err); + } else { + /* not support */ + err = -EFAULT; + } + + return err; +} + +int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + bool same_host = false; + int err = -EFAULT; + + /* TODO: receive message from other host? 
get host id from pf_id */
+	/* modify same_host according to hinic_get_hw_pf_infos */
+
+	switch (hwdev->func_mode) {
+	case FUNC_MOD_MULTI_VM_MASTER:
+	case FUNC_MOD_MULTI_BM_MASTER:
+		if (!same_host)
+			err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+						     mod, cmd, buf_in, in_size,
+						     buf_out, out_size);
+		else
+			sdk_warn(hwdev->dev_hdl, "PPF mbox message is not supported in BM master\n");
+
+		break;
+	case FUNC_MOD_MULTI_VM_SLAVE:
+	case FUNC_MOD_MULTI_BM_SLAVE:
+		same_host = true;
+		if (same_host)
+			err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+						     mod, cmd, buf_in, in_size,
+						     buf_out, out_size);
+		else
+			sdk_warn(hwdev->dev_hdl, "Control message from BM master is not supported for now\n");
+
+		break;
+	default:
+		sdk_warn(hwdev->dev_hdl, "PPF mbox message is not supported\n");
+
+		break;
+	}
+
+	return err;
+}
+
+int comm_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			  void *buf_in, u16 in_size, void *buf_out,
+			  u16 *out_size)
+{
+	return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id, HINIC_MOD_COMM,
+					  cmd, buf_in, in_size, buf_out,
+					  out_size);
+}
+
+void comm_ppf_to_pf_handler(void *handle, u8 cmd,
+			    void *buf_in, u16 in_size,
+			    void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+
+	sdk_err(hwdev->dev_hdl, "PF received a PPF common mbox msg, not supported for now\n");
+}
+
+int hilink_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			    void *buf_in, u16 in_size, void *buf_out,
+			    u16 *out_size)
+{
+	return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id,
+					  HINIC_MOD_HILINK, cmd, buf_in,
+					  in_size, buf_out, out_size);
+}
+
+int hinic_nic_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			       void *buf_in, u16 in_size, void *buf_out,
+			       u16 *out_size)
+{
+	return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id,
+					  HINIC_MOD_L2NIC, cmd, buf_in, in_size,
+					  buf_out, out_size);
+}
+
+void hinic_nic_ppf_to_pf_handler(void *handle, u8 cmd,
+				 void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+
+	sdk_err(hwdev->dev_hdl, "PPF received another PF's l2nic mbox msg, not supported for now\n");
+}
+
+int hinic_register_slave_ppf(struct hinic_hwdev *hwdev, bool registered)
+{
+	struct register_slave_host host_info = {0};
+	u16 out_size = sizeof(host_info);
+	u8 cmd;
+	int err;
+
+	if (!IS_SLAVE_HOST(hwdev))
+		return -EINVAL;
+
+	cmd = registered ? HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER :
+		HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER;
+
+	host_info.host_id = hinic_pcie_itf_id(hwdev);
+	host_info.ppf_idx = hinic_ppf_idx(hwdev);
+
+	err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC, cmd,
+				      &host_info, sizeof(host_info), &host_info,
+				      &out_size, 0);
+	if (err || !out_size || host_info.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to %s slave host, err: %d, out_size: 0x%x, status: 0x%x\n",
+			registered ? 
"register" : "unregister", err, out_size, + host_info.status); + return -EFAULT; + } + bitmap_copy(hwdev->mhost_mgmt->func_nic_en, + (ulong *)host_info.funcs_nic_en, + HINIC_MAX_FUNCTIONS); + return 0; +} + +static int get_host_id_by_func_id(struct hinic_hwdev *hwdev, u16 func_idx, + u8 *host_id) +{ + struct hinic_hw_pf_infos *pf_infos; + u16 vf_id_start, vf_id_end; + int i; + + if (!hwdev || !host_id || !hwdev->mhost_mgmt) + return -EINVAL; + + pf_infos = &hwdev->mhost_mgmt->pf_infos; + + for (i = 0; i < pf_infos->num_pfs; i++) { + if (func_idx == pf_infos->infos[i].glb_func_idx) { + *host_id = pf_infos->infos[i].itf_idx; + return 0; + } + + vf_id_start = pf_infos->infos[i].glb_pf_vf_offset + 1; + vf_id_end = pf_infos->infos[i].glb_pf_vf_offset + + pf_infos->infos[i].max_vfs; + if (func_idx >= vf_id_start && func_idx <= vf_id_end) { + *host_id = pf_infos->infos[i].itf_idx; + return 0; + } + } + + return -EFAULT; +} + +int set_slave_func_nic_state(struct hinic_hwdev *hwdev, u16 func_idx, u8 en) +{ + struct hinic_slave_func_nic_state nic_state = {0}; + u16 out_size = sizeof(nic_state); + int err; + + nic_state.func_idx = func_idx; + nic_state.enable = en; + + err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC, + HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE, + &nic_state, sizeof(nic_state), &nic_state, + &out_size, 0); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + sdk_warn(hwdev->dev_hdl, "Can not notify func %d nic state because slave host not initialized\n", + func_idx); + } else if (err || !out_size || nic_state.status) { + sdk_err(hwdev->dev_hdl, "Failed to set slave host functions nic state, err: %d, out_size: 0x%x, status: 0x%x\n", + err, out_size, nic_state.status); + return -EFAULT; + } + + return 0; +} + +int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state) +{ + struct hinic_hwdev *ppf_hwdev = hwdev; + struct hinic_multi_host_mgmt *mhost_mgmt; + u8 host_id = 0; + bool host_enable; + int err; + int old_state; + + if (!hwdev || !state) + return -EINVAL; + + if (hinic_func_type(hwdev) != TYPE_PPF) + ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev; + + if (!ppf_hwdev || !IS_MASTER_HOST(ppf_hwdev)) + return -EINVAL; + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + if (!mhost_mgmt || state->func_idx >= HINIC_MAX_FUNCTIONS) + return -EINVAL; + + old_state = test_bit(state->func_idx, mhost_mgmt->func_nic_en) ? 1 : 0; + if (state->state == HINIC_FUNC_NIC_DEL) + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + else if (state->state == HINIC_FUNC_NIC_ADD) + set_bit(state->func_idx, mhost_mgmt->func_nic_en); + else + return -EINVAL; + + err = get_host_id_by_func_id(ppf_hwdev, state->func_idx, &host_id); + if (err) { + sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %d host id, err: %d\n", + state->func_idx, err); + old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) : + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + return -EFAULT; + } + + host_enable = hinic_get_slave_host_enable(hwdev, host_id); + sdk_info(ppf_hwdev->dev_hdl, "Set slave host %d(status: %d) func %d %s nic\n", + host_id, host_enable, + state->func_idx, state->state ? "enable" : "disable"); + + if (!host_enable) + return 0; + + /* notify slave host */ + err = set_slave_func_nic_state(hwdev, state->func_idx, state->state); + if (err) { + old_state ? 
set_bit(state->func_idx, mhost_mgmt->func_nic_en) :
+			clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_func_nic_state);
+
+static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev,
+					u16 glb_func_idx, u8 *en)
+{
+	struct hinic_multi_host_mgmt *mhost_mgmt;
+	struct hinic_hwdev *ppf_hwdev = hwdev;
+
+	if (hinic_func_type(hwdev) != TYPE_PPF)
+		ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev;
+
+	if (!ppf_hwdev || !ppf_hwdev->mhost_mgmt)
+		return -EFAULT;
+
+	mhost_mgmt = ppf_hwdev->mhost_mgmt;
+	*en = !!(test_bit(glb_func_idx, mhost_mgmt->func_nic_en));
+
+	sdk_info(ppf_hwdev->dev_hdl, "slave host func %d nic %d\n",
+		 glb_func_idx, *en);
+
+	return 0;
+}
+
+int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en)
+{
+	struct hinic_slave_func_nic_state nic_state = {0};
+	u16 out_size = sizeof(nic_state);
+	u8 nic_en;
+	int err;
+
+	if (!hwdev || !en)
+		return -EINVAL;
+	/* if the card mode is OVS, VFs do not need attach_uld, so return
+	 * false
+	 */
+	if (!IS_SLAVE_HOST((struct hinic_hwdev *)hwdev)) {
+		if (hinic_func_type(hwdev) == TYPE_VF &&
+		    hinic_support_ovs(hwdev, NULL)) {
+			*en = false;
+		} else {
+			*en = true;
+		}
+		return 0;
+	}
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		nic_state.func_idx = glb_func_idx;
+		err = hinic_msg_to_mgmt_sync(
+			hwdev, HINIC_MOD_SW_FUNC,
+			HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE,
+			&nic_state, sizeof(nic_state),
+			&nic_state, &out_size, 0);
+		if (err || !out_size || nic_state.status) {
+			sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Failed to get func %d nic state, err: %d, out_size: 0x%x, status: 0x%x\n",
+				glb_func_idx, err, out_size, nic_state.status);
+			return -EFAULT;
+		}
+
+		*en = !!nic_state.enable;
+
+		return 0;
+	}
+
+	/* a pf in the slave host should be probed in CHIP_MODE_VMGW
+	 * mode for pxe install
+	 */
+	if (IS_VM_SLAVE_HOST((struct hinic_hwdev *)hwdev)) {
+		*en = true;
+		return 0;
+	}
+
+	/* pf/ppf get the function nic state from the sdk directly */
+	err = __get_func_nic_state_from_pf(hwdev, glb_func_idx, &nic_en);
+	if (err)
+		return err;
+
+	*en = !!nic_en;
+
+	return 0;
+}
+
+int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev))
+		return 0;
+
+	hwdev->mhost_mgmt = kzalloc(sizeof(*hwdev->mhost_mgmt), GFP_KERNEL);
+	if (!hwdev->mhost_mgmt) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc multi-host mgmt memory\n");
+		return -ENOMEM;
+	}
+
+	hwdev->mhost_mgmt->mhost_ppf_idx = get_master_host_ppf_idx(hwdev);
+	hwdev->mhost_mgmt->shost_ppf_idx = 0;
+	hwdev->mhost_mgmt->shost_host_idx = 2;
+
+	err = hinic_get_hw_pf_infos(hwdev, &hwdev->mhost_mgmt->pf_infos);
+	if (err)
+		goto out_free_mhost_mgmt;
+
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_COMM,
+				   comm_ppf_mbox_handler);
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+				   hinic_nic_ppf_mbox_handler);
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK,
+				   hilink_ppf_mbox_handler);
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
+				   sw_func_ppf_mbox_handler);
+
+	bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS);
+
+	/* Slave host:
+	 * register the slave host ppf and
+	 * get each function's nic state
+	 */
+	if (IS_SLAVE_HOST(hwdev)) {
+		/* PXE doesn't support receiving mbox messages from the
+		 * master host
+		 */
+		set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), true);
+		if ((IS_VM_SLAVE_HOST(hwdev) &&
+		     hinic_get_master_host_mbox_enable(hwdev)) ||
+		    IS_BMGW_SLAVE_HOST(hwdev)) {
+			err = hinic_register_slave_ppf(hwdev, true);
+			if (err) {
+				
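+				/* registration with the master host failed:
+				 * clear the slave-status bit set just above
+				 * so the master won't try to reach this
+				 * host over the mailbox
+				 */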
set_slave_host_enable(hwdev, + hinic_pcie_itf_id(hwdev), + false); + goto out_free_mhost_mgmt; + } + } + } else { + /* slave host can send message to mgmt cpu after setup master + * mbox + */ + set_master_host_mbox_enable(hwdev, true); + } + + return 0; + +out_free_mhost_mgmt: + kfree(hwdev->mhost_mgmt); + hwdev->mhost_mgmt = NULL; + + return err; +} + +int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev) +{ + if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev)) + return 0; + + if (IS_SLAVE_HOST(hwdev)) { + hinic_register_slave_ppf(hwdev, false); + + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false); + } else { + set_master_host_mbox_enable(hwdev, false); + } + + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_COMM); + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC); + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK); + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC); + + kfree(hwdev->mhost_mgmt); + hwdev->mhost_mgmt = NULL; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h new file mode 100644 index 000000000000..3133106c6984 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ *
+ */
+
+#ifndef __HINIC_MULTI_HOST_MGMT_H_
+#define __HINIC_MULTI_HOST_MGMT_H_
+
+#define IS_BMGW_MASTER_HOST(hwdev)	\
+	((hwdev)->func_mode == FUNC_MOD_MULTI_BM_MASTER)
+#define IS_BMGW_SLAVE_HOST(hwdev)	\
+	((hwdev)->func_mode == FUNC_MOD_MULTI_BM_SLAVE)
+#define IS_VM_MASTER_HOST(hwdev)	\
+	((hwdev)->func_mode == FUNC_MOD_MULTI_VM_MASTER)
+#define IS_VM_SLAVE_HOST(hwdev)	\
+	((hwdev)->func_mode == FUNC_MOD_MULTI_VM_SLAVE)
+
+#define IS_MASTER_HOST(hwdev)	\
+	(IS_BMGW_MASTER_HOST(hwdev) || IS_VM_MASTER_HOST(hwdev))
+
+#define IS_SLAVE_HOST(hwdev)	\
+	(IS_BMGW_SLAVE_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev))
+
+#define IS_MULTI_HOST(hwdev)	\
+	(IS_BMGW_MASTER_HOST(hwdev) || IS_BMGW_SLAVE_HOST(hwdev) || \
+	 IS_VM_MASTER_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev))
+
+#define NEED_MBOX_FORWARD(hwdev)	IS_BMGW_SLAVE_HOST(hwdev)
+
+struct hinic_multi_host_mgmt {
+	struct hinic_hwdev *hwdev;
+
+	/* slave host registered */
+	bool shost_registered;
+	u8 shost_host_idx;
+	u8 shost_ppf_idx;
+
+	/* slave host functions that support nic enable */
+	DECLARE_BITMAP(func_nic_en, HINIC_MAX_FUNCTIONS);
+
+	u8 mhost_ppf_idx;
+
+	struct hinic_hw_pf_infos pf_infos;
+};
+
+struct hinic_host_fwd_head {
+	unsigned short dst_glb_func_idx;
+	unsigned char dst_itf_idx;
+	unsigned char mod;
+
+	unsigned char cmd;
+	unsigned char rsv[3];
+};
+
+int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev);
+int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev);
+int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev,
+			      enum hinic_mod_type mod, u8 cmd, void *buf_in,
+			      u16 in_size);
+
+struct register_slave_host {
+	u8 status;
+	u8 version;
+	u8 rsvd[6];
+
+	u8 host_id;
+	u8 ppf_idx;
+	u8 rsvd2[6];
+
+	/* for max 512 functions */
+	u64 funcs_nic_en[8];
+
+	u64 rsvd3[8];
+};
+
+struct hinic_slave_func_nic_state {
+	u8 status;
+	u8 version;
+	u8 rsvd[6];
+
+	u16 func_idx;
+	u8 enable;
+	u8 rsvd1;
+
+	u32 rsvd2[2];
+};
+
+void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable);
+void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable);
+void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode);
+int rectify_host_mode(struct hinic_hwdev *hwdev);
+void detect_host_mode_pre(struct hinic_hwdev *hwdev);
+
+int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+			    u16 in_size, void *buf_out, u16 *out_size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic.h b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
new file mode 100644
index 000000000000..1e8a8014ca4a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details. 
+ * + */ + +#ifndef HINIC_NIC_H_ +#define HINIC_NIC_H_ + +#include "hinic_wq.h" + +#define SET_VPORT_MBOX_TIMEOUT (30 * 1000) +#define SET_VPORT_MGMT_TIMEOUT (25 * 1000) +struct hinic_sq { + struct hinic_wq *wq; + + u16 q_id; + + u8 owner; + + void *cons_idx_addr; + + u8 __iomem *db_addr; + u16 msix_entry_idx; +}; + +struct hinic_rq { + struct hinic_wq *wq; + + u16 *pi_virt_addr; + dma_addr_t pi_dma_addr; + + u16 q_id; + + u32 irq_id; + u16 msix_entry_idx; + + dma_addr_t cqe_dma_addr; +}; + +struct hinic_qp { + struct hinic_sq sq; + struct hinic_rq rq; +}; + +struct vf_data_storage { + u8 vf_mac_addr[ETH_ALEN]; + bool registered; + bool pf_set_mac; + u16 pf_vlan; + u8 pf_qos; + u32 max_rate; + u32 min_rate; + + bool link_forced; + bool link_up; /* only valid if VF link is forced */ + bool spoofchk; + bool trust; +}; + +struct hinic_nic_cfg { + struct semaphore cfg_lock; + + /* Valid when pfc is disable */ + bool pause_set; + struct nic_pause_config nic_pause; + + u8 pfc_en; + u8 pfc_bitmap; + + struct nic_port_info port_info; + + /* percentage of pf link bandwidth */ + u32 pf_bw_limit; +}; + +struct hinic_nic_io { + struct hinic_hwdev *hwdev; + + u16 global_qpn; + u8 link_status; + + struct hinic_wqs wqs; + + struct hinic_wq *sq_wq; + struct hinic_wq *rq_wq; + + u16 max_qps; + u16 num_qps; + u16 sq_depth; + u16 rq_depth; + struct hinic_qp *qps; + + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + u16 max_vfs; + struct vf_data_storage *vf_infos; + + struct hinic_dcb_state dcb_state; + + struct hinic_nic_cfg nic_cfg; + u16 rx_buff_len; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c new file mode 100644 index 000000000000..d838932269cc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c @@ -0,0 +1,4113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_mbox.h" +#include "hinic_nic_io.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic.h" +#include "hinic_mgmt_interface.h" +#include "hinic_hwif.h" + +static unsigned char set_vf_link_state; +module_param(set_vf_link_state, byte, 0444); +MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. 
- default is 0."); + +#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\ + hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd, \ + buf_in, in_size, \ + buf_out, out_size, 0) + +#define l2nic_msg_to_mgmt_async(hwdev, cmd, buf_in, in_size) \ + hinic_msg_to_mgmt_async(hwdev, HINIC_MOD_L2NIC, cmd, buf_in, in_size) + +#define CPATH_FUNC_ID_VALID_LIMIT 2 +#define CHECK_IPSU_15BIT 0X8000 + +static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value); + +static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_function_table *function_table; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + function_table = (struct hinic_function_table *)buf_in; + + if (!function_table->rx_wqe_buf_size) + return false; + + return true; +} + +struct vf_cmd_check_handle nic_cmd_support_vf[] = { + {HINIC_PORT_CMD_VF_REGISTER, NULL}, + {HINIC_PORT_CMD_VF_UNREGISTER, NULL}, + + {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, + hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL, + hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_INIT_FUNC, check_func_table}, + {HINIC_PORT_CMD_SET_LLI_PRI, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL}, + {HINIC_PORT_CMD_GET_BOOT_VERSION, NULL}, + {HINIC_PORT_CMD_GET_MICROCODE_VERSION, NULL}, + + {HINIC_PORT_CMD_GET_VPORT_ENABLE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_VPORT_ENABLE, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_LRO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_PORT_INFO, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_IPSU_MAC, hinic_mbox_check_func_id_10B}, + {HINIC_PORT_CMD_GET_IPSU_MAC, hinic_mbox_check_func_id_10B}, + + {HINIC_PORT_CMD_GET_LINK_MODE, 
hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_CLEAR_SQ_RES, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_SUPER_CQE, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_VF_COS, NULL}, + {HINIC_PORT_CMD_FORCE_PKT_DROP, NULL}, + {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_TCAM_FILTER, NULL}, +}; + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz) +{ + struct hinic_function_table function_table = {0}; + u16 out_size = sizeof(function_table); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &function_table.func_id); + if (err) + return err; + + function_table.version = HINIC_CMD_VER_FUNC_ID; + function_table.mtu = 0x3FFF; /* default, max mtu */ + function_table.rx_wqe_buf_size = rx_buf_sz; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_INIT_FUNC, + &function_table, sizeof(function_table), + &function_table, &out_size, 0); + if (err || function_table.status || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to init func table, err: %d, status: 0x%x, out size: 0x%x\n", + err, function_table.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn) +{ + struct hinic_cmd_qpn cmd_qpn = {0}; + u16 out_size = sizeof(cmd_qpn); + int err; + + if (!hwdev || !global_qpn) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &cmd_qpn.func_id); + if (err) + return err; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_GLOBAL_QPN, + &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn, + &out_size, 0); + if (err || !out_size || cmd_qpn.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n", + err, cmd_qpn.status, out_size); + return -EINVAL; + } + + *global_qpn = cmd_qpn.base_qpn; + + return 0; +} + +int hinic_get_fw_support_func(void *hwdev) +{ + struct fw_support_func support_flag = {0}; + struct hinic_hwdev *dev = hwdev; + u16 out_size = sizeof(support_flag); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_FW_SUPPORT_FLAG, + &support_flag, sizeof(support_flag), + &support_flag, &out_size, 0); + if (support_flag.status == HINIC_MGMT_CMD_UNSUPPORTED) { + nic_info(dev->dev_hdl, "Current firmware doesn't support to get mac reuse flag\n"); + support_flag.flag = 0; + } else if (support_flag.status || err || !out_size) { + nic_err(dev->dev_hdl, "Failed to get mac reuse flag, err: %d, status: 0x%x, out size: 0x%x\n", + err, support_flag.status, out_size); + return -EFAULT; + } + + dev->fw_support_func_flag = support_flag.flag; + + return 0; +} + +#define HINIC_ADD_VLAN_IN_MAC 0x8000 +#define HINIC_VLAN_ID_MASK 0x7FFF + +int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_set mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, 
&mac_info, + sizeof(mac_info), &mac_info, &out_size); + if (err || !out_size || + (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST && + mac_info.status != HINIC_PF_SET_VF_ALREADY) || + (mac_info.vlan_id & CHECK_IPSU_15BIT && + mac_info.status == HINIC_MGMT_STATUS_EXIST)) { + nic_err(nic_hwdev->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EINVAL; + } + + if (mac_info.status == HINIC_PF_SET_VF_ALREADY) { + nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore set operation\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + if (mac_info.status == HINIC_MGMT_STATUS_EXIST) { + nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. Ignore update operation\n"); + return 0; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_mac); + +int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_set mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info, + sizeof(mac_info), &mac_info, &out_size); + if (err || !out_size || + (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY)) { + nic_err(nic_hwdev->dev_hdl, + "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EINVAL; + } + if (mac_info.status == HINIC_PF_SET_VF_ALREADY) { + nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore delete operation.\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + return 0; +} +EXPORT_SYMBOL(hinic_del_mac); + +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_update mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !old_mac || !new_mac) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.old_mac, old_mac, ETH_ALEN); + memcpy(mac_info.new_mac, new_mac, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || + (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST && + mac_info.status != HINIC_PF_SET_VF_ALREADY) || + (mac_info.vlan_id & CHECK_IPSU_15BIT && + mac_info.status == HINIC_MGMT_STATUS_EXIST)) { + nic_err(nic_hwdev->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EINVAL; + } + + if (mac_info.status == HINIC_PF_SET_VF_ALREADY) { + nic_warn(nic_hwdev->dev_hdl, "PF has already set VF MAC. Ignore update operation\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + if (mac_info.status == HINIC_MGMT_STATUS_EXIST) { + nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. 
Ignore update operation\n"); + return 0; + } + + return 0; +} + +int hinic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct hinic_hwdev *dev = hwdev; + struct vf_data_storage *vf_info; + u16 func_id, vlan_id; + int err; + + if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + vf_info = dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (!vf_info->pf_set_mac) + return 0; + + if (!FW_SUPPORT_MAC_REUSE_FUNC(dev)) { + nic_info(dev->dev_hdl, "Current firmware doesn't support mac reuse\n"); + return 0; + } + + func_id = hinic_glb_pf_vf_offset(dev) + (u16)vf_id; + vlan_id = old_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + err = hinic_del_mac(dev, vf_info->vf_mac_addr, vlan_id, + func_id); + if (err) { + nic_err(dev->dev_hdl, "Failed to delete VF %d MAC %pM vlan %d\n", + HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, vlan_id); + return err; + } + + vlan_id = new_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + err = hinic_set_mac(dev, vf_info->vf_mac_addr, vlan_id, + func_id); + if (err) { + nic_err(dev->dev_hdl, "Failed to add VF %d MAC %pM vlan %d\n", + HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, vlan_id); + goto out; + } + + return 0; + +out: + vlan_id = old_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + hinic_set_mac(dev, vf_info->vf_mac_addr, vlan_id, + func_id); + + return err; +} + +int hinic_get_default_mac(void *hwdev, u8 *mac_addr) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_set mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &mac_info.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || mac_info.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EINVAL; + } + + memcpy(mac_addr, mac_info.mac, ETH_ALEN); + + return 0; +} + +int hinic_set_port_mtu(void *hwdev, u32 new_mtu) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_mtu mtu_info = {0}; + u16 out_size = sizeof(mtu_info); + int err; + + if (!hwdev) + return -EINVAL; + + if (new_mtu < HINIC_MIN_MTU_SIZE) { + nic_err(nic_hwdev->dev_hdl, + "Invalid mtu size, mtu size < 256bytes"); + return -EINVAL; + } + + if (new_mtu > HINIC_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_hwdev->dev_hdl, "Invalid mtu size, mtu size > 9600bytes"); + return -EINVAL; + } + + err = hinic_global_func_id_get(hwdev, &mtu_info.func_id); + if (err) + return err; + + mtu_info.mtu = new_mtu; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU, + &mtu_info, sizeof(mtu_info), + &mtu_info, &out_size); + if (err || !out_size || mtu_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n", + err, mtu_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_hiovs_set_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct cmd_cpath_vlan cpath_vlan_info = {0}; + u16 out_size = sizeof(cpath_vlan_info); + int err; + + if (!hwdev) + return -EINVAL; + + cpath_vlan_info.pf_id = pf_id; + cpath_vlan_info.vlan_id = vlan_id; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_SET_CPATH_VLAN, + &cpath_vlan_info, sizeof(cpath_vlan_info), + 
&cpath_vlan_info, &out_size, 0);
+
+	if (err || !out_size || cpath_vlan_info.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to set cpath vlan, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cpath_vlan_info.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_hiovs_del_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct cmd_cpath_vlan cpath_vlan_info = {0};
+	u16 out_size = sizeof(cpath_vlan_info);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	cpath_vlan_info.pf_id = pf_id;
+	cpath_vlan_info.vlan_id = vlan_id;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_DEL_CPATH_VLAN,
+				     &cpath_vlan_info, sizeof(cpath_vlan_info),
+				     &cpath_vlan_info, &out_size, 0);
+
+	if (err || !out_size || cpath_vlan_info.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to delete cpath vlan, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cpath_vlan_info.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_enable_netq(void *hwdev, u8 en)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_netq_cfg_msg netq_cfg = {0};
+	u16 out_size = sizeof(netq_cfg);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &netq_cfg.func_id);
+	if (err)
+		return err;
+
+	netq_cfg.netq_en = en;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_NETQ,
+				     &netq_cfg, sizeof(netq_cfg),
+				     &netq_cfg, &out_size);
+	if (netq_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		nic_warn(nic_hwdev->dev_hdl, "Enabling netq is not supported\n");
+	} else if (err || !out_size || netq_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to enable netq, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, netq_cfg.status, out_size);
+	}
+
+	return err;
+}
+
+int hinic_add_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rq_filter_msg filter_msg = {0};
+	u16 out_size = sizeof(filter_msg);
+	int err;
+
+	if (!hwdev || !filter_info)
+		return -EINVAL;
+
+	switch (filter_info->filter_type) {
+	case HINIC_RQ_FILTER_TYPE_MAC_ONLY:
+		memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+		break;
+	case HINIC_RQ_FILTER_TYPE_VXLAN:
+		memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+		memcpy(filter_msg.vxlan.inner_mac,
+		       filter_info->vxlan.inner_mac, ETH_ALEN);
+		filter_msg.vxlan.vni = filter_info->vxlan.vni;
+		break;
+	default:
+		nic_warn(nic_hwdev->dev_hdl, "Unsupported filter type: 0x%x\n",
+			 filter_info->filter_type);
+		return -EINVAL;
+	}
+
+	err = hinic_global_func_id_get(hwdev, &filter_msg.func_id);
+	if (err)
+		return err;
+
+	filter_msg.filter_type = filter_info->filter_type;
+	filter_msg.qid = filter_info->qid;
+	filter_msg.qflag = filter_info->qflag;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_RQ_FILTER,
+				     &filter_msg, sizeof(filter_msg),
+				     &filter_msg, &out_size);
+	if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		nic_warn(nic_hwdev->dev_hdl, "Adding rxq filter is not supported\n");
+	} else if (err || !out_size || filter_msg.status) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to add RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, filter_msg.status, out_size);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+int hinic_del_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rq_filter_msg filter_msg 
= {0};
+	u16 out_size = sizeof(filter_msg);
+	int err;
+
+	if (!hwdev || !filter_info)
+		return -EINVAL;
+
+	switch (filter_info->filter_type) {
+	case HINIC_RQ_FILTER_TYPE_MAC_ONLY:
+		memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+		break;
+	case HINIC_RQ_FILTER_TYPE_VXLAN:
+		memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+		memcpy(filter_msg.vxlan.inner_mac,
+		       filter_info->vxlan.inner_mac, ETH_ALEN);
+		filter_msg.vxlan.vni = filter_info->vxlan.vni;
+		break;
+	default:
+		nic_warn(nic_hwdev->dev_hdl, "Unsupported filter type: 0x%x\n",
+			 filter_info->filter_type);
+		return -EINVAL;
+	}
+
+	err = hinic_global_func_id_get(hwdev, &filter_msg.func_id);
+	if (err)
+		return err;
+
+	filter_msg.filter_type = filter_info->filter_type;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_RQ_FILTER,
+				     &filter_msg, sizeof(filter_msg),
+				     &filter_msg, &out_size);
+	if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+		nic_warn(nic_hwdev->dev_hdl, "Deleting rxq filter is not supported\n");
+	} else if (err || !out_size || filter_msg.status) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to delete RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, filter_msg.status, out_size);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_vlan_config vlan_info = {0};
+	u16 out_size = sizeof(vlan_info);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	vlan_info.func_id = func_id;
+	vlan_info.vlan_id = vlan_id;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_VLAN,
+				     &vlan_info, sizeof(vlan_info),
+				     &vlan_info, &out_size);
+	if (err || !out_size || vlan_info.status) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to add vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, vlan_info.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_vlan_config vlan_info = {0};
+	u16 out_size = sizeof(vlan_info);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	vlan_info.func_id = func_id;
+	vlan_info.vlan_id = vlan_id;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_VLAN,
+				     &vlan_info, sizeof(vlan_info),
+				     &vlan_info, &out_size);
+	if (err || !out_size || vlan_info.status) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to delete vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, vlan_info.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_vlan_filter vlan_filter = {0};
+	u16 out_size = sizeof(vlan_filter);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &vlan_filter.func_id);
+	if (err)
+		return err;
+	vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+				     &vlan_filter, sizeof(vlan_filter),
+				     &vlan_filter, &out_size);
+	if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+	} else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+		   HINIC_IS_VF(nic_hwdev)) {
+		err = HINIC_MGMT_CMD_UNSUPPORTED;
+	} else if (err || !out_size || vlan_filter.status) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, vlan_filter.status, out_size);
+		err = -EINVAL;
+	}
+
+	return 
err; +} + +int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_info port_msg = {0}; + u16 out_size = sizeof(port_msg); + int err; + + if (!hwdev || !port_info) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &port_msg.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO, + &port_msg, sizeof(port_msg), + &port_msg, &out_size); + if (err || !out_size || port_msg.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_msg.status, out_size); + return -EINVAL; + } + + port_info->autoneg_cap = port_msg.autoneg_cap; + port_info->autoneg_state = port_msg.autoneg_state; + port_info->duplex = port_msg.duplex; + port_info->port_type = port_msg.port_type; + port_info->speed = port_msg.speed; + + return 0; +} +EXPORT_SYMBOL(hinic_get_port_info); + +int hinic_set_autoneg(void *hwdev, bool enable) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_set_autoneg_cmd autoneg = {0}; + u16 out_size = sizeof(autoneg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &autoneg.func_id); + if (err) + return err; + + autoneg.enable = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_AUTONEG, + &autoneg, sizeof(autoneg), + &autoneg, &out_size); + if (err || !out_size || autoneg.status) { + nic_err(dev->dev_hdl, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "enable" : "disable", err, autoneg.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_force_port_relink(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + int err; + + /* Force port link down and link up */ + err = hinic_set_port_link_status(hwdev, false); + if (err) { + nic_err(dev->dev_hdl, "Failed to set port link down\n"); + return -EFAULT; + } + + err = hinic_set_port_link_status(hwdev, true); + if (err) { + nic_err(dev->dev_hdl, "Failed to set port link up\n"); + return -EFAULT; + } + + return 0; +} + +int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported, + enum hinic_link_mode *advertised) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_link_mode_cmd link_mode = {0}; + u16 out_size = sizeof(link_mode); + int err; + + if (!hwdev || !supported || !advertised) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &link_mode.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE, + &link_mode, sizeof(link_mode), + &link_mode, &out_size); + if (err || !out_size || link_mode.status) { + nic_err(dev->dev_hdl, + "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, link_mode.status, out_size); + return -EINVAL; + } + + *supported = link_mode.supported; + *advertised = link_mode.advertised; + + return 0; +} + +int hinic_set_port_link_status(void *hwdev, bool enable) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_set_link_status link_status = {0}; + u16 out_size = sizeof(link_status); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &link_status.func_id); + if (err) + return err; + + link_status.enable = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_LINK_STATUS, + &link_status, sizeof(link_status), + &link_status, &out_size); + if (err || !out_size || link_status.status) { + nic_err(dev->dev_hdl, "Failed to %s port link status, err: %d, status: 0x%x, out size: 
0x%x\n", + enable ? "Enable" : "Disable", err, link_status.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_speed(void *hwdev, enum nic_speed_level speed) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_speed_cmd speed_info = {0}; + u16 out_size = sizeof(speed_info); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &speed_info.func_id); + if (err) + return err; + + speed_info.speed = speed; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SPEED, + &speed_info, sizeof(speed_info), + &speed_info, &out_size); + if (err || !out_size || speed_info.status) { + nic_err(dev->dev_hdl, + "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n", + err, speed_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_speed(void *hwdev, enum nic_speed_level *speed) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_speed_cmd speed_info = {0}; + u16 out_size = sizeof(speed_info); + int err; + + if (!hwdev || !speed) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &speed_info.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SPEED, + &speed_info, sizeof(speed_info), + &speed_info, &out_size); + if (err || !out_size || speed_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get speed, err: %d, status: 0x%x, out size: 0x%x\n", + err, speed_info.status, out_size); + return -EINVAL; + } + + *speed = speed_info.speed; + + return 0; +} +EXPORT_SYMBOL(hinic_get_speed); + +int hinic_get_link_state(void *hwdev, u8 *link_state) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_get_link get_link = {0}; + u16 out_size = sizeof(get_link); + int err; + + if (!hwdev || !link_state) + return -EINVAL; + + if (FUNC_FORCE_LINK_UP(hwdev)) { + *link_state = 1; + return 0; + } + + err = hinic_global_func_id_get(hwdev, &get_link.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, + &get_link, sizeof(get_link), + &get_link, &out_size); + if (err || !out_size || get_link.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_link.status, out_size); + return -EINVAL; + } + + *link_state = get_link.link_status; + + return 0; +} + +static int hinic_set_hw_pause_info(void *hwdev, + struct nic_pause_config nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_pause_config pause_info = {0}; + u16 out_size = sizeof(pause_info); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &pause_info.func_id); + if (err) + return err; + + pause_info.auto_neg = nic_pause.auto_neg; + pause_info.rx_pause = nic_pause.rx_pause; + pause_info.tx_pause = nic_pause.tx_pause; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n", + err, pause_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_cfg *nic_cfg; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = &nic_hwdev->nic_io->nic_cfg; + + down(&nic_cfg->cfg_lock); + + err = 
hinic_set_hw_pause_info(hwdev, nic_pause); + if (err) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = 0; + nic_cfg->pfc_bitmap = 0; + nic_cfg->pause_set = true; + nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg; + nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause; + nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_pause_config pause_info = {0}; + u16 out_size = sizeof(pause_info); + int err; + + if (!hwdev || !nic_pause) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &pause_info.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n", + err, pause_info.status, out_size); + return -EINVAL; + } + + nic_pause->auto_neg = pause_info.auto_neg; + nic_pause->rx_pause = pause_info.rx_pause; + nic_pause->tx_pause = pause_info.tx_pause; + + return 0; +} + +int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_cfg *nic_cfg = &nic_hwdev->nic_io->nic_cfg; + int err = 0; + + err = hinic_get_hw_pause_info(hwdev, nic_pause); + if (err) + return err; + + if (nic_cfg->pause_set || !nic_pause->auto_neg) { + nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause; + nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause; + } + + return 0; +} + +int hinic_set_rx_mode(void *hwdev, u32 enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rx_mode_config rx_mode_cfg = {0}; + u16 out_size = sizeof(rx_mode_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rx_mode_cfg.func_id); + if (err) + return err; + + rx_mode_cfg.rx_mode = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE, + &rx_mode_cfg, sizeof(rx_mode_cfg), + &rx_mode_cfg, &out_size); + if (err || !out_size || rx_mode_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_mode_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +/* offload feature */ +int hinic_set_rx_vlan_offload(void *hwdev, u8 en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_offload vlan_cfg = {0}; + u16 out_size = sizeof(vlan_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &vlan_cfg.func_id); + if (err) + return err; + + vlan_cfg.vlan_rx_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, + &vlan_cfg, sizeof(vlan_cfg), + &vlan_cfg, &out_size); + if (err || !out_size || vlan_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_csum_offload(void *hwdev, u32 en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_checksum_offload rx_csum_cfg = {0}; + u16 out_size = sizeof(rx_csum_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rx_csum_cfg.func_id); + if (err) + return err; + + 
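+ /* A note on the parameter: en is forwarded to the firmware unchanged; + * its u32 type suggests a checksum-feature bitmask rather than a plain + * boolean, but that is an assumption from the prototype, not something + * this file documents. + */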
rx_csum_cfg.rx_csum_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM, + &rx_csum_cfg, sizeof(rx_csum_cfg), + &rx_csum_cfg, &out_size); + if (err || !out_size || rx_csum_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_csum_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_tx_tso(void *hwdev, u8 tso_en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_tso_config tso_cfg = {0}; + u16 out_size = sizeof(tso_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &tso_cfg.func_id); + if (err) + return err; + + tso_cfg.tso_en = tso_en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_TSO, + &tso_cfg, sizeof(tso_cfg), + &tso_cfg, &out_size); + if (err || !out_size || tso_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set tso, err: %d, status: 0x%x, out size: 0x%x\n", + err, tso_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num) +{ + struct hinic_hwdev *nic_hwdev = hwdev; + u8 ipv4_en = 0, ipv6_en = 0; + int err; + + if (!hwdev) + return -EINVAL; + + ipv4_en = lro_en ? 1 : 0; + ipv6_en = lro_en ? 1 : 0; + + nic_info(nic_hwdev->dev_hdl, "Set LRO max wqe number to %u\n", wqe_num); + + err = hinic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)wqe_num); + if (err) + return err; + + /* we don't set LRO timer for VF */ + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + nic_info(nic_hwdev->dev_hdl, "Set LRO timer to %u\n", lro_timer); + + return hinic_set_rx_lro_timer(hwdev, lro_timer); +} + +static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_lro_timer lro_timer = {0}; + u16 out_size = sizeof(lro_timer); + int err; + + if (!hwdev) + return -EINVAL; + + lro_timer.status = 0; + lro_timer.type = 0; + lro_timer.enable = 1; + lro_timer.timer = timer_value; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER, + &lro_timer, sizeof(lro_timer), + &lro_timer, &out_size); + if (lro_timer.status == 0xFF) { + /* status 0xFF means the firmware does not support this command; treat it as success */ + lro_timer.status = 0; + nic_err(nic_hwdev->dev_hdl, "Setting the lro timer is not supported by the current FW version, the default of 1ms will be used\n"); + } + + if (err || !out_size || lro_timer.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_timer.status, out_size); + + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_lro_config lro_cfg = {0}; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &lro_cfg.func_id); + if (err) + return err; + + lro_cfg.lro_ipv4_en = ipv4_en; + lro_cfg.lro_ipv6_en = ipv6_en; + lro_cfg.lro_max_wqe_num = max_wqe_num; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO, + &lro_cfg, sizeof(lro_cfg), + &lro_cfg, &out_size); + if (err || !out_size || lro_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +static int hinic_dcb_set_hw_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) +{ + struct hinic_hwdev *dev
= hwdev; + struct hinic_set_pfc pfc = {0}; + u16 out_size = sizeof(pfc); + int err; + + err = hinic_global_func_id_get(hwdev, &pfc.func_id); + if (err) + return err; + + pfc.pfc_bitmap = pfc_bitmap; + pfc.pfc_en = pfc_en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC, + &pfc, sizeof(pfc), &pfc, &out_size); + if (err || pfc.status || !out_size) { + nic_err(dev->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n", + err, pfc.status, out_size); + return -EINVAL; + } + + return 0; +} + +/* dcbtool */ +int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg; + int err; + + down(&nic_cfg->cfg_lock); + + err = hinic_dcb_set_hw_pfc(hwdev, pfc_en, pfc_bitmap); + if (err) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = pfc_en; + nic_cfg->pfc_bitmap = pfc_bitmap; + + /* the pause setting is the opposite of pfc */ + nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1; + nic_cfg->nic_pause.tx_pause = pfc_en ? 0 : 1; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap) +{ + return 0; +} + +int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw, + u8 *prio) +{ + struct hinic_up_ets_cfg ets = {0}; + u16 out_size = sizeof(ets); + u16 up_bw_t = 0; + u8 pg_bw_t = 0; + int i, err; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + up_bw_t += *(up_bw + i); + pg_bw_t += *(pg_bw + i); + + if (*(up_tc + i) > HINIC_DCB_TC_MAX) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Invalid up %d mapping tc: %d\n", + i, *(up_tc + i)); + return -EINVAL; + } + } + + if (pg_bw_t != 100 || (up_bw_t % 100) != 0) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Invalid pg_bw: %d or up_bw: %d\n", pg_bw_t, up_bw_t); + return -EINVAL; + } + + ets.port_id = 0; /* reserved */ + memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX); + memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX); + memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX); + memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX); + memcpy(ets.prio, prio, HINIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS, + &ets, sizeof(ets), &ets, &out_size); + if (err || ets.status || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n", + err, ets.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw, + u8 *prio) +{ + return 0; +} + +int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up) +{ + struct hinic_cos_up_map map = {0}; + u16 out_size = sizeof(map); + int err; + + if (!hwdev || !cos_up) + return -EINVAL; + + map.port_id = hinic_physical_port_id(hwdev); + map.cos_valid_mask = cos_valid_bitmap; + memcpy(map.map, cos_up, sizeof(map.map)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_COS_UP_MAP, + &map, sizeof(map), &map, &out_size); + if (err || map.status || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n", + err, map.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map) +{ + struct hinic_hwdev *dev; + struct hinic_nic_io *nic_io; + struct hinic_set_rq_iq_mapping rq_iq_mapping = {0}; + u16 out_size = sizeof(rq_iq_mapping); + int err; + + if (!hwdev || !map || num_rqs > HINIC_MAX_NUM_RQ) + return -EINVAL; + + dev = hwdev; +
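+ /* Each byte of the map pairs one RQ with an inbound queue; the RQ depth + * is sent as ilog2() below, so the firmware receives the power-of-two + * encoding it expects (inferred from this code path, not firmware docs). + */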
nic_io = dev->nic_io; + + hinic_qps_num_set(dev, nic_io->num_qps); + + err = hinic_global_func_id_get(hwdev, &rq_iq_mapping.func_id); + if (err) + return err; + + rq_iq_mapping.num_rqs = num_rqs; + rq_iq_mapping.rq_depth = (u16)ilog2(nic_io->rq_depth); + + memcpy(rq_iq_mapping.map, map, num_rqs); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP, + &rq_iq_mapping, sizeof(rq_iq_mapping), + &rq_iq_mapping, &out_size); + if (err || !out_size || rq_iq_mapping.status) { + nic_err(dev->dev_hdl, "Failed to set rq cos mapping, err: %d, status: 0x%x, out size: 0x%x\n", + err, rq_iq_mapping.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_dcb_set_rq_iq_mapping); + +int hinic_set_pfc_threshold(void *hwdev, u16 op_type, u16 threshold) +{ + struct hinic_pfc_thd pfc_thd = {0}; + u16 out_size = sizeof(pfc_thd); + int err; + + if (op_type == HINIC_PFC_SET_FUNC_THD) + pfc_thd.func_thd = threshold; + else if (op_type == HINIC_PFC_SET_GLB_THD) + pfc_thd.glb_thd = threshold; + else + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &pfc_thd.func_id); + if (err) + return err; + + pfc_thd.op_type = op_type; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC_THD, + &pfc_thd, sizeof(pfc_thd), + &pfc_thd, &out_size); + if (err || !out_size || pfc_thd.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set pfc threshold, err: %d, status: 0x%x, out size: 0x%x\n", + err, pfc_thd.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_set_bp_thd(void *hwdev, u16 threshold) +{ + int err; + + err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_GLB_THD, threshold); + if (err) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set global threshold\n"); + return -EFAULT; + } + + err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_FUNC_THD, threshold); + if (err) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set function threshold\n"); + return -EFAULT; + } + + return 0; +} + +int hinic_disable_fw_bp(void *hwdev) +{ + int err; + + err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_FUNC_THD, 0); + if (err) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to disable ucode backpressure\n"); + return -EFAULT; + } + + return 0; +} + +int hinic_set_iq_enable(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_cmd_enable_iq *iq_info; + struct hinic_cmd_buf *cmd_buf; + int err; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(dev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + iq_info = cmd_buf->buf; + cmd_buf->size = sizeof(*iq_info); + + iq_info->force_en = 0; + iq_info->rq_depth = (u8)ilog2(dev->nic_io->rq_depth); + iq_info->num_rq = (u8)dev->nic_io->max_qps; + /* num_qps will not be larger than 64 */ + iq_info->glb_rq_id = dev->nic_io->global_qpn + q_id; + iq_info->q_id = q_id; + iq_info->lower_thd = lower_thd; + iq_info->prod_idx = prod_idx; + hinic_cpu_to_be32(iq_info, sizeof(*iq_info)); + + err = hinic_cmdq_async(hwdev, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_IQ_ENABLE, cmd_buf); + if (err) { + hinic_free_cmd_buf(hwdev, cmd_buf); + nic_err(dev->dev_hdl, "Failed to set iq enable, err: %d\n", err); + return -EFAULT; + } + + return 0; +} + +int hinic_set_iq_enable_mgmt(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_cmd_enable_iq_mgmt iq_info = {0}; + int err; + + iq_info.force_en = 0; + + iq_info.rq_depth =
(u8)ilog2(dev->nic_io->rq_depth); + iq_info.num_rq = (u8)dev->nic_io->max_qps; + /* num_qps will not be larger than 64 */ + iq_info.glb_rq_id = dev->nic_io->global_qpn + q_id; + iq_info.q_id = q_id; + iq_info.lower_thd = lower_thd; + iq_info.prod_idx = prod_idx; + + err = l2nic_msg_to_mgmt_async(hwdev, HINIC_PORT_CMD_SET_IQ_ENABLE, + &iq_info, sizeof(iq_info)); + if (err || iq_info.status) { + nic_err(dev->dev_hdl, "Failed to set iq enable for rq: %d, err: %d, status: 0x%x\n", + q_id, err, iq_info.status); + return -EFAULT; + } + + return 0; +} + +/* nictool */ +int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period) +{ + return 0; +} + +int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *cfg) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_lro_config lro_cfg = {0}; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev || !cfg) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &lro_cfg.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LRO, + &lro_cfg, sizeof(lro_cfg), + &lro_cfg, &out_size); + if (err || !out_size || lro_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get lro offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_cfg.status, out_size); + return -EINVAL; + } + + cfg->func_id = lro_cfg.func_id; + cfg->lro_ipv4_en = lro_cfg.lro_ipv4_en; + cfg->lro_ipv6_en = lro_cfg.lro_ipv6_en; + cfg->lro_max_wqe_num = lro_cfg.lro_max_wqe_num; + return 0; +} + +int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size) +{ + return 0; +} + +int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size) +{ + return 0; +} + +int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable) +{ + struct hinic_port_loopback lb = {0}; + u16 out_size = sizeof(lb); + int err; + + lb.mode = mode; + lb.en = enable; + + if ((mode < LOOP_MODE_MIN) || (mode > LOOP_MODE_MAX)) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Invalid loopback mode %d to set\n", mode); + return -EINVAL; + } + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE, + &lb, sizeof(lb), &lb, &out_size); + if (err || !out_size || lb.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n", + mode, enable, err, lb.status, out_size); + return -EINVAL; + } + + nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Set loopback mode %d en %d succeeded\n", mode, enable); + + return 0; +} + +int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable) +{ + struct hinic_port_loopback lb = {0}; + u16 out_size = sizeof(lb); + int err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LOOPBACK_MODE, + &lb, sizeof(lb), &lb, &out_size); + if (err || !out_size || lb.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get loopback mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, lb.status, out_size); + return -EINVAL; + } + + *mode = lb.mode; + *enable = lb.en; + return 0; +} + +int hinic_set_loopback_mode(void *hwdev, bool enable) +{ + return hinic_set_loopback_mode_ex(hwdev, HINIC_INTERNAL_LP_MODE, + enable); +} + +int hinic_get_port_enable_state(void *hwdev, bool *enable) +{ + return 0; +} + +int hinic_get_vport_enable_state(void *hwdev, bool *enable) +{ + return 0; +} + +int hinic_set_lli_state(void *hwdev, u8 lli_state) +{ + return 0; +} + +int hinic_set_vport_enable(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vport_state
en_state = {0}; + u16 out_size = sizeof(en_state); + int err; + u32 timeout; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &en_state.func_id); + if (err) + return err; + + en_state.state = enable ? 1 : 0; + + if (HINIC_IS_VF(nic_hwdev)) + timeout = SET_VPORT_MBOX_TIMEOUT; + else + timeout = SET_VPORT_MGMT_TIMEOUT; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), &en_state, + &out_size, timeout); + + if (err || !out_size || en_state.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n", + err, en_state.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_vport_enable); + +#define NIC_PORT_DISABLE 0x0 +#define NIC_PORT_ENABLE 0x3 +int hinic_set_port_enable(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_state en_state = {0}; + u16 out_size = sizeof(en_state); + int err; + + if (!hwdev) + return -EINVAL; + + if (HINIC_IS_VF(nic_hwdev)) + return 0; + + err = hinic_global_func_id_get(hwdev, &en_state.func_id); + if (err) + return err; + + en_state.version = HINIC_CMD_VER_FUNC_ID; + en_state.state = enable ? NIC_PORT_ENABLE : NIC_PORT_DISABLE; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE, + &en_state, sizeof(en_state), &en_state, + &out_size); + if (err || !out_size || en_state.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n", + err, en_state.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_port_enable); + +/* rss */ +int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct nic_rss_context_tbl *ctx_tbl; + struct hinic_cmd_buf *cmd_buf; + u32 ctx = 0; + u64 out_param; + int err; + + if (!hwdev) + return -EINVAL; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx |= HINIC_RSS_TYPE_SET(1, VALID) | + HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + + cmd_buf->size = sizeof(struct nic_rss_context_tbl); + + ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; + ctx_tbl->group_index = cpu_to_be32(tmpl_idx); + ctx_tbl->offset = 0; + ctx_tbl->size = sizeof(u32); + ctx_tbl->size = cpu_to_be32(ctx_tbl->size); + ctx_tbl->rsvd = 0; + ctx_tbl->ctx = cpu_to_be32(ctx); + + /* cfg the rss context table by command queue */ + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + cmd_buf, &out_param, 0); + + hinic_free_cmd_buf(hwdev, cmd_buf); + + if (err || out_param != 0) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss context table, err: %d\n", + err); + return -EFAULT; + } + + return 0; +} + +int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_context_table ctx_tbl = {0}; + u16 out_size = sizeof(ctx_tbl); + int err; + + 
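+ /* Decode the returned context word with the same HINIC_RSS_TYPE_* field + * layout that hinic_set_rss_type() used to build it, keeping the + * set/get pair symmetric. + */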
if (!hwdev || !rss_type) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &ctx_tbl.func_id); + if (err) + return err; + + ctx_tbl.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL, + &ctx_tbl, sizeof(ctx_tbl), + &ctx_tbl, &out_size); + if (err || !out_size || ctx_tbl.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", + err, ctx_tbl.status, out_size); + return -EINVAL; + } + + rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); + rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); + rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, + TCP_IPV6_EXT); + rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); + + return 0; +} + +int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_template_key temp_key = {0}; + u16 out_size = sizeof(temp_key); + int err; + + if (!hwdev || !temp) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &temp_key.func_id); + if (err) + return err; + + temp_key.template_id = (u8)tmpl_idx; + memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, + &temp_key, sizeof(temp_key), + &temp_key, &out_size); + if (err || !out_size || temp_key.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n", + err, temp_key.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_template_key temp_key = {0}; + u16 out_size = sizeof(temp_key); + int err; + + if (!hwdev || !temp) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &temp_key.func_id); + if (err) + return err; + + temp_key.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, + &temp_key, sizeof(temp_key), + &temp_key, &out_size); + if (err || !out_size || temp_key.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x\n", + err, temp_key.status, out_size); + return -EINVAL; + } + + memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE); + + return 0; +} + +int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_engine_type hash_type = {0}; + u16 out_size = sizeof(hash_type); + int err; + + if (!hwdev || !type) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &hash_type.func_id); + if (err) + return err; + + hash_type.template_id = tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err || !out_size || hash_type.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n", + err, hash_type.status, out_size); + return -EINVAL; + } + + *type = hash_type.hash_engine; + return 0; +} + +int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type) +{ + struct hinic_hwdev 
*nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_engine_type hash_type = {0}; + u16 out_size = sizeof(hash_type); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &hash_type.func_id); + if (err) + return err; + + hash_type.hash_engine = type; + hash_type.template_id = tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err || !out_size || hash_type.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n", + err, hash_type.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct nic_rss_indirect_tbl *indir_tbl; + struct hinic_cmd_buf *cmd_buf; + u32 i; + u32 *temp; + u32 indir_size; + u64 out_param; + int err; + + if (!hwdev || !indir_table) + return -EINVAL; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + + indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; + indir_tbl->group_index = cpu_to_be32(tmpl_idx); + + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { + indir_tbl->entry[i] = (u8)(*(indir_table + i)); + + if (0x3 == (i & 0x3)) { + temp = (u32 *)&indir_tbl->entry[i - 3]; + *temp = cpu_to_be32(*temp); + } + } + + /* cfg the rss indirect table by command queue */ + indir_size = HINIC_RSS_INDIR_SIZE / 2; + indir_tbl->offset = 0; + indir_tbl->size = cpu_to_be32(indir_size); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n"); + err = -EFAULT; + goto free_buf; + } + + indir_tbl->offset = cpu_to_be32(indir_size); + indir_tbl->size = cpu_to_be32(indir_size); + memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n"); + err = -EFAULT; + } + +free_buf: + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_indir_table rss_cfg = {0}; + u16 out_size = sizeof(rss_cfg); + int err = 0, i; + + err = hinic_global_func_id_get(hwdev, &rss_cfg.func_id); + if (err) + return err; + + rss_cfg.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, + &rss_cfg, sizeof(rss_cfg), &rss_cfg, + &out_size); + if (err || !out_size || rss_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n", + err, rss_cfg.status, out_size); + return -EINVAL; + } + + hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE); + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) + indir_table[i] = rss_cfg.indir[i]; + + return 0; +} + +int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_config rss_cfg = {0}; + u16 
out_size = sizeof(rss_cfg); + int err; + + /* microcode requires the number of TCs to be a power of 2 */ + if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rss_cfg.func_id); + if (err) + return err; + + rss_cfg.rss_en = rss_en; + rss_cfg.template_id = tmpl_idx; + rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0; + + memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG, + &rss_cfg, sizeof(rss_cfg), + &rss_cfg, &out_size); + if (err || !out_size || rss_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, rss_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats) +{ + struct hinic_port_stats_info stats_info = {0}; + struct hinic_cmd_vport_stats vport_stats = {0}; + u16 out_size = sizeof(vport_stats); + int err; + + err = hinic_global_func_id_get(hwdev, &stats_info.func_id); + if (err) + return err; + + stats_info.stats_version = HINIC_PORT_STATS_VERSION; + stats_info.stats_size = sizeof(vport_stats); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT, + &stats_info, sizeof(stats_info), + &vport_stats, &out_size); + if (err || !out_size || vport_stats.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, vport_stats.status, out_size); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} + +int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats) +{ + struct hinic_port_stats *port_stats; + struct hinic_port_stats_info stats_info = {0}; + u16 out_size = sizeof(*port_stats); + int err; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + stats_info.stats_version = HINIC_PORT_STATS_VERSION; + stats_info.stats_size = sizeof(*port_stats); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS, + &stats_info, sizeof(stats_info), + port_stats, &out_size); + if (err || !out_size || port_stats->status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_stats->status, out_size); + err = -EINVAL; + goto out; + } + + memcpy(stats, &port_stats->stats, sizeof(*stats)); + +out: + kfree(port_stats); + + return err; +} + +int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_version_info up_ver = {0}; + u16 out_size; + int err; + + out_size = sizeof(up_ver); + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, + &up_ver, sizeof(up_ver), &up_ver, + &out_size); + if (err || !out_size || up_ver.status) { + nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", + err, up_ver.status, out_size); + return -EINVAL; + } + + err = snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver); + if (err <= 0 || err >= HINIC_MGMT_VERSION_MAX_LEN) { + nic_err(dev->dev_hdl, + "Failed to snprintf fw version, function returned %d, dest_len %d\n", + err, HINIC_MGMT_VERSION_MAX_LEN); + return -EINVAL; + } + + return 0; +} + +int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_version_info ver_info = {0}; + u16 out_size = sizeof(ver_info); + int
err; + + if (!hwdev || !fw_ver) + return -EINVAL; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, + &ver_info, sizeof(ver_info), &ver_info, + &out_size); + if (err || !out_size || ver_info.status) { + nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", + err, ver_info.status, out_size); + return -EINVAL; + } + + memcpy(fw_ver->mgmt_ver, ver_info.ver, HINIC_FW_VERSION_NAME); + + out_size = sizeof(ver_info); + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_BOOT_VERSION, + &ver_info, sizeof(ver_info), &ver_info, + &out_size); + if (err || !out_size || ver_info.status) { + nic_err(dev->dev_hdl, "Failed to get boot version, err: %d, status: 0x%x, out size: 0x%x\n", + err, ver_info.status, out_size); + return -EINVAL; + } + + memcpy(fw_ver->boot_ver, ver_info.ver, HINIC_FW_VERSION_NAME); + + out_size = sizeof(ver_info); + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_GET_MICROCODE_VERSION, + &ver_info, sizeof(ver_info), &ver_info, + &out_size); + if (err || !out_size || ver_info.status) { + nic_err(dev->dev_hdl, "Failed to get microcode version, err: %d, status: 0x%x, out size: 0x%x\n", + err, ver_info.status, out_size); + return -EINVAL; + } + + memcpy(fw_ver->microcode_ver, ver_info.ver, HINIC_FW_VERSION_NAME); + + return 0; +} +EXPORT_SYMBOL(hinic_get_fw_version); + +int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_template_mgmt template_mgmt = {0}; + u16 out_size = sizeof(template_mgmt); + int err; + + if (!hwdev || !tmpl_idx) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &template_mgmt.func_id); + if (err) + return err; + + template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, + &template_mgmt, sizeof(template_mgmt), + &template_mgmt, &out_size); + if (err || !out_size || template_mgmt.status) { + if (template_mgmt.status == HINIC_MGMT_STATUS_ERR_FULL) { + nic_warn(nic_hwdev->dev_hdl, "Failed to alloc rss template, table is full\n"); + return -ENOSPC; + } + nic_err(nic_hwdev->dev_hdl, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n", + err, template_mgmt.status, out_size); + return -EINVAL; + } + + *tmpl_idx = template_mgmt.template_id; + + return 0; +} + +int hinic_rss_template_free(void *hwdev, u8 tmpl_idx) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_template_mgmt template_mgmt = {0}; + u16 out_size = sizeof(template_mgmt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &template_mgmt.func_id); + if (err) + return err; + + template_mgmt.template_id = tmpl_idx; + template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, + &template_mgmt, sizeof(template_mgmt), + &template_mgmt, &out_size); + if (err || !out_size || template_mgmt.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n", + err, template_mgmt.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_port_funcs_state(void *hwdev, bool enable) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + struct hinic_port_funcs_state state = {0}; + u16 out_size = sizeof(state); + int err = 0; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &state.func_id); + if (err) + return err; + + state.drop_en = enable ?
0 : 1; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_FUNCS_STATE, + &state, sizeof(state), &state, &out_size); + if (err || !out_size || state.status) { + nic_err(dev->dev_hdl, "Failed to %s all functions in port, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "enable" : "disable", err, state.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_reset_port_link_cfg(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_reset_link_cfg reset_cfg = {0}; + u16 out_size = sizeof(reset_cfg); + int err; + + err = hinic_global_func_id_get(hwdev, &reset_cfg.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG, + &reset_cfg, sizeof(reset_cfg), + &reset_cfg, &out_size); + if (err || !out_size || reset_cfg.status) { + nic_err(dev->dev_hdl, "Failed to reset port link configure, err: %d, status: 0x%x, out size: 0x%x\n", + err, reset_cfg.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac) +{ + struct hinic_nic_io *nic_io; + + if (!hwdev || !mac) + return -EINVAL; + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + memcpy(nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].vf_mac_addr, mac, + ETH_ALEN); + + return 0; +} + +static int hinic_change_vf_mtu_msg_handler(struct hinic_hwdev *hwdev, u16 vf_id, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int err; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_CHANGE_MTU, buf_in, in_size, + buf_out, out_size, 0); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set VF %u mtu\n", vf_id); + return err; + } + + return 0; +} + +static bool is_ether_addr_zero(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} + +static int hinic_get_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_set *mac_info = buf_out; + int err; + + if ((nic_io->hwdev->func_mode == FUNC_MOD_MULTI_BM_SLAVE) || + (nic_io->hwdev->func_mode == FUNC_MOD_MULTI_VM_SLAVE) || + (hinic_support_ovs(nic_io->hwdev, NULL))) { + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_MAC, buf_in, + in_size, buf_out, out_size, 0); + + if (!err) { + if (is_ether_addr_zero(&mac_info->mac[0])) + memcpy(mac_info->mac, + vf_info->vf_mac_addr, ETH_ALEN); + } + return err; + } + + memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN); + mac_info->status = 0; + *out_size = sizeof(*mac_info); + + return 0; +} + +static int hinic_set_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_set *mac_in = buf_in; + struct hinic_port_mac_set *mac_out = buf_out; + int err; + + if (vf_info->pf_set_mac && !(vf_info->trust) && + is_valid_ether_addr(mac_in->mac)) { + nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF %d MAC address\n", + HW_VF_ID_TO_OS(vf)); + mac_out->status = HINIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_MAC, buf_in, in_size, + buf_out, out_size, 0); + if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { + nic_err(nic_io->hwdev->dev_hdl, 
"Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size); + return -EFAULT; + } + + return err; +} + +static int hinic_del_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_set *mac_in = buf_in; + struct hinic_port_mac_set *mac_out = buf_out; + int err; + + if (vf_info->pf_set_mac && !(vf_info->trust) && + is_valid_ether_addr(mac_in->mac) && + !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) { + nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac.\n"); + mac_out->status = HINIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_DEL_MAC, buf_in, in_size, + buf_out, out_size, 0); + if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size); + return -EFAULT; + } + + return err; +} + +static int hinic_update_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_update *mac_in = buf_in; + struct hinic_port_mac_update *mac_out = buf_out; + int err; + + if (!is_valid_ether_addr(mac_in->new_mac)) { + nic_err(nic_io->hwdev->dev_hdl, "Update VF MAC is invalid.\n"); + return -EINVAL; + } + + if (vf_info->pf_set_mac && !(vf_info->trust)) { + nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac.\n"); + mac_out->status = HINIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_UPDATE_MAC, buf_in, + in_size, buf_out, out_size, 0); + if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { + nic_warn(nic_io->hwdev->dev_hdl, "Failed to update VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size); + return -EFAULT; + } + + return err; +} + +static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid, + u8 qos, int vf_id) +{ + struct hinic_vf_vlan_config vf_vlan = {0}; + u8 cmd; + u16 out_size = sizeof(vf_vlan); + int err; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + vf_vlan.vlan_id = vid; + vf_vlan.qos = qos; + + if (add) + cmd = HINIC_PORT_CMD_SET_VF_VLAN; + else + cmd = HINIC_PORT_CMD_CLR_VF_VLAN; + + err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vf_vlan, sizeof(vf_vlan), + &vf_vlan, &out_size); + if (err || !out_size || vf_vlan.status) { + nic_err(hwdev->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id) +{ + struct vf_data_storage *vf_info; + u16 func_id, vlan_id; + int err = 0; + + vf_info = hwdev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (vf_info->pf_set_mac) { + func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + if (FW_SUPPORT_MAC_REUSE_FUNC(hwdev)) { + 
vlan_id = vf_info->pf_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + } else { + vlan_id = 0; + } + + err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id, + func_id); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set VF %d MAC\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } + if (hinic_vf_info_vlanprio(hwdev, vf_id)) { + err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan, + vf_info->pf_qos, vf_id); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to add VF %d VLAN_QOS\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } + + if (vf_info->max_rate) { + err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate, + vf_info->min_rate); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set VF %d max rate %d, min rate %d\n", + HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, + vf_info->min_rate); + return err; + } + } + + return 0; +} + +static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id, + void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + struct hinic_register_vf *register_info = buf_out; + int err; + + if (vf_id > nic_io->max_vfs) { + nic_err(hw_dev->dev_hdl, "Register VF id %d exceeds limit [0-%d]\n", + HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs)); + register_info->status = EFAULT; + return -EFAULT; + } + + *out_size = sizeof(*register_info); + err = hinic_init_vf_config(hw_dev, vf_id); + if (err) { + register_info->status = EFAULT; + return err; + } + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; + + return 0; +} + +void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + + if (vf_id > nic_io->max_vfs) + return; + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false; +} + +static void hinic_get_vf_link_status_msg_handler(struct hinic_nic_io *nic_io, + u16 vf_id, void *buf_out, + u16 *out_size) +{ + struct vf_data_storage *vf_infos = nic_io->vf_infos; + struct hinic_get_link *get_link = buf_out; + bool link_forced, link_up; + + link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; + link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; + + if (link_forced) + get_link->link_status = link_up ?
+ HINIC_LINK_UP : HINIC_LINK_DOWN; + else + get_link->link_status = nic_io->link_status; + + get_link->status = 0; + *out_size = sizeof(*get_link); +} + +static void hinic_get_vf_cos_msg_handler(struct hinic_nic_io *nic_io, + u16 vf_id, void *buf_out, + u16 *out_size) +{ + struct hinic_vf_dcb_state *dcb_state = buf_out; + + memcpy(&dcb_state->state, &nic_io->dcb_state, + sizeof(nic_io->dcb_state)); + + dcb_state->status = 0; + *out_size = sizeof(*dcb_state); +} + +/* pf receive message from vf */ +int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + u8 size = sizeof(nic_cmd_support_vf) / sizeof(nic_cmd_support_vf[0]); + struct hinic_nic_io *nic_io; + int err = 0; + u32 timeout = 0; + + if (!hwdev) + return -EFAULT; + + if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd, + buf_in, in_size, size)) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "PF received invalid VF nic cmd(0x%x), mbox len(0x%x)\n", + cmd, in_size); + err = HINIC_MBOX_VF_CMD_ERROR; + return err; + } + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + switch (cmd) { + case HINIC_PORT_CMD_VF_REGISTER: + err = hinic_register_vf_msg_handler(hwdev, vf_id, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_VF_UNREGISTER: + *out_size = 0; + hinic_unregister_vf_msg_handler(hwdev, vf_id); + break; + + case HINIC_PORT_CMD_CHANGE_MTU: + err = hinic_change_vf_mtu_msg_handler(hwdev, vf_id, buf_in, + in_size, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_GET_MAC: + hinic_get_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, out_size); + break; + + case HINIC_PORT_CMD_SET_MAC: + err = hinic_set_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, out_size); + break; + + case HINIC_PORT_CMD_DEL_MAC: + err = hinic_del_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, out_size); + break; + + case HINIC_PORT_CMD_UPDATE_MAC: + err = hinic_update_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_GET_LINK_STATE: + hinic_get_vf_link_status_msg_handler(nic_io, vf_id, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_GET_VF_COS: + hinic_get_vf_cos_msg_handler(nic_io, vf_id, buf_out, out_size); + break; + + default: + /* pass through */ + if (cmd == HINIC_PORT_CMD_SET_VPORT_ENABLE) + timeout = SET_VPORT_MGMT_TIMEOUT; + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + cmd, buf_in, in_size, + buf_out, out_size, timeout); + + break; + } + + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + nic_err(nic_io->hwdev->dev_hdl, "PF failed to process VF L2NIC cmd: %d, err: %d\n", + cmd, err); + return err; +} + +static int hinic_init_vf_infos(struct hinic_nic_io *nic_io, u16 vf_id) +{ + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u8 vf_link_state; + + if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) { + nic_warn(nic_io->hwdev->dev_hdl, "Module parameter set_vf_link_state value %d is out of range, resetting to %d\n", + set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO); + set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO; + } + + vf_link_state = hinic_support_ovs(nic_io->hwdev, NULL) ?
+ HINIC_IFLA_VF_LINK_STATE_ENABLE : set_vf_link_state; + + if (FUNC_FORCE_LINK_UP(nic_io->hwdev)) + vf_link_state = HINIC_IFLA_VF_LINK_STATE_ENABLE; + + switch (vf_link_state) { + case HINIC_IFLA_VF_LINK_STATE_AUTO: + vf_infos[vf_id].link_forced = false; + break; + case HINIC_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[vf_id].link_forced = true; + vf_infos[vf_id].link_up = true; + break; + case HINIC_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[vf_id].link_forced = true; + vf_infos[vf_id].link_up = false; + break; + default: + nic_err(nic_io->hwdev->dev_hdl, "Input parameter set_vf_link_state error: %d\n", + vf_link_state); + return -EINVAL; + } + + return 0; +} + +int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 i, func_idx; + int err; + + /* the VF uses 256K as its default wq page size and cannot change it */ + for (i = start_vf_id; i <= end_vf_id; i++) { + func_idx = hinic_glb_pf_vf_offset(hwdev) + i; + err = hinic_set_wq_page_size(hwdev, func_idx, + HINIC_DEFAULT_WQ_PAGE_SIZE); + if (err) + return err; + } + + return 0; +} + +int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 func_idx, idx; + + for (idx = start_vf_id; idx <= end_vf_id; idx++) { + func_idx = hinic_glb_pf_vf_offset(hwdev) + idx; + hinic_set_wq_page_size(hwdev, func_idx, HINIC_HW_WQ_PAGE_SIZE); + + hinic_clear_vf_infos(hwdev, idx); + } + + return 0; +} + +int hinic_vf_func_init(struct hinic_hwdev *hwdev) +{ + struct hinic_nic_io *nic_io; + int err = 0; + struct hinic_register_vf register_info = {0}; + u32 size; + u16 i, out_size = sizeof(register_info); + + hwdev->nic_io = kzalloc(sizeof(*hwdev->nic_io), GFP_KERNEL); + if (!hwdev->nic_io) + return -ENOMEM; + + nic_io = hwdev->nic_io; + nic_io->hwdev = hwdev; + + sema_init(&nic_io->nic_cfg.cfg_lock, 1); + + if (hinic_func_type(hwdev) == TYPE_VF) { + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_VF_REGISTER, + &register_info, sizeof(register_info), + &register_info, &out_size, 0); + if (err || register_info.status || !out_size) { + nic_err(hwdev->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", + err, register_info.status, out_size); + hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); + err = -EIO; + goto out_free_nic_io; + } + } else { + nic_io->max_vfs = hinic_func_max_vf(hwdev); + size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs; + if (size != 0) { + nic_io->vf_infos = kzalloc(size, GFP_KERNEL); + if (!nic_io->vf_infos) { + err = -ENOMEM; + goto out_free_nic_io; + } + + for (i = 0; i < nic_io->max_vfs; i++) { + err = hinic_init_vf_infos(nic_io, i); + if (err) + goto init_vf_infos_err; + } + + err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC, + nic_pf_mbox_handler); + if (err) + goto register_pf_mbox_cb_err; + } + } + + return 0; + +register_pf_mbox_cb_err: +init_vf_infos_err: + kfree(nic_io->vf_infos); + +out_free_nic_io: + sema_deinit(&hwdev->nic_io->nic_cfg.cfg_lock); + kfree(hwdev->nic_io); + hwdev->nic_io = NULL; + + return err; +} + +void hinic_vf_func_free(struct hinic_hwdev *hwdev) +{ + struct hinic_register_vf unregister = {0}; + u16 out_size = sizeof(unregister); + int err; + + if (hinic_func_type(hwdev) == TYPE_VF) { + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_VF_UNREGISTER, + &unregister, sizeof(unregister), + &unregister, &out_size, 0); + if (err || !out_size || unregister.status) + nic_err(hwdev->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", + err, unregister.status, out_size); + } else { + if (hwdev->nic_io->vf_infos) {
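+ /* PF side: unhook the PF mailbox callback before freeing + * vf_infos so no VF message can be dispatched against freed + * state. + */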
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_register_vf unregister = {0};
+	u16 out_size = sizeof(unregister);
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+				       HINIC_PORT_CMD_VF_UNREGISTER,
+				       &unregister, sizeof(unregister),
+				       &unregister, &out_size, 0);
+		if (err || !out_size || unregister.status)
+			nic_err(hwdev->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+				err, unregister.status, out_size);
+	} else {
+		if (hwdev->nic_io->vf_infos) {
+			hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+			kfree(hwdev->nic_io->vf_infos);
+		}
+	}
+
+	sema_deinit(&hwdev->nic_io->nic_cfg.cfg_lock);
+
+	kfree(hwdev->nic_io);
+	hwdev->nic_io = NULL;
+}
+
+/* This function must only be called by hinic_ndo_set_vf_mac; other callers
+ * are not permitted.
+ */
+int hinic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_nic_io *nic_io = hw_dev->nic_io;
+	struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+	u16 func_id;
+	int err;
+
+	/* duplicate request, so just return success */
+	if (vf_info->pf_set_mac &&
+	    !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+		return 0;
+
+	vf_info->pf_set_mac = true;
+
+	func_id = hinic_glb_pf_vf_offset(hw_dev) + vf;
+	err = hinic_update_mac(hw_dev, vf_info->vf_mac_addr,
+			       mac_addr, 0, func_id);
+	if (err) {
+		vf_info->pf_set_mac = false;
+		return err;
+	}
+
+	memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+
+	return 0;
+}
+
+int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_nic_io *nic_io = hw_dev->nic_io;
+	int err;
+
+	err = hinic_set_vf_vlan(hw_dev, true, vlan, qos, vf_id);
+	if (err)
+		return err;
+
+	nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+	nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+	nic_info(hw_dev->dev_hdl, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+		 vlan, qos, HW_VF_ID_TO_OS(vf_id));
+	return 0;
+}
+
+int hinic_kill_vf_vlan(void *hwdev, int vf_id)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_nic_io *nic_io = hw_dev->nic_io;
+	int err;
+
+	err = hinic_set_vf_vlan(hw_dev, false,
+				nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+				nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
+				vf_id);
+	if (err)
+		return err;
+
+	nic_info(hw_dev->dev_hdl, "Removed VLAN %d on VF %d\n",
+		 nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+		 HW_VF_ID_TO_OS(vf_id));
+
+	nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+	nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+	return 0;
+}
+
+u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	u16 pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+	u8 pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+	u16 vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+	return vlanprio;
+}
+
+bool hinic_vf_is_registered(void *hwdev, u16 vf_id)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+	return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered;
+}
+
+void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+	struct vf_data_storage *vfinfo;
+
+	vfinfo = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+	ivi->vf = HW_VF_ID_TO_OS(vf_id);
+	memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+	ivi->vlan = vfinfo->pf_vlan;
+	ivi->qos = vfinfo->pf_qos;
+
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+	ivi->spoofchk = vfinfo->spoofchk;
+#endif
+
+#ifdef HAVE_NDO_SET_VF_TRUST
+	ivi->trusted = vfinfo->trust;
+#endif
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+	ivi->max_tx_rate = vfinfo->max_rate;
+	ivi->min_tx_rate = vfinfo->min_rate;
+#else
+	ivi->tx_rate = vfinfo->max_rate;
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	if (!vfinfo->link_forced)
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if
(vfinfo->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +} + +void hinic_clear_vf_infos(void *hwdev, u16 vf_id) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct vf_data_storage *vf_infos; + u16 func_id; + + func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + vf_infos = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (vf_infos->pf_set_mac) + hinic_del_mac(hwdev, vf_infos->vf_mac_addr, 0, func_id); + + if (hinic_vf_info_vlanprio(hwdev, vf_id)) + hinic_kill_vf_vlan(hwdev, vf_id); + + if (vf_infos->max_rate) + hinic_set_vf_tx_rate(hwdev, vf_id, 0, 0); + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + if (vf_infos->spoofchk) + hinic_set_vf_spoofchk(hwdev, vf_id, false); +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST + if (vf_infos->trust) + hinic_set_vf_trust(hwdev, vf_id, false); +#endif + + memset(vf_infos, 0, sizeof(*vf_infos)); + /* set vf_infos to default */ + hinic_init_vf_infos(hw_dev->nic_io, HW_VF_ID_TO_OS(vf_id)); +} + +static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id, + u8 link_status) +{ + struct hinic_port_link_status link = {0}; + struct vf_data_storage *vf_infos = hwdev->nic_io->vf_infos; + u16 out_size = sizeof(link); + int err; + + if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { + link.link = link_status; + link.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC, + vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT, + &link, sizeof(link), + &link, &out_size, 0); + if (err || !out_size || link.status) + nic_err(hwdev->dev_hdl, + "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + link.status, out_size); + } +} + +/* send link change event mbox msg to active vfs under the pf */ +void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link_status) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + u16 i; + + nic_io->link_status = link_status; + for (i = 1; i <= nic_io->max_vfs; i++) { + if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) + hinic_notify_vf_link_status(nic_io->hwdev, i, + link_status); + } +} + +void hinic_save_pf_link_status(void *hwdev, u8 link_status) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + nic_io->link_status = link_status; +} + +int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u8 link_status = 0; + + switch (link) { + case HINIC_IFLA_VF_LINK_STATE_AUTO: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ? 
+ true : false; + link_status = nic_io->link_status; + break; + case HINIC_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; + link_status = HINIC_LINK_UP; + break; + case HINIC_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; + link_status = HINIC_LINK_DOWN; + break; + default: + return -EINVAL; + } + + /* Notify the VF of its new link state */ + hinic_notify_vf_link_status(hwdev, vf_id, link_status); + + return 0; +} + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = NULL; + struct hinic_spoofchk_set spoofchk_cfg = {0}; + struct vf_data_storage *vf_infos = NULL; + u16 out_size = sizeof(spoofchk_cfg); + int err = 0; + + if (!hwdev) + return -EINVAL; + + nic_io = hw_dev->nic_io; + vf_infos = nic_io->vf_infos; + + spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + spoofchk_cfg.state = spoofchk ? 1 : 0; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_ENABLE_SPOOFCHK, + &spoofchk_cfg, + sizeof(spoofchk_cfg), &spoofchk_cfg, + &out_size, 0); + if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if (err || !out_size || spoofchk_cfg.status) { + nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status, + out_size); + err = -EINVAL; + } + + vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; + + return err; +} +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +int hinic_set_vf_trust(void *hwdev, u16 vf_id, bool trust) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = NULL; + struct vf_data_storage *vf_infos = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hw_dev->nic_io; + vf_infos = nic_io->vf_infos; + vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; + + return 0; +} +#endif + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +bool hinic_vf_info_spoofchk(void *hwdev, int vf_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + bool spoofchk = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; + + return spoofchk; +} +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +bool hinic_vf_info_trust(void *hwdev, int vf_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + bool trust = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; + + return trust; +} +#endif + +static int hinic_set_vf_rate_limit(void *hwdev, u16 vf_id, u32 tx_rate) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + struct hinic_tx_rate_cfg rate_cfg = {0}; + u16 out_size = sizeof(rate_cfg); + int err; + + rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + rate_cfg.tx_rate = tx_rate; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg, + sizeof(rate_cfg), &rate_cfg, + &out_size, 0); + if (err || !out_size || rate_cfg.status) { + nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status, + out_size); + if (rate_cfg.status) + return rate_cfg.status; + + return -EIO; + } + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0; + + return 0; +} + +static int hinic_set_vf_tx_rate_max_min(void 
*hwdev, u16 vf_id,
+					 u32 max_rate, u32 min_rate)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_nic_io *nic_io = hw_dev->nic_io;
+	struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+	u16 out_size = sizeof(rate_cfg);
+	int err;
+
+	rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+	rate_cfg.max_rate = max_rate;
+	rate_cfg.min_rate = min_rate;
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+				     &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+				     &out_size, 0);
+	if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     rate_cfg.status) || err || !out_size) {
+		nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+			HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+			rate_cfg.status, out_size);
+		return -EIO;
+	}
+
+	if (!rate_cfg.status) {
+		nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+		nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+	}
+
+	return rate_cfg.status;
+}
+
+int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	int err;
+
+	err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+	if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+		return err;
+
+	if (min_rate) {
+		nic_err(hw_dev->dev_hdl, "Current firmware doesn't support setting a min tx rate\n");
+		return -EINVAL;
+	}
+
+	nic_info(hw_dev->dev_hdl, "Current firmware doesn't support setting a min tx rate, forcing min_tx_rate = max_tx_rate\n");
+
+	return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
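hinic_set_vf_tx_rate() above is shaped to sit behind the kernel's ndo_set_vf_rate hook: it first tries the max/min firmware command and, on HINIC_MGMT_CMD_UNSUPPORTED, falls back to the plain rate limiter (max cap only, min forced to zero). A hedged sketch of the netdev-side wrapper, whose actual form lives in the driver's ndo code:

/* Hypothetical ndo wrapper; the driver's real handler may add extra
 * validation, e.g. against the current port speed.
 */
static int example_ndo_set_vf_rate(struct net_device *netdev, int vf,
				   int min_tx_rate, int max_tx_rate)
{
	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);

	if (min_tx_rate > max_tx_rate)
		return -EINVAL;

	/* ndo hands over an OS (0-based) VF index; firmware wants 1-based */
	return hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
				    (u32)max_tx_rate, (u32)min_tx_rate);
}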
+
+int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_nic_io *nic_io;
+	struct vf_data_storage *vf_infos;
+	struct hinic_vf_dcb_state vf_dcb = {0};
+	u16 vf_id, out_size = 0;
+	int err;
+
+	if (!hwdev || !dcb_state || !hw_dev->nic_io)
+		return -EINVAL;
+
+	nic_io = hw_dev->nic_io;
+	if (!memcmp(&nic_io->dcb_state, dcb_state, sizeof(nic_io->dcb_state)))
+		return 0;
+
+	memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state));
+	/* Save in the SDK; VFs will pick the dcb state up when probing */
+	hinic_save_dcb_state(hwdev, dcb_state);
+
+	/* Notify stateful modules in the PF, then notify all VFs */
+	hinic_notify_dcb_state_event(hwdev, dcb_state);
+
+	/* No VFs are supported, so there is nothing to notify */
+	if (!nic_io->vf_infos)
+		return 0;
+
+	vf_infos = nic_io->vf_infos;
+	for (vf_id = 0; vf_id < nic_io->max_vfs; vf_id++) {
+		if (vf_infos[vf_id].registered) {
+			vf_dcb.status = 0;
+			out_size = sizeof(vf_dcb);
+			err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+					       OS_VF_ID_TO_HW(vf_id),
+					       HINIC_PORT_CMD_SET_VF_COS,
+					       &vf_dcb, sizeof(vf_dcb), &vf_dcb,
+					       &out_size, 0);
+			if (err || vf_dcb.status || !out_size)
+				nic_err(hw_dev->dev_hdl,
+					"Failed to notify dcb state to VF %d, err: %d, status: 0x%x, out size: 0x%x\n",
+					vf_id, err, vf_dcb.status, out_size);
+		}
+	}
+
+	return 0;
+}
+
+int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev || !dcb_state)
+		return -EINVAL;
+
+	nic_io = hw_dev->nic_io;
+	memcpy(dcb_state, &nic_io->dcb_state, sizeof(*dcb_state));
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_dcb_state);
+
+int hinic_save_dcb_state(struct hinic_hwdev *hwdev,
+			 struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev || !dcb_state)
+		return -EINVAL;
+
+	if (!hwdev->nic_io)
+		return -EINVAL;
+
+	nic_io = hwdev->nic_io;
+	memcpy(&nic_io->dcb_state, dcb_state, sizeof(*dcb_state));
+
+	return 0;
+}
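The set/get pair above keeps one cached hinic_dcb_state per device and pushes changes to every registered VF. A minimal read-modify-write caller sketch, assuming struct hinic_dcb_state is the SDK-facing DCB snapshot defined elsewhere in the series:

/* Hypothetical caller; the '...' edit is whatever a dcbnl handler decides. */
static int example_push_dcb(void *hwdev)
{
	struct hinic_dcb_state state;
	int err;

	err = hinic_get_dcb_state(hwdev, &state);	/* cached copy */
	if (err)
		return err;

	/* ... adjust 'state' from dcbnl input ... */

	return hinic_set_dcb_state(hwdev, &state);	/* save + notify VFs */
}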
"Enable" : "Disable")); + + return 0; +} + +int hinic_flush_sq_res(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_clear_sq_resource sq_res = {0}; + u16 out_size = sizeof(sq_res); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &sq_res.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_SQ_RES, + &sq_res, sizeof(sq_res), &sq_res, + &out_size); + if (err || !out_size || sq_res.status) { + nic_err(dev->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n", + err, sq_res.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_flush_sq_res); + +static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level); + +int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg; + int err = 0; + + down(&nic_cfg->cfg_lock); + + /* Enable PFC will disable pause */ + if (nic_cfg->pfc_en) { + err = hinic_dcb_set_hw_pfc(hwdev, nic_cfg->pfc_en, + nic_cfg->pfc_bitmap); + if (err) + nic_err(dev->dev_hdl, "Failed to set pfc\n"); + + } else if (!port_info->autoneg_state || nic_cfg->pause_set) { + nic_cfg->nic_pause.auto_neg = port_info->autoneg_state; + err = hinic_set_hw_pause_info(hwdev, nic_cfg->nic_pause); + if (err) + nic_err(dev->dev_hdl, "Failed to set pause\n"); + } + + if (FUNC_SUPPORT_RATE_LIMIT(hwdev)) { + err = __set_pf_bw(hwdev, port_info->speed); + if (err) + nic_err(dev->dev_hdl, "Failed to set pf bandwidth limit\n"); + } + + up(&nic_cfg->cfg_lock); + + return err; +} + +int hinic_set_super_cqe_state(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_super_cqe super_cqe = {0}; + u16 out_size = sizeof(super_cqe); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &super_cqe.func_id); + if (err) + return err; + + super_cqe.super_cqe_en = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SUPER_CQE, + &super_cqe, sizeof(super_cqe), &super_cqe, + &out_size); + if (err || !out_size || super_cqe.status) { + nic_err(nic_hwdev->dev_hdl, "Can`t %s surper cqe, err: %d, status: 0x%x, out size: 0x%x\n", + (enable ? "enable" : "disable"), err, super_cqe.status, + out_size); + return -EINVAL; + } + + nic_info(nic_hwdev->dev_hdl, "%s super cqe succeed.\n", + (enable ? 
"Enable" : "Disable")); + + return 0; +} + +int hinic_set_port_routine_cmd_report(void *hwdev, bool enable) +{ + struct hinic_port_rt_cmd rt_cmd = { 0 }; + struct hinic_hwdev *dev = hwdev; + u16 out_size = sizeof(rt_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + rt_cmd.pf_id = (u8)hinic_global_func_id(hwdev); + rt_cmd.enable = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_SET_PORT_REPORT, + &rt_cmd, sizeof(rt_cmd), &rt_cmd, + &out_size); + if (rt_cmd.status == HINIC_MGMT_CMD_UNSUPPORTED) { + nic_info(dev->dev_hdl, "Current firmware doesn't support to set port routine command report\n"); + } else if (rt_cmd.status || err || !out_size) { + nic_err(dev->dev_hdl, + "Failed to set port routine command report, err: %d, status: 0x%x, out size: 0x%x\n", + err, rt_cmd.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_capture_info cap_info = {0}; + u16 out_size = sizeof(cap_info); + int err; + + if (!hwdev) + return -EINVAL; + + cap_info.op_type = 2; /* function capture */ + cap_info.is_en_trx = cap_en; + cap_info.func_id = func_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_UCAPTURE_OPT, + &cap_info, sizeof(cap_info), + &cap_info, &out_size); + if (err || !out_size || cap_info.status) { + nic_err(dev->dev_hdl, + "Failed to set function capture attr, err: %d, status: 0x%x, out size: 0x%x\n", + err, cap_info.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_func_capture_en); + +int hinic_force_drop_tx_pkt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_force_pkt_drop pkt_drop = {0}; + u16 out_size = sizeof(pkt_drop); + int err; + + if (!hwdev) + return -EINVAL; + + pkt_drop.port = hinic_physical_port_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_FORCE_PKT_DROP, + &pkt_drop, sizeof(pkt_drop), + &pkt_drop, &out_size); + if ((pkt_drop.status != HINIC_MGMT_CMD_UNSUPPORTED && + pkt_drop.status) || err || !out_size) { + nic_err(dev->dev_hdl, + "Failed to set force tx packets drop, err: %d, status: 0x%x, out size: 0x%x\n", + err, pkt_drop.status, out_size); + return -EFAULT; + } + + return pkt_drop.status; +} + +u32 hw_speed_convert[LINK_SPEED_LEVELS] = { + 10, 100, 1000, 10000, + 25000, 40000, 100000 +}; + +static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level) +{ + struct hinic_nic_cfg *nic_cfg = &hwdev->nic_io->nic_cfg; + struct hinic_tx_rate_cfg rate_cfg = {0}; + u32 pf_bw = 0; + u16 out_size = sizeof(rate_cfg); + int err; + + if (speed_level >= LINK_SPEED_LEVELS) { + nic_err(hwdev->dev_hdl, "Invalid speed level: %d\n", + speed_level); + return -EINVAL; + } + + if (nic_cfg->pf_bw_limit == 100) { + pf_bw = 0; /* unlimit bandwidth */ + } else { + pf_bw = (hw_speed_convert[speed_level] / 100) * + nic_cfg->pf_bw_limit; + /* bandwidth limit is very small but not unlimit in this case */ + if (pf_bw == 0) + pf_bw = 1; + } + + err = hinic_global_func_id_get(hwdev, &rate_cfg.func_id); + if (err) + return err; + + rate_cfg.tx_rate = pf_bw; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg, + sizeof(rate_cfg), &rate_cfg, + &out_size, 0); + if (err || !out_size || rate_cfg.status) { + nic_err(hwdev->dev_hdl, "Failed to set rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", + pf_bw, err, rate_cfg.status, out_size); + if (rate_cfg.status) + return rate_cfg.status; + + return -EIO; + } + + return 0; +} + +int 
+
+int hinic_set_super_cqe_state(void *hwdev, bool enable)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_super_cqe super_cqe = {0};
+	u16 out_size = sizeof(super_cqe);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &super_cqe.func_id);
+	if (err)
+		return err;
+
+	super_cqe.super_cqe_en = enable;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SUPER_CQE,
+				     &super_cqe, sizeof(super_cqe), &super_cqe,
+				     &out_size);
+	if (err || !out_size || super_cqe.status) {
+		nic_err(nic_hwdev->dev_hdl, "Can't %s super cqe, err: %d, status: 0x%x, out size: 0x%x\n",
+			(enable ? "enable" : "disable"), err, super_cqe.status,
+			out_size);
+		return -EINVAL;
+	}
+
+	nic_info(nic_hwdev->dev_hdl, "%s super cqe succeeded.\n",
+		 (enable ? "Enable" : "Disable"));
+
+	return 0;
+}
+
+int hinic_set_port_routine_cmd_report(void *hwdev, bool enable)
+{
+	struct hinic_port_rt_cmd rt_cmd = { 0 };
+	struct hinic_hwdev *dev = hwdev;
+	u16 out_size = sizeof(rt_cmd);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	rt_cmd.pf_id = (u8)hinic_global_func_id(hwdev);
+	rt_cmd.enable = enable;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev,
+				     HINIC_PORT_CMD_SET_PORT_REPORT,
+				     &rt_cmd, sizeof(rt_cmd), &rt_cmd,
+				     &out_size);
+	if (rt_cmd.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		nic_info(dev->dev_hdl, "Current firmware doesn't support setting the port routine command report\n");
+	} else if (rt_cmd.status || err || !out_size) {
+		nic_err(dev->dev_hdl,
+			"Failed to set port routine command report, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, rt_cmd.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_capture_info cap_info = {0};
+	u16 out_size = sizeof(cap_info);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	cap_info.op_type = 2; /* function capture */
+	cap_info.is_en_trx = cap_en;
+	cap_info.func_id = func_id;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_UCAPTURE_OPT,
+				     &cap_info, sizeof(cap_info),
+				     &cap_info, &out_size);
+	if (err || !out_size || cap_info.status) {
+		nic_err(dev->dev_hdl,
+			"Failed to set function capture attr, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, cap_info.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_func_capture_en);
+
+int hinic_force_drop_tx_pkt(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_force_pkt_drop pkt_drop = {0};
+	u16 out_size = sizeof(pkt_drop);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	pkt_drop.port = hinic_physical_port_id(hwdev);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_FORCE_PKT_DROP,
+				     &pkt_drop, sizeof(pkt_drop),
+				     &pkt_drop, &out_size);
+	if ((pkt_drop.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     pkt_drop.status) || err || !out_size) {
+		nic_err(dev->dev_hdl,
+			"Failed to set force tx packets drop, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, pkt_drop.status, out_size);
+		return -EFAULT;
+	}
+
+	return pkt_drop.status;
+}
+
+u32 hw_speed_convert[LINK_SPEED_LEVELS] = {
+	10, 100, 1000, 10000,
+	25000, 40000, 100000
+};
+
+static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level)
+{
+	struct hinic_nic_cfg *nic_cfg = &hwdev->nic_io->nic_cfg;
+	struct hinic_tx_rate_cfg rate_cfg = {0};
+	u32 pf_bw = 0;
+	u16 out_size = sizeof(rate_cfg);
+	int err;
+
+	if (speed_level >= LINK_SPEED_LEVELS) {
+		nic_err(hwdev->dev_hdl, "Invalid speed level: %d\n",
+			speed_level);
+		return -EINVAL;
+	}
+
+	if (nic_cfg->pf_bw_limit == 100) {
+		pf_bw = 0;	/* unlimited bandwidth */
+	} else {
+		pf_bw = (hw_speed_convert[speed_level] / 100) *
+			nic_cfg->pf_bw_limit;
+		/* the bandwidth limit is very small but not unlimited here */
+		if (pf_bw == 0)
+			pf_bw = 1;
+	}
+
+	err = hinic_global_func_id_get(hwdev, &rate_cfg.func_id);
+	if (err)
+		return err;
+
+	rate_cfg.tx_rate = pf_bw;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg,
+				     sizeof(rate_cfg), &rate_cfg,
+				     &out_size, 0);
+	if (err || !out_size || rate_cfg.status) {
+		nic_err(hwdev->dev_hdl, "Failed to set rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+			pf_bw, err, rate_cfg.status, out_size);
+		if (rate_cfg.status)
+			return rate_cfg.status;
+
+		return -EIO;
+	}
+
+	return 0;
+}
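The percentage math in __set_pf_bw() is integer-only, so a worked example helps: on a 25GE port (hw_speed_convert[LINK_SPEED_25GB] = 25000) with pf_bw_limit = 40, tx_rate becomes (25000 / 100) * 40 = 10000, i.e. a 10G cap. A compilable stand-alone restatement:

#include <stdio.h>

/* Stand-alone copy of the table and formula from __set_pf_bw(). */
static const unsigned int speed_mbps[] = {
	10, 100, 1000, 10000, 25000, 40000, 100000
};

int main(void)
{
	unsigned int level = 4;		/* LINK_SPEED_25GB */
	unsigned int limit_pct = 40;	/* pf_bw_limit, percent */
	unsigned int bw = (speed_mbps[level] / 100) * limit_pct;

	if (bw == 0)
		bw = 1;			/* tiny but not unlimited */
	printf("tx_rate = %u Mbps\n", bw);	/* prints 10000 */
	return 0;
}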
+
+int hinic_update_pf_bw(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct nic_port_info port_info = {0};
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !(FUNC_SUPPORT_RATE_LIMIT(hwdev)))
+		return 0;
+
+	err = hinic_get_port_info(hwdev, &port_info);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to get port info\n");
+		return -EIO;
+	}
+
+	err = __set_pf_bw(hwdev, port_info.speed);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to set pf bandwidth\n");
+		return err;
+	}
+
+	return 0;
+}
+
+int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_nic_cfg *nic_cfg;
+	u32 old_bw_limit;
+	u8 link_state = 0;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) == TYPE_VF)
+		return 0;
+
+	if (bw_limit > 100) {
+		nic_err(dev->dev_hdl, "Invalid bandwidth: %d\n", bw_limit);
+		return -EINVAL;
+	}
+
+	err = hinic_get_link_state(hwdev, &link_state);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to get link state\n");
+		return -EIO;
+	}
+
+	if (!link_state) {
+		nic_err(dev->dev_hdl, "Link status must be up when setting the pf tx rate\n");
+		return -EINVAL;
+	}
+
+	nic_cfg = &dev->nic_io->nic_cfg;
+	old_bw_limit = nic_cfg->pf_bw_limit;
+	nic_cfg->pf_bw_limit = bw_limit;
+
+	err = hinic_update_pf_bw(hwdev);
+	if (err) {
+		nic_cfg->pf_bw_limit = old_bw_limit;
+		return err;
+	}
+
+	return 0;
+}
+
+/* Make the link status follow the port status */
+int hinic_set_link_status_follow(void *hwdev,
+				 enum hinic_link_follow_status status)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_set_link_follow follow = {0};
+	u16 out_size = sizeof(follow);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) {
+		nic_err(dev->dev_hdl,
+			"Invalid link follow status: %d\n", status);
+		return -EINVAL;
+	}
+
+	err = hinic_global_func_id_get(hwdev, &follow.func_id);
+	if (err)
+		return err;
+
+	follow.follow_status = status;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW,
+				     &follow, sizeof(follow), &follow,
+				     &out_size);
+	if ((follow.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     follow.status) || err || !out_size) {
+		nic_err(dev->dev_hdl,
+			"Failed to set link status follow port status, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, follow.status, out_size);
+		return -EFAULT;
+	}
+
+	return follow.status;
+}
+EXPORT_SYMBOL(hinic_set_link_status_follow);
+
+/* HILINK module */
+
+#define HINIC_MGMT_DEFAULT_SIZE		1
+
+static int __hilink_msg_to_mgmt_sync(void *hwdev, u8 cmd, void *buf_in,
+				     u16 in_size, void *buf_out, u16 *out_size,
+				     u32 timeout)
+{
+	int err;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_HILINK, cmd, buf_in,
+				     in_size, buf_out, out_size, timeout);
+	if (err)
+		return err;
+
+	if (*out_size == HINIC_MGMT_DEFAULT_SIZE && buf_out)
+		*((u8 *)(buf_out)) = HINIC_MGMT_CMD_UNSUPPORTED;
+
+	return 0;
+}
+
+int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info)
+{
+	struct hinic_hilink_link_info link_info = {0};
+	u16 out_size = sizeof(link_info);
+	int err;
+
+	link_info.port_id = hinic_physical_port_id(hwdev);
+
+	err = __hilink_msg_to_mgmt_sync(hwdev, HINIC_HILINK_CMD_GET_LINK_INFO,
+					&link_info, sizeof(link_info),
+					&link_info, &out_size, 0);
+	if ((link_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     link_info.status) || err || !out_size) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get hilink info, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, link_info.status, out_size);
+		return -EFAULT;
+	}
+
+	if (!link_info.status)
+		memcpy(info, &link_info.info, sizeof(*info));
+	else if (link_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Unsupported command: mod: %d, cmd: %d\n",
+			HINIC_MOD_HILINK, HINIC_HILINK_CMD_GET_LINK_INFO);
+
+	return link_info.status;
+}
+
+int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings)
+{
+	struct hinic_link_ksettings_info info = {0};
+	u16 out_size = sizeof(info);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &info.func_id);
+	if (err)
+		return err;
+
+	info.valid_bitmap = settings->valid_bitmap;
+	info.autoneg = settings->autoneg;
+	info.speed = settings->speed;
+	info.fec = settings->fec;
+
+	err = __hilink_msg_to_mgmt_sync(hwdev,
+					HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+					&info, sizeof(info),
+					&info, &out_size, 0);
+	if ((info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     info.status) || err || !out_size) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, info.status, out_size);
+		return -EFAULT;
+	}
+
+	return info.status;
+}
+
+int hinic_disable_tx_promisc(void *hwdev)
+{
+	struct hinic_promsic_info info = {0};
+	u16 out_size = sizeof(info);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &info.func_id);
+	if (err)
+		return err;
+
+	info.cfg = HINIC_TX_PROMISC_DISABLE;
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_DISABLE_PROMISIC, &info,
+				     sizeof(info), &info, &out_size, 0);
+	if (err || !out_size || info.status) {
+		if (info.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+			nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+				 "Disabling TX promisc is unsupported\n");
+			return 0;
+		}
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to disable multihost promisc, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, info.status, out_size);
+		return -EFAULT;
+	}
+	return 0;
+}
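hinic_get_sfp_type() below returns the first two bytes of the module EEPROM. Byte 0 is the SFF-8024 identifier, which is how ethtool-style code typically tells SFP from QSFP modules; a hedged caller sketch (the identifier constants come from SFF-8024, not from this patch):

#define SFF8024_ID_SFP		0x03	/* SFP/SFP+/SFP28 */
#define SFF8024_ID_QSFP		0x0C
#define SFF8024_ID_QSFP_PLUS	0x0D
#define SFF8024_ID_QSFP28	0x11

/* Hypothetical caller mapping the identifier byte to a module class. */
static const char *example_module_class(void *hwdev)
{
	u8 id, ext_id;

	if (hinic_get_sfp_type(hwdev, &id, &ext_id))
		return "absent or unreadable";

	switch (id) {
	case SFF8024_ID_SFP:
		return "SFP";
	case SFF8024_ID_QSFP:
	case SFF8024_ID_QSFP_PLUS:
	case SFF8024_ID_QSFP28:
		return "QSFP";
	default:
		return "unknown";
	}
}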
+
+
+static bool hinic_if_sfp_absent(void *hwdev)
+{
+	struct card_node *chip_node = ((struct hinic_hwdev *)hwdev)->chip_node;
+	struct hinic_port_routine_cmd *rt_cmd;
+	struct hinic_cmd_get_light_module_abs sfp_abs = {0};
+	u8 port_id = hinic_physical_port_id(hwdev);
+	u16 out_size = sizeof(sfp_abs);
+	int err;
+	bool sfp_abs_valid;
+	bool sfp_abs_status;
+
+	rt_cmd = &chip_node->rt_cmd[port_id];
+	mutex_lock(&chip_node->sfp_mutex);
+	sfp_abs_valid = rt_cmd->up_send_sfp_abs;
+	sfp_abs_status = (bool)rt_cmd->abs.abs_status;
+	if (sfp_abs_valid) {
+		mutex_unlock(&chip_node->sfp_mutex);
+		return sfp_abs_status;
+	}
+	mutex_unlock(&chip_node->sfp_mutex);
+
+	sfp_abs.port_id = port_id;
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SFP_ABS,
+				     &sfp_abs, sizeof(sfp_abs), &sfp_abs,
+				     &out_size);
+	if (sfp_abs.status || err || !out_size) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n",
+			port_id, err, sfp_abs.status, out_size);
+		return true;
+	}
+
+	return ((sfp_abs.abs_status == 0) ? false : true);
+}
+
+int hinic_get_sfp_eeprom(void *hwdev, u8 *data, u16 *len)
+{
+	struct hinic_cmd_get_std_sfp_info sfp_info = {0};
+	u8 port_id;
+	u16 out_size = sizeof(sfp_info);
+	int err;
+
+	if (!hwdev || !data || !len)
+		return -EINVAL;
+
+	port_id = hinic_physical_port_id(hwdev);
+	if (port_id >= HINIC_MAX_PORT_ID)
+		return -EINVAL;
+
+	if (hinic_if_sfp_absent(hwdev))
+		return -ENXIO;
+
+	sfp_info.port_id = port_id;
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO,
+				     &sfp_info, sizeof(sfp_info), &sfp_info,
+				     &out_size);
+	if (sfp_info.status || err || !out_size) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n",
+			port_id, err, sfp_info.status, out_size);
+		return -EIO;
+	}
+
+	*len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE);
+	memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE);
+
+	return 0;
+}
+
+int hinic_get_sfp_type(void *hwdev, u8 *data0, u8 *data1)
+{
+	struct card_node *chip_node = NULL;
+	struct hinic_port_routine_cmd *rt_cmd;
+	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+	u16 len;
+	u8 port_id;
+	int err;
+
+	if (!hwdev || !data0 || !data1)
+		return -EINVAL;
+
+	port_id = hinic_physical_port_id(hwdev);
+	if (port_id >= HINIC_MAX_PORT_ID)
+		return -EINVAL;
+
+	if (hinic_if_sfp_absent(hwdev))
+		return -ENXIO;
+
+	chip_node = ((struct hinic_hwdev *)hwdev)->chip_node;
+	rt_cmd = &chip_node->rt_cmd[port_id];
+	mutex_lock(&chip_node->sfp_mutex);
+	if (rt_cmd->up_send_sfp_info) {
+		*data0 = rt_cmd->sfp_info.sfp_qsfp_info[0];
+		*data1 = rt_cmd->sfp_info.sfp_qsfp_info[1];
+		mutex_unlock(&chip_node->sfp_mutex);
+		return 0;
+	}
+	mutex_unlock(&chip_node->sfp_mutex);
+
+	err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len);
+	if (err)
+		return err;
+
+	*data0 = sfp_data[0];
+	*data1 = sfp_data[1];
+
+	return 0;
+}
\ No newline at end of file
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
new file mode 100644
index 000000000000..f8842b5d5d82
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ * + */ + +#ifndef HINIC_CFG_H +#define HINIC_CFG_H + +#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define FW_SUPPORT_MAC_REUSE 0x1 +#define FW_SUPPORT_MAC_REUSE_FUNC(hwdev) \ + ((hwdev)->fw_support_func_flag & FW_SUPPORT_MAC_REUSE) + +#define HINIC_VLAN_PRIORITY_SHIFT 13 + +#define HINIC_RSS_INDIR_SIZE 256 +#define HINIC_DCB_TC_MAX 0x8 +#define HINIC_DCB_UP_MAX 0x8 +#define HINIC_DCB_COS_MAX 0x8 +#define HINIC_DCB_PG_MAX 0x8 + +#define HINIC_DCB_TSA_TC_SP 2 +#define HINIC_DCB_TSA_TC_DWRR 0 + +#define HINIC_RSS_KEY_SIZE 40 + +#define HINIC_MAX_NUM_RQ 64 + +#define HINIC_MIN_MTU_SIZE 256 +#define HINIC_MAX_JUMBO_FRAME_SIZE 9600 + +#define HINIC_LRO_MAX_WQE_NUM_UPPER 32 +#define HINIC_LRO_MAX_WQE_NUM_LOWER 1 +#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM 4 +#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 8 +#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8 +#define HINIC_LRO_WQE_NUM_PANGEA_DEFAULT 32 + +#define HINIC_LRO_RX_TIMER_UPPER 1024 +#define HINIC_LRO_RX_TIMER_LOWER 1 +#define HINIC_LRO_RX_TIMER_DEFAULT 16 +#define HINIC_LRO_RX_TIMER_DEFAULT_25GE 16 +#define HINIC_LRO_RX_TIMER_DEFAULT_100GE 64 +#define HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE 10 +#define HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE 8 + +#if defined(__aarch64__) +#define HINIC_LOWEST_LATENCY 1 +#define HINIC_RX_RATE_LOW 400000 +#define HINIC_RX_COAL_TIME_LOW 20 +#define HINIC_RX_PENDING_LIMIT_LOW 2 +#define HINIC_RX_RATE_HIGH 1000000 +#define HINIC_RX_COAL_TIME_HIGH 225 +#define HINIC_RX_PENDING_LIMIT_HIGH 50 +#define HINIC_RX_RATE_THRESH 35000 +#define HINIC_TX_RATE_THRESH 35000 +#define HINIC_RX_RATE_LOW_VM 400000 +#define HINIC_RX_PENDING_LIMIT_HIGH_VM 50 +#elif defined(__x86_64__) +#define HINIC_LOWEST_LATENCY 1 +#define HINIC_RX_RATE_LOW 400000 +#define HINIC_RX_COAL_TIME_LOW 16 +#define HINIC_RX_PENDING_LIMIT_LOW 2 +#define HINIC_RX_RATE_HIGH 1000000 +#define HINIC_RX_COAL_TIME_HIGH 225 +#define HINIC_RX_PENDING_LIMIT_HIGH 8 +#define HINIC_RX_RATE_THRESH 50000 +#define HINIC_TX_RATE_THRESH 50000 +#define HINIC_RX_RATE_LOW_VM 100000 +#define HINIC_RX_PENDING_LIMIT_HIGH_VM 87 +#else +#define HINIC_LOWEST_LATENCY 1 +#define HINIC_RX_RATE_LOW 400000 +#define HINIC_RX_COAL_TIME_LOW 16 +#define HINIC_RX_PENDING_LIMIT_LOW 2 +#define HINIC_RX_RATE_HIGH 1000000 +#define HINIC_RX_COAL_TIME_HIGH 225 +#define HINIC_RX_PENDING_LIMIT_HIGH 8 +#define HINIC_RX_RATE_THRESH 50000 +#define HINIC_TX_RATE_THRESH 50000 +#define HINIC_RX_RATE_LOW_VM 100000 +#define HINIC_RX_PENDING_LIMIT_HIGH_VM 87 +#endif /* end of defined(__x86_64__) */ + +enum hinic_board_type { + HINIC_BOARD_UKNOWN = 0, + HINIC_BOARD_10GE = 1, + HINIC_BOARD_25GE = 2, + HINIC_BOARD_40GE = 3, + HINIC_BOARD_100GE = 4, + HINIC_BOARD_PG_TP_10GE = 5, + HINIC_BOARD_PG_SM_25GE = 6, + HINIC_BOARD_PG_100GE = 7, +}; + +enum hinic_os_type { + HINIC_OS_UKNOWN = 0, + HINIC_OS_HUAWEI = 1, + HINIC_OS_NON_HUAWEI = 2, +}; + +enum hinic_cpu_type { + HINIC_CPU_UKNOWN = 0, + HINIC_CPU_X86_GENERIC = 1, + HINIC_CPU_ARM_GENERIC = 2, +}; + +struct hinic_adaptive_rx_cfg { + u32 lowest_lat; + u32 rate_low; + u32 coal_time_low; + u32 pending_limit_low; + u32 rate_high; + u32 coal_time_high; + u32 pending_limit_high; + u32 rate_thresh; +}; + +struct hinic_lro_cfg { + u32 enable; + u32 timer; + u32 buffer_size; +}; + +struct hinic_environment_info { + enum hinic_board_type board; + enum hinic_os_type os; + enum hinic_cpu_type cpu; +}; + +struct hinic_adaptive_cfg { + struct hinic_adaptive_rx_cfg adaptive_rx; + struct hinic_lro_cfg lro; +}; + +enum hinic_rss_hash_type 
{ + HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC_RSS_HASH_ENGINE_TYPE_TOEP, + + HINIC_RSS_HASH_ENGINE_TYPE_MAX, +}; + +struct ifla_vf_info; +struct hinic_dcb_state; + +struct nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +enum nic_media_type { + MEDIA_UNKNOWN = -1, + MEDIA_FIBRE = 0, + MEDIA_COPPER, + MEDIA_BACKPLANE +}; + +enum nic_speed_level { + LINK_SPEED_10MB = 0, + LINK_SPEED_100MB, + LINK_SPEED_1GB, + LINK_SPEED_10GB, + LINK_SPEED_25GB, + LINK_SPEED_40GB, + LINK_SPEED_100GB, + LINK_SPEED_LEVELS, +}; + +enum hinic_link_mode { + HINIC_10GE_BASE_KR = 0, + HINIC_40GE_BASE_KR4 = 1, + HINIC_40GE_BASE_CR4 = 2, + HINIC_100GE_BASE_KR4 = 3, + HINIC_100GE_BASE_CR4 = 4, + HINIC_25GE_BASE_KR_S = 5, + HINIC_25GE_BASE_CR_S = 6, + HINIC_25GE_BASE_KR = 7, + HINIC_25GE_BASE_CR = 8, + HINIC_GE_BASE_KX = 9, + HINIC_LINK_MODE_NUMBERS, + + HINIC_SUPPORTED_UNKNOWN = 0xFFFF, +}; + +enum hinic_port_type { + HINIC_PORT_TP, /* BASET */ + HINIC_PORT_AUI, + HINIC_PORT_MII, + HINIC_PORT_FIBRE, /* OPTICAL */ + HINIC_PORT_BNC, + HINIC_PORT_ELEC, + HINIC_PORT_COPPER, /* PORT_DA */ + HINIC_PORT_AOC, + HINIC_PORT_BACKPLANE, + HINIC_PORT_NONE = 0xEF, + HINIC_PORT_OTHER = 0xFF, +}; + +enum hinic_link_status { + HINIC_LINK_DOWN = 0, + HINIC_LINK_UP +}; + +struct nic_pause_config { + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct nic_lro_info { + u16 func_id; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 lro_timer_en; + u32 lro_period; +}; + +struct nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +struct hinic_vport_stats { + u64 tx_unicast_pkts_vport; + u64 tx_unicast_bytes_vport; + u64 tx_multicast_pkts_vport; + u64 tx_multicast_bytes_vport; + u64 tx_broadcast_pkts_vport; + u64 tx_broadcast_bytes_vport; + + u64 rx_unicast_pkts_vport; + u64 rx_unicast_bytes_vport; + u64 rx_multicast_pkts_vport; + u64 rx_multicast_bytes_vport; + u64 rx_broadcast_pkts_vport; + u64 rx_broadcast_bytes_vport; + + u64 tx_discard_vport; + u64 rx_discard_vport; + u64 tx_err_vport; + u64 rx_err_vport; +}; + +struct hinic_phy_port_stats { + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_bad_pkt_num; + u64 mac_rx_1519_max_good_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_jabber_pkt_num; + + u64 mac_rx_pause_num; + u64 mac_rx_pfc_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 
mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_rx_control_pkt_num; + u64 mac_rx_y1731_pkt_num; + u64 mac_rx_sym_err_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_bad_pkt_num; + u64 mac_tx_1519_max_good_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_jabber_pkt_num; + + u64 mac_tx_pause_num; + u64 mac_tx_pfc_pkt_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_tx_control_pkt_num; + u64 mac_tx_y1731_pkt_num; + u64 mac_tx_1588_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + + u64 mac_rx_higig2_ext_pkt_num; + u64 mac_rx_higig2_message_pkt_num; + u64 mac_rx_higig2_error_pkt_num; + u64 mac_rx_higig2_cpu_ctrl_pkt_num; + u64 mac_rx_higig2_unicast_pkt_num; + u64 mac_rx_higig2_broadcast_pkt_num; + u64 mac_rx_higig2_l2_multicast_pkt_num; + u64 mac_rx_higig2_l3_multicast_pkt_num; + + u64 mac_tx_higig2_message_pkt_num; + u64 mac_tx_higig2_ext_pkt_num; + u64 mac_tx_higig2_cpu_ctrl_pkt_num; + u64 mac_tx_higig2_unicast_pkt_num; + u64 mac_tx_higig2_broadcast_pkt_num; + u64 mac_tx_higig2_l2_multicast_pkt_num; + u64 mac_tx_higig2_l3_multicast_pkt_num; +}; + +enum hinic_rq_filter_type { + HINIC_RQ_FILTER_TYPE_NONE = 0x0, + HINIC_RQ_FILTER_TYPE_MAC_ONLY = (1 << 0), + HINIC_RQ_FILTER_TYPE_VLAN_ONLY = (1 << 1), + HINIC_RQ_FILTER_TYPE_VLANMAC = (1 << 2), + HINIC_RQ_FILTER_TYPE_VXLAN = (1 << 3), + HINIC_RQ_FILTER_TYPE_GENEVE = (1 << 4), +}; + +struct hinic_rq_filter_info { + u16 qid; + u8 filter_type;/* 1: mac, 8: vxlan */ + u8 qflag;/*0:stdq, 1:defq, 2: netq*/ + + u8 mac[ETH_ALEN]; + struct { + u8 inner_mac[ETH_ALEN]; + u32 vni; + } vxlan; +}; + +#define HINIC_MGMT_VERSION_MAX_LEN 32 + +#define HINIC_FW_VERSION_NAME 16 +#define HINIC_FW_VERSION_SECTION_CNT 4 +#define HINIC_FW_VERSION_SECTION_BORDER 0xFF +struct hinic_fw_version { + u8 mgmt_ver[HINIC_FW_VERSION_NAME]; + u8 microcode_ver[HINIC_FW_VERSION_NAME]; + u8 boot_ver[HINIC_FW_VERSION_NAME]; +}; + +enum hinic_valid_link_settings { + HILINK_LINK_SET_SPEED = 0x1, + HILINK_LINK_SET_AUTONEG = 0x2, + HILINK_LINK_SET_FEC = 0x4, +}; + +struct hinic_link_ksettings { + u32 valid_bitmap; + u32 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +enum hinic_link_follow_status { + HINIC_LINK_FOLLOW_DEFAULT, + HINIC_LINK_FOLLOW_PORT, + HINIC_LINK_FOLLOW_SEPARATE, + HINIC_LINK_FOLLOW_STATUS_MAX, +}; + +enum hinic_lro_en_status { + HINIC_LRO_STATUS_DISABLE, + HINIC_LRO_STATUS_ENABLE, + HINIC_LRO_STATUS_UNSET, +}; + +#define HINIC_VLAN_FILTER_EN BIT(0) +#define HINIC_BROADCAST_FILTER_EX_EN BIT(1) + +/* Set mac_vlan table */ +int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 
vlan_id, u16 func_id); + +int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id); + +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, + u16 vlan_id, u16 func_id); +int hinic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); +/* Obtaining the permanent mac */ +int hinic_get_default_mac(void *hwdev, u8 *mac_addr); +/* Check whether the current solution is using this interface, + * the current code does not invoke the sdk interface to set mtu + */ +int hinic_set_port_mtu(void *hwdev, u32 new_mtu); +/* Set vlan leaf table */ +int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); + +int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info); + +int hinic_set_autoneg(void *hwdev, bool enable); + +int hinic_force_port_relink(void *hwdev); + +int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported, + enum hinic_link_mode *advertised); + +int hinic_set_port_link_status(void *hwdev, bool enable); + +int hinic_set_speed(void *hwdev, enum nic_speed_level speed); +/* SPEED_UNKNOWN = -1,SPEED_10MB_LINK = 0 */ +int hinic_get_speed(void *hwdev, enum nic_speed_level *speed); + +int hinic_get_link_state(void *hwdev, u8 *link_state); + +int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause); + +int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int hinic_set_rx_mode(void *hwdev, u32 enable); + +/* offload feature */ +int hinic_set_rx_vlan_offload(void *hwdev, u8 en); + +int hinic_set_rx_csum_offload(void *hwdev, u32 en); + +int hinic_set_tx_tso(void *hwdev, u8 tso_en); + +/* Linux NIC used */ +int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num); + +/* Win NIC used */ +int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num); + +/* Related command dcbtool */ +int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap); + +int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap); + +int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, + u8 *up_bw, u8 *prio); + +int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, + u8 *up_bw, u8 *prio); + +int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up); + +int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map); + +int hinic_set_pfc_threshold(void *hwdev, u16 op_type, u16 threshold); + +int hinic_set_bp_thd(void *hwdev, u16 threshold); + +int hinic_disable_fw_bp(void *hwdev); + +int hinic_set_iq_enable(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx); + +int hinic_set_iq_enable_mgmt(void *hwdev, u16 q_id, u16 lower_thd, + u16 prod_idx); + +/* nictool adaptation interface*/ +int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period); +/* There should be output parameters, add the + * output parameter struct nic_up_offload *cfg + */ +int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *lro_info); + +int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size); + +int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size); + +int hinic_set_loopback_mode(void *hwdev, bool enable); +int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable); +int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable); + +int hinic_get_port_enable_state(void *hwdev, bool *enable); + +int hinic_get_vport_enable_state(void *hwdev, bool 
*enable); + +int hinic_set_lli_state(void *hwdev, u8 lli_state); + +int hinic_set_vport_enable(void *hwdev, bool enable); + +int hinic_set_port_enable(void *hwdev, bool enable); + +/* rss */ +int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type); + +int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, + struct nic_rss_type *rss_type); + +int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp); + +int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp); + +int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type); + +int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type); + +int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table); + +int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table); + +int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc); + +int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx); + +int hinic_rss_template_free(void *hwdev, u8 tmpl_idx); + +/* disable or enable traffic of all functions in the same port */ +int hinic_set_port_funcs_state(void *hwdev, bool enable); + +int hinic_reset_port_link_cfg(void *hwdev); + +int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats); + +int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats); + +int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver); + +int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver); + +int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac); + +int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos); + +int hinic_kill_vf_vlan(void *hwdev, int vf_id); + +int hinic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr); + +u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id); + +bool hinic_vf_is_registered(void *hwdev, u16 vf_id); + +void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi); + +void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link); + +void hinic_save_pf_link_status(void *hwdev, u8 link); + +int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link); + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk); +bool hinic_vf_info_spoofchk(void *hwdev, int vf_id); +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +int hinic_set_vf_trust(void *hwdev, u16 vf_id, bool trust); +bool hinic_vf_info_trust(void *hwdev, int vf_id); +#endif + +int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate); + +int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id); + +int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id); + +int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state); + +int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state); + +int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state); + +int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id, + u16 func_id); +int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id, + u16 *func_id); +int hinic_set_anti_attack(void *hwdev, bool enable); + +int hinic_flush_sq_res(void *hwdev); + +int hinic_set_super_cqe_state(void *hwdev, bool enable); + +int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en); + +int hinic_force_drop_tx_pkt(void *hwdev); + +int hinic_update_pf_bw(void *hwdev); + +int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit); + +int hinic_set_link_status_follow(void *hwdev, + enum hinic_link_follow_status status); +int 
hinic_disable_tx_promisc(void *hwdev); + +/* HILINK module */ +int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings); + + +int hinic_enable_netq(void *hwdev, u8 en); +int hinic_add_hw_rqfilter(void *hwdev, + struct hinic_rq_filter_info *filter_info); +int hinic_del_hw_rqfilter(void *hwdev, + struct hinic_rq_filter_info *filter_info); +int hinic_get_sfp_eeprom(void *hwdev, u8 *data, u16 *len); +int hinic_get_sfp_type(void *hwdev, u8 *data0, u8 *data1); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c new file mode 100644 index 000000000000..c1d6254b8786 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hwif.h" +#include "hinic_wq.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_nic_io.h" +#include "hinic_nic.h" +#include "hinic_dbg.h" + +#define INVALID_PI (0xFFFF) + +u16 hinic_dbg_get_qp_num(void *hwdev) +{ + struct hinic_nic_io *nic_io; + + if (!hwdev) + return 0; + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + if (!nic_io) + return 0; + + return nic_io->num_qps; +} + +void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io; + + if (!hwdev) + return NULL; + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + if (!nic_io) + return NULL; + + if (q_id >= nic_io->num_qps) + return NULL; + + return &nic_io->qps[q_id]; +} + +void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return NULL; + + return qp->sq.wq; +} + +void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return NULL; + + return qp->rq.wq; +} + +u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id) +{ + struct hinic_wq *wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id); + + if (!wq) + return 0; + + return ((u16)wq->prod_idx) & wq->mask; +} + +u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (qp) + return cpu_to_be16(*qp->rq.pi_virt_addr); + + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Get rq hw pi failed!\n"); + + return INVALID_PI; +} + +u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id) +{ + struct hinic_wq *wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id); + + if (!wq) + return 0; + + return ((u16)wq->prod_idx) & wq->mask; +} + +void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return NULL; + + return qp->sq.cons_idx_addr; +} + +u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = 
hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return 0; + + return qp->sq.wq->block_paddr; +} + +u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return 0; + + return qp->rq.wq->block_paddr; +} + +int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr, + u64 *phy_addr, u32 *pg_idx) +{ + struct hinic_qp *qp; + struct hinic_hwif *hwif; + + qp = hinic_dbg_get_qp_handle(hwdev, q_id); + if (!qp) + return -EFAULT; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + *map_addr = (u64 *)qp->sq.db_addr; + *pg_idx = DB_IDX(qp->sq.db_addr, hwif->db_base); + *phy_addr = hwif->db_base_phy + (*pg_idx) * HINIC_DB_PAGE_SIZE; + + return 0; +} + +u16 hinic_dbg_get_global_qpn(const void *hwdev) +{ + if (!hwdev) + return 0; + + return ((struct hinic_hwdev *)hwdev)->nic_io->global_qpn; +} + +static int get_wqe_info(struct hinic_wq *wq, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size) +{ + void *src_wqe; + u32 offset; + u16 i; + + if (idx + wqebb_cnt > wq->q_depth) + return -EFAULT; + + if (*wqe_size != (u16)(wq->wqebb_size * wqebb_cnt)) { + pr_err("Unexpect out buf size from user :%d, expect: %d\n", + *wqe_size, (u16)(wq->wqebb_size * wqebb_cnt)); + return -EFAULT; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqe = (void *)hinic_slq_get_addr(wq, idx + i); + offset = i * wq->wqebb_size; + memcpy(wqe + offset, src_wqe, wq->wqebb_size); + } + + return 0; +} + +int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size) +{ + struct hinic_wq *wq; + int err; + + wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id); + if (!wq) + return -EFAULT; + + err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size); + + return err; +} + +int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size) +{ + struct hinic_wq *wq; + int err; + + wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id); + if (!wq) + return -EFAULT; + + err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size); + + return err; +} + +int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size) +{ + if (!hw_stats || *out_size != sizeof(struct hinic_hw_stats)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct hinic_hw_stats)); + return -EFAULT; + } + + memcpy(hw_stats, &((struct hinic_hwdev *)hwdev)->hw_stats, + sizeof(struct hinic_hw_stats)); + return 0; +} + +u16 hinic_dbg_clear_hw_stats(void *hwdev) +{ + memset((void *)&((struct hinic_hwdev *)hwdev)->hw_stats, 0, + sizeof(struct hinic_hw_stats)); + memset((void *)((struct hinic_hwdev *)hwdev)->chip_fault_stats, 0, + HINIC_CHIP_FAULT_SIZE); + return sizeof(struct hinic_hw_stats); +} + +void hinic_get_chip_fault_stats(const void *hwdev, + u8 *chip_fault_stats, int offset) +{ + int copy_len = HINIC_CHIP_FAULT_SIZE - offset; + + if (offset < 0 || offset > HINIC_CHIP_FAULT_SIZE) { + pr_err("Invalid chip offset value: %d\n", offset); + return; + } + + if (offset + MAX_DRV_BUF_SIZE <= HINIC_CHIP_FAULT_SIZE) + memcpy(chip_fault_stats, + ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset, + MAX_DRV_BUF_SIZE); + else + memcpy(chip_fault_stats, + ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset, + copy_len); +} + +int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg; + + if (!hwdev) + return -EINVAL; + + if (!dev->nic_io) + return -EINVAL; + + nic_cfg = &dev->nic_io->nic_cfg; + + *pf_bw_limit = 
nic_cfg->pf_bw_limit; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h new file mode 100644 index 000000000000..c1471a4b6183 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_NIC_DEV_H +#define HINIC_NIC_DEV_H + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_nic_io.h" +#include "hinic_nic_cfg.h" +#include "hinic_tx.h" +#include "hinic_rx.h" + +#define HINIC_DRV_NAME "hinic" +#define HINIC_CHIP_NAME "hinic" + +#define HINIC_DRV_VERSION "2.3.2.14" +struct vf_data_storage; + +#define HINIC_FUNC_IS_VF(hwdev) (hinic_func_type(hwdev) == TYPE_VF) + +enum hinic_flags { + HINIC_INTF_UP, + HINIC_MAC_FILTER_CHANGED, + HINIC_LP_TEST, + HINIC_RSS_ENABLE, + HINIC_DCB_ENABLE, + HINIC_BP_ENABLE, + HINIC_SAME_RXTX, + HINIC_INTR_ADAPT, + HINIC_UPDATE_MAC_FILTER, + HINIC_ETS_ENABLE, +}; + +#define RX_BUFF_NUM_PER_PAGE 2 +#define HINIC_MAX_MAC_NUM 3 +#define LP_PKT_CNT 64 + +struct hinic_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; +}; + +enum hinic_rx_mode_state { + HINIC_HW_PROMISC_ON, + HINIC_HW_ALLMULTI_ON, + HINIC_PROMISC_FORCE_ON, + HINIC_ALLMULTI_FORCE_ON, +}; + +enum mac_filter_state { + HINIC_MAC_WAIT_HW_SYNC, + HINIC_MAC_HW_SYNCED, + HINIC_MAC_WAIT_HW_UNSYNC, + HINIC_MAC_HW_UNSYNCED, +}; + +struct hinic_mac_filter { + struct list_head list; + u8 addr[ETH_ALEN]; + unsigned long state; +}; + +/* TC bandwidth allocation per direction */ +struct hinic_tc_attr { + u8 pg_id; /* Priority Group(PG) ID */ + u8 bw_pct; /* % of PG's bandwidth */ + u8 up_map; /* User Priority to Traffic Class mapping */ + u8 prio_type; +}; + +/* User priority configuration */ +struct hinic_tc_cfg { + struct hinic_tc_attr path[2]; /* One each for Tx/Rx */ + + bool pfc_en; +}; + +struct hinic_dcb_config { + u8 pg_tcs; + u8 pfc_tcs; + + bool pfc_state; + + struct hinic_tc_cfg tc_cfg[HINIC_DCB_TC_MAX]; + u8 bw_pct[2][HINIC_DCB_PG_MAX]; /* One each for Tx/Rx */ +}; + +enum hinic_intr_flags { + HINIC_INTR_ON, + HINIC_RESEND_ON, +}; + +struct hinic_irq { + struct net_device *netdev; + /* IRQ corresponding index number */ + u16 msix_entry_idx; + u32 irq_id; /* The IRQ number from OS */ + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct hinic_txq *txq; + struct hinic_rxq *rxq; + unsigned long intr_flag; +}; + +struct hinic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer_cfg; + u8 resend_timer_cfg; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; + + u8 user_set_intr_coal_flag; +}; + +#define HINIC_NIC_STATS_INC(nic_dev, field) \ +{ \ + u64_stats_update_begin(&nic_dev->stats.syncp); \ + nic_dev->stats.field++; \ + u64_stats_update_end(&nic_dev->stats.syncp); \ +} + +struct hinic_nic_stats { + u64 netdev_tx_timeout; + + /* Subdivision 
statistics show in private tool */ + u64 tx_carrier_off_drop; + u64 tx_invalid_qid; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#else + struct u64_stats_sync_empty syncp; +#endif +}; + +struct hinic_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + void *hwdev; + + int poll_weight; + + unsigned long *vlan_bitmap; + + u16 num_qps; + u16 max_qps; + + u32 msg_enable; + unsigned long flags; + + u16 sq_depth; + u16 rq_depth; + + /* mapping from priority */ + u8 sq_cos_mapping[HINIC_DCB_UP_MAX]; + u8 default_cos_id; + struct hinic_txq *txqs; + struct hinic_rxq *rxqs; + + struct nic_service_cap nic_cap; + + struct irq_info *qps_irq_info; + struct hinic_irq *irq_cfg; + struct work_struct rx_mode_work; + struct delayed_work moderation_task; + struct workqueue_struct *workq; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + unsigned long rx_mod_state; + int netdev_uc_cnt; + int netdev_mc_cnt; + int lb_test_rx_idx; + int lb_pkt_len; + u8 *lb_test_rx_buf; + + u8 rss_tmpl_idx; + u16 num_rss; + u16 rss_limit; + u8 rss_hash_engine; + struct nic_rss_type rss_type; + u8 *rss_hkey_user; + /* hkey in big endian */ + u32 *rss_hkey_user_be; + u32 *rss_indir_user; + + u8 dcbx_cap; + u32 dcb_changes; + u8 max_cos; + u8 up_valid_bitmap; + u8 up_cos[HINIC_DCB_UP_MAX]; + struct ieee_ets hinic_ieee_ets_default; + struct ieee_ets hinic_ieee_ets; + struct ieee_pfc hinic_ieee_pfc; + struct hinic_dcb_config dcb_cfg; + struct hinic_dcb_config tmp_dcb_cfg; + struct hinic_dcb_config save_dcb_cfg; + unsigned long dcb_flags; + int disable_port_cnt; + /* lock for disable or enable traffic flow */ + struct semaphore dcb_sem; + + u16 bp_lower_thd; + u16 bp_upper_thd; + bool heart_status; + + struct hinic_intr_coal_info *intr_coalesce; + unsigned long last_moder_jiffies; + u32 adaptive_rx_coal; + u8 intr_coal_set_flag; + u32 his_link_speed; + /* interrupt coalesce must be different in virtual machine */ + bool in_vm; + bool is_vm_slave; + int is_bm_slave; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + struct hinic_nic_stats stats; + /* lock for nic resource */ + struct mutex nic_mutex; + bool force_port_disable; + struct semaphore port_state_sem; + u8 link_status; + + struct hinic_environment_info env_info; + struct hinic_adaptive_cfg adaptive_cfg; + + /* pangea cpu affinity setting */ + bool force_affinity; + cpumask_t affinity_mask; + + u32 lro_replenish_thld; + u16 rx_buff_len; + u32 page_order; +}; + +extern struct hinic_uld_info nic_uld_info; + +int hinic_open(struct net_device *netdev); +int hinic_close(struct net_device *netdev); +void hinic_set_ethtool_ops(struct net_device *netdev); +void hinicvf_set_ethtool_ops(struct net_device *netdev); +void hinic_update_num_qps(struct net_device *netdev); +int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +int hinic_force_port_disable(struct hinic_nic_dev *nic_dev); +int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable); +int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable); +void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status); + +int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev); +int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev); + +#define hinic_msg(level, nic_dev, msglvl, format, arg...) 
\ +do { \ + if (nic_dev->netdev && nic_dev->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level(nic_dev, msglvl, nic_dev->netdev, \ + format, ## arg); \ + else \ + nic_##level(&nic_dev->pdev->dev, \ + format, ## arg); \ +} while (0) + +#define hinic_info(nic_dev, msglvl, format, arg...) \ + hinic_msg(info, nic_dev, msglvl, format, ## arg) + +#define hinic_warn(nic_dev, msglvl, format, arg...) \ + hinic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define hinic_err(nic_dev, msglvl, format, arg...) \ + hinic_msg(err, nic_dev, msglvl, format, ## arg) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c new file mode 100644 index 000000000000..5f5ae666b949 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c @@ -0,0 +1,1051 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_wq.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_nic_io.h" +#include "hinic_nic.h" +#include "hinic_ctx_def.h" +#include "hinic_cmdq.h" + +#define HINIC_DEFAULT_TX_CI_PENDING_LIMIT 0 +#define HINIC_DEFAULT_TX_CI_COALESCING_TIME 0 + +static unsigned char tx_pending_limit = HINIC_DEFAULT_TX_CI_PENDING_LIMIT; +module_param(tx_pending_limit, byte, 0444); +MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)"); + +static unsigned char tx_coalescing_time = HINIC_DEFAULT_TX_CI_COALESCING_TIME; +module_param(tx_coalescing_time, byte, 0444); +MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)"); + +#define WQ_PREFETCH_MAX 4 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +struct hinic_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +struct hinic_sq_ctxt { + u32 ceq_attr; + + u32 ci_owner; + + u32 wq_pfn_hi; + u32 wq_pfn_lo; + + u32 pref_cache; + u32 pref_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 rsvd8; + u32 rsvd9; + + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic_rq_ctxt { + u32 ceq_attr; + + u32 pi_intr_attr; + + u32 wq_pfn_hi_ci; + u32 wq_pfn_lo; + + u32 pref_cache; + u32 pref_owner; + + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic_sq_ctxt_block { + struct hinic_qp_ctxt_header cmdq_hdr; + struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_rq_ctxt_block { + struct hinic_qp_ctxt_header cmdq_hdr; + struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_sq_db { + u32 db_info; +}; + +struct hinic_addr { + u32 addr_hi; + u32 addr_lo; +}; + +struct hinic_clean_queue_ctxt { + struct hinic_qp_ctxt_header cmdq_hdr; + u32 ctxt_size; + struct 
hinic_addr cqe_dma_addr[HINIC_RQ_CQ_MAX]; +}; + +static int init_sq(struct hinic_sq *sq, struct hinic_wq *wq, u16 q_id, + u16 sq_msix_idx, void *cons_idx_addr, void __iomem *db_addr) +{ + sq->wq = wq; + sq->q_id = q_id; + sq->owner = 1; + sq->msix_entry_idx = sq_msix_idx; + + sq->cons_idx_addr = cons_idx_addr; + sq->db_addr = db_addr; + + return 0; +} + +static int init_rq(struct hinic_rq *rq, void *dev_hdl, struct hinic_wq *wq, + u16 q_id, u16 rq_msix_idx) +{ + rq->wq = wq; + rq->q_id = q_id; + rq->cqe_dma_addr = 0; + + rq->msix_entry_idx = rq_msix_idx; + + rq->pi_virt_addr = dma_zalloc_coherent(dev_hdl, PAGE_SIZE, + &rq->pi_dma_addr, GFP_KERNEL); + if (!rq->pi_virt_addr) { + nic_err(dev_hdl, "Failed to allocate rq pi virtual addr\n"); + return -ENOMEM; + } + + return 0; +} + +void hinic_rq_cqe_addr_set(void *hwdev, u16 qid, dma_addr_t cqe_dma_ddr) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io; + + nic_io = dev->nic_io; + nic_io->qps[qid].rq.cqe_dma_addr = cqe_dma_ddr; +} + +static void clean_rq(struct hinic_rq *rq, void *dev_hdl) +{ + dma_free_coherent(dev_hdl, PAGE_SIZE, rq->pi_virt_addr, + rq->pi_dma_addr); +} + +static int create_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp, + u16 q_id, u16 qp_msix_idx, int max_sq_sge) +{ + struct hinic_sq *sq = &qp->sq; + struct hinic_rq *rq = &qp->rq; + void __iomem *db_addr; + int err; + + err = hinic_wq_allocate(&nic_io->wqs, &nic_io->sq_wq[q_id], + HINIC_SQ_WQEBB_SIZE, + nic_io->hwdev->wq_page_size, nic_io->sq_depth, + MAX_WQE_SIZE(max_sq_sge, HINIC_SQ_WQEBB_SIZE)); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for SQ\n"); + return err; + } + + err = hinic_wq_allocate(&nic_io->wqs, &nic_io->rq_wq[q_id], + HINIC_RQ_WQE_SIZE, nic_io->hwdev->wq_page_size, + nic_io->rq_depth, HINIC_RQ_WQE_SIZE); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for RQ\n"); + goto rq_alloc_err; + } + + /* we don't use direct wqe for sq */ + err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr, NULL); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to alloc sq doorbell addr\n"); + goto alloc_db_err; + } + + err = init_sq(sq, &nic_io->sq_wq[q_id], q_id, qp_msix_idx, + HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id), db_addr); + if (err != 0) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to init sq\n"); + goto sq_init_err; + } + + err = init_rq(rq, nic_io->hwdev->dev_hdl, &nic_io->rq_wq[q_id], + q_id, qp_msix_idx); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to init rq\n"); + goto rq_init_err; + } + + return 0; + +rq_init_err: +sq_init_err: + hinic_free_db_addr(nic_io->hwdev, db_addr, NULL); + +alloc_db_err: + hinic_wq_free(&nic_io->wqs, &nic_io->rq_wq[q_id]); + +rq_alloc_err: + hinic_wq_free(&nic_io->wqs, &nic_io->sq_wq[q_id]); + + return err; +} + +static void destroy_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp) +{ + clean_rq(&qp->rq, nic_io->hwdev->dev_hdl); + + hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr, NULL); + + hinic_wq_free(&nic_io->wqs, qp->sq.wq); + hinic_wq_free(&nic_io->wqs, qp->rq.wq); +} + +/* alloc qps and init qps ctxt */ +int hinic_create_qps(void *dev, u16 num_qp, u16 sq_depth, u16 rq_depth, + struct irq_info *qps_msix_arry, int max_sq_sge) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_nic_io *nic_io; + u16 q_id, i, max_qps; + int err; + + if (!hwdev || !qps_msix_arry) + return -EFAULT; + + max_qps = hinic_func_max_qnum(hwdev); + if (num_qp > max_qps) { + nic_err(hwdev->dev_hdl, "Create number of qps: %d > max number of 
qps:%d\n", + num_qp, max_qps); + return -EINVAL; + } + + nic_io = hwdev->nic_io; + + nic_io->max_qps = max_qps; + nic_io->num_qps = num_qp; + nic_io->sq_depth = sq_depth; + nic_io->rq_depth = rq_depth; + + err = hinic_wqs_alloc(&nic_io->wqs, 2 * num_qp, hwdev->dev_hdl); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to allocate WQS for IO\n"); + return err; + } + + nic_io->qps = kcalloc(num_qp, sizeof(*nic_io->qps), GFP_KERNEL); + if (!nic_io->qps) { + err = -ENOMEM; + goto alloc_qps_err; + } + + nic_io->ci_vaddr_base = + dma_zalloc_coherent(hwdev->dev_hdl, + CI_TABLE_SIZE(num_qp, PAGE_SIZE), + &nic_io->ci_dma_base, GFP_KERNEL); + if (!nic_io->ci_vaddr_base) { + err = -ENOMEM; + goto ci_base_err; + } + + nic_io->sq_wq = kcalloc(num_qp, sizeof(*nic_io->sq_wq), GFP_KERNEL); + if (!nic_io->sq_wq) { + err = -ENOMEM; + goto sq_wq_err; + } + + nic_io->rq_wq = kcalloc(num_qp, sizeof(*nic_io->rq_wq), GFP_KERNEL); + if (!nic_io->rq_wq) { + err = -ENOMEM; + goto rq_wq_err; + } + + for (q_id = 0; q_id < num_qp; q_id++) { + err = create_qp(nic_io, &nic_io->qps[q_id], q_id, + qps_msix_arry[q_id].msix_entry_idx, max_sq_sge); + if (err) { + nic_err(hwdev->dev_hdl, + "Failed to allocate qp %d, err: %d\n", + q_id, err); + goto create_qp_err; + } + } + + return 0; + +create_qp_err: + for (i = 0; i < q_id; i++) + destroy_qp(nic_io, &nic_io->qps[i]); + + kfree(nic_io->rq_wq); + +rq_wq_err: + kfree(nic_io->sq_wq); + +sq_wq_err: + dma_free_coherent(hwdev->dev_hdl, CI_TABLE_SIZE(num_qp, PAGE_SIZE), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + +ci_base_err: + kfree(nic_io->qps); + +alloc_qps_err: + hinic_wqs_free(&nic_io->wqs); + + return err; +} +EXPORT_SYMBOL(hinic_create_qps); + +void hinic_free_qps(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_nic_io *nic_io; + u16 i; + + if (!hwdev) + return; + + nic_io = hwdev->nic_io; + + for (i = 0; i < nic_io->num_qps; i++) + destroy_qp(nic_io, &nic_io->qps[i]); + + kfree(nic_io->rq_wq); + kfree(nic_io->sq_wq); + + dma_free_coherent(hwdev->dev_hdl, + CI_TABLE_SIZE(nic_io->num_qps, PAGE_SIZE), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + + kfree(nic_io->qps); + + hinic_wqs_free(&nic_io->wqs); +} +EXPORT_SYMBOL(hinic_free_qps); + +void hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues, u16 q_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + + if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) + qp_ctxt_hdr->addr_offset = + SQ_CTXT_OFFSET(max_queues, max_queues, q_id); + else + qp_ctxt_hdr->addr_offset = + RQ_CTXT_OFFSET(max_queues, max_queues, q_id); + + qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); + + hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn, + struct hinic_sq_ctxt *sq_ctxt) +{ + struct hinic_wq *wq = sq->wq; + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (u16)wq->cons_idx; + pi_start = (u16)wq->prod_idx; + + /* read the first page from the HW table */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + /* If only one page, use 0-level CLA */ + if (wq->num_q_pages == 1) + wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr); + else + wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr); + 
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) | + SQ_CTXT_CEQ_ATTR_SET(0, EN); + + sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) | + SQ_CTXT_CI_SET(1, OWNER); + + sq_ctxt->wq_pfn_hi = + SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + SQ_CTXT_WQ_PAGE_SET(pi_start, PI); + + sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->pref_cache = + SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctxt->pref_owner = 1; + + sq_ctxt->pref_wq_pfn_hi_ci = + SQ_CTXT_PREF_SET(ci_start, CI) | + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); + + sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->wq_block_pfn_hi = + SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +void hinic_rq_prepare_ctxt(struct hinic_rq *rq, struct hinic_rq_ctxt *rq_ctxt) +{ + struct hinic_wq *wq = rq->wq; + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (u16)wq->cons_idx; + pi_start = (u16)wq->prod_idx; + pi_start = pi_start & wq->mask; + + /* read the first page from the HW table */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + if (wq->num_q_pages == 1) + wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr); + else + wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr); + + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | + RQ_CTXT_CEQ_ATTR_SET(1, OWNER); + + rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) | + RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR); + + rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + RQ_CTXT_WQ_PAGE_SET(ci_start, CI); + + rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pref_cache = + RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctxt->pref_owner = 1; + + rq_ctxt->pref_wq_pfn_hi_ci = + RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | + RQ_CTXT_PREF_SET(ci_start, CI); + + rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); + rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); + + rq_ctxt->wq_block_pfn_hi = + RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + +static int init_sq_ctxts(struct hinic_nic_io *nic_io) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_sq_ctxt_block *sq_ctxt_block; + struct hinic_sq_ctxt *sq_ctxt; + struct hinic_cmd_buf *cmd_buf; + struct hinic_qp *qp; + u64 out_param = 0; + u16 q_id, curr_id, global_qpn, max_ctxts, i; + int err = 0; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + sq_ctxt_block = cmd_buf->buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ? 
+ HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id); + + hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, + HINIC_QP_CTXT_TYPE_SQ, max_ctxts, + nic_io->max_qps, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + qp = &nic_io->qps[curr_id]; + global_qpn = nic_io->global_qpn + curr_id; + + hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]); + } + + cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + nic_err(hwdev->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_rq_ctxts(struct hinic_nic_io *nic_io) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_rq_ctxt_block *rq_ctxt_block; + struct hinic_rq_ctxt *rq_ctxt; + struct hinic_cmd_buf *cmd_buf; + struct hinic_qp *qp; + u64 out_param = 0; + u16 q_id, curr_id, max_ctxts, i; + int err = 0; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + rq_ctxt_block = cmd_buf->buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ? + HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id); + + hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, + HINIC_QP_CTXT_TYPE_RQ, max_ctxts, + nic_io->max_qps, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + qp = &nic_io->qps[curr_id]; + + hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]); + } + + cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + + if (err || out_param != 0) { + nic_err(hwdev->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_qp_ctxts(struct hinic_nic_io *nic_io) +{ + int err; + + err = init_sq_ctxts(nic_io); + if (err) + return err; + + err = init_rq_ctxts(nic_io); + if (err) + return err; + + return 0; +} + +static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io, + enum hinic_qp_ctxt_type ctxt_type) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_clean_queue_ctxt *ctxt_block; + struct hinic_cmd_buf *cmd_buf; + dma_addr_t cqe_dma_addr; + struct hinic_addr *addr; + u64 out_param = 0; + int i, err; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.addr_offset = 0; + + /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */ + ctxt_block->ctxt_size = 0x3; + if ((hinic_func_type(hwdev) == TYPE_VF) && + ctxt_type == HINIC_QP_CTXT_TYPE_RQ) { + addr = ctxt_block->cqe_dma_addr; + for (i = 0; i < nic_io->max_qps; i++) { + cqe_dma_addr = nic_io->qps[i].rq.cqe_dma_addr; + addr[i].addr_hi = upper_32_bits(cqe_dma_addr); + addr[i].addr_lo = lower_32_bits(cqe_dma_addr); + } + } + + hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + + err = 
hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + + if (err || out_param) { + nic_err(hwdev->dev_hdl, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io) +{ + /* clean LRO/TSO context space */ + return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) || + clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ)); +} + +/* init the qp ctxts, set the sq ci attributes and arm all sqs */ +int hinic_init_qp_ctxts(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_nic_io *nic_io; + struct hinic_sq_attr sq_attr; + u16 q_id; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hwdev->nic_io; + + err = init_qp_ctxts(nic_io); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to init QP ctxts\n"); + return err; + } + + /* clean LRO/TSO context space */ + err = clean_qp_offload_ctxt(nic_io); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to clean qp offload ctxts\n"); + return err; + } + + err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth, + nic_io->sq_depth, nic_io->rx_buff_len); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set root context\n"); + return err; + } + + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + sq_attr.ci_dma_base = + HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2; + sq_attr.pending_limit = tx_pending_limit; + sq_attr.coalescing_time = tx_coalescing_time; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_io->qps[q_id].sq.msix_entry_idx; + sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = hinic_set_ci_table(hwdev, q_id, &sq_attr); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set ci table\n"); + goto set_cons_idx_table_err; + } + } + + return 0; + +set_cons_idx_table_err: + hinic_clean_root_ctxt(hwdev); + + return err; +} +EXPORT_SYMBOL(hinic_init_qp_ctxts); + +void hinic_free_qp_ctxts(void *hwdev) +{ + int err; + + if (!hwdev) + return; + + hinic_qps_num_set(hwdev, 0); + + err = hinic_clean_root_ctxt(hwdev); + if (err) + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to clean root ctxt\n"); +} +EXPORT_SYMBOL(hinic_free_qp_ctxts); + +int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_io *nic_io; + u16 global_qpn; + int err; + + if (!hwdev) + return -EINVAL; + + if (is_multi_bm_slave(hwdev) && hinic_support_dynamic_q(hwdev)) { + err = hinic_reinit_cmdq_ctxts(dev); + if (err) { + nic_err(dev->dev_hdl, "Failed to reinit cmdq\n"); + return err; + } + } + + nic_io = dev->nic_io; + + err = hinic_get_base_qpn(hwdev, &global_qpn); + if (err) { + nic_err(dev->dev_hdl, "Failed to get base qpn\n"); + return err; + } + + nic_io->global_qpn = global_qpn; + nic_io->rx_buff_len = rx_buff_len; + err = hinic_init_function_table(hwdev, nic_io->rx_buff_len); + if (err) { + nic_err(dev->dev_hdl, "Failed to init function table\n"); + return err; + } + + err = hinic_enable_fast_recycle(hwdev, false); + if (err) { + nic_err(dev->dev_hdl, "Failed to disable fast recycle\n"); + return err; + } + + /* get the default pf bandwidth from firmware, which was set by the BIOS */ + err = hinic_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit); + if (err) { + nic_err(dev->dev_hdl, "Failed to get pf bandwidth limit\n"); + return err; + } + + if ((dev->func_mode == FUNC_MOD_MULTI_BM_MASTER) || + (dev->func_mode == FUNC_MOD_MULTI_VM_MASTER) || + 
(dev->board_info.service_mode == HINIC_WORK_MODE_OVS_SP)) { + if (hinic_func_type(dev) != TYPE_VF) { + err = hinic_disable_tx_promisc(dev); + if (err) { + nic_err(dev->dev_hdl, "Failed to set tx promisc\n"); + return err; + } + } + } + + /* VFs don't set port routine command report */ + if (hinic_func_type(dev) != TYPE_VF) { + /* Get the fw support mac reuse flag */ + err = hinic_get_fw_support_func(hwdev); + if (err) { + nic_err(dev->dev_hdl, "Failed to get mac reuse flag\n"); + return err; + } + + /* Inform mgmt to send sfp's information to driver */ + err = hinic_set_port_routine_cmd_report(hwdev, true); + } + + return err; +} +EXPORT_SYMBOL(hinic_init_nic_hwdev); + +void hinic_free_nic_hwdev(void *hwdev) +{ + if (hinic_func_type(hwdev) != TYPE_VF) + hinic_set_port_routine_cmd_report(hwdev, false); +} +EXPORT_SYMBOL(hinic_free_nic_hwdev); + +int hinic_enable_tx_irq(void *hwdev, u16 q_id) +{ + return hinic_set_arm_bit(hwdev, HINIC_SET_ARM_SQ, q_id); +} +EXPORT_SYMBOL(hinic_enable_tx_irq); + +int hinic_rx_tx_flush(void *hwdev) +{ + return hinic_func_rx_tx_flush(hwdev); +} +EXPORT_SYMBOL(hinic_rx_tx_flush); + +int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_wq *wq = &nic_io->sq_wq[q_id]; + + return atomic_read(&wq->delta) - 1; +} +EXPORT_SYMBOL(hinic_get_sq_free_wqebbs); + +int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return nic_io->rq_wq[q_id].delta.counter - 1; +} +EXPORT_SYMBOL(hinic_get_rq_free_wqebbs); + +u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return (u16)(nic_io->sq_wq[q_id].cons_idx & nic_io->sq_wq[q_id].mask); +} +EXPORT_SYMBOL(hinic_get_sq_local_ci); + +u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + return MASKED_SQ_IDX(sq, be16_to_cpu(*(u16 *)(sq->cons_idx_addr))); +} +EXPORT_SYMBOL(hinic_get_sq_hw_ci); + +void *hinic_get_sq_wqe(void *hwdev, u16 q_id, int wqebb_cnt, u16 *pi, u8 *owner) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + void *wqe; + + wqe = hinic_get_wqe(sq->wq, wqebb_cnt, pi); + if (wqe) { + *owner = sq->owner; + if ((*pi + wqebb_cnt) >= nic_io->sq_depth) + sq->owner = !sq->owner; + } + + return wqe; +} +EXPORT_SYMBOL(hinic_get_sq_wqe); + +void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + if (owner != sq->owner) + sq->owner = owner; + + atomic_add(num_wqebbs, &sq->wq->delta); + sq->wq->prod_idx -= num_wqebbs; +} +EXPORT_SYMBOL(hinic_return_sq_wqe); + +void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs, u16 *pi, + u8 *owner) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + *pi = MASKED_WQE_IDX(sq->wq, sq->wq->prod_idx); + + atomic_sub(num_wqebbs, &sq->wq->delta); + sq->wq->prod_idx += num_wqebbs; + + *owner = sq->owner; + if ((*pi + num_wqebbs) >= nic_io->sq_depth) + sq->owner = !sq->owner; +} +EXPORT_SYMBOL(hinic_update_sq_pi); + +static void sq_prepare_db(struct hinic_sq *sq, struct hinic_sq_db *db, + u16 prod_idx, int cos) +{ + u32 hi_prod_idx = SQ_DB_PI_HIGH(MASKED_SQ_IDX(sq, 
prod_idx)); + + db->db_info = SQ_DB_INFO_SET(hi_prod_idx, HI_PI) | + SQ_DB_INFO_SET(SQ_DB, TYPE) | + SQ_DB_INFO_SET(CFLAG_DATA_PATH, CFLAG) | + SQ_DB_INFO_SET(cos, COS) | + SQ_DB_INFO_SET(sq->q_id, QID); +} + +static void sq_write_db(struct hinic_sq *sq, u16 prod_idx, int cos) +{ + struct hinic_sq_db sq_db; + + sq_prepare_db(sq, &sq_db, prod_idx, cos); + + /* Data should be written to HW in Big Endian Format */ + sq_db.db_info = cpu_to_be32(sq_db.db_info); + + wmb(); /* Write all before the doorbell */ + + writel(sq_db.db_info, SQ_DB_ADDR(sq, prod_idx)); +} + +void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe, int wqebb_cnt, int cos) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + if (wqebb_cnt != 1) + hinic_write_wqe(sq->wq, wqe, wqebb_cnt); + + sq_write_db(sq, MASKED_SQ_IDX(sq, sq->wq->prod_idx), cos); +} +EXPORT_SYMBOL(hinic_send_sq_wqe); + +void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + sq->wq->cons_idx += wqebb_cnt; + atomic_add(wqebb_cnt, &sq->wq->delta); +} +EXPORT_SYMBOL(hinic_update_sq_local_ci); + +void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + return hinic_get_wqe(rq->wq, 1, pi); +} +EXPORT_SYMBOL(hinic_get_rq_wqe); + +void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + atomic_add(num_wqebbs, &rq->wq->delta); + rq->wq->prod_idx -= num_wqebbs; +} +EXPORT_SYMBOL(hinic_return_rq_wqe); + +void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + nic_io->qps[q_id].rq.wq->delta.counter -= num_wqebbs; +} +EXPORT_SYMBOL(hinic_update_rq_delta); + +void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + *rq->pi_virt_addr = cpu_to_be16(pi & rq->wq->mask); +} +EXPORT_SYMBOL(hinic_update_rq_hw_pi); + +u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return (u16)(nic_io->rq_wq[q_id].cons_idx & nic_io->rq_wq[q_id].mask); +} +EXPORT_SYMBOL(hinic_get_rq_local_ci); + +void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + nic_io->qps[q_id].rq.wq->cons_idx += wqe_cnt; + nic_io->qps[q_id].rq.wq->delta.counter += wqe_cnt; +} +EXPORT_SYMBOL(hinic_update_rq_local_ci); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h new file mode 100644 index 000000000000..338545fffa55 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_NIC_IO_H_ +#define HINIC_HW_NIC_IO_H_ + +#include "hinic_hw_mgmt.h" +#include "hinic_qe_def.h" + +#define HINIC_RX_BUF_SHIFT 11 +#define HINIC_RX_BUF_LEN 2048 /* buffer len must be 2^n */ + +#define SQ_CTRL_SET(val, member) ((u32)val << SQ_CTRL_##member##_SHIFT) + +int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len); +void hinic_free_nic_hwdev(void *hwdev); + +/* allocate qp resources */ +int hinic_create_qps(void *hwdev, u16 qp_num, u16 sq_depth, u16 rq_depth, + struct irq_info *rq_msix_arry, int max_sq_sge); +void hinic_free_qps(void *hwdev); + +/* init the qp ctxts, set the sq ci attributes and arm all sqs */ +int hinic_init_qp_ctxts(void *hwdev); +void hinic_free_qp_ctxts(void *hwdev); + +/* set up the function table and root context */ +int hinic_set_parameters(void *hwdev, u8 *mac, u16 rx_buf_size, u32 mtu); +void hinic_clear_parameters(void *hwdev); + +/* Invoked internally; wraps the set_arm_bit function */ +int hinic_enable_tx_irq(void *hwdev, u16 q_id); + +int hinic_rx_tx_flush(void *hwdev); + +/* Obtain the number of idle wqebbs in the sq/rq */ +int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id); +int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id); + +u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id); +u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id); + +void *hinic_get_sq_wqe(void *hwdev, u16 q_id, + int wqebb_cnt, u16 *pi, u8 *owner); + +void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner); + +void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs, + u16 *pi, u8 *owner); + +/* handles cross-page wqes and rings the doorbell */ +void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe, + int wqebb_cnt, int cos); + +void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt); + +/* Refresh the rq buffer */ +void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi); +/* update the rq pi; the pi is already the latest, the function does not need to recalculate it */ +void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs); + +void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs); + +void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi); + +u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id); + +/* Does not clear the rx done bit */ +void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt); + +struct hinic_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +void hinic_cpu_to_be32(void *data, int len); + +void hinic_be32_to_cpu(void *data, int len); + +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len); + +dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge); + +void hinic_rq_cqe_addr_set(void *hwdev, u16 qid, dma_addr_t cqe_dma_ddr); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c new file mode 100644 index 000000000000..4306239f7b96 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c @@ -0,0 +1,2447 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_lld.h" +#include "hinic_nic_dev.h" +#include "hinic_dbg.h" +#include "hinic_nictool.h" +#include "hinic_qp.h" +#include "hinic_dcb.h" +#include "hinic_dbgtool_knl.h" + +#define HIADM_DEV_PATH "/dev/nictool_dev" +#define HIADM_DEV_CLASS "nictool_class" +#define HIADM_DEV_NAME "nictool_dev" + +#define MAJOR_DEV_NUM 921 +#define HINIC_CMDQ_BUF_MAX_SIZE 2048U +#define MSG_MAX_IN_SIZE (2048 * 1024) +#define MSG_MAX_OUT_SIZE (2048 * 1024) + +static dev_t g_dev_id = {0}; +static struct class *g_nictool_class; +static struct cdev g_nictool_cdev; + +static int g_nictool_init_flag; +static int g_nictool_ref_cnt; + +typedef int (*nic_driv_module)(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct nic_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + nic_driv_module driv_func; +}; + +typedef int (*hw_driv_module)(void *hwdev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct hw_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + hw_driv_module driv_func; +}; + +static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in) +{ + if (!buf_in) + return; + + if (nt_msg->module == SEND_TO_UCODE) + hinic_free_cmd_buf(hwdev, buf_in); + else + kfree(buf_in); +} + +static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, + u32 in_size, void **buf_in) +{ + void *msg_buf; + + if (!in_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE) { + struct hinic_cmd_buf *cmd_buf; + + if (in_size > HINIC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq in size(%u) more than 2KB\n", in_size); + return -ENOMEM; + } + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + pr_err("Alloc cmdq cmd buffer failed in %s\n", + __func__); + return -ENOMEM; + } + msg_buf = cmd_buf->buf; + *buf_in = (void *)cmd_buf; + cmd_buf->size = (u16)in_size; + } else { + if (in_size > MSG_MAX_IN_SIZE) { + pr_err("In size(%u) more than 2M\n", in_size); + return -ENOMEM; + } + msg_buf = kzalloc(in_size, GFP_KERNEL); + *buf_in = msg_buf; + } + if (!(*buf_in)) { + pr_err("Alloc buffer in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) { + pr_err("%s:%d: Copy from user failed\n", + __func__, __LINE__); + free_buff_in(hwdev, nt_msg, *buf_in); + return -EFAULT; + } + + return 0; +} + +static void free_buff_out(void *hwdev, struct msg_module *nt_msg, + void *buf_out) +{ + if (!buf_out) + return; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + hinic_free_cmd_buf(hwdev, buf_out); + else + kfree(buf_out); +} + +static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, + u32 out_size, void **buf_out) +{ + if (!out_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) { + struct hinic_cmd_buf *cmd_buf; + + if (out_size > HINIC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq out size(%u) more than 2KB\n", out_size); + return -ENOMEM; + } + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + *buf_out = (void *)cmd_buf; + } else { + if (out_size > MSG_MAX_OUT_SIZE) { + pr_err("out size(%u) 
more than 2M\n", out_size); + return -ENOMEM; + } + *buf_out = kzalloc(out_size, GFP_KERNEL); + } + if (!(*buf_out)) { + pr_err("Alloc buffer out failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int copy_buf_out_to_user(struct msg_module *nt_msg, + u32 out_size, void *buf_out) +{ + int ret = 0; + void *msg_out; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + msg_out = ((struct hinic_cmd_buf *)buf_out)->buf; + else + msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size)) + ret = -EFAULT; + + return ret; +} + +static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_sq_info *sq_info, + u32 *msg_size); +static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_rq_info *rq_info, + u32 *msg_size); + +static int get_tx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 q_id; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get tx info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out || in_size != sizeof(int)) + return -EINVAL; + + q_id = *((u16 *)buf_in); + + err = hinic_dbg_get_sq_info(nic_dev, q_id, buf_out, out_size); + + return err; +} + +static int get_q_num(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 num_qp; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get queue number\n"); + return -EFAULT; + } + + if (!buf_out) + return -EFAULT; + + num_qp = hinic_dbg_get_qp_num(nic_dev->hwdev); + if (!num_qp) + return -EFAULT; + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *((u16 *)buf_out) = num_qp; + + return 0; +} + +static int get_tx_wqe_info(struct hinic_nic_dev *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic_wqe_info *info = buf_in; + u16 q_id = 0; + u16 idx = 0, wqebb_cnt = 1; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get tx wqe info\n"); + return -EFAULT; + } + + if (!info || !buf_out || in_size != sizeof(*info)) + return -EFAULT; + + q_id = (u16)info->q_id; + idx = (u16)info->wqe_id; + + err = hinic_dbg_get_sq_wqe_info(nic_dev->hwdev, q_id, + idx, wqebb_cnt, + buf_out, (u16 *)out_size); + + return err; +} + +static int get_rx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 q_id; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out || in_size != sizeof(int)) + return -EINVAL; + + q_id = *((u16 *)buf_in); + + err = hinic_dbg_get_rq_info(nic_dev, q_id, buf_out, out_size); + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + nicif_info(nic_dev, drv, nic_dev->netdev, + "qid: %u, coalesc_timer:0x%x, pending_limit: 0x%x\n", + q_id, nic_dev->rxqs[q_id].last_coalesc_timer_cfg, + nic_dev->rxqs[q_id].last_pending_limt); + } + + return err; +} + +static int get_rx_wqe_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_wqe_info *info = buf_in; + u16 q_id = 0; + u16 idx = 
0, wqebb_cnt = 1; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx wqe info\n"); + return -EFAULT; + } + + if (!info || !buf_out || in_size != sizeof(*info)) + return -EFAULT; + + q_id = (u16)info->q_id; + idx = (u16)info->wqe_id; + + err = hinic_dbg_get_rq_wqe_info(nic_dev->hwdev, q_id, + idx, wqebb_cnt, + buf_out, (u16 *)out_size); + + return err; +} + +static int get_inter_num(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u16 intr_num; + + intr_num = hinic_intr_num(nic_dev->hwdev); + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = intr_num; + + *out_size = sizeof(u16); + + return 0; +} + +static void clean_nicdev_stats(struct hinic_nic_dev *nic_dev) +{ + u64_stats_update_begin(&nic_dev->stats.syncp); + nic_dev->stats.netdev_tx_timeout = 0; + nic_dev->stats.tx_carrier_off_drop = 0; + nic_dev->stats.tx_invalid_qid = 0; + u64_stats_update_end(&nic_dev->stats.syncp); +} + +static int clear_func_static(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int i; + + *out_size = 0; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + memset(&nic_dev->net_stats, 0, sizeof(nic_dev->net_stats)); +#endif + clean_nicdev_stats(nic_dev); + for (i = 0; i < nic_dev->max_qps; i++) { + hinic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats); + hinic_txq_clean_stats(&nic_dev->txqs[i].txq_stats); + } + + return 0; +} + +static int get_num_cos(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 *num_cos = buf_out; + + if (!buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*num_cos)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*num_cos)); + return -EFAULT; + } + + return hinic_get_num_cos(nic_dev, num_cos); +} + +static int get_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_cos_up_map *map = buf_out; + + if (!buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*map)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*map)); + return -EFAULT; + } + + return hinic_get_cos_up_map(nic_dev, &map->num_cos, map->cos_up); +} + +static int set_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_cos_up_map *map = buf_in; + + if (!buf_in || !out_size || in_size != sizeof(*map)) + return -EINVAL; + + *out_size = sizeof(*map); + + return hinic_set_cos_up_map(nic_dev, map->cos_up); +} + +static int get_rx_cqe_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_wqe_info *info = buf_in; + u16 q_id = 0; + u16 idx = 0; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx cqe info\n"); + return -EFAULT; + } + + if (!info || !buf_out || in_size != sizeof(*info)) + return -EFAULT; + + if (*out_size != sizeof(struct hinic_rq_cqe)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct hinic_rq_cqe)); + return -EFAULT; + } + q_id = (u16)info->q_id; 
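+ /* q_id and wqe_id come from user space; both are bounds-checked below before rx_info is indexed */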
+ idx = (u16)info->wqe_id; + + if (q_id >= nic_dev->num_qps || idx >= nic_dev->rxqs[q_id].q_depth) + return -EFAULT; + + memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe, + sizeof(struct hinic_rq_cqe)); + + return 0; +} + +static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_sq_info *sq_info, + u32 *msg_size) +{ + int err; + + if (!nic_dev) + return -EINVAL; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get sq info\n"); + return -EFAULT; + } + + if (q_id >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Input queue id is larger than the actual queue number\n"); + return -EINVAL; + } + + if (*msg_size != sizeof(*sq_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *msg_size, sizeof(*sq_info)); + return -EFAULT; + } + sq_info->q_id = q_id; + sq_info->pi = hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id); + sq_info->ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id); + sq_info->fi = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id); + + sq_info->q_depth = nic_dev->txqs[q_id].q_depth; + /* pi_reverse */ + + sq_info->weqbb_size = HINIC_SQ_WQEBB_SIZE; + /* priority */ + + sq_info->ci_addr = hinic_dbg_get_sq_ci_addr(nic_dev->hwdev, q_id); + + sq_info->cla_addr = hinic_dbg_get_sq_cla_addr(nic_dev->hwdev, q_id); + sq_info->slq_handle = hinic_dbg_get_sq_wq_handle(nic_dev->hwdev, q_id); + + /* direct wqe */ + + err = hinic_dbg_get_sq_db_addr(nic_dev->hwdev, + q_id, &sq_info->db_addr.map_addr, + &sq_info->db_addr.phy_addr, + &sq_info->pg_idx); + + sq_info->glb_sq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id; + + return err; +} + +static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_rq_info *rq_info, + u32 *msg_size) +{ + if (!nic_dev) + return -EINVAL; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rq info\n"); + return -EFAULT; + } + + if (q_id >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Input queue id is larger than the actual queue number\n"); + return -EINVAL; + } + if (*msg_size != sizeof(*rq_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *msg_size, sizeof(*rq_info)); + return -EFAULT; + } + + rq_info->q_id = q_id; + rq_info->glb_rq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id; + + rq_info->hw_pi = hinic_dbg_get_rq_hw_pi(nic_dev->hwdev, q_id); + rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx & + nic_dev->rxqs[q_id].q_mask; + + rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update; + + rq_info->wqebb_size = HINIC_RQ_WQE_SIZE; + rq_info->q_depth = nic_dev->rxqs[q_id].q_depth; + + rq_info->buf_len = nic_dev->rxqs[q_id].buf_len; + + rq_info->slq_handle = hinic_dbg_get_rq_wq_handle(nic_dev->hwdev, q_id); + if (!rq_info->slq_handle) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get rq slq handle null\n"); + return -EFAULT; + } + rq_info->ci_wqe_page_addr = + hinic_slq_get_first_pageaddr(rq_info->slq_handle); + rq_info->ci_cla_tbl_addr = + hinic_dbg_get_rq_cla_addr(nic_dev->hwdev, q_id); + + rq_info->msix_idx = nic_dev->rxqs[q_id].msix_entry_idx; + rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id; + + return 0; +} + +static int get_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_loop_mode *mode = buf_out; + int err; + + if 
(!out_size || !mode) + return -EFAULT; + + if (*out_size != sizeof(*mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*mode)); + return -EFAULT; + } + err = hinic_get_loopback_mode_ex(nic_dev->hwdev, &mode->loop_mode, + &mode->loop_ctrl); + return err; +} + +static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_loop_mode *mode = buf_in; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't set loopback mode\n"); + return -EFAULT; + } + + if (!mode || !out_size || in_size != sizeof(*mode)) + return -EFAULT; + + err = hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode, + mode->loop_ctrl); + if (err) + return err; + + *out_size = sizeof(*mode); + return 0; +} + +static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + enum hinic_nic_link_mode *link = buf_in; + u8 link_status; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't set link mode\n"); + return -EFAULT; + } + + if (!link || !out_size || in_size != sizeof(*link)) + return -EFAULT; + + switch (*link) { + case HINIC_LINK_MODE_AUTO: + if (hinic_get_link_state(nic_dev->hwdev, &link_status)) + link_status = false; + hinic_link_status_change(nic_dev, (bool)link_status); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: auto succeed, now is link %s\n", + (link_status ? "up" : "down")); + break; + case HINIC_LINK_MODE_UP: + hinic_link_status_change(nic_dev, true); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: up succeed\n"); + break; + case HINIC_LINK_MODE_DOWN: + hinic_link_status_change(nic_dev, false); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: down succeed\n"); + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid link mode %d to set\n", *link); + return -EINVAL; + } + + *out_size = sizeof(*link); + return 0; +} + +static int set_dcb_cfg(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + union _dcb_ctl dcb_ctl = {.data = 0}; + int err; + + if (!buf_in || !buf_out || *out_size != sizeof(u32) || + in_size != sizeof(u32)) + return -EINVAL; + + dcb_ctl.data = *((u32 *)buf_in); + + err = hinic_setup_dcb_tool(nic_dev->netdev, + &dcb_ctl.dcb_data.dcb_en, + !!dcb_ctl.dcb_data.wr_flag); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup dcb state to %d\n", + !!dcb_ctl.dcb_data.dcb_en); + err = EINVAL; + } + dcb_ctl.dcb_data.err = (u8)err; + *((u32 *)buf_out) = (u32)dcb_ctl.data; + *out_size = sizeof(u32); + + return 0; +} + +int get_pfc_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + union _pfc pfc = {.data = 0}; + + if (!buf_in || !buf_out || *out_size != sizeof(u32) || + in_size != sizeof(u32)) + return -EINVAL; + + pfc.data = *((u32 *)buf_in); + + hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev, + &pfc.pfc_data.pfc_en, false); + hinic_dcbnl_get_pfc_cfg_tool(nic_dev->netdev, + &pfc.pfc_data.pfc_priority); + hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, + &pfc.pfc_data.num_of_tc); + *((u32 *)buf_out) = (u32)pfc.data; + *out_size = sizeof(u32); + + return 0; +} + +int set_pfc_control(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 pfc_en = 0; 
+ u8 err = 0; + + if (!buf_in || !buf_out || *out_size != sizeof(u8) || + in_size != sizeof(u8)) + return -EINVAL; + + pfc_en = *((u8 *)buf_in); + if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable dcb first.\n"); + err = 0xff; + goto exit; + } + + hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev, &pfc_en, true); + err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pfc to %s\n", + pfc_en ? "enable" : "disable"); + } + +exit: + *((u8 *)buf_out) = (u8)err; + *out_size = sizeof(u8); + return 0; +} + +int set_ets(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct _ets ets = {0}; + u8 err = 0; + u8 i; + u8 support_tc = nic_dev->max_cos; + + if (!buf_in || !buf_out || *out_size != sizeof(u8) || + in_size != sizeof(struct _ets)) + return -EINVAL; + + memcpy(&ets, buf_in, sizeof(struct _ets)); + + if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable dcb first.\n"); + err = 0xff; + goto exit; + } + if (ets.flag_com.ets_flag.flag_ets_enable) { + hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets.ets_en, true); + + if (!ets.ets_en) + goto exit; + } + + if (!(test_bit(HINIC_ETS_ENABLE, &nic_dev->flags))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable ets first.\n"); + err = 0xff; + goto exit; + } + if (ets.flag_com.ets_flag.flag_ets_cos) + hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets.tc, true); + + if (ets.flag_com.ets_flag.flag_ets_percent) { + for (i = support_tc; i < HINIC_DCB_TC_MAX; i++) { + if (ets.ets_percent[i]) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "ETS setting out of range\n"); + break; + } + } + + hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev, + ets.ets_percent, true); + } + + if (ets.flag_com.ets_flag.flag_ets_strict) + hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, + &ets.strict, true); + + err = hinic_dcbnl_set_ets_tool(nic_dev->netdev); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set ets [%d].\n", err); + } +exit: + *((u8 *)buf_out) = err; + *out_size = sizeof(err); + return 0; +} + +int get_support_up(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 *up_num = buf_out; + u8 support_up = 0; + u8 i; + u8 up_valid_bitmap = nic_dev->up_valid_bitmap; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*up_num)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*up_num)); + return -EFAULT; + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (up_valid_bitmap & BIT(i)) + support_up++; + } + + *up_num = support_up; + + return 0; +} + +int get_support_tc(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 *tc_num = buf_out; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*tc_num)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*tc_num)); + return -EFAULT; + } + + hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, tc_num); + + return 0; +} + +int get_ets_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct _ets *ets = buf_out; + + if (!buf_in || !buf_out || *out_size != sizeof(*ets)) + return -EINVAL; + + 
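+ /* The 'false' flag below puts the dcbnl set_* helpers in read-back mode, so they report the current ETS configuration instead of applying a new one */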
hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev, + ets->ets_percent, false); + hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets->tc, false); + hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets->ets_en, false); + hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, &ets->strict, false); + ets->err = 0; + + *out_size = sizeof(*ets); + return 0; +} + +int set_pfc_priority(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 pfc_prority = 0; + u8 err = 0; + + if (!buf_in || !buf_out || *out_size != sizeof(u8) || + in_size != sizeof(u8)) + return -EINVAL; + + pfc_prority = *((u8 *)buf_in); + if (!((test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) && + nic_dev->tmp_dcb_cfg.pfc_state)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable pfc first.\n"); + err = 0xff; + goto exit; + } + + hinic_dcbnl_set_pfc_cfg_tool(nic_dev->netdev, pfc_prority); + + err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pfc to %x priority\n", + pfc_prority); + } +exit: + *((u8 *)buf_out) = (u8)err; + *out_size = sizeof(u8); + + return 0; +} + +static int set_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 pf_bw_limit = 0; + int err; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "To set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_in || !buf_out || in_size != sizeof(u32) || + *out_size != sizeof(u8)) + return -EINVAL; + + pf_bw_limit = *((u32 *)buf_in); + + err = hinic_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pf bandwidth limit to %d%%\n", + pf_bw_limit); + if (err < 0) + return err; + } + + *((u8 *)buf_out) = (u8)err; + *out_size = sizeof(u8); + + return 0; +} + +static int get_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 pf_bw_limit = 0; + int err; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "To get VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_out || *out_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + err = hinic_dbg_get_pf_bw_limit(nic_dev->hwdev, &pf_bw_limit); + if (err) + return err; + + *((u32 *)buf_out) = pf_bw_limit; + + return 0; +} + +static int get_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_poll_weight *weight_info = buf_out; + + if (!buf_out || *out_size != sizeof(*weight_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*weight_info)); + return -EFAULT; + } + weight_info->poll_weight = nic_dev->poll_weight; + return 0; +} + +static int set_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_poll_weight *weight_info = buf_in; + + if (!buf_in || in_size != sizeof(*weight_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in buf size from user :%u, expect: %lu\n", + *out_size, sizeof(*weight_info)); + return -EFAULT; + } + + nic_dev->poll_weight = weight_info->poll_weight; + *out_size = sizeof(u32); + return 0; +} + +static int 
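+/*
+ * Illustrative note, not part of the original patch: poll_weight above is
+ * the per-queue NAPI budget.  Both handlers operate purely on the cached
+ * nic_dev->poll_weight, so a new value is presumably picked up on the
+ * next NAPI poll cycle rather than pushed to hardware.  A hypothetical
+ * request:
+ *
+ *	struct hinic_nic_poll_weight w = { .poll_weight = 64 };
+ *
+ *	sent as SET_POLL_WEIGHT with in_size == sizeof(w)
+ */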
get_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_homologues *homo = buf_out; + + if (!buf_out || *out_size != sizeof(*homo)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*homo)); + return -EFAULT; + } + + if (test_bit(HINIC_SAME_RXTX, &nic_dev->flags)) + homo->homo_state = HINIC_HOMOLOGUES_ON; + else + homo->homo_state = HINIC_HOMOLOGUES_OFF; + + *out_size = sizeof(*homo); + + return 0; +} + +static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_homologues *homo = buf_in; + + if (!buf_in || in_size != sizeof(*homo)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*homo)); + return -EFAULT; + } + + if (homo->homo_state == HINIC_HOMOLOGUES_ON) { + set_bit(HINIC_SAME_RXTX, &nic_dev->flags); + } else if (homo->homo_state == HINIC_HOMOLOGUES_OFF) { + clear_bit(HINIC_SAME_RXTX, &nic_dev->flags); + } else { + pr_err("Invalid parameters.\n"); + return -EFAULT; + } + + *out_size = sizeof(*homo); + + return 0; +} + +static int get_sset_count(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 count; + + if (!buf_in || !buf_out || in_size != sizeof(u32) || + *out_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid parameters.\n"); + return -EINVAL; + } + + switch (*((u32 *)buf_in)) { + case HINIC_SHOW_SSET_IO_STATS: + count = hinic_get_io_stats_size(nic_dev); + break; + + default: + count = 0; + break; + } + + *((u32 *)buf_out) = count; + + return 0; +} + +static int get_sset_stats(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_show_item *items = buf_out; + u32 sset, count, size; + int err; + + if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) + return -EINVAL; + + size = sizeof(u32); + err = get_sset_count(nic_dev, buf_in, in_size, &count, &size); + if (err) + return -EINVAL; + + if (count * sizeof(*items) != *out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, count * sizeof(*items)); + return -EINVAL; + } + + sset = *((u32 *)buf_in); + + switch (sset) { + case HINIC_SHOW_SSET_IO_STATS: + hinic_get_io_stats(nic_dev, items); + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %d to get stats\n", + sset); + err = -EINVAL; + break; + } + + return err; +} + +static int get_func_type(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_typ; + + func_typ = hinic_func_type(hwdev); + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = func_typ; + return 0; +} + +static int get_func_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_id; + + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_id = hinic_global_func_id_hw(hwdev); + *(u16 *)buf_out = func_id; + *out_size = sizeof(u16); + return 0; +} + +static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int offset = 0; + struct chip_fault_stats *fault_info; + + if 
(!buf_in || !buf_out || *out_size != sizeof(*fault_info) || + in_size != sizeof(*fault_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*fault_info)); + return -EFAULT; + } + fault_info = (struct chip_fault_stats *)buf_in; + offset = fault_info->offset; + fault_info = (struct chip_fault_stats *)buf_out; + hinic_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset); + + return 0; +} + +static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return hinic_dbg_get_hw_stats(hwdev, buf_out, (u16 *)out_size); +} + +static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + *out_size = hinic_dbg_clear_hw_stats(hwdev); + return 0; +} + +static int get_drv_version(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct drv_version_info *ver_info; + char ver_str[MAX_VER_INFO_LEN] = {0}; + int err; + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EFAULT; + } + err = snprintf(ver_str, sizeof(ver_str), + "%s [compiled with the kernel]", HINIC_DRV_VERSION); + if (err <= 0 || err >= MAX_VER_INFO_LEN) { + pr_err("Failed snprintf driver version, function return(%d) and dest_len(%d)\n", + err, MAX_VER_INFO_LEN); + return -EFAULT; + } + ver_info = (struct drv_version_info *)buf_out; + memcpy(ver_info->ver, ver_str, sizeof(ver_str)); + + return 0; +} + +static int get_self_test(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_in || !buf_out || in_size != sizeof(struct card_info) || + *out_size != sizeof(struct card_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct card_info)); + return -EFAULT; + } + + hinic_get_card_info(hwdev, buf_out); + *out_size = in_size; + + return 0; +} + +static int get_device_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 dev_id; + int err; + + if (!buf_out || !buf_in || *out_size != sizeof(u16) || + in_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + err = hinic_get_device_id(hwdev, &dev_id); + if (err) + return err; + + *((u32 *)buf_out) = dev_id; + *out_size = in_size; + + return 0; +} + +static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + bool in_host; + + if (!buf_out || (*out_size != sizeof(u8))) + return -EINVAL; + + in_host = hinic_is_in_host(); + if (in_host) + *((u8 *)buf_out) = 0; + else + *((u8 *)buf_out) = 1; + + return 0; +} + +static int get_pf_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic_pf_info *pf_info; + u32 port_id = 0; + int err; + + if (!buf_out || (*out_size != sizeof(*pf_info)) || + !buf_in || in_size != sizeof(u32)) + return -EINVAL; + + port_id = *((u32 *)buf_in); + pf_info = (struct hinic_pf_info *)buf_out; + err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id, + &pf_info->isvalid); + if (err) + return err; + + *out_size = sizeof(*pf_info); + + return 0; +} + +static int __get_card_usr_api_chain_mem(int card_idx) +{ + unsigned char *tmp; + int i; 
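+	/*
+	 * Illustrative note, not part of the original patch: the pages
+	 * allocated below back the per-card userspace API chain.  They are
+	 * marked with SetPageReserved() so the mm layer leaves them alone,
+	 * which is what allows g_card_phy_addr[card_idx] to be exported to
+	 * the tool (see usr_api_phy_addr below) and mapped through the char
+	 * device's hinic_mem_mmap() for the lifetime of the module.
+	 */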
+ + mutex_lock(&g_addr_lock); + card_id = card_idx; + if (!g_card_vir_addr[card_idx]) { + g_card_vir_addr[card_idx] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_card_vir_addr[card_idx]) { + pr_err("Alloc api chain memory fail for card %d.\n", + card_idx); + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + memset(g_card_vir_addr[card_idx], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_card_phy_addr[card_idx] = + virt_to_phys(g_card_vir_addr[card_idx]); + if (!g_card_phy_addr[card_idx]) { + pr_err("phy addr for card %d is 0.\n", card_idx); + free_pages((unsigned long)g_card_vir_addr[card_idx], + DBGTOOL_PAGE_ORDER); + g_card_vir_addr[card_idx] = NULL; + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + tmp = g_card_vir_addr[card_idx]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_addr_lock); + + return 0; +} + +static int get_pf_dev_info(char *dev_name, struct msg_module *nt_msg) +{ + struct pf_dev_info dev_info[16] = { {0} }; + struct card_node *card_info = NULL; + int i; + int err; + + if (nt_msg->lenInfo.outBuffLen != (sizeof(dev_info) * 16) || + nt_msg->lenInfo.inBuffLen != (sizeof(dev_info) * 16)) { + pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n", + nt_msg->lenInfo.outBuffLen, nt_msg->lenInfo.inBuffLen, + (sizeof(dev_info) * 16)); + return -EINVAL; + } + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Can't find this card %s\n", dev_name); + return -EFAULT; + } + + err = __get_card_usr_api_chain_mem(i); + if (err) { + pr_err("Faile to get api chain memory for userspace %s\n", + dev_name); + return -EFAULT; + } + + chipif_get_all_pf_dev_info(dev_info, i, + card_info->func_handle_array); + + /* Copy the dev_info to user mode */ + if (copy_to_user(nt_msg->out_buf, dev_info, sizeof(dev_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +static int knl_free_mem(char *dev_name, struct msg_module *nt_msg) +{ + struct card_node *card_info = NULL; + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Can't find this card %s\n", dev_name); + return -EFAULT; + } + + dbgtool_knl_free_mem(i); + + return 0; +} + +extern void hinic_get_card_func_info_by_card_name( + const char *chip_name, struct hinic_card_func_info *card_func); + +static int get_card_func_info(char *dev_name, struct msg_module *nt_msg) +{ + struct hinic_card_func_info card_func_info = {0}; + int id, err; + + if (nt_msg->lenInfo.outBuffLen != sizeof(card_func_info) || + nt_msg->lenInfo.inBuffLen != sizeof(card_func_info)) { + pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n", + nt_msg->lenInfo.outBuffLen, nt_msg->lenInfo.inBuffLen, + sizeof(card_func_info)); + return -EINVAL; + } + + err = memcmp(dev_name, HINIC_CHIP_NAME, strlen(HINIC_CHIP_NAME)); + if (err) { + pr_err("Invalid chip name %s\n", dev_name); + return err; + } + + err = sscanf(dev_name, HINIC_CHIP_NAME "%d", &id); + if (err <= 0) { + pr_err("Failed to get hinic id\n"); + return err; + } + + if (id >= MAX_CARD_NUM) { + pr_err("chip id %d exceed 
limit[0-%d]\n", id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + hinic_get_card_func_info_by_card_name(dev_name, &card_func_info); + + if (!card_func_info.num_pf) { + pr_err("None function found for %s\n", dev_name); + return -EFAULT; + } + + err = __get_card_usr_api_chain_mem(id); + if (err) { + pr_err("Faile to get api chain memory for userspace %s\n", + dev_name); + return -EFAULT; + } + + card_func_info.usr_api_phy_addr = g_card_phy_addr[id]; + + /* Copy the dev_info to user mode */ + if (copy_to_user(nt_msg->out_buf, &card_func_info, + sizeof(card_func_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30 +static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 loop_cnt = 0; + + while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) { + if (!hinic_get_mgmt_channel_status(hwdev)) + return 0; + + msleep(1000); + loop_cnt++; + } + if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + + +struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { + {TX_INFO, get_tx_info}, + {Q_NUM, get_q_num}, + {TX_WQE_INFO, get_tx_wqe_info}, + {RX_INFO, get_rx_info}, + {RX_WQE_INFO, get_rx_wqe_info}, + {RX_CQE_INFO, get_rx_cqe_info}, + {GET_INTER_NUM, get_inter_num}, + {CLEAR_FUNC_STASTIC, clear_func_static}, + {GET_NUM_COS, get_num_cos}, + {GET_COS_UP_MAP, get_dcb_cos_up_map}, + {SET_COS_UP_MAP, set_dcb_cos_up_map}, + {GET_LOOPBACK_MODE, get_loopback_mode}, + {SET_LOOPBACK_MODE, set_loopback_mode}, + {SET_LINK_MODE, set_link_mode}, + {SET_PF_BW_LIMIT, set_pf_bw_limit}, + {GET_PF_BW_LIMIT, get_pf_bw_limit}, + {GET_POLL_WEIGHT, get_poll_weight}, + {SET_POLL_WEIGHT, set_poll_weight}, + {GET_HOMOLOGUE, get_homologue}, + {SET_HOMOLOGUE, set_homologue}, + {GET_SSET_COUNT, get_sset_count}, + {GET_SSET_ITEMS, get_sset_stats}, + {SET_PFC_CONTROL, set_pfc_control}, + {SET_ETS, set_ets}, + {GET_ETS_INFO, get_ets_info}, + {SET_PFC_PRIORITY, set_pfc_priority}, + {SET_DCB_CFG, set_dcb_cfg}, + {GET_PFC_INFO, get_pfc_info}, + {GET_SUPPORT_UP, get_support_up}, + {GET_SUPPORT_TC, get_support_tc}, +}; + +struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { + {FUNC_TYPE, get_func_type}, + {GET_FUNC_IDX, get_func_id}, + {GET_DRV_VERSION, get_drv_version}, + {GET_HW_STATS, get_hw_stats}, + {CLEAR_HW_STATS, clear_hw_stats}, + {GET_SELF_TEST_RES, get_self_test}, + {GET_CHIP_FAULT_STATS, get_chip_faults_stats}, + {GET_CHIP_ID, get_chip_id_test}, + {GET_SINGLE_CARD_INFO, get_single_card_info}, + {GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status}, + {GET_DEVICE_ID, get_device_id}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_PF_ID, get_pf_id}, +}; + +static int send_to_nic_driver(struct hinic_nic_dev *nic_dev, + u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int index, num_cmds = sizeof(nic_driv_module_cmd_handle) / + sizeof(nic_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd; + int err = 0; + + mutex_lock(&nic_dev->nic_mutex); + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + nic_driv_module_cmd_handle[index].driv_cmd_name) { + err = nic_driv_module_cmd_handle[index].driv_func + (nic_dev, buf_in, + in_size, buf_out, out_size); + break; + } + } + mutex_unlock(&nic_dev->nic_mutex); + + if (index == num_cmds) + return -EINVAL; + return err; +} + +static int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void 
*buf_out, + u32 *out_size) +{ + int index, num_cmds = sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type = + (enum driver_cmd_type)(nt_msg->msg_formate); + int err = 0; + + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + hw_driv_module_cmd_handle[index].driv_cmd_name) { + err = hw_driv_module_cmd_handle[index].driv_func + (hwdev, buf_in, + in_size, buf_out, out_size); + break; + } + } + + if (index == num_cmds) + return -EINVAL; + + return err; +} + +static int send_to_ucode(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int ret = 0; + + if (nt_msg->ucode_cmd.ucode_db.ucode_imm) { + ret = hinic_cmdq_direct_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send direct cmdq err: %d!\n", ret); + } else { + ret = hinic_cmdq_detail_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send detail cmdq err: %d!\n", ret); + } + + return ret; +} + +static int api_csr_read(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + + if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) || + *out_size != up_log_msg->rd_len) + return -EINVAL; + + rd_len = up_log_msg->rd_len; + rd_addr = up_log_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + rd_cnt = rd_len / 4; + + if (rd_len % 4) + rd_cnt++; + + for (i = 0; i < rd_cnt; i++) { + ret = hinic_api_csr_rd32(hwdev, node_id, + rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + pr_err("Csr rd fail, err: %d, node_id: %d, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += 4; + } + *out_size = rd_len; + + return ret; +} + +static int api_csr_write(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + u8 *data; + + if (!buf_in || in_size != sizeof(*csr_write_msg)) + return -EINVAL; + + rd_len = csr_write_msg->rd_len; + rd_addr = csr_write_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + if (rd_len % 4) { + pr_err("Csr length must be a multiple of 4\n"); + return -EFAULT; + } + + rd_cnt = rd_len / 4; + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) { + pr_err("No more memory\n"); + return -EFAULT; + } + if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) { + pr_err("Copy information from user failed\n"); + kfree(data); + return -EFAULT; + } + + for (i = 0; i < rd_cnt; i++) { + ret = hinic_api_csr_wr32(hwdev, node_id, + rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + pr_err("Csr wr fail, ret: %d, node_id: %d, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + kfree(data); + return ret; + } + offset += 4; + } + + *out_size = 0; + kfree(data); + return ret; +} + +static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd) +{ + if (mod == HINIC_MOD_L2NIC && cmd == NIC_UP_CMD_UPDATE_FW) + return 
UP_UPDATEFW_TIME_OUT_VAL; + else + return UP_COMP_TIME_OUT_VAL; +} + +static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + u32 rd_len = csr_write_msg->rd_len; + + if (rd_len > TOOL_COUNTER_MAX_LEN) { + pr_err("Csr read or write len is invalid!\n"); + return -EINVAL; + } + + return 0; +} + +static int send_to_up(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + if ((nt_msg->up_cmd.up_db.up_api_type == API_CMD) || + (nt_msg->up_cmd.up_db.up_api_type == API_CLP)) { + enum hinic_mod_type mod; + u8 cmd; + u32 timeout; + + mod = (enum hinic_mod_type)nt_msg->up_cmd.up_db.comm_mod_type; + cmd = nt_msg->up_cmd.up_db.chipif_cmd; + + timeout = get_up_timeout_val(mod, cmd); + + if (nt_msg->up_cmd.up_db.up_api_type == API_CMD) + ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size, + timeout); + else + ret = hinic_clp_to_mgmt(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + if (ret) { + pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n", + mod, cmd); + return ret; + } + + } else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) { + if (check_useparam_valid(nt_msg, buf_in)) + return -EINVAL; + + if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) { + ret = api_csr_write(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + return ret; + } + + ret = api_csr_read(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + } + + return ret; +} + +static int sm_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hinic_sm_ctr_rd32(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (32 bits)failed!\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_pair(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0, val2 = 0; + int ret; + + ret = hinic_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2); + if (ret) { + pr_err("Get sm ctr information (64 bits pair)failed!\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hinic_sm_ctr_rd64(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (64 bits)failed!\n"); + val1 = 0xffffffff; + } + buf_out->val1 = val1; + + return ret; +} + +typedef int (*sm_module)(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out); + +struct sm_module_handle { + enum sm_cmd_type smCmdName; + sm_module smFunc; +}; + +struct sm_module_handle sm_module_cmd_handle[] = { + {SM_CTR_RD32, sm_rd32}, + {SM_CTR_RD64_PAIR, sm_rd64_pair}, + {SM_CTR_RD64, sm_rd64} +}; + +static int send_to_sm(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct sm_in_st *sm_in = buf_in; + struct sm_out_st *sm_out = buf_out; + u32 msg_formate = nt_msg->msg_formate; + int index, num_cmds = sizeof(sm_module_cmd_handle) / + sizeof(sm_module_cmd_handle[0]); + int ret = 0; + + if (!buf_in || !buf_out || in_size != sizeof(*sm_in) || + *out_size != sizeof(*sm_out)) + return -EINVAL; + + for (index = 0; index < num_cmds; index++) { + if (msg_formate == sm_module_cmd_handle[index].smCmdName) + ret = 
sm_module_cmd_handle[index].smFunc(hwdev, + (u32)sm_in->id, + (u8)sm_in->instance, + (u8)sm_in->node, sm_out); + } + + if (ret) + pr_err("Get sm information fail!\n"); + + *out_size = sizeof(struct sm_out_st); + + return ret; +} + +static bool is_hwdev_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + void *hwdev; + + hwdev = hinic_get_hwdev_by_ifname(ifname); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", ifname); + return false; + } + + switch (mod) { + case SEND_TO_UP: + case SEND_TO_SM: + if (FUNC_SUPPORT_MGMT(hwdev)) { + if (up_api_type == API_CLP) { + if (!hinic_is_hwdev_mod_inited + (hwdev, HINIC_HWDEV_CLP_INITED)) { + pr_err("CLP have not initialized\n"); + return false; + } + } else if (!hinic_is_hwdev_mod_inited + (hwdev, HINIC_HWDEV_MGMT_INITED)) { + pr_err("MGMT have not initialized\n"); + return false; + } + } else if (!hinic_is_hwdev_mod_inited + (hwdev, HINIC_HWDEV_MBOX_INITED)) { + pr_err("MBOX have not initialized\n"); + return false; + } + + if (mod == SEND_TO_SM && + ((hinic_func_type(hwdev) == TYPE_VF) || + (!hinic_is_hwdev_mod_inited(hwdev, + HINIC_HWDEV_MGMT_INITED)))) { + pr_err("Current function do not support this cmd\n"); + return false; + } + break; + + case SEND_TO_UCODE: + if (!hinic_is_hwdev_mod_inited(hwdev, + HINIC_HWDEV_CMDQ_INITED)) { + pr_err("CMDQ have not initialized\n"); + return false; + } + break; + + default: + return false; + } + + return true; +} + +static bool nictool_k_is_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + enum hinic_init_state init_state = + hinic_get_init_state_by_ifname(ifname); + bool support = true; + + if (init_state == HINIC_INIT_STATE_NONE) + return false; + + if (mod == SEND_TO_NIC_DRIVER) { + if (init_state < HINIC_INIT_STATE_NIC_INITED) { + pr_err("NIC driver have not initialized\n"); + return false; + } + } else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) { + return is_hwdev_cmd_support(mod, ifname, up_api_type); + } else if ((mod >= HINICADM_OVS_DRIVER && + mod <= HINICADM_FCOE_DRIVER) || + mod == SEND_TO_HW_DRIVER) { + if (init_state < HINIC_INIT_STATE_HWDEV_INITED) { + pr_err("Hwdev have not initialized\n"); + return false; + } + } else { + pr_err("Unsupport mod %d\n", mod); + support = false; + } + + return support; +} + +static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, + void **buf_in, u32 out_size, void **buf_out) +{ + int ret; + + ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in); + if (ret) { + pr_err("Alloc tool cmd buff in failed\n"); + return ret; + } + + ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out); + if (ret) { + pr_err("Alloc tool cmd buff out failed\n"); + goto out_free_buf_in; + } + + return 0; + +out_free_buf_in: + free_buff_in(hwdev, nt_msg, *buf_in); + + return ret; +} + +static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, + void *buf_in, void *buf_out) +{ + free_buff_out(hwdev, nt_msg, buf_out); + free_buff_in(hwdev, nt_msg, buf_in); +} + +static int get_self_test_cmd(struct msg_module *nt_msg) +{ + int ret; + u32 res = 0; + + ret = hinic_get_self_test_result(nt_msg->device_name, &res); + if (ret) { + pr_err("Get self test result failed!\n"); + return -EFAULT; + } + + ret = copy_buf_out_to_user(nt_msg, sizeof(res), &res); + if (ret) + pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__); + + return ret; +} + +static int get_all_chip_id_cmd(struct msg_module *nt_msg) +{ + struct nic_card_id card_id; + + memset(&card_id, 0, sizeof(card_id)); + + hinic_get_all_chip_id((void 
*)&card_id); + + if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) { + pr_err("Copy chip id to user failed\n"); + return -EFAULT; + } + + return 0; +} + +int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + return send_to_nic_driver(uld_dev, cmd, buf_in, + in_size, buf_out, out_size); +} + +static void *__get_dev_support_nic_cmd(struct msg_module *nt_msg, + enum hinic_service_type type) +{ + void *uld_dev = NULL; + + /* set/get qos must use chip_name(hinic0) */ + switch (nt_msg->msg_formate) { + case GET_COS_UP_MAP: + case SET_COS_UP_MAP: + case GET_NUM_COS: + uld_dev = hinic_get_uld_by_chip_name(nt_msg->device_name, type); + if (!uld_dev) + pr_err("Get/set cos_up must use chip_name(hinic0)\n"); + + return uld_dev; + + default: + break; + } + + uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type); + if (!uld_dev) + pr_err("Can not get the uld dev correctly: %s, nic driver may be not register\n", + nt_msg->device_name); + + return uld_dev; +} + +static void *get_support_uld_dev(struct msg_module *nt_msg, + enum hinic_service_type type) +{ + char *service_name[SERVICE_T_MAX] = {"NIC", "OVS", "ROCE", "TOE", + "IWARP", "FC", "FCOE"}; + void *hwdev = NULL; + void *uld_dev = NULL; + + switch (nt_msg->module) { + case SEND_TO_NIC_DRIVER: + hwdev = hinic_get_hwdev_by_ifname(nt_msg->device_name); + if (!hinic_support_nic(hwdev, NULL)) { + pr_err("Current function don't support NIC\n"); + return NULL; + } + return __get_dev_support_nic_cmd(nt_msg, type); + default: + break; + } + + uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type); + if (!uld_dev) + pr_err("Can not get the uld dev correctly: %s, %s driver may be not register\n", + nt_msg->device_name, service_name[type]); + + return uld_dev; +} + +static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + enum hinic_service_type type; + int ret = 0; + + type = nt_msg->module - SEND_TO_SM; + *out_size = sizeof(struct drv_version_info); + + if (!g_uld_info[type].ioctl) + return ret; + + ret = g_uld_info[type].ioctl(NULL, nt_msg->msg_formate, buf_in, in_size, + buf_out, out_size); + if (ret) + return ret; + + if (copy_to_user(nt_msg->out_buf, buf_out, *out_size)) + return -EFAULT; + + return ret; +} + +int send_to_service_driver(struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + enum hinic_service_type type; + void *uld_dev; + int ret = -EINVAL; + + if (nt_msg->module == SEND_TO_NIC_DRIVER) + type = SERVICE_T_NIC; + else + type = nt_msg->module - SEND_TO_SM; + + if (type < SERVICE_T_MAX) { + uld_dev = get_support_uld_dev(nt_msg, type); + if (!uld_dev) + return -EINVAL; + + if (g_uld_info[type].ioctl) + ret = g_uld_info[type].ioctl(uld_dev, + nt_msg->msg_formate, + buf_in, in_size, buf_out, + out_size); + } else { + pr_err("Ioctl input module id: %d is incorrectly\n", + nt_msg->module); + } + + return ret; +} + +static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int ret; + + switch (nt_msg->module) { + case SEND_TO_HW_DRIVER: + ret = send_to_hw_driver(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UP: + ret = send_to_up(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UCODE: + ret = send_to_ucode(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_SM: + ret = send_to_sm(hwdev, 
nt_msg, buf_in, + in_size, buf_out, out_size); + break; + default: + ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out, + out_size); + break; + } + + return ret; +} + +static bool hinic_is_special_handling_cmd(struct msg_module *nt_msg, int *ret) +{ + unsigned int cmd_raw = nt_msg->module; + + /* Get self test result directly whatever driver probe success or not */ + if (cmd_raw == SEND_TO_HW_DRIVER && + nt_msg->msg_formate == GET_SELF_TEST_RES) { + *ret = get_self_test_cmd(nt_msg); + return true; + } + + if (cmd_raw == SEND_TO_HW_DRIVER && + nt_msg->msg_formate == GET_CHIP_ID) { + *ret = get_all_chip_id_cmd(nt_msg); + return true; + } + + if (cmd_raw == SEND_TO_HW_DRIVER && + nt_msg->msg_formate == GET_PF_DEV_INFO) { + *ret = get_pf_dev_info(nt_msg->device_name, nt_msg); + return true; + } + + if (cmd_raw == SEND_TO_HW_DRIVER && + nt_msg->msg_formate == CMD_FREE_MEM) { + *ret = knl_free_mem(nt_msg->device_name, nt_msg); + return true; + } + + if (cmd_raw == SEND_TO_HW_DRIVER && + nt_msg->msg_formate == GET_CHIP_INFO) { + *ret = get_card_func_info(nt_msg->device_name, nt_msg); + return true; + } + + return false; +} + +static long nictool_k_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + void *hwdev; + struct msg_module nt_msg; + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size_expect = 0; + u32 out_size = 0; + u32 in_size = 0; + unsigned int cmd_raw = 0; + int ret = 0; + + memset(&nt_msg, 0, sizeof(nt_msg)); + + if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) { + pr_err("Copy information from user failed\n"); + return -EFAULT; + } + + /* end with '\0' */ + nt_msg.device_name[IFNAMSIZ - 1] = '\0'; + + cmd_raw = nt_msg.module; + + out_size_expect = nt_msg.lenInfo.outBuffLen; + in_size = nt_msg.lenInfo.inBuffLen; + + hinic_tool_cnt_inc(); + + if (hinic_is_special_handling_cmd(&nt_msg, &ret)) + goto out_free_lock; + + if (cmd_raw == HINICADM_FC_DRIVER && + nt_msg.msg_formate == GET_CHIP_ID) + get_fc_devname(nt_msg.device_name); + + if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name, + nt_msg.up_cmd.up_db.up_api_type)) { + ret = -EFAULT; + goto out_free_lock; + } + + /* get the netdevice */ + hwdev = hinic_get_hwdev_by_ifname(nt_msg.device_name); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", + nt_msg.device_name); + ret = -ENODEV; + goto out_free_lock; + } + + ret = alloc_tmp_buf(hwdev, &nt_msg, in_size, + &buf_in, out_size_expect, &buf_out); + if (ret) { + pr_err("Alloc tmp buff failed\n"); + goto out_free_lock; + } + + out_size = out_size_expect; + + if (nt_msg.msg_formate == GET_DRV_VERSION && + (cmd_raw == HINICADM_FC_DRIVER || cmd_raw == HINICADM_TOE_DRIVER)) { + ret = get_service_drv_version(hwdev, &nt_msg, buf_in, + in_size, buf_out, &out_size); + goto out_free_buf; + } + + ret = nictool_exec_cmd(hwdev, &nt_msg, buf_in, + in_size, buf_out, &out_size); + if (ret) + goto out_free_buf; + + ret = copy_buf_out_to_user(&nt_msg, out_size_expect, buf_out); + if (ret) + pr_err("Copy information to user failed\n"); + +out_free_buf: + free_tmp_buf(hwdev, &nt_msg, buf_in, buf_out); + +out_free_lock: + hinic_tool_cnt_dec(); + + return (long)ret; +} + +static int nictool_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static const struct 
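+/*
+ * Illustrative note, not part of the original patch: userspace drives all
+ * of the handlers above through a single unlocked_ioctl on this char
+ * device.  The ioctl cmd number itself is ignored; everything is carried
+ * in struct msg_module, e.g. a hypothetical GET_SSET_COUNT request:
+ *
+ *	struct msg_module msg = { 0 };
+ *	u32 sset = HINIC_SHOW_SSET_IO_STATS, cnt = 0;
+ *
+ *	strncpy(msg.device_name, "hinic0", IFNAMSIZ - 1);
+ *	msg.module = SEND_TO_NIC_DRIVER;
+ *	msg.msg_formate = GET_SSET_COUNT;
+ *	msg.lenInfo.inBuffLen = sizeof(sset);
+ *	msg.lenInfo.outBuffLen = sizeof(cnt);
+ *	msg.in_buff = &sset;
+ *	msg.out_buf = &cnt;
+ *	ioctl(fd, 0, &msg);
+ */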
file_operations fifo_operations = { + .owner = THIS_MODULE, + .open = nictool_k_open, + .read = nictool_k_read, + .write = nictool_k_write, + .unlocked_ioctl = nictool_k_unlocked_ioctl, + .mmap = hinic_mem_mmap, +}; + +int if_nictool_exist(void) +{ + struct file *fp = NULL; + int exist = 0; + + fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0); + if (IS_ERR(fp)) { + exist = 0; + } else { + (void)filp_close(fp, NULL); + exist = 1; + } + + return exist; +} + +/** + * nictool_k_init - initialize the hw interface + */ +int nictool_k_init(void) +{ + int ret; + struct device *pdevice; + + if (g_nictool_init_flag) { + g_nictool_ref_cnt++; + /* already initialized */ + return 0; + } + + if (if_nictool_exist()) { + pr_err("Nictool device exists\n"); + return 0; + } + + /* Device ID: primary device ID (12bit) | + * secondary device number (20bit) + */ + g_dev_id = MKDEV(MAJOR_DEV_NUM, 0); + + /* Static device registration number */ + ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME); + if (ret < 0) { + ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME); + if (ret < 0) { + pr_err("Register nictool_dev fail(0x%x)\n", ret); + return ret; + } + } + + /* Create equipment */ + g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS); + if (IS_ERR(g_nictool_class)) { + pr_err("Create nictool_class fail\n"); + ret = -EFAULT; + goto class_create_err; + } + + /* Initializing the character device */ + cdev_init(&g_nictool_cdev, &fifo_operations); + + /* Add devices to the operating system */ + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret); + goto cdev_add_err; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(g_nictool_class, NULL, + g_dev_id, NULL, HIADM_DEV_NAME); + if (IS_ERR(pdevice)) { + pr_err("Export nictool device information to user space fail\n"); + ret = -EFAULT; + goto device_create_err; + } + + g_nictool_init_flag = 1; + g_nictool_ref_cnt = 1; + + pr_info("Register nictool_dev to system succeed\n"); + + return 0; + +device_create_err: + cdev_del(&g_nictool_cdev); + +cdev_add_err: + class_destroy(g_nictool_class); + +class_create_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +void nictool_k_uninit(void) +{ + if (g_nictool_init_flag) { + if ((--g_nictool_ref_cnt)) + return; + } + + g_nictool_init_flag = 0; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) + return; + + cdev_del(&g_nictool_cdev); + device_destroy(g_nictool_class, g_dev_id); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + + unregister_chrdev_region(g_dev_id, 1); + + pr_info("Unregister nictool_dev succeed\n"); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h new file mode 100644 index 000000000000..5f383137bcaa --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_NICTOOL_H_ +#define HINIC_NICTOOL_H_ + +#include "hinic_dfx_def.h" +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +/* completion timeout interval, unit is jiffies*/ +#define UP_COMP_TIME_OUT_VAL 10000U + +struct sm_in_st { + int node; + int id; + int instance; +}; + +struct sm_out_st { + u64 val1; + u64 val2; +}; + +struct up_log_msg_st { + u32 rd_len; + u32 addr; +}; + +struct csr_write_st { + u32 rd_len; + u32 addr; + u8 *data; +}; + +struct ipsurx_stats_info { + u32 addr; + u32 rd_cnt; +}; + +struct ucode_cmd_st { + union { + struct { + u32 comm_mod_type:8; + u32 ucode_cmd_type:4; + u32 cmdq_ack_type:3; + u32 ucode_imm:1; + u32 len:16; + } ucode_db; + u32 value; + }; +}; + +struct up_cmd_st { + union { + struct { + u32 comm_mod_type:8; + u32 chipif_cmd:8; + u32 up_api_type:16; + } up_db; + u32 value; + }; +}; + +struct _dcb_data { + u8 wr_flag; + u8 dcb_en; + u8 err; + u8 rsvd; +}; + +union _dcb_ctl { + struct _dcb_data dcb_data; + u32 data; +}; + +struct _pfc_data { + u8 pfc_en; + u8 pfc_priority; + u8 num_of_tc; + u8 err; +}; + +union _pfc { + struct _pfc_data pfc_data; + u32 data; +}; + +union _flag_com { + struct _ets_flag { + u8 flag_ets_enable:1; + u8 flag_ets_percent:1; + u8 flag_ets_cos:1; + u8 flag_ets_strict:1; + u8 rev:4; + } ets_flag; + u8 data; +}; + +struct _ets { + u8 ets_en; + u8 err; + u8 strict; + u8 tc[8]; + u8 ets_percent[8]; + union _flag_com flag_com; +}; + +#define API_CMD 0x1 +#define API_CHAIN 0x2 +#define API_CLP 0x3 + +struct msg_module { + char device_name[IFNAMSIZ]; + unsigned int module; + union { + u32 msg_formate; + struct ucode_cmd_st ucode_cmd; + struct up_cmd_st up_cmd; + }; + + struct { + u32 inBuffLen; + u32 outBuffLen; + } lenInfo; + u32 res; + void *in_buff; + void *out_buf; +}; + +#define MAX_VER_INFO_LEN 128 +struct drv_version_info { + char ver[MAX_VER_INFO_LEN]; +}; + +struct chip_fault_stats { + int offset; + u8 chip_faults[MAX_DRV_BUF_SIZE]; +}; + +struct hinic_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct hinic_cos_up_map { + u8 cos_up[HINIC_DCB_UP_MAX]; + u8 num_cos; +}; + +struct hinic_tx_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct hinic_dbg_sq_info { + u16 q_id; + u16 pi; + u16 ci;/* sw_ci */ + u16 fi;/* hw_ci */ + + u32 q_depth; + u16 pi_reverse;/* TODO: what is this? 
*/ + u16 weqbb_size; + + u8 priority; + u16 *ci_addr; + u64 cla_addr; + + void *slq_handle; + + /* TODO: NIC don't use direct wqe */ + struct hinic_tx_hw_page direct_wqe; + struct hinic_tx_hw_page db_addr; + u32 pg_idx; + + u32 glb_sq_id; +}; + +struct hinic_dbg_rq_info { + u16 q_id; + u16 glb_rq_id; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u16 msix_idx; + u32 msix_vector; +}; + +#ifndef BUSINFO_LEN +#define BUSINFO_LEN (32) +#endif +struct pf_info { + char name[IFNAMSIZ]; + char bus_info[BUSINFO_LEN]; + u32 pf_type; +}; + +#ifndef MAX_SIZE +#define MAX_SIZE (16) +#endif +struct card_info { + struct pf_info pf[MAX_SIZE]; + u32 pf_num; +}; + +struct nic_card_id { + u32 id[MAX_SIZE]; + u32 num; +}; + +struct func_pdev_info { + u64 bar0_phy_addr; + u64 bar0_size; + u64 rsvd1[4]; +}; + +struct hinic_card_func_info { + u32 num_pf; + u32 rsvd0; + u64 usr_api_phy_addr; + struct func_pdev_info pdev_info[MAX_SIZE]; +}; + +#ifndef NIC_UP_CMD_UPDATE_FW +#define NIC_UP_CMD_UPDATE_FW (114) +#endif + +#ifndef MAX_CARD_NUM +#define MAX_CARD_NUM (64) +#endif +extern void *g_card_node_array[MAX_CARD_NUM]; +extern void *g_card_vir_addr[MAX_CARD_NUM]; +extern u64 g_card_phy_addr[MAX_CARD_NUM]; +extern struct mutex g_addr_lock; +extern int card_id; + +struct hinic_nic_loop_mode { + u32 loop_mode; + u32 loop_ctrl; +}; + +struct hinic_nic_poll_weight { + int poll_weight; +}; + +enum hinic_homologues_state { + HINIC_HOMOLOGUES_OFF = 0, + HINIC_HOMOLOGUES_ON = 1, +}; + +struct hinic_homologues { + enum hinic_homologues_state homo_state; +}; + +struct hinic_pf_info { + u32 isvalid; + u32 pf_id; +}; + +int nictool_k_init(void); +void nictool_k_uninit(void); + +extern u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev); +extern void hinic_get_io_stats(struct hinic_nic_dev *nic_dev, + struct hinic_show_item *items); + +#define TOOL_COUNTER_MAX_LEN 512 + +#endif + diff --git a/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h new file mode 100644 index 000000000000..6b57078b15c0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_PCI_ID_TBL_H +#define HINIC_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HINIC_DEV_ID_1822_PF 0x1822 +#define HINIC_DEV_ID_1822_VF 0x375E +#define HINIC_DEV_ID_1822_VF_HV 0x379E +#define HINIC_DEV_ID_1822_SMTIO 0x020B +#define HINIC_DEV_ID_1822_PANGEA_100GE 0x0208 +#define HINIC_DEV_ID_1822_PANGEA_TP_10GE 0x0204 +#define HINIC_DEV_ID_1822_KR_40GE 0x020D +#define HINIC_DEV_ID_1822_KR_100GE 0x0205 +#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206 +#define HINIC_DEV_ID_1822_KR_25GE 0x0210 +#define HINIC_DEV_ID_1822_MULTI_HOST 0x0211 +#define HINIC_DEV_ID_1822_100GE 0x0200 +#define HINIC_DEV_ID_1822_100GE_MULTI_HOST 0x0201 + +#define HIFC_DEV_ID_1822_8G 0x0212 +#define HIFC_DEV_ID_1822_16G 0x0203 +#define HIFC_DEV_ID_1822_32G 0x0202 + +#define HIFC_DEV_ID_1822_SMTIO 0x020C + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c deleted file mode 100644 index 1e389a004e50..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ /dev/null @@ -1,1070 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_dev.h" -#include "hinic_port.h" -#include "hinic_dev.h" - -#define HINIC_MIN_MTU_SIZE 256 -#define HINIC_MAX_JUMBO_FRAME_SIZE 15872 - -enum mac_op { - MAC_DEL, - MAC_SET, -}; - -/** - * change_mac - change(add or delete) mac address - * @nic_dev: nic device - * @addr: mac address - * @vlan_id: vlan number to set with the mac - * @op: add or delete the mac - * - * Return 0 - Success, negative - Failure - **/ -static int change_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id, enum mac_op op) -{ - struct net_device *netdev = nic_dev->netdev; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_mac_cmd port_mac_cmd; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - enum hinic_port_cmd cmd; - u16 out_size; - int err; - - if (vlan_id >= VLAN_N_VID) { - netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n"); - return -EINVAL; - } - - if (op == MAC_SET) - cmd = HINIC_PORT_CMD_SET_MAC; - else - cmd = HINIC_PORT_CMD_DEL_MAC; - - port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - port_mac_cmd.vlan_id = vlan_id; - memcpy(port_mac_cmd.mac, addr, ETH_ALEN); - - err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd, - sizeof(port_mac_cmd), - &port_mac_cmd, &out_size); - if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { - dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n", - port_mac_cmd.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_add_mac - add mac address - * @nic_dev: nic device - * @addr: mac address - * @vlan_id: vlan number to set with the mac - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_add_mac(struct hinic_dev *nic_dev, - const u8 *addr, u16 vlan_id) -{ - return change_mac(nic_dev, addr, vlan_id, MAC_SET); -} - -/** - * hinic_port_del_mac - remove mac address - * @nic_dev: nic device - * @addr: mac address - * @vlan_id: vlan number that is connected to the mac - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id) -{ - return change_mac(nic_dev, addr, vlan_id, MAC_DEL); -} - -/** - * hinic_port_get_mac - get the mac address of the nic device - * @nic_dev: nic device - * 
@addr: returned mac address - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_mac_cmd port_mac_cmd; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC, - &port_mac_cmd, sizeof(port_mac_cmd), - &port_mac_cmd, &out_size); - if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { - dev_err(&pdev->dev, "Failed to get mac, ret = %d\n", - port_mac_cmd.status); - return -EFAULT; - } - - memcpy(addr, port_mac_cmd.mac, ETH_ALEN); - return 0; -} - -/** - * hinic_port_set_mtu - set mtu - * @nic_dev: nic device - * @new_mtu: new mtu - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu) -{ - struct net_device *netdev = nic_dev->netdev; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_mtu_cmd port_mtu_cmd; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - int err, max_frame; - u16 out_size; - - if (new_mtu < HINIC_MIN_MTU_SIZE) { - netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size"); - return -EINVAL; - } - - max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) { - netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size"); - return -EINVAL; - } - - port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - port_mtu_cmd.mtu = new_mtu; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, - &port_mtu_cmd, sizeof(port_mtu_cmd), - &port_mtu_cmd, &out_size); - if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) { - dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n", - port_mtu_cmd.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_add_vlan - add vlan to the nic device - * @nic_dev: nic device - * @vlan_id: the vlan number to add - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_vlan_cmd port_vlan_cmd; - - port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); - port_vlan_cmd.vlan_id = vlan_id; - - return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN, - &port_vlan_cmd, sizeof(port_vlan_cmd), - NULL, NULL); -} - -/** - * hinic_port_del_vlan - delete vlan from the nic device - * @nic_dev: nic device - * @vlan_id: the vlan number to delete - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_vlan_cmd port_vlan_cmd; - - port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); - port_vlan_cmd.vlan_id = vlan_id; - - return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN, - &port_vlan_cmd, sizeof(port_vlan_cmd), - NULL, NULL); -} - -/** - * hinic_port_set_rx_mode - set rx mode in the nic device - * @nic_dev: nic device - * @rx_mode: the rx mode to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_rx_mode_cmd rx_mode_cmd; - - rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); - rx_mode_cmd.rx_mode = rx_mode; - - return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE, - &rx_mode_cmd, 
sizeof(rx_mode_cmd), - NULL, NULL); -} - -/** - * hinic_port_link_state - get the link state - * @nic_dev: nic device - * @link_state: the returned link state - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_link_state(struct hinic_dev *nic_dev, - enum hinic_port_link_state *link_state) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_port_link_cmd link_cmd; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return -EINVAL; - } - - link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, - &link_cmd, sizeof(link_cmd), - &link_cmd, &out_size); - if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) { - dev_err(&pdev->dev, "Failed to get link state, ret = %d\n", - link_cmd.status); - return -EINVAL; - } - - *link_state = link_cmd.state; - return 0; -} - -/** - * hinic_port_set_state - set port state - * @nic_dev: nic device - * @state: the state to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_state_cmd port_state; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return -EINVAL; - } - - port_state.state = state; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE, - &port_state, sizeof(port_state), - &port_state, &out_size); - if (err || (out_size != sizeof(port_state)) || port_state.status) { - dev_err(&pdev->dev, "Failed to set port state, ret = %d\n", - port_state.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_set_func_state- set func device state - * @nic_dev: nic device - * @state: the state to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_func_state(struct hinic_dev *nic_dev, - enum hinic_func_port_state state) -{ - struct hinic_port_func_state_cmd func_state; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - func_state.state = state; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE, - &func_state, sizeof(func_state), - &func_state, &out_size); - if (err || (out_size != sizeof(func_state)) || func_state.status) { - dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n", - func_state.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_get_cap - get port capabilities - * @nic_dev: nic device - * @port_cap: returned port capabilities - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_get_cap(struct hinic_dev *nic_dev, - struct hinic_port_cap *port_cap) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP, - port_cap, sizeof(*port_cap), - port_cap, &out_size); - if (err || (out_size != sizeof(*port_cap)) || port_cap->status) { - dev_err(&pdev->dev, - "Failed to get port capabilities, ret = %d\n", - 
port_cap->status); - return -EINVAL; - } - - return 0; -} - -/** - * hinic_port_set_tso - set port tso configuration - * @nic_dev: nic device - * @state: the tso state to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_tso_config tso_cfg = {0}; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); - tso_cfg.tso_en = state; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO, - &tso_cfg, sizeof(tso_cfg), - &tso_cfg, &out_size); - if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) { - dev_err(&pdev->dev, - "Failed to set port tso, ret = %d\n", - tso_cfg.status); - return -EINVAL; - } - - return 0; -} - -int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en) -{ - struct hinic_checksum_offload rx_csum_cfg = {0}; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u16 out_size; - int err; - - if (!hwdev) - return -EINVAL; - - hwif = hwdev->hwif; - pdev = hwif->pdev; - rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); - rx_csum_cfg.rx_csum_offload = en; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM, - &rx_csum_cfg, sizeof(rx_csum_cfg), - &rx_csum_cfg, &out_size); - if (err || !out_size || rx_csum_cfg.status) { - dev_err(&pdev->dev, - "Failed to set rx csum offload, ret = %d\n", - rx_csum_cfg.status); - return -EINVAL; - } - - return 0; -} - -int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_vlan_cfg vlan_cfg; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u16 out_size; - int err; - - if (!hwdev) - return -EINVAL; - - hwif = hwdev->hwif; - pdev = hwif->pdev; - vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); - vlan_cfg.vlan_rx_offload = en; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, - &vlan_cfg, sizeof(vlan_cfg), - &vlan_cfg, &out_size); - if (err || !out_size || vlan_cfg.status) { - dev_err(&pdev->dev, - "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", - err, vlan_cfg.status, out_size); - return -EINVAL; - } - - return 0; -} - -int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_rq_num rq_num = { 0 }; - u16 out_size = sizeof(rq_num); - int err; - - rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif); - rq_num.num_rqs = num_rqs; - rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP, - &rq_num, sizeof(rq_num), - &rq_num, &out_size); - if (err || !out_size || rq_num.status) { - dev_err(&pdev->dev, - "Failed to rxq number, ret = %d\n", - rq_num.status); - return -EINVAL; - } - - return 0; -} - -static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en, - u8 max_wqe_num) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_lro_config lro_cfg = { 0 }; - struct pci_dev *pdev = hwif->pdev; - u16 out_size = sizeof(lro_cfg); - int err; - - lro_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); - lro_cfg.lro_ipv4_en = ipv4_en; - lro_cfg.lro_ipv6_en = ipv6_en; - lro_cfg.lro_max_wqe_num = max_wqe_num; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO, - &lro_cfg, sizeof(lro_cfg), 
- &lro_cfg, &out_size); - if (err || !out_size || lro_cfg.status) { - dev_err(&pdev->dev, - "Failed to set lro offload, ret = %d\n", - lro_cfg.status); - return -EINVAL; - } - - return 0; -} - -static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_lro_timer lro_timer = { 0 }; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size = sizeof(lro_timer); - int err; - - lro_timer.status = 0; - lro_timer.type = 0; - lro_timer.enable = 1; - lro_timer.timer = timer_value; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER, - &lro_timer, sizeof(lro_timer), - &lro_timer, &out_size); - if (lro_timer.status == 0xFF) { - /* For this case, we think status (0xFF) is OK */ - lro_timer.status = 0; - dev_dbg(&pdev->dev, - "Set lro timer not supported by the current FW version, it will be 1ms default\n"); - } - - if (err || !out_size || lro_timer.status) { - dev_err(&pdev->dev, - "Failed to set lro timer, ret = %d\n", - lro_timer.status); - - return -EINVAL; - } - - return 0; -} - -int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en, - u32 lro_timer, u32 wqe_num) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - u8 ipv4_en; - u8 ipv6_en; - int err; - - if (!hwdev) - return -EINVAL; - - ipv4_en = lro_en ? 1 : 0; - ipv6_en = lro_en ? 1 : 0; - - err = hinic_set_rx_lro(nic_dev, ipv4_en, ipv6_en, (u8)wqe_num); - if (err) - return err; - - err = hinic_set_rx_lro_timer(nic_dev, lro_timer); - if (err) - return err; - - return 0; -} - -int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, - const u32 *indir_table) -{ - struct hinic_rss_indirect_tbl *indir_tbl; - struct hinic_func_to_io *func_to_io; - struct hinic_cmdq_buf cmd_buf; - struct hinic_hwdev *hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u32 indir_size; - u64 out_param; - int err, i; - u32 *temp; - - hwdev = nic_dev->hwdev; - func_to_io = &hwdev->func_to_io; - hwif = hwdev->hwif; - pdev = hwif->pdev; - - err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); - return err; - } - - cmd_buf.size = sizeof(*indir_tbl); - - indir_tbl = cmd_buf.buf; - indir_tbl->group_index = cpu_to_be32(tmpl_idx); - - for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { - indir_tbl->entry[i] = indir_table[i]; - - if (0x3 == (i & 0x3)) { - temp = (u32 *)&indir_tbl->entry[i - 3]; - *temp = cpu_to_be32(*temp); - } - } - - /* cfg the rss indirect table by command queue */ - indir_size = HINIC_RSS_INDIR_SIZE / 2; - indir_tbl->offset = 0; - indir_tbl->size = cpu_to_be32(indir_size); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, - &cmd_buf, &out_param); - if (err || out_param != 0) { - dev_err(&pdev->dev, "Failed to set rss indir table\n"); - err = -EFAULT; - goto free_buf; - } - - indir_tbl->offset = cpu_to_be32(indir_size); - indir_tbl->size = cpu_to_be32(indir_size); - memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, - &cmd_buf, &out_param); - if (err || out_param != 0) { - dev_err(&pdev->dev, "Failed to set rss indir table\n"); - err = -EFAULT; - } - -free_buf: - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); - - return err; -} - -int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, - u32 *indir_table) -{ - struct 
hinic_rss_indir_table rss_cfg = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size = sizeof(rss_cfg); - int err = 0, i; - - rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); - rss_cfg.template_id = tmpl_idx; - - err = hinic_port_msg_cmd(hwdev, - HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, - &rss_cfg, sizeof(rss_cfg), &rss_cfg, - &out_size); - if (err || !out_size || rss_cfg.status) { - dev_err(&pdev->dev, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n", - err, rss_cfg.status, out_size); - return -EINVAL; - } - - hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE); - for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) - indir_table[i] = rss_cfg.indir[i]; - - return 0; -} - -int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx, - struct hinic_rss_type rss_type) -{ - struct hinic_rss_context_tbl *ctx_tbl; - struct hinic_func_to_io *func_to_io; - struct hinic_cmdq_buf cmd_buf; - struct hinic_hwdev *hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u64 out_param; - u32 ctx = 0; - int err; - - hwdev = nic_dev->hwdev; - func_to_io = &hwdev->func_to_io; - hwif = hwdev->hwif; - pdev = hwif->pdev; - - err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - ctx |= HINIC_RSS_TYPE_SET(1, VALID) | - HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | - HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | - HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | - HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | - HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | - HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | - HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | - HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); - - cmd_buf.size = sizeof(struct hinic_rss_context_tbl); - - ctx_tbl = (struct hinic_rss_context_tbl *)cmd_buf.buf; - ctx_tbl->group_index = cpu_to_be32(tmpl_idx); - ctx_tbl->offset = 0; - ctx_tbl->size = sizeof(u32); - ctx_tbl->size = cpu_to_be32(ctx_tbl->size); - ctx_tbl->rsvd = 0; - ctx_tbl->ctx = cpu_to_be32(ctx); - - /* cfg the rss context table by command queue */ - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, - &cmd_buf, &out_param); - - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); - - if (err || out_param != 0) { - dev_err(&pdev->dev, "Failed to set rss context table, err: %d\n", - err); - return -EFAULT; - } - - return 0; -} - -int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx, - struct hinic_rss_type *rss_type) -{ - struct hinic_rss_context_table ctx_tbl = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u16 out_size = sizeof(ctx_tbl); - int err; - - if (!hwdev || !rss_type) - return -EINVAL; - - hwif = hwdev->hwif; - pdev = hwif->pdev; - - ctx_tbl.func_id = HINIC_HWIF_FUNC_IDX(hwif); - ctx_tbl.template_id = tmpl_idx; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL, - &ctx_tbl, sizeof(ctx_tbl), - &ctx_tbl, &out_size); - if (err || !out_size || ctx_tbl.status) { - dev_err(&pdev->dev, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", - err, ctx_tbl.status, out_size); - return -EINVAL; - } - - rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); - rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); - rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); - rss_type->tcp_ipv4 = 
HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); - rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); - rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, - TCP_IPV6_EXT); - rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); - rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); - - return 0; -} - -int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id, - const u8 *temp) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_rss_key rss_key = { 0 }; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif); - rss_key.template_id = template_id; - memcpy(rss_key.key, temp, HINIC_RSS_KEY_SIZE); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, - &rss_key, sizeof(rss_key), - &rss_key, &out_size); - if (err || !out_size || rss_key.status) { - dev_err(&pdev->dev, - "Failed to set rss hash key, err: %d, status: 0x%x, out size: 0x%x\n", - err, rss_key.status, out_size); - return -EINVAL; - } - - return 0; -} - -int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, - u8 *temp) -{ - struct hinic_rss_template_key temp_key = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u16 out_size = sizeof(temp_key); - int err; - - if (!hwdev || !temp) - return -EINVAL; - - hwif = hwdev->hwif; - pdev = hwif->pdev; - - temp_key.func_id = HINIC_HWIF_FUNC_IDX(hwif); - temp_key.template_id = tmpl_idx; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, - &temp_key, sizeof(temp_key), - &temp_key, &out_size); - if (err || !out_size || temp_key.status) { - dev_err(&pdev->dev, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n", - err, temp_key.status, out_size); - return -EINVAL; - } - - memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE); - - return 0; -} - -int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id, - u8 type) -{ - struct hinic_rss_engine_type rss_engine = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif); - rss_engine.hash_engine = type; - rss_engine.template_id = template_id; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, - &rss_engine, sizeof(rss_engine), - &rss_engine, &out_size); - if (err || !out_size || rss_engine.status) { - dev_err(&pdev->dev, - "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n", - err, rss_engine.status, out_size); - return -EINVAL; - } - - return 0; -} - -int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type) -{ - struct hinic_rss_engine_type hash_type = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u16 out_size = sizeof(hash_type); - int err; - - if (!hwdev || !type) - return -EINVAL; - - hwif = hwdev->hwif; - pdev = hwif->pdev; - - hash_type.func_id = HINIC_HWIF_FUNC_IDX(hwif); - hash_type.template_id = tmpl_idx; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, - &hash_type, sizeof(hash_type), - &hash_type, &out_size); - if (err || !out_size || hash_type.status) { - dev_err(&pdev->dev, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n", - err, hash_type.status, out_size); - return -EINVAL; - } - - *type = hash_type.hash_engine; - 
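/* Every port getter/setter above follows the same management-message
 * convention: request and reply begin with an 8-byte head (status,
 * version, rsvd0[6]) followed by func_id, and a reply counts as valid
 * only when the transport call succeeded, the reply is non-empty and
 * the firmware status byte is zero. A minimal sketch of that shared
 * check; "mgmt_msg_head" and "hinic_check_reply" are illustrative
 * names, not driver symbols:
 */
struct mgmt_msg_head {
	u8 status;
	u8 version;
	u8 rsvd0[6];
};

static int hinic_check_reply(int err, u16 out_size, u8 status)
{
	/* transport failure, empty reply or firmware NAK all map to
	 * -EINVAL, as in the callers above
	 */
	if (err || !out_size || status)
		return -EINVAL;

	return 0;
}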
return 0; -} - -int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_rss_config rss_cfg = { 0 }; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); - rss_cfg.rss_en = rss_en; - rss_cfg.template_id = template_id; - rss_cfg.rq_priority_number = 0; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_CFG, - &rss_cfg, sizeof(rss_cfg), - &rss_cfg, &out_size); - if (err || !out_size || rss_cfg.status) { - dev_err(&pdev->dev, - "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", - err, rss_cfg.status, out_size); - return -EINVAL; - } - - return 0; -} - -int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx) -{ - struct hinic_rss_template_mgmt template_mgmt = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif); - template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, - &template_mgmt, sizeof(template_mgmt), - &template_mgmt, &out_size); - if (err || !out_size || template_mgmt.status) { - dev_err(&pdev->dev, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n", - err, template_mgmt.status, out_size); - return -EINVAL; - } - - *tmpl_idx = template_mgmt.template_id; - - return 0; -} - -int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx) -{ - struct hinic_rss_template_mgmt template_mgmt = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif); - template_mgmt.template_id = tmpl_idx; - template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, - &template_mgmt, sizeof(template_mgmt), - &template_mgmt, &out_size); - if (err || !out_size || template_mgmt.status) { - dev_err(&pdev->dev, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n", - err, template_mgmt.status, out_size); - return -EINVAL; - } - - return 0; -} - -int hinic_get_vport_stats(struct hinic_dev *nic_dev, - struct hinic_vport_stats *stats) -{ - struct hinic_cmd_vport_stats vport_stats = { 0 }; - struct hinic_port_stats_info stats_info = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - u16 out_size = sizeof(vport_stats); - struct pci_dev *pdev = hwif->pdev; - int err; - - stats_info.stats_version = HINIC_PORT_STATS_VERSION; - stats_info.func_id = HINIC_HWIF_FUNC_IDX(hwif); - stats_info.stats_size = sizeof(vport_stats); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT, - &stats_info, sizeof(stats_info), - &vport_stats, &out_size); - if (err || !out_size || vport_stats.status) { - dev_err(&pdev->dev, - "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, vport_stats.status, out_size); - return -EFAULT; - } - - memcpy(stats, &vport_stats.stats, sizeof(*stats)); - return 0; -} - -int hinic_get_phy_port_stats(struct hinic_dev *nic_dev, - struct hinic_phy_port_stats *stats) -{ - struct hinic_port_stats_info stats_info = { 0 }; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_port_stats *port_stats; - u16 out_size = 
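/* The RSS template calls above compose into one bring-up flow: allocate
 * a template, program the hash key, indirection table and hash type,
 * then enable RSS, freeing the template on any failure. A hedged sketch
 * using only functions declared in this driver (error unwinding
 * trimmed; "hinic_rss_example_init" is an illustrative name):
 */
static int hinic_rss_example_init(struct hinic_dev *nic_dev, const u8 *key,
				  const u32 *indir,
				  struct hinic_rss_type rss_type)
{
	u8 tmpl_idx;
	int err;

	err = hinic_rss_template_alloc(nic_dev, &tmpl_idx);
	if (err)
		return err;

	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, key);
	if (!err)
		err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir);
	if (!err)
		err = hinic_set_rss_type(nic_dev, tmpl_idx, rss_type);
	if (!err)
		err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);

	if (err)
		hinic_rss_template_free(nic_dev, tmpl_idx);

	return err;
}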
sizeof(*port_stats); - struct pci_dev *pdev = hwif->pdev; - int err; - - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) - return -ENOMEM; - - stats_info.stats_version = HINIC_PORT_STATS_VERSION; - stats_info.stats_size = sizeof(*port_stats); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS, - &stats_info, sizeof(stats_info), - port_stats, &out_size); - if (err || !out_size || port_stats->status) { - dev_err(&pdev->dev, - "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, port_stats->status, out_size); - err = -EINVAL; - goto out; - } - - memcpy(stats, &port_stats->stats, sizeof(*stats)); - -out: - kfree(port_stats); - - return err; -} - -int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_version_info up_ver = {0}; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u16 out_size; - int err; - - if (!hwdev) - return -EINVAL; - - hwif = hwdev->hwif; - pdev = hwif->pdev; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, - &up_ver, sizeof(up_ver), &up_ver, - &out_size); - if (err || !out_size || up_ver.status) { - dev_err(&pdev->dev, - "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", - err, up_ver.status, out_size); - return -EINVAL; - } - - snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver); - - return 0; -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h deleted file mode 100644 index 44772fd47fc1..000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ /dev/null @@ -1,588 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - */ - -#ifndef HINIC_PORT_H -#define HINIC_PORT_H - -#include -#include -#include - -#include "hinic_dev.h" - -#define HINIC_RSS_KEY_SIZE 40 -#define HINIC_RSS_INDIR_SIZE 256 -#define HINIC_PORT_STATS_VERSION 0 -#define HINIC_FW_VERSION_NAME 16 -#define HINIC_COMPILE_TIME_LEN 20 -#define HINIC_MGMT_VERSION_MAX_LEN 32 - -struct hinic_version_info { - u8 status; - u8 version; - u8 rsvd[6]; - - u8 ver[HINIC_FW_VERSION_NAME]; - u8 time[HINIC_COMPILE_TIME_LEN]; -}; - -enum hinic_rx_mode { - HINIC_RX_MODE_UC = BIT(0), - HINIC_RX_MODE_MC = BIT(1), - HINIC_RX_MODE_BC = BIT(2), - HINIC_RX_MODE_MC_ALL = BIT(3), - HINIC_RX_MODE_PROMISC = BIT(4), -}; - -enum hinic_port_link_state { - HINIC_LINK_STATE_DOWN, - HINIC_LINK_STATE_UP, -}; - -enum hinic_port_state { - HINIC_PORT_DISABLE = 0, - HINIC_PORT_ENABLE = 3, -}; - -enum hinic_func_port_state { - HINIC_FUNC_PORT_DISABLE = 0, - HINIC_FUNC_PORT_ENABLE = 2, -}; - -enum hinic_autoneg_cap { - HINIC_AUTONEG_UNSUPPORTED, - HINIC_AUTONEG_SUPPORTED, -}; - -enum hinic_autoneg_state { - HINIC_AUTONEG_DISABLED, - HINIC_AUTONEG_ACTIVE, -}; - -enum hinic_duplex { - HINIC_DUPLEX_HALF, - HINIC_DUPLEX_FULL, -}; - -enum hinic_speed { - HINIC_SPEED_10MB_LINK = 0, - HINIC_SPEED_100MB_LINK, - HINIC_SPEED_1000MB_LINK, - HINIC_SPEED_10GB_LINK, - HINIC_SPEED_25GB_LINK, - HINIC_SPEED_40GB_LINK, - HINIC_SPEED_100GB_LINK, - - HINIC_SPEED_UNKNOWN = 0xFF, -}; - -enum hinic_tso_state { - HINIC_TSO_DISABLE = 0, - HINIC_TSO_ENABLE = 1, -}; - -struct hinic_port_mac_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 vlan_id; - u16 rsvd1; - unsigned char mac[ETH_ALEN]; -}; - -struct hinic_port_mtu_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - 
u16 rsvd1; - u32 mtu; -}; - -struct hinic_port_vlan_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 vlan_id; -}; - -struct hinic_port_rx_mode_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd; - u32 rx_mode; -}; - -struct hinic_port_link_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 state; - u8 rsvd1; -}; - -struct hinic_port_state_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u8 state; - u8 rsvd1[3]; -}; - -struct hinic_port_link_status { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 rsvd1; - u8 link; - u8 rsvd2; -}; - -struct hinic_port_func_state_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd1; - u8 state; - u8 rsvd2[3]; -}; - -struct hinic_port_cap { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd1; - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; - u8 rsvd2[3]; -}; - -struct hinic_tso_config { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u16 rsvd1; - u8 tso_en; - u8 resv2[3]; -}; - -struct hinic_checksum_offload { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u16 rsvd1; - u32 rx_csum_offload; -}; - -struct hinic_rq_num { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u16 rsvd1[33]; - u32 num_rqs; - u32 rq_depth; -}; - -struct hinic_lro_config { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u16 rsvd1; - u8 lro_ipv4_en; - u8 lro_ipv6_en; - u8 lro_max_wqe_num; - u8 resv2[13]; -}; - -struct hinic_lro_timer { - u8 status; - u8 version; - u8 rsvd0[6]; - - u8 type; /* 0: set timer value, 1: get timer value */ - u8 enable; /* when set lro time, enable should be 1 */ - u16 rsvd1; - u32 timer; -}; - -struct hinic_vlan_cfg { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 vlan_rx_offload; - u8 rsvd1[5]; -}; - -struct hinic_rss_template_mgmt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 cmd; - u8 template_id; - u8 rsvd1[4]; -}; - -struct hinic_rss_template_key { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 template_id; - u8 rsvd1; - u8 key[HINIC_RSS_KEY_SIZE]; -}; - -struct hinic_rss_context_tbl { - u32 group_index; - u32 offset; - u32 size; - u32 rsvd; - u32 ctx; -}; - -struct hinic_rss_context_table { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 template_id; - u8 rsvd1; - u32 context; -}; - -struct hinic_rss_indirect_tbl { - u32 group_index; - u32 offset; - u32 size; - u32 rsvd; - u8 entry[HINIC_RSS_INDIR_SIZE]; -}; - -struct hinic_rss_indir_table { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 template_id; - u8 rsvd1; - u8 indir[HINIC_RSS_INDIR_SIZE]; -}; - -struct hinic_rss_key { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 template_id; - u8 rsvd1; - u8 key[HINIC_RSS_KEY_SIZE]; -}; - -struct hinic_rss_engine_type { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 template_id; - u8 hash_engine; - u8 rsvd1[4]; -}; - -struct hinic_rss_config { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u8 rss_en; - u8 template_id; - u8 rq_priority_number; - u8 rsvd1[11]; -}; - -struct hinic_stats { - char name[ETH_GSTRING_LEN]; - u32 size; - int offset; -}; - -struct hinic_vport_stats { - u64 tx_unicast_pkts_vport; - u64 tx_unicast_bytes_vport; - u64 tx_multicast_pkts_vport; - u64 tx_multicast_bytes_vport; - u64 tx_broadcast_pkts_vport; - u64 tx_broadcast_bytes_vport; - - u64 rx_unicast_pkts_vport; - u64 rx_unicast_bytes_vport; - u64 
rx_multicast_pkts_vport; - u64 rx_multicast_bytes_vport; - u64 rx_broadcast_pkts_vport; - u64 rx_broadcast_bytes_vport; - - u64 tx_discard_vport; - u64 rx_discard_vport; - u64 tx_err_vport; - u64 rx_err_vport; -}; - -struct hinic_phy_port_stats { - u64 mac_rx_total_pkt_num; - u64 mac_rx_total_oct_num; - u64 mac_rx_bad_pkt_num; - u64 mac_rx_bad_oct_num; - u64 mac_rx_good_pkt_num; - u64 mac_rx_good_oct_num; - u64 mac_rx_uni_pkt_num; - u64 mac_rx_multi_pkt_num; - u64 mac_rx_broad_pkt_num; - - u64 mac_tx_total_pkt_num; - u64 mac_tx_total_oct_num; - u64 mac_tx_bad_pkt_num; - u64 mac_tx_bad_oct_num; - u64 mac_tx_good_pkt_num; - u64 mac_tx_good_oct_num; - u64 mac_tx_uni_pkt_num; - u64 mac_tx_multi_pkt_num; - u64 mac_tx_broad_pkt_num; - - u64 mac_rx_fragment_pkt_num; - u64 mac_rx_undersize_pkt_num; - u64 mac_rx_undermin_pkt_num; - u64 mac_rx_64_oct_pkt_num; - u64 mac_rx_65_127_oct_pkt_num; - u64 mac_rx_128_255_oct_pkt_num; - u64 mac_rx_256_511_oct_pkt_num; - u64 mac_rx_512_1023_oct_pkt_num; - u64 mac_rx_1024_1518_oct_pkt_num; - u64 mac_rx_1519_2047_oct_pkt_num; - u64 mac_rx_2048_4095_oct_pkt_num; - u64 mac_rx_4096_8191_oct_pkt_num; - u64 mac_rx_8192_9216_oct_pkt_num; - u64 mac_rx_9217_12287_oct_pkt_num; - u64 mac_rx_12288_16383_oct_pkt_num; - u64 mac_rx_1519_max_bad_pkt_num; - u64 mac_rx_1519_max_good_pkt_num; - u64 mac_rx_oversize_pkt_num; - u64 mac_rx_jabber_pkt_num; - - u64 mac_rx_pause_num; - u64 mac_rx_pfc_pkt_num; - u64 mac_rx_pfc_pri0_pkt_num; - u64 mac_rx_pfc_pri1_pkt_num; - u64 mac_rx_pfc_pri2_pkt_num; - u64 mac_rx_pfc_pri3_pkt_num; - u64 mac_rx_pfc_pri4_pkt_num; - u64 mac_rx_pfc_pri5_pkt_num; - u64 mac_rx_pfc_pri6_pkt_num; - u64 mac_rx_pfc_pri7_pkt_num; - u64 mac_rx_control_pkt_num; - u64 mac_rx_y1731_pkt_num; - u64 mac_rx_sym_err_pkt_num; - u64 mac_rx_fcs_err_pkt_num; - u64 mac_rx_send_app_good_pkt_num; - u64 mac_rx_send_app_bad_pkt_num; - - u64 mac_tx_fragment_pkt_num; - u64 mac_tx_undersize_pkt_num; - u64 mac_tx_undermin_pkt_num; - u64 mac_tx_64_oct_pkt_num; - u64 mac_tx_65_127_oct_pkt_num; - u64 mac_tx_128_255_oct_pkt_num; - u64 mac_tx_256_511_oct_pkt_num; - u64 mac_tx_512_1023_oct_pkt_num; - u64 mac_tx_1024_1518_oct_pkt_num; - u64 mac_tx_1519_2047_oct_pkt_num; - u64 mac_tx_2048_4095_oct_pkt_num; - u64 mac_tx_4096_8191_oct_pkt_num; - u64 mac_tx_8192_9216_oct_pkt_num; - u64 mac_tx_9217_12287_oct_pkt_num; - u64 mac_tx_12288_16383_oct_pkt_num; - u64 mac_tx_1519_max_bad_pkt_num; - u64 mac_tx_1519_max_good_pkt_num; - u64 mac_tx_oversize_pkt_num; - u64 mac_tx_jabber_pkt_num; - - u64 mac_tx_pause_num; - u64 mac_tx_pfc_pkt_num; - u64 mac_tx_pfc_pri0_pkt_num; - u64 mac_tx_pfc_pri1_pkt_num; - u64 mac_tx_pfc_pri2_pkt_num; - u64 mac_tx_pfc_pri3_pkt_num; - u64 mac_tx_pfc_pri4_pkt_num; - u64 mac_tx_pfc_pri5_pkt_num; - u64 mac_tx_pfc_pri6_pkt_num; - u64 mac_tx_pfc_pri7_pkt_num; - u64 mac_tx_control_pkt_num; - u64 mac_tx_y1731_pkt_num; - u64 mac_tx_1588_pkt_num; - u64 mac_tx_err_all_pkt_num; - u64 mac_tx_from_app_good_pkt_num; - u64 mac_tx_from_app_bad_pkt_num; - - u64 mac_rx_higig2_ext_pkt_num; - u64 mac_rx_higig2_message_pkt_num; - u64 mac_rx_higig2_error_pkt_num; - u64 mac_rx_higig2_cpu_ctrl_pkt_num; - u64 mac_rx_higig2_unicast_pkt_num; - u64 mac_rx_higig2_broadcast_pkt_num; - u64 mac_rx_higig2_l2_multicast_pkt_num; - u64 mac_rx_higig2_l3_multicast_pkt_num; - - u64 mac_tx_higig2_message_pkt_num; - u64 mac_tx_higig2_ext_pkt_num; - u64 mac_tx_higig2_cpu_ctrl_pkt_num; - u64 mac_tx_higig2_unicast_pkt_num; - u64 mac_tx_higig2_broadcast_pkt_num; - u64 mac_tx_higig2_l2_multicast_pkt_num; - u64 
mac_tx_higig2_l3_multicast_pkt_num; -}; - -struct hinic_port_stats_info { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_id; - u16 rsvd1; - u32 stats_version; - u32 stats_size; -}; - -struct hinic_port_stats { - u8 status; - u8 version; - u8 rsvd[6]; - - struct hinic_phy_port_stats stats; -}; - -struct hinic_cmd_vport_stats { - u8 status; - u8 version; - u8 rsvd0[6]; - - struct hinic_vport_stats stats; -}; - -int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id); - -int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id); - -int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr); - -int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu); - -int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id); - -int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id); - -int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode); - -int hinic_port_link_state(struct hinic_dev *nic_dev, - enum hinic_port_link_state *link_state); - -int hinic_port_set_state(struct hinic_dev *nic_dev, - enum hinic_port_state state); - -int hinic_port_set_func_state(struct hinic_dev *nic_dev, - enum hinic_func_port_state state); - -int hinic_port_get_cap(struct hinic_dev *nic_dev, - struct hinic_port_cap *port_cap); - -int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs); - -int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state); - -int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en); - -int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en, - u32 lro_timer, u32 wqe_num); - -int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx, - struct hinic_rss_type rss_type); - -int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, - const u32 *indir_table); - -int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id, - const u8 *temp); - -int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id, - u8 type); - -int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id); - -int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx); - -int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx); - -void hinic_set_ethtool_ops(struct net_device *netdev); - -int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx, - struct hinic_rss_type *rss_type); - -int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, - u32 *indir_table); - -int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, - u8 *temp); - -int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, - u8 *type); - -int hinic_get_phy_port_stats(struct hinic_dev *nic_dev, - struct hinic_phy_port_stats *stats); - -int hinic_get_vport_stats(struct hinic_dev *nic_dev, - struct hinic_vport_stats *stats); - -int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en); - -int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h new file mode 100644 index 000000000000..c50bf2171c2b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 
as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __HINIC_PORT_CMD_H__ +#define __HINIC_PORT_CMD_H__ + +#ifdef __cplusplus + #if __cplusplus +extern "C"{ + #endif +#endif /* __cplusplus */ + +/* cmd of mgmt CPU message for NIC module */ +enum hinic_port_cmd { + HINIC_PORT_CMD_VF_REGISTER = 0x0, + /* not defined in base line, only for PFD and VFD */ + HINIC_PORT_CMD_VF_UNREGISTER = 0x1, + /* not defined in base line, only for PFD and VFD */ + + HINIC_PORT_CMD_CHANGE_MTU = 0x2, + + HINIC_PORT_CMD_ADD_VLAN = 0x3, + HINIC_PORT_CMD_DEL_VLAN, + + HINIC_PORT_CMD_SET_PFC = 0x5, + HINIC_PORT_CMD_GET_PFC, + HINIC_PORT_CMD_SET_ETS, + HINIC_PORT_CMD_GET_ETS, + + HINIC_PORT_CMD_SET_MAC = 0x9, + HINIC_PORT_CMD_GET_MAC, + HINIC_PORT_CMD_DEL_MAC, + + HINIC_PORT_CMD_SET_RX_MODE = 0xc, + HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xd, + + HINIC_PORT_CMD_GET_AUTONEG_CAP = 0xf, + /* not defined in base line */ + HINIC_PORT_CMD_GET_AUTONET_STATE, + /* not defined in base line */ + HINIC_PORT_CMD_GET_SPEED, + /* not defined in base line */ + HINIC_PORT_CMD_GET_DUPLEX, + /* not defined in base line */ + HINIC_PORT_CMD_GET_MEDIA_TYPE, + /* not defined in base line */ + + HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14, + HINIC_PORT_CMD_SET_PAUSE_INFO, + + HINIC_PORT_CMD_GET_LINK_STATE = 0x18, + HINIC_PORT_CMD_SET_LRO = 0x19, + HINIC_PORT_CMD_SET_RX_CSUM = 0x1a, + HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1b, + + HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1c, + HINIC_PORT_CMD_CLEAR_PORT_STATISTICS, + HINIC_PORT_CMD_GET_VPORT_STAT, + HINIC_PORT_CMD_CLEAN_VPORT_STAT, + + HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25, + HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL, + + HINIC_PORT_CMD_SET_PORT_ENABLE = 0x29, + HINIC_PORT_CMD_GET_PORT_ENABLE, + + HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2b, + HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, + HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, + HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, + HINIC_PORT_CMD_GET_RSS_CTX_TBL, + HINIC_PORT_CMD_SET_RSS_CTX_TBL, + HINIC_PORT_CMD_RSS_TEMP_MGR, + + /* 0x36 ~ 0x40 have defined in base line */ + + HINIC_PORT_CMD_RSS_CFG = 0x42, + + HINIC_PORT_CMD_GET_PHY_TYPE = 0x44, + HINIC_PORT_CMD_INIT_FUNC = 0x45, + HINIC_PORT_CMD_SET_LLI_PRI = 0x46, + + HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48, + HINIC_PORT_CMD_SET_LOOPBACK_MODE, + + HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a, + HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE, + + /* 0x4c ~ 0x57 have defined in base line */ + HINIC_PORT_CMD_DISABLE_PROMISIC = 0x4c, + HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4e, + HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58, + HINIC_PORT_CMD_GET_BOOT_VERSION, + HINIC_PORT_CMD_GET_MICROCODE_VERSION, + + HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b, + /* not defined in base line */ + + HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c, + HINIC_PORT_CMD_SET_VPORT_ENABLE, + + HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5e, + + HINIC_PORT_CMD_SET_LED_TEST = 0x5f, + + HINIC_PORT_CMD_SET_LLI_STATE = 0x60, + HINIC_PORT_CMD_SET_LLI_TYPE, + HINIC_PORT_CMD_GET_LLI_CFG, + + HINIC_PORT_CMD_GET_LRO = 0x63, + + HINIC_PORT_CMD_GET_DMA_CS = 0x64, + HINIC_PORT_CMD_SET_DMA_CS, + + HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66, + + HINIC_PORT_CMD_SET_PFC_MISC = 0x67, + HINIC_PORT_CMD_GET_PFC_MISC, + + HINIC_PORT_CMD_SET_VF_RATE = 0x69, + HINIC_PORT_CMD_SET_VF_VLAN, + HINIC_PORT_CMD_CLR_VF_VLAN, + + /* 0x6c,0x6e have defined in base 
line */ + HINIC_PORT_CMD_SET_UCAPTURE_OPT = 0x6F, + + HINIC_PORT_CMD_SET_TSO = 0x70, + HINIC_PORT_CMD_SET_PHY_POWER = 0x71, + HINIC_PORT_CMD_UPDATE_FW = 0x72, + HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73, + /* not defined in base line */ + HINIC_PORT_CMD_SET_PFC_THD = 0x75, + /* not defined in base line */ + HINIC_PORT_CMD_SET_PORT_LINK_STATUS = 0x76, + HINIC_PORT_CMD_SET_CGE_PAUSE_TIME_CFG = 0x77, + + HINIC_PORT_CMD_GET_FW_SUPPORT_FLAG = 0x79, + + HINIC_PORT_CMD_SET_PORT_REPORT = 0x7B, + + HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xa0, + + HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xa3, + HINIC_PORT_CMD_UPDATE_MAC = 0xa4, + + HINIC_PORT_CMD_GET_UART_LOG = 0xa5, + HINIC_PORT_CMD_SET_UART_LOG, + + HINIC_PORT_CMD_GET_PORT_INFO = 0xaa, + + HINIC_MISC_SET_FUNC_SF_ENBITS = 0xab, + /* not defined in base line */ + HINIC_MISC_GET_FUNC_SF_ENBITS, + /* not defined in base line */ + + HINIC_PORT_CMD_GET_SFP_INFO = 0xad, + + HINIC_PORT_CMD_SET_NETQ = 0xc1, + HINIC_PORT_CMD_ADD_RQ_FILTER = 0xc2, + HINIC_PORT_CMD_DEL_RQ_FILTER = 0xc3, + + HINIC_PORT_CMD_GET_FW_LOG = 0xca, + HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb, + HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc, + + HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4, + + HINIC_PORT_CMD_SET_IQ_ENABLE = 0xd6, + + HINIC_PORT_CMD_GET_LINK_MODE = 0xD9, + HINIC_PORT_CMD_SET_SPEED = 0xDA, + HINIC_PORT_CMD_SET_AUTONEG = 0xDB, + + HINIC_PORT_CMD_CLEAR_SQ_RES = 0xDD, + HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE, + HINIC_PORT_CMD_SET_VF_COS = 0xDF, + HINIC_PORT_CMD_GET_VF_COS = 0xE1, + + HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5, + HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6, + + HINIC_PORT_CMD_SET_PORT_FUNCS_STATE = 0xE7, + HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8, + + HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB, + HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0, + + HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3, + HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4, + + HINIC_PORT_CMD_SET_VHD_CFG = 0xF7, + HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8, + HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9, + HINIC_PORT_CMD_SET_RXQ_LRO_ADPT = 0xFA, + HINIC_PORT_CMD_GET_SFP_ABS = 0xFB, + HINIC_PORT_CMD_Q_FILTER = 0xFC, + HINIC_PORT_CMD_TCAM_FILTER = 0xFE, + HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF, +}; + +/* cmd of mgmt CPU message for HW module */ +enum hinic_mgmt_cmd { + HINIC_MGMT_CMD_RESET_MGMT = 0x0, + HINIC_MGMT_CMD_START_FLR = 0x1, + HINIC_MGMT_CMD_FLUSH_DOORBELL = 0x2, + HINIC_MGMT_CMD_GET_IO_STATUS = 0x3, + HINIC_MGMT_CMD_DMA_ATTR_SET = 0x4, + + HINIC_MGMT_CMD_CMDQ_CTXT_SET = 0x10, + HINIC_MGMT_CMD_CMDQ_CTXT_GET, + + HINIC_MGMT_CMD_VAT_SET = 0x12, + HINIC_MGMT_CMD_VAT_GET, + + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14, + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, + + HINIC_MGMT_CMD_MQM_FIX_INFO_GET = 0x16, + HINIC_MGMT_CMD_MQM_CFG_INFO_SET = 0x18, + HINIC_MGMT_CMD_MQM_SRCH_GPA_SET = 0x20, + HINIC_MGMT_CMD_PPF_TMR_SET = 0x22, + HINIC_MGMT_CMD_PPF_HT_GPA_SET = 0x23, + HINIC_MGMT_CMD_RES_STATE_SET = 0x24, + HINIC_MGMT_CMD_FUNC_CACHE_OUT = 0x25, + HINIC_MGMT_CMD_FFM_SET = 0x26, + HINIC_MGMT_CMD_SMF_TMR_CLEAR = 0x27, + /* 0x29 not defined in base line, + * only used in open source driver + */ + HINIC_MGMT_CMD_FUNC_RES_CLEAR = 0x29, + + HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32, + + HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33, + HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + + HINIC_MGMT_CMD_VF_RANDOM_ID_SET = 0x36, + HINIC_MGMT_CMD_FAULT_REPORT = 0x37, + HINIC_MGMT_CMD_HEART_LOST_REPORT = 0x38, + + HINIC_MGMT_CMD_VPD_SET = 0x40, + HINIC_MGMT_CMD_VPD_GET, + HINIC_MGMT_CMD_LABEL_SET, + HINIC_MGMT_CMD_LABEL_GET, + HINIC_MGMT_CMD_SATIC_MAC_SET, + 
HINIC_MGMT_CMD_SATIC_MAC_GET, + HINIC_MGMT_CMD_SYNC_TIME = 0x46, + + HINIC_MGMT_CMD_REG_READ = 0x48, + + HINIC_MGMT_CMD_SET_LED_STATUS = 0x4A, + HINIC_MGMT_CMD_L2NIC_RESET = 0x4b, + HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET = 0x4d, + HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT = 0x4E, + HINIC_MGMT_CMD_ACTIVATE_FW = 0x4F, + HINIC_MGMT_CMD_PAGESIZE_SET = 0x50, + HINIC_MGMT_CMD_PAGESIZE_GET = 0x51, + HINIC_MGMT_CMD_GET_BOARD_INFO = 0x52, + HINIC_MGMT_CMD_WATCHDOG_INFO = 0x56, + HINIC_MGMT_CMD_FMW_ACT_NTC = 0x57, + HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61, + HINIC_MGMT_CMD_GET_PPF_STATE = 0x63, + HINIC_MGMT_CMD_PCIE_DFX_NTC = 0x65, + HINIC_MGMT_CMD_PCIE_DFX_GET = 0x66, + + HINIC_MGMT_CMD_GET_HOST_INFO = 0x67, + + HINIC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A, + HINIC_MGMT_CMD_HEARTBEAT_SUPPORTED = 0x6B, + HINIC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C, + HINIC_MGMT_CMD_GET_HW_PF_INFOS = 0x6D, + HINIC_MGMT_CMD_GET_SDI_MODE = 0x6E, + + HINIC_MGMT_CMD_ENABLE_MIGRATE = 0x6F, +}; + +/* uCode relates commands */ +enum hinic_ucode_cmd { + HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0, + HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + HINIC_UCODE_CMD_ARM_SQ, + HINIC_UCODE_CMD_ARM_RQ, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE, + HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE, + HINIC_UCODE_CMD_SET_IQ_ENABLE, + HINIC_UCODE_CMD_SET_RQ_FLUSH = 10 +}; + +/* software cmds, vf->pf and multi-host */ +enum hinic_sw_funcs_cmd { + HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER = 0x0, + HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER = 0x1, + HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE = 0x2, + HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE = 0x3, + HINIC_SW_CMD_SEND_MSG_TO_VF = 0x4, + HINIC_SW_CMD_MIGRATE_READY = 0x5, +}; + +enum sq_l4offload_type { + OFFLOAD_DISABLE = 0, + TCP_OFFLOAD_ENABLE = 1, + SCTP_OFFLOAD_ENABLE = 2, + UDP_OFFLOAD_ENABLE = 3, +}; + +enum sq_vlan_offload_flag { + VLAN_OFFLOAD_DISABLE = 0, + VLAN_OFFLOAD_ENABLE = 1, +}; + +enum sq_pkt_parsed_flag { + PKT_NOT_PARSED = 0, + PKT_PARSED = 1, +}; + +enum sq_l3_type { + UNKNOWN_L3TYPE = 0, + IPV6_PKT = 1, + IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, + IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, +}; + +enum sq_md_type { + UNKNOWN_MD_TYPE = 0, +}; + +enum sq_l2type { + ETHERNET = 0, +}; + +enum sq_tunnel_l4_type { + NOT_TUNNEL, + TUNNEL_UDP_NO_CSUM, + TUNNEL_UDP_CSUM, +}; + +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 + +#define HINIC_RSS_TYPE_VALID_SHIFT 23 +#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define HINIC_RSS_TYPE_IPV6_SHIFT 27 +#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define HINIC_RSS_TYPE_IPV4_SHIFT 29 +#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31 + +#define HINIC_RSS_TYPE_SET(val, member) \ + (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT) + +#define HINIC_RSS_TYPE_GET(val, member) \ + (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1) + +enum hinic_speed { + HINIC_SPEED_10MB_LINK = 0, + HINIC_SPEED_100MB_LINK, + HINIC_SPEED_1000MB_LINK, + HINIC_SPEED_10GB_LINK, + HINIC_SPEED_25GB_LINK, + HINIC_SPEED_40GB_LINK, + HINIC_SPEED_100GB_LINK, + HINIC_SPEED_UNKNOWN = 0xFF, +}; + +/* In order to adapt different linux version */ +enum { + HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ +}; + +#define HINIC_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HINIC_AF0_P2P_IDX_SHIFT 10 +#define 
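/* Worked example for the HINIC_RSS_TYPE_SET/GET helpers above: setting
 * the VALID (bit 23), IPV4 (bit 29) and TCP_IPV4 (bit 28) flags yields
 * 0x30800000, and GET recovers each flag independently. Values here are
 * illustrative only.
 */
u32 rss_ctx = HINIC_RSS_TYPE_SET(1, VALID) |
	      HINIC_RSS_TYPE_SET(1, IPV4) |
	      HINIC_RSS_TYPE_SET(1, TCP_IPV4);	/* == 0x30800000 */

/* HINIC_RSS_TYPE_GET(rss_ctx, TCP_IPV4) == 1,
 * HINIC_RSS_TYPE_GET(rss_ctx, UDP_IPV4) == 0
 */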
HINIC_AF0_PCI_INTF_IDX_SHIFT 14 +#define HINIC_AF0_VF_IN_PF_SHIFT 16 +#define HINIC_AF0_FUNC_TYPE_SHIFT 24 + +#define HINIC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF +#define HINIC_AF0_P2P_IDX_MASK 0xF +#define HINIC_AF0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_AF0_VF_IN_PF_MASK 0xFF +#define HINIC_AF0_FUNC_TYPE_MASK 0x1 + +#define HINIC_AF0_GET(val, member) \ + (((val) >> HINIC_AF0_##member##_SHIFT) & HINIC_AF0_##member##_MASK) + +#define HINIC_AF1_PPF_IDX_SHIFT 0 +#define HINIC_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HINIC_AF1_CEQS_PER_FUNC_SHIFT 12 +#define HINIC_AF1_IRQS_PER_FUNC_SHIFT 20 +#define HINIC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24 +#define HINIC_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HINIC_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HINIC_AF1_PPF_IDX_MASK 0x1F +#define HINIC_AF1_AEQS_PER_FUNC_MASK 0x3 +#define HINIC_AF1_CEQS_PER_FUNC_MASK 0x7 +#define HINIC_AF1_IRQS_PER_FUNC_MASK 0xF +#define HINIC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HINIC_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HINIC_AF1_GET(val, member) \ + (((val) >> HINIC_AF1_##member##_SHIFT) & HINIC_AF1_##member##_MASK) + +#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_SHIFT 16 +#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF + +#define HINIC_AF2_GET(val, member) \ + (((val) >> HINIC_AF2_##member##_SHIFT) & HINIC_AF2_##member##_MASK) + +#define HINIC_AF4_OUTBOUND_CTRL_SHIFT 0 +#define HINIC_AF4_DOORBELL_CTRL_SHIFT 1 +#define HINIC_AF4_OUTBOUND_CTRL_MASK 0x1 +#define HINIC_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HINIC_AF4_GET(val, member) \ + (((val) >> HINIC_AF4_##member##_SHIFT) & HINIC_AF4_##member##_MASK) + +#define HINIC_AF4_SET(val, member) \ + (((val) & HINIC_AF4_##member##_MASK) << HINIC_AF4_##member##_SHIFT) + +#define HINIC_AF4_CLEAR(val, member) \ + ((val) & (~(HINIC_AF4_##member##_MASK << \ + HINIC_AF4_##member##_SHIFT))) + +#define HINIC_AF5_PF_STATUS_SHIFT 0 +#define HINIC_AF5_PF_STATUS_MASK 0xFFFF + +#define HINIC_AF5_SET(val, member) \ + (((val) & HINIC_AF5_##member##_MASK) << HINIC_AF5_##member##_SHIFT) + +#define HINIC_AF5_GET(val, member) \ + (((val) >> HINIC_AF5_##member##_SHIFT) & HINIC_AF5_##member##_MASK) + +#define HINIC_AF5_CLEAR(val, member) \ + ((val) & (~(HINIC_AF5_##member##_MASK << \ + HINIC_AF5_##member##_SHIFT))) + +#define HINIC_PPF_ELECTION_IDX_SHIFT 0 + +#define HINIC_PPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_PPF_ELECTION_SET(val, member) \ + (((val) & HINIC_PPF_ELECTION_##member##_MASK) << \ + HINIC_PPF_ELECTION_##member##_SHIFT) + +#define HINIC_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ + HINIC_PPF_ELECTION_##member##_MASK) + +#define HINIC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ + << HINIC_PPF_ELECTION_##member##_SHIFT))) + +#define HINIC_MPF_ELECTION_IDX_SHIFT 0 + +#define HINIC_MPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_MPF_ELECTION_SET(val, member) \ + (((val) & HINIC_MPF_ELECTION_##member##_MASK) << \ + HINIC_MPF_ELECTION_##member##_SHIFT) + +#define HINIC_MPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_MPF_ELECTION_##member##_SHIFT) & \ + HINIC_MPF_ELECTION_##member##_MASK) + +#define HINIC_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_MPF_ELECTION_##member##_MASK \ + << HINIC_MPF_ELECTION_##member##_SHIFT))) + +#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) +#define 
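/* The AF4 helpers above support a read-modify-write on attribute
 * register 4: CLEAR zeroes one field, SET ors in the new value. A
 * sketch, with the hardware read/write omitted ("attr4" stands for the
 * value read from the register):
 */
u32 attr4 = /* read attribute register 4 */ 0;

attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);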
HINIC_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) +#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HINIC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HINIC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HINIC_IS_PF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PF) +#define HINIC_IS_VF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_VF) +#define HINIC_IS_PPF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PPF) + +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + HINIC_DB_PAGE_SIZE)) + +enum hinic_pcie_nosnoop { + HINIC_PCIE_SNOOP = 0, + HINIC_PCIE_NO_SNOOP = 1, +}; + +enum hinic_pcie_tph { + HINIC_PCIE_TPH_DISABLE = 0, + HINIC_PCIE_TPH_ENABLE = 1, +}; + +enum hinic_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hinic_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +enum hinic_pf_status { + HINIC_PF_STATUS_INIT = 0X0, + HINIC_PF_STATUS_ACTIVE_FLAG = 0x11, + HINIC_PF_STATUS_FLR_START_FLAG = 0x12, + HINIC_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */ +#define HINIC_DB_DWQE_SIZE 0x00080000 +/* BMGW & VMGW VF db size 256k, have no dwqe space */ +#define HINIC_GW_VF_DB_SIZE 0x00040000 + +/* db/dwqe page size: 4K */ +#define HINIC_DB_PAGE_SIZE 0x00001000ULL + +#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE) + +#define HINIC_PCI_MSIX_ENTRY_SIZE 16 +#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + +#ifdef __cplusplus + #if __cplusplus +} + #endif +#endif /* __cplusplus */ +#endif /* __HINIC_PORT_CMD_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h new file mode 100644 index 000000000000..89c5ca54e543 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef __HINIC_QE_DEF_H__ +#define __HINIC_QE_DEF_H__ + +#ifdef __cplusplus + #if __cplusplus +extern "C"{ + #endif +#endif /* __cplusplus */ + +#define HINIC_SQ_WQEBB_SIZE 64 +#define HINIC_RQ_WQE_SIZE 32 +#define HINIC_SQ_WQEBB_SHIFT 6 +#define HINIC_RQ_WQEBB_SHIFT 5 + +#define HINIC_MAX_QUEUE_DEPTH 4096 +#define HINIC_MIN_QUEUE_DEPTH 128 +#define HINIC_TXD_ALIGN 1 +#define HINIC_RXD_ALIGN 1 + +#define HINIC_SQ_DEPTH 1024 +#define HINIC_RQ_DEPTH 1024 + +#define HINIC_RQ_WQE_MAX_SIZE 32 + +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3) + +/************** SQ_CTRL ***************/ +#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define SQ_CTRL_TASKSECT_LEN_SHIFT 16 +#define SQ_CTRL_DATA_FORMAT_SHIFT 22 +#define SQ_CTRL_LEN_SHIFT 29 +#define SQ_CTRL_OWNER_SHIFT 31 + +#define SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU +#define SQ_CTRL_TASKSECT_LEN_MASK 0x1FU +#define SQ_CTRL_DATA_FORMAT_MASK 0x1U +#define SQ_CTRL_LEN_MASK 0x3U +#define SQ_CTRL_OWNER_MASK 0x1U + +#define SQ_CTRL_GET(val, member) (((val) >> SQ_CTRL_##member##_SHIFT) \ + & SQ_CTRL_##member##_MASK) + +#define SQ_CTRL_CLEAR(val, member) ((val) & \ + (~(SQ_CTRL_##member##_MASK << \ + SQ_CTRL_##member##_SHIFT))) + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 +#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 +#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 +#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 +#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 +#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 +#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU +#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU +#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) \ + << SQ_CTRL_QUEUE_INFO_##member##_SHIFT) + +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) \ + & SQ_CTRL_QUEUE_INFO_##member##_MASK) + +#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) + +#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 +#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT 8 +#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 +#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 +#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 +#define SQ_TASK_INFO0_UFO_AVD_SHIFT 14 +#define SQ_TASK_INFO0_TSO_UFO_SHIFT 15 +#define SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 + +#define SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFFU +#define SQ_TASK_INFO0_L4OFFLOAD_MASK 0x3U +#define SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3U +#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1U +#define SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1U +#define SQ_TASK_INFO0_UFO_AVD_MASK 0x1U +#define SQ_TASK_INFO0_TSO_UFO_MASK 0x1U +#define SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFFU + +#define SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ + SQ_TASK_INFO0_##member##_SHIFT) +#define SQ_TASK_INFO0_GET(val, member) \ + (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \ + SQ_TASK_INFO0_##member##_MASK) + +#define SQ_TASK_INFO1_MD_TYPE_SHIFT 8 +#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16 +#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24 + +#define SQ_TASK_INFO1_MD_TYPE_MASK 0xFFU +#define SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFFU +#define SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFFU + +#define 
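/* Worked example of the SQ_CTRL_QUEUE_INFO helpers above, with assumed
 * field values: pack TSO, TCP/UDP checksum and a 1460-byte MSS into one
 * queue_info word, then read the MSS back.
 */
u32 queue_info = SQ_CTRL_QUEUE_INFO_SET(1U, TSO) |
		 SQ_CTRL_QUEUE_INFO_SET(1U, TCPUDP_CS) |
		 SQ_CTRL_QUEUE_INFO_SET(1460U, MSS);

/* SQ_CTRL_QUEUE_INFO_GET(queue_info, MSS) == 1460 */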
SQ_TASK_INFO1_SET(val, member) \ + (((val) & SQ_TASK_INFO1_##member##_MASK) << \ + SQ_TASK_INFO1_##member##_SHIFT) +#define SQ_TASK_INFO1_GET(val, member) \ + (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \ + SQ_TASK_INFO1_##member##_MASK) + +#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0 +#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8 +#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16 +#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24 + +#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFFU +#define SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFFU +#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7U +#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3U + +#define SQ_TASK_INFO2_SET(val, member) \ + (((val) & SQ_TASK_INFO2_##member##_MASK) << \ + SQ_TASK_INFO2_##member##_SHIFT) +#define SQ_TASK_INFO2_GET(val, member) \ + (((val) >> SQ_TASK_INFO2_##member##_SHIFT) & \ + SQ_TASK_INFO2_##member##_MASK) + +#define SQ_TASK_INFO4_L2TYPE_SHIFT 31 + +#define SQ_TASK_INFO4_L2TYPE_MASK 0x1U + +#define SQ_TASK_INFO4_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO4_##member##_MASK) << \ + SQ_TASK_INFO4_##member##_SHIFT) + +/********************* SQ_DB *********************/ +#define SQ_DB_OFF 0x00000800 +#define SQ_DB_INFO_HI_PI_SHIFT 0 +#define SQ_DB_INFO_QID_SHIFT 8 +#define SQ_DB_INFO_CFLAG_SHIFT 23 +#define SQ_DB_INFO_COS_SHIFT 24 +#define SQ_DB_INFO_TYPE_SHIFT 27 +#define SQ_DB_INFO_HI_PI_MASK 0xFFU +#define SQ_DB_INFO_QID_MASK 0x3FFU +#define SQ_DB_INFO_CFLAG_MASK 0x1U +#define SQ_DB_INFO_COS_MASK 0x7U +#define SQ_DB_INFO_TYPE_MASK 0x1FU +#define SQ_DB_INFO_SET(val, member) (((u32)(val) & \ + SQ_DB_INFO_##member##_MASK) << \ + SQ_DB_INFO_##member##_SHIFT) + +#define SQ_DB_PI_LOW_MASK 0xFF +#define SQ_DB_PI_LOW(pi) ((pi) & SQ_DB_PI_LOW_MASK) +#define SQ_DB_PI_HI_SHIFT 8 +#define SQ_DB_PI_HIGH(pi) ((pi) >> SQ_DB_PI_HI_SHIFT) +#define SQ_DB_ADDR(sq, pi) \ + ((u64 *)((sq)->db_addr + SQ_DB_OFF) + SQ_DB_PI_LOW(pi)) +#define SQ_DB 1 +#define SQ_CFLAG_DP 0 /* CFLAG_DATA_PATH */ + +/*********************** RQ_CTRL ******************/ +#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 +#define RQ_CTRL_COMPLETE_LEN_SHIFT 27 +#define RQ_CTRL_LEN_SHIFT 29 + +#define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU +#define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U +#define RQ_CTRL_COMPLETE_LEN_MASK 0x3U +#define RQ_CTRL_LEN_MASK 0x3U + +#define RQ_CTRL_SET(val, member) (((val) & \ + RQ_CTRL_##member##_MASK) << \ + RQ_CTRL_##member##_SHIFT) + +#define RQ_CTRL_GET(val, member) (((val) >> \ + RQ_CTRL_##member##_SHIFT) & \ + RQ_CTRL_##member##_MASK) + +#define RQ_CTRL_CLEAR(val, member) ((val) & \ + (~(RQ_CTRL_##member##_MASK << \ + RQ_CTRL_##member##_SHIFT))) + +#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_CQE_STATUS_FLUSH_MASK 0x1U + +#define RQ_CQE_STATUS_GET(val, member) (((val) >> \ + RQ_CQE_STATUS_##member##_SHIFT) & \ + RQ_CQE_STATUS_##member##_MASK) + +#define RQ_CQE_STATUS_CLEAR(val, member) ((val) & \ + (~(RQ_CQE_STATUS_##member##_MASK << \ + 
RQ_CQE_STATUS_##member##_SHIFT))) + +#define RQ_CQE_SGE_VLAN_SHIFT 0 +#define RQ_CQE_SGE_LEN_SHIFT 16 + +#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define RQ_CQE_SGE_GET(val, member) (((val) >> \ + RQ_CQE_SGE_##member##_SHIFT) & \ + RQ_CQE_SGE_##member##_MASK) + +#define RQ_CQE_PKT_NUM_SHIFT 1 +#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 +#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define RQ_CQE_SUPER_CQE_EN_SHIFT 0 + +#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_NUM_MASK 0x1FU +#define RQ_CQE_SUPER_CQE_EN_MASK 0x1 + +#define RQ_CQE_PKT_NUM_GET(val, member) (((val) >> \ + RQ_CQE_PKT_##member##_SHIFT) & \ + RQ_CQE_PKT_##member##_MASK) +#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define RQ_CQE_SUPER_CQE_EN_GET(val, member) (((val) >> \ + RQ_CQE_##member##_SHIFT) & \ + RQ_CQE_##member##_MASK) +#define HINIC_GET_SUPER_CQE_EN(pkt_info) \ + RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +#define HINIC_GET_SUPER_CQE_EN_BE(pkt_info) ((pkt_info) & 0x1000000U) +#define RQ_CQE_PKT_LEN_GET(val, member) (((val) >> \ + RQ_CQE_PKT_##member##_SHIFT) & \ + RQ_CQE_PKT_##member##_MASK) + +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU + +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U + +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \ + RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define RQ_CQE_PKT_TYPES_NON_L2_MASK 0x800U +#define RQ_CQE_PKT_TYPES_L2_MASK 0x7FU + +#define RQ_CQE_STATUS_CSUM_BYPASS_VAL 0x80 +#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK 0x31U +#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK 0x4EU + +#define SECT_SIZE_BYTES(size) ((size) << 3) + +#define HINIC_PF_SET_VF_ALREADY 0x4 +#define HINIC_MGMT_STATUS_EXIST 0x6 + +#define WQS_BLOCKS_PER_PAGE 4 + +#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \ + ((wq)->num_q_pages - 1)) + +#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \ + ((idx) & ((wq)->num_wqebbs_per_page - 1))) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_PAGE_ADDR_SIZE_SHIFT 3 +#define WQ_PAGE_ADDR(wq, idx) \ + (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \ + (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))) + +#define WQ_BLOCK_SIZE 4096UL +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT) + +#define CMDQ_BLOCKS_PER_PAGE 8 +#define CMDQ_BLOCK_SIZE 512UL +#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \ + CMDQ_BLOCK_SIZE), PAGE_SIZE) + +#define ADDR_4K_ALIGNED(addr) (0 == (addr & 0xfff)) +#define ADDR_256K_ALIGNED(addr) (0 == (addr & 0x3ffff)) + +#define WQ_BASE_VADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \ + + (u64)(wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \ + + (wq)->block_idx * 
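/* Worked example of the WQE paging math above under an assumed
 * geometry: wqebb_size = 64 (HINIC_SQ_WQEBB_SIZE), 256 WQEBBs per queue
 * page (wqebbs_per_page_shift = 8, num_wqebbs_per_page = 256) and
 * num_q_pages = 4; both counts must be powers of two for the mask
 * arithmetic to hold. Assuming "wq" describes that geometry, WQE index
 * 300 resolves as:
 */
u32 page = WQE_PAGE_NUM(wq, 300);	/* (300 >> 8) & 3 == 1 */
u64 off  = WQE_PAGE_OFF(wq, 300);	/* 64 * (300 & 255) == 2816 */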
CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + (((u64)((cmdq_pages)->cmdq_page_paddr)) \ + + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQE_SHADOW_PAGE(wq, wqe) \ + (u16)(((ulong)(wqe) - (ulong)(wq)->shadow_wqe) \ + / (wq)->max_wqe_size) + +#define WQE_IN_RANGE(wqe, start, end) \ + (((ulong)(wqe) >= (ulong)(start)) && \ + ((ulong)(wqe) < (ulong)(end))) + +#define WQ_NUM_PAGES(num_wqs) \ + (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE) + +/* Qe buffer relates define */ + +enum hinic_rx_buf_size { + HINIC_RX_BUF_SIZE_32B = 0x20, + HINIC_RX_BUF_SIZE_64B = 0x40, + HINIC_RX_BUF_SIZE_96B = 0x60, + HINIC_RX_BUF_SIZE_128B = 0x80, + HINIC_RX_BUF_SIZE_192B = 0xC0, + HINIC_RX_BUF_SIZE_256B = 0x100, + HINIC_RX_BUF_SIZE_384B = 0x180, + HINIC_RX_BUF_SIZE_512B = 0x200, + HINIC_RX_BUF_SIZE_768B = 0x300, + HINIC_RX_BUF_SIZE_1K = 0x400, + HINIC_RX_BUF_SIZE_1_5K = 0x600, + HINIC_RX_BUF_SIZE_2K = 0x800, + HINIC_RX_BUF_SIZE_3K = 0xC00, + HINIC_RX_BUF_SIZE_4K = 0x1000, + HINIC_RX_BUF_SIZE_8K = 0x2000, + HINIC_RX_BUF_SIZE_16K = 0x4000, +}; + +enum ppf_tmr_status { + HINIC_PPF_TMR_FLAG_STOP, + HINIC_PPF_TMR_FLAG_START, +}; + +enum hinic_res_state { + HINIC_RES_CLEAN = 0, + HINIC_RES_ACTIVE = 1, +}; + +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + +#define BUF_DESC_SIZE_SHIFT 4 + +#define HINIC_SQ_WQE_SIZE(num_sge) \ + (sizeof(struct hinic_sq_ctrl) + \ + sizeof(struct hinic_sq_task) + \ + (u32)((num_sge) << BUF_DESC_SIZE_SHIFT)) + +#define HINIC_SQ_WQEBB_CNT(num_sge) \ + (int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \ + HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT) + +#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define HINIC_GET_RSS_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define HINIC_GET_PKT_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) + +#define HINIC_GET_RX_PKT_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) + +#define HINIC_GET_RX_PKT_UMBCAST(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define HINIC_GET_RX_VLAN_TAG(vlan_len) \ + RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define HINIC_GET_RX_PKT_LEN(vlan_len) \ + RQ_CQE_SGE_GET(vlan_len, LEN) + +#define HINIC_GET_RX_CSUM_ERR(status) \ + RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define HINIC_GET_RX_DONE(status) \ + RQ_CQE_STATUS_GET(status, RXDONE) + +#define HINIC_GET_RX_FLUSH(status) \ + RQ_CQE_STATUS_GET(status, FLUSH) + +#define HINIC_GET_RX_BP_EN(status) \ + RQ_CQE_STATUS_GET(status, BP_EN) + +#define HINIC_GET_RX_NUM_LRO(status) \ + RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define HINIC_PKT_TYPES_UNKNOWN(pkt_types) \ + (pkt_types & RQ_CQE_PKT_TYPES_NON_L2_MASK) + +#define HINIC_PKT_TYPES_L2(pkt_types) \ + (pkt_types & RQ_CQE_PKT_TYPES_L2_MASK) + +#define HINIC_CSUM_ERR_BYPASSED(csum_err) \ + (csum_err == RQ_CQE_STATUS_CSUM_BYPASS_VAL) + +#define HINIC_CSUM_ERR_IP(csum_err) \ + (csum_err & RQ_CQE_STATUS_CSUM_ERR_IP_MASK) + +#define HINIC_CSUM_ERR_L4(csum_err) \ + (csum_err & RQ_CQE_STATUS_CSUM_ERR_L4_MASK) + +#define TX_MSS_DEFAULT 0x3E00 +#define TX_MSS_MIN 0x50 + +enum sq_wqe_type { + SQ_NORMAL_WQE = 0, +}; + +enum rq_completion_fmt { + RQ_COMPLETE_SGE = 1 +}; + +#ifdef __cplusplus + #if __cplusplus +} + #endif +#endif /* __cplusplus */ +#endif /* __HINIC_QE_DEF_H__ 
*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_qp.c new file mode 100644 index 000000000000..799486f0d16b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_nic_io.h" +#include "hinic_qp.h" + +#define BUF_DESC_SHIFT 1 +#define BUF_DESC_SIZE(nr_descs) (((u32)nr_descs) << BUF_DESC_SHIFT) + +void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info, + int nr_descs, u8 owner) +{ + u32 ctrl_size, task_size, bufdesc_size; + + ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); + task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); + bufdesc_size = BUF_DESC_SIZE(nr_descs); + + ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | + SQ_CTRL_SET(task_size, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(ctrl_size, LEN) | + SQ_CTRL_SET(owner, OWNER); + + ctrl->ctrl_fmt = be32_to_cpu(ctrl->ctrl_fmt); + + ctrl->queue_info = queue_info; + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) { + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) { + /* mss should not less than 80 */ + ctrl->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info, + MSS); + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS); + } + ctrl->queue_info = be32_to_cpu(ctrl->queue_info); +} + +int hinic_get_rx_done(struct hinic_rq_cqe *cqe) +{ + u32 status; + int rx_done; + + status = be32_to_cpu(cqe->status); + + rx_done = RQ_CQE_STATUS_GET(status, RXDONE); + if (!rx_done) + return 0; + + return 1; +} + +void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old) +{ + u32 status; + + status = RQ_CQE_STATUS_CLEAR(status_old, RXDONE); + + cqe->status = cpu_to_be32(status); + + /* Make sure Rxdone has been set */ + wmb(); +} + +int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe) +{ + u32 pkt_info; + int super_cqe_en; + + pkt_info = be32_to_cpu(cqe->pkt_info); + + super_cqe_en = RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN); + if (!super_cqe_en) + return 0; + + return 1; +} + +u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe) +{ + u32 vlan_len = be32_to_cpu(cqe->vlan_len); + + return RQ_CQE_SGE_GET(vlan_len, LEN); +} + +u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe) +{ + u32 pkt_num = be32_to_cpu(cqe->pkt_info); + + return RQ_CQE_PKT_NUM_GET(pkt_num, NUM); +} + +u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, + bool last) +{ + u32 pkt_len = be32_to_cpu(cqe->pkt_info); + + if (!last) + return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN); + else + return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN); +} + +void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr, + dma_addr_t 
cqe_dma)
+{
+	struct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe;
+	struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
+	struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
+	struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
+	u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
+
+	ctrl->ctrl_fmt =
+		RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
+		RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
+		RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
+		RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
+
+	hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
+
+	buf_desc->addr_high = upper_32_bits(buf_addr);
+	buf_desc->addr_low = lower_32_bits(buf_addr);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+			   u32 *queue_info,
+			   enum sq_l4offload_type l4_offload,
+			   u32 l4_len, u32 offset)
+{
+	u32 tcp_udp_cs = 0, sctp = 0;
+	u32 mss = TX_MSS_DEFAULT;
+
+	/* tcp_udp_cs must be set so the hw calculates the outer checksum for
+	 * vxlan packets that carry no inner l3 and l4 headers
+	 */
+	if (unlikely(l4_offload == SCTP_OFFLOAD_ENABLE))
+		sctp = 1;
+	else
+		tcp_udp_cs = 1;
+
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
+	task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
+		       SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) |
+		       SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP);
+
+	*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+			    u32 *queue_info,
+			    enum sq_l4offload_type l4_offload,
+			    u32 l4_len,
+			    u32 offset, u32 ip_ident, u32 mss)
+{
+	u32 tso = 0, ufo = 0;
+
+	if (l4_offload == TCP_OFFLOAD_ENABLE)
+		tso = 1;
+	else if (l4_offload == UDP_OFFLOAD_ENABLE)
+		ufo = 1;
+
+	task->ufo_v6_identify = be32_to_cpu(ip_ident);
+	/* just keep the same code style here */
+
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(tso || ufo, TSO_UFO);
+	task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
+		       SQ_CTRL_QUEUE_INFO_SET(tso, TSO) |
+		       SQ_CTRL_QUEUE_INFO_SET(ufo, UFO) |
+		       SQ_CTRL_QUEUE_INFO_SET(!!l4_offload, TCPUDP_CS);
+	/* checksum must be calculated by hw when tso is enabled */
+
+	*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+	/* qsf was initialized in prepare_sq_wqe */
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+void hinic_set_vlan_tx_offload(struct hinic_sq_task *task,
+			       u32 *queue_info,
+			       u16 vlan_tag, u16 vlan_pri)
+{
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+			   SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI);
+}
+
+void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len)
+{
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_qp.h
new file mode 100644
index 000000000000..513e3089cc54
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_QP_H
+#define HINIC_QP_H
+
+#include "hinic_qe_def.h"
+#include "hinic_port_cmd.h"
+
+/* frags plus one linear buffer */
+#define HINIC_MAX_SQ_BUFDESCS		(MAX_SKB_FRAGS + 1)
+#define HINIC_MAX_SQ_SGE		17
+#define HINIC_MAX_SKB_NR_FRAGE		(HINIC_MAX_SQ_SGE - 1)
+#define HINIC_GSO_MAX_SIZE		65536
+
+struct hinic_sq_ctrl {
+	u32 ctrl_fmt;
+	u32 queue_info;
+};
+
+struct hinic_sq_task {
+	u32 pkt_info0;
+	u32 pkt_info1;
+	u32 pkt_info2;
+	u32 ufo_v6_identify;
+	u32 pkt_info4;
+	u32 rsvd5;
+};
+
+struct hinic_sq_bufdesc {
+	u32 hi_addr;
+	u32 lo_addr;
+	u32 len;
+	u32 rsvd;
+};
+
+struct hinic_sq_wqe {
+	struct hinic_sq_ctrl ctrl;
+	struct hinic_sq_task task;
+	struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
+};
+
+struct hinic_rq_ctrl {
+	u32 ctrl_fmt;
+};
+
+struct hinic_rq_cqe {
+	u32 status;
+	u32 vlan_len;
+
+	u32 offload_type;
+	u32 hash_val;
+	u32 rsvd4;
+	u32 rsvd5;
+	u32 rsvd6;
+	u32 pkt_info;
+};
+
+struct hinic_rq_cqe_sect {
+	struct hinic_sge sge;
+	u32 rsvd;
+};
+
+struct hinic_rq_bufdesc {
+	u32 addr_high;
+	u32 addr_low;
+};
+
+struct hinic_rq_wqe {
+	struct hinic_rq_ctrl ctrl;
+	u32 rsvd;
+	struct hinic_rq_cqe_sect cqe_sect;
+	struct hinic_rq_bufdesc buf_desc;
+};
+
+void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
+			   int nr_descs, u8 owner);
+
+u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe);
+
+int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe);
+
+u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, bool last);
+
+u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe);
+
+int hinic_get_rx_done(struct hinic_rq_cqe *cqe);
+
+void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old);
+
+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
+			  dma_addr_t cqe_dma);
+
+#ifdef static
+#undef static
+#define LLT_STATIC_DEF_SAVED
+#endif
+static inline void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+					    enum sq_l3_type l3_type,
+					    u32 network_len)
+{
+	task->pkt_info2 |= SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+			   SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+static inline void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+					    enum sq_tunnel_l4_type l4_type,
+					    u32 tunnel_len)
+{
+	task->pkt_info2 |= SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+			   SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+static inline void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+					   enum sq_l3_type l3_type,
+					   u32 network_len)
+{
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+	task->pkt_info1 |= SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+#ifdef LLT_STATIC_DEF_SAVED
+#define static
+#undef LLT_STATIC_DEF_SAVED
+#endif
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+			   enum sq_l4offload_type l4_offload,
+			   u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+			    enum sq_l4offload_type l4_offload, u32 l4_len,
+			    u32 offset, u32 ip_ident, u32 mss);
+
+void hinic_set_vlan_tx_offload(struct hinic_sq_task *task, u32 *queue_info,
+			       u16 vlan_tag, u16 vlan_pri);
+
+void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 56ea6d692f1c..cc4ec6640cb6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -2,73 +2,381 @@ /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * */ -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt #include #include -#include -#include -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_nic_io.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_qp.h" #include "hinic_rx.h" -#include "hinic_dev.h" -#define RX_IRQ_NO_PENDING 0 -#define RX_IRQ_NO_COALESC 0 -#define RX_IRQ_NO_LLI_TIMER 0 -#define RX_IRQ_NO_CREDIT 0 -#define RX_IRQ_NO_RESEND_TIMER 0 -#define HINIC_RX_BUFFER_WRITE 16 +static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev); -#define HINIC_RX_IPV6_PKT 7 -#define LRO_PKT_HDR_LEN_IPV4 66 -#define LRO_PKT_HDR_LEN_IPV6 86 -#define LRO_REPLENISH_THLD 256 +#define HINIC_RX_HDR_SIZE 256 +#define HINIC_RX_IPV6_PKT 7 +#define HINIC_RX_VXLAN_PKT 0xb -#define LRO_PKT_HDR_LEN(cqe) \ - (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \ - HINIC_RX_IPV6_PKT ? 
LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) - -/** - * hinic_rxq_clean_stats - Clean the statistics of specific queue - * @rxq: Logical Rx Queue - **/ -void hinic_rxq_clean_stats(struct hinic_rxq *rxq) -{ - struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; - - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->pkts = 0; - rxq_stats->bytes = 0; - rxq_stats->errors = 0; - rxq_stats->csum_errors = 0; - rxq_stats->other_errors = 0; - u64_stats_update_end(&rxq_stats->syncp); +#define RXQ_STATS_INC(rxq, field) \ +{ \ + u64_stats_update_begin(&rxq->rxq_stats.syncp); \ + rxq->rxq_stats.field++; \ + u64_stats_update_end(&rxq->rxq_stats.syncp); \ } -/** - * hinic_rxq_get_stats - get statistics of Rx Queue - * @rxq: Logical Rx Queue - * @stats: return updated stats here - **/ -void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) +static bool rx_alloc_mapped_page(struct hinic_rxq *rxq, + struct hinic_rx_info *rx_info) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + + struct page *page = rx_info->page; + dma_addr_t dma = rx_info->buf_dma_addr; + + if (likely(dma)) + return true; + + /* alloc new page for storage */ + page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COLD | + __GFP_COMP, nic_dev->page_order); + if (unlikely(!page)) { + nicif_err(nic_dev, drv, netdev, "Alloc rxq: %d page failed\n", + rxq->q_id); + return false; + } + + /* map page for use */ + dma = dma_map_page(&pdev->dev, page, 0, rxq->dma_rx_buff_size, + DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (unlikely(dma_mapping_error(&pdev->dev, dma))) { + nicif_err(nic_dev, drv, netdev, "Failed to map page to rx buffer\n"); + __free_pages(page, nic_dev->page_order); + return false; + } + + rx_info->page = page; + rx_info->buf_dma_addr = dma; + rx_info->page_offset = 0; + + return true; +} + +static int hinic_rx_fill_wqe(struct hinic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rq_wqe *rq_wqe; + struct hinic_rx_info *rx_info; + dma_addr_t dma_addr = 0; + u16 pi = 0; + int rq_wqe_len; + int i; + + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + + rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi); + if (!rq_wqe) { + nicif_err(nic_dev, drv, netdev, "Failed to get rq wqe, rxq id: %d, wqe id: %d\n", + rxq->q_id, i); + break; + } + + hinic_prepare_rq_wqe(rq_wqe, pi, dma_addr, rx_info->cqe_dma); + + rq_wqe_len = sizeof(struct hinic_rq_wqe); + hinic_cpu_to_be32(rq_wqe, rq_wqe_len); + rx_info->rq_wqe = rq_wqe; + } + + hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, rxq->q_depth); + + return i; +} + +static int hinic_rx_fill_buffers(struct hinic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rq_wqe *rq_wqe; + struct hinic_rx_info *rx_info; + dma_addr_t dma_addr; + int i; + int free_wqebbs = rxq->delta - 1; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + if (unlikely(!rx_alloc_mapped_page(rxq, rx_info))) + break; + + dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; + + rq_wqe = rx_info->rq_wqe; + + rq_wqe->buf_desc.addr_high = + cpu_to_be32(upper_32_bits(dma_addr)); + rq_wqe->buf_desc.addr_low = + cpu_to_be32(lower_32_bits(dma_addr)); + rxq->next_to_update = (rxq->next_to_update + 1) & 
rxq->q_mask; + } + + if (likely(i)) { + /* Write all the wqes before pi update */ + wmb(); + + hinic_update_rq_hw_pi(nic_dev->hwdev, rxq->q_id, + rxq->next_to_update); + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } else { + nicif_err(nic_dev, drv, netdev, "Failed to allocate rx buffers, rxq id: %d\n", + rxq->q_id); + } + + return i; +} + +void hinic_rx_free_buffers(struct hinic_rxq *rxq) +{ + u16 i; + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_rx_info *rx_info; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + + if (rx_info->buf_dma_addr) { + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, + DMA_FROM_DEVICE); + rx_info->buf_dma_addr = 0; + } + + if (rx_info->page) { + __free_pages(rx_info->page, nic_dev->page_order); + rx_info->page = NULL; + } + } +} + +static void hinic_reuse_rx_page(struct hinic_rxq *rxq, + struct hinic_rx_info *old_rx_info) +{ + struct hinic_rx_info *new_rx_info; + u16 nta = rxq->next_to_alloc; + + new_rx_info = &rxq->rx_info[nta]; + + /* update, and store next to alloc */ + nta++; + rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0; + + new_rx_info->page = old_rx_info->page; + new_rx_info->page_offset = old_rx_info->page_offset; + new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr, + new_rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); +} + +static bool hinic_add_rx_frag(struct hinic_rxq *rxq, + struct hinic_rx_info *rx_info, + struct sk_buff *skb, u32 size) +{ + struct page *page; + u8 *va; + + page = rx_info->page; + va = (u8 *)page_address(page) + rx_info->page_offset; + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + dma_sync_single_range_for_cpu(rxq->dev, + rx_info->buf_dma_addr, + rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); + + if (size <= HINIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + memcpy(__skb_put(skb, size), va, + ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (int)rx_info->page_offset, (int)size, rxq->buf_len); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_info->page_offset ^= rxq->buf_len; + +#ifdef HAVE_PAGE_COUNT + atomic_add(1, &page->_count); +#else + page_ref_inc(page); +#endif + + return true; +} + +static void __packaging_skb(struct hinic_rxq *rxq, struct sk_buff *head_skb, + u8 sge_num, u32 pkt_len) +{ + struct hinic_rx_info *rx_info; + struct sk_buff *skb; + u8 frag_num = 0; + u32 size; + u16 sw_ci; + + sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask; + skb = head_skb; + while (sge_num) { + rx_info = &rxq->rx_info[sw_ci]; + sw_ci = (sw_ci + 1) & rxq->q_mask; + if (unlikely(pkt_len > rxq->buf_len)) { + size = rxq->buf_len; + pkt_len -= rxq->buf_len; + } else { + size = pkt_len; + } + + if (unlikely(frag_num == MAX_SKB_FRAGS)) { + frag_num = 0; + if (skb == head_skb) + skb = skb_shinfo(skb)->frag_list; + else + skb = skb->next; + } + + if (unlikely(skb != head_skb)) { + 
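+			/* chained skbs only hold fragment pages; aggregate
+			 * len/data_len/truesize accounting for the whole
+			 * frag_list lives on head_skb
+			 */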
head_skb->len += size; + head_skb->data_len += size; + head_skb->truesize += rxq->buf_len; + } + + if (likely(hinic_add_rx_frag(rxq, rx_info, skb, size))) { + hinic_reuse_rx_page(rxq, rx_info); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + } + /* clear contents of buffer_info */ + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + sge_num--; + frag_num++; + } +} + +static struct sk_buff *hinic_fetch_rx_buffer(struct hinic_rxq *rxq, u32 pkt_len) +{ + struct sk_buff *head_skb, *cur_skb, *skb = NULL; + struct net_device *netdev = rxq->netdev; + u8 sge_num, skb_num; + u16 wqebb_cnt = 0; + + head_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE); + if (unlikely(!head_skb)) + return NULL; + + sge_num = (u8)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + if (likely(sge_num <= MAX_SKB_FRAGS)) + skb_num = 1; + else + skb_num = (sge_num / MAX_SKB_FRAGS) + + ((sge_num % MAX_SKB_FRAGS) ? 1 : 0); + + while (unlikely(skb_num > 1)) { + cur_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE); + if (unlikely(!cur_skb)) + goto alloc_skb_fail; + + if (!skb) { + skb_shinfo(head_skb)->frag_list = cur_skb; + skb = cur_skb; + } else { + skb->next = cur_skb; + skb = cur_skb; + } + + skb_num--; + } + + prefetchw(head_skb->data); + wqebb_cnt = sge_num; + + __packaging_skb(rxq, head_skb, sge_num, pkt_len); + + rxq->cons_idx += wqebb_cnt; + rxq->delta += wqebb_cnt; + + return head_skb; + +alloc_skb_fail: + dev_kfree_skb_any(head_skb); + return NULL; +} + +void hinic_rxq_get_stats(struct hinic_rxq *rxq, + struct hinic_rxq_stats *stats) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; unsigned int start; @@ -76,35 +384,72 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) u64_stats_update_begin(&stats->syncp); do { start = u64_stats_fetch_begin(&rxq_stats->syncp); - stats->pkts = rxq_stats->pkts; stats->bytes = rxq_stats->bytes; + stats->packets = rxq_stats->packets; stats->errors = rxq_stats->csum_errors + rxq_stats->other_errors; stats->csum_errors = rxq_stats->csum_errors; stats->other_errors = rxq_stats->other_errors; + stats->unlock_bp = rxq_stats->unlock_bp; + stats->dropped = rxq_stats->dropped; } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); u64_stats_update_end(&stats->syncp); } -/** - * rxq_stats_init - Initialize the statistics of specific queue - * @rxq: Logical Rx Queue - **/ +void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats) +{ + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->bytes = 0; + rxq_stats->packets = 0; + rxq_stats->errors = 0; + rxq_stats->csum_errors = 0; + rxq_stats->unlock_bp = 0; + rxq_stats->other_errors = 0; + rxq_stats->dropped = 0; + + rxq_stats->alloc_skb_err = 0; + u64_stats_update_end(&rxq_stats->syncp); +} + static void rxq_stats_init(struct hinic_rxq *rxq) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; u64_stats_init(&rxq_stats->syncp); - hinic_rxq_clean_stats(rxq); + hinic_rxq_clean_stats(rxq_stats); } -static void rx_csum(struct hinic_rxq *rxq, u32 status, - struct sk_buff *skb) +static void hinic_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* align pull length to size of long to optimize memcpy 
performance */ + skb_copy_to_linear_data(skb, va, HINIC_RX_HDR_SIZE); + + /* update all of the pointers */ + skb_frag_size_sub(frag, HINIC_RX_HDR_SIZE); + frag->bv_offset += HINIC_RX_HDR_SIZE; + skb->data_len -= HINIC_RX_HDR_SIZE; + skb->tail += HINIC_RX_HDR_SIZE; +} + +static void hinic_rx_csum(struct hinic_rxq *rxq, u32 status, + struct sk_buff *skb) { struct net_device *netdev = rxq->netdev; u32 csum_err; - csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR); + csum_err = HINIC_GET_RX_CSUM_ERR(status); + + if (unlikely(csum_err == HINIC_RX_CSUM_IPSU_OTHER_ERR)) + rxq->rxq_stats.other_errors++; if (!(netdev->features & NETIF_F_RXCSUM)) return; @@ -112,456 +457,768 @@ static void rx_csum(struct hinic_rxq *rxq, u32 status, if (!csum_err) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else { + /* pkt type is recognized by HW, and csum is err */ if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE | - HINIC_RX_CSUM_IPSU_OTHER_ERR))) + HINIC_RX_CSUM_IPSU_OTHER_ERR))) rxq->rxq_stats.csum_errors++; + skb->ip_summed = CHECKSUM_NONE; } } -/** - * rx_alloc_skb - allocate skb and map it to dma address - * @rxq: rx queue - * @dma_addr: returned dma address for the skb - * - * Return skb - **/ -static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, - dma_addr_t *dma_addr) + +#ifdef HAVE_SKBUFF_CSUM_LEVEL +static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type, + struct sk_buff *skb) { - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct sk_buff *skb; - dma_addr_t addr; - int err; - - skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); - if (!skb) { - netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n"); - return NULL; - } - - addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz, - DMA_FROM_DEVICE); - err = dma_mapping_error(&pdev->dev, addr); - if (err) { - dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err); - goto err_rx_map; - } - - *dma_addr = addr; - return skb; - -err_rx_map: - dev_kfree_skb_any(skb); - return NULL; -} - -/** - * rx_unmap_skb - unmap the dma address of the skb - * @rxq: rx queue - * @dma_addr: dma address of the skb - **/ -static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr) -{ - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - - dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz, - DMA_FROM_DEVICE); -} - -/** - * rx_free_skb - unmap and free skb - * @rxq: rx queue - * @skb: skb to free - * @dma_addr: dma address of the skb - **/ -static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb, - dma_addr_t dma_addr) -{ - rx_unmap_skb(rxq, dma_addr); - dev_kfree_skb_any(skb); -} - -/** - * rx_alloc_pkts - allocate pkts in rx queue - * @rxq: rx queue - * - * Return number of skbs allocated - **/ -static int rx_alloc_pkts(struct hinic_rxq *rxq) -{ - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_rq_wqe *rq_wqe; - unsigned int free_wqebbs; - struct hinic_sge sge; - dma_addr_t dma_addr; - struct sk_buff *skb; - u16 prod_idx; - int i; - - free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); - - /* Limit the allocation chunks */ - if (free_wqebbs > nic_dev->rx_weight) - free_wqebbs = nic_dev->rx_weight; - - for (i = 0; i < free_wqebbs; i++) { - skb = rx_alloc_skb(rxq, &dma_addr); - if (!skb) { - netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); 
- goto skb_out; - } - - hinic_set_sge(&sge, dma_addr, skb->len); - - rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, - &prod_idx); - if (!rq_wqe) { - rx_free_skb(rxq, skb, dma_addr); - goto skb_out; - } - - hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge); - - hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb); - } - -skb_out: - if (i) { - wmb(); /* write all the wqes before update PI */ - - hinic_rq_update(rxq->rq, prod_idx); - } - - return i; -} - -/** - * free_all_rx_skbs - free all skbs in rx queue - * @rxq: rx queue - **/ -static void free_all_rx_skbs(struct hinic_rxq *rxq) -{ - struct hinic_rq *rq = rxq->rq; - struct hinic_hw_wqe *hw_wqe; - struct hinic_sge sge; - u16 ci; - - while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) { - if (IS_ERR(hw_wqe)) - break; - - hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge); - - hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE); - - rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge)); - } -} - -/** - * rx_recv_jumbo_pkt - Rx handler for jumbo pkt - * @rxq: rx queue - * @head_skb: the first skb in the list - * @left_pkt_len: left size of the pkt exclude head skb - * @ci: consumer index - * - * Return number of wqes that used for the left of the pkt - **/ -static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb, - unsigned int left_pkt_len, u16 ci) -{ - struct sk_buff *skb, *curr_skb = head_skb; - struct hinic_rq_wqe *rq_wqe; - unsigned int curr_len; - struct hinic_sge sge; - int num_wqes = 0; - - while (left_pkt_len > 0) { - rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, - &skb, &ci); - - num_wqes++; - - hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); - - rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); - - prefetch(skb->data); - - curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ : - left_pkt_len; - - left_pkt_len -= curr_len; - - __skb_put(skb, curr_len); - - if (curr_skb == head_skb) - skb_shinfo(head_skb)->frag_list = skb; - else - curr_skb->next = skb; - - head_skb->len += skb->len; - head_skb->data_len += skb->len; - head_skb->truesize += skb->truesize; - - curr_skb = skb; - } - - return num_wqes; -} - -/** - * rxq_recv - Rx handler - * @rxq: rx queue - * @budget: maximum pkts to process - * - * Return number of pkts received - **/ -static int rxq_recv(struct hinic_rxq *rxq, int budget) -{ - struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq); struct net_device *netdev = rxq->netdev; - u64 pkt_len = 0, rx_bytes = 0; - struct hinic_rq *rq = rxq->rq; - struct hinic_rq_wqe *rq_wqe; - unsigned int free_wqebbs; - struct hinic_rq_cqe *cqe; - int num_wqes, pkts = 0; - struct hinic_sge sge; - unsigned int status; + bool l2_tunnel; + + if (!(netdev->features & NETIF_F_GRO)) + return; + + l2_tunnel = HINIC_GET_RX_PKT_TYPE(offload_type) == HINIC_RX_VXLAN_PKT ? 
+ 1 : 0; + + if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY) + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; +} +#endif /* HAVE_SKBUFF_CSUM_LEVEL */ + +#define HINIC_RX_BP_THD 128 + +static void hinic_unlock_bp(struct hinic_rxq *rxq, bool bp_en, bool force_en) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int free_wqebbs, err; + + if (bp_en) + set_bit(HINIC_RX_STATUS_BP_EN, &rxq->status); + + free_wqebbs = rxq->delta - 1; + if (test_bit(HINIC_RX_STATUS_BP_EN, &rxq->status) && + (nic_dev->rq_depth - free_wqebbs) >= nic_dev->bp_upper_thd && + (rxq->bp_cnt >= HINIC_RX_BP_THD || force_en)) { + err = hinic_set_iq_enable_mgmt(nic_dev->hwdev, rxq->q_id, + nic_dev->bp_lower_thd, + rxq->next_to_update); + if (!err) { + clear_bit(HINIC_RX_STATUS_BP_EN, &rxq->status); + rxq->bp_cnt = 0; + rxq->rxq_stats.unlock_bp++; + } else { + nicif_err(nic_dev, drv, netdev, "Failed to set iq enable\n"); + } + } +} + +static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev, + struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_buf = nic_dev->lb_test_rx_buf; + void *frag_data; + int lb_len = nic_dev->lb_pkt_len; + int pkt_offset, frag_len, i; + + if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { + nic_dev->lb_test_rx_idx = 0; + nicif_warn(nic_dev, drv, netdev, "Loopback test warning, recive too more test pkt\n"); + } + + if (skb->len != nic_dev->lb_pkt_len) { + nicif_warn(nic_dev, drv, netdev, "Wrong packet length\n"); + nic_dev->lb_test_rx_idx++; + return; + } + + pkt_offset = nic_dev->lb_test_rx_idx * lb_len; + frag_len = (int)skb_headlen(skb); + memcpy((lb_buf + pkt_offset), skb->data, frag_len); + pkt_offset += frag_len; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy((lb_buf + pkt_offset), frag_data, frag_len); + pkt_offset += frag_len; + } + nic_dev->lb_test_rx_idx++; +} + +int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe, + u32 pkt_len, u32 vlan_len, u32 status) +{ struct sk_buff *skb; + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); u32 offload_type; - u16 ci, num_lro; + + skb = hinic_fetch_rx_buffer(rxq, pkt_len); + if (unlikely(!skb)) { + RXQ_STATS_INC(rxq, alloc_skb_err); + return -ENOMEM; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + hinic_pull_tail(skb); + + hinic_rx_csum(rxq, status, skb); + + offload_type = be32_to_cpu(rx_cqe->offload_type); +#ifdef HAVE_SKBUFF_CSUM_LEVEL + hinic_rx_gro(rxq, offload_type, skb); +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { +#else + if ((netdev->features & NETIF_F_HW_VLAN_RX) && + HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { +#endif + u16 vid = HINIC_GET_RX_VLAN_TAG(vlan_len); + + /* if the packet is a vlan pkt, the vid may be 0 */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + if (unlikely(test_bit(HINIC_LP_TEST, &nic_dev->flags))) + hinic_copy_lp_data(nic_dev, skb); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { +#ifdef HAVE_NAPI_GRO_FLUSH_OLD + napi_gro_flush(&rxq->irq_cfg->napi, false); +#else + napi_gro_flush(&rxq->irq_cfg->napi); +#endif + netif_receive_skb(skb); + } else { + 
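+		/* skbs without a frag_list take the regular GRO path */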
napi_gro_receive(&rxq->irq_cfg->napi, skb); + } + + return 0; +} + +void rx_pass_super_cqe(struct hinic_rxq *rxq, u32 index, u32 pkt_num, + struct hinic_rq_cqe *cqe) +{ + u8 sge_num = 0; + u32 pkt_len; + + while (index < pkt_num) { + pkt_len = hinic_get_pkt_len_for_super_cqe + (cqe, index == (pkt_num - 1)); + sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + index++; + } + + rxq->cons_idx += sge_num; + rxq->delta += sge_num; +} + +static inline int __recv_supper_cqe(struct hinic_rxq *rxq, + struct hinic_rq_cqe *rx_cqe, u32 pkt_info, + u32 vlan_len, u32 status, int *pkts, + u64 *rx_bytes, u32 *dropped) +{ + u32 pkt_len; + int i, pkt_num = 0; + + pkt_num = HINIC_GET_RQ_CQE_PKT_NUM(pkt_info); + i = 0; + while (i < pkt_num) { + pkt_len = ((i == (pkt_num - 1)) ? + RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) : + RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN)); + if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len, + vlan_len, status))) { + if (i) { + rx_pass_super_cqe(rxq, i, + pkt_num, + rx_cqe); + *dropped += (pkt_num - i); + } + break; + } + + *rx_bytes += pkt_len; + (*pkts)++; + i++; + } + + if (!i) + return -EFAULT; + + return 0; +} + + +#define LRO_PKT_HDR_LEN_IPV4 66 +#define LRO_PKT_HDR_LEN_IPV6 86 +#define LRO_PKT_HDR_LEN(cqe) \ + (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \ + HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) + +int hinic_rx_poll(struct hinic_rxq *rxq, int budget) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u32 status, pkt_len, vlan_len, pkt_info, dropped = 0; + struct hinic_rq_cqe *rx_cqe; + u64 rx_bytes = 0; + u16 sw_ci, num_lro; + int pkts = 0, nr_pkts = 0; + bool bp_en = false; u16 num_wqe = 0; - u32 vlan_len; - u16 vid; - while (pkts < budget) { - num_wqes = 0; + while (likely(pkts < budget)) { + sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask; + rx_cqe = rxq->rx_info[sw_ci].cqe; + status = be32_to_cpu(rx_cqe->status); - rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb, - &ci); - if (!rq_wqe) + if (!HINIC_GET_RX_DONE(status)) break; - cqe = rq->cqe[ci]; - status = be32_to_cpu(cqe->status); - hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); + /* make sure we read rx_done before packet length */ + rmb(); - rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + vlan_len = be32_to_cpu(rx_cqe->vlan_len); + pkt_info = be32_to_cpu(rx_cqe->pkt_info); + pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len); - rx_csum(rxq, status, skb); - prefetch(skb->data); - - pkt_len = sge.len; - - if (pkt_len <= HINIC_RX_BUF_SZ) { - __skb_put(skb, pkt_len); + if (unlikely(HINIC_GET_SUPER_CQE_EN(pkt_info))) { + if (unlikely(__recv_supper_cqe(rxq, rx_cqe, pkt_info, + vlan_len, status, &pkts, + &rx_bytes, &dropped))) + break; + nr_pkts += (int)HINIC_GET_RQ_CQE_PKT_NUM(pkt_info); } else { - __skb_put(skb, HINIC_RX_BUF_SZ); - num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len - - HINIC_RX_BUF_SZ, ci); + if (recv_one_pkt(rxq, rx_cqe, pkt_len, + vlan_len, status)) + break; + rx_bytes += pkt_len; + pkts++; + nr_pkts++; + + num_lro = HINIC_GET_RX_NUM_LRO(status); + if (num_lro) { + rx_bytes += ((num_lro - 1) * + LRO_PKT_HDR_LEN(rx_cqe)); + + num_wqe += + (u16)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 
1 : 0); + } + } + if (unlikely(HINIC_GET_RX_BP_EN(status))) { + rxq->bp_cnt++; + bp_en = true; } - hinic_rq_put_wqe(rq, ci, - (num_wqes + 1) * HINIC_RQ_WQE_SIZE); + rx_cqe->status = 0; - offload_type = be32_to_cpu(cqe->offload_type); - vlan_len = be32_to_cpu(cqe->len); - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { - vid = HINIC_GET_RX_VLAN_TAG(vlan_len); - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - } - - skb_record_rx_queue(skb, qp->q_id); - skb->protocol = eth_type_trans(skb, rxq->netdev); - - napi_gro_receive(&rxq->napi, skb); - - pkts++; - rx_bytes += pkt_len; - - num_lro = HINIC_GET_RX_NUM_LRO(status); - if (num_lro) { - rx_bytes += ((num_lro - 1) * - LRO_PKT_HDR_LEN(cqe)); - - num_wqe += - (u16)(pkt_len >> rxq->rx_buff_shift) + - ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); - } - - cqe->status = 0; - - if (num_wqe >= LRO_REPLENISH_THLD) + if (num_wqe >= nic_dev->lro_replenish_thld) break; } - free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); - if (free_wqebbs > HINIC_RX_BUFFER_WRITE) - rx_alloc_pkts(rxq); + if (rxq->delta >= HINIC_RX_BUFFER_WRITE) + hinic_rx_fill_buffers(rxq); + + if (unlikely(bp_en || test_bit(HINIC_RX_STATUS_BP_EN, &rxq->status))) + hinic_unlock_bp(rxq, bp_en, pkts < budget); u64_stats_update_begin(&rxq->rxq_stats.syncp); - rxq->rxq_stats.pkts += pkts; + rxq->rxq_stats.packets += nr_pkts; rxq->rxq_stats.bytes += rx_bytes; + rxq->rxq_stats.dropped += dropped; u64_stats_update_end(&rxq->rxq_stats.syncp); - return pkts; } -static int rx_poll(struct napi_struct *napi, int budget) +static int rx_alloc_cqe(struct hinic_rxq *rxq) { - struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi); - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_rq *rq = rxq->rq; - int pkts; + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_rx_info *rx_info; + struct hinic_rq_cqe *cqe_va; + dma_addr_t cqe_pa; + u32 cqe_mem_size; + int idx; - pkts = rxq_recv(rxq, budget); - if (pkts >= budget) - return budget; - - napi_complete(napi); - hinic_hwdev_set_msix_state(nic_dev->hwdev, - rq->msix_entry, - HINIC_MSIX_ENABLE); - - return pkts; -} - -static void rx_add_napi(struct hinic_rxq *rxq) -{ - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - - netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight); - napi_enable(&rxq->napi); -} - -static void rx_del_napi(struct hinic_rxq *rxq) -{ - napi_disable(&rxq->napi); - netif_napi_del(&rxq->napi); -} - -static irqreturn_t rx_irq(int irq, void *data) -{ - struct hinic_rxq *rxq = (struct hinic_rxq *)data; - struct hinic_rq *rq = rxq->rq; - struct hinic_dev *nic_dev; - - /* Disable the interrupt until napi will be completed */ - nic_dev = netdev_priv(rxq->netdev); - hinic_hwdev_set_msix_state(nic_dev->hwdev, - rq->msix_entry, - HINIC_MSIX_DISABLE); - - nic_dev = netdev_priv(rxq->netdev); - hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry); - - napi_schedule(&rxq->napi); - return IRQ_HANDLED; -} - -static int rx_request_irq(struct hinic_rxq *rxq) -{ - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_rq *rq = rxq->rq; - struct hinic_qp *qp; - struct cpumask mask; - int err; - - rx_add_napi(rxq); - - hinic_hwdev_msix_set(hwdev, rq->msix_entry, - RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC, - RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT, - RX_IRQ_NO_RESEND_TIMER); - - err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); - 
if (err) { - rx_del_napi(rxq); - return err; + cqe_mem_size = sizeof(*rx_info->cqe) * rxq->q_depth; + rxq->cqe_start_vaddr = dma_zalloc_coherent(&pdev->dev, cqe_mem_size, + &rxq->cqe_start_paddr, + GFP_KERNEL); + if (!rxq->cqe_start_vaddr) { + nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate cqe dma\n"); + return -ENOMEM; } - qp = container_of(rq, struct hinic_qp, rq); - cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask); - return irq_set_affinity_hint(rq->irq, &mask); + cqe_va = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr; + cqe_pa = rxq->cqe_start_paddr; + + for (idx = 0; idx < rxq->q_depth; idx++) { + rx_info = &rxq->rx_info[idx]; + rx_info->cqe = cqe_va; + rx_info->cqe_dma = cqe_pa; + + cqe_va++; + cqe_pa += sizeof(*rx_info->cqe); + } + + hinic_rq_cqe_addr_set(nic_dev->hwdev, rxq->q_id, rxq->cqe_start_paddr); + return 0; } -static void rx_free_irq(struct hinic_rxq *rxq) +static void rx_free_cqe(struct hinic_rxq *rxq) { - struct hinic_rq *rq = rxq->rq; + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct pci_dev *pdev = nic_dev->pdev; + u32 cqe_mem_size; - irq_set_affinity_hint(rq->irq, NULL); - free_irq(rq->irq, rxq); - rx_del_napi(rxq); + cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth; + + dma_free_coherent(&pdev->dev, cqe_mem_size, + rxq->cqe_start_vaddr, rxq->cqe_start_paddr); } -/** - * hinic_init_rxq - Initialize the Rx Queue - * @rxq: Logical Rx Queue - * @rq: Hardware Rx Queue to connect the Logical queue with - * @netdev: network device to connect the Logical queue with - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, - struct net_device *netdev) +static int hinic_setup_rx_resources(struct hinic_rxq *rxq, + struct net_device *netdev, + struct irq_info *entry) { - struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq); + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u64 rx_info_sz; int err, pkts; - rxq->netdev = netdev; - rxq->rq = rq; - rxq->buf_len = HINIC_RX_BUF_SZ; - rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ); + rxq->irq_id = entry->irq_id; + rxq->msix_entry_idx = entry->msix_entry_idx; + rxq->next_to_alloc = 0; + rxq->next_to_update = 0; + rxq->delta = rxq->q_depth; + rxq->q_mask = rxq->q_depth - 1; + rxq->cons_idx = 0; - rxq_stats_init(rxq); - - rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, - "hinic_rxq%d", qp->q_id); - if (!rxq->irq_name) - return -ENOMEM; - - pkts = rx_alloc_pkts(rxq); - if (!pkts) { - err = -ENOMEM; - goto err_rx_pkts; + rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info); + if (!rx_info_sz) { + nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size rx info\n"); + return -EINVAL; } - err = rx_request_irq(rxq); + rxq->rx_info = kzalloc(rx_info_sz, GFP_KERNEL); + if (!rxq->rx_info) + return -ENOMEM; + + err = rx_alloc_cqe(rxq); if (err) { - netdev_err(netdev, "Failed to request Rx irq\n"); - goto err_req_rx_irq; + nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx cqe\n"); + goto rx_cqe_err; + } + + pkts = hinic_rx_fill_wqe(rxq); + if (pkts != rxq->q_depth) { + nicif_err(nic_dev, drv, netdev, "Failed to fill rx wqe\n"); + err = -ENOMEM; + goto rx_pkts_err; + } + pkts = hinic_rx_fill_buffers(rxq); + if (!pkts) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx buffer\n"); + err = -ENOMEM; + goto rx_pkts_err; } return 0; -err_req_rx_irq: -err_rx_pkts: - free_all_rx_skbs(rxq); - devm_kfree(&netdev->dev, rxq->irq_name); +rx_pkts_err: + rx_free_cqe(rxq); + +rx_cqe_err: + kfree(rxq->rx_info); + return err; } -/** 
- * hinic_clean_rxq - Clean the Rx Queue - * @rxq: Logical Rx Queue - **/ -void hinic_clean_rxq(struct hinic_rxq *rxq) +static void hinic_free_rx_resources(struct hinic_rxq *rxq) { - struct net_device *netdev = rxq->netdev; - - rx_free_irq(rxq); - - free_all_rx_skbs(rxq); - devm_kfree(&netdev->dev, rxq->irq_name); + hinic_rx_free_buffers(rxq); + rx_free_cqe(rxq); + kfree(rxq->rx_info); +} + +int hinic_setup_all_rx_resources(struct net_device *netdev, + struct irq_info *msix_entires) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 i, q_id; + int err; + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + err = hinic_setup_rx_resources(&nic_dev->rxqs[q_id], + nic_dev->netdev, + &msix_entires[q_id]); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set up rxq resource\n"); + goto init_rxq_err; + } + } + + return 0; + +init_rxq_err: + for (i = 0; i < q_id; i++) + hinic_free_rx_resources(&nic_dev->rxqs[i]); + + return err; +} + +void hinic_free_all_rx_resources(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id; + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) + hinic_free_rx_resources(&nic_dev->rxqs[q_id]); +} + +int hinic_alloc_rxqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_rxq *rxq; + u16 num_rxqs = nic_dev->max_qps; + u16 q_id; + u64 rxq_size; + + rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); + if (!rxq_size) { + nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n"); + return -EINVAL; + } + + nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL); + if (!nic_dev->rxqs) { + nic_err(&pdev->dev, "Failed to allocate rxqs\n"); + return -ENOMEM; + } + + for (q_id = 0; q_id < num_rxqs; q_id++) { + rxq = &nic_dev->rxqs[q_id]; + rxq->netdev = netdev; + rxq->dev = &pdev->dev; + rxq->q_id = q_id; + rxq->buf_len = nic_dev->rx_buff_len; + rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len); + rxq->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE * + nic_dev->rx_buff_len; + rxq->q_depth = nic_dev->rq_depth; + rxq->q_mask = nic_dev->rq_depth - 1; + + + rxq_stats_init(rxq); + } + + return 0; +} + +void hinic_free_rxqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + hinic_clear_rss_config_user(nic_dev); + kfree(nic_dev->rxqs); +} + +void hinic_init_rss_parameters(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR; + + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.ipv4 = 1; + nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_type.udp_ipv4 = 1; +} + +void hinic_set_default_rss_indir(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (!nic_dev->rss_indir_user) + return; + + nicif_info(nic_dev, drv, netdev, + "Discard user configured Rx flow hash indirection\n"); + + kfree(nic_dev->rss_indir_user); + nic_dev->rss_indir_user = NULL; +} + +static void hinic_maybe_reconfig_rss_indir(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int i; + + if (!nic_dev->rss_indir_user) + return; + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + goto discard_user_rss_indir; + + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { + if (nic_dev->rss_indir_user[i] >= nic_dev->num_qps) + goto discard_user_rss_indir; + } + + return; + 
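+	/* reached only when DCB is enabled or a user entry references a
+	 * queue beyond num_qps (e.g. entry 5 with num_qps = 4); fall back
+	 * to the default indirection table in both cases
+	 */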
+discard_user_rss_indir: + hinic_set_default_rss_indir(netdev); +} + +static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev) +{ + kfree(nic_dev->rss_hkey_user); + + nic_dev->rss_hkey_user_be = NULL; + nic_dev->rss_hkey_user = NULL; + + kfree(nic_dev->rss_indir_user); + nic_dev->rss_indir_user = NULL; +} + +static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, + u8 num_tcs, u32 *indir) +{ + u16 num_rss, tc_group_size; + int i; + + if (num_tcs) + tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs; + else + tc_group_size = HINIC_RSS_INDIR_SIZE; + + num_rss = nic_dev->num_rss; + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) + indir[i] = (i / tc_group_size) * num_rss + i % num_rss; +} + +static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev) +{ + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + + hinic_rss_cfg(nic_dev->hwdev, 0, nic_dev->rss_tmpl_idx, 0, prio_tc); +} + +/* In rx, iq means cos */ +static u8 hinic_get_iqmap_by_tc(u8 *prio_tc, u8 num_iq, u8 tc) +{ + u8 i, map = 0; + + for (i = 0; i < num_iq; i++) { + if (prio_tc[i] == tc) + map |= (u8)(1U << ((num_iq - 1) - i)); + } + + return map; +} + +static u8 hinic_get_tcid_by_rq(u32 *indir_tbl, u8 num_tcs, u16 rq_id) +{ + u16 tc_group_size; + int i; + + tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs; + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { + if (indir_tbl[i] == rq_id) + return (u8)(i / tc_group_size); + } + + return 0xFF; /* Invalid TC */ +} + +#define HINIC_NUM_IQ_PER_FUNC 8 +static void hinic_get_rq2iq_map(struct hinic_nic_dev *nic_dev, + u16 num_rq, u8 num_tcs, u8 *prio_tc, + u32 *indir_tbl, u8 *map) +{ + u16 qid; + u8 tc_id; + + if (!num_tcs) + num_tcs = 1; + + for (qid = 0; qid < num_rq; qid++) { + tc_id = hinic_get_tcid_by_rq(indir_tbl, num_tcs, qid); + map[qid] = hinic_get_iqmap_by_tc(prio_tc, + HINIC_NUM_IQ_PER_FUNC, tc_id); + } +} + +int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, + u8 *prio_tc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 tmpl_idx = 0xFF; + u8 default_rss_key[HINIC_RSS_KEY_SIZE] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; + u32 *indir_tbl; + u8 *hkey; + int err; + + tmpl_idx = nic_dev->rss_tmpl_idx; + + /* RSS key */ + if (nic_dev->rss_hkey_user) + hkey = nic_dev->rss_hkey_user; + else + hkey = default_rss_key; + err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hkey); + if (err) + return err; + + hinic_maybe_reconfig_rss_indir(netdev); + indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!indir_tbl) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate set hw rss indir_tbl\n"); + return -ENOMEM; + } + + if (nic_dev->rss_indir_user) + memcpy(indir_tbl, nic_dev->rss_indir_user, + sizeof(u32) * HINIC_RSS_INDIR_SIZE); + else + hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl); + + err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indir_tbl); + if (err) + goto out; + + err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, nic_dev->rss_type); + if (err) + goto out; + + err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx, + nic_dev->rss_hash_engine); + if (err) + goto out; + + err = hinic_rss_cfg(nic_dev->hwdev, rss_en, tmpl_idx, num_tc, prio_tc); + if (err) + goto out; + + kfree(indir_tbl); + return 0; + +out: + kfree(indir_tbl); + return err; +} + +static int hinic_rss_init(struct hinic_nic_dev 
*nic_dev, u8 *rq2iq_map) +{ + struct net_device *netdev = nic_dev->netdev; + u32 *indir_tbl; + u8 cos, num_tc = 0; + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + int err; + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) { + num_tc = nic_dev->max_cos; + for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) { + if (cos < HINIC_DCB_COS_MAX - nic_dev->max_cos) + prio_tc[cos] = nic_dev->max_cos - 1; + else + prio_tc[cos] = (HINIC_DCB_COS_MAX - 1) - cos; + } + } else { + num_tc = 0; + } + + indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!indir_tbl) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate rss init indir_tbl\n"); + return -ENOMEM; + } + + if (nic_dev->rss_indir_user) + memcpy(indir_tbl, nic_dev->rss_indir_user, + sizeof(u32) * HINIC_RSS_INDIR_SIZE); + else + hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl); + err = hinic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc); + if (err) { + kfree(indir_tbl); + return err; + } + + hinic_get_rq2iq_map(nic_dev, nic_dev->num_qps, num_tc, + prio_tc, indir_tbl, rq2iq_map); + + kfree(indir_tbl); + return 0; +} + +int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + + /* RSS must be enable when dcb is enabled */ + return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc); +} + +int hinic_rx_configure(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 rq2iq_map[HINIC_MAX_NUM_RQ]; + int err; + + /* Set all rq mapping to all iq in default */ + memset(rq2iq_map, 0xFF, sizeof(rq2iq_map)); + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + err = hinic_rss_init(nic_dev, rq2iq_map); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to init rss\n"); + return -EFAULT; + } + } + + err = hinic_dcb_set_rq_iq_mapping(nic_dev->hwdev, + hinic_func_max_qnum(nic_dev->hwdev), + rq2iq_map); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set rq_iq mapping\n"); + goto set_rq_cos_mapping_err; + } + + return 0; + +set_rq_cos_mapping_err: + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + hinic_rss_deinit(nic_dev); + + return err; +} + +void hinic_rx_remove_configure(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + hinic_rss_deinit(nic_dev); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h index 507dcbae9085..cc0dd71b8236 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -2,52 +2,136 @@ /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * */ #ifndef HINIC_RX_H #define HINIC_RX_H -#include -#include -#include -#include - -#include "hinic_hw_qp.h" - -#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF +/* rx cqe checksum err */ +#define HINIC_RX_CSUM_IP_CSUM_ERR BIT(0) +#define HINIC_RX_CSUM_TCP_CSUM_ERR BIT(1) +#define HINIC_RX_CSUM_UDP_CSUM_ERR BIT(2) +#define HINIC_RX_CSUM_IGMP_CSUM_ERR BIT(3) +#define HINIC_RX_CSUM_ICMPv4_CSUM_ERR BIT(4) +#define HINIC_RX_CSUM_ICMPv6_CSUM_ERR BIT(5) +#define HINIC_RX_CSUM_SCTP_CRC_ERR BIT(6) #define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7) #define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) -struct hinic_rxq_stats { - u64 pkts; - u64 bytes; - u64 errors; - u64 csum_errors; - u64 other_errors; - u64 alloc_skb_err; - struct u64_stats_sync syncp; +#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF + +#define HINIC_RX_BP_LOWER_THD 200 +#define HINIC_RX_BP_UPPER_THD 400 + +#define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16 +#define HINIC_RX_BUFFER_WRITE 16 + +enum { + HINIC_RX_STATUS_BP_EN, }; +struct hinic_rxq_stats { + u64 packets; + u64 bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 unlock_bp; + u64 dropped; + + u64 alloc_skb_err; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#else + struct u64_stats_sync_empty syncp; +#endif +}; + +struct hinic_rx_info { + dma_addr_t buf_dma_addr; + + struct hinic_rq_cqe *cqe; + dma_addr_t cqe_dma; + struct page *page; + u32 page_offset; + struct hinic_rq_wqe *rq_wqe; +}; + + struct hinic_rxq { - struct net_device *netdev; - struct hinic_rq *rq; + struct net_device *netdev; - struct hinic_rxq_stats rxq_stats; + u16 q_id; + u16 q_depth; + u16 q_mask; - char *irq_name; u16 buf_len; u32 rx_buff_shift; + u32 dma_rx_buff_size; + + struct hinic_rxq_stats rxq_stats; + int cons_idx; + int delta; + + u32 irq_id; + u16 msix_entry_idx; + + struct hinic_rx_info *rx_info; + + struct hinic_irq *irq_cfg; + u16 next_to_alloc; + u16 next_to_update; + struct device *dev; /* device for DMA mapping */ + + u32 bp_cnt; + unsigned long status; + dma_addr_t cqe_start_paddr; + void *cqe_start_vaddr; + u64 last_moder_packets; + u64 last_moder_bytes; + u8 last_coalesc_timer_cfg; + u8 last_pending_limt; - struct napi_struct napi; }; -void hinic_rxq_clean_stats(struct hinic_rxq *rxq); +void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats); -void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats); +void hinic_rxq_get_stats(struct hinic_rxq *rxq, + struct hinic_rxq_stats *stats); -int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, - struct net_device *netdev); +int hinic_alloc_rxqs(struct net_device *netdev); + +void hinic_free_rxqs(struct net_device *netdev); + +void hinic_init_rss_parameters(struct net_device *netdev); + +void hinic_set_default_rss_indir(struct net_device *netdev); + +int hinic_setup_all_rx_resources(struct net_device *netdev, + struct irq_info *msix_entires); + +void hinic_free_all_rx_resources(struct net_device *netdev); + +void hinic_rx_remove_configure(struct net_device *netdev); + +int hinic_rx_configure(struct net_device *netdev); + +int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, + u8 *prio_tc); + +int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc); + +int hinic_rx_poll(struct hinic_rxq *rxq, int budget); -void hinic_clean_rxq(struct hinic_rxq *rxq); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h new file mode 100644 index 000000000000..84ece71e9673 --- /dev/null +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __CHIPIF_SM_LT_H__ +#define __CHIPIF_SM_LT_H__ + +#define SM_LT_LOAD (0x12) +#define SM_LT_STORE (0x14) + +#define SM_LT_NUM_OFFSET 13 +#define SM_LT_ABUF_FLG_OFFSET 12 +#define SM_LT_BC_OFFSET 11 + +#define SM_LT_ENTRY_16B 16 +#define SM_LT_ENTRY_32B 32 +#define SM_LT_ENTRY_48B 48 +#define SM_LT_ENTRY_64B 64 + +#define TBL_LT_OFFSET_DEFAULT 0 + +#define SM_CACHE_LINE_SHFT 4 /* log2(16) */ +#define SM_CACHE_LINE_SIZE 16 /* the size of cache line */ + +#define MAX_SM_LT_READ_LINE_NUM 4 +#define MAX_SM_LT_WRITE_LINE_NUM 3 + +#define SM_LT_FULL_BYTEENB 0xFFFF + +#define TBL_GET_ENB3_MASK(bitmask) (u16)(((bitmask) >> 32) & 0xFFFF) +#define TBL_GET_ENB2_MASK(bitmask) (u16)(((bitmask) >> 16) & 0xFFFF) +#define TBL_GET_ENB1_MASK(bitmask) (u16)((bitmask) & 0xFFFF) + +enum { + SM_LT_NUM_0 = 0, /* lt num = 0, load/store 16B */ + SM_LT_NUM_1, /* lt num = 1, load/store 32B */ + SM_LT_NUM_2, /* lt num = 2, load/store 48B */ + SM_LT_NUM_3 /* lt num = 3, load 64B */ +}; + +/* lt load request */ +typedef union { + struct { + u32 offset:8; + u32 pad:3; + u32 bc:1; + u32 abuf_flg:1; + u32 num:2; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +} sml_lt_req_head_u; + +typedef struct { + u32 extra; + sml_lt_req_head_u head; + u32 index; + u32 pad0; + u32 pad1; +} sml_lt_load_req_s; + +typedef struct { + u32 extra; + sml_lt_req_head_u head; + u32 index; + u32 byte_enb[2]; + u8 write_data[48]; +} sml_lt_store_req_s; + +enum { + SM_LT_OFFSET_1 = 1, + SM_LT_OFFSET_2, + SM_LT_OFFSET_3, + SM_LT_OFFSET_4, + SM_LT_OFFSET_5, + SM_LT_OFFSET_6, + SM_LT_OFFSET_7, + SM_LT_OFFSET_8, + SM_LT_OFFSET_9, + SM_LT_OFFSET_10, + SM_LT_OFFSET_11, + SM_LT_OFFSET_12, + SM_LT_OFFSET_13, + SM_LT_OFFSET_14, + SM_LT_OFFSET_15 +}; + +static inline void sml_lt_store_memcpy(u32 *dst, u32 *src, u8 num) +{ + switch (num) { + case SM_LT_NUM_2: + *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11); + *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10); + *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9); + *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8); + case SM_LT_NUM_1: + *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7); + *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6); + *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5); + *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4); + case SM_LT_NUM_0: + *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3); + *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2); + *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1); + *dst = *src; + break; + default: + break; + } +} + +static inline void sml_lt_load_memcpy(u32 *dst, u32 *src, u8 num) +{ + switch (num) { + case SM_LT_NUM_3: + *(dst + SM_LT_OFFSET_15) = *(src + SM_LT_OFFSET_15); + *(dst + SM_LT_OFFSET_14) = *(src + SM_LT_OFFSET_14); + *(dst + SM_LT_OFFSET_13) = *(src + SM_LT_OFFSET_13); + *(dst + SM_LT_OFFSET_12) = *(src + SM_LT_OFFSET_12); + 
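/* fall through */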
+	case SM_LT_NUM_2:
+		*(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11);
+		*(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10);
+		*(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9);
+		*(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8);
+		/* fall through */
+	case SM_LT_NUM_1:
+		*(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7);
+		*(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6);
+		*(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5);
+		*(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4);
+		/* fall through */
+	case SM_LT_NUM_0:
+		*(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3);
+		*(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2);
+		*(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1);
+		*dst = *src;
+		break;
+	default:
+		break;
+	}
+}
+
+enum HINIC_CSR_API_DATA_OPERATION_ID {
+	HINIC_CSR_OPERATION_WRITE_CSR = 0x1E,
+	HINIC_CSR_OPERATION_READ_CSR = 0x1F
+};
+
+enum HINIC_CSR_API_DATA_NEED_RESPONSE_DATA {
+	HINIC_CSR_NO_RESP_DATA = 0,
+	HINIC_CSR_NEED_RESP_DATA = 1
+};
+
+enum HINIC_CSR_API_DATA_DATA_SIZE {
+	HINIC_CSR_DATA_SZ_32 = 0,
+	HINIC_CSR_DATA_SZ_64 = 1
+};
+
+struct hinic_csr_request_api_data {
+	u32 dw0;
+
+	union {
+		struct {
+			u32 reserved1:13;
+			/* this field indicates the write/read data size:
+			 * 2'b00: 32 bits
+			 * 2'b01: 64 bits
+			 * 2'b10~2'b11: reserved
+			 */
+			u32 data_size:2;
+			/* this field indicates whether the requestor
+			 * expects to receive response data.
+			 * 1'b0: no response data is expected.
+			 * 1'b1: response data is expected.
+			 */
+			u32 need_response:1;
+			/* this field indicates the operation that the
+			 * requestor expects.
+			 * 5'b1_1110: write value to csr space.
+			 * 5'b1_1111: read register from csr space.
+			 */
+			u32 operation_id:5;
+			u32 reserved2:6;
+			/* this field specifies the Src node ID for this API
+			 * request message.
+			 */
+			u32 src_node_id:5;
+		} bits;
+
+		u32 val32;
+	} dw1;
+
+	union {
+		struct {
+			/* it specifies the CSR address. */
+			u32 csr_addr:26;
+			u32 reserved3:6;
+		} bits;
+
+		u32 val32;
+	} dw2;
+
+	/* if data_size is 2'b01, this is the high 32 bits of the write data;
+	 * otherwise it is 32'hFFFF_FFFF.
+	 */
+	u32 csr_write_data_h;
+	/* the low 32 bits of the write data. */
+	u32 csr_write_data_l;
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
new file mode 100644
index 000000000000..315625543a5f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_sml_counter.h" + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static void sml_ctr_htonl_n(u32 *node, u32 ulLen) +{ + u32 i; + + for (i = 0; i < ulLen; i++) { + *node = HTONL(*node); + node++; + } +} + +static void hinic_sml_ctr_read_build_req(chipif_sml_ctr_rd_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + + msg->initial = init_val; +} + +static void hinic_sml_ctr_write_build_req(chipif_sml_ctr_wr_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, + u64 val1, u64 val2) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + + msg->value1_h = val1 >> 32; + msg->value1_l = val1 & 0xFFFFFFFF; + + msg->value2_h = val2 >> 32; + msg->value2_l = val2 & 0xFFFFFFFF; +} + +/** + * hinic_sm_ctr_rd32 - small single 32 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} +EXPORT_SYMBOL(hinic_sm_ctr_rd32); + +/** + * hinic_sm_ctr_rd32_clear - small single 32 counter read and clear to zero + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc) + */ +int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hinic_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} +EXPORT_SYMBOL(hinic_sm_ctr_rd32_clear); + +/** + * hinic_sm_ctr_wr32 - small single 32 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: 
counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value) +{ + chipif_sml_ctr_wr_req_s req; + chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, (u64)value, 0ULL); + + return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hinic_sm_ctr_rd64 - big counter 64 read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter read fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2; + + return 0; +} + +/** + * hinic_sm_ctr_wr64 - big single 64 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value) +{ + chipif_sml_ctr_wr_req_s req; + chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value, 0ULL); + + return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hinic_sm_ctr_rd64_pair - big pair 128 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!value1) { + pr_err("value1 is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!value2) { + pr_err("value2 is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!hwdev || (0 != (ctr_id & 0x1))) { + pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit rd pair ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hinic_sm_ctr_wr64_pair - big pair 128 counter 
write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value1: write counter value
+ * @value2: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance,
+			   u32 ctr_id, u64 value1, u64 value2)
+{
+	chipif_sml_ctr_wr_req_s req;
+	chipif_sml_ctr_wr_rsp_s rsp;
+
+	/* in pair mode, ctr_id must be an even number */
+	if (!hwdev || (0 != (ctr_id & 0x1))) {
+		pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n",
+		       ctr_id);
+		return -EFAULT;
+	}
+
+	hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+				      CHIPIF_NOACK, ctr_id, value1, value2);
+	return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+				      (unsigned short)sizeof(req), (void *)&rsp,
+				      (unsigned short)sizeof(rsp));
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
new file mode 100644
index 000000000000..f8c4eec09988
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CHIPIF_SML_COUNTER_H__
+#define __CHIPIF_SML_COUNTER_H__
+
+#define CHIPIF_FUNC_PF 0
+#define CHIPIF_FUNC_VF 1
+#define CHIPIF_FUNC_PPF 2
+
+#define CHIPIF_ACK 1
+#define CHIPIF_NOACK 0
+
+#define CHIPIF_SM_CTR_OP_READ 0x2
+#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6
+#define CHIPIF_SM_CTR_OP_WRITE 0x3
+
+#define SMALL_CNT_READ_RSP_SIZE 16
+
+/* request head */
+typedef union {
+	struct {
+		u32 pad:15;
+		u32 ack:1;
+		u32 op_id:5;
+		u32 instance:6;
+		u32 src:5;
+	} bs;
+
+	u32 value;
+} chipif_sml_ctr_req_head_u;
+/* counter read request struct */
+typedef struct {
+	u32 extra;
+	chipif_sml_ctr_req_head_u head;
+	u32 ctr_id;
+	u32 initial;
+	u32 pad;
+} chipif_sml_ctr_rd_req_s;
+
+/* counter read response union */
+typedef union {
+	struct {
+		u32 value1:16;
+		u32 pad0:16;
+		u32 pad1[3];
+	} bs_ss16_rsp;
+
+	struct {
+		u32 value1;
+		u32 pad[3];
+	} bs_ss32_rsp;
+
+	struct {
+		u32 value1:20;
+		u32 pad0:12;
+		u32 value2:12;
+		u32 pad1:20;
+		u32 pad2[2];
+	} bs_sp_rsp;
+
+	struct {
+		u32 value1;
+		u32 value2;
+		u32 pad[2];
+	} bs_bs64_rsp;
+
+	struct {
+		u32 val1_h;
+		u32 val1_l;
+		u32 val2_h;
+		u32 val2_l;
+	} bs_bp64_rsp;
+
+} ctr_rd_rsp_u;
+
+/* response head */
+typedef union {
+	struct {
+		u32 pad:30;	/* reserved */
+		u32 code:2;	/* error code */
+	} bs;
+
+	u32 value;
+} sml_ctr_rsp_head_u;
+
+/* counter write request struct */
+typedef struct {
+	u32 extra;
+	chipif_sml_ctr_req_head_u head;
+	u32 ctr_id;
+	u32 rsv1;
+	u32 rsv2;
+	u32 value1_h;
+	u32 value1_l;
+	u32 value2_h;
+	u32 value2_l;
+} chipif_sml_ctr_wr_req_s;
+
+/* counter write response struct */
+typedef struct {
+	sml_ctr_rsp_head_u head;
+	u32 pad[3];
+} chipif_sml_ctr_wr_rsp_s;
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
new file mode
100644 index 000000000000..cddd2d7572fb --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_sm_lt.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_dbg.h" + +#define ACK 1 +#define NOACK 0 + +#define LT_LOAD16_API_SIZE (16 + 4) +#define LT_STORE16_API_SIZE (32 + 4) + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static inline void sm_lt_build_head(sml_lt_req_head_u *head, + u8 instance_id, + u8 op_id, u8 ack, + u8 offset, u8 num) +{ + head->value = 0; + head->bs.instance = instance_id; + head->bs.op_id = op_id; + head->bs.ack = ack; + head->bs.num = num; + head->bs.abuf_flg = 0; + head->bs.bc = 1; + head->bs.offset = offset; + head->value = HTONL((head->value)); +} + +static inline void sm_lt_load_build_req(sml_lt_load_req_s *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u8 offset, u8 num) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num); + req->extra = 0; + req->index = lt_index; + req->index = HTONL(req->index); +} + +static inline void sm_lt_store_build_req(sml_lt_store_req_s *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u8 offset, + u8 num, + u16 byte_enb3, + u16 byte_enb2, + u16 byte_enb1, + u8 *data) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num); + req->index = lt_index; + req->index = HTONL(req->index); + req->extra = 0; + req->byte_enb[0] = (u32)(byte_enb3); + req->byte_enb[0] = HTONL(req->byte_enb[0]); + req->byte_enb[1] = HTONL((((u32)byte_enb2) << 16) | byte_enb1); + sml_lt_store_memcpy((u32 *)req->write_data, (u32 *)(void *)data, num); +} + +int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data) +{ + sml_lt_load_req_s req; + int ret; + + if (!hwdev) + return -EFAULT; + + sm_lt_load_build_req(&req, instance, SM_LT_LOAD, ACK, lt_index, 0, 0); + + ret = hinic_api_cmd_read_ack(hwdev, dest, &req, + LT_LOAD16_API_SIZE, (void *)data, 16); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Read linear table 16byte fail, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_dbg_lt_rd_16byte); + +int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask) +{ + sml_lt_store_req_s req; + int ret; + + if (!hwdev || !data) + return -EFAULT; + + sm_lt_store_build_req(&req, instance, SM_LT_STORE, NOACK, lt_index, + 0, 0, 0, 0, mask, data); + + ret = hinic_api_cmd_write_nack(hwdev, dest, &req, LT_STORE16_API_SIZE); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Write linear table 16byte fail, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} 
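+/*
+ * Usage sketch (illustrative only, not part of the driver): read one
+ * 16-byte linear-table entry, patch its first byte and store it back
+ * through the two debug helpers above. The hwdev/dest/lt_index values
+ * are hypothetical, and the mask is assumed to enable byte 0 via bit 0.
+ *
+ *	u8 entry[SM_LT_ENTRY_16B];
+ *	int err;
+ *
+ *	err = hinic_dbg_lt_rd_16byte(hwdev, dest, 0, lt_index, entry);
+ *	if (!err) {
+ *		entry[0] = 0x5a;
+ *		err = hinic_dbg_lt_wr_16byte_mask(hwdev, dest, 0,
+ *						  lt_index, entry, 0x0001);
+ *	}
+ */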
+EXPORT_SYMBOL(hinic_dbg_lt_wr_16byte_mask); + +int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val) +{ + struct hinic_csr_request_api_data api_data = {0}; + u32 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hinic_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 4); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Read 32 bit csr fail, dest %d addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} +EXPORT_SYMBOL(hinic_api_csr_rd32); + +int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val) +{ + struct hinic_csr_request_api_data api_data; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hinic_csr_request_api_data)); + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = 0xffffffff; + api_data.csr_write_data_l = val; + + ret = hinic_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Write 32 bit csr fail! dest %d addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(hinic_api_csr_wr32); + +int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val) +{ + struct hinic_csr_request_api_data api_data = {0}; + u64 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hinic_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_64; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 8); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Read 64 bit csr fail, dest %d addr 0x%x\n", + dest, addr); + return ret; + } + + *val = csr_val; + + return 0; +} +EXPORT_SYMBOL(hinic_api_csr_rd64); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c new file mode 100644 index 000000000000..152e59cb9c2a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include
+#include
+#include
+#include
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_sriov.h"
+#include "hinic_lld.h"
+
+#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE))
+ssize_t hinic_sriov_totalvfs_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return sprintf(buf, "%d\n", pci_sriov_get_totalvfs(pdev));
+}
+
+ssize_t hinic_sriov_numvfs_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return sprintf(buf, "%d\n", pci_num_vf(pdev));
+}
+
+ssize_t hinic_sriov_numvfs_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+	u16 num_vfs;
+	int cur_vfs, total_vfs;
+
+	ret = kstrtou16(buf, 0, &num_vfs);
+	if (ret < 0)
+		return ret;
+
+	cur_vfs = pci_num_vf(pdev);
+	total_vfs = pci_sriov_get_totalvfs(pdev);
+
+	if (num_vfs > total_vfs)
+		return -ERANGE;
+
+	if (num_vfs == cur_vfs)
+		return count;		/* no change */
+
+	if (num_vfs == 0) {
+		/* disable VFs */
+		ret = hinic_pci_sriov_configure(pdev, 0);
+		if (ret < 0)
+			return ret;
+		return count;
+	}
+
+	/* enable VFs */
+	if (cur_vfs) {
+		nic_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+			 cur_vfs, num_vfs);
+		return -EBUSY;
+	}
+
+	ret = hinic_pci_sriov_configure(pdev, num_vfs);
+	if (ret < 0)
+		return ret;
+
+	if (ret != num_vfs)
+		nic_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+			 num_vfs, ret);
+
+	return count;
+}
+
+#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */
+
+int hinic_pci_sriov_disable(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+	struct hinic_sriov_info *sriov_info;
+	u16 tmp_vfs;
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+	/* if SR-IOV is already disabled then nothing will be done */
+	if (!sriov_info->sriov_enabled)
+		return 0;
+
+	if (test_and_set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) {
+		nic_err(&sriov_info->pdev->dev,
+			"SR-IOV disable is in progress, please wait\n");
+		return -EPERM;
+	}
+
+	/* If our VFs are assigned we cannot shut down SR-IOV
+	 * without causing issues, so just leave the hardware
+	 * available but disabled
+	 */
+	if (pci_vfs_assigned(sriov_info->pdev)) {
+		clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+		nic_warn(&sriov_info->pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+		return -EPERM;
+	}
+	sriov_info->sriov_enabled = false;
+
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(sriov_info->pdev);
+
+	tmp_vfs = (u16)sriov_info->num_vfs;
+	sriov_info->num_vfs = 0;
+	hinic_deinit_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+			   OS_VF_ID_TO_HW(tmp_vfs - 1));
+
+	clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+#endif
+
+	return 0;
+}
+
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+	struct hinic_sriov_info *sriov_info;
+	int err = 0;
+	int pre_existing_vfs = 0;
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+	if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
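+		/* the HINIC_SRIOV_ENABLE bit serializes concurrent
+		 * enable/disable requests: if it was already set, another
+		 * SR-IOV reconfiguration is still running
+		 */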
+		nic_err(&sriov_info->pdev->dev,
+			"SR-IOV enable is in progress, please wait, num_vfs %d\n",
+			num_vfs);
+		return -EPERM;
+	}
+
+	pre_existing_vfs = pci_num_vf(sriov_info->pdev);
+
+	if (num_vfs > pci_sriov_get_totalvfs(sriov_info->pdev)) {
+		clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+		return -ERANGE;
+	}
+	if (pre_existing_vfs && pre_existing_vfs != num_vfs) {
+		err = hinic_pci_sriov_disable(sriov_info->pdev);
+		if (err) {
+			clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+			return err;
+		}
+	} else if (pre_existing_vfs == num_vfs) {
+		clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+		return num_vfs;
+	}
+
+	err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+			       OS_VF_ID_TO_HW((u16)num_vfs - 1));
+	if (err) {
+		nic_err(&sriov_info->pdev->dev,
+			"Failed to init VF in hardware before enabling SR-IOV, error %d\n",
+			err);
+		clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+		return err;
+	}
+
+	err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+	if (err) {
+		nic_err(&sriov_info->pdev->dev,
+			"Failed to enable SR-IOV, error %d\n", err);
+		clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+		return err;
+	}
+
+	sriov_info->sriov_enabled = true;
+	sriov_info->num_vfs = num_vfs;
+	clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+	return num_vfs;
+#else
+
+	return 0;
+#endif
+}
+
+static bool hinic_is_support_sriov_configure(struct pci_dev *pdev)
+{
+	enum hinic_init_state state = hinic_get_init_state(pdev);
+	struct hinic_sriov_info *sriov_info;
+
+	if (state < HINIC_INIT_STATE_NIC_INITED) {
+		nic_err(&pdev->dev, "NIC device not initialized, SR-IOV configuration is not supported\n");
+		return false;
+	}
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+	if (FUNC_SRIOV_FIX_NUM_VF(sriov_info->hwdev)) {
+		nic_err(&pdev->dev, "Changing the SR-IOV configuration is not supported\n");
+		return false;
+	}
+
+	return true;
+}
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	struct hinic_sriov_info *sriov_info;
+
+	if (!hinic_is_support_sriov_configure(dev))
+		return -EFAULT;
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+	if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+		return -EFAULT;
+
+	if (!num_vfs)
+		return hinic_pci_sriov_disable(dev);
+	else
+		return hinic_pci_sriov_enable(dev, num_vfs);
+}
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+	struct hinic_nic_dev *adapter = netdev_priv(netdev);
+	struct hinic_sriov_info *sriov_info;
+	int err;
+
+	if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
+		nicif_err(adapter, drv, netdev,
+			  "Current function doesn't support setting the VF MAC address\n");
+		return -EOPNOTSUPP;
+	}
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+	if (!is_valid_ether_addr(mac) ||
+	    vf >= sriov_info->num_vfs)
+		return -EINVAL;
+
+	err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+	if (err)
+		return err;
+
+	nic_info(&sriov_info->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf);
+	nic_info(&sriov_info->pdev->dev, "Reload the VF driver to make this change effective.");
+
+	return 0;
+}
+
+#ifdef IFLA_VF_MAX
+static int set_hw_vf_vlan(struct hinic_sriov_info *sriov_info,
+			  u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
+{
+	int err = 0;
+	u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
+
+	if (vlan || qos) {
+		if (cur_vlanprio) {
+			err = hinic_kill_vf_vlan(sriov_info->hwdev,
+						 OS_VF_ID_TO_HW(vf));
+			if (err) {
+				nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d old vlan %d\n",
+					vf, old_vlan);
+				return err;
+			}
+		}
+		err = hinic_add_vf_vlan(sriov_info->hwdev,
+					OS_VF_ID_TO_HW(vf), vlan, qos);
+		if (err) {
+			nic_err(&sriov_info->pdev->dev, "Failed to add vf %d new vlan %d\n",
+				vf, vlan);
+			return err;
+		}
+	} else {
+		err = hinic_kill_vf_vlan(sriov_info->hwdev, OS_VF_ID_TO_HW(vf));
+		if (err) {
+			nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d vlan %d\n",
+				vf, old_vlan);
+			return err;
+		}
+	}
+
+	return hinic_update_mac_vlan(sriov_info->hwdev, old_vlan, vlan,
+				     OS_VF_ID_TO_HW(vf));
+}
+
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+			  __be16 vlan_proto)
+#else
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+#endif
+{
+	struct hinic_nic_dev *adapter = netdev_priv(netdev);
+	struct hinic_sriov_info *sriov_info;
+	u16 vlanprio, cur_vlanprio;
+
+	if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
+		nicif_err(adapter, drv, netdev,
+			  "Current function doesn't support setting the VF VLAN\n");
+		return -EOPNOTSUPP;
+	}
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+	if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+		return -EINVAL;
+#ifdef IFLA_VF_VLAN_INFO_MAX
+	if (vlan_proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+#endif
+	vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+	cur_vlanprio = hinic_vf_info_vlanprio(sriov_info->hwdev,
+					      OS_VF_ID_TO_HW(vf));
+	/* duplicate request, so just return success */
+	if (vlanprio == cur_vlanprio)
+		return 0;
+
+	return set_hw_vf_vlan(sriov_info, cur_vlanprio, vf, vlan, qos);
+}
+#endif
+
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+	struct hinic_nic_dev *adapter = netdev_priv(netdev);
+	struct hinic_sriov_info *sriov_info;
+	int err = 0;
+	bool cur_spoofchk;
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+	if (vf >= sriov_info->num_vfs)
+		return -EINVAL;
+
+	cur_spoofchk = hinic_vf_info_spoofchk(sriov_info->hwdev,
+					      OS_VF_ID_TO_HW(vf));
+	/* same request, so just return success */
+	if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+		return 0;
+
+	err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+				    OS_VF_ID_TO_HW(vf), setting);
+
+	if (!err) {
+		nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s succeeded\n",
+			   vf, setting ? "on" : "off");
+	} else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+		nicif_err(adapter, drv, netdev,
+			  "Current firmware doesn't support setting VF spoofchk, please upgrade to the latest firmware version\n");
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+#endif
+
+#ifdef HAVE_NDO_SET_VF_TRUST
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+	struct hinic_nic_dev *adapter = netdev_priv(netdev);
+	struct hinic_sriov_info *sriov_info;
+	int err = 0;
+	bool cur_trust;
+
+	sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+	if (vf >= sriov_info->num_vfs)
+		return -EINVAL;
+
+	cur_trust = hinic_vf_info_trust(sriov_info->hwdev,
+					OS_VF_ID_TO_HW(vf));
+	/* same request, so just return success */
+	if ((setting && cur_trust) || (!setting && !cur_trust))
+		return 0;
+
+	err = hinic_set_vf_trust(sriov_info->hwdev,
+				 OS_VF_ID_TO_HW(vf), setting);
+	if (!err)
+		nicif_info(adapter, drv, netdev, "Set VF %d trusted %s succeeded\n",
+			   vf, setting ? "on" : "off");
+	else
+		nicif_err(adapter, drv, netdev, "Failed to set VF %d trusted %s\n",
+			  vf, setting ?
"on" : "off"); + + return err; +} +#endif + +int hinic_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + if (vf >= sriov_info->num_vfs) + return -EINVAL; + + hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi); + + return 0; +} + +/** + * hinic_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link: required link state + * Return: 0 - success, negative - failure + * Set the link state of a specified VF, regardless of physical link state + */ +int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + static const char * const vf_link[] = {"auto", "enable", "disable"}; + int err; + + if (FUNC_FORCE_LINK_UP(adapter->hwdev)) { + nicif_err(adapter, drv, netdev, + "Current function don't support to set vf link state\n"); + return -EOPNOTSUPP; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + /* validate the request */ + if (vf_id >= sriov_info->num_vfs) { + nicif_err(adapter, drv, netdev, + "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + err = hinic_set_vf_link_state(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf_id), link); + + if (!err) + nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n", + vf_id, vf_link[link]); + + return err; +} + +#define HINIC_TX_RATE_TABLE_FULL 12 + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int hinic_ndo_set_vf_bw(struct net_device *netdev, + int vf, int min_tx_rate, int max_tx_rate) +#else +int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + struct hinic_sriov_info *sriov_info; +#ifndef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + int min_tx_rate = 0; +#endif + u8 link_status = 0; + u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_100000}; + int err = 0; + + if (!FUNC_SUPPORT_RATE_LIMIT(adapter->hwdev)) { + nicif_err(adapter, drv, netdev, + "Current function don't support to set vf rate limit\n"); + return -EOPNOTSUPP; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + + /* verify VF is active */ + if (vf >= sriov_info->num_vfs) { + nicif_err(adapter, drv, netdev, "VF number must be less than %d\n", + sriov_info->num_vfs); + return -EINVAL; + } + + if (max_tx_rate < min_tx_rate) { + nicif_err(adapter, drv, netdev, "Invalid rate, max rate %d must greater than min rate %d\n", + max_tx_rate, min_tx_rate); + return -EINVAL; + } + + err = hinic_get_link_state(adapter->hwdev, &link_status); + if (err) { + nicif_err(adapter, drv, netdev, + "Get link status failed when set vf tx rate\n"); + return -EIO; + } + + if (!link_status) { + nicif_err(adapter, drv, netdev, + "Link status must be up when set vf tx rate\n"); + return -EINVAL; + } + + err = hinic_get_port_info(adapter->hwdev, &port_info); + if (err || port_info.speed > LINK_SPEED_100GB) + return -EIO; + + /* rate limit cannot be less than 0 and greater than link speed */ + if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) { + nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %d]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + err = 
hinic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf), + max_tx_rate, min_tx_rate); + if (err) { + nicif_err(adapter, drv, netdev, + "Unable to set VF %d max rate %d min rate %d%s\n", + vf, max_tx_rate, min_tx_rate, + err == HINIC_TX_RATE_TABLE_FULL ? + ", tx rate profile is full" : ""); + return -EIO; + } + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + nicif_info(adapter, drv, netdev, + "Set VF %d max tx rate %d min tx rate %d successfully\n", + vf, max_tx_rate, min_tx_rate); +#else + nicif_info(adapter, drv, netdev, + "Set VF %d tx rate %d successfully\n", + vf, max_tx_rate); +#endif + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h new file mode 100644 index 000000000000..ef17460b3ec5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_SRIOV_H +#define HINIC_SRIOV_H + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) +ssize_t hinic_sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hinic_sriov_numvfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hinic_sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +enum hinic_sriov_state { + HINIC_SRIOV_DISABLE, + HINIC_SRIOV_ENABLE, + HINIC_FUNC_REMOVE, +}; + +struct hinic_sriov_info { + struct pci_dev *pdev; + void *hwdev; + bool sriov_enabled; + unsigned int num_vfs; + unsigned long state; +}; + +int hinic_pci_sriov_disable(struct pci_dev *dev); +int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs); +int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef IFLA_VF_MAX +int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +#ifdef IFLA_VF_VLAN_INFO_MAX +int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto); +#else +int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); +#endif + +int hinic_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif + +int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int hinic_ndo_set_vf_bw(struct net_device *netdev, + int vf, int min_tx_rate, int max_tx_rate); +#else +int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate); +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#endif +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 
0e13d1c7e474..abe9c672eef4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -2,21 +2,26 @@ /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * */ -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + #include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include -#include +#include +#include +#include #include #include #include @@ -24,33 +29,234 @@ #include #include #include +#include +#include +#include -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" -#include "hinic_dev.h" +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_nic_io.h" +#include "hinic_nic_dev.h" +#include "hinic_qp.h" #include "hinic_tx.h" +#include "hinic_dbg.h" -#define TX_IRQ_NO_PENDING 0 -#define TX_IRQ_NO_COALESC 0 -#define TX_IRQ_NO_LLI_TIMER 0 -#define TX_IRQ_NO_CREDIT 0 -#define TX_IRQ_NO_RESEND_TIMER 0 +#define MIN_SKB_LEN 32 +#define MAX_PAYLOAD_OFFSET 221 -#define CI_UPDATE_NO_PENDING 0 -#define CI_UPDATE_NO_COALESC 0 +#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) -#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) +#define TXQ_STATS_INC(txq, field) \ +{ \ + u64_stats_update_begin(&txq->txq_stats.syncp); \ + txq->txq_stats.field++; \ + u64_stats_update_end(&txq->txq_stats.syncp); \ +} -#define MIN_SKB_LEN 17 +void hinic_txq_get_stats(struct hinic_txq *txq, + struct hinic_txq_stats *stats) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + unsigned int start; -#define MAX_PAYLOAD_OFFSET 221 -#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + stats->bytes = txq_stats->bytes; + stats->packets = txq_stats->packets; + stats->busy = txq_stats->busy; + stats->wake = txq_stats->wake; + stats->dropped = txq_stats->dropped; + stats->big_frags_pkts = txq_stats->big_frags_pkts; + stats->big_udp_pkts = txq_stats->big_udp_pkts; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} -union hinic_l3 { +void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats) +{ + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->bytes = 0; + txq_stats->packets = 0; + txq_stats->busy = 0; + txq_stats->wake = 0; + txq_stats->dropped = 0; + txq_stats->big_frags_pkts = 0; + txq_stats->big_udp_pkts = 0; + + txq_stats->ufo_pkt_unsupport = 0; + txq_stats->ufo_linearize_err = 0; + txq_stats->ufo_alloc_skb_err = 0; + txq_stats->skb_pad_err = 0; + txq_stats->frag_len_overflow = 0; + txq_stats->offload_cow_skb_err = 0; + txq_stats->alloc_cpy_frag_err = 0; + txq_stats->map_cpy_frag_err = 0; + txq_stats->map_frag_err = 0; + txq_stats->frag_size_err = 0; + u64_stats_update_end(&txq_stats->syncp); +} + +static void txq_stats_init(struct hinic_txq *txq) +{ + struct 
hinic_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_init(&txq_stats->syncp); + hinic_txq_clean_stats(txq_stats); +} + +inline void hinic_set_buf_desc(struct hinic_sq_bufdesc *buf_descs, + dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = cpu_to_be32(upper_32_bits(addr)); + buf_descs->lo_addr = cpu_to_be32(lower_32_bits(addr)); + buf_descs->len = cpu_to_be32(len); +} + +static int tx_map_skb(struct hinic_nic_dev *nic_dev, struct sk_buff *skb, + struct hinic_txq *txq, struct hinic_tx_info *tx_info, + struct hinic_sq_bufdesc *buf_descs, u16 skb_nr_frags) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_dma_len *dma_len = tx_info->dma_len; + skb_frag_t *frag = NULL; + u16 base_nr_frags; + int j, i = 0; + int node, err = 0; + u32 nsize, cpy_nsize = 0; + u8 *vaddr, *cpy_buff = NULL; + + if (unlikely(skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)) { + for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) + cpy_nsize += + skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); + if (!cpy_nsize) { + TXQ_STATS_INC(txq, alloc_cpy_frag_err); + return -EINVAL; + } + + node = dev_to_node(&nic_dev->pdev->dev); + if (node == NUMA_NO_NODE) + cpy_buff = kzalloc(cpy_nsize, + GFP_ATOMIC | __GFP_NOWARN); + else + cpy_buff = kzalloc_node(cpy_nsize, + GFP_ATOMIC | __GFP_NOWARN, + node); + + if (!cpy_buff) { + TXQ_STATS_INC(txq, alloc_cpy_frag_err); + return -ENOMEM; + } + + tx_info->cpy_buff = cpy_buff; + + for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + nsize = skb_frag_size(frag); + + vaddr = _kc_kmap_atomic(skb_frag_page(frag)); + memcpy(cpy_buff, vaddr + frag->bv_offset, nsize); + _kc_kunmap_atomic(vaddr); + cpy_buff += nsize; + } + } + + dma_len[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_len[0].dma)) { + TXQ_STATS_INC(txq, map_frag_err); + err = -EFAULT; + goto map_single_err; + } + dma_len[0].len = skb_headlen(skb); + hinic_set_buf_desc(&buf_descs[0], dma_len[0].dma, + dma_len[0].len); + + if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) + base_nr_frags = HINIC_MAX_SKB_NR_FRAGE - 1; + else + base_nr_frags = skb_nr_frags; + + for (i = 0; i < base_nr_frags; ) { + frag = &(skb_shinfo(skb)->frags[i]); + nsize = skb_frag_size(frag); + i++; + dma_len[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + nsize, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_len[i].dma)) { + TXQ_STATS_INC(txq, map_frag_err); + i--; + err = -EFAULT; + goto frag_map_err; + } + dma_len[i].len = nsize; + + hinic_set_buf_desc(&buf_descs[i], dma_len[i].dma, + dma_len[i].len); + } + + if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) { + dma_len[HINIC_MAX_SKB_NR_FRAGE].dma = + dma_map_single(&pdev->dev, tx_info->cpy_buff, + cpy_nsize, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, + dma_len[HINIC_MAX_SKB_NR_FRAGE].dma)) { + TXQ_STATS_INC(txq, map_cpy_frag_err); + err = -EFAULT; + goto fusion_map_err; + } + + dma_len[HINIC_MAX_SKB_NR_FRAGE].len = cpy_nsize; + hinic_set_buf_desc(&buf_descs[HINIC_MAX_SKB_NR_FRAGE], + dma_len[HINIC_MAX_SKB_NR_FRAGE].dma, + dma_len[HINIC_MAX_SKB_NR_FRAGE].len); + } + + return 0; + +fusion_map_err: +frag_map_err: + for (j = 0; j < i;) { + j++; + dma_unmap_page(&pdev->dev, dma_len[j].dma, + dma_len[j].len, DMA_TO_DEVICE); + } + dma_unmap_single(&pdev->dev, dma_len[0].dma, dma_len[0].len, + DMA_TO_DEVICE); + +map_single_err: + kfree(tx_info->cpy_buff); + tx_info->cpy_buff = NULL; + + return err; +} + +static inline void tx_unmap_skb(struct hinic_nic_dev *nic_dev, + struct sk_buff 
*skb, + struct hinic_dma_len *dma_len, + u16 valid_nr_frags) +{ + struct pci_dev *pdev = nic_dev->pdev; + int i; + u16 nr_frags = valid_nr_frags; + + if (nr_frags > HINIC_MAX_SKB_NR_FRAGE) + nr_frags = HINIC_MAX_SKB_NR_FRAGE; + + for (i = 0; i < nr_frags; ) { + i++; + dma_unmap_page(&pdev->dev, + dma_len[i].dma, + dma_len[i].len, DMA_TO_DEVICE); + } + + dma_unmap_single(&pdev->dev, dma_len[0].dma, + dma_len[0].len, DMA_TO_DEVICE); +} + +union hinic_ip { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; @@ -62,184 +268,64 @@ union hinic_l4 { unsigned char *hdr; }; -enum hinic_offload_type { - TX_OFFLOAD_TSO = BIT(0), - TX_OFFLOAD_CSUM = BIT(1), - TX_OFFLOAD_VLAN = BIT(2), - TX_OFFLOAD_INVALID = BIT(3), -}; +#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) -/** - * hinic_txq_clean_stats - Clean the statistics of specific queue - * @txq: Logical Tx Queue - **/ -void hinic_txq_clean_stats(struct hinic_txq *txq) -{ - struct hinic_txq_stats *txq_stats = &txq->txq_stats; - - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->pkts = 0; - txq_stats->bytes = 0; - txq_stats->tx_busy = 0; - txq_stats->tx_wake = 0; - txq_stats->tx_dropped = 0; - txq_stats->big_frags_pkts = 0; - u64_stats_update_end(&txq_stats->syncp); -} - -/** - * hinic_txq_get_stats - get statistics of Tx Queue - * @txq: Logical Tx Queue - * @stats: return updated stats here - **/ -void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) -{ - struct hinic_txq_stats *txq_stats = &txq->txq_stats; - unsigned int start; - - u64_stats_update_begin(&stats->syncp); - do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - stats->pkts = txq_stats->pkts; - stats->bytes = txq_stats->bytes; - stats->tx_busy = txq_stats->tx_busy; - stats->tx_wake = txq_stats->tx_wake; - stats->tx_dropped = txq_stats->tx_dropped; - stats->big_frags_pkts = txq_stats->big_frags_pkts; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); -} - -/** - * txq_stats_init - Initialize the statistics of specific queue - * @txq: Logical Tx Queue - **/ -static void txq_stats_init(struct hinic_txq *txq) -{ - struct hinic_txq_stats *txq_stats = &txq->txq_stats; - - u64_stats_init(&txq_stats->syncp); - hinic_txq_clean_stats(txq); -} - -/** - * tx_map_skb - dma mapping for skb and return sges - * @nic_dev: nic device - * @skb: the skb - * @sges: returned sges - * - * Return 0 - Success, negative - Failure - **/ -static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, - struct hinic_sge *sges) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - skb_frag_t *frag; - dma_addr_t dma_addr; - int i, j; - - dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_addr)) { - dev_err(&pdev->dev, "Failed to map Tx skb data\n"); - return -EFAULT; - } - - hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb)); - - for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - - dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_addr)) { - dev_err(&pdev->dev, "Failed to map Tx skb frag\n"); - goto err_tx_map; - } - - hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag)); - } - - return 0; - -err_tx_map: - for (j = 0; j < i; j++) - dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]), - sges[j + 1].len, DMA_TO_DEVICE); - - 
dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
-			 DMA_TO_DEVICE);
-	return -EFAULT;
-}
-
-/**
- * tx_unmap_skb - unmap the dma address of the skb
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: the sges that are connected to the skb
- **/
-static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
-			 struct hinic_sge *sges)
-{
-	struct hinic_hwdev *hwdev = nic_dev->hwdev;
-	struct hinic_hwif *hwif = hwdev->hwif;
-	struct pci_dev *pdev = hwif->pdev;
-	int i;
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
-		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
-			       sges[i + 1].len, DMA_TO_DEVICE);
-
-	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
-			 DMA_TO_DEVICE);
-}
-
-static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_ip *ip,
 				 union hinic_l4 *l4,
-				 enum hinic_offload_type offload_type,
-				 enum hinic_l3_offload_type *l3_type,
-				 u8 *l4_proto)
+				 enum tx_offload_type offload_type,
+				 enum sq_l3_type *l3_type, u8 *l4_proto)
 {
-	u8 *exthdr;
+	unsigned char *exthdr;
 
 	if (ip->v4->version == 4) {
 		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
-			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
-			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+			IPV4_PKT_NO_CHKSUM_OFFLOAD : IPV4_PKT_WITH_CHKSUM_OFFLOAD;
 		*l4_proto = ip->v4->protocol;
+
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+		/* inner_transport_header is wrong in CentOS 7.0 and SUSE 12.1 */
+		l4->hdr = ip->hdr + ((u8)ip->v4->ihl << 2);
+#endif
 	} else if (ip->v4->version == 6) {
 		*l3_type = IPV6_PKT;
 		exthdr = ip->hdr + sizeof(*ip->v6);
 		*l4_proto = ip->v6->nexthdr;
 		if (exthdr != l4->hdr) {
-			int start = exthdr - skb->data;
-			__be16 frag_off;
+			__be16 frag_off = 0;
+#ifndef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+			ipv6_skip_exthdr(skb, (int)(exthdr - skb->data),
+					 l4_proto, &frag_off);
+#else
+			int pld_off = 0;
 
-			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+			pld_off = ipv6_skip_exthdr(skb,
+						   (int)(exthdr - skb->data),
+						   l4_proto, &frag_off);
+			l4->hdr = skb->data + pld_off;
+		} else {
+			l4->hdr = exthdr;
+#endif
 		}
 	} else {
-		*l3_type = L3TYPE_UNKNOWN;
+		*l3_type = UNKNOWN_L3TYPE;
 		*l4_proto = 0;
 	}
 }
 
 static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
-			      enum hinic_offload_type offload_type, u8 l4_proto,
-			      enum hinic_l4_offload_type *l4_offload,
+			      enum tx_offload_type offload_type, u8 l4_proto,
+			      enum sq_l4offload_type *l4_offload,
 			      u32 *l4_len, u32 *offset)
 {
-	*l4_offload = OFFLOAD_DISABLE;
 	*offset = 0;
 	*l4_len = 0;
+	*l4_offload = OFFLOAD_DISABLE;
 
 	switch (l4_proto) {
 	case IPPROTO_TCP:
 		*l4_offload = TCP_OFFLOAD_ENABLE;
-		/* doff in unit of 4B */
-		*l4_len = l4->tcp->doff * 4;
+		*l4_len = l4->tcp->doff * 4;	/* doff in unit of 4B */
+
+		/* To stay consistent with TSO, the payload offset begins at the payload */
 		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
 		break;
@@ -256,6 +342,9 @@ static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
 		*l4_offload = SCTP_OFFLOAD_ENABLE;
 		*l4_len = sizeof(struct sctphdr);
 
+		/* To stay consistent with UFO, the payload offset
+		 * begins at the L4 header
+		 */
 		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
 		break;
 
@@ -264,55 +353,168 @@ static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
 	}
 }
 
-static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
+static int hinic_tx_csum(struct hinic_sq_task *task, u32 *queue_info,
+			 struct sk_buff *skb)
+{
+	union hinic_ip ip;
+	union hinic_l4 l4;
+	enum sq_l3_type l3_type;
+	enum sq_l4offload_type l4_offload;
+	u32 network_hdr_len;
+	u32
offset, l4_len;
+	u8 l4_proto;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE)
+	if (skb->encapsulation) {
+		u32 l4_tunnel_len;
+		u32 tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+		ip.hdr = skb_network_header(skb);
+
+		if (ip.v4->version == 4) {
+			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+			l4_proto = ip.v4->protocol;
+		} else if (ip.v4->version == 6) {
+			unsigned char *exthdr;
+			__be16 frag_off;
+
+			l3_type = IPV6_PKT;
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+			tunnel_type = TUNNEL_UDP_CSUM;
+#endif
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			l4.hdr = skb_transport_header(skb);
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		} else {
+			l3_type = UNKNOWN_L3TYPE;
+			l4_proto = IPPROTO_RAW;
+		}
+
+		hinic_task_set_outter_l3(task, l3_type,
+					 skb_network_header_len(skb));
+
+		if (l4_proto == IPPROTO_UDP || l4_proto == IPPROTO_GRE) {
+			l4_tunnel_len = skb_inner_network_offset(skb) -
+					skb_transport_offset(skb);
+			ip.hdr = skb_inner_network_header(skb);
+			l4.hdr = skb_inner_transport_header(skb);
+			network_hdr_len = skb_inner_network_header_len(skb);
+		} else {
+			tunnel_type = NOT_TUNNEL;
+			l4_tunnel_len = 0;
+
+			ip.hdr = skb_inner_network_header(skb);
+			l4.hdr = skb_transport_header(skb);
+			network_hdr_len = skb_network_header_len(skb);
+		}
+
+		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+	} else {
+		ip.hdr = skb_network_header(skb);
+		l4.hdr = skb_transport_header(skb);
+		network_hdr_len = skb_network_header_len(skb);
+	}
+#else
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+	network_hdr_len = skb_network_header_len(skb);
+#endif
+	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM,
+			     &l3_type, &l4_proto);
+
+	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto,
+			  &l4_offload, &l4_len, &offset);
+
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+	/* skb_network_header_len() gets the wrong length here, so
+	 * compute it from the header pointers instead
+	 */
+	if (unlikely(l3_type == UNKNOWN_L3TYPE))
+		network_hdr_len = 0;
+	else
+		network_hdr_len = l4.hdr - ip.hdr;
+
+	/* the payload offset must be set */
+	if (unlikely(!offset)) {
+		if (l3_type == UNKNOWN_L3TYPE)
+			offset = ip.hdr - skb->data;
+		else if (l4_offload == OFFLOAD_DISABLE)
+			offset = ip.hdr - skb->data + network_hdr_len;
+	}
+#endif
+
+	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+	return 1;
+}
+
+static __sum16 csum_magic(union hinic_ip *ip, unsigned short proto)
+{
+	return (ip->v4->version == 4) ?
csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); } -static int offload_tso(struct hinic_sq_task *task, u32 *queue_info, - struct sk_buff *skb) +static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info, + struct sk_buff *skb) { - u32 offset, l4_len, ip_identify, network_hdr_len; - enum hinic_l3_offload_type l3_offload; - enum hinic_l4_offload_type l4_offload; - union hinic_l3 ip; + union hinic_ip ip; union hinic_l4 l4; + enum sq_l3_type l3_type; + enum sq_l4offload_type l4_offload; + u32 network_hdr_len; + u32 offset, l4_len; + u32 ip_identify = 0; u8 l4_proto; + int err; if (!skb_is_gso(skb)) return 0; - if (skb_cow_head(skb, 0) < 0) - return -EPROTONOSUPPORT; + err = skb_cow_head(skb, 0); + if (err < 0) + return err; +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) if (skb->encapsulation) { - u32 gso_type = skb_shinfo(skb)->gso_type; - u32 tunnel_type = 0; u32 l4_tunnel_len; + u32 tunnel_type = 0; + u32 gso_type = skb_shinfo(skb)->gso_type; ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); network_hdr_len = skb_inner_network_header_len(skb); - if (ip.v4->version == 4) { - ip.v4->tot_len = 0; - l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD; - } else if (ip.v4->version == 6) { - l3_offload = IPV6_PKT; - } else { - l3_offload = 0; - } + if (ip.v4->version == 4) + l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + else if (ip.v4->version == 6) + l3_type = IPV6_PKT; + else + l3_type = 0; - hinic_task_set_outter_l3(task, l3_offload, + hinic_task_set_outter_l3(task, l3_type, skb_network_header_len(skb)); if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); tunnel_type = TUNNEL_UDP_CSUM; } else if (gso_type & SKB_GSO_UDP_TUNNEL) { +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + if (l3_type == IPV6_PKT) { + tunnel_type = TUNNEL_UDP_CSUM; + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + } else { + tunnel_type = TUNNEL_UDP_NO_CSUM; + } +#else tunnel_type = TUNNEL_UDP_NO_CSUM; +#endif } l4_tunnel_len = skb_inner_network_offset(skb) - @@ -326,507 +528,841 @@ static int offload_tso(struct hinic_sq_task *task, u32 *queue_info, l4.hdr = skb_transport_header(skb); network_hdr_len = skb_network_header_len(skb); } +#else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_network_header_len(skb); +#endif - /* initialize inner IP header fields */ - if (ip.v4->version == 4) - ip.v4->tot_len = 0; - else - ip.v6->payload_len = 0; + get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, + &l3_type, &l4_proto); - get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload, - &l4_proto); - - hinic_task_set_inner_l3(task, l3_offload, network_hdr_len); - - ip_identify = 0; if (l4_proto == IPPROTO_TCP) l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO + else if (l4_proto == IPPROTO_UDP && ip.v4->version == 6) + ip_identify = be32_to_cpu(skb_shinfo(skb)->ip6_frag_id); + /* changed to big endiant is just to keep the same code style here */ +#endif - get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload, - &l4_len, &offset); + get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, + &l4_offload, &l4_len, &offset); - hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset, - ip_identify, skb_shinfo(skb)->gso_size); +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + if (unlikely(l3_type == UNKNOWN_L3TYPE)) + network_hdr_len = 0; + else + network_hdr_len = l4.hdr - ip.hdr; - return 1; -} - -static int 
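
Note what csum_magic() above actually computes: it seeds the TCP/UDP checksum field with only the pseudo-header sum (the length argument is 0), so the TSO engine can add the per-segment payload sum in hardware. A minimal standalone sketch of that IPv4 pseudo-header fold follows; the helper name and the host-order interface are illustrative only, since the driver itself relies on the kernel's csum_tcpudp_magic() and csum_ipv6_magic():

/* Hedged sketch: IPv4 pseudo-header checksum folded to 16 bits.
 * Byte-order handling is elided for clarity; the real kernel helpers
 * operate on __be32 addresses.
 */
static unsigned short pseudo_csum_v4(unsigned int saddr, unsigned int daddr,
				     unsigned char proto, unsigned int len)
{
	unsigned long sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* source address */
	sum += (daddr >> 16) + (daddr & 0xffff);	/* destination address */
	sum += proto;					/* zero byte + protocol */
	sum += len;					/* L4 length, 0 when seeding for TSO */

	while (sum >> 16)				/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (unsigned short)~sum;
}
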
offload_csum(struct hinic_sq_task *task, u32 *queue_info, - struct sk_buff *skb) -{ - enum hinic_l4_offload_type l4_offload; - u32 offset, l4_len, network_hdr_len; - enum hinic_l3_offload_type l3_type; - union hinic_l3 ip; - union hinic_l4 l4; - u8 l4_proto; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (skb->encapsulation) { - u32 l4_tunnel_len; - - ip.hdr = skb_network_header(skb); - - if (ip.v4->version == 4) - l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; - else if (ip.v4->version == 6) - l3_type = IPV6_PKT; - else - l3_type = L3TYPE_UNKNOWN; - - hinic_task_set_outter_l3(task, l3_type, - skb_network_header_len(skb)); - - l4_tunnel_len = skb_inner_network_offset(skb) - - skb_transport_offset(skb); - - hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM, - l4_tunnel_len); - - ip.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - network_hdr_len = skb_inner_network_header_len(skb); - } else { - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - network_hdr_len = skb_network_header_len(skb); + if (unlikely(!offset)) { + if (l3_type == UNKNOWN_L3TYPE) + offset = ip.hdr - skb->data; + else if (l4_offload == OFFLOAD_DISABLE) + offset = ip.hdr - skb->data + network_hdr_len; } - - get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type, - &l4_proto); +#endif hinic_task_set_inner_l3(task, l3_type, network_hdr_len); - get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload, - &l4_len, &offset); - - hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset); + hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, + offset, ip_identify, skb_shinfo(skb)->gso_size); return 1; } -static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info, - u16 vlan_tag, u16 vlan_pri) +static enum tx_offload_type hinic_tx_offload(struct sk_buff *skb, + struct hinic_sq_task *task, + u32 *queue_info, u8 avd_flag) { - task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) | - HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD); - - *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI); -} - -static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task, - u32 *queue_info) -{ - enum hinic_offload_type offload = 0; + enum tx_offload_type offload = 0; + int tso_cs_en; u16 vlan_tag; - int enabled; - enabled = offload_tso(task, queue_info, skb); - if (enabled > 0) { + task->pkt_info0 = 0; + task->pkt_info1 = 0; + task->pkt_info2 = 0; + + tso_cs_en = hinic_tso(task, queue_info, skb); + if (tso_cs_en < 0) { + offload = TX_OFFLOAD_INVALID; + return offload; + } else if (tso_cs_en) { offload |= TX_OFFLOAD_TSO; - } else if (enabled == 0) { - enabled = offload_csum(task, queue_info, skb); - if (enabled) - offload |= TX_OFFLOAD_CSUM; } else { - return -EPROTONOSUPPORT; + tso_cs_en = hinic_tx_csum(task, queue_info, skb); + if (tso_cs_en) + offload |= TX_OFFLOAD_CSUM; } if (unlikely(skb_vlan_tag_present(skb))) { vlan_tag = skb_vlan_tag_get(skb); - offload_vlan(task, queue_info, vlan_tag, - vlan_tag >> VLAN_PRIO_SHIFT); + hinic_set_vlan_tx_offload(task, queue_info, vlan_tag, + vlan_tag >> VLAN_PRIO_SHIFT); offload |= TX_OFFLOAD_VLAN; } - if (offload) - hinic_task_set_l2hdr(task, skb_network_offset(skb)); + if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > + MAX_PAYLOAD_OFFSET)) { + offload = TX_OFFLOAD_INVALID; + return offload; + } - /* payload offset should not more than 221 */ - if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) > - MAX_PAYLOAD_OFFSET) { + if (avd_flag == HINIC_TX_UFO_AVD) + 
task->pkt_info0 |= SQ_TASK_INFO0_SET(1, UFO_AVD); + + if (offload) { + hinic_task_set_tx_offload_valid(task, skb_network_offset(skb)); + task->pkt_info0 = be32_to_cpu(task->pkt_info0); + task->pkt_info1 = be32_to_cpu(task->pkt_info1); + task->pkt_info2 = be32_to_cpu(task->pkt_info2); + } + + return offload; +} + +static inline void __get_pkt_stats(struct hinic_tx_info *tx_info, + struct sk_buff *skb) +{ + u32 ihs, hdr_len; + + if (skb_is_gso(skb)) { +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ + defined(HAVE_SK_BUFF_ENCAPSULATION)) + if (skb->encapsulation) { +#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET + ihs = skb_inner_transport_offset(skb) + + inner_tcp_hdrlen(skb); +#else + ihs = (skb_inner_transport_header(skb) - skb->data) + + inner_tcp_hdrlen(skb); +#endif + } else { +#endif +#endif + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ + defined(HAVE_SK_BUFF_ENCAPSULATION)) + } +#endif +#endif + hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs; + tx_info->num_bytes = skb->len + (u64)hdr_len; + + } else { + tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; + } + + tx_info->num_pkts = 1; +} + +inline u8 hinic_get_vlan_pri(struct sk_buff *skb) +{ + u16 vlan_tci = 0; + int err; + + err = vlan_get_tag(skb, &vlan_tci); + if (err) + return 0; + + return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +} + +static void *__try_to_get_wqe(struct net_device *netdev, u16 q_id, + int wqebb_cnt, u16 *pi, u8 *owner) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + void *wqe = NULL; + + netif_stop_subqueue(netdev, q_id); + /* We need to check again in a case another CPU has just + * made room available. 
+ */ + if (unlikely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= + wqebb_cnt)) { + netif_start_subqueue(netdev, q_id); + /* there have enough wqebbs after queue is wake up */ + wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id, + wqebb_cnt, pi, owner); + } + + return wqe; +} + +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO +static int hinic_ufo_avoidance(struct sk_buff *skb, struct sk_buff **ufo_skb, + struct net_device *netdev, struct hinic_txq *txq) +{ + __be32 ip6_frag_id = skb_shinfo(skb)->ip6_frag_id; + u16 gso_size = skb_shinfo(skb)->gso_size; + struct frag_hdr ipv6_fhdr = {0}; + union hinic_ip ip; + u32 l2_l3_hlen, l3_payload_len, extra_len; + u32 skb_len, nsize, frag_data_len = 0; + u16 ipv6_fhl_ext = sizeof(struct frag_hdr); + __wsum frag_data_csum = 0; + u16 frag_offset = 0; + u8 *tmp = NULL; + int err; + + ip.hdr = skb_network_header(skb); + if (ip.v6->version == 6 && ip.v6->nexthdr != NEXTHDR_UDP) { + TXQ_STATS_INC(txq, ufo_pkt_unsupport); return -EPROTONOSUPPORT; } - /* mss should not less than 80 */ - if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) { - *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); - *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS); + /* linearize the big ufo packet */ + err = skb_linearize(skb); + if (err) { + TXQ_STATS_INC(txq, ufo_linearize_err); + return err; + } + + extra_len = skb->len - HINIC_GSO_MAX_SIZE; + l2_l3_hlen = (u32)(skb_transport_header(skb) - skb_mac_header(skb)); + l3_payload_len = skb->len - l2_l3_hlen; + frag_data_len += extra_len + + ((l3_payload_len - extra_len) % gso_size); + skb_len = skb->len - frag_data_len; + + /* ipv6 need external frag header */ + if (ip.v6->version == 6) + nsize = frag_data_len + l2_l3_hlen + ipv6_fhl_ext; + else + nsize = frag_data_len + l2_l3_hlen; + + *ufo_skb = netdev_alloc_skb_ip_align(netdev, nsize); + if (!*ufo_skb) { + TXQ_STATS_INC(txq, ufo_alloc_skb_err); + return -ENOMEM; + } + + /* copy l2_l3 layer header from original skb to ufo_skb */ + skb_copy_from_linear_data_offset(skb, 0, skb_put(*ufo_skb, l2_l3_hlen), + l2_l3_hlen); + + /* reserve ipv6 external frag header for ufo_skb */ + if (ip.v6->version == 6) { + ipv6_fhdr.nexthdr = NEXTHDR_UDP; + ipv6_fhdr.reserved = 0; + frag_offset = (u16)(l3_payload_len - frag_data_len); + ipv6_fhdr.frag_off = htons(frag_offset); + ipv6_fhdr.identification = ip6_frag_id; + tmp = skb_put(*ufo_skb, ipv6_fhl_ext); + memcpy(tmp, &ipv6_fhdr, ipv6_fhl_ext); + } + + /* split original one skb to two parts: skb and ufo_skb */ + skb_split(skb, (*ufo_skb), skb_len); + + /* modify skb ip total len */ + ip.hdr = skb_network_header(skb); + if (ip.v4->version == 4) + ip.v4->tot_len = htons(ntohs(ip.v4->tot_len) - + (u16)frag_data_len); + else + ip.v6->payload_len = htons(ntohs(ip.v6->payload_len) - + (u16)frag_data_len); + + /* set ufo_skb network header */ + skb_set_network_header(*ufo_skb, skb_network_offset(skb)); + + /* set vlan offload feature */ + (*ufo_skb)->vlan_tci = skb->vlan_tci; + + /* modify ufo_skb ip total len, flag, frag_offset and compute csum */ + ip.hdr = skb_network_header(*ufo_skb); + if (ip.v4->version == 4) { + /* compute ufo_skb data csum and put into skb udp csum */ + tmp = (*ufo_skb)->data + l2_l3_hlen; + frag_data_csum = csum_partial(tmp, (int)frag_data_len, 0); + udp_hdr(skb)->check = + (__sum16)csum_add(~csum_fold(frag_data_csum), + udp_hdr(skb)->check); + + ip.v4->tot_len = htons((u16)(skb_network_header_len(skb) + + frag_data_len)); + ip.v4->frag_off = 0; + frag_offset = (u16)((l3_payload_len - frag_data_len) 
>> 3); + ip.v4->frag_off = htons(frag_offset); + ip_send_check(ip.v4); + } else { + /* compute ufo_skb data csum and put into skb udp csum */ + tmp = (*ufo_skb)->data + l2_l3_hlen + ipv6_fhl_ext; + frag_data_csum = csum_partial(tmp, (int)frag_data_len, 0); + udp_hdr(skb)->check = + (__sum16)csum_add(~csum_fold(frag_data_csum), + udp_hdr(skb)->check); + + ip.v6->payload_len = htons(ipv6_fhl_ext + (u16)frag_data_len); + ip.v6->nexthdr = NEXTHDR_FRAGMENT; } return 0; } +#endif -netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +#define HINIC_FRAG_STATUS_OK 0 +#define HINIC_FRAG_STATUS_IGNORE 1 + +static netdev_tx_t hinic_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct hinic_txq *txq, + u8 *flag, u8 avd_flag) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 prod_idx, q_id = skb->queue_mapping; - struct netdev_queue *netdev_txq; - int nr_sges, err = NETDEV_TX_OK; - struct hinic_sq_wqe *sq_wqe; - unsigned int wqe_size; - struct hinic_txq *txq; - struct hinic_qp *qp; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_tx_info *tx_info; + struct hinic_sq_wqe *wqe = NULL; + enum tx_offload_type offload = 0; + u16 q_id = txq->q_id; + u32 queue_info = 0; + u8 owner = 0; + u16 pi = 0; + int err, wqebb_cnt; + u16 num_sge = 0; + u16 original_nr_frags; + u16 new_nr_frags; + u16 i; + int frag_err = HINIC_FRAG_STATUS_OK; - txq = &nic_dev->txqs[q_id]; - qp = container_of(txq->sq, struct hinic_qp, sq); + /* skb->dev will not initialized when calling netdev_alloc_skb_ip_align + * and parameter of length is largger then PAGE_SIZE(under redhat7.3), + * but skb->dev will be used in vlan_get_tag or somewhere + */ + if (unlikely(!skb->dev)) + skb->dev = netdev; - if (skb->len < MIN_SKB_LEN) { - if (skb_pad(skb, MIN_SKB_LEN - skb->len)) { - netdev_err(netdev, "Failed to pad skb\n"); - goto update_error_stats; + if (unlikely(skb->len < MIN_SKB_LEN)) { + if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) { + TXQ_STATS_INC(txq, skb_pad_err); + goto tx_skb_pad_err; } skb->len = MIN_SKB_LEN; } - nr_sges = skb_shinfo(skb)->nr_frags + 1; - if (nr_sges > 17) { - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.big_frags_pkts++; - u64_stats_update_end(&txq->txq_stats.syncp); - } + original_nr_frags = skb_shinfo(skb)->nr_frags; + new_nr_frags = original_nr_frags; - if (nr_sges > txq->max_sges) { - netdev_err(netdev, "Too many Tx sges\n"); - goto skb_error; - } + /* If size of lastest frags are all zero, should ignore this frags. + * If size of some frag in the middle is zero, should drop this skb. + */ + for (i = 0; i < original_nr_frags; i++) { + if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_OK) + continue; - err = tx_map_skb(nic_dev, skb, txq->sges); - if (err) - goto skb_error; - - wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); - - sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); - if (!sq_wqe) { - netif_stop_subqueue(netdev, qp->q_id); - - /* Check for the case free_tx_poll is called in another cpu - * and we stopped the subqueue after free_tx_poll check. 
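
To make the split arithmetic in hinic_ufo_avoidance() above easier to follow: everything beyond the 64K hardware limit, plus whatever is needed to keep the first part's payload a multiple of gso_size, moves into the second skb. A sketch of that computation (the helper name is hypothetical; the expressions mirror extra_len, l3_payload_len, frag_data_len and skb_len in the function above):

/* Hypothetical helper mirroring the split-point computation in
 * hinic_ufo_avoidance(): the over-limit bytes plus the remainder
 * modulo gso_size are carried by the second skb.
 */
static unsigned int ufo_first_part_len(unsigned int skb_len,
				       unsigned int l2_l3_hlen,
				       unsigned int gso_size,
				       unsigned int gso_max)
{
	unsigned int extra = skb_len - gso_max;		/* bytes over the HW limit */
	unsigned int payload = skb_len - l2_l3_hlen;	/* L3 payload length */
	unsigned int tail = extra + (payload - extra) % gso_size;

	return skb_len - tail;				/* length left in skb 1 */
}

With the driver's values, ufo_first_part_len(skb->len, l2_l3_hlen, gso_size, HINIC_GSO_MAX_SIZE) yields the skb_len that is handed to skb_split() above.
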
- */ - sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); - if (sq_wqe) { - netif_wake_subqueue(nic_dev->netdev, qp->q_id); - goto process_sq_wqe; + if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_OK) { + frag_err = HINIC_FRAG_STATUS_IGNORE; + new_nr_frags = i + 1; + continue; } - tx_unmap_skb(nic_dev, skb, txq->sges); + if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_IGNORE) + continue; - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.tx_busy++; - u64_stats_update_end(&txq->txq_stats.syncp); - err = NETDEV_TX_BUSY; - wqe_size = 0; - goto flush_skbs; + if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_IGNORE) { + TXQ_STATS_INC(txq, frag_size_err); + goto tx_drop_pkts; + } } -process_sq_wqe: - hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); + num_sge = new_nr_frags + 1; - err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info); - if (err) - goto offload_error; + /* if skb->len is more than 65536B but num_sge is 1, + * driver will drop it + */ + if (unlikely(skb->len > HINIC_GSO_MAX_SIZE && num_sge == 1)) { + TXQ_STATS_INC(txq, frag_len_overflow); + goto tx_drop_pkts; + } - hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); + /* if sge number more than 17, driver will set 17 sges */ + if (unlikely(num_sge > HINIC_MAX_SQ_SGE)) { + TXQ_STATS_INC(txq, big_frags_pkts); + num_sge = HINIC_MAX_SQ_SGE; + } -flush_skbs: - netdev_txq = netdev_get_tx_queue(netdev, q_id); - if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq))) - hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); + wqebb_cnt = HINIC_SQ_WQEBB_CNT(num_sge); + if (likely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= + wqebb_cnt)) { + if (likely(wqebb_cnt == 1)) { + hinic_update_sq_pi(nic_dev->hwdev, q_id, + wqebb_cnt, &pi, &owner); + wqe = txq->tx_info[pi].wqe; + } else { + wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id, + wqebb_cnt, &pi, &owner); + } - return err; + } else { + wqe = __try_to_get_wqe(netdev, q_id, wqebb_cnt, &pi, &owner); + if (likely(!wqe)) { + TXQ_STATS_INC(txq, busy); + return NETDEV_TX_BUSY; + } + } -offload_error: - hinic_sq_return_wqe(txq->sq, wqe_size); - tx_unmap_skb(nic_dev, skb, txq->sges); + tx_info = &txq->tx_info[pi]; + tx_info->skb = skb; + tx_info->wqebb_cnt = wqebb_cnt; + tx_info->valid_nr_frags = new_nr_frags; -skb_error: + __get_pkt_stats(tx_info, skb); + + offload = hinic_tx_offload(skb, &wqe->task, &queue_info, avd_flag); + if (unlikely(offload == TX_OFFLOAD_INVALID)) { + hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner); + TXQ_STATS_INC(txq, offload_cow_skb_err); + goto tx_drop_pkts; + } + + err = tx_map_skb(nic_dev, skb, txq, tx_info, wqe->buf_descs, + new_nr_frags); + if (err) { + hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner); + goto tx_drop_pkts; + } + + hinic_prepare_sq_ctrl(&wqe->ctrl, queue_info, num_sge, owner); + + hinic_send_sq_wqe(nic_dev->hwdev, q_id, wqe, wqebb_cnt, + nic_dev->sq_cos_mapping[hinic_get_vlan_pri(skb)]); + + return NETDEV_TX_OK; + +tx_drop_pkts: dev_kfree_skb_any(skb); -update_error_stats: +tx_skb_pad_err: + TXQ_STATS_INC(txq, dropped); + + *flag = HINIC_TX_DROPED; + return NETDEV_TX_OK; +} + +netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + struct hinic_txq *txq; + u8 flag = 0; + + if (unlikely(!nic_dev->heart_status)) { + 
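/* Standard netdev contract (assumed here, not spelled out in this
+	 * patch): a frame the driver chooses to discard must still
+	 * complete with NETDEV_TX_OK, because NETDEV_TX_BUSY would make
+	 * the stack requeue an skb that is freed right below.
+	 */
+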
dev_kfree_skb_any(skb); + HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); + return NETDEV_TX_OK; + } + + txq = &nic_dev->txqs[q_id]; + + return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD); +} + +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + struct hinic_txq *txq; + u8 flag = 0; +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO + struct sk_buff *ufo_skb; + int err; +#endif + + if (unlikely(!netif_carrier_ok(netdev) || + !nic_dev->heart_status)) { + dev_kfree_skb_any(skb); + HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); + return NETDEV_TX_OK; + } + + if (unlikely(q_id >= nic_dev->num_qps)) { + txq = &nic_dev->txqs[0]; + HINIC_NIC_STATS_INC(nic_dev, tx_invalid_qid); + goto tx_drop_pkts; + } + txq = &nic_dev->txqs[q_id]; + +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO + /* UFO avoidance */ + if (unlikely((skb->len > HINIC_GSO_MAX_SIZE) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP))) { + TXQ_STATS_INC(txq, big_udp_pkts); + + err = hinic_ufo_avoidance(skb, &ufo_skb, netdev, txq); + if (err) + goto tx_drop_pkts; + + err = hinic_send_one_skb(skb, netdev, txq, &flag, + HINIC_TX_UFO_AVD); + if (err == NETDEV_TX_BUSY) { + dev_kfree_skb_any(ufo_skb); + return NETDEV_TX_BUSY; + } + + if (flag == HINIC_TX_DROPED) { + nicif_err(nic_dev, drv, netdev, "Send first skb failed for HINIC_TX_SKB_DROPED\n"); + dev_kfree_skb_any(ufo_skb); + return NETDEV_TX_OK; + } + + err = hinic_send_one_skb(ufo_skb, netdev, txq, &flag, + HINIC_TX_NON_AVD); + if (err == NETDEV_TX_BUSY) { + nicif_err(nic_dev, drv, netdev, "Send second skb failed for NETDEV_TX_BUSY\n"); + dev_kfree_skb_any(ufo_skb); + return NETDEV_TX_OK; + } + + return NETDEV_TX_OK; + } +#endif + + return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD); + +tx_drop_pkts: + dev_kfree_skb_any(skb); u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.tx_dropped++; + txq->txq_stats.dropped++; u64_stats_update_end(&txq->txq_stats.syncp); return NETDEV_TX_OK; } -/** - * tx_free_skb - unmap and free skb - * @nic_dev: nic device - * @skb: the skb - * @sges: the sges that are connected to the skb - **/ -static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, - struct hinic_sge *sges) +static inline void tx_free_skb(struct hinic_nic_dev *nic_dev, + struct sk_buff *skb, + struct hinic_tx_info *tx_info) { - tx_unmap_skb(nic_dev, skb, sges); + tx_unmap_skb(nic_dev, skb, tx_info->dma_len, tx_info->valid_nr_frags); + kfree(tx_info->cpy_buff); + tx_info->cpy_buff = NULL; dev_kfree_skb_any(skb); } -/** - * free_all_rx_skbs - free all skbs in tx queue - * @txq: tx queue - **/ static void free_all_tx_skbs(struct hinic_txq *txq) { - struct hinic_dev *nic_dev = netdev_priv(txq->netdev); - struct hinic_sq *sq = txq->sq; - struct hinic_sq_wqe *sq_wqe; - unsigned int wqe_size; - struct sk_buff *skb; - int nr_sges; + struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_tx_info *tx_info; u16 ci; + int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev, + txq->q_id) + 1; - while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) { - sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci); - if (!sq_wqe) - break; + while (free_wqebbs < txq->q_depth) { + ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id); - nr_sges = skb_shinfo(skb)->nr_frags + 1; + tx_info = &txq->tx_info[ci]; - hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + tx_free_skb(nic_dev, tx_info->skb, tx_info); - 
hinic_sq_put_wqe(sq, wqe_size); + hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id, + tx_info->wqebb_cnt); - tx_free_skb(nic_dev, skb, txq->free_sges); + free_wqebbs += tx_info->wqebb_cnt; } } -/** - * free_tx_poll - free finished tx skbs in tx queue that connected to napi - * @napi: napi - * @budget: number of tx - * - * Return 0 - Success, negative - Failure - **/ -static int free_tx_poll(struct napi_struct *napi, int budget) +int hinic_tx_poll(struct hinic_txq *txq, int budget) { - struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); - struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); - struct hinic_dev *nic_dev = netdev_priv(txq->netdev); - struct netdev_queue *netdev_txq; - struct hinic_sq *sq = txq->sq; - struct hinic_wq *wq = sq->wq; - struct hinic_sq_wqe *sq_wqe; - unsigned int wqe_size; - int nr_sges, pkts = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev); struct sk_buff *skb; - u64 tx_bytes = 0; - u16 hw_ci, sw_ci; + struct hinic_tx_info *tx_info; + u64 tx_bytes = 0, wake = 0; + int pkts = 0, nr_pkts = 0, wqebb_cnt = 0; + u16 hw_ci, sw_ci = 0, q_id = txq->q_id; + + hw_ci = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id); + dma_rmb(); + sw_ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id); do { - hw_ci = HW_CONS_IDX(sq) & wq->mask; + tx_info = &txq->tx_info[sw_ci]; - /* Reading a WQEBB to get real WQE size and consumer index. */ - sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); - if ((!sq_wqe) || - (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) + /* Whether all of the wqebb of this wqe is completed */ + if (hw_ci == sw_ci || ((hw_ci - sw_ci) & + txq->q_mask) < tx_info->wqebb_cnt) { break; - - /* If this WQE have multiple WQEBBs, we will read again to get - * full size WQE. - */ - if (wqe_size > wq->wqebb_size) { - sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci); - if (unlikely(!sq_wqe)) - break; } - tx_bytes += skb->len; + sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask; + prefetch(&txq->tx_info[sw_ci]); + + wqebb_cnt += tx_info->wqebb_cnt; + + skb = tx_info->skb; + tx_bytes += tx_info->num_bytes; + nr_pkts += tx_info->num_pkts; pkts++; - nr_sges = skb_shinfo(skb)->nr_frags + 1; + tx_free_skb(nic_dev, skb, tx_info); - hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + } while (likely(pkts < budget)); - hinic_sq_put_wqe(sq, wqe_size); + hinic_update_sq_local_ci(nic_dev->hwdev, q_id, wqebb_cnt); - tx_free_skb(nic_dev, skb, txq->free_sges); - } while (pkts < budget); - - if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) && - hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { - netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); + if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) && + hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= 1 && + test_bit(HINIC_INTF_UP, &nic_dev->flags))) { + struct netdev_queue *netdev_txq = + netdev_get_tx_queue(txq->netdev, q_id); __netif_tx_lock(netdev_txq, smp_processor_id()); - - netif_wake_subqueue(nic_dev->netdev, qp->q_id); - + /* To avoid re-waking subqueue with xmit_frame */ + if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) { + netif_wake_subqueue(nic_dev->netdev, q_id); + wake++; + } __netif_tx_unlock(netdev_txq); - - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.tx_wake++; - u64_stats_update_end(&txq->txq_stats.syncp); } u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.bytes += tx_bytes; - txq->txq_stats.pkts += pkts; + txq->txq_stats.packets += nr_pkts; + txq->txq_stats.wake += wake; 
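
These counter updates sit inside the usual u64_stats_sync writer bracket; any reader, for example an ndo_get_stats64 implementation, has to spin on the paired fetch/retry sequence. A minimal reader sketch, assuming the HAVE_NDO_GET_STATS64 branch of the syncp definition in hinic_tx.h and the standard <linux/u64_stats_sync.h> API:

/* Hedged sketch of the reader side pairing with the
 * u64_stats_update_begin()/_end() writer bracket above.
 */
static void txq_stats_snapshot(struct hinic_txq_stats *stats,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}
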
u64_stats_update_end(&txq->txq_stats.syncp); - if (pkts < budget) { - napi_complete(napi); - hinic_hwdev_set_msix_state(nic_dev->hwdev, - sq->msix_entry, - HINIC_MSIX_ENABLE); - return pkts; + return pkts; +} + +int hinic_setup_tx_wqe(struct hinic_txq *txq) +{ + struct net_device *netdev = txq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_sq_wqe *wqe; + struct hinic_tx_info *tx_info; + u16 pi = 0; + int i; + u8 owner = 0; + + for (i = 0; i < txq->q_depth; i++) { + tx_info = &txq->tx_info[i]; + + wqe = hinic_get_sq_wqe(nic_dev->hwdev, txq->q_id, + 1, &pi, &owner); + if (!wqe) { + nicif_err(nic_dev, drv, netdev, "Failed to get SQ wqe\n"); + break; + } + + tx_info->wqe = wqe; } - return budget; + hinic_return_sq_wqe(nic_dev->hwdev, txq->q_id, txq->q_depth, owner); + + return i; } -static void tx_napi_add(struct hinic_txq *txq, int weight) +int hinic_setup_all_tx_resources(struct net_device *netdev) { - netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight); - napi_enable(&txq->napi); -} - -static void tx_napi_del(struct hinic_txq *txq) -{ - napi_disable(&txq->napi); - netif_napi_del(&txq->napi); -} - -static irqreturn_t tx_irq(int irq, void *data) -{ - struct hinic_txq *txq = data; - struct hinic_dev *nic_dev; - - nic_dev = netdev_priv(txq->netdev); - - /* Disable the interrupt until napi will be completed */ - hinic_hwdev_set_msix_state(nic_dev->hwdev, - txq->sq->msix_entry, - HINIC_MSIX_DISABLE); - - hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); - - napi_schedule(&txq->napi); - return IRQ_HANDLED; -} - -static int tx_request_irq(struct hinic_txq *txq) -{ - struct hinic_dev *nic_dev = netdev_priv(txq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_sq *sq = txq->sq; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_txq *txq; + u64 tx_info_sz; + u16 i, q_id; int err; - tx_napi_add(txq, nic_dev->tx_weight); + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + txq = &nic_dev->txqs[q_id]; + tx_info_sz = txq->q_depth * sizeof(*txq->tx_info); + if (!tx_info_sz) { + nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size tx%d info\n", + q_id); + err = -EINVAL; + goto init_txq_err; + } - hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, - TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, - TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT, - TX_IRQ_NO_RESEND_TIMER); + txq->tx_info = kzalloc(tx_info_sz, GFP_KERNEL); + if (!txq->tx_info) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate Tx:%d info\n", + q_id); + err = -ENOMEM; + goto init_txq_err; + } - err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); - if (err) { - dev_err(&pdev->dev, "Failed to request Tx irq\n"); - tx_napi_del(txq); - return err; - } - - return 0; -} - -static void tx_free_irq(struct hinic_txq *txq) -{ - struct hinic_sq *sq = txq->sq; - - free_irq(sq->irq, txq); - tx_napi_del(txq); -} - -/** - * hinic_init_txq - Initialize the Tx Queue - * @txq: Logical Tx Queue - * @sq: Hardware Tx Queue to connect the Logical queue with - * @netdev: network device to connect the Logical queue with - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, - struct net_device *netdev) -{ - struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - int err, irqname_len; - size_t sges_size; - - txq->netdev = netdev; - 
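
One design point worth calling out from hinic_setup_tx_wqe() above: it walks the queue once at setup, caches each slot's WQE address in tx_info[], and then returns every WQEBB, which is what lets the common single-WQEBB transmit in hinic_send_one_skb() skip hinic_get_sq_wqe() entirely. The lookup it buys is essentially this sketch (the defensive masking assumes q_depth is a power of two, which hinic_wq_allocate() enforces):

/* Hedged sketch: cached WQE address lookup for a producer index. */
static inline void *cached_sq_wqe(struct hinic_txq *txq, u16 pi)
{
	return txq->tx_info[pi & txq->q_mask].wqe;
}
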
txq->sq = sq; - - txq_stats_init(txq); - - txq->max_sges = HINIC_MAX_SQ_BUFDESCS; - - sges_size = txq->max_sges * sizeof(*txq->sges); - txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); - if (!txq->sges) - return -ENOMEM; - - sges_size = txq->max_sges * sizeof(*txq->free_sges); - txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); - if (!txq->free_sges) { - err = -ENOMEM; - goto err_alloc_free_sges; - } - - irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1; - txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); - if (!txq->irq_name) { - err = -ENOMEM; - goto err_alloc_irqname; - } - - sprintf(txq->irq_name, "hinic_txq%d", qp->q_id); - - err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, - CI_UPDATE_NO_COALESC); - if (err) - goto err_hw_ci; - - err = tx_request_irq(txq); - if (err) { - netdev_err(netdev, "Failed to request Tx irq\n"); - goto err_req_tx_irq; + err = hinic_setup_tx_wqe(txq); + if (err != txq->q_depth) { + nicif_err(nic_dev, drv, netdev, "Failed to setup Tx:%d wqe\n", + q_id); + q_id++; + goto init_txq_err; + } } return 0; -err_req_tx_irq: -err_hw_ci: - devm_kfree(&netdev->dev, txq->irq_name); +init_txq_err: + for (i = 0; i < q_id; i++) { + txq = &nic_dev->txqs[i]; + kfree(txq->tx_info); + } -err_alloc_irqname: - devm_kfree(&netdev->dev, txq->free_sges); - -err_alloc_free_sges: - devm_kfree(&netdev->dev, txq->sges); return err; } -/** - * hinic_clean_txq - Clean the Tx Queue - * @txq: Logical Tx Queue - **/ -void hinic_clean_txq(struct hinic_txq *txq) +void hinic_free_all_tx_resources(struct net_device *netdev) { - struct net_device *netdev = txq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_txq *txq; + u16 q_id; - tx_free_irq(txq); - - free_all_tx_skbs(txq); - - devm_kfree(&netdev->dev, txq->irq_name); - devm_kfree(&netdev->dev, txq->free_sges); - devm_kfree(&netdev->dev, txq->sges); + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + txq = &nic_dev->txqs[q_id]; + free_all_tx_skbs(txq); + kfree(txq->tx_info); + } +} + +void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int up; + + for (up = HINIC_DCB_UP_MAX - 1; up >= 0; up--) + nic_dev->sq_cos_mapping[up] = nic_dev->default_cos_id; +} + +int hinic_sq_cos_mapping(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_state dcb_state = {0}; + u8 default_cos = 0; + int err; + + if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + err = hinic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state); + if (err) { + hinic_info(nic_dev, drv, "Failed to get vf default cos\n"); + return err; + } + + default_cos = dcb_state.default_cos; + nic_dev->default_cos_id = default_cos; + hinic_set_sq_default_cos(nic_dev->netdev, default_cos); + } else { + default_cos = nic_dev->default_cos_id; + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + memcpy(nic_dev->sq_cos_mapping, nic_dev->up_cos, + sizeof(nic_dev->sq_cos_mapping)); + else + hinic_set_sq_default_cos(nic_dev->netdev, default_cos); + + dcb_state.dcb_on = !!test_bit(HINIC_DCB_ENABLE, + &nic_dev->flags); + dcb_state.default_cos = default_cos; + memcpy(dcb_state.up_cos, nic_dev->sq_cos_mapping, + sizeof(dcb_state.up_cos)); + + err = hinic_set_dcb_state(nic_dev->hwdev, &dcb_state); + if (err) + hinic_info(nic_dev, drv, "Failed to set vf default cos\n"); + } + + return err; +} + +int hinic_alloc_txqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = 
netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_txq *txq; + u16 q_id, num_txqs = nic_dev->max_qps; + u64 txq_size; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + if (!txq_size) { + nic_err(&pdev->dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; + } + + nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); + + if (!nic_dev->txqs) { + nic_err(&pdev->dev, "Failed to allocate txqs\n"); + return -ENOMEM; + } + + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->sq_depth; + txq->q_mask = nic_dev->sq_depth - 1; + + txq_stats_init(txq); + } + + return 0; +} + +void hinic_free_txqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->txqs); +} + +/* should stop transmit any packets before calling this function */ +#define HINIC_FLUSH_QUEUE_TIMEOUT 1000 + +bool hinic_get_hw_handle_status(void *hwdev, u16 q_id) +{ + u16 sw_pi = 0, hw_ci = 0; + + sw_pi = hinic_dbg_get_sq_pi(hwdev, q_id); + hw_ci = hinic_get_sq_hw_ci(hwdev, q_id); + + return sw_pi == hw_ci; +} + +int hinic_stop_sq(struct hinic_txq *txq) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev); + unsigned long timeout; + int err; + + timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies; + do { + if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id)) + return 0; + + usleep_range(900, 1000); + } while (time_before(jiffies, timeout)); + + /* force hardware to drop packets */ + timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies; + do { + if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id)) + return 0; + + err = hinic_force_drop_tx_pkt(nic_dev->hwdev); + if (err) + break; + + usleep_range(9900, 10000); + } while (time_before(jiffies, timeout)); + + /* Avoid msleep takes too long and get a fake result */ + if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id)) + return 0; + + return -EFAULT; +} + +void hinic_flush_txqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid; + int err; + + for (qid = 0; qid < nic_dev->num_qps; qid++) { + err = hinic_stop_sq(&nic_dev->txqs[qid]); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to stop sq%d\n", qid); + } } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h index f158b7db7fb8..4fedc02d1c34 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -2,53 +2,126 @@ /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
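
hinic_stop_sq() above is a two-phase drain: poll until the hardware consumer index catches up with the software producer index, then ask the hardware to drop outstanding packets and poll again, with a final re-check to guard against oversleeping. The shape generalizes as below; the is_idle/force callbacks stand in for hinic_get_hw_handle_status() and hinic_force_drop_tx_pkt() and are assumptions of this sketch, which relies on the standard <linux/jiffies.h> and <linux/delay.h> helpers:

/* Hedged sketch of a poll-then-force drain loop with jiffies timeouts. */
static int drain_queue(bool (*is_idle)(void *ctx), int (*force)(void *ctx),
		       void *ctx, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (is_idle(ctx))
			return 0;
		usleep_range(900, 1000);
	} while (time_before(jiffies, timeout));

	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (is_idle(ctx))
			return 0;
		if (force(ctx))			/* give up if forcing fails */
			break;
		usleep_range(9900, 10000);
	} while (time_before(jiffies, timeout));

	return is_idle(ctx) ? 0 : -EFAULT;	/* re-check after sleeping */
}
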
+ * */ #ifndef HINIC_TX_H #define HINIC_TX_H -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_qp.h" +enum tx_offload_type { + TX_OFFLOAD_TSO = BIT(0), + TX_OFFLOAD_CSUM = BIT(1), + TX_OFFLOAD_VLAN = BIT(2), + TX_OFFLOAD_INVALID = BIT(3), +}; struct hinic_txq_stats { - u64 pkts; - u64 bytes; - u64 tx_busy; - u64 tx_wake; - u64 tx_dropped; + u64 packets; + u64 bytes; + u64 busy; + u64 wake; + u64 dropped; u64 big_frags_pkts; + u64 big_udp_pkts; - struct u64_stats_sync syncp; + /* Subdivision statistics show in private tool */ + u64 ufo_pkt_unsupport; + u64 ufo_linearize_err; + u64 ufo_alloc_skb_err; + u64 skb_pad_err; + u64 frag_len_overflow; + u64 offload_cow_skb_err; + u64 alloc_cpy_frag_err; + u64 map_cpy_frag_err; + u64 map_frag_err; + u64 frag_size_err; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#else + struct u64_stats_sync_empty syncp; +#endif +}; + +struct hinic_dma_len { + dma_addr_t dma; + u32 len; +}; + +#define MAX_SGE_NUM_PER_WQE 17 + +struct hinic_tx_info { + struct sk_buff *skb; + + int wqebb_cnt; + + int num_sge; + void *wqe; + u8 *cpy_buff; + u16 valid_nr_frags; + u16 num_pkts; + u64 num_bytes; + struct hinic_dma_len dma_len[MAX_SGE_NUM_PER_WQE]; }; struct hinic_txq { - struct net_device *netdev; - struct hinic_sq *sq; + struct net_device *netdev; - struct hinic_txq_stats txq_stats; - - int max_sges; - struct hinic_sge *sges; - struct hinic_sge *free_sges; - - char *irq_name; - struct napi_struct napi; + u16 q_id; + u16 q_depth; + u16 q_mask; + struct hinic_txq_stats txq_stats; + u64 last_moder_packets; + u64 last_moder_bytes; + struct hinic_tx_info *tx_info; }; -void hinic_txq_clean_stats(struct hinic_txq *txq); +enum hinic_tx_xmit_status { + HINIC_TX_OK = 0, + HINIC_TX_DROPED = 1, + HINIC_TX_BUSY = 2, +}; -void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats); +enum hinic_tx_avd_type { + HINIC_TX_NON_AVD = 0, + HINIC_TX_UFO_AVD = 1, +}; + +void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats); + +void hinic_txq_get_stats(struct hinic_txq *txq, + struct hinic_txq_stats *stats); + +netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, - struct net_device *netdev); +int hinic_setup_all_tx_resources(struct net_device *netdev); -void hinic_clean_txq(struct hinic_txq *txq); +void hinic_free_all_tx_resources(struct net_device *netdev); + +void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id); + +int hinic_sq_cos_mapping(struct net_device *netdev); + +int hinic_alloc_txqs(struct net_device *netdev); + +void hinic_free_txqs(struct net_device *netdev); + +int hinic_tx_poll(struct hinic_txq *txq, int budget); + +u8 hinic_get_vlan_pri(struct sk_buff *skb); + +void hinic_flush_txqs(struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_wq.c new file mode 100644 index 000000000000..9198c1264ab1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_wq.h" +#include "hinic_qe_def.h" + +#define WQS_MAX_NUM_BLOCKS 128 +#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ + sizeof((wqs)->free_blocks[0])) + +static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx, + u32 *block_idx); + +static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx, + u32 block_idx); + +static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr, + u64 **shadow_vaddr, u64 page_sz) +{ + dma_addr_t dma_addr = 0; + + *vaddr = dma_zalloc_coherent(handle, page_sz, &dma_addr, + GFP_KERNEL); + if (!*vaddr) { + sdk_err(handle, "Failed to allocate dma to wqs page\n"); + return -ENOMEM; + } + + if (!ADDR_4K_ALIGNED(dma_addr)) { + sdk_err(handle, "Cla is not 4k aligned!\n"); + goto shadow_vaddr_err; + } + + *paddr = (u64)dma_addr; + + /* use vzalloc for big mem, shadow_vaddr only used at initialization */ + *shadow_vaddr = vzalloc(page_sz); + if (!*shadow_vaddr) { + sdk_err(handle, "Failed to allocate shadow page vaddr\n"); + goto shadow_vaddr_err; + } + + return 0; + +shadow_vaddr_err: + dma_free_coherent(handle, page_sz, *vaddr, dma_addr); + return -ENOMEM; +} + +static int wqs_allocate_page(struct hinic_wqs *wqs, u32 page_idx) +{ + return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx], + &wqs->page_paddr[page_idx], + &wqs->shadow_page_vaddr[page_idx], + WQS_PAGE_SIZE); +} + +static void wqs_free_page(struct hinic_wqs *wqs, u32 page_idx) +{ + dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE, + wqs->page_vaddr[page_idx], + (dma_addr_t)wqs->page_paddr[page_idx]); + vfree(wqs->shadow_page_vaddr[page_idx]); +} + +static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) +{ + return queue_alloc_page(cmdq_pages->dev_hdl, + &cmdq_pages->cmdq_page_vaddr, + &cmdq_pages->cmdq_page_paddr, + &cmdq_pages->cmdq_shadow_page_vaddr, + CMDQ_PAGE_SIZE); +} + +static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) +{ + dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE, + cmdq_pages->cmdq_page_vaddr, + (dma_addr_t)cmdq_pages->cmdq_page_paddr); + vfree(cmdq_pages->cmdq_shadow_page_vaddr); +} + +static int alloc_wqes_shadow(struct hinic_wq *wq) +{ + u64 size; + + /* if wq->max_wqe_size == 0, we don't need to alloc shadow */ + if (wq->max_wqe_size <= wq->wqebb_size) + return 0; + + size = (u64)wq->num_q_pages * wq->max_wqe_size; + wq->shadow_wqe = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_wqe) { + pr_err("Failed to allocate shadow wqe\n"); + return -ENOMEM; + } + + size = wq->num_q_pages * sizeof(wq->prod_idx); + wq->shadow_idx = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_idx) { + pr_err("Failed to allocate shadow index\n"); + goto shadow_idx_err; + } + + return 0; + +shadow_idx_err: + kfree(wq->shadow_wqe); + return -ENOMEM; +} + +static void free_wqes_shadow(struct hinic_wq *wq) +{ + if (wq->max_wqe_size <= wq->wqebb_size) + return; + + kfree(wq->shadow_idx); + kfree(wq->shadow_wqe); +} + +static void free_wq_pages(void *handle, struct hinic_wq *wq, + u32 num_q_pages) +{ + u32 i; + + for (i = 0; i < num_q_pages; 
i++) + hinic_dma_free_coherent_align(handle, &wq->mem_align[i]); + + free_wqes_shadow(wq); + + wq->block_vaddr = NULL; + wq->shadow_block_vaddr = NULL; + + kfree(wq->mem_align); +} + +static int alloc_wq_pages(void *dev_hdl, struct hinic_wq *wq) +{ + struct hinic_dma_addr_align *mem_align; + u64 *vaddr, *paddr; + u32 i, num_q_pages; + int err; + + vaddr = wq->shadow_block_vaddr; + paddr = wq->block_vaddr; + + num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; + if (num_q_pages > WQ_MAX_PAGES) { + sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n", + num_q_pages); + return -EINVAL; + } + + if (num_q_pages & (num_q_pages - 1)) { + sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n", + num_q_pages); + return -EINVAL; + } + + wq->num_q_pages = num_q_pages; + + err = alloc_wqes_shadow(wq); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wqe shadow\n"); + return err; + } + + wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align), + GFP_KERNEL); + if (!wq->mem_align) { + sdk_err(dev_hdl, "Failed to allocate mem_align\n"); + free_wqes_shadow(wq); + return -ENOMEM; + } + + for (i = 0; i < num_q_pages; i++) { + mem_align = &wq->mem_align[i]; + err = hinic_dma_zalloc_coherent_align(dev_hdl, wq->wq_page_size, + wq->wq_page_size, + GFP_KERNEL, mem_align); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wq page\n"); + goto alloc_wq_pages_err; + } + + *paddr = cpu_to_be64(mem_align->align_paddr); + *vaddr = (u64)mem_align->align_vaddr; + + paddr++; + vaddr++; + } + + return 0; + +alloc_wq_pages_err: + free_wq_pages(dev_hdl, wq, i); + + return -ENOMEM; +} + +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size) +{ + u32 num_wqebbs_per_page; + int err; + + if (wqebb_size == 0) { + sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + if (wq_page_size & (wq_page_size - 1)) { + sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n", + wq_page_size); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n", + num_wqebbs_per_page); + return -EINVAL; + } + + err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n"); + return err; + } + + wq->wqebb_size = wqebb_size; + wq->wq_page_size = wq_page_size; + wq->q_depth = q_depth; + wq->max_wqe_size = max_wqe_size; + wq->num_wqebbs_per_page = num_wqebbs_per_page; + + wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page); + + wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); + wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); + wq->block_paddr = WQ_BASE_PADDR(wqs, wq); + + err = alloc_wq_pages(wqs->dev_hdl, wq); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n"); + goto alloc_wq_pages_err; + } + + atomic_set(&wq->delta, q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + wq->mask = q_depth - 1; + + return 0; + +alloc_wq_pages_err: + wqs_return_block(wqs, wq->page_idx, wq->block_idx); + return err; +} + +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) +{ + free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages); + + wqs_return_block(wqs, wq->page_idx, wq->block_idx); +} + +static int wqs_next_block(struct hinic_wqs *wqs, u32 
*page_idx, + u32 *block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + if (wqs->num_free_blks <= 0) { + spin_unlock(&wqs->alloc_blocks_lock); + return -ENOMEM; + } + wqs->num_free_blks--; + + pos = wqs->alloc_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + *page_idx = wqs->free_blocks[pos].page_idx; + *block_idx = wqs->free_blocks[pos].block_idx; + + wqs->free_blocks[pos].page_idx = 0xFFFFFFFF; + wqs->free_blocks[pos].block_idx = 0xFFFFFFFF; + + spin_unlock(&wqs->alloc_blocks_lock); + + return 0; +} + +static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx, + u32 block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + wqs->num_free_blks++; + + pos = wqs->return_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = block_idx; + + spin_unlock(&wqs->alloc_blocks_lock); +} + +static void init_wqs_blocks_arr(struct hinic_wqs *wqs) +{ + u32 page_idx, blk_idx, pos = 0; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = blk_idx; + pos++; + } + } + + wqs->alloc_blk_pos = 0; + wqs->return_blk_pos = 0; + wqs->num_free_blks = WQS_MAX_NUM_BLOCKS; + spin_lock_init(&wqs->alloc_blocks_lock); +} + +void hinic_wq_wqe_pg_clear(struct hinic_wq *wq) +{ + u64 *block_vaddr; + u32 pg_idx; + + block_vaddr = wq->shadow_block_vaddr; + + atomic_set(&wq->delta, wq->q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + + for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++) + memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size); +} + +int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size) +{ + int i, j, err = -ENOMEM; + + if (q_depth & (q_depth - 1)) { + sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + cmdq_pages->dev_hdl = dev_hdl; + + err = cmdq_allocate_page(cmdq_pages); + if (err) { + sdk_err(dev_hdl, "Failed to allocate CMDQ page\n"); + return err; + } + + for (i = 0; i < cmdq_blocks; i++) { + wq[i].page_idx = 0; + wq[i].block_idx = (u32)i; + wq[i].wqebb_size = wqebb_size; + wq[i].wq_page_size = wq_page_size; + wq[i].q_depth = q_depth; + wq[i].max_wqe_size = max_wqe_size; + wq[i].num_wqebbs_per_page = + ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + wq[i].wqebbs_per_page_shift = + (u32)ilog2(wq[i].num_wqebbs_per_page); + + wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); + wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); + wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); + + err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]); + if (err) { + sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n"); + goto cmdq_block_err; + } + + atomic_set(&wq[i].delta, q_depth); + wq[i].cons_idx = 0; + wq[i].prod_idx = 0; + wq[i].mask = q_depth - 1; + } + + return 0; + +cmdq_block_err: + for (j = 0; j < i; j++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages); + + cmdq_free_page(cmdq_pages); + return err; +} + +void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages); + + cmdq_free_page(cmdq_pages); +} + +static int alloc_page_addr(struct hinic_wqs *wqs) +{ + u64 size = wqs->num_pages * 
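
The wqs_next_block()/wqs_return_block() pair above manages free (page, block) slots as a fixed-capacity ring: two cursors chase each other modulo WQS_MAX_NUM_BLOCKS under alloc_blocks_lock, with a counter guarding underflow. The same structure in miniature (names, the unsigned payload, and the capacity are illustrative; the driver stores page/block index pairs and poisons consumed slots with 0xFFFFFFFF):

/* Hedged sketch of the spinlock-protected free-slot ring used above.
 * The lock must be spin_lock_init()ed and the ring pre-filled first,
 * as init_wqs_blocks_arr() does.
 */
#define RING_SLOTS 128				/* must be a power of two */

struct free_ring {
	spinlock_t lock;
	unsigned int take_pos, give_pos;
	int num_free;
	unsigned int slot[RING_SLOTS];
};

static int ring_take(struct free_ring *r, unsigned int *out)
{
	spin_lock(&r->lock);
	if (r->num_free <= 0) {
		spin_unlock(&r->lock);
		return -ENOMEM;
	}
	r->num_free--;
	*out = r->slot[r->take_pos++ & (RING_SLOTS - 1)];
	spin_unlock(&r->lock);
	return 0;
}

static void ring_give(struct free_ring *r, unsigned int val)
{
	spin_lock(&r->lock);
	r->num_free++;
	r->slot[r->give_pos++ & (RING_SLOTS - 1)] = val;
	spin_unlock(&r->lock);
}
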
sizeof(*wqs->page_paddr); + + wqs->page_paddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_paddr) + return -ENOMEM; + + size = wqs->num_pages * sizeof(*wqs->page_vaddr); + wqs->page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_vaddr) + goto page_vaddr_err; + + size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); + wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->shadow_page_vaddr) + goto page_shadow_vaddr_err; + + return 0; + +page_shadow_vaddr_err: + kfree(wqs->page_vaddr); + +page_vaddr_err: + kfree(wqs->page_paddr); + return -ENOMEM; +} + +static void free_page_addr(struct hinic_wqs *wqs) +{ + kfree(wqs->shadow_page_vaddr); + kfree(wqs->page_vaddr); + kfree(wqs->page_paddr); +} + +int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl) +{ + u32 i, page_idx; + int err; + + wqs->dev_hdl = dev_hdl; + wqs->num_pages = WQ_NUM_PAGES(num_wqs); + + if (alloc_page_addr(wqs)) { + sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + err = wqs_allocate_page(wqs, page_idx); + if (err) { + sdk_err(dev_hdl, "Failed wq page allocation\n"); + goto wq_allocate_page_err; + } + } + + wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL); + if (!wqs->free_blocks) { + err = -ENOMEM; + goto alloc_blocks_err; + } + + init_wqs_blocks_arr(wqs); + return 0; + +alloc_blocks_err: +wq_allocate_page_err: + for (i = 0; i < page_idx; i++) + wqs_free_page(wqs, i); + + free_page_addr(wqs); + return err; +} + +void hinic_wqs_free(struct hinic_wqs *wqs) +{ + u32 page_idx; + + spin_lock_deinit(&wqs->alloc_blocks_lock); + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) + wqs_free_page(wqs, page_idx); + + free_page_addr(wqs); + kfree(wqs->free_blocks); +} + +static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 prod_idx) +{ + u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr; + u32 i, offset; + u16 idx; + + for (i = 0; i < (u32)num_wqebbs; i++) { + offset = i * wq->wqebb_size; + shadow_wqebb_addr = (u8 *)shadow_addr + offset; + + idx = MASKED_WQE_IDX(wq, prod_idx + i); + wqe_page_addr = WQ_PAGE_ADDR(wq, idx); + wqebb_addr = wqe_page_addr + + WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx)); + + memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size); + } +} + +static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 prod_idx) +{ + u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr; + u32 i, offset; + u16 idx; + + for (i = 0; i < (u32)num_wqebbs; i++) { + offset = i * wq->wqebb_size; + shadow_wqebb_addr = (u8 *)shadow_addr + offset; + + idx = MASKED_WQE_IDX(wq, prod_idx + i); + wqe_page_addr = WQ_PAGE_ADDR(wq, idx); + wqebb_addr = wqe_page_addr + + WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx)); + + memcpy(wqebb_addr, shadow_wqebb_addr, wq->wqebb_size); + } +} + +void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index) +{ + return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index); +} + +u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq) +{ + return be64_to_cpu(*wq->block_vaddr); +} + +void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx) +{ + u32 curr_pg, end_pg; + u16 curr_prod_idx, end_prod_idx; + + if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) { + atomic_add(num_wqebbs, &wq->delta); + return NULL; + } + + /* use original cur_pi and end_pi, no need queue depth mask as + * WQE_PAGE_NUM will do num_queue_pages mask + */ + curr_prod_idx = (u16)wq->prod_idx; + wq->prod_idx 
+= num_wqebbs; + + /* end prod index should points to the last wqebb of wqe, + * therefore minus 1 + */ + end_prod_idx = (u16)wq->prod_idx - 1; + + curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); + end_pg = WQE_PAGE_NUM(wq, end_prod_idx); + + *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + /* If we only have one page, still need to get shadown wqe when + * wqe rolling-over page + */ + if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + wq->shadow_idx[curr_pg] = *prod_idx; + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); +} + +void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs) +{ + atomic_add(num_wqebbs, &wq->delta); + wq->cons_idx += num_wqebbs; +} + +void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx) +{ + u32 curr_pg, end_pg; + u16 curr_cons_idx, end_cons_idx; + + if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) + return NULL; + + curr_cons_idx = (u16)wq->cons_idx; + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); + end_pg = WQE_PAGE_NUM(wq, end_cons_idx); + + *cons_idx = curr_cons_idx; + + if (curr_pg != end_pg) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); + + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); +} + +static inline int wqe_shadow(struct hinic_wq *wq, const void *wqe) +{ + void *end_wqe_shadow_addr; + u32 wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; + + end_wqe_shadow_addr = &wq->shadow_wqe[wqe_shadow_size]; + + return WQE_IN_RANGE(wqe, wq->shadow_wqe, end_wqe_shadow_addr); +} + +void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs) +{ + u16 curr_pg; + u16 prod_idx; + + if (wqe_shadow(wq, wqe)) { + curr_pg = WQE_SHADOW_PAGE(wq, wqe); + prod_idx = wq->shadow_idx[curr_pg]; + + copy_wqe_from_shadow(wq, wqe, num_wqebbs, prod_idx); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_wq.h new file mode 100644 index 000000000000..35d134149f4f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
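
hinic_get_wqe() above decides between a direct pointer and the shadow buffer by comparing the page of the first and last WQEBB, and hinic_write_wqe() later copies a shadow WQE back into place. The WQE_PAGE_NUM()/WQE_PAGE_OFF() arithmetic is not shown in this hunk; the power-of-two constraints enforced in hinic_wq_allocate() and the in-code comments imply shift-and-mask forms along these lines (an inference, not the macros' verbatim definitions):

/* Hedged sketch: with num_wqebbs_per_page a power of two, the page
 * number and in-page offset of a WQE index reduce to shift/mask ops.
 */
static inline unsigned int wqe_page_num(const struct hinic_wq *wq, u16 idx)
{
	/* the comment in hinic_get_wqe() notes the real macro also
	 * masks by the (power-of-two) number of queue pages
	 */
	return (idx >> wq->wqebbs_per_page_shift) & (wq->num_q_pages - 1);
}

static inline unsigned int wqe_page_off(const struct hinic_wq *wq, u16 idx)
{
	return (idx & (wq->num_wqebbs_per_page - 1)) * wq->wqebb_size;
}
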
+ * + */ + +#ifndef HINIC_WQ_H +#define HINIC_WQ_H + +struct hinic_free_block { + u32 page_idx; + u32 block_idx; +}; + +struct hinic_wq { + /* The addresses are 64 bit in the HW */ + u64 block_paddr; + u64 *shadow_block_vaddr; + u64 *block_vaddr; + + u32 wqebb_size; + u32 wq_page_size; + u16 q_depth; + u32 max_wqe_size; + u32 num_wqebbs_per_page; + + /* performance: replace mul/div with shift; + * num_wqebbs_per_page must be a power of 2 + */ + u32 wqebbs_per_page_shift; + u32 page_idx; + u32 block_idx; + + u32 num_q_pages; + + struct hinic_dma_addr_align *mem_align; + + int cons_idx; + int prod_idx; + + atomic_t delta; + u16 mask; + + u8 *shadow_wqe; + u16 *shadow_idx; +}; + +struct hinic_cmdq_pages { + /* The addresses are 64 bit in the HW */ + u64 cmdq_page_paddr; + u64 *cmdq_page_vaddr; + u64 *cmdq_shadow_page_vaddr; + + void *dev_hdl; +}; + +struct hinic_wqs { + /* The addresses are 64 bit in the HW */ + u64 *page_paddr; + u64 **page_vaddr; + u64 **shadow_page_vaddr; + + struct hinic_free_block *free_blocks; + u32 alloc_blk_pos; + u32 return_blk_pos; + int num_free_blks; + + /* for allocating blocks */ + spinlock_t alloc_blocks_lock; + + u32 num_pages; + + void *dev_hdl; +}; + +void hinic_wq_wqe_pg_clear(struct hinic_wq *wq); + +int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size); + +void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks); + +int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl); + +void hinic_wqs_free(struct hinic_wqs *wqs); + +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size); + +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); + +void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index); + +u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq); + +void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx); + +void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs); + +void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx); + +void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl.h b/drivers/net/ethernet/huawei/hinic/ossl_knl.h new file mode 100644 index 000000000000..f442e0bd865d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/ossl_knl.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details.
+ * + */ + +#ifndef OSSL_KNL_H +#define OSSL_KNL_H + +#include "ossl_knl_linux.h" + +#if defined(__WIN__) || defined(__VMWARE__) +#define __WIN_OR_VMWARE__ +#endif + +#if defined(__WIN__) || defined(__VMWARE__) || defined(__UEFI__) +#define __WIN_OR_VMWARE_OR_UEFI__ +#endif + +#if (defined(__WIN__) || defined(__VMWARE__)) && !defined(__HIFC__) +#define __WIN_OR_VMWARE_AND_NONHIFC__ +#endif + +#if defined(__WIN__) || defined(__UEFI__) +#define __WIN_OR_UEFI__ +#endif + + + +#define sdk_err(dev, format, ...) \ + dev_err(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) \ + dev_warn(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) \ + dev_notice(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) \ + dev_info(dev, "[COMM]"format, ##__VA_ARGS__) + +#define nic_err(dev, format, ...) \ + dev_err(dev, "[NIC]"format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) \ + dev_warn(dev, "[NIC]"format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) \ + dev_notice(dev, "[NIC]"format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) \ + dev_info(dev, "[NIC]"format, ##__VA_ARGS__) + + +#endif /* OSSL_KNL_H */ diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c new file mode 100644 index 000000000000..6269022c3542 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include + +#include "ossl_knl_linux.h" + +#define OSSL_MINUTE_BASE (60) + +#if (KERNEL_VERSION(2, 6, 39) > LINUX_VERSION_CODE) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ +#if (KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#endif /* < 3.4.0 */ +#if (KERNEL_VERSION(3, 8, 0) > LINUX_VERSION_CODE) +/* + * pci_sriov_get_totalvfs -- get total VFs supported on this device + * @dev: the PCI PF device + * + * For a PCIe device with SRIOV support, return the PCIe + * SRIOV capability value of TotalVFs. Otherwise 0. 
+ */ +int pci_sriov_get_totalvfs(struct pci_dev *dev) +{ + int sriov_cap_pos; + u16 total_vfs = 0; + + if (dev->is_virtfn) + return 0; + + sriov_cap_pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(dev, sriov_cap_pos + PCI_SRIOV_TOTAL_VF, + &total_vfs); + + return total_vfs; +} + +#endif +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) +/* + * pci_vfs_assigned - returns number of VFs are assigned to a guest + * @dev: the PCI device + * + * Returns number of VFs belonging to this device that are assigned to a guest. + * If device is not a physical function returns -ENODEV. + */ +int pci_vfs_assigned(struct pci_dev *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + struct pci_dev *vfdev; + unsigned short dev_id = 0; + int sriov_cap_pos; + + /* only search if we are a PF. */ + if (dev->is_virtfn) + return 0; + + /* determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one. + */ + sriov_cap_pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(dev, sriov_cap_pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned. */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set. + */ + if (vfdev->is_virtfn && vfdev->physfn == dev && + vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + + return vfs_assigned; +} + +#endif /* 3.10.0 */ +#if (KERNEL_VERSION(3, 13, 0) > LINUX_VERSION_CODE) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int err = dma_set_mask(dev, mask); + + if (!err) + /* + * coherent mask for the same size will always succeed if + * dma_set_mask does. However we store the error anyways, due + * to some kernels which use gcc's warn_unused_result on their + * definition of dma_set_coherent_mask. 
+ */ + err = dma_set_coherent_mask(dev, mask); + return err; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} + +#endif /* 3.13.0 */ +#if (KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE) +int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} + +#endif +#if (KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *tmp; + struct netdev_hw_addr *ha; + int err; + + /* first go through and flush out any stale entries. */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list. */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *tmp; + struct netdev_hw_addr *ha; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct dev_addr_list **next = list; + struct dev_addr_list *da; + int err; + + /* first go through and flush out any stale entries. 
*/ + while ((da = *next)) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list. */ + for (da = *list; da; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + unsigned int gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} + +#endif /* 3.16.0 */ +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} + +#endif +#endif +#if (KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE) +unsigned int cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. 
*/ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } + BUG(); +} + +#endif +struct file *file_creat(const char *file_name) +{ + return filp_open(file_name, O_CREAT | O_RDWR | O_APPEND, 0); +} + +struct file *file_open(const char *file_name) +{ + return filp_open(file_name, O_RDONLY, 0); +} + +void file_close(struct file *file_handle) +{ + (void)filp_close(file_handle, NULL); +} + +u32 get_file_size(struct file *file_handle) +{ + struct inode *file_inode; + + #if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) + file_inode = file_handle->f_dentry->d_inode; + #else + file_inode = file_handle->f_inode; + #endif + + return (u32)(file_inode->i_size); +} + +void set_file_position(struct file *file_handle, u32 position) +{ + file_handle->f_pos = position; +} + +int file_read(struct file *file_handle, char *log_buffer, + u32 rd_length, u32 *file_pos) +{ + return (int)file_handle->f_op->read(file_handle, log_buffer, + rd_length, &file_handle->f_pos); +} + +u32 file_write(struct file *file_handle, char *log_buffer, u32 wr_length) +{ + return (u32)file_handle->f_op->write(file_handle, log_buffer, + wr_length, &file_handle->f_pos); +} + +static int _linux_thread_func(void *thread) +{ + struct sdk_thread_info *info = (struct sdk_thread_info *)thread; + + while (!kthread_should_stop()) + info->thread_fn(info->data); + + return 0; +} + +int creat_thread(struct sdk_thread_info *thread_info) +{ + thread_info->thread_obj = kthread_run(_linux_thread_func, + thread_info, thread_info->name); + if (!thread_info->thread_obj) + return -EFAULT; + + return 0; +} + +void stop_thread(struct sdk_thread_info *thread_info) +{ + if (thread_info->thread_obj) + (void)kthread_stop(thread_info->thread_obj); +} + +void utctime_to_localtime(u64 utctime, u64 *localtime) +{ + *localtime = utctime - sys_tz.tz_minuteswest * OSSL_MINUTE_BASE; +} + +#ifndef HAVE_TIMER_SETUP +void initialize_timer(void *adapter_hdl, struct timer_list *timer) +{ + if (!adapter_hdl || !timer) + return; + + init_timer(timer); +} +#endif + +void add_to_timer(struct timer_list *timer, long period) +{ + if (!timer) + return; + + add_timer(timer); +} + +void stop_timer(struct timer_list *timer) +{ +} + +void delete_timer(struct timer_list *timer) +{ + if (!timer) + return; + + del_timer_sync(timer); +} + +int local_atoi(const char *name) +{ + int val = 0; + + for (;; name++) { + switch (*name) { + case '0' ... '9': + val = 10 * val + (*name - '0'); + break; + default: + return val; + } + } +} diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h new file mode 100644 index 000000000000..e4a9985a9181 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h @@ -0,0 +1,1858 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef OSSL_KNL_LINUX_H_ +#define OSSL_KNL_LINUX_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef SUPPORTED_100000baseKR4_Full +#define SUPPORTED_100000baseKR4_Full 0 +#define ADVERTISED_100000baseKR4_Full 0 +#endif +#ifndef SUPPORTED_100000baseCR4_Full +#define SUPPORTED_100000baseCR4_Full 0 +#define ADVERTISED_100000baseCR4_Full 0 +#endif + +#ifndef SUPPORTED_40000baseKR4_Full +#define SUPPORTED_40000baseKR4_Full 0 +#define ADVERTISED_40000baseKR4_Full 0 +#endif +#ifndef SUPPORTED_40000baseCR4_Full +#define SUPPORTED_40000baseCR4_Full 0 +#define ADVERTISED_40000baseCR4_Full 0 +#endif + +#ifndef SUPPORTED_25000baseKR_Full +#define SUPPORTED_25000baseKR_Full 0 +#define ADVERTISED_25000baseKR_Full 0 +#endif +#ifndef SUPPORTED_25000baseCR_Full +#define SUPPORTED_25000baseCR_Full 0 +#define ADVERTISED_25000baseCR_Full 0 +#endif + +#ifndef ETHTOOL_GLINKSETTINGS +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, +}; +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5. */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. + * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23. 
+ */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if (KERNEL_VERSION(3, 0, 0) >= LINUX_VERSION_CODE) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, 0) << 8) + (d)) + +#ifndef DEEPIN_PRODUCT_VERSION +#define DEEPIN_PRODUCT_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#ifdef CONFIG_DEEPIN_KERNEL +#if (KERNEL_VERSION(4, 4, 102) == LINUX_VERSION_CODE) +#define DEEPIN_VERSION_CODE DEEPIN_PRODUCT_VERSION(15, 2, 0) +#endif +#endif + +#ifndef DEEPIN_VERSION_CODE +#define DEEPIN_VERSION_CODE 0 +#endif + +/* SuSE version macros are the same as Linux kernel version macro. */ +#ifndef SLE_VERSION +#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c) +#endif +#define SLE_LOCALVERSION(a, b, c) KERNEL_VERSION(a, b, c) +#ifdef CONFIG_SUSE_KERNEL +#if (KERNEL_VERSION(2, 6, 27) == LINUX_VERSION_CODE) +/* SLES11 GA is 2.6.27 based. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0) +#elif (KERNEL_VERSION(2, 6, 32) == LINUX_VERSION_CODE) +/* SLES11 SP1 is 2.6.32 based. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0) +#elif (KERNEL_VERSION(3, 0, 13) == LINUX_VERSION_CODE) +/* SLES11 SP2 GA is 3.0.13-0.27. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 2, 0) +#elif (KERNEL_VERSION(3, 0, 76) == LINUX_VERSION_CODE) +/* SLES11 SP3 GA is 3.0.76-0.11. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0) +#elif (KERNEL_VERSION(3, 0, 101) == LINUX_VERSION_CODE) +/* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ +#define SLE_VERSION_CODE SLE_VERSION(11, 4, 0) +#elif (KERNEL_VERSION(3, 12, 28) == LINUX_VERSION_CODE) +/* + * SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy]. + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0) +#elif (KERNEL_VERSION(3, 12, 49) == LINUX_VERSION_CODE) +/* + * SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 1, 0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 21)) +/* + * SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 2, 0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 73)) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0) +#elif (KERNEL_VERSION(4, 12, 14) <= LINUX_VERSION_CODE) +/* SLES15 Beta1 is 4.12.14-2. 
+ * SLES12 SP4 will also use 4.12.14-nn.xx.y */ +#include +/* + * new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SUSE_PRODUCT_CODE +#define SUSE_PRODUCT_CODE 0 +#endif /* SUSE_PRODUCT_CODE */ +#ifndef SUSE_PRODUCT +#define SUSE_PRODUCT(product, version, patchlevel, auxrelease) \ + (((product) << 24) + ((version) << 16) + \ + ((patchlevel) << 8) + (auxrelease)) +#endif /* SUSE_PRODUCT */ + +#ifndef ALIGN_DOWN +#ifndef __ALIGN_KERNEL +#define __ALIGN_KERNEL(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1) +#endif +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 22) > LINUX_VERSION_CODE) +#define tcp_hdr(skb) ((skb)->h.th) +#define tcp_hdrlen(skb) ((skb)->h.th->doff << 2) +#define skb_transport_offset(skb) ((skb)->h.raw - (skb)->data) +#define skb_transport_header(skb) ((skb)->h.raw) +#define ipv6_hdr(skb) ((skb)->nh.ipv6h) +#define ip_hdr(skb) ((skb)->nh.iph) +#define skb_network_offset(skb) ((skb)->nh.raw - (skb)->data) +#define skb_network_header(skb) ((skb)->nh.raw) +#define skb_tail_pointer(skb) ((skb)->tail) +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) ((skb)->h.raw - (skb)->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) ((skb)->mac.raw) + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET, +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 32) > LINUX_VERSION_CODE) +#undef netdev_tx_t +#define netdev_tx_t int +#else /* < 2.6.32 */ + +#if 
(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 2) <= RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE)) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 6) <= RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE)) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE) +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ + +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 3) <= RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 2) <= RHEL_RELEASE_CODE))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while (0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 4) <= RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE)) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_VERSION(6, 5) < RHEL_RELEASE_CODE) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 34) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_VERSION(6, 0) > RHEL_RELEASE_CODE) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +extern int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, 
list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev, mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev), (mask)) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} + +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if (KERNEL_VERSION(2, 6, 0) > LINUX_VERSION_CODE) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while (0) +#elif (KERNEL_VERSION(2, 6, 21) > LINUX_VERSION_CODE) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while (0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) 
\ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; + +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif +#define HAVE_INET6_IFADDR_LIST +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE) +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align + (struct net_device *dev, unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while (0) +#define u64_stats_update_end(a) do { } while (0) +#define u64_stats_fetch_retry(a, b) (0) +#define u64_stats_fetch_begin(a) (0) +#define u64_stats_fetch_retry_bh(a, b) (0) +#define u64_stats_fetch_begin_bh(a) (0) +struct u64_stats_sync_empty { +}; + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_VERSION(6, 4) <= RHEL_RELEASE_CODE) && \ + !(SLE_VERSION(11, 2, 0) <= SLE_VERSION_CODE)) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ +} +#endif + +#else /* < 2.6.36 */ + +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 39) > LINUX_VERSION_CODE) + +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif + +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif + +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) + csum = csum_add(csum, skb->csum); + + return csum; +} +#endif /* udp_csum */ + +#else /* < 2.6.39 */ + +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif + +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(2, 6, 40) > LINUX_VERSION_CODE) + +#else +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 0, 0) > LINUX_VERSION_CODE) + +#else +#define HAVE_NETDEV_WANTED_FEAUTES +#endif + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 2, 0) > LINUX_VERSION_CODE) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void
*_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + + if (ret) + memset(ret, 0, size); + + return ret; +} +#endif + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if (KERNEL_VERSION(2, 6, 0) <= LINUX_VERSION_CODE) +#include +#endif +#define skb_frag_dma_map(dev, frag, offset, size, dir) \ + _kc_skb_frag_dma_map(dev, frag, offset, size, dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_VERSION(6, 3) <= RHEL_RELEASE_CODE) ||\ + (SLE_VERSION_CODE && SLE_VERSION(11, 3, 0) <= SLE_VERSION_CODE)) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#endif /* < 3.2.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 3, 0) > LINUX_VERSION_CODE) +#if (SLE_VERSION_CODE && (SLE_VERSION(11, 2, 0) >= SLE_VERSION_CODE)) +#define NOT_HAVE_GET_RXFH_INDIR_SIZE +#endif + +/* + * NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs. + */ +static inline struct workqueue_struct *__attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) 
+{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if (KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} + +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_VERSION(6, 5) <= RHEL_RELEASE_CODE) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} + +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a, b, c, d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define NUMTCS_RETURNS_U8 +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, + int, int, unsigned int); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#if (RHEL_RELEASE_VERSION(7,0) > RHEL_RELEASE_CODE) +#define _kc_kmap_atomic(page) kmap_atomic(page, KM_SKB_DATA_SOFTIRQ) +#define _kc_kunmap_atomic(addr) kunmap_atomic(addr, KM_SKB_DATA_SOFTIRQ) +#else +#define _kc_kmap_atomic(page) __kmap_atomic(page) +#define _kc_kunmap_atomic(addr) __kunmap_atomic(addr) +#endif + +#else /* < 3.4.0 */ +#include + +#define _kc_kmap_atomic(page) kmap_atomic(page) +#define _kc_kunmap_atomic(addr) kunmap_atomic(addr) +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 5, 0) > LINUX_VERSION_CODE) + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} + +#define ether_addr_equal(_addr1, _addr2) \ + __kc_ether_addr_equal((_addr1), (_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 6, 0) > LINUX_VERSION_CODE) +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ +#endif /* < 3.6.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 7, 0) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_VERSION(6, 8) <= RHEL_RELEASE_CODE) +#define HAVE_NAPI_GRO_FLUSH_OLD +#endif + +#else /* >= 3.7.0 */ +#define HAVE_NAPI_GRO_FLUSH_OLD +#endif /* < 3.7.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 8, 0) > 
LINUX_VERSION_CODE) +#if (SLE_VERSION_CODE && SLE_VERSION(11, 4, 0) <= SLE_VERSION_CODE) +#define HAVE_SRIOV_CONFIGURE +#endif +#else /* >= 3.8.0 */ +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif +#endif /* < 3.8.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +extern int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + + return skb; +} + +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define HAVE_SKB_INNER_NETWORK_HEADER +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(7, 0) <= RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_VERSION(7, 2) <= RHEL_RELEASE_CODE) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_VERSION(7, 3) <= RHEL_RELEASE_CODE) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#ifdef ETHTOOL_GLINKSETTINGS +/* pay attention to the Pangea platform when using this macro */ +#define HAVE_ETHTOOL_25G_BITS +#endif /* ETHTOOL_GLINKSETTINGS */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_VERSION(7, 4) <= RHEL_RELEASE_CODE) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ + +#if (RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif /* RHEL >= 7.5 */ + +#endif /* RHEL >= 7.0 && RHEL < 8.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 11, 0) > LINUX_VERSION_CODE) +#define netdev_notifier_info_to_dev(ptr) ptr +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 2) < RHEL_RELEASE_CODE) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if (RHEL_RELEASE_VERSION(7, 3) <= RHEL_RELEASE_CODE) +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#endif +#if (RHEL_RELEASE_VERSION(7, 4) <= RHEL_RELEASE_CODE) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#endif +#if (RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU +#endif + +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ +
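The HAVE_* feature flags defined above are what let the rest of the driver build unchanged across mainline kernels and distro backports: optional ndo hooks are compiled in only when the compat layer says the running kernel provides them. A minimal usage sketch, not part of the patch (hinic_ndo_set_vf_link_state and hinic_netdev_ops are hypothetical names used for illustration):

/* Sketch: register the VF link-state hook only when the compat layer
 * (mainline >= 3.11, or an RHEL/SLES backport per the checks above)
 * reports support for it.
 */
#include "ossl_knl.h"

#ifdef HAVE_NDO_SET_VF_LINK_STATE
static int hinic_ndo_set_vf_link_state(struct net_device *netdev,
				       int vf, int link)
{
	/* forward the request to the NIC firmware; body omitted here */
	return 0;
}
#endif

static const struct net_device_ops hinic_netdev_ops = {
#ifdef HAVE_NDO_SET_VF_LINK_STATE
	.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
#endif
	/* ... remaining hooks elided ... */
};

On kernels where the flag is unset, the hook and its registration simply compile away, so no runtime check is needed.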
+/*****************************************************************************/ +#if (KERNEL_VERSION(3, 12, 0) <= LINUX_VERSION_CODE) +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if (KERNEL_VERSION(4, 8, 0) > LINUX_VERSION_CODE) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 13, 0) > LINUX_VERSION_CODE) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while (0) +#endif +#ifndef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if (KERNEL_VERSION(2, 6, 20) < LINUX_VERSION_CODE) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif /* 3.13.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#if (!(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_VERSION(7, 0) <= RHEL_RELEASE_CODE) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; + +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + 
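The skb_set_hash() shim above gives RX-path code a single call that compiles everywhere: on pre-3.14 kernels without the RHEL 7 / SLES 12 backport it falls back to writing skb->rxhash and skb->l4_rxhash directly, while newer kernels use the native helper. A short illustrative sketch, assuming a hypothetical hinic_rx_set_hash helper that is not part of the patch:

/* Sketch: report the hardware RSS result to the stack; the same call
 * resolves to either the native helper or the compat shim above.
 */
static void hinic_rx_set_hash(struct sk_buff *skb, u32 rss_hash, bool is_l4)
{
	skb_set_hash(skb, rss_hash,
		     is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}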
+#ifndef pci_enable_msix_range +extern int __kc_pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE) +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync + (struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} + +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync + (struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_UNICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} + +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync + (struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif +} + +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync + (struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST
*/ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} + +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 1) < RHEL_RELEASE_CODE) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* + * If someone backports this, hopefully they backport it as a #define. + * Declare it as zero on older kernels so that if it gets OR'd in + * it won't affect anything, thereby avoiding core driver changes. + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif + +#else +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE) +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 1) < RHEL_RELEASE_CODE) +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#endif + +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +#else +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} + +#define napi_complete_done _kc_napi_complete_done + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 7) <= RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE)))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +extern void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | \ + __GFP_COLD | __GFP_COMP | \ + __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/* + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error.
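+ *
+ * For illustration only (a hypothetical caller, not part of this file),
+ * a transmit path built on this shim would pad short frames and treat a
+ * non-zero return as "skb already consumed":
+ *
+ *	if (skb_put_padto(skb, ETH_ZLEN))
+ *		return NETDEV_TX_OK;	(the skb was already freed by skb_pad)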
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} + +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} + +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} + +#define napi_alloc_skb(napi, len) __kc_napi_alloc_skb(napi, len) +#define __napi_alloc_skb(napi, len, mask) __kc_napi_alloc_skb(napi, len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME + +#if defined(RHEL_RELEASE_CODE) +#if ((RHEL_RELEASE_VERSION(6, 7) < RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE)) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if (RHEL_RELEASE_VERSION(7, 1) < RHEL_RELEASE_CODE) +#define HAVE_RXFH_HASHFUNC +#endif /* RHEL > 7.1 */ +#endif /* RHEL_RELEASE_CODE */ + +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#else /* 3.19.0 */ +#define HAVE_RXFH_HASHFUNC +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present(__skb) vlan_tx_tag_present(__skb) +#define skb_vlan_tag_get(__skb) vlan_tx_tag_get(__skb) +#define skb_vlan_tag_get_id(__skb) vlan_tx_tag_get_id(__skb) +#endif +#endif /* 4.0.0 */ + +/****************************************************************/ +#if (KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE) +#define SPEED_25000 25000 +#define SPEED_100000 100000 +#endif /* 4.2.0 */ + +/****************************************************************/ +#if (KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ + +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST +#endif /* 4.4.0 */ + +/****************************************************************/ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#endif /* 4.5.0 */ + +/****************************************************************/ +#if (KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) && \ + !(DEEPIN_VERSION_CODE && (DEEPIN_VERSION_CODE == DEEPIN_PRODUCT_VERSION(15,2,0))) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif /* 4.6.0 */ + +/****************************************************************/ +#if (KERNEL_VERSION(4, 7, 0) > LINUX_VERSION_CODE) +#define HAVE_PAGE_COUNT + +#ifdef ETHTOOL_GLINKSETTINGS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 4, 0, 62)) +#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31 +#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 
32 +#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36 +#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 +#endif +#endif + +#endif /* 4.7.0 */ + +/****************************************************************/ +#ifdef ETHTOOL_GMODULEEEPROM +#ifndef ETH_MODULE_SFF_8472 +#define ETH_MODULE_SFF_8472 0x2 +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8472_LEN +#define ETH_MODULE_SFF_8472_LEN 512 +#endif +#ifndef ETH_MODULE_SFF_8636_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#endif +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif +#endif + +/****************************************************************/ +#if (KERNEL_VERSION(4, 8, 0) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +#define HAVE_IO_MAP_WC_SIZE +#endif + +#else /* > 4.8.0 */ +#define HAVE_IO_MAP_WC_SIZE +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE) +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) || \ + (DEEPIN_VERSION_CODE && (DEEPIN_VERSION_CODE == DEEPIN_PRODUCT_VERSION(15,2,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)) +#define HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU +#endif +#else /* >= 4.10.0 */ +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE) +#define HAVE_STRUCT_CURRENT +#if (SLE_VERSION_CODE && (SLE_VERSION(12, 3, 0) <= SLE_VERSION_CODE)) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)) || \ + (DEEPIN_VERSION_CODE && (DEEPIN_VERSION_CODE == DEEPIN_PRODUCT_VERSION(15,2,0))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 13, 0) > LINUX_VERSION_CODE) +#else /* > 4.13 */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1, 12, 4, 0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#else /* > 4.14 */ +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE) +#if ((KERNEL_VERSION(3, 10, 0) == LINUX_VERSION_CODE) && \ + RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 6) <= RHEL_RELEASE_CODE )) || \ + (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1, 15, 1, 0))) || \ + (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1, 12, 5, 0))) +#else +#ifndef KEYLIN_V7UPDATE6 +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif +#endif +#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE) +#if 
(SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1, 12, 4, 0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE) +#else /* 4.12-4.15 */ +#define HAVE_IP6_FRAG_ID_ENABLE_UFO +#endif +#else /* < 4.12.0 */ +#define HAVE_IP6_FRAG_ID_ENABLE_UFO +#endif +#else /* >= 4.15.0 */ +#define HAVE_TIMER_SETUP +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(8, 0) <= RHEL_RELEASE_CODE)) || \ + (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1, 15, 1, 0))) || \ + (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1, 12, 5, 0))) +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif + +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)) +#define dev_open(x) dev_open(x, NULL) +#endif +#else /* >= 5.0.0 */ +#define dev_open(x) dev_open(x, NULL) +#define HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY + +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _hinic_dma_zalloc_coherent(d, s, h, f) +static inline void *_hinic_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + /* From kernel 5.0 onward, all remaining architectures were fixed up + * to zero the memory in dma_alloc_coherent, and dma_zalloc_coherent + * became a no-op wrapper around dma_alloc_coherent, so calling + * dma_alloc_coherent directly is sufficient here. + */ + return dma_alloc_coherent(dev, size, dma_handle, gfp); +} +#endif + +#ifndef do_gettimeofday +#define do_gettimeofday(time) _kc_do_gettimeofday(time) +static inline void _kc_do_gettimeofday(struct timeval *tv) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} +#endif +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE) +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 3, 0) > LINUX_VERSION_CODE) +#if (KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if (KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE) +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +/* vxlan outer udp checksum offload leaves skb->inner_transport_header wrong */ +#if (SLE_VERSION_CODE && ((SLE_VERSION(12, 1, 0) == SLE_VERSION_CODE) || \ + (SLE_VERSION(12, 0, 0) == SLE_VERSION_CODE))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 0) == RHEL_RELEASE_CODE)) +#define HAVE_OUTER_IPV6_TUNNEL_OFFLOAD +#endif + +#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) +#define HAVE_ENCAPSULATION_TSO +#endif + +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +#define HAVE_ENCAPSULATION_CSUM +#endif + +#ifndef eth_zero_addr +static inline void __kc_eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, ETH_ALEN); +} + +#define eth_zero_addr(_addr) __kc_eth_zero_addr(_addr) +#endif + +#ifndef netdev_hw_addr_list_for_each +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) +#endif + +#if (KERNEL_VERSION(3, 8, 0) >
LINUX_VERSION_CODE) +int pci_sriov_get_totalvfs(struct pci_dev *dev); +#endif +#if (KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE) +unsigned int cpumask_local_spread(unsigned int i, int node); +#endif +#define spin_lock_deinit(lock) + +struct file *file_creat(const char *file_name); + +struct file *file_open(const char *file_name); + +void file_close(struct file *file_handle); + +u32 get_file_size(struct file *file_handle); + +void set_file_position(struct file *file_handle, u32 position); + +int file_read(struct file *file_handle, char *log_buffer, + u32 rd_length, u32 *file_pos); + +u32 file_write(struct file *file_handle, char *log_buffer, u32 wr_length); + +struct sdk_thread_info { + struct task_struct *thread_obj; + char *name; + void (*thread_fn)(void *x); + void *thread_event; + void *data; +}; + +int creat_thread(struct sdk_thread_info *thread_info); + +void stop_thread(struct sdk_thread_info *thread_info); + +#define destroy_work(work) +void utctime_to_localtime(u64 utctime, u64 *localtime); +#ifndef HAVE_TIMER_SETUP +void initialize_timer(void *adapter_hdl, struct timer_list *timer); +#endif +void add_to_timer(struct timer_list *timer, long period); +void stop_timer(struct timer_list *timer); +void delete_timer(struct timer_list *timer); + +int local_atoi(const char *name); + +#define nicif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, "[NIC]"fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]"fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]"fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, "[NIC]"fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, "[NIC]"fmt, ##args) + +#define destory_completion(completion) +#define sema_deinit(lock) +#define mutex_deinit(lock) +#define rwlock_deinit(lock) + +#define tasklet_state(tasklet) ((tasklet)->state) + +#endif +/*****************************************************************************/ + diff --git a/drivers/net/ethernet/huawei/hinic/ossl_types.h b/drivers/net/ethernet/huawei/hinic/ossl_types.h new file mode 100644 index 000000000000..e2ca29d1c779 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/ossl_types.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Statement: + * Include "ossl_knl.h" or "ossl_user.h" before including "ossl_types.h". + */ + +#ifndef _OSSL_TYPES_H +#define _OSSL_TYPES_H + +#undef NULL +#if defined(__cplusplus) +#define NULL 0 +#else +#define NULL ((void *)0) +#endif + +#define uda_handle void * + +#define UDA_TRUE 1 +#define UDA_FALSE 0 + +#ifndef UINT8_MAX +#define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */ +#define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */ +#define UINT32_MAX (u32)(~((u32)0)) /* 0xFFFFFFFF */ +#define UINT64_MAX (u64)(~((u64)0)) /* 0xFFFFFFFFFFFFFFFF */ +#define ASCII_MAX (0x7F) +#endif + +#endif /* _OSSL_TYPES_H */ diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0686ded7ad3a..e1ab2feeae53 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -176,7 +176,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = adapter->map_id; adapter->map_id++; - init_completion(&adapter->fw_done); + reinit_completion(&adapter->fw_done); rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); if (rc) { @@ -215,7 +215,7 @@ static int reset_long_term_buff(struct ibmvnic_adapter *adapter, memset(ltb->buff, 0, ltb->size); - init_completion(&adapter->fw_done); + reinit_completion(&adapter->fw_done); rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); if (rc) return rc; @@ -943,7 +943,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) if (adapter->vpd->buff) len = adapter->vpd->len; - init_completion(&adapter->fw_done); + reinit_completion(&adapter->fw_done); crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; crq.get_vpd_size.cmd = GET_VPD_SIZE; rc = ibmvnic_send_crq(adapter, &crq); @@ -1689,7 +1689,7 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); - init_completion(&adapter->fw_done); + reinit_completion(&adapter->fw_done); rc = ibmvnic_send_crq(adapter, &crq); if (rc) { rc = -EIO; @@ -2316,7 +2316,7 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; - init_completion(&adapter->reset_done); + reinit_completion(&adapter->reset_done); adapter->wait_for_reset = true; rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); if (rc) @@ -2332,7 +2332,7 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) adapter->desired.rx_entries = adapter->fallback.rx_entries; adapter->desired.tx_entries = adapter->fallback.tx_entries; - init_completion(&adapter->reset_done); + reinit_completion(&adapter->reset_done); adapter->wait_for_reset = true; rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); if (rc) @@ -2603,7 +2603,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, cpu_to_be32(sizeof(struct ibmvnic_statistics)); /* Wait for data to be written */ - init_completion(&adapter->stats_done); + reinit_completion(&adapter->stats_done); rc = ibmvnic_send_crq(adapter, &crq); if (rc) return; @@ -4408,7 +4408,7 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter) memset(&crq, 0, sizeof(crq)); crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; - init_completion(&adapter->fw_done); + reinit_completion(&adapter->fw_done); rc = ibmvnic_send_crq(adapter, &crq); if (rc) return rc; @@ -4960,6 +4960,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const
struct vio_device_id *id) INIT_LIST_HEAD(&adapter->rwi_list); spin_lock_init(&adapter->rwi_lock); init_completion(&adapter->init_done); + init_completion(&adapter->fw_done); + init_completion(&adapter->reset_done); + init_completion(&adapter->stats_done); clear_bit(0, &adapter->resetting); do { diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6c51b1bad8c4..37a2314d3e6b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -185,13 +185,12 @@ struct e1000_phy_regs { /* board specific private data structure */ struct e1000_adapter { + struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct timer_list blink_timer; struct work_struct reset_task; - struct delayed_work watchdog_task; - - struct workqueue_struct *e1000_workqueue; + struct work_struct watchdog_task; const struct e1000_info *ei; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d7d56e42a6aa..8c4507838325 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -4281,6 +4278,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) napi_synchronize(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); spin_lock(&adapter->stats64_lock); @@ -4715,12 +4713,12 @@ int e1000e_close(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); - if (!test_bit(__E1000_DOWN, &adapter->state)) { + if (netif_device_present(netdev)) { e1000e_down(adapter, true); e1000_free_irq(adapter); /* Link status message must follow this format */ - pr_info("%s NIC Link is Down\n", adapter->netdev->name); + pr_info("%s NIC Link is Down\n", netdev->name); } napi_disable(&adapter->napi); @@ -5152,11 +5150,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) } } +/** + * e1000_watchdog - Timer Call-back + * @t: pointer to the timer_list embedded in the adapter structure + **/ +static void e1000_watchdog(struct timer_list *t) +{ + struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, - watchdog_task.work); + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -5404,9 +5416,8 @@ link_up: /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, - round_jiffies(2 * HZ)); + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); } #define E1000_TX_FLAGS_CSUM 0x00000001 @@ -6298,10 +6309,14 @@ static int e1000e_pm_freeze(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); + bool present; + rtnl_lock(); + + present = netif_device_present(netdev); netif_device_detach(netdev); - if (netif_running(netdev)) { + if (present && netif_running(netdev)) { int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) @@ -6313,6 +6328,8 @@ static int e1000e_pm_freeze(struct device *dev) e1000e_down(adapter, false); e1000_free_irq(adapter); } + rtnl_unlock(); + e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ @@ -6560,6 +6577,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) __e1000e_disable_aspm(pdev, state, 1); } +static int e1000e_pm_thaw(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct e1000_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + e1000e_set_interrupt_capability(adapter); + + rtnl_lock(); + if (netif_running(netdev)) { + rc = e1000_request_irq(adapter); + if (rc) + goto err_irq; + + e1000e_up(adapter); + } + + netif_device_attach(netdev); +err_irq: + rtnl_unlock(); + + return rc; +} + #ifdef CONFIG_PM static int __e1000_resume(struct pci_dev *pdev) { @@ -6627,26 +6668,6 @@ static int __e1000_resume(struct pci_dev *pdev) } #ifdef CONFIG_PM_SLEEP -static int e1000e_pm_thaw(struct device *dev) -{ - struct net_device *netdev = dev_get_drvdata(dev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - e1000e_set_interrupt_capability(adapter); - if (netif_running(netdev)) { - u32 err = e1000_request_irq(adapter); - - if (err) - return err; - - e1000e_up(adapter); - } - - netif_device_attach(netdev); - - return 0; -} - static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -6818,16 +6839,11 @@ static void e1000_netpoll(struct net_device *netdev) static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - netif_device_detach(netdev); + e1000e_pm_freeze(&pdev->dev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; - if (netif_running(netdev)) - e1000e_down(adapter, true); pci_disable_device(pdev); /* Request a slot reset. */ @@ -6893,10 +6909,7 @@ static void e1000_io_resume(struct pci_dev *pdev) e1000_init_manageability_pt(adapter); - if (netif_running(netdev)) - e1000e_up(adapter); - - netif_device_attach(netdev); + e1000e_pm_thaw(&pdev->dev); /* If the controller has AMT, do not set DRV_LOAD until the interface * is up.
For all other cases, let the f/w know that the h/w is now @@ -7259,21 +7272,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, - e1000e_driver_name); - - if (!adapter->e1000_workqueue) { - err = -ENOMEM; - goto err_workqueue; - } - - INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); - queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task, - 0); - + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); @@ -7367,9 +7370,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_register: - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); -err_workqueue: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: @@ -7407,26 +7407,22 @@ static void e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - bool down = test_bit(__E1000_DOWN, &adapter->state); e1000e_ptp_remove(adapter); /* The timers may be rescheduled, so explicitly disable them * from being rescheduled. */ - if (!down) - set_bit(__E1000_DOWN, &adapter->state); + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); - cancel_delayed_work(&adapter->watchdog_task); - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); - if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { @@ -7435,9 +7431,6 @@ static void e1000_remove(struct pci_dev *pdev) } } - /* Don't lie to e1000_close() down the road. */ - if (!down) - clear_bit(__E1000_DOWN, &adapter->state); unregister_netdev(netdev); if (pci_dev_run_wake(pdev)) diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 2f21b3e89fd0..89ae35b2ef74 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -21,9 +21,11 @@ i40e-objs := i40e_main.o \ i40e_diag.o \ i40e_txrx.o \ i40e_ptp.o \ + i40e_filters.o \ i40e_ddp.o \ i40e_client.o \ i40e_virtchnl_pf.o \ - i40e_xsk.o + kcompat.o \ + kcompat_vfd.o -i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o +i40e-$(CONFIG_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2af9f6308f84..e07b7b90b4f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #ifndef _I40E_H_ #define _I40E_H_ @@ -13,10 +13,9 @@ #include #include #include -#include #include #include -#include +#include #include #include #include @@ -24,37 +23,56 @@ #include #include #include +#include #include +#ifdef SIOCETHTOOL #include +#endif #include -#include #include +#include "kcompat.h" +#ifdef HAVE_IOMMU_PRESENT +#include +#endif +#ifdef HAVE_SCTP +#include +#endif +#ifdef HAVE_PTP_1588_CLOCK #include #include #include +#endif /* HAVE_PTP_1588_CLOCK */ +#ifdef __TC_MQPRIO_MODE_MAX #include -#include -#include -#include +#endif #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_client.h" -#include +#include "virtchnl.h" #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" #include "i40e_dcb.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif + /* Useful i40e defaults */ #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 +#define L4_MODE_UDP 0 +#define L4_MODE_TCP 1 +#define L4_MODE_BOTH 2 +#define L4_MODE_DISABLED -1 +bool i40e_is_l4mode_enabled(void); #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 #define I40E_MIN_NUM_DESCRIPTORS 64 #define I40E_MIN_MSIX 2 #define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ -#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */ +#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF, 16 VMDQ */ /* max 16 qps */ #define i40e_default_queues_per_vmdq(pf) \ (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) @@ -68,6 +86,10 @@ #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ +/* + * If I40E_MAX_USER_PRIORITY is updated please also update + * I40E_CLIENT_MAX_USER_PRIORITY in i40e_client.h and i40evf_client.h + */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) #define I40E_DEFAULT_MSG_ENABLE 4 @@ -97,7 +119,7 @@ #define I40E_CURRENT_NVM_VERSION_LO 0x40 #define I40E_RX_DESC(R, i) \ - (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) + (&(((union i40e_rx_desc *)((R)->desc))[i])) #define I40E_TX_DESC(R, i) \ (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ @@ -110,8 +132,7 @@ /* BW rate limiting */ #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ -#define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */ -#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ +#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ /* driver state flags */ enum i40e_state_t { @@ -131,17 +152,20 @@ enum i40e_state_t { __I40E_PF_RESET_REQUESTED, __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, + __I40E_EMP_RESET_REQUESTED, __I40E_EMP_RESET_INTR_RECEIVED, __I40E_SUSPENDED, - __I40E_PTP_TX_IN_PROGRESS, __I40E_BAD_EEPROM, + __I40E_DEBUG_MODE, __I40E_DOWN_REQUESTED, __I40E_FD_FLUSH_REQUESTED, __I40E_FD_ATR_AUTO_DISABLED, __I40E_FD_SB_AUTO_DISABLED, __I40E_RESET_FAILED, __I40E_PORT_SUSPENDED, + __I40E_PTP_TX_IN_PROGRESS, __I40E_VF_DISABLE, + __I40E_RECOVERY_MODE, __I40E_MACVLAN_SYNC_PENDING, __I40E_UDP_FILTER_SYNC_PENDING, __I40E_TEMP_LINK_POLLING, @@ -149,7 +173,6 @@ enum i40e_state_t { __I40E_CLIENT_L2_CHANGE, __I40E_CLIENT_RESET, __I40E_VIRTCHNL_OP_PENDING, - __I40E_RECOVERY_MODE, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, }; @@ -184,6 +207,10 @@ struct i40e_lump_tracking { #define I40E_DEFAULT_ATR_SAMPLE_RATE 20 #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 +#define I40E_TCPIP_DUMMY_PACKET_LEN 54 +#define 
I40E_SCTPIP_DUMMY_PACKET_LEN 46 +#define I40E_UDPIP_DUMMY_PACKET_LEN 42 +#define I40E_IP_DUMMY_PACKET_LEN 34 #define I40E_FDIR_BUFFER_FULL_MARGIN 10 #define I40E_FDIR_BUFFER_HEAD_ROOM 32 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) @@ -210,6 +237,11 @@ enum i40e_fd_stat_idx { * field of the ethtool_rx_flow_spec structure. */ struct i40e_rx_flow_userdef { + bool cloud_filter; + bool tenant_id_valid; + u32 tenant_id; + bool tunnel_type_valid; + u8 tunnel_type; bool flex_filter; u16 flex_word; u16 flex_offset; @@ -217,7 +249,7 @@ struct i40e_rx_flow_userdef { struct i40e_fdir_filter { struct hlist_node fdir_node; - /* filter ipnut set */ + /* filter input set */ u8 flow_type; u8 ip4_proto; /* TX packet view of src and dst */ @@ -234,7 +266,6 @@ struct i40e_fdir_filter { /* filter control */ u16 q_index; - u8 flex_off; u8 pctype; u16 dest_vsi; u8 dest_ctl; @@ -249,24 +280,31 @@ struct i40e_fdir_filter { #define I40E_CLOUD_FIELD_TEN_ID BIT(3) #define I40E_CLOUD_FIELD_IIP BIT(4) -#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC -#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC -#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \ - I40E_CLOUD_FIELD_IVLAN) -#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \ - I40E_CLOUD_FIELD_TEN_ID) +#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC +#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC +#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \ + I40E_CLOUD_FIELD_IVLAN) +#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \ + I40E_CLOUD_FIELD_TEN_ID) #define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \ I40E_CLOUD_FIELD_IMAC | \ I40E_CLOUD_FIELD_TEN_ID) #define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \ I40E_CLOUD_FIELD_IVLAN | \ I40E_CLOUD_FIELD_TEN_ID) -#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP +#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP struct i40e_cloud_filter { struct hlist_node cloud_node; unsigned long cookie; /* cloud filter input set follows */ + u8 outer_mac[ETH_ALEN]; + u8 inner_mac[ETH_ALEN]; + __be16 inner_vlan; + __be32 inner_ip[4]; + u16 queue_id; + u32 id; + /* cloud filter input set follows */ u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; __be16 vlan_id; @@ -291,10 +329,12 @@ struct i40e_cloud_filter { u16 n_proto; /* Ethernet Protocol */ u8 ip_proto; /* IPPROTO value */ u8 flags; -#define I40E_CLOUD_TNL_TYPE_NONE 0xff +#define I40E_CLOUD_TNL_TYPE_NONE 0xff u8 tunnel_type; }; +#define I40E_ETH_P_LLDP 0x88cc + #define I40E_DCB_PRIO_TYPE_STRICT 0 #define I40E_DCB_PRIO_TYPE_ETS 1 #define I40E_DCB_STRICT_PRIO_CREDITS 127 @@ -326,7 +366,6 @@ struct i40e_udp_port_config { #define I40E_PROFILE_LIST_SIZE \ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4) #define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/" -#define I40E_DDP_PROFILE_NAME_MAX 64 int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, bool is_add); @@ -406,17 +445,26 @@ struct i40e_ddp_old_profile_list { I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \ I40E_FLEX_56_MASK | I40E_FLEX_57_MASK) +#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \ + (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \ + (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \ + ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \ + ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \ + (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)) + +#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \ + 
(I40E_QINT_RQCTL_CAUSE_ENA_MASK | \ + (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \ + ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \ + ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \ + (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)) + struct i40e_flex_pit { struct list_head list; u16 src_offset; u8 pit_index; }; -struct i40e_fwd_adapter { - struct net_device *netdev; - int bit_no; -}; - struct i40e_channel { struct list_head list; bool initialized; @@ -431,25 +479,11 @@ struct i40e_channel { struct i40e_aqc_vsi_properties_data info; u64 max_tx_rate; - struct i40e_fwd_adapter *fwd; /* track this channel belongs to which VSI */ struct i40e_vsi *parent_vsi; }; -static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch) -{ - return !!ch->fwd; -} - -static inline u8 *i40e_channel_mac(struct i40e_channel *ch) -{ - if (i40e_is_channel_macvlan(ch)) - return ch->fwd->netdev->dev_addr; - else - return NULL; -} - /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; @@ -462,7 +496,7 @@ struct i40e_pf { u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ - u16 num_req_vfs; /* num VFs requested for this PF */ + u16 num_req_vfs; /* num VFs requested for this VF */ u16 num_vf_qps; /* num queue pairs per VF */ u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ @@ -539,7 +573,7 @@ struct i40e_pf { #define I40E_HW_PORT_ID_VALID BIT(17) #define I40E_HW_RESTART_AUTONEG BIT(18) - u32 flags; + u64 flags; #define I40E_FLAG_RX_CSUM_ENABLED BIT(0) #define I40E_FLAG_MSI_ENABLED BIT(1) #define I40E_FLAG_MSIX_ENABLED BIT(2) @@ -557,7 +591,9 @@ struct i40e_pf { #define I40E_FLAG_LINK_POLLING_ENABLED BIT(14) #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15) #define I40E_FLAG_LEGACY_RX BIT(16) +#ifdef HAVE_PTP_1588_CLOCK #define I40E_FLAG_PTP BIT(17) +#endif /* HAVE_PTP_1588_CLOCK */ #define I40E_FLAG_IWARP_ENABLED BIT(18) #define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19) #define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20) @@ -567,6 +603,9 @@ struct i40e_pf { #define I40E_FLAG_DISABLE_FW_LLDP BIT(24) #define I40E_FLAG_RS_FEC BIT(25) #define I40E_FLAG_BASE_R_FEC BIT(26) +#define I40E_FLAG_TOTAL_PORT_SHUTDOWN BIT(27) + /* flag to enable/disable vf base mode support */ + bool vf_base_mode_only; struct i40e_client_instance *cinst; bool stat_offsets_loaded; @@ -624,6 +663,7 @@ struct i40e_pf { u16 dcbx_cap; struct i40e_filter_control_settings filter_settings; +#ifdef HAVE_PTP_1588_CLOCK struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; @@ -642,6 +682,23 @@ struct i40e_pf { unsigned long latch_events[4]; bool ptp_tx; bool ptp_rx; +#endif /* HAVE_PTP_1588_CLOCK */ +#ifdef I40E_ADD_PROBES + u64 tcp_segs; + u64 tx_tcp_cso; + u64 tx_udp_cso; + u64 tx_sctp_cso; + u64 tx_ip4_cso; + u64 rx_tcp_cso; + u64 rx_udp_cso; + u64 rx_sctp_cso; + u64 rx_ip4_cso; + u64 hw_csum_rx_outer; + u64 rx_tcp_cso_err; + u64 rx_udp_cso_err; + u64 rx_sctp_cso_err; + u64 rx_ip4_cso_err; +#endif u16 rss_table_size; /* HW RSS table size */ u32 max_bw; u32 min_bw; @@ -649,10 +706,16 @@ struct i40e_pf { u32 ioremap_len; u32 fd_inv; u16 phy_led_val; - - u16 override_q_count; u16 last_sw_conf_flags; u16 last_sw_conf_valid_flags; + + u16 override_q_count; + struct vfd_objects *vfd_obj; + u16 ingress_rule_id; + int ingress_vlan; + u16 egress_rule_id; + int egress_vlan; + bool vf_bw_applied; /* List to keep 
previous DDP profiles to be rolled back in the future */ struct list_head ddp_old_prof; }; @@ -730,7 +793,11 @@ struct i40e_veb { /* struct that defines a VSI, associated with a dev */ struct i40e_vsi { struct net_device *netdev; +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; +#else unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif bool netdev_registered; bool stat_offsets_loaded; @@ -738,7 +805,7 @@ struct i40e_vsi { DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); #define I40E_VSI_FLAG_FILTER_CHANGED BIT(0) #define I40E_VSI_FLAG_VEB_OWNER BIT(1) - unsigned long flags; + u64 flags; /* Per VSI lock to protect elements/hash (MAC filter) */ spinlock_t mac_filter_hash_lock; @@ -747,8 +814,13 @@ struct i40e_vsi { bool has_vlan_filter; /* VSI stats */ +#ifdef HAVE_NDO_GET_STATS64 struct rtnl_link_stats64 net_stats; struct rtnl_link_stats64 net_stats_offsets; +#else + struct net_device_stats net_stats; + struct net_device_stats net_stats_offsets; +#endif struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; u32 tx_restart; @@ -774,7 +846,6 @@ struct i40e_vsi { u8 *rss_hkey_user; /* User configured hash keys */ u8 *rss_lut_user; /* User configured lookup table entries */ - u16 max_frame; u16 rx_buf_len; @@ -798,8 +869,9 @@ struct i40e_vsi { u16 num_rx_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ s16 vf_id; /* Virtual function ID for SRIOV VSIs */ - +#ifdef __TC_MQPRIO_MODE_MAX struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */ +#endif struct i40e_tc_configuration tc_config; struct i40e_aqc_vsi_properties_data info; @@ -820,7 +892,6 @@ struct i40e_vsi { struct kobject *kobj; /* sysfs object */ bool current_isup; /* Sync 'link up' logging */ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */ - /* channel specific fields */ u16 cnt_q_avail; /* num of queues available for channel usage */ u16 orig_rss_size; @@ -832,19 +903,14 @@ struct i40e_vsi { struct list_head ch_list; u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS]; - /* macvlan fields */ -#define I40E_MAX_MACVLANS 128 /* Max HW vectors - 1 on FVL */ -#define I40E_MIN_MACVLAN_VECTORS 2 /* Min vectors to enable macvlans */ - DECLARE_BITMAP(fwd_bitmask, I40E_MAX_MACVLANS); - struct list_head macvlan_list; - int macvlan_cnt; - void *priv; /* client driver data reference. 
*/ + bool block_tx_timeout; /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); +#ifdef ETHTOOL_GRXRINGS +#endif - unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -866,8 +932,10 @@ struct i40e_q_vector { u8 itr_countdown; /* when 0 should adjust adaptive ITR */ u8 num_ringpairs; /* total number of ring pairs in vector */ +#ifdef HAVE_IRQ_AFFINITY_NOTIFY cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; +#endif struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; @@ -991,7 +1059,7 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf, /* needed by i40e_ethtool.c */ int i40e_up(struct i40e_vsi *vsi); void i40e_down(struct i40e_vsi *vsi); -extern const char i40e_driver_name[]; +extern char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired); @@ -1000,6 +1068,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, u16 rss_table_size, u16 rss_size); struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); +struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_pf *pf, u16 seid); /** * i40e_find_vsi_by_type - Find and return Flow Director VSI * @pf: PF to search for VSI @@ -1022,7 +1091,11 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type) void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_veb_stats(struct i40e_veb *veb); void i40e_update_eth_stats(struct i40e_vsi *vsi); +#ifdef HAVE_NDO_GET_STATS64 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); +#else +struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); +#endif int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig); @@ -1035,6 +1108,8 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); u32 i40e_get_global_fd_count(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); +struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, + const u8 *macaddr, s16 vlan); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); @@ -1043,6 +1118,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); +int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type); +int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi); +int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi); +int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc); +int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename); void i40e_service_event_schedule(struct i40e_pf *pf); void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len); @@ -1054,6 +1134,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi); +void i40e_quiesce_vsi(struct i40e_vsi *vsi); +void i40e_unquiesce_vsi(struct i40e_vsi *vsi); +void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf); +void 
i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf); int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc); @@ -1062,6 +1146,7 @@ void i40e_veb_release(struct i40e_veb *veb); int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); void i40e_vsi_remove_pvid(struct i40e_vsi *vsi); +int i40e_get_cloud_filter_type(u8 flags, u16 *type); void i40e_vsi_reset_stats(struct i40e_vsi *vsi); void i40e_pf_reset_stats(struct i40e_pf *pf); #ifdef CONFIG_DEBUG_FS @@ -1118,18 +1203,22 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); +int i40e_count_filters(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB +#ifdef HAVE_DCBNL_IEEE void i40e_dcbnl_flush_apps(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); void i40e_dcbnl_set_all(struct i40e_vsi *vsi); void i40e_dcbnl_setup(struct i40e_vsi *vsi); +#endif /* HAVE_DCBNL_IEEE */ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); -#endif /* CONFIG_I40E_DCB */ +#endif /* CONFIG_DCB */ +#ifdef HAVE_PTP_1588_CLOCK void i40e_ptp_rx_hang(struct i40e_pf *pf); void i40e_ptp_tx_hang(struct i40e_pf *pf); void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); @@ -1141,25 +1230,38 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf); void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +#endif /* HAVE_PTP_1588_CLOCK */ +u8 i40e_pf_get_num_tc(struct i40e_pf *pf); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf); -void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); - -void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags); - -static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) -{ - return !!vsi->xdp_prog; -} - -int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); +int i40e_add_del_cloud_filter_ex(struct i40e_pf *pf, + struct i40e_cloud_filter *filter, + bool add); int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, struct i40e_cloud_filter *filter, bool add); int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, struct i40e_cloud_filter *filter, bool add); +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); +int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); +int i40e_get_link_speed(struct i40e_vsi *vsi); + +void i40e_set_fec_in_flags(u8 fec_cfg, u64 *flags); + +#ifdef HAVE_XDP_SUPPORT +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); +#endif + +static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) +{ + return !!vsi->xdp_prog; +} +int i40e_restore_ingress_egress_mirror(struct i40e_vsi *src_vsi, int mirror, u16 rule_type, + u16 *rule_id); + #endif /* _I40E_H_ */ diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 72c04881d290..15b9d9c8961f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #include "i40e_status.h" #include "i40e_type.h" @@ -7,8 +7,6 @@ #include "i40e_adminq.h" #include "i40e_prototype.h" -static void i40e_resume_aq(struct i40e_hw *hw); - /** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -96,6 +94,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) **/ static void i40e_free_adminq_asq(struct i40e_hw *hw) { + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); } @@ -146,21 +145,21 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) /* now configure the descriptors for use */ desc = I40E_ADMINQ_DESC(hw->aq.arq, i); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) - desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); desc->opcode = 0; /* This is in accordance with Admin queue design, there is no * register for buffer size configuration */ - desc->datalen = cpu_to_le16((u16)bi->size); + desc->datalen = CPU_TO_LE16((u16)bi->size); desc->retval = 0; desc->cookie_high = 0; desc->cookie_low = 0; desc->params.external.addr_high = - cpu_to_le32(upper_32_bits(bi->pa)); + CPU_TO_LE32(upper_32_bits(bi->pa)); desc->params.external.addr_low = - cpu_to_le32(lower_32_bits(bi->pa)); + CPU_TO_LE32(lower_32_bits(bi->pa)); desc->params.external.param0 = 0; desc->params.external.param1 = 0; } @@ -268,7 +267,7 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw) **/ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u32 reg = 0; /* Clear Head and Tail */ @@ -297,7 +296,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) **/ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u32 reg = 0; /* Clear Head and Tail */ @@ -336,7 +335,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) **/ static i40e_status i40e_init_asq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; if (hw->aq.asq.count > 0) { /* queue already initialized */ @@ -356,18 +355,18 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto init_adminq_exit; /* allocate buffers in the rings */ ret_code = i40e_alloc_asq_bufs(hw); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto init_adminq_free_rings; /* initialize base registers */ ret_code = i40e_config_asq_regs(hw); - if (ret_code) - goto init_adminq_free_rings; + if (ret_code != I40E_SUCCESS) + goto init_config_regs; /* success! 
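 *
 * (For reference, a sketch of how this status is consumed by
 *  i40e_init_adminq(), matching the call site later in this patch:
 *
 *	ret_code = i40e_init_arq(hw);
 *	if (ret_code != I40E_SUCCESS)
 *		goto init_adminq_free_asq;
 *
 *  any nonzero i40e_status aborts AQ bring-up and unwinds the ASQ.)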
*/ hw->aq.asq.count = hw->aq.num_asq_entries; @@ -375,6 +374,10 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) init_adminq_free_rings: i40e_free_adminq_asq(hw); + return ret_code; + +init_config_regs: + i40e_free_asq_bufs(hw); init_adminq_exit: return ret_code; @@ -395,7 +398,7 @@ init_adminq_exit: **/ static i40e_status i40e_init_arq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; if (hw->aq.arq.count > 0) { /* queue already initialized */ @@ -415,17 +418,17 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto init_adminq_exit; /* allocate buffers in the rings */ ret_code = i40e_alloc_arq_bufs(hw); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto init_adminq_free_rings; /* initialize base registers */ ret_code = i40e_config_arq_regs(hw); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto init_adminq_free_rings; /* success! */ @@ -447,9 +450,9 @@ init_adminq_exit: **/ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; - mutex_lock(&hw->aq.asq_mutex); + i40e_acquire_spinlock(&hw->aq.asq_spinlock); if (hw->aq.asq.count == 0) { ret_code = I40E_ERR_NOT_READY; @@ -469,7 +472,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) i40e_free_asq_bufs(hw); shutdown_asq_out: - mutex_unlock(&hw->aq.asq_mutex); + i40e_release_spinlock(&hw->aq.asq_spinlock); return ret_code; } @@ -481,9 +484,9 @@ shutdown_asq_out: **/ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; - mutex_lock(&hw->aq.arq_mutex); + i40e_acquire_spinlock(&hw->aq.arq_spinlock); if (hw->aq.arq.count == 0) { ret_code = I40E_ERR_NOT_READY; @@ -503,10 +506,86 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) i40e_free_arq_bufs(hw); shutdown_arq_out: - mutex_unlock(&hw->aq.arq_mutex); + i40e_release_spinlock(&hw->aq.arq_spinlock); return ret_code; } +/** + * i40e_resume_aq - resume AQ processing from 0 + * @hw: pointer to the hardware structure + **/ +static void i40e_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} + +/** + * i40e_set_hw_flags - set HW flags + * @hw: pointer to the hardware structure + **/ +static void i40e_set_hw_flags(struct i40e_hw *hw) +{ + struct i40e_adminq_info *aq = &hw->aq; + + hw->flags = 0; + + switch (hw->mac.type) { + case I40E_MAC_XL710: + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) { + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + /* The ability to RX (not drop) 802.1ad frames */ + hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; + } + break; + case I40E_MAC_X722: + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | + I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) + hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + /* fall through */ + default: + break; + } + + /* Newer 
versions of firmware require lock when reading the NVM */ + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= 5)) + hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= 8)) { + hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; + hw->flags |= I40E_HW_FLAG_DROP_MODE; + } + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= 9)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED; +} + /** * i40e_init_adminq - main initialization routine for Admin Queue * @hw: pointer to the hardware structure @@ -520,19 +599,22 @@ shutdown_arq_out: **/ i40e_status i40e_init_adminq(struct i40e_hw *hw) { + struct i40e_adminq_info *aq = &hw->aq; + i40e_status ret_code; u16 cfg_ptr, oem_hi, oem_lo; u16 eetrack_lo, eetrack_hi; - i40e_status ret_code; int retry = 0; /* verify input for valid configuration */ - if ((hw->aq.num_arq_entries == 0) || - (hw->aq.num_asq_entries == 0) || - (hw->aq.arq_buf_size == 0) || - (hw->aq.asq_buf_size == 0)) { + if (aq->num_arq_entries == 0 || + aq->num_asq_entries == 0 || + aq->arq_buf_size == 0 || + aq->asq_buf_size == 0) { ret_code = I40E_ERR_CONFIG; goto init_adminq_exit; } + i40e_init_spinlock(&aq->asq_spinlock); + i40e_init_spinlock(&aq->arq_spinlock); /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -542,12 +624,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) /* allocate the ASQ */ ret_code = i40e_init_asq(hw); - if (ret_code) - goto init_adminq_destroy_locks; + if (ret_code != I40E_SUCCESS) + goto init_adminq_destroy_spinlocks; /* allocate the ARQ */ ret_code = i40e_init_arq(hw); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto init_adminq_free_asq; /* There are some cases where the firmware may not be quite ready @@ -556,11 +638,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) */ do { ret_code = i40e_aq_get_firmware_version(hw, - &hw->aq.fw_maj_ver, - &hw->aq.fw_min_ver, - &hw->aq.fw_build, - &hw->aq.api_maj_ver, - &hw->aq.api_min_ver, + &aq->fw_maj_ver, + &aq->fw_min_ver, + &aq->fw_build, + &aq->api_maj_ver, + &aq->api_min_ver, NULL); if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) break; @@ -571,6 +653,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) if (ret_code != I40E_SUCCESS) goto init_adminq_free_arq; + /* + * Some features were introduced in different FW API version + * for different MAC type. 
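[Editorial aside: the feature gating above repeats one comparison for every flag: the AQ API version must be at least a given major/minor pair. As a minimal sketch (illustrative only; the patch open-codes the test each time), the rule could be factored over the same i40e_adminq_info fields:

static inline bool i40e_aq_ver_ge(const struct i40e_adminq_info *aq,
				  u16 maj, u16 min)
{
	/* true when the firmware AQ API version is >= maj.min */
	return aq->api_maj_ver > maj ||
	       (aq->api_maj_ver == maj && aq->api_min_ver >= min);
}

With such a helper, the NVM-lock check would read "if (i40e_aq_ver_ge(aq, 1, 5))", and the 1.8 and 1.9 checks would take the same shape.]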
+ */ + i40e_set_hw_flags(hw); + /* get the NVM version info */ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, &hw->nvm.version); @@ -578,44 +666,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); - i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), - &oem_hi); - i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), - &oem_lo); + i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), &oem_hi); + i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), &oem_lo); hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; - if (hw->mac.type == I40E_MAC_XL710 && - hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { - hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; - hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; - } - if (hw->mac.type == I40E_MAC_X722 && - hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) { - hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; - } - - /* Newer versions of firmware require lock when reading the NVM */ - if (hw->aq.api_maj_ver > 1 || - (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 5)) - hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; - - /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ - if (hw->aq.api_maj_ver > 1 || - (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 7)) - hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; - - if (hw->aq.api_maj_ver > 1 || - (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 8)) { - hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; - hw->flags |= I40E_HW_FLAG_DROP_MODE; - } - - if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { + if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; goto init_adminq_free_arq; } @@ -625,7 +680,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) hw->nvm_release_on_done = false; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - ret_code = 0; + ret_code = I40E_SUCCESS; /* success! 
*/ goto init_adminq_exit; @@ -634,7 +689,9 @@ init_adminq_free_arq: i40e_shutdown_arq(hw); init_adminq_free_asq: i40e_shutdown_asq(hw); -init_adminq_destroy_locks: +init_adminq_destroy_spinlocks: + i40e_destroy_spinlock(&aq->asq_spinlock); + i40e_destroy_spinlock(&aq->arq_spinlock); init_adminq_exit: return ret_code; @@ -646,13 +703,15 @@ init_adminq_exit: **/ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; if (i40e_check_asq_alive(hw)) i40e_aq_queue_shutdown(hw, true); i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); + i40e_destroy_spinlock(&hw->aq.asq_spinlock); + i40e_destroy_spinlock(&hw->aq.arq_spinlock); if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); @@ -683,11 +742,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = (I40E_ADMINQ_CALLBACK)details->callback; - desc_cb = *desc; + i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_DMA); cb_func(hw, &desc_cb); } - memset(desc, 0, sizeof(*desc)); - memset(details, 0, sizeof(*details)); + i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM); + i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM); ntc++; if (ntc == asq->count) ntc = 0; @@ -717,23 +777,26 @@ static bool i40e_asq_done(struct i40e_hw *hw) } /** - * i40e_asq_send_command - send command to Admin Queue + * i40e_asq_send_command_atomic - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands * @cmd_details: pointer to command details structure + * @is_atomic_context: is the function called in an atomic context? * * This is the main send command driver routine for the Admin Queue send * queue. It runs the queue, cleans the queue, etc **/ -i40e_status i40e_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) +enum i40e_status_code +i40e_asq_send_command_atomic(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details, + bool is_atomic_context) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; struct i40e_aq_desc *desc_on_ring; @@ -741,7 +804,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, u16 retval = 0; u32 val = 0; - mutex_lock(&hw->aq.asq_mutex); + i40e_acquire_spinlock(&hw->aq.asq_spinlock); + + hw->aq.asq_last_status = I40E_AQ_RC_OK; if (hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -750,8 +815,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, goto asq_send_command_error; } - hw->aq.asq_last_status = I40E_AQ_RC_OK; - val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -762,25 +825,30 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { - *details = *cmd_details; + i40e_memcpy(details, + cmd_details, + sizeof(struct i40e_asq_cmd_details), + I40E_NONDMA_TO_NONDMA); /* If the cmd_details are defined copy the cookie. 
The - * cpu_to_le32 is not needed here because the data is ignored + * CPU_TO_LE32 is not needed here because the data is ignored * by the FW, only used by the driver */ if (details->cookie) { desc->cookie_high = - cpu_to_le32(upper_32_bits(details->cookie)); + CPU_TO_LE32(upper_32_bits(details->cookie)); desc->cookie_low = - cpu_to_le32(lower_32_bits(details->cookie)); + CPU_TO_LE32(lower_32_bits(details->cookie)); } } else { - memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + i40e_memset(details, 0, + sizeof(struct i40e_asq_cmd_details), + I40E_NONDMA_MEM); } /* clear requested flags and then set additional flags if defined */ - desc->flags &= ~cpu_to_le16(details->flags_dis); - desc->flags |= cpu_to_le16(details->flags_ena); + desc->flags &= ~CPU_TO_LE16(details->flags_dis); + desc->flags |= CPU_TO_LE16(details->flags_ena); if (buff_size > hw->aq.asq_buf_size) { i40e_debug(hw, @@ -818,22 +886,24 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); /* if the desc is available copy the temp desc to the right place */ - *desc_on_ring = *desc; + i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc), + I40E_NONDMA_TO_DMA); /* if buff is not NULL assume indirect command */ if (buff != NULL) { dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); /* copy the user buff into the respective DMA buff */ - memcpy(dma_buff->va, buff, buff_size); - desc_on_ring->datalen = cpu_to_le16(buff_size); + i40e_memcpy(dma_buff->va, buff, buff_size, + I40E_NONDMA_TO_DMA); + desc_on_ring->datalen = CPU_TO_LE16(buff_size); /* Update the address values in the desc with the pa value * for respective buffer */ desc_on_ring->params.external.addr_high = - cpu_to_le32(upper_32_bits(dma_buff->pa)); + CPU_TO_LE32(upper_32_bits(dma_buff->pa)); desc_on_ring->params.external.addr_low = - cpu_to_le32(lower_32_bits(dma_buff->pa)); + CPU_TO_LE32(lower_32_bits(dma_buff->pa)); } /* bump the tail */ @@ -858,17 +928,22 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - udelay(50); + if (is_atomic_context) + udelay(50); + else + usleep_range(40, 60); total_delay += 50; } while (total_delay < hw->aq.asq_cmd_timeout); } /* if ready, copy the desc back to temp */ if (i40e_asq_done(hw)) { - *desc = *desc_on_ring; + i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_NONDMA); if (buff != NULL) - memcpy(buff, dma_buff->va, buff_size); - retval = le16_to_cpu(desc->retval); + i40e_memcpy(buff, dma_buff->va, buff_size, + I40E_DMA_TO_NONDMA); + retval = LE16_TO_CPU(desc->retval); if (retval != 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -880,7 +955,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, } cmd_completed = true; if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) - status = 0; + status = I40E_SUCCESS; else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) status = I40E_ERR_NOT_READY; else @@ -894,7 +969,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, /* save writeback aq if requested */ if (details->wb_desc) - *details->wb_desc = *desc_on_ring; + i40e_memcpy(details->wb_desc, desc_on_ring, + sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); /* update the error if time out occurred */ if ((!cmd_completed) && @@ -911,10 +987,22 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, } asq_send_command_error: - mutex_unlock(&hw->aq.asq_mutex); + i40e_release_spinlock(&hw->aq.asq_spinlock); return status; } +// inline function with previous 
signature to avoid modifying +// all existing calls to i40e_asq_send_command +inline i40e_status i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_asq_send_command_atomic(hw, desc, buff, buff_size, + cmd_details, false); +} + /** * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) @@ -926,9 +1014,10 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) { /* zero out the desc */ - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); - desc->opcode = cpu_to_le16(opcode); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); + i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), + I40E_NONDMA_MEM); + desc->opcode = CPU_TO_LE16(opcode); + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI); } /** @@ -945,7 +1034,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *pending) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; @@ -955,10 +1044,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, u16 ntu; /* pre-clean the event info */ - memset(&e->desc, 0, sizeof(e->desc)); + i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM); /* take the lock before we start messing with the ring */ - mutex_lock(&hw->aq.arq_mutex); + i40e_acquire_spinlock(&hw->aq.arq_spinlock); if (hw->aq.arq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -980,8 +1069,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, desc_idx = ntc; hw->aq.arq_last_status = - (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); - flags = le16_to_cpu(desc->flags); + (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval); + flags = LE16_TO_CPU(desc->flags); if (flags & I40E_AQ_FLAG_ERR) { ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; i40e_debug(hw, @@ -990,12 +1079,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq_last_status); } - e->desc = *desc; - datalen = le16_to_cpu(desc->datalen); + i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_NONDMA); + datalen = LE16_TO_CPU(desc->datalen); e->msg_len = min(datalen, e->buf_len); if (e->msg_buf != NULL && (e->msg_len != 0)) - memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_len); + i40e_memcpy(e->msg_buf, + hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len, I40E_DMA_TO_NONDMA); i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, @@ -1006,14 +1097,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, * size */ bi = &hw->aq.arq.r.arq_bi[ntc]; - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) - desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); - desc->datalen = cpu_to_le16((u16)bi->size); - desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); - desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); + desc->datalen = CPU_TO_LE16((u16)bi->size); + desc->params.external.addr_high = CPU_TO_LE32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = CPU_TO_LE32(lower_32_bits(bi->pa)); /* set tail 
= the last cleaned desc index. */ wr32(hw, hw->aq.arq.tail, ntc); @@ -1024,27 +1115,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; - i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc); + i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc); clean_arq_element_out: /* Set pending if needed, unlock and return */ - if (pending) + if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: - mutex_unlock(&hw->aq.arq_mutex); + i40e_release_spinlock(&hw->aq.arq_spinlock); return ret_code; } -static void i40e_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index edec3df78971..f0af697c4a20 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ @@ -75,8 +75,8 @@ struct i40e_adminq_info { u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - struct mutex asq_mutex; /* Send queue lock */ - struct mutex arq_mutex; /* Receive queue lock */ + struct i40e_spinlock asq_spinlock; /* Send queue spinlock */ + struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */ /* last status values on send and receive queues */ enum i40e_admin_queue_err asq_last_status; @@ -88,7 +88,7 @@ struct i40e_adminq_info { * aq_ret: AdminQ handler error code can override aq_rc * aq_rc: AdminQ firmware error code to convert **/ -static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) +static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 69a2daaca5c5..f4940fe538c2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ @@ -12,7 +12,7 @@ #define I40E_FW_API_VERSION_MAJOR 0x0001 #define I40E_FW_API_VERSION_MINOR_X722 0x0009 -#define I40E_FW_API_VERSION_MINOR_X710 0x0009 +#define I40E_FW_API_VERSION_MINOR_X710 0x000A #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ I40E_FW_API_VERSION_MINOR_X710 : \ @@ -67,17 +67,17 @@ struct i40e_aq_desc { #define I40E_AQ_FLAG_EI_SHIFT 14 #define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -135,6 +135,7 @@ enum i40e_admin_queue_opc { /* WoL commands */ i40e_aqc_opc_set_wol_filter = 0x0120, i40e_aqc_opc_get_wake_reason = 0x0121, + i40e_aqc_opc_clear_all_wol_filters = 0x025E, /* internal switch commands */ i40e_aqc_opc_get_switch_config = 0x0200, @@ -175,6 +176,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_add_cloud_filters = 0x025C, i40e_aqc_opc_remove_cloud_filters = 0x025D, i40e_aqc_opc_clear_wol_switch_filters = 0x025E, + i40e_aqc_opc_replace_cloud_filters = 0x025F, i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, @@ -211,6 +213,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_query_hmc_resource_profile = 0x0500, i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + /* phy commands */ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -235,6 +239,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_progress = 0x0706, i40e_aqc_opc_oem_post_update = 0x0720, i40e_aqc_opc_thermal_sensor = 0x0721, @@ -524,6 +529,7 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 #define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_WOL_PRESERVE_STATUS 0x200 #define I40E_AQC_ADDR_VALID_MASK 0x3F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -584,6 +590,7 @@ struct i40e_aqc_set_wol_filter { __le16 cmd_flags; #define I40E_AQC_SET_WOL_FILTER 0x8000 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 +#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 #define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 #define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; @@ -757,6 +764,7 @@ struct i40e_aqc_set_switch_config { /* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
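[Editorial aside: note the pattern earlier in this header: every BIT(n) use is rewritten as an open-coded (1 << n). That is consistent with the rest of the patch, which also swaps mutexes and byte-order helpers for driver-local wrappers, and presumably keeps the header compilable outside the kernel tree, where BIT() is not defined. An out-of-tree consumer could equally have supplied a shim (hypothetical; not something this patch adds):

/* Hypothetical fallback for non-kernel builds; the patch instead
 * open-codes the shifts so no such definition is needed. */
#ifndef BIT
#define BIT(n) (1UL << (n))
#endif
]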
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004 __le16 valid_flags; /* The ethertype in switch_tag is dropped on ingress and used * internally by the switch. Set this to zero for the default @@ -1355,7 +1363,8 @@ struct i40e_aqc_add_remove_cloud_filters { #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) u8 big_buffer_flag; -#define I40E_AQC_ADD_CLOUD_CMD_BB 1 +#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1 +#define I40E_AQC_ADD_CLOUD_CMD_BB 1 u8 reserved2[3]; __le32 addr_high; __le32 addr_low; @@ -1437,6 +1446,45 @@ struct i40e_aqc_cloud_filters_element_data { u8 response_reserved[7]; }; +/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when + * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. + */ +struct i40e_aqc_add_rm_cloud_filt_elem_ext { + struct i40e_aqc_cloud_filters_element_data element; + u16 general_fields[32]; +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 +}; + I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); /* i40e_aqc_cloud_filters_element_bb is used when @@ -1503,16 +1551,17 @@ struct i40e_filter_data { I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); struct i40e_aqc_replace_cloud_filters_cmd { - u8 valid_flags; + u8 valid_flags; #define I40E_AQC_REPLACE_L1_FILTER 0x0 #define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 #define I40E_AQC_GET_CLOUD_FILTERS 0x2 #define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 #define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 - u8 old_filter_type; - u8 new_filter_type; - u8 tr_bit; - u8 reserved[4]; + u8 old_filter_type; + u8 new_filter_type; + u8 tr_bit; + u8 tr_bit2; + u8 reserved[3]; __le32 addr_high; __le32 addr_low; }; @@ -1520,27 +1569,27 @@ struct i40e_aqc_replace_cloud_filters_cmd { I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); struct i40e_aqc_replace_cloud_filters_cmd_buf { - u8 data[32]; + u8 data[32]; /* Filter type INPUT codes*/ #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL) /* Field Vector offsets */ -#define 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 /* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 /* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 - struct i40e_filter_data filters[8]; +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 + struct i40e_filter_data filters[8]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); @@ -1951,14 +2000,14 @@ enum i40e_aq_phy_type { enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT), I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT), - I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), - I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT), }; struct i40e_aqc_module_desc { @@ -1984,18 +2033,21 @@ struct i40e_aq_get_phy_abilities_resp { #define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 #define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; +#define I40E_AQ_EEE_AUTO 0x0001 #define I40E_AQ_EEE_100BASE_TX 0x0002 #define I40E_AQ_EEE_1000BASE_T 0x0004 #define I40E_AQ_EEE_10GBASE_T 0x0008 #define I40E_AQ_EEE_1000BASE_KX 0x0010 #define I40E_AQ_EEE_10GBASE_KX4 0x0020 #define I40E_AQ_EEE_10GBASE_KR 0x0040 +#define I40E_AQ_EEE_2_5GBASE_T 0x0100 +#define I40E_AQ_EEE_5GBASE_T 0x0200 __le32 eeer_val; u8 d3_lpan; #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 +#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01 +#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 #define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 @@ -2035,10 +2087,6 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */ __le32 eeer; u8 low_power_ctrl; u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 u8 
fec_config; #define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) #define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) @@ -2197,11 +2245,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { - __le16 lb_mode; + u8 lb_level; +#define I40E_AQ_LB_NONE 0 +#define I40E_AQ_LB_MAC 1 +#define I40E_AQ_LB_SERDES 2 +#define I40E_AQ_LB_PHY_INT 3 +#define I40E_AQ_LB_PHY_EXT 4 +#define I40E_AQ_LB_BASE_T_PCS 5 +#define I40E_AQ_LB_BASE_T_EXT 6 #define I40E_AQ_LB_PHY_LOCAL 0x01 #define I40E_AQ_LB_PHY_REMOTE 0x02 #define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; + u8 lb_type; +#define I40E_AQ_LB_LOCAL 0 +#define I40E_AQ_LB_FAR 0x01 + u8 speed; +#define I40E_AQ_LB_SPEED_NONE 0 +#define I40E_AQ_LB_SPEED_1G 1 +#define I40E_AQ_LB_SPEED_10G 2 +#define I40E_AQ_LB_SPEED_40G 3 +#define I40E_AQ_LB_SPEED_20G 4 + u8 force_speed; + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); @@ -2218,7 +2283,7 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 /* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 -/* Disable link manageability on all ports */ +/* Disable link manageability on all ports needs both bits 4 and 5 */ #define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 u8 reserved[15]; }; @@ -2231,15 +2296,32 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +#pragma pack(1) /* Run PHY Activity (0x0626) */ struct i40e_aqc_run_phy_activity { - __le16 activity_id; - u8 flags; - u8 reserved1; - __le32 control; - __le32 data; - u8 reserved2[4]; + u8 cmd_flags; + __le16 activity_id; +#define I40E_AQ_RUN_PHY_ACT_ID_USR_DFND 0x10 + u8 reserved; + union { + struct { + __le32 dnl_opcode; +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR 0x801a +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT 0x801b +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR 0x1801b + __le32 data; + u8 reserved2[4]; + } cmd; + struct { + __le32 cmd_status; +#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC 0x4 +#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK 0xFFFF + __le32 data0; + __le32 data1; + } resp; + } params; }; +#pragma pack() I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); @@ -2250,8 +2332,14 @@ struct i40e_aqc_phy_register_access { #define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 - u8 dev_address; - u8 reserved1[2]; + u8 dev_addres; + u8 cmd_flags; +#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01 +#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02 +#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2 +#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \ + I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) + u8 reserved1; __le32 reg_address; __le32 reg_value; u8 reserved2[4]; @@ -2285,8 +2373,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; __le16 element_id; /* Feature/field ID */ @@ -2311,9 +2399,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - 
BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) + (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2598,20 +2686,7 @@ struct i40e_aqc_get_cee_dcb_cfg_resp { u8 oper_tc_bw[8]; u8 oper_pfc_en; __le16 oper_app_prio; -#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 -#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) -#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 -#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) -#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 -#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) -#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) __le32 tlv_status; -#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 -#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) -#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 -#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) -#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 -#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) u8 reserved[12]; }; @@ -2622,11 +2697,12 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); */ struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \ - BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; @@ -2638,13 +2714,21 @@ struct i40e_aqc_lldp_set_local_mib { I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); +struct i40e_aqc_lldp_set_local_mib_resp { +#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01 + u8 status; + u8 reserved[15]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp); + /* Stop/Start LLDP Agent (direct 0x0A09) * Used for stopping/starting specific LLDP agent. e.g. 
DCBx */ struct i40e_aqc_lldp_stop_start_specific_agent { #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \ - BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2707,7 +2791,7 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -2727,13 +2811,14 @@ struct i40e_aqc_get_set_rss_key_data { I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index cb8689222c8b..d7feb645fb29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index e81530ca08d0..2877a120358f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
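[Editorial aside on the #pragma pack(1) added around i40e_aqc_run_phy_activity above: a direct admin-queue command's parameter block must overlay the 16-byte params area of the descriptor exactly, which is what the I40E_CHECK_CMD_LENGTH() invocations throughout this header assert at build time. Expressed with C11 _Static_assert (a sketch; the driver's own macro uses an older typedef-based trick):

/* Fail the build unless a direct AQ command payload is exactly the
 * 16 bytes that fit in struct i40e_aq_desc's params union. */
#define CHECK_AQ_CMD_LEN(X) \
	_Static_assert(sizeof(struct X) == 16, #X " must be 16 bytes")
]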
*/ #include <linux/list.h> #include <linux/errno.h> @@ -30,17 +30,23 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, bool is_vf, u32 vf_id, u32 flag, u32 valid_flag); +static int i40e_client_device_register(struct i40e_info *ldev); + +static void i40e_client_device_unregister(struct i40e_info *ldev); + static struct i40e_ops i40e_lan_ops = { .virtchnl_send = i40e_client_virtchnl_send, .setup_qvlist = i40e_client_setup_qvlist, .request_reset = i40e_client_request_reset, .update_vsi_ctxt = i40e_client_update_vsi_ctxt, + .client_device_register = i40e_client_device_register, + .client_device_unregister = i40e_client_device_unregister, }; /** * i40e_client_get_params - Get the params that can change at runtime * @vsi: the VSI with the message * @params: client param struct * **/ static @@ -71,6 +77,10 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) return 0; } +static void i40e_client_device_release(struct device *dev) +{ +} + /** * i40e_notify_client_of_vf_msg - call the client vf message callback * @vsi: the VSI with the message @@ -278,18 +288,13 @@ void i40e_client_update_msix_info(struct i40e_pf *pf) /** * i40e_client_add_instance - add a client instance struct to the instance list * @pf: pointer to the board struct - * @client: pointer to a client struct in the client list. - * @existing: if there was already an existing instance - * **/ static void i40e_client_add_instance(struct i40e_pf *pf) { struct i40e_client_instance *cdev = NULL; struct netdev_hw_addr *mac = NULL; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - - if (!registered_client || pf->cinst) - return; + struct platform_device *platform_dev; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) @@ -308,6 +313,12 @@ static void i40e_client_add_instance(struct i40e_pf *pf) cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver; cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver; cdev->lan_info.fw_build = pf->hw.aq.fw_build; + platform_dev = &cdev->lan_info.platform_dev; + platform_dev->name = "i40e_rdma"; + platform_dev->id = PLATFORM_DEVID_AUTO; + platform_dev->id_auto = true; + platform_dev->dev.release = i40e_client_device_release; + platform_dev->dev.parent = &pf->pdev->dev; set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state); if (i40e_client_get_params(vsi, &cdev->lan_info.params)) { @@ -323,10 +334,12 @@ static void i40e_client_add_instance(struct i40e_pf *pf) else dev_err(&pf->pdev->dev, "MAC address list is empty!\n"); - cdev->client = registered_client; + cdev->client = NULL; pf->cinst = cdev; - i40e_client_update_msix_info(pf); + cdev->lan_info.msix_count = pf->num_iwarp_msix; + cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; + platform_device_register(platform_dev); } /** @@ -347,7 +360,7 @@ void i40e_client_del_instance(struct i40e_pf *pf) **/ void i40e_client_subtask(struct i40e_pf *pf) { - struct i40e_client *client = registered_client; + struct i40e_client *client; struct i40e_client_instance *cdev; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int ret = 0; @@ -361,9 +374,11 @@ void i40e_client_subtask(struct i40e_pf *pf) test_bit(__I40E_CONFIG_BUSY, pf->state)) return; - if (!client || !cdev) + if (!cdev || !cdev->client) return; + client = cdev->client; + /* Here we handle client opens. If the client is down, and * the netdev is registered, then open the client.
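[Editorial aside: the rework of i40e_client_add_instance() above inverts the old binding model: instead of waiting for a registered client, the LAN driver now always publishes an "i40e_rdma" platform device per PF and lets the driver core do the matching. The peer RDMA side would look roughly like this skeleton (only the device name comes from the patch; everything else is an assumption about the consumer):

#include <linux/module.h>
#include <linux/platform_device.h>

static int i40e_rdma_probe(struct platform_device *pdev)
{
	/* The patch embeds the platform_device in struct i40e_info,
	 * so the peer would recover the LAN context with
	 * container_of(pdev, struct i40e_info, platform_dev)
	 * and then call ldev->ops->client_device_register(ldev). */
	return 0;
}

static int i40e_rdma_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver i40e_rdma_driver = {
	.probe = i40e_rdma_probe,
	.remove = i40e_rdma_remove,
	.driver = {
		.name = "i40e_rdma",	/* matches the name set above */
	},
};
module_platform_driver(i40e_rdma_driver);
MODULE_LICENSE("GPL");
]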
*/ @@ -424,16 +439,7 @@ int i40e_lan_add_device(struct i40e_pf *pf) pf->hw.pf_id, pf->hw.bus.bus_id, pf->hw.bus.device, pf->hw.bus.func); - /* If a client has already been registered, we need to add an instance - * of it to our new LAN device. - */ - if (registered_client) - i40e_client_add_instance(pf); - - /* Since in some cases register may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients if - * they can be launched at probe time. - */ + i40e_client_add_instance(pf); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); @@ -453,6 +459,8 @@ int i40e_lan_del_device(struct i40e_pf *pf) struct i40e_device *ldev, *tmp; int ret = -ENODEV; + platform_device_unregister(&pf->cinst->lan_info.platform_dev); + /* First, remove any client instance. */ i40e_client_del_instance(pf); @@ -468,6 +476,7 @@ int i40e_lan_del_device(struct i40e_pf *pf) break; } } + mutex_unlock(&i40e_device_mutex); return ret; } @@ -505,10 +514,7 @@ static void i40e_client_release(struct i40e_client *client) "Client %s instance for PF id %d closed\n", client->name, pf->hw.pf_id); } - /* delete the client instance */ - i40e_client_del_instance(pf); - dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", - client->name); + cdev->client = NULL; clear_bit(__I40E_SERVICE_SCHED, pf->state); } mutex_unlock(&i40e_device_mutex); @@ -527,7 +533,7 @@ static void i40e_client_prepare(struct i40e_client *client) mutex_lock(&i40e_device_mutex); list_for_each_entry(ldev, &i40e_devices, list) { pf = ldev->pf; - i40e_client_add_instance(pf); + pf->cinst->client = registered_client; /* Start the client subtask */ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); @@ -554,7 +560,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev, i40e_status err; err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP, - 0, msg, len, NULL); + I40E_SUCCESS, msg, len, NULL); if (err) dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n", err, hw->aq.asq_last_status); @@ -578,9 +584,11 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev, struct i40e_hw *hw = &pf->hw; struct i40e_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; + u32 size; - ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info, - qvlist_info->num_vectors - 1), GFP_KERNEL); + size = sizeof(struct i40e_qvlist_info) + + (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1)); + ldev->qvlist_info = kzalloc(size, GFP_KERNEL); if (!ldev->qvlist_info) return -ENOMEM; ldev->qvlist_info->num_vectors = qvlist_info->num_vectors; @@ -733,6 +741,66 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, return err; } +static int i40e_client_device_register(struct i40e_info *ldev) +{ + struct i40e_client *client; + struct i40e_pf *pf; + + if (!ldev) { + pr_err("Failed to reg client dev: ldev ptr NULL\n"); + return -EINVAL; + } + + client = ldev->client; + pf = ldev->pf; + if (!client) { + pr_err("Failed to reg client dev: client ptr NULL\n"); + return -EINVAL; + } + + if (!ldev->ops || !client->ops) { + pr_err("Failed to reg client dev: client dev peer_ops/ops NULL\n"); + return -EINVAL; + } + + if (client->version.major != I40E_CLIENT_VERSION_MAJOR || + client->version.minor != I40E_CLIENT_VERSION_MINOR) { + pr_err("i40e: Failed to register client %s due to mismatched client interface version\n", + client->name); + pr_err("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", + 
client->version.major, client->version.minor, + client->version.build, + i40e_client_interface_version_str); + return -EINVAL; + } + + pf->cinst->client = ldev->client; + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + i40e_service_event_schedule(pf); + + return 0; +} + +static void i40e_client_device_unregister(struct i40e_info *ldev) +{ + struct i40e_pf *pf = ldev->pf; + struct i40e_client_instance *cdev = pf->cinst; + + while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) + usleep_range(500, 1000); + + if (!cdev || !cdev->client || !cdev->client->ops || + !cdev->client->ops->close) { + dev_err(&pf->pdev->dev, "Cannot close client device\n"); + return; + } + cdev->client->ops->close(&cdev->lan_info, cdev->client, false); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + i40e_client_release_qvlist(&cdev->lan_info); + pf->cinst->client = NULL; + clear_bit(__I40E_SERVICE_SCHED, pf->state); +} + /** * i40e_register_client - Register a i40e client driver with the L2 driver * @client: pointer to the i40e_client struct @@ -777,7 +845,7 @@ int i40e_register_client(struct i40e_client *client) i40e_client_prepare(client); - pr_info("i40e: Registered client %s\n", client->name); + pr_info("i40e: Registered client %s\n", client->name); out: return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index 72994baf4941..8d64175c6217 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -1,9 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ +#include <linux/platform_device.h> + #define I40E_CLIENT_STR_LENGTH 10 /* Client interface version should be updated anytime there is a change in the @@ -80,6 +82,7 @@ struct i40e_params { /* Structure to hold Lan device info for a client device */ struct i40e_info { + struct platform_device platform_dev; struct i40e_client_version version; u8 lanmac[6]; struct net_device *netdev; @@ -97,6 +100,7 @@ struct i40e_info { struct i40e_qvlist_info *qvlist_info; struct i40e_params params; struct i40e_ops *ops; + struct i40e_client *client; u16 msix_count; /* number of msix vectors*/ /* Array down below will be dynamically allocated based on msix_count */ @@ -132,6 +136,10 @@ struct i40e_ops { struct i40e_client *client, bool is_vf, u32 vf_id, u32 flag, u32 valid_flag); + + int (*client_device_register)(struct i40e_info *ldev); + + void (*client_device_unregister)(struct i40e_info *ldev); }; struct i40e_client_ops { @@ -188,7 +196,8 @@ struct i40e_client { #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) u8 type; #define I40E_CLIENT_IWARP 0 - const struct i40e_client_ops *ops; /* client ops provided by the client */ + /* client ops provided by the client */ + const struct i40e_client_ops *ops; }; static inline bool i40e_client_is_registered(struct i40e_client *client) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 7560f06768e0..1faef41f54b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */
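[Editorial aside: the conversions that dominate the rest of this file -- cpu_to_le16() to CPU_TO_LE16(), mutexes to i40e_spinlock, bare 0 to I40E_SUCCESS -- all point at an OS-abstraction layer shared with non-Linux builds. On Linux those wrappers would reduce to kernel primitives, roughly as below (an assumption about the osdep header, which this diff does not show):

/* Presumed Linux-side definitions behind the wrappers used here;
 * the actual i40e osdep header is not part of this diff. */
#include <asm/byteorder.h>
#include <linux/mutex.h>

#define CPU_TO_LE16(o) cpu_to_le16(o)
#define CPU_TO_LE32(o) cpu_to_le32(o)
#define LE16_TO_CPU(o) le16_to_cpu(o)
#define LE32_TO_CPU(o) le32_to_cpu(o)

struct i40e_spinlock {
	struct mutex lock;	/* a sleeping lock despite the name */
};

static inline void i40e_acquire_spinlock(struct i40e_spinlock *sp)
{
	mutex_lock(&sp->lock);
}

static inline void i40e_release_spinlock(struct i40e_spinlock *sp)
{
	mutex_unlock(&sp->lock);
}
]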
-#include "i40e.h" #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" -#include <linux/avf/virtchnl.h> +#include "virtchnl.h" /** * i40e_set_mac_type - Sets MAC type @@ -16,7 +15,7 @@ **/ i40e_status i40e_set_mac_type(struct i40e_hw *hw) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { @@ -29,8 +28,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_B: case I40E_DEV_ID_10G_SFP: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: case I40E_DEV_ID_25G_B: @@ -128,7 +129,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) { switch (stat_err) { - case 0: + case I40E_SUCCESS: return "OK"; case I40E_ERR_NVM: return "I40E_ERR_NVM"; @@ -283,40 +284,39 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u32 effective_mask = hw->debug_mask & mask; - char prefix[27]; - u16 len; u8 *buf = (u8 *)buffer; + u16 len; + char prefix[27]; if (!effective_mask || !desc) return; - len = le16_to_cpu(aq_desc->datalen); + len = LE16_TO_CPU(aq_desc->datalen); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(aq_desc->opcode), - le16_to_cpu(aq_desc->flags), - le16_to_cpu(aq_desc->datalen), - le16_to_cpu(aq_desc->retval)); + LE16_TO_CPU(aq_desc->opcode), + LE16_TO_CPU(aq_desc->flags), + LE16_TO_CPU(aq_desc->datalen), + LE16_TO_CPU(aq_desc->retval)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\tcookie (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->cookie_high), - le32_to_cpu(aq_desc->cookie_low)); + LE32_TO_CPU(aq_desc->cookie_high), + LE32_TO_CPU(aq_desc->cookie_low)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\tparam (0,1) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.internal.param0), - le32_to_cpu(aq_desc->params.internal.param1)); + LE32_TO_CPU(aq_desc->params.internal.param0), + LE32_TO_CPU(aq_desc->params.internal.param1)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\taddr (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.external.addr_high), - le32_to_cpu(aq_desc->params.external.addr_low)); + LE32_TO_CPU(aq_desc->params.external.addr_high), + LE32_TO_CPU(aq_desc->params.external.addr_low)); - if (buffer && buf_len != 0 && len != 0 && + if (buffer && (buf_len != 0) && (len != 0) && (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - snprintf(prefix, sizeof(prefix), "i40e %02x:%02x.%x: \t0x", hw->bus.bus_id, @@ -338,9 +338,8 @@ bool i40e_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) return !!(rd32(hw, hw->aq.asq.len) & - I40E_PF_ATQLEN_ATQENABLE_MASK); - else - return false; + I40E_PF_ATQLEN_ATQENABLE_MASK); + return false; } /** @@ -363,7 +362,7 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, i40e_aqc_opc_queue_shutdown); if (unloading) - cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; @@ -381,9 +380,9 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, * Internal function to get or set RSS look up table
**/ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, - u16 vsi_id, bool pf_lut, - u8 *lut, u16 lut_size, - bool set) + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) { i40e_status status; struct i40e_aq_desc desc; @@ -398,22 +397,22 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, i40e_aqc_opc_get_rss_lut); /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << + CPU_TO_LE16((u16)((vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); if (pf_lut) - cmd_resp->flags |= cpu_to_le16((u16) + cmd_resp->flags |= CPU_TO_LE16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); else - cmd_resp->flags |= cpu_to_le16((u16) + cmd_resp->flags |= CPU_TO_LE16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); @@ -434,7 +433,7 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, * get the RSS lookup table, PF or VSI type **/ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) + bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false); @@ -451,7 +450,7 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, * set the RSS lookup table, PF or VSI type **/ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) + bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); } @@ -484,14 +483,14 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, i40e_aqc_opc_get_rss_key); /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << + CPU_TO_LE16((u16)((vsi_id << I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); @@ -506,8 +505,8 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, * **/ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); } @@ -521,8 +520,8 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, * set the RSS key per VSI **/ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } @@ -906,7 +905,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { **/ i40e_status i40e_init_shared_code(struct i40e_hw *hw) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; u32 port, 
ari, func_rid; i40e_set_mac_type(hw); @@ -933,9 +932,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) else hw->pf_id = (u8)(func_rid & 0x7); - if (hw->mac.type == I40E_MAC_X722) - hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | - I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + /* NVMUpdate features structure initialization */ + hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR; + hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR; + hw->nvmupd_features.size = sizeof(hw->nvmupd_features); + i40e_memset(hw->nvmupd_features.features, 0x0, + I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN * + sizeof(*hw->nvmupd_features.features), + I40E_NONDMA_MEM); + + hw->nvmupd_features.features[0] = I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT; status = i40e_init_nvm(hw); return status; @@ -959,11 +965,11 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, addrs, sizeof(*addrs), cmd_details); - *flags = le16_to_cpu(cmd_data->command_flags); + *flags = LE16_TO_CPU(cmd_data->command_flags); return status; } @@ -986,9 +992,9 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); - cmd_data->command_flags = cpu_to_le16(flags); - cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); - cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | + cmd_data->command_flags = CPU_TO_LE16(flags); + cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]); + cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) | ((u32)mac_addr[4] << 8) | mac_addr[5]); @@ -1047,7 +1053,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure - * @queue: target PF queue index + * @queue: target pf queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable @@ -1085,28 +1091,28 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) * Reads the part number string from the EEPROM. 
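[Editorial aside: i40e_read_pba_string() below walks the PBA block word by word: a flags word that must read 0xFAFA, a pointer word, a size word, and then the part-number data itself, where each 16-bit NVM word packs two ASCII characters, high byte first. Schematically (an illustration mirroring the unpacking in the loop that follows):

/* Each PBA data word carries two characters of the part number. */
static void pba_word_to_chars(u16 pba_word, u8 out[2])
{
	out[0] = (pba_word >> 8) & 0xFF;	/* high byte first */
	out[1] = pba_word & 0xFF;
}
]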
**/ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size) + u32 pba_num_size) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; u16 i = 0; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); - if (status || (pba_word != 0xFAFA)) { + if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) { hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); return status; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); - if (status) { + if (status != I40E_SUCCESS) { hw_dbg(hw, "Failed to read PBA Block pointer.\n"); return status; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); - if (status) { + if (status != I40E_SUCCESS) { hw_dbg(hw, "Failed to read PBA Block size.\n"); return status; } @@ -1122,7 +1128,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, for (i = 0; i < pba_size; i++) { status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); - if (status) { + if (status != I40E_SUCCESS) { hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); return status; } @@ -1200,14 +1206,14 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) * @retry_limit: how many times to retry before failure **/ static i40e_status i40e_poll_globr(struct i40e_hw *hw, - u32 retry_limit) + u32 retry_limit) { u32 cnt, reg = 0; for (cnt = 0; cnt < retry_limit; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) - return 0; + return I40E_SUCCESS; msleep(100); } @@ -1217,7 +1223,6 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw, return I40E_ERR_RESET_FAILED; } -#define I40E_PF_RESET_WAIT_COUNT_A0 200 #define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF @@ -1238,13 +1243,10 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) * couple counts longer to be sure we don't just miss the end. */ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & - I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> - I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - /* It can take upto 15 secs for GRST steady state. - * Bump it to 16 secs max to be safe. 
- */ - grst_del = grst_del * 20; + grst_del = min(grst_del * 20, 160U); for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); @@ -1281,14 +1283,11 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) */ if (!cnt) { u32 reg2 = 0; - if (hw->revision_id == 0) - cnt = I40E_PF_RESET_WAIT_COUNT_A0; - else - cnt = I40E_PF_RESET_WAIT_COUNT; + reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); - for (; cnt; cnt--) { + for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; @@ -1298,7 +1297,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) usleep_range(1000, 2000); } if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { - if (i40e_poll_globr(hw, grst_del)) + if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS) return I40E_ERR_RESET_FAILED; } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { hw_dbg(hw, "PF reset polling failed to complete.\n"); @@ -1308,7 +1307,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) i40e_clear_pxe_mode(hw); - return 0; + return I40E_SUCCESS; } /** @@ -1329,18 +1328,18 @@ void i40e_clear_hw(struct i40e_hw *hw) u32 val; u32 eol = 0x7ff; - /* get number of interrupts, queues, and VFs */ + /* get number of interrupts, queues, and vfs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> - I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; + I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> - I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; + I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; val = rd32(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> - I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; + I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> - I40E_PFLAN_QALLOC_LASTQ_SHIFT; + I40E_PFLAN_QALLOC_LASTQ_SHIFT; if (val & I40E_PFLAN_QALLOC_VALID_MASK) num_queues = (j - base_queue) + 1; else @@ -1348,9 +1347,9 @@ void i40e_clear_hw(struct i40e_hw *hw) val = rd32(hw, I40E_PF_VT_PFALLOC); i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> - I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; + I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> - I40E_PF_VT_PFALLOC_LASTVF_SHIFT; + I40E_PF_VT_PFALLOC_LASTVF_SHIFT; if (val & I40E_PF_VT_PFALLOC_VALID_MASK) num_vfs = (j - i) + 1; else @@ -1413,20 +1412,8 @@ void i40e_clear_hw(struct i40e_hw *hw) **/ void i40e_clear_pxe_mode(struct i40e_hw *hw) { - u32 reg; - if (i40e_check_asq_alive(hw)) i40e_aq_clear_pxe_mode(hw, NULL); - - /* Clear single descriptor fetch/write-back mode */ - reg = rd32(hw, I40E_GLLAN_RCTL_0); - - if (hw->revision_id == 0) { - /* As a work around clear PXE_MODE instead of setting it */ - wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); - } else { - wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); - } } /** @@ -1441,9 +1428,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) u32 gpio_val = 0; u32 port; - if (!hw->func_caps.led[idx]) + if (!I40E_IS_X710TL_DEVICE(hw->device_id) && + !hw->func_caps.led[idx]) return 0; - gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; @@ -1462,8 +1449,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) #define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC #define I40E_MAC_ACTIVITY 0xD +#define I40E_FW_LED BIT(4) +#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) + 
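Reviewer note (annotation, not part of the patch): the new I40E_FW_LED and I40E_LED_MODE_VALID defines above, together with the I40E_PIN_FUNC_* values just below, replace the old magic-number range check in i40e_led_set() -- "if (mode & 0xfffffff0)" -- with a mask derived from the GLGEN_GPIO_CTL register layout, and let X710TL parts route the pin to either the SDP or the LED function. A minimal sketch of the resulting validity test, assuming only the mask macros shown here (the helper name is hypothetical, for illustration):

	/* Reject any bits outside the shifted-down LED_MODE field.
	 * I40E_FW_LED (BIT(4)) lies inside that field, so firmware-driven
	 * LED modes on X710TL parts still pass the check.
	 */
	static bool i40e_led_mode_is_valid(u32 mode)
	{
		return !(mode & ~I40E_LED_MODE_VALID);
	}

With this mask in place, i40e_led_set() below can return early on an invalid mode instead of merely logging it.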
#define I40E_LED0 22 +#define I40E_PIN_FUNC_SDP 0x0 +#define I40E_PIN_FUNC_LED 0x1 + /** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct @@ -1475,6 +1469,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) **/ u32 i40e_led_get(struct i40e_hw *hw) { + u32 current_mode = 0; u32 mode = 0; int i; @@ -1487,6 +1482,21 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: + continue; + default: + break; + } + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; @@ -1506,10 +1516,13 @@ u32 i40e_led_get(struct i40e_hw *hw) **/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { + u32 current_mode = 0; int i; - if (mode & 0xfffffff0) + if (mode & ~I40E_LED_MODE_VALID) { hw_dbg(hw, "invalid mode passed in %X\n", mode); + return; + } /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 @@ -1519,6 +1532,35 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) if (!gpio_val) continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: + continue; + default: + break; + } + + if (I40E_IS_X710TL_DEVICE(hw->device_id)) { + u32 pin_func = 0; + + if (mode & I40E_FW_LED) + pin_func = I40E_PIN_FUNC_SDP; + else + pin_func = I40E_PIN_FUNC_LED; + + gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK; + gpio_val |= ((pin_func << + I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) & + I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK); + } gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & @@ -1553,8 +1595,8 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, { struct i40e_aq_desc desc; i40e_status status; - u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; + u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); if (!abilities) return I40E_ERR_PARAM; @@ -1563,17 +1605,17 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_abilities); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (abilities_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); if (qualified_modules) desc.params.external.param0 |= - cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); + CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); if (report_init) desc.params.external.param0 |= - cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); + CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); @@ -1595,7 +1637,7 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && (total_delay < max_delay)); - if (status) + 
if (status != I40E_SUCCESS) return status; if (report_init) { @@ -1604,7 +1646,7 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { status = i40e_aq_get_link_info(hw, true, NULL, NULL); } else { - hw->phy.phy_types = le32_to_cpu(abilities->phy_type); + hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32); } @@ -1625,14 +1667,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, * of the PHY Config parameters. This status will be indicated by the * command response. **/ -enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, +i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_phy_config *cmd = - (struct i40e_aq_set_phy_config *)&desc.params.raw; - enum i40e_status_code status; + (struct i40e_aq_set_phy_config *)&desc.params.raw; + i40e_status status; if (!config) return I40E_ERR_PARAM; @@ -1647,15 +1689,25 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, return status; } -static noinline_for_stack enum i40e_status_code -i40e_set_fc_status(struct i40e_hw *hw, - struct i40e_aq_get_phy_abilities_resp *abilities, - bool atomic_restart) +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart + * + * Set the requested flow control mode using set_phy_config. + **/ +i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) { - struct i40e_aq_set_phy_config config; enum i40e_fc_mode fc_mode = hw->fc.requested_mode; + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + i40e_status status; u8 pause_mask = 0x0; + *aq_failures = 0x0; + switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; @@ -1671,48 +1723,6 @@ i40e_set_fc_status(struct i40e_hw *hw, break; } - memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - /* clear the old pause settings */ - config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & - ~(I40E_AQ_PHY_FLAG_PAUSE_RX); - /* set the new abilities */ - config.abilities |= pause_mask; - /* If the abilities have changed, then set the new config */ - if (config.abilities == abilities->abilities) - return 0; - - /* Auto restart link so settings take effect */ - if (atomic_restart) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - /* Copy over all the old settings */ - config.phy_type = abilities->phy_type; - config.phy_type_ext = abilities->phy_type_ext; - config.link_speed = abilities->link_speed; - config.eee_capability = abilities->eee_capability; - config.eeer = abilities->eeer_val; - config.low_power_ctrl = abilities->d3_lpan; - config.fec_config = abilities->fec_cfg_curr_mod_ext_info & - I40E_AQ_PHY_FEC_CONFIG_MASK; - - return i40e_aq_set_phy_config(hw, &config, NULL); -} - -/** - * i40e_set_fc - * @hw: pointer to the hw struct - * @aq_failures: buffer to return AdminQ failure information - * @atomic_restart: whether to enable atomic link restart - * - * Set the requested flow control mode using set_phy_config. 
- **/ -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_restart) -{ - struct i40e_aq_get_phy_abilities_resp abilities; - enum i40e_status_code status; - - *aq_failures = 0x0; - /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); @@ -1721,10 +1731,31 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, return status; } - status = i40e_set_fc_status(hw, &abilities, atomic_restart); - if (status) - *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; + memset(&config, 0, sizeof(config)); + /* clear the old pause settings */ + config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities != abilities.abilities) { + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities.phy_type; + config.phy_type_ext = abilities.phy_type_ext; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + status = i40e_aq_set_phy_config(hw, &config, NULL); + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; + } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { @@ -1749,7 +1780,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, * Tell the firmware that the driver is taking over from PXE **/ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details) { i40e_status status; struct i40e_aq_desc desc; @@ -1777,8 +1808,7 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, * Sets up the link and restarts the Auto-Negotiation over the link. 
**/ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, - bool enable_link, - struct i40e_asq_cmd_details *cmd_details) + bool enable_link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_link_restart_an *cmd = @@ -1826,15 +1856,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, command_flags = I40E_AQ_LSE_ENABLE; else command_flags = I40E_AQ_LSE_DISABLE; - resp->command_flags = cpu_to_le16(command_flags); + resp->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (status) + if (status != I40E_SUCCESS) goto aq_get_link_info_exit; /* save off old link status information */ - hw->phy.link_info_old = *hw_link_info; + i40e_memcpy(&hw->phy.link_info_old, hw_link_info, + sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; @@ -1846,7 +1877,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; - hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); + hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size); hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; /* update fc info */ @@ -1866,7 +1897,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, else hw_link_info->crc_enable = false; - if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) + if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED)) hw_link_info->lse_enable = true; else hw_link_info->lse_enable = false; @@ -1880,14 +1911,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw->mac.type != I40E_MAC_X722) { __le32 tmp; - memcpy(&tmp, resp->link_type, sizeof(tmp)); - hw->phy.phy_types = le32_to_cpu(tmp); + i40e_memcpy(&tmp, resp->link_type, sizeof(tmp), + I40E_NONDMA_TO_NONDMA); + hw->phy.phy_types = LE32_TO_CPU(tmp); hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); } /* save link status information */ if (link) - *link = *hw_link_info; + i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info), + I40E_NONDMA_TO_NONDMA); /* flag cleared so helper functions don't call AQ again */ hw->phy.get_link_info = false; @@ -1905,8 +1938,8 @@ aq_get_link_info_exit: * Set link interrupt mask. **/ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, - u16 mask, - struct i40e_asq_cmd_details *cmd_details) + u16 mask, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_int_mask *cmd = @@ -1916,7 +1949,7 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_int_mask); - cmd->event_mask = cpu_to_le16(mask); + cmd->event_mask = CPU_TO_LE16(mask); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -1932,7 +1965,7 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, * Reset the external PHY. 
**/ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = @@ -1972,23 +2005,23 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); - cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid); cmd->connection_type = vsi_ctx->connection_type; cmd->vf_id = vsi_ctx->vf_num; - cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); + cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); - if (status) + if (status != I40E_SUCCESS) goto aq_add_vsi_exit; - vsi_ctx->seid = le16_to_cpu(resp->seid); - vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); - vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); - vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + vsi_ctx->seid = LE16_TO_CPU(resp->seid); + vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_add_vsi_exit: return status; @@ -2001,8 +2034,8 @@ aq_add_vsi_exit: * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, - u16 seid, - struct i40e_asq_cmd_details *cmd_details) + u16 seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -2011,11 +2044,11 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_vsi_promiscuous_modes); + i40e_aqc_opc_set_vsi_promiscuous_modes); - cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); - cmd->seid = cpu_to_le16(seid); + cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2029,8 +2062,8 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, - u16 seid, - struct i40e_asq_cmd_details *cmd_details) + u16 seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -2039,11 +2072,11 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_vsi_promiscuous_modes); + i40e_aqc_opc_set_vsi_promiscuous_modes); - cmd->promiscuous_flags = cpu_to_le16(0); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); - cmd->seid = cpu_to_le16(seid); + cmd->promiscuous_flags = CPU_TO_LE16(0); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2080,14 +2113,14 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, flags |= I40E_AQC_SET_VSI_PROMISC_TX; } - cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->promiscuous_flags = CPU_TO_LE16(flags); 
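Reviewer note (annotation, not part of the patch): the hunk above is typical of most of the churn in this file -- the kernel spellings cpu_to_le16()/le16_to_cpu() are swapped for the shared-code spellings CPU_TO_LE16()/LE16_TO_CPU() so the source matches Intel's OS-independent common code. Behaviour is unchanged; in the out-of-tree driver these are thin osdep-layer wrappers, roughly as sketched below (these #defines are not part of this diff and live in i40e_osdep.h):

	/* Shared-code byte-order spellings mapped straight onto the
	 * kernel helpers; on Linux they compile to identical code.
	 */
	#define CPU_TO_LE16(o)	cpu_to_le16(o)
	#define CPU_TO_LE32(o)	cpu_to_le32(o)
	#define LE16_TO_CPU(a)	le16_to_cpu(a)
	#define LE32_TO_CPU(a)	le32_to_cpu(a)

The same pattern covers the i40e_memcpy()/i40e_memset() calls with their I40E_NONDMA_* direction tags elsewhere in this patch, which on Linux reduce to plain memcpy()/memset().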
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); + (hw->aq.api_maj_ver > 1)) + cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX); - cmd->seid = cpu_to_le16(seid); + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; @@ -2115,11 +2148,48 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, if (set) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; - cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->promiscuous_flags = CPU_TO_LE16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); - cmd->seid = cpu_to_le16(seid); + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** +* i40e_aq_set_vsi_full_promiscuous +* @hw: pointer to the hw struct +* @seid: VSI number +* @set: set promiscuous enable/disable +* @cmd_details: pointer to command details structure or NULL +**/ +i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags = I40E_AQC_SET_VSI_PROMISC_UNICAST | + I40E_AQC_SET_VSI_PROMISC_MULTICAST | + I40E_AQC_SET_VSI_PROMISC_BROADCAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST | + I40E_AQC_SET_VSI_PROMISC_MULTICAST | + I40E_AQC_SET_VSI_PROMISC_BROADCAST); + + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; @@ -2133,29 +2203,29 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ -enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, +i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - enum i40e_status_code status; + i40e_status status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_vsi_promiscuous_modes); + i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; - cmd->promiscuous_flags = cpu_to_le16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); - cmd->seid = cpu_to_le16(seid); - cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + status = i40e_asq_send_command_atomic(hw, 
&desc, NULL, 0, + cmd_details, true); return status; } @@ -2168,29 +2238,29 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ -enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, +i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - enum i40e_status_code status; + i40e_status status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_vsi_promiscuous_modes); + i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - cmd->promiscuous_flags = cpu_to_le16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - cmd->seid = cpu_to_le16(seid); - cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, + cmd_details, true); return status; } @@ -2219,10 +2289,10 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; - cmd->promiscuous_flags = cpu_to_le16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); - cmd->seid = cpu_to_le16(seid); - cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2252,13 +2322,13 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, if (set_filter) cmd->promiscuous_flags - |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); else cmd->promiscuous_flags - &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); + &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); - cmd->seid = cpu_to_le16(seid); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; @@ -2272,8 +2342,8 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 seid, bool enable, - struct i40e_asq_cmd_details *cmd_details) + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -2286,9 +2356,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; - cmd->promiscuous_flags = cpu_to_le16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); - cmd->seid = cpu_to_le16(seid); + 
cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN); + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2316,20 +2386,20 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); - cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), NULL); - if (status) + if (status != I40E_SUCCESS) goto aq_get_vsi_params_exit; - vsi_ctx->seid = le16_to_cpu(resp->seid); - vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); - vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); - vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + vsi_ctx->seid = LE16_TO_CPU(resp->seid); + vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_get_vsi_params_exit: return status; @@ -2357,15 +2427,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); - cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); - vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); - vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); return status; } @@ -2392,13 +2462,13 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_config); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - scfg->seid = cpu_to_le16(*start_seid); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + scfg->seid = CPU_TO_LE16(*start_seid); status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); - *start_seid = le16_to_cpu(scfg->seid); + *start_seid = LE16_TO_CPU(scfg->seid); return status; } @@ -2409,30 +2479,28 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * @flags: bit flag values to set * @mode: cloud filter mode * @valid_flags: which bit flags to set - * @mode: cloud filter mode * @cmd_details: pointer to command details structure or NULL * * Set switch configuration bits **/ -enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, - u16 flags, - u16 valid_flags, u8 mode, +i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_switch_config *scfg = (struct i40e_aqc_set_switch_config *)&desc.params.raw; - enum i40e_status_code status; + i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_switch_config); - scfg->flags = cpu_to_le16(flags); - scfg->valid_flags = cpu_to_le16(valid_flags); + scfg->flags = CPU_TO_LE16(flags); + scfg->valid_flags = 
CPU_TO_LE16(valid_flags); scfg->mode = mode; if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { - scfg->switch_tag = cpu_to_le16(hw->switch_tag); - scfg->first_tag = cpu_to_le16(hw->first_tag); - scfg->second_tag = cpu_to_le16(hw->second_tag); + scfg->switch_tag = CPU_TO_LE16(hw->switch_tag); + scfg->first_tag = CPU_TO_LE16(hw->first_tag); + scfg->second_tag = CPU_TO_LE16(hw->second_tag); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2466,17 +2534,17 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (!status) { - if (fw_major_version) - *fw_major_version = le16_to_cpu(resp->fw_major); - if (fw_minor_version) - *fw_minor_version = le16_to_cpu(resp->fw_minor); - if (fw_build) - *fw_build = le32_to_cpu(resp->fw_build); - if (api_major_version) - *api_major_version = le16_to_cpu(resp->api_major); - if (api_minor_version) - *api_minor_version = le16_to_cpu(resp->api_minor); + if (status == I40E_SUCCESS) { + if (fw_major_version != NULL) + *fw_major_version = LE16_TO_CPU(resp->fw_major); + if (fw_minor_version != NULL) + *fw_minor_version = LE16_TO_CPU(resp->fw_minor); + if (fw_build != NULL) + *fw_build = LE32_TO_CPU(resp->fw_build); + if (api_major_version != NULL) + *api_major_version = LE16_TO_CPU(resp->api_major); + if (api_minor_version != NULL) + *api_minor_version = LE16_TO_CPU(resp->api_minor); } return status; @@ -2505,7 +2573,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; @@ -2528,18 +2596,18 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, * @link_up: pointer to bool (true/false = linkup/linkdown) * * Variable link_up true if link is up, false if link is down. 
- * The variable link_up is invalid if returned value of status != 0 + * The variable link_up is invalid if returned value of status != I40E_SUCCESS * * Side effect: LinkStatusEvent reporting becomes enabled **/ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_update_link_info(hw); - if (status) + if (status != I40E_SUCCESS) i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", status); } @@ -2553,10 +2621,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) * i40e_updatelink_status - update status of the HW network link * @hw: pointer to the hw struct **/ -noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) +i40e_status i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) @@ -2571,14 +2639,20 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) if (status) return status; - hw->phy.link_info.req_fec_info = - abilities.fec_cfg_curr_mod_ext_info & - (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); + if (abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_ENABLE_FEC_AUTO) + hw->phy.link_info.req_fec_info = + (I40E_AQ_REQUEST_FEC_KR | + I40E_AQ_REQUEST_FEC_RS); + else + hw->phy.link_info.req_fec_info = + abilities.fec_cfg_curr_mod_ext_info & + (I40E_AQ_REQUEST_FEC_KR | + I40E_AQ_REQUEST_FEC_RS); - memcpy(hw->phy.link_info.module_type, &abilities.module_type, - sizeof(hw->phy.link_info.module_type)); + i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type, + sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA); } - return status; } @@ -2616,8 +2690,8 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); - cmd->uplink_seid = cpu_to_le16(uplink_seid); - cmd->downlink_seid = cpu_to_le16(downlink_seid); + cmd->uplink_seid = CPU_TO_LE16(uplink_seid); + cmd->downlink_seid = CPU_TO_LE16(downlink_seid); cmd->enable_tcs = enabled_tc; if (!uplink_seid) veb_flags |= I40E_AQC_ADD_VEB_FLOATING; @@ -2630,12 +2704,12 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, if (!enable_stats) veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; - cmd->veb_flags = cpu_to_le16(veb_flags); + cmd->veb_flags = CPU_TO_LE16(veb_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && veb_seid) - *veb_seid = le16_to_cpu(resp->veb_seid); + *veb_seid = LE16_TO_CPU(resp->veb_seid); return status; } @@ -2671,22 +2745,22 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_veb_parameters); - cmd_resp->seid = cpu_to_le16(veb_seid); + cmd_resp->seid = CPU_TO_LE16(veb_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status) goto get_veb_exit; if (switch_id) - *switch_id = le16_to_cpu(cmd_resp->switch_id); + *switch_id = LE16_TO_CPU(cmd_resp->switch_id); if (statistic_index) - *statistic_index = le16_to_cpu(cmd_resp->statistic_index); + *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index); if (vebs_used) - *vebs_used = le16_to_cpu(cmd_resp->vebs_used); + *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used); if (vebs_free) - *vebs_free = le16_to_cpu(cmd_resp->vebs_free); + *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free); if (floating) { - 
u16 flags = le16_to_cpu(cmd_resp->veb_flags); + u16 flags = LE16_TO_CPU(cmd_resp->veb_flags); if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = true; @@ -2726,19 +2800,19 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); - cmd->num_addresses = cpu_to_le16(count); - cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; for (i = 0; i < count; i++) if (is_multicast_ether_addr(mv_list[i].mac_addr)) mv_list[i].flags |= - cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); + CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); @@ -2773,14 +2847,14 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); - cmd->num_addresses = cpu_to_le16(count); - cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); @@ -2806,10 +2880,10 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, * VEBs/VEPA elements only **/ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, - u16 opcode, u16 sw_seid, u16 rule_type, u16 id, - u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) { struct i40e_aq_desc desc; struct i40e_aqc_add_delete_mirror_rule *cmd = @@ -2823,29 +2897,29 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, opcode); - cmd->seid = cpu_to_le16(sw_seid); - cmd->rule_type = cpu_to_le16(rule_type & + cmd->seid = CPU_TO_LE16(sw_seid); + cmd->rule_type = CPU_TO_LE16(rule_type & I40E_AQC_MIRROR_RULE_TYPE_MASK); - cmd->num_entries = cpu_to_le16(count); + cmd->num_entries = CPU_TO_LE16(count); /* Dest VSI for add, rule_id for delete */ - cmd->destination = cpu_to_le16(id); + cmd->destination = CPU_TO_LE16(id); if (mr_list) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); } status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, cmd_details); - if (!status || + if (status == I40E_SUCCESS || hw->aq.asq_last_status 
== I40E_AQ_RC_ENOSPC) { if (rule_id) - *rule_id = le16_to_cpu(resp->rule_id); + *rule_id = LE16_TO_CPU(resp->rule_id); if (rules_used) - *rules_used = le16_to_cpu(resp->mirror_rules_used); + *rules_used = LE16_TO_CPU(resp->mirror_rules_used); if (rules_free) - *rules_free = le16_to_cpu(resp->mirror_rules_free); + *rules_free = LE16_TO_CPU(resp->mirror_rules_free); } return status; } @@ -2919,7 +2993,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure - * @vfid: VF id to send msg + * @vfid: vf id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer @@ -2938,16 +3012,16 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); - cmd->id = cpu_to_le32(vfid); - desc.cookie_high = cpu_to_le32(v_opcode); - desc.cookie_low = cpu_to_le32(v_retval); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + cmd->id = CPU_TO_LE32(vfid); + desc.cookie_high = CPU_TO_LE32(v_opcode); + desc.cookie_low = CPU_TO_LE32(v_retval); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); if (msglen) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(msglen); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); } status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); @@ -2977,13 +3051,13 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); - cmd_resp->address = cpu_to_le32(reg_addr); + cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (!status) { - *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | - (u64)le32_to_cpu(cmd_resp->value_low); + if (status == I40E_SUCCESS) { + *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) | + (u64)LE32_TO_CPU(cmd_resp->value_low); } return status; @@ -2999,8 +3073,8 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, * Write to a register using the admin queue commands **/ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, - u32 reg_addr, u64 reg_val, - struct i40e_asq_cmd_details *cmd_details) + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd = @@ -3009,9 +3083,9 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); - cmd->address = cpu_to_le32(reg_addr); - cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); - cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); + cmd->address = CPU_TO_LE32(reg_addr); + cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32)); + cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF)); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -3042,9 +3116,9 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); - cmd_resp->resource_id = cpu_to_le16(resource); - cmd_resp->access_type = cpu_to_le16(access); - cmd_resp->resource_number = cpu_to_le32(sdp_number); + cmd_resp->resource_id = CPU_TO_LE16(resource); + 
cmd_resp->access_type = CPU_TO_LE16(access); + cmd_resp->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); /* The completion specifies the maximum time in ms that the driver @@ -3053,8 +3127,8 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw, * busy return value and the timeout field indicates the maximum time * the current owner of the resource has to free it. */ - if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) - *timeout = le32_to_cpu(cmd_resp->timeout); + if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) + *timeout = LE32_TO_CPU(cmd_resp->timeout); return status; } @@ -3080,8 +3154,8 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); - cmd->resource_id = cpu_to_le16(resource); - cmd->resource_number = cpu_to_le32(sdp_number); + cmd->resource_id = CPU_TO_LE16(resource); + cmd->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -3122,12 +3196,12 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; - cmd->offset = cpu_to_le32(offset); - cmd->length = cpu_to_le16(length); + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); @@ -3135,6 +3209,100 @@ i40e_aq_read_nvm_exit: return status; } +/** + * i40e_aq_read_nvm_config - read an nvm config block + * @hw: pointer to the hw struct + * @cmd_flags: NVM access admin command bits + * @field_id: field or feature id + * @data: buffer for result + * @buf_size: buffer size + * @element_count: pointer to count of elements read by FW + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, u32 field_id, void *data, + u16 buf_size, u16 *element_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_config_read *cmd = + (struct i40e_aqc_nvm_config_read *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id)); + if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK) + cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16)); + else + cmd->element_id_msw = 0; + + status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); + + if (!status && element_count) + *element_count = LE16_TO_CPU(cmd->element_count); + + return status; +} + +/** + * i40e_aq_write_nvm_config - write an nvm config block + * @hw: pointer to the hw struct + * @cmd_flags: NVM access admin command bits + * @data: buffer for result + * @buf_size: buffer size + * @element_count: count of elements to be written + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, void *data, u16 
buf_size, + u16 element_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_config_write *cmd = + (struct i40e_aqc_nvm_config_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->element_count = CPU_TO_LE16(element_count); + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); + + return status; +} + +/** + * i40e_aq_oem_post_update - triggers an OEM specific flow after update + * @hw: pointer to the hw struct + * @buff: buffer for result + * @buff_size: buffer size + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH) + status = I40E_ERR_NOT_IMPLEMENTED; + + return status; +} + /** * i40e_aq_erase_nvm * @hw: pointer to the hw struct @@ -3147,8 +3315,8 @@ i40e_aq_read_nvm_exit: * Erase the NVM sector using the admin queue commands **/ i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, bool last_command, - struct i40e_asq_cmd_details *cmd_details) + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = @@ -3167,8 +3335,8 @@ i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; - cmd->offset = cpu_to_le32(offset); - cmd->length = cpu_to_le16(length); + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -3193,30 +3361,33 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; - u16 id, ocp_cfg_word0; i40e_status status; + u16 id, ocp_cfg_word0; u8 major_rev; u32 i = 0; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) - p = &hw->dev_caps; + p = (struct i40e_hw_capabilities *)&hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) - p = &hw->func_caps; + p = (struct i40e_hw_capabilities *)&hw->func_caps; else return; for (i = 0; i < cap_count; i++, cap++) { - id = le16_to_cpu(cap->id); - number = le32_to_cpu(cap->number); - logical_id = le32_to_cpu(cap->logical_id); - phys_id = le32_to_cpu(cap->phys_id); + id = LE16_TO_CPU(cap->id); + number = LE32_TO_CPU(cap->number); + logical_id = LE32_TO_CPU(cap->logical_id); + phys_id = LE32_TO_CPU(cap->phys_id); major_rev = cap->major_rev; switch (id) { case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Switch mode = %d\n", + p->switch_mode); break; case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; @@ -3228,38 +3399,67 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void 
*buff, } else { p->mng_protocols_over_mctp = 0; } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Management Mode = %d\n", + p->management_mode); break; case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: NPAR enable = %d\n", + p->npar_enable); break; case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: OS2BMC = %d\n", p->os2bmc); break; case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Valid Functions = %d\n", + p->valid_functions); break; case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: SR-IOV = %d\n", + p->sr_iov_1_1); break; case I40E_AQ_CAP_ID_VF: p->num_vfs = number; p->vf_base_id = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VF count = %d\n", + p->num_vfs); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VF base_id = %d\n", + p->vf_base_id); break; case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VMDQ = %d\n", p->vmdq); break; case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: 802.1Qbg = %d\n", number); break; case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: 802.1Qbh = %d\n", number); break; case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VSI count = %d\n", + p->num_vsis); break; case I40E_AQ_CAP_ID_DCB: if (number == 1) { @@ -3267,27 +3467,56 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->enabled_tcmap = logical_id; p->maxtc = phys_id; } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: DCB = %d\n", p->dcb); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: TC Mapping = %d\n", + logical_id); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: TC Max = %d\n", p->maxtc); break; case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: FCOE = %d\n", p->fcoe); break; case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: iSCSI = %d\n", p->iscsi); break; case I40E_AQ_CAP_ID_RSS: p->rss = true; p->rss_table_size = number; p->rss_table_entry_width = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS = %d\n", p->rss); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS table size = %d\n", + p->rss_table_size); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS table width = %d\n", + p->rss_table_entry_width); break; case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Rx QP = %d\n", number); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: base_queue = %d\n", + p->base_queue); break; case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Tx QP = %d\n", number); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: base_queue = %d\n", + p->base_queue); break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; @@ -3297,6 +3526,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX VF vector count = 
%d\n", + p->num_msix_vectors_vf); break; case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { @@ -3313,41 +3545,72 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, } p->flex10_mode = logical_id; p->flex10_status = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flex10 mode = %d\n", + p->flex10_mode); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flex10 status = %d\n", + p->flex10_status); break; case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: CEM = %d\n", p->mgmt_cem); break; case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: iWARP = %d\n", p->iwarp); break; case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: LED - PIN %d\n", phys_id); break; case I40E_AQ_CAP_ID_SDP: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: SDP - PIN %d\n", phys_id); break; case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MDIO port number = %d\n", + p->mdio_port_num); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MDIO port mode = %d\n", + p->mdio_port_mode); break; case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: IEEE 1588 = %d\n", + p->ieee_1588); break; case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = true; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flow Director = 1\n"); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Guaranteed FD filters = %d\n", + p->fd_filters_guaranteed); break; case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: wr_csr_prot = 0x%llX\n\n", + (p->wr_csr_prot & 0xffff)); break; case I40E_AQ_CAP_ID_NVM_MGMT: if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) @@ -3355,6 +3618,19 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, if (number & I40E_NVM_MGMT_UPDATE_DISABLED) p->update_disabled = true; break; + case I40E_AQ_CAP_ID_WOL_AND_PROXY: + hw->num_wol_proxy_filters = (u16)number; + hw->wol_proxy_vsi_seid = (u16)logical_id; + p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK; + if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK) + p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK; + else + p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL; + p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: WOL proxy filters = %d\n", + hw->num_wol_proxy_filters); + break; default: break; } @@ -3363,11 +3639,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, if (p->fcoe) i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); - /* Software override ensuring FCoE is disabled if npar or mfp - * mode because it is not supported in these modes. 
- */ - if (p->npar_enable || p->flex10_enable) - p->fcoe = false; + /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ + p->fcoe = false; /* count the enabled ports (aka the "not disabled" ports) */ hw->num_ports = 0; @@ -3383,7 +3656,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, hw->num_ports++; } - /* OCP cards case: if a mezz is removed the Ethernet port is at + /* OCP cards case: if a mezz is removed the ethernet port is at * disabled state in PRTGEN_CNF register. Additional NVM read is * needed in order to check if we are dealing with OCP card. * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting @@ -3391,12 +3664,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, * not supporting WoL. */ if (hw->mac.type == I40E_MAC_X722) { - if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { + if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) { status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 2 * I40E_SR_OCP_CFG_WORD0, sizeof(ocp_cfg_word0), &ocp_cfg_word0, true, NULL); - if (!status && + if (status == I40E_SUCCESS && (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) hw->num_ports = 4; i40e_release_nvm(hw); @@ -3443,7 +3716,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, { struct i40e_aqc_list_capabilites *cmd; struct i40e_aq_desc desc; - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; @@ -3455,17 +3728,17 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - *data_size = le16_to_cpu(desc.datalen); + *data_size = LE16_TO_CPU(desc.datalen); if (status) goto exit; - i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), + i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count), list_type_opc); exit: @@ -3486,9 +3759,9 @@ exit: * Update the NVM using the admin queue commands **/ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, void *data, + u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = @@ -3517,12 +3790,12 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); } cmd->module_pointer = module_pointer; - cmd->offset = cpu_to_le32(offset); - cmd->length = cpu_to_le16(length); + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); @@ -3539,8 +3812,8 @@ i40e_aq_update_nvm_exit: * Rearrange NVM structure, available only for transition FW **/ i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details) + u8 
rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_nvm_update *cmd; i40e_status status; @@ -3565,6 +3838,26 @@ i40e_aq_rearrange_nvm_exit: return status; } +/** + * i40e_aq_nvm_progress + * @hw: pointer to the hw struct + * @progress: pointer to progress returned from AQ + * @cmd_details: pointer to command details structure or NULL + * + * Gets progress of flash rearrangement process + **/ +i40e_status i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + *progress = desc.params.raw[0]; + return status; +} + /** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct @@ -3595,29 +3888,68 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); - desc.datalen = cpu_to_le16(buff_size); + desc.datalen = CPU_TO_LE16(buff_size); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (local_len != NULL) - *local_len = le16_to_cpu(resp->local_len); + *local_len = LE16_TO_CPU(resp->local_len); if (remote_len != NULL) - *remote_len = le16_to_cpu(resp->remote_len); + *remote_len = LE16_TO_CPU(resp->remote_len); } return status; } + /** + * i40e_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the hw struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @cmd_details: pointer to command details structure or NULL + * + * Set the LLDP MIB. 
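A quick aside on the new NVM helpers above: i40e_aq_nvm_progress is the natural companion to i40e_aq_rearrange_nvm, letting a caller poll the flash rearrangement. A minimal polling sketch follows, assuming a 100 ms pacing interval and a bounded retry budget; how the returned progress byte maps to completion is firmware-defined, so it is only logged here.

	/* Hedged sketch: start an NVM rearrangement, then poll for progress.
	 * Pacing, retry bound, and interpretation of the progress byte are
	 * illustrative assumptions, not documented firmware behavior.
	 */
	static i40e_status rearrange_nvm_and_poll(struct i40e_hw *hw, u8 direction)
	{
		i40e_status status;
		u8 progress = 0;
		int budget = 50;	/* assumed retry bound */

		status = i40e_aq_rearrange_nvm(hw, direction, NULL);
		if (status)
			return status;

		while (budget--) {
			status = i40e_aq_nvm_progress(hw, &progress, NULL);
			if (status)
				break;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM rearrange progress: %u\n", progress);
			msleep(100);	/* assumed pacing */
		}
		return status;
	}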
+ **/ +i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw, + u8 mib_type, void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_set_local_mib *cmd = + (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_lldp_set_local_mib); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->type = mib_type; + cmd->length = CPU_TO_LE16(buff_size); + cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)buff)); + cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)buff)); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + return status; +} + /** * i40e_aq_cfg_lldp_mib_change_event * @hw: pointer to the hw struct @@ -3723,15 +4055,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, /** * i40e_aq_start_lldp * @hw: pointer to the hw struct - * @buff: buffer for result * @persist: True if start of LLDP should be persistent across power cycles - * @buff_size: buffer size * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. **/ -i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, - struct i40e_asq_cmd_details *cmd_details) +i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, + bool persist, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = @@ -3796,8 +4127,8 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, * Get CEE DCBX mode operational configuration from firmware **/ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, - void *buff, u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; i40e_status status; @@ -3807,13 +4138,43 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, cmd_details); return status; } +/** + * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW + * @hw: pointer to the hw struct + * @start_agent: True if DCBx Agent needs to be Started + * False if DCBx Agent needs to be Stopped + * @cmd_details: pointer to command details structure or NULL + * + * Start/Stop the embedded dcbx Agent + **/ +i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw, + bool start_agent, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop_start_specific_agent *cmd = + (struct i40e_aqc_lldp_stop_start_specific_agent *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_lldp_stop_start_spec_agent); + + if (start_agent) + cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct @@ -3823,7 +4184,7 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, * @cmd_details: pointer to command details structure or NULL * * Note: 
Firmware expects the udp_port value to be in Little Endian format, - * and this function will call cpu_to_le16 to convert from Host byte order to + * and this function will call CPU_TO_LE16 to convert from Host byte order to * Little Endian order. **/ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, @@ -3840,7 +4201,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); - cmd->udp_port = cpu_to_le16(udp_port); + cmd->udp_port = CPU_TO_LE16(udp_port); cmd->protocol_type = protocol_index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -3874,6 +4235,45 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, return status; } +/** + * i40e_aq_get_switch_resource_alloc (0x0204) + * @hw: pointer to the hw struct + * @num_entries: pointer to u8 to store the number of resource entries returned + * @buf: pointer to a user supplied buffer. This buffer must be large enough + * to store the resource information for all resource types. Each + * resource type is an i40e_aqc_switch_resource_alloc_data structure. + * @count: number of resource entries the buffer can hold + * @cmd_details: pointer to command details structure or NULL + * + * Query the resources allocated to a function. + **/ +i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, + u8 *num_entries, + struct i40e_aqc_switch_resource_alloc_element_resp *buf, + u16 count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_switch_resource_alloc *cmd_resp = + (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw; + i40e_status status; + u16 length = count * sizeof(*buf); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_resource_alloc); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); + + if (!status && num_entries) + *num_entries = cmd_resp->num_entries; + + return status; +} + /** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct @@ -3895,7 +4295,7 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); - cmd->seid = cpu_to_le16(seid); + cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -3907,6 +4307,14 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * + * When LLDP is handled in PF this command is used by the PF + * to notify EMP that a DCB setting is modified. + * When LLDP is handled in EMP this command is used by the PF + * to notify EMP whenever one of the following parameters gets + * modified: + * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA + * - PCIRTT in PRTDCB_GENC.PCIRTT + * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME. * EMP will return when the shared RPB settings have been * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified.
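For orientation, a hedged caller-side sketch of i40e_aq_get_switch_resource_alloc: count is a number of response elements (the helper derives the byte length via sizeof(*buf)), and num_entries reports how many elements the firmware actually filled. The capacity below is an arbitrary illustration, not a firmware limit.

	/* Sketch only: query per-function switch resource allocation.
	 * RES_BUF_ENTRIES is a made-up capacity for illustration.
	 */
	#define RES_BUF_ENTRIES 16

	static void dump_switch_resources(struct i40e_hw *hw)
	{
		struct i40e_aqc_switch_resource_alloc_element_resp buf[RES_BUF_ENTRIES];
		u8 num_entries = 0;

		if (i40e_aq_get_switch_resource_alloc(hw, &num_entries, buf,
						      RES_BUF_ENTRIES, NULL))
			return;

		/* each element describes one resource type; see
		 * struct i40e_aqc_switch_resource_alloc_element_resp for fields
		 */
		while (num_entries--)
			; /* consume buf[num_entries] as needed */
	}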
@@ -3970,15 +4378,15 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, i40e_fill_default_direct_cmd_desc(&desc, opcode); /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (cmd_param_flag) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); + desc.datalen = CPU_TO_LE16(buff_size); - cmd->vsi_seid = cpu_to_le16(seid); + cmd->vsi_seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); @@ -4005,8 +4413,8 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_vsi_bw_limit); - cmd->vsi_seid = cpu_to_le16(seid); - cmd->credit = cpu_to_le16(credit); + cmd->vsi_seid = CPU_TO_LE16(seid); + cmd->credit = CPU_TO_LE16(credit); cmd->max_credit = max_credit; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -4160,7 +4568,7 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, * The function checks for the valid filter/context sizes being * passed for FCoE and PE. * - * Returns 0 if the values passed are valid and within + * Returns I40E_SUCCESS if the values passed are valid and within * range else returns an error. **/ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, @@ -4169,6 +4577,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, u32 fcoe_cntx_size, fcoe_filt_size; u32 pe_cntx_size, pe_filt_size; u32 fcoe_fmax; + u32 val; /* Validate FCoE settings passed */ @@ -4243,7 +4652,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return I40E_ERR_INVALID_SIZE; - return 0; + return I40E_SUCCESS; } /** @@ -4258,7 +4667,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, i40e_status i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; u32 hash_lut_size = 0; u32 val; @@ -4310,7 +4719,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); - return 0; + return I40E_SUCCESS; } /** @@ -4350,7 +4759,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_control_packet_filter); - cmd->queue = cpu_to_le16(queue); + cmd->queue = CPU_TO_LE16(queue); } else { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_control_packet_filter); @@ -4359,17 +4768,17 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, if (mac_addr) ether_addr_copy(cmd->mac, mac_addr); - cmd->etype = cpu_to_le16(ethtype); - cmd->flags = cpu_to_le16(flags); - cmd->seid = cpu_to_le16(vsi_seid); + cmd->etype = CPU_TO_LE16(ethtype); + cmd->flags = CPU_TO_LE16(flags); + cmd->seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stats) { - stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); - stats->etype_used = le16_to_cpu(resp->etype_used); - stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); - stats->etype_free = le16_to_cpu(resp->etype_free); + stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used); + 
stats->etype_used = LE16_TO_CPU(resp->etype_used); + stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free); + stats->etype_free = LE16_TO_CPU(resp->etype_free); } return status; @@ -4397,6 +4806,278 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); } +/** + * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue + * @filters: list of cloud filters + * @filter_count: length of list + * + * There's an issue in the device where the Geneve VNI layout needs + * to be shifted 1 byte over from the VxLAN VNI. + **/ +static void i40e_fix_up_geneve_vni( + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aqc_cloud_filters_element_data *f = filters; + int i; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (LE16_TO_CPU(f[i].flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = LE32_TO_CPU(f[i].tenant_id); + f[i].tenant_id = CPU_TO_LE32(ti << 8); + } + } +} + +/** + * i40e_aq_add_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters to be added + * @filter_count: number of filters contained in the buffer + * + * Set the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_data are filled + * in by the caller of the function. + * + **/ +i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + i40e_status status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + + i40e_fix_up_geneve_vni(filters, filter_count); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_add_cloud_filters_bb + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters in big buffer to be added + * @filter_count: number of filters contained in the buffer + * + * Set the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the + * function.
+ * + **/ +enum i40e_status_code +i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + i40e_status status; + u16 buff_len; + int i; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (LE16_TO_CPU(filters[i].element.flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + + /* Due to hardware eccentricities, the VNI for Geneve is shifted + * one more byte further than normally used for Tenant ID in + * other tunnel types. + */ + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = LE32_TO_CPU(filters[i].element.tenant_id); + filters[i].element.tenant_id = CPU_TO_LE32(ti << 8); + } + } + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_rem_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_data are filled in by the caller + * of the function. + * + **/ +enum i40e_status_code +i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + i40e_status status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + + i40e_fix_up_geneve_vni(filters, filter_count); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_rem_cloud_filters_bb + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters in big buffer to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the big buffer cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the + * function. 
+ * + **/ +enum i40e_status_code +i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + i40e_status status; + u16 buff_len; + int i; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (LE16_TO_CPU(filters[i].element.flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + + /* Due to hardware eccentricities, the VNI for Geneve is shifted + * one more byte further than normally used for Tenant ID in + * other tunnel types. + */ + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = LE32_TO_CPU(filters[i].element.tenant_id); + filters[i].element.tenant_id = CPU_TO_LE32(ti << 8); + } + } + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_replace_cloud_filters - Replace cloud filter command + * @hw: pointer to the hw struct + * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct + * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct + * + **/ +enum +i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw, + struct i40e_aqc_replace_cloud_filters_cmd *filters, + struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_replace_cloud_filters_cmd *cmd = + (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw; + i40e_status status = I40E_SUCCESS; + int i = 0; + + /* X722 doesn't support this command */ + if (hw->mac.type == I40E_MAC_X722) + return I40E_ERR_DEVICE_NOT_SUPPORTED; + + /* need FW version greater than 6.00 */ + if (hw->aq.fw_maj_ver < 6) + return I40E_NOT_SUPPORTED; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_replace_cloud_filters); + + desc.datalen = CPU_TO_LE16(32); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->old_filter_type = filters->old_filter_type; + cmd->new_filter_type = filters->new_filter_type; + cmd->valid_flags = filters->valid_flags; + cmd->tr_bit = filters->tr_bit; + cmd->tr_bit2 = filters->tr_bit2; + + status = i40e_asq_send_command(hw, &desc, cmd_buf, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL); + + /* for get cloud filters command */ + for (i = 0; i < 32; i += 4) { + cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i]; + cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1]; + cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2]; + cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3]; + } + + return status; +} + /** * i40e_aq_alternate_read * @hw: pointer to the hardware structure @@ -4410,29 +5091,29 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, * is not passed then only register at 'reg_addr0' is read. 
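Since i40e_aq_alternate_read (implementation follows) fetches up to two alternate-structure words per AQ round trip, here is a hedged usage sketch; the offsets are placeholders, not real alternate-RAM addresses.

	/* Sketch: read two alternate-RAM words in one command; pass NULL for
	 * reg_val1 to read only the first address. ALT_WORD0/1 are hypothetical.
	 */
	#define ALT_WORD0 0x100	/* placeholder offset */
	#define ALT_WORD1 0x101	/* placeholder offset */

	static void read_alt_pair(struct i40e_hw *hw)
	{
		u32 val0 = 0, val1 = 0;

		if (i40e_aq_alternate_read(hw, ALT_WORD0, &val0,
					   ALT_WORD1, &val1) == I40E_SUCCESS)
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "alt words: 0x%08x 0x%08x\n", val0, val1);
	}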
* **/ -static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, - u32 reg_addr0, u32 *reg_val0, - u32 reg_addr1, u32 *reg_val1) +i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; i40e_status status; - if (!reg_val0) + if (reg_val0 == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); - cmd_resp->address0 = cpu_to_le32(reg_addr0); - cmd_resp->address1 = cpu_to_le32(reg_addr1); + cmd_resp->address0 = CPU_TO_LE32(reg_addr0); + cmd_resp->address1 = CPU_TO_LE32(reg_addr1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); - if (!status) { - *reg_val0 = le32_to_cpu(cmd_resp->data0); + if (status == I40E_SUCCESS) { + *reg_val0 = LE32_TO_CPU(cmd_resp->data0); - if (reg_val1) - *reg_val1 = le32_to_cpu(cmd_resp->data1); + if (reg_val1 != NULL) + *reg_val1 = LE32_TO_CPU(cmd_resp->data1); } return status; @@ -4446,7 +5127,7 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, * Resume port's Tx traffic **/ i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; i40e_status status; @@ -4520,10 +5201,10 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) * **/ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, - u8 table_id, u32 start_index, u16 buff_size, - void *buff, u16 *ret_buff_size, - u8 *ret_next_table, u32 *ret_next_index, - struct i40e_asq_cmd_details *cmd_details) + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_dump_internals *cmd = @@ -4538,24 +5219,24 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_dump_internals); /* Indirect Command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cluster_id = cluster_id; cmd->table_id = table_id; - cmd->idx = cpu_to_le32(start_index); + cmd->idx = CPU_TO_LE32(start_index); - desc.datalen = cpu_to_le16(buff_size); + desc.datalen = CPU_TO_LE16(buff_size); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { - if (ret_buff_size) - *ret_buff_size = le16_to_cpu(desc.datalen); - if (ret_next_table) + if (ret_buff_size != NULL) + *ret_buff_size = LE16_TO_CPU(desc.datalen); + if (ret_next_table != NULL) *ret_next_table = resp->table_id; - if (ret_next_index) - *ret_next_index = le32_to_cpu(resp->idx); + if (ret_next_index != NULL) + *ret_next_index = LE32_TO_CPU(resp->idx); } return status; @@ -4572,8 +5253,8 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, * Read bw from the alternate ram for the given pf **/ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, - u32 *max_bw, u32 *min_bw, - bool *min_valid, bool *max_valid) + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) { i40e_status status; u32 max_bw_addr, min_bw_addr; @@ -4620,19 +5301,15 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, u16 bwd_size = 
sizeof(*bw_data); i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_configure_partition_bw); + i40e_aqc_opc_configure_partition_bw); /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); - if (bwd_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(bwd_size); - desc.datalen = cpu_to_le16(bwd_size); - - status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, - cmd_details); + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); return status; } @@ -4647,7 +5324,7 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, * Reads specified PHY register value **/ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 *value) + u16 reg, u8 phy_addr, u16 *value) { i40e_status status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; @@ -4663,7 +5340,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { - status = 0; + status = I40E_SUCCESS; break; } udelay(10); @@ -4692,7 +5369,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, * Writes specified PHY register value **/ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 value) + u16 reg, u8 phy_addr, u16 value) { i40e_status status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; @@ -4712,7 +5389,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { - status = 0; + status = I40E_SUCCESS; break; } udelay(10); @@ -4736,9 +5413,9 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { i40e_status status = I40E_ERR_TIMEOUT; - u32 command = 0; + u32 command = 0; u16 retry = 1000; - u8 port_num = hw->func_caps.mdio_port_num; + u8 port_num = (u8)hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | @@ -4751,10 +5428,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { - status = 0; + status = I40E_SUCCESS; break; } - usleep_range(10, 20); + udelay(10); retry--; } while (retry); @@ -4776,10 +5453,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { - status = 0; + status = I40E_SUCCESS; break; } - usleep_range(10, 20); + udelay(10); retry--; } while (retry); @@ -4810,9 +5487,9 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { i40e_status status = I40E_ERR_TIMEOUT; - u32 command = 0; + u32 command = 0; u16 retry = 1000; - u8 port_num = hw->func_caps.mdio_port_num; + u8 port_num = (u8)hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | @@ -4825,10 +5502,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { - status = 0; + status = I40E_SUCCESS; 
break; } - usleep_range(10, 20); + udelay(10); retry--; } while (retry); if (status) { @@ -4852,10 +5529,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { - status = 0; + status = I40E_SUCCESS; break; } - usleep_range(10, 20); + udelay(10); retry--; } while (retry); @@ -4874,22 +5551,24 @@ phy_write_end: * Writes value to specified PHY register **/ i40e_status i40e_write_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 value) + u8 page, u16 reg, u8 phy_addr, u16 value) { i40e_status status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: - status = i40e_write_phy_register_clause22(hw, reg, phy_addr, - value); + status = i40e_write_phy_register_clause22(hw, + reg, phy_addr, value); break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: - status = i40e_write_phy_register_clause45(hw, page, reg, - phy_addr, value); + status = i40e_write_phy_register_clause45(hw, + page, reg, phy_addr, value); break; default: status = I40E_ERR_UNKNOWN_PHY; @@ -4910,7 +5589,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, * Reads specified PHY register value **/ i40e_status i40e_read_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 *value) + u8 page, u16 reg, u8 phy_addr, u16 *value) { i40e_status status; @@ -4922,6 +5601,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: @@ -4945,7 +5625,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, **/ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) { - u8 port_num = hw->func_caps.mdio_port_num; + u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; @@ -4960,11 +5640,11 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) * Blinks PHY link LED **/ i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval) + u32 time, u32 interval) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; u32 i; - u16 led_ctl; + u16 led_ctl = 0; u16 gpio_led_port; u16 led_reg; u16 led_addr = I40E_PHY_LED_PROV_REG_1; @@ -5031,26 +5711,21 @@ phy_blinking_end: * @led_addr: LED register address * @reg_val: read register value **/ -static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, +static i40e_status i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 *reg_val) { - enum i40e_status_code status; + i40e_status status; u8 phy_addr = 0; - u8 port_num; - u32 i; *reg_val = 0; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_get_phy_register(hw, + status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, + I40E_PHY_COM_REG_PAGE, true, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, port_num); + phy_addr = i40e_get_phy_address(hw, hw->port); status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, @@ -5065,25 +5740,20 @@ static enum 
i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, * @led_addr: LED register address * @reg_val: register value to write **/ -static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, +static i40e_status i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, u32 reg_val) { - enum i40e_status_code status; + i40e_status status; u8 phy_addr = 0; - u8 port_num; - u32 i; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_set_phy_register(hw, + status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, + I40E_PHY_COM_REG_PAGE, true, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, port_num); + phy_addr = i40e_get_phy_address(hw, hw->port); status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, @@ -5101,33 +5771,27 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, * **/ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, - u16 *val) + u16 *val) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; u16 gpio_led_port; + u32 reg_val_aq; + u16 temp_addr; u8 phy_addr = 0; u16 reg_val; - u16 temp_addr; - u8 port_num; - u32 i; - u32 reg_val_aq; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_get_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - ®_val_aq, NULL); + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, true, + I40E_PHY_LED_PROV_REG_1, + ®_val_aq, NULL); if (status == I40E_SUCCESS) *val = (u16)reg_val_aq; return status; } temp_addr = I40E_PHY_LED_PROV_REG_1; - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, port_num); - + phy_addr = i40e_get_phy_address(hw, hw->port); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, temp_addr++) { status = i40e_read_phy_register_clause45(hw, @@ -5156,9 +5820,9 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, * **/ i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, - u16 led_addr, u32 mode) + u16 led_addr, u32 mode) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; u32 led_ctl = 0; u32 led_reg = 0; @@ -5179,7 +5843,6 @@ i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, led_reg = I40E_PHY_LED_MANUAL_ON; else led_reg = 0; - status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) goto restore_config; @@ -5194,6 +5857,196 @@ restore_config: return status; } +/** + * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register + * @hw: pointer to the hw struct + * @stat: pointer to structure with status of rx and tx lpi + * + * Read LPI state directly from external PHY register or from MAC + * register, depending on device ID and current link speed. 
+ */ +i40e_status i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat) +{ + i40e_status ret = I40E_SUCCESS; + u32 val; + + stat->rx_lpi_status = 0; + stat->tx_lpi_status = 0; + + if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC || + hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) && + (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB || + hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) { + ret = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_BCM_PHY_PCS_STATUS1_PAGE, + true, + I40E_BCM_PHY_PCS_STATUS1_REG, + &val, NULL); + + if (ret != I40E_SUCCESS) + return ret; + + stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI); + stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI); + + return ret; + } + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + + return ret; +} + +/** + * i40e_get_lpi_counters - read LPI counters from EEE statistics + * @hw: pointer to the hw struct + * @tx_counter: pointer to memory for TX LPI counter + * @rx_counter: pointer to memory for RX LPI counter + * @is_clear: returns true if counters are clear after read + * + * Read Low Power Idle (LPI) mode counters from Energy Efficient + * Ethernet (EEE) statistics. + **/ +i40e_status i40e_get_lpi_counters(struct i40e_hw *hw, + u32 *tx_counter, u32 *rx_counter, + bool *is_clear) +{ + /* only X710-T*L requires special handling of counters + * for other devices we just read the MAC registers + */ + if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC || + hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) && + hw->phy.link_info.link_speed != I40E_LINK_SPEED_1GB) { + i40e_status retval; + u32 cmd_status; + + *is_clear = false; + retval = i40e_aq_run_phy_activity(hw, + I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT, + &cmd_status, tx_counter, rx_counter, NULL); + + if (cmd_status != I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + retval = I40E_ERR_ADMIN_QUEUE_ERROR; + + return retval; + } + + *is_clear = true; + *tx_counter = rd32(hw, I40E_PRTPM_TLPIC); + *rx_counter = rd32(hw, I40E_PRTPM_RLPIC); + + return I40E_SUCCESS; +} + +/** + * i40e_get_lpi_duration - read LPI time duration from EEE statistics + * @hw: pointer to the hw struct + * @stat: pointer to structure with status of rx and tx lpi + * @tx_duration: pointer to memory for TX LPI time duration + * @rx_duration: pointer to memory for RX LPI time duration + * + * Read Low Power Idle (LPI) mode time duration from Energy Efficient + * Ethernet (EEE) statistics. 
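Both LPI counter paths feed i40e_lpi_stat_update, defined just below, which widens its subtraction so a single 32-bit rollover between reads still yields a correct delta. The pattern in isolation, a minimal sketch mirroring that code:

	/* Wrap-safe delta for a free-running 32-bit counter, as used by
	 * i40e_lpi_stat_update for the non clear-on-read case.
	 */
	static u64 counter_delta(u32 now, u64 offset)
	{
		return (now >= offset) ?
		       (u32)(now - offset) :
		       (u32)((now + BIT_ULL(32)) - offset);
	}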
+ */ +i40e_status i40e_get_lpi_duration(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat, + u64 *tx_duration, u64 *rx_duration) +{ + u32 tx_time_dur, rx_time_dur; + i40e_status retval; + u32 cmd_status; + + if (hw->device_id != I40E_DEV_ID_10G_BASE_T_BC && + hw->device_id != I40E_DEV_ID_5G_BASE_T_BC) + return I40E_ERR_NOT_IMPLEMENTED; + + retval = i40e_aq_run_phy_activity + (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR, + &cmd_status, &tx_time_dur, &rx_time_dur, NULL); + + if (retval) + return retval; + if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) != + I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + return I40E_ERR_ADMIN_QUEUE_ERROR; + + if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB && + !tx_time_dur && !rx_time_dur && + stat->tx_lpi_status && stat->rx_lpi_status) { + retval = i40e_aq_run_phy_activity + (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR, + &cmd_status, + &tx_time_dur, &rx_time_dur, NULL); + + if (retval) + return retval; + if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) != + I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + return I40E_ERR_ADMIN_QUEUE_ERROR; + tx_time_dur = 0; + rx_time_dur = 0; + } + + *tx_duration = tx_time_dur; + *rx_duration = rx_time_dur; + + return retval; +} + +/** + * i40e_lpi_stat_update - update LPI counters with values relative to offset + * @hw: pointer to the hw struct + * @offset_loaded: flag indicating need of writing current value to offset + * @tx_offset: pointer to offset of TX LPI counter + * @tx_stat: pointer to value of TX LPI counter + * @rx_offset: pointer to offset of RX LPI counter + * @rx_stat: pointer to value of RX LPI counter + * + * Update Low Power Idle (LPI) mode counters while having regard to passed + * offsets. + **/ +i40e_status i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat) +{ + i40e_status retval; + u32 tx_counter, rx_counter; + bool is_clear; + + retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter, &is_clear); + if (retval) + goto err; + + if (is_clear) { + *tx_stat += tx_counter; + *rx_stat += rx_counter; + } else { + if (!offset_loaded) { + *tx_offset = tx_counter; + *rx_offset = rx_counter; + } + + *tx_stat = (tx_counter >= *tx_offset) ? + (u32)(tx_counter - *tx_offset) : + (u32)((tx_counter + BIT_ULL(32)) - *tx_offset); + *rx_stat = (rx_counter >= *rx_offset) ? 
+ (u32)(rx_counter - *rx_offset) : + (u32)((rx_counter + BIT_ULL(32)) - *rx_offset); + } +err: + return retval; +} + /** * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register * @hw: pointer to the hw struct @@ -5213,17 +6066,17 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; i40e_status status; - if (!reg_val) + if (reg_val == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); - cmd_resp->address = cpu_to_le32(reg_addr); + cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (status == 0) - *reg_val = le32_to_cpu(cmd_resp->value); + if (status == I40E_SUCCESS) + *reg_val = LE32_TO_CPU(cmd_resp->value); return status; } @@ -5235,7 +6088,7 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, **/ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; bool use_register; int retry = 5; u32 val = 0; @@ -5281,8 +6134,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); - cmd->address = cpu_to_le32(reg_addr); - cmd->value = cpu_to_le32(reg_val); + cmd->address = CPU_TO_LE32(reg_addr); + cmd->value = CPU_TO_LE32(reg_val); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -5297,7 +6150,7 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, **/ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; bool use_register; int retry = 5; @@ -5321,20 +6174,51 @@ do_retry: } /** - * i40e_aq_set_phy_register + * i40e_mdio_if_number_selection - MDIO I/F number selection + * @hw: pointer to the hw struct + * @set_mdio: use MDIO I/F number specified by mdio_num + * @mdio_num: MDIO I/F number + * @cmd: pointer to PHY Register command structure + **/ +static void +i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num, + struct i40e_aqc_phy_register_access *cmd) +{ + if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) + cmd->cmd_flags |= + I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | + ((mdio_num << + I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & + I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); + else + i40e_debug(hw, I40E_DEBUG_PHY, + "MDIO I/F number selection not supported by current FW version.\n"); + } +} + +/** + * i40e_aq_set_phy_register_ext * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address + * @page_change: enable auto page change + * @set_mdio: use MDIO I/F number specified by mdio_num + * @mdio_num: MDIO I/F number * @reg_addr: PHY register address * @reg_val: new register value * @cmd_details: pointer to command details structure or NULL * * Write the external PHY register. + * NOTE: In common cases MDIO I/F number should not be changed, that's why you + * may use the simple wrapper i40e_aq_set_phy_register.
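The NOTE above refers to a simple wrapper for the common case; its presumed shape (the real definition lives in the driver headers, so treat this strictly as a sketch) just pins the MDIO selection arguments to their defaults:

	/* Assumed shape of the common-case wrapper the NOTE mentions: no MDIO
	 * I/F number override. Named _simple here to avoid claiming the real
	 * symbol's exact signature.
	 */
	static inline i40e_status
	i40e_aq_set_phy_register_simple(struct i40e_hw *hw, u8 phy_select,
					u8 dev_addr, bool page_change,
					u32 reg_addr, u32 reg_val,
					struct i40e_asq_cmd_details *cmd_details)
	{
		return i40e_aq_set_phy_register_ext(hw, phy_select, dev_addr,
						    page_change, false, 0,
						    reg_addr, reg_val, cmd_details);
	}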
**/ -i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) +enum i40e_status_code +i40e_aq_set_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = @@ -5345,9 +6229,14 @@ i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, i40e_aqc_opc_set_phy_register); cmd->phy_interface = phy_select; - cmd->dev_address = dev_addr; - cmd->reg_address = cpu_to_le32(reg_addr); - cmd->reg_value = cpu_to_le32(reg_val); + cmd->dev_addres = dev_addr; + cmd->reg_address = CPU_TO_LE32(reg_addr); + cmd->reg_value = CPU_TO_LE32(reg_val); + + if (!page_change) + cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; + + i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -5355,20 +6244,27 @@ i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, } /** - * i40e_aq_get_phy_register + * i40e_aq_get_phy_register_ext * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address + * @page_change: enable auto page change + * @set_mdio: use MDIO I/F number specified by mdio_num + * @mdio_num: MDIO I/F number * @reg_addr: PHY register address * @reg_val: read register value * @cmd_details: pointer to command details structure or NULL * * Read the external PHY register. + * NOTE: In common cases MDIO I/F number should not be changed, that's why you + * may use the simple wrapper i40e_aq_get_phy_register. **/ -i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) +enum i40e_status_code +i40e_aq_get_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = @@ -5379,12 +6275,251 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, i40e_aqc_opc_get_phy_register); cmd->phy_interface = phy_select; - cmd->dev_address = dev_addr; - cmd->reg_address = cpu_to_le32(reg_addr); + cmd->dev_addres = dev_addr; + cmd->reg_address = CPU_TO_LE32(reg_addr); + + if (!page_change) + cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; + + i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) - *reg_val = le32_to_cpu(cmd->reg_value); + *reg_val = LE32_TO_CPU(cmd->reg_value); + + return status; +} + +/** + * i40e_aq_run_phy_activity + * @hw: pointer to the hw struct + * @activity_id: ID of DNL activity to run + * @dnl_opcode: opcode passed to DNL script + * @cmd_status: pointer to memory to write return value of DNL script + * @data0: pointer to memory for first 4 bytes of data returned by DNL script + * @data1: pointer to memory for last 4 bytes of data returned by DNL script + * @cmd_details: pointer to command details structure or NULL + * + * Run DNL admin command.
+ **/ +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_run_phy_activity *cmd; + i40e_status retval; + struct i40e_aq_desc desc; + + cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw; + + if (!cmd_status || !data0 || !data1) { + retval = I40E_ERR_PARAM; + goto err; + } + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_run_phy_activity); + + cmd->activity_id = CPU_TO_LE16(activity_id); + cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode); + + retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (retval) + goto err; + + *cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status); + *data0 = LE32_TO_CPU(cmd->params.resp.data0); + *data1 = LE32_TO_CPU(cmd->params.resp.data1); +err: + return retval; +} + +/** + * i40e_aq_set_arp_proxy_config + * @hw: pointer to the HW structure + * @proxy_config: pointer to proxy config command table struct + * @cmd_details: pointer to command details + * + * Set ARP offload parameters from pre-populated + * i40e_aqc_arp_proxy_data struct + **/ +i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, + struct i40e_aqc_arp_proxy_data *proxy_config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (!proxy_config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + desc.params.external.addr_high = + CPU_TO_LE32(upper_32_bits((u64)proxy_config)); + desc.params.external.addr_low = + CPU_TO_LE32(lower_32_bits((u64)proxy_config)); + desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data)); + + status = i40e_asq_send_command(hw, &desc, proxy_config, + sizeof(struct i40e_aqc_arp_proxy_data), + cmd_details); + + return status; +} + +/** + * i40e_aq_set_ns_proxy_table_entry + * @hw: pointer to the HW structure + * @ns_proxy_table_entry: pointer to NS table entry command struct + * @cmd_details: pointer to command details + * + * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters + * from pre-populated i40e_aqc_ns_proxy_data struct + **/ +i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, + struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (!ns_proxy_table_entry) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_ns_proxy_table_entry); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + desc.params.external.addr_high = + CPU_TO_LE32(upper_32_bits((u64)ns_proxy_table_entry)); + desc.params.external.addr_low = + CPU_TO_LE32(lower_32_bits((u64)ns_proxy_table_entry)); + desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data)); + + status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry, + sizeof(struct i40e_aqc_ns_proxy_data), + cmd_details); + + return status; +} + +/** + * i40e_aq_set_clear_wol_filter + * @hw: pointer to the hw struct + * @filter_index: index of filter to modify (0-7) + * @filter: buffer containing filter to be set + * @set_filter: true to set filter, false to clear filter + * @no_wol_tco: if true, pass through packets cannot cause wake-up + * if false, pass through packets may cause wake-up + * 
@filter_valid: true if filter action is valid + * @no_wol_tco_valid: true if no WoL in TCO traffic action valid + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear WoL filter for port attached to the PF + **/ +i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, + u8 filter_index, + struct i40e_aqc_set_wol_filter_data *filter, + bool set_filter, bool no_wol_tco, + bool filter_valid, bool no_wol_tco_valid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_wol_filter *cmd = + (struct i40e_aqc_set_wol_filter *)&desc.params.raw; + i40e_status status; + u16 cmd_flags = 0; + u16 valid_flags = 0; + u16 buff_len = 0; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter); + + if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS) + return I40E_ERR_PARAM; + cmd->filter_index = CPU_TO_LE16(filter_index); + + if (set_filter) { + if (!filter) + return I40E_ERR_PARAM; + + cmd_flags |= I40E_AQC_SET_WOL_FILTER; + cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR; + } + + if (no_wol_tco) + cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL; + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + + if (filter_valid) + valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID; + if (no_wol_tco_valid) + valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID; + cmd->valid_flags = CPU_TO_LE16(valid_flags); + + buff_len = sizeof(*filter); + desc.datalen = CPU_TO_LE16(buff_len); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)filter)); + cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)filter)); + + status = i40e_asq_send_command(hw, &desc, filter, + buff_len, cmd_details); + + return status; +} + +/** + * i40e_aq_get_wake_event_reason + * @hw: pointer to the hw struct + * @wake_reason: return value, index of matching filter + * @cmd_details: pointer to command details structure or NULL + * + * Get information for the reason of a Wake Up event + **/ +i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw, + u16 *wake_reason, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_wake_reason_completion *resp = + (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) + *wake_reason = LE16_TO_CPU(resp->wake_reason); + + return status; +} + +/** + * i40e_aq_clear_all_wol_filters + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Clear all WoL filters for the port + **/ +i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_clear_all_wol_filters); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } @@ -5413,23 +6548,23 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_write_personalization_profile); + i40e_aqc_opc_write_personalization_profile); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) - 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); + desc.datalen = CPU_TO_LE16(buff_size); - cmd->profile_track_id = cpu_to_le32(track_id); + cmd->profile_track_id = CPU_TO_LE32(track_id); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); + *error_offset = LE32_TO_CPU(resp->error_offset); if (error_info) - *error_info = le32_to_cpu(resp->error_info); + *error_info = LE32_TO_CPU(resp->error_info); } return status; @@ -5454,12 +6589,12 @@ i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_personalization_profile_list); + i40e_aqc_opc_get_personalization_profile_list); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); cmd->flags = flags; @@ -5560,22 +6695,23 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, u16 msglen; i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); - desc.flags |= cpu_to_le16(aq->flags); - memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); + desc.flags |= CPU_TO_LE16(aq->flags); + i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw), + I40E_NONDMA_TO_NONDMA); msglen = aq->datalen; if (msglen) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(msglen); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); msg = &aq->data[0]; } status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); - if (status) { + if (status != I40E_SUCCESS) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "unable to exec DDP AQ opcode %u, error %d\n", aq->opcode, status); @@ -5583,9 +6719,10 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, } /* copy returned desc to aq_buf */ - memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); + i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw), + I40E_NONDMA_TO_NONDMA); - return 0; + return I40E_SUCCESS; } /** @@ -5602,7 +6739,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id, bool rollback) { struct i40e_profile_section_header *sec = NULL; - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_section_table *sec_tbl; u32 vendor_dev_id; u32 dev_cnt; @@ -5621,7 +6758,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, hw->device_id == (vendor_dev_id & 0xFFFF)) break; } - if (dev_cnt && i == dev_cnt) { + if (dev_cnt && (i == dev_cnt)) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP\n"); return I40E_ERR_DEVICE_NOT_SUPPORTED; @@ -5666,7 +6803,7 @@ enum i40e_status_code i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_section_table *sec_tbl; struct i40e_profile_section_header *sec = NULL; struct i40e_profile_aq_section *ddp_aq; @@ 
-5730,7 +6867,7 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id) { struct i40e_profile_section_header *sec = NULL; - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_section_table *sec_tbl; u32 offset = 0, info = 0; u32 section_size = 0; @@ -5782,7 +6919,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_profile_section_header *sec = NULL; struct i40e_profile_info *pinfo; u32 offset = 0, info = 0; @@ -5799,198 +6936,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, pinfo->track_id = track_id; pinfo->version = profile->version; pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); + i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE, + I40E_NONDMA_TO_NONDMA); status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); - - return status; -} - -/** - * i40e_aq_add_cloud_filters - * @hw: pointer to the hardware structure - * @seid: VSI seid to add cloud filters from - * @filters: Buffer which contains the filters to be added - * @filter_count: number of filters contained in the buffer - * - * Set the cloud filters for a given VSI. The contents of the - * i40e_aqc_cloud_filters_element_data are filled in by the caller - * of the function. - * - **/ -enum i40e_status_code -i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_cloud_filters_element_data *filters, - u8 filter_count) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_remove_cloud_filters *cmd = - (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - enum i40e_status_code status; - u16 buff_len; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_add_cloud_filters); - - buff_len = filter_count * sizeof(*filters); - desc.datalen = cpu_to_le16(buff_len); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - cmd->num_filters = filter_count; - cmd->seid = cpu_to_le16(seid); - - status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); - - return status; -} - -/** - * i40e_aq_add_cloud_filters_bb - * @hw: pointer to the hardware structure - * @seid: VSI seid to add cloud filters from - * @filters: Buffer which contains the filters in big buffer to be added - * @filter_count: number of filters contained in the buffer - * - * Set the big buffer cloud filters for a given VSI. The contents of the - * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the - * function. 
- * - **/ -enum i40e_status_code -i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_cloud_filters_element_bb *filters, - u8 filter_count) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_remove_cloud_filters *cmd = - (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - i40e_status status; - u16 buff_len; - int i; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_add_cloud_filters); - - buff_len = filter_count * sizeof(*filters); - desc.datalen = cpu_to_le16(buff_len); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - cmd->num_filters = filter_count; - cmd->seid = cpu_to_le16(seid); - cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; - - for (i = 0; i < filter_count; i++) { - u16 tnl_type; - u32 ti; - - tnl_type = (le16_to_cpu(filters[i].element.flags) & - I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> - I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; - - /* Due to hardware eccentricities, the VNI for Geneve is shifted - * one more byte further than normally used for Tenant ID in - * other tunnel types. - */ - if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { - ti = le32_to_cpu(filters[i].element.tenant_id); - filters[i].element.tenant_id = cpu_to_le32(ti << 8); - } - } - - status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); - - return status; -} - -/** - * i40e_aq_rem_cloud_filters - * @hw: pointer to the hardware structure - * @seid: VSI seid to remove cloud filters from - * @filters: Buffer which contains the filters to be removed - * @filter_count: number of filters contained in the buffer - * - * Remove the cloud filters for a given VSI. The contents of the - * i40e_aqc_cloud_filters_element_data are filled in by the caller - * of the function. - * - **/ -enum i40e_status_code -i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_cloud_filters_element_data *filters, - u8 filter_count) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_remove_cloud_filters *cmd = - (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - enum i40e_status_code status; - u16 buff_len; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_remove_cloud_filters); - - buff_len = filter_count * sizeof(*filters); - desc.datalen = cpu_to_le16(buff_len); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - cmd->num_filters = filter_count; - cmd->seid = cpu_to_le16(seid); - - status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); - - return status; -} - -/** - * i40e_aq_rem_cloud_filters_bb - * @hw: pointer to the hardware structure - * @seid: VSI seid to remove cloud filters from - * @filters: Buffer which contains the filters in big buffer to be removed - * @filter_count: number of filters contained in the buffer - * - * Remove the big buffer cloud filters for a given VSI. The contents of the - * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the - * function. 
- * - **/ -enum i40e_status_code -i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_cloud_filters_element_bb *filters, - u8 filter_count) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_remove_cloud_filters *cmd = - (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - i40e_status status; - u16 buff_len; - int i; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_remove_cloud_filters); - - buff_len = filter_count * sizeof(*filters); - desc.datalen = cpu_to_le16(buff_len); - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - cmd->num_filters = filter_count; - cmd->seid = cpu_to_le16(seid); - cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; - - for (i = 0; i < filter_count; i++) { - u16 tnl_type; - u32 ti; - - tnl_type = (le16_to_cpu(filters[i].element.flags) & - I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> - I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; - - /* Due to hardware eccentricities, the VNI for Geneve is shifted - * one more byte further than normally used for Tenant ID in - * other tunnel types. - */ - if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { - ti = le32_to_cpu(filters[i].element.tenant_id); - filters[i].element.tenant_id = cpu_to_le32(ti << 8); - } - } - - status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); - return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 200a1cb3b536..0580a51fe26e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #include "i40e_adminq.h" #include "i40e_prototype.h" @@ -23,7 +23,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); - return 0; + return I40E_SUCCESS; } /** @@ -364,7 +364,6 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, I40E_LLDP_TLV_LEN_SHIFT); dcbcfg->numapps = length / sizeof(*app); - if (!dcbcfg->numapps) return; if (dcbcfg->numapps > I40E_DCBX_MAX_APPS) @@ -500,7 +499,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, struct i40e_dcbx_config *dcbcfg) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; struct i40e_lldp_org_tlv *tlv; u16 type; u16 length; @@ -555,7 +554,7 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, u8 bridgetype, struct i40e_dcbx_config *dcbcfg) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; struct i40e_virt_mem mem; u8 *lldpmib; @@ -590,8 +589,8 @@ static void i40e_cee_to_dcb_v1_config( struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, struct i40e_dcbx_config *dcbcfg) { - u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); - u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status); + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); u8 i, tc, err; /* CEE PG data to ETS config */ @@ -604,7 +603,7 @@ static void i40e_cee_to_dcb_v1_config( tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); - dcbcfg->etscfg.prioritytable[i * 2] = tc; + dcbcfg->etscfg.prioritytable[i*2] = tc; tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); @@ -671,8 +670,8 @@ static void 
i40e_cee_to_dcb_config( struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, struct i40e_dcbx_config *dcbcfg) { - u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); - u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status); + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); u8 i, tc, err, sync, oper; /* CEE PG data to ETS config */ @@ -685,11 +684,11 @@ static void i40e_cee_to_dcb_config( tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); - dcbcfg->etscfg.prioritytable[i * 2] = tc; + dcbcfg->etscfg.prioritytable[i*2] = tc; tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); - dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc; + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) @@ -769,7 +768,7 @@ static void i40e_cee_to_dcb_config( **/ static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; /* IEEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; @@ -785,7 +784,7 @@ static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw) &hw->remote_dcbx_config); /* Don't treat ENOENT as an error for Remote MIBs */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) - ret = 0; + ret = I40E_SUCCESS; out: return ret; @@ -799,7 +798,7 @@ out: **/ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; @@ -814,22 +813,22 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); - if (!ret) { + if (ret == I40E_SUCCESS) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; hw->local_dcbx_config.tlv_status = - le16_to_cpu(cee_v1_cfg.tlv_status); + LE16_TO_CPU(cee_v1_cfg.tlv_status); i40e_cee_to_dcb_v1_config(&cee_v1_cfg, &hw->local_dcbx_config); } } else { ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, sizeof(cee_cfg), NULL); - if (!ret) { + if (ret == I40E_SUCCESS) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; hw->local_dcbx_config.tlv_status = - le32_to_cpu(cee_cfg.tlv_status); + LE32_TO_CPU(cee_cfg.tlv_status); i40e_cee_to_dcb_config(&cee_cfg, &hw->local_dcbx_config); } @@ -839,7 +838,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) return i40e_get_ieee_dcb_config(hw); - if (ret) + if (ret != I40E_SUCCESS) goto out; /* Get CEE DCB Desired Config */ @@ -850,11 +849,11 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) /* Get Remote DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, - I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, - &hw->remote_dcbx_config); + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); /* Don't treat ENOENT as an error for Remote MIBs */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) - ret = 0; + ret = I40E_SUCCESS; out: return ret; @@ -869,7 +868,7 @@ out: **/ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; struct i40e_lldp_variables lldp_cfg; u8 adminstatus = 0; @@ -889,7 +888,9 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) ret = i40e_read_nvm_module_data(hw, I40E_SR_EMP_SR_SETTINGS_PTR, - 
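/*
 * Worked example of the CEE PGID unpacking done in
 * i40e_cee_to_dcb_v1_config()/i40e_cee_to_dcb_config() above: each
 * oper_prio_tc octet carries two 4-bit priority-group IDs, so one
 * octet fans out to two prioritytable[] entries. The low/high-nibble
 * masks are illustrative stand-ins for I40E_CEE_PGID_PRIO_0/1_MASK
 * and their shifts.
 */
static void example_unpack_cee_pgid(const u8 *oper_prio_tc, u8 *prioritytable)
{
	int i;

	for (i = 0; i < 4; i++) {	/* 4 octets -> 8 priorities */
		prioritytable[i * 2]     = oper_prio_tc[i] & 0x0F;
		prioritytable[i * 2 + 1] = (oper_prio_tc[i] & 0xF0) >> 4;
	}
	/* e.g. oper_prio_tc[0] = 0x21 maps prio 0 -> TC 1, prio 1 -> TC 2 */
}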
offset, 1, + offset, + I40E_LLDP_CURRENT_STATUS_OFFSET, + I40E_LLDP_CURRENT_STATUS_SIZE, &lldp_cfg.adminstatus); } else { ret = i40e_read_lldp_cfg(hw, &lldp_cfg); @@ -930,6 +931,376 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) return ret; } +/** + * i40e_get_fw_lldp_status + * @hw: pointer to the hw struct + * @lldp_status: pointer to the status enum + * + * Get status of FW Link Layer Discovery Protocol (LLDP) Agent. + * Status of agent is reported via @lldp_status parameter. + **/ +enum i40e_status_code +i40e_get_fw_lldp_status(struct i40e_hw *hw, + enum i40e_get_fw_lldp_status_resp *lldp_status) +{ + i40e_status ret; + struct i40e_virt_mem mem; + u8 *lldpmib; + + if (!lldp_status) + return I40E_ERR_PARAM; + + /* Allocate buffer for the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib, + I40E_LLDPDU_SIZE, NULL, NULL, NULL); + + if (ret == I40E_SUCCESS) { + *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) { + /* MIB is not available yet but the agent is running */ + *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; + ret = I40E_SUCCESS; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED; + ret = I40E_SUCCESS; + } + + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format + * @tlv: Fill the ETS config data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 priority0, priority1, maxtcwilling = 0; + struct i40e_dcb_ets_config *etscfg; + u16 offset = 0, typelength, i; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_ETS_TLV_LENGTH); + tlv->typelength = htons(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + if (etscfg->willing) + maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT); + maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK; + buf[offset] = maxtcwilling; + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority0 = etscfg->prioritytable[i * 2] & 0xF; + priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | + priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etscfg->tcbwtable[i]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 
8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etscfg->tsatable[i]; +} + +/** + * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format + * @tlv: Fill ETS Recommended TLV in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etsrec; + u16 offset = 0, typelength, i; + u8 priority0, priority1; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_ETS_TLV_LENGTH); + tlv->typelength = htons(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = htonl(ouisubtype); + + etsrec = &dcbcfg->etsrec; + /* First Octet is reserved */ + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority0 = etsrec->prioritytable[i * 2] & 0xF; + priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | + priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etsrec->tcbwtable[i]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etsrec->tsatable[i]; +} + + /** + * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store to get PFC CFG data + * + * Prepare IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelength; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_PFC_TLV_LENGTH); + tlv->typelength = htons(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + if (dcbcfg->pfc.willing) + buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT); + + if (dcbcfg->pfc.mbc) + buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT); + + buf[0] |= dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcenable; +} + +/** + * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format + * @tlv: Fill APP TLV in IEEE format + * @dcbcfg: Local store to get APP CFG data + * + * Prepare IEEE 802.1Qaz APP CFG TLV + **/ +static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv, 
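/*
 * Worked example of the IEEE priority-assignment packing used by the
 * ETS TLV builders above: eight 4-bit priorities are packed two per
 * octet, even-indexed priority in the high nibble. The shift of 4 is
 * an assumption standing in for I40E_IEEE_ETS_PRIO_1_SHIFT.
 */
static void example_pack_prio_table(const u8 *prioritytable, u8 *buf)
{
	int i;

	for (i = 0; i < 4; i++)
		buf[i] = (u8)(((prioritytable[i * 2] & 0xF) << 4) |
			      (prioritytable[i * 2 + 1] & 0xF));
	/* e.g. prioritytable = {0, 1, 2, 3, ...} yields buf[0] = 0x01 */
}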
+ struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength, length, offset = 0; + u8 priority, selector, i = 0; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + /* No APP TLVs then just return */ + if (dcbcfg->numapps == 0) + return; + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_APP_PRI); + tlv->ouisubtype = htonl(ouisubtype); + + /* Move offset to App Priority Table */ + offset++; + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (i < dcbcfg->numapps) { + priority = dcbcfg->app[i].priority & 0x7; + selector = dcbcfg->app[i].selector & 0x7; + buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector; + buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF; + buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + /* length includes size of ouisubtype + 1 reserved + 3*numapps */ + length = sizeof(tlv->ouisubtype) + 1 + (i*3); + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + (length & 0x1FF)); + tlv->typelength = htons(typelength); +} + +/** + * i40e_add_dcb_tlv - Add all IEEE TLVs + * @tlv: pointer to org tlv + * @dcbcfg: Local store which holds the DCB Config + * @tlvid: Type of IEEE TLV + * + * Add tlv information + **/ +static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg, + u16 tlvid) +{ + switch (tlvid) { + case I40E_IEEE_TLV_ID_ETS_CFG: + i40e_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_ETS_REC: + i40e_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_PFC_CFG: + i40e_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_APP_PRI: + i40e_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_set_dcb_config - Set the local LLDP MIB to FW + * @hw: pointer to the hw struct + * + * Set DCB configuration to the Firmware + **/ +i40e_status i40e_set_dcb_config(struct i40e_hw *hw) +{ + i40e_status ret = I40E_SUCCESS; + struct i40e_dcbx_config *dcbcfg; + struct i40e_virt_mem mem; + u8 mib_type, *lldpmib; + u16 miblen; + + /* update the hw local config */ + dcbcfg = &hw->local_dcbx_config; + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB; + if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) { + mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS << + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT; + } + lldpmib = (u8 *)mem.va; + ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg); + ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL); + + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format + * @lldpmib: pointer to mib to be output + * @miblen: pointer to u16 for length of lldpmib + * @dcbcfg: store for LLDPDU data + * + * Convert the DCB configuration to LLDP MIB format + **/ +i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, offset = 0, tlvid = I40E_TLV_ID_START; + i40e_status ret = I40E_SUCCESS; + struct i40e_lldp_org_tlv *tlv; + u16 typelength; + + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++); + typelength = ntohs(tlv->typelength); + length = (u16)((typelength
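/*
 * Worked example of the LLDP typelength encoding manipulated at this
 * point in i40e_dcb_config_to_lldp(): a 16-bit big-endian field holding
 * a 7-bit TLV type and a 9-bit length. The 9-bit length mask matches
 * I40E_LLDP_TLV_LEN_MASK from i40e_dcb.h; the type shift of 9 is the
 * standard LLDP layout and is assumed here for I40E_LLDP_TLV_TYPE_SHIFT.
 */
static u16 example_tlv_typelength(u8 type, u16 len)
{
	return (u16)(((u16)type << 9) | (len & 0x1FF));
}
/* e.g. an ORG TLV (type 127) of length 25 encodes as 0xFE19 */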
& I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + if (length) + offset += length + 2; + /* END TLV or beyond LLDPDU size */ + if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) || + (offset > I40E_LLDPDU_SIZE)) + break; + /* Move to next TLV */ + if (length) + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + length); + } + *miblen = offset; + return ret; +} + /** * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM * @hw: pointer to the HW structure @@ -940,8 +1311,8 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) * Reads the LLDP configuration data from NVM using passed addresses **/ static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, - struct i40e_lldp_variables *lldp_cfg, - u8 module, u32 word_offset) + struct i40e_lldp_variables *lldp_cfg, + u8 module, u32 word_offset) { u32 address, offset = (2 * word_offset); i40e_status ret; @@ -949,16 +1320,16 @@ static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, u16 mem; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (ret) + if (ret != I40E_SUCCESS) return ret; ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem, true, NULL); i40e_release_nvm(hw); - if (ret) + if (ret != I40E_SUCCESS) return ret; - mem = le16_to_cpu(raw_mem); + mem = LE16_TO_CPU(raw_mem); /* Check if this pointer needs to be read in word size or 4K sector * units. */ @@ -968,21 +1339,21 @@ static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, address = (0x7FFF & mem) * 2; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (ret) + if (ret != I40E_SUCCESS) goto err_lldp_cfg; ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem, true, NULL); i40e_release_nvm(hw); - if (ret) + if (ret != I40E_SUCCESS) return ret; - mem = le16_to_cpu(raw_mem); + mem = LE16_TO_CPU(raw_mem); offset = mem + word_offset; offset *= 2; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (ret) + if (ret != I40E_SUCCESS) goto err_lldp_cfg; ret = i40e_aq_read_nvm(hw, 0, address + offset, @@ -1002,22 +1373,22 @@ err_lldp_cfg: * Reads the LLDP configuration data from NVM **/ i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, - struct i40e_lldp_variables *lldp_cfg) + struct i40e_lldp_variables *lldp_cfg) { - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; u32 mem; if (!lldp_cfg) return I40E_ERR_PARAM; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (ret) + if (ret != I40E_SUCCESS) return ret; ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), &mem, true, NULL); i40e_release_nvm(hw); - if (ret) + if (ret != I40E_SUCCESS) return ret; /* Read a bit that holds information whether we are running flat or diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 2a80c5daa376..3cdaeeb15419 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #ifndef _I40E_DCB_H_ #define _I40E_DCB_H_ @@ -32,6 +32,9 @@ #define I40E_CEE_MAX_FEAT_TYPE 3 #define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B #define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31 +#define I40E_LLDP_CURRENT_STATUS_OFFSET 1 +#define I40E_LLDP_CURRENT_STATUS_SIZE 1 + /* Defines for LLDP TLV header */ #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) @@ -77,6 +80,20 @@ #define I40E_IEEE_APP_PRIO_SHIFT 5 #define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) +/* TLV definitions for preparing MIB */ +#define I40E_TLV_ID_CHASSIS_ID 0 +#define I40E_TLV_ID_PORT_ID 1 +#define I40E_TLV_ID_TIME_TO_LIVE 2 +#define I40E_IEEE_TLV_ID_ETS_CFG 3 +#define I40E_IEEE_TLV_ID_ETS_REC 4 +#define I40E_IEEE_TLV_ID_PFC_CFG 5 +#define I40E_IEEE_TLV_ID_APP_PRI 6 +#define I40E_TLV_ID_END_OF_LLDPPDU 7 +#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG + +#define I40E_IEEE_ETS_TLV_LENGTH 25 +#define I40E_IEEE_PFC_TLV_LENGTH 6 +#define I40E_IEEE_APP_TLV_LENGTH 11 #pragma pack(1) @@ -118,6 +135,11 @@ struct i40e_cee_app_prio { }; #pragma pack() +enum i40e_get_fw_lldp_status_resp { + I40E_GET_FW_LLDP_STATUS_DISABLED = 0, + I40E_GET_FW_LLDP_STATUS_ENABLED = 1 +}; + i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status); i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, @@ -126,5 +148,12 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, u8 bridgetype, struct i40e_dcbx_config *dcbcfg); i40e_status i40e_get_dcb_config(struct i40e_hw *hw); -i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change); +i40e_status i40e_init_dcb(struct i40e_hw *hw, + bool enable_mib_change); +enum i40e_status_code +i40e_get_fw_lldp_status(struct i40e_hw *hw, + enum i40e_get_fw_lldp_status_resp *lldp_status); +i40e_status i40e_set_dcb_config(struct i40e_hw *hw); +i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg); #endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 9deae9a35423..1610197b4281 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB #include "i40e.h" #include +#ifdef HAVE_DCBNL_IEEE /** * i40e_get_pfc_delay - retrieve PFC Link Delay * @hw: pointer to hardware struct @@ -176,8 +177,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) } } +#ifdef HAVE_DCBNL_IEEE_DELAPP /* Notify user-space of the changes */ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +#endif } /** @@ -295,4 +298,5 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) /* Set initial IEEE DCB settings */ i40e_dcbnl_set_all(vsi); } -#endif /* CONFIG_I40E_DCB */ +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 5e08f100c413..bda29f1a83d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #include "i40e.h" @@ -41,7 +41,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw, status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, NULL); - if (status) + if (status != I40E_SUCCESS) return -1; profile_list = (struct i40e_ddp_profile_list *)buff; @@ -87,7 +87,7 @@ static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new, * Returns <0 if error. **/ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw, - struct i40e_profile_info *pinfo) + struct i40e_profile_info *pinfo) { struct i40e_ddp_profile_list *profile_list; u8 buff[I40E_PROFILE_LIST_SIZE]; @@ -96,7 +96,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw, status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, NULL); - if (status) + if (status != I40E_SUCCESS) return -EIO; profile_list = (struct i40e_ddp_profile_list *)buff; @@ -108,6 +108,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw, return 0; } + /** * i40e_add_pinfo * @hw: pointer to the hardware structure @@ -141,7 +142,8 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, /* Clear reserved field */ memset(pinfo->reserved, 0, sizeof(pinfo->reserved)); - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); + i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE, + I40E_NONDMA_TO_NONDMA); status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); @@ -211,9 +213,8 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, if (pkg_hdr->version.major > 0) { struct i40e_ddp_version ver = pkg_hdr->version; - netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u", - ver.major, ver.minor, ver.update, ver.draft); + ver.major, ver.minor, ver.update, ver.draft); return false; } if (size_huge > size) { @@ -235,15 +236,11 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, u32 offset = pkg_hdr->segment_offset[segment]; if (0xFU & offset) { - netdev_err(netdev, - "Invalid DDP profile %u segment alignment", - segment); + netdev_err(netdev, "Invalid DDP profile %u segment alignment", segment); return false; } if (pkg_hdr_size > offset || offset >= size) { - netdev_err(netdev, - "Invalid DDP profile %u segment offset", - segment); + netdev_err(netdev, "Invalid DDP profile %u segment offset", segment); return false; } } @@ -345,9 +342,8 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, status = i40e_write_profile(&pf->hw, profile_hdr, track_id); if (status) { if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) { - netdev_err(netdev, - "Profile is not supported by the device."); - return -EPERM; + netdev_err(netdev, "Profile is not supported by the device."); + return -EPERM; } netdev_err(netdev, "Failed to write DDP profile."); return -EIO; @@ -405,6 +401,9 @@ static int i40e_ddp_restore(struct i40e_pf *pf) return status; } +#define I40E_DDP_PROFILE_NAME_MAX \ + (sizeof(I40E_DDP_PROFILE_PATH) + ETHTOOL_FLASH_MAX_FILENAME) + /** * i40e_ddp_flash - callback function for ethtool flash feature * @netdev: net device structure @@ -435,13 +434,11 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash) */ if (strncmp(flash->data, "-", 2) != 0) { struct i40e_ddp_old_profile_list *list_entry; - char profile_name[sizeof(I40E_DDP_PROFILE_PATH) - + I40E_DDP_PROFILE_NAME_MAX]; + char profile_name[I40E_DDP_PROFILE_NAME_MAX]; - profile_name[sizeof(profile_name) - 1] = 0; - strncpy(profile_name, I40E_DDP_PROFILE_PATH, - sizeof(profile_name) - 1); - strncat(profile_name, flash->data, 
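/*
 * The strncpy()/strncat() pair in i40e_ddp_flash() below must be kept
 * manually in sync with the buffer size. An equivalent, overflow-safe
 * construction (a sketch, not what the driver ships) is a single
 * bounded snprintf(), which always NUL-terminates:
 */
static void example_build_profile_name(char *name, size_t len,
				       const char *flash_data)
{
	/* never writes past len and cannot leave name unterminated */
	snprintf(name, len, "%s%s", I40E_DDP_PROFILE_PATH, flash_data);
}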
I40E_DDP_PROFILE_NAME_MAX); + profile_name[sizeof(profile_name)-1]=0; + strncpy(profile_name, I40E_DDP_PROFILE_PATH, sizeof(profile_name)-1); + strncat(profile_name, flash->data, ETHTOOL_FLASH_MAX_FILENAME); /* Load DDP recipe. */ status = request_firmware(&ddp_config, profile_name, &netdev->dev); @@ -454,9 +451,9 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash) ddp_config->size, true); if (!status) { - list_entry = - kzalloc(sizeof(struct i40e_ddp_old_profile_list) + - ddp_config->size, GFP_KERNEL); + list_entry = kzalloc( + sizeof(struct i40e_ddp_old_profile_list) + + ddp_config->size, GFP_KERNEL); if (!list_entry) { netdev_info(netdev, "Failed to allocate memory for previous DDP profile data."); netdev_info(netdev, "New profile loaded but roll-back will be impossible."); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 99ea543dd245..928ef5b27403 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifdef CONFIG_DEBUG_FS @@ -65,7 +65,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; - int buf_size = 256; + size_t buf_size = 256; char *buf; int len; @@ -102,48 +102,15 @@ static char *i40e_filter_state_string[] = { }; /** - * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum + * i40e_dbg_dump_vsi_filters - handles dump of mac/vlan filters for a VSI * @pf: the i40e_pf created in command write - * @seid: the seid the user put in - **/ -static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) + * @vsi: the vsi to dump + */ +static void i40e_dbg_dump_vsi_filters(struct i40e_pf *pf, struct i40e_vsi *vsi) { - struct rtnl_link_stats64 *nstat; struct i40e_mac_filter *f; - struct i40e_vsi *vsi; - int i, bkt; + int bkt; - vsi = i40e_dbg_find_vsi(pf, seid); - if (!vsi) { - dev_info(&pf->pdev->dev, - "dump %d: seid not found\n", seid); - return; - } - dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); - if (vsi->netdev) { - struct net_device *nd = vsi->netdev; - - dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n", - nd->name, nd->state, nd->flags); - dev_info(&pf->pdev->dev, " features = 0x%08lx\n", - (unsigned long int)nd->features); - dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n", - (unsigned long int)nd->hw_features); - dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", - (unsigned long int)nd->vlan_features); - } - dev_info(&pf->pdev->dev, - " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", - vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); - for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++) - dev_info(&pf->pdev->dev, - " state[%d] = %08lx\n", - i, vsi->state[i]); - if (vsi == pf->vsi[pf->lan_vsi]) - dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", - pf->hw.mac.addr, - pf->hw.mac.san_addr, - pf->hw.mac.port_addr); hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { dev_info(&pf->pdev->dev, " mac_filter_hash: %pM vid=%d, state %s\n", @@ -154,6 +121,91 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->active_filters, vsi->promisc_threshold, (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ? 
"ON" : "OFF")); +} + +/** + * i40e_dbg_dump_all_vsi_filters - dump mac/vlan filters for all VSI on a PF + * @pf: the i40e_pf created in command write + */ +static void i40e_dbg_dump_all_vsi_filters(struct i40e_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i]) { + dev_info(&pf->pdev->dev, "vsi seid %d\n", + pf->vsi[i]->seid); + i40e_dbg_dump_vsi_filters(pf, pf->vsi[i]); + } +} + +/** + * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum + * @pf: the i40e_pf created in command write + * @seid: the seid the user put in + **/ +static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) +{ +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 *nstat; +#else + struct net_device_stats *nstat; +#endif + struct i40e_vsi *vsi; + int i; + + vsi = i40e_dbg_find_vsi(pf, seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "dump %d: seid not found\n", seid); + return; + } + dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); + if (vsi->netdev) { + struct net_device *nd = vsi->netdev; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + u32 hw_features; +#endif + + dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n", + nd->name, nd->state, nd->flags); + dev_info(&pf->pdev->dev, " features = 0x%08lx\n", + (unsigned long int)nd->features); +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = get_netdev_hw_features(vsi->netdev); + dev_info(&pf->pdev->dev, " hw_features = 0x%08x\n", + hw_features); +#else + dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n", + (unsigned long int)nd->hw_features); +#endif +#endif +#ifdef HAVE_NETDEV_VLAN_FEATURES + dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", + (unsigned long int)nd->vlan_features); +#endif + } +#ifdef HAVE_VLAN_RX_REGISTER + dev_info(&pf->pdev->dev, " vlgrp is %s\n", + vsi->vlgrp ? "" : ""); +#else + dev_info(&pf->pdev->dev, " active_vlans is %s\n", + vsi->active_vlans ? "" : ""); +#endif /* HAVE_VLAN_RX_REGISTER */ + dev_info(&pf->pdev->dev, + " flags = 0x%016llx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", + vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); + for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++) + dev_info(&pf->pdev->dev, + " state[%d] = %08lx\n", + i, vsi->state[i]); + if (vsi == pf->vsi[pf->lan_vsi]) + dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + pf->hw.mac.addr, + pf->hw.mac.san_addr, + pf->hw.mac.port_addr); + i40e_dbg_dump_vsi_filters(pf, vsi); nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", @@ -277,9 +329,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " rx_rings[%i]: size = %i\n", i, rx_ring->size); dev_info(&pf->pdev->dev, - " rx_rings[%i]: itr_setting = %d (%s)\n", + " rx_rings[%i]: rx_itr_setting = %d (%s)\n", i, rx_ring->itr_setting, - ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed"); + ITR_IS_DYNAMIC(rx_ring->itr_setting) ? + "dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); @@ -315,9 +368,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " tx_rings[%i]: DCB tc = %d\n", i, tx_ring->dcb_tc); dev_info(&pf->pdev->dev, - " tx_rings[%i]: itr_setting = %d (%s)\n", + " tx_rings[%i]: tx_itr_setting = %d (%s)\n", i, tx_ring->itr_setting, - ITR_IS_DYNAMIC(tx_ring->itr_setting) ? 
"dynamic" : "fixed"); + ITR_IS_DYNAMIC(tx_ring->itr_setting) ? + "dynamic" : "fixed"); } rcu_read_unlock(); dev_info(&pf->pdev->dev, @@ -482,6 +536,15 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) } } +/* Helper macros for printing upper half of the 32byte descriptor. */ +#ifdef I40E_32BYTE_RX +#define RXD_RSVD1(_rxd) ((_rxd)->read.rsvd1) +#define RXD_RSVD2(_rxd) ((_rxd)->read.rsvd2) +#else +#define RXD_RSVD1(_rxd) 0ULL +#define RXD_RSVD2(_rxd) 0ULL +#endif + /** * i40e_dbg_dump_desc - handles dump desc write into command datum * @cnt: number of arguments that the user supplied @@ -496,7 +559,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, { struct i40e_tx_desc *txd; union i40e_rx_desc *rxd; - struct i40e_ring *ring; + struct i40e_ring ring; struct i40e_vsi *vsi; int i; @@ -515,58 +578,59 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, vsi_seid); return; } - - ring = kmemdup(is_rx_ring - ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], - sizeof(*ring), GFP_KERNEL); - if (!ring) - return; - + if (is_rx_ring) + ring = *vsi->rx_rings[ring_id]; + else + ring = *vsi->tx_rings[ring_id]; if (cnt == 2) { + void *head = (struct i40e_tx_desc *)ring.desc + ring.count; + u32 tx_head = le32_to_cpu(*(volatile __le32 *)head); + dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); - for (i = 0; i < ring->count; i++) { + dev_info(&pf->pdev->dev, "head = %04x tail = %04x\n", + is_rx_ring ? 0 : tx_head, readl(ring.tail)); + dev_info(&pf->pdev->dev, "ntc = %04x ntu = %04x\n", + ring.next_to_clean, ring.next_to_use); + for (i = 0; i < ring.count; i++) { if (!is_rx_ring) { - txd = I40E_TX_DESC(ring, i); + txd = I40E_TX_DESC(&ring, i); dev_info(&pf->pdev->dev, " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else { - rxd = I40E_RX_DESC(ring, i); + rxd = I40E_RX_DESC(&ring, i); dev_info(&pf->pdev->dev, " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr, - rxd->read.rsvd1, rxd->read.rsvd2); + RXD_RSVD1(rxd), RXD_RSVD2(rxd)); } } } else if (cnt == 3) { - if (desc_n >= ring->count || desc_n < 0) { + if (desc_n >= ring.count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); - goto out; + return; } if (!is_rx_ring) { - txd = I40E_TX_DESC(ring, desc_n); + txd = I40E_TX_DESC(&ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else { - rxd = I40E_RX_DESC(ring, desc_n); + rxd = I40E_RX_DESC(&ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr, - rxd->read.rsvd1, rxd->read.rsvd2); + RXD_RSVD1(rxd), RXD_RSVD2(rxd)); } } else { dev_info(&pf->pdev->dev, "dump desc rx/tx []\n"); } - -out: - kfree(ring); } /** @@ -584,7 +648,161 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) } /** - * i40e_dbg_dump_stats - handles dump stats write into command datum + * i40e_dbg_dump_resources - handles dump resources request + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_resources(struct i40e_pf *pf) +{ + struct i40e_aqc_switch_resource_alloc_element_resp *buf; + int buf_len; + u16 count = 32; + u8 num_entries; + int ret, i; + + buf_len = count * sizeof(*buf); + buf = kzalloc(buf_len, GFP_KERNEL); + if 
(!buf) { + dev_err(&pf->pdev->dev, "Can't get memory\n"); + return; + } + + ret = i40e_aq_get_switch_resource_alloc(&pf->hw, &num_entries, + buf, count, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "fail to get resources, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + kfree(buf); + return; + } + + dev_info(&pf->pdev->dev, " resources:\n"); + dev_info(&pf->pdev->dev, " guar total used unalloc name\n"); + for (i = 0; i < num_entries; i++) { + char *p; + + switch (buf[i].resource_type) { + case I40E_AQ_RESOURCE_TYPE_VEB: + p = "vebs"; + break; + case I40E_AQ_RESOURCE_TYPE_VSI: + p = "vsis"; + break; + case I40E_AQ_RESOURCE_TYPE_MACADDR: + p = "macaddrs"; + break; + case I40E_AQ_RESOURCE_TYPE_STAG: + p = "stags"; + break; + case I40E_AQ_RESOURCE_TYPE_ETAG: + p = "etags"; + break; + case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH: + p = "multicast hash"; + break; + case I40E_AQ_RESOURCE_TYPE_UNICAST_HASH: + p = "unicast hash"; + break; + case I40E_AQ_RESOURCE_TYPE_VLAN: + p = "vlans"; + break; + case I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY: + p = "vsi list entries"; + break; + case I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY: + p = "etag list entries"; + break; + case I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL: + p = "vlan stat pools"; + break; + case I40E_AQ_RESOURCE_TYPE_MIRROR_RULE: + p = "mirror rules"; + break; + case I40E_AQ_RESOURCE_TYPE_QUEUE_SETS: + p = "queue sets"; + break; + case I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS: + p = "vlan filters"; + break; + case I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS: + p = "inner mac filters"; + break; + case I40E_AQ_RESOURCE_TYPE_IP_FILTERS: + p = "ip filters"; + break; + case I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS: + p = "gre vn keys"; + break; + case I40E_AQ_RESOURCE_TYPE_VN2_KEYS: + p = "vn2 keys"; + break; + case I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS: + p = "tunnel ports"; + break; + default: + p = "unknown"; + break; + } + + dev_info(&pf->pdev->dev, " %4d %4d %4d %4d %s\n", + buf[i].guaranteed, buf[i].total, buf[i].used, + buf[i].total_unalloced, p); + } + + kfree(buf); +} + +/** + * i40e_dbg_dump_capabilities - handles dump capabilities request + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_capabilities(struct i40e_pf *pf) +{ + struct i40e_hw_capabilities *p; + + p = (struct i40e_hw_capabilities *)&pf->hw.func_caps; + dev_info(&pf->pdev->dev, " capabilities:\n"); + dev_info(&pf->pdev->dev, + " switch_mode = %d\tmgmt_mode = %d\tnpar = %d\tos2bmc = %d\n", + p->switch_mode, p->management_mode, p->npar_enable, p->os2bmc); + dev_info(&pf->pdev->dev, + " valid_functions = 0x%04x\tsr_iov_1_1 = %d\tnum_vfs = %d\tvf_base_id = %d\n", + p->valid_functions, p->sr_iov_1_1, p->num_vfs, p->vf_base_id); + dev_info(&pf->pdev->dev, " nvm_image_type = %d\n", p->nvm_image_type); + dev_info(&pf->pdev->dev, + " num_vsis = %d\tvmdq = %d\tflex10_enable = %d\tflex10_capable = %d\n", + p->num_vsis, p->vmdq, p->flex10_enable, p->flex10_capable); + dev_info(&pf->pdev->dev, + " evb_802_1_qbg = %d\tevb_802_1_qbh = %d\tmgmt_cem = %d\tieee_1588 = %d\n", + p->evb_802_1_qbg, p->evb_802_1_qbh, p->mgmt_cem, p->ieee_1588); + dev_info(&pf->pdev->dev, + " fcoe = %d\tiwarp = %d\tmdio_port_num = %d\tmdio_port_mode = %d\n", + p->fcoe, p->iwarp, p->mdio_port_num, p->mdio_port_mode); + dev_info(&pf->pdev->dev, + " dcb = %d\tenabled_tcmap = %d\tmaxtc = %d\tiscsi = %d\n", + p->dcb, p->enabled_tcmap, p->maxtc, p->iscsi); + dev_info(&pf->pdev->dev, + " fd = %d\tfd_filters_guaranteed = %d\tfd_filters_best_effort = 
%d\tnum_flow_director_filters = %d\n", + p->fd, p->fd_filters_guaranteed, p->fd_filters_best_effort, + p->num_flow_director_filters); + dev_info(&pf->pdev->dev, + " rss = %d\trss_table_size = %d\trss_table_entry_width = %d\n", + p->rss, p->rss_table_size, p->rss_table_entry_width); + dev_info(&pf->pdev->dev, + " led[0] = %d\tsdp[0] = %d\tled_pin_num = %d\tsdp_pin_num = %d\n", + p->led[0], p->sdp[0], p->led_pin_num, p->sdp_pin_num); + dev_info(&pf->pdev->dev, + " num_rx_qp = %d\tnum_tx_qp = %d\tbase_queue = %d\n", + p->num_rx_qp, p->num_tx_qp, p->base_queue); + dev_info(&pf->pdev->dev, + " num_msix_vectors = %d\tnum_msix_vectors_vf = %d\trx_buf_chain_len = %d\n", + p->num_msix_vectors, p->num_msix_vectors_vf, + p->rx_buf_chain_len); +} + +/** + * i40e_dbg_dump_eth_stats - handles dump stats write into command datum * @pf: the i40e_pf created in command write * @estats: the eth stats structure to be dumped **/ @@ -617,17 +835,35 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; + int i; veb = i40e_dbg_find_veb(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); return; } +#ifdef HAVE_BRIDGE_ATTRIBS dev_info(&pf->pdev->dev, "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, veb->uplink_seid, veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); +#else + dev_info(&pf->pdev->dev, + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", + veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, + veb->uplink_seid, + "VEB"); +#endif + dev_info(&pf->pdev->dev, + "veb bw: enabled_tc=0x%x bw_limit=%d bw_max_quanta=%d is_abs_credits=%d\n", + veb->enabled_tc, veb->bw_limit, veb->bw_max_quanta, + veb->is_abs_credits); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "veb bw: tc=%d bw_share=%d bw_limit=%d max_quanta=%d\n", + i, veb->bw_tc_share_credits[i], + veb->bw_tc_limit_credits[i], veb->bw_tc_max_quanta[i]); + } i40e_dbg_dump_eth_stats(pf, &veb->stats); } @@ -688,6 +924,100 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) i40e_dbg_dump_vf(pf, i); } +/** + * i40e_dbg_dump_dcb_cfg - Dump DCB config data struct + * @pf: the corresponding PF + * @cfg: DCB Config data structure + * @prefix: Prefix string + **/ +static void i40e_dbg_dump_dcb_cfg(struct i40e_pf *pf, + struct i40e_dcbx_config *cfg, + char *prefix) +{ + int i; + + dev_info(&pf->pdev->dev, + "%s ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", + prefix, cfg->etscfg.willing, cfg->etscfg.cbs, + cfg->etscfg.maxtcs); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "%s ets_cfg: up=%d tc=%d\n", + prefix, i, cfg->etscfg.prioritytable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "%s ets_cfg: tc=%d tcbw=%d tctsa=%d\n", + prefix, i, cfg->etscfg.tcbwtable[i], + cfg->etscfg.tsatable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "%s ets_rec: up=%d tc=%d\n", + prefix, i, cfg->etsrec.prioritytable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "%s ets_rec: tc=%d tcbw=%d tctsa=%d\n", + prefix, i, cfg->etsrec.tcbwtable[i], + cfg->etsrec.tsatable[i]); + } + dev_info(&pf->pdev->dev, + "%s pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", + prefix, cfg->pfc.willing, cfg->pfc.mbc, + cfg->pfc.pfccap, cfg->pfc.pfcenable); + + dev_info(&pf->pdev->dev, + "%s app_table: num_apps=%d\n", prefix, (int)cfg->numapps); + 
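/*
 * All of these dump helpers are reached by writing keywords such as
 * "dump resources" or "dump capabilities" to the PF's debugfs command
 * file, with output emitted to the kernel log via dev_info(). A
 * minimal userspace sketch; the debugfs mount point and the PCI
 * address in the path are illustrative:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "dump resources";
	int fd = open("/sys/kernel/debug/i40e/0000:3b:00.0/command", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;	/* read the result with dmesg */
}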
for (i = 0; i < (int)cfg->numapps; i++) { + dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n", + prefix, i, cfg->app[i].priority, + cfg->app[i].selector, + cfg->app[i].protocolid); + } +} + +/** + * i40e_dbg_dump_fdir_filter - Dump out flow director filter contents + * @pf: the corresponding PF + * @f: the flow director filter + **/ +static inline void i40e_dbg_dump_fdir_filter(struct i40e_pf *pf, + struct i40e_fdir_filter *f) +{ + dev_info(&pf->pdev->dev, "fdir filter %d:\n", f->fd_id); + dev_info(&pf->pdev->dev, " flow_type=%d ip4_proto=%d\n", + f->flow_type, f->ip4_proto); + dev_info(&pf->pdev->dev, " dst_ip= %pi4 dst_port=%d\n", + &f->dst_ip, f->dst_port); + dev_info(&pf->pdev->dev, " src_ip= %pi4 src_port=%d\n", + &f->src_ip, f->src_port); + dev_info(&pf->pdev->dev, " sctp_v_tag=%d q_index=%d\n", + f->sctp_v_tag, f->q_index); + if (f->flex_filter) + dev_info(&pf->pdev->dev, " flex_word=%04x flex_offset=%d\n", + f->flex_word, f->flex_offset); + dev_info(&pf->pdev->dev, " pctype=%d dest_vsi=%d dest_ctl=%d\n", + f->pctype, f->dest_vsi, f->dest_ctl); + dev_info(&pf->pdev->dev, " fd_status=%d cnt_index=%d\n", + f->fd_status, f->cnt_index); +} + +/** + * i40e_dbg_dump_cloud_filter - Dump out cloud filter contents + * @pf: the corresponding PF + * @f: the flow director filter + **/ +static inline void i40e_dbg_dump_cloud_filter(struct i40e_pf *pf, + struct i40e_cloud_filter *f) +{ + dev_info(&pf->pdev->dev, "cloud filter %d:\n", f->id); + dev_info(&pf->pdev->dev, " outer_mac[]=%pM inner_mac=%pM\n", + f->outer_mac, f->inner_mac); + dev_info(&pf->pdev->dev, " inner_vlan %d, inner_ip[0] %pi4\n", + be16_to_cpu(f->inner_vlan), f->inner_ip); + dev_info(&pf->pdev->dev, " tenant_id=%d flags=0x%02x, tunnel_type=0x%02x\n", + f->tenant_id, f->flags, f->tunnel_type); + dev_info(&pf->pdev->dev, " seid=%d queue_id=%d\n", + f->seid, f->queue_id); +} + #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum @@ -729,7 +1059,48 @@ static ssize_t i40e_dbg_command_write(struct file *filp, count = cmd_buf_tmp - cmd_buf + 1; } - if (strncmp(cmd_buf, "add vsi", 7) == 0) { + if (strncmp(cmd_buf, "read", 4) == 0) { + u32 address; + u32 value; + + cnt = sscanf(&cmd_buf[4], "%i", &address); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + /* check the range on address */ + if (address > (pf->ioremap_len - sizeof(u32))) { + dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n", + address, (pf->ioremap_len - sizeof(u32))); + goto command_write_done; + } + + value = rd32(&pf->hw, address); + dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", + address, value); + + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 address, value; + + cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); + if (cnt != 2) { + dev_info(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + + /* check the range on address */ + if (address > (pf->ioremap_len - sizeof(u32))) { + dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n", + address, (pf->ioremap_len - sizeof(u32))); + goto command_write_done; + } + wr32(&pf->hw, address, value); + value = rd32(&pf->hw, address); + dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", + address, value); + + } else if (strncmp(cmd_buf, "add vsi", 7) == 0) { vsi_seid = -1; cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); if (cnt == 0) { @@ -818,6 +1189,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, 
"del relay", 9) == 0) { int i; + cnt = sscanf(&cmd_buf[9], "%i", &veb_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, @@ -845,9 +1217,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { i40e_status ret; u16 vid; - unsigned int v; + int v; - cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); + cnt = sscanf(&cmd_buf[8], "%i %d", &vsi_seid, &v); if (cnt != 2) { dev_info(&pf->pdev->dev, "add pvid: bad command string, cnt=%d\n", cnt); @@ -861,7 +1233,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - vid = v; + vid = (unsigned)v; ret = i40e_vsi_add_pvid(vsi, vid); if (!ret) dev_info(&pf->pdev->dev, @@ -896,6 +1268,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "dump", 4) == 0) { if (strncmp(&cmd_buf[5], "switch", 6) == 0) { i40e_fetch_switch_configuration(pf, true); + } else if (strncmp(&cmd_buf[5], "resources", 9) == 0) { + i40e_dbg_dump_resources(pf); + } else if (strncmp(&cmd_buf[5], "capabilities", 7) == 0) { + i40e_dbg_dump_capabilities(pf); } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt > 0) @@ -916,6 +1292,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, i40e_dbg_dump_vf_all(pf); } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) { int ring_id, desc_n; + if (strncmp(&cmd_buf[10], "rx", 2) == 0) { cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); @@ -954,6 +1331,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, &pf->hw.local_dcbx_config; struct i40e_dcbx_config *r_cfg = &pf->hw.remote_dcbx_config; + struct i40e_dcbx_config *d_cfg = + &pf->hw.desired_dcbx_config; int i, ret; u16 switch_id; @@ -996,68 +1375,18 @@ static ssize_t i40e_dbg_command_write(struct file *filp, kfree(bw_data); bw_data = NULL; - dev_info(&pf->pdev->dev, - "port dcbx_mode=%d\n", cfg->dcbx_mode); - dev_info(&pf->pdev->dev, - "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", - cfg->etscfg.willing, cfg->etscfg.cbs, - cfg->etscfg.maxtcs); - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", - i, cfg->etscfg.prioritytable[i], - cfg->etscfg.tcbwtable[i], - cfg->etscfg.tsatable[i]); - } - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", - i, cfg->etsrec.prioritytable[i], - cfg->etsrec.tcbwtable[i], - cfg->etsrec.tsatable[i]); - } - dev_info(&pf->pdev->dev, - "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", - cfg->pfc.willing, cfg->pfc.mbc, - cfg->pfc.pfccap, cfg->pfc.pfcenable); - dev_info(&pf->pdev->dev, - "port app_table: num_apps=%d\n", cfg->numapps); - for (i = 0; i < cfg->numapps; i++) { - dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n", - i, cfg->app[i].priority, - cfg->app[i].selector, - cfg->app[i].protocolid); - } - /* Peer TLV DCBX data */ - dev_info(&pf->pdev->dev, - "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", - r_cfg->etscfg.willing, - r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs); - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", - i, r_cfg->etscfg.prioritytable[i], - r_cfg->etscfg.tcbwtable[i], - r_cfg->etscfg.tsatable[i]); - } - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", - i, r_cfg->etsrec.prioritytable[i], 
- r_cfg->etsrec.tcbwtable[i], - r_cfg->etsrec.tsatable[i]); - } - dev_info(&pf->pdev->dev, - "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", - r_cfg->pfc.willing, - r_cfg->pfc.mbc, - r_cfg->pfc.pfccap, - r_cfg->pfc.pfcenable); - dev_info(&pf->pdev->dev, - "remote port app_table: num_apps=%d\n", - r_cfg->numapps); - for (i = 0; i < r_cfg->numapps; i++) { - dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n", - i, r_cfg->app[i].priority, - r_cfg->app[i].selector, - r_cfg->app[i].protocolid); + if (cfg->dcbx_mode == I40E_DCBX_MODE_CEE) { + dev_info(&pf->pdev->dev, + "CEE DCBX mode with Oper TLV Status = 0x%x\n", + cfg->tlv_status); + i40e_dbg_dump_dcb_cfg(pf, d_cfg, "DesiredCfg"); + } else { + dev_info(&pf->pdev->dev, "IEEE DCBX mode\n"); } + + i40e_dbg_dump_dcb_cfg(pf, cfg, "OperCfg"); + i40e_dbg_dump_dcb_cfg(pf, r_cfg, "PeerCfg"); + } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) { int cluster_id, table_id; int index, ret; @@ -1102,69 +1431,65 @@ static ssize_t i40e_dbg_command_write(struct file *filp, buff, rlen, true); kfree(buff); buff = NULL; + } else if (strncmp(&cmd_buf[5], "filters", 7) == 0) { + struct i40e_fdir_filter *f_rule; + struct i40e_cloud_filter *c_rule; + struct hlist_node *node2; + + hlist_for_each_entry_safe(f_rule, node2, + &pf->fdir_filter_list, + fdir_node) { + i40e_dbg_dump_fdir_filter(pf, f_rule); + } + + /* find the cloud filter rule ids */ + hlist_for_each_entry_safe(c_rule, node2, + &pf->cloud_filter_list, + cloud_node) { + i40e_dbg_dump_cloud_filter(pf, c_rule); + } + i40e_dbg_dump_all_vsi_filters(pf); } else { dev_info(&pf->pdev->dev, "dump desc tx [], dump desc rx [],\n"); dev_info(&pf->pdev->dev, "dump switch\n"); dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); + dev_info(&pf->pdev->dev, "dump capabilities\n"); + dev_info(&pf->pdev->dev, "dump resources\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); - dev_info(&pf->pdev->dev, "dump vf [vf_id]\n"); + dev_info(&pf->pdev->dev, "dump VF [vf_id]\n"); dev_info(&pf->pdev->dev, "dump debug fwdata \n"); + dev_info(&pf->pdev->dev, "dump filters\n"); } - } else if (strncmp(cmd_buf, "pfr", 3) == 0) { - dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); + + } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) { + u32 level; + + cnt = sscanf(&cmd_buf[10], "%i", &level); + if (cnt) { + if (I40E_DEBUG_USER & level) { + pf->hw.debug_mask = level; + dev_info(&pf->pdev->dev, + "set hw.debug_mask = 0x%08x\n", + pf->hw.debug_mask); + } + pf->msg_enable = level; + dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n", + pf->msg_enable); + } else { + dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n", + pf->msg_enable); + } + } else if (strncmp(cmd_buf, "defport on", 10) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport enabled\n"); + pf->cur_promisc = true; + i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); + } else if (strncmp(cmd_buf, "defport off", 11) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport disabled\n"); + pf->cur_promisc = false; i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); - - } else if (strncmp(cmd_buf, "corer", 5) == 0) { - dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); - i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED)); - - } else if (strncmp(cmd_buf, "globr", 5) == 0) { - dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); - i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); - - } else if (strncmp(cmd_buf, "read", 4) 
== 0) { - u32 address; - u32 value; - - cnt = sscanf(&cmd_buf[4], "%i", &address); - if (cnt != 1) { - dev_info(&pf->pdev->dev, "read \n"); - goto command_write_done; - } - - /* check the range on address */ - if (address > (pf->ioremap_len - sizeof(u32))) { - dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n", - address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); - goto command_write_done; - } - - value = rd32(&pf->hw, address); - dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", - address, value); - - } else if (strncmp(cmd_buf, "write", 5) == 0) { - u32 address, value; - - cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); - if (cnt != 2) { - dev_info(&pf->pdev->dev, "write \n"); - goto command_write_done; - } - - /* check the range on address */ - if (address > (pf->ioremap_len - sizeof(u32))) { - dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n", - address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); - goto command_write_done; - } - wr32(&pf->hw, address, value); - value = rd32(&pf->hw, address); - dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", - address, value); } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); @@ -1203,7 +1528,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, struct i40e_aq_desc *desc; i40e_status ret; - desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[11], @@ -1251,7 +1576,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, u16 buffer_len; u8 *buff; - desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[20], @@ -1282,7 +1607,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, desc = NULL; goto command_write_done; } - desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc->flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); ret = i40e_asq_send_command(&pf->hw, desc, buff, buffer_len, NULL); if (!ret) { @@ -1314,10 +1639,77 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); + /* vf base mode on/off hooks needs to be used by validation only to + * make sure vf base mode driver is not broken + */ + } else if (strncmp(cmd_buf, "vf base mode on", 15) == 0) { + if (!pf->num_alloc_vfs) { + pf->vf_base_mode_only = true; + dev_info(&pf->pdev->dev, "VF Base mode is enabled\n"); + } else + dev_info(&pf->pdev->dev, + "cannot configure VF Base mode when VFs are allocated\n"); + } else if (strncmp(cmd_buf, "vf base mode off", 16) == 0) { + if (!pf->num_alloc_vfs) { + pf->vf_base_mode_only = false; + dev_info(&pf->pdev->dev, "VF Base mode is disabled\n"); + } else + dev_info(&pf->pdev->dev, + "cannot configure VF Base mode when VFs are allocated\n"); + } else if ((strncmp(cmd_buf, "add ethtype filter", 18) == 0) || + (strncmp(cmd_buf, "rem ethtype filter", 18) == 0)) { + u16 ethtype; + u16 queue; + bool add = false; + int ret; + + if (strncmp(cmd_buf, "add", 3) == 0) + add = true; + + cnt = sscanf(&cmd_buf[18], + "%hi %hi", + ðtype, &queue); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "%s ethtype filter: bad command string, cnt=%d\n", + add ? 
"add" : "rem", + cnt); + goto command_write_done; + } + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + ethtype, 0, + pf->vsi[pf->lan_vsi]->seid, + queue, add, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: add/rem Control Packet Filter AQ command failed =0x%x\n", + add ? "add" : "rem", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + + } else if (strncmp(cmd_buf, "dcb off", 7) == 0) { + u8 tc = i40e_pf_get_num_tc(pf); + /* Allow disabling only when in single TC mode */ + if (tc > 1) { + dev_info(&pf->pdev->dev, "Failed to disable DCB as TC count(%d) is greater than 1.\n", + tc); + goto command_write_done; + } + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + } else if (strncmp(cmd_buf, "dcb on", 6) == 0) { + pf->flags |= I40E_FLAG_DCB_ENABLED; } else if (strncmp(cmd_buf, "lldp", 4) == 0) { if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; + if (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) { + dev_info(&pf->pdev->dev, + "Use ethtool to disable LLDP firmware agent:"\ + "\"ethtool --set-priv-flags disable-fw-lldp on\".\n"); + goto command_write_done; + } ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL); if (ret) { dev_info(&pf->pdev->dev, @@ -1327,34 +1719,41 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, - ETH_P_LLDP, 0, + I40E_ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, true, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, - "%s: Add Control Packet Filter AQ command failed =0x%x\n", - __func__, pf->hw.aq.asq_last_status); + "%s: Add Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); goto command_write_done; } -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB +#ifdef HAVE_DCBNL_IEEE pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; -#endif /* CONFIG_I40E_DCB */ +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { int ret; + if (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) { + dev_info(&pf->pdev->dev, + "Use ethtool to enable LLDP firmware agent:"\ + "\"ethtool --set-priv-flags disable-fw-lldp off\".\n"); + goto command_write_done; + } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, - ETH_P_LLDP, 0, + I40E_ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, false, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, - "%s: Remove Control Packet Filter AQ command failed =0x%x\n", - __func__, pf->hw.aq.asq_last_status); + "%s: Remove Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); /* Continue and start FW LLDP anyways */ } - ret = i40e_aq_start_lldp(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, @@ -1362,10 +1761,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp, pf->hw.aq.asq_last_status); goto command_write_done; } -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB +#ifdef HAVE_DCBNL_IEEE pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; -#endif /* CONFIG_I40E_DCB */ +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) { u16 llen, rlen; @@ -1507,6 +1908,251 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } kfree(buff); buff = NULL; + } else if (strncmp(cmd_buf, "set rss_size", 12) == 0) { + int q_count; + + cnt = sscanf(&cmd_buf[12], "%i", &q_count); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + "set rss_size: bad command string, cnt=%d\n", cnt); + goto command_write_done; + } + if (q_count <= 0) { + 
dev_info(&pf->pdev->dev, + "set rss_size: %d is too small\n", + q_count); + goto command_write_done; + } + dev_info(&pf->pdev->dev, + "set rss_size requesting %d queues\n", q_count); + rtnl_lock(); + i40e_reconfig_rss_queues(pf, q_count); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "new rss_size %d\n", + pf->alloc_rss_size); + } else if (strncmp(cmd_buf, "get bw", 6) == 0) { + i40e_status status; + u32 max_bw, min_bw; + bool min_valid, max_valid; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (status) { + dev_info(&pf->pdev->dev, "get bw failed with status %d\n", + status); + goto command_write_done; + } + if (!min_valid) { + dev_info(&pf->pdev->dev, "min bw invalid\n"); + } else if (min_bw & I40E_ALT_BW_RELATIVE_MASK) { + dev_info(&pf->pdev->dev, "relative min bw = %d%%\n", + min_bw & I40E_ALT_BW_VALUE_MASK); + } else { + dev_info(&pf->pdev->dev, "absolute min bw = %dMb/s\n", + (min_bw & I40E_ALT_BW_VALUE_MASK)*128); + } + if (!max_valid) { + dev_info(&pf->pdev->dev, "max bw invalid\n"); + } else if (max_bw & I40E_ALT_BW_RELATIVE_MASK) { + dev_info(&pf->pdev->dev, "relative max bw = %d%%\n", + max_bw & I40E_ALT_BW_VALUE_MASK); + } else { + dev_info(&pf->pdev->dev, "absolute max bw = %dMb/s\n", + (max_bw & I40E_ALT_BW_VALUE_MASK)*128); + } + } else if (strncmp(cmd_buf, "set bw", 6) == 0) { + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + u32 max_bw, min_bw; + + /* Set the valid bit for this PF */ + bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); + + /* Get the bw's */ + cnt = sscanf(&cmd_buf[7], "%u %u", &max_bw, &min_bw); + if (cnt != 2) { + dev_info(&pf->pdev->dev,"set bw \n"); + goto command_write_done; + } + bw_data.max_bw[pf->hw.pf_id] = max_bw; + bw_data.min_bw[pf->hw.pf_id] = min_bw; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + if (status) { + dev_info(&pf->pdev->dev, "configure partition bw failed with status %d\n", + status); + goto command_write_done; + } + } else if (strncmp(cmd_buf, "commit bw", 9) == 0) { + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status aq_status; + u16 nvm_word; + + if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on first partition!\n"); + goto command_write_done; + } + + /* Acquire NVM for read access */ + aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + if (aq_status) { + dev_info(&pf->pdev->dev, + "Error %d: Cannot acquire NVM for Read Access\n", + aq_status); + goto command_write_done; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + aq_status = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (aq_status) { + dev_info(&pf->pdev->dev, "NVM read error %d:%d\n", + aq_status, last_aq_status); + goto command_write_done; + } + + /* Wait a bit for NVM release to complete */ + msleep(100); + + /* Acquire NVM for write access */ + aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + if (aq_status) { + dev_info(&pf->pdev->dev, + "Error %d: Cannot acquire NVM for Write Access\n", + aq_status); + goto command_write_done; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the 
bandwidth values permanently. + */ + aq_status = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, 0, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (aq_status) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED - error %d:%d updating NVM\n", + aq_status, last_aq_status); + } else if (strncmp(cmd_buf, "add switch ingress mirror", 25) == 0) { + u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + u16 switch_seid, dst_vsi_seid, rule_id; + i40e_status aq_status; + + cnt = sscanf(&cmd_buf[25], "%hu %hu", + &switch_seid, &dst_vsi_seid); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "add mirror: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + aq_status = + i40e_aq_add_mirrorrule(&pf->hw, + switch_seid, rule_type, + dst_vsi_seid, 0, NULL, NULL, + &rule_id, NULL, NULL); + if (aq_status) + dev_info(&pf->pdev->dev, + "add ingress mirror failed with status %d\n", + aq_status); + else + dev_info(&pf->pdev->dev, + "Ingress mirror rule %d added\n", rule_id); + } else if (strncmp(cmd_buf, "add switch egress mirror", 24) == 0) { + u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + u16 switch_seid, dst_vsi_seid, rule_id; + i40e_status aq_status; + + cnt = sscanf(&cmd_buf[24], "%hu %hu", + &switch_seid, &dst_vsi_seid); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "add mirror: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + aq_status = + i40e_aq_add_mirrorrule(&pf->hw, + switch_seid, rule_type, + dst_vsi_seid, 0, NULL, NULL, + &rule_id, NULL, NULL); + if (aq_status) + dev_info(&pf->pdev->dev, + "add egress mirror failed with status %d\n", + aq_status); + else + dev_info(&pf->pdev->dev, + "Egress mirror rule %d added\n", rule_id); + } else if (strncmp(cmd_buf, "del switch ingress mirror", 25) == 0) { + u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + i40e_status aq_status; + u16 switch_seid, rule_id; + + cnt = sscanf(&cmd_buf[25], "%hu %hu", + &switch_seid, &rule_id); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "del mirror: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + aq_status = + i40e_aq_delete_mirrorrule(&pf->hw, switch_seid, + rule_type, rule_id, 0, NULL, + NULL, NULL, NULL); + if (aq_status) + dev_info(&pf->pdev->dev, + "mirror rule remove failed with status %d\n", + aq_status); + else + dev_info(&pf->pdev->dev, + "Mirror rule %d removed\n", rule_id); + } else if (strncmp(cmd_buf, "del switch egress mirror", 24) == 0) { + u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + i40e_status aq_status; + u16 switch_seid, rule_id; + + cnt = sscanf(&cmd_buf[24], "%hu %hu", + &switch_seid, &rule_id); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "del mirror: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + aq_status = + i40e_aq_delete_mirrorrule(&pf->hw, switch_seid, + rule_type, rule_id, 0, NULL, + NULL, NULL, NULL); + if (aq_status) + dev_info(&pf->pdev->dev, + "mirror rule remove failed with status %d\n", + aq_status); + else + dev_info(&pf->pdev->dev, + "Mirror rule %d removed\n", rule_id); + } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); dev_info(&pf->pdev->dev, "available commands\n"); @@ -1518,21 +2164,27 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " del pvid \n"); dev_info(&pf->pdev->dev, " dump switch\n"); dev_info(&pf->pdev->dev, " dump vsi 
[seid]\n"); + dev_info(&pf->pdev->dev, " dump capabilities\n"); + dev_info(&pf->pdev->dev, " dump resources\n"); dev_info(&pf->pdev->dev, " dump desc tx []\n"); dev_info(&pf->pdev->dev, " dump desc rx []\n"); dev_info(&pf->pdev->dev, " dump desc aq\n"); dev_info(&pf->pdev->dev, " dump reset stats\n"); dev_info(&pf->pdev->dev, " dump debug fwdata \n"); + dev_info(&pf->pdev->dev, " msg_enable [level]\n"); dev_info(&pf->pdev->dev, " read \n"); dev_info(&pf->pdev->dev, " write \n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); dev_info(&pf->pdev->dev, " clear_stats port\n"); - dev_info(&pf->pdev->dev, " pfr\n"); - dev_info(&pf->pdev->dev, " corer\n"); - dev_info(&pf->pdev->dev, " globr\n"); + dev_info(&pf->pdev->dev, " defport on\n"); + dev_info(&pf->pdev->dev, " defport off\n"); dev_info(&pf->pdev->dev, " send aq_cmd \n"); dev_info(&pf->pdev->dev, " send indirect aq_cmd \n"); dev_info(&pf->pdev->dev, " fd current cnt"); + dev_info(&pf->pdev->dev, " vf base mode on\n"); + dev_info(&pf->pdev->dev, " vf base mode off\n"); + dev_info(&pf->pdev->dev, " add ethtype filter "); + dev_info(&pf->pdev->dev, " rem ethtype filter "); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); dev_info(&pf->pdev->dev, " lldp get local\n"); @@ -1540,6 +2192,16 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " lldp event on\n"); dev_info(&pf->pdev->dev, " lldp event off\n"); dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n"); + dev_info(&pf->pdev->dev, " set rss_size \n"); + dev_info(&pf->pdev->dev, " dcb off\n"); + dev_info(&pf->pdev->dev, " dcb on\n"); + dev_info(&pf->pdev->dev, " get bw\n"); + dev_info(&pf->pdev->dev, " set bw \n"); + dev_info(&pf->pdev->dev, " commit bw\n"); + dev_info(&pf->pdev->dev, " add switch ingress mirror \n"); + dev_info(&pf->pdev->dev, " add switch egress mirror \n"); + dev_info(&pf->pdev->dev, " del switch ingress mirror \n"); + dev_info(&pf->pdev->dev, " del switch egress mirror \n"); } command_write_done: @@ -1574,7 +2236,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; - int buf_size = 256; + size_t buf_size = 256; char *buf; int len; @@ -1656,8 +2318,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", vsi_seid); } else if (rtnl_trylock()) { +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + vsi->netdev->netdev_ops->extended.ndo_change_mtu( + vsi->netdev, mtu); +#else vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, mtu); +#endif rtnl_unlock(); dev_info(&pf->pdev->dev, "change_mtu called\n"); } else { @@ -1703,6 +2370,25 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, napi_schedule(&vsi->q_vectors[i]->napi); dev_info(&pf->pdev->dev, "napi called\n"); } + } else if (strncmp(i40e_dbg_netdev_ops_buf, + "toggle_tx_timeout", 17) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[17], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "toggle_tx_timeout \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "toggle_tx_timeout: VSI %d not found\n", + vsi_seid); + } else { + if (vsi->block_tx_timeout) + vsi->block_tx_timeout = false; + else + vsi->block_tx_timeout = true; + dev_info(&pf->pdev->dev, "toggle_tx_timeout: block_tx_timeout = %d\n", + vsi->block_tx_timeout); + } } else { dev_info(&pf->pdev->dev, "unknown command 
'%s'\n", i40e_dbg_netdev_ops_buf); @@ -1710,6 +2396,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, dev_info(&pf->pdev->dev, " change_mtu \n"); dev_info(&pf->pdev->dev, " set_rx_mode \n"); dev_info(&pf->pdev->dev, " napi \n"); + dev_info(&pf->pdev->dev, " toggle_tx_timeout \n"); } netdev_ops_write_done: return count; @@ -1728,15 +2415,29 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { + struct dentry *pfile; const char *name = pci_name(pf->pdev); + const struct device *dev = &pf->pdev->dev; pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); + if (!pf->i40e_dbg_pf) + return; - debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_command_fops); + pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); + if (!pfile) + goto create_failed; - debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_netdev_ops_fops); + pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->i40e_dbg_pf); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index bac4da031f9b..55a7c1079dd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ -#ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ /* Device IDs */ @@ -23,6 +22,10 @@ #define I40E_DEV_ID_10G_BASE_T_BC 0x15FF #define I40E_DEV_ID_10G_B 0x104F #define I40E_DEV_ID_10G_SFP 0x104E +#define I40E_DEV_ID_5G_BASE_T_BC 0x101F +#define I40E_IS_X710TL_DEVICE(d) \ + (((d) == I40E_DEV_ID_10G_BASE_T_BC) || \ + ((d) == I40E_DEV_ID_5G_BASE_T_BC)) #define I40E_DEV_ID_KX_X722 0x37CE #define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 @@ -34,4 +37,6 @@ (d) == I40E_DEV_ID_QSFP_B || \ (d) == I40E_DEV_ID_QSFP_C) -#endif /* _I40E_DEVIDS_H_ */ +#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \ + (d) == I40E_DEV_ID_25G_SFP28) + diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index ef4d3762bf37..dc2e9d6b4e25 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #include "i40e_diag.h" #include "i40e_prototype.h" @@ -13,9 +13,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, u32 reg, u32 mask) { - static const u32 patterns[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF - }; + const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; u32 pat, val, orig_val; int i; @@ -25,9 +23,11 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, wr32(hw, reg, (pat & mask)); val = rd32(hw, reg); if ((val & mask) != (pat & mask)) { +#ifdef ETHTOOL_TEST i40e_debug(hw, I40E_DEBUG_DIAG, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n", __func__, reg, pat, val); +#endif return I40E_ERR_DIAG_TEST_FAILED; } } @@ -35,35 +35,29 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, wr32(hw, reg, orig_val); val = rd32(hw, reg); if (val != orig_val) { +#ifdef ETHTOOL_TEST i40e_debug(hw, I40E_DEBUG_DIAG, "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n", __func__, reg, orig_val, val); +#endif return I40E_ERR_DIAG_TEST_FAILED; } - return 0; + return I40E_SUCCESS; } struct i40e_diag_reg_test_info i40e_reg_list[] = { /* offset mask elements stride */ - {I40E_QTX_CTL(0), 0x0000FFBF, 1, - I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, - {I40E_PFINT_ITR0(0), 0x00000FFF, 3, - I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, - {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, - I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, - {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, - I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, - {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, - I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, + {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, + {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, + {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, + {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, - {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, - I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, - {I40E_QINT_TQCTL(0), 0x000000FF, 1, - I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, - {I40E_QINT_RQCTL(0), 0x000000FF, 1, - I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, + {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, + {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, + {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, { 0 } }; @@ -76,12 +70,12 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = { **/ i40e_status i40e_diag_reg_test(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u32 reg, mask; u32 i, j; for (i = 0; i40e_reg_list[i].offset != 0 && - !ret_code; i++) { + ret_code == I40E_SUCCESS; i++) { /* set actual reg range for dynamically allocated resources */ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && @@ -98,9 +92,10 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw) /* test register access */ mask = i40e_reg_list[i].mask; - for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) { - reg = i40e_reg_list[i].offset + - (j * i40e_reg_list[i].stride); + for (j = 0; j < i40e_reg_list[i].elements && + ret_code == I40E_SUCCESS; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); ret_code = 
i40e_diag_reg_pattern_test(hw, reg, mask); } } @@ -121,7 +116,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) /* read NVM control word and if NVM valid, validate EEPROM checksum*/ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); - if (!ret_code && + if ((ret_code == I40E_SUCCESS) && ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) return i40e_validate_nvm_checksum(hw, NULL); diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3340f320a18..697015b4f154 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_DIAG_H_ #define _I40E_DIAG_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 41e1240acaea..7f93f74d41a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,230 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ /* ethtool support for i40e */ #include "i40e.h" #include "i40e_diag.h" -#include "i40e_txrx_common.h" -/* ethtool statistics helpers */ +#ifdef SIOCETHTOOL +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 -/** - * struct i40e_stats - definition for an ethtool statistic - * @stat_string: statistic name to display in ethtool -S output - * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) - * @stat_offset: offsetof() the stat from a base pointer - * - * This structure defines a statistic to be added to the ethtool stats buffer. - * It defines a statistic as offset from a common base pointer. Stats should - * be defined in constant arrays using the I40E_STAT macro, with every element - * of the array using the same _type for calculating the sizeof_stat and - * stat_offset. - * - * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or - * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from - * the i40e_add_ethtool_stat() helper function. - * - * The @stat_string is interpreted as a format string, allowing formatted - * values to be inserted while looping over multiple structures for a given - * statistics array. Thus, every statistic string in an array should have the - * same type and number of format specifiers, to be formatted by variadic - * arguments to the i40e_add_stat_string() helper function. - **/ -struct i40e_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; +#endif +#ifdef ETHTOOL_GSTATS -/* Helper macro to define an i40e_stat structure with proper size and type. - * Use this when defining constant statistics arrays. Note that @_type expects - * only a type name and is used multiple times. - */ -#define I40E_STAT(_type, _name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ - .stat_offset = offsetof(_type, _stat) \ -} - -/* Helper macro for defining some statistics directly copied from the netdev - * stats structure. 
- */ -#define I40E_NETDEV_STAT(_net_stat) \ - I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) - -/* Helper macro for defining some statistics related to queues */ -#define I40E_QUEUE_STAT(_name, _stat) \ - I40E_STAT(struct i40e_ring, _name, _stat) - -/* Stats associated with a Tx or Rx ring */ -static const struct i40e_stats i40e_gstrings_queue_stats[] = { - I40E_QUEUE_STAT("%s-%u.packets", stats.packets), - I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), -}; - -/** - * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer - * @data: location to store the stat value - * @pointer: basis for where to copy from - * @stat: the stat definition - * - * Copies the stat data defined by the pointer and stat structure pair into - * the memory supplied as data. Used to implement i40e_add_ethtool_stats and - * i40e_add_queue_stats. If the pointer is null, data will be zero'd. - */ -static void -i40e_add_one_ethtool_stat(u64 *data, void *pointer, - const struct i40e_stats *stat) -{ - char *p; - - if (!pointer) { - /* ensure that the ethtool data buffer is zero'd for any stats - * which don't have a valid pointer. - */ - *data = 0; - return; - } - - p = (char *)pointer + stat->stat_offset; - switch (stat->sizeof_stat) { - case sizeof(u64): - *data = *((u64 *)p); - break; - case sizeof(u32): - *data = *((u32 *)p); - break; - case sizeof(u16): - *data = *((u16 *)p); - break; - case sizeof(u8): - *data = *((u8 *)p); - break; - default: - WARN_ONCE(1, "unexpected stat size for %s", - stat->stat_string); - *data = 0; - } -} - -/** - * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location to copy stats from - * @stats: array of stats to copy - * @size: the size of the stats definition - * - * Copy the stats defined by the stats array using the pointer as a base into - * the data buffer supplied by ethtool. Updates the data pointer to point to - * the next empty location for successive calls to __i40e_add_ethtool_stats. - * If pointer is null, set the data values to zero and update the pointer to - * skip these stats. - **/ -static void -__i40e_add_ethtool_stats(u64 **data, void *pointer, - const struct i40e_stats stats[], - const unsigned int size) -{ - unsigned int i; - - for (i = 0; i < size; i++) - i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); -} - -/** - * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location where stats are stored - * @stats: static const array of stat definitions - * - * Macro to ease the use of __i40e_add_ethtool_stats by taking a static - * constant stats array and passing the ARRAY_SIZE(). This avoids typos by - * ensuring that we pass the size associated with the given stats array. - * - * The parameter @stats is evaluated twice, so parameters with side effects - * should be avoided. - **/ -#define i40e_add_ethtool_stats(data, pointer, stats) \ - __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) - -/** - * i40e_add_queue_stats - copy queue statistics into supplied buffer - * @data: ethtool stats buffer - * @ring: the ring to copy - * - * Queue statistics must be copied while protected by - * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. - * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the - * ring pointer is null, zero out the queue stat values and update the data - * pointer. 
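
The retry loop this removed comment describes is the generic u64_stats seqcount pattern: a 64-bit counter can tear when read on a 32-bit kernel, so the reader keeps re-copying until the writer's sequence count is stable. A compressed user-space model of the loop, with a plain C11 atomic standing in for ring->syncp (the real primitives are u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() from <linux/u64_stats_sync.h>):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Single-threaded demo: the writer-side increments and the memory
 * ordering details are elided; only the reader's retry shape is shown.
 */
struct ring_stats {
	_Atomic unsigned int seq;	/* even = stable, odd = writer active */
	uint64_t packets;
	uint64_t bytes;
};

static void read_stats(struct ring_stats *r, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&r->seq);	/* "fetch_begin" */
		*pkts = r->packets;
		*bytes = r->bytes;
		/* "fetch_retry": writer was active, or ran mid-copy */
	} while ((start & 1) || atomic_load(&r->seq) != start);
}

int main(void)
{
	struct ring_stats r = { .seq = 2, .packets = 10, .bytes = 1500 };
	uint64_t p, b;

	read_stats(&r, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
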
Otherwise safely copy the stats from the ring into the supplied - * buffer and update the data pointer when finished. - * - * This function expects to be called while under rcu_read_lock(). - **/ -static void -i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) -{ - const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); - const struct i40e_stats *stats = i40e_gstrings_queue_stats; - unsigned int start; - unsigned int i; - - /* To avoid invalid statistics values, ensure that we keep retrying - * the copy until we get a consistent value according to - * u64_stats_fetch_retry_irq. But first, make sure our ring is - * non-null before attempting to access its syncp. - */ - do { - start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); - for (i = 0; i < size; i++) { - i40e_add_one_ethtool_stat(&(*data)[i], ring, - &stats[i]); - } - } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); - - /* Once we successfully copy the stats in, update the data pointer */ - *data += size; -} - -/** - * __i40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * @size: size of the stats array - * - * Format and copy the strings described by stats into the buffer pointed at - * by p. - **/ -static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], - const unsigned int size, ...) -{ - unsigned int i; - - for (i = 0; i < size; i++) { - va_list args; - - va_start(args, size); - vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); - *p += ETH_GSTRING_LEN; - va_end(args); - } -} - -/** - * 40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * - * Format and copy the strings described by the const static stats value into - * the buffer pointed at by p. - * - * The parameter @stats is evaluated twice, so parameters with side effects - * should be avoided. Additionally, stats must be an array such that - * ARRAY_SIZE can be called on it. - **/ -#define i40e_add_stat_strings(p, stats, ...) 
\ - __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) +#include "i40e_ethtool_stats.h" #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) @@ -341,21 +130,41 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("port.rx_jabber", stats.rx_jabber), I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests), I40E_PF_STAT("port.arq_overflows", arq_overflows), +#ifdef HAVE_PTP_1588_CLOCK I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped), +#endif /* HAVE_PTP_1588_CLOCK */ I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status), I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match), I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status), +#ifdef I40E_ADD_PROBES + I40E_PF_STAT("port.tx_tcp_segments", tcp_segs), + I40E_PF_STAT("port.tx_tcp_cso", tx_tcp_cso), + I40E_PF_STAT("port.tx_udp_cso", tx_udp_cso), + I40E_PF_STAT("port.tx_sctp_cso", tx_sctp_cso), + I40E_PF_STAT("port.tx_ip4_cso", tx_ip4_cso), + I40E_PF_STAT("port.rx_tcp_cso", rx_tcp_cso), + I40E_PF_STAT("port.rx_udp_cso", rx_udp_cso), + I40E_PF_STAT("port.rx_sctp_cso", rx_sctp_cso), + I40E_PF_STAT("port.rx_ip4_cso", rx_ip4_cso), + I40E_PF_STAT("port.rx_csum_offload_outer", hw_csum_rx_outer), + I40E_PF_STAT("port.rx_tcp_cso_error", rx_tcp_cso_err), + I40E_PF_STAT("port.rx_udp_cso_error", rx_udp_cso_err), + I40E_PF_STAT("port.rx_sctp_cso_error", rx_sctp_cso_err), + I40E_PF_STAT("port.rx_ip4_cso_error", rx_ip4_cso_err), +#endif /* LPI stats */ I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status), I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status), I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count), I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count), + I40E_PF_STAT("port.tx_lpi_duration", stats.tx_lpi_duration), + I40E_PF_STAT("port.rx_lpi_duration", stats.rx_lpi_duration), }; struct i40e_pfc_stats { @@ -389,6 +198,7 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) +/* Length (number) of PF core stats only (i.e. without queues / extra stats): */ #define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VEB_STATS_LEN + \ @@ -396,7 +206,53 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { /* Length of stats for a single queue */ #define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) +#ifdef HAVE_XDP_SUPPORT +#define I40E_QUEUE_STATS_XDP_LEN ARRAY_SIZE(i40e_gstrings_rx_queue_xdp_stats) +#endif +#ifndef I40E_PF_EXTRA_STATS_OFF + +#define I40E_STATS_NAME_VFID_EXTRA "vf___." 
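
The I40E_VSI_STAT()/I40E_PF_STAT() entries above are table-driven: each records a stat's display name plus the offsetof()/sizeof() of its source field, and one generic reader widens whatever it finds into the u64 ethtool buffer. A self-contained sketch of the same technique; the struct, field, and macro names here are invented, and the memcpy() widening assumes little-endian where the driver proper switches on the field size:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_eth_stats {
	uint64_t rx_bytes;
	uint32_t rx_discards;
	uint16_t tx_errors;
};

struct demo_stat {
	const char *name;
	size_t size;		/* sizeof() the field */
	size_t offset;		/* offsetof() the field */
};

#define DEMO_STAT(_type, _name, _field) \
	{ _name, sizeof(((_type *)0)->_field), offsetof(_type, _field) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(struct demo_eth_stats, "rx_bytes", rx_bytes),
	DEMO_STAT(struct demo_eth_stats, "rx_discards", rx_discards),
	DEMO_STAT(struct demo_eth_stats, "tx_errors", tx_errors),
};

static uint64_t demo_read_stat(const void *base, const struct demo_stat *s)
{
	const char *p = (const char *)base + s->offset;
	uint64_t v = 0;

	memcpy(&v, p, s->size);	/* widen smaller fields (little-endian demo) */
	return v;
}

int main(void)
{
	struct demo_eth_stats st = { 123456, 7, 2 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%-12s %llu\n", demo_stats[i].name,
		       (unsigned long long)demo_read_stat(&st, &demo_stats[i]));
	return 0;
}

Keeping name, size, and offset in one const table is what lets the strings, the counts, and the values all come from a single definition per stat.
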
+#define I40E_STATS_NAME_VFID_EXTRA_LEN (sizeof(I40E_STATS_NAME_VFID_EXTRA) - 1) + +static struct i40e_stats i40e_gstrings_eth_stats_extra[] = { + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "rx_bytes", eth_stats.rx_bytes), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "rx_unicast", eth_stats.rx_unicast), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "rx_multicast", eth_stats.rx_multicast), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "rx_broadcast", eth_stats.rx_broadcast), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "rx_discards", eth_stats.rx_discards), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "rx_unknown_protocol", eth_stats.rx_unknown_protocol), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "tx_bytes", eth_stats.tx_bytes), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "tx_unicast", eth_stats.tx_unicast), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "tx_multicast", eth_stats.tx_multicast), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "tx_broadcast", eth_stats.tx_broadcast), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "tx_discards", eth_stats.tx_discards), + I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA + "tx_errors", eth_stats.tx_errors), +}; + +#define I40E_STATS_EXTRA_COUNT 128 /* as for now only I40E_MAX_VF_COUNT */ +/* Following length value does not include the length values for queues stats */ +#define I40E_STATS_EXTRA_LEN ARRAY_SIZE(i40e_gstrings_eth_stats_extra) +/* Length (number) of PF extra stats only (i.e. without core stats / queues): */ +#define I40E_PF_STATS_EXTRA_LEN (I40E_STATS_EXTRA_COUNT * I40E_STATS_EXTRA_LEN) +/* Length (number) of enhanced/all PF stats (i.e. core with extra stats): */ +#define I40E_PF_STATS_ENHANCE_LEN (I40E_PF_STATS_LEN + I40E_PF_STATS_EXTRA_LEN) + +#endif /* !I40E_PF_EXTRA_STATS_OFF */ +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST enum i40e_ethtool_test_id { I40E_ETH_TEST_REG = 0, I40E_ETH_TEST_EEPROM, @@ -413,6 +269,9 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT struct i40e_priv_flags { char flag_string[ETH_GSTRING_LEN]; u64 flag; @@ -428,13 +287,16 @@ struct i40e_priv_flags { static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { /* NOTE: MFP setting cannot be changed */ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1), + I40E_PRIV_FLAG("total-port-shutdown", I40E_FLAG_TOTAL_PORT_SHUTDOWN, 1), I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), I40E_PRIV_FLAG("link-down-on-close", I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0), +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), +#endif I40E_PRIV_FLAG("disable-source-pruning", I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0), @@ -452,6 +314,7 @@ static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = { #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags) +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ /** * i40e_partition_setting_complaint - generic complaint for MFP restriction * @pf: the PF struct @@ -508,6 +371,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); } +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS if (phy_types & 
I40E_CAP_PHY_TYPE_2_5GBASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full); @@ -515,6 +379,8 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseT_Full); } +#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */ +#ifdef HAVE_ETHTOOL_5G_BITS if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full); @@ -522,6 +388,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 5000baseT_Full); } +#endif /* HAVE_ETHTOOL_5G_BITS */ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || phy_types & I40E_CAP_PHY_TYPE_XLPPI || phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) @@ -597,6 +464,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseKX_Full); } +#ifdef HAVE_ETHTOOL_25G_BITS /* need to add 25G PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) { ethtool_link_ksettings_add_link_mode(ks, supported, @@ -628,6 +496,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); } +#ifdef ETHTOOL_GFECPARAM if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || @@ -646,6 +515,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, FEC_BASER); } } +#endif /* ETHTOOL_GFECPARAM */ +#endif /* HAVE_ETHTOOL_25G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_10G_BITS /* need to add new 10G PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) { @@ -678,6 +550,28 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseX_Full); } +#else + /* need to keep backward compatibility with older kernels */ + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ /* Autoneg PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_SGMII || phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 || @@ -710,9 +604,10 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, } } +#ifdef ETHTOOL_GFECPARAM /** * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask - * @req_fec_info: mask request FEC info + * @req_fec_info: mask request fec info * @ks: ethtool ksettings to fill in **/ static void i40e_get_settings_link_up_fec(u8 req_fec_info, @@ -722,7 +617,14 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info, ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { + if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) && + (I40E_AQ_SET_FEC_REQUEST_KR & 
req_fec_info)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) { ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -730,17 +632,12 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info, } else { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - if (I40E_AQ_SET_FEC_AUTO & req_fec_info) { - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); - } } } +#endif /* ETHTOOL_GFECPARAM */ /** - * i40e_get_settings_link_up - Get the Link settings for when link is up + * i40e_get_settings_link_up - Get Link settings for when link is up * @hw: hw structure * @ks: ethtool ksettings to fill in * @netdev: network interface device structure @@ -794,11 +691,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_1000BASE_LX: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); +#ifdef HAVE_ETHTOOL_25G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); +#ifdef ETHTOOL_GFECPARAM i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); +#endif /* ETHTOOL_GFECPARAM */ +#endif /* HAVE_ETHTOOL_25G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_10G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -811,6 +713,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 1000baseX_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseX_Full); +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); if (hw_link_info->module_type[2] & @@ -836,10 +739,14 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); +#ifdef HAVE_ETHTOOL_5G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full); +#endif /* HAVE_ETHTOOL_5G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full); +#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */ ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -848,12 +755,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); +#ifdef HAVE_ETHTOOL_5G_BITS if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 5000baseT_Full); +#endif /* HAVE_ETHTOOL_5G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseT_Full); +#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); @@ -913,8 +824,10 @@ static void i40e_get_settings_link_up(struct i40e_hw 
*hw, case I40E_PHY_TYPE_1000BASE_KX: ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseKR4_Full); +#ifdef HAVE_ETHTOOL_25G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); +#endif /* HAVE_ETHTOOL_25G_BITS */ ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -926,9 +839,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseKR4_Full); +#ifdef HAVE_ETHTOOL_25G_BITS ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); +#ifdef ETHTOOL_GFECPARAM i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); +#endif /* ETHTOOL_GFECPARAM */ +#endif /* HAVE_ETHTOOL_25G_BITS */ ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -942,27 +859,37 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_25GBASE_CR: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); +#ifdef HAVE_ETHTOOL_25G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); +#ifdef ETHTOOL_GFECPARAM i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); +#endif /* ETHTOOL_GFECPARAM */ +#endif /* HAVE_ETHTOOL_25G_BITS */ break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); +#ifdef HAVE_ETHTOOL_25G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); +#ifdef ETHTOOL_GFECPARAM i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); +#endif /* ETHTOOL_GFECPARAM */ +#endif /* HAVE_ETHTOOL_25G_BITS */ +#ifdef HAVE_ETHTOOL_NEW_10G_BITS ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseCR_Full); +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ break; default: /* if we got here and link is up something bad is afoot */ @@ -1013,7 +940,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, } /** - * i40e_get_settings_link_down - Get the Link settings for when link is down + * i40e_get_settings_link_down - Get the Link settings when link is down * @hw: hw structure * @ks: ethtool ksettings to fill in * @pf: pointer to physical function struct @@ -1028,7 +955,6 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, * supported phy types to figure out what info to display */ i40e_phy_type_to_ethtool(pf, ks); - /* With no link speed and duplex are unknown */ ks->base.speed = SPEED_UNKNOWN; ks->base.duplex = DUPLEX_UNKNOWN; @@ -1060,7 +986,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev, /* Now set the settings that don't rely on link being up/down */ /* Set autoneg settings */ - ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + ks->base.autoneg = (hw_link_info->an_info & I40E_AQ_AN_COMPLETED ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); /* Set media type settings */ @@ -1117,10 +1043,10 @@ static int i40e_get_link_ksettings(struct net_device *netdev, Asym_Pause); break; } - return 0; } +#ifdef ETHTOOL_GLINKSETTINGS /** * i40e_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure @@ -1184,7 +1110,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, i40e_get_link_ksettings(netdev, &safe_ks); /* Get link modes supported by hardware and check against modes - * requested by the user. Return an error if unsupported mode was set. + * requested by user. Return an error if unsupported mode was set. */ if (!bitmap_subset(copy_ks.link_modes.advertising, safe_ks.link_modes.supported, @@ -1227,9 +1153,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { /* If autoneg is not supported, return error */ - if (!ethtool_link_ksettings_test_link_mode(&safe_ks, - supported, - Autoneg)) { + if (!ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg)) { netdev_info(netdev, "Autoneg not supported on this phy\n"); err = -EINVAL; goto done; @@ -1245,9 +1170,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* If autoneg is supported 10GBASE_T is the only PHY * that can disable it, so otherwise return error */ - if (ethtool_link_ksettings_test_link_mode(&safe_ks, - supported, - Autoneg) && + if (ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg) && hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); @@ -1266,8 +1190,10 @@ static int i40e_set_link_ksettings(struct net_device *netdev, config.link_speed |= I40E_LINK_SPEED_100MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseT_Full) || +#ifdef HAVE_ETHTOOL_NEW_10G_BITS ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseX_Full) || +#endif ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseKX_Full)) config.link_speed |= I40E_LINK_SPEED_1GB; @@ -1277,22 +1203,31 @@ static int i40e_set_link_ksettings(struct net_device *netdev, 10000baseKX4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseKR_Full) || +#ifdef HAVE_ETHTOOL_NEW_10G_BITS ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseCR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseSR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseLR_Full)) +#else + 0) +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ config.link_speed |= I40E_LINK_SPEED_10GB; +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS if (ethtool_link_ksettings_test_link_mode(ks, advertising, 2500baseT_Full)) config.link_speed |= I40E_LINK_SPEED_2_5GB; +#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */ +#ifdef HAVE_ETHTOOL_5G_BITS if (ethtool_link_ksettings_test_link_mode(ks, advertising, 5000baseT_Full)) config.link_speed |= I40E_LINK_SPEED_5GB; +#endif /* HAVE_ETHTOOL_5G_BITS */ if (ethtool_link_ksettings_test_link_mode(ks, advertising, 20000baseKR2_Full)) config.link_speed |= I40E_LINK_SPEED_20GB; +#ifdef HAVE_ETHTOOL_25G_BITS if (ethtool_link_ksettings_test_link_mode(ks, advertising, 25000baseCR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -1300,6 +1235,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_test_link_mode(ks, advertising, 25000baseSR_Full)) config.link_speed |= I40E_LINK_SPEED_25GB; +#endif /* 
HAVE_ETHTOOL_25G_BITS */ if (ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseKR4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -1316,7 +1252,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, */ if (!config.link_speed) config.link_speed = abilities.link_speed; - if (autoneg_changed || abilities.link_speed != config.link_speed) { + if (autoneg_changed || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; @@ -1368,6 +1304,251 @@ done: return err; } +#else /* ETHTOOL_GLINKSETTINGS */ +/** + * i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type. Since we've backported + * the new API constructs to use in the old API, this ends up just being a + * wrapper to i40e_get_link_ksettings. + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ethtool_link_ksettings ks; + + i40e_get_link_ksettings(netdev, &ks); + _kc_ethtool_ksettings_to_cmd(&ks, ecmd); + ecmd->transceiver = XCVR_EXTERNAL; + return 0; +} + +/** + * i40e_set_settings - Set Speed and Duplex + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Set speed/duplex per media_types advertised/forced + **/ +static int i40e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + struct ethtool_cmd safe_ecmd; + i40e_status status = 0; + bool change = false; + int timeout = 50; + int err = 0; + u8 autoneg; + u32 advertise; + u32 old_ethtool_advertising = 0; + + /* Changing port settings is not supported if this isn't the + * port's controlling PF + */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && + hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && + hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.media_type != I40E_MEDIA_TYPE_DA && + hw->phy.link_info.link_info & I40E_AQ_LINK_UP) + return -EOPNOTSUPP; + + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + netdev_info(netdev, "Changing settings is not supported on backplane.\n"); + return -EOPNOTSUPP; + } + + /* get our own copy of the bits to check against */ + memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); + i40e_get_settings(netdev, &safe_ecmd); + + /* save autoneg and speed out of ecmd */ + autoneg = ecmd->autoneg; + advertise = ecmd->advertising; + + /* set autoneg and speed back to what they currently are */ + ecmd->autoneg = safe_ecmd.autoneg; + ecmd->advertising = safe_ecmd.advertising; + + /* Due to a bug in ethtool versions < 3.6 this check is necessary */ + old_ethtool_advertising = ecmd->supported & + (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_2500baseX_Full | + ADVERTISED_10000baseT_Full); + 
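
The memcmp() check just below is a whitelist guard: the handler saves the caller's autoneg and advertising values, overwrites those two fields with the current settings, and then any byte still differing from the safe copy means the user asked for something this path does not support. A simplified model of the idea, with the field set shrunk and all members kept the same width so struct padding cannot leak into the memcmp():

#include <stdio.h>
#include <string.h>

struct demo_cmd {
	unsigned int autoneg;		/* supported knob */
	unsigned int advertising;	/* supported knob */
	unsigned int port;		/* must not change */
	unsigned int phy_address;	/* must not change */
};

/* Returns 1 if the request only touches fields we support. */
static int validate_request(struct demo_cmd *req, const struct demo_cmd *cur)
{
	unsigned int autoneg = req->autoneg;	/* save requested values */
	unsigned int advertising = req->advertising;
	int ok;

	req->autoneg = cur->autoneg;		/* mask the supported fields */
	req->advertising = cur->advertising;
	ok = memcmp(req, cur, sizeof(*req)) == 0;

	req->autoneg = autoneg;			/* restore the request */
	req->advertising = advertising;
	return ok;
}

int main(void)
{
	struct demo_cmd cur = { 1, 0xff, 0, 1 };
	struct demo_cmd req = cur;

	req.advertising = 0x0f;		/* allowed: a supported knob */
	printf("advertising change ok: %d\n", validate_request(&req, &cur));
	req.port = 5;			/* unsupported field */
	printf("port change ok: %d\n", validate_request(&req, &cur));
	return 0;
}
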
old_ethtool_advertising |= (old_ethtool_advertising | + ADVERTISED_20000baseMLD2_Full | + ADVERTISED_20000baseKR2_Full); + + if (advertise == old_ethtool_advertising) + netdev_info(netdev, "If you are not setting advertising to %x then you may have an old version of ethtool. Please update.\n", + advertise); + ecmd->cmd = safe_ecmd.cmd; + /* If ecmd and safe_ecmd are not the same now, then they are + * trying to set something that we do not support + */ + if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd))) + return -EOPNOTSUPP; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + /* Copy abilities to config in case autoneg is not + * set below + */ + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + config.abilities = abilities.abilities; + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_AN; + change = true; + } + } else { + /* If autoneg is currently enabled */ + if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + /* If autoneg is supported 10GBASE_T is the only phy + * that can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != + I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + config.abilities = abilities.abilities & + ~I40E_AQ_PHY_ENABLE_AN; + change = true; + } + } + + if (advertise & ~safe_ecmd.supported) { + err = -EINVAL; + goto done; + } + + if (advertise & ADVERTISED_100baseT_Full) + config.link_speed |= I40E_LINK_SPEED_100MB; + if (advertise & ADVERTISED_1000baseT_Full || + advertise & ADVERTISED_1000baseKX_Full) + config.link_speed |= I40E_LINK_SPEED_1GB; + if (advertise & ADVERTISED_10000baseT_Full || + advertise & ADVERTISED_10000baseKX4_Full || + advertise & ADVERTISED_10000baseKR_Full) + config.link_speed |= I40E_LINK_SPEED_10GB; + if (advertise & ADVERTISED_20000baseKR2_Full) + config.link_speed |= I40E_LINK_SPEED_20GB; + if (advertise & ADVERTISED_40000baseKR4_Full || + advertise & ADVERTISED_40000baseCR4_Full || + advertise & ADVERTISED_40000baseSR4_Full || + advertise & ADVERTISED_40000baseLR4_Full) + config.link_speed |= I40E_LINK_SPEED_40GB; + + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. 
+ */ + if (!config.link_speed) + config.link_speed = abilities.link_speed; + + if (change || abilities.link_speed != config.link_speed) { + /* copy over the rest of the abilities */ + config.phy_type = abilities.phy_type; + config.phy_type_ext = abilities.phy_type_ext; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; + /* set link and auto negotiation so changes take effect */ + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* If link is up put link down */ + if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + i40e_print_link_message(vsi, false); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* make the aq call */ + status = i40e_aq_set_phy_config(hw, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + err = -EAGAIN; + goto done; + } + + status = i40e_update_link_info(hw); + if (status) + netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + + } else { + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); + } + +done: + clear_bit(__I40E_CONFIG_BUSY, pf->state); + + return err; +} + +#endif /* ETHTOOL_GLINKSETTINGS */ + static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -1375,7 +1556,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; i40e_status status = 0; - u32 flags = 0; + u64 flags = 0; int err = 0; flags = READ_ONCE(pf->flags); @@ -1428,6 +1609,7 @@ done: return err; } +#ifdef ETHTOOL_GFECPARAM static int i40e_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) { @@ -1437,6 +1619,7 @@ static int i40e_get_fec_param(struct net_device *netdev, struct i40e_hw *hw = &pf->hw; i40e_status status = 0; int err = 0; + u8 fec_cfg; /* Get the current phy config */ memset(&abilities, 0, sizeof(abilities)); @@ -1448,18 +1631,16 @@ static int i40e_get_fec_param(struct net_device *netdev, } fecparam->fec = 0; - if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO) + fec_cfg = abilities.fec_cfg_curr_mod_ext_info; + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) fecparam->fec |= ETHTOOL_FEC_AUTO; - if ((abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_SET_FEC_REQUEST_RS) || - (abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_SET_FEC_ABILITY_RS)) + else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS | + I40E_AQ_SET_FEC_ABILITY_RS)) fecparam->fec |= ETHTOOL_FEC_RS; - if ((abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_SET_FEC_REQUEST_KR) || - (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR)) + else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR | + I40E_AQ_SET_FEC_ABILITY_KR)) fecparam->fec |= ETHTOOL_FEC_BASER; - if (abilities.fec_cfg_curr_mod_ext_info == 0) + if (fec_cfg == 0) fecparam->fec |= ETHTOOL_FEC_OFF; if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) @@ -1515,6 +1696,7 @@ static int i40e_set_fec_param(struct net_device *netdev, done: return err; } 
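The reworked i40e_get_fec_param in the hunk above reads fec_cfg_curr_mod_ext_info once and walks an if/else-if chain, so exactly one of AUTO, RS, or BaseR is reported, with AUTO taking precedence and an all-zero configuration mapping to ETHTOOL_FEC_OFF. A compilable sketch of that precedence mapping, with illustrative stand-in bit definitions in place of the real I40E_AQ_SET_FEC_* constants:

#include <stdint.h>

#define FEC_AUTO	(1u << 0)	/* stand-in for I40E_AQ_SET_FEC_AUTO */
#define FEC_REQ_RS	(1u << 1)	/* ... I40E_AQ_SET_FEC_REQUEST_RS */
#define FEC_ABIL_RS	(1u << 2)	/* ... I40E_AQ_SET_FEC_ABILITY_RS */
#define FEC_REQ_KR	(1u << 3)	/* ... I40E_AQ_SET_FEC_REQUEST_KR */
#define FEC_ABIL_KR	(1u << 4)	/* ... I40E_AQ_SET_FEC_ABILITY_KR */

enum fec_report { FEC_RPT_OFF, FEC_RPT_AUTO, FEC_RPT_RS, FEC_RPT_BASER };

static enum fec_report fec_cfg_to_report(uint8_t fec_cfg)
{
	/* else-if chain: AUTO wins over RS, RS over BaseR */
	if (fec_cfg & FEC_AUTO)
		return FEC_RPT_AUTO;
	if (fec_cfg & (FEC_REQ_RS | FEC_ABIL_RS))
		return FEC_RPT_RS;
	if (fec_cfg & (FEC_REQ_KR | FEC_ABIL_KR))
		return FEC_RPT_BASER;
	return FEC_RPT_OFF;	/* all-zero config means FEC disabled */
}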
+#endif /* ETHTOOL_GFECPARAM */ static int i40e_nway_reset(struct net_device *netdev) { @@ -1549,12 +1731,9 @@ static void i40e_get_pauseparam(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - struct i40e_link_status *hw_link_info = &hw->phy.link_info; struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; - pause->autoneg = - ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + pause->autoneg = hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED; /* PFC enabled so report LFC as off */ if (dcbx_cfg->pfc.pfcenable) { @@ -1618,7 +1797,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, if (dcbx_cfg->pfc.pfcenable) { netdev_info(netdev, - "Priority flow control enabled. Cannot set link flow control.\n"); + "Priority flow control enabled. Cannot set link flow control.\n"); return -EOPNOTSUPP; } @@ -1672,14 +1851,133 @@ static int i40e_set_pauseparam(struct net_device *netdev, return err; } +#ifndef HAVE_NDO_SET_FEATURES +static u32 i40e_get_rx_csum(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + return pf->flags & I40E_FLAG_RX_CSUM_ENABLED; +} + +static int i40e_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + if (data) + pf->flags |= I40E_FLAG_RX_CSUM_ENABLED; + else + pf->flags &= ~I40E_FLAG_RX_CSUM_ENABLED; + + return 0; +} + +static u32 i40e_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +static int i40e_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) { +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + netdev->features |= NETIF_F_IP_CSUM; +#endif + netdev->features |= NETIF_F_SCTP_CRC; + } else { +#ifdef NETIF_F_IPV6_CSUM + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SCTP_CRC); +#else + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC); +#endif + } + + return 0; +} + +static int i40e_set_tso(struct net_device *netdev, u32 data) +{ + if (data) { +#ifndef HAVE_NDO_FEATURES_CHECK + if (netdev->mtu >= 576) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev_info(netdev, "MTU setting is too low to enable TSO\n"); + } +#else + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; +#endif + } else { +#ifndef HAVE_NETDEV_VLAN_FEATURES + struct i40e_netdev_priv *np = netdev_priv(netdev); + /* disable TSO on all VLANs if they're present */ + if (np->vsi->vlgrp) { + int i; + struct net_device *v_netdev; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = + vlan_group_get_device(np->vsi->vlgrp, i); + if (v_netdev) { + v_netdev->features &= ~NETIF_F_TSO; + v_netdev->features &= ~NETIF_F_TSO6; + vlan_group_set_device(np->vsi->vlgrp, i, + v_netdev); + } + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + + return 0; +} +#ifdef ETHTOOL_GFLAGS +static int i40e_set_flags(struct net_device *netdev, u32 data) +{ +#ifdef ETHTOOL_GRXRINGS + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; +#endif + u32 supported_flags = 0; + bool need_reset = false; + int rc; + +#ifdef NETIF_F_RXHASH + supported_flags |= ETH_FLAG_RXHASH; + +#endif +#ifdef ETHTOOL_GRXRINGS + if (!(pf->flags & 
I40E_FLAG_MFP_ENABLED)) + supported_flags |= ETH_FLAG_NTUPLE; +#endif + rc = ethtool_op_set_flags(netdev, data, supported_flags); + if (rc) + return rc; + + /* if state changes we need to update pf->flags and maybe reset */ +#ifdef ETHTOOL_GRXRINGS + need_reset = i40e_set_ntuple(pf, netdev->features); +#endif /* ETHTOOL_GRXRINGS */ + if (need_reset) + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); + + return 0; +} +#endif /* ETHTOOL_GFLAGS */ + +#endif /* HAVE_NDO_SET_FEATURES */ static u32 i40e_get_msglevel(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - u32 debug_mask = pf->hw.debug_mask; - - if (debug_mask) - netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask); return pf->msg_enable; } @@ -1691,8 +1989,7 @@ static void i40e_set_msglevel(struct net_device *netdev, u32 data) if (I40E_DEBUG_USER & data) pf->hw.debug_mask = data; - else - pf->msg_enable = data; + pf->msg_enable = data; } static int i40e_get_regs_len(struct net_device *netdev) @@ -1899,9 +2196,11 @@ static void i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; if (pf->hw.pf_id == 0) drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; +#endif } static void i40e_get_ringparam(struct net_device *netdev, @@ -1967,13 +2266,6 @@ static int i40e_set_ringparam(struct net_device *netdev, (new_rx_count == vsi->rx_rings[0]->count)) return 0; - /* If there is a AF_XDP UMEM attached to any of Rx rings, - * disallow changing the number of descriptors -- regardless - * if the netdev is running or not. - */ - if (i40e_xsk_any_rx_ring_enabled(vsi)) - return -EBUSY; - while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) @@ -2004,7 +2296,7 @@ static int i40e_set_ringparam(struct net_device *netdev, (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); if (new_tx_count != vsi->tx_rings[0]->count) { netdev_info(netdev, - "Changing Tx descriptor count from %d to %d.\n", + "Changing Tx descriptor count from %d to %d\n", vsi->tx_rings[0]->count, new_tx_count); tx_rings = kcalloc(tx_alloc_queue_pairs, sizeof(struct i40e_ring), GFP_KERNEL); @@ -2016,7 +2308,7 @@ static int i40e_set_ringparam(struct net_device *netdev, for (i = 0; i < tx_alloc_queue_pairs; i++) { if (!i40e_active_tx_ring_index(vsi, i)) continue; - + /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_count; /* the desc and bi pointers will be reallocated in the @@ -2063,8 +2355,11 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].desc = NULL; rx_rings[i].rx_bi = NULL; +#ifdef HAVE_XDP_BUFF_RXQ /* Clear cloned XDP RX-queue info before setup call */ - memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq)); + memset(&rx_rings[i].xdp_rxq, 0, + sizeof(rx_rings[i].xdp_rxq)); +#endif /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ @@ -2188,11 +2483,27 @@ static int i40e_get_stats_count(struct net_device *netdev) * works because the num_tx_queues is set at device creation and never * changes. */ - stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; +#ifndef I40E_PF_EXTRA_STATS_OFF + /* The same applies to additional stats showing here the network usage + * counters for VFs. 
In order to handle it in a safe way, we also + * report here, similarly as in the queues case described above, + * the maximum possible, fixed number of these extra stats items. + */ +#endif /* !I40E_PF_EXTRA_STATS_OFF */ + stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->real_num_tx_queues; +#ifdef HAVE_XDP_SUPPORT + stats_len += I40E_QUEUE_STATS_XDP_LEN * netdev->real_num_tx_queues; +#endif +#ifndef I40E_PF_EXTRA_STATS_OFF + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) + stats_len += I40E_PF_STATS_EXTRA_LEN; + +#endif /* !I40E_PF_EXTRA_STATS_OFF */ return stats_len; } +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT static int i40e_get_sset_count(struct net_device *netdev, int sset) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -2211,6 +2522,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) return -EOPNOTSUPP; } } +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ /** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure @@ -2258,6 +2570,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = NULL; +#ifndef I40E_PF_EXTRA_STATS_OFF + unsigned int vsi_idx; + unsigned int vf_idx; + unsigned int vf_id; + bool is_vf_valid; +#endif /* !I40E_PF_EXTRA_STATS_OFF */ unsigned int i; bool veb_stats; u64 *p = data; @@ -2270,9 +2588,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats); rcu_read_lock(); - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); +#ifdef HAVE_XDP_SUPPORT + i40e_add_rx_queue_xdp_stats(&data, READ_ONCE(vsi->rx_rings[i])); +#endif } rcu_read_unlock(); @@ -2307,11 +2628,94 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats); } +#ifndef I40E_PF_EXTRA_STATS_OFF + /* As for now, we only process the SRIOV type VSIs (as extra stats to + * PF core stats) which are correlated with VF LAN VSI (hence below, + * in this for-loop instruction block, only VF's LAN VSIs are currently + * processed). + */ + for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { + is_vf_valid = true; + for (vf_idx = 0; vf_idx < pf->num_alloc_vfs; vf_idx++) + if (pf->vf[vf_idx].vf_id == vf_id) + break; + if (vf_idx >= pf->num_alloc_vfs) { + dev_info(&pf->pdev->dev, + "In the PF's array, there is no VF instance with VF_ID identifier %d or it is not set/initialized correctly yet\n", + vf_id); + is_vf_valid = false; + goto check_vf; + } + vsi_idx = pf->vf[vf_idx].lan_vsi_idx; + + vsi = pf->vsi[vsi_idx]; + if (!vsi) { + /* It means empty field in the PF VSI array... 
*/ + dev_info(&pf->pdev->dev, + "No LAN VSI instance referenced by VF %d or it is not set/initialized correctly yet\n", + vf_id); + is_vf_valid = false; + goto check_vf; + } + if (vsi->vf_id != vf_id) { + dev_info(&pf->pdev->dev, + "In the PF's array, there is incorrectly set/initialized LAN VSI or reference to it from VF %d is not set/initialized correctly yet\n", + vf_id); + is_vf_valid = false; + goto check_vf; + } + if (vsi->vf_id != pf->vf[vf_idx].vf_id || + !i40e_find_vsi_from_id(pf, pf->vf[vsi->vf_id].lan_vsi_id)) { + /* Disjointed identifiers or broken references VF-VSI */ + dev_warn(&pf->pdev->dev, + "SRIOV LAN VSI (index %d in PF VSI array) with invalid VF Identifier %d (referenced by VF %d, ordered as %d in VF array)\n", + vsi_idx, pf->vsi[vsi_idx]->vf_id, + pf->vf[vf_idx].vf_id, vf_idx); + is_vf_valid = false; + } +check_vf: + if (!is_vf_valid) { + i40e_add_ethtool_stats(&data, NULL, + i40e_gstrings_eth_stats_extra); + } else { + i40e_update_eth_stats(vsi); + i40e_add_ethtool_stats(&data, vsi, + i40e_gstrings_eth_stats_extra); + } + } + for (; vf_id < I40E_STATS_EXTRA_COUNT; vf_id++) + i40e_add_ethtool_stats(&data, NULL, + i40e_gstrings_eth_stats_extra); + +#endif /* !I40E_PF_EXTRA_STATS_OFF */ check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev), "ethtool stats count mismatch!"); } +#ifndef I40E_PF_EXTRA_STATS_OFF +/** + * i40e_update_vfid_in_stats - print VF num to stats names + * @stats_extra: array of stats structs with stats name strings + * @strings_num: number of stats name strings in array above (length) + * @vf_id: VF number to update stats name strings with + * + * Helper function to i40e_get_stat_strings() in case of extra stats. + **/ +static inline void +i40e_update_vfid_in_stats(struct i40e_stats stats_extra[], + int strings_num, int vf_id) +{ + int i; + + for (i = 0; i < strings_num; i++) { + snprintf(stats_extra[i].stat_string, + I40E_STATS_NAME_VFID_EXTRA_LEN, "vf%03d", vf_id); + stats_extra[i].stat_string[I40E_STATS_NAME_VFID_EXTRA_LEN - + 1] = '.'; + } +} +#endif /* !I40E_PF_EXTRA_STATS_OFF */ /** * i40e_get_stat_strings - copy stat strings into supplied buffer * @netdev: the netdev to collect strings for @@ -2334,11 +2738,15 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) i40e_add_stat_strings(&data, i40e_gstrings_misc_stats); - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, "tx", i); i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, "rx", i); +#ifdef HAVE_XDP_SUPPORT + i40e_add_stat_strings(&data, i40e_gstrings_rx_queue_xdp_stats, + "rx", i); +#endif } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) @@ -2354,11 +2762,21 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); +#ifndef I40E_PF_EXTRA_STATS_OFF + for (i = 0; i < I40E_STATS_EXTRA_COUNT; i++) { + i40e_update_vfid_in_stats + (i40e_gstrings_eth_stats_extra, + ARRAY_SIZE(i40e_gstrings_eth_stats_extra), i); + i40e_add_stat_strings(&data, i40e_gstrings_eth_stats_extra); + } + +#endif /* !I40E_PF_EXTRA_STATS_OFF */ check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -2380,6 
+2798,7 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) p += ETH_GSTRING_LEN; } } +#endif static void i40e_get_strings(struct net_device *netdev, u32 stringset, u8 *data) @@ -2392,17 +2811,21 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, case ETH_SS_STATS: i40e_get_stat_strings(netdev, data); break; +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT case ETH_SS_PRIV_FLAGS: i40e_get_priv_flag_strings(netdev, data); break; +#endif default: break; } } +#ifdef HAVE_ETHTOOL_GET_TS_INFO static int i40e_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { +#ifdef HAVE_PTP_1588_CLOCK struct i40e_pf *pf = i40e_netdev_to_pf(dev); /* only report HW timestamping if PTP is enabled */ @@ -2439,8 +2862,12 @@ static int i40e_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); return 0; +#else /* HAVE_PTP_1588_CLOCK */ + return ethtool_op_get_ts_info(dev, info); +#endif /* HAVE_PTP_1588_CLOCK */ } +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ static u64 i40e_link_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -2450,7 +2877,7 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "link test\n"); status = i40e_get_link_status(&pf->hw, &link_up); - if (status) { + if (status != I40E_SUCCESS) { netif_err(pf, drv, netdev, "link query timed out, please retry test\n"); *data = 1; return *data; @@ -2508,6 +2935,13 @@ static u64 i40e_intr_test(struct net_device *netdev, u64 *data) return *data; } +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int i40e_diag_test_count(struct net_device *netdev) +{ + return I40E_TEST_LEN; +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ static inline bool i40e_active_vfs(struct i40e_pf *pf) { struct i40e_vf *vfs = pf->vf; @@ -2644,7 +3078,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; /* only magic packet is supported */ - if (wol->wolopts & ~WAKE_MAGIC) + if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) return -EOPNOTSUPP; /* is this a new value? 
*/ @@ -2656,11 +3090,12 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return 0; } +#ifdef HAVE_ETHTOOL_SET_PHYS_ID static int i40e_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct i40e_netdev_priv *np = netdev_priv(netdev); - i40e_status ret = 0; + i40e_status ret = I40E_SUCCESS; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int blink_freq = 2; @@ -2705,11 +3140,58 @@ static int i40e_set_phys_id(struct net_device *netdev, default: break; } + if (ret) + return -ENOENT; + else + return 0; +} +#else /* HAVE_ETHTOOL_SET_PHYS_ID */ +static int i40e_phys_id(struct net_device *netdev, u32 data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + i40e_status ret = I40E_SUCCESS; + u16 temp_status; + int i; + + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { + pf->led_status = i40e_led_get(hw); + } else { + ret = i40e_led_get_phy(hw, &temp_status, + &pf->phy_led_val); + pf->led_status = temp_status; + } + + if (!data || data > 300) + data = 300; + + /* 10GBaseT PHY controls led's through PHY, not MAC */ + for (i = 0; i < (data * 1000); i += 400) { + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + i40e_led_set(hw, 0xF, false); + else + ret = i40e_led_set_phy(hw, true, pf->led_status, 0); + msleep_interruptible(200); + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + i40e_led_set(hw, 0x0, false); + else + ret = i40e_led_set_phy(hw, false, pf->led_status, 0); + msleep_interruptible(200); + } + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + i40e_led_set(hw, pf->led_status, false); + else + ret = i40e_led_set_phy(hw, false, pf->led_status, + (pf->led_status | + I40E_PHY_LED_MODE_ORIG)); + if (ret) return -ENOENT; else return 0; } +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt * Throttle Rate (ITR) ie. ITR(1) = 2us ITR(10) = 20 us, and also @@ -2784,6 +3266,7 @@ static int i40e_get_coalesce(struct net_device *netdev, return __i40e_get_coalesce(netdev, ec, -1); } +#ifdef ETHTOOL_PERQUEUE /** * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue * @netdev: netdev structure @@ -2798,6 +3281,7 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue, return __i40e_get_coalesce(netdev, ec, queue); } +#endif /* ETHTOOL_PERQUEUE */ /** * i40e_set_itr_per_queue - set ITR values for specific queue * @vsi: the VSI to set values for @@ -2847,6 +3331,52 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, i40e_flush(hw); } +/** + * i40e_is_coalesce_param_invalid - check for unsupported coalesce parameters + * @netdev: pointer to the netdev associated with this query + * @ec: ethtool structure to fill with driver's coalesce settings + * + * Print netdev info if driver doesn't support one of the parameters + * and return error. When any parameters will be implemented, remove only + * this parameter from param array. 
+ */ +static +int i40e_is_coalesce_param_invalid(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40e_ethtool_not_used { + u32 value; + const char *name; + } param[] = { + {ec->stats_block_coalesce_usecs, "stats-block-usecs"}, + {ec->rate_sample_interval, "sample-interval"}, + {ec->pkt_rate_low, "pkt-rate-low"}, + {ec->pkt_rate_high, "pkt-rate-high"}, + {ec->rx_max_coalesced_frames, "rx-frames"}, + {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"}, + {ec->tx_max_coalesced_frames, "tx-frames"}, + {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"}, + {ec->rx_coalesce_usecs_low, "rx-usecs-low"}, + {ec->rx_max_coalesced_frames_low, "rx-frames-low"}, + {ec->tx_coalesce_usecs_low, "tx-usecs-low"}, + {ec->tx_max_coalesced_frames_low, "tx-frames-low"}, + {ec->rx_max_coalesced_frames_high, "rx-frames-high"}, + {ec->tx_max_coalesced_frames_high, "tx-frames-high"} + }; + int i; + + for (i = 0; i < ARRAY_SIZE(param); i++) { + if (param[i].value) { + netdev_info(netdev, + "Setting %s not supported\n", + param[i].name); + return -EOPNOTSUPP; + } + } + + return 0; +} + /** * __i40e_set_coalesce - set coalesce settings for particular queue * @netdev: the netdev to change @@ -2865,6 +3395,9 @@ static int __i40e_set_coalesce(struct net_device *netdev, struct i40e_pf *pf = vsi->back; int i; + if (i40e_is_coalesce_param_invalid(netdev, ec)) + return -EOPNOTSUPP; + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; @@ -2956,6 +3489,20 @@ static int i40e_set_coalesce(struct net_device *netdev, return __i40e_set_coalesce(netdev, ec, -1); } +#ifdef ETHTOOL_SRXNTUPLE +/* We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid + * a null pointer dereference, as it was assumed that if the NETIF_F_NTUPLE flag + * was defined, this function was present. 

+ */ +static int i40e_set_rx_ntuple(struct net_device *dev, + struct ethtool_rx_ntuple *cmd) +{ + return -EOPNOTSUPP; +} + +#endif /* ETHTOOL_SRXNTUPLE */ + +#ifdef ETHTOOL_PERQUEUE /** * i40e_set_per_queue_coalesce - set specific queue's coalesce settings * @netdev: the netdev to change @@ -2969,7 +3516,9 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, { return __i40e_set_coalesce(netdev, ec, queue); } +#endif /* ETHTOOL_PERQUEUE */ +#ifdef ETHTOOL_GRXRINGS /** * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type * @pf: pointer to the physical function struct @@ -3102,18 +3651,69 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, value = be64_to_cpu(*((__be64 *)fsp->h_ext.data)); mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data)); -#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0) -#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16) +#define I40E_USERDEF_CLOUD_FILTER BIT_ULL(63) + +#define I40E_USERDEF_CLOUD_RESERVED GENMASK_ULL(62, 32) +#define I40E_USERDEF_TUNNEL_TYPE GENMASK_ULL(31, 24) +#define I40E_USERDEF_TENANT_ID GENMASK_ULL(23, 0) + +#define I40E_USERDEF_RESERVED GENMASK_ULL(62, 32) #define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0) - valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER); - if (valid < 0) { - return -EINVAL; - } else if (valid) { - data->flex_word = value & I40E_USERDEF_FLEX_WORD; - data->flex_offset = - (value & I40E_USERDEF_FLEX_OFFSET) >> 16; - data->flex_filter = true; +#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16) +#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0) + + if ((mask & I40E_USERDEF_CLOUD_FILTER) && + (value & I40E_USERDEF_CLOUD_FILTER)) + data->cloud_filter = true; + + if (data->cloud_filter) { + /* Make sure that the reserved bits are not set */ + valid = i40e_check_mask(mask, I40E_USERDEF_CLOUD_RESERVED); + if (valid < 0) { + return -EINVAL; + } else if (valid) { + if ((value & I40E_USERDEF_CLOUD_RESERVED) != 0) + return -EINVAL; + } + + /* These fields are only valid if this is a cloud filter */ + valid = i40e_check_mask(mask, I40E_USERDEF_TENANT_ID); + if (valid < 0) { + return -EINVAL; + } else if (valid) { + data->tenant_id = value & I40E_USERDEF_TENANT_ID; + data->tenant_id_valid = true; + } + + valid = i40e_check_mask(mask, I40E_USERDEF_TUNNEL_TYPE); + if (valid < 0) { + return -EINVAL; + } else if (valid) { + data->tunnel_type = + (value & I40E_USERDEF_TUNNEL_TYPE) >> 24; + data->tunnel_type_valid = true; + } + } else { + /* Make sure that the reserved bits are not set */ + valid = i40e_check_mask(mask, I40E_USERDEF_RESERVED); + if (valid < 0) { + return -EINVAL; + } else if (valid) { + if ((value & I40E_USERDEF_RESERVED) != 0) + return -EINVAL; + } + + /* These fields are only valid if this isn't a cloud filter */ + valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER); + if (valid < 0) { + return -EINVAL; + } else if (valid) { + data->flex_word = value & I40E_USERDEF_FLEX_WORD; + data->flex_offset = + (value & I40E_USERDEF_FLEX_OFFSET) >> 16; + data->flex_filter = true; + } } return 0; @@ -3132,10 +3732,25 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, { u64 value = 0, mask = 0; - if (data->flex_filter) { - value |= data->flex_word; - value |= (u64)data->flex_offset << 16; - mask |= I40E_USERDEF_FLEX_FILTER; + if (data->cloud_filter) { + value |= I40E_USERDEF_CLOUD_FILTER; + mask |= I40E_USERDEF_CLOUD_FILTER; + + if (data->tenant_id_valid) { + value |= data->tenant_id; + mask |= I40E_USERDEF_TENANT_ID; + } + + if 
(data->tunnel_type_valid) { + value |= (u64)data->tunnel_type << 24; + mask |= I40E_USERDEF_TUNNEL_TYPE; + } + } else { + if (data->flex_filter) { + value |= data->flex_word; + value |= (u64)data->flex_offset << 16; + mask |= I40E_USERDEF_FLEX_FILTER; + } } if (value || mask) @@ -3146,7 +3761,7 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, } /** - * i40e_get_ethtool_fdir_all - Populates the rule count of a command + * i40e_get_rx_filter_ids - Populates the rule count of a command * @pf: Pointer to the physical function struct * @cmd: The command to get or set Rx flow classification rules * @rule_locs: Array of used rule locations @@ -3156,23 +3771,34 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, * * Returns 0 on success or -EMSGSIZE if entry not found **/ -static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) +static int i40e_get_rx_filter_ids(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) { - struct i40e_fdir_filter *rule; + struct i40e_fdir_filter *f_rule; + struct i40e_cloud_filter *c_rule; struct hlist_node *node2; - int cnt = 0; + unsigned int cnt = 0; /* report total rule count */ cmd->data = i40e_get_fd_cnt_all(pf); - hlist_for_each_entry_safe(rule, node2, + hlist_for_each_entry_safe(f_rule, node2, &pf->fdir_filter_list, fdir_node) { if (cnt == cmd->rule_cnt) return -EMSGSIZE; - rule_locs[cnt] = rule->fd_id; + rule_locs[cnt] = f_rule->fd_id; + cnt++; + } + + /* find the cloud filter rule ids */ + hlist_for_each_entry_safe(c_rule, node2, + &pf->cloud_filter_list, cloud_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = c_rule->id; cnt++; } @@ -3297,6 +3923,100 @@ no_input_set: return 0; } +#define VXLAN_PORT 8472 + +/** + * i40e_get_cloud_filter_entry - get a cloud filter by loc + * @pf: pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * + * get cloud filter by loc. + * Returns 0 if success. 
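Both the parse and fill helpers above agree on one 64-bit layout for ethtool's user-def field: bit 63 selects the cloud-filter interpretation; below it, a cloud filter carries the tunnel type in bits 31:24 and the tenant id in bits 23:0, while a flex filter carries the flex offset in bits 31:16 and the match word in bits 15:0, with bits 62:32 reserved and required to be zero in both cases. A small userspace sketch of the encoding side (macro names are illustrative, not the driver's):

#include <stdint.h>

#define UD_CLOUD_FILTER		(1ull << 63)
#define UD_TUNNEL_TYPE(v)	(((uint64_t)(v) & 0xffu) << 24)	/* bits 31:24 */
#define UD_TENANT_ID(v)		((uint64_t)(v) & 0xffffffu)	/* bits 23:0 */
#define UD_FLEX_OFFSET(v)	(((uint64_t)(v) & 0xffffu) << 16) /* bits 31:16 */
#define UD_FLEX_WORD(v)		((uint64_t)(v) & 0xffffu)	/* bits 15:0 */

static uint64_t encode_cloud_userdef(uint8_t tnl_type, uint32_t tenant)
{
	return UD_CLOUD_FILTER | UD_TUNNEL_TYPE(tnl_type) |
	       UD_TENANT_ID(tenant);
}

static uint64_t encode_flex_userdef(uint16_t offset, uint16_t word)
{
	return UD_FLEX_OFFSET(offset) | UD_FLEX_WORD(word);
}

These values reach the driver as a value/mask pair through the user-def field of ethtool's ntuple interface, which is why every parse step above first checks the mask with i40e_check_mask().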
+ **/ +static int i40e_get_cloud_filter_entry(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + struct i40e_cloud_filter *rule, *filter = NULL; + struct i40e_rx_flow_userdef userdef = {0}; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, + &pf->cloud_filter_list, cloud_node) { + /* filter found */ + if (rule->id == fsp->location) + filter = rule; + + /* bail out if we've passed the likely location in the list */ + if (rule->id >= fsp->location) + break; + + } + if (!filter) { + dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n", + fsp->location); + return -ENOENT; + } + + userdef.cloud_filter = true; + + fsp->ring_cookie = filter->queue_id; + if (filter->seid != pf->vsi[pf->lan_vsi]->seid) { + struct i40e_vsi *vsi; + + vsi = i40e_find_vsi_from_seid(pf, filter->seid); + if (vsi && vsi->type == I40E_VSI_SRIOV) { + /* VFs are zero-indexed by the driver, but ethtool + * expects them to be one-indexed, so add one here + */ + u64 ring_vf = vsi->vf_id + 1; + + ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fsp->ring_cookie |= ring_vf; + } + } + + ether_addr_copy(fsp->h_u.ether_spec.h_dest, filter->outer_mac); + ether_addr_copy(fsp->h_u.ether_spec.h_source, filter->inner_mac); + + if (filter->flags & I40E_CLOUD_FIELD_OMAC) + ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast); + if (filter->flags & I40E_CLOUD_FIELD_IMAC) + ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast); + if (filter->flags & I40E_CLOUD_FIELD_IVLAN) + fsp->h_ext.vlan_tci = filter->inner_vlan; + if (filter->flags & I40E_CLOUD_FIELD_TEN_ID) { + userdef.tenant_id_valid = true; + userdef.tenant_id = filter->tenant_id; + } + if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) { + userdef.tunnel_type_valid = true; + userdef.tunnel_type = filter->tunnel_type; + } + + if (filter->flags & I40E_CLOUD_FIELD_IIP) { + if (i40e_is_l4mode_enabled()) { + fsp->flow_type = UDP_V4_FLOW; + fsp->h_u.udp_ip4_spec.pdst = filter->dst_port; + } else { + fsp->flow_type = IP_USER_FLOW; + } + + fsp->h_u.usr_ip4_spec.ip4dst = filter->inner_ip[0]; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + } else { + fsp->flow_type = ETHER_FLOW; + } + + i40e_fill_rx_flow_user_data(fsp, &userdef); + + fsp->flow_type |= FLOW_EXT; + + return 0; +} + /** * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure @@ -3306,7 +4026,11 @@ no_input_set: * Returns Success if the command is supported. 
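The ring_cookie handling above follows the stock ethtool convention: the low bits carry the queue index, and the field above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF carries a one-based VF number, with zero meaning the PF itself, which is why the driver adds one when reporting a VF and subtracts one when applying. A sketch of that packing in plain C (the offset constant is a stand-in for the real one):

#include <stdint.h>

#define RING_VF_OFF 32	/* stand-in for ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF */

static uint64_t make_ring_cookie(uint32_t queue, uint32_t vf_zero_based,
				 int to_vf)
{
	uint64_t cookie = queue;

	/* ethtool expects VFs one-based; the driver stores them zero-based */
	if (to_vf)
		cookie |= ((uint64_t)vf_zero_based + 1) << RING_VF_OFF;
	return cookie;
}

static uint32_t cookie_queue(uint64_t cookie)
{
	return (uint32_t)cookie;
}

static uint32_t cookie_vf(uint64_t cookie)	/* 0 means "PF" */
{
	return (uint32_t)(cookie >> RING_VF_OFF);
}

The kernel provides ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf() as the canonical accessors for these two fields, and the driver uses them when applying a filter.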
**/ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else u32 *rule_locs) +#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -3323,15 +4047,23 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = pf->fdir_pf_active_filters; + cmd->rule_cnt += pf->num_cloud_filters; /* report total rule count */ cmd->data = i40e_get_fd_cnt_all(pf); ret = 0; break; case ETHTOOL_GRXCLSRULE: ret = i40e_get_ethtool_fdir_entry(pf, cmd); + /* if no such fdir filter then try the cloud list */ + if (ret) + ret = i40e_get_cloud_filter_entry(pf, cmd); break; case ETHTOOL_GRXCLSRLALL: - ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + ret = i40e_get_rx_filter_ids(pf, cmd, (u32 *)rule_locs); +#else + ret = i40e_get_rx_filter_ids(pf, cmd, rule_locs); +#endif break; default: break; @@ -3497,12 +4229,325 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return 0; } +/** + * i40e_cloud_filter_mask2flags- Convert cloud filter details to filter type + * @pf: pointer to the physical function struct + * @fsp: RX flow classification rules + * @userdef: pointer to userdef field data + * @flags: Resultant combination of all the fields to decide the tuple + * + * The general trick in setting these flags is that if the mask field for + * a value is non-zero, then the field itself was set to something, so we + * use this to tell us what has been selected. + * + * Returns 0 if a valid filter type was identified. + **/ +static int i40e_cloud_filter_mask2flags(struct i40e_pf *pf, + struct ethtool_rx_flow_spec *fsp, + struct i40e_rx_flow_userdef *userdef, + u8 *flags) +{ + u8 i = 0; + + *flags = 0; + + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + /* use is_broadcast and is_zero to check for all 0xf or 0 */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + i |= I40E_CLOUD_FIELD_OMAC; + } else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) { + i &= ~I40E_CLOUD_FIELD_OMAC; + } else { + dev_info(&pf->pdev->dev, "Bad ether dest mask %pM\n", + fsp->m_u.ether_spec.h_dest); + return I40E_ERR_CONFIG; + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + i |= I40E_CLOUD_FIELD_IMAC; + } else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_source)) { + i &= ~I40E_CLOUD_FIELD_IMAC; + } else { + dev_info(&pf->pdev->dev, "Bad ether source mask %pM\n", + fsp->m_u.ether_spec.h_source); + return I40E_ERR_CONFIG; + } + break; + + case IP_USER_FLOW: + if (pf->hw.mac.type == I40E_MAC_X722) { + dev_info(&pf->pdev->dev, "Failed to set filter. 
Destination IP filters are not supported for this device.\n"); + return I40E_ERR_CONFIG; + } + if (fsp->m_u.usr_ip4_spec.ip4dst == cpu_to_be32(0xffffffff)) { + i |= I40E_CLOUD_FIELD_IIP; + } else if (!fsp->m_u.usr_ip4_spec.ip4dst) { + i &= ~I40E_CLOUD_FIELD_IIP; + } else { + dev_info(&pf->pdev->dev, "Bad ip dst mask 0x%08x\n", + be32_to_cpu(fsp->m_u.usr_ip4_spec.ip4dst)); + return I40E_ERR_CONFIG; + } + break; + + case UDP_V4_FLOW: + if (fsp->m_u.udp_ip4_spec.pdst == cpu_to_be16(0xffff)) { + i |= I40E_CLOUD_FIELD_IIP; + } else { + dev_info(&pf->pdev->dev, "Bad UDP dst mask 0x%04x\n", + be16_to_cpu(fsp->m_u.udp_ip4_spec.pdst)); + return I40E_ERR_CONFIG; + } + break; + + default: + return I40E_ERR_CONFIG; + } + + switch (be16_to_cpu(fsp->m_ext.vlan_tci)) { + case 0xffff: + if (fsp->h_ext.vlan_tci & cpu_to_be16(~0x7fff)) { + dev_info(&pf->pdev->dev, "Bad vlan %u\n", + be16_to_cpu(fsp->h_ext.vlan_tci)); + return I40E_ERR_CONFIG; + } + i |= I40E_CLOUD_FIELD_IVLAN; + break; + case 0: + i &= ~I40E_CLOUD_FIELD_IVLAN; + break; + default: + dev_info(&pf->pdev->dev, "Bad vlan mask %u\n", + be16_to_cpu(fsp->m_ext.vlan_tci)); + return I40E_ERR_CONFIG; + } + + /* We already know that we're a cloud filter, so we don't need to + * re-check that. + */ + if (userdef->tenant_id_valid) { + if (userdef->tenant_id == 0) + i &= ~I40E_CLOUD_FIELD_TEN_ID; + else + i |= I40E_CLOUD_FIELD_TEN_ID; + } + + /* Make sure the flags produce a valid type */ + if (i40e_get_cloud_filter_type(i, NULL)) { + dev_info(&pf->pdev->dev, "Invalid mask config, flags = %d\n", + i); + return I40E_ERR_CONFIG; + } + + *flags = i; + return I40E_SUCCESS; +} + +/* i40e_add_cloud_filter_ethtool needs i40e_del_fdir_entry() */ +static int i40e_del_fdir_entry(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd); + +/** + * i40e_add_cloud_filter_ethtool - Add cloud filter + * @vsi: pointer to the VSI structure + * @cmd: The command to get or set Rx flow classification rules + * @userdef: pointer to userdef field data + * + * Add cloud filter for a specific flow spec. + * Returns 0 if the filter was successfully added. + **/ +static int i40e_add_cloud_filter_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd, + struct i40e_rx_flow_userdef *userdef) +{ + struct i40e_cloud_filter *rule, *parent, *filter = NULL; + struct ethtool_rx_flow_spec *fsp; + u16 dest_seid = 0, q_index = 0; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node2; + u32 ring, vf; + u8 flags = 0; + int ret; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return -EOPNOTSUPP; + + if (pf->flags & I40E_FLAG_MFP_ENABLED) + return -EOPNOTSUPP; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) + return -EBUSY; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + /* The ring_cookie is a mask of queue index and VF id we wish to + * target. This is the same for regular flow director filters. 
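i40e_cloud_filter_mask2flags above enforces an all-or-nothing rule for every field: the hardware can only match a complete field or ignore it, so each ethtool mask must be all-ones or all-zero, and any partial mask (an IP prefix, say) is rejected with I40E_ERR_CONFIG. The tri-state check, reduced to a standalone sketch:

#include <stdint.h>

enum mask_kind { MASK_IGNORE, MASK_MATCH, MASK_INVALID };

static enum mask_kind classify_mask32(uint32_t mask)
{
	if (mask == 0xffffffffu)
		return MASK_MATCH;	/* field participates in the match */
	if (mask == 0)
		return MASK_IGNORE;	/* field is a wildcard */
	return MASK_INVALID;		/* e.g. a prefix mask like 0xffffff00 */
}

For MAC addresses the driver expresses the same three cases with is_broadcast_ether_addr() and is_zero_ether_addr(), as seen in the ETHER_FLOW arm above.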
+ */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + dev_warn(&pf->pdev->dev, "Cloud filters do not support the drop action.\n"); + return -EOPNOTSUPP; + } + + ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf) { + if (ring >= vsi->num_queue_pairs) + return -EINVAL; + dest_seid = vsi->seid; + } else { + /* VFs are zero-indexed, so we subtract one here */ + vf--; + + if (vf >= pf->num_alloc_vfs) + return -EINVAL; + if (!i40e_is_l4mode_enabled() && + ring >= pf->vf[vf].num_queue_pairs) + return -EINVAL; + dest_seid = pf->vsi[pf->vf[vf].lan_vsi_idx]->seid; + } + q_index = ring; + + ret = i40e_cloud_filter_mask2flags(pf, fsp, userdef, &flags); + if (ret) + return -EINVAL; + + /* if filter exists with same id, delete the old one */ + parent = NULL; + hlist_for_each_entry_safe(rule, node2, + &pf->cloud_filter_list, cloud_node) { + /* filter exists with the id */ + if (rule->id == fsp->location) + filter = rule; + + /* bail out if we've passed the likely location in the list */ + if (rule->id >= fsp->location) + break; + + /* track where we left off */ + parent = rule; + } + if (filter && (filter->id == fsp->location)) { + /* found it in the cloud list, so remove it */ + ret = i40e_add_del_cloud_filter_ex(pf, filter, false); + if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) + return ret; + hlist_del(&filter->cloud_node); + kfree(filter); + pf->num_cloud_filters--; + } else { + /* not in the cloud list, so check the PF's fdir list */ + (void)i40e_del_fdir_entry(pf->vsi[pf->lan_vsi], cmd); + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + ether_addr_copy(filter->outer_mac, + fsp->h_u.ether_spec.h_dest); + ether_addr_copy(filter->inner_mac, + fsp->h_u.ether_spec.h_source); + break; + + case IP_USER_FLOW: + if (flags & I40E_CLOUD_FIELD_TEN_ID) { + dev_info(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); + kfree(filter); + return I40E_ERR_CONFIG; + } + filter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst; + break; + + case UDP_V4_FLOW: + filter->dst_port = fsp->h_u.udp_ip4_spec.pdst; + break; + + default: + dev_info(&pf->pdev->dev, "unknown flow type 0x%x\n", + (fsp->flow_type & ~FLOW_EXT)); + kfree(filter); + return I40E_ERR_CONFIG; + } + + if (userdef->tenant_id_valid) + filter->tenant_id = userdef->tenant_id; + else + filter->tenant_id = 0; + if (userdef->tunnel_type_valid) + filter->tunnel_type = userdef->tunnel_type; + else + filter->tunnel_type = I40E_CLOUD_TNL_TYPE_NONE; + + filter->id = fsp->location; + filter->seid = dest_seid; + filter->queue_id = q_index; + filter->flags = flags; + filter->inner_vlan = fsp->h_ext.vlan_tci; + + ret = i40e_add_del_cloud_filter_ex(pf, filter, true); + if (ret) { + kfree(filter); + return ret; + } + + /* add filter to the ordered list */ + INIT_HLIST_NODE(&filter->cloud_node); + if (parent) + hlist_add_behind(&filter->cloud_node, &parent->cloud_node); + else + hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); + pf->num_cloud_filters++; + + return 0; +} + +/** + * i40e_del_cloud_filter_ethtool - del vxlan filter + * @pf: pointer to the physical function struct + * @cmd: RX flow classification rules + * + * Delete vxlan filter for a specific flow spec. + * Returns 0 if the filter was successfully deleted. 
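The insertion loop above keeps the cloud filter list sorted by rule id: it walks until it passes the target location, remembering the last node visited as parent, so a rule with the same id can be replaced in place and hlist_add_behind()/hlist_add_head() can splice the new node without a second traversal; the flow director list further down uses the same discipline. A userspace analog on a plain singly linked list:

#include <stddef.h>

struct filter {
	unsigned int id;
	struct filter *next;
};

/* Insert n into an ascending-by-id list; returns the (possibly new) head. */
static struct filter *insert_sorted(struct filter *head, struct filter *n)
{
	struct filter *cur = head, *parent = NULL;

	/* stop at the first node whose id is >= ours, tracking its parent */
	while (cur && cur->id < n->id) {
		parent = cur;
		cur = cur->next;
	}
	if (parent) {
		n->next = parent->next;	/* splice in behind parent */
		parent->next = n;
		return head;
	}
	n->next = head;			/* new smallest id becomes the head */
	return n;
}

Keeping the list ordered is also what lets every lookup in this file bail out early with "if (rule->id >= fsp->location) break;".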
+ **/ +static int i40e_del_cloud_filter_ethtool(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd) +{ + struct i40e_cloud_filter *rule, *filter = NULL; + struct ethtool_rx_flow_spec *fsp; + struct hlist_node *node2; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + hlist_for_each_entry_safe(rule, node2, + &pf->cloud_filter_list, cloud_node) { + /* filter found */ + if (rule->id == fsp->location) + filter = rule; + + /* bail out if we've passed the likely location in the list */ + if (rule->id >= fsp->location) + break; + } + if (!filter) + return -ENOENT; + + /* remove filter from the list even if failed to remove from device */ + (void)i40e_add_del_cloud_filter_ex(pf, filter, false); + hlist_del(&filter->cloud_node); + kfree(filter); + pf->num_cloud_filters--; + + return 0; +} /** * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry * @vsi: Pointer to the targeted VSI * @input: The filter to update or NULL to indicate deletion * @sw_idx: Software index to the filter - * @cmd: The command to get or set Rx flow classification rules * * This function updates (or deletes) a Flow Director entry from * the hlist of the corresponding PF @@ -3511,38 +4556,38 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) **/ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, - u16 sw_idx, - struct ethtool_rxnfc *cmd) + u16 sw_idx) { struct i40e_fdir_filter *rule, *parent; struct i40e_pf *pf = vsi->back; struct hlist_node *node2; - int err = -EINVAL; + int err = -ENOENT; parent = NULL; rule = NULL; hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { - /* hash found, or no matching entry */ + /* rule id found, or passed its spot in the list */ if (rule->fd_id >= sw_idx) break; parent = rule; } - /* if there is an old rule occupying our place remove it */ + /* is there is an old rule occupying our target filter slot? */ if (rule && (rule->fd_id == sw_idx)) { /* Remove this rule, since we're either deleting it, or * replacing it. */ err = i40e_add_del_fdir(vsi, rule, false); hlist_del(&rule->fdir_node); - kfree(rule); pf->fdir_pf_active_filters--; + + kfree(rule); } /* If we weren't given an input, this is a delete, so just return the - * error code indicating if there was an entry at the requested slot + * error code indicating if there was an entry at the requested slot. */ if (!input) return err; @@ -3550,12 +4595,11 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, /* Otherwise, install the new rule as requested */ INIT_HLIST_NODE(&input->fdir_node); - /* add filter to the list */ + /* add filter to the ordered list */ if (parent) hlist_add_behind(&input->fdir_node, &parent->fdir_node); else - hlist_add_head(&input->fdir_node, - &pf->fdir_filter_list); + hlist_add_head(&input->fdir_node, &pf->fdir_filter_list); /* update counts */ pf->fdir_pf_active_filters++; @@ -3651,7 +4695,7 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; - ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location); i40e_prune_flex_pit_list(pf); @@ -4360,7 +5404,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a, struct i40e_fdir_filter *b) { - /* The filters do not much if any of these criteria differ. */ + /* The filters do not match if any of these criteria differ. 
*/ if (a->dst_ip != b->dst_ip || a->src_ip != b->src_ip || a->dst_port != b->dst_port || @@ -4470,6 +5514,9 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (i40e_parse_rx_flow_user_data(fsp, &userdef)) return -EINVAL; + if (userdef.cloud_filter) + return i40e_add_cloud_filter_ethtool(vsi, cmd, &userdef); + /* Extended MAC field is not supported */ if (fsp->flow_type & FLOW_MAC_EXT) return -EINVAL; @@ -4511,7 +5558,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, } input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) return -ENOMEM; @@ -4521,8 +5567,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->dest_ctl = dest_ctl; input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); - input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; - input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; input->flow_type = fsp->flow_type & ~FLOW_EXT; input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; @@ -4545,14 +5589,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (ret) goto free_filter_memory; - /* Add the input filter to the fdir_input_list, possibly replacing + /* Add the input filter to the fdir_filter_list, possibly replacing * a previous filter. Do not free the input structure after adding it - * to the list as this would cause a use-after-free bug. + * to the list as this would cause a use after free bug. */ - i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); + i40e_update_ethtool_fdir_entry(vsi, input, fsp->location); + + (void)i40e_del_cloud_filter_ethtool(pf, cmd); ret = i40e_add_del_fdir(vsi, input, true); + if (ret) goto remove_sw_rule; + return 0; remove_sw_rule: @@ -4581,12 +5629,17 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXFH: ret = i40e_set_rss_hash_opt(pf, cmd); break; + case ETHTOOL_SRXCLSRLINS: ret = i40e_add_fdir_ethtool(vsi, cmd); break; + case ETHTOOL_SRXCLSRLDEL: ret = i40e_del_fdir_entry(vsi, cmd); + if (ret == -ENOENT) + ret = i40e_del_cloud_filter_ethtool(pf, cmd); break; + default: break; } @@ -4594,6 +5647,8 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) return ret; } +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_SCHANNELS /** * i40e_max_channels - get Max number of combined channels supported * @vsi: vsi pointer @@ -4708,6 +5763,9 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } +#endif /* ETHTOOL_SCHANNELS */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) /** * i40e_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure @@ -4740,8 +5798,12 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) * Reads the indirection table directly from the hardware. Returns 0 on * success. **/ +#ifdef HAVE_RXFH_HASHFUNC static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -4749,9 +5811,11 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, int ret; u16 i; +#ifdef HAVE_RXFH_HASHFUNC if (hfunc) *hfunc = ETH_RSS_HASH_TOP; +#endif if (!indir) return 0; @@ -4781,8 +5845,17 @@ out: * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
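Note how ETHTOOL_SRXCLSRLDEL is dispatched above: the request carries only a rule location, so the driver first tries the flow director list and, only when that returns -ENOENT, retries the same location against the cloud filter list (ETHTOOL_GRXCLSRULE does the equivalent for reads). The shape of that fallback, with hypothetical per-list helpers standing in for the driver's functions:

#include <errno.h>

/* hypothetical per-list delete helpers, each returning 0 or -ENOENT */
int del_fdir_rule(unsigned int loc);
int del_cloud_rule(unsigned int loc);

static int del_rx_rule(unsigned int loc)
{
	int ret = del_fdir_rule(loc);

	/* only a definite "no such rule" falls through to the second list;
	 * any other error is reported as-is
	 */
	if (ret == -ENOENT)
		ret = del_cloud_rule(loc);
	return ret;
}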
**/ +#ifdef HAVE_RXFH_HASHFUNC static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int i40e_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -4790,8 +5863,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, u8 *seed = NULL; u16 i; +#ifdef HAVE_RXFH_HASHFUNC if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; +#endif + + /* Verify user input. */ + if (indir) { + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) { + if (indir[i] >= vsi->rss_size) + return -EINVAL; + } + } if (key) { if (!vsi->rss_hkey_user) { @@ -4820,7 +5903,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, return i40e_config_rss(vsi, seed, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE); } +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT /** * i40e_get_priv_flags - report device private flags * @dev: network interface device structure @@ -5006,6 +6091,13 @@ flags_complete: dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); } + if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && + (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)) { + dev_err(&pf->pdev->dev, + "Setting link-down-on-close not supported on this port\n"); + return -EOPNOTSUPP; + } + if ((changed_flags & new_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && (new_flags & I40E_FLAG_MFP_ENABLED)) @@ -5032,7 +6124,7 @@ flags_complete: dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; } else { status = i40e_aq_start_lldp(&pf->hw, false, NULL); - if (status) { + if (status != I40E_SUCCESS) { adq_err = pf->hw.aq.asq_last_status; switch (adq_err) { case I40E_AQ_RC_EEXIST: @@ -5043,7 +6135,7 @@ flags_complete: case I40E_AQ_RC_EPERM: dev_warn(&pf->pdev->dev, "Device configuration forbids SW from starting the LLDP agent.\n"); - return -EINVAL; + return (-EINVAL); default: dev_warn(&pf->pdev->dev, "Starting FW LLDP agent failed: error: %s, %s\n", @@ -5051,7 +6143,7 @@ flags_complete: status), i40e_aq_str(&pf->hw, adq_err)); - return -EINVAL; + return (-EINVAL); } } } @@ -5072,6 +6164,9 @@ flags_complete: return 0; } +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +#ifdef ETHTOOL_GMODULEINFO /** * i40e_get_module_info - get (Q)SFP+ module type info @@ -5081,6 +6176,7 @@ flags_complete: static int i40e_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { + i40e_status status; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -5088,8 +6184,7 @@ static int i40e_get_module_info(struct net_device *netdev, u32 sff8472_comp = 0; u32 sff8472_swap = 0; u32 sff8636_rev = 0; - i40e_status status; - u32 type = 0; + u8 type; /* Check if firmware supports reading module EEPROM. 
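The validation block added to i40e_set_rxfh above checks every indirection table entry against vsi->rss_size before anything is written, so a stale or hand-crafted LUT cannot steer packets to a queue that does not exist. The same check as a standalone sketch:

#include <stdint.h>
#include <stddef.h>

static int validate_rss_lut(const uint32_t *lut, size_t lut_size,
			    uint32_t num_queues)
{
	size_t i;

	for (i = 0; i < lut_size; i++)
		if (lut[i] >= num_queues)
			return -1;	/* the driver returns -EINVAL here */
	return 0;	/* safe to program the whole table */
}

Validating the whole table up front matters because the LUT is programmed as a unit; rejecting entry by entry mid-write could leave the hardware with a half-updated table.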
*/ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { @@ -5112,7 +6207,7 @@ static int i40e_get_module_info(struct net_device *netdev, case I40E_MODULE_TYPE_SFP: status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - I40E_I2C_EEPROM_DEV_ADDR, + I40E_I2C_EEPROM_DEV_ADDR, true, I40E_MODULE_SFF_8472_COMP, &sff8472_comp, NULL); if (status) @@ -5120,7 +6215,7 @@ static int i40e_get_module_info(struct net_device *netdev, status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - I40E_I2C_EEPROM_DEV_ADDR, + I40E_I2C_EEPROM_DEV_ADDR, true, I40E_MODULE_SFF_8472_SWAP, &sff8472_swap, NULL); if (status) @@ -5133,26 +6228,21 @@ static int i40e_get_module_info(struct net_device *netdev, netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n"); modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - } else if (sff8472_comp == 0x00) { + } else if (sff8472_comp && + (sff8472_swap & I40E_MODULE_SFF_DIAG_CAPAB)) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { /* Module is not SFF-8472 compliant */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) { - /* Module is SFF-8472 compliant but doesn't implement - * Digital Diagnostic Monitoring (DDM). - */ - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - } else { - modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } break; case I40E_MODULE_TYPE_QSFP_PLUS: /* Read from memory page 0. */ status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - 0, + 0, true, I40E_MODULE_REVISION_ADDR, &sff8636_rev, NULL); if (status) @@ -5188,12 +6278,12 @@ static int i40e_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { + i40e_status status; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; bool is_sfp = false; - i40e_status status; u32 value = 0; int i; @@ -5223,31 +6313,159 @@ static int i40e_get_module_eeprom(struct net_device *netdev, status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - addr, offset, &value, NULL); + addr, true, offset, &value, NULL); if (status) return -EIO; data[i] = value; } return 0; } +#endif /* ETHTOOL_GMODULEINFO */ +#ifdef ETHTOOL_GEEE static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { - return -EOPNOTSUPP; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp phy_cfg; + i40e_status status = 0; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* Get initial PHY capabilities */ + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL); + if (status) + return -EAGAIN; + + /* Check whether NIC configuration is compatible with Energy Efficient + * Ethernet (EEE) mode. + */ + if (phy_cfg.eee_capability == 0) + return -EOPNOTSUPP; + + edata->supported = SUPPORTED_Autoneg; + edata->lp_advertised = edata->supported; + + /* Get current configuration */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL); + if (status) + return -EAGAIN; + + edata->advertised = phy_cfg.eee_capability ? 
SUPPORTED_Autoneg : 0U; + edata->eee_enabled = !!edata->advertised; + edata->tx_lpi_enabled = pf->stats.tx_lpi_status; + + edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status; + + return 0; +} +#endif /* ETHTOOL_GEEE */ + +#ifdef ETHTOOL_SEEE +static int i40e_is_eee_param_supported(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_ethtool_not_used { + u32 value; + const char *name; + } param[] = { + {edata->advertised & ~SUPPORTED_Autoneg, "advertise"}, + {edata->tx_lpi_timer, "tx-timer"}, + {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"} + }; + int i; + + for (i = 0; i < ARRAY_SIZE(param); i++) { + if (param[i].value) { + netdev_info(netdev, + "EEE setting %s not supported\n", + param[i].name); + return -EOPNOTSUPP; + } + } + + return 0; } static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { - return -EOPNOTSUPP; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp abilities; + i40e_status status = I40E_SUCCESS; + struct i40e_aq_set_phy_config config; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + __le16 eee_capability; + + /* Deny parameters we don't support */ + if (i40e_is_eee_param_supported(netdev, edata)) + return -EOPNOTSUPP; + + /* Get initial PHY capabilities */ + status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (status) + return -EAGAIN; + + /* Check whether NIC configuration is compatible with Energy Efficient + * Ethernet (EEE) mode. + */ + if (abilities.eee_capability == 0) + return -EOPNOTSUPP; + + /* Cache initial EEE capability */ + eee_capability = abilities.eee_capability; + + /* Get current PHY configuration */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) + return -EAGAIN; + + /* Cache current PHY configuration */ + config.phy_type = abilities.phy_type; + config.link_speed = abilities.link_speed; + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + /* Set desired EEE state */ + if (edata->eee_enabled) { + config.eee_capability = eee_capability; + config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } else { + config.eee_capability = 0; + config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } + + /* Apply modified PHY configuration */ + status = i40e_aq_set_phy_config(hw, &config, NULL); + if (status) + return -EAGAIN; + + return 0; } +#endif /* ETHTOOL_SEEE */ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = { + .get_drvinfo = i40e_get_drvinfo, .set_eeprom = i40e_set_eeprom, .get_eeprom_len = i40e_get_eeprom_len, .get_eeprom = i40e_get_eeprom, }; static const struct ethtool_ops i40e_ethtool_ops = { +#ifndef ETHTOOL_GLINKSETTINGS + .get_settings = i40e_get_settings, + .set_settings = i40e_set_settings, +#endif .get_drvinfo = i40e_get_drvinfo, .get_regs_len = i40e_get_regs_len, .get_regs = i40e_get_regs, @@ -5264,35 +6482,117 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_pauseparam = i40e_set_pauseparam, .get_msglevel = i40e_get_msglevel, .set_msglevel = i40e_set_msglevel, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = i40e_get_rx_csum, + .set_rx_csum = i40e_set_rx_csum, + 
.get_tx_csum = i40e_get_tx_csum, + .set_tx_csum = i40e_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = i40e_set_tso, +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = i40e_set_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS .get_rxnfc = i40e_get_rxnfc, .set_rxnfc = i40e_set_rxnfc, +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = i40e_set_rx_ntuple, +#endif +#endif +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = i40e_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ .self_test = i40e_diag_test, .get_strings = i40e_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_GEEE .get_eee = i40e_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE .set_eee = i40e_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef HAVE_ETHTOOL_SET_PHYS_ID .set_phys_id = i40e_set_phys_id, +#else + .phys_id = i40e_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = i40e_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ .get_sset_count = i40e_get_sset_count, + .get_priv_flags = i40e_get_priv_flags, + .set_priv_flags = i40e_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ .get_ethtool_stats = i40e_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif .get_coalesce = i40e_get_coalesce, .set_coalesce = i40e_set_coalesce, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) .get_rxfh_key_size = i40e_get_rxfh_key_size, .get_rxfh_indir_size = i40e_get_rxfh_indir_size, .get_rxfh = i40e_get_rxfh, .set_rxfh = i40e_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_SCHANNELS .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, +#endif +#ifdef ETHTOOL_GMODULEINFO .get_module_info = i40e_get_module_info, .get_module_eeprom = i40e_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO .get_ts_info = i40e_get_ts_info, - .get_priv_flags = i40e_get_priv_flags, - .set_priv_flags = i40e_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ +#ifdef ETHTOOL_PERQUEUE .get_per_queue_coalesce = i40e_get_per_queue_coalesce, .set_per_queue_coalesce = i40e_set_per_queue_coalesce, - .get_link_ksettings = i40e_get_link_ksettings, - .set_link_ksettings = i40e_set_link_ksettings, +#endif /* ETHTOOL_PERQUEUE */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = i40e_get_link_ksettings, + .set_link_ksettings = i40e_set_link_ksettings, +#endif /* ETHTOOL_GLINKSETTINGS */ +#ifdef ETHTOOL_GFECPARAM .get_fecparam = i40e_get_fec_param, .set_fecparam = i40e_set_fec_param, +#endif /* ETHTOOL_GFECPARAM */ +#ifdef HAVE_DDP_PROFILE_UPLOAD_SUPPORT .flash_device = i40e_ddp_flash, +#endif /* DDP_PROFILE_UPLOAD_SUPPORT */ +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext i40e_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = i40e_get_ts_info, + .set_phys_id = i40e_set_phys_id, + .get_channels = i40e_get_channels, + .set_channels = i40e_set_channels, +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = i40e_get_rxfh_key_size, + .get_rxfh_indir_size = i40e_get_rxfh_indir_size, + .get_rxfh = i40e_get_rxfh, + .set_rxfh = i40e_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_GEEE + .get_eee = 
i40e_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = i40e_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = i40e_get_module_info, + .get_module_eeprom = i40e_get_module_eeprom, +#endif }; void i40e_set_ethtool_ops(struct net_device *netdev) @@ -5300,8 +6600,23 @@ void i40e_set_ethtool_ops(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) - netdev->ethtool_ops = &i40e_ethtool_ops; - else + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops; + } else { + netdev->ethtool_ops = &i40e_ethtool_ops; + set_ethtool_ops_ext(netdev, &i40e_ethtool_ops_ext); + } } +#else +void i40e_set_ethtool_ops(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) + netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops; + else + netdev->ethtool_ops = &i40e_ethtool_ops; +} +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#endif /* SIOCETHTOOL */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h b/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h new file mode 100644 index 000000000000..dc26d38ee639 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ + +/* ethtool statistics helpers */ + +/** + * struct i40e_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the I40E_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the i40e_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the i40e_add_stat_string() helper function. + **/ +struct i40e_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro to define an i40e_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ +#define I40E_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. 
+ */ +#ifdef HAVE_NDO_GET_STATS64 +#define I40E_NETDEV_STAT(_net_stat) \ + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) +#else +#define I40E_NETDEV_STAT(_net_stat) \ + I40E_STAT(struct net_device_stats, #_net_stat, _net_stat) +#endif + +/* Helper macro for defining some statistics related to queues */ +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct i40e_stats i40e_gstrings_queue_stats[] = { + I40E_QUEUE_STAT("%s-%u.packets", stats.packets), + I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +#ifdef HAVE_XDP_SUPPORT +/* Stats associated with Rx ring's XDP prog */ +static const struct i40e_stats i40e_gstrings_rx_queue_xdp_stats[] = { + I40E_QUEUE_STAT("%s-%u.xdp.pass", xdp_stats.xdp_pass), + I40E_QUEUE_STAT("%s-%u.xdp.drop", xdp_stats.xdp_drop), + I40E_QUEUE_STAT("%s-%u.xdp.tx", xdp_stats.xdp_tx), + I40E_QUEUE_STAT("%s-%u.xdp.unknown", xdp_stats.xdp_unknown), + I40E_QUEUE_STAT("%s-%u.xdp.redirect", xdp_stats.xdp_redirect), + I40E_QUEUE_STAT("%s-%u.xdp.redirect_fail", xdp_stats.xdp_redirect_fail), +}; +#endif + +/** + * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement i40e_add_ethtool_stats and + * i40e_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +i40e_add_one_ethtool_stat(u64 *data, void *pointer, + const struct i40e_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __i40e_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__i40e_add_ethtool_stats(u64 **data, void *pointer, + const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __i40e_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. 
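For orientation, the stat-table pieces in this header compose as follows (see the macro defined just below). This is a minimal invented sketch, not driver code: struct my_counters, my_gstrings_stats and my_copy_stats do not exist in the patch; only the I40E_STAT and i40e_add_ethtool_stats definitions from this header are assumed.

        /* Invented example: describe two counters with I40E_STAT, then copy
         * them into an ethtool u64 buffer via the helpers above.
         */
        struct my_counters {
                u64 tx_done;
                u32 rx_drops;
        };

        static const struct i40e_stats my_gstrings_stats[] = {
                I40E_STAT(struct my_counters, "port.tx_done", tx_done),
                I40E_STAT(struct my_counters, "port.rx_drops", rx_drops),
        };

        static void my_copy_stats(u64 **data, struct my_counters *c)
        {
                /* writes one u64 per table entry and advances *data; a NULL
                 * 'c' would zero the slots instead of copying.
                 */
                i40e_add_ethtool_stats(data, c, my_gstrings_stats);
        }

The same table can also feed the string side: i40e_add_stat_strings(&p, my_gstrings_stats) emits one ETH_GSTRING_LEN-sized name per entry, which is what keeps get_strings and get_ethtool_stats in lockstep.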
+ * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define i40e_add_ethtool_stats(data, pointer, stats) \ + __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * i40e_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); + const struct i40e_stats *stats = i40e_gstrings_queue_stats; +#ifdef HAVE_NDO_GET_STATS64 + unsigned int start; +#endif + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ +#ifdef HAVE_NDO_GET_STATS64 + do { + start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); +#endif + for (i = 0; i < size; i++) { + i40e_add_one_ethtool_stat(&(*data)[i], ring, + &stats[i]); + } +#ifdef HAVE_NDO_GET_STATS64 + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +#ifdef HAVE_XDP_SUPPORT +/** + * i40e_add_rx_queue_xdp_stats - copy XDP statistics into supplied buffer + * @data: ethtool stats buffer + * @rx_ring: the rx ring to copy + * + * RX queue XDP statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * Assumes that queue stats are defined in i40e_gstrings_rx_queue_xdp_stats. If + * the ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +i40e_add_rx_queue_xdp_stats(u64 **data, struct i40e_ring *rx_ring) +{ + const unsigned int xdp_size = + ARRAY_SIZE(i40e_gstrings_rx_queue_xdp_stats); + const struct i40e_stats *xdp_stats = i40e_gstrings_rx_queue_xdp_stats; +#ifdef HAVE_NDO_GET_STATS64 + unsigned int start; +#endif + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ +#ifdef HAVE_NDO_GET_STATS64 + do { + start = !rx_ring ? 
0 : + u64_stats_fetch_begin_irq(&rx_ring->syncp); +#endif + for (i = 0; i < xdp_size; i++) { + i40e_add_one_ethtool_stat(&(*data)[i], rx_ring, + &xdp_stats[i]); + } +#ifdef HAVE_NDO_GET_STATS64 + } while (rx_ring && u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); +#endif + + /* Once we successfully copy the stats in, update the data pointer */ + *data += xdp_size; +} +#endif + +/** + * __i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define i40e_add_stat_strings(p, stats, ...) \ + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) diff --git a/drivers/net/ethernet/intel/i40e/i40e_filters.c b/drivers/net/ethernet/intel/i40e/i40e_filters.c new file mode 100644 index 000000000000..ce301a41e333 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_filters.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2020 Intel Corporation. */ + +#include "i40e_filters.h" + +/** + * __i40e_del_filter - Remove a specific filter from the VSI + * @vsi: VSI to remove from + * @f: the filter to remove from the list + * + * This function should be called instead of i40e_del_filter only if you know + * the exact filter you will remove already, such as via i40e_find_filter or + * i40e_find_mac. + * + * NOTE: This function is expected to be called with mac_filter_hash_lock + * being held. + * ANOTHER NOTE: This function MUST be called from within the context of + * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() + * instead of list_for_each_entry(). + **/ +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) +{ + if (!f) + return; + + /* If the filter was never added to firmware then we can just delete it + * directly and we don't want to set the status to remove or else an + * admin queue command will unnecessarily fire. + */ + if (f->state == I40E_FILTER_FAILED || f->state == I40E_FILTER_NEW) { + hash_del(&f->hlist); + kfree(f); + } else { + f->state = I40E_FILTER_REMOVE; + } + + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); +} + diff --git a/drivers/net/ethernet/intel/i40e/i40e_filters.h b/drivers/net/ethernet/intel/i40e/i40e_filters.h new file mode 100644 index 000000000000..7f618fdab9e9 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_filters.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2020 Intel Corporation.
*/ + +#ifndef _I40E_FILTERS_H_ +#define _I40E_FILTERS_H_ + +#include "i40e.h" + +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); + +#endif /* _I40E_FILTERS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_helper.h b/drivers/net/ethernet/intel/i40e/i40e_helper.h new file mode 100644 index 000000000000..4e25e230c4c6 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_helper.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ + +#ifndef _I40E_HELPER_H_ +#define _I40E_HELPER_H_ + +#include "i40e_alloc.h" + +/** + * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + __always_unused enum i40e_memory_type mtype, + u64 size, u32 alignment) +{ + struct i40e_pf *nf = (struct i40e_pf *)hw->back; + + mem->size = ALIGN(size, alignment); +#ifdef HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM + mem->va = dma_alloc_coherent(&nf->pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); +#else + mem->va = dma_zalloc_coherent(&nf->pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); +#endif + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * i40e_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +inline int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +{ + struct i40e_pf *nf = (struct i40e_pf *)hw->back; + + dma_free_coherent(&nf->pdev->dev, mem->size, mem->va, mem->pa); + mem->va = NULL; + mem->pa = 0; + mem->size = 0; + + return 0; +} + +/** + * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +inline int i40e_allocate_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size) +{ + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * i40e_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +inline int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) +{ + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + mem->va = NULL; + mem->size = 0; + + return 0; +} + +/* prototype */ +inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp); +inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp); +inline void i40e_release_spinlock_d(struct i40e_spinlock *sp); + +/** + * i40e_init_spinlock_d - OS specific spinlock init for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +static inline void i40e_init_spinlock_d(struct i40e_spinlock *sp) +{ + mutex_init((struct mutex *)sp); +} + +/** + * i40e_acquire_spinlock_d - OS specific spinlock acquire for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp) +{ + mutex_lock((struct mutex *)sp); +} + +/** + * i40e_release_spinlock_d - OS specific spinlock release for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +inline void i40e_release_spinlock_d(struct i40e_spinlock *sp) +{ + mutex_unlock((struct mutex *)sp); +} + +/** + * i40e_destroy_spinlock_d - OS specific 
spinlock destroy for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp) +{ + mutex_destroy((struct mutex *)sp); +} +#endif /* _I40E_HELPER_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 163ee8c6311c..5fb10c407166 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ -#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_status.h" #include "i40e_alloc.h" #include "i40e_hmc.h" +#ifndef I40E_NO_TYPE_HEADER #include "i40e_type.h" +#endif /** * i40e_add_sd_table_entry - Adds a segment descriptor to the table @@ -23,11 +24,11 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, enum i40e_sd_entry_type type, u64 direct_mode_sz) { - enum i40e_memory_type mem_type __attribute__((unused)); + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; + enum i40e_memory_type mem_type; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; - i40e_status ret_code = I40E_SUCCESS; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { @@ -67,9 +68,13 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, sd_entry->u.pd_table.pd_entry = (struct i40e_hmc_pd_entry *) sd_entry->u.pd_table.pd_entry_virt_mem.va; - sd_entry->u.pd_table.pd_page_addr = mem; + i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr, + &mem, sizeof(struct i40e_dma_mem), + I40E_NONDMA_TO_NONDMA); } else { - sd_entry->u.bp.addr = mem; + i40e_memcpy(&sd_entry->u.bp.addr, + &mem, sizeof(struct i40e_dma_mem), + I40E_NONDMA_TO_NONDMA); sd_entry->u.bp.sd_pd_index = sd_index; } /* initialize the sd entry */ @@ -82,7 +87,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) I40E_INC_BP_REFCNT(&sd_entry->u.bp); exit: - if (ret_code) + if (I40E_SUCCESS != ret_code) if (dma_mem_alloc_done) i40e_free_dma_mem(hw, &mem); @@ -111,7 +116,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, u32 pd_index, struct i40e_dma_mem *rsrc_pg) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; @@ -149,7 +154,8 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, pd_entry->rsrc_pg = false; } - pd_entry->bp.addr = *page; + i40e_memcpy(&pd_entry->bp.addr, page, + sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ @@ -159,7 +165,8 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, pd_addr += rel_pd_idx; /* Add the backing page physical address in the pd entry */ - memcpy(pd_addr, &page_desc, sizeof(u64)); + i40e_memcpy(pd_addr, &page_desc, sizeof(u64), + I40E_NONDMA_TO_DMA); pd_entry->sd_index = sd_idx; pd_entry->valid = true; @@ -189,7 +196,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_pd_entry *pd_entry; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_sd_entry *sd_entry; @@ -222,13 +229,13 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, I40E_DEC_PD_REFCNT(pd_table); pd_addr = (u64 *)pd_table->pd_page_addr.va; 
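A note on the i40e_memcpy()/i40e_memset() conversions in this hunk: the added direction argument is a transfer hint (I40E_NONDMA_TO_DMA, I40E_DMA_TO_NONDMA and so on) so the OS-independent HMC code can be retargeted by an osdep layer, and replacing struct assignments such as sd_entry->u.bp.addr = mem with an explicit i40e_memcpy() makes those copies visible to that layer instead of leaving them to the compiler. The following is only an assumed illustration of how a Linux osdep layer might satisfy the wrappers, not the patch's actual i40e_osdep.h:

        /* Assumed sketch: on a Linux build the direction hint is typically
         * unused and the wrappers collapse to the plain operations.
         */
        #define i40e_memcpy(dst, src, len, dir) memcpy((dst), (src), (len))
        #define i40e_memset(dst, ch, len, dir)  memset((dst), (ch), (len))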
pd_addr += rel_pd_idx; - memset(pd_addr, 0, sizeof(u64)); + i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM); I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); /* free memory here */ if (!pd_entry->rsrc_pg) - ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr); - if (ret_code) + ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (I40E_SUCCESS != ret_code) goto exit; if (!pd_table->ref_cnt) i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); @@ -244,7 +251,7 @@ exit: i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, u32 idx) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; /* get the entry and decrease its ref counter */ @@ -282,7 +289,7 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); - return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr); + return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); } /** @@ -293,7 +300,7 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, u32 idx) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; sd_entry = &hmc_info->sd_table.sd_entry[idx]; @@ -330,5 +337,5 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); - return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr); + return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 1c78de838857..ddfef160339c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index be24d42280d8..4e55a095eeb2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ -#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_type.h" @@ -79,7 +78,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, u32 fcoe_filt_num) { struct i40e_hmc_obj_info *obj, *full_obj; - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u64 l2fpm_size; u32 size_exp; @@ -114,7 +113,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", txq_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ @@ -137,7 +136,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", rxq_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ @@ -160,7 +159,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_cntx_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ @@ -183,7 +182,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_filt_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ @@ -204,7 +203,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, (sizeof(struct i40e_hmc_sd_entry) * hw->hmc.sd_table.sd_cnt)); if (ret_code) - goto init_lan_hmc_out; + goto free_hmc_out; hw->hmc.sd_table.sd_entry = (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; } @@ -212,6 +211,11 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, full_obj->size = l2fpm_size; init_lan_hmc_out: + return ret_code; +free_hmc_out: + if (hw->hmc.hmc_obj_virt_mem.va) + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + return ret_code; } @@ -233,9 +237,9 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; - if (!i40e_prep_remove_pd_page(hmc_info, idx)) + if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS) ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); return ret_code; @@ -260,9 +264,9 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; - if (!i40e_prep_remove_sd_bp(hmc_info, idx)) + if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS) ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); return ret_code; @@ -279,7 +283,7 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_create_obj_info *info) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; u32 pd_idx1 = 0, pd_lmt1 = 0; u32 pd_idx = 0, pd_lmt = 0; @@ -349,7 +353,7 @@ static i40e_status 
i40e_create_lan_hmc_object(struct i40e_hw *hw, ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, info->entry_type, sd_size); - if (ret_code) + if (I40E_SUCCESS != ret_code) goto exit_sd_error; sd_entry = &info->hmc_info->sd_table.sd_entry[j]; if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { @@ -366,7 +370,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, i, NULL); - if (ret_code) { + if (I40E_SUCCESS != ret_code) { pd_error = true; break; } @@ -439,9 +443,9 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, enum i40e_hmc_model model) { struct i40e_hmc_lan_create_obj_info info; - i40e_status ret_code = 0; u8 hmc_fn_id = hw->hmc.hmc_fn_id; struct i40e_hmc_obj_info *obj; + i40e_status ret_code = I40E_SUCCESS; /* Initialize part of the create object info struct */ info.hmc_info = &hw->hmc; @@ -457,9 +461,9 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, /* Make one big object, a single SD */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); - if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) goto try_type_paged; - else if (ret_code) + else if (ret_code != I40E_SUCCESS) goto configure_lan_hmc_out; /* else clause falls through the break */ break; @@ -469,7 +473,7 @@ try_type_paged: /* Make one big object in the PD table */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto configure_lan_hmc_out; break; default: @@ -523,7 +527,7 @@ configure_lan_hmc_out: static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_delete_obj_info *info) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; struct i40e_hmc_pd_table *pd_table; u32 pd_idx, pd_lmt, rel_pd_idx; u32 sd_idx, sd_lmt; @@ -588,7 +592,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; if (pd_table->pd_entry[rel_pd_idx].valid) { ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); - if (ret_code) + if (I40E_SUCCESS != ret_code) goto exit; } } @@ -609,12 +613,12 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { case I40E_SD_TYPE_DIRECT: ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); - if (ret_code) + if (I40E_SUCCESS != ret_code) goto exit; break; case I40E_SD_TYPE_PAGED: ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); - if (ret_code) + if (I40E_SUCCESS != ret_code) goto exit; break; default: @@ -659,7 +663,7 @@ i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw) #define I40E_HMC_STORE(_struct, _ele) \ offsetof(struct _struct, _ele), \ - FIELD_SIZEOF(struct _struct, _ele) + sizeof_field(struct _struct, _ele) struct i40e_context_ele { u16 offset; @@ -752,13 +756,13 @@ static void i40e_write_byte(u8 *hmc_bits, /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); - memcpy(&dest_byte, dest, sizeof(dest_byte)); + i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA); dest_byte &= ~mask; /* get the bits not changing */ dest_byte |= src_byte; /* add in the new bits */ /* put it all back */ - memcpy(dest, &dest_byte, sizeof(dest_byte)); + i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); } /** @@ -796,13 +800,13 @@ static void i40e_write_word(u8 *hmc_bits, /* get the current bits from the target bit 
string */ dest = hmc_bits + (ce_info->lsb / 8); - memcpy(&dest_word, dest, sizeof(dest_word)); + i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA); - dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ - dest_word |= cpu_to_le16(src_word); /* add in the new bits */ + dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ + dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ /* put it all back */ - memcpy(dest, &dest_word, sizeof(dest_word)); + i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); } /** @@ -848,13 +852,13 @@ static void i40e_write_dword(u8 *hmc_bits, /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); - memcpy(&dest_dword, dest, sizeof(dest_dword)); + i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA); - dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ - dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ + dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ + dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ /* put it all back */ - memcpy(dest, &dest_dword, sizeof(dest_dword)); + i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA); } /** @@ -900,13 +904,13 @@ static void i40e_write_qword(u8 *hmc_bits, /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); - memcpy(&dest_qword, dest, sizeof(dest_qword)); + i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA); - dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ - dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ + dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ + dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ /* put it all back */ - memcpy(dest, &dest_qword, sizeof(dest_qword)); + i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA); } /** @@ -920,9 +924,10 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw, enum i40e_hmc_lan_rsrc_type hmc_type) { /* clean the bit array */ - memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size); + i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size, + I40E_DMA_MEM); - return 0; + return I40E_SUCCESS; } /** @@ -959,12 +964,12 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, } } - return 0; + return I40E_SUCCESS; } /** * i40e_hmc_get_object_va - retrieves an object's virtual address - * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer + * @hw: pointer to the hw structure * @object_base: pointer to u64 to get the va * @rsrc_type: the hmc resource type * @obj_idx: hmc object index @@ -973,24 +978,20 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, * base pointer. This function is used for LAN Queue contexts. 
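The i40e_write_byte/word/dword/qword helpers in this hunk all follow one read-modify-write recipe: load the possibly unaligned little-endian target, clear the destination field with a mask, OR in the shifted source value, and store the result back. Isolated into a self-contained sketch (write_bits16 is an invented name, and it assumes the field fits entirely inside the 16-bit word at the target offset):

        static void write_bits16(u8 *bits, u32 lsb, u32 width, u16 val)
        {
                u16 mask = ((u16)1 << width) - 1;       /* field mask */
                u32 shift = lsb % 8;                    /* offset in the word */
                __le16 cur;

                memcpy(&cur, bits + lsb / 8, sizeof(cur));      /* current bits */
                cur &= ~cpu_to_le16(mask << shift);             /* keep the rest */
                cur |= cpu_to_le16((val & mask) << shift);      /* merge new field */
                memcpy(bits + lsb / 8, &cur, sizeof(cur));      /* store back */
        }

The real helpers use the i40e_memcpy()/CPU_TO_LE16 spellings introduced by this patch, but the bit manipulation is the same.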
**/ static -i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, - enum i40e_hmc_lan_rsrc_type rsrc_type, - u32 obj_idx) +i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, + u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) { - struct i40e_hmc_info *hmc_info = &hw->hmc; u32 obj_offset_in_sd, obj_offset_in_pd; + struct i40e_hmc_info *hmc_info = &hw->hmc; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; - if (NULL == hmc_info) { - ret_code = I40E_ERR_BAD_PTR; - hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n"); - goto exit; - } if (NULL == hmc_info->hmc_obj) { ret_code = I40E_ERR_BAD_PTR; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); @@ -1048,8 +1049,7 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(hw, &context_bytes, - I40E_HMC_LAN_TX, queue); + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1069,8 +1069,7 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(hw, &context_bytes, - I40E_HMC_LAN_TX, queue); + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1089,8 +1088,7 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(hw, &context_bytes, - I40E_HMC_LAN_RX, queue); + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; @@ -1110,8 +1108,7 @@ i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(hw, &context_bytes, - I40E_HMC_LAN_RX, queue); + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index c46a2c449e60..00b7d6de4134 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6031223eafab..b67de06e5f1b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,17 +1,29 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ -#include -#include -#include +#ifdef HAVE_XDP_SUPPORT #include - +#endif /* Local includes */ #include "i40e.h" +#include "i40e_helper.h" #include "i40e_diag.h" -#include "i40e_xsk.h" +#ifdef HAVE_VXLAN_RX_OFFLOAD +#if IS_ENABLED(CONFIG_VXLAN) +#include +#endif +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_GRE_ENCAP_OFFLOAD +#include +#endif /* HAVE_GRE_ENCAP_OFFLOAD */ +#ifdef HAVE_GENEVE_RX_OFFLOAD +#if IS_ENABLED(CONFIG_GENEVE) +#include +#endif +#endif /* HAVE_GENEVE_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD #include -#include +#endif /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -19,20 +31,25 @@ #define CREATE_TRACE_POINTS #include "i40e_trace.h" -const char i40e_driver_name[] = "i40e"; +char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = - "Intel(R) Ethernet Connection XL710 Network Driver"; + "Intel(R) 40-10 Gigabit Ethernet Connection Network Driver"; -#define DRV_KERN "-k" +#ifndef DRV_VERSION_LOCAL +#define DRV_VERSION_LOCAL +#endif /* DRV_VERSION_LOCAL */ + +#define DRV_VERSION_DESC "" #define DRV_VERSION_MAJOR 2 -#define DRV_VERSION_MINOR 8 -#define DRV_VERSION_BUILD 20 +#define DRV_VERSION_MINOR 11 +#define DRV_VERSION_BUILD 25 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." \ - __stringify(DRV_VERSION_BUILD) DRV_KERN + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) \ + DRV_VERSION_DESC __stringify(DRV_VERSION_LOCAL) const char i40e_driver_version_str[] = DRV_VERSION; -static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation."; +static const char i40e_copyright[] = "Copyright(c) 2013 - 2020 Intel Corporation."; /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); @@ -43,6 +60,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_clear_rss_config_user(struct i40e_vsi *vsi); static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired); static int i40e_reset(struct i40e_pf *pf); static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); @@ -50,12 +68,12 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf); static int i40e_restore_interrupt_scheme(struct i40e_pf *pf); static bool i40e_check_recovery_mode(struct i40e_pf *pf); static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw); +static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up); +static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type); - - /* i40e_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -76,16 +94,16 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0}, {PCI_VDEVICE(INTEL, 
I40E_DEV_ID_KX_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0}, /* required last entry */ @@ -94,86 +112,38 @@ static const struct pci_device_id i40e_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); #define I40E_MAX_VF_COUNT 128 +#define OPTION_UNSET -1 +#define I40E_PARAM_INIT { [0 ... I40E_MAX_NIC] = OPTION_UNSET} +#define I40E_MAX_NIC 64 +#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE) +#ifdef CONFIG_PCI_IOV +static int max_vfs[I40E_MAX_NIC+1] = I40E_PARAM_INIT; +module_param_array_named(max_vfs, max_vfs, int, NULL, 0); +MODULE_PARM_DESC(max_vfs, + "Number of Virtual Functions: 0 = disable (default), 1-" + __stringify(I40E_MAX_VF_COUNT) " = enable " + "this many VFs"); +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_SRIOV_CONFIGURE */ + static int debug = -1; -module_param(debug, uint, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +static int l4mode = L4_MODE_DISABLED; +module_param(l4mode, int, 0000); +MODULE_PARM_DESC(l4mode, "L4 cloud filter mode: 0=UDP,1=TCP,2=Both,-1=Disabled(default)"); + MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); -MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel(R) 40-10 Gigabit Ethernet Connection Network Driver"); +MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *i40e_wq; -/** - * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to fill out - * @size: size of memory requested - * @alignment: what to align the allocation to - **/ -int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, - u64 size, u32 alignment) +bool i40e_is_l4mode_enabled(void) { - struct i40e_pf *pf = (struct i40e_pf *)hw->back; - - mem->size = ALIGN(size, alignment); - mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, - GFP_KERNEL); - if (!mem->va) - return -ENOMEM; - - return 0; -} - -/** - * i40e_free_dma_mem_d - OS specific memory free for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to free - **/ -int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) -{ - struct i40e_pf *pf = (struct i40e_pf *)hw->back; - - dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); - mem->va = NULL; - mem->pa = 0; - mem->size = 0; - - return 0; -} - -/** - * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to fill out - * @size: size of memory requested - **/ -int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, - u32 size) -{ - mem->size = size; - mem->va = kzalloc(size, GFP_KERNEL); - - if (!mem->va) - return -ENOMEM; - - return 0; -} - -/** - * i40e_free_virt_mem_d - OS specific memory free for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem 
struct to free - **/ -int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) -{ - /* it's ok to kfree a NULL pointer */ - kfree(mem->va); - mem->va = NULL; - mem->size = 0; - - return 0; + return l4mode > L4_MODE_DISABLED; } /** @@ -279,6 +249,22 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) return NULL; } +/** + * i40e_find_vsi_from_seid - searches for the vsi with the given seid + * @pf: the pf structure to search for the vsi + * @seid: seid of the vsi it is searching for + **/ +struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_pf *pf, u16 seid) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) + return pf->vsi[i]; + + return NULL; +} + /** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure @@ -342,6 +328,13 @@ static void i40e_tx_timeout(struct net_device *netdev) } } +#ifdef CONFIG_DEBUG_FS + if (vsi->block_tx_timeout) { + netdev_info(netdev, "tx_timeout recovery disabled\n"); + return; + } +#endif + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) pf->tx_timeout_recovery_level = 1; /* reset after some time */ else if (time_before(jiffies, @@ -383,7 +376,9 @@ static void i40e_tx_timeout(struct net_device *netdev) set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); break; default: - netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); + set_bit(__I40E_DOWN_REQUESTED, pf->state); + set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); break; } @@ -398,11 +393,23 @@ static void i40e_tx_timeout(struct net_device *netdev) * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. **/ +#ifdef HAVE_NDO_GET_STATS64 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) { return &vsi->net_stats; } +#else +struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) +{ + /* It is possible for a VSI to not have a netdev */ + if (vsi->netdev) + return &vsi->netdev->stats; + else + return &vsi->net_stats; +} +#endif +#ifdef HAVE_NDO_GET_STATS64 /** * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring * @ring: Tx ring to get statistics from @@ -432,46 +439,58 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, * Returns the address of the device statistics structure. * The statistics are actually updated from the service task.
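The HAVE_NDO_GET_STATS64 split above recurs throughout this file: kernels that have ndo_get_stats64 accumulate into the 64-bit struct rtnl_link_stats64, while older kernels only carry the unsigned-long counters of struct net_device_stats. Reduced to its skeleton (I40E_NETDEV_STATS_TYPE is an invented name used only for this illustration):

        #ifdef HAVE_NDO_GET_STATS64
        #define I40E_NETDEV_STATS_TYPE  struct rtnl_link_stats64        /* 64-bit safe */
        #else
        #define I40E_NETDEV_STATS_TYPE  struct net_device_stats         /* legacy */
        #endif

In the driver itself the two types are spelled out at each declaration site, which is why the same #ifdef pair wraps every stats-returning function in these hunks.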
**/ +#ifdef HAVE_VOID_NDO_GET_STATS64 static void i40e_get_netdev_stats_struct(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif /* HAVE_VOID_NDO_GET_STATS64 */ { struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); - struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) +#ifdef HAVE_VOID_NDO_GET_STATS64 return; +#else + return stats; +#endif /* HAVE_VOID_NDO_GET_STATS_64 */ if (!vsi->tx_rings) +#ifdef HAVE_VOID_NDO_GET_STATS64 return; +#else + return stats; +#endif /* HAVE_VOID_NDO_GET_STATS_64 */ rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { u64 bytes, packets; unsigned int start; - ring = READ_ONCE(vsi->tx_rings[i]); - if (!ring) + tx_ring = READ_ONCE(vsi->tx_rings[i]); + if (!tx_ring) continue; - i40e_get_netdev_stats_struct_tx(ring, stats); - if (i40e_enabled_xdp_vsi(vsi)) { - ring++; - i40e_get_netdev_stats_struct_tx(ring, stats); - } + i40e_get_netdev_stats_struct_tx(tx_ring, stats); + rx_ring = &tx_ring[1]; - ring++; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; + if (i40e_enabled_xdp_vsi(vsi)) + i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats); } rcu_read_unlock(); @@ -483,7 +502,21 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + + return stats; +#endif } +#else +static struct net_device_stats *i40e_get_netdev_stats_struct( + struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + return i40e_get_vsi_stats_struct(vsi); +} +#endif /* HAVE_NDO_GET_STATS64 */ /** * i40e_vsi_reset_stats - Resets all stats of the given vsi @@ -491,7 +524,11 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, **/ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) { +#ifdef HAVE_NDO_GET_STATS64 struct rtnl_link_stats64 *ns; +#else + struct net_device_stats *ns; +#endif int i; if (!vsi) @@ -508,6 +545,10 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) sizeof(vsi->rx_rings[i]->stats)); memset(&vsi->rx_rings[i]->rx_stats, 0, sizeof(vsi->rx_rings[i]->rx_stats)); +#ifdef HAVE_XDP_SUPPORT + memset(&vsi->rx_rings[i]->xdp_stats, 0, + sizeof(vsi->rx_rings[i]->xdp_stats)); +#endif memset(&vsi->tx_rings[i]->stats, 0, sizeof(vsi->tx_rings[i]->stats)); memset(&vsi->tx_rings[i]->tx_stats, 0, @@ -543,6 +584,22 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) } } pf->hw_csum_rx_error = 0; +#ifdef I40E_ADD_PROBES + pf->tcp_segs = 0; + pf->tx_tcp_cso = 0; + pf->tx_udp_cso = 0; + pf->tx_sctp_cso = 0; + pf->tx_ip4_cso = 0; + pf->rx_tcp_cso = 0; + pf->rx_udp_cso = 0; + pf->rx_sctp_cso = 0; + pf->rx_ip4_cso = 0; + pf->rx_tcp_cso_err = 0; + pf->rx_udp_cso_err = 0; + pf->rx_sctp_cso_err = 0; + pf->rx_ip4_cso_err = 0; 
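The do/while loop around u64_stats_fetch_begin_irq() in the hunk above is the kernel's standard seqcount-style reader for 64-bit counters that writers update locklessly; the retry only triggers on 32-bit kernels, where a 64-bit store is not atomic. Isolated into a sketch, assuming only the syncp and stats members of struct i40e_ring as used above (read_ring_stats itself is an invented helper):

        static void read_ring_stats(struct i40e_ring *ring, u64 *pkts, u64 *bytes)
        {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        *pkts = ring->stats.packets;
                        *bytes = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
        }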
+ pf->hw_csum_rx_outer = 0; +#endif } /** @@ -702,11 +759,10 @@ void i40e_update_veb_stats(struct i40e_veb *veb) i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), veb->stat_offsets_loaded, &oes->tx_discards, &es->tx_discards); - if (hw->revision_id > 0) - i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), - veb->stat_offsets_loaded, - &oes->rx_unknown_protocol, - &es->rx_unknown_protocol); + i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), + veb->stat_offsets_loaded, + &oes->rx_unknown_protocol, &es->rx_unknown_protocol); + i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), veb->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); @@ -770,15 +826,22 @@ void i40e_update_veb_stats(struct i40e_veb *veb) static void i40e_update_vsi_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; +#ifdef HAVE_NDO_GET_STATS64 struct rtnl_link_stats64 *ons; struct rtnl_link_stats64 *ns; /* netdev stats */ +#else + struct net_device_stats *ons; + struct net_device_stats *ns; /* netdev stats */ +#endif struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; +#ifdef HAVE_NDO_GET_STATS64 unsigned int start; +#endif u64 tx_linearize; u64 tx_force_wb; u64 rx_p, rx_b; @@ -807,11 +870,15 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); +#ifdef HAVE_NDO_GET_STATS64 do { start = u64_stats_fetch_begin_irq(&p->syncp); +#endif packets = p->stats.packets; bytes = p->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); +#endif tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; @@ -821,11 +888,15 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) /* Rx queue is part of the same block as Tx queue */ p = &p[1]; +#ifdef HAVE_NDO_GET_STATS64 do { start = u64_stats_fetch_begin_irq(&p->syncp); +#endif packets = p->stats.packets; bytes = p->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); +#endif rx_b += bytes; rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; @@ -872,7 +943,7 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) struct i40e_hw_port_stats *osd = &pf->stats_offsets; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw; - u32 val; + int i; i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), @@ -1064,19 +1135,12 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)), &nsd->fd_atr_tunnel_match); - val = rd32(hw, I40E_PRTPM_EEE_STAT); - nsd->tx_lpi_status = - (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> - I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; - nsd->rx_lpi_status = - (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> - I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; - i40e_stat_update32(hw, I40E_PRTPM_TLPIC, - pf->stat_offsets_loaded, - &osd->tx_lpi_count, &nsd->tx_lpi_count); - i40e_stat_update32(hw, I40E_PRTPM_RLPIC, - pf->stat_offsets_loaded, - &osd->rx_lpi_count, &nsd->rx_lpi_count); + i40e_get_phy_lpi_status(hw, nsd); + i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count, + &osd->rx_lpi_count, &nsd->rx_lpi_count); + i40e_get_lpi_duration(hw, nsd, + &nsd->tx_lpi_duration, &nsd->rx_lpi_duration); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) @@ -1109,6 +1173,25 @@ void i40e_update_stats(struct i40e_vsi *vsi) i40e_update_vsi_stats(vsi); } +/** + * 
i40e_count_filters - counts VSI mac filters + * @vsi: the VSI to be searched + * + * Returns count of mac filters + **/ +int i40e_count_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + struct hlist_node *h; + int bkt; + int cnt = 0; + + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) + ++cnt; + + return cnt; +} + /** * i40e_find_filter - Search VSI filter list for specific mac/vlan filter * @vsi: the VSI to be searched @@ -1117,8 +1200,8 @@ void i40e_update_stats(struct i40e_vsi *vsi) * * Returns ptr to the filter object or NULL **/ -static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, - const u8 *macaddr, s16 vlan) +struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, + const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; u64 key; @@ -1153,7 +1236,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr) key = i40e_addr_to_hkey(macaddr); hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { - if ((ether_addr_equal(macaddr, f->macaddr))) + if (ether_addr_equal(macaddr, f->macaddr)) return f; } return NULL; @@ -1183,11 +1266,11 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) * i40e_sync_filters_subtask. * * Thus, we can simply use a boolean value, has_vlan_filters which we - * will set to true when we add a VLAN filter in i40e_add_filter. Then + * will set to true when we add a vlan filter in i40e_add_filter. Then * we have to perform the full search after deleting filters in * i40e_sync_filters_subtask, but we already have to search * filters here and can perform the check at the same time. This - * results in avoiding embedding a loop for VLAN mode inside another + * results in avoiding embedding a loop for vlan mode inside another * loop over all the filters, and should maintain correctness as noted * above. */ @@ -1196,7 +1279,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) /** * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary - * @vsi: the VSI to configure + * @vsi: the vsi to configure * @tmp_add_list: list of filters ready to be added * @tmp_del_list: list of filters ready to be deleted * @vlan_filters: the number of active VLAN filters @@ -1261,7 +1344,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, /* Update the remaining active filters */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { /* Combine the checks for whether a filter needs to be changed - * and then determine the new VLAN inside the if block, in + * and then determine the new vlan inside the if block, in * order to avoid duplicating code for adding the new filter * then deleting the old filter. */ @@ -1364,7 +1447,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, return NULL; /* Update the boolean indicating if we need to function in - * VLAN mode. + * vlan mode. */ if (vlan >= 0) vsi->has_vlan_filter = true; @@ -1372,6 +1455,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; f->state = I40E_FILTER_NEW; + INIT_HLIST_NODE(&f->hlist); key = i40e_addr_to_hkey(macaddr); @@ -1396,46 +1480,10 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, } /** - * __i40e_del_filter - Remove a specific filter from the VSI - * @vsi: VSI to remove from - * @f: the filter to remove from the list - * - * This function should be called instead of i40e_del_filter only if you know - * the exact filter you will remove already, such as via i40e_find_filter or - * i40e_find_mac. 
- * - * NOTE: This function is expected to be called with mac_filter_hash_lock - * being held. - * ANOTHER NOTE: This function MUST be called from within the context of - * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() - * instead of list_for_each_entry(). - **/ -void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) -{ - if (!f) - return; - - /* If the filter was never added to firmware then we can just delete it - * directly and we don't want to set the status to remove or else an - * admin queue command will unnecessarily fire. - */ - if ((f->state == I40E_FILTER_FAILED) || - (f->state == I40E_FILTER_NEW)) { - hash_del(&f->hlist); - kfree(f); - } else { - f->state = I40E_FILTER_REMOVE; - } - - vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); -} - -/** - * i40e_del_filter - Remove a MAC/VLAN filter from the VSI + * i40e_del_filter - Remove a mac/vlan filter from the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address - * @vlan: the VLAN + * @vlan: the vlan * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. @@ -1508,7 +1556,6 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) bool found = false; int bkt; - lockdep_assert_held(&vsi->mac_filter_hash_lock); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (ether_addr_equal(macaddr, f->macaddr)) { __i40e_del_filter(vsi, f); @@ -1557,17 +1604,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p) netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); /* Copy the address first, so that we avoid a possible race with - * .set_rx_mode(). - * - Remove old address from MAC filter - * - Copy new address - * - Add new address to MAC filter + * .set_rx_mode(). If we copy after changing the address in the filter + * list, we might open ourselves to a narrow race window where + * .set_rx_mode could delete our dev_addr filter and prevent traffic + * from passing. 
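+ *
+ * (Editor's note, not part of the patch: with this ordering the new
+ * address is published to netdev->dev_addr first, and the filter delete
+ * and add below both run under mac_filter_hash_lock, so a concurrent
+ * .set_rx_mode() can no longer observe the half-updated state.)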
*/ + ether_addr_copy(netdev->dev_addr, addr->sa_data); + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); - ether_addr_copy(netdev->dev_addr, addr->sa_data); - i40e_add_mac_filter(vsi, netdev->dev_addr); + i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); - if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; @@ -1590,6 +1637,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries **/ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) @@ -1663,6 +1712,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) return ret; } +#ifdef __TC_MQPRIO_MODE_MAX /** * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config * @vsi: the VSI being configured, @@ -1749,6 +1799,7 @@ static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, } return 0; } +#endif /** * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc @@ -1767,8 +1818,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; u16 sections = 0; u8 netdev_tc = 0; + u16 qcount = 0; u16 numtc = 1; - u16 qcount; u8 offset; u16 qmap; int i; @@ -1805,7 +1856,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { - /* TC is enabled */ int pow, num_qps; switch (vsi->type) { @@ -1860,6 +1910,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + /* This code helps add more queue to the VSI if we have + * more cores than RSS can support, the higher cores will + * be served by ATR or other filters. + */ if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; else if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -1952,11 +2006,16 @@ static void i40e_set_rx_mode(struct net_device *netdev) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } + + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); } /** * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries - * @vsi: Pointer to VSI struct + * @vsi: Pointer to vsi struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * @@ -2188,7 +2247,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, * * There are different ways of setting promiscuous mode on a PF depending on * what state/environment we're in. This identifies and sets it appropriately. - * Returns 0 on success. + * Returns I40E_SUCCESS on success. 
**/ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) { @@ -2248,6 +2307,56 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) return aq_ret; } + /** + * i40e_set_switch_mode - sets up switch mode correctly + * @pf: working PF + * @l4type: TCP, UDP, or both + * + * Sets up switch mode correctly + **/ +static void i40e_set_switch_mode(struct i40e_pf *pf, u8 l4type) +{ + struct i40e_hw *hw; + u8 mode; + int ret; + + if (!pf) + return; + + hw = &pf->hw; + + /* Set Bit 7 to be valid */ + mode = I40E_AQ_SET_SWITCH_BIT7_VALID; + + /* We only support destination port filters, so don't set the + * source port bit here. + */ + if (l4type > I40E_AQ_SET_SWITCH_L4_TYPE_BOTH || + l4type < I40E_AQ_SET_SWITCH_L4_TYPE_TCP) { + dev_warn(&pf->pdev->dev, + "invalid L4 type 0x%x, unable to set switch mode\n", + l4type); + return; + } + + mode |= l4type; + + /* Set cloud filter mode */ + mode |= I40E_AQ_SET_SWITCH_MODE_L4_PORT; + + dev_dbg(&pf->pdev->dev, "setting switch mode to 0x%x\n", mode); + /* Prep mode field for set_switch_config */ + ret = i40e_aq_set_switch_config(hw, 0, 0, mode, NULL); + /* If the driver is reloaded, the AQ call will fail. So don't make a + * big deal about it. + */ + if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) + dev_dbg(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); +} + /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI @@ -2378,7 +2487,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; } else { del_list[num_del].vlan_tag = - cpu_to_le16((u16)(f->vlan)); + CPU_TO_LE16((u16)(f->vlan)); } cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; @@ -2443,13 +2552,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { add_list[num_add].vlan_tag = - cpu_to_le16((u16)(new->f->vlan)); + CPU_TO_LE16((u16)(new->f->vlan)); } add_list[num_add].queue_number = 0; /* set invalid match method for later detection */ add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; - add_list[num_add].flags = cpu_to_le16(cmd_flags); + add_list[num_add].flags = CPU_TO_LE16(cmd_flags); num_add++; /* flush a full buffer */ @@ -2503,7 +2612,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); vsi->promisc_threshold = 0; } - /* if the VF is not trusted do not do promisc */ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); @@ -2558,6 +2666,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } + } out: /* if something went wrong then set the changed flag so we try again */ @@ -2590,18 +2699,14 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) if (!pf) return; + if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { - set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); - return; - } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); - if (ret) { /* come back and try again later */ set_bit(__I40E_MACVLAN_SYNC_PENDING, @@ -2610,7 +2715,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } - 
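/* Editor's note (not part of the patch): the subtask above follows the
 * usual deferred-work idiom: producers set a "sync pending" bit, the
 * service task claims the work with test_and_clear_bit(), and on failure
 * the bit is set again so the sync is retried on the next service pass.
 * A minimal sketch with hypothetical names:
 *
 *	if (!test_and_clear_bit(MY_SYNC_PENDING, state))
 *		return;
 *	if (do_sync() < 0)
 *		set_bit(MY_SYNC_PENDING, state);
 */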
clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -2635,16 +2739,41 @@ static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi) static int i40e_change_mtu(struct net_device *netdev, int new_mtu) { struct i40e_netdev_priv *np = netdev_priv(netdev); + int max_frame = new_mtu + I40E_PACKET_HDR_PAD; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - if (i40e_enabled_xdp_vsi(vsi)) { - int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) + return -EINVAL; - if (frame_size > i40e_max_xdp_frame_size(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + if (max_frame > i40e_max_xdp_frame_size(vsi)) return -EINVAL; } +#ifndef HAVE_NDO_FEATURES_CHECK + + /* MTU < 576 causes problems with TSO */ + if (new_mtu < 576) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; +#ifdef HAVE_NDO_SET_FEATURES + } else { +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + if (netdev->wanted_features & NETIF_F_TSO) + netdev->features |= NETIF_F_TSO; + if (netdev->wanted_features & NETIF_F_TSO6) + netdev->features |= NETIF_F_TSO6; +#else + if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO) + netdev->features |= NETIF_F_TSO; + if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO6) + netdev->features |= NETIF_F_TSO6; +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_FEATURES */ + } +#endif /* ! HAVE_NDO_FEATURES_CHECK */ netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; @@ -2655,6 +2784,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL) /** * i40e_ioctl - Access the hwtstamp interface * @netdev: network interface device structure @@ -2663,19 +2793,26 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) **/ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { +#ifdef HAVE_PTP_1588_CLOCK struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; +#endif /* HAVE_PTP_1588_CLOCK */ switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP case SIOCGHWTSTAMP: return i40e_ptp_get_ts_config(pf, ifr); +#endif case SIOCSHWTSTAMP: return i40e_ptp_set_ts_config(pf, ifr); +#endif /* HAVE_PTP_1588_CLOCK */ default: return -EOPNOTSUPP; } } +#endif /** * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI * @vsi: the vsi being adjusted @@ -2685,7 +2822,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; - /* Don't modify stripping options if a port VLAN is active */ + /* Don't modify stripping options if a port vlan is active */ if (vsi->info.pvid) return; @@ -2719,7 +2856,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; - /* Don't modify stripping options if a port VLAN is active */ + /* Don't modify stripping options if a port vlan is active */ if (vsi->info.pvid) return; @@ -2741,10 +2878,32 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) "update vlan stripping failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); + vsi->back->hw.aq.asq_last_status)); } } +#ifdef HAVE_VLAN_RX_REGISTER +/** + * i40e_vlan_rx_register - Setup or shutdown vlan offload + * @netdev: network interface to be adjusted + * @grp: new vlan group list, NULL 
if disabling + **/ +static void i40e_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + bool enable; + + vsi->vlgrp = grp; + enable = (grp || (vsi->back->flags & I40E_FLAG_DCB_ENABLED)); + if (enable) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); +} +#endif /* HAVE_VLAN_RX_REGISTER */ + /** * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address * @vsi: the vsi being configured @@ -2753,7 +2912,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) * This is a helper function for adding a new MAC/VLAN filter with the * specified VLAN for each existing MAC address already in the hash table. * This function does *not* perform any accounting to update filters based on - * VLAN mode. + * vlan mode. * * NOTE: this function expects to be called while under the * mac_filter_hash_lock @@ -2780,17 +2939,14 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) } /** - * i40e_vsi_add_vlan - Add VSI membership for given VLAN - * @vsi: the VSI being configured - * @vid: VLAN id to be added + * i40e_vsi_add_vlan - Add vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be added **/ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; - if (vsi->info.pvid) - return -EINVAL; - /* The network stack will attempt to add VID=0, with the intention to * receive priority tagged packets with a VLAN of 0. Our HW receives * these packets by default when configured to receive untagged @@ -2842,9 +2998,9 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) } /** - * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN - * @vsi: the VSI being configured - * @vid: VLAN id to be removed + * i40e_vsi_kill_vlan - Remove vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be removed **/ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) { @@ -2869,21 +3025,56 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) * * net_device_ops implementation for adding vlan ids **/ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int i40e_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +#else +static int i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif +#else +static void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; int ret = 0; if (vid >= VLAN_N_VID) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID return -EINVAL; +#else + return; +#endif ret = i40e_vsi_add_vlan(vsi, vid); +#ifndef HAVE_VLAN_RX_REGISTER if (!ret) set_bit(vid, vsi->active_vlans); +#endif /* !HAVE_VLAN_RX_REGISTER */ +#ifndef HAVE_NETDEV_VLAN_FEATURES + /* Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. 
+ */ + if (vsi->vlgrp) { + struct vlan_group *vlgrp = vsi->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + v_netdev->enc_features |= netdev->enc_features; +#endif + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID return ret; +#endif } /** @@ -2892,15 +3083,43 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, * @proto: unused protocol value * @vid: vlan id to be added **/ +#ifdef NETIF_F_HW_VLAN_CTAG_RX static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +#else +static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, u16 vid) +#endif { +#if (!defined(HAVE_NETDEV_VLAN_FEATURES) || !defined(HAVE_VLAN_RX_REGISTER)) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; +#endif if (vid >= VLAN_N_VID) return; +#ifndef HAVE_VLAN_RX_REGISTER set_bit(vid, vsi->active_vlans); +#endif /* !HAVE_VLAN_RX_REGISTER */ +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. + */ + if (vsi->vlgrp) { + struct vlan_group *vlgrp = vsi->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + + if (v_netdev) { + v_netdev->features |= netdev->features; +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + v_netdev->enc_features |= netdev->enc_features; +#endif + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ } /** @@ -2911,8 +3130,16 @@ static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, * * net_device_ops implementation for removing vlan ids **/ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +#else +static int i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif +#else +static void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -2922,10 +3149,14 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, * already printed from the other function */ i40e_vsi_kill_vlan(vsi, vid); +#ifndef HAVE_VLAN_RX_REGISTER clear_bit(vid, vsi->active_vlans); +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID return 0; +#endif } /** @@ -2939,14 +3170,40 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) if (!vsi->netdev) return; +#ifdef HAVE_VLAN_RX_REGISTER + i40e_vlan_rx_register(vsi->netdev, vsi->vlgrp); + + if (vsi->vlgrp) { + for (vid = 0; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(vsi->vlgrp, vid)) + continue; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), + vid); +#else + i40e_vlan_rx_add_vid_up(vsi->netdev, vid); +#endif + } + } +#else /* HAVE_VLAN_RX_REGISTER */ +#ifdef NETIF_F_HW_VLAN_CTAG_RX if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); +#else + if (vsi->netdev->features & NETIF_F_HW_VLAN_RX) + i40e_vlan_stripping_enable(vsi); +#endif else i40e_vlan_stripping_disable(vsi); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) +#ifdef NETIF_F_HW_VLAN_CTAG_RX 
i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), vid); +#else + i40e_vlan_rx_add_vid_up(vsi->netdev, vid); +#endif +#endif } /** @@ -2973,7 +3230,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) "add pvid failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); + vsi->back->hw.aq.asq_last_status)); return -ENOENT; } @@ -2992,6 +3249,44 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) i40e_vlan_stripping_disable(vsi); } +/** + * i40e_get_cloud_filter_type - Get cloud filter type + * @flags: set of enabled fields + * @type: location to return type + * + * Given the set of flags indicating which fields are active, look up the type + * number for programming the cloud filter in firmware. If the flags are + * invalid, return I40E_ERR_CONFIG. @type may be NULL, in which case the + * function may be used to verify that the flags would produce a valid type. + **/ +int i40e_get_cloud_filter_type(u8 flags, u16 *type) +{ + static const u16 table[128] = { + [I40E_CLOUD_FILTER_FLAGS_OMAC] = + I40E_AQC_ADD_CLOUD_FILTER_OMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, + [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, + [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = + I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, + [I40E_CLOUD_FILTER_FLAGS_IIP] = + I40E_AQC_ADD_CLOUD_FILTER_IIP, + }; + + if (flags >= ARRAY_SIZE(table) || table[flags] == 0) + return I40E_ERR_CONFIG; + + /* Return type if we're given space to do so */ + if (type) + *type = table[flags]; + + return 0; +} /** * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources @@ -3003,7 +3298,7 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) * * Return 0 on success, negative on failure **/ -static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) +int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) { int i, err = 0; @@ -3015,7 +3310,6 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); - return err; } @@ -3052,7 +3346,7 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) * * Return 0 on success, negative on failure **/ -static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) +int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) { int i, err = 0; @@ -3088,38 +3382,53 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) **/ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { +#ifndef HAVE_XPS_QOS_SUPPORT + struct i40e_vsi *vsi = ring->vsi; +#endif int cpu; if (!ring->q_vector || !ring->netdev || ring->ch) return; +#ifndef HAVE_XPS_QOS_SUPPORT + /* Some older kernels do not support XPS with QoS */ + if (vsi->tc_config.numtc > 1) { +#ifndef HAVE_NETDEV_TC_RESETS_XPS + /* Additionally, some kernels do not properly clear the XPS + * mapping when the number of traffic classes is changed. In + * order to support these kernels we work around this by + * setting the XPS mapping to the empty cpu set. 
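+ *
+ * (Editor's note: zalloc_cpumask_var() below hands back a zeroed, i.e.
+ * empty, mask, so passing it to netif_set_xps_queue() clears any stale
+ * XPS CPU mapping for this queue.)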
+ */ + cpumask_var_t mask; + + /* Only clear the settings if we initialized XPS */ + if (!test_and_clear_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) + return; + + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) + return; + + netif_set_xps_queue(ring->netdev, mask, ring->queue_index); + free_cpumask_var(mask); +#endif /* !HAVE_NETDEV_TC_RESETS_XPS */ + return; + } + +#endif /* !HAVE_XPS_QOS_SUPPORT */ /* We only initialize XPS once, so as not to overwrite user settings */ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) return; cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); - netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), +#ifndef HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK + /* In kernels before 3.12 the second parameter has no const qualifier. + * It is generating warning in older kernels. + */ + netif_set_xps_queue(ring->netdev, (struct cpumask *)get_cpu_mask(cpu), ring->queue_index); -} - -/** - * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled - * @ring: The Tx or Rx ring - * - * Returns the UMEM or NULL. - **/ -static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) -{ - bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); - int qid = ring->queue_index; - - if (ring_is_xdp(ring)) - qid -= ring->vsi->alloc_queue_pairs; - - if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) - return NULL; - - return xdp_get_umem_from_qid(ring->vsi->netdev, qid); +#else /* !HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK */ + netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); +#endif /* !HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK */ } /** @@ -3137,9 +3446,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) i40e_status err = 0; u32 qtx_ctl = 0; - if (ring_is_xdp(ring)) - ring->xsk_umem = i40e_xsk_umem(ring); - /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; @@ -3147,7 +3453,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } else { ring->atr_sample_rate = 0; } - /* configure XPS */ i40e_config_xps_tx_ring(ring); @@ -3159,7 +3464,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.qlen = ring->count; tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); +#ifdef HAVE_PTP_1588_CLOCK tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); +#endif /* HAVE_PTP_1588_CLOCK */ /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) tx_ctx.head_wb_ena = 1; @@ -3249,46 +3556,13 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; - bool ok; - int ret; bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); - if (ring->vsi->type == I40E_VSI_MAIN) - xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - - ring->xsk_umem = i40e_xsk_umem(ring); - if (ring->xsk_umem) { - ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - - XDP_PACKET_HEADROOM; - /* For AF_XDP ZC, we disallow packets to span on - * multiple buffers, thus letting us skip that - * handling in the fast-path. 
- */ - chain_len = 1; - ring->zca.free = i40e_zca_free; - ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_ZERO_COPY, - &ring->zca); - if (ret) - return ret; - dev_info(&vsi->back->pdev->dev, - "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", - ring->queue_index); - - } else { - ring->rx_buf_len = vsi->rx_buf_len; - if (ring->vsi->type == I40E_VSI_MAIN) { - ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, - NULL); - if (ret) - return ret; - } - } + ring->rx_buf_len = vsi->rx_buf_len; rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -3297,7 +3571,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.qlen = ring->count; /* use 32 byte descriptors */ +#ifdef I40E_32BYTE_RX rx_ctx.dsize = 1; +#else + /* use 16 byte descriptors */ + rx_ctx.dsize = 0; +#endif /* descriptor type is always zero * rx_ctx.dtype = 0; @@ -3305,10 +3584,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.hsplit_0 = 0; rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); - if (hw->revision_id == 0) - rx_ctx.lrxqthresh = 0; - else - rx_ctx.lrxqthresh = 1; + rx_ctx.lrxqthresh = 1; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; /* this controls whether VLAN is stripped from inner headers */ @@ -3344,18 +3620,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - ok = ring->xsk_umem ? - i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : - !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); - if (!ok) { - /* Log this in case the user has forgotten to give the kernel - * any buffers, even later in the application. - */ - dev_info(&vsi->back->pdev->dev, - "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", - ring->xsk_umem ? "UMEM enabled " : "", - ring->queue_index, pf_q); - } + i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); return 0; } @@ -3374,7 +3639,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); - if (err || !i40e_enabled_xdp_vsi(vsi)) + if (!i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) @@ -3393,6 +3658,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) { int err = 0; u16 i; +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT + u16 max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + + if (!vsi->netdev) + max_frame = I40E_RXBUFFER_2048; + else if (vsi->netdev->mtu + I40E_PACKET_HDR_PAD > max_frame) + max_frame = vsi->netdev->mtu + I40E_PACKET_HDR_PAD; + + vsi->max_frame = max_frame; + vsi->rx_buf_len = max_frame; +#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { vsi->max_frame = I40E_MAX_RXBUFFER; @@ -3408,6 +3684,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) vsi->rx_buf_len = (PAGE_SIZE < 8192) ? 
I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; } +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) @@ -3460,6 +3737,7 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { if (vsi->netdev) i40e_set_rx_mode(vsi->netdev); + } /** @@ -3478,7 +3756,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; - /* Reset FDir counters as we're replaying all existing filters */ + /* reset FDIR counters as we're replaying all existing filters */ pf->fd_tcp4_filter_cnt = 0; pf->fd_udp4_filter_cnt = 0; pf->fd_sctp4_filter_cnt = 0; @@ -3490,6 +3768,209 @@ } } +/** + * i40e_cloud_filter_restore - Restore the switch's cloud filters + * @pf: Pointer to the targeted PF + * + * This function replays the cloud filter hlist into the hw switch + **/ +static void i40e_cloud_filter_restore(struct i40e_pf *pf) +{ + struct i40e_cloud_filter *filter; + struct hlist_node *node; + + hlist_for_each_entry_safe(filter, node, + &pf->cloud_filter_list, cloud_node) { + i40e_add_del_cloud_filter_ex(pf, filter, true); + } +} + +/** + * i40e_set_cld_element - sets cloud filter element data + * @filter: cloud filter rule + * @cld: ptr to cloud filter element data + * + * This is a helper function to copy data into the cloud filter element + **/ +static inline void +i40e_set_cld_element(struct i40e_cloud_filter *filter, + struct i40e_aqc_cloud_filters_element_data *cld) +{ + int i, j; + u32 ipa; + + memset(cld, 0, sizeof(*cld)); + ether_addr_copy(cld->outer_mac, filter->dst_mac); + ether_addr_copy(cld->inner_mac, filter->src_mac); + + if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) { + /* copy parameters from filter to cloud filters element + * which are not specific to IP protos + */ + ether_addr_copy(cld->outer_mac, filter->outer_mac); + ether_addr_copy(cld->inner_mac, filter->inner_mac); + cld->inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan)); + cld->tenant_id = cpu_to_le32(filter->tenant_id); + return; + } + + if (filter->n_proto == ETH_P_IPV6) { +#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) + for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6); + i++, j += 2) { + ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); + memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa)); + } + } else { + ipa = be32_to_cpu(filter->dst_ipv4); + memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); + } + + cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); + + /* tenant_id is not supported by FW now, once the support is enabled + * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) + */ + if (filter->tenant_id) + return; +} + +/** + * i40e_add_del_cloud_filter_ex - Add/del cloud filter using big_buf + * @pf: working PF + * @filter: cloud filter rule + * @add: if true, add, if false, delete + * + * Add or delete a cloud filter for a specific flow spec using big buffer. + * Returns 0 if the filter was successfully added.
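+ *
+ * (Editor's illustration with hypothetical values: a caller fills a
+ * struct i40e_cloud_filter with the target seid and a non-zero
+ * destination port, e.g. filter->dst_port = htons(4789), then calls
+ * i40e_add_del_cloud_filter_ex(pf, &filter, true) to install it.)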
+ **/ +int i40e_add_del_cloud_filter_ex(struct i40e_pf *pf, + struct i40e_cloud_filter *filter, + bool add) +{ + struct i40e_aqc_cloud_filters_element_bb cld_filter; + int ret; + + if (!i40e_is_l4mode_enabled()) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + return i40e_add_del_cloud_filter(vsi, filter, add); + } + + /* Make sure port is specified, otherwise bail out, for channel + * specific cloud filter needs 'L4 port' to be non-zero + */ + if (!filter->dst_port) + return -EINVAL; + + memset(&cld_filter, 0, sizeof(cld_filter)); + + /* Switch is in Mode 1, so this is an L4 port filter */ + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); + + /* Now copy L4 port in Byte 6..7 in general fields */ + cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = + be16_to_cpu(filter->dst_port); + + if (add) + ret = i40e_aq_add_cloud_filters_bb(&pf->hw, + filter->seid, + &cld_filter, 1); + else + ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, + filter->seid, + &cld_filter, 1); + + if (ret) + dev_err(&pf->pdev->dev, + "fail to %s cloud filter err %d aq_err %d\n", + add ? "add" : "delete", + ret, pf->hw.aq.asq_last_status); + + dev_info(&pf->pdev->dev, + "%s cloud filter for VSI: %d, L4 port: %d\n", + add ? "add" : "delete", + filter->seid, be16_to_cpu(filter->dst_port)); + + return ret; +} + +/** + * i40e_add_del_cloud_filter - Add/del cloud filter + * @vsi: pointer to VSI + * @filter: cloud filter rule + * @add: if true, add, if false, delete + * + * Add or delete a cloud filter for a specific flow spec. + * Returns 0 if the filter were successfully added. + **/ +int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, bool add) +{ + struct i40e_aqc_cloud_filters_element_data cld_filter; + struct i40e_pf *pf = vsi->back; + int ret; + static const u16 flag_table[128] = { + [I40E_CLOUD_FILTER_FLAGS_OMAC] = + I40E_AQC_ADD_CLOUD_FILTER_OMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, + [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, + [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = + I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, + [I40E_CLOUD_FILTER_FLAGS_IIP] = + I40E_AQC_ADD_CLOUD_FILTER_IIP, + }; + + if (filter->flags >= ARRAY_SIZE(flag_table)) + return I40E_ERR_CONFIG; + + /* copy element needed to add cloud filter from filter */ + i40e_set_cld_element(filter, &cld_filter); + + if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) + cld_filter.flags = cpu_to_le16(filter->tunnel_type << + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); + + /* copy queue number from filter to pass to cloud filter engine + * with flags for targeting traffic to specific queue + */ + if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC) { + cld_filter.flags |= + cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE); + cld_filter.queue_number = cpu_to_le16(filter->queue_id); + } + + if (filter->n_proto == ETH_P_IPV6) + cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | + I40E_AQC_ADD_CLOUD_FLAGS_IPV6); + else + cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | + I40E_AQC_ADD_CLOUD_FLAGS_IPV4); + + if (add) + ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, + &cld_filter, 1); + else + ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, + &cld_filter, 1); + if (ret) + dev_dbg(&pf->pdev->dev, + "Failed to %s cloud filter 
using l4 port %u, err %d aq_err %d\n", + add ? "add" : "delete", filter->dst_port, ret, + pf->hw.aq.asq_last_status); + else + dev_info(&pf->pdev->dev, + "%s cloud filter for VSI: %d\n", + add ? "Added" : "Deleted", filter->seid); + return ret; +} + /** * i40e_vsi_configure - Set up the VSI for action * @vsi: the VSI being configured @@ -3534,56 +4015,42 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), - q_vector->rx.target_itr); + q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), - q_vector->tx.target_itr); + q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); - /* Linked list for the queuepairs assigned to this vector */ + /* begin of linked list for RX queue assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; u32 val; - val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | - (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | - (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | - (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | - (I40E_QUEUE_TYPE_TX << - I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); - + /* RX queue in linked list with next queue set to TX */ + val = I40E_QINT_RQCTL_VAL(nextqp, vector, TX); wr32(hw, I40E_QINT_RQCTL(qp), val); if (has_xdp) { - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | - (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | - (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | - (I40E_QUEUE_TYPE_TX << - I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - + /* TX queue with next queue set to TX */ + val = I40E_QINT_TQCTL_VAL(qp, vector, TX); wr32(hw, I40E_QINT_TQCTL(nextqp), val); } - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | - (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | - ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | - (I40E_QUEUE_TYPE_RX << - I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); + /* TX queue with next RX or end of linked list */ + val = I40E_QINT_TQCTL_VAL((qp + 1), vector, RX); /* Terminate the linked list */ if (q == (q_vector->num_ringpairs - 1)) - val |= (I40E_QUEUE_END_OF_LIST << - I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); + val |= (I40E_QUEUE_END_OF_LIST + << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(qp), val); qp++; @@ -3614,12 +4081,13 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; - if (pf->flags & I40E_FLAG_IWARP_ENABLED) val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; +#ifdef HAVE_PTP_1588_CLOCK if (pf->flags & I40E_FLAG_PTP) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; +#endif /* HAVE_PTP_1588_CLOCK */ wr32(hw, I40E_PFINT_ICR0_ENA, val); @@ -3641,16 +4109,15 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - u32 val; /* set the ITR configuration */ q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); - wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), 
q_vector->rx.target_itr); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); - wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr); + wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; i40e_enable_misc_int_causes(pf); @@ -3658,28 +4125,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); - /* Associate the queue pair to the vector and enable the queue int */ - val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | - (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | - (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| - (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); + /* Associate the queue pair to the vector and enable the queue + * interrupt RX queue in linked list with next queue set to TX + */ + wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX)); - wr32(hw, I40E_QINT_RQCTL(0), val); - - if (i40e_enabled_xdp_vsi(vsi)) { - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)| - (I40E_QUEUE_TYPE_TX - << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - - wr32(hw, I40E_QINT_TQCTL(nextqp), val); + if (nextqp) { + /* TX queue in linked list with next queue set to TX */ + wr32(hw, I40E_QINT_TQCTL(nextqp), + I40E_QINT_TQCTL_VAL(nextqp, 0, TX)); } - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | - (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); - - wr32(hw, I40E_QINT_TQCTL(0), val); + /* last TX queue so the next RX queue doesn't matter */ + wr32(hw, I40E_QINT_TQCTL(0), + I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX)); i40e_flush(hw); } @@ -3730,6 +4189,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) return IRQ_HANDLED; } +#ifdef HAVE_IRQ_AFFINITY_NOTIFY /** * i40e_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed @@ -3756,6 +4216,7 @@ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, * receive notifications. **/ static void i40e_irq_affinity_release(struct kref *ref) {} +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ /** * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts @@ -3764,7 +4225,7 @@ static void i40e_irq_affinity_release(struct kref *ref) {} * * Allocates MSI-X vectors and requests interrupts from the kernel. 
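 *
 * (Editor's note: the per-vector handler requested here is
 * i40e_msix_clean_rings(), defined above; when the kernel supports it,
 * the loop below also registers an affinity-change notifier and seeds an
 * affinity hint spread across online CPUs via cpumask_local_spread().)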
**/ -static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) +int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct i40e_pf *pf = vsi->back; @@ -3773,7 +4234,9 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) int tx_int_idx = 0; int vector, err; int irq_num; +#ifdef HAVE_IRQ_AFFINITY_HINT int cpu; +#endif for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; @@ -3805,10 +4268,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) goto free_queue_irqs; } +#ifdef HAVE_IRQ_AFFINITY_NOTIFY /* register for affinity change notifications */ q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT /* Spread affinity hints out across online CPUs. * * get_cpu_mask returns a static constant mask with @@ -3817,6 +4283,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) */ cpu = cpumask_local_spread(q_vector->v_idx, -1); irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); +#endif /* HAVE_IRQ_AFFINITY_HINT */ } vsi->irqs_ready = true; @@ -3826,8 +4293,12 @@ free_queue_irqs: while (vector) { vector--; irq_num = pf->msix_entries[base + vector].vector; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY irq_set_affinity_notifier(irq_num, NULL); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT irq_set_affinity_hint(irq_num, NULL); +#endif free_irq(irq_num, &vsi->q_vectors[vector]); } return err; @@ -3944,7 +4415,6 @@ static irqreturn_t i40e_intr(int irq, void *data) if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; - if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; @@ -4008,6 +4478,7 @@ static irqreturn_t i40e_intr(int irq, void *data) rd32(hw, I40E_PFHMC_ERRORDATA)); } +#ifdef HAVE_PTP_1588_CLOCK if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); @@ -4017,6 +4488,7 @@ static irqreturn_t i40e_intr(int irq, void *data) } } +#endif /* HAVE_PTP_1588_CLOCK */ /* If a critical error is pending we have no choice but to reset the * device. * Report and mask out any remaining unexpected interrupts. @@ -4074,7 +4546,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - smp_rmb(); + read_barrier_depends(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & @@ -4409,6 +4881,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, true /*is xdp*/, enable); + if (ret) break; } @@ -4526,9 +4999,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) } } - /* Due to HW errata, on Rx disable only, the register can indicate done - * before it really is. 
- * Needs 50ms to be sure - */ + /* HW needs up to 50ms to finish RX queue disable */ if (!enable) mdelay(50); @@ -4624,10 +5095,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) !vsi->q_vectors[i]->num_ringpairs) continue; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT /* remove our suggested affinity mask for this IRQ */ irq_set_affinity_hint(irq_num, NULL); +#endif synchronize_irq(irq_num); free_irq(irq_num, vsi->q_vectors[i]); @@ -4816,7 +5291,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi) for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.ring || q_vector->tx.ring) + if (q_vector->tx.ring || q_vector->rx.ring) napi_enable(&q_vector->napi); } } @@ -4835,7 +5310,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.ring || q_vector->tx.ring) + if (q_vector->tx.ring || q_vector->rx.ring) napi_disable(&q_vector->napi); } } @@ -4862,14 +5337,18 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) * i40e_quiesce_vsi - Pause a given VSI * @vsi: the VSI being paused **/ -static void i40e_quiesce_vsi(struct i40e_vsi *vsi) +void i40e_quiesce_vsi(struct i40e_vsi *vsi) { if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) +#ifdef HAVE_NET_DEVICE_OPS vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); +#else /* HAVE_NET_DEVICE_OPS */ + vsi->netdev->stop(vsi->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ else i40e_vsi_close(vsi); } @@ -4878,13 +5357,17 @@ * i40e_unquiesce_vsi - Resume a given VSI * @vsi: the VSI being resumed **/ -static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) +void i40e_unquiesce_vsi(struct i40e_vsi *vsi) { if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) return; if (vsi->netdev && netif_running(vsi->netdev)) +#ifdef HAVE_NET_DEVICE_OPS vsi->netdev->netdev_ops->ndo_open(vsi->netdev); +#else /* HAVE_NET_DEVICE_OPS */ + vsi->netdev->open(vsi->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ else i40e_vsi_open(vsi); /* this clears the DOWN bit */ } @@ -4893,7 +5376,7 @@ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF * @pf: the PF **/ -static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) +void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) { int v; @@ -4907,7 +5390,7 @@ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF * @pf: the PF **/ -static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) +void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) { int v; @@ -4922,7 +5405,7 @@ * @vsi: the VSI being configured * * Wait until all queues on a given VSI have been disabled. 
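 *
 * (Editor's note: the body below polls queue-enable state through
 * i40e_pf_txq_wait(), including the paired XDP Tx queue at index
 * pf_q + vsi->alloc_queue_pairs when XDP is enabled, before moving on to
 * the Rx queues.)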
- **/ +**/ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; @@ -4945,6 +5428,7 @@ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) /* Check and wait for the XDP Tx queue */ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, false); + if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d XDP Tx ring %d disable timeout\n", @@ -4965,7 +5449,7 @@ wait_rx: return 0; } -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB /** * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF @@ -4987,8 +5471,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) return ret; } - -#endif +#endif /* CONFIG_DCB */ /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP @@ -5080,6 +5563,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) return enabled_tc; } +#ifdef __TC_MQPRIO_MODE_MAX /** * i40e_mqprio_get_enabled_tc - Get enabled traffic classes * @pf: PF being queried * @@ -5097,6 +5581,7 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) enabled_tc |= BIT(i); return enabled_tc; } +#endif /** * i40e_pf_get_num_tc - Get enabled traffic classes for PF * @@ -5104,15 +5589,17 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) * * Return number of traffic classes enabled for the given PF **/ -static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) +u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - u8 i, enabled_tc = 1; + u8 i, enabled_tc; u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; +#ifdef __TC_MQPRIO_MODE_MAX if (pf->flags & I40E_FLAG_TC_MQPRIO) return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; +#endif /* If neither MQPRIO nor DCB is enabled, then always use single TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) @@ -5126,7 +5613,7 @@ if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else - return 1; /* Only TC0 */ + return 1; /* Only TC0 */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) @@ -5143,8 +5630,10 @@ **/ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { +#ifdef __TC_MQPRIO_MODE_MAX if (pf->flags & I40E_FLAG_TC_MQPRIO) return i40e_mqprio_get_enabled_tc(pf); +#endif /* If neither MQPRIO nor DCB is enabled for this PF then just return * default TC @@ -5208,14 +5697,14 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) /* Still continuing */ } - vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); + vsi->bw_limit = LE16_TO_CPU(bw_config.port_bw_limit); vsi->bw_max_quanta = bw_config.max_bw; - tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | - (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); + tc_bw_max = LE16_TO_CPU(bw_ets_config.tc_bw_max[0]) | + (LE16_TO_CPU(bw_ets_config.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; vsi->bw_ets_limit_credits[i] = - le16_to_cpu(bw_ets_config.credits[i]); + LE16_TO_CPU(bw_ets_config.credits[i]); /* 3 bits out of 4 for each TC */ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); } @@ -5239,6 +5728,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, i40e_status ret; int i; +#ifdef __TC_MQPRIO_MODE_MAX /* There is no need to reset BW when mqprio mode is on. 
*/ if (pf->flags & I40E_FLAG_TC_MQPRIO) return 0; @@ -5250,6 +5740,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, vsi->seid); return ret; } +#endif bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; @@ -5356,7 +5847,7 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, * It is expected that the VSI queues have been quisced before calling * this function. **/ -static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) +int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) { u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; struct i40e_pf *pf = vsi->back; @@ -5364,11 +5855,15 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) struct i40e_vsi_context ctxt; int ret = 0; int i; - +#ifdef __TC_MQPRIO_MODE_MAX /* Check if enabled_tc is same as existing or new TCs */ if (vsi->tc_config.enabled_tc == enabled_tc && vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) return ret; +#else + if (vsi->tc_config.enabled_tc == enabled_tc) + return ret; +#endif /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -5420,6 +5915,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; + +#ifdef __TC_MQPRIO_MODE_MAX if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); if (ret) @@ -5427,7 +5924,6 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) } else { i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); } - /* On destroying the qdisc, reset vsi->rss_size, as number of enabled * queues changed. */ @@ -5442,6 +5938,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) } vsi->reconfig_rss = false; } +#else + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); +#endif + if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); @@ -5479,12 +5979,572 @@ out: return ret; } +/** + * i40e_veb_config_tc - Configure TCs for given VEB + * @veb: given VEB + * @enabled_tc: TC bitmap + * + * Configures given TC bitmap for VEB (switching) element + **/ +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; + struct i40e_pf *pf = veb->pf; + int ret = 0; + int i; + + /* No TCs or already enabled TCs just return */ + if (!enabled_tc || veb->enabled_tc == enabled_tc) + return ret; + + bw_data.tc_valid_bits = enabled_tc; + /* bw_data.absolute_credits is not set (relative) */ + + /* Enable ETS TCs with equal BW Share for now */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & BIT(i)) + bw_data.tc_bw_share_credits[i] = 1; + } + + ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, + &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "VEB bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + goto out; + } + + /* Update the BW information */ + ret = i40e_veb_get_bw_info(veb); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed getting veb bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } + +out: + return ret; +} + +#ifdef CONFIG_DCB +/** + * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs + * @pf: PF struct + * + * 
Reconfigure VEB/VSIs on a given PF; it is assumed that + * the caller would've quiesced all the VSIs before calling + * this function + **/ +static void i40e_dcb_reconfigure(struct i40e_pf *pf) +{ + u8 tc_map = 0; + int ret; + u8 v; + + /* Enable the TCs available on PF to all VEBs */ + tc_map = i40e_pf_get_tc_map(pf); + for (v = 0; v < I40E_MAX_VEB; v++) { + if (!pf->veb[v]) + continue; + ret = i40e_veb_config_tc(pf->veb[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VEB seid=%d\n", + pf->veb[v]->seid); + /* Will try to configure as many components */ + } + } + + /* Update each VSI */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (!pf->vsi[v]) + continue; + + /* - Enable all TCs for the LAN VSI + * - For all others keep them at TC0 for now + */ + if (v == pf->lan_vsi) + tc_map = i40e_pf_get_tc_map(pf); + else + tc_map = I40E_DEFAULT_TRAFFIC_CLASS; + + ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VSI seid=%d\n", + pf->vsi[v]->seid); + /* Will try to configure as many components */ + } else { + /* Re-configure VSI vectors based on updated TC map */ + i40e_vsi_map_rings_to_vectors(pf->vsi[v]); +#ifdef HAVE_DCBNL_IEEE + if (pf->vsi[v]->netdev) + i40e_dcbnl_set_all(pf->vsi[v]); +#endif /* HAVE_DCBNL_IEEE */ + } + } +} + +/** + * i40e_resume_port_tx - Resume port Tx + * @pf: PF struct + * + * Resume a port's Tx and issue a PF reset in case of failure to + * resume. + **/ +static int i40e_resume_port_tx(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_aq_resume_port_tx(hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Resume Port Tx failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); + i40e_service_event_schedule(pf); + } + + return ret; +} + +/** + * i40e_init_pf_dcb - Initialize DCB configuration + * @pf: PF being configured + * + * Query the current DCB configuration and cache it + * in the hardware structure + **/ +static int i40e_init_pf_dcb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int err = 0; + + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable + * Also do not enable DCBx if FW LLDP agent is disabled + */ + if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || + (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) { + dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n"); + err = I40E_NOT_SUPPORTED; + goto out; + } + + err = i40e_init_dcb(hw, true); + if (!err) { + /* Device/Function is not DCBX capable */ + if ((!hw->func_caps.dcb) || + (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { + dev_info(&pf->pdev->dev, + "DCBX offload is not supported or is disabled for this PF.\n"); + } else { + /* When status is not DISABLED then DCBX in FW */ + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; + + pf->flags |= I40E_FLAG_DCB_CAPABLE; + /* Enable DCB tagging only when more than one TC + * or explicitly disable if only one TC + */ + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) + pf->flags |= I40E_FLAG_DCB_ENABLED; + else + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + dev_dbg(&pf->pdev->dev, + "DCBX offload is supported for this PF.\n"); + } + } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); + pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; + } else { + dev_info(&pf->pdev->dev, + "Query
for DCB configuration failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } + +out: + return err; +} + +#endif /* CONFIG_DCB */ +#define SPEED_SIZE 14 +#define FC_SIZE 8 +/** + * i40e_print_link_message - print link up or down + * @vsi: the VSI for which link needs a message + * @isup: true if link is up, false otherwise + */ +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) +{ + enum i40e_aq_link_speed new_speed; + struct i40e_pf *pf = vsi->back; + char *speed = "Unknown "; + char *fc = "Unknown"; + char *fec = ""; + char *req_fec = ""; + char *an = ""; + + if (isup) + new_speed = pf->hw.phy.link_info.link_speed; + else + new_speed = I40E_LINK_SPEED_UNKNOWN; + + if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) + return; + + vsi->current_isup = isup; + vsi->current_speed = new_speed; + + if (!isup) { + netdev_info(vsi->netdev, "NIC Link is Down\n"); + return; + } + + /* Warn user if link speed on NPAR enabled partition is not at + * least 10GB + */ + if (pf->hw.func_caps.npar_enable && + (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || + pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) + netdev_warn(vsi->netdev, + "The partition detected link speed that is less than 10Gbps\n"); + + switch (pf->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + speed = "40 G"; + break; + case I40E_LINK_SPEED_20GB: + speed = "20 G"; + break; + case I40E_LINK_SPEED_25GB: + speed = "25 G"; + break; + case I40E_LINK_SPEED_10GB: + speed = "10 G"; + break; + case I40E_LINK_SPEED_5GB: + speed = "5 G"; + break; + case I40E_LINK_SPEED_2_5GB: + speed = "2.5 G"; + break; + case I40E_LINK_SPEED_1GB: + speed = "1000 M"; + break; + case I40E_LINK_SPEED_100MB: + speed = "100 M"; + break; + default: + break; + } + + switch (pf->hw.fc.current_mode) { + case I40E_FC_FULL: + fc = "RX/TX"; + break; + case I40E_FC_TX_PAUSE: + fc = "TX"; + break; + case I40E_FC_RX_PAUSE: + fc = "RX"; + break; + default: + fc = "None"; + break; + } + + if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { + req_fec = "None"; + fec = "None"; + an = "False"; + + if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) + an = "True"; + + if (pf->hw.phy.link_info.fec_info & + I40E_AQ_CONFIG_FEC_KR_ENA) + fec = "CL74 FC-FEC/BASE-R"; + else if (pf->hw.phy.link_info.fec_info & + I40E_AQ_CONFIG_FEC_RS_ENA) + fec = "CL108 RS-FEC"; + + /* 'CL108 RS-FEC' should be displayed when RS is requested, or + * both RS and FC are requested + */ + if (pf->hw.phy.link_info.req_fec_info & + (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { + if (pf->hw.phy.link_info.req_fec_info & + I40E_AQ_REQUEST_FEC_RS) + req_fec = "CL108 RS-FEC"; + else + req_fec = "CL74 FC-FEC/BASE-R"; + } + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", + speed, req_fec, fec, an, fc); + } else { + struct ethtool_eee edata; + + edata.supported = 0; + edata.eee_enabled = false; +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + if (get_ethtool_ops_ext(vsi->netdev)->get_eee) + get_ethtool_ops_ext(vsi->netdev) + ->get_eee(vsi->netdev, &edata); +#else + if (vsi->netdev->ethtool_ops->get_eee) + vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &edata); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + + if (edata.supported) + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n", + speed, fc, + edata.eee_enabled ?
"Enabled" : "Disabled"); + else + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", + speed, fc); + } + +} + +/** + * i40e_up_complete - Finish the last steps of bringing up a connection + * @vsi: the VSI being configured + **/ +static int i40e_up_complete(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int err; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + i40e_vsi_configure_msix(vsi); + else + i40e_configure_msi_and_legacy(vsi); + + /* start rings */ + err = i40e_vsi_start_rings(vsi); + if (err) + return err; + + clear_bit(__I40E_VSI_DOWN, vsi->state); + i40e_napi_enable_all(vsi); + i40e_vsi_enable_irq(vsi); + + if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && + (vsi->netdev)) { + i40e_print_link_message(vsi, true); + netif_tx_start_all_queues(vsi->netdev); + netif_carrier_on(vsi->netdev); + } + + /* replay flow filters */ + if (vsi->type == I40E_VSI_FDIR) { + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = 0; + i40e_fdir_filter_restore(vsi); + i40e_cloud_filter_restore(pf); + } + + /* On the next run of the service_task, notify any clients of the new + * opened netdev + */ + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + i40e_service_event_schedule(pf); + + return 0; +} + +/** + * i40e_vsi_reinit_locked - Reset the VSI + * @vsi: the VSI being configured + * + * Rebuild the ring structs after some configuration + * has changed, e.g. MTU size. + **/ +static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) + usleep_range(1000, 2000); + i40e_down(vsi); + i40e_up(vsi); + clear_bit(__I40E_CONFIG_BUSY, pf->state); +} + +/** + * i40e_up - Bring the connection back up after being down + * @vsi: the VSI being configured + **/ +int i40e_up(struct i40e_vsi *vsi) +{ + int err; + + if ((vsi->type == I40E_VSI_MAIN && + vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) || + (vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)) + i40e_force_link_state(vsi->back, true); + + err = i40e_vsi_configure(vsi); + if (!err) + err = i40e_up_complete(vsi); + + return err; +} + +/** + * i40e_force_link_state - Force the link status + * @pf: board private structure + * @is_up: whether the link state should be forced up or down + **/ +static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config = {0}; + struct i40e_hw *hw = &pf->hw; + bool non_zero_phy_type; + i40e_status err; + u64 mask; + u8 speed; + + non_zero_phy_type = is_up; + /* Card might've been put in an unstable state by other drivers + * and applications, which causes incorrect speed values being + * set on startup. In order to clear speed registers, we call + * get_phy_capabilities twice, once to get initial state of + * available speeds, and once to get current phy config. 
+ */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (err) { + dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + speed = abilities.link_speed; + + /* Get the current phy config */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (err) { + dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + + /* If link needs to go up, but was not forced to go down, + * and its speed values are OK, no need for a flap + * if non_zero_phy_type was set, still need to force up + */ + if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN) + non_zero_phy_type = true; + else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) + return I40E_SUCCESS; + + /* To force link we need to set bits for all supported PHY types, + * but there are now more than 32, so we need to split the bitmap + * across two fields. + */ + mask = I40E_PHY_TYPES_BITMASK; + config.phy_type = + non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; + config.phy_type_ext = + non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0; + /* Copy the old settings, except of phy_type */ + config.abilities = abilities.abilities; + if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN) { + if (is_up) + config.abilities |= I40E_AQ_PHY_ENABLE_LINK; + else + config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK); + } + if (abilities.link_speed != 0) + config.link_speed = abilities.link_speed; + else + config.link_speed = speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + err = i40e_aq_set_phy_config(hw, &config, NULL); + + if (err) { + dev_err(&pf->pdev->dev, + "set phy config ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + return err; + } + + /* Update the link info */ + err = i40e_update_link_info(hw); + if (err) { + /* Wait a little bit (on 40G cards it sometimes takes a really + * long time for link to come back from the atomic reset) + * and try once more + */ + msleep(1000); + i40e_update_link_info(hw); + } + + i40e_aq_set_link_restart_an(hw, is_up, NULL); + + return I40E_SUCCESS; +} + +/** + * i40e_down - Shutdown the connection processing + * @vsi: the VSI being stopped + **/ +void i40e_down(struct i40e_vsi *vsi) +{ + int i; + + /* It is assumed that the caller of this function + * sets the vsi->state __I40E_VSI_DOWN bit. 
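+ * Typically the close/quiesce paths do + * set_bit(__I40E_VSI_DOWN, vsi->state) before calling i40e_down().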
+ */ + if (vsi->netdev) { + netif_carrier_off(vsi->netdev); + netif_tx_disable(vsi->netdev); + } + i40e_vsi_disable_irq(vsi); + i40e_vsi_stop_rings(vsi); + if ((vsi->type == I40E_VSI_MAIN && + vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) || + (vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)) + i40e_force_link_state(vsi->back, false); + i40e_napi_disable_all(vsi); + + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_clean_tx_ring(vsi->tx_rings[i]); + if (i40e_enabled_xdp_vsi(vsi)) + i40e_clean_tx_ring(vsi->xdp_rings[i]); + i40e_clean_rx_ring(vsi->rx_rings[i]); + } + +} + /** * i40e_get_link_speed - Returns link speed for the interface * @vsi: VSI to be configured * **/ -static int i40e_get_link_speed(struct i40e_vsi *vsi) +int i40e_get_link_speed(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; @@ -5515,7 +6575,6 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) { struct i40e_pf *pf = vsi->back; - u64 credits = 0; int speed = 0; int ret = 0; @@ -5533,9 +6592,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) } /* Tx rate credits are in units of 50Mbps (e.g. 1000 Mbps -> 20 credits); 0 is disabled */ - credits = max_tx_rate; - do_div(credits, I40E_BW_CREDIT_DIVISOR); - ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, + ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, I40E_MAX_BW_INACTIVE_ACCUM, NULL); if (ret) dev_err(&pf->pdev->dev, @@ -5545,6 +6603,177 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) return ret; } +#ifdef __TC_MQPRIO_MODE_MAX +/** + * i40e_validate_and_set_switch_mode - sets up switch mode correctly + * @vsi: ptr to VSI which has PF backing + * + * Validates the current switch mode and, if it needs to be changed, sets + * it to one of the allowed modes. + **/ +static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) +{ + u8 mode; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); + if (ret) + return -EINVAL; + + if (hw->dev_caps.switch_mode) { + /* if switch mode is set, support mode2 (non-tunneled for + * cloud filter) for now + */ + u32 switch_mode = hw->dev_caps.switch_mode & + I40E_SWITCH_MODE_MASK; + if (switch_mode >= I40E_CLOUD_FILTER_MODE1) { + if (switch_mode == I40E_CLOUD_FILTER_MODE2) + return 0; + dev_err(&pf->pdev->dev, + "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", + hw->dev_caps.switch_mode); + return -EINVAL; + } + } + + /* Set Bit 7 to be valid */ + mode = I40E_AQ_SET_SWITCH_BIT7_VALID; + + /* Set L4type for TCP support */ + mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP; + + /* Set cloud filter mode */ + mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; + + /* Prep mode field for set_switch_config */ + ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, + pf->last_sw_conf_valid_flags, + mode, NULL); + if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) + dev_err(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + + return ret; +} + +/** + * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf + * @vsi: pointer to VSI + * @filter: cloud filter rule + * @add: if true, add, if false, delete + * + * Add or delete a cloud filter for a specific flow spec using big buffer. + * Returns 0 if the filter was successfully added.
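+ * + * Note: this path requires a non-zero destination L4 port and does not + * support UDP; both restrictions are checked below before the AQ call.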
+ **/ +int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add) +{ + struct i40e_aqc_cloud_filters_element_bb cld_filter; + struct i40e_pf *pf = vsi->back; + int ret; + + if (i40e_is_l4mode_enabled()) { + dev_err(&pf->pdev->dev, + "Not expected to run in L4 cloud filter mode\n"); + return -EINVAL; + } + + /* Both (src/dst) valid mac_addr are not supported */ + if ((is_valid_ether_addr(filter->dst_mac) && + is_valid_ether_addr(filter->src_mac)) || + (is_multicast_ether_addr(filter->dst_mac) && + is_multicast_ether_addr(filter->src_mac))) + return -EOPNOTSUPP; + + /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP + * ports are not supported via big buffer now. + */ + if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) + return -EOPNOTSUPP; + + /* adding filter using src_port/src_ip is not supported at this stage */ + if (filter->src_port || filter->src_ipv4 || + !ipv6_addr_any(&filter->ip.v6.src_ip6)) + return -EOPNOTSUPP; + + /* copy element needed to add cloud filter from filter */ + i40e_set_cld_element(filter, &cld_filter.element); + + if (is_valid_ether_addr(filter->dst_mac) || + is_valid_ether_addr(filter->src_mac) || + is_multicast_ether_addr(filter->dst_mac) || + is_multicast_ether_addr(filter->src_mac)) { + /* MAC + IP : unsupported mode */ + if (filter->dst_ipv4) + return -EOPNOTSUPP; + + /* since we validated that L4 port must be valid before + * we get here, start with respective "flags" value + * and update if vlan is present or not + */ + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT); + + if (filter->vlan_id) { + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); + } + + } else if (filter->dst_ipv4 || + !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); + if (filter->n_proto == ETH_P_IPV6) + cld_filter.element.flags |= + cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6); + else + cld_filter.element.flags |= + cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4); + } else { + dev_err(&pf->pdev->dev, + "either mac or ip has to be valid for cloud filter\n"); + return -EINVAL; + } + + /* Now copy L4 port in Byte 6..7 in general fields */ + cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = + be16_to_cpu(filter->dst_port); + + if (add) { + /* Validate current device switch mode, change if necessary */ + ret = i40e_validate_and_set_switch_mode(vsi); + if (ret) { + dev_err(&pf->pdev->dev, + "failed to set switch mode, ret %d\n", + ret); + return ret; + } + + ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, + &cld_filter, 1); + } else { + ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, + &cld_filter, 1); + } + + if (ret) + dev_dbg(&pf->pdev->dev, + "Failed to %s cloud filter(big buffer) err %d aq_err %d\n", + add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); + else + dev_info(&pf->pdev->dev, + "%s cloud filter for VSI: %d, L4 port: %d\n", + add ? 
"add" : "delete", filter->seid, + ntohs(filter->dst_port)); + return ret; +} + /** * i40e_remove_queue_channels - Remove queue channels for the TCs * @vsi: VSI to be configured @@ -5621,6 +6850,29 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) kfree(cfilter); } + /* delete cloud filters associated with this channel */ + hlist_for_each_entry_safe(cfilter, node, + &pf->cloud_filter_list, cloud_node) { + if (cfilter->seid != ch->seid) + continue; + + hash_del(&cfilter->cloud_node); + if (cfilter->dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, + cfilter, + false); + else + ret = i40e_add_del_cloud_filter(vsi, cfilter, + false); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) + dev_info(&pf->pdev->dev, + "Failed to delete cloud filter, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); + kfree(cfilter); + } + /* delete VSI from FW */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); @@ -5633,24 +6885,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) INIT_LIST_HEAD(&vsi->ch_list); } -/** - * i40e_is_any_channel - channel exist or not - * @vsi: ptr to VSI to which channels are associated with - * - * Returns true or false if channel(s) exist for associated VSI or not - **/ -static bool i40e_is_any_channel(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { - if (ch->initialized) - return true; - } - - return false; -} - /** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with @@ -5787,29 +7021,31 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) /** * i40e_channel_setup_queue_map - Setup a channel queue map * @pf: ptr to PF device - * @vsi: the VSI being setup * @ctxt: VSI context structure * @ch: ptr to channel structure * * Setup queue map for a specific channel **/ -static void i40e_channel_setup_queue_map(struct i40e_pf *pf, - struct i40e_vsi_context *ctxt, - struct i40e_channel *ch) +static int i40e_channel_setup_queue_map(struct i40e_pf *pf, + struct i40e_vsi_context *ctxt, + struct i40e_channel *ch) { - u16 qcount, qmap, sections = 0; + u16 qmap, sections = 0; u8 offset = 0; int pow; + if (ch->num_queue_pairs > pf->num_lan_msix) { + dev_err(&pf->pdev->dev, + "Requested queues number exceeded max available MSI-X vectors. 
Refused to set queue map\n"); + return -EINVAL; + } + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; - qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); - ch->num_queue_pairs = qcount; - /* find the next higher power-of-2 of num queue pairs */ - pow = ilog2(qcount); - if (!is_power_of_2(qcount)) + pow = ilog2(ch->num_queue_pairs); + if (!is_power_of_2(ch->num_queue_pairs)) pow++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | @@ -5822,6 +7058,8 @@ static void i40e_channel_setup_queue_map(struct i40e_pf *pf, ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); ctxt->info.valid_sections |= cpu_to_le16(sections); + + return 0; } /** @@ -5862,7 +7100,9 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, } /* Set queue map for a given VSI context */ - i40e_channel_setup_queue_map(pf, &ctxt, ch); + ret = i40e_channel_setup_queue_map(pf, &ctxt, ch); + if (ret) + return ret; /* Now time to create VSI */ ret = i40e_aq_add_vsi(hw, &ctxt, NULL); @@ -5875,13 +7115,11 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, return -ENOENT; } - /* Success, update channel, set enabled_tc only if the channel - * is not a macvlan - */ - ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; + /* Success, update channel */ + ch->enabled_tc = enabled_tc; ch->seid = ctxt.seid; ch->vsi_number = ctxt.vsi_number; - ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx); + ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); /* copy just the sections touched not the entire info * since not all sections are valid as returned by @@ -6030,8 +7268,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf, /** * i40e_setup_channel - setup new channel using uplink element * @pf: ptr to PF device - * @type: type of channel to be created (VMDq2/VF) - * @uplink_seid: underlying HW switching element (VEB) ID + * @vsi: the VSI being setup * @ch: ptr to channel structure * * Setup new channel (VSI) based on specified type (VMDq2/VF) @@ -6065,63 +7302,6 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, return ch->initialized ? true : false; } -/** - * i40e_validate_and_set_switch_mode - sets up switch mode correctly - * @vsi: ptr to VSI which has PF backing - * - * Sets up switch mode correctly if it needs to be changed and perform - * what are allowed modes. 
- **/ -static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) -{ - u8 mode; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - int ret; - - ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); - if (ret) - return -EINVAL; - - if (hw->dev_caps.switch_mode) { - /* if switch mode is set, support mode2 (non-tunneled for - * cloud filter) for now - */ - u32 switch_mode = hw->dev_caps.switch_mode & - I40E_SWITCH_MODE_MASK; - if (switch_mode >= I40E_CLOUD_FILTER_MODE1) { - if (switch_mode == I40E_CLOUD_FILTER_MODE2) - return 0; - dev_err(&pf->pdev->dev, - "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", - hw->dev_caps.switch_mode); - return -EINVAL; - } - } - - /* Set Bit 7 to be valid */ - mode = I40E_AQ_SET_SWITCH_BIT7_VALID; - - /* Set L4type for TCP support */ - mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP; - - /* Set cloud filter mode */ - mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; - - /* Prep mode field for set_switch_config */ - ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, - pf->last_sw_conf_valid_flags, - mode, NULL); - if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) - dev_err(&pf->pdev->dev, - "couldn't set switch config bits, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); - - return ret; -} - /** * i40e_create_queue_channel - function to create channel * @vsi: VSI to be configured @@ -6158,30 +7338,18 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ - if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || - (!i40e_is_any_channel(vsi))) { - if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { - dev_dbg(&pf->pdev->dev, - "Failed to create channel. 
Override queues (%u) not power of 2\n", - vsi->tc_config.tc_info[0].qcount); - return -EINVAL; - } + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - - if (vsi->type == I40E_VSI_MAIN) { - if (pf->flags & I40E_FLAG_TC_MQPRIO) - i40e_do_reset(pf, I40E_PF_RESET_FLAG, - true); - else - i40e_do_reset_safe(pf, - I40E_PF_RESET_FLAG); - } + if (vsi->type == I40E_VSI_MAIN) { + if (pf->flags & I40E_FLAG_TC_MQPRIO) + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + else + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } - /* now onwards for main VSI, number of queues will be value - * of TC0's queue count - */ + /* now onwards for main VSI, number of queues will be value + * of TC0's queue count + */ } /* By this time, vsi->cnt_q_avail shall be set to non-zero and @@ -6195,7 +7363,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, } /* reconfig_rss only if vsi type is MAIN_VSI */ - if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { + if (reconfig_rss && vsi->type == I40E_VSI_MAIN) { err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); if (err) { dev_info(&pf->pdev->dev, @@ -6216,17 +7384,13 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* configure VSI for BW limit */ if (ch->max_tx_rate) { - u64 credits = ch->max_tx_rate; - if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; - do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&pf->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, - credits, - ch->seid); + ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, ch->seid); } /* in case of VF, this will be main SRIOV VSI */ @@ -6247,7 +7411,6 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, static int i40e_configure_queue_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch; - u64 max_rate = 0; int ret = 0, i; /* Create app vsi with the TCs. 
Main VSI with TC0 is already set up */ @@ -6269,9 +7432,8 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) /* Bandwidth limit through tc interface is in bytes/s, * change to Mbit/s */ - max_rate = vsi->mqprio_qopt.max_rate[i]; - do_div(max_rate, I40E_BW_MBPS_DIVISOR); - ch->max_tx_rate = max_rate; + ch->max_tx_rate = + vsi->mqprio_qopt.max_rate[i] / (1000000 / 8); list_add_tail(&ch->list, &vsi->ch_list); @@ -6292,529 +7454,6 @@ err_free: return ret; } -/** - * i40e_veb_config_tc - Configure TCs for given VEB - * @veb: given VEB - * @enabled_tc: TC bitmap - * - * Configures given TC bitmap for VEB (switching) element - **/ -int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) -{ - struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; - struct i40e_pf *pf = veb->pf; - int ret = 0; - int i; - - /* No TCs or already enabled TCs just return */ - if (!enabled_tc || veb->enabled_tc == enabled_tc) - return ret; - - bw_data.tc_valid_bits = enabled_tc; - /* bw_data.absolute_credits is not set (relative) */ - - /* Enable ETS TCs with equal BW Share for now */ - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT(i)) - bw_data.tc_bw_share_credits[i] = 1; - } - - ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, - &bw_data, NULL); - if (ret) { - dev_info(&pf->pdev->dev, - "VEB bw config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - goto out; - } - - /* Update the BW information */ - ret = i40e_veb_get_bw_info(veb); - if (ret) { - dev_info(&pf->pdev->dev, - "Failed getting veb bw config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - } - -out: - return ret; -} - -#ifdef CONFIG_I40E_DCB -/** - * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs - * @pf: PF struct - * - * Reconfigure VEB/VSIs on a given PF; it is assumed that - * the caller would've quiesce all the VSIs before calling - * this function - **/ -static void i40e_dcb_reconfigure(struct i40e_pf *pf) -{ - u8 tc_map = 0; - int ret; - u8 v; - - /* Enable the TCs available on PF to all VEBs */ - tc_map = i40e_pf_get_tc_map(pf); - for (v = 0; v < I40E_MAX_VEB; v++) { - if (!pf->veb[v]) - continue; - ret = i40e_veb_config_tc(pf->veb[v], tc_map); - if (ret) { - dev_info(&pf->pdev->dev, - "Failed configuring TC for VEB seid=%d\n", - pf->veb[v]->seid); - /* Will try to configure as many components */ - } - } - - /* Update each VSI */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (!pf->vsi[v]) - continue; - - /* - Enable all TCs for the LAN VSI - * - For all others keep them at TC0 for now - */ - if (v == pf->lan_vsi) - tc_map = i40e_pf_get_tc_map(pf); - else - tc_map = I40E_DEFAULT_TRAFFIC_CLASS; - - ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); - if (ret) { - dev_info(&pf->pdev->dev, - "Failed configuring TC for VSI seid=%d\n", - pf->vsi[v]->seid); - /* Will try to configure as many components */ - } else { - /* Re-configure VSI vectors based on updated TC map */ - i40e_vsi_map_rings_to_vectors(pf->vsi[v]); - if (pf->vsi[v]->netdev) - i40e_dcbnl_set_all(pf->vsi[v]); - } - } -} - -/** - * i40e_resume_port_tx - Resume port Tx - * @pf: PF struct - * - * Resume a port's Tx and issue a PF reset in case of failure to - * resume. 
- **/ -static int i40e_resume_port_tx(struct i40e_pf *pf) -{ - struct i40e_hw *hw = &pf->hw; - int ret; - - ret = i40e_aq_resume_port_tx(hw, NULL); - if (ret) { - dev_info(&pf->pdev->dev, - "Resume Port Tx failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - /* Schedule PF reset to recover */ - set_bit(__I40E_PF_RESET_REQUESTED, pf->state); - i40e_service_event_schedule(pf); - } - - return ret; -} - -/** - * i40e_init_pf_dcb - Initialize DCB configuration - * @pf: PF being configured - * - * Query the current DCB configuration and cache it - * in the hardware structure - **/ -static int i40e_init_pf_dcb(struct i40e_pf *pf) -{ - struct i40e_hw *hw = &pf->hw; - int err = 0; - - /* Do not enable DCB for SW1 and SW2 images even if the FW is capable - * Also do not enable DCBx if FW LLDP agent is disabled - */ - if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || - (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) { - dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n"); - err = I40E_NOT_SUPPORTED; - goto out; - } - - err = i40e_init_dcb(hw, true); - if (!err) { - /* Device/Function is not DCBX capable */ - if ((!hw->func_caps.dcb) || - (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { - dev_info(&pf->pdev->dev, - "DCBX offload is not supported or is disabled for this PF.\n"); - } else { - /* When status is not DISABLED then DCBX in FW */ - pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | - DCB_CAP_DCBX_VER_IEEE; - - pf->flags |= I40E_FLAG_DCB_CAPABLE; - /* Enable DCB tagging only when more than one TC - * or explicitly disable if only one TC - */ - if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; - else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; - dev_dbg(&pf->pdev->dev, - "DCBX offload is supported for this PF.\n"); - } - } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { - dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); - pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; - } else { - dev_info(&pf->pdev->dev, - "Query for DCB configuration failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - } - -out: - return err; -} -#endif /* CONFIG_I40E_DCB */ -#define SPEED_SIZE 14 -#define FC_SIZE 8 -/** - * i40e_print_link_message - print link up or down - * @vsi: the VSI for which link needs a message - * @isup: true of link is up, false otherwise - */ -void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) -{ - enum i40e_aq_link_speed new_speed; - struct i40e_pf *pf = vsi->back; - char *speed = "Unknown"; - char *fc = "Unknown"; - char *fec = ""; - char *req_fec = ""; - char *an = ""; - - if (isup) - new_speed = pf->hw.phy.link_info.link_speed; - else - new_speed = I40E_LINK_SPEED_UNKNOWN; - - if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) - return; - vsi->current_isup = isup; - vsi->current_speed = new_speed; - if (!isup) { - netdev_info(vsi->netdev, "NIC Link is Down\n"); - return; - } - - /* Warn user if link speed on NPAR enabled partition is not at - * least 10GB - */ - if (pf->hw.func_caps.npar_enable && - (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || - pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) - netdev_warn(vsi->netdev, - "The partition detected link speed that is less than 10Gbps\n"); - - switch (pf->hw.phy.link_info.link_speed) { - case I40E_LINK_SPEED_40GB: - speed = "40 G"; - break; - case I40E_LINK_SPEED_20GB: - speed = "20 G"; - break; - case 
I40E_LINK_SPEED_25GB: - speed = "25 G"; - break; - case I40E_LINK_SPEED_10GB: - speed = "10 G"; - break; - case I40E_LINK_SPEED_5GB: - speed = "5 G"; - break; - case I40E_LINK_SPEED_2_5GB: - speed = "2.5 G"; - break; - case I40E_LINK_SPEED_1GB: - speed = "1000 M"; - break; - case I40E_LINK_SPEED_100MB: - speed = "100 M"; - break; - default: - break; - } - - switch (pf->hw.fc.current_mode) { - case I40E_FC_FULL: - fc = "RX/TX"; - break; - case I40E_FC_TX_PAUSE: - fc = "TX"; - break; - case I40E_FC_RX_PAUSE: - fc = "RX"; - break; - default: - fc = "None"; - break; - } - - if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { - req_fec = "None"; - fec = "None"; - an = "False"; - - if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) - an = "True"; - - if (pf->hw.phy.link_info.fec_info & - I40E_AQ_CONFIG_FEC_KR_ENA) - fec = "CL74 FC-FEC/BASE-R"; - else if (pf->hw.phy.link_info.fec_info & - I40E_AQ_CONFIG_FEC_RS_ENA) - fec = "CL108 RS-FEC"; - - /* 'CL108 RS-FEC' should be displayed when RS is requested, or - * both RS and FC are requested - */ - if (vsi->back->hw.phy.link_info.req_fec_info & - (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { - if (vsi->back->hw.phy.link_info.req_fec_info & - I40E_AQ_REQUEST_FEC_RS) - req_fec = "CL108 RS-FEC"; - else - req_fec = "CL74 FC-FEC/BASE-R"; - } - netdev_info(vsi->netdev, - "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", - speed, req_fec, fec, an, fc); - } else { - netdev_info(vsi->netdev, - "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", - speed, fc); - } - -} - -/** - * i40e_up_complete - Finish the last steps of bringing up a connection - * @vsi: the VSI being configured - **/ -static int i40e_up_complete(struct i40e_vsi *vsi) -{ - struct i40e_pf *pf = vsi->back; - int err; - - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - i40e_vsi_configure_msix(vsi); - else - i40e_configure_msi_and_legacy(vsi); - - /* start rings */ - err = i40e_vsi_start_rings(vsi); - if (err) - return err; - - clear_bit(__I40E_VSI_DOWN, vsi->state); - i40e_napi_enable_all(vsi); - i40e_vsi_enable_irq(vsi); - - if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && - (vsi->netdev)) { - i40e_print_link_message(vsi, true); - netif_tx_start_all_queues(vsi->netdev); - netif_carrier_on(vsi->netdev); - } - - /* replay FDIR SB filters */ - if (vsi->type == I40E_VSI_FDIR) { - /* reset fd counters */ - pf->fd_add_err = 0; - pf->fd_atr_cnt = 0; - i40e_fdir_filter_restore(vsi); - } - - /* On the next run of the service_task, notify any clients of the new - * opened netdev - */ - set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); - i40e_service_event_schedule(pf); - - return 0; -} - -/** - * i40e_vsi_reinit_locked - Reset the VSI - * @vsi: the VSI being configured - * - * Rebuild the ring structs after some configuration - * has changed, e.g. MTU size. 
- **/ -static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) -{ - struct i40e_pf *pf = vsi->back; - - WARN_ON(in_interrupt()); - while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) - usleep_range(1000, 2000); - i40e_down(vsi); - - i40e_up(vsi); - clear_bit(__I40E_CONFIG_BUSY, pf->state); -} - -/** - * i40e_up - Bring the connection back up after being down - * @vsi: the VSI being configured - **/ -int i40e_up(struct i40e_vsi *vsi) -{ - int err; - - err = i40e_vsi_configure(vsi); - if (!err) - err = i40e_up_complete(vsi); - - return err; -} - -/** - * i40e_force_link_state - Force the link status - * @pf: board private structure - * @is_up: whether the link state should be forced up or down - **/ -static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) -{ - struct i40e_aq_get_phy_abilities_resp abilities; - struct i40e_aq_set_phy_config config = {0}; - struct i40e_hw *hw = &pf->hw; - i40e_status err; - u64 mask; - u8 speed; - - /* Card might've been put in an unstable state by other drivers - * and applications, which causes incorrect speed values being - * set on startup. In order to clear speed registers, we call - * get_phy_capabilities twice, once to get initial state of - * available speeds, and once to get current PHY config. - */ - err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, - NULL); - if (err) { - dev_err(&pf->pdev->dev, - "failed to get phy cap., ret = %s last_status = %s\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return err; - } - speed = abilities.link_speed; - - /* Get the current phy config */ - err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, - NULL); - if (err) { - dev_err(&pf->pdev->dev, - "failed to get phy cap., ret = %s last_status = %s\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return err; - } - - /* If link needs to go up, but was not forced to go down, - * and its speed values are OK, no need for a flap - */ - if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) - return I40E_SUCCESS; - - /* To force link we need to set bits for all supported PHY types, - * but there are now more than 32, so we need to split the bitmap - * across two fields. - */ - mask = I40E_PHY_TYPES_BITMASK; - config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; - config.phy_type_ext = is_up ? 
(u8)((mask >> 32) & 0xff) : 0; - /* Copy the old settings, except of phy_type */ - config.abilities = abilities.abilities; - if (abilities.link_speed != 0) - config.link_speed = abilities.link_speed; - else - config.link_speed = speed; - config.eee_capability = abilities.eee_capability; - config.eeer = abilities.eeer_val; - config.low_power_ctrl = abilities.d3_lpan; - config.fec_config = abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_PHY_FEC_CONFIG_MASK; - err = i40e_aq_set_phy_config(hw, &config, NULL); - - if (err) { - dev_err(&pf->pdev->dev, - "set phy config ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - return err; - } - - /* Update the link info */ - err = i40e_update_link_info(hw); - if (err) { - /* Wait a little bit (on 40G cards it sometimes takes a really - * long time for link to come back from the atomic reset) - * and try once more - */ - msleep(1000); - i40e_update_link_info(hw); - } - - i40e_aq_set_link_restart_an(hw, true, NULL); - - return I40E_SUCCESS; -} - -/** - * i40e_down - Shutdown the connection processing - * @vsi: the VSI being stopped - **/ -void i40e_down(struct i40e_vsi *vsi) -{ - int i; - - /* It is assumed that the caller of this function - * sets the vsi->state __I40E_VSI_DOWN bit. - */ - if (vsi->netdev) { - netif_carrier_off(vsi->netdev); - netif_tx_disable(vsi->netdev); - } - i40e_vsi_disable_irq(vsi); - i40e_vsi_stop_rings(vsi); - if (vsi->type == I40E_VSI_MAIN && - vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) - i40e_force_link_state(vsi->back, false); - i40e_napi_disable_all(vsi); - - for (i = 0; i < vsi->num_queue_pairs; i++) { - i40e_clean_tx_ring(vsi->tx_rings[i]); - if (i40e_enabled_xdp_vsi(vsi)) { - /* Make sure that in-progress ndo_xdp_xmit - * calls are completed. - */ - synchronize_rcu(); - i40e_clean_tx_ring(vsi->xdp_rings[i]); - } - i40e_clean_rx_ring(vsi->rx_rings[i]); - } - -} - /** * i40e_validate_mqprio_qopt- validate queue mapping info * @vsi: the VSI being configured @@ -6824,7 +7463,6 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 sum_max_rate = 0; - u64 max_rate = 0; int i; if (mqprio_qopt->qopt.offset[0] != 0 || @@ -6839,9 +7477,7 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, "Invalid min tx rate (greater than 0) specified\n"); return -EINVAL; } - max_rate = mqprio_qopt->max_rate[i]; - do_div(max_rate, I40E_BW_MBPS_DIVISOR); - sum_max_rate += max_rate; + sum_max_rate += (mqprio_qopt->max_rate[i] / (1000000 / 8)); if (i >= mqprio_qopt->qopt.num_tc - 1) break; @@ -6851,6 +7487,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, } if (vsi->num_queue_pairs < (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { + dev_err(&vsi->back->pdev->dev, + "Failed to create traffic channel, insufficient number of queues.\n"); return -EINVAL; } if (sum_max_rate > i40e_get_link_speed(vsi)) { @@ -6889,507 +7527,141 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) } /** - * i40e_del_macvlan_filter - * @hw: pointer to the HW structure - * @seid: seid of the channel VSI - * @macaddr: the mac address to apply as a filter - * @aq_err: store the admin Q error + * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs + * @vsi: PF main vsi + * @seid: seid of main or channel VSIs * - * This function deletes a mac filter on the channel VSI which serves as the - * macvlan. Returns 0 on success. 
+ * Rebuilds cloud filters associated with main VSI and channel VSIs if they + * existed before reset **/ -static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, - const u8 *macaddr, int *aq_err) +static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) { - struct i40e_aqc_remove_macvlan_element_data element; - i40e_status status; + struct i40e_cloud_filter *cfilter; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; + i40e_status ret; - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.vlan_tag = 0; - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL); - *aq_err = hw->aq.asq_last_status; - - return status; -} - -/** - * i40e_add_macvlan_filter - * @hw: pointer to the HW structure - * @seid: seid of the channel VSI - * @macaddr: the mac address to apply as a filter - * @aq_err: store the admin Q error - * - * This function adds a mac filter on the channel VSI which serves as the - * macvlan. Returns 0 on success. - **/ -static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, - const u8 *macaddr, int *aq_err) -{ - struct i40e_aqc_add_macvlan_element_data element; - i40e_status status; - u16 cmd_flags = 0; - - ether_addr_copy(element.mac_addr, macaddr); - element.vlan_tag = 0; - element.queue_number = 0; - element.match_method = I40E_AQC_MM_ERR_NO_RES; - cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; - element.flags = cpu_to_le16(cmd_flags); - status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL); - *aq_err = hw->aq.asq_last_status; - - return status; -} - -/** - * i40e_reset_ch_rings - Reset the queue contexts in a channel - * @vsi: the VSI we want to access - * @ch: the channel we want to access - */ -static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) -{ - struct i40e_ring *tx_ring, *rx_ring; - u16 pf_q; - int i; - - for (i = 0; i < ch->num_queue_pairs; i++) { - pf_q = ch->base_queue + i; - tx_ring = vsi->tx_rings[pf_q]; - tx_ring->ch = NULL; - rx_ring = vsi->rx_rings[pf_q]; - rx_ring->ch = NULL; - } -} - -/** - * i40e_free_macvlan_channels - * @vsi: the VSI we want to access - * - * This function frees the Qs of the channel VSI from - * the stack and also deletes the channel VSIs which - * serve as macvlans. 
- */ -static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - int ret; - - if (list_empty(&vsi->macvlan_list)) - return; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - struct i40e_vsi *parent_vsi; - - if (i40e_is_channel_macvlan(ch)) { - i40e_reset_ch_rings(vsi, ch); - clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); - netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); - netdev_set_sb_channel(ch->fwd->netdev, 0); - kfree(ch->fwd); - ch->fwd = NULL; - } - - list_del(&ch->list); - parent_vsi = ch->parent_vsi; - if (!parent_vsi || !ch->initialized) { - kfree(ch); + /* Add cloud filters back if they exist */ + hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, + cloud_node) { + if (cfilter->seid != seid) continue; - } - /* remove the VSI */ - ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, - NULL); - if (ret) - dev_err(&vsi->back->pdev->dev, - "unable to remove channel (%d) for parent VSI(%d)\n", - ch->seid, parent_vsi->seid); - kfree(ch); + if (cfilter->dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, + true); + else + ret = i40e_add_del_cloud_filter(vsi, cfilter, true); + + if (ret) { + dev_dbg(&pf->pdev->dev, + "Failed to rebuild cloud filter, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } } - vsi->macvlan_cnt = 0; + return 0; } /** - * i40e_fwd_ring_up - bring the macvlan device up - * @vsi: the VSI we want to access - * @vdev: macvlan netdevice - * @fwd: the private fwd structure - */ -static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, - struct i40e_fwd_adapter *fwd) -{ - int ret = 0, num_tc = 1, i, aq_err; - struct i40e_channel *ch, *ch_tmp; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - - if (list_empty(&vsi->macvlan_list)) - return -EINVAL; - - /* Go through the list and find an available channel */ - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - if (!i40e_is_channel_macvlan(ch)) { - ch->fwd = fwd; - /* record configuration for macvlan interface in vdev */ - for (i = 0; i < num_tc; i++) - netdev_bind_sb_channel_queue(vsi->netdev, vdev, - i, - ch->num_queue_pairs, - ch->base_queue); - for (i = 0; i < ch->num_queue_pairs; i++) { - struct i40e_ring *tx_ring, *rx_ring; - u16 pf_q; - - pf_q = ch->base_queue + i; - - /* Get to TX ring ptr */ - tx_ring = vsi->tx_rings[pf_q]; - tx_ring->ch = ch; - - /* Get the RX ring ptr */ - rx_ring = vsi->rx_rings[pf_q]; - rx_ring->ch = ch; - } - break; - } - } - - /* Guarantee all rings are updated before we update the - * MAC address filter. - */ - wmb(); - - /* Add a mac filter */ - ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); - if (ret) { - /* if we cannot add the MAC rule then disable the offload */ - macvlan_release_l2fw_offload(vdev); - for (i = 0; i < ch->num_queue_pairs; i++) { - struct i40e_ring *rx_ring; - u16 pf_q; - - pf_q = ch->base_queue + i; - rx_ring = vsi->rx_rings[pf_q]; - rx_ring->netdev = NULL; - } - dev_info(&pf->pdev->dev, - "Error adding mac filter on macvlan err %s, aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, aq_err)); - netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); - } - - return ret; -} - -/** - * i40e_setup_macvlans - create the channels which will be macvlans - * @vsi: the VSI we want to access - * @macvlan_cnt: no. of macvlans to be setup - * @qcnt: no. 
of Qs per macvlan - * @vdev: macvlan netdevice - */ -static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, - struct net_device *vdev) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - struct i40e_vsi_context ctxt; - u16 sections, qmap, num_qps; - struct i40e_channel *ch; - int i, pow, ret = 0; - u8 offset = 0; - - if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) - return -EINVAL; - - num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); - - /* find the next higher power-of-2 of num queue pairs */ - pow = fls(roundup_pow_of_two(num_qps) - 1); - - qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | - (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); - - /* Setup context bits for the main VSI */ - sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; - sections |= I40E_AQ_VSI_PROP_SCHED_VALID; - memset(&ctxt, 0, sizeof(ctxt)); - ctxt.seid = vsi->seid; - ctxt.pf_num = vsi->back->hw.pf_id; - ctxt.vf_num = 0; - ctxt.uplink_seid = vsi->uplink_seid; - ctxt.info = vsi->info; - ctxt.info.tc_mapping[0] = cpu_to_le16(qmap); - ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); - ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); - ctxt.info.valid_sections |= cpu_to_le16(sections); - - /* Reconfigure RSS for main VSI with new max queue count */ - vsi->rss_size = max_t(u16, num_qps, qcnt); - ret = i40e_vsi_config_rss(vsi); - if (ret) { - dev_info(&pf->pdev->dev, - "Failed to reconfig RSS for num_queues (%u)\n", - vsi->rss_size); - return ret; - } - vsi->reconfig_rss = true; - dev_dbg(&vsi->back->pdev->dev, - "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); - vsi->next_base_queue = num_qps; - vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; - - /* Update the VSI after updating the VSI queue-mapping - * information - */ - ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); - if (ret) { - dev_info(&pf->pdev->dev, - "Update vsi tc config failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - /* update the local VSI info with updated queue map */ - i40e_vsi_update_queue_map(vsi, &ctxt); - vsi->info.valid_sections = 0; - - /* Create channels for macvlans */ - INIT_LIST_HEAD(&vsi->macvlan_list); - for (i = 0; i < macvlan_cnt; i++) { - ch = kzalloc(sizeof(*ch), GFP_KERNEL); - if (!ch) { - ret = -ENOMEM; - goto err_free; - } - INIT_LIST_HEAD(&ch->list); - ch->num_queue_pairs = qcnt; - if (!i40e_setup_channel(pf, vsi, ch)) { - ret = -EINVAL; - goto err_free; - } - ch->parent_vsi = vsi; - vsi->cnt_q_avail -= ch->num_queue_pairs; - vsi->macvlan_cnt++; - list_add_tail(&ch->list, &vsi->macvlan_list); - } - - return ret; - -err_free: - dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); - i40e_free_macvlan_channels(vsi); - - return ret; -} - -/** - * i40e_fwd_add - configure macvlans - * @netdev: net device to configure - * @vdev: macvlan netdevice + * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset + * @vsi: PF main vsi + * + * Rebuilds channel VSIs if they existed before reset **/ -static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors; - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_fwd_adapter *fwd; - int avail_macvlan, ret; - - if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { - netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); - return ERR_PTR(-EINVAL); - } - if ((pf->flags & 
I40E_FLAG_TC_MQPRIO)) { - netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); - return ERR_PTR(-EINVAL); - } - if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { - netdev_info(netdev, "Not enough vectors available to support macvlans\n"); - return ERR_PTR(-EINVAL); - } - - /* The macvlan device has to be a single Q device so that the - * tc_to_txq field can be reused to pick the tx queue. - */ - if (netif_is_multiqueue(vdev)) - return ERR_PTR(-ERANGE); - - if (!vsi->macvlan_cnt) { - /* reserve bit 0 for the pf device */ - set_bit(0, vsi->fwd_bitmask); - - /* Try to reserve as many queues as possible for macvlans. First - * reserve 3/4th of max vectors, then half, then quarter and - * calculate Qs per macvlan as you go - */ - vectors = pf->num_lan_msix; - if (vectors <= I40E_MAX_MACVLANS && vectors > 64) { - /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ - q_per_macvlan = 4; - macvlan_cnt = (vectors - 32) / 4; - } else if (vectors <= 64 && vectors > 32) { - /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ - q_per_macvlan = 2; - macvlan_cnt = (vectors - 16) / 2; - } else if (vectors <= 32 && vectors > 16) { - /* allocate 1 Q per macvlan and 16 Qs to the PF*/ - q_per_macvlan = 1; - macvlan_cnt = vectors - 16; - } else if (vectors <= 16 && vectors > 8) { - /* allocate 1 Q per macvlan and 8 Qs to the PF */ - q_per_macvlan = 1; - macvlan_cnt = vectors - 8; - } else { - /* allocate 1 Q per macvlan and 1 Q to the PF */ - q_per_macvlan = 1; - macvlan_cnt = vectors - 1; - } - - if (macvlan_cnt == 0) - return ERR_PTR(-EBUSY); - - /* Quiesce VSI queues */ - i40e_quiesce_vsi(vsi); - - /* sets up the macvlans but does not "enable" them */ - ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, - vdev); - if (ret) - return ERR_PTR(ret); - - /* Unquiesce VSI */ - i40e_unquiesce_vsi(vsi); - } - avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, - vsi->macvlan_cnt); - if (avail_macvlan >= I40E_MAX_MACVLANS) - return ERR_PTR(-EBUSY); - - /* create the fwd struct */ - fwd = kzalloc(sizeof(*fwd), GFP_KERNEL); - if (!fwd) - return ERR_PTR(-ENOMEM); - - set_bit(avail_macvlan, vsi->fwd_bitmask); - fwd->bit_no = avail_macvlan; - netdev_set_sb_channel(vdev, avail_macvlan); - fwd->netdev = vdev; - - if (!netif_running(netdev)) - return fwd; - - /* Set fwd ring up */ - ret = i40e_fwd_ring_up(vsi, vdev, fwd); - if (ret) { - /* unbind the queues and drop the subordinate channel config */ - netdev_unbind_sb_channel(netdev, vdev); - netdev_set_sb_channel(vdev, 0); - - kfree(fwd); - return ERR_PTR(-EINVAL); - } - - return fwd; -} - -/** - * i40e_del_all_macvlans - Delete all the mac filters on the channels - * @vsi: the VSI we want to access - */ -static void i40e_del_all_macvlans(struct i40e_vsi *vsi) +static int i40e_rebuild_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - int aq_err, ret = 0; + i40e_status ret; - if (list_empty(&vsi->macvlan_list)) - return; + if (list_empty(&vsi->ch_list)) + return 0; - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - if (i40e_is_channel_macvlan(ch)) { - ret = i40e_del_macvlan_filter(hw, ch->seid, - i40e_channel_mac(ch), - &aq_err); - if (!ret) { - /* Reset queue contexts */ - i40e_reset_ch_rings(vsi, ch); - clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); - netdev_unbind_sb_channel(vsi->netdev, - ch->fwd->netdev); - netdev_set_sb_channel(ch->fwd->netdev, 0); - kfree(ch->fwd); - ch->fwd = NULL; - } - } - } -} - -/** - * i40e_fwd_del - delete 
macvlan interfaces - * @netdev: net device to configure - * @vdev: macvlan netdevice - */ -static void i40e_fwd_del(struct net_device *netdev, void *vdev) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_fwd_adapter *fwd = vdev; - struct i40e_channel *ch, *ch_tmp; - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - int aq_err, ret = 0; - - /* Find the channel associated with the macvlan and del mac filter */ - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - if (i40e_is_channel_macvlan(ch) && - ether_addr_equal(i40e_channel_mac(ch), - fwd->netdev->dev_addr)) { - ret = i40e_del_macvlan_filter(hw, ch->seid, - i40e_channel_mac(ch), - &aq_err); - if (!ret) { - /* Reset queue contexts */ - i40e_reset_ch_rings(vsi, ch); - clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); - netdev_unbind_sb_channel(netdev, fwd->netdev); - netdev_set_sb_channel(fwd->netdev, 0); - kfree(ch->fwd); - ch->fwd = NULL; - } else { - dev_info(&pf->pdev->dev, - "Error deleting mac filter on macvlan err %s, aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, aq_err)); - } + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { + if (!ch->initialized) break; + /* Proceed with creation of channel (VMDq2) VSI */ + ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "failed to rebuild channels using uplink_seid %u\n", + vsi->uplink_seid); + return ret; + } + if (ch->max_tx_rate) { + if (i40e_set_bw_limit(vsi, ch->seid, + ch->max_tx_rate)) + return -EINVAL; + + dev_dbg(&vsi->back->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + ch->max_tx_rate, + ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, + ch->seid); + } + ret = i40e_rebuild_cloud_filters(vsi, ch->seid); + if (ret) { + dev_dbg(&vsi->back->pdev->dev, + "Failed to rebuild cloud filters for channel VSI %u\n", + ch->seid); + return ret; } } + return 0; } +#endif /* __TC_MQPRIO_MODE_MAX */ +#ifdef HAVE_SETUP_TC /** * i40e_setup_tc - configure multiple traffic classes * @netdev: net device to configure * @type_data: tc offload data **/ +#ifdef __TC_MQPRIO_MODE_MAX static int i40e_setup_tc(struct net_device *netdev, void *type_data) +#else +static int i40e_setup_tc(struct net_device *netdev, u8 tc) +#endif { +#ifdef __TC_MQPRIO_MODE_MAX struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; +#endif struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u8 enabled_tc = 0, num_tc, hw; + u8 enabled_tc = 0, num_tc; +#ifdef __TC_MQPRIO_MODE_MAX bool need_reset = false; int old_queue_pairs; +#endif int ret = -EINVAL; +#ifdef __TC_MQPRIO_MODE_MAX u16 mode; + u8 hw; +#endif int i; +#ifdef __TC_MQPRIO_MODE_MAX old_queue_pairs = vsi->num_queue_pairs; +#endif + /* Check if MFP enabled */ + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + netdev_info(netdev, + "Configuring TC not supported in MFP mode\n"); + goto exit; + } + +#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + /* Check whether tc count is within enabled limit */ + if (tc > i40e_pf_get_num_tc(pf)) { + netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); + goto exit; + } +#endif + +#ifdef __TC_MQPRIO_MODE_MAX num_tc = mqprio_qopt->qopt.num_tc; hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; @@ -7399,12 +7671,6 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) goto config_tc; } - /* Check if MFP enabled */ - if (pf->flags & I40E_FLAG_MFP_ENABLED) { 
- netdev_info(netdev, - "Configuring TC not supported in MFP mode\n"); - return ret; - } switch (mode) { case TC_MQPRIO_MODE_DCB: pf->flags &= ~I40E_FLAG_TC_MQPRIO; @@ -7444,48 +7710,72 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) } config_tc: +#else + /* Check if DCB enabled to continue */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { + netdev_info(netdev, "DCB is not enabled for adapter\n"); + goto exit; + } + num_tc = tc; +#endif /* Generate TC map for number of tc requested */ for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ +#ifdef __TC_MQPRIO_MODE_MAX if (enabled_tc == vsi->tc_config.enabled_tc && mode != TC_MQPRIO_MODE_CHANNEL) return 0; +#else + if (enabled_tc == vsi->tc_config.enabled_tc) + return 0; +#endif /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); +#ifdef __TC_MQPRIO_MODE_MAX if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) i40e_remove_queue_channels(vsi); +#endif /* Configure VSI for enabled TCs */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", vsi->seid); +#ifdef __TC_MQPRIO_MODE_MAX + need_reset = true; +#endif + goto exit; +#ifdef __TC_MQPRIO_MODE_MAX + } else if (enabled_tc && + (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { + netdev_info(netdev, + "Failed to create channel. Override queues (%u) not power of 2\n", + vsi->tc_config.tc_info[0].qcount); + ret = -EINVAL; need_reset = true; goto exit; - } else { - dev_info(&vsi->back->pdev->dev, - "Setup channel (id:%u) utilizing num_queues %d\n", - vsi->seid, vsi->tc_config.tc_info[0].qcount); +#endif } + dev_info(&vsi->back->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + vsi->seid, vsi->tc_config.tc_info[0].qcount); + +#ifdef __TC_MQPRIO_MODE_MAX if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; - - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / + (1000000 / 8); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { - u64 credits = max_tx_rate; - - do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", max_tx_rate, - credits, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, vsi->seid); } else { need_reset = true; @@ -7502,239 +7792,26 @@ config_tc: } } +#endif + exit: +#ifdef __TC_MQPRIO_MODE_MAX /* Reset the configuration data to defaults, only TC0 is enabled */ if (need_reset) { i40e_vsi_set_default_tc_config(vsi); need_reset = false; } - +#endif /* Unquiesce VSI */ i40e_unquiesce_vsi(vsi); return ret; } -/** - * i40e_set_cld_element - sets cloud filter element data - * @filter: cloud filter rule - * @cld: ptr to cloud filter element data - * - * This is helper function to copy data into cloud filter element - **/ -static inline void -i40e_set_cld_element(struct i40e_cloud_filter *filter, - struct i40e_aqc_cloud_filters_element_data *cld) -{ - int i, j; - u32 ipa; - - memset(cld, 0, sizeof(*cld)); - ether_addr_copy(cld->outer_mac, filter->dst_mac); - ether_addr_copy(cld->inner_mac, filter->src_mac); - - if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) - return; - - if (filter->n_proto == ETH_P_IPV6) { -#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) - for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6); - i++, j += 2) { - ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); - ipa = cpu_to_le32(ipa); - 
memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa)); - } - } else { - ipa = be32_to_cpu(filter->dst_ipv4); - memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); - } - - cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); - - /* tenant_id is not supported by FW now, once the support is enabled - * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) - */ - if (filter->tenant_id) - return; -} - -/** - * i40e_add_del_cloud_filter - Add/del cloud filter - * @vsi: pointer to VSI - * @filter: cloud filter rule - * @add: if true, add, if false, delete - * - * Add or delete a cloud filter for a specific flow spec. - * Returns 0 if the filter were successfully added. - **/ -int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, bool add) -{ - struct i40e_aqc_cloud_filters_element_data cld_filter; - struct i40e_pf *pf = vsi->back; - int ret; - static const u16 flag_table[128] = { - [I40E_CLOUD_FILTER_FLAGS_OMAC] = - I40E_AQC_ADD_CLOUD_FILTER_OMAC, - [I40E_CLOUD_FILTER_FLAGS_IMAC] = - I40E_AQC_ADD_CLOUD_FILTER_IMAC, - [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = - I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, - [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = - I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, - [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = - I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, - [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = - I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, - [I40E_CLOUD_FILTER_FLAGS_IIP] = - I40E_AQC_ADD_CLOUD_FILTER_IIP, - }; - - if (filter->flags >= ARRAY_SIZE(flag_table)) - return I40E_ERR_CONFIG; - - /* copy element needed to add cloud filter from filter */ - i40e_set_cld_element(filter, &cld_filter); - - if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) - cld_filter.flags = cpu_to_le16(filter->tunnel_type << - I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); - - if (filter->n_proto == ETH_P_IPV6) - cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | - I40E_AQC_ADD_CLOUD_FLAGS_IPV6); - else - cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | - I40E_AQC_ADD_CLOUD_FLAGS_IPV4); - - if (add) - ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, - &cld_filter, 1); - else - ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, - &cld_filter, 1); - if (ret) - dev_dbg(&pf->pdev->dev, - "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n", - add ? "add" : "delete", filter->dst_port, ret, - pf->hw.aq.asq_last_status); - else - dev_info(&pf->pdev->dev, - "%s cloud filter for VSI: %d\n", - add ? "Added" : "Deleted", filter->seid); - return ret; -} - -/** - * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf - * @vsi: pointer to VSI - * @filter: cloud filter rule - * @add: if true, add, if false, delete - * - * Add or delete a cloud filter for a specific flow spec using big buffer. - * Returns 0 if the filter were successfully added. - **/ -int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, - bool add) -{ - struct i40e_aqc_cloud_filters_element_bb cld_filter; - struct i40e_pf *pf = vsi->back; - int ret; - - /* Both (src/dst) valid mac_addr are not supported */ - if ((is_valid_ether_addr(filter->dst_mac) && - is_valid_ether_addr(filter->src_mac)) || - (is_multicast_ether_addr(filter->dst_mac) && - is_multicast_ether_addr(filter->src_mac))) - return -EOPNOTSUPP; - - /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP - * ports are not supported via big buffer now. 
- */ - if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) - return -EOPNOTSUPP; - - /* adding filter using src_port/src_ip is not supported at this stage */ - if (filter->src_port || filter->src_ipv4 || - !ipv6_addr_any(&filter->ip.v6.src_ip6)) - return -EOPNOTSUPP; - - /* copy element needed to add cloud filter from filter */ - i40e_set_cld_element(filter, &cld_filter.element); - - if (is_valid_ether_addr(filter->dst_mac) || - is_valid_ether_addr(filter->src_mac) || - is_multicast_ether_addr(filter->dst_mac) || - is_multicast_ether_addr(filter->src_mac)) { - /* MAC + IP : unsupported mode */ - if (filter->dst_ipv4) - return -EOPNOTSUPP; - - /* since we validated that L4 port must be valid before - * we get here, start with respective "flags" value - * and update if vlan is present or not - */ - cld_filter.element.flags = - cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT); - - if (filter->vlan_id) { - cld_filter.element.flags = - cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); - } - - } else if (filter->dst_ipv4 || - !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { - cld_filter.element.flags = - cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); - if (filter->n_proto == ETH_P_IPV6) - cld_filter.element.flags |= - cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6); - else - cld_filter.element.flags |= - cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4); - } else { - dev_err(&pf->pdev->dev, - "either mac or ip has to be valid for cloud filter\n"); - return -EINVAL; - } - - /* Now copy L4 port in Byte 6..7 in general fields */ - cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = - be16_to_cpu(filter->dst_port); - - if (add) { - /* Validate current device switch mode, change if necessary */ - ret = i40e_validate_and_set_switch_mode(vsi); - if (ret) { - dev_err(&pf->pdev->dev, - "failed to set switch mode, ret %d\n", - ret); - return ret; - } - - ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, - &cld_filter, 1); - } else { - ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, - &cld_filter, 1); - } - - if (ret) - dev_dbg(&pf->pdev->dev, - "Failed to %s cloud filter(big buffer) err %d aq_err %d\n", - add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); - else - dev_info(&pf->pdev->dev, - "%s cloud filter for VSI: %d, L4 port: %d\n", - add ? 
"add" : "delete", filter->seid, - ntohs(filter->dst_port)); - return ret; -} - +#ifdef __TC_MQPRIO_MODE_MAX /** * i40e_parse_cls_flower - Parse tc flower filters provided by kernel * @vsi: Pointer to VSI - * @cls_flower: Pointer to struct flow_cls_offload + * @f: Pointer to struct flow_cls_offload * @filter: Pointer to cloud filter structure * **/ @@ -7752,16 +7829,21 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS BIT(FLOW_DISSECTOR_KEY_VLAN) | +#endif BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { +#ifdef HAVE_TC_FLOWER_ENC + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | +#endif + BIT(FLOW_DISSECTOR_KEY_PORTS))) { dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", dissector->used_keys); return -EOPNOTSUPP; } +#ifdef HAVE_TC_FLOWER_ENC if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match; @@ -7771,6 +7853,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, filter->tenant_id = be32_to_cpu(match.key->keyid); } +#endif /* HAVE_TC_FLOWER_ENC */ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -7816,6 +7899,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, ether_addr_copy(filter->src_mac, match.key->src); } +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; @@ -7833,6 +7917,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, filter->vlan_id = cpu_to_be16(match.key->vlan_id); } +#endif /* !HAVE_TC_FLOWER_VLAN_IN_TAGS */ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; @@ -7989,7 +8074,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, if (tc < 0) { dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); - return -EOPNOTSUPP; + return -EINVAL; } if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || @@ -8000,7 +8085,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, (!hlist_empty(&pf->fdir_filter_list))) { dev_err(&vsi->back->pdev->dev, "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n"); - return -EINVAL; + return -EOPNOTSUPP; } if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { @@ -8115,14 +8200,17 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi, /** * i40e_setup_tc_cls_flower - flower classifier offloads - * @netdev: net device to configure - * @type_data: offload data + * @np: net device to configure + * @cls_flower: pointer to cls flower offload structure **/ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, struct flow_cls_offload *cls_flower) { struct i40e_vsi *vsi = np->vsi; + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + switch (cls_flower->command) { case FLOW_CLS_REPLACE: return i40e_configure_clsflower(vsi, cls_flower); @@ -8140,9 +8228,6 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct i40e_netdev_priv *np = cb_priv; - if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) - return -EOPNOTSUPP; - switch (type) { case TC_SETUP_CLSFLOWER: return i40e_setup_tc_cls_flower(np, type_data); @@ -8153,12 +8238,35 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } static LIST_HEAD(i40e_block_cb_list); +#endif +#ifdef NETIF_F_HW_TC +#ifdef 
HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) +#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX) +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, + u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +#else +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +#endif { +#ifdef __TC_MQPRIO_MODE_MAX struct i40e_netdev_priv *np = netdev_priv(netdev); - +#endif +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#ifndef __TC_MQPRIO_MODE_MAX + struct tc_mqprio_qopt *mqprio = type_data; +#endif +#else +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX + struct tc_mqprio_qopt *mqprio = tc->mqprio; +#endif /* TC_MQPRIO_HW_OFFLOAD_MAX*/ + unsigned int type = tc->type; +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +#ifdef __TC_MQPRIO_MODE_MAX switch (type) { case TC_SETUP_QDISC_MQPRIO: return i40e_setup_tc(netdev, type_data); @@ -8170,7 +8278,21 @@ static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, default: return -EOPNOTSUPP; } +#else + if (type != TC_SETUP_QDISC_MQPRIO) + return -EINVAL; + +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return i40e_setup_tc(netdev, mqprio->num_tc); +#else + return i40e_setup_tc(netdev, tc->tc); +#endif /* TC_MQPRIO_HW_OFFLOAD_MAX */ +#endif /* __TC_MQPRIO_MODE_MAX */ } +#endif /* NETIF_F_HW_TC */ +#endif /* HAVE_SETUP_TC */ /** * i40e_open - Called when a network interface is made active @@ -8204,7 +8326,6 @@ int i40e_open(struct net_device *netdev) err = i40e_vsi_open(vsi); if (err) return err; - /* configure global TSO hardware offload settings */ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16); @@ -8213,7 +8334,20 @@ int i40e_open(struct net_device *netdev) TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); +#ifdef HAVE_VXLAN_RX_OFFLOAD +#if IS_ENABLED(CONFIG_VXLAN) + vxlan_get_rx_port(netdev); +#endif +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_GENEVE_RX_OFFLOAD +#if IS_ENABLED(CONFIG_GENEVE) + if (pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE) + geneve_get_rx_port(netdev); +#endif +#endif /* HAVE_GENEVE_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD udp_tunnel_get_rx_info(netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ return 0; } @@ -8259,6 +8393,15 @@ int i40e_vsi_open(struct i40e_vsi *vsi) if (err) goto err_set_queues; + /* When reducing the number of Tx queues, any pre-existing + * skbuffs might target a now removed queue. Older versions of + * the Linux kernel do not check for this, and it can result + * in a kernel panic. Avoid this by flushing all skbs now, so + * that we avoid attempting to transmit one that has an + * invalid queue mapping. + */ + qdisc_reset_all_tx(vsi->netdev); + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) @@ -8356,11 +8499,10 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) } /** - * i40e_cloud_filter_exit - Cleans up the cloud filters + * i40e_cloud_filter_exit - Cleans up Cloud Filters * @pf: Pointer to PF * - * This function destroys the hlist where all the cloud filters - * were saved. + * This function destroys the hlist which keeps all the Cloud Filters. 
**/ static void i40e_cloud_filter_exit(struct i40e_pf *pf) { @@ -8501,7 +8643,20 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) } } -#ifdef CONFIG_I40E_DCB +/** + * i40e_do_reset_safe - Protected reset path for userland calls. + * @pf: board private structure + * @reset_flags: which reset is requested + * + **/ +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) +{ + rtnl_lock(); + i40e_do_reset(pf, reset_flags, true); + rtnl_unlock(); +} + +#ifdef CONFIG_DCB /** * i40e_dcb_need_reconfig - Check if DCB needs reconfig * @pf: board private structure @@ -8573,6 +8728,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, int ret = 0; u8 type; + /* X710-T*L 2.5G and 5G speeds don't support DCB */ + if (hw->device_id == I40E_DEV_ID_10G_BASE_T_BC && + (hw->phy.link_info.link_speed & + ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) && + !(pf->flags & I40E_FLAG_DCB_CAPABLE)) + /* let firmware decide if the DCB should be disabled */ + pf->flags |= I40E_FLAG_DCB_CAPABLE; + /* Not DCB capable or capability disabled */ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return ret; @@ -8604,10 +8767,20 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, - "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* X710-T*L 2.5G and 5G speeds don't support DCB */ + if (hw->device_id == I40E_DEV_ID_10G_BASE_T_BC && + (hw->phy.link_info.link_speed & + (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) { + dev_warn(&pf->pdev->dev, + "DCB is not supported for X710-T*L 2.5/5G speeds\n"); + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + } else { + dev_info(&pf->pdev->dev, + "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + } goto exit; } @@ -8621,7 +8794,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); +#ifdef HAVE_DCBNL_IEEE i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); +#endif /* HAVE_DCBNL_IEEE */ if (!need_reconfig) goto exit; @@ -8654,28 +8829,15 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); - set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); - set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } exit: return ret; } -#endif /* CONFIG_I40E_DCB */ - -/** - * i40e_do_reset_safe - Protected reset path for userland calls. 
- * @pf: board private structure - * @reset_flags: which reset is requested - * - **/ -void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) -{ - rtnl_lock(); - i40e_do_reset(pf, reset_flags, true); - rtnl_unlock(); -} +#endif /* CONFIG_DCB */ /** * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event * @pf: board private structure @@ -8919,6 +9081,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) break; } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { @@ -8947,7 +9110,6 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) * reacting will make sure we don't cause flush too often. */ #define I40E_MAX_FD_PROGRAM_ERROR 256 - /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure @@ -8973,7 +9135,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { - if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) + if (!vsi || (test_bit(__I40E_VSI_DOWN, vsi->state))) return; switch (vsi->type) { @@ -9076,9 +9238,11 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->vf) i40e_vc_notify_link_state(pf); +#ifdef HAVE_PTP_1588_CLOCK if (pf->flags & I40E_FLAG_PTP) i40e_ptp_set_increment(pf); +#endif /* HAVE_PTP_1588_CLOCK */ } /** @@ -9117,9 +9281,11 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) if (pf->veb[i]) i40e_update_veb_stats(pf->veb[i]); } +#ifdef HAVE_PTP_1588_CLOCK i40e_ptp_rx_hang(pf); i40e_ptp_tx_hang(pf); +#endif /* HAVE_PTP_1588_CLOCK */ } /** @@ -9130,26 +9296,16 @@ static void i40e_reset_subtask(struct i40e_pf *pf) { u32 reset_flags = 0; - if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { + if (test_and_clear_bit(__I40E_REINIT_REQUESTED, pf->state)) reset_flags |= BIT(__I40E_REINIT_REQUESTED); - clear_bit(__I40E_REINIT_REQUESTED, pf->state); - } - if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { + if (test_and_clear_bit(__I40E_PF_RESET_REQUESTED, pf->state)) reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); - clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); - } - if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { + if (test_and_clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); - clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); - } - if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { + if (test_and_clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); - clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); - } - if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { + if (test_and_clear_bit(__I40E_DOWN_REQUESTED, pf->state)) reset_flags |= BIT(__I40E_DOWN_REQUESTED); - clear_bit(__I40E_DOWN_REQUESTED, pf->state); - } /* If there's a recovery already waiting, it takes * precedence before starting a new reset sequence. 
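The i40e_reset_subtask() hunk above folds each test_bit()/clear_bit() pair into a single test_and_clear_bit(). The combined primitive is one atomic read-modify-write, so a reset request that is posted between the test and the clear can no longer be wiped out unseen. A minimal userspace sketch of the idiom, assuming hypothetical stand-ins (test_and_clear_bit_ul, PF_RESET_REQUESTED) for the kernel's bitops and the driver's pf->state bits — not part of the patch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's pf->state word. */
static _Atomic unsigned long state;

/* Userspace analogue of test_and_clear_bit(): one atomic
 * read-modify-write that clears the bit and reports whether it
 * was set, leaving no window in which a racing requester's
 * freshly set bit could be cleared without being observed.
 */
static bool test_and_clear_bit_ul(int nr, _Atomic unsigned long *addr)
{
	unsigned long old = atomic_fetch_and(addr, ~(1UL << nr));

	return (old >> nr) & 1UL;
}

int main(void)
{
	enum { PF_RESET_REQUESTED = 3 };	/* made-up bit number */
	unsigned long reset_flags = 0;

	/* A requester posts a reset request... */
	atomic_fetch_or(&state, 1UL << PF_RESET_REQUESTED);

	/* ...and the subtask consumes it exactly once, mirroring the
	 * new shape of i40e_reset_subtask().
	 */
	if (test_and_clear_bit_ul(PF_RESET_REQUESTED, &state))
		reset_flags |= 1UL << PF_RESET_REQUESTED;

	printf("reset_flags = %#lx\n", reset_flags);

	return 0;
}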
@@ -9202,7 +9358,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf, (!(status->link_info & I40E_AQ_LINK_UP)) && (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { dev_err(&pf->pdev->dev, - "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); + "Rx/Tx is disabled on this device because an unsupported SFP+ module type was detected.\n"); dev_err(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); } @@ -9283,7 +9439,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; } - opcode = le16_to_cpu(event.desc.opcode); + opcode = LE16_TO_CPU(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: @@ -9299,11 +9455,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB rtnl_lock(); ret = i40e_handle_lldp_event(pf, &event); rtnl_unlock(); -#endif /* CONFIG_I40E_DCB */ +#endif /* CONFIG_DCB */ break; case i40e_aqc_opc_event_lan_overflow: dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); @@ -9448,6 +9604,7 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; +#ifdef HAVE_BRIDGE_ATTRIBS if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); @@ -9455,6 +9612,12 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) i40e_disable_pf_switch_lb(pf); else i40e_enable_pf_switch_lb(pf); +#else + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + i40e_enable_pf_switch_lb(pf); + else + i40e_disable_pf_switch_lb(pf); +#endif } /** @@ -9504,10 +9667,12 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; +#ifdef HAVE_BRIDGE_ATTRIBS if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) veb->bridge_mode = BRIDGE_MODE_VEB; else veb->bridge_mode = BRIDGE_MODE_VEPA; +#endif i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ @@ -9547,6 +9712,7 @@ end_reconstitute: /** * i40e_get_capabilities - get info about the HW * @pf: the PF struct + * @list_type: admin queue opcode list type **/ static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type) @@ -9609,17 +9775,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf, pf->hw.dev_caps.num_tx_qp); } } - if (list_type == i40e_aqc_opc_list_func_capabilities) { -#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 
1 : 0) \ - + pf->hw.func_caps.num_vfs) - if (pf->hw.revision_id == 0 && - pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { - dev_info(&pf->pdev->dev, - "got num_vsis %d, setting num_vsis to %d\n", - pf->hw.func_caps.num_vsis, DEF_NUM_VSI); - pf->hw.func_caps.num_vsis = DEF_NUM_VSI; - } - } + return 0; } @@ -9633,21 +9789,6 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { struct i40e_vsi *vsi; - /* quick workaround for an NVM issue that leaves a critical register - * uninitialized - */ - if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { - static const u32 hkey[] = { - 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, - 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, - 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, - 0x95b3a76d}; - int i; - - for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) - wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); - } - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; @@ -9678,111 +9819,12 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) struct i40e_vsi *vsi; i40e_fdir_filter_exit(pf); + i40e_cloud_filter_exit(pf); vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); if (vsi) i40e_vsi_release(vsi); } -/** - * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs - * @vsi: PF main vsi - * @seid: seid of main or channel VSIs - * - * Rebuilds cloud filters associated with main VSI and channel VSIs if they - * existed before reset - **/ -static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) -{ - struct i40e_cloud_filter *cfilter; - struct i40e_pf *pf = vsi->back; - struct hlist_node *node; - i40e_status ret; - - /* Add cloud filters back if they exist */ - hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, - cloud_node) { - if (cfilter->seid != seid) - continue; - - if (cfilter->dst_port) - ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, - true); - else - ret = i40e_add_del_cloud_filter(vsi, cfilter, true); - - if (ret) { - dev_dbg(&pf->pdev->dev, - "Failed to rebuild cloud filter, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); - return ret; - } - } - return 0; -} - -/** - * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset - * @vsi: PF main vsi - * - * Rebuilds channel VSIs if they existed before reset - **/ -static int i40e_rebuild_channels(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - i40e_status ret; - - if (list_empty(&vsi->ch_list)) - return 0; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { - if (!ch->initialized) - break; - /* Proceed with creation of channel (VMDq2) VSI */ - ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "failed to rebuild channels using uplink_seid %u\n", - vsi->uplink_seid); - return ret; - } - /* Reconfigure TX queues using QTX_CTL register */ - ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "failed to configure TX rings for channel %u\n", - ch->seid); - return ret; - } - /* update 'next_base_queue' */ - vsi->next_base_queue = vsi->next_base_queue + - ch->num_queue_pairs; - if (ch->max_tx_rate) { - u64 credits = ch->max_tx_rate; - - if (i40e_set_bw_limit(vsi, ch->seid, - ch->max_tx_rate)) - return -EINVAL; - - do_div(credits, I40E_BW_CREDIT_DIVISOR); - dev_dbg(&vsi->back->pdev->dev, - "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", - ch->max_tx_rate, - credits, - ch->seid); - } - ret = i40e_rebuild_cloud_filters(vsi, ch->seid); - if (ret) { - 
dev_dbg(&vsi->back->pdev->dev, - "Failed to rebuild cloud filters for channel VSI %u\n", - ch->seid); - return ret; - } - } - return 0; -} - /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure @@ -9805,6 +9847,7 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + /* TODO: warn any registered clients */ /* quiesce the VSIs and their queues that are not already DOWN */ /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */ if (!lock_acquired) @@ -9828,10 +9871,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) "shutdown_lan_hmc failed: %d\n", ret); } +#ifdef HAVE_PTP_1588_CLOCK /* Save the current PTP time so that we can restore the time after the * reset completes. */ i40e_ptp_save_hw_time(pf); +#endif /* HAVE_PTP_1588_CLOCK */ } /** @@ -9934,7 +9979,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && i40e_check_recovery_mode(pf)) { +#ifdef SIOCETHTOOL i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); +#endif } if (test_bit(__I40E_DOWN, pf->state) && @@ -9965,8 +10012,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { i40e_verify_eeprom(pf); + } /* if we are going out of or into recovery mode we have to act * accordingly with regard to resources initialization @@ -10026,14 +10074,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) /* Enable FW to write a default DCB config on link-up */ i40e_aq_set_dcb_parameters(hw, true, NULL); -#ifdef CONFIG_I40E_DCB +#ifdef CONFIG_DCB ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; /* Continue without DCB enabled */ } -#endif /* CONFIG_I40E_DCB */ + +#endif /* CONFIG_DCB */ /* do basic switch setup */ if (!lock_acquired) rtnl_lock(); @@ -10113,34 +10162,33 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } } +#ifdef __TC_MQPRIO_MODE_MAX if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; - u64 credits = 0; + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / (1000000 / 8); - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); + if (!ret) + dev_dbg(&vsi->back->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + max_tx_rate, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, + vsi->seid); + else + goto end_unlock; + } +#endif + +#ifdef __TC_MQPRIO_MODE_MAX + /* Not going to support channel VSI in L4 cloud filter mode */ + if (!i40e_is_l4mode_enabled()) { + /* PF Main VSI is rebuild by now, go ahead and + * rebuild channel VSIs for this main VSI if they exist + */ + ret = i40e_rebuild_channels(vsi); if (ret) goto end_unlock; - - credits = max_tx_rate; - do_div(credits, I40E_BW_CREDIT_DIVISOR); - dev_dbg(&vsi->back->pdev->dev, - "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", - max_tx_rate, - credits, - vsi->seid); } - - ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); - if (ret) - goto end_unlock; - - /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs - * for this main VSI if they 
exist - */ - ret = i40e_rebuild_channels(vsi); - if (ret) - goto end_unlock; +#endif /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing @@ -10193,9 +10241,49 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) pf->cur_promisc ? "on" : "off", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* Restore all VF-d config */ + + /* If uncommitted changes were made to the BW share settings, then warn + * the user that the configuration may not be restored correctly. + */ + if (!pf->vf_bw_applied) + dev_info(&pf->pdev->dev, "VF BW shares not restored\n"); + + if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->ingress_vlan)) { + u16 rule_type; + + /* The Admin Queue mirroring rules refer to the traffic + * directions from the perspective of the switch, not the VSI + * we apply the mirroring rule on - so the behaviour of a VSI + * ingress mirror is classified as an egress rule + */ + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + ret = i40e_restore_ingress_egress_mirror(vsi, pf->ingress_vlan, + rule_type, + &pf->ingress_rule_id); + if (ret) + pf->ingress_vlan = I40E_NO_VF_MIRROR; + } + + if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->egress_vlan)) { + u16 rule_type; + + /* The Admin Queue mirroring rules refer to the traffic + * directions from the perspective of the switch, not the VSI + * we apply the mirroring rule on - so the behaviour of a VSI + * egress mirror is classified as an ingress rule + */ + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + ret = i40e_restore_ingress_egress_mirror(vsi, pf->egress_vlan, + rule_type, + &pf->egress_rule_id); + if (ret) + pf->egress_vlan = I40E_NO_VF_MIRROR; + } i40e_reset_all_vfs(pf, true); + /* TODO: restart clients */ /* tell the firmware that we're starting */ i40e_send_version(pf); @@ -10345,6 +10433,8 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } +#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD) +#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD) static const char *i40e_tunnel_name(u8 type) { switch (type) { @@ -10427,7 +10517,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) filter_index, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + pf->hw.aq.asq_last_status)); if (port) { /* failed to add, just reset port, * drop pending bit for any deletion @@ -10445,6 +10535,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) rtnl_unlock(); } +#endif /* HAVE_UDP_ENC_TUNNEL || HAVE_UDP_ENC_RX_OFFLOAD */ +#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */ + /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data @@ -10458,8 +10551,9 @@ static void i40e_service_task(struct work_struct *work) /* don't bother with service tasks if a reset is in progress */ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || - test_bit(__I40E_SUSPENDED, pf->state)) + test_bit(__I40E_SUSPENDED, pf->state)) { return; + } if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; @@ -10474,17 +10568,21 @@ static void i40e_service_task(struct work_struct *work) i40e_fdir_reinit_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. 
*/ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], - true); + i40e_notify_client_of_netdev_close( + pf->vsi[pf->lan_vsi], true); } else { i40e_client_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, pf->state)) i40e_notify_client_of_l2_param_changes( - pf->vsi[pf->lan_vsi]); + pf->vsi[pf->lan_vsi]); } - i40e_sync_filters_subtask(pf); +#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD) +#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD) i40e_sync_udp_filters_subtask(pf); + +#endif +#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */ } else { i40e_reset_subtask(pf); } @@ -10508,7 +10606,7 @@ static void i40e_service_task(struct work_struct *work) /** * i40e_service_timer - timer callback - * @data: pointer to PF struct + * @t: pointer to timer_list struct **/ static void i40e_service_timer(struct timer_list *t) { @@ -10536,8 +10634,9 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { vsi->num_q_vectors = pf->num_lan_msix; + } else vsi->num_q_vectors = 1; @@ -10598,6 +10697,7 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) /* allocate memory for both Tx, XDP Tx and Rx ring pointers */ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 3 : 2); + vsi->tx_rings = kzalloc(size, GFP_KERNEL); if (!vsi->tx_rings) return -ENOMEM; @@ -10632,7 +10732,7 @@ err_vectors: * On error: returns error code (negative) * On success: returns vsi index in PF (positive) **/ -static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) +int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) { int ret = -ENODEV; struct i40e_vsi *vsi; @@ -10683,16 +10783,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) hash_init(vsi->mac_filter_hash); vsi->irqs_ready = false; - if (type == I40E_VSI_MAIN) { - vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); - if (!vsi->af_xdp_zc_qps) - goto err_rings; - } - ret = i40e_set_num_rings_in_vsi(vsi); if (ret) goto err_rings; + vsi->block_tx_timeout = false; + ret = i40e_vsi_alloc_arrays(vsi, true); if (ret) goto err_rings; @@ -10704,10 +10800,10 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) spin_lock_init(&vsi->mac_filter_hash_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; + goto unlock_pf; err_rings: - bitmap_free(vsi->af_xdp_zc_qps); pf->next_vsi = i - 1; kfree(vsi); unlock_pf: @@ -10736,23 +10832,6 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) vsi->xdp_rings = NULL; } -/** - * i40e_clear_rss_config_user - clear the user configured RSS hash keys - * and lookup table - * @vsi: Pointer to VSI structure - */ -static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) -{ - if (!vsi) - return; - - kfree(vsi->rss_hkey_user); - vsi->rss_hkey_user = NULL; - - kfree(vsi->rss_lut_user); - vsi->rss_lut_user = NULL; -} - /** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured @@ -10788,7 +10867,6 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); - bitmap_free(vsi->af_xdp_zc_qps); i40e_vsi_free_arrays(vsi, true); i40e_clear_rss_config_user(vsi); @@ -10849,6 +10927,7 @@ static int 
i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; + if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; @@ -10892,25 +10971,28 @@ err_out: i40e_vsi_clear_rings(vsi); return -ENOMEM; } +#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT) /** * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel * @pf: board private structure - * @vectors: the number of MSI-X vectors to request + * @v_budget: the number of MSI-X vectors to request * * Returns the number of vectors reserved, or error **/ -static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) +static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int v_budget) { - vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, - I40E_MIN_MSIX, vectors); - if (vectors < 0) { - dev_info(&pf->pdev->dev, - "MSI-X vector reservation failed: %d\n", vectors); - vectors = 0; - } + int v_actual = 0; - return vectors; + v_actual = pci_enable_msix_range(pf->pdev, + pf->msix_entries, + I40E_MIN_MSIX, + v_budget); + if (v_actual < 0) + dev_info(&pf->pdev->dev, + "MSI-X vector reservation failed: %d\n", v_actual); + + return v_actual; } /** @@ -10936,6 +11018,7 @@ static int i40e_init_msix(struct i40e_pf *pf) /* The number of vectors we'll request will be comprised of: * - Add 1 for "other" cause for Admin Queue events, etc. * - The number of LAN queue pairs + * - This takes into account queues for each TC in DCB mode. * - Queues being used for RSS. * We don't need as many as max_rss_size vectors. * use rss_size instead in the calculation since that @@ -11002,11 +11085,11 @@ static int i40e_init_msix(struct i40e_pf *pf) int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); - /* if we're short on vectors for what's desired, we limit - * the queues per vmdq. If this is still more than are - * available, the user will need to change the number of - * queues/vectors used by the PF later with the ethtool - * channels command + /* if we're short on vectors for what's desired, we + * limit the queues per vmdq. 
If this is still more + * than are available, the user will need to change + * the number of queues/vectors used by the PF later + * with the ethtool channels command */ if (vectors_left < vmdq_vecs_wanted) { pf->num_vmdq_qps = 1; @@ -11047,22 +11130,22 @@ static int i40e_init_msix(struct i40e_pf *pf) for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; v_actual = i40e_reserve_msix_vectors(pf, v_budget); - if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; pci_disable_msix(pf->pdev); return -ENODEV; + } - } else if (v_actual == I40E_MIN_MSIX) { + if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; - } else if (v_actual != v_budget) { + } else if (!vectors_left) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable @@ -11071,8 +11154,7 @@ static int i40e_init_msix(struct i40e_pf *pf) int vec; dev_info(&pf->pdev->dev, - "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", - v_actual, v_budget); + "MSI-X vector limit reached, attempting to redistribute vectors\n"); /* reserve the misc vector */ vec = v_actual - 1; @@ -11090,8 +11172,6 @@ static int i40e_init_msix(struct i40e_pf *pf) if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->num_lan_msix = 1; pf->num_iwarp_msix = 1; - } else { - pf->num_lan_msix = 2; } break; default: @@ -11111,7 +11191,6 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_lan_msix = min_t(int, (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), pf->num_lan_msix); - pf->num_lan_qps = pf->num_lan_msix; break; } } @@ -11133,25 +11212,18 @@ static int i40e_init_msix(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } - i40e_debug(&pf->hw, I40E_DEBUG_INIT, - "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", - pf->num_lan_msix, - pf->num_vmdq_msix * pf->num_vmdq_vsis, - pf->num_fdsb_msix, - pf->num_iwarp_msix); - return v_actual; } +#endif /** * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of the vector in the vsi struct - * @cpu: cpu to be used on affinity_mask * * We allocate one q_vector. If allocation fails we return -ENOMEM. 
**/ -static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector; @@ -11162,8 +11234,9 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) q_vector->vsi = vsi; q_vector->v_idx = v_idx; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - +#endif if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll, NAPI_POLL_WEIGHT); @@ -11184,7 +11257,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int err, v_idx, num_q_vectors, current_cpu; + int v_idx, num_q_vectors; + int err; /* if not MSIX, give the one vector only to the LAN VSI */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -11194,15 +11268,10 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) else return -EINVAL; - current_cpu = cpumask_first(cpu_online_mask); - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu); + err = i40e_vsi_alloc_q_vector(vsi, v_idx); if (err) goto err_out; - current_cpu = cpumask_next(current_cpu, cpu_online_mask); - if (unlikely(current_cpu >= nr_cpu_ids)) - current_cpu = cpumask_first(cpu_online_mask); } return 0; @@ -11224,7 +11293,11 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { +#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT) vectors = i40e_init_msix(pf); +#else + vectors = -1; +#endif if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_IWARP_ENABLED | @@ -11245,7 +11318,11 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); +#ifndef I40E_LEGACY_INTERRUPT vectors = pci_enable_msi(pf->pdev); +#else + vectors = -1; +#endif if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); @@ -11260,70 +11337,19 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) /* set up vector assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); pf->irq_pile = kzalloc(size, GFP_KERNEL); - if (!pf->irq_pile) + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); return -ENOMEM; - + } pf->irq_pile->num_entries = vectors; pf->irq_pile->search_hint = 0; /* track first vector for misc interrupts, ignore return */ - (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); return 0; } -/** - * i40e_restore_interrupt_scheme - Restore the interrupt scheme - * @pf: private board data structure - * - * Restore the interrupt scheme that was cleared when we suspended the - * device. This should be called during resume to re-allocate the q_vectors - * and reacquire IRQs. - */ -static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) -{ - int err, i; - - /* We cleared the MSI and MSI-X flags when disabling the old interrupt - * scheme. 
We need to re-enabled them here in order to attempt to - * re-acquire the MSI or MSI-X vectors - */ - pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); - - err = i40e_init_interrupt_scheme(pf); - if (err) - return err; - - /* Now that we've re-acquired IRQs, we need to remap the vectors and - * rings together again. - */ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i]) { - err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); - if (err) - goto err_unwind; - i40e_vsi_map_rings_to_vectors(pf->vsi[i]); - } - } - - err = i40e_setup_misc_vector(pf); - if (err) - goto err_unwind; - - if (pf->flags & I40E_FLAG_IWARP_ENABLED) - i40e_client_update_msix_info(pf); - - return 0; - -err_unwind: - while (i--) { - if (pf->vsi[i]) - i40e_vsi_free_q_vectors(pf->vsi[i]); - } - - return err; -} - /** * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle * non queue events in recovery mode @@ -11396,7 +11422,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) /* associate no queues to the misc vector */ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); - wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1); i40e_flush(hw); @@ -11405,6 +11431,58 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) return err; } +/** + * i40e_restore_interrupt_scheme - Restore the interrupt scheme + * @pf: private board data structure + * + * Restore the interrupt scheme that was cleared when we suspended the + * device. This should be called during resume to re-allocate the q_vectors + * and reacquire IRQs. + */ +static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) +{ + int err, i; + + /* We cleared the MSI and MSI-X flags when disabling the old interrupt + * scheme. We need to re-enable them here in order to attempt to + * re-acquire the MSI or MSI-X vectors
+ */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i]) { + err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); + if (err) + goto err_unwind; + i40e_vsi_map_rings_to_vectors(pf->vsi[i]); + } + } + + err = i40e_setup_misc_vector(pf); + if (err) + goto err_unwind; + + if (pf->flags & I40E_FLAG_IWARP_ENABLED) + i40e_client_update_msix_info(pf); + + return 0; + +err_unwind: + while (i--) { + if (pf->vsi[i]) + i40e_vsi_free_q_vectors(pf->vsi[i]); + } + + return err; +} + /** * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands * @vsi: Pointer to vsi structure @@ -11447,7 +11525,6 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, return ret; } } - return ret; } @@ -11471,7 +11548,6 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, /* Fill out hash function seed */ if (seed) { u32 *seed_dw = (u32 *)seed; - if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); @@ -11589,8 +11665,9 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, { u16 i; - for (i = 0; i < rss_table_size; i++) + for (i = 0; i < rss_table_size; i++) { lut[i] = i % rss_size; + } } /** @@ -11601,10 +11678,10 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; - u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; + u8 *lut; int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ @@ -11660,6 +11737,23 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) return ret; } +/** + * i40e_clear_rss_config_user - clear the user configured RSS hash keys + * and lookup table + * @vsi: Pointer to VSI structure + */ +static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + /** * i40e_reconfig_rss_queues - change number of queues for rss and rebuild * @pf: board private structure @@ -11677,7 +11771,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; - queue_count = min_t(int, queue_count, num_online_cpus()); new_rss_size = min_t(int, queue_count, pf->rss_size_max); if (queue_count != vsi->num_queue_pairs) { @@ -11721,7 +11814,7 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) u32 max_bw, min_bw; status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, - &min_valid, &max_valid); + &min_valid, &max_valid); if (!status) { if (min_valid) @@ -11836,6 +11929,60 @@ bw_commit_out: return ret; } +/** + * i40e_is_total_port_shutdown_enabled - read nvm and return value + * if total port shutdown feature is enabled for this pf + * @pf: board private structure + **/ +static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) +{ +#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4) +#define I40E_FEATURES_ENABLE_PTR 0x2A +#define I40E_CURRENT_SETTING_PTR 0x2B +#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D +#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 +#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) +#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 + i40e_status read_status = I40E_SUCCESS; + u16 sr_emp_sr_settings_ptr = 0; + u16 features_enable = 0; + u16 link_behavior = 0; + bool ret = false; + + read_status = i40e_read_nvm_word(&pf->hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + &sr_emp_sr_settings_ptr); + if (read_status) + goto err_nvm; + read_status = i40e_read_nvm_word(&pf->hw, + sr_emp_sr_settings_ptr + + 
I40E_FEATURES_ENABLE_PTR, + &features_enable); + if (read_status) + goto err_nvm; + if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) { + read_status = + i40e_read_nvm_module_data(&pf->hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + I40E_CURRENT_SETTING_PTR, + I40E_LINK_BEHAVIOR_WORD_OFFSET, + I40E_LINK_BEHAVIOR_WORD_LENGTH, + &link_behavior); + if (read_status) + goto err_nvm; + link_behavior >>= + (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); + ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior; + } + return ret; + +err_nvm: + dev_warn(&pf->pdev->dev, + "Total Port Shutdown feature is off due to read nvm error:%d\n", + read_status); + return ret; +} + /** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize @@ -11848,6 +11995,16 @@ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; + u16 pow; + + pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, + (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); + if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { + if (I40E_DEBUG_USER & debug) + pf->hw.debug_mask = debug; + pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), + I40E_DEFAULT_MSG_ENABLE); + } /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | @@ -11857,7 +12014,6 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default ITR */ pf->rx_itr_default = I40E_ITR_RX_DEF; pf->tx_itr_default = I40E_ITR_TX_DEF; - /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ @@ -11866,15 +12022,20 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); + + /* find the next higher power-of-2 of num cpus */ + pow = roundup_pow_of_two(num_online_cpus()); + pf->rss_size_max = min_t(int, pf->rss_size_max, pow); + if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } - /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; + dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_partition_bw_setting(pf)) { dev_warn(&pf->pdev->dev, @@ -11920,9 +12081,8 @@ static int i40e_sw_init(struct i40e_pf *pf) #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != - I40E_FDEVICT_PCTYPE_DEFAULT) { - dev_warn(&pf->pdev->dev, - "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); + I40E_FDEVICT_PCTYPE_DEFAULT) { + dev_warn(&pf->pdev->dev, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; } } else if ((pf->hw.aq.api_maj_ver > 1) || @@ -11930,6 +12090,9 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; + + /* supports mpls header skip and csum for following headers */ + pf->hw_features |= I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE; } /* Enable HW ATR eviction if possible */ @@ -11957,8 +12120,8 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; /* Enable PTP L4 if FW > v6.0 */ - if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.aq.fw_maj_ver >= 6) + if ((pf->hw.mac.type == I40E_MAC_XL710) && + (pf->hw.aq.fw_maj_ver >= 6)) pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; if (pf->hw.func_caps.vmdq && 
num_online_cpus() != 1) { @@ -11982,13 +12145,37 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; +#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC + /* force legacy Rx if SKIP_CPU_SYNC is not supported */ + pf->flags |= I40E_FLAG_LEGACY_RX; +#endif #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { - pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; +#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE) + pf->num_req_vfs = 0; + if (max_vfs[pf->instance] > 0 && + max_vfs[pf->instance] <= pf->hw.func_caps.num_vfs) { + pf->flags |= I40E_FLAG_SRIOV_ENABLED; + /* assign number of SR-IOV VFs */ + pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; + pf->num_req_vfs = max_vfs[pf->instance]; + } else if (max_vfs[pf->instance] == 0) { + dev_info(&pf->pdev->dev, + " SR-IOV is disabled, Module Parameter max_vfs value %d = disabled\n", + max_vfs[pf->instance]); + } else if (max_vfs[pf->instance] != -1) { + dev_err(&pf->pdev->dev, + "Module Parameter max_vfs value %d is out of range. Maximum value for the device: %d - resetting to zero\n", + max_vfs[pf->instance], + pf->hw.func_caps.num_vfs); + } +#else pf->flags |= I40E_FLAG_SRIOV_ENABLED; + pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); +#endif /* HAVE_SRIOV_CONFIGURE */ } #endif /* CONFIG_PCI_IOV */ pf->eeprom_version = 0xDEAD; @@ -12011,12 +12198,24 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->tx_timeout_recovery_level = 1; + if (pf->hw.mac.type != I40E_MAC_X722 && + i40e_is_total_port_shutdown_enabled(pf)) { + /* Link down on close must be on when total port shutdown + * is enabled for a given port + */ + pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN + | I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); + dev_info(&pf->pdev->dev, + "Total Port Shutdown is enabled, link-down-on-close forced on\n"); + } mutex_init(&pf->switch_mutex); sw_init_done: return err; } +#ifdef HAVE_NDO_SET_FEATURES +#endif /** * i40e_set_ntuple - set the ntuple feature flag and take action * @pf: board private structure to initialize @@ -12047,14 +12246,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { need_reset = true; i40e_fdir_filter_exit(pf); + i40e_cloud_filter_exit(pf); } pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; - /* reset fd counters */ - pf->fd_add_err = 0; - pf->fd_atr_cnt = 0; + pf->fd_add_err = pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. 
*/ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && @@ -12064,6 +12262,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) return need_reset; } +#ifdef HAVE_NDO_SET_FEATURES /** * i40e_clear_rss_lut - clear the rx hash lookup table * @vsi: the VSI being configured @@ -12092,8 +12291,12 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi) * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int i40e_set_features(struct net_device *netdev, u32 features) +#else static int i40e_set_features(struct net_device *netdev, netdev_features_t features) +#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -12106,20 +12309,22 @@ static int i40e_set_features(struct net_device *netdev, netdev->features & NETIF_F_RXHASH) i40e_clear_rss_lut(vsi); +#ifdef NETIF_F_HW_VLAN_CTAG_RX if (features & NETIF_F_HW_VLAN_CTAG_RX) +#else + if (features & NETIF_F_HW_VLAN_RX) +#endif i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); +#ifdef NETIF_F_HW_TC if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) { dev_err(&pf->pdev->dev, "Offloaded tc filters active, can't turn hw_tc_offload off"); return -EINVAL; } - - if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) - i40e_del_all_macvlans(vsi); - +#endif need_reset = i40e_set_ntuple(pf, features); if (need_reset) @@ -12128,6 +12333,7 @@ static int i40e_set_features(struct net_device *netdev, return 0; } +#endif /* HAVE_NDO_SET_FEATURES */ /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure @@ -12157,6 +12363,7 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) * @netdev: This physical port's netdev * @ti: Tunnel endpoint information **/ +__maybe_unused static void i40e_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) { @@ -12209,6 +12416,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, * @netdev: This physical port's netdev * @ti: Tunnel endpoint information **/ +__maybe_unused static void i40e_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) { @@ -12230,6 +12438,8 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, goto not_found; break; case UDP_TUNNEL_TYPE_GENEVE: + if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)) + return; if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) goto not_found; break; @@ -12255,6 +12465,86 @@ not_found: port); } +#if defined(HAVE_VXLAN_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD) +#if IS_ENABLED(CONFIG_VXLAN) +/** + * i40e_add_vxlan_port - Get notifications about vxlan ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that vxlan is notifying us about + * @port: New UDP port number that vxlan started listening to + **/ +static void i40e_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct udp_tunnel_info ti = { + .type = UDP_TUNNEL_TYPE_VXLAN, + .sa_family = sa_family, + .port = port, + }; + + i40e_udp_tunnel_add(netdev, &ti); +} + +/** + * i40e_del_vxlan_port - Get notifications about vxlan ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that vxlan is notifying us about + * @port: UDP port number that vxlan stopped listening to + **/ +static void i40e_del_vxlan_port(struct net_device *netdev, + 
sa_family_t sa_family, __be16 port)
+{
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_VXLAN,
+		.sa_family = sa_family,
+		.port = port,
+	};
+
+	i40e_udp_tunnel_del(netdev, &ti);
+}
+#endif /* CONFIG_VXLAN */
+#endif /* HAVE_VXLAN_RX_OFFLOAD && !HAVE_UDP_ENC_RX_OFFLOAD */
+#if defined(HAVE_GENEVE_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_GENEVE)
+/**
+ * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: New UDP port number that GENEVE started listening to
+ **/
+static void i40e_add_geneve_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_GENEVE,
+		.sa_family = sa_family,
+		.port = port,
+	};
+
+	i40e_udp_tunnel_add(netdev, &ti);
+}
+
+/**
+ * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: UDP port number that GENEVE stopped listening to
+ **/
+static void i40e_del_geneve_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_GENEVE,
+		.sa_family = sa_family,
+		.port = port,
+	};
+
+	i40e_udp_tunnel_del(netdev, &ti);
+}
+
+#endif /* CONFIG_GENEVE */
+#endif /* HAVE_GENEVE_RX_OFFLOAD && !HAVE_UDP_ENC_RX_OFFLOAD */
+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
 static int i40e_get_phys_port_id(struct net_device *netdev,
 				 struct netdev_phys_item_id *ppid)
 {
@@ -12265,12 +12555,14 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
 	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
 		return -EOPNOTSUPP;
 
-	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
+	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr),
+			     sizeof(ppid->id));
 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
 
 	return 0;
 }
 
+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
 /**
  * i40e_ndo_fdb_add - add an entry to the hardware database
  * @ndm: the input from the stack
@@ -12279,12 +12571,28 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
  * @addr: the MAC address entry being added
  * @vid: VLAN ID
  * @flags: instructions from stack about fdb operation
+ * @extack: netdev extended ack structure
  */
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_EXTACK)
 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-			    struct net_device *dev,
-			    const unsigned char *addr, u16 vid,
-			    u16 flags,
-			    struct netlink_ext_ack *extack)
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 vid, u16 flags, struct netlink_ext_ack *extack)
+#elif defined(HAVE_NDO_FDB_ADD_VID)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 vid, u16 flags)
+#elif defined(HAVE_NDO_FDB_ADD_NLATTR)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 flags)
+#elif defined(USE_CONST_DEV_UC_CHAR)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			    const unsigned char *addr, u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			    unsigned char *addr, u16 flags)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_pf *pf = np->vsi->back;
@@ -12293,11 +12601,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 	if (!(pf->flags &
I40E_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; - if (vid) { - pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); - return -EINVAL; - } - /* Hardware does not support aging addresses so if a * ndm_state is given only allow permanent addresses */ @@ -12320,125 +12623,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -/** - * i40e_ndo_bridge_setlink - Set the hardware bridge mode - * @dev: the netdev being configured - * @nlh: RTNL message - * @flags: bridge flags - * @extack: netlink extended ack - * - * Inserts a new hardware bridge if not already created and - * enables the bridging mode requested (VEB or VEPA). If the - * hardware bridge has already been inserted and the request - * is to change the mode then that requires a PF reset to - * allow rebuild of the components with required hardware - * bridge mode enabled. - * - * Note: expects to be called while under rtnl_lock() - **/ -static int i40e_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, - u16 flags, - struct netlink_ext_ack *extack) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = NULL; - struct nlattr *attr, *br_spec; - int i, rem; - - /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) - return -EOPNOTSUPP; - - /* Find the HW bridge for PF VSI */ - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } - - br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); - - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; - - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - - mode = nla_get_u16(attr); - if ((mode != BRIDGE_MODE_VEPA) && - (mode != BRIDGE_MODE_VEB)) - return -EINVAL; - - /* Insert a new HW bridge */ - if (!veb) { - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, - vsi->tc_config.enabled_tc); - if (veb) { - veb->bridge_mode = mode; - i40e_config_bridge_mode(veb); - } else { - /* No Bridge HW offload available */ - return -ENOENT; - } - break; - } else if (mode != veb->bridge_mode) { - /* Existing HW bridge but different mode needs reset */ - veb->bridge_mode = mode; - /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ - if (mode == BRIDGE_MODE_VEB) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - else - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); - break; - } - } - - return 0; -} - -/** - * i40e_ndo_bridge_getlink - Get the hardware bridge mode - * @skb: skb buff - * @pid: process id - * @seq: RTNL message seq # - * @dev: the netdev being configured - * @filter_mask: unused - * @nlflags: netlink flags passed in - * - * Return the mode in which the hardware bridge is operating in - * i.e VEB or VEPA. 
- **/ -static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, - u32 __always_unused filter_mask, - int nlflags) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = NULL; - int i; - - /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) - return -EOPNOTSUPP; - - /* Find the HW bridge for the PF VSI */ - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } - - if (!veb) - return 0; - - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, - 0, 0, nlflags, filter_mask, NULL); -} - +#ifdef HAVE_NDO_FEATURES_CHECK /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff @@ -12497,56 +12682,7 @@ out_err: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } -/** - * i40e_xdp_setup - add/remove an XDP program - * @vsi: VSI to changed - * @prog: XDP program - **/ -static int i40e_xdp_setup(struct i40e_vsi *vsi, - struct bpf_prog *prog) -{ - int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - struct i40e_pf *pf = vsi->back; - struct bpf_prog *old_prog; - bool need_reset; - int i; - - /* Don't allow frames that span over multiple buffers */ - if (frame_size > vsi->rx_buf_len) - return -EINVAL; - - if (!i40e_enabled_xdp_vsi(vsi) && !prog) - return 0; - - /* When turning XDP on->off/off->on we reset and rebuild the rings. */ - need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); - - if (need_reset) - i40e_prep_for_reset(pf, true); - - old_prog = xchg(&vsi->xdp_prog, prog); - - if (need_reset) - i40e_reset_and_rebuild(pf, true, true); - - for (i = 0; i < vsi->num_queue_pairs; i++) - WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); - - if (old_prog) - bpf_prog_put(old_prog); - - /* Kick start the NAPI context if there is an AF_XDP socket open - * on that queue id. This so that receiving will start. - */ - if (need_reset && prog) - for (i = 0; i < vsi->num_queue_pairs; i++) - if (vsi->xdp_rings[i]->xsk_umem) - (void)i40e_xsk_wakeup(vsi->netdev, i, - XDP_WAKEUP_RX); - - return 0; -} - +#ifdef HAVE_XDP_SUPPORT /** * i40e_enter_busy_conf - Enters busy config state * @vsi: vsi @@ -12604,13 +12740,8 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) { i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); - if (i40e_enabled_xdp_vsi(vsi)) { - /* Make sure that in-progress ndo_xdp_xmit calls are - * completed. - */ - synchronize_rcu(); + if (i40e_enabled_xdp_vsi(vsi)) i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); - } i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); } @@ -12803,12 +12934,58 @@ int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) } /** - * i40e_xdp - implements ndo_bpf for i40e + * i40e_xdp_setup - add/remove an XDP program + * @vsi: VSI to changed + * @prog: XDP program + **/ +static int i40e_xdp_setup(struct i40e_vsi *vsi, + struct bpf_prog *prog) +{ + int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct i40e_pf *pf = vsi->back; + struct bpf_prog *old_prog; + bool need_reset; + int i; + + /* Don't allow frames that span over multiple buffers */ + if (frame_size > vsi->rx_buf_len) + return -EINVAL; + + if (!i40e_enabled_xdp_vsi(vsi) && !prog) + return 0; + + /* When turning XDP on->off/off->on we reset and rebuild the rings. 
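+	 * Swapping one loaded program for another, by contrast, needs no
+	 * reset: the xchg() below just publishes the new bpf_prog pointer,
+	 * which each Rx ring then picks up through its own xdp_prog copy.
+	 * (A program is typically attached from userspace with, e.g.,
+	 * "ip link set dev <ifname> xdp obj prog.o" - the interface and
+	 * object names here are only examples.)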
*/ + need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); + + if (need_reset) + i40e_prep_for_reset(pf, true); + + old_prog = xchg(&vsi->xdp_prog, prog); + + if (need_reset) + i40e_reset_and_rebuild(pf, true, true); + + for (i = 0; i < vsi->num_queue_pairs; i++) + WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +/** + * i40e_xdp - implements ndo_xdp for i40e * @dev: netdevice * @xdp: XDP command **/ +#ifdef HAVE_NDO_BPF static int i40e_xdp(struct net_device *dev, struct netdev_bpf *xdp) +#else +static int i40e_xdp(struct net_device *dev, + struct netdev_xdp *xdp) +#endif { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -12820,55 +12997,400 @@ static int i40e_xdp(struct net_device *dev, case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; - case XDP_SETUP_XSK_UMEM: - return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, - xdp->xsk.queue_id); default: return -EINVAL; } } +#endif /* HAVE_XDP_SUPPORT */ +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifndef USE_DEFAULT_FDB_DEL_DUMP +#if defined(HAVE_NDO_FDB_ADD_VID) +static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 vid) +#elif defined(HAVE_FDB_DEL_NLATTR) +static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#elif defined(USE_CONST_DEV_UC_CHAR) +static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#else +static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif /* HAVE_NDO_FDB_ADD_VID */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + int err = -EOPNOTSUPP; + if (ndm->ndm_state & NUD_PERMANENT) { + netdev_info(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + if (is_unicast_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + } + + return err; +} + +static int i40e_ndo_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) + idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + + return idx; +} + +#endif /* USE_DEFAULT_FDB_DEL_DUMP */ +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * @flags: bridge flags + * @extack: netdev extended ack structure + * + * Inserts a new hardware bridge if not already created and + * enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. 
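+ *
+ * In VEB mode the hardware bridge switches traffic between VSIs on the
+ * same uplink locally; in VEPA mode all traffic is hairpinned through
+ * the adjacent switch port. Userspace selects the mode with iproute2,
+ * e.g. "bridge link set dev <ifname> hwmode veb" (the interface name
+ * is only an example).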
+ * + * Note: expects to be called while under rtnl_lock() + **/ +#if defined(HAVE_NDO_BRIDGE_SETLINK_EXTACK) +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags, + struct netlink_ext_ack *extack) +#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS) +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags) +#else +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + if (mode == BRIDGE_MODE_VEB) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * @nlflags: netlink flags passed in + * + * Return the mode in which the hardware bridge is operating in + * i.e VEB or VEPA. 
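+ * (This is the value "bridge -d link show" reports; the netlink reply
+ * itself is assembled by the ndo_dflt_bridge_getlink() variant that
+ * matches the running kernel, selected below.)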
+ **/
+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev,
+				   u32 __always_unused filter_mask,
+				   int nlflags)
+#elif defined(HAVE_BRIDGE_FILTER)
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev,
+				   u32 __always_unused filter_mask)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev)
+#endif /* NDO_BRIDGE_STUFF */
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_veb *veb = NULL;
+	int i;
+
+	/* Only for PF VSI for now */
+	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+		return -EOPNOTSUPP;
+
+	/* Find the HW bridge for the PF VSI */
+	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+			veb = pf->veb[i];
+	}
+
+	if (!veb)
+		return 0;
+
+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       0, 0, nlflags, filter_mask, NULL);
+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       0, 0, nlflags);
+#elif defined(HAVE_NDO_FDB_ADD_VID) || \
+	defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       0, 0);
+#else
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+#endif /* HAVE_NDO_BRIDGE_XX */
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
+
+#ifdef HAVE_NET_DEVICE_OPS
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
 	.ndo_stop		= i40e_close,
 	.ndo_start_xmit		= i40e_lan_xmit_frame,
+#if defined(HAVE_NDO_GET_STATS64) || defined(HAVE_VOID_NDO_GET_STATS64)
 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
+#else
+	.ndo_get_stats		= i40e_get_netdev_stats_struct,
+#endif
 	.ndo_set_rx_mode	= i40e_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= i40e_set_mac,
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+	.extended.ndo_change_mtu = i40e_change_mtu,
+#else
 	.ndo_change_mtu		= i40e_change_mtu,
+#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
 	.ndo_do_ioctl		= i40e_ioctl,
+#endif
 	.ndo_tx_timeout		= i40e_tx_timeout,
+#ifdef HAVE_VLAN_RX_REGISTER
+	.ndo_vlan_rx_register	= i40e_vlan_rx_register,
+#endif
 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= i40e_netpoll,
 #endif
+#ifdef HAVE_SETUP_TC
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
+	.extended.ndo_setup_tc_rh = __i40e_setup_tc,
+#else
+#ifdef NETIF_F_HW_TC
 	.ndo_setup_tc		= __i40e_setup_tc,
-	.ndo_set_features	= i40e_set_features,
+#else
+	.ndo_setup_tc		= i40e_setup_tc,
+#endif /* NETIF_F_HW_TC */
+#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */
+#endif /* HAVE_SETUP_TC */
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the
+ * function get_ndo_ext to retrieve offsets for extended fields from within
+ * the net_device_ops struct, and ndo_size is checked to determine whether
+ * the offset is valid.
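+ *
+ * Ops that RHEL7 relocated into the extended structure are therefore
+ * assigned through the .extended wrapper below, so a kernel that does
+ * not know a given op never dereferences an offset beyond ndo_size.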
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX .ndo_set_vf_mac = i40e_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, +#else .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, +#endif +#ifdef HAVE_VF_STATS + .ndo_get_vf_stats = i40e_get_vf_stats, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE .ndo_set_vf_rate = i40e_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, +#endif .ndo_get_vf_config = i40e_ndo_get_vf_config, - .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = i40e_ndo_set_vf_trust, +#else .ndo_set_vf_trust = i40e_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ +#endif /* IFLA_VF_MAX */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL + .extended.ndo_udp_tunnel_add = i40e_udp_tunnel_add, + .extended.ndo_udp_tunnel_del = i40e_udp_tunnel_del, +#else .ndo_udp_tunnel_add = i40e_udp_tunnel_add, .ndo_udp_tunnel_del = i40e_udp_tunnel_del, - .ndo_get_phys_port_id = i40e_get_phys_port_id, +#endif +#else /* !HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD +#if IS_ENABLED(CONFIG_VXLAN) + .ndo_add_vxlan_port = i40e_add_vxlan_port, + .ndo_del_vxlan_port = i40e_del_vxlan_port, +#endif +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_GENEVE_RX_OFFLOAD +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = i40e_add_geneve_port, + .ndo_del_geneve_port = i40e_del_geneve_port, +#endif +#endif /* HAVE_GENEVE_RX_OFFLOAD */ +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_NDO_GET_PHYS_PORT_ID + .ndo_get_phys_port_id = i40e_get_phys_port_id, +#endif /* HAVE_NDO_GET_PHYS_PORT_ID */ +#ifdef HAVE_FDB_OPS .ndo_fdb_add = i40e_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = i40e_ndo_fdb_del, + .ndo_fdb_dump = i40e_ndo_fdb_dump, +#endif +#ifdef HAVE_NDO_FEATURES_CHECK .ndo_features_check = i40e_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef HAVE_BRIDGE_ATTRIBS .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, - .ndo_xsk_wakeup = i40e_xsk_wakeup, - .ndo_dfwd_add_station = i40e_fwd_add, - .ndo_dfwd_del_station = i40e_fwd_del, +#else + .ndo_xdp = i40e_xdp, + .ndo_xdp_xmit = i40e_xdp_xmit, + .ndo_xdp_flush = i40e_xdp_flush, +#endif /* HAVE_NDO_BPF */ +#endif /* HAVE_XDP_SUPPORT */ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT }; +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext i40e_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = i40e_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, +#endif +}; +#else /* HAVE_NET_DEVICE_OPS */ +/** + * i40e_assign_netdev_ops - Initialize netdev operations function pointers + * @dev: ptr to the netdev struct + **/ +#ifdef HAVE_CONFIG_HOTPLUG +static void __devinit i40e_assign_netdev_ops(struct net_device *dev) +#else +static void i40e_assign_netdev_ops(struct net_device *dev) +#endif +{ + dev->open = 
i40e_open; + dev->stop = i40e_close; + dev->hard_start_xmit = i40e_lan_xmit_frame; + dev->get_stats = i40e_get_netdev_stats_struct; + +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = i40e_set_rx_mode; +#endif + dev->set_multicast_list = i40e_set_rx_mode; + dev->set_mac_address = i40e_set_mac; + dev->change_mtu = i40e_change_mtu; +#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL) + dev->do_ioctl = i40e_ioctl; +#endif + dev->tx_timeout = i40e_tx_timeout; +#ifdef NETIF_F_HW_VLAN_TX +#ifdef HAVE_VLAN_RX_REGISTER + dev->vlan_rx_register = i40e_vlan_rx_register; +#endif + dev->vlan_rx_add_vid = i40e_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = i40e_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = i40e_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE + dev->select_queue = i40e_lan_select_queue; +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +} +#endif /* HAVE_NET_DEVICE_OPS */ + /** * i40e_config_netdev - Setup the netdev flags * @vsi: the VSI being configured @@ -12898,48 +13420,112 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | +#ifdef NETIF_F_IPV6_CSUM NETIF_F_IPV6_CSUM | +#endif NETIF_F_HIGHDMA | +#ifdef NETIF_F_SOFT_FEATURES NETIF_F_SOFT_FEATURES | +#endif NETIF_F_TSO | +#ifdef HAVE_ENCAP_TSO_OFFLOAD NETIF_F_TSO_ECN | NETIF_F_TSO6 | +#ifdef HAVE_GRE_ENCAP_OFFLOAD NETIF_F_GSO_GRE | +#ifdef NETIF_F_GSO_PARTIAL NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | +#endif +#ifdef NETIF_F_GSO_IPXIP4 NETIF_F_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_IPXIP6 NETIF_F_GSO_IPXIP6 | +#endif +#else +#ifdef NETIF_F_GSO_IPIP + NETIF_F_GSO_IPIP | +#endif +#ifdef NETIF_F_GSO_SIT + NETIF_F_GSO_SIT | +#endif +#endif +#endif NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ NETIF_F_SCTP_CRC | +#ifdef NETIF_F_RXHASH NETIF_F_RXHASH | +#endif +#ifdef HAVE_NDO_SET_FEATURES NETIF_F_RXCSUM | +#endif 0; if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) +#ifndef NETIF_F_GSO_PARTIAL + hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM; +#else netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; +#endif +#ifdef HAVE_ENCAP_CSUM_OFFLOAD netdev->hw_enc_features |= hw_enc_features; +#endif +#ifdef HAVE_NETDEV_VLAN_FEATURES /* record features VLANs can make use of */ +#ifdef NETIF_F_GSO_PARTIAL netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; - - /* enable macvlan offloads */ - netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; +#else + netdev->vlan_features |= hw_enc_features; +#endif +#endif hw_features = hw_enc_features | +#ifdef NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; +#else /* NETIF_F_HW_VLAN_CTAG_RX */ + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; +#endif /* !NETIF_F_HW_VLAN_CTAG_RX */ +#if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) +#ifdef NETIF_F_HW_TC hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; +#else + hw_features |= NETIF_F_NTUPLE; +#endif +#endif +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features |= get_netdev_hw_features(netdev); + set_netdev_hw_features(netdev, hw_features); +#else netdev->hw_features |= hw_features; +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef NETIF_F_HW_VLAN_CTAG_RX netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; +#else + netdev->features |= hw_features | NETIF_F_HW_VLAN_FILTER; +#endif +#ifdef NETIF_F_GSO_PARTIAL netdev->hw_enc_features |= 
NETIF_F_TSO_MANGLEID;
+#endif
+
+#ifndef HAVE_NDO_SET_FEATURES
+#ifdef NETIF_F_GRO
+	netdev->features |= NETIF_F_GRO;
+#endif
+#endif
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -12993,23 +13579,47 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 	ether_addr_copy(netdev->dev_addr, mac_addr);
+#ifdef ETHTOOL_GPERMADDR
 	ether_addr_copy(netdev->perm_addr, mac_addr);
+#endif
 
-	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
-	netdev->neigh_priv_len = sizeof(u32) * 4;
+#ifdef HAVE_MPLS_FEATURES
+	if (pf->hw_features & I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE)
+		netdev->mpls_features = NETIF_F_HW_CSUM;
+#endif
 
+#ifdef IFF_UNICAST_FLT
 	netdev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+#ifdef IFF_SUPP_NOFCS
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
+#endif
 	/* Setup netdev TC information */
 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
 
+#ifdef HAVE_NET_DEVICE_OPS
 	netdev->netdev_ops = &i40e_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	set_netdev_ops_ext(netdev, &i40e_netdev_ops_ext);
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#else /* HAVE_NET_DEVICE_OPS */
+	i40e_assign_netdev_ops(netdev);
+#endif /* HAVE_NET_DEVICE_OPS */
 	netdev->watchdog_timeo = 5 * HZ;
+#ifdef SIOCETHTOOL
 	i40e_set_ethtool_ops(netdev);
+#endif
+
+#ifdef HAVE_NETDEVICE_MIN_MAX_MTU
 	/* MTU range: 68 - 9706 */
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+	netdev->extended->min_mtu = ETH_MIN_MTU;
+	netdev->extended->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+#else
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
+#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */
 
 	return 0;
 }
@@ -13051,6 +13661,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 		return -ENOENT;
 	}
 
+#ifdef HAVE_BRIDGE_ATTRIBS
 	/* Uplink is a bridge in VEPA mode */
 	if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
 		return 0;
@@ -13058,6 +13669,10 @@
 		/* Uplink is a bridge in VEB mode */
 		return 1;
 	}
+#else
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		return 1;
+#endif
 
 	/* VEPA is now default bridge, so return 0 */
 	return 0;
@@ -13082,6 +13697,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 	u8 enabled_tc = 0x1; /* TC0 enabled */
 	int f_count = 0;
+	u32 val;
 
 	memset(&ctxt, 0, sizeof(ctxt));
 	switch (vsi->type) {
@@ -13122,7 +13738,7 @@
 		ctxt.pf_num = pf->hw.pf_id;
 		ctxt.vf_num = 0;
 		ctxt.info.valid_sections |=
-			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+			 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 		ctxt.info.switch_id =
 			cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
@@ -13131,7 +13747,7 @@
 				 "update vsi failed, err %s aq_err %s\n",
 				 i40e_stat_str(&pf->hw, ret),
 				 i40e_aq_str(&pf->hw,
-					     pf->hw.aq.asq_last_status));
+					      pf->hw.aq.asq_last_status));
 			ret = -ENOENT;
 			goto err;
 		}
@@ -13278,6 +13894,10 @@
 		vsi->info.valid_sections = 0;
 		vsi->seid = ctxt.seid;
 		vsi->id = ctxt.vsi_number;
+		val = rd32(&pf->hw, 0x208800 + (4 * (vsi->id)));
+		if (!(val & 0x1)) /* MACVSIPRUNEENABLE = 1 */
+			dev_warn(&vsi->back->pdev->dev,
+				 "Note: VSI source pruning is not being set correctly by FW\n");
 	}
 
 	vsi->active_filters = 0;
@@ -13443,6 +14063,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
 		goto vector_setup_out;
 	}
 
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
	/*
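	 * (When the driver is built with I40E_LEGACY_INTERRUPT or
	 * I40E_MSI_INTERRUPT defined, this vector lookup is compiled out
	 * entirely.)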
In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ @@ -13460,6 +14081,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) goto vector_setup_out; } +#endif vector_setup_out: return ret; } @@ -13557,7 +14179,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, { struct i40e_vsi *vsi = NULL; struct i40e_veb *veb = NULL; - u16 alloc_queue_pairs; int ret, i; int v_idx; @@ -13607,6 +14228,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; } +#ifdef HAVE_BRIDGE_ATTRIBS /* We come up by default in VEPA mode if SRIOV is not * already enabled, in which case we can't force VEPA * mode. @@ -13615,6 +14237,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, veb->bridge_mode = BRIDGE_MODE_VEPA; pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; } +#endif i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { @@ -13645,14 +14268,12 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, else if (type == I40E_VSI_SRIOV) vsi->vf_id = param1; /* assign it some queues */ - alloc_queue_pairs = vsi->alloc_queue_pairs * - (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); - - ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, + vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err=%d\n", - alloc_queue_pairs, vsi->seid, ret); + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -13675,10 +14296,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, goto err_netdev; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); -#ifdef CONFIG_I40E_DCB + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(vsi->netdev); +#ifdef CONFIG_DCB +#ifdef HAVE_DCBNL_IEEE /* Setup DCB netlink interface */ i40e_dcbnl_setup(vsi); -#endif /* CONFIG_I40E_DCB */ +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ /* fall through */ case I40E_VSI_FDIR: @@ -13761,16 +14386,16 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) goto out; } - veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); + veb->bw_limit = LE16_TO_CPU(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; veb->enabled_tc = ets_data.tc_valid_bits; - tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | - (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); + tc_bw_max = LE16_TO_CPU(bw_data.tc_bw_max[0]) | + (LE16_TO_CPU(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; veb->bw_tc_limit_credits[i] = - le16_to_cpu(bw_data.tc_bw_limits[i]); + LE16_TO_CPU(bw_data.tc_bw_limits[i]); veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); } @@ -14318,11 +14943,17 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? 
true : false); +#ifdef HAVE_PTP_1588_CLOCK i40e_ptp_init(pf); +#endif /* HAVE_PTP_1588_CLOCK */ +#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD) +#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD) /* repopulate tunnel port filters */ i40e_sync_udp_filters(pf); +#endif /* HAVE_UDP_ENC_TUNNEL || HAVE_UDP_ENC_RX_OFFLOAD */ +#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */ return ret; } @@ -14333,7 +14964,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) static void i40e_determine_queue_usage(struct i40e_pf *pf) { int queues_left; - int q_max; pf->num_lan_qps = 0; @@ -14376,18 +15006,31 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else { /* Not enough queues for all TCs */ + bool is_max_n_of_queues_required; + if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && (queues_left < I40E_MAX_TRAFFIC_CLASS)) { pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } - - /* limit lan qps to the smaller of qps, cpus or msix */ - q_max = max_t(int, pf->rss_size_max, num_online_cpus()); - q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); - q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); - pf->num_lan_qps = q_max; + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + is_max_n_of_queues_required = pf->hw.func_caps.num_tx_qp < + pf->num_req_vfs * pf->num_vf_qps + pf->num_vf_qps; + if (is_max_n_of_queues_required) + dev_warn(&pf->pdev->dev, "not enough %u queues for PF and %u VFs. Using maximum available queues for PF.\n", + pf->hw.func_caps.num_tx_qp, pf->num_req_vfs); + if (pf->hw.func_caps.npar_enable || + is_max_n_of_queues_required) + pf->num_lan_qps = min_t + (int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + else + pf->num_lan_qps = min_t + (int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp - + pf->num_req_vfs * pf->num_vf_qps); queues_left -= pf->num_lan_qps; } @@ -14484,12 +15127,22 @@ static void i40e_print_features(struct i40e_pf *pf) i += snprintf(&buf[i], REMAIN(i), " FD_SB"); i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); } + i += snprintf(&buf[i], REMAIN(i), " CloudF"); if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); +#if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); +#endif +#if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); +#endif +#ifdef HAVE_GRE_ENCAP_OFFLOAD + i += snprintf(&buf[i], REMAIN(i), " NVGRE"); +#endif +#ifdef HAVE_PTP_1588_CLOCK if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); +#endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i += snprintf(&buf[i], REMAIN(i), " VEB"); else @@ -14516,29 +15169,6 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); } -/** - * i40e_set_fec_in_flags - helper function for setting FEC options in flags - * @fec_cfg: FEC option to set in flags - * @flags: ptr to flags in which we set FEC option - **/ -void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) -{ - if (fec_cfg & I40E_AQ_SET_FEC_AUTO) - *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; - if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || - (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { - *flags |= I40E_FLAG_RS_FEC; - *flags &= ~I40E_FLAG_BASE_R_FEC; - } - if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || - (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { - *flags |= I40E_FLAG_BASE_R_FEC; 
-		*flags &= ~I40E_FLAG_RS_FEC;
-	}
-	if (fec_cfg == 0)
-		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
-}
-
 /**
  * i40e_check_recovery_mode - check if we are running transition firmware
  * @pf: board private structure
@@ -14550,28 +15180,17 @@
  **/
 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
 {
-	u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
-	bool is_recovery_mode = false;
+	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
 
-	if (pf->hw.mac.type == I40E_MAC_XL710)
-		is_recovery_mode =
-		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
-		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
-		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
-		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
-	if (pf->hw.mac.type == I40E_MAC_X722)
-		is_recovery_mode =
-		val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
-		val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
-	if (is_recovery_mode) {
-		dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
-		dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
 		set_bit(__I40E_RECOVERY_MODE, pf->state);
 		return true;
 	}
-	if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
-		dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
 
 	return false;
 }
@@ -14597,30 +15216,71 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
  *
  * Return 0 on success, negative on failure.
  **/
-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+static i40e_status i40e_pf_loop_reset(struct i40e_pf * const pf)
 {
-	const unsigned short MAX_CNT = 1000;
-	const unsigned short MSECS = 10;
+	/* wait max 10 seconds for PF reset to succeed */
+	const unsigned long time_end = jiffies + 10 * HZ;
+	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
-	int cnt;
 
-	for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+	ret = i40e_pf_reset(hw);
+	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+		usleep_range(10000, 20000);
 		ret = i40e_pf_reset(hw);
-		if (!ret)
-			break;
-		msleep(MSECS);
 	}
 
-	if (cnt == MAX_CNT) {
+	if (ret == I40E_SUCCESS)
+		pf->pfr_count++;
+	else
 		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
-		return ret;
-	}
 
-	pf->pfr_count++;
 	return ret;
 }
 
+/**
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
+ * @pf: board private structure
+ *
+ * Check FW registers to determine if FW issued unexpected EMP Reset.
+ * Each time an unexpected EMP Reset occurs, the FW increments
+ * a counter of unexpected EMP Resets.
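+ * (The counter is read back below from the FWS1B field of I40E_GL_FWSTS
+ * and compared against the EMPR_0..EMPR_10 bounds.)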
When the counter reaches 10 + * the FW should enter the Recovery mode + * + * Returns true if FW issued unexpected EMP Reset + **/ +static inline bool i40e_check_fw_empr(struct i40e_pf * const pf) +{ + const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & + I40E_GL_FWSTS_FWS1B_MASK; + const bool is_empr = (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) && + (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10); + + return is_empr; +} + +/** + * i40e_handle_resets - handle EMP resets and PF resets + * @pf: board private structure + * + * Handle both EMP resets and PF resets and conclude whether there are + * any issues regarding these resets. If there are any issues then + * generate log entry. + * + * Return 0 if NIC is healthy or negative value when there are issues + * with resets + **/ +static inline i40e_status i40e_handle_resets(struct i40e_pf * const pf) +{ + const i40e_status pfr = i40e_pf_loop_reset(pf); + const bool is_empr = i40e_check_fw_empr(pf); + + if (is_empr || pfr != I40E_SUCCESS) + dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); + + return is_empr ? I40E_ERR_RESET_FAILED : pfr; +} + /** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure @@ -14637,7 +15297,9 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) int err; int v_idx; +#ifdef HAVE_PCI_ERS pci_save_state(pf->pdev); +#endif /* set up periodic task facility */ timer_setup(&pf->service_timer, i40e_service_timer, 0); @@ -14704,14 +15366,32 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) err_switch_setup: i40e_reset_interrupt_capability(pf); del_timer_sync(&pf->service_timer); - i40e_shutdown_adminq(hw); - iounmap(hw->hw_addr); - pci_disable_pcie_error_reporting(pf->pdev); - pci_release_mem_regions(pf->pdev); - pci_disable_device(pf->pdev); - kfree(pf); + dev_warn(&pf->pdev->dev, "previous errors forcing module to load in debug mode\n"); + i40e_dbg_pf_init(pf); + set_bit(__I40E_DEBUG_MODE, pf->state); + return 0; +} - return err; +/** + * i40e_set_fec_in_flags - helper function for setting FEC options in flags + * @fec_cfg: FEC option to set in flags + * @flags: ptr to flags in which we set FEC option + **/ +void i40e_set_fec_in_flags(u8 fec_cfg, u64 *flags) +{ + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) + *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; + else if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || + (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { + *flags |= I40E_FLAG_RS_FEC; + *flags &= ~I40E_FLAG_BASE_R_FEC; + } else if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || + (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { + *flags |= I40E_FLAG_BASE_R_FEC; + *flags &= ~I40E_FLAG_RS_FEC; + } + if (fec_cfg == 0) + *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); } /** @@ -14725,15 +15405,24 @@ err_switch_setup: * * Returns 0 on success, negative on failure **/ +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit i40e_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +#else static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +#endif { struct i40e_aq_get_phy_abilities_resp abilities; +#ifdef CONFIG_DCB + enum i40e_get_fw_lldp_status_resp lldp_status; + i40e_status status; +#endif /* CONFIG_DCB */ struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; - int err; + int err = 0; u32 val; u32 i; u8 set_fc_aq_fail; @@ -14757,7 +15446,7 @@ static int 
i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, - "pci_request_selected_regions failed %d\n", err); + "pci_request_mem_regions failed %d\n", err); goto err_pci_reg; } @@ -14776,6 +15465,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } pf->next_vsi = 0; pf->pdev = pdev; + pci_set_drvdata(pdev, pf); set_bit(__I40E_DOWN, pf->state); hw = &pf->hw; @@ -14823,31 +15513,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&pf->l4_flex_pit_list); INIT_LIST_HEAD(&pf->ddp_old_prof); - /* set up the locks for the AQ, do this only once in probe + /* set up the spinlocks for the AQ, do this only once in probe * and destroy them only once in remove */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); + i40e_init_spinlock_d(&hw->aq.asq_spinlock); + i40e_init_spinlock_d(&hw->aq.arq_spinlock); - pf->msg_enable = netif_msg_init(debug, - NETIF_MSG_DRV | - NETIF_MSG_PROBE | - NETIF_MSG_LINK); - if (debug < -1) - pf->hw.debug_mask = debug; - - /* do a special CORER for clearing PXE mode once at init */ - if (hw->revision_id == 0 && - (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { - wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); - i40e_flush(hw); - msleep(200); - pf->corer_count++; - - i40e_clear_pxe_mode(hw); - } + if (debug != -1) + pf->msg_enable = pf->hw.debug_mask = debug; /* Reset here to make sure all is clean and to define PF 'n' */ + /* have to do the PF reset first to "graceful abort" all queues */ i40e_clear_hw(hw); err = i40e_set_mac_type(hw); @@ -14857,11 +15533,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pf_reset; } - err = i40e_pf_loop_reset(pf); - if (err) { - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + err = i40e_handle_resets(pf); + if (err) goto err_pf_reset; - } i40e_check_recovery_mode(pf); @@ -14902,12 +15576,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } i40e_get_oem_version(hw); - /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */ - dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", + /* provide nvm, fw, api versions */ + dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, - i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, - hw->subsystem_vendor_id, hw->subsystem_device_id); + i40e_nvm_version_str(hw)); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) @@ -14983,9 +15656,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_valid_ether_addr(hw->mac.port_addr)) pf->hw_features |= I40E_HW_PORT_ID_VALID; - pci_set_drvdata(pdev, pf); +#ifdef HAVE_PCI_ERS pci_save_state(pdev); - +#endif +#ifdef CONFIG_DCB + status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); + (status == I40E_SUCCESS && + lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ? + (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : + (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); dev_info(&pdev->dev, (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? 
"FW LLDP is disabled\n" : @@ -14994,14 +15673,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Enable FW to write default DCB config on link-up */ i40e_aq_set_dcb_parameters(hw, true, NULL); -#ifdef CONFIG_I40E_DCB err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); /* Continue without DCB enabled */ } -#endif /* CONFIG_I40E_DCB */ +#endif /* CONFIG_DCB */ /* set up periodic task facility */ timer_setup(&pf->service_timer, i40e_service_timer, 0); @@ -15012,7 +15690,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) + if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; @@ -15049,6 +15727,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; +#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE) + else if (pf->num_req_vfs) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; +#endif } #endif err = i40e_setup_pf_switch(pf, false); @@ -15056,6 +15738,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + if (i40e_is_l4mode_enabled() && hw->pf_id == 0) { + u8 l4type = I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; + + switch (l4mode) { + case L4_MODE_UDP: + l4type = I40E_AQ_SET_SWITCH_L4_TYPE_UDP; + break; + case L4_MODE_TCP: + l4type = I40E_AQ_SET_SWITCH_L4_TYPE_TCP; + break; + } + i40e_set_switch_mode(pf, l4type); + } INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); /* Make sure flow control is set according to current settings */ @@ -15155,6 +15850,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "Error %d allocating resources for existing VFs\n", err); +#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE) + } else if (pf->num_req_vfs) { + err = i40e_alloc_vfs(pf, pf->num_req_vfs); + if (err) { + pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + dev_info(&pdev->dev, + "failed to alloc vfs: %d\n", err); + } +#endif /* HAVE_SRIOV_CONFIGURE */ } } #endif /* CONFIG_PCI_IOV */ @@ -15171,6 +15875,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + pfs_found++; i40e_dbg_pf_init(pf); /* tell the firmware that we're starting */ @@ -15187,7 +15892,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", err); } - #define PCI_SPEED_SIZE 8 #define PCI_WIDTH_SIZE 8 /* Devices on the IOSF bus do not have this information @@ -15267,7 +15971,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->main_vsi_seid); if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || - (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) + (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; @@ -15292,7 +15996,10 @@ err_init_lan_hmc: err_sw_init: err_adminq_setup: err_pf_reset: - iounmap(hw->hw_addr); + dev_warn(&pdev->dev, "previous errors 
forcing module to load in debug mode\n"); + i40e_dbg_pf_init(pf); + set_bit(__I40E_DEBUG_MODE, pf->state); + return err; err_ioremap: kfree(pf); err_pf_alloc: @@ -15313,7 +16020,11 @@ err_dma: * Hot-Plug event, or because the driver is going to be removed from * memory. **/ +#ifdef HAVE_CONFIG_HOTPLUG +static void __devexit i40e_remove(struct pci_dev *pdev) +#else static void i40e_remove(struct pci_dev *pdev) +#endif { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; @@ -15321,13 +16032,14 @@ static void i40e_remove(struct pci_dev *pdev) int i; i40e_dbg_pf_exit(pf); - +#ifdef HAVE_PTP_1588_CLOCK i40e_ptp_stop(pf); /* Disable RSS in hw */ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); +#endif /* HAVE_PTP_1588_CLOCK */ /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); @@ -15335,7 +16047,12 @@ static void i40e_remove(struct pci_dev *pdev) del_timer_sync(&pf->service_timer); if (pf->service_task.func) cancel_work_sync(&pf->service_task); - + /* Client close must be called explicitly here because the timer + * has been stopped. + */ + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + if (test_bit(__I40E_DEBUG_MODE, pf->state)) + goto debug_mode_clear; if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { struct i40e_vsi *vsi = pf->vsi[0]; @@ -15345,15 +16062,11 @@ static void i40e_remove(struct pci_dev *pdev) */ unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); + vsi->netdev = NULL; goto unmap; } - /* Client close must be called explicitly here because the timer - * has been stopped. - */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; @@ -15399,18 +16112,16 @@ static void i40e_remove(struct pci_dev *pdev) } unmap: - /* Free MSI/legacy interrupt 0 when in recovery mode. */ + /* Free MSI/legacy interrupt 0 when in recovery mode. 
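+	/* Free MSI/legacy interrupt 0 when in recovery mode
+	 * (it was requested directly on pf->pdev->irq with the PF itself
+	 * as the dev_id cookie, matching the free_irq() call below).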
+ * This is normally done in i40e_vsi_free_irq on + * VSI close but since recovery mode doesn't allow to up + * an interface and we do not allocate all Rx/Tx resources + * for it we'll just do it here + */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && !(pf->flags & I40E_FLAG_MSIX_ENABLED)) free_irq(pf->pdev->irq, pf); - /* shutdown the adminq */ - i40e_shutdown_adminq(hw); - - /* destroy the locks only once, here */ - mutex_destroy(&hw->aq.arq_mutex); - mutex_destroy(&hw->aq.asq_mutex); - /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ rtnl_lock(); i40e_clear_interrupt_scheme(pf); @@ -15424,6 +16135,14 @@ unmap: } rtnl_unlock(); +debug_mode_clear: + /* shutdown the adminq */ + i40e_shutdown_adminq(hw); + + /* destroy the locks only once, here */ + i40e_destroy_spinlock_d(&hw->aq.arq_spinlock); + i40e_destroy_spinlock_d(&hw->aq.asq_spinlock); + for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); pf->veb[i] = NULL; @@ -15440,6 +16159,7 @@ unmap: pci_disable_device(pdev); } +#ifdef HAVE_PCI_ERS /** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct @@ -15458,7 +16178,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, if (!pf) { dev_info(&pdev->dev, - "Cannot recover - error happened during device probe\n"); + "Cannot recover -error happened during device probe\n"); return PCI_ERS_RESULT_DISCONNECT; } @@ -15483,6 +16203,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); pci_ers_result_t result; + int err; u32 reg; dev_dbg(&pdev->dev, "%s\n", __func__); @@ -15503,9 +16224,18 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_DISCONNECT; } + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_info(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); + /* non-fatal, continue */ + } + return result; } +#if defined(HAVE_PCI_ERROR_HANDLER_RESET_PREPARE) || defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) || defined(HAVE_RHEL7_PCI_RESET_NOTIFY) /** * i40e_pci_error_reset_prepare - prepare device driver for pci reset * @pdev: PCI device information struct @@ -15528,6 +16258,26 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) i40e_reset_and_rebuild(pf, false, false); } +#endif +#if defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) || defined(HAVE_RHEL7_PCI_RESET_NOTIFY) +/** + * i40e_pci_error_reset_notify - notify device driver of pci reset + * @pdev: PCI device information struct + * @prepare: true if device is about to be reset; false if reset attempt + * completed + * + * Called to perform pf reset when a pci function level reset is triggered + **/ +static void i40e_pci_error_reset_notify(struct pci_dev *pdev, bool prepare) +{ + dev_dbg(&pdev->dev, "%s\n", __func__); + if (prepare) + i40e_pci_error_reset_prepare(pdev); + else + i40e_pci_error_reset_done(pdev); +} + +#endif /** * i40e_pci_error_resume - restart operations after PCI error recovery * @pdev: PCI device information struct @@ -15593,6 +16343,11 @@ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) "Failed to enable Multicast Magic Packet wake up\n"); } +/* FW state indicating on X722 that we need to disable WoL to + * allow adapter to shutdown completely + */ +#define I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED 0x0F + /** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct @@ -15601,10 +16356,14 @@ static void 
i40e_shutdown(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + u32 val = 0; set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); + if (test_bit(__I40E_DEBUG_MODE, pf->state)) + goto debug_mode; + del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); i40e_cloud_filter_exit(pf); @@ -15615,17 +16374,42 @@ static void i40e_shutdown(struct pci_dev *pdev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) - i40e_enable_mc_magic_wake(pf); + val = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS0B_MASK; - i40e_prep_for_reset(pf, false); + if (pf->hw.mac.type == I40E_MAC_X722) { + /* We check here if we need to disable the WoL to allow adapter + * to shutdown completely after a FW update + */ + if (val != I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED && + pf->wol_en) { + if (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE) + i40e_enable_mc_magic_wake(pf); - wr32(hw, I40E_PFPM_APM, - (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); - wr32(hw, I40E_PFPM_WUFC, - (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_prep_for_reset(pf, false); - /* Free MSI/legacy interrupt 0 when in recovery mode. */ + wr32(hw, I40E_PFPM_APM, I40E_PFPM_APM_APME_MASK); + wr32(hw, I40E_PFPM_WUFC, I40E_PFPM_WUFC_MAG_MASK); + } else { + i40e_prep_for_reset(pf, false); + + wr32(hw, I40E_PFPM_APM, 0); + wr32(hw, I40E_PFPM_WUFC, 0); + } + } else { + i40e_prep_for_reset(pf, false); + + wr32(hw, I40E_PFPM_APM, + (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, + (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + } + + /* Free MSI/legacy interrupt 0 when in recovery mode. + * This is normally done in i40e_vsi_free_irq on + * VSI close but since recovery mode doesn't allow to up + * an interface and we do not allocate all Rx/Tx resources + * for it we'll just do it here + */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && !(pf->flags & I40E_FLAG_MSIX_ENABLED)) free_irq(pf->pdev->irq, pf); @@ -15638,19 +16422,27 @@ static void i40e_shutdown(struct pci_dev *pdev) i40e_clear_interrupt_scheme(pf); rtnl_unlock(); - if (system_state == SYSTEM_POWER_OFF) { +debug_mode: + + if (pf->hw.mac.type == I40E_MAC_X722 && + val == I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED) { + pci_wake_from_d3(pdev, false); + device_set_wakeup_enable(&pdev->dev, false); + } else if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); - pci_set_power_state(pdev, PCI_D3hot); } + pci_set_power_state(pdev, PCI_D3hot); } +#ifdef CONFIG_PM /** * i40e_suspend - PM callback for moving to D3 * @dev: generic device information structure **/ -static int __maybe_unused i40e_suspend(struct device *dev) +static int i40e_suspend(struct device *dev) { - struct i40e_pf *pf = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ @@ -15668,6 +16460,9 @@ static int __maybe_unused i40e_suspend(struct device *dev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + if (test_bit(__I40E_DEBUG_MODE, pf->state)) + return 0; + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); @@ -15698,9 +16493,10 @@ static int __maybe_unused i40e_suspend(struct device *dev) * i40e_resume - PM callback for waking up from D3 * @dev: generic device information structure **/ -static int 
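The X722 branch in the shutdown path above keys off the FWS0B field of GL_FWSTS: when it reports the post-FW-update stage that requires a power-on reset (0x0F), WoL is left disarmed so the part can actually power down. A standalone sketch of that field test; the mask value here is illustrative, the driver takes the real one from i40e_register.h:

#include <stdint.h>
#include <stdio.h>

#define GL_FWSTS_FWS0B_MASK                0x000000FFu
#define FWS0B_STAGE_FW_UPDATE_POR_REQUIRED 0x0Fu

static int wol_must_stay_off(uint32_t gl_fwsts)
{
	return (gl_fwsts & GL_FWSTS_FWS0B_MASK) ==
	       FWS0B_STAGE_FW_UPDATE_POR_REQUIRED;
}

int main(void)
{
	printf("%d\n", wol_must_stay_off(0x0000000F)); /* 1: keep WoL off */
	printf("%d\n", wol_must_stay_off(0x00000003)); /* 0: normal path  */
	return 0;
}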
__maybe_unused i40e_resume(struct device *dev)
+static int i40e_resume(struct device *dev)
 {
-	struct i40e_pf *pf = dev_get_drvdata(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	int err;
 
 	/* If we're not suspended, then there is nothing to do */
@@ -15717,7 +16513,7 @@ static int __maybe_unused i40e_resume(struct device *dev)
 	 */
 	err = i40e_restore_interrupt_scheme(pf);
 	if (err) {
-		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
 			err);
 	}
 
@@ -15736,27 +16532,131 @@ static int __maybe_unused i40e_resume(struct device *dev)
 	return 0;
 }
 
+#ifdef USE_LEGACY_PM_SUPPORT
+/**
+ * i40e_legacy_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ * @state: PCI power state
+ *
+ * Legacy suspend handler for older kernels which do not support the newer
+ * generic callbacks.
+ **/
+static int i40e_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int retval = i40e_suspend(&pdev->dev);
+
+	/* Some older kernels may not handle state correctly for legacy power
+	 * management, so we'll handle it here ourselves.
+	 */
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	pci_disable_device(pdev);
+	pci_wake_from_d3(pdev, pf->wol_en);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return retval;
+}
+
+/**
+ * i40e_legacy_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ *
+ * Legacy resume handler for kernels which do not support the newer generic
+ * callbacks.
+ **/
+static int i40e_legacy_resume(struct pci_dev *pdev)
+{
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/* pci_restore_state() clears dev->state_saved, so
+	 * call pci_save_state() again to restore it.
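For contrast with this legacy path, the non-legacy branch hands the same suspend/resume pair to the PCI core through dev_pm_ops, and the core then handles D-state transitions and config space itself. A minimal sketch with placeholder callbacks, not the driver's actual table:

/* my_suspend/my_resume are hypothetical stand-ins for the driver callbacks. */
static int my_suspend(struct device *dev) { /* quiesce the device */ return 0; }
static int my_resume(struct device *dev)  { /* bring it back up */  return 0; }

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.driver = {
		.pm = &my_pm_ops,	/* PCI core does pci_save_state() etc. */
	},
};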
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(pci_dev_to_dev(pdev), "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + /* no wakeup events while running */ + pci_wake_from_d3(pdev, false); + + return i40e_resume(&pdev->dev); +} +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif /* CONFIG_PM */ + +#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS static const struct pci_error_handlers i40e_err_handler = { +#else +static struct pci_error_handlers i40e_err_handler = { +#endif .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY + .reset_notify = i40e_pci_error_reset_notify, +#endif +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE .reset_prepare = i40e_pci_error_reset_prepare, .reset_done = i40e_pci_error_reset_done, +#endif .resume = i40e_pci_error_resume, }; +#if defined(HAVE_RHEL6_SRIOV_CONFIGURE) || defined(HAVE_RHEL7_PCI_DRIVER_RH) +static struct pci_driver_rh i40e_driver_rh = { +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE + .sriov_configure = i40e_pci_sriov_configure, +#elif defined(HAVE_RHEL7_PCI_RESET_NOTIFY) + .reset_notify = i40e_pci_error_reset_notify, +#endif +}; + +#endif +#endif /* HAVE_PCI_ERS */ +#if defined(CONFIG_PM) && !defined(USE_LEGACY_PM_SUPPORT) static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); +#endif /* CONFIG_PM && !USE_LEGACY_PM_SUPPORT */ static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, +#ifdef HAVE_CONFIG_HOTPLUG + .remove = __devexit_p(i40e_remove), +#else .remove = i40e_remove, +#endif +#ifdef CONFIG_PM +#ifdef USE_LEGACY_PM_SUPPORT + .suspend = i40e_legacy_suspend, + .resume = i40e_legacy_resume, +#else /* USE_LEGACY_PM_SUPPORT */ .driver = { .pm = &i40e_pm_ops, }, +#endif /* !USE_LEGACY_PM_SUPPORT */ +#endif /* CONFIG_PM */ .shutdown = i40e_shutdown, +#ifdef HAVE_PCI_ERS .err_handler = &i40e_err_handler, +#endif +#ifdef HAVE_SRIOV_CONFIGURE .sriov_configure = i40e_pci_sriov_configure, +#endif +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE + .rh_reserved = &i40e_driver_rh, +#endif +#ifdef HAVE_RHEL7_PCI_DRIVER_RH + .pci_driver_rh = &i40e_driver_rh, +#endif }; /** @@ -15783,7 +16683,13 @@ static int __init i40e_init_module(void) pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; } +#ifdef HAVE_RHEL7_PCI_DRIVER_RH + /* The size member must be initialized in the driver via a call to + * set_pci_driver_rh_size before pci_register_driver is called + */ + set_pci_driver_rh_size(i40e_driver_rh); +#endif i40e_dbg_init(); return pci_register_driver(&i40e_driver); } @@ -15800,5 +16706,8 @@ static void __exit i40e_exit_module(void) pci_unregister_driver(&i40e_driver); destroy_workqueue(i40e_wq); i40e_dbg_exit(); +#ifdef HAVE_KFREE_RCU_BARRIER + rcu_barrier(); +#endif } module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index e4d8d20baf3b..04fe349dda19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #include "i40e_prototype.h" @@ -11,12 +11,12 @@ * once per NVM initialization, e.g. inside the i40e_init_shared_code(). * Please notice that the NVM term is used here (& in all methods covered * in this file) as an equivalent of the FLASH part mapped into the SR. 
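The Shadow RAM is addressed in 16-bit words, while the AdminQ NVM commands used below take byte offsets and lengths, hence the factor-of-two conversions that recur throughout this file. A small standalone illustration; the 0x800-word (4KB) sector size is assumed from the driver's I40E_SR_SECTOR_SIZE_IN_WORDS:

#include <stdint.h>
#include <stdio.h>

#define SR_SECTOR_SIZE_IN_WORDS 0x800u

int main(void)
{
	uint16_t word_offset = 0x48;              /* a word in the SR map */
	uint32_t byte_offset = 2u * word_offset;  /* what the AQ command takes */
	uint32_t sector = word_offset / SR_SECTOR_SIZE_IN_WORDS;

	printf("word 0x%02x -> byte 0x%02x, sector %u\n",
	       (unsigned)word_offset, (unsigned)byte_offset, (unsigned)sector);
	return 0;
}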
- * We are accessing FLASH always thru the Shadow RAM. + * We are accessing FLASH always through the Shadow RAM. **/ i40e_status i40e_init_nvm(struct i40e_hw *hw) { struct i40e_nvm_info *nvm = &hw->nvm; - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u32 fla, gens; u8 sr_size; @@ -55,7 +55,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u64 gtime, timeout; u64 time_left = 0; @@ -85,13 +85,13 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, I40E_NVM_RESOURCE_ID, access, 0, &time_left, NULL); - if (!ret_code) { + if (ret_code == I40E_SUCCESS) { hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; break; } } - if (ret_code) { + if (ret_code != I40E_SUCCESS) { hw->nvm.hw_semaphore_timeout = 0; i40e_debug(hw, I40E_DEBUG_NVM, "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", @@ -124,11 +124,10 @@ void i40e_release_nvm(struct i40e_hw *hw) */ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && (total_delay < hw->aq.asq_cmd_timeout)) { - usleep_range(1000, 2000); - ret_code = i40e_aq_release_resource(hw, - I40E_NVM_RESOURCE_ID, - 0, NULL); - total_delay++; + usleep_range(1000, 2000); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, 0, NULL); + total_delay++; } } @@ -147,7 +146,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { srctl = rd32(hw, I40E_GLNVM_SRCTL); if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { - ret_code = 0; + ret_code = I40E_SUCCESS; break; } udelay(5); @@ -165,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ -static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, - u16 *data) +static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, + u16 offset, u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; if (offset >= hw->nvm.sr_size) { i40e_debug(hw, I40E_DEBUG_NVM, - "NVM read error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: Offset %d beyond Shadow RAM limit %d\n", offset, hw->nvm.sr_size); ret_code = I40E_ERR_PARAM; goto read_nvm_exit; @@ -181,7 +180,7 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, /* Poll the done bit first */ ret_code = i40e_poll_sr_srctl_done_bit(hw); - if (!ret_code) { + if (ret_code == I40E_SUCCESS) { /* Write the address and start reading */ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | BIT(I40E_GLNVM_SRCTL_START_SHIFT); @@ -189,14 +188,14 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, /* Poll I40E_GLNVM_SRCTL until the done bit is set */ ret_code = i40e_poll_sr_srctl_done_bit(hw); - if (!ret_code) { + if (ret_code == I40E_SUCCESS) { sr_reg = rd32(hw, I40E_GLNVM_SRDATA); *data = (u16)((sr_reg & I40E_GLNVM_SRDATA_RDDATA_MASK) >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); } } - if (ret_code) + if (ret_code != I40E_SUCCESS) i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", offset); @@ -217,9 +216,9 @@ read_nvm_exit: * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 
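For readers new to this path, the SRCTL word read above boils down to a simple handshake on two registers. A condensed sketch, not compilable on its own: it reuses the driver's wr32()/rd32() and register macros shown in this file, and poll_done() stands in for i40e_poll_sr_srctl_done_bit(), assumed to return nonzero on timeout:

static int sr_read_word_sketch(struct i40e_hw *hw, u16 offset, u16 *data)
{
	u32 sr_reg;

	if (poll_done(hw))			/* wait until controller is idle */
		return -EIO;

	sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
		 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
	wr32(hw, I40E_GLNVM_SRCTL, sr_reg);	/* latch address, start the read */

	if (poll_done(hw))			/* wait for DONE */
		return -EIO;

	sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
	*data = (u16)((sr_reg & I40E_GLNVM_SRDATA_RDDATA_MASK) >>
		      I40E_GLNVM_SRDATA_RDDATA_SHIFT);
	return 0;
}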
**/ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, - u8 module_pointer, u32 offset, - u16 words, void *data, - bool last_command) + u8 module_pointer, u32 offset, + u16 words, void *data, + bool last_command) { i40e_status ret_code = I40E_ERR_NVM; struct i40e_asq_cmd_details cmd_details; @@ -265,18 +264,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, - u16 *data) + u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); - *data = le16_to_cpu(*(__le16 *)data); + *data = LE16_TO_CPU(*(__le16 *)data); return ret_code; } /** - * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking + * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM @@ -287,8 +286,9 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, * taken via i40e_acquire_nvm(). **/ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, - u16 offset, u16 *data) + u16 offset, u16 *data) { + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_word_aq(hw, offset, data); @@ -296,7 +296,7 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, } /** - * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary + * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM @@ -304,43 +304,43 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, * Reads one 16 bit word from the Shadow RAM. 
**/ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) + u16 *data) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_code) return ret_code; - ret_code = __i40e_read_nvm_word(hw, offset, data); if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) i40e_release_nvm(hw); - return ret_code; } /** * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location - * @hw: pointer to the HW structure + * @hw: Pointer to the HW structure * @module_ptr: Pointer to module in words with respect to NVM beginning - * @offset: offset in words from module start + * @module_offset: Offset in words from module start + * @data_offset: Offset in words from reading data area start * @words_data_size: Words to read from NVM * @data_ptr: Pointer to memory location where resulting buffer will be stored **/ -i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, - u8 module_ptr, u16 offset, - u16 words_data_size, - u16 *data_ptr) +enum i40e_status_code +i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset, + u16 data_offset, u16 words_data_size, u16 *data_ptr) { i40e_status status; + u16 specific_ptr = 0; u16 ptr_value = 0; - u32 flat_offset; + u16 offset = 0; if (module_ptr != 0) { status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); - if (status) { + if (status != I40E_SUCCESS) { i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm word failed.Error code: %d.\n", status); @@ -352,37 +352,36 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, /* Pointer not initialized */ if (ptr_value == I40E_NVM_INVALID_PTR_VAL || - ptr_value == I40E_NVM_INVALID_VAL) + ptr_value == I40E_NVM_INVALID_VAL) { + i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n"); return I40E_ERR_BAD_PTR; + } /* Check whether the module is in SR mapped area or outside */ if (ptr_value & I40E_PTR_TYPE) { /* Pointer points outside of the Shared RAM mapped area */ - ptr_value &= ~I40E_PTR_TYPE; + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n"); - /* PtrValue in 4kB units, need to convert to words */ - ptr_value /= 2; - flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset; - status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!status) { - status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset, - 2 * words_data_size, - data_ptr, true, NULL); - i40e_release_nvm(hw); - if (status) { - i40e_debug(hw, I40E_DEBUG_ALL, - "Reading nvm aq failed.Error code: %d.\n", - status); - return I40E_ERR_NVM; - } - } else { - return I40E_ERR_NVM; - } + return I40E_ERR_PARAM; } else { /* Read from the Shadow RAM */ - status = i40e_read_nvm_buffer(hw, ptr_value + offset, - &words_data_size, data_ptr); - if (status) { + + status = i40e_read_nvm_word(hw, ptr_value + module_offset, + &specific_ptr); + if (status != I40E_SUCCESS) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm word failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + + offset = ptr_value + module_offset + specific_ptr + + data_offset; + + status = i40e_read_nvm_buffer(hw, offset, &words_data_size, + data_ptr); + if (status != I40E_SUCCESS) { i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm buffer failed.Error code: %d.\n", status); @@ -404,16 +403,16 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, * and followed by the release. 
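The reworked i40e_read_nvm_module_data() above no longer follows flat pointers out of the Shadow RAM; instead it chases a two-level pointer chain entirely inside it. A standalone worked example of the final offset arithmetic, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* module_ptr selects a module header word, a section-specific pointer is
 * read at ptr_value + module_offset, and the data area starts data_offset
 * words beyond that.
 */
int main(void)
{
	uint16_t ptr_value     = 0x1200; /* word read at module_ptr */
	uint16_t module_offset = 0x0002; /* word inside the module header */
	uint16_t specific_ptr  = 0x0010; /* word read at ptr_value + module_offset */
	uint16_t data_offset   = 0x0001; /* word into the data area */
	uint16_t sr_offset = ptr_value + module_offset + specific_ptr + data_offset;

	printf("final Shadow RAM word offset: 0x%04x\n", (unsigned)sr_offset);
	return 0;
}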
**/ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) + u16 *words, u16 *data) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u16 index, word; - /* Loop thru the selected region */ + /* Loop through the selected region */ for (word = 0; word < *words; word++) { index = offset + word; ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); - if (ret_code) + if (ret_code != I40E_SUCCESS) break; } @@ -435,10 +434,10 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, * and followed by the release. **/ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) + u16 *words, u16 *data) { i40e_status ret_code; - u16 read_size; + u16 read_size = *words; bool last_cmd = false; u16 words_read = 0; u16 i = 0; @@ -462,7 +461,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, data + words_read, last_cmd); - if (ret_code) + if (ret_code != I40E_SUCCESS) goto read_nvm_buffer_aq_exit; /* Increment counter for words already read and move offset to @@ -473,7 +472,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, } while (words_read < *words); for (i = 0; i < *words; i++) - data[i] = le16_to_cpu(((__le16 *)data)[i]); + data[i] = LE16_TO_CPU(((__le16 *)data)[i]); read_nvm_buffer_aq_exit: *words = words_read; @@ -481,7 +480,7 @@ read_nvm_buffer_aq_exit: } /** - * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock + * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read @@ -491,8 +490,8 @@ read_nvm_buffer_aq_exit: * method. **/ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, - u16 offset, u16 *words, - u16 *data) + u16 offset, u16 *words, + u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_buffer_aq(hw, offset, words, data); @@ -512,15 +511,15 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, * and followed by the release. **/ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) + u16 *words, u16 *data) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (!ret_code) { ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, - data); + data); i40e_release_nvm(hw); } } else { @@ -542,8 +541,8 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, * Writes a 16 bit words buffer to the Shadow RAM using the admin command. **/ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 words, void *data, - bool last_command) + u32 offset, u16 words, void *data, + bool last_command) { i40e_status ret_code = I40E_ERR_NVM; struct i40e_asq_cmd_details cmd_details; @@ -557,20 +556,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * Firmware will check the module-based model. 
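The AQ read loop above and the write checks below share one constraint: a single AdminQ NVM operation must stay inside one 4KB sector. A standalone sketch of how a request gets split at sector edges; the loop structure is illustrative, not copied from the driver:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_WORDS 0x800u

int main(void)
{
	uint32_t offset = 0x7f0, words = 0x30, done = 0;

	while (done < words) {
		uint32_t room = SECTOR_WORDS - (offset % SECTOR_WORDS);
		uint32_t chunk = words - done < room ? words - done : room;

		printf("read %u words at 0x%04x\n", (unsigned)chunk,
		       (unsigned)offset);
		offset += chunk;
		done += chunk;
	}
	return 0;
}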
*/ if ((offset + words) > hw->nvm.sr_size) - i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", - (offset + words), hw->nvm.sr_size); + hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n"); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can write only up to 4KB (one sector), in one AQ write */ - i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", - words, I40E_SR_SECTOR_SIZE_IN_WORDS); + hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n"); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single write cannot spread over two sectors */ - i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", - offset, words); + hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n"); else ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ @@ -594,7 +587,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { - i40e_status ret_code; + i40e_status ret_code = I40E_SUCCESS; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; @@ -610,7 +603,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to VPD area */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); - if (ret_code) { + if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } @@ -618,7 +611,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to PCIe Alt Auto-load module */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); - if (ret_code) { + if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } @@ -632,7 +625,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); - if (ret_code) { + if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } @@ -674,13 +667,13 @@ i40e_calc_nvm_checksum_exit: **/ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) { - i40e_status ret_code; + i40e_status ret_code = I40E_SUCCESS; u16 checksum; __le16 le_sum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); - le_sum = cpu_to_le16(checksum); - if (!ret_code) + le_sum = CPU_TO_LE16(checksum); + if (ret_code == I40E_SUCCESS) ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, 1, &le_sum, true); @@ -698,7 +691,7 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { - i40e_status ret_code = 0; + i40e_status ret_code = I40E_SUCCESS; u16 checksum_sr = 0; u16 checksum_local = 0; @@ -730,51 +723,51 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, } static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, - struct 
i40e_nvm_access *cmd, - u8 *bytes, int *errno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); + struct i40e_nvm_access *cmd, + int *perrno); static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); + struct i40e_nvm_access *cmd, + int *perrno); static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static inline u8 i40e_nvmupd_get_module(u32 val) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static INLINE u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); } -static inline u8 i40e_nvmupd_get_transaction(u32 val) +static INLINE u8 i40e_nvmupd_get_transaction(u32 val) { return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } -static inline u8 i40e_nvmupd_get_preservation_flags(u32 val) +static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val) { return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> I40E_NVM_PRESERVATION_FLAGS_SHIFT); } -static const char * const i40e_nvm_update_state_str[] = { +static const char *i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", "I40E_NVMUPD_READ_SNT", @@ -792,6 +785,7 @@ static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_EXEC_AQ", "I40E_NVMUPD_GET_AQ_RESULT", "I40E_NVMUPD_GET_AQ_EVENT", + "I40E_NVMUPD_GET_FEATURES", }; /** @@ -804,8 +798,8 @@ static const char * const i40e_nvm_update_state_str[] = { * Dispatches command depending on what update state is current **/ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { i40e_status status; enum i40e_nvmupd_cmd upd_cmd; @@ -849,7 +843,32 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - return 0; + return I40E_SUCCESS; + } + + /* + * A supported features request returns immediately + * rather than going into state machine + */ + if (upd_cmd == I40E_NVMUPD_FEATURES) { + if (cmd->data_size < hw->nvmupd_features.size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + + /* + * If buffer is bigger than i40e_nvmupd_features structure, + * make sure the trailing bytes are set to 0x0. 
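The INLINE helpers above just slice fields out of the nvmupd config word. A standalone worked example of that decomposition; the mask and shift values mirror the layout implied here (module pointer in the low byte, transaction above it) and should be treated as illustrative rather than copied from i40e.h:

#include <stdint.h>
#include <stdio.h>

#define MOD_PNT_MASK 0x000000FFu
#define TRANS_SHIFT  8
#define TRANS_MASK   (0xFu << TRANS_SHIFT)

int main(void)
{
	uint32_t config = 0x0000020Fu; /* transaction 2, module 0x0F */

	printf("module      = 0x%02x\n", (unsigned)(config & MOD_PNT_MASK));
	printf("transaction = 0x%x\n",
	       (unsigned)((config & TRANS_MASK) >> TRANS_SHIFT));
	return 0;
}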
+ */ + if (cmd->data_size > hw->nvmupd_features.size) + i40e_memset(bytes + hw->nvmupd_features.size, 0x0, + cmd->data_size - hw->nvmupd_features.size, + I40E_NONDMA_MEM); + + i40e_memcpy(bytes, &hw->nvmupd_features, + hw->nvmupd_features.size, I40E_NONDMA_MEM); + + return I40E_SUCCESS; } /* Clear status even it is not read and log */ @@ -867,7 +886,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * ~5ms for most commands. However lock is held for ~60ms for * NVMUPD_CSUM_LCB command. */ - mutex_lock(&hw->aq.arq_mutex); + i40e_acquire_spinlock(&hw->aq.arq_spinlock); switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); @@ -888,7 +907,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, */ if (cmd->offset == 0xffff) { i40e_nvmupd_clear_wait_state(hw); - status = 0; + status = I40E_SUCCESS; break; } @@ -905,7 +924,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, break; } - mutex_unlock(&hw->aq.arq_mutex); + i40e_release_spinlock(&hw->aq.arq_spinlock); return status; } @@ -920,10 +939,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * state. Reject all other commands. **/ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; enum i40e_nvmupd_cmd upd_cmd; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1059,10 +1078,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, * change in state; reject all other commands. **/ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; enum i40e_nvmupd_cmd upd_cmd; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1101,10 +1120,10 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, * change in state; reject all other commands **/ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; enum i40e_nvmupd_cmd upd_cmd; bool retry_attempt = false; @@ -1257,7 +1276,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, u32 aq_desc_len = sizeof(struct i40e_aq_desc); if (opcode == hw->nvm_wait_opcode) { - memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); + i40e_memcpy(&hw->nvm_aq_event_desc, desc, + aq_desc_len, I40E_NONDMA_TO_NONDMA); i40e_nvmupd_clear_wait_state(hw); } } @@ -1271,8 +1291,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, * Return one of the valid command types or I40E_NVMUPD_INVALID **/ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) + struct i40e_nvm_access *cmd, + int *perrno) { enum i40e_nvmupd_cmd upd_cmd; u8 module, transaction; @@ -1309,10 +1329,20 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, upd_cmd = I40E_NVMUPD_READ_SA; break; case I40E_NVM_EXEC: - if (module == 0xf) - upd_cmd = I40E_NVMUPD_STATUS; - else if (module == 0) + switch (module) { + case I40E_NVM_EXEC_GET_AQ_RESULT: upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; + case I40E_NVM_EXEC_FEATURES: + upd_cmd = I40E_NVMUPD_FEATURES; + break; + case 
I40E_NVM_EXEC_STATUS: + upd_cmd = I40E_NVMUPD_STATUS; + break; + default: + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; + } break; case I40E_NVM_AQE: upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; @@ -1367,8 +1397,8 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, * cmd structure contains identifiers and data buffer **/ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; i40e_status status; @@ -1380,7 +1410,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); if (cmd->offset == 0xffff) - return 0; + return I40E_SUCCESS; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -1400,7 +1430,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, /* if data buffer needed, make sure it's ready */ aq_data_len = cmd->data_size - aq_desc_len; - buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); + buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen)); if (buff_size) { if (!hw->nvm_buff.va) { status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, @@ -1413,7 +1443,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, if (hw->nvm_buff.va) { buff = hw->nvm_buff.va; - memcpy(buff, &bytes[aq_desc_len], aq_data_len); + i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len, + I40E_NONDMA_TO_NONDMA); } } @@ -1451,8 +1482,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, * cmd structure contains identifiers and data buffer **/ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; @@ -1462,7 +1493,7 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); aq_desc_len = sizeof(struct i40e_aq_desc); - aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen); /* check offset range */ if (cmd->offset > aq_total_len) { @@ -1490,13 +1521,13 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, __func__, cmd->offset, cmd->offset + len); buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; - memcpy(bytes, buff, len); + i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA); bytes += len; remainder -= len; buff = hw->nvm_buff.va; } else { - buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len); } if (remainder > 0) { @@ -1504,10 +1535,10 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", __func__, start_byte, start_byte + remainder); - memcpy(bytes, buff, remainder); + i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA); } - return 0; + return I40E_SUCCESS; } /** @@ -1520,8 +1551,8 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, * cmd structure contains identifiers and data buffer **/ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; @@ -1529,7 +1560,7 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, i40e_debug(hw, 
I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); aq_desc_len = sizeof(struct i40e_aq_desc); - aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); + aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen); /* check copylength range */ if (cmd->data_size > aq_total_len) { @@ -1539,9 +1570,10 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, cmd->data_size = aq_total_len; } - memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); + i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size, + I40E_NONDMA_TO_NONDMA); - return 0; + return I40E_SUCCESS; } /** @@ -1554,8 +1586,8 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, * cmd structure contains identifiers and data buffer **/ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; i40e_status status; @@ -1593,10 +1625,10 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, * module, offset, data_size and data are in cmd structure **/ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) + struct i40e_nvm_access *cmd, + int *perrno) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; bool last; @@ -1633,10 +1665,10 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, * module, offset, data_size and data are in cmd structure **/ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; + i40e_status status = I40E_SUCCESS; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; u8 preservation_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index c302ef2524f8..f6ac4b2bdabc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ @@ -11,25 +11,48 @@ #include #include -/* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include +#include +#include + +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif +#include "kcompat.h" /* File to be the magic between shared code and * actual OS primitives */ -#define hw_dbg(hw, S, A...) \ -do { \ - dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \ +#define hw_dbg(h, s, ...) 
do { \ + pr_debug("i40e %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ } while (0) + #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) - /* memory allocation tracking */ struct i40e_dma_mem { void *va; @@ -38,7 +61,8 @@ struct i40e_dma_mem { }; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ - i40e_allocate_dma_mem_d(h, m, s, a) + i40e_allocate_dma_mem_d(h, m, unused, s, a) + #define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) struct i40e_virt_mem { @@ -57,5 +81,40 @@ do { \ (h)->bus.func, ##__VA_ARGS__); \ } while (0) +/* these things are all directly replaced with sed during the kernel build */ +#define INLINE inline + + +#define CPU_TO_LE16(o) cpu_to_le16(o) +#define CPU_TO_LE32(s) cpu_to_le32(s) +#define CPU_TO_LE64(h) cpu_to_le64(h) +#define LE16_TO_CPU(a) le16_to_cpu(a) +#define LE32_TO_CPU(c) le32_to_cpu(c) +#define LE64_TO_CPU(k) le64_to_cpu(k) + +/* SW spinlock */ +struct i40e_spinlock { + struct mutex spinlock; +}; + +static inline void i40e_no_action(struct i40e_spinlock *sp) +{ + /* nothing */ +} + +/* the locks are initialized in _probe and destroyed in _remove + * so make sure NOT to implement init/destroy here, as to + * avoid the i40e_init_adminq code trying to reinitialize + * the persistent lock memory + */ +#define i40e_init_spinlock(_sp) i40e_no_action(_sp) +#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp) +#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp) +#define i40e_destroy_spinlock(_sp) i40e_no_action(_sp) + + +#define i40e_memset(a, b, c, d) memset((a), (b), (c)) +#define i40e_memcpy(a, b, c, d) memcpy((a), (b), (c)) + typedef enum i40e_status_code i40e_status; #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 5250441bf75b..0f61206098a9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,12 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ #include "i40e_type.h" #include "i40e_alloc.h" -#include +#include "virtchnl.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. 
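The spinlock wrappers in the osdep block above forward to _d helpers that this diff does not show; presumably kcompat.h supplies them. Plausible definitions, assuming the shared code's "spinlock" is really a sleeping mutex on Linux, which would also explain the i40e_destroy_spinlock_d() calls in the remove path earlier:

static inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
	mutex_lock(&sp->spinlock);
}

static inline void i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
	mutex_unlock(&sp->spinlock);
}

static inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp)
{
	mutex_destroy(&sp->spinlock);
}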
These are @@ -22,6 +22,13 @@ void i40e_adminq_init_ring_data(struct i40e_hw *hw); i40e_status i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *events_pending); +enum i40e_status_code +i40e_asq_send_command_atomic(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details, + bool is_atomic_context); i40e_status i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ @@ -35,29 +42,39 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); + bool pf_lut, u8 *lut, u16 lut_size); i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); + bool pf_lut, u8 *lut, u16 lut_size); i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, - u16 led_addr, u32 mode); + u16 led_addr, u32 mode); i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, - u16 *val); + u16 *val); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); - + u32 time, u32 interval); +i40e_status i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stats); +i40e_status i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter, + u32 *rx_counter, bool *is_clear); +i40e_status i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat); +i40e_status i40e_get_lpi_duration(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat, + u64 *tx_duration, u64 *rx_duration); /* admin send queue commands */ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, @@ -66,8 +83,8 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, - u32 reg_addr, u64 reg_val, - struct i40e_asq_cmd_details *cmd_details); + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details); @@ -76,23 +93,22 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw 
*hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, +i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, +i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, - bool enable_link, - struct i40e_asq_cmd_details *cmd_details); + bool enable_link, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); @@ -113,13 +129,14 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, bool rx_only_promisc); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, +i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, +i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, @@ -165,9 +182,8 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, - u16 flags, - u16 valid_flags, u8 mode, +i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, @@ -183,8 +199,19 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, bool last_command, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, bool last_command, - struct i40e_asq_cmd_details *cmd_details); + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, u32 field_id, void *data, + u16 buf_size, u16 *element_count, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, void *data, u16 buf_size, + u16 element_count, + struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_aq_oem_post_update(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, @@ -194,12 +221,17 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details); + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw, + u8 mib_type, void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details); @@ -207,23 +239,32 @@ enum i40e_status_code i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, - bool persist, + bool persist, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw, - bool dcb_enable, - struct i40e_asq_cmd_details - *cmd_details); -i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, - struct i40e_asq_cmd_details *cmd_details); + bool dcb_enable, + struct i40e_asq_cmd_details + *cmd_details); +i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, + bool persist, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, - void *buff, u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw, + bool start_agent, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, + u8 *num_entries, + struct i40e_aqc_switch_resource_alloc_element_resp *buf, + u16 count, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, @@ -270,7 +311,7 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details); + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, @@ -288,7 +329,13 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, - struct 
i40e_lldp_variables *lldp_cfg); + struct i40e_lldp_variables *lldp_cfg); +i40e_status i40e_aq_replace_cloud_filters(struct i40e_hw *hw, + struct i40e_aqc_replace_cloud_filters_cmd *filters, + struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf); +i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1); /* i40e_common */ i40e_status i40e_init_shared_code(struct i40e_hw *hw); i40e_status i40e_pf_reset(struct i40e_hw *hw); @@ -298,15 +345,13 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up); i40e_status i40e_update_link_info(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, - u32 *max_bw, u32 *min_bw, bool *min_valid, - bool *max_valid); + u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid); i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size); -i40e_status i40e_validate_mac_addr(u8 *mac_addr); + u32 pba_num_size); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); /* prototype for functions used for NVM access */ i40e_status i40e_init_nvm(struct i40e_hw *hw); @@ -315,18 +360,17 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, void i40e_release_nvm(struct i40e_hw *hw); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); -i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, - u8 module_ptr, u16 offset, - u16 words_data_size, - u16 *data_ptr); +enum i40e_status_code +i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset, + u16 data_offset, u16 words_data_size, u16 *data_ptr); i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data); + u16 *words, u16 *data); i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); i40e_status i40e_nvmupd_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *); + struct i40e_nvm_access *cmd, + u8 *bytes, int *); void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc); void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); @@ -336,7 +380,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) { return i40e_ptype_lookup[ptype]; } @@ -350,7 +394,7 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) * talking to virtchnl devices. If we can't represent the link speed properly, * report LINK_SPEED_UNKNOWN. 
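A minimal sketch of the AQ-to-virtchnl speed mapping this comment describes, showing only a few representative cases rather than the full switch; enum spellings are assumed from i40e_type.h and virtchnl.h:

static enum virtchnl_link_speed
map_link_speed_sketch(enum i40e_aq_link_speed speed)
{
	switch (speed) {
	case I40E_LINK_SPEED_10GB:
		return VIRTCHNL_LINK_SPEED_10GB;
	case I40E_LINK_SPEED_40GB:
		return VIRTCHNL_LINK_SPEED_40GB;
	default:
		return VIRTCHNL_LINK_SPEED_UNKNOWN;
	}
}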
**/ -static inline enum virtchnl_link_speed +static INLINE enum virtchnl_link_speed i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) { switch (link_speed) { @@ -375,8 +419,11 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) return VIRTCHNL_LINK_SPEED_UNKNOWN; } } - -/* prototype for functions used for SW locks */ +/* prototype for functions used for SW spinlocks */ +void i40e_init_spinlock(struct i40e_spinlock *sp); +void i40e_acquire_spinlock(struct i40e_spinlock *sp); +void i40e_release_spinlock(struct i40e_spinlock *sp); +void i40e_destroy_spinlock(struct i40e_spinlock *sp); /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, @@ -395,10 +442,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, - u8 table_id, u32 start_index, u16 buff_size, - void *buff, u16 *ret_buff_size, - u8 *ret_next_table, u32 *ret_next_index, - struct i40e_asq_cmd_details *cmd_details); + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, @@ -409,39 +456,71 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_set_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_get_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +/* Convenience wrappers for most common use case */ +#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \ + i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd) +#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \ + i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd) + +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details); + +i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, + struct i40e_aqc_arp_proxy_data *proxy_config, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, + struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, + u8 filter_index, + struct i40e_aqc_set_wol_filter_data *filter, + bool set_filter, bool no_wol_tco, + bool filter_valid, bool no_wol_tco_valid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_wake_event_reason(struct 
i40e_hw *hw, + u16 *wake_reason, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 *value); + u16 reg, u8 phy_addr, u16 *value); i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 value); + u16 reg, u8 phy_addr, u16 value); i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 value); +i40e_status i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value); u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); + u32 time, u32 interval); i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details * - cmd_details); + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details * + cmd_details); i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details * - cmd_details); + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details * + cmd_details); struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 9bf1ad4319f5..b4de5c89f03c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1,7 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ +/* this lets the macros that return timespec64 or structs compile cleanly with + * W=2 + */ +#pragma GCC diagnostic ignored "-Waggregate-return" #include "i40e.h" +#ifdef HAVE_PTP_1588_CLOCK #include /* The XL710 timesync is very much like Intel's 82599 design when it comes to @@ -16,9 +21,9 @@ * At 1Gb link, the period is multiplied by 20. (32ns) * 1588 functionality is not supported at 100Mbps. */ -#define I40E_PTP_40GB_INCVAL 0x0199999999ULL -#define I40E_PTP_10GB_INCVAL_MULT 2 -#define I40E_PTP_1GB_INCVAL_MULT 20 +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL_MULT 2 +#define I40E_PTP_1GB_INCVAL_MULT 20 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ @@ -28,23 +33,19 @@ * i40e_ptp_read - Read the PHC time from the device * @pf: Board private structure * @ts: timespec structure to hold the current time value - * @sts: structure to hold the system time before and after reading the PHC * * This function reads the PRTTSYN_TIME registers and stores them in a * timespec. However, since the registers are 64 bits of nanoseconds, we must * convert the result to a timespec before we can return. 
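/*
 * Illustrative sketch, not part of the patch: how the INCVAL defines
 * above are meant to combine. The 40Gb value is the base clock
 * increment; slower links multiply it because the cycle period grows
 * as the link slows (x2 at 10Gb, x20 at 1Gb). The helper name and the
 * direct use of the I40E_LINK_SPEED_* enum values are assumptions for
 * illustration only.
 */
static u64 example_base_incval(enum i40e_aq_link_speed speed)
{
	switch (speed) {
	case I40E_LINK_SPEED_40GB:
		return I40E_PTP_40GB_INCVAL;
	case I40E_LINK_SPEED_10GB:
		return I40E_PTP_40GB_INCVAL * I40E_PTP_10GB_INCVAL_MULT;
	case I40E_LINK_SPEED_1GB:
		return I40E_PTP_40GB_INCVAL * I40E_PTP_1GB_INCVAL_MULT;
	default:
		return 0; /* 1588 is not supported at 100Mb */
	}
}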
**/ -static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts, - struct ptp_system_timestamp *sts) +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) { struct i40e_hw *hw = &pf->hw; u32 hi, lo; u64 ns; /* The timer latches on the lowest register read. */ - ptp_read_system_prets(sts); lo = rd32(hw, I40E_PRTTSYN_TIME_L); - ptp_read_system_postts(sts); hi = rd32(hw, I40E_PRTTSYN_TIME_H); ns = (((u64)hi) << 32) | lo; @@ -69,8 +70,8 @@ static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) /* The timer will not update until the high register is written, so * write the low register first. */ - wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); - wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); + wr32(hw, I40E_PRTTSYN_TIME_L, (u32)ns); + wr32(hw, I40E_PRTTSYN_TIME_H, (u32)(ns >> 32)); } /** @@ -129,8 +130,8 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) smp_mb(); /* Force any pending update before accessing. */ adj *= READ_ONCE(pf->ptp_adj_mult); - wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); - wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); + wr32(hw, I40E_PRTTSYN_INC_L, (u32)adj); + wr32(hw, I40E_PRTTSYN_INC_H, (u32)(adj >> 32)); return 0; } @@ -150,40 +151,36 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); - i40e_ptp_read(pf, &now, NULL); + i40e_ptp_read(pf, &now); now = timespec64_add(now, then); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); - return 0; } /** - * i40e_ptp_gettimex - Get the time of the PHC + * i40e_ptp_gettime - Get the time of the PHC * @ptp: The PTP clock structure - * @ts: timespec structure to hold the current time value - * @sts: structure to hold the system time before and after reading the PHC + * @ts: timespec64 structure to hold the current time value * * Read the device clock and return the correct value in ns, after converting it * into a timespec struct. **/ -static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, - struct ptp_system_timestamp *sts) +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); mutex_lock(&pf->tmreg_lock); - i40e_ptp_read(pf, ts, sts); + i40e_ptp_read(pf, ts); mutex_unlock(&pf->tmreg_lock); - return 0; } /** * i40e_ptp_settime - Set the time of the PHC * @ptp: The PTP clock structure - * @ts: timespec structure that holds the new time value + * @ts: timespec64 structure that holds the new time value * * Set the device clock to the user input value. The conversion from timespec * to ns happens in the write function. @@ -196,10 +193,48 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp, mutex_lock(&pf->tmreg_lock); i40e_ptp_write(pf, ts); mutex_unlock(&pf->tmreg_lock); - return 0; } +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +/** + * i40e_ptp_gettime32 - Get the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure to hold the current time value + * + * Read the device clock and return the correct value in ns, after converting it + * into a timespec struct.
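/*
 * Hedged sketch of the 64-bit split-register convention used by
 * i40e_ptp_read()/i40e_ptp_write() above: reads latch the timer on the
 * low-word access, so L is read before H; writes only commit once the
 * high word lands, so L is written first as well. The example_* helpers
 * are illustrative, not driver API.
 */
static u64 example_phc_read_ns(struct i40e_hw *hw)
{
	u32 lo = rd32(hw, I40E_PRTTSYN_TIME_L); /* latches the timer */
	u32 hi = rd32(hw, I40E_PRTTSYN_TIME_H);

	return ((u64)hi << 32) | lo;
}

static void example_phc_write_ns(struct i40e_hw *hw, u64 ns)
{
	wr32(hw, I40E_PRTTSYN_TIME_L, (u32)ns);         /* staged */
	wr32(hw, I40E_PRTTSYN_TIME_H, (u32)(ns >> 32)); /* commits */
}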
+ **/ +static int i40e_ptp_gettime32(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = i40e_ptp_gettime(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + return 0; +} + +/** + * i40e_ptp_settime32 - Set the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + **/ +static int i40e_ptp_settime32(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return i40e_ptp_settime(ptp, &ts64); +} +#endif + /** * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem * @ptp: The PTP clock structure @@ -259,7 +294,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf) /** * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung * @pf: The PF private data structure - * @vsi: The VSI with the rings relevant to 1588 * * This watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The @@ -496,8 +530,8 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) * hardware will not update the clock until both registers have been * written. */ - wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); - wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + wr32(hw, I40E_PRTTSYN_INC_L, (u32)incval); + wr32(hw, I40E_PRTTSYN_INC_H, (u32)(incval >> 32)); /* Update the base adjustment value. */ WRITE_ONCE(pf->ptp_adj_mult, mult); @@ -600,7 +634,9 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; } break; +#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL case HWTSTAMP_FILTER_NTP_ALL: +#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */ case HWTSTAMP_FILTER_ALL: default: return -ERANGE; @@ -708,8 +744,13 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->ptp_caps.pps = 0; pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; pf->ptp_caps.adjtime = i40e_ptp_adjtime; - pf->ptp_caps.gettimex64 = i40e_ptp_gettimex; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + pf->ptp_caps.gettime64 = i40e_ptp_gettime; pf->ptp_caps.settime64 = i40e_ptp_settime; +#else + pf->ptp_caps.gettime = i40e_ptp_gettime32; + pf->ptp_caps.settime = i40e_ptp_settime32; +#endif pf->ptp_caps.enable = i40e_ptp_feature_enable; /* Attempt to register the clock before enabling the hardware.
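/*
 * Hedged sketch of how a ppb request from i40e_ptp_adjfreq() combines
 * with the link-speed multiplier cached by i40e_ptp_set_increment()
 * above; the math mirrors the driver's but the helper itself is
 * illustrative.
 */
static u64 example_adjusted_incval(s32 ppb, u32 ptp_adj_mult)
{
	u64 incval = I40E_PTP_40GB_INCVAL;
	u64 diff = div_u64(incval * (u64)abs(ppb), 1000000000);

	incval = (ppb < 0) ? incval - diff : incval + diff;
	return incval * ptp_adj_mult;
}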
*/ @@ -746,7 +787,7 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_PTP)) return; - i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL); + i40e_ptp_gettime(&pf->ptp_caps, &pf->ptp_prev_hw_time); /* Get a monotonic starting time for this reset */ pf->ptp_reset_start = ktime_get(); } @@ -789,7 +830,6 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf) **/ void i40e_ptp_init(struct i40e_pf *pf) { - struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; struct i40e_hw *hw = &pf->hw; u32 pf_id; long err; @@ -801,9 +841,7 @@ void i40e_ptp_init(struct i40e_pf *pf) I40E_PRTTSYN_CTL0_PF_ID_SHIFT; if (hw->pf_id != pf_id) { pf->flags &= ~I40E_FLAG_PTP; - dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", - __func__, - netdev->name); + dev_info(&pf->pdev->dev, "PTP not supported on this device\n"); return; } @@ -814,9 +852,9 @@ void i40e_ptp_init(struct i40e_pf *pf) err = i40e_ptp_create_clock(pf); if (err) { pf->ptp_clock = NULL; - dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", - __func__); - } else if (pf->ptp_clock) { + dev_err(&pf->pdev->dev, + "PTP clock register failed: %ld\n", err); + } else { u32 regval; if (pf->hw.debug_mask & I40E_DEBUG_LAN) @@ -866,7 +904,8 @@ void i40e_ptp_stop(struct i40e_pf *pf) if (pf->ptp_clock) { ptp_clock_unregister(pf->ptp_clock); pf->ptp_clock = NULL; - dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, + dev_info(&pf->pdev->dev, "removed PHC from %s\n", pf->vsi[pf->lan_vsi]->netdev->name); } } +#endif /* HAVE_PTP_1588_CLOCK */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index d35d690ca10f..19b237c97f72 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -1,28 +1,28 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ -#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ #define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) -#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) +#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ #define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) -#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) +#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ #define I40E_GL_ARQH_ARQH_SHIFT 0 -#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) -#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ +#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) +#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ #define I40E_GL_ARQT_ARQT_SHIFT 0 -#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) -#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ +#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) +#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ #define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) -#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ +#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) +#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ #define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) -#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ +#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) +#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ #define I40E_GL_ATQH_ATQH_SHIFT 0 #define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) #define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ @@ -363,6 +363,8 @@ #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) @@ -5273,6 +5275,87 @@ #define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ #define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 #define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) +/* Flow Director */ +#define I40E_REG_INSET_L2_DMAC_SHIFT 60 +#define I40E_REG_INSET_L2_DMAC_MASK I40E_MASK(0xEULL, I40E_REG_INSET_L2_DMAC_SHIFT) +#define I40E_REG_INSET_L2_SMAC_SHIFT 56 +#define I40E_REG_INSET_L2_SMAC_MASK I40E_MASK(0x1CULL, I40E_REG_INSET_L2_SMAC_SHIFT) +#define I40E_REG_INSET_L2_OUTER_VLAN_SHIFT 26 +#define I40E_REG_INSET_L2_OUTER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_OUTER_VLAN_SHIFT) +#define I40E_REG_INSET_L2_INNER_VLAN_SHIFT 55 +#define 
I40E_REG_INSET_L2_INNER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_INNER_VLAN_SHIFT) +#define I40E_REG_INSET_TUNNEL_VLAN_SHIFT 56 +#define I40E_REG_INSET_TUNNEL_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_VLAN_SHIFT) +#define I40E_REG_INSET_L3_SRC_IP4_SHIFT 47 +#define I40E_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_SRC_IP4_SHIFT) +#define I40E_REG_INSET_L3_DST_IP4_SHIFT 35 +#define I40E_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_DST_IP4_SHIFT) +#define I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT 49 +#define I40E_X722_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT) +#define I40E_X722_REG_INSET_L3_DST_IP4_SHIFT 41 +#define I40E_X722_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_DST_IP4_SHIFT) +#define I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT 52 +#define I40E_X722_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT) +#define I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT 52 +#define I40E_X722_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT) +#define I40E_REG_INSET_L3_IP4_TOS_SHIFT 54 +#define I40E_REG_INSET_L3_IP4_TOS_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TOS_SHIFT) +#define I40E_REG_INSET_L3_IP4_PROTO_SHIFT 50 +#define I40E_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_PROTO_SHIFT) +#define I40E_REG_INSET_L3_IP4_TTL_SHIFT 50 +#define I40E_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TTL_SHIFT) +#define I40E_REG_INSET_L3_SRC_IP6_SHIFT 43 +#define I40E_REG_INSET_L3_SRC_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_SRC_IP6_SHIFT) +#define I40E_REG_INSET_L3_DST_IP6_SHIFT 35 +#define I40E_REG_INSET_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_DST_IP6_SHIFT) +#define I40E_REG_INSET_L3_IP6_TC_SHIFT 54 +#define I40E_REG_INSET_L3_IP6_TC_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_TC_SHIFT) +#define I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT 51 +#define I40E_REG_INSET_L3_IP6_NEXT_HDR_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT) +#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT 51 +#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT) +#define I40E_REG_INSET_L4_SRC_PORT_SHIFT 34 +#define I40E_REG_INSET_L4_SRC_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_SRC_PORT_SHIFT) +#define I40E_REG_INSET_L4_DST_PORT_SHIFT 33 +#define I40E_REG_INSET_L4_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_DST_PORT_SHIFT) +#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT 31 +#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT) +#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT 22 +#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT) +#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT 11 +#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT) +#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT 21 +#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT) +#define I40E_REG_INSET_TUNNEL_ID_SHIFT 18 +#define I40E_REG_INSET_TUNNEL_ID_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_ID_SHIFT) +#define I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT 14 +#define I40E_REG_INSET_LAST_ETHER_TYPE_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT) +#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT 8 +#define 
I40E_REG_INSET_TUNNEL_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT) +#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT 6 +#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT) +#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT 6 +#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT 13 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT 12 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT 11 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT 10 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT 9 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT 8 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT 7 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT 6 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT 6 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT) +#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL + #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 @@ -5333,4 +5416,5 @@ #define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) + #endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index 77be0702d07c..ab12f1311996 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_STATUS_H_ #define _I40E_STATUS_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h index 424f02077e2e..27e47ccf325e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -1,9 +1,23 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ -/* Modeled on trace-events-sample.h */ +#ifndef CONFIG_TRACEPOINTS +#if !defined(_I40E_TRACE_H_) +#define _I40E_TRACE_H_ +/* If the Linux kernel tracepoints are not available then the i40e_trace* + * macros become nops. + */ -/* The trace subsystem name for i40e will be "i40e". 
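/*
 * Hedged usage sketch for the nop stubs defined just below
 * (hypothetical caller, real i40e_trace()/i40e_trace_enabled()
 * macros): with CONFIG_TRACEPOINTS, i40e_trace(clean_rx_irq, ...)
 * expands to trace_i40e_clean_rx_irq(...); without it, the call
 * compiles away and the enabled check folds to 0.
 */
static void example_trace_caller(struct i40e_ring *ring,
				 union i40e_rx_desc *desc,
				 struct sk_buff *skb)
{
	if (i40e_trace_enabled(clean_rx_irq))
		i40e_trace(clean_rx_irq, ring, desc, skb);
}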
+#define i40e_trace(trace_name, args...) +#define i40e_trace_enabled(trace_name) (0) +#endif /* !defined(_I40E_TRACE_H_) */ +#else /* CONFIG_TRACEPOINTS */ +/* + * Modeled on trace-events-sample.h + */ + +/* + * The trace subsystem name for i40e will be "i40e". * * This file is named i40e_trace.h. * @@ -14,7 +28,8 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM i40e -/* See trace-events-sample.h for a detailed description of why this +/* + * See trace-events-sample.h for a detailed description of why this * guard clause is different from most normal include files. */ #if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) @@ -49,7 +64,8 @@ #define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() -/* Events common to PF and VF. Corresponding versions will be defined +/* + * Events common to PF and VF. Corresponding versions will be defined * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() * macro above will select the right trace point name for the driver * being built from shared code. @@ -65,7 +81,8 @@ DECLARE_EVENT_CLASS( TP_ARGS(ring, desc, buf), - /* The convention here is to make the first fields in the + /* + * The convention here is to make the first fields in the * TP_STRUCT match the TP_PROTO exactly. This enables the use * of the args struct generated by the tplist tool (from the * bcc-tools package) to be used for those fields. To access @@ -112,7 +129,7 @@ DECLARE_EVENT_CLASS( i40e_rx_template, TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + union i40e_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb), @@ -140,7 +157,7 @@ DECLARE_EVENT_CLASS( DEFINE_EVENT( i40e_rx_template, i40e_clean_rx_irq, TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + union i40e_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); @@ -148,7 +165,7 @@ DEFINE_EVENT( DEFINE_EVENT( i40e_rx_template, i40e_clean_rx_irq_rx, TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + union i40e_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); @@ -193,7 +210,9 @@ DEFINE_EVENT( TP_ARGS(skb, ring)); -/* Events unique to the PF. */ +/* + * Events unique to the PF. + */ #endif /* _I40E_TRACE_H_ */ /* This must be outside ifdef _I40E_TRACE_H */ @@ -207,3 +226,4 @@ DEFINE_EVENT( #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE i40e_trace #include +#endif /* CONFIG_TRACEPOINTS */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index e3f29dc8b290..96bc531ac1da 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,14 +1,23 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #include -#include +#ifdef HAVE_XDP_SUPPORT #include +#endif #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" -#include "i40e_txrx_common.h" -#include "i40e_xsk.h" + +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** @@ -36,9 +45,6 @@ static void i40e_fdir(struct i40e_ring *tx_ring, flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK & (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT); - flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK & - (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT); - flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK & (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); @@ -121,6 +127,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, /* grab the next descriptor */ i = tx_ring->next_to_use; first = &tx_ring->tx_bi[i]; + i40e_fdir(tx_ring, fdir_data, add); /* Now program a dummy descriptor */ @@ -161,7 +168,6 @@ dma_fail: } #define IP_HEADER_OFFSET 14 -#define I40E_UDPIP_DUMMY_PACKET_LEN 42 /** * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters * @vsi: pointer to the targeted VSI @@ -233,7 +239,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, return 0; } -#define I40E_TCPIP_DUMMY_PACKET_LEN 54 /** * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters * @vsi: pointer to the targeted VSI @@ -254,7 +259,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, /* Dummy packet */ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0}; raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); @@ -311,7 +316,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, return 0; } -#define I40E_SCTPIP_DUMMY_PACKET_LEN 46 /** * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for * a specific flow spec @@ -385,7 +389,6 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, return 0; } -#define I40E_IP_DUMMY_PACKET_LEN 34 /** * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for * a specific flow spec @@ -434,9 +437,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, dev_info(&pf->pdev->dev, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); - /* The packet buffer wasn't added to the ring so we - * need to free it now. - */ kfree(raw_packet); return -EOPNOTSUPP; } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { @@ -472,7 +472,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; int ret; - switch (input->flow_type & ~FLOW_EXT) { + switch (input->flow_type) { case TCP_V4_FLOW: ret = i40e_add_del_fdir_tcpv4(vsi, input, add); break; @@ -527,8 +527,8 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. 
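/*
 * Hedged illustration of the build_ctob() helper added at the top of
 * this file: it packs command, offload offsets, buffer size and L2 tag
 * into the descriptor's cmd_type_offset_bsz qword. For a plain
 * last-fragment data descriptor with no offloads the call reduces to
 * the following (the wrapper name is illustrative):
 */
static __le64 example_eop_data_desc(unsigned int len)
{
	/* I40E_TXD_CMD = EOP | RS: end of packet, report status */
	return build_ctob(I40E_TXD_CMD, 0, len, 0);
}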
**/ -void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id) +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) { struct i40e_pf *pf = rx_ring->vsi->back; struct pci_dev *pdev = pf->pdev; @@ -604,8 +604,14 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, if (tx_buffer->skb) { if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); +#ifdef HAVE_XDP_SUPPORT else if (ring_is_xdp(ring)) +#ifdef HAVE_XDP_FRAME_STRUCT xdp_return_frame(tx_buffer->xdpf); +#else + page_frag_free(tx_buffer->raw_buf); +#endif +#endif else dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) @@ -635,18 +641,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) unsigned long bi_size; u16 i; - if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) { - i40e_xsk_clean_tx_ring(tx_ring); - } else { - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_bi) - return; + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) - i40e_unmap_and_free_tx_resource(tx_ring, - &tx_ring->tx_bi[i]); - } + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); @@ -763,6 +764,8 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi) } } +#define WB_STRIDE 4 + /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about @@ -795,7 +798,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - smp_rmb(); + read_barrier_depends(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ @@ -810,9 +813,15 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, total_packets += tx_buf->gso_segs; /* free the skb/XDP data */ +#ifdef HAVE_XDP_SUPPORT if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT xdp_return_frame(tx_buf->xdpf); +#else + page_frag_free(tx_buf->raw_buf); +#endif else +#endif napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ @@ -867,8 +876,27 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, i += tx_ring->count; tx_ring->next_to_clean = i; - i40e_update_tx_stats(tx_ring, total_packets, total_bytes); - i40e_arm_wb(tx_ring, vsi, budget); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; + + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. 
+ */ + unsigned int j = i40e_get_tx_pending(tx_ring, false); + + if (budget && + ((j / WB_STRIDE) == 0) && (j > 0) && + !test_bit(__I40E_VSI_DOWN, vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } if (ring_is_xdp(tx_ring)) return !!budget; @@ -919,8 +947,8 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->reg_idx), - val); + I40E_PFINT_DYN_CTLN(q_vector->reg_idx), + val); } else { val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ @@ -1195,6 +1223,7 @@ clear_counts: rc->total_packets = 0; } +#ifndef CONFIG_I40E_DISABLE_PACKET_SPLIT /** * i40e_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on @@ -1219,13 +1248,35 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; - - rx_ring->rx_stats.page_reuse_count++; - - /* clear contents of buffer_info */ - old_buff->page = NULL; } +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT +/** + * i40e_reuse_rx_skb - Recycle unused skb and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have skb reused + * + * Synchronizes skb for reuse by the adapter + **/ +static void i40e_reuse_rx_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_bi[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->skb = old_buff->skb; +} + +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ /** * i40e_rx_is_programming_status - check for programming status descriptor * @qw: qword representing status_error_len in CPU ordering @@ -1246,7 +1297,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) } /** - * i40e_clean_programming_status - try clean the programming status descriptor + * i40e_clean_programming_status - clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @rx_desc: the rx descriptor written back by HW * @qw: qword representing status_error_len in CPU ordering @@ -1255,22 +1306,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw) * status being successful or not and take actions accordingly. FCoE should * handle its context/filter programming/invalidation status and take actions. * - * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. **/ -struct i40e_rx_buffer *i40e_clean_programming_status( - struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw) +static void i40e_clean_programming_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw) { struct i40e_rx_buffer *rx_buffer; - u32 ntc; + u32 ntc = rx_ring->next_to_clean; u8 id; - if (!i40e_rx_is_programming_status(qw)) - return NULL; - - ntc = rx_ring->next_to_clean; - /* fetch, update, and store next to clean */ rx_buffer = &rx_ring->rx_bi[ntc++]; ntc = (ntc < rx_ring->count) ? 
ntc : 0; @@ -1278,13 +1322,26 @@ struct i40e_rx_buffer *i40e_clean_programming_status( prefetch(I40E_RX_DESC(rx_ring, ntc)); +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT + /* place unused page back on the ring */ + i40e_reuse_rx_skb(rx_ring, rx_buffer); + + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; +#else + /* place unused page back on the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +#endif + id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, rx_desc, id); - - return rx_buffer; } /** @@ -1308,8 +1365,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!tx_ring->tx_bi) goto err; - u64_stats_init(&tx_ring->syncp); - /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); /* add u32 for head writeback, align after this takes care of @@ -1354,15 +1409,19 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_ring->skb = NULL; } - if (rx_ring->xsk_umem) { - i40e_xsk_clean_rx_ring(rx_ring); - goto skip_free; - } - /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT + if (!rx_bi->skb) + continue; + + dma_unmap_single(rx_ring->dev, rx_bi->dma, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + dev_kfree_skb(rx_bi->skb); + rx_bi->skb = NULL; +#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ if (!rx_bi->page) continue; @@ -1385,9 +1444,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_bi->page = NULL; rx_bi->page_offset = 0; +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ } -skip_free: bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); @@ -1408,8 +1467,10 @@ skip_free: void i40e_free_rx_resources(struct i40e_ring *rx_ring) { i40e_clean_rx_ring(rx_ring); +#ifdef HAVE_XDP_BUFF_RXQ if (rx_ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +#endif rx_ring->xdp_prog = NULL; kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; @@ -1439,11 +1500,13 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) goto err; +#ifdef HAVE_NDO_GET_STATS64 u64_stats_init(&rx_ring->syncp); +#endif /* HAVE_NDO_GET_STATS64 */ /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -1457,7 +1520,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - +#ifdef HAVE_XDP_BUFF_RXQ /* XDP RX-queue info only needed for RX rings exposed to XDP */ if (rx_ring->vsi->type == I40E_VSI_MAIN) { err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, @@ -1467,7 +1530,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) } rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; - +#endif return 0; err: kfree(rx_ring->rx_bi); @@ -1480,7 +1543,7 @@ err: * @rx_ring: ring to bump * @val: new head index **/ -void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -1496,6 
+1559,46 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) writel(val, rx_ring->tail); } +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT +static bool i40e_alloc_mapped_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma; + + if (unlikely(skb)) + return true; + + if (likely(!skb)) { + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + rx_ring->rx_buf_len, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buff_failed++; + return false; + } + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.alloc_buff_failed++; + return false; + } + + bi->skb = skb; + bi->dma = dma; + + return true; +} + +#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ /** * i40e_rx_offset - Return expected offset into page to access data * @rx_ring: Ring we are requesting offset of @@ -1552,12 +1655,52 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = i40e_rx_offset(rx_ring); - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; + + /* initialize pagecnt_bias to 1 representing we fully own page */ + bi->pagecnt_bias = 1; return true; } +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; +#ifdef HAVE_VLAN_RX_REGISTER + struct i40e_vsi *vsi = rx_ring->vsi; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (vlan_tag & VLAN_VID_MASK) { + if (!vsi->vlgrp) + dev_kfree_skb_any(skb); + else + vlan_gro_receive(&q_vector->napi, vsi->vlgrp, + vlan_tag, skb); + } else { + napi_gro_receive(&q_vector->napi, skb); + } +#else /* HAVE_VLAN_RX_REGISTER */ +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) +#else + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_RX) && + (vlan_tag & VLAN_VID_MASK)) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&q_vector->napi, skb); +#endif /* HAVE_VLAN_RX_REGISTER */ +} + /** * i40e_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on @@ -1579,6 +1722,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_bi[ntu]; do { +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT + if (!i40e_alloc_mapped_skb(rx_ring, bi)) + goto no_buffers; + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else if (!i40e_alloc_mapped_page(rx_ring, bi)) goto no_buffers; @@ -1592,6 +1741,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) * because each write-back erases this info. 
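/*
 * Hedged sketch of the map-and-verify pattern that
 * i40e_alloc_mapped_skb() and i40e_alloc_mapped_page() above both
 * follow: every streaming DMA mapping is checked with
 * dma_mapping_error() and the buffer is released on failure, so a dead
 * DMA address never reaches the descriptor ring. Illustrative helper.
 */
static bool example_map_rx_buf(struct device *dev, void *buf, size_t len,
			       dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	return !dma_mapping_error(dev, *dma);
}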
*/ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ rx_desc++; bi++; @@ -1623,6 +1773,58 @@ no_buffers: return true; } +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) + +static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) +{ + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); +} + +#ifdef I40E_ADD_PROBES +static void i40e_rx_extra_counters(struct i40e_vsi *vsi, u32 rx_error, + const struct i40e_rx_ptype_decoded decoded) +{ + bool ipv4; + + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + + if (ipv4 && + (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | + BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + vsi->back->rx_ip4_cso_err++; + + if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) { + if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP) + vsi->back->rx_tcp_cso_err++; + else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP) + vsi->back->rx_udp_cso_err++; + else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP) + vsi->back->rx_sctp_cso_err++; + } + + if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)) + vsi->back->rx_ip4_cso++; + if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP) + vsi->back->rx_tcp_cso++; + else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP) + vsi->back->rx_udp_cso++; + else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP) + vsi->back->rx_sctp_cso++; +} + +#endif /* I40E_ADD_PROBES */ +#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD) +#define I40E_TUNNEL_SUPPORT +#endif /** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about @@ -1652,8 +1854,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, skb_checksum_none_assert(skb); /* Rx csum enabled and ip headers found? */ +#ifdef HAVE_NDO_SET_FEATURES if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; +#else + if (!(vsi->back->flags & I40E_FLAG_RX_CSUM_ENABLED)) + return; +#endif /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) @@ -1662,12 +1869,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, /* both known and outer_ip must be set for the below code to work */ if (!(decoded.known && decoded.outer_ip)) return; +#ifdef I40E_ADD_PROBES + vsi->back->hw_csum_rx_outer++; +#endif ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); +#ifdef I40E_ADD_PROBES + i40e_rx_extra_counters(vsi, rx_error, decoded); + +#endif /* I40E_ADD_PROBES */ if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) @@ -1690,12 +1904,18 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; +#ifdef I40E_TUNNEL_SUPPORT /* If there is an outer header present that might contain a checksum * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. 
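/*
 * Hedged example of the ptype decode that i40e_rx_checksum() above
 * relies on: the 8-bit hardware ptype expands through
 * decode_rx_desc_ptype() into outer-IP and inner-protocol fields.
 * The helper below is illustrative.
 */
static bool example_ptype_is_ipv4_tcp(u8 ptype)
{
	struct i40e_rx_ptype_decoded d = decode_rx_desc_ptype(ptype);

	return d.known &&
	       d.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	       d.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4 &&
	       d.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP;
}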
*/ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) +#ifdef HAVE_SKBUFF_CSUM_LEVEL skb->csum_level = 1; +#else + skb->encapsulation = 1; +#endif +#endif /* I40E_TUNNEL_SUPPORT */ /* Only report checksum unnecessary for TCP, UDP, or SCTP */ switch (decoded.inner_prot) { @@ -1703,11 +1923,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, case I40E_RX_PTYPE_INNER_PROT_UDP: case I40E_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; - /* fall though */ + /* fall through */ default: break; } - return; checksum_fail: @@ -1720,7 +1939,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline int i40e_ptype_to_htype(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1749,6 +1968,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, struct sk_buff *skb, u8 rx_ptype) { +#ifdef NETIF_F_RXHASH u32 hash; const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << @@ -1761,6 +1981,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); } +#endif /* NETIF_F_RXHASH */ } /** @@ -1774,20 +1995,22 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. **/ +static inline void i40e_process_skb_fields(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, struct sk_buff *skb) + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { +#ifdef HAVE_PTP_1588_CLOCK u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; - u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; if (unlikely(tsynvalid)) i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); +#endif /* HAVE_PTP_1588_CLOCK */ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); @@ -1795,13 +2018,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, skb_record_rx_queue(skb, rx_ring->queue_index); - if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { - u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; - - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - le16_to_cpu(vlan_tag)); - } - /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); } @@ -1822,7 +2038,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, **/ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, union i40e_rx_desc *rx_desc) - { /* XDP packets use error pointer so abort at this point */ if (IS_ERR(skb)) @@ -1846,6 +2061,51 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, return false; } +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT +/** + * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use + * @rx_ring: rx descriptor ring to transact packets on + * @size: size of buffer to add to skb + * + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. 
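/*
 * Hedged illustration of the csum_level bump at the top of this hunk:
 * when hardware validated the checksum of a packet carried inside a
 * tunnel, the driver reports CHECKSUM_UNNECESSARY one encapsulation
 * level deep. The helper assumes a kernel with skb->csum_level; the
 * #ifdef fallback above sets skb->encapsulation instead.
 */
static void example_mark_inner_csum_ok(struct sk_buff *skb, bool tunneled)
{
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (tunneled)
		skb->csum_level = 1; /* inner checksum was validated */
}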
+ * + * ONE-BUFF version + */ +static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, + const unsigned int size) +{ + struct i40e_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_unmap_single(rx_ring->dev, rx_buffer->dma, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + prefetch(rx_buffer->skb->data); + + return rx_buffer; +} + +/** + * i40e_put_rx_buffer - Clean up used buffer and either recycle or free + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from + * + * This function will clean up the contents of the rx_buffer. It will + * either recycle the buffer or unmap it and free the associated resources. + * + * ONE-BUFF version + */ +static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer) +{ + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; +} + +#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ /** * i40e_page_is_reusable - check if any reuse is possible * @page: page struct to check @@ -1910,10 +2170,17 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } +#else + if (likely(!pagecnt_bias)) { + get_page(page); + rx_buffer->pagecnt_bias = 1; + } +#endif return true; } @@ -1938,7 +2205,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring, #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); + unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); #endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, @@ -1995,11 +2262,13 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, struct xdp_buff *xdp) { - unsigned int size = xdp->data_end - xdp->data; + unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data; + #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(I40E_SKB_PAD + size); #endif unsigned int headlen; struct sk_buff *skb; @@ -2007,23 +2276,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, /* prefetch first cache line of first page */ prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data + L1_CACHE_BYTES); + prefetch((void *)((u8 *)xdp->data + L1_CACHE_BYTES)); #endif - /* Note, we get here by enabling legacy-rx via: - * - * ethtool --set-priv-flags legacy-rx on - * - * In this mode, we currently get 0 extra XDP headroom as - * opposed to having legacy-rx off, where we process XDP - * packets going to stack via i40e_build_skb(). The latter - * provides us currently with 192 bytes of headroom. - * - * For i40e_construct_skb() mode it means that the - * xdp->data_meta will always point to xdp->data, since - * the helper cannot expand the head. Should this ever - * change in future for legacy-rx mode on, then lets also - * add xdp->data_meta handling here.
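/*
 * Hedged sketch of the reuse test at the heart of
 * i40e_can_reuse_rx_page() above: pagecnt_bias counts the references
 * the driver still owns, so the page may be recycled only while
 * page_count() exceeds the bias by at most one (our own mapping).
 * Illustrative only.
 */
static bool example_page_reusable(struct page *page, unsigned int pagecnt_bias)
{
	return page_count(page) - pagecnt_bias <= 1;
}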
- */ /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, @@ -2063,10 +2317,11 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, return skb; } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC /** * i40e_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buffer: Rx buffer to pull data from + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from * @xdp: xdp_buff pointing to the data * * This function builds an skb around an existing Rx buffer, taking care @@ -2076,7 +2331,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, struct xdp_buff *xdp) { - unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data; + #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else @@ -2086,14 +2342,10 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, #endif struct sk_buff *skb; - /* Prefetch first cache line of first page. If xdp->data_meta - * is unused, this points exactly as xdp->data, otherwise we - * likely have a consumer accessing first few bytes of meta - * data, and then actual data. - */ - prefetch(xdp->data_meta); + /* prefetch first cache line of first page */ + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data_meta + L1_CACHE_BYTES); + prefetch(xdp->data + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ skb = build_skb(xdp->data_hard_start, truesize); @@ -2102,9 +2354,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); - __skb_put(skb, xdp->data_end - xdp->data); - if (metasize) - skb_metadata_set(skb, metasize); + __skb_put(skb, size); /* buffer is used by skb, update page_offset */ #if (PAGE_SIZE < 8192) @@ -2116,6 +2366,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, return skb; } +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ /** * i40e_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on @@ -2130,6 +2381,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, if (i40e_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, @@ -2137,11 +2389,13 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); - /* clear contents of buffer_info */ - rx_buffer->page = NULL; } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; } +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ /** * i40e_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed @@ -2175,10 +2429,13 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT +static int i40e_xmit_xdp_ring(struct xdp_frame *xdp, struct i40e_ring *xdp_ring); -int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) +static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, + struct i40e_ring *xdp_ring) { struct xdp_frame *xdpf = 
convert_to_xdp_frame(xdp); @@ -2187,6 +2444,11 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) return i40e_xmit_xdp_ring(xdpf, xdp_ring); } +#else +static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, + struct i40e_ring *xdp_ring); +#endif +#endif /** * i40e_run_xdp - run an XDP program @@ -2196,10 +2458,12 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { - int err, result = I40E_XDP_PASS; + int result = I40E_XDP_PASS; +#ifdef HAVE_XDP_SUPPORT struct i40e_ring *xdp_ring; struct bpf_prog *xdp_prog; u32 act; + int err; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_prog); @@ -2212,28 +2476,41 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: + rx_ring->xdp_stats.xdp_pass++; break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; +#ifdef HAVE_XDP_FRAME_STRUCT result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); +#else + result = i40e_xmit_xdp_ring(xdp, xdp_ring); +#endif + rx_ring->xdp_stats.xdp_tx++; break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; + if (!err) + rx_ring->xdp_stats.xdp_redirect++; + else + rx_ring->xdp_stats.xdp_redirect_fail++; break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + /* fallthrough -- abort and drop */ case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fall through -- handle aborts by dropping packet */ + rx_ring->xdp_stats.xdp_unknown++; + /* fallthrough -- handle aborts by dropping packet */ case XDP_DROP: result = I40E_XDP_CONSUMED; + rx_ring->xdp_stats.xdp_drop++; break; } xdp_out: rcu_read_unlock(); - return ERR_PTR(-result); +#endif /* HAVE_XDP_SUPPORT */ + return (struct sk_buff *)ERR_PTR(-result); } /** @@ -2257,63 +2534,6 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, #endif } -/** - * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register - * @xdp_ring: XDP Tx ring - * - * This function updates the XDP Tx ring tail register. - **/ -void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) -{ - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. - */ - wmb(); - writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); -} - -/** - * i40e_update_rx_stats - Update Rx ring statistics - * @rx_ring: rx descriptor ring - * @total_rx_bytes: number of bytes received - * @total_rx_packets: number of packets received - * - * This function updates the Rx ring statistics. - **/ -void i40e_update_rx_stats(struct i40e_ring *rx_ring, - unsigned int total_rx_bytes, - unsigned int total_rx_packets) -{ - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; -} - -/** - * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map - * @rx_ring: Rx ring - * @xdp_res: Result of the receive batch - * - * This function bumps XDP Tx tail and/or flush redirect map, and - * should be called when a batch of packets has been processed in the - * napi loop. 
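/*
 * Hedged sketch of the batch-finalize pattern this hunk folds back
 * into i40e_clean_rx_irq(): per-packet XDP verdicts are OR-ed into a
 * bitmask and, once per napi poll, the redirect map is flushed and the
 * XDP Tx tail bumped. The helper is illustrative and assumes
 * HAVE_XDP_SUPPORT-era kernels.
 */
static void example_finalize_xdp_rx(struct i40e_ring *rx_ring,
				    unsigned int xdp_res)
{
	if (xdp_res & I40E_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & I40E_XDP_TX)
		i40e_xdp_ring_update_tail(
			rx_ring->vsi->xdp_rings[rx_ring->queue_index]);
}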
- **/ -void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) -{ - if (xdp_res & I40E_XDP_REDIR) - xdp_do_flush_map(); - - if (xdp_res & I40E_XDP_TX) { - struct i40e_ring *xdp_ring = - rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - - i40e_xdp_ring_update_tail(xdp_ring); - } -} - /** * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on @@ -2335,12 +2555,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) bool failure = false; struct xdp_buff xdp; +#ifdef HAVE_XDP_BUFF_RXQ xdp.rxq = &rx_ring->xdp_rxq; +#endif while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; unsigned int size; + u16 vlan_tag; + u8 rx_ptype; u64 qword; /* return some buffers to hardware, one at a time is too slow */ @@ -2365,14 +2589,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc, - qword); - if (unlikely(rx_buffer)) { - i40e_reuse_rx_page(rx_ring, rx_buffer); + if (unlikely(i40e_rx_is_programming_status(qword))) { + i40e_clean_programming_status(rx_ring, rx_desc, qword); cleaned_count++; continue; } - size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) @@ -2382,13 +2603,18 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_buffer = i40e_get_rx_buffer(rx_ring, size); /* retrieve a buffer from the ring */ +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT + /* we are leaking memory if an skb is already present */ + WARN_ON(skb); + skb = rx_buffer->skb; + __skb_put(skb, size); +#else if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; - xdp.data_meta = xdp.data; - xdp.data_hard_start = xdp.data - - i40e_rx_offset(rx_ring); - xdp.data_end = xdp.data + size; + xdp.data_hard_start = (void *)((u8 *)xdp.data - + i40e_rx_offset(rx_ring)); + xdp.data_end = (void *)((u8 *)xdp.data + size); skb = i40e_run_xdp(rx_ring, &xdp); } @@ -2406,8 +2632,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } else if (skb) { i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC } else if (ring_uses_build_skb(rx_ring)) { skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); +#endif } else { skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); } @@ -2418,6 +2646,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_buffer->pagecnt_bias++; break; } +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ i40e_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; @@ -2433,21 +2662,41 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + /* populate checksum, VLAN, and protocol */ - i40e_process_skb_fields(rx_ring, rx_desc, skb); + i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? 
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); - napi_gro_receive(&rx_ring->q_vector->napi, skb); + i40e_receive_skb(rx_ring, skb, vlan_tag); skb = NULL; /* update budget accounting */ total_rx_packets++; } - i40e_finalize_xdp_rx(rx_ring, xdp_xmit); + if (xdp_xmit & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & I40E_XDP_TX) { + struct i40e_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + + i40e_xdp_ring_update_tail(xdp_ring); + } rx_ring->skb = skb; - i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_packets; @@ -2571,6 +2820,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) container_of(napi, struct i40e_q_vector, napi); struct i40e_vsi *vsi = q_vector->vsi; struct i40e_ring *ring; + u64 flags = vsi->back->flags; bool clean_complete = true; bool arm_wb = false; int budget_per_ring; @@ -2585,11 +2835,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - bool wd = ring->xsk_umem ? - i40e_clean_xdp_tx_irq(vsi, ring, budget) : - i40e_clean_tx_irq(vsi, ring, budget); - - if (!wd) { + if (!i40e_clean_tx_irq(vsi, ring, budget)) { clean_complete = false; continue; } @@ -2607,9 +2853,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned = ring->xsk_umem ? 
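A note on the open-coded statistics update above: u64_stats_update_begin()/u64_stats_update_end() make the packet/byte pair readable without locks even on 32-bit kernels. The reader side retries if it races with a writer, roughly like this (a sketch of the usual kernel pattern, not the literal driver code):

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));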
- i40e_clean_rx_irq_zc(ring, budget_per_ring) : - i40e_clean_rx_irq(ring, budget_per_ring); + int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ @@ -2617,8 +2861,15 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) clean_complete = false; } +#ifndef HAVE_NETDEV_NAPI_LIST + /* if netdev is disabled we need to stop polling */ + if (!netif_running(vsi->netdev)) + clean_complete = true; + +#endif /* If work not completed, return budget and polling will return */ if (!clean_complete) { +#ifdef HAVE_IRQ_AFFINITY_NOTIFY int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, @@ -2638,6 +2889,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* Return budget-1 so that polling stops */ return budget - 1; } +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; @@ -2646,14 +2898,13 @@ tx_only: return budget; } - if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + if (flags & I40E_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; - /* Exit the polling mode, but don't re-enable interrupts if stack might - * poll us due to busy-polling - */ - if (likely(napi_complete_done(napi, work_done))) - i40e_update_enable_itr(vsi, q_vector); + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + + i40e_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } @@ -2696,8 +2947,12 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, return; /* snag network header to get L4 type and address */ - hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? +#ifdef HAVE_SKB_INNER_NETWORK_HEADER + hdr.network = (tx_flags & I40E_TX_FLAGS_TUNNEL) ? skb_inner_network_header(skb) : skb_network_header(skb); +#else + hdr.network = skb_network_header(skb); +#endif /* HAVE_SKB_INNER_NETWORK_HEADER */ /* Note: tx_flags gets modified to reflect inner protocols in * tx_enable_csum function if encap is enabled. @@ -2712,8 +2967,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, unsigned int h_offset = inner_hlen; /* this function updates h_offset to the end of the header */ - l4_proto = - ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); + l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); /* hlen will contain our best estimate of the tcp header */ hlen = h_offset - inner_hlen; } @@ -2726,6 +2980,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. 
@@ -2777,7 +3032,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) + if (!(tx_flags & I40E_TX_FLAGS_TUNNEL)) dtype_cmd |= ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & @@ -2816,8 +3071,13 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, __be16 protocol = skb->protocol; u32 tx_flags = 0; +#ifdef NETIF_F_HW_VLAN_CTAG_RX if (protocol == htons(ETH_P_8021Q) && !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { +#else + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_TX)) { +#endif /* When HW VLAN acceleration is turned off by the user the * stack sets the protocol to 8021q so that the driver * can take any steps required to support the SW only @@ -2875,6 +3135,14 @@ out: return 0; } +#ifndef HAVE_ENCAP_TSO_OFFLOAD +#define inner_ip_hdr(skb) 0 +#define inner_tcp_hdr(skb) 0 +#define inner_ipv6_hdr(skb) 0 +#define inner_tcp_hdrlen(skb) 0 +#define skb_inner_transport_header(skb) ((skb)->data) +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ /** * i40e_tso - set up the tso context descriptor * @first: pointer to first Tx buffer for xmit @@ -2923,14 +3191,32 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, ip.v6->payload_len = 0; } +#ifdef HAVE_ENCAP_TSO_OFFLOAD if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | +#ifdef NETIF_F_GSO_PARTIAL SKB_GSO_GRE_CSUM | +#endif +#ifdef NETIF_F_GSO_IPXIP4 SKB_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_IPXIP6 SKB_GSO_IPXIP6 | +#endif +#else +#ifdef NETIF_F_GSO_IPIP + SKB_GSO_IPIP | +#endif +#ifdef NETIF_F_GSO_SIT + SKB_GSO_SIT | +#endif +#endif SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { +#ifndef NETIF_F_GSO_PARTIAL + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { +#else if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { +#endif l4.udp->len = 0; /* determine offset of outer transport header */ @@ -2955,6 +3241,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, } } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; @@ -2969,7 +3256,14 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, gso_size = skb_shinfo(skb)->gso_size; gso_segs = skb_shinfo(skb)->gso_segs; - /* update GSO size and bytecount with header size */ +#ifndef HAVE_NDO_FEATURES_CHECK + /* too small a TSO segment size causes problems */ + if (gso_size < 64) { + gso_size = 64; + gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64); + } +#endif + /* update gso size and bytecount with header size */ first->gso_segs = gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; @@ -2983,6 +3277,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, return 1; } +#ifdef HAVE_PTP_1588_CLOCK /** * i40e_tsyn - set up the tsyn context descriptor * @tx_ring: ptr to the ring to send @@ -2997,7 +3292,11 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, { struct i40e_pf *pf; +#ifdef SKB_SHARED_TX_IS_UNION + if (likely(!(skb_tx(skb)->hardware))) +#else if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) +#endif return 0; /* Tx timestamps cannot be sampled when doing TSO */ @@ -3013,7 +3312,11 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, if (pf->ptp_tx && !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS,
pf->state)) { +#ifdef SKB_SHARED_TX_IS_UNION + skb_tx(skb)->in_progress = 1; +#else skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif pf->ptp_tx_start = jiffies; pf->ptp_tx_skb = skb_get(skb); } else { @@ -3027,6 +3330,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, return 1; } +#endif /* HAVE_PTP_1588_CLOCK */ /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer @@ -3065,6 +3369,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* compute outer L2 header size */ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; +#ifdef HAVE_ENCAP_CSUM_OFFLOAD if (skb->encapsulation) { u32 tunnel = 0; /* define outer network header type */ @@ -3088,17 +3393,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, switch (l4_proto) { case IPPROTO_UDP: tunnel |= I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; + *tx_flags |= I40E_TX_FLAGS_TUNNEL; break; +#ifdef HAVE_GRE_ENCAP_OFFLOAD case IPPROTO_GRE: tunnel |= I40E_TXD_CTX_GRE_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; + *tx_flags |= I40E_TX_FLAGS_TUNNEL; + /* There was a long-standing issue in GRE where GSO + * was not setting the outer transport header unless + * a GRE checksum was requested. This was fixed in + * the 4.6 version of the kernel. In the 4.7 kernel + * support for GRE over IPv6 was added to GSO. So we + * can assume this workaround for all IPv4 headers + * without impacting later versions of the GRE. + */ + if (ip.v4->version == 4) + l4.hdr = ip.hdr + (ip.v4->ihl * 4); break; case IPPROTO_IPIP: case IPPROTO_IPV6: - *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; + *tx_flags |= I40E_TX_FLAGS_TUNNEL; l4.hdr = skb_inner_network_header(skb); break; +#endif default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; @@ -3107,6 +3424,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } +#ifdef I40E_ADD_PROBES + if (*tx_flags & I40E_TX_FLAGS_IPV4) + if (*tx_flags & I40E_TX_FLAGS_TSO) + tx_ring->vsi->back->tx_ip4_cso++; +#endif /* compute outer L3 header size */ tunnel |= ((l4.hdr - ip.hdr) / 4) << I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; @@ -3120,7 +3442,9 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && +#ifdef NETIF_F_GSO_PARTIAL !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && +#endif (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; @@ -3138,16 +3462,22 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, if (ip.v6->version == 6) *tx_flags |= I40E_TX_FLAGS_IPV6; } +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; +#ifdef I40E_ADD_PROBES + if (*tx_flags & I40E_TX_FLAGS_TSO) + tx_ring->vsi->back->tx_ip4_cso++; +#endif /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
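For a concrete feel of the length packing done by i40e_tx_enable_csum() (MACLEN is counted in 2-byte words, IP and L4 header lengths in 4-byte words), a plain untagged TCP/IPv4 frame with no options would encode roughly as follows; the shift names are the ones from i40e_type.h, the values are illustrative:

	/* 14-byte MAC header -> 7, 20-byte IPv4 -> 5, 20-byte TCP -> doff 5 */
	offset  = (ETH_HLEN / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	offset |= (sizeof(struct iphdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;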
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : I40E_TX_DESC_CMD_IIPT_IPV4; +#ifdef NETIF_F_IPV6_CSUM } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; @@ -3156,6 +3486,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, if (l4.hdr != exthdr) ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); +#endif } /* compute inner L3 header size */ @@ -3167,18 +3498,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* enable checksum offloads */ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; +#ifdef I40E_ADD_PROBES + tx_ring->vsi->back->tx_tcp_cso++; +#endif break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ +#ifdef HAVE_SCTP cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; offset |= (sizeof(struct sctphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; +#ifdef I40E_ADD_PROBES + tx_ring->vsi->back->tx_sctp_cso++; +#endif +#endif /* HAVE_SCTP */ break; case IPPROTO_UDP: /* enable UDP checksum offload */ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; offset |= (sizeof(struct udphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; +#ifdef I40E_ADD_PROBES + tx_ring->vsi->back->tx_udp_cso++; +#endif break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) @@ -3341,7 +3683,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc * - * Returns 0 on success, -1 on failure to DMA + * Returns 0 on success, negative error code on DMA failure. **/ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, @@ -3363,6 +3705,11 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TX_FLAGS_VLAN_SHIFT; } +#ifdef I40E_ADD_PROBES + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) + tx_ring->vsi->back->tcp_segs += first->gso_segs; + +#endif first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -3456,6 +3803,9 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there @@ -3470,9 +3820,37 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, first->next_to_watch = tx_desc; /* notify HW of packet */ +#ifdef HAVE_SKB_XMIT_MORE if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); + +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + /* We need this mmiowb on IA64/Altix systems where wmb() isn't + * guaranteed to synchronize I/O. + * + * Note that mmiowb() only provides a guarantee about ordering + * when in conjunction with a spin_unlock(). This barrier is + * used to guarantee the I/O ordering with respect to a spin + * lock in the networking core code. + */ + mmiowb(); +#endif } +#else + writel(i, tx_ring->tail); + +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + /* We need this mmiowb on IA64/Altix systems where wmb() isn't + * guaranteed to synchronize I/O. + * + * Note that mmiowb() only provides a guarantee about ordering when in + * conjunction with a spin_unlock(). This barrier is used to guarantee + * the I/O ordering with respect to a spin lock in the networking core + * code. 
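To make the barrier comment above concrete: the failure mmiowb() guards against is two CPUs whose tail writes reach the NIC in the opposite order from their lock acquisition. Schematically, under the ia64/Altix semantics the comment describes (a sketch, not driver code):

	spin_lock(&txq->_xmit_lock);
	writel(i, tx_ring->tail);	/* posted MMIO doorbell write */
	mmiowb();			/* order it before the unlock below */
	spin_unlock(&txq->_xmit_lock);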
+ */ + mmiowb(); +#endif +#endif /* HAVE_SKB_XMIT_MORE */ return 0; @@ -3492,23 +3870,47 @@ dma_error: tx_ring->next_to_use = i; - return -1; + return -EIO; } +#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE) +/** + * i40e_lan_select_queue - Select the right Tx queue for the skb for LAN VSI + * @netdev: network interface device structure + * @skb: send buffer + * + * Returns the index of the selected Tx queue + **/ +u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb) +{ + return skb_tx_hash(netdev, skb); +} + +#endif /* !HAVE_NET_DEVICE_OPS && HAVE_NETDEV_SELECT_QUEUE */ + +#ifdef HAVE_XDP_SUPPORT /** * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring - * @xdp: data to transmit + * @xdp: frame data to transmit + * @xdp_ring: XDP Tx ring **/ -static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, +#ifdef HAVE_XDP_FRAME_STRUCT +static int i40e_xmit_xdp_ring(struct xdp_frame *xdp, struct i40e_ring *xdp_ring) +#else +static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, + struct i40e_ring *xdp_ring) +#endif { u16 i = xdp_ring->next_to_use; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; - void *data = xdpf->data; - u32 size = xdpf->len; dma_addr_t dma; + void *data; + u32 size; + + size = xdp_get_len(xdp); + data = xdp->data; if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { xdp_ring->tx_stats.tx_busy++; @@ -3517,11 +3919,14 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); if (dma_mapping_error(xdp_ring->dev, dma)) return I40E_XDP_CONSUMED; - tx_bi = &xdp_ring->tx_bi[i]; tx_bi->bytecount = size; tx_bi->gso_segs = 1; - tx_bi->xdpf = xdpf; +#ifdef HAVE_XDP_FRAME_STRUCT + tx_bi->xdpf = xdp; +#else + tx_bi->raw_buf = data; +#endif /* record length, and DMA address */ dma_unmap_len_set(tx_bi, len, size); @@ -3541,12 +3946,11 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, i++; if (i == xdp_ring->count) i = 0; - tx_bi->next_to_watch = tx_desc; xdp_ring->next_to_use = i; - return I40E_XDP_TX; } +#endif /** * i40e_xmit_frame_ring - Sends buffer on Tx ring @@ -3567,7 +3971,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u32 td_cmd = 0; u8 hdr_len = 0; int tso, count; +#ifdef HAVE_PTP_1588_CLOCK int tsyn; +#endif /* HAVE_PTP_1588_CLOCK */ /* prefetch the data, we'll need it later */ prefetch(skb->data); @@ -3627,11 +4033,14 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tso < 0) goto out_drop; +#ifdef HAVE_PTP_1588_CLOCK tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; +#endif /* HAVE_PTP_1588_CLOCK */ + /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; @@ -3644,16 +4053,25 @@ */ i40e_atr(tx_ring, skb, tx_flags); +#ifdef HAVE_PTP_1588_CLOCK if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset)) goto cleanup_tx_tstamp; +#else + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE + tx_ring->netdev->trans_start = jiffies; +#endif return NETDEV_TX_OK; out_drop: i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; +#ifdef HAVE_PTP_1588_CLOCK cleanup_tx_tstamp: if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) { struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); @@ -3662,7 +4080,7 @@ cleanup_tx_tstamp: pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); } - +#endif return NETDEV_TX_OK; } @@ -3688,10 +4106,13 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return i40e_xmit_frame_ring(skb, tx_ring); } +#ifdef HAVE_XDP_SUPPORT /** * i40e_xdp_xmit - Implements ndo_xdp_xmit * @dev: netdev - * @xdp: XDP buffer + * @n: amount of frames + * @frames: XDP frames + * @flags: XDP xmit flags * * Returns number of frames successfully sent. Frames that fail are * free'ed via XDP return API. @@ -3699,24 +4120,29 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * For error cases, a negative errno code is returned and no-frames * are transmitted (caller must handle freeing frames). **/ +#ifdef HAVE_XDP_FRAME_STRUCT int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) +#else +int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +#endif { struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; +#ifdef HAVE_XDP_FRAME_STRUCT struct i40e_ring *xdp_ring; int drops = 0; int i; +#endif + int err; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; - if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || - test_bit(__I40E_CONFIG_BUSY, pf->state)) + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) return -ENXIO; - +#ifdef HAVE_XDP_FRAME_STRUCT if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -3724,7 +4150,6 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; - int err; err = i40e_xmit_xdp_ring(xdpf, xdp_ring); if (err != I40E_XDP_TX) { @@ -3737,4 +4162,33 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, i40e_xdp_ring_update_tail(xdp_ring); return n - drops; +#else + err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]); + + if (err != I40E_XDP_TX) + return -ENOSPC; + + return 0; +#endif } + +/** + * i40e_xdp_flush - Implements ndo_xdp_flush + * @dev: netdev + **/ +void i40e_xdp_flush(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + unsigned int queue_index = smp_processor_id(); + struct i40e_vsi *vsi = np->vsi; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) + return; + + i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]); +} +#endif + diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 36d37f31a287..da598721f78f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,11 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
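One consequence of the ndo_xdp_xmit implementation above is worth spelling out: the transmit ring is chosen purely by the sending CPU, so XDP_REDIRECT toward this device only works while every CPU that may redirect has its own queue pair. In sketch form, mirroring the checks in the function itself:

	unsigned int queue_index = smp_processor_id();

	if (queue_index >= vsi->num_queue_pairs)
		return -ENXIO;	/* this CPU has no dedicated XDP ring */
	xdp_ring = vsi->xdp_rings[queue_index];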
*/ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ -#include - /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_DEFAULT_IRQ_WORK 256 @@ -18,7 +16,7 @@ #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ #define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 10 /* all values below must be even */ +#define I40E_ITR_100K 10 /* all values below must be even */ #define I40E_ITR_50K 20 #define I40E_ITR_20K 50 #define I40E_ITR_18K 60 @@ -28,8 +26,8 @@ #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK) #define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC)) -#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) -#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) +#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) +#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) /* 0x40 is the enable bit for interrupt rate limiting, and must be set if * the value of the rate limit is non-zero @@ -117,10 +115,18 @@ enum i40e_dyn_idx_t { */ #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 #define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) +#ifdef I40E_32BYTE_RX #define i40e_rx_desc i40e_32byte_rx_desc +#else +#define i40e_rx_desc i40e_16byte_rx_desc +#endif +#ifdef HAVE_STRUCT_DMA_ATTRS +#define I40E_RX_DMA_ATTR NULL +#else #define I40E_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif /* Attempt to maximize the headroom available for incoming frames. We * use a 2K buffer for receives and need 1536/1534 to store the data for @@ -271,9 +277,11 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) #define I40E_TX_FLAGS_IPV6 BIT(5) #define I40E_TX_FLAGS_FCCRC BIT(6) #define I40E_TX_FLAGS_FSO BIT(7) +#ifdef HAVE_PTP_1588_CLOCK #define I40E_TX_FLAGS_TSYN BIT(8) +#endif /* HAVE_PTP_1588_CLOCK */ #define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) +#define I40E_TX_FLAGS_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -296,17 +304,17 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; - union { - struct { - struct page *page; - __u32 page_offset; - __u16 pagecnt_bias; - }; - struct { - void *addr; - u64 handle; - }; - }; +#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT + struct sk_buff *skb; +#else + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */ }; struct i40e_queue_stats { @@ -314,6 +322,17 @@ struct i40e_queue_stats { u64 bytes; }; +#ifdef HAVE_XDP_SUPPORT +struct i40e_xdp_stats { + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_tx; + u64 xdp_unknown; + u64 xdp_redirect; + u64 xdp_redirect_fail; +}; +#endif + struct i40e_tx_queue_stats { u64 restart_queue; u64 tx_busy; @@ -393,7 +412,12 @@ struct i40e_ring { /* stats structs */ struct i40e_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 struct u64_stats_sync syncp; +#endif +#ifdef HAVE_XDP_SUPPORT + struct i40e_xdp_stats xdp_stats; +#endif union { struct i40e_tx_queue_stats tx_stats; struct i40e_rx_queue_stats rx_stats; @@ -417,9 +441,9 @@ struct i40e_ring { */ struct i40e_channel *ch; +#ifdef HAVE_XDP_BUFF_RXQ struct xdp_rxq_info xdp_rxq; - struct xdp_umem *xsk_umem; - struct zero_copy_allocator zca; /* ZC allocator anchor */ +#endif } ____cacheline_internodealigned_in_smp; static inline 
bool ring_uses_build_skb(struct i40e_ring *ring) @@ -437,6 +461,13 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; } +#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 +#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e +#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 +#define I40E_ITR_ADAPTIVE_BULK 0x0000 +#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) + static inline bool ring_is_xdp(struct i40e_ring *ring) { return !!(ring->flags & I40E_TXR_FLAGS_XDP); @@ -447,13 +478,6 @@ static inline void set_ring_xdp(struct i40e_ring *ring) ring->flags |= I40E_TXR_FLAGS_XDP; } -#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 -#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e -#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 -#define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) - struct i40e_ring_container { struct i40e_ring *ring; /* pointer to linked list of ring(s) */ unsigned long next_update; /* jiffies value of next update */ @@ -481,6 +505,10 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE) +extern u16 i40e_lan_select_queue(struct net_device *netdev, + struct sk_buff *skb); +#endif void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring); @@ -493,8 +521,27 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); +#ifdef HAVE_XDP_FRAME_STRUCT int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +#else +int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp); +#endif +void i40e_xdp_flush(struct net_device *dev); + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT +static inline u32 xdp_get_len(struct xdp_frame *xdp) +{ + return xdp->len; +} +#else +static inline u32 xdp_get_len(struct xdp_buff *xdp) +{ + return xdp->data_end - xdp->data; +} +#endif +#endif /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h deleted file mode 100644 index 8af0e99c6c0d..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h +++ /dev/null @@ -1,91 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2018 Intel Corporation. 
*/ - -#ifndef I40E_TXRX_COMMON_ -#define I40E_TXRX_COMMON_ - -void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id); -int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); -struct i40e_rx_buffer *i40e_clean_programming_status( - struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw); -void i40e_process_skb_fields(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, struct sk_buff *skb); -void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring); -void i40e_update_rx_stats(struct i40e_ring *rx_ring, - unsigned int total_rx_bytes, - unsigned int total_rx_packets); -void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res); -void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); - -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED BIT(0) -#define I40E_XDP_TX BIT(1) -#define I40E_XDP_REDIR BIT(2) - -/** - * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword - **/ -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) -{ - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); -} - -/** - * i40e_update_tx_stats - Update the egress statistics for the Tx ring - * @tx_ring: Tx ring to update - * @total_packet: total packets sent - * @total_bytes: total bytes sent - **/ -static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring, - unsigned int total_packets, - unsigned int total_bytes) -{ - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_packets += total_packets; -} - -#define WB_STRIDE 4 - -/** - * i40e_arm_wb - (Possibly) arms Tx write-back - * @tx_ring: Tx ring to update - * @vsi: the VSI - * @budget: the NAPI budget left - **/ -static inline void i40e_arm_wb(struct i40e_ring *tx_ring, - struct i40e_vsi *vsi, - int budget) -{ - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - /* check to see if there are < 4 descriptors - * waiting to be written back, then kick the hardware to force - * them to be written back in case we stay in NAPI. - * In this mode on X722 we do not enable Interrupt. - */ - unsigned int j = i40e_get_tx_pending(tx_ring, false); - - if (budget && - ((j / WB_STRIDE) == 0) && j > 0 && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - } -} - -void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring); -void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring); -bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi); - -#endif /* I40E_TXRX_COMMON_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index b43ec94a0f29..325ad8272f96 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. 
*/ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ @@ -13,7 +13,7 @@ #include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) +#define I40E_MASK(mask, shift) (mask << shift) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 4 @@ -33,6 +33,9 @@ struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif /* Data type manipulation macros. */ #define I40E_DESC_UNUSED(R) \ @@ -59,7 +62,9 @@ enum i40e_debug_mask { I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, I40E_DEBUG_PACKAGE = 0x00002000, + I40E_DEBUG_IWARP = 0x00F00000, + I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, @@ -85,16 +90,16 @@ enum i40e_debug_mask { #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) + I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) + I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_PHY_COM_REG_PAGE 0x1E -#define I40E_PHY_LED_LINK_MODE_MASK 0xF0 -#define I40E_PHY_LED_MANUAL_ON 0x100 -#define I40E_PHY_LED_PROV_REG_1 0xC430 -#define I40E_PHY_LED_MODE_MASK 0xFFFF -#define I40E_PHY_LED_MODE_ORIG 0x80000000 +#define I40E_PHY_COM_REG_PAGE 0x1E +#define I40E_PHY_LED_LINK_MODE_MASK 0xF0 +#define I40E_PHY_LED_MANUAL_ON 0x100 +#define I40E_PHY_LED_PROV_REG_1 0xC430 +#define I40E_PHY_LED_MODE_MASK 0xFFFF +#define I40E_PHY_LED_MODE_ORIG 0x80000000 /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we @@ -233,7 +238,8 @@ struct i40e_phy_info { #define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) #define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) -/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some +/* + * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, * a shift is needed to adjust for this with values larger than 31. 
The @@ -259,6 +265,15 @@ struct i40e_phy_info { #define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \ I40E_PHY_TYPE_OFFSET2) #define I40E_HW_CAP_MAX_GPIO 30 +enum i40e_acpi_programming_method { + I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0, + I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1 +}; + +#define I40E_WOL_SUPPORT_MASK 0x1 +#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2 +#define I40E_PROXY_SUPPORT_MASK 0x4 + /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; @@ -336,6 +351,9 @@ struct i40e_hw_capabilities { u32 enabled_tcmap; u32 maxtc; u64 wr_csr_prot; + bool apm_wol_support; + enum i40e_acpi_programming_method acpi_prog_method; + bool proxy_support; }; struct i40e_mac_info { @@ -386,6 +404,7 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, I40E_NVMUPD_GET_AQ_EVENT, + I40E_NVMUPD_FEATURES, }; enum i40e_nvmupd_state { @@ -421,8 +440,12 @@ enum i40e_nvmupd_state { #define I40E_NVM_AQE 0xe #define I40E_NVM_EXEC 0xf +#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0 +#define I40E_NVM_EXEC_FEATURES 0xe +#define I40E_NVM_EXEC_STATUS 0xf + #define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) +#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ @@ -435,6 +458,20 @@ struct i40e_nvm_access { u8 data[1]; }; +/* NVMUpdate features API */ +#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0 +#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14 +#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12 + +#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0) + +struct i40e_nvmupd_features { + u8 major; + u8 minor; + u16 size; + u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN]; +}; + /* (Q)SFP module access definitions */ #define I40E_I2C_EEPROM_DEV_ADDR 0xA0 #define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 @@ -443,7 +480,7 @@ struct i40e_nvm_access { #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C #define I40E_MODULE_SFF_ADDR_MODE 0x04 -#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40 +#define I40E_MODULE_SFF_DIAG_CAPAB 0x40 #define I40E_MODULE_TYPE_QSFP_PLUS 0x0D #define I40E_MODULE_TYPE_QSFP28 0x11 #define I40E_MODULE_QSFP_MAX_LEN 640 @@ -618,13 +655,18 @@ struct i40e_hw { struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + /* WoL and proxy support */ + u16 num_wol_proxy_filters; + u16 wol_proxy_vsi_seid; + #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) #define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) -#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) +#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) #define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) -#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) +#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6) +#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) u64 flags; /* Used in set switch config AQ command */ @@ -632,12 +674,15 @@ struct i40e_hw { u16 first_tag; u16 second_tag; + /* NVMUpdate features */ + struct i40e_nvmupd_features nvmupd_features; + /* debug mask */ u32 debug_mask; char err_str[16]; }; -static inline bool i40e_is_vf(struct i40e_hw *hw) +static INLINE bool i40e_is_vf(struct i40e_hw *hw) { return (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF); @@ -737,32 +782,28 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_CRCP_SHIFT 
= 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, - I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel packets INT_UDP_0 is the right status for - * UDP header - */ + I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ - << I40E_RXD_QW1_STATUS_SHIFT) +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \ + I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ - BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -1076,8 +1117,7 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -1183,10 +1223,6 @@ enum i40e_filter_program_desc_pcmd { I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) -#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) - #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) @@ -1300,6 +1336,8 @@ struct i40e_hw_port_stats { u32 rx_lpi_status; u64 tx_lpi_count; /* etlpic */ u64 rx_lpi_count; /* erlpic */ + u64 tx_lpi_duration; + u64 rx_lpi_duration; }; /* Checksum and Shadow RAM pointers */ @@ -1313,6 +1351,10 @@ struct i40e_hw_port_stats { #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 +#define I40E_SR_NVM_MAP_VERSION 0x29 +#define I40E_SR_NVM_IMAGE_VERSION 0x2A +#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F @@ -1444,8 +1486,8 @@ enum i40e_reset_type { }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0x06 -#define I40E_SR_LLDP_CFG_PTR 0x31 +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 struct i40e_lldp_variables { u16 length; u16 adminstatus; @@ -1606,4 +1648,10 @@ struct i40e_profile_info { u8 reserved[7]; u8 name[I40E_DDP_NAME_SIZE]; }; + +#define I40E_BCM_PHY_PCS_STATUS1_PAGE 0x3 +#define I40E_BCM_PHY_PCS_STATUS1_REG 0x0001 +#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI BIT(8) +#define 
I40E_BCM_PHY_PCS_STATUS1_TX_LPI BIT(9) + #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3d2440838822..76b92bd4df85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #include "i40e.h" @@ -39,6 +39,39 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, } } +/** + * i40e_vc_link_speed2mbps - Convert AdminQ link_speed bit represented + * to integer value of Mbps + * @link_speed: the speed to convert + * + * Returns the speed as direct value of Mbps. + **/ +static INLINE u32 +i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed) +{ + switch (link_speed) { + case I40E_LINK_SPEED_100MB: + return SPEED_100; + case I40E_LINK_SPEED_1GB: + return SPEED_1000; + case I40E_LINK_SPEED_2_5GB: + return SPEED_2500; + case I40E_LINK_SPEED_5GB: + return SPEED_5000; + case I40E_LINK_SPEED_10GB: + return SPEED_10000; + case I40E_LINK_SPEED_20GB: + return SPEED_20000; + case I40E_LINK_SPEED_25GB: + return SPEED_25000; + case I40E_LINK_SPEED_40GB: + return SPEED_40000; + case I40E_LINK_SPEED_UNKNOWN: + return SPEED_UNKNOWN; + } + return SPEED_UNKNOWN; +} + /** * i40e_vc_notify_vf_link_state * @vf: pointer to the VF structure @@ -56,23 +89,44 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->queues_enabled) { + pfe.event_data.link_event_adv.link_status = false; + pfe.event_data.link_event_adv.link_speed = 0; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + } else if (vf->link_forced) { + pfe.event_data.link_event_adv.link_status = vf->link_up; + pfe.event_data.link_event_adv.link_speed = vf->link_up ? + i40e_vc_link_speed2mbps(ls->link_speed) : 0; +#endif + } else { + pfe.event_data.link_event_adv.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event_adv.link_speed = + i40e_vc_link_speed2mbps(ls->link_speed); + } +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ /* Always report link is down if the VF queues aren't enabled */ if (!vf->queues_enabled) { pfe.event_data.link_event.link_status = false; pfe.event_data.link_event.link_speed = 0; +#ifdef HAVE_NDO_SET_VF_LINK_STATE } else if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; - pfe.event_data.link_event.link_speed = - (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0); + pfe.event_data.link_event.link_speed = (vf->link_up ? 
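The distinction the two #ifdef branches above encode: a VF that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED is told its rate in plain Mbps via i40e_vc_link_speed2mbps() (e.g. SPEED_25000 for a 25G link, SPEED_UNKNOWN when the AQ reports none), while a legacy VF still receives the coarse VIRTCHNL_LINK_SPEED_* encoding. Illustrative use, mirroring the calls above:

	/* advanced VF: exact Mbps straight from the AQ link status */
	pfe.event_data.link_event_adv.link_speed =
		i40e_vc_link_speed2mbps(ls->link_speed);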
+ i40e_virtchnl_link_speed(ls->link_speed) : 0); +#endif } else { pfe.event_data.link_event.link_status = ls->link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); } +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, - 0, (u8 *)&pfe, sizeof(pfe), NULL); + I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); } /** @@ -101,7 +155,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, + i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } @@ -130,7 +184,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, - 0, (u8 *)&pfe, + I40E_SUCCESS, (u8 *)&pfe, sizeof(struct virtchnl_pf_event), NULL); } /***********************misc routines*****************************/ @@ -266,8 +320,8 @@ static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) * given VSI. */ queue_id -= vf->ch[i].num_qps; - } } + } return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); } @@ -381,156 +435,6 @@ irq_list_done: i40e_flush(hw); } -/** - * i40e_release_iwarp_qvlist - * @vf: pointer to the VF. - * - **/ -static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) -{ - struct i40e_pf *pf = vf->pf; - struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; - u32 msix_vf; - u32 i; - - if (!vf->qvlist_info) - return; - - msix_vf = pf->hw.func_caps.num_msix_vectors_vf; - for (i = 0; i < qvlist_info->num_vectors; i++) { - struct virtchnl_iwarp_qv_info *qv_info; - u32 next_q_index, next_q_type; - struct i40e_hw *hw = &pf->hw; - u32 v_idx, reg_idx, reg; - - qv_info = &qvlist_info->qv_info[i]; - if (!qv_info) - continue; - v_idx = qv_info->v_idx; - if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { - /* Figure out the queue after CEQ and make that the - * first queue. - */ - reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; - reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); - next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) - >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; - next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) - >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; - - reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); - reg = (next_q_index & - I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | - (next_q_type << - I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); - - wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); - } - } - kfree(vf->qvlist_info); - vf->qvlist_info = NULL; -} - -/** - * i40e_config_iwarp_qvlist - * @vf: pointer to the VF info - * @qvlist_info: queue and vector list - * - * Return 0 on success or < 0 on error - **/ -static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, - struct virtchnl_iwarp_qvlist_info *qvlist_info) -{ - struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - struct virtchnl_iwarp_qv_info *qv_info; - u32 v_idx, i, reg_idx, reg; - u32 next_q_idx, next_q_type; - u32 msix_vf; - int ret = 0; - - msix_vf = pf->hw.func_caps.num_msix_vectors_vf; - - if (qvlist_info->num_vectors > msix_vf) { - dev_warn(&pf->pdev->dev, - "Incorrect number of iwarp vectors %u. 
Maximum %u allowed.\n", - qvlist_info->num_vectors, - msix_vf); - ret = -EINVAL; - goto err_out; - } - - kfree(vf->qvlist_info); - vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info, - qvlist_info->num_vectors - 1), - GFP_KERNEL); - if (!vf->qvlist_info) { - ret = -ENOMEM; - goto err_out; - } - vf->qvlist_info->num_vectors = qvlist_info->num_vectors; - - msix_vf = pf->hw.func_caps.num_msix_vectors_vf; - for (i = 0; i < qvlist_info->num_vectors; i++) { - qv_info = &qvlist_info->qv_info[i]; - if (!qv_info) - continue; - - /* Validate vector id belongs to this vf */ - if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) { - ret = -EINVAL; - goto err_free; - } - - v_idx = qv_info->v_idx; - - vf->qvlist_info->qv_info[i] = *qv_info; - - reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); - /* We might be sharing the interrupt, so get the first queue - * index and type, push it down the list by adding the new - * queue on top. Also link it with the new queue in CEQCTL. - */ - reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); - next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> - I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); - next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> - I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); - - if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { - reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; - reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | - (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | - (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | - (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | - (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); - wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); - - reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); - reg = (qv_info->ceq_idx & - I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | - (I40E_QUEUE_TYPE_PE_CEQ << - I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); - wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); - } - - if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { - reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | - (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | - (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); - - wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); - } - } - - return 0; -err_free: - kfree(vf->qvlist_info); - vf->qvlist_info = NULL; -err_out: - return ret; -} - /** * i40e_config_vsi_tx_queue * @vf: pointer to the VF info @@ -648,7 +552,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, } rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; - /* set split mode 10b */ + /* set splitalways mode 10b */ rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } @@ -699,6 +603,542 @@ error_param: return ret; } +/** + * i40e_validate_vf + * @pf: the physical function + * @vf_id: VF identifier + * + * Check that the VF is enabled and the vsi exists. 
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) +{ + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret = 0; + + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, + "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto err_out; + } + vf = &pf->vf[vf_id]; + vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); + if (!vsi) + ret = -EINVAL; +err_out: + return ret; +} + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + +/** + * i40e_configure_vf_loopback + * @vsi: VF VSI to configure + * @vf_id: VF identifier + * @enable: enable or disable + * + * This function configures the VF VSI with the loopback settings + * + * Returns 0 on success, negative on failure + * + **/ +static int i40e_configure_vf_loopback(struct i40e_vsi *vsi, int vf_id, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_vsi_context ctxt; + int ret = 0; + + vsi->info.valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_SWITCH_VALID); + if (enable) + vsi->info.switch_id |= + CPU_TO_LE16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + else + vsi->info.switch_id &= + ~CPU_TO_LE16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = vsi->seid; + ctxt.pf_num = vsi->back->hw.pf_id; + ctxt.info = vsi->info; + ret = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { + dev_err(&pf->pdev->dev, "Error %d configuring loopback for VF %d\n", + ret, vf_id); + ret = -EIO; + } + return ret; +} + +/** + * i40e_configure_vf_vlan_stripping + * @vsi: VF VSI to configure + * @vf_id: VF identifier + * @enable: enable or disable + * + * This function enables or disables vlan stripping on the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_configure_vf_vlan_stripping(struct i40e_vsi *vsi, int vf_id, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_vsi_context ctxt; + int ret = 0; + u8 flag; + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + if (enable) { + /* Don't enable vlan stripping if port vlan is set */ + if (vsi->info.pvid) { + dev_err(&pf->pdev->dev, + "Cannot enable vlan stripping when port VLAN is set\n"); + ret = -EINVAL; + goto err_out; + } + flag = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + } else { + flag = I40E_AQ_VSI_PVLAN_EMOD_NOTHING; + } + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | flag; + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + ret = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { + dev_err(&pf->pdev->dev, "Error %d configuring vlan stripping for VF %d\n", + ret, vf_id); + ret = -EIO; + } +err_out: + return ret; +} + +/** + * i40e_configure_vf_promisc_mode + * @vf: VF + * @vsi: VF VSI to configure + * @promisc_mode: promisc mode to configure + * + * This function configures the requested promisc mode for a vf + * + * Returns 0 on success, negative on failure + **/ +static int i40e_configure_vf_promisc_mode(struct i40e_vf *vf, + struct i40e_vsi *vsi, + u8 promisc_mode) +{ + struct i40e_pf *pf = vsi->back; + int ret = 0; + + if (promisc_mode & VFD_PROMISC_MULTICAST) { + ret = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, vsi->seid, + true, NULL); + if (ret) + goto err; + vf->promisc_mode |= VFD_PROMISC_MULTICAST; + } else { + ret = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, vsi->seid, + false, NULL); + if (ret) + goto err; + vf->promisc_mode &= ~VFD_PROMISC_MULTICAST; + } + if (promisc_mode & VFD_PROMISC_UNICAST) { + ret = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, vsi->seid, + true, NULL, true); + if (ret) + goto err; + 
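Since i40e_configure_vf_promisc_mode() tests VFD_PROMISC_MULTICAST and VFD_PROMISC_UNICAST as independent bits, a caller can request both modes in a single call. A hypothetical invocation (the flag names come from the function itself; the call site is illustrative only):

	/* enable unicast and multicast promiscuous on the VF's VSI at once */
	ret = i40e_configure_vf_promisc_mode(vf, vsi,
					     VFD_PROMISC_UNICAST |
					     VFD_PROMISC_MULTICAST);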
vf->promisc_mode |= VFD_PROMISC_UNICAST; + } else { + ret = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, vsi->seid, + false, NULL, true); + if (ret) + goto err; + vf->promisc_mode &= ~VFD_PROMISC_UNICAST; + } +err: + if (ret) + dev_err(&pf->pdev->dev, "Error %d configuring promisc mode for VF %d\n", + ret, vf->vf_id); + + return ret; +} + +/** + * i40e_add_ingress_egress_mirror + * @src_vsi: VSI to mirror from + * @mirror_vsi: VSI to mirror to + * @rule_type: rule type to configure + * @rule_id: rule id to store + * + * This function adds the requested ingress/egress mirror for a vsi + * + * Returns 0 on success, negative on failure + **/ +static int i40e_add_ingress_egress_mirror(struct i40e_vsi *src_vsi, + struct i40e_vsi *mirror_vsi, + u16 rule_type, u16 *rule_id) +{ + u16 dst_seid, rules_used, rules_free, sw_seid; + struct i40e_pf *pf = src_vsi->back; + int ret, num = 0, cnt = 1; + int *vsi_ingress_vlan; + int *vsi_egress_vlan; + __le16 *mr_list; + + mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL); + if (!mr_list) { + ret = -ENOMEM; + goto err_out; + } + + if (src_vsi->type == I40E_VSI_MAIN) { + vsi_ingress_vlan = &pf->ingress_vlan; + vsi_egress_vlan = &pf->egress_vlan; + } else { + vsi_ingress_vlan = &pf->vf[src_vsi->vf_id].ingress_vlan; + vsi_egress_vlan = &pf->vf[src_vsi->vf_id].egress_vlan; + } + + if (I40E_IS_MIRROR_VLAN_ID_VALID(*vsi_ingress_vlan)) { + if (src_vsi->type == I40E_VSI_MAIN) + dev_err(&pf->pdev->dev, + "PF already has an ingress mirroring configured, only one rule per PF is supported!\n"); + else + dev_err(&pf->pdev->dev, + "VF=%d already has an ingress mirroring configured, only one rule per VF is supported!\n", + src_vsi->vf_id); + ret = -EPERM; + goto err_out; + } else if (I40E_IS_MIRROR_VLAN_ID_VALID(*vsi_egress_vlan)) { + if (src_vsi->type == I40E_VSI_MAIN) + dev_err(&pf->pdev->dev, + "PF already has an egress mirroring configured, only one rule per PF is supported!\n"); + else + dev_err(&pf->pdev->dev, + "VF=%d already has an egress mirroring configured, only one rule per VF is supported!\n", + src_vsi->vf_id); + ret = -EPERM; + goto err_out; + } + + sw_seid = src_vsi->uplink_seid; + dst_seid = mirror_vsi->seid; + mr_list[num] = CPU_TO_LE16(src_vsi->seid); + ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid, + rule_type, dst_seid, + cnt, mr_list, NULL, + rule_id, &rules_used, + &rules_free); + kfree(mr_list); +err_out: + return ret; +} + +/** + * i40e_del_ingress_egress_mirror + * @src_vsi: the mirrored VSI + * @rule_type: rule type to configure + * @rule_id : rule id to delete + * + * This function deletes the ingress/egress mirror on a VSI + * + * Returns 0 on success, negative on failure + **/ +static int i40e_del_ingress_egress_mirror(struct i40e_vsi *src_vsi, + u16 rule_type, u16 rule_id) +{ + u16 rules_used, rules_free, sw_seid; + struct i40e_pf *pf = src_vsi->back; + int ret; + + sw_seid = src_vsi->uplink_seid; + ret = i40e_aq_delete_mirrorrule(&pf->hw, sw_seid, rule_type, + rule_id, 0, NULL, NULL, + &rules_used, &rules_free); + return ret; +} + +/** + * i40e_restore_ingress_egress_mirror + * @src_vsi: the mirrored VSI + * @mirror: VSI to mirror to + * @rule_type: rule type to configure + * @rule_id : rule id to delete + * + * This function restores the configured ingress/egress mirrors + * + * Returns 0 on success, negative on failure + **/ +int i40e_restore_ingress_egress_mirror(struct i40e_vsi *src_vsi, + int mirror, u16 rule_type, u16 *rule_id) +{ + struct i40e_vsi *mirror_vsi; + struct i40e_vf *mirror_vf; + struct i40e_pf *pf; + int ret = 0; + + pf = 
src_vsi->back; + + /* validate the mirror */ + ret = i40e_validate_vf(pf, mirror); + if (ret) + goto err_out; + mirror_vf = &pf->vf[mirror]; + mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx]; + ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, rule_type, + rule_id); + +err_out: + return ret; +} + +/** + * i40e_configure_vf_link + * @vf: VF + * @link: link state to configure + * + * This function configures the requested link state for a VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_configure_vf_link(struct i40e_vf *vf, u8 link) +{ + struct virtchnl_pf_event pfe; + struct i40e_link_status *ls; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw; + int abs_vf_id; + int ret = 0; + + hw = &pf->hw; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + ls = &pf->hw.phy.link_info; + switch (link) { + case VFD_LINKSTATE_AUTO: + vf->link_forced = false; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event_adv.link_speed = + i40e_vc_link_speed2mbps(ls->link_speed); +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = + i40e_virtchnl_link_speed(ls->link_speed); +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + break; + case VFD_LINKSTATE_ON: + vf->link_forced = true; + vf->link_up = true; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = true; + pfe.event_data.link_event_adv.link_speed = + i40e_vc_link_speed2mbps(ls->link_speed); +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + pfe.event_data.link_event.link_status = true; + pfe.event_data.link_event.link_speed = + i40e_virtchnl_link_speed(ls->link_speed); +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + break; + case VFD_LINKSTATE_OFF: + vf->link_forced = true; + vf->link_up = false; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = false; + pfe.event_data.link_event_adv.link_speed = 0; +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + break; + default: + ret = -EINVAL; + goto error_out; + } + /* Do not allow change link state when VF is disabled + * Check if requested link state is not VFD_LINKSTATE_OFF, to prevent + * false positive warning in case of reloading the driver + */ + if (vf->pf_ctrl_disable && link != VFD_LINKSTATE_OFF) { + vf->link_up = false; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = false; + pfe.event_data.link_event_adv.link_speed = 0; +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + dev_warn(&pf->pdev->dev, + "Not possible to change VF link state, please enable it first\n"); + } + + /* Notify the VF of its new link state */ + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, + I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); +error_out: + return ret; +} + +/** + * i40e_vf_del_vlan_mirror + * @vf: pointer to the VF structure + * @vsi: pointer to the VSI structure + * + * Delete configured mirror vlans + * + * Returns 0 on success, negative on failure + * + **/ +static int i40e_vf_del_vlan_mirror(struct i40e_vf *vf, struct i40e_vsi *vsi) +{ + u16 
rules_used, rules_free, vid; + struct i40e_pf *pf = vf->pf; + int ret = 0, num = 0, cnt; + __le16 *mr_list; + + cnt = bitmap_weight(vf->mirror_vlans, VLAN_N_VID); + if (cnt) { + mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL); + if (!mr_list) + return -ENOMEM; + + for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) { + mr_list[num] = CPU_TO_LE16(vid); + num++; + } + + ret = i40e_aq_delete_mirrorrule(&pf->hw, vsi->uplink_seid, + I40E_AQC_MIRROR_RULE_TYPE_VLAN, + vf->vlan_rule_id, cnt, mr_list, + NULL, &rules_used, + &rules_free); + + vf->vlan_rule_id = 0; + kfree(mr_list); + } + + return ret; +} + +/** + * i40e_restore_vfd_config + * @vf: pointer to the VF structure + * @vsi: VF VSI to be configured + * + * Restore the VF-d config as per the stored configuration + * + * Returns 0 on success, negative on failure + * + **/ +static int i40e_restore_vfd_config(struct i40e_vf *vf, struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vf->pf; + int ret = 0, cnt = 0; + u16 vid; + + /* Restore all VF-d configuration on reset */ + for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) { + ret = i40e_vsi_add_vlan(vsi, vid); + if (ret) + goto err_out; + } + if (!vf->allow_untagged) { + spin_lock_bh(&vsi->mac_filter_hash_lock); + i40e_rm_vlan_all_mac(vsi, 0); + spin_unlock_bh(&vsi->mac_filter_hash_lock); + i40e_service_event_schedule(vsi->back); + } + + cnt = bitmap_weight(vf->mirror_vlans, VLAN_N_VID); + if (cnt) { + u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + u16 rule_id, rules_used, rules_free; + u16 sw_seid = vsi->uplink_seid; + u16 dst_seid = vsi->seid; + __le16 *mr_list; + int num = 0; + + mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL); + if (!mr_list) + return -ENOMEM; + for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) { + mr_list[num] = CPU_TO_LE16(vid); + num++; + } + ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid, rule_type, + dst_seid, cnt, mr_list, NULL, + &rule_id, &rules_used, + &rules_free); + if (!ret) + vf->vlan_rule_id = rule_id; + kfree(mr_list); + } + + ret = i40e_configure_vf_loopback(vsi, vf->vf_id, vf->loopback); + if (ret) { + vf->loopback = false; + goto err_out; + } + + if (vf->vlan_stripping) { + ret = i40e_configure_vf_vlan_stripping(vsi, vf->vf_id, true); + if (ret) { + vf->vlan_stripping = false; + goto err_out; + } + } + + if (vf->promisc_mode) { + ret = i40e_configure_vf_promisc_mode(vf, vsi, vf->promisc_mode); + if (ret) { + vf->promisc_mode = VFD_PROMISC_OFF; + goto err_out; + } + } + + if (vf->link_forced) { + u8 link; + + link = (vf->link_up ? 
VFD_LINKSTATE_ON : VFD_LINKSTATE_OFF); + ret = i40e_configure_vf_link(vf, link); + if (ret) { + vf->link_forced = false; + goto err_out; + } + } + + if (vf->bw_share_applied && vf->bw_share) { + struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0}; + int i; + + bw_data.tc_valid_bits = 1; + bw_data.tc_bw_credits[0] = vf->bw_share; + + ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Config VSI BW allocation per TC failed = %d\n", + pf->hw.aq.asq_last_status); + vf->bw_share_applied = false; + goto err_out; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + vsi->info.qs_handle[i] = bw_data.qs_handles[i]; + } + +err_out: + return ret; +} +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ /** * i40e_alloc_vsi_res * @vf: pointer to the VF info @@ -727,7 +1167,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) if (!idx) { u64 hena = i40e_pf_get_default_rss_hena(pf); + bool trunk_conf = false; u8 broadcast[ETH_ALEN]; + u16 vid; vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; @@ -737,7 +1179,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) * a port VLAN and restore the VSI configuration if * needed. */ - if (vf->port_vlan_id) + for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) { + if (vid != vf->port_vlan_id) + trunk_conf = true; + } + if (vf->port_vlan_id && !trunk_conf) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -746,7 +1192,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) vf->default_lan_addr.addr); if (!f) dev_info(&pf->pdev->dev, - "Could not add MAC filter %pM for VF %d\n", + "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } eth_broadcast_addr(broadcast); @@ -784,6 +1230,12 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", vf->vf_id, ret); } +#ifdef HAVE_NDO_SET_VF_LINK_STATE + ret = i40e_restore_vfd_config(vf, vsi); + if (ret) + dev_err(&pf->pdev->dev, + "Failed to restore VF-d config, error %d\n", ret); +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ error_alloc_vsi_res: return ret; @@ -942,12 +1394,18 @@ static void i40e_free_vf_res(struct i40e_vf *vf) */ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); +#ifdef HAVE_NDO_SET_VF_LINK_STATE + /* Release VLAN mirror */ + if (vf->lan_vsi_idx) + i40e_vf_del_vlan_mirror(vf, pf->vsi[vf->lan_vsi_idx]); +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ + + /* It's possible the VF had requested more queues than the default so * do the accounting here when we're about to free them. 
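* Queues beyond the default allocation were drawn from pf->queues_left, so they are handed back there below.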
*/ if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { - pf->queues_left += vf->num_queue_pairs - - I40E_DEFAULT_QUEUES_PER_VF; + pf->queues_left += + vf->num_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; } /* free vsi & disconnect it from the parent uplink */ @@ -955,7 +1413,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf) i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); vf->lan_vsi_idx = 0; vf->lan_vsi_id = 0; - vf->num_mac = 0; } /* do the accounting and remove additional ADq VSIs */ @@ -1124,10 +1581,10 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, bool allmulti, bool alluni) { + i40e_status aq_ret = I40E_SUCCESS; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_mac_filter *f; - i40e_status aq_ret = 0; struct i40e_vsi *vsi; int bkt; @@ -1166,6 +1623,7 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, } return aq_ret; } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + spin_lock_bh(&vsi->mac_filter_hash_lock); hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) continue; @@ -1199,6 +1657,7 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, i40e_aq_str(&pf->hw, aq_err)); } } + spin_unlock_bh(&vsi->mac_filter_hash_lock); return aq_ret; } aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, @@ -1263,7 +1722,6 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - i40e_flush(hw); } /* clear the VFLR bit in GLGEN_VFLRSTAT */ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; @@ -1312,7 +1770,7 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) /* reallocate VF resources to finish resetting the VSI state */ if (!i40e_alloc_vf_res(vf)) { - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); @@ -1505,6 +1963,9 @@ void i40e_free_vfs(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; u32 reg_idx, bit_idx; int i, tmp, vf_id; + struct i40e_vsi *src_vsi; + u16 rule_type, rule_id; + int ret; if (!pf->vf) return; @@ -1513,6 +1974,35 @@ void i40e_free_vfs(struct i40e_pf *pf) i40e_notify_client_of_vf_enable(pf, 0); + /* At start we need to clear all ingress and egress mirroring setup. + * We can continue once all mirroring has been removed. 
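+ * Each direction is removed using the rule ID saved when the mirror was added.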
+ */ + for (i = 0; i < pf->num_alloc_vfs; i++) { + src_vsi = pf->vsi[pf->vf[i].lan_vsi_idx]; + if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].ingress_vlan)) { + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + rule_id = pf->vf[i].ingress_rule_id; + ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, rule_id); + if (ret) + dev_warn(&pf->pdev->dev, + "Error %s while trying to remove ingress mirror on VF %d\n", + i40e_aq_str + (hw, hw->aq.asq_last_status), + pf->vf[i].vf_id); + } + if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].egress_vlan)) { + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + rule_id = pf->vf[i].egress_rule_id; + ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, rule_id); + if (ret) + dev_warn(&pf->pdev->dev, + "Error %s while trying to remove egress mirror on VF %d\n", + i40e_aq_str + (hw, hw->aq.asq_last_status), + pf->vf[i].vf_id); + } + } + /* Amortize wait time by stopping all VFs at the same time */ for (i = 0; i < pf->num_alloc_vfs; i++) { if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) @@ -1546,6 +2036,12 @@ void i40e_free_vfs(struct i40e_pf *pf) /* disable qp mappings */ i40e_disable_vf_mappings(&pf->vf[i]); } +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (pf->vfd_obj) { + destroy_vfd_sysfs(pf->pdev, pf->vfd_obj); + pf->vfd_obj = NULL; + } +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ kfree(pf->vf); pf->vf = NULL; @@ -1600,18 +2096,32 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) } pf->vf = vfs; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + /* set vfd ops */ + vfd_ops = &i40e_vfd_ops; + /* create the sriov kobjects */ + pf->vfd_obj = create_vfd_sysfs(pf->pdev, num_alloc_vfs); +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ + /* apply default profile */ for (i = 0; i < num_alloc_vfs; i++) { vfs[i].pf = pf; vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; vfs[i].vf_id = i; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + /* setup default mirror values */ + vfs[i].ingress_vlan = I40E_NO_VF_MIRROR; + vfs[i].egress_vlan = I40E_NO_VF_MIRROR; +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ + /* assign default loopback value */ + vfs[i].loopback = true; + /* assign default allow_untagged value */ + vfs[i].allow_untagged = true; /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); - } pf->num_alloc_vfs = num_alloc_vfs; @@ -1630,6 +2140,7 @@ err_iov: } #endif +#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) /** * i40e_pci_sriov_enable * @pdev: pointer to a pci_dev structure @@ -1682,7 +2193,7 @@ err_out: /** * i40e_pci_sriov_configure * @pdev: pointer to a pci_dev structure - * @num_vfs: number of VFs to allocate + * @num_vfs: number of vfs to allocate * * Enable or change the number of VFs. Called when the user updates the number * of VFs in sysfs. 
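For reference, the per-VF work done by the mirror-teardown loop in i40e_free_vfs() above can be read as the following self-contained sketch. It is illustrative only and not part of the patch: the function name is made up, it reuses only helpers and fields introduced earlier in this patch, and it ignores return codes where the real loop merely logs them.

static void example_del_vf_mirrors(struct i40e_pf *pf, int i)
{
	struct i40e_vsi *src_vsi = pf->vsi[pf->vf[i].lan_vsi_idx];

	/* note the pairing: the stored ingress mirror is deleted with
	 * the VPORT_EGRESS rule type, the egress mirror with
	 * VPORT_INGRESS
	 */
	if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].ingress_vlan))
		i40e_del_ingress_egress_mirror(src_vsi,
					       I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS,
					       pf->vf[i].ingress_rule_id);
	if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].egress_vlan))
		i40e_del_ingress_egress_mirror(src_vsi,
					       I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS,
					       pf->vf[i].egress_rule_id);
}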
@@ -1705,8 +2216,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) ret = i40e_pci_sriov_enable(pdev, num_vfs); goto sriov_configure_out; } - - if (!pci_vfs_assigned(pf->pdev)) { + if (!pci_vfs_assigned(pdev)) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); @@ -1719,21 +2229,24 @@ sriov_configure_out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } +#endif /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf + * i40e_vc_send_msg_to_vf_ex * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length + * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) +static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen, + bool is_quiet) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1749,7 +2262,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* single place to detect unsuccessful return values */ - if (v_retval) { + if (v_retval && !is_quiet) { vf->num_invalid_msgs++; dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, v_opcode, v_retval); @@ -1767,7 +2280,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, vf->num_invalid_msgs = 0; } - aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, @@ -1779,6 +2292,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, return 0; } +/** + * i40e_vc_send_msg_to_vf + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + **/ +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, + msg, msglen, false); +} + /** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info @@ -1816,6 +2346,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) sizeof(struct virtchnl_version_info)); } +#ifdef __TC_MQPRIO_MODE_MAX /** * i40e_del_qch - delete all the additional VSIs created as a part of ADq * @vf: pointer to VF structure @@ -1837,6 +2368,8 @@ static void i40e_del_qch(struct i40e_vf *vf) } } +#endif /* __TC_MQPRIO_MODE_MAX */ + /** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info @@ -1851,7 +2384,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; int num_vsis = 1; - size_t len = 0; + int len = 0; int ret; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -1859,7 +2392,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) goto err; } - len = struct_size(vfres, vsi_res, num_vsis); + len = (sizeof(struct virtchnl_vf_resource) + + sizeof(struct virtchnl_vsi_resource) * num_vsis); + vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { aq_ret = I40E_ERR_NO_MEMORY; @@ -1874,18 +2409,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_cap_flags = 
VIRTCHNL_VF_OFFLOAD_L2; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; - if (i40e_vf_client_capable(pf, vf->vf_id) && - (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; - set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); - } else { - clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); - } - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { @@ -1895,7 +2426,6 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) else vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= @@ -1929,8 +2459,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; +#ifdef __TC_MQPRIO_MODE_MAX if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; +#endif /* __TC_MQPRIO_MODE_MAX */ vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; @@ -1944,12 +2476,17 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; /* VFs only use TC 0 */ vfres->vsi_res[0].qset_handle - = le16_to_cpu(vsi->info.qs_handle[0]); + = LE16_TO_CPU(vsi->info.qs_handle[0]); ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); } set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); - + set_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states); + /* if VF is in base mode, keep only the base capabilities that are + * negotiated + */ + if (pf->vf_base_mode_only) + vfres->vf_cap_flags &= VF_BASE_MODE_OFFLOADS; err: /* send the response back to the VF */ ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, @@ -1969,8 +2506,10 @@ err: **/ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { - if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { i40e_reset_vf(vf, false); + clear_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states); + } } /** @@ -2004,8 +2543,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; + i40e_status aq_ret = I40E_SUCCESS; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; bool allmulti = false; bool alluni = false; @@ -2017,11 +2556,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", vf->vf_id); + if (pf->vf_base_mode_only) + dev_err(&pf->pdev->dev, "VF %d is in base mode only, promiscuous mode is not supported\n", + vf->vf_id); /* Lie to the VF on purpose, because this is an error we can * ignore. Unprivileged VF is not a virtual channel error. 
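* The VF is simply answered with I40E_SUCCESS below.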
*/ - aq_ret = 0; + aq_ret = I40E_SUCCESS; goto err_out; } @@ -2041,6 +2583,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) if (info->flags & FLAG_VF_UNICAST_PROMISC) alluni = true; + aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, alluni); if (aq_ret) @@ -2178,6 +2721,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } } } + /* set vsi num_queue_pairs in use to num configured by VF */ if (!vf->adq_enabled) { pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = @@ -2196,6 +2740,7 @@ error_param: /** * i40e_validate_queue_map + * @vf: pointer to the VF info * @vsi_id: vsi id * @queuemap: Tx or Rx queue map * @@ -2337,6 +2882,13 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; + if (vf->pf_ctrl_disable) { + aq_ret = I40E_ERR_PARAM; + dev_err(&pf->pdev->dev, + "Admin has disabled VF %d via sysfs, will not enable queues", + vf->vf_id); + goto error_param; + } if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2368,8 +2920,16 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) if (vf->adq_enabled) { /* zero belongs to LAN VSI */ for (i = 1; i < vf->num_tc; i++) { - if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) + if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->ch[i].vsi_idx], + vqs->rx_queues, true)) { aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } + if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->ch[i].vsi_idx], + vqs->tx_queues, true)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } } } @@ -2531,6 +3091,7 @@ error_param: * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl + * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2545,29 +3106,25 @@ error_param: * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al) + struct virtchnl_ether_addr_list *al, + bool *is_quiet) { struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; + int mac2add_cnt = 0; int i; - /* If this VF is not privileged, then we can't add more than a limited - * number of addresses. Check to make sure that the additions do not - * push us over the limit. 
- */ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) { - dev_err(&pf->pdev->dev, - "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); - return -EPERM; - } + if (!is_quiet) + return -EINVAL; + *is_quiet = false; for (i = 0; i < al->num_elements; i++) { + struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; if (is_broadcast_ether_addr(addr) || is_zero_ether_addr(addr)) { - dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", - addr); + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", addr); return I40E_ERR_INVALID_MAC_ADDR; } @@ -2583,10 +3140,30 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + *is_quiet = true; return -EPERM; } + + /* count filters that really will be added */ + f = i40e_find_mac(vsi, addr); + if (!f) + ++mac2add_cnt; } + /* If this VF is not privileged, then we can't add more than a limited + * number of addresses. Check to make sure that the additions do not + * push us over the limit. + */ + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && + (i40e_count_filters(vsi) + mac2add_cnt) > + I40E_VC_MAX_MAC_ADDR_PER_VF) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); + if (pf->vf_base_mode_only) + dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d filters\n", + vf->vf_id, I40E_VC_MAX_MAC_ADDR_PER_VF); + return -EPERM; + } return 0; } @@ -2603,6 +3180,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; + bool is_quiet = false; i40e_status ret = 0; int i; @@ -2619,7 +3197,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al); + ret = i40e_check_vf_permission(vf, al, &is_quiet); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2632,7 +3210,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) f = i40e_find_mac(vsi, al->list[i].addr); if (!f) { f = i40e_add_mac_filter(vsi, al->list[i].addr); - if (!f) { dev_err(&pf->pdev->dev, "Unable to add MAC filter %pM for VF %d\n", @@ -2640,9 +3217,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = I40E_ERR_PARAM; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; - } else { - vf->num_mac++; } + if (is_valid_ether_addr(al->list[i].addr)) + ether_addr_copy(vf->default_lan_addr.addr, + al->list[i].addr); } } spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -2655,8 +3233,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret); + return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0, is_quiet); } /** @@ -2689,16 +3267,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } - - if (vf->pf_set_mac && - ether_addr_equal(al->list[i].addr, - vf->default_lan_addr.addr)) { - dev_err(&pf->pdev->dev, - "MAC addr %pM has been set by PF, cannot delete it for 
VF %d, reset VF to change MAC addr\n", - vf->default_lan_addr.addr, vf->vf_id); - ret = I40E_ERR_PARAM; - goto error_param; - } } vsi = pf->vsi[vf->lan_vsi_idx]; @@ -2709,8 +3277,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = I40E_ERR_INVALID_MAC_ADDR; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; - } else { - vf->num_mac--; } spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -2747,6 +3313,9 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { dev_err(&pf->pdev->dev, "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); + if (pf->vf_base_mode_only) + dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d vlans\n", + vf->vf_id, I40E_VC_MAX_VLAN_PER_VF); goto error_param; } if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || @@ -2755,6 +3324,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && + bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) + /* Silently fail the request if the VF is untrusted and trunk + * VLANs are configured. + */ + goto error_param; + for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > I40E_MAX_VLANID) { aq_ret = I40E_ERR_PARAM; @@ -2820,6 +3396,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && + bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) + /* Silently fail the request if the VF is untrusted and trunk + * VLANs are configured. + */ + goto error_param; + for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > I40E_MAX_VLANID) { aq_ret = I40E_ERR_PARAM; @@ -2855,70 +3438,6 @@ error_param: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); } -/** - * i40e_vc_iwarp_msg - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * called from the VF for the iwarp msgs - **/ -static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) -{ - struct i40e_pf *pf = vf->pf; - int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; - i40e_status aq_ret = 0; - - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - - i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, - msg, msglen); - -error_param: - /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, - aq_ret); -} - -/** - * i40e_vc_iwarp_qvmap_msg - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * @config: config qvmap or release it - * - * called from the VF for the iwarp msgs - **/ -static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) -{ - struct virtchnl_iwarp_qvlist_info *qvlist_info = - (struct virtchnl_iwarp_qvlist_info *)msg; - i40e_status aq_ret = 0; - - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - - if (config) { - if (i40e_config_iwarp_qvlist(vf, qvlist_info)) - aq_ret = I40E_ERR_PARAM; - } else { - i40e_release_iwarp_qvlist(vf); - } - -error_param: - /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, - config ? 
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, - aq_ret); -} - /** * i40e_vc_config_rss_key * @vf: pointer to the VF info @@ -3101,10 +3620,11 @@ err: aq_ret); } +#ifdef __TC_MQPRIO_MODE_MAX /** * i40e_validate_cloud_filter - * @mask: mask for TC filter - * @data: data for TC filter + * @vf: pointer to the VF info + * @tc_filter: pointer to virtchnl_filter * * This function validates cloud filter programmed as TC filter for ADq **/ @@ -3235,11 +3755,11 @@ err: } /** - * i40e_find_vsi_from_seid - searches for the vsi with the given seid + * i40e_find_vf_vsi_from_seid - searches for the vsi with the given seid * @vf: pointer to the VF info - * @seid - seid of the vsi it is searching for + * @seid: seid of the vsi it is searching for **/ -static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) +static struct i40e_vsi *i40e_find_vf_vsi_from_seid(struct i40e_vf *vf, u16 seid) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; @@ -3269,7 +3789,7 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf) hlist_for_each_entry_safe(cfilter, node, &vf->cloud_filter_list, cloud_node) { - vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); + vsi = i40e_find_vf_vsi_from_seid(vf, cfilter->seid); if (!vsi) { dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", @@ -3443,6 +3963,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; i40e_status aq_ret = 0; + char err_msg_buf[100]; + bool is_quiet = false; + u16 err_msglen = 0; + u8 *err_msg = NULL; int i, ret; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { @@ -3458,12 +3982,23 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) goto err_out; } + if (pf->fdir_pf_active_filters || + (!hlist_empty(&pf->fdir_filter_list))) { + aq_ret = I40E_ERR_PARAM; + err_msglen = strlcpy(err_msg_buf, + "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters", + sizeof(err_msg_buf)); + err_msg = err_msg_buf; + is_quiet = true; + goto err_out; + } + if (i40e_validate_cloud_filter(vf, vcf)) { dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; - goto err_out; + aq_ret = I40E_ERR_PARAM; + goto err_out; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); @@ -3535,8 +4070,9 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) err_free: kfree(cfilter); err_out: - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, - aq_ret); + return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, + aq_ret, err_msg, err_msglen, + is_quiet); } /** @@ -3546,14 +4082,14 @@ err_out: **/ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) { - struct virtchnl_tc_info *tci = - (struct virtchnl_tc_info *)msg; + struct virtchnl_tc_info *tci = (struct virtchnl_tc_info *)msg; struct i40e_pf *pf = vf->pf; - struct i40e_link_status *ls = &pf->hw.phy.link_info; + struct i40e_link_status *ls; int i, adq_request_qps = 0; i40e_status aq_ret = 0; - u64 speed = 0; + u32 speed; + ls = &pf->hw.phy.link_info; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; @@ -3614,28 +4150,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) } /* get link speed in MB to validate rate limit */ - switch (ls->link_speed) { - case VIRTCHNL_LINK_SPEED_100MB: - speed = SPEED_100; - break; - case VIRTCHNL_LINK_SPEED_1GB: - speed = SPEED_1000; - break; - 
case VIRTCHNL_LINK_SPEED_10GB: - speed = SPEED_10000; - break; - case VIRTCHNL_LINK_SPEED_20GB: - speed = SPEED_20000; - break; - case VIRTCHNL_LINK_SPEED_25GB: - speed = SPEED_25000; - break; - case VIRTCHNL_LINK_SPEED_40GB: - speed = SPEED_40000; - break; - default: - dev_err(&pf->pdev->dev, - "Cannot detect link speed\n"); + speed = i40e_vc_link_speed2mbps(ls->link_speed); + if (speed == SPEED_UNKNOWN) { + dev_err(&pf->pdev->dev, "Cannot detect link speed\n"); aq_ret = I40E_ERR_PARAM; goto err; } @@ -3719,6 +4236,8 @@ err: aq_ret); } +#endif /* __TC_MQPRIO_MODE_MAX */ + /** * i40e_vc_process_vf_msg * @pf: pointer to the PF structure @@ -3806,15 +4325,6 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg); break; - case VIRTCHNL_OP_IWARP: - ret = i40e_vc_iwarp_msg(vf, msg, msglen); - break; - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); - break; - case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); - break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ret = i40e_vc_config_rss_key(vf, msg); break; @@ -3836,6 +4346,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_REQUEST_QUEUES: ret = i40e_vc_request_queues_msg(vf, msg); break; +#ifdef __TC_MQPRIO_MODE_MAX case VIRTCHNL_OP_ENABLE_CHANNELS: ret = i40e_vc_add_qch_msg(vf, msg); break; @@ -3848,6 +4359,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_DEL_CLOUD_FILTER: ret = i40e_vc_del_cloud_filter(vf, msg); break; +#endif /* __TC_MQPRIO_MODE_MAX */ case VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", @@ -3902,52 +4414,26 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) return 0; } +#ifdef IFLA_VF_MAX + /** - * i40e_validate_vf - * @pf: the physical function - * @vf_id: VF identifier + * i40e_set_vf_mac + * @vf: the VF + * @vsi: VF VSI to configure + * @mac: the mac address * - * Check that the VF is enabled and the VSI exists. 
+ * This function allows the administrator to set the mac address for the VF * * Returns 0 on success, negative on failure - **/ -static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) -{ - struct i40e_vsi *vsi; - struct i40e_vf *vf; - int ret = 0; - - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, - "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; - goto err_out; - } - vf = &pf->vf[vf_id]; - vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); - if (!vsi) - ret = -EINVAL; -err_out: - return ret; -} - -/** - * i40e_ndo_set_vf_mac - * @netdev: network interface device structure - * @vf_id: VF identifier - * @mac: mac address * - * program VF mac address **/ -int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +static int i40e_set_vf_mac(struct i40e_vf *vf, struct i40e_vsi *vsi, + const u8 *mac) { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_mac_filter *f; - struct i40e_vf *vf; - int ret = 0; struct hlist_node *h; + int ret = 0; int bkt; u8 i; @@ -3956,13 +4442,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) return -EAGAIN; } - /* validate the request */ - ret = i40e_validate_vf(pf, vf_id); - if (ret) + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, + "Invalid Ethernet address %pM for VF %d\n", + mac, vf->vf_id); + ret = -EINVAL; goto error_param; - - vf = &pf->vf[vf_id]; - vsi = pf->vsi[vf->lan_vsi_idx]; + } /* When the VF is resetting wait until it is done. * It can take up to 200 milliseconds, @@ -3980,18 +4466,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", - vf_id); + vf->vf_id); ret = -EAGAIN; goto error_param; } - if (is_multicast_ether_addr(mac)) { - dev_err(&pf->pdev->dev, - "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); - ret = -EINVAL; - goto error_param; - } - /* Lock once because below invoked function add/del_filter requires * mac_filter_hash_lock to be held */ @@ -4019,11 +4498,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) if (is_zero_ether_addr(mac)) { vf->pf_set_mac = false; - dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); + dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf->vf_id); } else { vf->pf_set_mac = true; dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", - mac, vf_id); + mac, vf->vf_id); } /* Force the VF interface down so it has to bring up with new MAC @@ -4031,12 +4510,39 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) */ i40e_vc_disable_vf(vf); dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); - error_param: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } +/** + * i40e_ndo_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: mac address + * + * program VF mac address + **/ +int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error_param; + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_set_vf_mac(vf, vsi, mac); +error_param: + return ret; +} + /** * i40e_vsi_has_vlans - True if VSI has configured VLANs * @vsi: pointer to the vsi @@ -4075,8 +4581,13 @@ static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) * * program VF vlan id and/or qos **/ +#ifdef IFLA_VF_VLAN_INFO_MAX int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) +#else +int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, + int vf_id, u16 vlan_id, u8 qos) +#endif /* IFLA_VF_VLAN_INFO_MAX */ { u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -4084,7 +4595,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi; struct i40e_vf *vf; - int ret = 0; + int ret; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); @@ -4101,12 +4612,14 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ret = -EINVAL; goto error_pvid; } +#ifdef IFLA_VF_VLAN_INFO_MAX if (vlan_proto != htons(ETH_P_8021Q)) { dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); ret = -EPROTONOSUPPORT; goto error_pvid; } +#endif vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; @@ -4174,10 +4687,26 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, goto error_pvid; } - if (vlan_id || qos) + if (vlan_id || qos) { + u16 vid = 0; + /* remove trunk if port vlan setting is called */ + if (bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) { + for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) { + i40e_vsi_kill_vlan(vsi, vid); + clear_bit(vid, vf->trunk_vlans); + } + } + ret = i40e_vsi_add_pvid(vsi, vlanprio); - else + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add VF VLAN failed, 
ret=%d aq_err=%d\n", ret, + vsi->back->hw.aq.asq_last_status); + goto error_pvid; + } + } else { i40e_vsi_remove_pvid(vsi); + } spin_lock_bh(&vsi->mac_filter_hash_lock); if (vlan_id) { @@ -4206,26 +4735,23 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) allmulti = true; - /* Schedule the worker thread to take care of applying changes */ - i40e_service_event_schedule(vsi->back); - - if (ret) { - dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); - goto error_pvid; - } - /* The Port VLAN needs to be saved across resets the same as the * default LAN MAC address. */ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); - - ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); - if (ret) { - dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); - goto error_pvid; + if (vsi->info.pvid) { + ret = i40e_config_vf_promiscuous_mode(vf, + vsi->id, + allmulti, + alluni); + if (ret) { + dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); + goto error_pvid; + } } - ret = 0; + /* Schedule the worker thread to take care of applying changes */ + i40e_service_event_schedule(vsi->back); error_pvid: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); @@ -4241,8 +4767,12 @@ error_pvid: * * configure VF Tx rate **/ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) +#else +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate) +#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -4260,13 +4790,6 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, if (ret) goto error; - if (min_tx_rate) { - dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", - min_tx_rate, vf_id); - ret = -EINVAL; - goto error; - } - vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -4286,6 +4809,19 @@ error: return ret; } +/** + * i40e_ndo_enable_vf + * @netdev: network interface device structure + * @vf_id: VF identifier + * @enable: true to enable & false to disable + * + * enable/disable VF + **/ +int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable) +{ + return -EOPNOTSUPP; +} + /** * i40e_ndo_get_vf_config * @netdev: network interface device structure @@ -4325,19 +4861,29 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ivi->max_tx_rate = vf->tx_rate; ivi->min_tx_rate = 0; +#else + ivi->tx_rate = vf->tx_rate; +#endif ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT; +#ifdef HAVE_NDO_SET_VF_LINK_STATE if (vf->link_forced == false) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->link_up == true) ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; else ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ivi->spoofchk = vf->spoofchk; +#endif +#ifdef HAVE_NDO_SET_VF_TRUST ivi->trusted = vf->trusted; +#endif /* HAVE_NDO_SET_VF_TRUST */ ret = 0; error_param: @@ -4345,6 +4891,7 @@ error_param: return ret; } +#ifdef HAVE_NDO_SET_VF_LINK_STATE /** * i40e_ndo_set_vf_link_state * @netdev: network interface device structure @@ -4357,6 +4904,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, 
int vf_id, int link) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + struct i40e_link_status *ls = &pf->hw.phy.link_info; struct virtchnl_pf_event pfe; struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; @@ -4384,37 +4932,74 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) switch (link) { case IFLA_VF_LINK_STATE_AUTO: vf->link_forced = false; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event_adv.link_speed = + i40e_vc_link_speed2mbps(ls->link_speed); +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ pfe.event_data.link_event.link_status = - pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; + ls->link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = - (enum virtchnl_link_speed) - pf->hw.phy.link_info.link_speed; + i40e_virtchnl_link_speed(ls->link_speed); +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ break; case IFLA_VF_LINK_STATE_ENABLE: vf->link_forced = true; vf->link_up = true; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = true; + pfe.event_data.link_event_adv.link_speed = + i40e_vc_link_speed2mbps(ls->link_speed); +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; + pfe.event_data.link_event.link_speed = + i40e_virtchnl_link_speed(ls->link_speed); +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; vf->link_up = false; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = false; + pfe.event_data.link_event_adv.link_speed = 0; +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ pfe.event_data.link_event.link_status = false; pfe.event_data.link_event.link_speed = 0; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ break; default: ret = -EINVAL; goto error_out; } + /* Do not allow change link state when VF is disabled + * Check if requested link state is not IFLA_VF_LINK_STATE_DISABLE, + * to prevent false positive warning in case of reloading the driver + */ + if (vf->pf_ctrl_disable && link != IFLA_VF_LINK_STATE_DISABLE) { + vf->link_up = false; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + pfe.event_data.link_event_adv.link_status = false; + pfe.event_data.link_event_adv.link_speed = 0; +#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + dev_warn(&pf->pdev->dev, + "Not possible to change VF link state, please enable it first\n"); + } + /* Notify the VF of its new link state */ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, - 0, (u8 *)&pfe, sizeof(pfe), NULL); + I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); error_out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE /** * i40e_ndo_set_vf_spoofchk * @netdev: network interface device structure @@ -4475,6 +5060,8 @@ out: return ret; } +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ +#ifdef HAVE_NDO_SET_VF_TRUST /** * i40e_ndo_set_vf_trust * @netdev: network interface device structure of the pf @@ -4510,6 +5097,9 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) vf = &pf->vf[vf_id]; + /* if vf is in base mode, make it untrusted */ + if (pf->vf_base_mode_only) + setting = false; if (setting == 
vf->trusted) goto out; @@ -4518,6 +5108,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? "" : "un"); +#ifdef __TC_MQPRIO_MODE_MAX if (vf->adq_enabled) { if (!vf->trusted) { dev_info(&pf->pdev->dev, @@ -4526,8 +5117,1960 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) i40e_del_all_cloud_filters(vf); } } +#endif /* __TC_MQPRIO_MODE_MAX */ out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } +#endif /* HAVE_NDO_SET_VF_TRUST */ +#ifdef HAVE_VF_STATS + +/** + * i40e_get_vf_stats - populate some stats for the VF + * @netdev: the netdev of the PF + * @vf_id: the host OS identifier (0-127) + * @vf_stats: pointer to the OS memory to be initialized + */ +int i40e_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_eth_stats *stats; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + + /* validate the request */ + if (i40e_validate_vf(pf, vf_id)) + return -EINVAL; + + vf = &pf->vf[vf_id]; + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) + return -EINVAL; + + i40e_update_eth_stats(vsi); + stats = &vsi->eth_stats; + + memset(vf_stats, 0, sizeof(*vf_stats)); + + vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + + stats->rx_multicast; + vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + + stats->tx_multicast; + vf_stats->rx_bytes = stats->rx_bytes; + vf_stats->tx_bytes = stats->tx_bytes; + vf_stats->broadcast = stats->rx_broadcast; + vf_stats->multicast = stats->rx_multicast; +#ifdef HAVE_VF_STATS_DROPPED + vf_stats->rx_dropped = stats->rx_discards; + vf_stats->tx_dropped = stats->tx_discards; +#endif + + return 0; +} +#endif /* HAVE_VF_STATS */ +#endif /* IFLA_VF_MAX */ +#ifdef HAVE_NDO_SET_VF_LINK_STATE + +/** + * i40e_get_trunk - Gets the configured VLAN filters + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @trunk_vlans: trunk VLANs + * + * Gets the active trunk VLANs + * + * Returns the number of active VLAN filters on success, + * negative on failure + **/ +static int i40e_get_trunk(struct pci_dev *pdev, int vf_id, + unsigned long *trunk_vlans) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + vf = &pf->vf[vf_id]; + /* checking if pvid has been set through netdev */ + vsi = pf->vsi[vf->lan_vsi_idx]; + if (vsi->info.pvid) { + memset(trunk_vlans, 0, + BITS_TO_LONGS(VLAN_N_VID) * sizeof(long)); + set_bit(vsi->info.pvid, trunk_vlans); + } else { + bitmap_copy(trunk_vlans, vf->trunk_vlans, VLAN_N_VID); + } + + ret = bitmap_weight(trunk_vlans, VLAN_N_VID); +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_set_trunk - Configure VLAN filters + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @vlan_bitmap: VLANs to filter on + * + * Applies the VLAN filters + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_trunk(struct pci_dev 
*pdev, int vf_id, + const unsigned long *vlan_bitmap) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + u16 vid = 0, pvid = 0; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_vlan_stripping_enable(vsi); + + /* checking if pvid has been set through netdev */ + pvid = vsi->info.pvid; + if (pvid) { + struct i40e_vsi *pf_vsi = pf->vsi[pf->lan_vsi]; + + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); +#ifdef IFLA_VF_VLAN_INFO_MAX +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + pf_vsi->netdev->netdev_ops->extended.ndo_set_vf_vlan + (pf_vsi->netdev, vf_id, 0, 0, htons(ETH_P_8021Q)); +#else + pf_vsi->netdev->netdev_ops->ndo_set_vf_vlan + (pf_vsi->netdev, vf_id, 0, 0, htons(ETH_P_8021Q)); +#endif // HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#else // IFLA_VF_VLAN_INFO_MAX +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + pf_vsi->netdev->netdev_ops->extended.ndo_set_vf_vlan + (pf_vsi->netdev, vf_id, 0, 0); +#else + pf_vsi->netdev->netdev_ops->ndo_set_vf_vlan + (pf_vsi->netdev, vf_id, 0, 0); +#endif // HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#endif // IFLA_VF_VLAN_INFO_MAX + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + } + + /* Add vlans */ + for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID) { + if (!test_bit(vid, vf->trunk_vlans)) { + ret = i40e_vsi_add_vlan(vsi, vid); + if (ret) + goto out; + /* VLAN 0 filters are added by default + * but only for the first injection + */ + if (!bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) + vf->allow_untagged = true; + } + } + + /* If deleting all vlan filters, check if we have VLAN 0 filters + * existing. 
If we don't, add filters to allow all traffic, i.e. + * VLAN tag = -1, before deleting all filters, because in the + * delete-all-filters flow we check if there are VLAN 0 filters + * and then replace them with filters of VLAN id = -1 + */ + if (!bitmap_weight(vlan_bitmap, VLAN_N_VID) && !vf->allow_untagged) + i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); + + /* Del VLANs */ + for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) { + if (!test_bit(vid, vlan_bitmap)) + i40e_vsi_kill_vlan(vsi, vid); + } + /* Copy over the updated bitmap */ + bitmap_copy(vf->trunk_vlans, vlan_bitmap, VLAN_N_VID); + /* back-fill the port VLAN set by netdev */ + if (pvid) { + if (i40e_vsi_add_vlan(vsi, pvid)) { + dev_warn(&pdev->dev, "Unable to restore Port VLAN for trunking.\n"); + goto out; + } + set_bit(pvid, vf->trunk_vlans); + } + + /* When the trunk is empty, allow_untagged must be set */ + if (!bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) + vf->allow_untagged = true; +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_get_mirror - Gets the configured VLAN mirrors + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mirror_vlans: mirror VLANs + * + * Gets the active mirror VLANs + * + * Returns the number of active mirror VLANs on success, + * negative on failure + **/ +static int i40e_get_mirror(struct pci_dev *pdev, int vf_id, + unsigned long *mirror_vlans) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + vf = &pf->vf[vf_id]; + bitmap_copy(mirror_vlans, vf->mirror_vlans, VLAN_N_VID); + ret = bitmap_weight(mirror_vlans, VLAN_N_VID); +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_set_mirror - Configure VLAN mirrors + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @vlan_bitmap: VLANs to configure as mirrors + * + * Configures the mirror VLANs + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_mirror(struct pci_dev *pdev, int vf_id, + const unsigned long *vlan_bitmap) +{ + u16 vid, sw_seid, dst_seid, rule_id, rule_type; + struct i40e_pf *pf = pci_get_drvdata(pdev); + int ret = 0, num = 0, cnt = 0, add = 0; + u16 rules_used, rules_free; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + __le16 *mr_list; + + DECLARE_BITMAP(num_vlans, VLAN_N_VID); + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + sw_seid = vsi->uplink_seid; + dst_seid = vsi->seid; + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + bitmap_xor(num_vlans, vf->mirror_vlans, vlan_bitmap, VLAN_N_VID); + cnt = bitmap_weight(num_vlans, VLAN_N_VID); + if (!cnt) + goto out; + mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL); + if (!mr_list) { + ret = -ENOMEM; + goto out; + } + + /* figure out if adding or deleting: num_vlans holds the changed + * VIDs, and ANDing it with the new bitmap keeps only additions + */ + bitmap_and(num_vlans, vlan_bitmap, num_vlans, VLAN_N_VID); + add = bitmap_weight(num_vlans, VLAN_N_VID); + if (add) { + /* Add mirrors */ + for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID) { + if (!test_bit(vid, vf->mirror_vlans)) { + mr_list[num] = CPU_TO_LE16(vid); 
+ num++; + } + } + ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid, + rule_type, dst_seid, + cnt, mr_list, NULL, + &rule_id, &rules_used, + &rules_free); + if (ret) + goto err_free; + vf->vlan_rule_id = rule_id; + } else { + /* Del mirrors */ + for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) { + if (!test_bit(vid, vlan_bitmap)) { + mr_list[num] = CPU_TO_LE16(vid); + num++; + } + } + ret = i40e_aq_delete_mirrorrule(&pf->hw, sw_seid, rule_type, + vf->vlan_rule_id, cnt, mr_list, + NULL, &rules_used, + &rules_free); + if (ret) + goto err_free; + } + + /* Copy over the updated bitmap */ + bitmap_copy(vf->mirror_vlans, vlan_bitmap, VLAN_N_VID); +err_free: + kfree(mr_list); +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_get_allow_untagged + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @on: on or off + * + * This function checks if untagged packets + * are allowed or not. By default, allow_untagged is on + * when VLAN filters are present on the VF. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_allow_untagged(struct pci_dev *pdev, int vf_id, + bool *on) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + vf = &pf->vf[vf_id]; + *on = vf->allow_untagged; +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_set_allow_untagged + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @on: on or off + * + * This function allows or blocks untagged packets + * on the VF. By default, allow_untagged is on when + * VLAN filters are present on the VF.
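+ * Note: the setting is rejected with -EINVAL while a port VLAN
+ * is configured on the VF.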
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_allow_untagged(struct pci_dev *pdev, int vf_id, + const bool on) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (vsi->info.pvid) { + ret = -EINVAL; + goto out; + } + spin_lock_bh(&vsi->mac_filter_hash_lock); + if (!on) { + i40e_rm_vlan_all_mac(vsi, 0); + i40e_service_event_schedule(vsi->back); + } else { + ret = i40e_add_vlan_all_mac(vsi, 0); + i40e_service_event_schedule(vsi->back); + } + spin_unlock_bh(&vsi->mac_filter_hash_lock); + if (!ret) + vf->allow_untagged = on; +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_get_loopback + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: on success, true if enabled, false if not + * + * This function checks if loopback is enabled + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_loopback(struct pci_dev *pdev, int vf_id, bool *enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + *enable = vf->loopback; +err_out: + return ret; +} + +/** + * i40e_set_loopback + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: enable or disable + * + * This function enables or disables loopback + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_loopback(struct pci_dev *pdev, int vf_id, + const bool enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_configure_vf_loopback(vsi, vf_id, enable); + if (!ret) + vf->loopback = enable; +err_out: + return ret; +} + +/** + * i40e_get_vlan_strip + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: on success, true if enabled, false if not + * + * This function checks if vlan stripping is enabled or not + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_vlan_strip(struct pci_dev *pdev, int vf_id, bool *enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + *enable = vf->vlan_stripping; +err_out: + return ret; +} + +/** + * i40e_set_vlan_strip + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: enable/disable + * + * This function enables or disables VLAN stripping on a VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_vlan_strip(struct pci_dev *pdev, int vf_id, + const bool enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_configure_vf_vlan_stripping(vsi, vf_id, enable); + if (!ret) + vf->vlan_stripping = enable;
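+ /* cache the new state only after the hardware update succeeded */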
+err_out: + return ret; +} + +/** + * i40e_reset_vf_stats + * @pdev: PCI device information struct + * @vf_id: VF identifier + * + * This function resets all the stats for the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_reset_vf_stats(struct pci_dev *pdev, int vf_id) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_vsi_reset_stats(vsi); + +err_out: + return ret; +} + +/** + * i40e_get_vf_bw_share + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @bw_share: bw share of the VF + * + * This function retrieves the bw share configured for the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_vf_bw_share(struct pci_dev *pdev, int vf_id, u8 *bw_share) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + if (vf->bw_share_applied) + *bw_share = vf->bw_share; + else + ret = -EINVAL; + +err_out: + return ret; +} + +/** + * i40e_store_vf_bw_share + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @bw_share: bw share of the VF + * + * This function stores bw share configured for the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_store_vf_bw_share(struct pci_dev *pdev, int vf_id, u8 bw_share) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vf->bw_share = bw_share; + + /* this tracking bool is set to true when 'apply' attribute is used */ + vf->bw_share_applied = false; + pf->vf_bw_applied = false; +err_out: + return ret; +} + +/** + * i40e_get_link_state + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enabled: link state + * @link_speed: link speed of the VF + * + * Gets the status of link and the link speed + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_link_state(struct pci_dev *pdev, int vf_id, bool *enabled, + enum vfd_link_speed *link_speed) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_link_status *ls; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + ls = &pf->hw.phy.link_info; + if (vf->link_forced) + *enabled = vf->link_up; + else + *enabled = ls->link_info & I40E_AQ_LINK_UP; + switch (ls->link_speed) { + case I40E_LINK_SPEED_UNKNOWN: + *link_speed = VFD_LINK_SPEED_UNKNOWN; + break; + case I40E_LINK_SPEED_100MB: + *link_speed = VFD_LINK_SPEED_100MB; + break; + case I40E_LINK_SPEED_1GB: + *link_speed = VFD_LINK_SPEED_1GB; + break; + case I40E_LINK_SPEED_2_5GB: + *link_speed = VFD_LINK_SPEED_2_5GB; + break; + case I40E_LINK_SPEED_5GB: + *link_speed = VFD_LINK_SPEED_5GB; + break; + case I40E_LINK_SPEED_10GB: + *link_speed = VFD_LINK_SPEED_10GB; + break; + case I40E_LINK_SPEED_20GB: + *link_speed = VFD_LINK_SPEED_20GB; + break; + case I40E_LINK_SPEED_25GB: + *link_speed = VFD_LINK_SPEED_25GB; + break; + case I40E_LINK_SPEED_40GB: + *link_speed = VFD_LINK_SPEED_40GB; + break; + default: + *link_speed = VFD_LINK_SPEED_UNKNOWN; + } +err_out: + return ret; +} + +/** + * 
i40e_set_link_state + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @link: the link state to configure + * + * Configures the link state for a VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_link_state(struct pci_dev *pdev, int vf_id, const u8 link) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error_out; + vf = &pf->vf[vf_id]; + ret = i40e_configure_vf_link(vf, link); +error_out: + return ret; +} + +/** + * i40e_set_vf_enable + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: enable/disable + * + * This function enables or disables a VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_vf_enable(struct pci_dev *pdev, int vf_id, const bool enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + unsigned long q_map; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + q_map = BIT(vsi->num_queue_pairs) - 1; + + /* force link down to prevent tx hangs */ + if (!enable) { + ret = i40e_set_link_state(pdev, vf_id, VFD_LINKSTATE_OFF); + if (ret) + goto err_out; + vf->pf_ctrl_disable = true; + ret = i40e_ctrl_vf_tx_rings(vsi, q_map, enable); + if (ret) + goto err_out; + ret = i40e_ctrl_vf_rx_rings(vsi, q_map, enable); + if (ret) + goto err_out; + vf->queues_enabled = false; + } else { + /* Do nothing when there is no iavf driver loaded */ + if (!test_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states)) + goto err_out; + ret = i40e_ctrl_vf_rx_rings(vsi, q_map, enable); + if (ret) + goto err_out; + ret = i40e_ctrl_vf_tx_rings(vsi, q_map, enable); + if (ret) + goto err_out; + vf->queues_enabled = true; + vf->pf_ctrl_disable = false; + /* reset needed to reinit VF resources */ + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + ret = i40e_set_link_state(pdev, vf_id, VFD_LINKSTATE_AUTO); + } + +err_out: + return ret; +} + +/** + * i40e_get_vf_enable + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: on success, true if the VF is enabled, false if not + * + * This function reports whether the VF is enabled + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_vf_enable(struct pci_dev *pdev, int vf_id, bool *enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + return ret; + vf = &pf->vf[vf_id]; + *enable = !vf->pf_ctrl_disable; + return 0; +} + +/** + * i40e_get_rx_bytes + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @rx_bytes: pointer to the caller's rx_bytes variable + * + * This function gets the received bytes on the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_rx_bytes(struct pci_dev *pdev, int vf_id, + u64 *rx_bytes) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *rx_bytes = vsi->eth_stats.rx_bytes; +err_out: + return ret; +} + +/** + * i40e_get_rx_dropped + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @rx_dropped: pointer to the caller's rx_dropped variable + * + * This function gets the number of dropped receive packets on the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_rx_dropped(struct pci_dev *pdev, int vf_id, + u64 *rx_dropped) +{ + struct i40e_pf *pf =
pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *rx_dropped = vsi->eth_stats.rx_discards; +err_out: + return ret; +} + +/** + * i40e_get_rx_packets + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @rx_packets: pointer to the caller's rx_packets variable + * + * This function gets the number of packets received on the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_rx_packets(struct pci_dev *pdev, int vf_id, + u64 *rx_packets) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *rx_packets = vsi->eth_stats.rx_unicast + vsi->eth_stats.rx_multicast + + vsi->eth_stats.rx_broadcast; +err_out: + return ret; +} + +/** + * i40e_get_tx_bytes + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @tx_bytes: pointer to the caller's tx_bytes variable + * + * This function gets the transmitted bytes by the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_tx_bytes(struct pci_dev *pdev, int vf_id, + u64 *tx_bytes) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *tx_bytes = vsi->eth_stats.tx_bytes; +err_out: + return ret; +} + +/** + * i40e_get_tx_dropped + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @tx_dropped: pointer to the caller's tx_dropped variable + * + * This function gets the number of dropped transmit packets on the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_tx_dropped(struct pci_dev *pdev, int vf_id, + u64 *tx_dropped) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *tx_dropped = vsi->eth_stats.tx_discards; +err_out: + return ret; +} + +/** + * i40e_get_tx_packets + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @tx_packets: pointer to the caller's tx_packets variable + * + * This function gets the number of packets transmitted by the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_tx_packets(struct pci_dev *pdev, int vf_id, + u64 *tx_packets) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *tx_packets = vsi->eth_stats.tx_unicast + vsi->eth_stats.tx_multicast + + vsi->eth_stats.tx_broadcast; +err_out: + return ret; +} + +/** + * i40e_get_tx_errors + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @tx_errors: pointer to the caller's tx_errors variable + * + * This function gets the number of transmit errors on the VF
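+ * as counted in the VSI ethernet statistics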
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_tx_errors(struct pci_dev *pdev, int vf_id, + u64 *tx_errors) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + i40e_update_eth_stats(vsi); + *tx_errors = vsi->eth_stats.tx_errors; +err_out: + return ret; +} + +/** + * i40e_get_mac + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mac: the default mac address + * + * This function gets the default mac address + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_mac(struct pci_dev *pdev, int vf_id, u8 *mac) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + ether_addr_copy(mac, vf->default_lan_addr.addr); +err_out: + return ret; +} + +/** + * i40e_set_mac + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mac: the default mac address to set + * + * This function sets the default mac address for the VF + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_mac(struct pci_dev *pdev, int vf_id, const u8 *mac) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_set_vf_mac(vf, vsi, mac); +err_out: + return ret; +} + +/** + * i40e_get_promisc + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @promisc_mode: current promiscuous mode + * + * This function gets the current promiscuous mode configuration. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_promisc(struct pci_dev *pdev, int vf_id, u8 *promisc_mode) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + *promisc_mode = vf->promisc_mode; +err_out: + return ret; +} + +/** + * i40e_set_promisc + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @promisc_mode: promiscuous mode to be set + * + * This function sets the promiscuous mode configuration. 
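+ * The requested mode is applied to the VF's VSI via
+ * i40e_configure_vf_promisc_mode().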
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_promisc(struct pci_dev *pdev, int vf_id, + const u8 promisc_mode) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_configure_vf_promisc_mode(vf, vsi, promisc_mode); +err: + return ret; +} + +/** + * i40e_get_ingress_mirror - Gets the configured ingress mirror + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mirror: pointer to return the ingress mirror + * + * Gets the ingress mirror configured + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_ingress_mirror(struct pci_dev *pdev, int vf_id, int *mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + *mirror = vf->ingress_vlan; +err_out: + return ret; +} + +/** + * i40e_set_ingress_mirror - Configure ingress mirror + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mirror: mirror vf + * + * Configures the ingress mirror + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_ingress_mirror(struct pci_dev *pdev, int vf_id, + const int mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *src_vsi, *mirror_vsi; + struct i40e_vf *vf, *mirror_vf; + u16 rule_type, rule_id; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + + /* The Admin Queue mirroring rules refer to the traffic + * directions from the perspective of the switch, not the VSI + * we apply the mirroring rule on - so the behaviour of a VSI + * ingress mirror is classified as an egress rule + */ + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + src_vsi = pf->vsi[vf->lan_vsi_idx]; + if (mirror == I40E_NO_VF_MIRROR) { + /* Del mirrors */ + rule_id = vf->ingress_rule_id; + ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, + rule_id); + if (ret) + goto err_out; + vf->ingress_vlan = I40E_NO_VF_MIRROR; + } else { + /* validate the mirror */ + ret = i40e_validate_vf(pf, mirror); + if (ret) + goto err_out; + mirror_vf = &pf->vf[mirror]; + mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx]; + + /* Add mirrors */ + ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, + rule_type, &rule_id); + if (ret) + goto err_out; + vf->ingress_vlan = mirror; + vf->ingress_rule_id = rule_id; + } +err_out: + return ret; +} + +/** + * i40e_get_egress_mirror - Gets the configured egress mirror + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mirror: pointer to return the egress mirror + * + * Gets the egress mirror configured + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_egress_mirror(struct pci_dev *pdev, int vf_id, int *mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + *mirror = vf->egress_vlan; +err_out: + return ret; +} + +/** + * i40e_set_egress_mirror - Configure egress mirror + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mirror: mirror vf + * + * Configures the egress mirror + * + * Returns 0 on success, negative on failure + 
**/ +static int i40e_set_egress_mirror(struct pci_dev *pdev, int vf_id, + const int mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *src_vsi, *mirror_vsi; + struct i40e_vf *vf, *mirror_vf; + u16 rule_type, rule_id; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto err_out; + vf = &pf->vf[vf_id]; + + /* The Admin Queue mirroring rules refer to the traffic + * directions from the perspective of the switch, not the VSI + * we apply the mirroring rule on - so the behaviour of a VSI + * egress mirror is classified as an ingress rule + */ + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + src_vsi = pf->vsi[vf->lan_vsi_idx]; + if (mirror == I40E_NO_VF_MIRROR) { + /* Del mirrors */ + rule_id = vf->egress_rule_id; + ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, + rule_id); + if (ret) + goto err_out; + vf->egress_vlan = I40E_NO_VF_MIRROR; + } else { + /* validate the mirror */ + ret = i40e_validate_vf(pf, mirror); + if (ret) + goto err_out; + mirror_vf = &pf->vf[mirror]; + mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx]; + + /* Add mirrors */ + ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, + rule_type, &rule_id); + if (ret) + goto err_out; + vf->egress_vlan = mirror; + vf->egress_rule_id = rule_id; + } +err_out: + return ret; +} + +/* + * i40e_get_mac_list + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @list_head: list of mac addresses + * + * This function returns the list of mac address configured on the VF. It is + * the responsibility of the caller to free the allocated list when finished. + * + * Returns 0 on success, negative on failure + */ +static int i40e_get_mac_list(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_mac_filter *f; + struct vfd_macaddr *elem; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret, bkt; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_hash_lock); + hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { + elem = kzalloc(sizeof(*elem), GFP_KERNEL); + if (!elem) { + ret = -ENOMEM; + goto error_unlock; + } + INIT_LIST_HEAD(&elem->list); + ether_addr_copy(elem->mac, f->macaddr); + list_add_tail(&elem->list, mac_list); + } +error_unlock: + spin_unlock_bh(&vsi->mac_filter_hash_lock); +error_out: + return ret; +} + +/* + * i40e_add_macs_to_list + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @list_head: list of mac addresses + * + * This function adds a list of mac addresses for a VF + * + * Returns 0 on success, negative on failure + */ +static int i40e_add_macs_to_list(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct vfd_macaddr *tmp; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_hash_lock); + list_for_each_entry(tmp, mac_list, list) { + struct i40e_mac_filter *f; + + f = i40e_find_mac(vsi, tmp->mac); + if (!f) { + f = i40e_add_mac_filter(vsi, tmp->mac); + if (!f) { + dev_err(&pf->pdev->dev, + "Unable to add MAC filter %pM for VF %d\n", + tmp->mac, vf->vf_id); + ret = I40E_ERR_PARAM; + spin_unlock_bh(&vsi->mac_filter_hash_lock); + goto error_out; + 
} + } + } + spin_unlock_bh(&vsi->mac_filter_hash_lock); + + /* program the updated filter list */ + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); +error_out: + return ret; +} + +/* + * i40e_rem_macs_from_list + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @mac_list: list of mac addresses + * + * This function removes a list of mac addresses from a VF + * + * Returns 0 on success, negative on failure + */ +static int i40e_rem_macs_from_list(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct vfd_macaddr *tmp; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error_out; + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_hash_lock); + list_for_each_entry(tmp, mac_list, list) { + if (i40e_del_mac_filter(vsi, tmp->mac)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + spin_unlock_bh(&vsi->mac_filter_hash_lock); + goto error_out; + } + } + spin_unlock_bh(&vsi->mac_filter_hash_lock); + + /* program the updated filter list */ + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); +error_out: + return ret; +} + +/* + * i40e_set_pf_qos_apply + * @pdev: PCI device information struct + * + * This function applies the bw shares stored across all VFs + * + * Returns 0 on success, negative on failure + */ +static int i40e_set_pf_qos_apply(struct pci_dev *pdev) +{ + struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + struct i40e_pf *pf = pci_get_drvdata(pdev); + int i, j, ret = 0, total_share = 0; + struct i40e_vf *vf = pf->vf; + struct i40e_vsi *vsi; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + total_share += vf->bw_share; + } + + /* verify BW share distribution */ + if (total_share > 100) { + dev_err(&pdev->dev, "Total share is greater than 100 percent\n"); + return I40E_ERR_PARAM; + } + + memset(&bw_data, 0, sizeof(struct i40e_aqc_configure_vsi_tc_bw_data)); + for (i = 0; i < pf->num_alloc_vfs; i++) { + vf = &pf->vf[i]; + ret = i40e_validate_vf(pf, vf->vf_id); + if (ret) + continue; + if (!vf->bw_share) + continue; + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf->vf_id); + ret = I40E_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; + bw_data.tc_valid_bits = 1; + bw_data.tc_bw_credits[0] = vf->bw_share; + + ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Config VSI BW allocation per TC failed = %d\n", + pf->hw.aq.asq_last_status); + vf->bw_share_applied = false; + return -EINVAL; + } + + for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) + vsi->info.qs_handle[j] = bw_data.qs_handles[j]; + + /* set the tracking bool to true */ + vf->bw_share_applied = true; + } + pf->vf_bw_applied = true; + +error_param: + return ret; +} + +/** + * i40e_get_pf_ingress_mirror - Gets the configured ingress mirror for PF + * @pdev: PCI device information struct + * @mirror: pointer to return the ingress mirror + * + * Gets the ingress mirror configured + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_pf_ingress_mirror(struct pci_dev *pdev, int *mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + *mirror = pf->ingress_vlan; + return 0; +} + +/** + * i40e_set_pf_ingress_mirror - Sets the configured ingress mirror for PF + * @pdev: PCI device information struct + * @mirror: mirror vf + * + * Configures the ingress mirror + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_pf_ingress_mirror(struct pci_dev *pdev, const int mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *src_vsi, *mirror_vsi; + struct i40e_vf *mirror_vf; + u16 rule_type, rule_id; + int ret; + + /* The Admin Queue mirroring rules refer to the traffic + * directions from the perspective of the switch, not the VSI + * we apply the mirroring rule on - so the behaviour of a VSI + * ingress mirror is classified as an egress rule + */ + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + src_vsi = pf->vsi[pf->lan_vsi]; + if (mirror == I40E_NO_VF_MIRROR) { + /* Del mirrors */ + rule_id = pf->ingress_rule_id; + ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, + rule_id); + if (ret) + goto err_out; + pf->ingress_vlan = I40E_NO_VF_MIRROR; + } else { + /* validate the mirror */ + ret = i40e_validate_vf(pf, mirror); + if (ret) + goto err_out; + mirror_vf = &pf->vf[mirror]; + mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx]; + + /* Add mirrors */ + ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, + rule_type, &rule_id); + if (ret) + goto err_out; + pf->ingress_vlan = mirror; + pf->ingress_rule_id = rule_id; + } +err_out: + return ret; +} + +/** + * i40e_get_pf_egress_mirror - Gets the configured egress mirror for PF + * @pdev: PCI device information struct + * @mirror: pointer to return the egress mirror + * + * Gets the egress mirror configured + * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_pf_egress_mirror(struct pci_dev *pdev, int *mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + *mirror = pf->egress_vlan; + return 0; +} + +/** + * i40e_set_pf_egress_mirror - Sets the configured egress mirror for PF + * @pdev: PCI device information struct + * @mirror: mirror vf + * + * Configures the egress mirror + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_pf_egress_mirror(struct pci_dev *pdev, const int mirror) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *src_vsi, *mirror_vsi; + struct i40e_vf *mirror_vf; + u16 rule_type, rule_id; + int ret; + + /* The Admin Queue
mirroring rules refer to the traffic + * directions from the perspective of the switch, not the VSI + * we apply the mirroring rule on - so the behaviour of a VSI + * egress mirror is classified as an ingress rule + */ + rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + src_vsi = pf->vsi[pf->lan_vsi]; + if (mirror == I40E_NO_VF_MIRROR) { + /* Del mirrors */ + rule_id = pf->egress_rule_id; + ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, + rule_id); + if (ret) + goto err_out; + pf->egress_vlan = I40E_NO_VF_MIRROR; + } else { + /* validate the mirror */ + ret = i40e_validate_vf(pf, mirror); + if (ret) + goto err_out; + mirror_vf = &pf->vf[mirror]; + mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx]; + + /* Add mirrors */ + ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, + rule_type, &rule_id); + if (ret) + goto err_out; + pf->egress_vlan = mirror; + pf->egress_rule_id = rule_id; + } +err_out: + return ret; +} + +#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16 +#define OUTER_TAG_IDX 2 +static int i40e_get_pf_tpid(struct pci_dev *pdev, u16 *tp_id) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + if (!(pf->hw.flags & I40E_HW_FLAG_802_1AD_CAPABLE)) + return -EOPNOTSUPP; + + *tp_id = (u16)(rd32(&pf->hw, I40E_GL_SWT_L2TAGCTRL(OUTER_TAG_IDX)) >> + I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); + + return 0; +} + +static int i40e_set_pf_tpid(struct pci_dev *pdev, u16 tp_id) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + int ret; + + if (!(pf->hw.flags & I40E_HW_FLAG_802_1AD_CAPABLE)) + return -EOPNOTSUPP; + + if (pf->hw.pf_id != 0) { + dev_err(&pdev->dev, + "TPID configuration only supported for PF 0\n"); + return -EOPNOTSUPP; + } + + pf->hw.first_tag = tp_id; + ret = i40e_aq_set_switch_config(&pf->hw, 0, 0, 0, NULL); + + return ret; +} + +static int i40e_get_num_queues(struct pci_dev *pdev, int vf_id, int *num_queues) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + return ret; + vf = &pf->vf[vf_id]; + + *num_queues = vf->num_queue_pairs; + + return ret; +} + +static int i40e_set_num_queues(struct pci_dev *pdev, int vf_id, int num_queues) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + return ret; + vf = &pf->vf[vf_id]; + + if (test_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states)) { + dev_err(&pdev->dev, + "Unable to configure %d queues, please unbind the driver for VF %d\n", + num_queues, vf_id); + return -EAGAIN; + } + if (num_queues > I40E_MAX_VF_QUEUES) { + dev_err(&pdev->dev, + "Unable to configure %d VF queues, the maximum is %d\n", + num_queues, I40E_MAX_VF_QUEUES); + return -EINVAL; + } + if (num_queues - vf->num_queue_pairs > pf->queues_left) { + dev_err(&pdev->dev, + "Unable to configure %d VF queues, only %d available\n", + num_queues, vf->num_queue_pairs + pf->queues_left); + return -EINVAL; + } + + /* Set vf->num_req_queues to the desired value and reset the VF. When + * the VSI is reallocated it will be configured with the new queue + * count. 
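+ * Queues beyond the VF's previous allocation come out of the PF's
+ * remaining queue budget (pf->queues_left), which was checked above.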
+ */ + vf->num_req_queues = num_queues; + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + + return 0; +} + +/** + * i40e_get_max_tx_rate + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @max_tx_rate: max transmit bandwidth rate + * + * This function returns the transmit bandwidth limit, in Mbps, + * for the specified VF; a value of 0 means rate limiting is disabled. + * + * Returns 0 on success, negative on failure + */ +static int i40e_get_max_tx_rate(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, + "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error_param; + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + ret = -ENOENT; + goto error_param; + } + + *max_tx_rate = vf->tx_rate; + +error_param: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_set_max_tx_rate + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @max_tx_rate: max transmit bandwidth rate to set + * + * This function sets the max transmit bandwidth limit, in Mbps, + * for the specified VF; a value of 0 disables rate limiting. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_max_tx_rate(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, + "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto error; + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; + goto error; + } + + ret = i40e_set_bw_limit(vsi, vsi->seid, *max_tx_rate); + if (ret) + goto error; + + vf->tx_rate = *max_tx_rate; +error: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +/** + * i40e_get_trust_state + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: on success, true if enabled, false if not + * + * Gets the VF trust state.
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_get_trust_state(struct pci_dev *pdev, int vf_id, bool *enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + ret = i40e_validate_vf(pf, vf_id); + if (ret) + return ret; + vf = &pf->vf[vf_id]; + + *enable = vf->trusted; + + return ret; +} + +/** + * i40e_set_trust_state + * @pdev: PCI device information struct + * @vf_id: VF identifier + * @enable: enable or disable trust + * + * Sets the VF trust state + * + * Returns 0 on success, negative on failure + **/ +static int i40e_set_trust_state(struct pci_dev *pdev, int vf_id, bool enable) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_vf *vf; + int ret; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); + ret = -EINVAL; + goto out; + } + + /* validate the request */ + ret = i40e_validate_vf(pf, vf_id); + if (ret) + goto out; + + vf = &pf->vf[vf_id]; + /* if vf is in base mode, make it untrusted */ + if (pf->vf_base_mode_only) + enable = false; + if (enable == vf->trusted) + goto out; + + vf->trusted = enable; + i40e_vc_disable_vf(vf); + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + vf_id, enable ? "" : "un"); + +#ifdef __TC_MQPRIO_MODE_MAX + if (vf->adq_enabled) { + if (!vf->trusted) { + dev_info(&pf->pdev->dev, + "VF %u no longer Trusted, deleting all cloud filters\n", + vf_id); + i40e_del_all_cloud_filters(vf); + } + } +#endif /* __TC_MQPRIO_MODE_MAX */ + +out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; +} + +const struct vfd_ops i40e_vfd_ops = { + .get_trunk = i40e_get_trunk, + .set_trunk = i40e_set_trunk, + .get_vlan_mirror = i40e_get_mirror, + .set_vlan_mirror = i40e_set_mirror, + .set_allow_untagged = i40e_set_allow_untagged, + .get_allow_untagged = i40e_get_allow_untagged, + .get_loopback = i40e_get_loopback, + .set_loopback = i40e_set_loopback, + .get_vlan_strip = i40e_get_vlan_strip, + .set_vlan_strip = i40e_set_vlan_strip, + .get_rx_bytes = i40e_get_rx_bytes, + .get_rx_dropped = i40e_get_rx_dropped, + .get_rx_packets = i40e_get_rx_packets, + .get_tx_bytes = i40e_get_tx_bytes, + .get_tx_dropped = i40e_get_tx_dropped, + .get_tx_packets = i40e_get_tx_packets, + .get_tx_errors = i40e_get_tx_errors, + .get_mac = i40e_get_mac, + .set_mac = i40e_set_mac, + .get_promisc = i40e_get_promisc, + .set_promisc = i40e_set_promisc, + .get_ingress_mirror = i40e_get_ingress_mirror, + .set_ingress_mirror = i40e_set_ingress_mirror, + .get_egress_mirror = i40e_get_egress_mirror, + .set_egress_mirror = i40e_set_egress_mirror, + .get_link_state = i40e_get_link_state, + .set_link_state = i40e_set_link_state, + .get_mac_list = i40e_get_mac_list, + .add_macs_to_list = i40e_add_macs_to_list, + .rem_macs_from_list = i40e_rem_macs_from_list, + .get_vf_enable = i40e_get_vf_enable, + .set_vf_enable = i40e_set_vf_enable, + .reset_stats = i40e_reset_vf_stats, + .set_vf_bw_share = i40e_store_vf_bw_share, + .get_vf_bw_share = i40e_get_vf_bw_share, + .set_pf_qos_apply = i40e_set_pf_qos_apply, + .get_pf_ingress_mirror = i40e_get_pf_ingress_mirror, + .set_pf_ingress_mirror = i40e_set_pf_ingress_mirror, + .get_pf_egress_mirror = i40e_get_pf_egress_mirror, + .set_pf_egress_mirror = i40e_set_pf_egress_mirror, + .get_pf_tpid = i40e_get_pf_tpid, + .set_pf_tpid = i40e_set_pf_tpid,
+ .get_num_queues = i40e_get_num_queues, + .set_num_queues = i40e_set_num_queues, + .get_max_tx_rate = i40e_get_max_tx_rate, + .set_max_tx_rate = i40e_set_max_tx_rate, + .get_trust_state = i40e_get_trust_state, + .set_trust_state = i40e_set_trust_state, +}; +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 7164b9bb294f..241b929321b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ #ifndef _I40E_VIRTCHNL_PF_H_ #define _I40E_VIRTCHNL_PF_H_ @@ -34,18 +34,17 @@ enum i40e_queue_ctrl { enum i40e_vf_states { I40E_VF_STATE_INIT = 0, I40E_VF_STATE_ACTIVE, - I40E_VF_STATE_IWARPENA, I40E_VF_STATE_DISABLED, I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, I40E_VF_STATE_PRE_ENABLE, + I40E_VF_STATE_LOADED_VF_DRIVER, }; /* VF capabilities */ enum i40e_vf_capabilities { I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0, I40E_VIRTCHNL_VF_CAP_L2, - I40E_VIRTCHNL_VF_CAP_IWARP, }; /* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI. @@ -97,12 +96,30 @@ struct i40e_vf { unsigned long vf_caps; /* vf's adv. capabilities */ unsigned long vf_states; /* vf's runtime states */ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ +#ifdef HAVE_NDO_SET_VF_LINK_STATE bool link_forced; bool link_up; /* only valid if VF link is forced */ +#endif bool queues_enabled; /* true if the VF queues are enabled */ bool spoofchk; - u16 num_mac; u16 num_vlan; + DECLARE_BITMAP(mirror_vlans, VLAN_N_VID); + u16 vlan_rule_id; +#define I40E_NO_VF_MIRROR -1 +/* assuming vf ids' range is <0..max_supported> */ +#define I40E_IS_MIRROR_VLAN_ID_VALID(id) ((id) >= 0) + u16 ingress_rule_id; + int ingress_vlan; + u16 egress_rule_id; + int egress_vlan; + DECLARE_BITMAP(trunk_vlans, VLAN_N_VID); + bool allow_untagged; + bool loopback; + bool vlan_stripping; + u8 promisc_mode; + u8 bw_share; + bool bw_share_applied; /* true if config is applied to the device */ + bool pf_ctrl_disable; /* tracking bool for PF ctrl of VF enable/disable */ /* ADq related variables */ bool adq_enabled; /* flag to enable adq */ @@ -110,13 +127,12 @@ struct i40e_vf { struct i40evf_channel ch[I40E_MAX_VF_VSI]; struct hlist_head cloud_filter_list; u16 num_cloud_filters; - - /* RDMA Client */ - struct virtchnl_iwarp_qvlist_info *qvlist_info; }; void i40e_free_vfs(struct i40e_pf *pf); +#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#endif int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); @@ -127,17 +143,40 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf); /* VF configuration related iplink handlers */ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +#ifdef IFLA_VF_VLAN_INFO_MAX int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto); +#else +int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, + int vf_id, u16 vlan_id, u8 qos); +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate); +#else +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int 
tx_rate); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting); +#endif +int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable); +#ifdef IFLA_VF_MAX int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); +#ifdef HAVE_NDO_SET_VF_LINK_STATE int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); +#endif +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); +#endif +#endif void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +#ifdef HAVE_VF_STATS +int i40e_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats); +#endif +extern const struct vfd_ops i40e_vfd_ops; #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c deleted file mode 100644 index d07e1a890428..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ /dev/null @@ -1,880 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2018 Intel Corporation. */ - -#include -#include -#include - -#include "i40e.h" -#include "i40e_txrx_common.h" -#include "i40e_xsk.h" - -/** - * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev - * @vsi: Current VSI - * @umem: UMEM to DMA map - * - * Returns 0 on success, <0 on failure - **/ -static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem) -{ - struct i40e_pf *pf = vsi->back; - struct device *dev; - unsigned int i, j; - dma_addr_t dma; - - dev = &pf->pdev->dev; - for (i = 0; i < umem->npgs; i++) { - dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, - DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); - if (dma_mapping_error(dev, dma)) - goto out_unmap; - - umem->pages[i].dma = dma; - } - - return 0; - -out_unmap: - for (j = 0; j < i; j++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); - umem->pages[i].dma = 0; - } - - return -1; -} - -/** - * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev - * @vsi: Current VSI - * @umem: UMEM to DMA map - **/ -static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) -{ - struct i40e_pf *pf = vsi->back; - struct device *dev; - unsigned int i; - - dev = &pf->pdev->dev; - - for (i = 0; i < umem->npgs; i++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); - - umem->pages[i].dma = 0; - } -} - -/** - * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid - * @vsi: Current VSI - * @umem: UMEM - * @qid: Rx ring to associate UMEM to - * - * Returns 0 on success, <0 on failure - **/ -static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, - u16 qid) -{ - struct net_device *netdev = vsi->netdev; - struct xdp_umem_fq_reuse *reuseq; - bool if_running; - int err; - - if (vsi->type != I40E_VSI_MAIN) - return -EINVAL; - - if (qid >= vsi->num_queue_pairs) - return -EINVAL; - - if (qid >= netdev->real_num_rx_queues || - qid >= netdev->real_num_tx_queues) - return -EINVAL; - - reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); - if (!reuseq) - return -ENOMEM; - - xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); - - err = i40e_xsk_umem_dma_map(vsi, umem); - if (err) - return err; - - set_bit(qid, vsi->af_xdp_zc_qps); - - if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); - - if 
(if_running) { - err = i40e_queue_pair_disable(vsi, qid); - if (err) - return err; - - err = i40e_queue_pair_enable(vsi, qid); - if (err) - return err; - - /* Kick start the NAPI context so that receiving will start */ - err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); - if (err) - return err; - } - - return 0; -} - -/** - * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid - * @vsi: Current VSI - * @qid: Rx ring to associate UMEM to - * - * Returns 0 on success, <0 on failure - **/ -static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) -{ - struct net_device *netdev = vsi->netdev; - struct xdp_umem *umem; - bool if_running; - int err; - - umem = xdp_get_umem_from_qid(netdev, qid); - if (!umem) - return -EINVAL; - - if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); - - if (if_running) { - err = i40e_queue_pair_disable(vsi, qid); - if (err) - return err; - } - - clear_bit(qid, vsi->af_xdp_zc_qps); - i40e_xsk_umem_dma_unmap(vsi, umem); - - if (if_running) { - err = i40e_queue_pair_enable(vsi, qid); - if (err) - return err; - } - - return 0; -} - -/** - * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid - * @vsi: Current VSI - * @umem: UMEM to enable/associate to a ring, or NULL to disable - * @qid: Rx ring to (dis)associate UMEM (from)to - * - * This function enables or disables a UMEM to a certain ring. - * - * Returns 0 on success, <0 on failure - **/ -int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, - u16 qid) -{ - return umem ? i40e_xsk_umem_enable(vsi, umem, qid) : - i40e_xsk_umem_disable(vsi, qid); -} - -/** - * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff - * @rx_ring: Rx ring - * @xdp: xdp_buff used as input to the XDP program - * - * This function enables or disables a UMEM to a certain ring. - * - * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} - **/ -static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - int err, result = I40E_XDP_PASS; - struct i40e_ring *xdp_ring; - struct bpf_prog *xdp_prog; - u64 offset; - u32 act; - - rcu_read_lock(); - /* NB! xdp_prog will always be !NULL, due to the fact that - * this path is enabled by setting an XDP program. - */ - xdp_prog = READ_ONCE(rx_ring->xdp_prog); - act = bpf_prog_run_xdp(xdp_prog, xdp); - offset = xdp->data - xdp->data_hard_start; - - xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); - - switch (act) { - case XDP_PASS: - break; - case XDP_TX: - xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); - break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping packet */ - case XDP_DROP: - result = I40E_XDP_CONSUMED; - break; - } - rcu_read_unlock(); - return result; -} - -/** - * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer - * @rx_ring: Rx ring - * @bi: Rx buffer to populate - * - * This function allocates an Rx buffer. The buffer can come from fill - * queue, or via the recycle queue (next_to_alloc). 
- * - * Returns true for a successful allocation, false otherwise - **/ -static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - void *addr = bi->addr; - u64 handle, hr; - - if (addr) { - rx_ring->rx_stats.page_reuse_count++; - return true; - } - - if (!xsk_umem_peek_addr(umem, &handle)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - bi->dma = xdp_umem_get_dma(umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); - - xsk_umem_discard_addr(umem); - return true; -} - -/** - * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer - * @rx_ring: Rx ring - * @bi: Rx buffer to populate - * - * This function allocates an Rx buffer. The buffer can come from fill - * queue, or via the reuse queue. - * - * Returns true for a successful allocation, false otherwise - **/ -static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - u64 handle, hr; - - if (!xsk_umem_peek_addr_rq(umem, &handle)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - handle &= rx_ring->xsk_umem->chunk_mask; - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - bi->dma = xdp_umem_get_dma(umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); - - xsk_umem_discard_addr_rq(umem); - return true; -} - -static __always_inline bool -__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count, - bool alloc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi)) -{ - u16 ntu = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - bool ok = true; - - rx_desc = I40E_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; - do { - if (!alloc(rx_ring, bi)) { - ok = false; - goto no_buffers; - } - - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, - rx_ring->rx_buf_len, - DMA_BIDIRECTIONAL); - - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - - rx_desc++; - bi++; - ntu++; - - if (unlikely(ntu == rx_ring->count)) { - rx_desc = I40E_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; - ntu = 0; - } - - rx_desc->wb.qword1.status_error_len = 0; - count--; - } while (count); - -no_buffers: - if (rx_ring->next_to_use != ntu) - i40e_release_rx_desc(rx_ring, ntu); - - return ok; -} - -/** - * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers - * @rx_ring: Rx ring - * @count: The number of buffers to allocate - * - * This function allocates a number of Rx buffers from the reuse queue - * or fill ring and places them on the Rx ring. - * - * Returns true for a successful allocation, false otherwise - **/ -bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) -{ - return __i40e_alloc_rx_buffers_zc(rx_ring, count, - i40e_alloc_buffer_slow_zc); -} - -/** - * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers - * @rx_ring: Rx ring - * @count: The number of buffers to allocate - * - * This function allocates a number of Rx buffers from the fill ring - * or the internal recycle mechanism and places them on the Rx ring. 
- * - * Returns true for a successful allocation, false otherwise - **/ -static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count) -{ - return __i40e_alloc_rx_buffers_zc(rx_ring, count, - i40e_alloc_buffer_zc); -} - -/** - * i40e_get_rx_buffer_zc - Return the current Rx buffer - * @rx_ring: Rx ring - * @size: The size of the rx buffer (read from descriptor) - * - * This function returns the current, received Rx buffer, and also - * does DMA synchronization. the Rx ring. - * - * Returns the received Rx buffer - **/ -static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring, - const unsigned int size) -{ - struct i40e_rx_buffer *bi; - - bi = &rx_ring->rx_bi[rx_ring->next_to_clean]; - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - bi->dma, 0, - size, - DMA_BIDIRECTIONAL); - - return bi; -} - -/** - * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer - * @rx_ring: Rx ring - * @old_bi: The Rx buffer to recycle - * - * This function recycles a finished Rx buffer, and places it on the - * recycle queue (next_to_alloc). - **/ -static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *old_bi) -{ - struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; - u16 nta = rx_ring->next_to_alloc; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_bi->dma = old_bi->dma; - new_bi->addr = old_bi->addr; - new_bi->handle = old_bi->handle; - - old_bi->addr = NULL; -} - -/** - * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations - * @alloc: Zero-copy allocator - * @handle: Buffer handle - **/ -void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) -{ - struct i40e_rx_buffer *bi; - struct i40e_ring *rx_ring; - u64 hr, mask; - u16 nta; - - rx_ring = container_of(alloc, struct i40e_ring, zca); - hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; - mask = rx_ring->xsk_umem->chunk_mask; - - nta = rx_ring->next_to_alloc; - bi = &rx_ring->rx_bi[nta]; - - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - handle &= mask; - - bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, - rx_ring->xsk_umem->headroom); -} - -/** - * i40e_construct_skb_zc - Create skbufff from zero-copy Rx buffer - * @rx_ring: Rx ring - * @bi: Rx buffer - * @xdp: xdp_buff - * - * This functions allocates a new skb from a zero-copy Rx buffer. - * - * Returns the skb, or NULL on failure. 
- **/
-static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
-					     struct i40e_rx_buffer *bi,
-					     struct xdp_buff *xdp)
-{
-	unsigned int metasize = xdp->data - xdp->data_meta;
-	unsigned int datasize = xdp->data_end - xdp->data;
-	struct sk_buff *skb;
-
-	/* allocate a skb to store the frags */
-	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-			       xdp->data_end - xdp->data_hard_start,
-			       GFP_ATOMIC | __GFP_NOWARN);
-	if (unlikely(!skb))
-		return NULL;
-
-	skb_reserve(skb, xdp->data - xdp->data_hard_start);
-	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
-	if (metasize)
-		skb_metadata_set(skb, metasize);
-
-	i40e_reuse_rx_buffer_zc(rx_ring, bi);
-	return skb;
-}
-
-/**
- * i40e_inc_ntc - Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
-	u32 ntc = rx_ring->next_to_clean + 1;
-
-	ntc = (ntc < rx_ring->count) ? ntc : 0;
-	rx_ring->next_to_clean = ntc;
-	prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
-/**
- * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
- * @rx_ring: Rx ring
- * @budget: NAPI budget
- *
- * Returns amount of work completed
- **/
-int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
-{
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	unsigned int xdp_res, xdp_xmit = 0;
-	bool failure = false;
-	struct sk_buff *skb;
-	struct xdp_buff xdp;
-
-	xdp.rxq = &rx_ring->xdp_rxq;
-
-	while (likely(total_rx_packets < (unsigned int)budget)) {
-		struct i40e_rx_buffer *bi;
-		union i40e_rx_desc *rx_desc;
-		unsigned int size;
-		u64 qword;
-
-		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			failure = failure ||
-				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
-								 cleaned_count);
-			cleaned_count = 0;
-		}
-
-		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
-		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we have
-		 * verified the descriptor has been written back.
-		 */
-		dma_rmb();
-
-		bi = i40e_clean_programming_status(rx_ring, rx_desc,
-						   qword);
-		if (unlikely(bi)) {
-			i40e_reuse_rx_buffer_zc(rx_ring, bi);
-			cleaned_count++;
-			continue;
-		}
-
-		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		if (!size)
-			break;
-
-		bi = i40e_get_rx_buffer_zc(rx_ring, size);
-		xdp.data = bi->addr;
-		xdp.data_meta = xdp.data;
-		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
-		xdp.data_end = xdp.data + size;
-		xdp.handle = bi->handle;
-
-		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
-		if (xdp_res) {
-			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
-				xdp_xmit |= xdp_res;
-				bi->addr = NULL;
-			} else {
-				i40e_reuse_rx_buffer_zc(rx_ring, bi);
-			}
-
-			total_rx_bytes += size;
-			total_rx_packets++;
-
-			cleaned_count++;
-			i40e_inc_ntc(rx_ring);
-			continue;
-		}
-
-		/* XDP_PASS path */
-
-		/* NB! We are not checking for errors using
-		 * i40e_test_staterr with
-		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
-		 * SBP is *not* set in PRT_SBPVSI (default not set).
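-		 * (With SBP clear the hardware drops bad frames instead
-		 * of storing them, so the check would be dead code on
-		 * this path.)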
-		 */
-		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
-		if (!skb) {
-			rx_ring->rx_stats.alloc_buff_failed++;
-			break;
-		}
-
-		cleaned_count++;
-		i40e_inc_ntc(rx_ring);
-
-		if (eth_skb_pad(skb))
-			continue;
-
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		i40e_process_skb_fields(rx_ring, rx_desc, skb);
-		napi_gro_receive(&rx_ring->q_vector->napi, skb);
-	}
-
-	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
-	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
-
-	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
-		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
-		else
-			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
-
-		return (int)total_rx_packets;
-	}
-	return failure ? budget : (int)total_rx_packets;
-}
-
-/**
- * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
- * @xdp_ring: XDP Tx ring
- * @budget: NAPI budget
- *
- * Returns true if the work is finished.
- **/
-static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
-{
-	struct i40e_tx_desc *tx_desc = NULL;
-	struct i40e_tx_buffer *tx_bi;
-	bool work_done = true;
-	struct xdp_desc desc;
-	dma_addr_t dma;
-
-	while (budget-- > 0) {
-		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
-			xdp_ring->tx_stats.tx_busy++;
-			work_done = false;
-			break;
-		}
-
-		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
-			break;
-
-		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
-		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
-					   DMA_BIDIRECTIONAL);
-
-		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
-		tx_bi->bytecount = desc.len;
-
-		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
-		tx_desc->buffer_addr = cpu_to_le64(dma);
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(I40E_TX_DESC_CMD_ICRC
-				   | I40E_TX_DESC_CMD_EOP,
-				   0, desc.len, 0);
-
-		xdp_ring->next_to_use++;
-		if (xdp_ring->next_to_use == xdp_ring->count)
-			xdp_ring->next_to_use = 0;
-	}
-
-	if (tx_desc) {
-		/* Request an interrupt for the last frame and bump tail ptr. */
-		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
-						 I40E_TXD_QW1_CMD_SHIFT);
-		i40e_xdp_ring_update_tail(xdp_ring);
-
-		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
-	}
-
-	return !!budget && work_done;
-}
-
-/**
- * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
- * @tx_ring: XDP Tx ring
- * @tx_bi: Tx buffer info to clean
- **/
-static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
-				     struct i40e_tx_buffer *tx_bi)
-{
-	xdp_return_frame(tx_bi->xdpf);
-	dma_unmap_single(tx_ring->dev,
-			 dma_unmap_addr(tx_bi, dma),
-			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
-	dma_unmap_len_set(tx_bi, len, 0);
-}
-
-/**
- * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
- * @vsi: Current VSI
- * @tx_ring: XDP Tx ring
- * @napi_budget: NAPI budget
- *
- * Returns true if cleanup/transmission is done.
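- *
- * Completion is head-writeback based. As a worked example (numbers are
- * illustrative only): with count = 512, next_to_clean = 500 and a
- * written-back head of 10, head_idx is unwrapped to 10 + 512 = 522, so
- * frames_ready = 522 - 500 = 22.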
- **/ -bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, - struct i40e_ring *tx_ring, int napi_budget) -{ - unsigned int ntc, total_bytes = 0, budget = vsi->work_limit; - u32 i, completed_frames, frames_ready, xsk_frames = 0; - struct xdp_umem *umem = tx_ring->xsk_umem; - u32 head_idx = i40e_get_head(tx_ring); - bool work_done = true, xmit_done; - struct i40e_tx_buffer *tx_bi; - - if (head_idx < tx_ring->next_to_clean) - head_idx += tx_ring->count; - frames_ready = head_idx - tx_ring->next_to_clean; - - if (frames_ready == 0) { - goto out_xmit; - } else if (frames_ready > budget) { - completed_frames = budget; - work_done = false; - } else { - completed_frames = frames_ready; - } - - ntc = tx_ring->next_to_clean; - - for (i = 0; i < completed_frames; i++) { - tx_bi = &tx_ring->tx_bi[ntc]; - - if (tx_bi->xdpf) - i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); - else - xsk_frames++; - - tx_bi->xdpf = NULL; - total_bytes += tx_bi->bytecount; - - if (++ntc >= tx_ring->count) - ntc = 0; - } - - tx_ring->next_to_clean += completed_frames; - if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) - tx_ring->next_to_clean -= tx_ring->count; - - if (xsk_frames) - xsk_umem_complete_tx(umem, xsk_frames); - - i40e_arm_wb(tx_ring, vsi, budget); - i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); - -out_xmit: - if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) - xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - - xmit_done = i40e_xmit_zc(tx_ring, budget); - - return work_done && xmit_done; -} - -/** - * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup - * @dev: the netdevice - * @queue_id: queue id to wake up - * @flags: ignored in our case since we have Rx and Tx in the same NAPI. - * - * Returns <0 for errors, 0 otherwise. - **/ -int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_ring *ring; - - if (test_bit(__I40E_VSI_DOWN, vsi->state)) - return -ENETDOWN; - - if (!i40e_enabled_xdp_vsi(vsi)) - return -ENXIO; - - if (queue_id >= vsi->num_queue_pairs) - return -ENXIO; - - if (!vsi->xdp_rings[queue_id]->xsk_umem) - return -ENXIO; - - ring = vsi->xdp_rings[queue_id]; - - /* The idea here is that if NAPI is running, mark a miss, so - * it will run again. If not, trigger an interrupt and - * schedule the NAPI from interrupt context. If NAPI would be - * scheduled here, the interrupt affinity would not be - * honored. 
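-	 *
-	 * For reference (not part of the driver itself): user space
-	 * typically reaches this callback through the AF_XDP need_wakeup
-	 * protocol, e.g.:
-	 *
-	 *	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);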
-	 */
-	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
-		i40e_force_wb(vsi, ring->q_vector);
-
-	return 0;
-}
-
-/**
- * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
- * @rx_ring: Rx ring
- **/
-void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
-{
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i++) {
-		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
-
-		if (!rx_bi->addr)
-			continue;
-
-		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
-		rx_bi->addr = NULL;
-	}
-}
-
-/**
- * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
- * @tx_ring: XDP Tx ring
- **/
-void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
-{
-	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
-	struct xdp_umem *umem = tx_ring->xsk_umem;
-	struct i40e_tx_buffer *tx_bi;
-	u32 xsk_frames = 0;
-
-	while (ntc != ntu) {
-		tx_bi = &tx_ring->tx_bi[ntc];
-
-		if (tx_bi->xdpf)
-			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
-		else
-			xsk_frames++;
-
-		tx_bi->xdpf = NULL;
-
-		ntc++;
-		if (ntc >= tx_ring->count)
-			ntc = 0;
-	}
-
-	if (xsk_frames)
-		xsk_umem_complete_tx(umem, xsk_frames);
-}
-
-/**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
- * @vsi: VSI to check
- *
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
- **/
-bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
-{
-	struct net_device *netdev = vsi->netdev;
-	int i;
-
-	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		if (xdp_get_umem_from_qid(netdev, i))
-			return true;
-	}
-
-	return false;
-}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
deleted file mode 100644
index 9ed59c14eb55..000000000000
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2018 Intel Corporation. */
-
-#ifndef _I40E_XSK_H_
-#define _I40E_XSK_H_
-
-struct i40e_vsi;
-struct xdp_umem;
-struct zero_copy_allocator;
-
-int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
-			u16 qid);
-void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
-bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
-int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
-
-bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
-			   struct i40e_ring *tx_ring, int napi_budget);
-int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-
-#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/kcompat.c b/drivers/net/ethernet/intel/i40e/kcompat.c
new file mode 100644
index 000000000000..20faa6c38d88
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat.c
@@ -0,0 +1,2759 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation.
 */
+
+#include "i40e.h"
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__
+/* From lib/vsprintf.c */
+#include <linux/ctype.h>
+
+static int skip_atoi(const char **s)
+{
+	int i=0;
+
+	while (isdigit(**s))
+		i = i*10 + *((*s)++) - '0';
+	return i;
+}
+
+#define _kc_ZEROPAD	1		/* pad with zero */
+#define _kc_SIGN	2		/* unsigned/signed long */
+#define _kc_PLUS	4		/* show plus */
+#define _kc_SPACE	8		/* space if plus */
+#define _kc_LEFT	16		/* left justified */
+#define _kc_SPECIAL	32		/* 0x */
+#define _kc_LARGE	64		/* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+	char c,sign,tmp[66];
+	const char *digits;
+	const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+	const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+	int i;
+
+	digits = (type & _kc_LARGE) ? large_digits : small_digits;
+	if (type & _kc_LEFT)
+		type &= ~_kc_ZEROPAD;
+	if (base < 2 || base > 36)
+		return 0;
+	c = (type & _kc_ZEROPAD) ? '0' : ' ';
+	sign = 0;
+	if (type & _kc_SIGN) {
+		if (num < 0) {
+			sign = '-';
+			num = -num;
+			size--;
+		} else if (type & _kc_PLUS) {
+			sign = '+';
+			size--;
+		} else if (type & _kc_SPACE) {
+			sign = ' ';
+			size--;
+		}
+	}
+	if (type & _kc_SPECIAL) {
+		if (base == 16)
+			size -= 2;
+		else if (base == 8)
+			size--;
+	}
+	i = 0;
+	if (num == 0)
+		tmp[i++]='0';
+	else while (num != 0)
+		tmp[i++] = digits[do_div(num,base)];
+	if (i > precision)
+		precision = i;
+	size -= precision;
+	if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+		while(size-->0) {
+			if (buf <= end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	if (sign) {
+		if (buf <= end)
+			*buf = sign;
+		++buf;
+	}
+	if (type & _kc_SPECIAL) {
+		if (base==8) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+		} else if (base==16) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+			if (buf <= end)
+				*buf = digits[33];
+			++buf;
+		}
+	}
+	if (!(type & _kc_LEFT)) {
+		while (size-- > 0) {
+			if (buf <= end)
+				*buf = c;
+			++buf;
+		}
+	}
+	while (i < precision--) {
+		if (buf <= end)
+			*buf = '0';
+		++buf;
+	}
+	while (i-- > 0) {
+		if (buf <= end)
+			*buf = tmp[i];
+		++buf;
+	}
+	while (size-- > 0) {
+		if (buf <= end)
+			*buf = ' ';
+		++buf;
+	}
+	return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int len;
+	unsigned long long num;
+	int i, base;
+	char *str, *end, c;
+	const char *s;
+
+	int flags;		/* flags to number() */
+
+	int field_width;	/* width of output field */
+	int precision;		/* min. # of digits for integers; max
+				   number of chars for a string */
+	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
+				/* 'z' support added 23/7/1999 S.H.
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + 
return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? (size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
*/ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + 
size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = 
(__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + if (pf->flags & I40E_FLAG_DCB_ENABLED) + return vsi->tc_config.numtc; + + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + + if (num_tc > I40E_MAX_TRAFFIC_CLASS) + return -EINVAL; + + vsi->tc_config.numtc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + return dcbcfg->etscfg.prioritytable[up]; +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case 
PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
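+ *
+ * __kc_netif_set_xps_queue() below emulates netif_set_xps_queue() by
+ * looking up the queue's "xps_cpus" sysfs attribute and writing the CPU
+ * mask through its store() handler; e.g. a mask covering CPUs 0-3 is
+ * written as the string "f".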
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + 
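+/*
+ * Illustrative decode using the table above (this mirrors what
+ * __kc_pcie_get_minimum_link() does below; it is not additional driver
+ * code): the PCI_EXP_LNKSTA_CLS field of the Link Status register
+ * indexes pcie_link_speed[] directly.
+ *
+ *	u16 lnksta;
+ *
+ *	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ *	speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ */
+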
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width)
+{
+	int ret;
+
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	while (dev) {
+		u16 lnksta;
+		enum pci_bus_speed next_speed;
+		enum pcie_link_width next_width;
+
+		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+		if (ret)
+			return ret;
+
+		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			PCI_EXP_LNKSTA_NLW_SHIFT;
+
+		if (next_speed < *speed)
+			*speed = next_speed;
+
+		if (next_width < *width)
+			*width = next_width;
+
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev)
+{
+	int i;
+	u16 status;
+
+	/* Wait for the Transaction Pending bit to clear */
+	for (i = 0; i < 4; i++) {
+		if (i)
+			msleep((1 << (i - 1)) * 100);
+
+		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+		if (!(status & PCI_EXP_DEVSTA_TRPND))
+			return 1;
+	}
+
+	return 0;
+}
+#endif /* <RHEL6.7 */
+#endif /* < 3.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags)
+{
+	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+	unsigned int len;
+	bool found;
+
+#define __KC_IP6_FH_F_FRAG	BIT(0)
+#define __KC_IP6_FH_F_AUTH	BIT(1)
+#define __KC_IP6_FH_F_SKIP_RH	BIT(2)
+
+	if (fragoff)
+		*fragoff = 0;
+
+	if (*offset) {
+		struct ipv6hdr _ip6, *ip6;
+
+		ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+		if (!ip6 || (ip6->version != 6)) {
+			printk(KERN_ERR "IPv6 header not found\n");
+			return -EBADMSG;
+		}
+		start = *offset + sizeof(struct ipv6hdr);
+		nexthdr = ip6->nexthdr;
+	}
+	len = skb->len - start;
+
+	do {
+		struct ipv6_opt_hdr _hdr, *hp;
+		unsigned int hdrlen;
+		found = (nexthdr == target);
+
+		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+			if (target < 0 || found)
+				break;
+			return -ENOENT;
+		}
+
+		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+		if (!hp)
+			return -EBADMSG;
+
+		if (nexthdr == NEXTHDR_ROUTING) {
+			struct ipv6_rt_hdr _rh, *rh;
+
+			rh = skb_header_pointer(skb, start, sizeof(_rh),
+						&_rh);
+			if (!rh)
+				return -EBADMSG;
+
+			if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
+			    rh->segments_left == 0)
+				found = false;
+		}
+
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+			unsigned short _frag_off;
+			__be16 *fp;
+
+			if (flags)	/* Indicate that this is a fragment */
+				*flags |= __KC_IP6_FH_F_FRAG;
+			fp = skb_header_pointer(skb,
+						start+offsetof(struct frag_hdr,
+							       frag_off),
+						sizeof(_frag_off),
+						&_frag_off);
+			if (!fp)
+				return -EBADMSG;
+
+			_frag_off = ntohs(*fp) & ~0x7;
+			if (_frag_off) {
+				if (target < 0 &&
+				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
+				     hp->nexthdr == NEXTHDR_NONE)) {
+					if (fragoff)
+						*fragoff = _frag_off;
+					return hp->nexthdr;
+				}
+				return -ENOENT;
+			}
+			hdrlen = 8;
+		} else if (nexthdr == NEXTHDR_AUTH) {
+			if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0))
+				break;
+			hdrlen = (hp->hdrlen + 2) << 2;
+		} else
+			hdrlen = ipv6_optlen(hp);
+
+		if (!found) {
+			nexthdr = hp->nexthdr;
+			len -= hdrlen;
+			start += hdrlen;
+		}
+	} while (!found);
+
+	*offset = start;
+	return nexthdr;
+}
+
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
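+
+/*
+ * Illustrative caller (not part of the original code): request between
+ * 1 and num_online_cpus() MSI-X vectors and accept whatever count the
+ * range helper manages to enable:
+ *
+ *	nvec = __kc_pci_enable_msix_range(pdev, entries, 1,
+ *					  num_online_cpus());
+ *	if (nvec < 0)
+ *		return nvec;
+ */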
+#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, 
da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size 
requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. 
*/
+ if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+ continue;
+
+ if (i-- == 0)
+ return cpu;
+ }
+ }
+#endif /* KERNEL_VERSION >= 2.6.28 */
+ BUG();
+}
+#endif
+#endif
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) )
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+ u8 *mac_addr __maybe_unused)
+{
+#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \
+ !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \
+ !defined(CONFIG_SPARC))
+ return -ENODEV;
+#else
+ const unsigned char *addr;
+ struct device_node *dp;
+
+ if (dev_is_pci(dev))
+ dp = pci_device_to_OF_node(to_pci_dev(dev));
+ else
+#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF)
+ dp = dev->of_node;
+#else
+ dp = NULL;
+#endif
+
+ addr = NULL;
+ if (dp)
+ addr = of_get_mac_address(dp);
+#ifdef CONFIG_SPARC
+ /* Kernel hasn't implemented arch_get_platform_mac_address, but we
+ * should handle the SPARC case here since it was supported
+ * originally. This is replaced by arch_get_platform_mac_address()
+ * upstream.
+ */
+ if (!addr)
+ addr = idprom->id_ethaddr;
+#endif
+ if (!addr)
+ return -ENODEV;
+
+ ether_addr_copy(mac_addr, addr);
+ return 0;
+#endif
+}
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#endif /* < 4.5.0 */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
+ (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
+const char *_kc_phy_speed_to_str(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
+ case SPEED_100:
+ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_5000:
+ return "5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_14000:
+ return "14Gbps";
+ case SPEED_20000:
+ return "20Gbps";
+ case SPEED_25000:
+ return "25Gbps";
+ case SPEED_40000:
+ return "40Gbps";
+ case SPEED_50000:
+ return "50Gbps";
+ case SPEED_56000:
+ return "56Gbps";
+#ifdef SPEED_100000
+ case SPEED_100000:
+ return "100Gbps";
+#endif
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported (update phy-core.c)";
+ }
+}
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) )
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+ struct ethtool_link_ksettings *src)
+{
+ unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
+ unsigned int idx = 0;
+
+ for (; idx < size; idx++) {
+ dst->link_modes.supported[idx] &=
+ src->link_modes.supported[idx];
+ dst->link_modes.advertising[idx] &=
+ src->link_modes.advertising[idx];
+ }
+}
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
+/* PCIe link information */
+#define PCIE_SPEED2STR(speed) \
+ ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
+ (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
+ (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
+ (speed) == PCIE_SPEED_2_5GT ?
"2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available 
PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != 
TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ /* Note: Upstream has driver_block_list, but older kernels do not */
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+ return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv,
+ f->extack);
+#else
+ return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv);
+#endif
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, cb, cb_ident);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* 5.3.0 */
diff --git a/drivers/net/ethernet/intel/i40e/kcompat.h b/drivers/net/ethernet/intel/i40e/kcompat.h
new file mode 100644
index 000000000000..c3ee144db4e7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat.h
@@ -0,0 +1,6812 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+#endif /* GCC_VERSION */
+
+/* Backport macros for controlling GCC diagnostics */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) )
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+#else
+#define __diag(s)
+#endif /* GCC_VERSION >= 4.6 */
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+#endif /* LINUX_VERSION < 4.18.0 */
+
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000L
+#endif
+#include
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct i40e_pf
+#define adapter_q_vector i40e_q_vector
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_I40E_NAPI
+#define CONFIG_I40E_NAPI
+#endif
+#else
+#undef CONFIG_I40E_NAPI
+#endif /* NAPI */
+
+/* Dynamic LTR and deeper C-State support disable/enable */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_I40E_DISABLE_PACKET_SPLIT
+#define CONFIG_I40E_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
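+/* Optional build flag: defining DISABLE_PM at compile time hides CONFIG_PM
+ * from the driver so every power-management path below compiles out.
+ */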
+#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef BITS_PER_TYPE
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#endif
+
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+
+#ifndef DECLARE_BITMAP
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...)
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) +#ifdef CONFIG_SUSE_KERNEL +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. 
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. 
+ */
+ */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif +#endif /* __KLOCWORK__ */ + +#include "kcompat_vfd.h" +struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs); +void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj); + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \
+ int err; \
+ __diag_push(); \
+ __diag(ignored "-Wformat-nonliteral"); \
+ err = __trace_bprintk(ip, fmt, ##args); \
+ __diag_pop(); \
+ err; \
+})
+#endif /* GCC_VERSION < 5.1.0 */
+
+/* Newer kernels removed <linux/pci-aspm.h> */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) )
+#define HAVE_PCI_ASPM_H
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation.
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if 
(debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+        pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+        pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+        pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+        pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+        pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+        pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+        pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+        pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+        pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+        pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+        pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+        struct hlist_node *first;
+};
+
+struct hlist_node {
+        struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+        struct hlist_node *next = n->next;
+        struct hlist_node **pprev = n->pprev;
+        *pprev = next;
+        if (next)
+                next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+        __hlist_del(n);
+        n->next = NULL;
+        n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+        struct hlist_node *first = h->first;
+        n->next = first;
+        if (first)
+                first->pprev = &n->next;
+        h->first = n;
+        n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+        return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+        h->next = NULL;
+        h->pprev = NULL;
+}
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+        return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+        struct __kc_callback_head *next;
+        void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef
CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { 
set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. 
This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
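+	/*
+	 * Usage sketch: like memcmp(), compare_ether_addr() returns
+	 * nonzero when the two addresses differ, so match tests negate
+	 * it (hypothetical filter-list walk assumed):
+	 *
+	 *   if (!compare_ether_addr(entry->mac, netdev->dev_addr))
+	 *           match = true;
+	 */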
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + 
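+	/*
+	 * The encoding below mirrors the PCIe spec: each enumerator is
+	 * the lane count itself (0x0C == x12, 0x20 == x32), so callers
+	 * can log it numerically, e.g. (sketch, my_read_link_width() is
+	 * hypothetical):
+	 *
+	 *   enum pcie_link_width width = my_read_link_width(pdev);
+	 *   dev_info(pci_dev_to_dev(pdev), "link width x%d\n", (int)width);
+	 */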
PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, 
_func) \ +do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, 
len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + int (*poll)(struct napi_struct *, int); + struct net_device *dev; 
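+	/*
+	 * Consumption sketch (hypothetical single-RX-queue adapter
+	 * assumed): drivers keep calling the modern NAPI API and the
+	 * wrappers below route it onto the old net_device polling
+	 * fields,
+	 *
+	 *   netif_napi_add(netdev, &adapter->napi, my_poll, 64);
+	 *   napi_enable(&adapter->napi);
+	 *
+	 * where my_poll() is the driver's poll routine.
+	 */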
+        int weight;
+};
+#endif
+
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware, and thus we have a driver
+ * define list which determines which drivers support multiple queues, and
+ * thus need these stronger defines. If a driver does not support multi-queue
+ * functionality, you don't need to add it to this list.
+ */
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+                                          int (*poll)(struct napi_struct *, int), int weight)
+{
+        struct net_device *poll_dev = napi_to_poll_dev(napi);
+        poll_dev->poll = __kc_adapter_clean;
+        poll_dev->priv = napi;
+        poll_dev->weight = weight;
+        set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+        set_bit(__LINK_STATE_START, &poll_dev->state);
+        dev_hold(poll_dev);
+        napi->poll = poll;
+        napi->weight = weight;
+        napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+        struct net_device *poll_dev = napi_to_poll_dev(napi);
+        WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+        dev_put(poll_dev);
+        memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+        return netif_running(napi->dev) &&
+                netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+        if (napi_schedule_prep(napi))
+                __netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+        struct net_device *dev = napi_to_poll_dev(n);
+
+        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+                /* No hurry. */
+                msleep(1);
+        }
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+        napi_gro_flush(napi);
+#endif
+        netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+                                       int (*poll)(struct napi_struct *, int), int weight)
+{
+        dev->poll = poll;
+        dev->weight = weight;
+        napi->poll = poll;
+        napi->weight = weight;
+        napi->dev = dev;
+}
+#define netif_napi_add __kc_netif_napi_add
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...)
\ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#else /* < 2.6.25 */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) 
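+	/*
+	 * Single-queue fallback sketch: without multiqueue TX support the
+	 * "all queues" helpers collapse onto the lone device queue, so a
+	 * reset path stays queue-agnostic (my_hw_reset() is hypothetical):
+	 *
+	 *   netif_tx_stop_all_queues(netdev);
+	 *   my_hw_reset(adapter);
+	 *   netif_tx_wake_all_queues(netdev);
+	 */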
+#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#define qdisc_reset_all_tx(a) +#else /* < 2.6.27 */ +#include +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ 
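+/*
+ * Decoding sketch for the link-status masks above (assumes "pos" was
+ * obtained via pci_find_capability(pdev, PCI_CAP_ID_EXP)):
+ *
+ *   u16 lnksta;
+ *   pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);
+ *   speed = lnksta & PCI_EXP_LNKSTA_CLS;         (1 => 2.5GT/s)
+ *   width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;  (negotiated lanes)
+ */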
+#endif + +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef netdev_tx_t 
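+/*
+ * Before 2.6.32 there is no netdev_tx_t, so it is mapped to plain int
+ * below; with NETDEV_TX_OK/NETDEV_TX_BUSY (provided earlier for old
+ * kernels) a start_xmit handler keeps the usual shape, e.g. (sketch,
+ * my_xmit() is hypothetical):
+ *
+ *   static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
+ *   {
+ *           ...
+ *           return NETDEV_TX_OK;
+ *   }
+ */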
+#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state 
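+	/*
+	 * (Sketch) netdev_name() is what the netdev_* and netif_* logging
+	 * macros below print, e.g. netdev_info(netdev, "link up\n") shows
+	 * the interface name once registered and a placeholder before.
+	 */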
!= NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\
+	netif_level(dbg, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size)); \
+	     (bit) < (size); \
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr) \
+	do { \
+		static struct lock_class_key __key; \
+		(attr)->key = &__key; \
+	} while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended(void)
+{
+	return false;
+}
+#define pm_runtime_suspended(dev)	_kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
+{
+	return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev)	_kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#ifndef pci_bus_speed
+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
+enum _kc_pci_bus_speed {
+	_KC_PCIE_SPEED_2_5GT	= 0x14,
+	_KC_PCIE_SPEED_5_0GT	= 0x15,
+	_KC_PCIE_SPEED_8_0GT	= 0x16,
+	_KC_PCI_SPEED_UNKNOWN	= 0xff,
+};
+#define pci_bus_speed		_kc_pci_bus_speed
+#define PCIE_SPEED_2_5GT	_KC_PCIE_SPEED_2_5GT
+#define PCIE_SPEED_5_0GT	_KC_PCIE_SPEED_5_0GT
+#define PCIE_SPEED_8_0GT	_KC_PCIE_SPEED_8_0GT
+#define PCI_SPEED_UNKNOWN	_KC_PCI_SPEED_UNKNOWN
+#endif /* pci_bus_speed */
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+				   const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT
+#ifdef CONFIG_PCI_IOV
+	if (dev->is_virtfn)
+		dev = dev->physfn;
+#endif /* CONFIG_PCI_IOV */
+#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */
+	return dev;
+}
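+
+/* Illustrative usage sketch (editorial addition): pci_physfn() lets SR-IOV
+ * code resolve a VF to its parent PF before touching PF-only state; vfdev
+ * is a hypothetical VF pci_dev:
+ *
+ *	struct pci_dev *pfdev = pci_physfn(vfdev);
+ *
+ *	dev_info(&pfdev->dev, "PF backing this VF\n");
+ */
+#endif /* ! RHEL >= 6.4 */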
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+						   unsigned int txq)
+{
+	dev->egress_subqueue_count = txq;
+	return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#else /* HAVE_TX_MQ */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+						   unsigned int __always_unused txq)
+{
+	return 0;
+}
+#endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq) \
+	_kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+struct device_node;
+#else /* < 2.6.35 */
+#define HAVE_STRUCT_DEVICE_OF_NODE
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#include <linux/pm_qos_params.h>
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+enum {
+	WQ_UNBOUND = 0,
+	WQ_RESCUER = 0,
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+							    unsigned int length)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+	if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+		skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+		skb->dev = dev;
+	}
+	return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
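+
+/* Illustrative usage sketch (editorial addition): the backported
+ * netdev_alloc_skb_ip_align() is called exactly like the modern helper when
+ * refilling an RX ring; rx_buf_len is a hypothetical buffer size:
+ *
+ *	skb = netdev_alloc_skb_ip_align(netdev, rx_buf_len);
+ *	if (!skb)
+ *		break;
+ */
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...) 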
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > 
KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 
+#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) do { \ + void __kc_kfree_rcu(struct rcu_head *rcu_head) \ + { \ + void *ptr = container_of(rcu_head, \ + typeof(*_ptr), \ + _rcu_head); \ + kfree(ptr); \ + } \ + call_rcu(&(_ptr)->_rcu_head, __kc_kfree_rcu); \ +} while (0) +#define HAVE_KFREE_RCU_BARRIER +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define 
SUPPORTED_20000baseKR2_Full	BIT(22)
+#define ADVERTISED_20000baseMLD2_Full	BIT(21)
+#define ADVERTISED_20000baseKR2_Full	BIT(22)
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 3.0.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER	0x0800
+#define CTL1000_ENABLE_MASTER	0x1000
+
+/* kernels less than 3.0.0 don't have this */
+#ifndef ETH_P_8021AD
+#define ETH_P_8021AD	0x88A8
+#endif
+
+/* Stub definition for !CONFIG_OF is introduced later */
+#ifdef CONFIG_OF
+static inline struct device_node *
+pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev)
+{
+#ifdef HAVE_STRUCT_DEVICE_OF_NODE
+	return pdev ? pdev->dev.of_node : NULL;
+#else
+	return NULL;
+#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */
+}
+#endif /* CONFIG_OF */
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifndef dma_zalloc_coherent
+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag) _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+	return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+	frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag) _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+	return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+	return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+	_kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+					      const skb_frag_t *frag,
+					      size_t offset, size_t size,
+					      enum dma_data_direction dir)
+{
+	return dma_map_page(dev, skb_frag_page(frag),
+			    frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+	put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
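+
+/* Illustrative usage sketch (editorial addition): with the skb_frag_*
+ * accessors above, a TX mapping loop compiles unchanged on pre-3.2 and
+ * newer kernels:
+ *
+ *	const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ *	dma_addr_t dma;
+ *
+ *	dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ *			       DMA_TO_DEVICE);
+ *	if (dma_mapping_error(dev, dma))
+ *		goto unmap_and_drop;
+ */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN	-1
+#endif
+#ifndef 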
DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS	0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL	0
+#endif /* NETIF_F_RXALL */
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define NUMTCS_RETURNS_U8
+
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page,
+			 int off, int size, unsigned int truesize);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N) do { \
+	eth_random_addr(N->dev_addr); \
+	N->addr_assign_type |= NET_ADDR_RANDOM; \
+	} while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
+
+#ifndef for_each_set_bit_from
+#define for_each_set_bit_from(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit)); \
+	     (bit) < (size); \
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit_from */
+
+#else /* < 3.4.0 */
+#include <linux/kconfig.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \
+    ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) )
+#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+#ifndef ether_addr_equal
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+	return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
+#endif
+
+/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */
+#ifdef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+#else
+#include <linux/of_net.h>
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
+#define PCI_EXP_LNKCAP2		44	/* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX		0x0002	/* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T		0x0004	/* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT		0x0008	/* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX		0x0010	/* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4		0x0020	/* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR		0x0040	/* 10G KR EEE cap */
+#endif
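+
+/* Illustrative usage sketch (editorial addition): these MDIO_EEE_* bits are
+ * the inputs to the mmd_eee_*_to_ethtool_* helpers defined further down in
+ * this file; for a hypothetical PHY advertising 100TX and 1000T EEE:
+ *
+ *	u16 eee_cap = MDIO_EEE_100TX | MDIO_EEE_1000T;
+ *	u32 supported = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
+ *	// == SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full
+ */
+
+#ifndef __GFP_MEMALLOC
+#define __GFP_MEMALLOC 0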
+#endif
+
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+	memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#include <linux/mdio.h>
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so should be safe
+ * to trigger activation on one define
+ */
+#define SUPPORTED_40000baseKR4_Full	BIT(23)
+#define SUPPORTED_40000baseCR4_Full	BIT(24)
+#define SUPPORTED_40000baseSR4_Full	BIT(25)
+#define SUPPORTED_40000baseLR4_Full	BIT(26)
+#define ADVERTISED_40000baseKR4_Full	BIT(23)
+#define ADVERTISED_40000baseCR4_Full	BIT(24)
+#define ADVERTISED_40000baseSR4_Full	BIT(25)
+#define ADVERTISED_40000baseLR4_Full	BIT(26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
+	__kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+#endif /* mmd_eee_adv_to_ethtool_adv_t */
+
+#ifndef ethtool_adv_to_mmd_eee_adv_t
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+	u16 reg = 0;
+
+	if (adv & ADVERTISED_100baseT_Full)
+		reg |= MDIO_EEE_100TX;
+	if (adv & ADVERTISED_1000baseT_Full)
+		reg |= MDIO_EEE_1000T;
+	if (adv & ADVERTISED_10000baseT_Full)
+		reg |= MDIO_EEE_10GT;
+	if (adv & ADVERTISED_1000baseKX_Full)
+		reg |= MDIO_EEE_1000KX;
+	if (adv & ADVERTISED_10000baseKX4_Full)
+		reg |= MDIO_EEE_10GKX4;
+	if (adv & ADVERTISED_10000baseKR_Full)
+		reg |= MDIO_EEE_10GKR;
+
+	return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+#endif /* ethtool_adv_to_mmd_eee_adv_t */
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	BUG_ON(!pos);
+	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+	return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x)	(x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
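+
+/* Illustrative usage sketch (editorial addition): pci_pcie_type() reads the
+ * port type straight from the PCIe capability on pre-2.6.24 kernels, so
+ * callers can branch on it uniformly:
+ *
+ *	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
+ *		... endpoint-specific setup ...
+ */
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \
+    ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) )
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)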
+#endif
+
+#ifndef pcie_capability_read_word
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
+#endif /* pcie_capability_read_word */
+
+#ifndef pcie_capability_read_dword
+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
+#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v)
+#endif
+
+#ifndef pcie_capability_write_word
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
+#endif /* pcie_capability_write_word */
+
+#ifndef pcie_capability_clear_and_set_word
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+					    u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d,p,c,s) \
+	__kc_pcie_capability_clear_and_set_word(d,p,c,s)
+#endif /* pcie_capability_clear_and_set_word */
+
+#ifndef pcie_capability_clear_word
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+				    u16 clear);
+#define pcie_capability_clear_word(d, p, c) \
+	__kc_pcie_capability_clear_word(d, p, c)
+#endif /* pcie_capability_clear_word */
+
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8))
+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
+#endif /* !RHEL6.8+ */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6))
+#include <linux/hashtable.h>
+#else
+
+#define DEFINE_HASHTABLE(name, bits) \
+	struct hlist_head name[1 << (bits)] = \
+			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+	struct hlist_head name[1 << (bits)] __read_mostly = \
+			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+	struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		INIT_HLIST_HEAD(&ht[i]);
+}
+
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+#define hash_add(hashtable, node, key) \
+	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+static inline bool hash_hashed(struct hlist_node *node)
+{
+	return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		if (!hlist_empty(&ht[i]))
+			return false;
+
+	return true;
+}
+
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+static inline void hash_del(struct hlist_node *node)
+{
+	hlist_del_init(node);
+}
+#endif /* RHEL >= 6.6 */
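+
+/* Illustrative usage sketch (editorial addition): the fallback above mirrors
+ * <linux/hashtable.h>, so lookup code is identical either way; struct
+ * my_entry and my_table are hypothetical:
+ *
+ *	struct my_entry {
+ *		u32 key;
+ *		struct hlist_node node;
+ *	};
+ *	static DEFINE_HASHTABLE(my_table, 6);	// 1 << 6 == 64 buckets
+ *
+ *	hash_add(my_table, &e->node, e->key);
+ *	hash_for_each_possible(my_table, e, node, key)
+ *		if (e->key == key)
+ *			return e;
+ */
+
+/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags
+ * parameter on these older kernels.
+ */
+#define __setup_timer(_timer, _fn, _data, _flags)	\
+	setup_timer((_timer), (_fn), (_data))		\
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \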
+    ( ! ( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) )
+
+#ifndef mod_delayed_work
+/**
+ * __mod_delayed_work - modify delay or queue delayed work
+ * @wq: workqueue to use
+ * @dwork: delayed work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Return: %true if @dwork was pending and was rescheduled;
+ *	   %false if it wasn't pending
+ *
+ * Note: the dwork parameter was declared as a void*
+ *	 to avoid compatibility problems with early 2.6 kernels
+ *	 where struct delayed_work is not declared. Unlike the original
+ *	 implementation flags are not preserved and it shouldn't be
+ *	 used in the interrupt context.
+ */
+static inline bool __mod_delayed_work(struct workqueue_struct *wq,
+				      void *dwork,
+				      unsigned long delay)
+{
+	bool ret = cancel_delayed_work(dwork);
+	queue_delayed_work(wq, dwork, delay);
+	return ret;
+}
+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
+#endif /* mod_delayed_work */
+
+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
+#else /* >= 3.7.0 */
+#include <linux/hashtable.h>
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+	return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S  0x01	/* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1   0x02	/* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT	0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
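+
+/* Illustrative usage sketch (editorial addition): with the no-op stubs in
+ * this block, a driver can keep its legacy section annotations and still
+ * build on >= 3.8 kernels, where the macros were removed; my_probe and
+ * my_remove are hypothetical:
+ *
+ *	static int __devinit my_probe(struct pci_dev *pdev,
+ *				      const struct pci_device_id *id);
+ *	static void __devexit my_remove(struct pci_dev *pdev);
+ */
+
+#ifndef __devexit 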
+#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+							 u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+	vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+	skb->vlan_tci = vlan_tci;
+	return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+	__kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_NLATTR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags);
+#endif /* HAVE_NDO_FDB_ADD_NLATTR */
+#if defined(HAVE_FDB_DEL_NLATTR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr);
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr);
+#endif /* HAVE_FDB_DEL_NLATTR */
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
+#endif
+
+/* The definitions for these functions when CONFIG_OF_NET is defined are
+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have
+ * backports for when CONFIG_OF_NET is true. These are separated and
+ * duplicated in order to cover all cases so that all kernels get either the
+ * real definitions (when CONFIG_OF_NET is defined) or the stub definitions
+ * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real
+ * definitions).
+ */
+#ifndef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+
+#if (RHEL_RELEASE_CODE && \
+    (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+    (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)))
+#define HAVE_RHEL7_PCI_DRIVER_RH
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#define HAVE_RHEL7_PCI_RESET_NOTIFY
+#endif /* RHEL >= 7.2 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define HAVE_GENEVE_RX_OFFLOAD
+#endif /* RHEL >=7.3 && RHEL < 7.5 */
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* RHEL >= 7.3 */
+
+/* new hooks added to net_device_ops_extended in RHEL7.4 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* RHEL >= 7.4 */
+#endif /* RHEL >= 7.0 && RHEL < 8.0 */
+
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#endif /* RHEL >= 8.0 */
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#define netdev_notifier_info_to_dev(ptr) ptr
+#ifndef time_in_range64
+#define time_in_range64(a, b, c) \
+	(time_after_eq64(a, b) && \
+	 time_before_eq64(a, c))
+#endif /* time_in_range64 */
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev);
+#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction
+#endif /* <RHEL6.7 */
+
+#else /* >= 3.12.0 */
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_VXLAN_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+#endif /* >= 3.12.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+#define dma_set_mask_and_coherent(_p, _m) 
__kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(7,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+#define HAVE_NDO_DFWD_OPS
+#endif
+
+#ifndef pci_enable_msix_range
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	*(u32 *)dst = *(const u32 *)src;
+	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+	u16 *a = (u16 *)dst;
+	const u16 *b = (const u16 *)src;
+
+	a[0] = b[0];
+	a[1] = b[1];
+	a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef OPTIMIZER_HIDE_VAR
+#ifdef __GNUC__
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#else
+#include
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0)))
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+#ifdef NETIF_F_RXHASH
+	return skb->rxhash;
+#else
+	return 0;
+#endif /* NETIF_F_RXHASH */
+}
+#endif /* !RHEL > 7.0 && !SLES >= 10.4 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define request_firmware_direct	request_firmware
+#endif /* !RHEL || RHEL < 7.5 */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+     !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp)
+
+#else
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic()  smp_mb()
+#endif
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+			  struct net_device *dev,
+			  int (*sync)(struct net_device *, const unsigned char *),
+			  int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+			     struct net_device *dev,
+			     int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+			   struct net_device *dev,
+			   int (*sync)(struct net_device *, const unsigned char *),
+			   int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	__kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	__kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* if someone backports this, hopefully they backport it as a #define.
+ * declare it as zero on older kernels so that if it gets OR'd in
+ * it won't affect anything, thereby avoiding core driver changes
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
+
+#else
+#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \
+      ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) )
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+    !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+	return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+	return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define ktime_get_ts64 ktime_get_ts
+#define ktime_get_real_ts64 ktime_get_real_ts
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL6.8<RHEL7.0 || RHEL7.2+) */
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+static inline void ktime_get_real_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get_real());
+}
+
+static inline void ktime_get_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get());
+}
+#endif
+
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#endif /* RHEL_RELEASE_CODE < RHEL7.5 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3))
+static inline u64 ktime_get_ns(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+	return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime());
+}
+#endif /* RHEL < 7.3 */
+
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include
+#endif /* 3.17.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+#include
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data,
+			 unsigned int max_len);
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+/* RHEL 7.3 backported xmit_more */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_SKB_XMIT_MORE
+#endif /* >= RH 7.3 */
+
+#undef GENMASK
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef
GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
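+ *
+ * A minimal usage sketch (editorial illustration, not part of the
+ * original patch; assumes a transmit path where the skb is about to
+ * be handed to hardware):
+ *
+ *	if (skb_put_padto(skb, ETH_ZLEN))
+ *		return NETDEV_TX_OK;
+ *
+ * On failure the skb has already been freed, so the caller must not
+ * reference it again.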
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#else /* >= 4,1,0 */ +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! 
SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, 
+ int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_DEVLINK_SUPPORT +#endif /* UBUNTU 4,4,0,21, RHEL 7.4, SLES12 SP3 */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_DEVLINK_SUPPORT +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), 
name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_mem_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif /* !SLE_VERSION(12,3,0) */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <SLE_VERSION(12,3,0) */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \
+    SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \
+    RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))
+#define time_is_before_jiffies64(a)	time_after64(get_jiffies_64(), a)
+#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4))
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+	dst[0] = mask & ULONG_MAX;
+
+	if (sizeof(mask) > sizeof(unsigned long))
+		dst[1] = mask >> 32;
+}
+#endif /* <RHEL7.4 */
+#else /* >=4.9 */
+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+					   int __always_unused work_done)
+{
+	/* it was really hard to get napi_complete_done to be safe to call
+	 * recursively without running into our own kcompat, so just use
+	 * napi_complete
+	 */
+	napi_complete(napi);
+
+	/* true means that the stack is telling the driver to go ahead and
+	 * re-enable interrupts
+	 */
+	return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0)))
+#define HAVE_STRUCT_DMA_ATTRS
+#endif /* (SLES == 12.3.0) */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define
HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#else /* > 4.13 */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define 
xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. + */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
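+ *
+ * A sketch of the intended wrapper pattern (editorial illustration;
+ * foo_get_settings and foo_get_link_ksettings are hypothetical driver
+ * functions, not part of this patch):
+ *
+ *	static int foo_get_settings(struct net_device *netdev,
+ *				    struct ethtool_cmd *cmd)
+ *	{
+ *		struct ethtool_link_ksettings ks;
+ *
+ *		foo_get_link_ksettings(netdev, &ks);
+ *		_kc_ethtool_ksettings_to_cmd(&ks, cmd);
+ *		return 0;
+ *	}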
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_NDO_BPF +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. 
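+	 *
+	 * Worked example (editorial note): for size == 8, any index in
+	 * [0, 7] leaves the sign bit of (index | (size - 1 - index))
+	 * clear, so its negation is negative and the arithmetic shift
+	 * below yields an all-ones mask; for index >= 8 the subtraction
+	 * wraps, the sign bit is set, and the shift yields 0, clamping
+	 * any speculative access to element 0.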
+ */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +#else /* >= 4.16 */ +#include +#define HAVE_XDP_BUFF_RXQ +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#ifdef NETIF_F_HW_L2FW_DOFFLOAD +#include +#ifndef macvlan_supports_dest_filter +#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter +static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0))) +#ifndef macvlan_accel_priv +#define macvlan_accel_priv _kc_macvlan_accel_priv +static inline void *_kc_macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->fwd_priv; +} +#endif + +#ifndef macvlan_release_l2fw_offload +#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload +static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + macvlan->fwd_priv = NULL; + return dev_uc_add(macvlan->lowerdev, dev->dev_addr); +} +#endif +#endif /* !SLES || SLES < 15.1 */ +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ +#include "kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) +#define firmware_request_nowarn request_firmware_direct +#endif /* !SLES || SLES < 15.1 */ + +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#define HAVE_AF_XDP_SUPPORT +#ifndef xdp_umem_get_data +static 
inline char *__kc_xdp_umem_get_data(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); +} + +#define xdp_umem_get_data __kc_xdp_umem_get_data +#endif /* !xdp_umem_get_data */ +#ifndef xdp_umem_get_dma +static inline dma_addr_t __kc_xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); +} + +#define xdp_umem_get_dma __kc_xdp_umem_get_dma +#endif /* !xdp_umem_get_dma */ +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#else /* >= 4.20.0 */ +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open 
_kc_dev_open +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
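+	 * For example, an expression such as rule->action.num_entries
+	 * builds against 5.1+ but is rejected here at compile time
+	 * (editorial illustration of the point above).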
+ */ + struct flow_action action; +#endif +}; + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#ifdef HAVE_SKB_XMIT_MORE +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#ifndef eth_get_headlen +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* !eth_get_headlen */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_offload tc_cls_flower_offload +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define 
FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only); + +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else /* >= 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#define HAVE_FLOW_BLOCK_API +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} + +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} + +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/kcompat_overflow.h b/drivers/net/ethernet/intel/i40e/kcompat_overflow.h new file mode 100644 index 000000000000..b7848fed0167 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/kcompat_overflow.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2020 Intel Corporation. */ + +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro. 
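+ *
+ * A quick sanity check of these macros (editorial example):
+ * _kc_is_signed_type(int) evaluates to 1 and _kc_is_signed_type(u32)
+ * to 0, so __type_half_max(u32) is 1U << 31, type_max(u32) works out
+ * to 0xffffffff, and type_max(s32) to 0x7fffffff.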
+ */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. 
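+ *
+ * Worked s8 example (editorial note): 100 - (-100) is 200, which wraps
+ * to -56. The operands have opposite signs and the wrapped result's
+ * sign differs from that of a, so the expression below reports
+ * overflow.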
+ */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. 
+ * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/drivers/net/ethernet/intel/i40e/kcompat_vfd.c b/drivers/net/ethernet/intel/i40e/kcompat_vfd.c new file mode 100644 index 000000000000..04b71de046ec --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/kcompat_vfd.c @@ -0,0 +1,2363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2020 Intel Corporation. */ + +#include "kcompat.h" +#include "kcompat_vfd.h" + +#define to_dev(obj) container_of(obj, struct device, kobj) + +const struct vfd_ops *vfd_ops = NULL; + +/** + * __get_pf_pdev - helper function to get the pdev + * @kobj: kobject passed + * @pdev: PCI device information struct + */ +static int __get_pf_pdev(struct kobject *kobj, struct pci_dev **pdev) +{ + struct device *dev; + + if (!kobj->parent) + return -EINVAL; + + /* get pdev */ + dev = to_dev(kobj->parent); + *pdev = to_pci_dev(dev); + + return 0; +} + +/** + * __get_pdev_and_vfid - helper function to get the pdev and the vf id + * @kobj: kobject passed + * @pdev: PCI device information struct + * @vf_id: VF id of the VF under consideration + */ +static int __get_pdev_and_vfid(struct kobject *kobj, struct pci_dev **pdev, + int *vf_id) +{ + struct device *dev; + + if (!kobj->parent->parent) + return -EINVAL; + + /* get pdev */ + dev = to_dev(kobj->parent->parent); + *pdev = to_pci_dev(dev); + + /* get vf_id */ + if (kstrtoint(kobj->name, 10, vf_id) != 0) { + dev_err(&(*pdev)->dev, "Failed to convert %s to vf_id\n", + kobj->name); + return -EINVAL; + } + + return 0; +} + +/** + * __parse_bool_data - helper function to parse boolean data + * @pdev: PCI device information struct + * @buff: buffer with input data + * @attr_name: name of the attribute + * @data: pointer to output data + */ +static int __parse_bool_data(struct pci_dev *pdev, const char *buff, + const char *attr_name, bool *data) +{ + if (sysfs_streq("on", buff)) { + *data = true; + } else if (sysfs_streq("off", buff)) { + *data = false; + } else { + dev_err(&pdev->dev, "set %s: invalid input string", attr_name); + return -EINVAL; + } + return 0; +} + +/** + * __parse_egress_ingress_input - helper function for ingress/egress_mirror attributes + * @pdev: PCI device information struct + * @buff: buffer with input data + * @attr_name: name of the attribute + * @data_new: pointer to input data merged with the old data + * 
@data_old: pointer to old data of the attribute + * + * Get the input data for egress_mirror or ingress_mirror attribute in the form + * "add <number>" or "rem <number>". + * Set the output data to off if, in "rem <number>", <number> matches the old + * data. + * + */ +static int __parse_egress_ingress_input(struct pci_dev *pdev, const char *buff, + const char *attr_name, int *data_new, + int *data_old) +{ + int ret = 0; + char *p; + + if (strstr(buff, "add")) { + p = strstr(buff, "add"); + + ret = kstrtoint(p + sizeof("add"), 10, data_new); + if (ret) { + dev_err(&pdev->dev, + "add %s: input error %d\n", attr_name, ret); + return ret; + } + } else if (strstr(buff, "rem")) { + p = strstr(buff, "rem"); + + ret = kstrtoint(p + sizeof("rem"), 10, data_new); + if (ret) { + dev_err(&pdev->dev, + "rem %s: input error %d\n", attr_name, ret); + return ret; + } + + if (*data_new == *data_old) { + if (!strcmp(attr_name, "egress_mirror")) + *data_new = VFD_EGRESS_MIRROR_OFF; + else if (!strcmp(attr_name, "ingress_mirror")) + *data_new = VFD_INGRESS_MIRROR_OFF; + } else { + dev_err(&pdev->dev, + "rem %s: input doesn't match current value", + attr_name); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "set %s: invalid input string", attr_name); + return -EINVAL; + } + + return ret; +} + +/** + * __parse_add_rem_bitmap - helper function to parse bitmap data + * @pdev: PCI device information struct + * @buff: buffer with input data + * @attr_name: name of the attribute + * @data_new: pointer to input data merged with the old data + * @data_old: pointer to old data of the attribute + * + * If passed add: set data_new to "data_old | data_input" + * If passed rem: set data_new to "data_old & ~data_input" + */ +static int __parse_add_rem_bitmap(struct pci_dev *pdev, const char *buff, + const char *attr_name, + unsigned long *data_new, + unsigned long *data_old) +{ + int ret = 0; + char *p; + + if (strstr(buff, "add")) { + p = strstr(buff, "add"); + bitmap_zero(data_new, VLAN_N_VID); + + ret = bitmap_parselist(p + sizeof("add"), data_new, VLAN_N_VID); + if (ret) { + dev_err(&pdev->dev, + "add %s: input error %d\n", attr_name, ret); + return ret; + } + + bitmap_or(data_new, data_new, data_old, VLAN_N_VID); + } else if (strstr(buff, "rem")) { + p = strstr(buff, "rem"); + bitmap_zero(data_new, VLAN_N_VID); + + ret = bitmap_parselist(p + sizeof("rem"), data_new, VLAN_N_VID); + if (ret) { + dev_err(&pdev->dev, + "rem %s: input error %d\n", attr_name, ret); + return ret; + } + + /* new = old & ~rem */ + bitmap_andnot(data_new, data_old, data_new, VLAN_N_VID); + } else { + dev_err(&pdev->dev, "set %s: invalid input string", attr_name); + return -EINVAL; + } + return 0; +} + +/** + * __parse_promisc_input - helper function for promisc attributes + * @buff: buffer with input data + * @count: size of buff + * @cmd: return pointer to cmd into buff + * @subcmd: return pointer to subcmd into buff + * + * Get the input data for promisc attributes in the form "add/rem mcast/ucast".
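+ * E.g. "add ucast" or "rem mcast" (usage examples inferred from the + * parser below, not from the original comment).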
+ */ +static int __parse_promisc_input(const char *buff, size_t count, + const char **cmd, const char **subcmd) +{ + size_t idx = 0; + + /* Remove start spaces */ + while (idx < count && buff[idx] == ' ') + idx++; + + /* Parse cmd */ + if (strncmp(&buff[idx], "add", strlen("add")) == 0) { + *cmd = &buff[idx]; + idx += strlen("add"); + } else if (strncmp(&buff[idx], "rem", strlen("rem")) == 0) { + *cmd = &buff[idx]; + idx += strlen("rem"); + } else { + return -EINVAL; + } + + if (buff[idx++] != ' ') + return -EINVAL; + + /* Remove spaces between cmd and subcmd */ + while (idx < count && buff[idx] == ' ') + idx++; + + /* Parse subcmd */ + if (strncmp(&buff[idx], "ucast", strlen("ucast")) == 0) { + *subcmd = &buff[idx]; + idx += strlen("ucast"); + } else if (strncmp(&buff[idx], "mcast", strlen("mcast")) == 0) { + *subcmd = &buff[idx]; + idx += strlen("mcast"); + } else { + return -EINVAL; + } + + /* Remove spaces after subcmd */ + while (idx < count && (buff[idx] == ' ' || buff[idx] == '\n')) + idx++; + + if (idx != count) + return -EINVAL; + + return 0; +} + +/* Handlers for each VFd operation */ + +/** + * vfd_trunk_show - handler for trunk show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + * + * Get current data from driver and copy to buffer + **/ +static ssize_t vfd_trunk_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + + DECLARE_BITMAP(data, VLAN_N_VID); + bitmap_zero(data, VLAN_N_VID); + + if (!vfd_ops->get_trunk) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_trunk(pdev, vf_id, data); + if (ret) + ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID); + + return ret; +} + +/** + * vfd_trunk_store - handler for trunk store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * Get current data from driver, compose new data based on input values + * depending on "add" or "rem" command, and pass new data to the driver to set. + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_trunk_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + unsigned long *data_old, *data_new; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_trunk || !vfd_ops->get_trunk) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_old) + return -ENOMEM; + data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_new) { + kfree(data_old); + return -ENOMEM; + } + + ret = vfd_ops->get_trunk(pdev, vf_id, data_old); + if (ret < 0) + goto err_free; + + ret = __parse_add_rem_bitmap(pdev, buff, "trunk", data_new, data_old); + if (ret) + goto err_free; + + if (!bitmap_equal(data_new, data_old, VLAN_N_VID)) + ret = vfd_ops->set_trunk(pdev, vf_id, data_new); + +err_free: + kfree(data_old); + kfree(data_new); + return ret ?
ret : count; +} + +/** + * vfd_vlan_mirror_show - handler for vlan_mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * + * Get current data from driver and copy to buffer + **/ +static ssize_t vfd_vlan_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + + DECLARE_BITMAP(data, VLAN_N_VID); + bitmap_zero(data, VLAN_N_VID); + + if (!vfd_ops->get_vlan_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data); + if (ret) + ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID); + + return ret; +} + +/** + * vfd_vlan_mirror_store - handler for vlan_mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * Get current data from driver, compose new data based on input values + * depending on "add" or "rem" command, and pass new data to the driver to set. + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_vlan_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + unsigned long *data_old, *data_new; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_vlan_mirror || !vfd_ops->get_vlan_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_old) + return -ENOMEM; + data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_new) { + kfree(data_old); + return -ENOMEM; + } + + ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data_old); + if (ret < 0) + goto err_free; + + ret = __parse_add_rem_bitmap(pdev, buff, "vlan_mirror", + data_new, data_old); + if (ret) + goto err_free; + + if (!bitmap_equal(data_new, data_old, VLAN_N_VID)) + ret = vfd_ops->set_vlan_mirror(pdev, vf_id, data_new); + +err_free: + kfree(data_old); + kfree(data_new); + return ret ? 
ret : count; +} + +/** + * vfd_egress_mirror_show - handler for egress_mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_egress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + int data; + + if (!vfd_ops->get_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data == VFD_EGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * vfd_egress_mirror_store - handler for egress_mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_egress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_egress_mirror || !vfd_ops->get_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror", + &data_new, &data_old); + if (ret) + return ret; + if (data_new == vf_id) { + dev_err(&pdev->dev, "VF %d: Setting egress_mirror to itself is not allowed\n", vf_id); + return -EINVAL; + } + + if (data_new != data_old) + ret = vfd_ops->set_egress_mirror(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_ingress_mirror_show - handler for ingress_mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_ingress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + int data; + + if (!vfd_ops->get_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data == VFD_INGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * vfd_ingress_mirror_store - handler for ingress_mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_ingress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_ingress_mirror || !vfd_ops->get_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror", + &data_new, &data_old); + if (ret) + return ret; + if (data_new == vf_id) { + dev_err(&pdev->dev, "VF %d: Setting ingress_mirror to itself is not allowed\n", vf_id); + return -EINVAL; + } + + if (data_new != data_old) + ret = vfd_ops->set_ingress_mirror(pdev, vf_id, data_new); + + return ret ?
ret : count; +} + +/** + * vfd_allow_untagged_show - handler for allow_untagged show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_allow_untagged_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_allow_untagged) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_allow_untagged_store - handler for allow_untagged store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_allow_untagged_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_allow_untagged || !vfd_ops->get_allow_untagged) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "allow_untagged", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_allow_untagged(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_loopback_show - handler for loopback show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_loopback_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_loopback) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_loopback(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_loopback_store - handler for loopback store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_loopback_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_loopback || !vfd_ops->get_loopback) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_loopback(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "loopback", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_loopback(pdev, vf_id, data_new); + + return ret ? 
ret : count; +} + +/** + * vfd_mac_show - handler for mac show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_mac_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buff) +{ + u8 macaddr[ETH_ALEN]; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->get_mac) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac(pdev, vf_id, macaddr); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%pM\n", macaddr); + + return ret; +} + +/** + * vfd_mac_store - handler for mac store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_mac_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + u8 macaddr[ETH_ALEN]; + u8 macaddr_old[ETH_ALEN]; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_mac || !vfd_ops->get_mac) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac(pdev, vf_id, macaddr_old); + if (ret < 0) + return ret; + + ret = sscanf(buff, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &macaddr[0], &macaddr[1], &macaddr[2], + &macaddr[3], &macaddr[4], &macaddr[5]); + + if (ret != 6) + return -EINVAL; + + if (!ether_addr_equal(macaddr, macaddr_old)) + ret = vfd_ops->set_mac(pdev, vf_id, macaddr); + + return ret ? ret : count; +} + +/** + * vfd_mac_list_show - handler for mac_list show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + * + * This function also frees the memory allocated for mac_list in another function. + * + **/ +static ssize_t vfd_mac_list_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + unsigned int mac_num_allowed, mac_num_list, mac_num_count; + const char *overflow_msg = "... 
and more\n"; + unsigned int mac_msg_len = 3*ETH_ALEN; + struct list_head *pos, *n; + struct pci_dev *pdev; + int vf_id, ret; + char *written; + LIST_HEAD(mac_list); + + if (!vfd_ops->get_mac_list) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac_list(pdev, vf_id, &mac_list); + if (ret < 0) + goto err_free; + + mac_num_list = 0; + mac_num_count = 0; + list_for_each_safe(pos, n, &mac_list) + mac_num_list++; + + mac_num_allowed = (PAGE_SIZE - 1) / mac_msg_len; + if (mac_num_list > mac_num_allowed) + mac_num_allowed = (PAGE_SIZE - 1 - strlen(overflow_msg)) / + mac_msg_len; + + written = buff; + list_for_each_safe(pos, n, &mac_list) { + struct vfd_macaddr *mac = NULL; + + mac_num_count++; + mac = list_entry(pos, struct vfd_macaddr, list); + if (mac_num_count > mac_num_allowed) { + ret += scnprintf(written, PAGE_SIZE - ret, + "%s", overflow_msg); + goto err_free; + } else if (list_is_last(pos, &mac_list)) { + ret += scnprintf(written, PAGE_SIZE - ret, + "%pM\n", mac->mac); + } else { + ret += scnprintf(written, PAGE_SIZE - ret, + "%pM,", mac->mac); + } + written += mac_msg_len; + } + +err_free: + list_for_each_safe(pos, n, &mac_list) { + struct vfd_macaddr *mac = NULL; + + mac = list_entry(pos, struct vfd_macaddr, list); + list_del(pos); + kfree(mac); + } + return ret; +} + +/** + * vfd_mac_list_store - handler for mac_list store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * Get input mac list into the linked list and depending on "add" or "rem" command + * pass the input mac list to the driver to either add or remove macs to the list. + * + * This function also frees the memory allocated for mac_list in another function. + * + **/ +static ssize_t vfd_mac_list_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct list_head *pos, *n; + struct pci_dev *pdev; + u8 macaddr[ETH_ALEN]; + int vf_id, ret; + size_t shift; + bool add; + LIST_HEAD(mac_list_inp); + + if (!vfd_ops->add_macs_to_list || !vfd_ops->rem_macs_from_list) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + if (strstr(buff, "add")) { + shift = sizeof("add"); + add = true; + } else if (strstr(buff, "rem")) { + shift = sizeof("rem"); + add = false; + } else { + dev_err(&pdev->dev, "Invalid input string"); + ret = -EINVAL; + goto err_free; + } + + /* Get input data */ + for (;;) { + struct vfd_macaddr *mac_new; + + if (*(buff + shift) == ' ' || *(buff + shift) == ',') { + shift++; + continue; + } + + ret = sscanf(buff + shift, + "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &macaddr[0], &macaddr[1], &macaddr[2], + &macaddr[3], &macaddr[4], &macaddr[5]); + + if (ret != 6) + break; + + if (!is_valid_ether_addr(macaddr)) { + shift += 3*ETH_ALEN; + continue; + } + + mac_new = kmalloc(sizeof(struct vfd_macaddr), GFP_KERNEL); + if (!mac_new) { + ret = -ENOMEM; + goto err_free; + } + + ether_addr_copy(mac_new->mac, macaddr); + list_add(&mac_new->list, &mac_list_inp); + + shift += 3*ETH_ALEN; + } + + if (add) + ret = vfd_ops->add_macs_to_list(pdev, vf_id, &mac_list_inp); + else + ret = vfd_ops->rem_macs_from_list(pdev, vf_id, &mac_list_inp); + +err_free: + list_for_each_safe(pos, n, &mac_list_inp) { + struct vfd_macaddr *mac = NULL; + + mac = list_entry(pos, struct vfd_macaddr, list); + list_del(pos); + kfree(mac); + } + return ret ? 
ret : count; +} + +/** + * vfd_promisc_show - handler for promisc show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_promisc_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u8 data; + + if (!vfd_ops->get_promisc) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_promisc(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data == VFD_PROMISC_UNICAST) + ret = scnprintf(buff, PAGE_SIZE, "ucast\n"); + else if (data == VFD_PROMISC_MULTICAST) + ret = scnprintf(buff, PAGE_SIZE, "mcast\n"); + else if (data == (VFD_PROMISC_UNICAST | VFD_PROMISC_MULTICAST)) + ret = scnprintf(buff, PAGE_SIZE, "ucast, mcast\n"); + else if (data == VFD_PROMISC_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_promisc_store - handler for promisc store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_promisc_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + u8 data_new, data_old; + struct pci_dev *pdev; + const char *subcmd; + const char *cmd; + int vf_id, ret; + + if (!vfd_ops->get_promisc || !vfd_ops->set_promisc) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_promisc(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_promisc_input(buff, count, &cmd, &subcmd); + if (ret) + goto promisc_err; + + if (strncmp(cmd, "add", strlen("add")) == 0) { + if (strncmp(subcmd, "ucast", strlen("ucast")) == 0) + data_new = data_old | VFD_PROMISC_UNICAST; + else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0) + data_new = data_old | VFD_PROMISC_MULTICAST; + else + goto promisc_err; + } else if (strncmp(cmd, "rem", strlen("rem")) == 0) { + if (strncmp(subcmd, "ucast", strlen("ucast")) == 0) + data_new = data_old & ~VFD_PROMISC_UNICAST; + else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0) + data_new = data_old & ~VFD_PROMISC_MULTICAST; + else + goto promisc_err; + } else { + goto promisc_err; + } + + if (data_new != data_old) + ret = vfd_ops->set_promisc(pdev, vf_id, data_new); + + return ret ? 
ret : count; + +promisc_err: + dev_err(&pdev->dev, "Invalid input string"); + return -EINVAL; +} + +/** + * vfd_vlan_strip_show - handler for vlan_strip show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_vlan_strip_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_vlan_strip) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_vlan_strip_store - handler for vlan_strip store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_vlan_strip_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_vlan_strip || !vfd_ops->get_vlan_strip) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "vlan_strip", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_vlan_strip(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_link_state_show - handler for link_state show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_link_state_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + enum vfd_link_speed link_speed; + struct pci_dev *pdev; + int vf_id, ret = 0; + bool enabled; + + if (!vfd_ops->get_link_state) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_link_state(pdev, vf_id, &enabled, &link_speed); + if (ret < 0) + return ret; + + if (enabled) { + const char *speed_str; + + switch (link_speed) { + case VFD_LINK_SPEED_100MB: + speed_str = "100 Mbps"; + break; + case VFD_LINK_SPEED_1GB: + speed_str = "1 Gbps"; + break; + case VFD_LINK_SPEED_2_5GB: + speed_str = "2.5 Gbps"; + break; + case VFD_LINK_SPEED_5GB: + speed_str = "5 Gbps"; + break; + case VFD_LINK_SPEED_10GB: + speed_str = "10 Gbps"; + break; + case VFD_LINK_SPEED_40GB: + speed_str = "40 Gbps"; + break; + case VFD_LINK_SPEED_20GB: + speed_str = "20 Gbps"; + break; + case VFD_LINK_SPEED_25GB: + speed_str = "25 Gbps"; + break; + case VFD_LINK_SPEED_UNKNOWN: + speed_str = "unknown speed"; + break; + default: + dev_err(&pdev->dev, "Link speed is not supported"); + return -EOPNOTSUPP; + } + + ret = scnprintf(buff, PAGE_SIZE, "%s, %s\n", "up", speed_str); + } else { + ret = scnprintf(buff, PAGE_SIZE, "down\n"); + } + + return ret; +} + +/** + * vfd_link_state_store - handler for link_state store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_link_state_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u8 data; + + if (!vfd_ops->set_link_state) + return -EOPNOTSUPP; + 
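+ /* Accepted inputs, per the sysfs_streq() checks below: "enable", + * "disable" or "auto"; anything else is rejected with -EINVAL. + * (Descriptive comment added here; it restates the parsing logic + * that follows.) + */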
+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + if (sysfs_streq("enable", buff)) { + data = VFD_LINKSTATE_ON; + } else if (sysfs_streq("disable", buff)) { + data = VFD_LINKSTATE_OFF; + } else if (sysfs_streq("auto", buff)) { + data = VFD_LINKSTATE_AUTO; + } else { + dev_err(&pdev->dev, "Invalid input string"); + return -EINVAL; + } + + ret = vfd_ops->set_link_state(pdev, vf_id, data); + + return ret ? ret : count; +} + +/** + * vfd_enable_show - handler for VF enable/disable show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + bool data; + + if (!vfd_ops->get_vf_enable) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vf_enable(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_enable_store - handler for VF enable/disable store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + bool data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_vf_enable || !vfd_ops->get_vf_enable) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vf_enable(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "enable", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_vf_enable(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_max_tx_rate_show - handler for mac_tx_rate show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_max_tx_rate_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + unsigned int max_tx_rate; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->get_max_tx_rate) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_max_tx_rate(pdev, vf_id, &max_tx_rate); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%u\n", max_tx_rate); + return ret; +} + +/** + * vfd_max_tx_rate_store - handler for max_tx_rate store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_max_tx_rate_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + unsigned int max_tx_rate; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_max_tx_rate) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = kstrtouint(buff, 10, &max_tx_rate); + if (ret) { + dev_err(&pdev->dev, + "Invalid argument, not a decimal number: %s", buff); + return ret; + } + + ret = vfd_ops->set_max_tx_rate(pdev, vf_id, &max_tx_rate); + + return ret ? 
ret : count; +} + +/** + * vfd_min_tx_rate_show - handler for min_tx_rate show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_min_tx_rate_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + if (!vfd_ops->get_min_tx_rate) + return -EOPNOTSUPP; + + return vfd_ops->get_min_tx_rate(kobj, attr, buff); +} + +/** + * vfd_min_tx_rate_store - handler for min_tx_rate store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_min_tx_rate_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + if (!vfd_ops->set_min_tx_rate) + return -EOPNOTSUPP; + + return vfd_ops->set_min_tx_rate(kobj, attr, buff, count); +} + +/** + * vfd_trust_show - handler for trust show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_trust_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + bool data; + + if (!vfd_ops->get_trust_state) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_trust_state(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_trust_store - handler for trust store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_trust_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + bool data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_trust_state || !vfd_ops->get_trust_state) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_trust_state(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "trust", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_trust_state(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_reset_stats_store - handler for reset stats store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_reset_stats_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int vf_id, reset, ret; + struct pci_dev *pdev; + + if (!vfd_ops->reset_stats) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + ret = kstrtoint(buff, 10, &reset); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + if (reset != 1) + return -EINVAL; + + ret = vfd_ops->reset_stats(pdev, vf_id); + + return ret ? 
ret : count; +} + +/** + * vfd_rx_bytes_show - handler for rx_bytes show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_rx_bytes_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_rx_bytes) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_rx_bytes(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_rx_dropped_show - handler for rx_dropped show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_rx_dropped_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_rx_dropped) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_rx_dropped(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_rx_packets_show - handler for rx_packets show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_rx_packets_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_rx_packets) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_rx_packets(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_bytes_show - handler for tx_bytes show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_bytes_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_bytes) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_bytes(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_dropped_show - handler for tx_dropped show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_dropped_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_dropped) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_dropped(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_packets_show - handler for tx_packets show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_packets_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_packets) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return 
ret; + + ret = vfd_ops->get_tx_packets(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_spoofed_show - handler for tx_spoofed show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_spoofed_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_spoofed) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_spoofed(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_errors_show - handler for tx_errors show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_errors_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_errors) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_errors(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * qos_share_show - handler for the bw_share show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t qos_share_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + u8 data = 0; + + if (!vfd_ops->get_vf_bw_share) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vf_bw_share(pdev, vf_id, &data); + if (ret < 0) { + dev_err(&pdev->dev, "No bw share applied for VF %d\n", vf_id); + return ret; + } + + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * qos_share_store - handler for the bw_share store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t qos_share_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret; + u8 bw_share; + + if (!vfd_ops->set_vf_bw_share) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + /* parse the bw_share */ + ret = kstrtou8(buff, 10, &bw_share); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + /* check that the BW is between 1 and 100 */ + if (bw_share < 1 || bw_share > 100) { + dev_err(&pdev->dev, "BW share has to be between 1-100\n"); + return -EINVAL; + } + ret = vfd_ops->set_vf_bw_share(pdev, vf_id, bw_share); + return ret ? 
ret : count; +} + +/** + * pf_qos_apply_store - handler for pf qos apply store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_qos_apply_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int ret, apply; + struct pci_dev *pdev; + + if (!vfd_ops->set_pf_qos_apply) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = kstrtoint(buff, 10, &apply); + if (ret) { + dev_err(&pdev->dev, + "Invalid input\n"); + return ret; + } + + if (apply != 1) + return -EINVAL; + + ret = vfd_ops->set_pf_qos_apply(pdev); + + return ret ? ret : count; +} + +/** + * pf_ingress_mirror_show - handler for PF ingress mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t pf_ingress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int ret, data; + + if (!vfd_ops->get_pf_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_ingress_mirror(pdev, &data); + if (ret < 0) + return ret; + + if (data == VFD_INGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * pf_ingress_mirror_store - handler for pf ingress mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_ingress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int ret; + + if (!vfd_ops->set_pf_ingress_mirror || !vfd_ops->get_pf_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_ingress_mirror(pdev, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror", + &data_new, &data_old); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_pf_ingress_mirror(pdev, data_new); + + return ret ? 
ret : count; +} + +/** + * pf_egress_mirror_show - handler for PF egress mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t pf_egress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int ret, data; + + if (!vfd_ops->get_pf_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_egress_mirror(pdev, &data); + if (ret < 0) + return ret; + + if (data == VFD_EGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * pf_egress_mirror_store - handler for pf egress mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_egress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int ret; + + if (!vfd_ops->set_pf_egress_mirror || !vfd_ops->get_pf_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_egress_mirror(pdev, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror", + &data_new, &data_old); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_pf_egress_mirror(pdev, data_new); + + return ret ? ret : count; +} + +/** + * pf_tpid_show - handler for pf tpid show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t pf_tpid_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + u16 data; + int ret; + + if (!vfd_ops->get_pf_tpid) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_tpid(pdev, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%x\n", data); + + return ret; +} + +/** + * pf_tpid_store - handler for pf tpid store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_tpid_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + u16 data; + int ret; + + if (!vfd_ops->set_pf_tpid) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = kstrtou16(buff, 16, &data); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + ret = vfd_ops->set_pf_tpid(pdev, data); + + return ret ? 
ret : count; +} + +/** + * vfd_num_queues_show - handler for num_queues show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_num_queues_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + int data; + + if (!vfd_ops->get_num_queues) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_num_queues(pdev, vf_id, &data); + if (ret) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%d\n", data); + + return ret; +} + +/** + * vfd_num_queues_store - handler for num_queues store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_num_queues_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_num_queues) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_num_queues(pdev, vf_id, &data_old); + if (ret) + return ret; + + ret = kstrtoint(buff, 10, &data_new); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + if (data_new < 1) { + dev_err(&pdev->dev, "VF queue count must be at least 1\n"); + return -EINVAL; + } + + if (data_new != data_old) + ret = vfd_ops->set_num_queues(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +static struct kobj_attribute trunk_attribute = + __ATTR(trunk, 0644, vfd_trunk_show, vfd_trunk_store); +static struct kobj_attribute vlan_mirror_attribute = + __ATTR(vlan_mirror, 0644, vfd_vlan_mirror_show, vfd_vlan_mirror_store); +static struct kobj_attribute egress_mirror_attribute = + __ATTR(egress_mirror, 0644, + vfd_egress_mirror_show, vfd_egress_mirror_store); +static struct kobj_attribute ingress_mirror_attribute = + __ATTR(ingress_mirror, 0644, + vfd_ingress_mirror_show, vfd_ingress_mirror_store); +static struct kobj_attribute allow_untagged_attribute = + __ATTR(allow_untagged, 0644, + vfd_allow_untagged_show, vfd_allow_untagged_store); +static struct kobj_attribute loopback_attribute = + __ATTR(loopback, 0644, vfd_loopback_show, vfd_loopback_store); +static struct kobj_attribute mac_attribute = + __ATTR(mac, 0644, vfd_mac_show, vfd_mac_store); +static struct kobj_attribute mac_list_attribute = + __ATTR(mac_list, 0644, vfd_mac_list_show, vfd_mac_list_store); +static struct kobj_attribute promisc_attribute = + __ATTR(promisc, 0644, vfd_promisc_show, vfd_promisc_store); +static struct kobj_attribute vlan_strip_attribute = + __ATTR(vlan_strip, 0644, vfd_vlan_strip_show, vfd_vlan_strip_store); +static struct kobj_attribute link_state_attribute = + __ATTR(link_state, 0644, vfd_link_state_show, vfd_link_state_store); +static struct kobj_attribute max_tx_rate_attribute = + __ATTR(max_tx_rate, 0644, vfd_max_tx_rate_show, vfd_max_tx_rate_store); +static struct kobj_attribute min_tx_rate_attribute = + __ATTR(min_tx_rate, 0644, vfd_min_tx_rate_show, vfd_min_tx_rate_store); +static struct kobj_attribute trust_attribute = + __ATTR(trust, 0644, vfd_trust_show, vfd_trust_store); +static struct kobj_attribute reset_stats_attribute = + __ATTR(reset_stats, 0200, NULL, vfd_reset_stats_store); +static struct kobj_attribute enable_attribute = + __ATTR(enable, 0644, vfd_enable_show, vfd_enable_store); +static struct kobj_attribute 
num_queues_attribute = + __ATTR(num_queues, 0644, vfd_num_queues_show, vfd_num_queues_store); + +static struct attribute *s_attrs[] = { + &trunk_attribute.attr, + &vlan_mirror_attribute.attr, + &egress_mirror_attribute.attr, + &ingress_mirror_attribute.attr, + &allow_untagged_attribute.attr, + &loopback_attribute.attr, + &mac_attribute.attr, + &mac_list_attribute.attr, + &promisc_attribute.attr, + &vlan_strip_attribute.attr, + &link_state_attribute.attr, + &max_tx_rate_attribute.attr, + &min_tx_rate_attribute.attr, + &trust_attribute.attr, + &reset_stats_attribute.attr, + &enable_attribute.attr, + &num_queues_attribute.attr, + NULL, +}; + +static struct attribute_group vfd_group = { + .attrs = s_attrs, +}; + +static struct kobj_attribute rx_bytes_attribute = + __ATTR(rx_bytes, 0444, vfd_rx_bytes_show, NULL); +static struct kobj_attribute rx_dropped_attribute = + __ATTR(rx_dropped, 0444, vfd_rx_dropped_show, NULL); +static struct kobj_attribute rx_packets_attribute = + __ATTR(rx_packets, 0444, vfd_rx_packets_show, NULL); +static struct kobj_attribute tx_bytes_attribute = + __ATTR(tx_bytes, 0444, vfd_tx_bytes_show, NULL); +static struct kobj_attribute tx_dropped_attribute = + __ATTR(tx_dropped, 0444, vfd_tx_dropped_show, NULL); +static struct kobj_attribute tx_packets_attribute = + __ATTR(tx_packets, 0444, vfd_tx_packets_show, NULL); +static struct kobj_attribute tx_spoofed_attribute = + __ATTR(tx_spoofed, 0444, vfd_tx_spoofed_show, NULL); +static struct kobj_attribute tx_errors_attribute = + __ATTR(tx_errors, 0444, vfd_tx_errors_show, NULL); + +static struct attribute *stats_attrs[] = { + &rx_bytes_attribute.attr, + &rx_dropped_attribute.attr, + &rx_packets_attribute.attr, + &tx_bytes_attribute.attr, + &tx_dropped_attribute.attr, + &tx_packets_attribute.attr, + &tx_spoofed_attribute.attr, + &tx_errors_attribute.attr, + NULL, +}; + +static struct attribute_group stats_group = { + .name = "stats", + .attrs = stats_attrs, +}; + +static struct kobj_attribute share_attribute = + __ATTR(share, 0644, qos_share_show, qos_share_store); + +static struct attribute *qos_attrs[] = { + &share_attribute.attr, + NULL, +}; + +static struct attribute_group qos_group = { + .name = "qos", + .attrs = qos_attrs, +}; + +static struct kobj_attribute apply_attribute = + __ATTR(apply, 0200, NULL, pf_qos_apply_store); + +static struct attribute *pf_qos_attrs[] = { + &apply_attribute.attr, + NULL, +}; + +static struct attribute_group pf_qos_group = { + .name = "qos", + .attrs = pf_qos_attrs, +}; + +static struct kobj_attribute pf_ingress_mirror_attribute = + __ATTR(ingress_mirror, 0644, pf_ingress_mirror_show, pf_ingress_mirror_store); +static struct kobj_attribute pf_egress_mirror_attribute = + __ATTR(egress_mirror, 0644, pf_egress_mirror_show, pf_egress_mirror_store); +static struct kobj_attribute pf_tpid_attribute = + __ATTR(tpid, 0644, pf_tpid_show, pf_tpid_store); + +static struct attribute *pf_attrs[] = { + &pf_ingress_mirror_attribute.attr, + &pf_egress_mirror_attribute.attr, + &pf_tpid_attribute.attr, + NULL, +}; + +static struct attribute_group pf_attr_group = { + .attrs = pf_attrs, +}; + +/** + * create_vfs_sysfs - create sysfs hierarchy for VF + * @pdev: PCI device information struct + * @vfd_obj: VF-d kobjects information struct + * + * Creates a kobject for Virtual Function and assigns attributes to it. 
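+ * + * The resulting layout (as built below) is one numbered kobject per VF + * under the PF's "sriov" directory, each populated with the main VF-d + * attribute group plus the "stats" and "qos" attribute groups.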
+ **/
+static int create_vfs_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj)
+{
+	struct kobject *vf_kobj;
+	char kname[4];
+	int ret, i;
+
+	for (i = 0; i < vfd_obj->num_vfs; i++) {
+		int length = snprintf(kname, sizeof(kname), "%d", i);
+
+		if (length >= sizeof(kname)) {
+			dev_err(&pdev->dev,
+				"cannot request %d VFs, try again with a smaller number of VFs\n",
+				i);
+			--i;
+			ret = -EINVAL;
+			goto err_vfs_sysfs;
+		}
+
+		vf_kobj = kobject_create_and_add(kname, vfd_obj->sriov_kobj);
+		if (!vf_kobj) {
+			dev_err(&pdev->dev,
+				"failed to create VF kobj: %s\n", kname);
+			i--;
+			ret = -ENOMEM;
+			goto err_vfs_sysfs;
+		}
+		dev_info(&pdev->dev, "created VF %s sysfs\n", vf_kobj->name);
+		vfd_obj->vf_kobj[i] = vf_kobj;
+
+		/* create VF sys attr */
+		ret = sysfs_create_group(vfd_obj->vf_kobj[i], &vfd_group);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to create VF sys attribute: %d\n", i);
+			goto err_vfs_sysfs;
+		}
+
+		/* create VF stats sys attr */
+		ret = sysfs_create_group(vfd_obj->vf_kobj[i], &stats_group);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to create VF stats attribute: %d\n", i);
+			goto err_vfs_sysfs;
+		}
+
+		/* create VF qos sys attr */
+		ret = sysfs_create_group(vfd_obj->vf_kobj[i], &qos_group);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to create VF qos attribute: %d\n", i);
+			goto err_vfs_sysfs;
+		}
+	}
+
+	return 0;
+
+err_vfs_sysfs:
+	for (; i >= 0; i--)
+		kobject_put(vfd_obj->vf_kobj[i]);
+	return ret;
+}
+
+/**
+ * create_vfd_sysfs - create sysfs hierarchy used by VF-d
+ * @pdev: PCI device information struct
+ * @num_alloc_vfs: number of VFs to allocate
+ *
+ * Returns NULL if the kobjects could not be created.
+ **/
+struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs)
+{
+	struct vfd_objects *vfd_obj;
+	int ret;
+
+	vfd_obj = kzalloc(sizeof(*vfd_obj) +
+			  sizeof(struct kobject *) * num_alloc_vfs, GFP_KERNEL);
+	if (!vfd_obj)
+		return NULL;
+
+	vfd_obj->num_vfs = num_alloc_vfs;
+
+	vfd_obj->sriov_kobj = kobject_create_and_add("sriov", &pdev->dev.kobj);
+	if (!vfd_obj->sriov_kobj)
+		goto err_sysfs;
+
+	dev_info(&pdev->dev, "created %s sysfs\n", vfd_obj->sriov_kobj->name);
+
+	ret = create_vfs_sysfs(pdev, vfd_obj);
+	if (ret)
+		goto err_sysfs;
+
+	/* create PF qos sys attr */
+	ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_qos_group);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to create PF qos sys attribute\n");
+		goto err_sysfs;
+	}
+
+	/* create PF attrs */
+	ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to create PF attr sys attribute\n");
+		goto err_sysfs;
+	}
+	return vfd_obj;
+
+err_sysfs:
+	kobject_put(vfd_obj->sriov_kobj);
+	kfree(vfd_obj);
+	return NULL;
+}
+
+/**
+ * destroy_vfd_sysfs - destroy sysfs hierarchy used by VF-d
+ * @pdev: PCI device information struct
+ * @vfd_obj: VF-d kobjects information struct
+ **/
+void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj)
+{
+	int i;
+
+	for (i = 0; i < vfd_obj->num_vfs; i++) {
+		dev_info(&pdev->dev, "deleting VF %s sysfs\n",
+			 vfd_obj->vf_kobj[i]->name);
+		kobject_put(vfd_obj->vf_kobj[i]);
+	}
+
+	dev_info(&pdev->dev, "deleting %s sysfs\n", vfd_obj->sriov_kobj->name);
+	kobject_put(vfd_obj->sriov_kobj);
+	kfree(vfd_obj);
+}
diff --git a/drivers/net/ethernet/intel/i40e/kcompat_vfd.h b/drivers/net/ethernet/intel/i40e/kcompat_vfd.h
new file mode 100644
index 000000000000..3f41ce4be6ef
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat_vfd.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
Copyright(c) 2013 - 2020 Intel Corporation. */ + +#ifndef _KCOMPAT_VFD_H_ +#define _KCOMPAT_VFD_H_ + +#define VFD_PROMISC_OFF 0x00 +#define VFD_PROMISC_UNICAST 0x01 +#define VFD_PROMISC_MULTICAST 0x02 + +#define VFD_LINKSTATE_OFF 0x00 +#define VFD_LINKSTATE_ON 0x01 +#define VFD_LINKSTATE_AUTO 0x02 + +#define VFD_EGRESS_MIRROR_OFF -1 +#define VFD_INGRESS_MIRROR_OFF -1 + +/** + * struct vfd_objects - VF-d kobjects information struct + * @num_vfs: number of VFs allocated + * @sriov_kobj: pointer to the top sriov kobject + * @vf_kobj: array of pointer to each VF's kobjects + */ +struct vfd_objects { + int num_vfs; + struct kobject *sriov_kobj; + struct kobject *vf_kobj[0]; +}; + +struct vfd_macaddr { + u8 mac[ETH_ALEN]; + struct list_head list; +}; + +#define VFD_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VFD_LINK_SPEED_100MB_SHIFT 0x1 +#define VFD_LINK_SPEED_1GB_SHIFT 0x2 +#define VFD_LINK_SPEED_10GB_SHIFT 0x3 +#define VFD_LINK_SPEED_40GB_SHIFT 0x4 +#define VFD_LINK_SPEED_20GB_SHIFT 0x5 +#define VFD_LINK_SPEED_25GB_SHIFT 0x6 +#define VFD_LINK_SPEED_5GB_SHIFT 0x7 + + +enum vfd_link_speed { + VFD_LINK_SPEED_UNKNOWN = 0, + VFD_LINK_SPEED_100MB = BIT(VFD_LINK_SPEED_100MB_SHIFT), + VFD_LINK_SPEED_1GB = BIT(VFD_LINK_SPEED_1GB_SHIFT), + VFD_LINK_SPEED_2_5GB = BIT(VFD_LINK_SPEED_2_5GB_SHIFT), + VFD_LINK_SPEED_5GB = BIT(VFD_LINK_SPEED_5GB_SHIFT), + VFD_LINK_SPEED_10GB = BIT(VFD_LINK_SPEED_10GB_SHIFT), + VFD_LINK_SPEED_40GB = BIT(VFD_LINK_SPEED_40GB_SHIFT), + VFD_LINK_SPEED_20GB = BIT(VFD_LINK_SPEED_20GB_SHIFT), + VFD_LINK_SPEED_25GB = BIT(VFD_LINK_SPEED_25GB_SHIFT), +}; + +struct vfd_ops { + int (*get_trunk)(struct pci_dev *pdev, int vf_id, unsigned long *buff); + int (*set_trunk)(struct pci_dev *pdev, int vf_id, + const unsigned long *buff); + int (*get_vlan_mirror)(struct pci_dev *pdev, int vf_id, + unsigned long *buff); + int (*set_vlan_mirror)(struct pci_dev *pdev, int vf_id, + const unsigned long *buff); + int (*get_egress_mirror)(struct pci_dev *pdev, int vf_id, int *data); + int (*set_egress_mirror)(struct pci_dev *pdev, int vf_id, + const int data); + int (*get_ingress_mirror)(struct pci_dev *pdev, int vf_id, int *data); + int (*set_ingress_mirror)(struct pci_dev *pdev, int vf_id, + const int data); + int (*get_allow_untagged)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_allow_untagged)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_loopback)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_loopback)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_mac)(struct pci_dev *pdev, int vf_id, u8 *macaddr); + int (*set_mac)(struct pci_dev *pdev, int vf_id, const u8 *macaddr); + int (*get_mac_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*add_macs_to_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*rem_macs_from_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*get_promisc)(struct pci_dev *pdev, int vf_id, u8 *data); + int (*set_promisc)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_vlan_strip)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vlan_strip)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_link_state)(struct pci_dev *pdev, int vf_id, bool *enabled, + enum vfd_link_speed *link_speed); + int (*set_link_state)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_max_tx_rate)(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate); + int (*set_max_tx_rate)(struct pci_dev *pdev, int vf_id, + unsigned int 
*max_tx_rate);
+	int (*get_min_tx_rate)(struct kobject *,
+			       struct kobj_attribute *, char *);
+	int (*set_min_tx_rate)(struct kobject *, struct kobj_attribute *,
+			       const char *, size_t);
+	int (*get_spoofcheck)(struct kobject *,
+			      struct kobj_attribute *, char *);
+	int (*set_spoofcheck)(struct kobject *, struct kobj_attribute *,
+			      const char *, size_t);
+	int (*get_trust)(struct kobject *,
+			 struct kobj_attribute *, char *);
+	int (*set_trust)(struct kobject *, struct kobj_attribute *,
+			 const char *, size_t);
+	int (*get_vf_enable)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_vf_enable)(struct pci_dev *pdev, int vf_id, const bool data);
+	int (*get_rx_bytes)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_rx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_rx_packets)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_bytes)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_packets)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_spoofed)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_errors)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*reset_stats)(struct pci_dev *pdev, int vf_id);
+	int (*set_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 bw_share);
+	int (*get_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 *bw_share);
+	int (*set_pf_qos_apply)(struct pci_dev *pdev);
+	int (*get_pf_ingress_mirror)(struct pci_dev *pdev, int *data);
+	int (*set_pf_ingress_mirror)(struct pci_dev *pdev, const int data);
+	int (*get_pf_egress_mirror)(struct pci_dev *pdev, int *data);
+	int (*set_pf_egress_mirror)(struct pci_dev *pdev, const int data);
+	int (*get_pf_tpid)(struct pci_dev *pdev, u16 *data);
+	int (*set_pf_tpid)(struct pci_dev *pdev, const u16 data);
+	int (*get_num_queues)(struct pci_dev *pdev, int vf_id, int *num_queues);
+	int (*set_num_queues)(struct pci_dev *pdev, int vf_id, const int num_queues);
+	int (*get_trust_state)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_trust_state)(struct pci_dev *pdev, int vf_id, bool data);
+};
+
+extern const struct vfd_ops *vfd_ops;
+
+#endif /* _KCOMPAT_VFD_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/virtchnl.h b/drivers/net/ethernet/intel/i40e/virtchnl.h
new file mode 100644
index 000000000000..9f131076f0e0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/virtchnl.h
@@ -0,0 +1,951 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF
+ * can have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
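+ *
+ * As a sketch of the "indirect only" rule above, even the two-word
+ * VIRTCHNL_OP_VERSION payload rides in an external buffer rather than
+ * in the descriptor itself (illustrative, not taken from driver code):
+ *
+ *   struct virtchnl_version_info vvi = {
+ *           .major = VIRTCHNL_VERSION_MAJOR,
+ *           .minor = VIRTCHNL_VERSION_MINOR,
+ *   };
+ *   sent with desc->opcode = aqc_opc_send_msg_to_pf and &vvi as the buffer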
+ * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. + */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +/* Backward compatibility */ +#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM +#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED + +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
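+ *
+ * A typical VF bring-up therefore proceeds roughly as follows (sketch,
+ * error handling omitted):
+ *
+ *   VIRTCHNL_OP_VERSION           check API major/minor compatibility
+ *   VIRTCHNL_OP_RESET_VF          then poll VFGEN_RSTAT for completion
+ *   VIRTCHNL_OP_GET_VF_RESOURCES  learn VSIs, queues and capabilities
+ *   VIRTCHNL_OP_CONFIG_VSI_QUEUES / VIRTCHNL_OP_CONFIG_IRQ_MAP
+ *   VIRTCHNL_OP_ENABLE_QUEUES     after which traffic may flow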
+ */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + /* opcode 19 is reserved */ + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_ENABLE_CHANNELS = 30, + VIRTCHNL_OP_DISABLE_CHANNELS = 31, + VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, + VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, + /* opcodes 34, 35, 36, 37 and 38 are reserved */ +}; + +/* These macros are used to generate compilation errors if a structure/union + * is not exactly the correct length. It gives a divide by zero error if the + * structure/union is not of the correct size, otherwise it creates an enum + * that is never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } +#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures. */ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. 
The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF capability flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. + */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 +#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 +#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000 +#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000 + /* 0X80000000 is reserved */ + +/* Define below the capability flags that are not offloads */ +#ifndef NO_VF_CAP_ADV_LINK_SPEED +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 +#endif +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. 
The + * crc_disable flag disables CRC stripping on the VF. Setting + * the crc_disable flag to 1 will disable CRC stripping for each + * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC + * offload must have been set prior to sending this info or the PF + * will ignore the request. This flag should be set the same for + * all of the queues for a VF. + */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u8 crc_disable; + u8 pad1[3]; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + * NOTE: The VF is not required to configure all queues in a single request. + * It may send multiple messages. PF drivers must correctly handle all VF + * requests. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_REQUEST_QUEUES + * VF sends this message to request the PF to allocate additional queues to + * this VF. Each VF gets a guaranteed number of queues on init but asking for + * additional queues must be negotiated. This is a best effort request as it + * is possible the PF does not have enough queues left to support the request. + * If the PF cannot support the number requested it will respond with the + * maximum number it is able to support. If the request is successful, PF will + * then reset the VF to institute required changes. + */ + +/* VF resource request */ +struct virtchnl_vf_res_request { + u16 num_queue_pairs; +}; + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. The VF may not request + * that vector 0 be used for traffic. + * PF configures interrupt mapping and returns status. + * NOTE: due to hardware requirements, all active queues (both TX and RX) + * should be mapped to interrupts, even if the driver intends to operate + * only in polling mode. In this case the interrupt may be disabled, but + * the ITR timer will still run to trigger writebacks. 
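+ *
+ * For example (illustrative values only), mapping both RX queue 0 and
+ * TX queue 0 of a VSI to MSI-X vector 1:
+ *
+ *   struct virtchnl_vector_map map = {
+ *           .vsi_id    = vsi_id,
+ *           .vector_id = 1,      vector 0 is reserved for "other" causes
+ *           .rxq_map   = BIT(0),
+ *           .txq_map   = BIT(0),
+ *   };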
+ */
+struct virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+	u8 addr[ETH_ALEN];
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC	0x00000001
+#define FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
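+ *
+ * For example (illustrative only), the VF fills a queue_select with just
+ * the VSI id; the queue bitmaps are not examined for this opcode:
+ *
+ *   struct virtchnl_queue_select sel = { .vsi_id = vsi_id };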
+ */ + +struct virtchnl_eth_stats { + u64 rx_bytes; /* received bytes */ + u64 rx_unicast; /* received unicast pkts */ + u64 rx_multicast; /* received multicast pkts */ + u64 rx_broadcast; /* received broadcast pkts */ + u64 rx_discards; + u64 rx_unknown_protocol; + u64 tx_bytes; /* transmitted bytes */ + u64 tx_unicast; /* transmitted unicast pkts */ + u64 tx_multicast; /* transmitted multicast pkts */ + u64 tx_broadcast; /* transmitted broadcast pkts */ + u64 tx_discards; + u64 tx_errors; +}; + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* This is used by PF driver to enforce how many channels can be supported. + * When ADQ_V2 capability is negotiated, it will allow 16 channels otherwise + * PF driver will allow only max 4 channels + */ +#define VIRTCHNL_MAX_ADQ_CHANNELS 4 +#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16 + +/* VIRTCHNL_OP_ENABLE_CHANNELS + * VIRTCHNL_OP_DISABLE_CHANNELS + * VF sends these messages to enable or disable channels based on + * the user specified queue count and queue offset for each traffic class. + * This struct encompasses all the information that the PF needs from + * VF to create a channel. + */ +struct virtchnl_channel_info { + u16 count; /* number of queues in a channel */ + u16 offset; /* queues in a channel start from 'offset' */ + u32 pad; + u64 max_tx_rate; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); + +struct virtchnl_tc_info { + u32 num_tc; + u32 pad; + struct virtchnl_channel_info list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); + +/* VIRTCHNL_ADD_CLOUD_FILTER + * VIRTCHNL_DEL_CLOUD_FILTER + * VF sends these messages to add or delete a cloud filter based on the + * user specified match and action filters. These structures encompass + * all the information that the PF needs from the VF to add/delete a + * cloud filter. 
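+ *
+ * For example (illustrative values, field names from the structures
+ * defined below), steering a TCP/IPv4 flow by destination port to
+ * traffic class 1:
+ *
+ *   struct virtchnl_filter f = {
+ *           .flow_type   = VIRTCHNL_TCP_V4_FLOW,
+ *           .action      = VIRTCHNL_ACTION_TC_REDIRECT,
+ *           .action_meta = 1,    destination traffic class
+ *   };
+ *   f.data.tcp_spec.dst_port = cpu_to_be16(5001);
+ *   f.mask.tcp_spec.dst_port = cpu_to_be16(0xFFFF);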
+ */
+
+struct virtchnl_l4_spec {
+	u8 src_mac[ETH_ALEN];
+	u8 dst_mac[ETH_ALEN];
+	/* vlan_prio is part of this 16-bit field even from the OS
+	 * perspective: bits 11..0 carry the actual VLAN ID, and bits
+	 * 14..12 carry the VLAN priority. If VLAN priority offload is
+	 * implemented in the future, that information will be passed in
+	 * bits 14..12 of the "vlan_id" field.
+	 */
+	__be16 vlan_id;
+	__be16 pad; /* reserved for future use */
+	__be32 src_ip[4];
+	__be32 dst_ip[4];
+	__be16 src_port;
+	__be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+	struct virtchnl_l4_spec tcp_spec;
+	u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+	/* action types */
+	VIRTCHNL_ACTION_DROP = 0,
+	VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+	/* flow types */
+	VIRTCHNL_TCP_V4_FLOW = 0,
+	VIRTCHNL_TCP_V6_FLOW,
+	VIRTCHNL_UDP_V4_FLOW,
+	VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+	union virtchnl_flow_spec data;
+	union virtchnl_flow_spec mask;
+	enum virtchnl_flow_type flow_type;
+	enum virtchnl_action action;
+	u32 action_meta;
+	u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+	VIRTCHNL_EVENT_UNKNOWN = 0,
+	VIRTCHNL_EVENT_LINK_CHANGE,
+	VIRTCHNL_EVENT_RESET_IMPENDING,
+	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO		0
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct virtchnl_pf_event {
+	enum virtchnl_event_codes event;
+	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities then use link_event; otherwise use
+		 * link_event_adv to get the speed and link information. The
+		 * ability to understand new speeds is indicated by the
+		 * capability flag VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the
+		 * vf_cap_flags field of struct virtchnl_vf_resource, which
+		 * determines which link event struct to use below.
+		 */
+		struct {
+			enum virtchnl_link_speed link_speed;
+			u8 link_status;
+		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+		} link_event_adv;
+	} event_data;
+
+	int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector can have both an AEQ and a CEQ attached to it, but there is
+ * only a single AEQ per VF IWARP instance, so most vectors will have an
+ * INVALID_IDX for the AEQ and a valid index for the CEQ.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
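+ *
+ * For example (illustrative), attaching CEQ 0 to MSI-X vector 2 with no
+ * AEQ on that vector; INVALID_IDX stands for whatever sentinel value the
+ * IWARP client uses for an unused index:
+ *
+ *   struct virtchnl_iwarp_qv_info qv = {
+ *           .v_idx   = 2,
+ *           .ceq_idx = 0,
+ *           .aeq_idx = INVALID_IDX,
+ *   };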
+ */ +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +/* Since VF messages are limited by u16 size, precalculate the maximum possible + * values of nested elements in virtchnl structures that virtual channel can + * possibly handle in a single message. + */ +enum virtchnl_vector_limits { + VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX = + ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) / + sizeof(struct virtchnl_queue_pair_info), + + VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX = + ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) / + sizeof(struct virtchnl_vector_map), + + VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX = + ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) / + sizeof(struct virtchnl_ether_addr), + + VIRTCHNL_OP_ADD_DEL_VLAN_MAX = + ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) / + sizeof(u16), + + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX = + ((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) / + sizeof(struct virtchnl_iwarp_qv_info), + + VIRTCHNL_OP_ENABLE_CHANNELS_MAX = + ((u16)(~0) - sizeof(struct virtchnl_tc_info)) / + sizeof(struct virtchnl_channel_info), +}; + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. 
*/ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + + if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs > + VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + + if (vimi->num_vectors == 0 || vimi->num_vectors > + VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + + if (veal->num_elements == 0 || veal->num_elements > + VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) { + err_msg_format = true; + break; + } + + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + + if (vfl->num_elements == 0 || vfl->num_elements > + VIRTCHNL_OP_ADD_DEL_VLAN_MAX) { + err_msg_format = true; + break; + } + + valid_len += vfl->num_elements * sizeof(u16); + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. 
+ */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + + if (qv->num_vectors == 0 || qv->num_vectors > + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) { + err_msg_format = true; + break; + } + + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + + if (vrk->key_len == 0) { + /* zero length is allowed as input */ + break; + } + + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries == 0) { + /* zero entries is allowed as input */ + break; + } + + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; + case VIRTCHNL_OP_ENABLE_CHANNELS: + valid_len = sizeof(struct virtchnl_tc_info); + if (msglen >= valid_len) { + struct virtchnl_tc_info *vti = + (struct virtchnl_tc_info *)msg; + + if (vti->num_tc == 0 || vti->num_tc > + VIRTCHNL_OP_ENABLE_CHANNELS_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vti->num_tc - 1) * + sizeof(struct virtchnl_channel_info); + } + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: + case VIRTCHNL_OP_DEL_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_STATUS_ERR_PARAM; + } + /* few more checks */ + if (err_msg_format || valid_len != msglen) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index c997063ed728..ca0a730164df 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -12,4 +12,5 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_IAVF) += iavf.o iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ - iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o + iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o \ + kcompat_vfd.o kcompat.o diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 29de3ae96ef2..1b804c22cb98 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. 
*/ #ifndef _IAVF_H_ #define _IAVF_H_ @@ -28,15 +28,24 @@ #include #include #include +#include #include -#include #include +#ifdef HAVE_SCTP +#include +#endif +#ifdef __TC_MQPRIO_MODE_MAX +#include #include #include +#endif /* __TC_MQPRIO_MODE_MAX */ + +#include "kcompat.h" #include "iavf_type.h" -#include +#include "virtchnl.h" #include "iavf_txrx.h" +#include #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " @@ -52,14 +61,20 @@ enum iavf_vsi_state_t { struct iavf_vsi { struct iavf_adapter *back; struct net_device *netdev; +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; +#else unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + /* dummy pointer - VF plans to add this functionality in the future */ + struct iavf_ring **xdp_rings; u16 seid; u16 id; DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); int base_vector; u16 work_limit; u16 qs_handle; - void *priv; /* client driver data reference. */ + void *priv; /* client driver data reference. */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -81,7 +96,7 @@ struct iavf_vsi { #define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i])) #define IAVF_TX_CTXTDESC(R, i) \ (&(((struct iavf_tx_context_desc *)((R)->desc))[i])) -#define IAVF_MAX_REQ_QUEUES 4 +#define IAVF_MAX_REQ_QUEUES 16 #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) @@ -103,8 +118,10 @@ struct iavf_q_vector { u16 reg_idx; /* register index of the interrupt */ char name[IFNAMSIZ + 15]; bool arm_wb_state; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; +#endif }; /* Helper macros to switch between ints/sec and what the register uses. @@ -130,6 +147,7 @@ struct iavf_q_vector { struct iavf_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; + bool is_new_mac; /* filter is new, wait for PF decision */ bool remove; /* filter needs to be removed */ bool add; /* filter needs to be added */ }; @@ -141,7 +159,6 @@ struct iavf_vlan_filter { bool add; /* filter needs to be added */ }; -#define IAVF_MAX_TRAFFIC_CLASS 4 /* State of traffic class creation */ enum iavf_tc_state_t { __IAVF_TC_INVALID, /* no traffic class, default state */ @@ -150,7 +167,7 @@ enum iavf_tc_state_t { /* channel info */ struct iavf_channel_config { - struct virtchnl_channel_info ch_info[IAVF_MAX_TRAFFIC_CLASS]; + struct virtchnl_channel_info ch_info[VIRTCHNL_MAX_ADQ_V2_CHANNELS]; enum iavf_tc_state_t state; u8 total_qps; }; @@ -170,13 +187,14 @@ enum iavf_state_t { __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ __IAVF_INIT_SW, /* got resources, setting up structs */ + __IAVF_INIT_FAILED, /* init failed, restarting procedure */ __IAVF_RESETTING, /* in reset */ __IAVF_COMM_FAILED, /* communication with PF failed */ /* Below here, watchdog is running */ __IAVF_DOWN, /* ready, can be opened */ __IAVF_DOWN_PENDING, /* descending, waiting for watchdog */ __IAVF_TESTING, /* in ethtool self-test */ - __IAVF_RUNNING, /* opened, working */ + __IAVF_RUNNING /* opened, working */ }; enum iavf_critical_section_t { @@ -217,10 +235,9 @@ struct iavf_cloud_filter { /* board specific private data structure */ struct iavf_adapter { - struct work_struct reset_task; struct work_struct adminq_task; + struct delayed_work watchdog_task; struct delayed_work client_task; - struct delayed_work init_task; wait_queue_head_t down_waitqueue; struct iavf_q_vector 
*q_vectors; struct list_head vlan_filter_list; @@ -250,8 +267,8 @@ struct iavf_adapter { u32 flags; #define IAVF_FLAG_RX_CSUM_ENABLED BIT(0) #define IAVF_FLAG_PF_COMMS_FAILED BIT(3) -#define IAVF_FLAG_RESET_PENDING BIT(4) -#define IAVF_FLAG_RESET_NEEDED BIT(5) +#define IAVF_FLAG_RESET_PENDING BIT(4) +#define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) #define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) @@ -261,7 +278,9 @@ struct iavf_adapter { #define IAVF_FLAG_ALLMULTI_ON BIT(14) #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) -#define IAVF_FLAG_QUEUES_DISABLED BIT(17) +#define IAVF_FLAG_QUEUES_ENABLED BIT(17) +#define IAVF_FLAG_QUEUES_DISABLED BIT(18) +#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ @@ -287,7 +306,7 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) #define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) -#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) +#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) #define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) #define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) @@ -296,22 +315,33 @@ struct iavf_adapter { /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; + struct net_device_stats net_stats; struct iavf_hw hw; /* defined in iavf_type.h */ enum iavf_state_t state; + enum iavf_state_t last_state; unsigned long crit_section; - struct delayed_work watchdog_task; bool netdev_registered; bool link_up; enum virtchnl_link_speed link_speed; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set + * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if + * this field is valid. This field should be used going forward and the + * enum virtchnl_link_speed above should be considered the legacy way of + * storing/communicating link speeds. + */ + u32 link_speed_mbps; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ (_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_IWARP : \ 0) -#define CLIENT_ENABLED(_a) ((_a)->cinst) +#define CLIENT_ENABLED(_a) ((_a)->cinst != NULL) /* RSS by the PF should be preferred over RSS via other methods. */ #define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -322,6 +352,20 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN) +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED +#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_CAP_ADV_LINK_SPEED) +#ifdef SPEED_25000 +#define ALL_SPEEDS (SPEED_100000 | SPEED_50000 | SPEED_25000 | SPEED_10000 | \ + SPEED_5000 | SPEED_2500 | SPEED_1000 | SPEED_100 | SPEED_10) +#else +#define ALL_SPEEDS (SPEED_100000 | SPEED_50000 | SPEED_10000 | SPEED_5000 | \ + SPEED_2500 | SPEED_1000 | SPEED_100 | SPEED_10) +#endif +#define SUPPORTED_SPEED(_s) ((_s) & ALL_SPEEDS) +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ +#define ADQ_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_ADQ_V2) struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; @@ -344,9 +388,29 @@ struct iavf_adapter { /* lock to protect access to the cloud filter list */ spinlock_t cloud_filter_list_lock; u16 num_cloud_filters; +#ifdef IAVF_ADD_PROBES + u64 tcp_segs; + u64 udp_segs; + u64 tx_tcp_cso; + u64 tx_udp_cso; + u64 tx_sctp_cso; + u64 tx_ip4_cso; + u64 tx_vlano; + u64 rx_tcp_cso; + u64 rx_udp_cso; + u64 rx_sctp_cso; + u64 rx_ip4_cso; + u64 rx_vlano; + u64 rx_tcp_cso_err; + u64 hw_csum_rx_vxlan; + u64 hw_csum_rx_geneve; + u64 hw_csum_rx_outer; + u64 rx_udp_cso_err; + u64 rx_sctp_cso_err; + u64 rx_ip4_cso_err; +#endif }; - /* Ethtool Private Flags */ /* lan device, used by client interface */ @@ -360,6 +424,27 @@ extern char iavf_driver_name[]; extern const char iavf_driver_version[]; extern struct workqueue_struct *iavf_wq; +static inline void iavf_change_state(struct iavf_adapter *adapter, + enum iavf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + +/** + * iavf_is_reset - Check if reset has been triggered + * @hw: pointer to iavf_hw + * + * Return true if reset has been already triggered, false otherwise + * + **/ +static inline bool iavf_is_reset(struct iavf_hw *hw) +{ + return !(rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK); +} + int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); @@ -393,7 +478,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter); void iavf_del_vlans(struct iavf_adapter *adapter); void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags); void iavf_request_stats(struct iavf_adapter *adapter); -void iavf_request_reset(struct iavf_adapter *adapter); +int iavf_request_reset(struct iavf_adapter *adapter); void iavf_get_hena(struct iavf_adapter *adapter); void iavf_set_hena(struct iavf_adapter *adapter); void iavf_set_rss_key(struct iavf_adapter *adapter); @@ -404,6 +489,10 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, enum iavf_status v_retval, u8 *msg, u16 msglen); int iavf_config_rss(struct iavf_adapter *adapter); +void iavf_enable_channels(struct iavf_adapter *adapter); +void iavf_disable_channels(struct iavf_adapter *adapter); +void iavf_add_cloud_filter(struct iavf_adapter *adapter); +void iavf_del_cloud_filter(struct iavf_adapter *adapter); int iavf_lan_add_device(struct iavf_adapter *adapter); int iavf_lan_del_device(struct iavf_adapter *adapter); void iavf_client_subtask(struct iavf_adapter *adapter); @@ -411,8 +500,10 @@ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len); void iavf_notify_client_l2_params(struct iavf_vsi *vsi); void iavf_notify_client_open(struct iavf_vsi *vsi); void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset); -void iavf_enable_channels(struct iavf_adapter *adapter); -void iavf_disable_channels(struct iavf_adapter *adapter); -void iavf_add_cloud_filter(struct iavf_adapter *adapter); -void iavf_del_cloud_filter(struct iavf_adapter *adapter); +#ifdef CONFIG_DEBUG_FS +void iavf_dbg_vf_init(struct iavf_adapter *adapter); +void iavf_dbg_vf_exit(struct iavf_adapter *adapter); +void iavf_dbg_init(void); +void iavf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS*/ #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c index 9fa3fa99b4c2..c950de1f40ab 100644 --- 
a/drivers/net/ethernet/intel/iavf/iavf_adminq.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #include "iavf_status.h" #include "iavf_type.h" @@ -81,6 +81,7 @@ static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw) **/ static void iavf_free_adminq_asq(struct iavf_hw *hw) { + iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf); iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); } @@ -102,9 +103,9 @@ static void iavf_free_adminq_arq(struct iavf_hw *hw) **/ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw) { + enum iavf_status ret_code; struct iavf_aq_desc *desc; struct iavf_dma_mem *bi; - enum iavf_status ret_code; int i; /* We'll be allocating the buffer info memory first, then we can @@ -113,8 +114,7 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw) /* buffer_info structures do not need alignment */ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head, - (hw->aq.num_arq_entries * - sizeof(struct iavf_dma_mem))); + (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_arq_bufs; hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va; @@ -170,14 +170,13 @@ unwind_alloc_arq_bufs: **/ static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw) { - struct iavf_dma_mem *bi; enum iavf_status ret_code; + struct iavf_dma_mem *bi; int i; /* No mapped memory needed yet, just the buffer info structures */ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head, - (hw->aq.num_asq_entries * - sizeof(struct iavf_dma_mem))); + (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_asq_bufs; hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va; @@ -354,7 +353,7 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw) /* initialize base registers */ ret_code = iavf_config_asq_regs(hw); if (ret_code) - goto init_adminq_free_rings; + goto init_config_regs; /* success! 
*/ hw->aq.asq.count = hw->aq.num_asq_entries; @@ -362,6 +361,10 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw) init_adminq_free_rings: iavf_free_adminq_asq(hw); + return ret_code; + +init_config_regs: + iavf_free_asq_bufs(hw); init_adminq_exit: return ret_code; @@ -436,7 +439,7 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; - mutex_lock(&hw->aq.asq_mutex); + iavf_acquire_spinlock(&hw->aq.asq_spinlock); if (hw->aq.asq.count == 0) { ret_code = IAVF_ERR_NOT_READY; @@ -456,7 +459,7 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw) iavf_free_asq_bufs(hw); shutdown_asq_out: - mutex_unlock(&hw->aq.asq_mutex); + iavf_release_spinlock(&hw->aq.asq_spinlock); return ret_code; } @@ -470,7 +473,7 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; - mutex_lock(&hw->aq.arq_mutex); + iavf_acquire_spinlock(&hw->aq.arq_spinlock); if (hw->aq.arq.count == 0) { ret_code = IAVF_ERR_NOT_READY; @@ -490,7 +493,7 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw) iavf_free_arq_bufs(hw); shutdown_arq_out: - mutex_unlock(&hw->aq.arq_mutex); + iavf_release_spinlock(&hw->aq.arq_spinlock); return ret_code; } @@ -517,6 +520,8 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw) ret_code = IAVF_ERR_CONFIG; goto init_adminq_exit; } + iavf_init_spinlock(&hw->aq.asq_spinlock); + iavf_init_spinlock(&hw->aq.arq_spinlock); /* Set up register offsets */ iavf_adminq_init_regs(hw); @@ -527,7 +532,7 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw) /* allocate the ASQ */ ret_code = iavf_init_asq(hw); if (ret_code) - goto init_adminq_destroy_locks; + goto init_adminq_destroy_spinlocks; /* allocate the ARQ */ ret_code = iavf_init_arq(hw); @@ -539,7 +544,9 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw) init_adminq_free_asq: iavf_shutdown_asq(hw); -init_adminq_destroy_locks: +init_adminq_destroy_spinlocks: + iavf_destroy_spinlock(&hw->aq.asq_spinlock); + iavf_destroy_spinlock(&hw->aq.arq_spinlock); init_adminq_exit: return ret_code; @@ -558,6 +565,8 @@ enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) iavf_shutdown_asq(hw); iavf_shutdown_arq(hw); + iavf_destroy_spinlock(&hw->aq.asq_spinlock); + iavf_destroy_spinlock(&hw->aq.arq_spinlock); return ret_code; } @@ -570,7 +579,7 @@ enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) **/ static u16 iavf_clean_asq(struct iavf_hw *hw) { - struct iavf_adminq_ring *asq = &hw->aq.asq; + struct iavf_adminq_ring *asq = &(hw->aq.asq); struct iavf_asq_cmd_details *details; u16 ntc = asq->next_to_clean; struct iavf_aq_desc desc_cb; @@ -585,12 +594,11 @@ static u16 iavf_clean_asq(struct iavf_hw *hw) if (details->callback) { IAVF_ADMINQ_CALLBACK cb_func = (IAVF_ADMINQ_CALLBACK)details->callback; - desc_cb = *desc; + memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc)); cb_func(hw, &desc_cb); } - memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); - memset((void *)details, 0, - sizeof(struct iavf_asq_cmd_details)); + memset(desc, 0, sizeof(*desc)); + memset(details, 0, sizeof(*details)); ntc++; if (ntc == asq->count) ntc = 0; @@ -616,6 +624,7 @@ bool iavf_asq_done(struct iavf_hw *hw) * timing reliability than DD bit */ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + } /** @@ -630,20 +639,22 @@ bool iavf_asq_done(struct iavf_hw *hw) * queue. 
It runs the queue, cleans the queue, etc **/ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, - struct iavf_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct iavf_asq_cmd_details *cmd_details) + struct iavf_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct iavf_asq_cmd_details *cmd_details) { + enum iavf_status status = 0; struct iavf_dma_mem *dma_buff = NULL; struct iavf_asq_cmd_details *details; struct iavf_aq_desc *desc_on_ring; bool cmd_completed = false; - enum iavf_status status = 0; u16 retval = 0; u32 val = 0; - mutex_lock(&hw->aq.asq_mutex); + iavf_acquire_spinlock(&hw->aq.asq_spinlock); + + hw->aq.asq_last_status = IAVF_AQ_RC_OK; if (hw->aq.asq.count == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, @@ -652,8 +663,6 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, goto asq_send_command_error; } - hw->aq.asq_last_status = IAVF_AQ_RC_OK; - val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, @@ -664,7 +673,8 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { - *details = *cmd_details; + memcpy(details, cmd_details, + sizeof(struct iavf_asq_cmd_details)); /* If the cmd_details are defined copy the cookie. The * cpu_to_le32 is not needed here because the data is ignored @@ -720,11 +730,11 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); /* if the desc is available copy the temp desc to the right place */ - *desc_on_ring = *desc; + memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc)); /* if buff is not NULL assume indirect command */ - if (buff) { - dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]; + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); /* copy the user buff into the respective DMA buff */ memcpy(dma_buff->va, buff, buff_size); desc_on_ring->datalen = cpu_to_le16(buff_size); @@ -767,8 +777,8 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, /* if ready, copy the desc back to temp */ if (iavf_asq_done(hw)) { - *desc = *desc_on_ring; - if (buff) + memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc)); + if (buff != NULL) memcpy(buff, dma_buff->va, buff_size); retval = le16_to_cpu(desc->retval); if (retval != 0) { @@ -796,7 +806,8 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, /* save writeback aq if requested */ if (details->wb_desc) - *details->wb_desc = *desc_on_ring; + memcpy(details->wb_desc, desc_on_ring, + sizeof(struct iavf_aq_desc)); /* update the error if time out occurred */ if ((!cmd_completed) && @@ -813,7 +824,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, } asq_send_command_error: - mutex_unlock(&hw->aq.asq_mutex); + iavf_release_spinlock(&hw->aq.asq_spinlock); return status; } @@ -824,7 +835,8 @@ asq_send_command_error: * * Fill the desc with default values **/ -void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode) +void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, + u16 opcode) { /* zero out the desc */ memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); @@ -843,12 +855,12 @@ void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode) * left to process through 'pending' **/ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, - struct iavf_arq_event_info *e, - u16 *pending) + struct iavf_arq_event_info *e, + u16 
*pending) { + enum iavf_status ret_code = 0; u16 ntc = hw->aq.arq.next_to_clean; struct iavf_aq_desc *desc; - enum iavf_status ret_code = 0; struct iavf_dma_mem *bi; u16 desc_idx; u16 datalen; @@ -859,7 +871,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, memset(&e->desc, 0, sizeof(e->desc)); /* take the lock before we start messing with the ring */ - mutex_lock(&hw->aq.arq_mutex); + iavf_acquire_spinlock(&hw->aq.arq_spinlock); if (hw->aq.arq.count == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, @@ -891,10 +903,10 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, hw->aq.arq_last_status); } - e->desc = *desc; + memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc)); datalen = le16_to_cpu(desc->datalen); e->msg_len = min(datalen, e->buf_len); - if (e->msg_buf && (e->msg_len != 0)) + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); @@ -927,11 +939,11 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, clean_arq_element_out: /* Set pending if needed, unlock and return */ - if (pending) + if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); - clean_arq_element_err: - mutex_unlock(&hw->aq.arq_mutex); + iavf_release_spinlock(&hw->aq.arq_spinlock); return ret_code; } + diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h index baf2fe26f302..1e88fbe0cf58 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_ADMINQ_H_ #define _IAVF_ADMINQ_H_ @@ -75,8 +75,8 @@ struct iavf_adminq_info { u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - struct mutex asq_mutex; /* Send queue lock */ - struct mutex arq_mutex; /* Receive queue lock */ + struct iavf_spinlock asq_spinlock; /* Send queue spinlock */ + struct iavf_spinlock arq_spinlock; /* Receive queue spinlock */ /* last status values on send and receive queues */ enum iavf_admin_queue_err asq_last_status; @@ -130,6 +130,7 @@ static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc) #define IAVF_AQ_LARGE_BUF 512 #define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */ -void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode); +void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, + u16 opcode); #endif /* _IAVF_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h index bc512308557b..3484d80d0e44 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h @@ -1,18 +1,20 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_ADMINQ_CMD_H_ #define _IAVF_ADMINQ_CMD_H_ /* This header file defines the iavf Admin Queue commands and is shared between - * iavf Firmware and Software. + * iavf Firmware and Software. Do not change the names in this file to IAVF + * because this file should be diff-able against the iavf version, even + * though many parts have been removed in this VF version. * * This file needs to comply with the Linux Kernel coding style. 
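/*
 * For orientation before the flag definitions below: each IAVF_AQ_FLAG_*
 * is a single bit derived from its *_SHIFT value.  A hedged sketch of
 * how an indirect (buffer-carrying) command composes them, mirroring
 * what iavf_asq_send_command()/iavf_aq_send_msg_to_pf() do; assumes the
 * kernel's __le16/cpu_to_le16 helpers and the IAVF_AQ_LARGE_BUF limit
 * (512) from iavf_adminq.h:
 */
static inline __le16 example_indirect_cmd_flags(u16 buff_size)
{
	u16 flags = IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_RD;	/* 0x1000 | 0x400 */

	if (buff_size > IAVF_AQ_LARGE_BUF)	/* large-buffer bit */
		flags |= IAVF_AQ_FLAG_LB;	/* 0x200 */
	return cpu_to_le16(flags);
}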
*/ #define IAVF_FW_API_VERSION_MAJOR 0x0001 -#define IAVF_FW_API_VERSION_MINOR_X722 0x0005 -#define IAVF_FW_API_VERSION_MINOR_X710 0x0008 +#define IAVF_FW_API_VERSION_MINOR_X722 0x0006 +#define IAVF_FW_API_VERSION_MINOR_X710 0x0007 #define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \ IAVF_FW_API_VERSION_MINOR_X710 : \ @@ -20,6 +22,8 @@ /* API version 1.7 implements additional link and PHY-specific APIs */ #define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007 +/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ +#define IAVF_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 struct iavf_aq_desc { __le16 flags; @@ -63,17 +67,17 @@ struct iavf_aq_desc { #define IAVF_AQ_FLAG_EI_SHIFT 14 #define IAVF_AQ_FLAG_FE_SHIFT 15 -#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define IAVF_AQ_FLAG_DD (1 << IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define IAVF_AQ_FLAG_CMP (1 << IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define IAVF_AQ_FLAG_ERR (1 << IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define IAVF_AQ_FLAG_VFE (1 << IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define IAVF_AQ_FLAG_LB (1 << IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define IAVF_AQ_FLAG_RD (1 << IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define IAVF_AQ_FLAG_VFC (1 << IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define IAVF_AQ_FLAG_BUF (1 << IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define IAVF_AQ_FLAG_SI (1 << IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define IAVF_AQ_FLAG_EI (1 << IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define IAVF_AQ_FLAG_FE (1 << IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum iavf_admin_queue_err { @@ -131,6 +135,7 @@ enum iavf_admin_queue_opc { /* WoL commands */ iavf_aqc_opc_set_wol_filter = 0x0120, iavf_aqc_opc_get_wake_reason = 0x0121, + iavf_aqc_opc_clear_all_wol_filters = 0x025E, /* internal switch commands */ iavf_aqc_opc_get_switch_config = 0x0200, @@ -171,6 +176,7 @@ enum iavf_admin_queue_opc { iavf_aqc_opc_add_cloud_filters = 0x025C, iavf_aqc_opc_remove_cloud_filters = 0x025D, iavf_aqc_opc_clear_wol_switch_filters = 0x025E, + iavf_aqc_opc_replace_cloud_filters = 0x025F, iavf_aqc_opc_add_mirror_rule = 0x0260, iavf_aqc_opc_delete_mirror_rule = 0x0261, @@ -207,6 +213,8 @@ enum iavf_admin_queue_opc { iavf_aqc_opc_query_hmc_resource_profile = 0x0500, iavf_aqc_opc_set_hmc_resource_profile = 0x0501, + /* phy commands*/ + /* phy commands*/ iavf_aqc_opc_get_phy_abilities = 0x0600, iavf_aqc_opc_set_phy_config = 0x0601, @@ -231,6 +239,7 @@ enum iavf_admin_queue_opc { iavf_aqc_opc_nvm_update = 0x0703, iavf_aqc_opc_nvm_config_read = 0x0704, iavf_aqc_opc_nvm_config_write = 0x0705, + iavf_aqc_opc_nvm_progress = 0x0706, iavf_aqc_opc_oem_post_update = 0x0720, iavf_aqc_opc_thermal_sensor = 0x0721, @@ -256,6 +265,9 @@ enum iavf_admin_queue_opc { iavf_aqc_opc_lldp_delete_tlv = 0x0A04, iavf_aqc_opc_lldp_stop = 0x0A05, iavf_aqc_opc_lldp_start = 0x0A06, + iavf_aqc_opc_get_cee_dcb_cfg = 0x0A07, + 
iavf_aqc_opc_lldp_set_local_mib = 0x0A08, + iavf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09, /* Tunnel commands */ iavf_aqc_opc_add_udp_tunnel = 0x0B00, @@ -300,7 +312,7 @@ enum iavf_admin_queue_opc { * never used. */ #define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \ - { iavf_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) } + { iavf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. @@ -316,6 +328,10 @@ struct iavf_aqc_queue_shutdown { IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown); +#define IAVF_AQC_WOL_PRESERVE_STATUS 0x200 +#define IAVF_AQC_MC_MAG_EN 0x0100 +#define IAVF_AQC_WOL_PRESERVE_ON_PFR 0x0200 + struct iavf_aqc_vsi_properties_data { /* first 96 byte are written by SW */ __le16 valid_sections; @@ -456,23 +472,29 @@ struct iavf_aqc_get_veb_parameters_completion { IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion); +#define IAVF_LINK_SPEED_2_5GB_SHIFT 0x0 #define IAVF_LINK_SPEED_100MB_SHIFT 0x1 #define IAVF_LINK_SPEED_1000MB_SHIFT 0x2 #define IAVF_LINK_SPEED_10GB_SHIFT 0x3 #define IAVF_LINK_SPEED_40GB_SHIFT 0x4 #define IAVF_LINK_SPEED_20GB_SHIFT 0x5 #define IAVF_LINK_SPEED_25GB_SHIFT 0x6 +#define IAVF_LINK_SPEED_5GB_SHIFT 0x7 enum iavf_aq_link_speed { IAVF_LINK_SPEED_UNKNOWN = 0, - IAVF_LINK_SPEED_100MB = BIT(IAVF_LINK_SPEED_100MB_SHIFT), - IAVF_LINK_SPEED_1GB = BIT(IAVF_LINK_SPEED_1000MB_SHIFT), - IAVF_LINK_SPEED_10GB = BIT(IAVF_LINK_SPEED_10GB_SHIFT), - IAVF_LINK_SPEED_40GB = BIT(IAVF_LINK_SPEED_40GB_SHIFT), - IAVF_LINK_SPEED_20GB = BIT(IAVF_LINK_SPEED_20GB_SHIFT), - IAVF_LINK_SPEED_25GB = BIT(IAVF_LINK_SPEED_25GB_SHIFT), + IAVF_LINK_SPEED_100MB = (1 << IAVF_LINK_SPEED_100MB_SHIFT), + IAVF_LINK_SPEED_1GB = (1 << IAVF_LINK_SPEED_1000MB_SHIFT), + IAVF_LINK_SPEED_2_5GB = (1 << IAVF_LINK_SPEED_2_5GB_SHIFT), + IAVF_LINK_SPEED_5GB = (1 << IAVF_LINK_SPEED_5GB_SHIFT), + IAVF_LINK_SPEED_10GB = (1 << IAVF_LINK_SPEED_10GB_SHIFT), + IAVF_LINK_SPEED_40GB = (1 << IAVF_LINK_SPEED_40GB_SHIFT), + IAVF_LINK_SPEED_20GB = (1 << IAVF_LINK_SPEED_20GB_SHIFT), + IAVF_LINK_SPEED_25GB = (1 << IAVF_LINK_SPEED_25GB_SHIFT), }; +#define IAVF_AQ_LINK_UP_FUNCTION 0x01 + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -486,8 +508,111 @@ struct iavf_aqc_pf_vf_message { IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message); +/* Get CEE DCBX Oper Config (0x0A07) + * uses the generic descriptor struct + * returns below as indirect response + */ + +#define IAVF_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define IAVF_AQC_CEE_APP_FCOE_MASK (0x7 << IAVF_AQC_CEE_APP_FCOE_SHIFT) +#define IAVF_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define IAVF_AQC_CEE_APP_ISCSI_MASK (0x7 << IAVF_AQC_CEE_APP_ISCSI_SHIFT) +#define IAVF_AQC_CEE_APP_FIP_SHIFT 0x8 +#define IAVF_AQC_CEE_APP_FIP_MASK (0x7 << IAVF_AQC_CEE_APP_FIP_SHIFT) + +#define IAVF_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define IAVF_AQC_CEE_PG_STATUS_MASK (0x7 << IAVF_AQC_CEE_PG_STATUS_SHIFT) +#define IAVF_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define IAVF_AQC_CEE_PFC_STATUS_MASK (0x7 << IAVF_AQC_CEE_PFC_STATUS_SHIFT) +#define IAVF_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define IAVF_AQC_CEE_APP_STATUS_MASK (0x7 << IAVF_AQC_CEE_APP_STATUS_SHIFT) +#define IAVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define IAVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << IAVF_AQC_CEE_FCOE_STATUS_SHIFT) +#define IAVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB 
+#define IAVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << IAVF_AQC_CEE_ISCSI_STATUS_SHIFT) +#define IAVF_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define IAVF_AQC_CEE_FIP_STATUS_MASK (0x7 << IAVF_AQC_CEE_FIP_STATUS_SHIFT) + +/* struct iavf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with + * word boundary layout issues, which the Linux compilers silently deal + * with by adding padding, making the actual struct larger than designed. + * However, the FW compiler for the NIC is less lenient and complains + * about the struct. Hence, the struct defined here has an extra byte in + * fields reserved3 and reserved4 to directly acknowledge that padding, + * and the new length is used in the length check macro. + */ +struct iavf_aqc_get_cee_dcb_cfg_v1_resp { + u8 reserved1; + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 reserved2; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + u8 reserved3[2]; + __le16 oper_app_prio; + u8 reserved4[2]; + __le16 tlv_status; +}; + +IAVF_CHECK_STRUCT_LEN(0x18, iavf_aqc_get_cee_dcb_cfg_v1_resp); + +struct iavf_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; + __le32 tlv_status; + u8 reserved[12]; +}; + +IAVF_CHECK_STRUCT_LEN(0x20, iavf_aqc_get_cee_dcb_cfg_resp); + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. DCBx + */ +struct iavf_aqc_lldp_set_local_mib { +#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 + u8 type; + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 address_high; + __le32 address_low; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_set_local_mib); + +struct iavf_aqc_lldp_set_local_mib_resp { +#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01 + u8 status; + u8 reserved[15]; +}; + +IAVF_CHECK_STRUCT_LEN(0x10, iavf_aqc_lldp_set_local_mib_resp); + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. 
DCBx + */ +struct iavf_aqc_lldp_stop_start_specific_agent { +#define IAVF_AQC_START_SPECIFIC_AGENT_SHIFT 0 +#define IAVF_AQC_START_SPECIFIC_AGENT_MASK \ + (1 << IAVF_AQC_START_SPECIFIC_AGENT_SHIFT) + u8 command; + u8 reserved[15]; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_stop_start_specific_agent); + struct iavf_aqc_get_set_rss_key { -#define IAVF_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define IAVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) #define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -507,14 +632,14 @@ struct iavf_aqc_get_set_rss_key_data { IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data); struct iavf_aqc_get_set_rss_lut { -#define IAVF_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define IAVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) #define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ - BIT(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h index 2711573c14ec..b7e0f7d7cf79 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_alloc.h +++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_ALLOC_H_ #define _IAVF_ALLOC_H_ @@ -27,7 +27,8 @@ enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem); enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw, - struct iavf_virt_mem *mem, u32 size); + struct iavf_virt_mem *mem, + u32 size); enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem); diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c index 0c77e4171808..4da8571e7140 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.c +++ b/drivers/net/ethernet/intel/iavf/iavf_client.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. 
*/ #include <linux/list.h> #include <linux/errno.h> @@ -14,6 +14,7 @@ static struct iavf_client *vf_registered_client; static LIST_HEAD(iavf_devices); static DEFINE_MUTEX(iavf_device_mutex); + static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, struct iavf_client *client, u8 *msg, u16 len); @@ -59,9 +60,6 @@ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) { struct iavf_client_instance *cinst; - if (!vsi) - return; - cinst = vsi->back->cinst; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->virtchnl_receive) { @@ -84,17 +82,14 @@ void iavf_notify_client_l2_params(struct iavf_vsi *vsi) struct iavf_client_instance *cinst; struct iavf_params params; - if (!vsi) - return; - cinst = vsi->back->cinst; - if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->l2_param_change) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance l2_param_change function\n"); return; } + iavf_client_get_params(vsi, &params); cinst->lan_info.params = params; cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, @@ -220,7 +215,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter) &adapter->msix_entries[adapter->iwarp_base_vector]; mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, - struct netdev_hw_addr, list); + struct netdev_hw_addr, list); if (mac) ether_addr_copy(cinst->lan_info.lanmac, mac->addr); else @@ -247,6 +242,7 @@ void iavf_client_del_instance(struct iavf_adapter *adapter) /** * iavf_client_subtask - client maintenance work * @adapter: board private structure + * Called under client lock **/ void iavf_client_subtask(struct iavf_adapter *adapter) { @@ -254,6 +250,7 @@ void iavf_client_subtask(struct iavf_adapter *adapter) struct iavf_client_instance *cinst; int ret = 0; + if (adapter->state < __IAVF_DOWN) return; @@ -262,7 +259,7 @@ void iavf_client_subtask(struct iavf_adapter *adapter) return; /* Add the client instance to the instance list */ - cinst = iavf_client_add_instance(adapter); + cinst = adapter->cinst; if (!cinst) return; @@ -369,6 +366,14 @@ static void iavf_client_release(struct iavf_client *client) cinst = adapter->cinst; if (!cinst) continue; + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | + IAVF_FLAG_CLIENT_NEEDS_CLOSE | + IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + IAVF_FLAG_SERVICE_CLIENT_REQUESTED); + cancel_delayed_work_sync(&adapter->client_task); + while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { if (client->ops && client->ops->close) client->ops->close(&cinst->lan_info, client, @@ -383,10 +388,10 @@ static void iavf_client_release(struct iavf_client *client) iavf_client_del_instance(adapter); dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", client->name); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); } mutex_unlock(&iavf_device_mutex); } - /** * iavf_client_prepare - prepare client specific resources * @client: pointer to the registered client @@ -400,6 +405,7 @@ static void iavf_client_prepare(struct iavf_client *client) mutex_lock(&iavf_device_mutex); list_for_each_entry(ldev, &iavf_devices, list) { adapter = ldev->vf; + iavf_client_add_instance(adapter); /* Signal the watchdog to service the client */ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } @@ -451,7 +457,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev, struct iavf_qv_info *qv_info; enum iavf_status err; u32 v_idx, i; - size_t msg_size; + u32 msg_size; if
(adapter->aq_required) return -EAGAIN; @@ -469,8 +475,9 @@ } v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; - msg_size = struct_size(v_qvlist_info, qv_info, - v_qvlist_info->num_vectors - 1); + msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) + + (sizeof(struct virtchnl_iwarp_qv_info) * + (v_qvlist_info->num_vectors - 1)); adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); err = iavf_aq_send_msg_to_pf(&adapter->hw, @@ -558,12 +565,6 @@ int iavf_unregister_client(struct iavf_client *client) { int ret = 0; - /* When an unregister request comes through we would have to send - * a close for each of the client instances that were opened. - * client_release function is called to handle this. - */ - iavf_client_release(client); - if (vf_registered_client != client) { pr_info("iavf: Client %s has not been registered\n", client->name); @@ -571,6 +572,12 @@ int iavf_unregister_client(struct iavf_client *client) goto out; } vf_registered_client = NULL; + /* When an unregister request comes through we would have to send + * a close for each of the client instances that were opened. + * client_release function is called to handle this. + */ + iavf_client_release(client); + pr_info("iavf: Unregistered client %s\n", client->name); out: return ret; diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h index 9a7cf39ea75a..95f5f5032994 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.h +++ b/drivers/net/ethernet/intel/iavf/iavf_client.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_CLIENT_H_ #define _IAVF_CLIENT_H_ @@ -41,7 +41,7 @@ struct iavf_client; * In order for us to keep the interface simple, SW will define a * unique type value for AEQ. */ -#define IAVF_QUEUE_TYPE_PE_AEQ 0x80 +#define IAVF_QUEUE_TYPE_PE_AEQ 0x80 #define IAVF_QUEUE_INVALID_IDX 0xFFFF struct iavf_qv_info { @@ -67,7 +67,7 @@ struct iavf_prio_qos_params { u8 reserved; }; -#define IAVF_CLIENT_MAX_USER_PRIORITY 8 +#define IAVF_CLIENT_MAX_USER_PRIORITY 8 /* Struct to hold Client QoS */ struct iavf_qos_params { struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY]; diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 8547fc8fdfd6..e2eed8083e37 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation.
*/ #include "iavf_type.h" #include "iavf_adminq.h" #include "iavf_prototype.h" -#include <linux/avf/virtchnl.h> +#include "virtchnl.h" /** * iavf_set_mac_type - Sets MAC type @@ -35,7 +35,8 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) status = IAVF_ERR_DEVICE_NOT_SUPPORTED; } - hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status); + hw_dbg(hw, "iavf_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); return status; } @@ -107,7 +108,7 @@ const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err) const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err) { switch (stat_err) { - case 0: + case IAVF_SUCCESS: return "OK"; case IAVF_ERR_NVM: return "IAVF_ERR_NVM"; @@ -262,10 +263,13 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, { struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc; u8 *buf = (u8 *)buffer; + u16 len; - if ((!(mask & hw->debug_mask)) || !desc) + if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; + len = le16_to_cpu(aq_desc->datalen); + iavf_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), @@ -282,9 +286,7 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, le32_to_cpu(aq_desc->params.external.addr_high), le32_to_cpu(aq_desc->params.external.addr_low)); - if (buffer && aq_desc->datalen) { - u16 len = le16_to_cpu(aq_desc->datalen); - + if ((buffer != NULL) && (aq_desc->datalen != 0)) { iavf_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; @@ -314,7 +316,7 @@ bool iavf_check_asq_alive(struct iavf_hw *hw) { if (hw->aq.asq.len) return !!(rd32(hw, hw->aq.asq.len) & - IAVF_VF_ATQLEN1_ATQENABLE_MASK); + IAVF_VF_ATQLEN1_ATQENABLE_MASK); else return false; } @@ -327,14 +329,16 @@ bool iavf_check_asq_alive(struct iavf_hw *hw) * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well.
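/*
 * The queue-shutdown command below follows the driver's recipe for a
 * "direct" (bufferless) admin queue command.  Stripped to its skeleton
 * it looks like this (a sketch only; the real function also fills the
 * command-specific driver_unloading field shown in the hunk):
 */
enum iavf_status example_direct_cmd(struct iavf_hw *hw)
{
	struct iavf_aq_desc desc;

	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
	/* no payload: NULL buffer, zero length, default cmd details */
	return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
}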
**/ -enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) +enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, + bool unloading) { struct iavf_aq_desc desc; struct iavf_aqc_queue_shutdown *cmd = (struct iavf_aqc_queue_shutdown *)&desc.params.raw; enum iavf_status status; - iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown); + iavf_fill_default_direct_cmd_desc(&desc, + iavf_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING); @@ -439,10 +443,10 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, * * get the RSS key per VSI **/ -static enum -iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, - struct iavf_aqc_get_set_rss_key_data *key, - bool set) +static enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, + u16 vsi_id, + struct iavf_aqc_get_set_rss_key_data *key, + bool set) { enum iavf_status status; struct iavf_aq_desc desc; @@ -479,7 +483,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, * @key: pointer to key info struct * **/ -enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, +enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, + u16 vsi_id, struct iavf_aqc_get_set_rss_key_data *key) { return iavf_aq_get_set_rss_key(hw, vsi_id, key, false); @@ -493,7 +498,8 @@ enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, * * set the RSS key per VSI **/ -enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, +enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, + u16 vsi_id, struct iavf_aqc_get_set_rss_key_data *key) { return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); @@ -662,7 +668,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { /* Non Tunneled IPv6 */ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(91), IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), @@ -878,13 +884,13 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { * completion before returning. 
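/*
 * Usage sketch for the function documented above: a VF-to-PF virtchnl
 * message with no payload reduces to a single call, exactly the shape
 * iavf_vf_reset() uses later in this file:
 */
enum iavf_status example_request_vf_reset(struct iavf_hw *hw)
{
	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
				      IAVF_SUCCESS, NULL, 0, NULL);
}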
**/ enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, - enum virtchnl_ops v_opcode, - enum iavf_status v_retval, - u8 *msg, u16 msglen, - struct iavf_asq_cmd_details *cmd_details) + enum virtchnl_ops v_opcode, + enum iavf_status v_retval, + u8 *msg, u16 msglen, + struct iavf_asq_cmd_details *cmd_details) { - struct iavf_asq_cmd_details details; struct iavf_aq_desc desc; + struct iavf_asq_cmd_details details; enum iavf_status status; iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf); @@ -903,7 +909,8 @@ enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, details.async = true; cmd_details = &details; } - status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details); + status = iavf_asq_send_command(hw, (struct iavf_aq_desc *)&desc, msg, + msglen, cmd_details); return status; } @@ -929,7 +936,6 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw, hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_L2; - hw->dev_caps.fcoe = 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { ether_addr_copy(hw->mac.perm_addr, @@ -952,5 +958,26 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw, enum iavf_status iavf_vf_reset(struct iavf_hw *hw) { return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, - 0, NULL, 0, NULL); + IAVF_SUCCESS, NULL, 0, NULL); +} + +/** +* iavf_aq_clear_all_wol_filters +* @hw: pointer to the hw struct +* @cmd_details: pointer to command details structure or NULL +* +* Clear all Wake-on-LAN filters configured on this device +**/ +enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw, + struct iavf_asq_cmd_details *cmd_details) +{ + struct iavf_aq_desc desc; + enum iavf_status status; + + iavf_fill_default_direct_cmd_desc(&desc, + iavf_aqc_opc_clear_all_wol_filters); + + status = iavf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_devids.h b/drivers/net/ethernet/intel/iavf/iavf_devids.h index 8eb7b697e96c..0cb272d0e975 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_devids.h +++ b/drivers/net/ethernet/intel/iavf/iavf_devids.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_DEVIDS_H_ #define _IAVF_DEVIDS_H_ @@ -9,4 +9,5 @@ #define IAVF_DEV_ID_VF_HV 0x1571 #define IAVF_DEV_ID_ADAPTIVE_VF 0x1889 #define IAVF_DEV_ID_X722_VF 0x37CD + #endif /* _IAVF_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index dad3eec8ccd8..b9cf537507d6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1,221 +1,21 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ /* ethtool support for iavf */ #include "iavf.h" +#ifdef SIOCETHTOOL #include <linux/uaccess.h> -/* ethtool statistics helpers */ +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif -/** - * struct iavf_stats - definition for an ethtool statistic - * @stat_string: statistic name to display in ethtool -S output - * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) - * @stat_offset: offsetof() the stat from a base pointer - * - * This structure defines a statistic to be added to the ethtool stats buffer. - * It defines a statistic as offset from a common base pointer.
Stats should - * be defined in constant arrays using the IAVF_STAT macro, with every element - * of the array using the same _type for calculating the sizeof_stat and - * stat_offset. - * - * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or - * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from - * the iavf_add_ethtool_stat() helper function. - * - * The @stat_string is interpreted as a format string, allowing formatted - * values to be inserted while looping over multiple structures for a given - * statistics array. Thus, every statistic string in an array should have the - * same type and number of format specifiers, to be formatted by variadic - * arguments to the iavf_add_stat_string() helper function. - **/ -struct iavf_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif -/* Helper macro to define an iavf_stat structure with proper size and type. - * Use this when defining constant statistics arrays. Note that @_type expects - * only a type name and is used multiple times. - */ -#define IAVF_STAT(_type, _name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ - .stat_offset = offsetof(_type, _stat) \ -} - -/* Helper macro for defining some statistics related to queues */ -#define IAVF_QUEUE_STAT(_name, _stat) \ - IAVF_STAT(struct iavf_ring, _name, _stat) - -/* Stats associated with a Tx or Rx ring */ -static const struct iavf_stats iavf_gstrings_queue_stats[] = { - IAVF_QUEUE_STAT("%s-%u.packets", stats.packets), - IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes), -}; - -/** - * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer - * @data: location to store the stat value - * @pointer: basis for where to copy from - * @stat: the stat definition - * - * Copies the stat data defined by the pointer and stat structure pair into - * the memory supplied as data. Used to implement iavf_add_ethtool_stats and - * iavf_add_queue_stats. If the pointer is null, data will be zero'd. - */ -static void -iavf_add_one_ethtool_stat(u64 *data, void *pointer, - const struct iavf_stats *stat) -{ - char *p; - - if (!pointer) { - /* ensure that the ethtool data buffer is zero'd for any stats - * which don't have a valid pointer. - */ - *data = 0; - return; - } - - p = (char *)pointer + stat->stat_offset; - switch (stat->sizeof_stat) { - case sizeof(u64): - *data = *((u64 *)p); - break; - case sizeof(u32): - *data = *((u32 *)p); - break; - case sizeof(u16): - *data = *((u16 *)p); - break; - case sizeof(u8): - *data = *((u8 *)p); - break; - default: - WARN_ONCE(1, "unexpected stat size for %s", - stat->stat_string); - *data = 0; - } -} - -/** - * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location to copy stats from - * @stats: array of stats to copy - * @size: the size of the stats definition - * - * Copy the stats defined by the stats array using the pointer as a base into - * the data buffer supplied by ethtool. Updates the data pointer to point to - * the next empty location for successive calls to __iavf_add_ethtool_stats. - * If pointer is null, set the data values to zero and update the pointer to - * skip these stats. 
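/*
 * The helpers deleted here (they reappear in the new iavf_ethtool_stats.h
 * at the end of this patch) read each statistic through a base pointer
 * plus offsetof(), zero-filling when the base is NULL.  A self-contained
 * model of that mechanism in plain C:
 */
#include <stddef.h>
#include <stdint.h>

struct example_ring_stats {
	uint64_t packets;
	uint64_t bytes;
};

struct example_stat_def {
	const char *name;
	size_t offset;
};

static const struct example_stat_def example_defs[] = {
	{ "packets", offsetof(struct example_ring_stats, packets) },
	{ "bytes",   offsetof(struct example_ring_stats, bytes)   },
};

static void example_copy_stats(uint64_t *out, const void *base)
{
	size_t i;

	for (i = 0; i < sizeof(example_defs) / sizeof(example_defs[0]); i++)
		out[i] = base ? *(const uint64_t *)((const char *)base +
						    example_defs[i].offset)
			      : 0;	/* NULL base yields zeroed slots */
}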
- **/ -static void -__iavf_add_ethtool_stats(u64 **data, void *pointer, - const struct iavf_stats stats[], - const unsigned int size) -{ - unsigned int i; - - for (i = 0; i < size; i++) - iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); -} - -/** - * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location where stats are stored - * @stats: static const array of stat definitions - * - * Macro to ease the use of __iavf_add_ethtool_stats by taking a static - * constant stats array and passing the ARRAY_SIZE(). This avoids typos by - * ensuring that we pass the size associated with the given stats array. - * - * The parameter @stats is evaluated twice, so parameters with side effects - * should be avoided. - **/ -#define iavf_add_ethtool_stats(data, pointer, stats) \ - __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) - -/** - * iavf_add_queue_stats - copy queue statistics into supplied buffer - * @data: ethtool stats buffer - * @ring: the ring to copy - * - * Queue statistics must be copied while protected by - * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. - * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the - * ring pointer is null, zero out the queue stat values and update the data - * pointer. Otherwise safely copy the stats from the ring into the supplied - * buffer and update the data pointer when finished. - * - * This function expects to be called while under rcu_read_lock(). - **/ -static void -iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) -{ - const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats); - const struct iavf_stats *stats = iavf_gstrings_queue_stats; - unsigned int start; - unsigned int i; - - /* To avoid invalid statistics values, ensure that we keep retrying - * the copy until we get a consistent value according to - * u64_stats_fetch_retry_irq. But first, make sure our ring is - * non-null before attempting to access its syncp. - */ - do { - start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); - for (i = 0; i < size; i++) - iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); - } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); - - /* Once we successfully copy the stats in, update the data pointer */ - *data += size; -} - -/** - * __iavf_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * @size: size of the stats array - * - * Format and copy the strings described by stats into the buffer pointed at - * by p. - **/ -static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[], - const unsigned int size, ...) -{ - unsigned int i; - - for (i = 0; i < size; i++) { - va_list args; - - va_start(args, size); - vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); - *p += ETH_GSTRING_LEN; - va_end(args); - } -} - -/** - * iavf_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * - * Format and copy the strings described by the const static stats value into - * the buffer pointed at by p. - * - * The parameter @stats is evaluated twice, so parameters with side effects - * should be avoided. Additionally, stats must be an array such that - * ARRAY_SIZE can be called on it. - **/ -#define iavf_add_stat_strings(p, stats, ...) 
\ - __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) +#include "iavf_ethtool_stats.h" #define VF_STAT(_name, _stat) \ IAVF_STAT(struct iavf_adapter, _name, _stat) @@ -233,12 +33,31 @@ static const struct iavf_stats iavf_gstrings_stats[] = { VF_STAT("tx_broadcast", current_stats.tx_broadcast), VF_STAT("tx_discards", current_stats.tx_discards), VF_STAT("tx_errors", current_stats.tx_errors), +#ifdef IAVF_ADD_PROBES + VF_STAT("tx_tcp_segments", tcp_segs), + VF_STAT("tx_udp_segments", udp_segs), + VF_STAT("tx_tcp_cso", tx_tcp_cso), + VF_STAT("tx_udp_cso", tx_udp_cso), + VF_STAT("tx_sctp_cso", tx_sctp_cso), + VF_STAT("tx_ip4_cso", tx_ip4_cso), + VF_STAT("tx_vlano", tx_vlano), + VF_STAT("rx_tcp_cso", rx_tcp_cso), + VF_STAT("rx_udp_cso", rx_udp_cso), + VF_STAT("rx_sctp_cso", rx_sctp_cso), + VF_STAT("rx_ip4_cso", rx_ip4_cso), + VF_STAT("rx_vlano", rx_vlano), + VF_STAT("rx_tcp_cso_error", rx_tcp_cso_err), + VF_STAT("rx_udp_cso_error", rx_udp_cso_err), + VF_STAT("rx_sctp_cso_error", rx_sctp_cso_err), + VF_STAT("rx_ip4_cso_error", rx_ip4_cso_err), +#endif }; #define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats) #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC /* For now we have one and only one private flag and it is only defined * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead * of leaving all this code sitting around empty we will strip it unless @@ -261,6 +80,7 @@ static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = { }; #define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags) +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ /** * iavf_get_link_ksettings - Get Link Speed and Duplex settings @@ -276,14 +96,27 @@ static int iavf_get_link_ksettings(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; /* Set speed and duplex */ +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + if (ADV_LINK_SUPPORT(adapter)) { + if (SUPPORTED_SPEED(adapter->link_speed_mbps)) + cmd->base.speed = adapter->link_speed_mbps; + else + cmd->base.speed = SPEED_UNKNOWN; + + goto set_duplex; + } + +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: + case VIRTCHNL_LINK_SPEED_40GB: cmd->base.speed = SPEED_40000; break; - case IAVF_LINK_SPEED_25GB: + case VIRTCHNL_LINK_SPEED_25GB: #ifdef SPEED_25000 cmd->base.speed = SPEED_25000; #else @@ -291,26 +124,53 @@ static int iavf_get_link_ksettings(struct net_device *netdev, "Speed is 25G, display not supported by this version of ethtool.\n"); #endif break; - case IAVF_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_20GB: cmd->base.speed = SPEED_20000; break; - case IAVF_LINK_SPEED_10GB: + case VIRTCHNL_LINK_SPEED_10GB: cmd->base.speed = SPEED_10000; break; - case IAVF_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_1GB: cmd->base.speed = SPEED_1000; break; - case IAVF_LINK_SPEED_100MB: + case VIRTCHNL_LINK_SPEED_100MB: cmd->base.speed = SPEED_100; break; default: + cmd->base.speed = SPEED_UNKNOWN; break; } + +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED +set_duplex: +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ cmd->base.duplex = DUPLEX_FULL; return 0; } +#ifndef ETHTOOL_GLINKSETTINGS +/** + * iavf_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex 
settings based on media type. Since we've backported + * the new API constructs to use in the old API, this ends up just being + * a wrapper to iavf_get_link_ksettings. + **/ +static int iavf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ethtool_link_ksettings ks; + + iavf_get_link_ksettings(netdev, &ks); + _kc_ethtool_ksettings_to_cmd(&ks, ecmd); + ecmd->transceiver = XCVR_EXTERNAL; + return 0; +} +#endif /* !ETHTOOL_GLINKSETTINGS */ + /** * iavf_get_sset_count - Get length of string set * @netdev: network interface device structure @@ -323,8 +183,10 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset) if (sset == ETH_SS_STATS) return IAVF_STATS_LEN + (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC else if (sset == ETH_SS_PRIV_FLAGS) return IAVF_PRIV_FLAGS_STR_LEN; +#endif else return -EINVAL; } @@ -362,6 +224,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, rcu_read_unlock(); } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC /** * iavf_get_priv_flag_strings - Get private flag strings * @netdev: network interface device structure @@ -379,6 +242,7 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) data += ETH_GSTRING_LEN; } } +#endif /** * iavf_get_stat_strings - Get stat strings @@ -418,14 +282,17 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) case ETH_SS_STATS: iavf_get_stat_strings(netdev, data); break; +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC case ETH_SS_PRIV_FLAGS: iavf_get_priv_flag_strings(netdev, data); break; +#endif default: break; } } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC /** * iavf_get_priv_flags - report device private flags * @netdev: network interface device structure @@ -508,15 +375,112 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) /* issue a reset to force legacy-rx change to take effect */ if (changed_flags & IAVF_FLAG_LEGACY_RX) { - if (netif_running(netdev)) { - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); - } + if (netif_running(netdev)) + iavf_schedule_reset(adapter); } return 0; } +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#ifndef HAVE_NDO_SET_FEATURES +/** + * iavf_get_rx_csum - Get RX checksum settings + * @netdev: network interface device structure + * + * Returns true or false depending upon RX checksum enabled. + **/ +static u32 iavf_get_rx_csum(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + return adapter->flags & IAVF_FLAG_RX_CSUM_ENABLED; +} + +/** + * iavf_set_rx_csum - Set RX checksum settings + * @netdev: network interface device structure + * @data: RX checksum setting (boolean) + * + **/ +static int iavf_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~IAVF_FLAG_RX_CSUM_ENABLED; + + return 0; +} + +/** + * iavf_get_tx_csum - Get TX checksum settings + * @netdev: network interface device structure + * + * Returns true or false depending upon TX checksum enabled. 
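/*
 * On kernels without ndo_set_features, offloads are toggled through the
 * legacy ethtool hooks added in this block by masking feature bits
 * directly.  A reduced standalone model of that flag handling (the
 * EXAMPLE_F_* bits stand in for NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM):
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_F_IP_CSUM	(1u << 0)
#define EXAMPLE_F_IPV6_CSUM	(1u << 1)

static uint32_t example_set_tx_csum(uint32_t features, bool enable)
{
	if (enable)
		features |= EXAMPLE_F_IP_CSUM | EXAMPLE_F_IPV6_CSUM;
	else
		features &= ~(EXAMPLE_F_IP_CSUM | EXAMPLE_F_IPV6_CSUM);
	return features;
}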
+ **/ +static u32 iavf_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +/** + * iavf_set_tx_csum - Set TX checksum settings + * @netdev: network interface device structure + * @data: TX checksum setting (boolean) + * + **/ +static int iavf_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + + return 0; +} + +/** + * iavf_set_tso - Set TX segmentation offload settings + * @netdev: network interface device structure + * @data: TSO setting (boolean) + * + **/ +static int iavf_set_tso(struct net_device *netdev, u32 data) +{ + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netif_tx_stop_all_queues(netdev); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; +#ifndef HAVE_NETDEV_VLAN_FEATURES + /* disable TSO on all VLANs if they're present */ + if (adapter->vsi.vlgrp) { + struct iavf_adapter *adapter = netdev_priv(netdev); + struct vlan_group *vlgrp = adapter->vsi.vlgrp; + struct net_device *v_netdev; + int i; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(vlgrp, i); + if (v_netdev) { + v_netdev->features &= ~NETIF_F_TSO; + v_netdev->features &= ~NETIF_F_TSO6; + vlan_group_set_device(vlgrp, i, + v_netdev); + } + } + } +#endif + netif_tx_start_all_queues(netdev); + } + return 0; +} + +#endif /* HAVE_NDO_SET_FEATURES */ /** * iavf_get_msglevel - Get debug message level * @netdev: network interface device structure @@ -563,7 +527,9 @@ static void iavf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, iavf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN; +#endif } /** @@ -620,10 +586,8 @@ static int iavf_set_ringparam(struct net_device *netdev, adapter->tx_desc_count = new_tx_count; adapter->rx_desc_count = new_rx_count; - if (netif_running(netdev)) { - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); - } + if (netif_running(netdev)) + iavf_schedule_reset(adapter); return 0; } @@ -687,6 +651,7 @@ static int iavf_get_coalesce(struct net_device *netdev, return __iavf_get_coalesce(netdev, ec, -1); } +#ifdef ETHTOOL_PERQUEUE /** * iavf_get_per_queue_coalesce - get coalesce values for specific queue * @netdev: netdev to read @@ -700,6 +665,7 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, { return __iavf_get_coalesce(netdev, ec, queue); } +#endif /* ETHTOOL_PERQUEUE */ /** * iavf_set_itr_per_queue - set ITR values for specific queue @@ -709,12 +675,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. 
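/*
 * The reworked iavf_set_itr_per_queue() below now rejects a fixed usecs
 * value while adaptive moderation is enabled for that direction.  A
 * reduced model of the check (dynamic_bit stands in for IAVF_ITR_DYNAMIC,
 * which marks a ring as adaptive):
 */
#include <stdbool.h>
#include <stdint.h>

static int example_check_itr(uint16_t ring_setting, uint16_t dynamic_bit,
			     uint32_t requested_usecs, bool adaptive_on)
{
	uint16_t fixed = ring_setting & (uint16_t)~dynamic_bit;

	/* a fixed-rate change is meaningless while adaptive is active */
	if (adaptive_on && requested_usecs != fixed)
		return -1;	/* the driver returns -EINVAL here */
	return 0;
}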
**/ -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, - struct ethtool_coalesce *ec, int queue) +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; + u16 itr_setting; + + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + } rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); @@ -737,6 +722,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. */ + return 0; } /** @@ -764,7 +750,9 @@ static int __iavf_set_coalesce(struct net_device *netdev, (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) { netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; - } else if (ec->tx_coalesce_usecs == 0) { + } + + if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) || @@ -778,9 +766,11 @@ static int __iavf_set_coalesce(struct net_device *netdev, */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) - iavf_set_itr_per_queue(adapter, ec, i); + if (iavf_set_itr_per_queue(adapter, ec, i)) + return -EINVAL; } else if (queue < adapter->num_active_queues) { - iavf_set_itr_per_queue(adapter, ec, queue); + if (iavf_set_itr_per_queue(adapter, ec, queue)) + return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); @@ -803,6 +793,7 @@ static int iavf_set_coalesce(struct net_device *netdev, return __iavf_set_coalesce(netdev, ec, -1); } +#ifdef ETHTOOL_PERQUEUE /** * iavf_set_per_queue_coalesce - set specific queue's coalesce settings * @netdev: the netdev to change @@ -816,7 +807,9 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, { return __iavf_set_coalesce(netdev, ec, queue); } +#endif /* ETHTOOL_PERQUEUE */ +#ifdef ETHTOOL_GRXRINGS /** * iavf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure @@ -825,8 +818,13 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Returns Success if the command is supported. 
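/*
 * get_rxnfc is a dispatcher on the requested command; the visible code
 * initializes ret to -EOPNOTSUPP, and (in the upstream driver) the one
 * query answered is the RX ring count.  A standalone model of that
 * dispatch (EXAMPLE_GRXRINGS mirrors ETHTOOL_GRXRINGS, 0x2d):
 */
#include <stdint.h>

#define EXAMPLE_GRXRINGS	0x2d
#define EXAMPLE_EOPNOTSUPP	95

static int example_get_rxnfc(uint32_t cmd, uint32_t num_active_queues,
			     uint64_t *data)
{
	switch (cmd) {
	case EXAMPLE_GRXRINGS:
		*data = num_active_queues;	/* report usable RX rings */
		return 0;
	default:
		return -EXAMPLE_EOPNOTSUPP;
	}
}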
**/ -static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, +static int iavf_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else u32 *rule_locs) +#endif { struct iavf_adapter *adapter = netdev_priv(netdev); int ret = -EOPNOTSUPP; @@ -846,6 +844,8 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, return ret; } +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_GCHANNELS /** * iavf_get_channels: get the number of channels supported by the device * @netdev: network interface device structure @@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = IAVF_MAX_REQ_QUEUES; + ch->max_combined = adapter->vsi_res->num_queue_pairs; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; @@ -881,34 +881,37 @@ static int iavf_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct iavf_adapter *adapter = netdev_priv(netdev); - int num_req = ch->combined_count; - - if (num_req != adapter->num_active_queues && - !(adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { - dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); - return -EINVAL; - } + u32 num_req = ch->combined_count; +#ifdef __TC_MQPRIO_MODE_MAX if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { - dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); + dev_info(&adapter->pdev->dev, "Cannot set channels since ADQ is enabled.\n"); return -EINVAL; } +#endif /* __TC_MQPRIO_MODE_MAX */ /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. */ - if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES) + if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; + if (num_req == adapter->num_active_queues) + return 0; + if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) return -EINVAL; adapter->num_req_queues = num_req; - return iavf_request_queues(adapter, num_req); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_schedule_reset(adapter); + return 0; } +#endif /* ETHTOOL_GCHANNELS */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) /** * iavf_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure @@ -944,22 +947,29 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev) * * Reads the indirection table directly from the hardware. Always returns 0. 
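/*
 * The reworked iavf_get_rxfh() below copies the cached key and widens the
 * 8-bit LUT into ethtool's u32 indirection table, skipping whichever
 * output the caller left NULL.  Standalone model of the widening step:
 */
#include <stddef.h>
#include <stdint.h>

static void example_fill_indir(uint32_t *indir, const uint8_t *lut,
			       size_t lut_size)
{
	size_t i;

	if (!indir || !lut)
		return;
	for (i = 0; i < lut_size; i++)
		indir[i] = lut[i];	/* each LUT byte names an RX queue */
}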
**/ +#ifdef HAVE_RXFH_HASHFUNC static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; +#ifdef HAVE_RXFH_HASHFUNC if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - memcpy(key, adapter->rss_key, adapter->rss_key_size); +#endif - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + if (key) + memcpy(key, adapter->rss_key, adapter->rss_key_size); + + if (indir) + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; return 0; } @@ -974,21 +984,38 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ +#ifdef HAVE_RXFH_HASHFUNC static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) + const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int iavf_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; + +#endif if (!indir) return 0; - if (key) + /* Verify user input.
*/ + for (i = 0; i < adapter->rss_lut_size; i++) { + if (indir[i] >= adapter->num_active_queues) + return -EINVAL; + } + + if (key) { memcpy(adapter->rss_key, key, adapter->rss_key_size); + } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) @@ -996,33 +1023,74 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, return iavf_config_rss(adapter); } +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ static const struct ethtool_ops iavf_ethtool_ops = { .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = iavf_get_ringparam, .set_ringparam = iavf_set_ringparam, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = iavf_get_rx_csum, + .set_rx_csum = iavf_set_rx_csum, + .get_tx_csum = iavf_get_tx_csum, + .set_tx_csum = iavf_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = iavf_set_tso, +#endif /* HAVE_NDO_SET_FEATURES */ .get_strings = iavf_get_strings, .get_ethtool_stats = iavf_get_ethtool_stats, .get_sset_count = iavf_get_sset_count, +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC .get_priv_flags = iavf_get_priv_flags, .set_priv_flags = iavf_set_priv_flags, +#endif .get_msglevel = iavf_get_msglevel, .set_msglevel = iavf_set_msglevel, .get_coalesce = iavf_get_coalesce, .set_coalesce = iavf_set_coalesce, +#ifdef ETHTOOL_PERQUEUE .get_per_queue_coalesce = iavf_get_per_queue_coalesce, .set_per_queue_coalesce = iavf_set_per_queue_coalesce, +#endif +#ifdef ETHTOOL_GRXRINGS .get_rxnfc = iavf_get_rxnfc, +#endif +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = iavf_get_rxfh_key_size, .get_rxfh_indir_size = iavf_get_rxfh_indir_size, .get_rxfh = iavf_get_rxfh, .set_rxfh = iavf_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_GCHANNELS .get_channels = iavf_get_channels, .set_channels = iavf_set_channels, - .get_rxfh_key_size = iavf_get_rxfh_key_size, +#endif +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifdef ETHTOOL_GLINKSETTINGS .get_link_ksettings = iavf_get_link_ksettings, +#else + .get_settings = iavf_get_settings, +#endif /* ETHTOOL_GLINKSETTINGS */ }; +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext iavf_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_channels = iavf_get_channels, + .set_channels = iavf_set_channels, +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = iavf_get_rxfh_key_size, + .get_rxfh_indir_size = iavf_get_rxfh_indir_size, + .get_rxfh = iavf_get_rxfh, + .set_rxfh = iavf_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + /** * iavf_set_ethtool_ops - Initialize ethtool ops struct * @netdev: network interface device structure @@ -1033,4 +1101,9 @@ static const struct ethtool_ops iavf_ethtool_ops = { void iavf_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &iavf_ethtool_ops; +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &iavf_ethtool_ops_ext); +#endif } + +#endif /* SIOCETHTOOL */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool_stats.h b/drivers/net/ethernet/intel/iavf/iavf_ethtool_stats.h new file mode 100644 index 000000000000..8ffb919ff441 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool_stats.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2013, Intel Corporation. 
*/ + +/* ethtool statistics helpers */ + +/** + * struct iavf_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the IAVF_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the iavf_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the iavf_add_stat_string() helper function. + **/ +struct iavf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro to define an iavf_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ +#define IAVF_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + + +/* Helper macro for defining some statistics related to queues */ +#define IAVF_QUEUE_STAT(_name, _stat) \ + IAVF_STAT(struct iavf_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct iavf_stats iavf_gstrings_queue_stats[] = { + IAVF_QUEUE_STAT("%s-%u.packets", stats.packets), + IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + + +/** + * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement iavf_add_ethtool_stats and + * iavf_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +iavf_add_one_ethtool_stat(u64 *data, void *pointer, + const struct iavf_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. 
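/* Illustrative aside, not part of the driver: the offset/size-driven iavf_stats table above lets one generic copy routine serve every stats group. The standalone sketch below mirrors the idea; the type and field names are invented, and the memcpy widening assumes a little-endian host, where the driver instead switches on the field size. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_ring {
	uint64_t packets;
	uint32_t restarts;
};

struct demo_stat {
	const char *name;
	int size;	/* sizeof() the field */
	int offset;	/* offsetof() the field */
};

#define DEMO_STAT(_type, _name, _field) \
	{ _name, sizeof(((_type *)0)->_field), offsetof(_type, _field) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(struct demo_ring, "packets", packets),
	DEMO_STAT(struct demo_ring, "restarts", restarts),
};

/* Widen any u8..u64 field into a u64 buffer slot. */
static uint64_t read_stat(const void *base, const struct demo_stat *s)
{
	uint64_t v = 0;

	memcpy(&v, (const char *)base + s->offset, s->size);
	return v;	/* little-endian assumption, for brevity */
}

int main(void)
{
	struct demo_ring ring = { .packets = 12345, .restarts = 2 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)read_stat(&ring, &demo_stats[i]));
	return 0;
}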
Updates the data pointer to point to + * the next empty location for successive calls to __iavf_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__iavf_add_ethtool_stats(u64 **data, void *pointer, + const struct iavf_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __iavf_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define iavf_add_ethtool_stats(data, pointer, stats) \ + __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * iavf_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. + * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats); + const struct iavf_stats *stats = iavf_gstrings_queue_stats; +#ifdef HAVE_NDO_GET_STATS64 + unsigned int start; +#endif + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ +#ifdef HAVE_NDO_GET_STATS64 + do { + start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); +#endif + for (i = 0; i < size; i++) { + iavf_add_one_ethtool_stat(&(*data)[i], ring, + &stats[i]); + } +#ifdef HAVE_NDO_GET_STATS64 + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + + +/** + * __iavf_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * iavf_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. 
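/* Illustrative aside, not part of the driver: __iavf_add_stat_strings() above restarts its va_list on every template so each row is formatted with the same variadic arguments (e.g. queue type and index). The sketch below shows that pattern with made-up names and a stand-in for ETH_GSTRING_LEN. */
#include <stdarg.h>
#include <stdio.h>

#define GSTRING_LEN 32

static void add_strings(char (*slots)[GSTRING_LEN], int nslots,
			const char *const *templates, ...)
{
	int i;

	for (i = 0; i < nslots; i++) {
		va_list args;

		/* restart the argument list for every row */
		va_start(args, templates);
		vsnprintf(slots[i], GSTRING_LEN, templates[i], args);
		va_end(args);
	}
}

int main(void)
{
	static const char *const templates[] = {
		"%s-%u.packets", "%s-%u.bytes"
	};
	char slots[2][GSTRING_LEN];

	add_strings(slots, 2, templates, "tx", 0u);
	printf("%s\n%s\n", slots[0], slots[1]);
	return 0;
}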
+ * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define iavf_add_stat_strings(p, stats, ...) \ + __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) diff --git a/drivers/net/ethernet/intel/iavf/iavf_helper.h b/drivers/net/ethernet/intel/iavf/iavf_helper.h new file mode 100644 index 000000000000..edfa21698ea4 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_helper.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2013, Intel Corporation. */ + +#ifndef _IAVF_HELPER_H_ +#define _IAVF_HELPER_H_ + +#include "iavf_alloc.h" + +/** + * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +inline int iavf_allocate_dma_mem_d(struct iavf_hw *hw, + struct iavf_dma_mem *mem, + __always_unused enum iavf_memory_type mtype, + u64 size, u32 alignment) +{ + struct iavf_adapter *nf = (struct iavf_adapter *)hw->back; + + mem->size = ALIGN(size, alignment); +#ifdef HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM + mem->va = dma_alloc_coherent(&nf->pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); +#else /* HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM */ + mem->va = dma_zalloc_coherent(&nf->pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); +#endif /* HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM */ + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * iavf_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +inline int iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem) +{ + struct iavf_adapter *nf = (struct iavf_adapter *)hw->back; + + dma_free_coherent(&nf->pdev->dev, mem->size, mem->va, mem->pa); + mem->va = NULL; + mem->pa = 0; + mem->size = 0; + + return 0; +} + +/** + * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +inline int iavf_allocate_virt_mem_d(struct iavf_hw *hw, + struct iavf_virt_mem *mem, + u32 size) +{ + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * iavf_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +inline int iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem) +{ + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + mem->va = NULL; + mem->size = 0; + + return 0; +} + +/* prototype */ +inline void iavf_destroy_spinlock_d(struct iavf_spinlock *sp); +inline void iavf_acquire_spinlock_d(struct iavf_spinlock *sp); +inline void iavf_release_spinlock_d(struct iavf_spinlock *sp); + +/** + * iavf_init_spinlock_d - OS specific spinlock init for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +static inline void iavf_init_spinlock_d(struct iavf_spinlock *sp) +{ + mutex_init((struct mutex *)sp); +} + +/** + * iavf_acquire_spinlock_d - OS specific spinlock acquire for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +inline void iavf_acquire_spinlock_d(struct iavf_spinlock *sp) +{ + mutex_lock((struct mutex *)sp); +} + +/** + * iavf_release_spinlock_d - OS specific spinlock release for shared 
code + * @sp: pointer to a spinlock declared in driver space + **/ +inline void iavf_release_spinlock_d(struct iavf_spinlock *sp) +{ + mutex_unlock((struct mutex *)sp); +} + +/** + * iavf_destroy_spinlock_d - OS specific spinlock destroy for shared code + * @sp: pointer to a spinlock declared in driver space + **/ +inline void iavf_destroy_spinlock_d(struct iavf_spinlock *sp) +{ + mutex_destroy((struct mutex *)sp); +} +#endif /* _IAVF_HELPER_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 821987da5698..5a3122e0b825 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #include "iavf.h" +#include "iavf_helper.h" #include "iavf_prototype.h" #include "iavf_client.h" /* All iavf tracepoints are defined by the include below, which must @@ -14,25 +15,21 @@ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); static int iavf_close(struct net_device *netdev); -static int iavf_init_get_resources(struct iavf_adapter *adapter); +static void iavf_init_get_resources(struct iavf_adapter *adapter); static int iavf_check_reset_complete(struct iavf_hw *hw); +static void iavf_handle_reset(struct iavf_adapter *adapter); char iavf_driver_name[] = "iavf"; static const char iavf_driver_string[] = "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; -#define DRV_KERN "-k" - -#define DRV_VERSION_MAJOR 3 -#define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 3 -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." 
\ - __stringify(DRV_VERSION_BUILD) \ - DRV_KERN +#define DRV_VERSION_MAJOR (3) +#define DRV_VERSION_MINOR (9) +#define DRV_VERSION_BUILD (3) +#define DRV_VERSION "3.9.3" const char iavf_driver_version[] = DRV_VERSION; static const char iavf_copyright[] = - "Copyright (c) 2013 - 2018 Intel Corporation."; + "Copyright (c) 2013, Intel Corporation."; /* iavf_pci_tbl - PCI Device ID Table * @@ -55,104 +52,24 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); -MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Driver"); +MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const struct net_device_ops iavf_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static const struct net_device_ops_ext iavf_netdev_ops_ext; +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ struct workqueue_struct *iavf_wq; -/** - * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to fill out - * @size: size of memory requested - * @alignment: what to align the allocation to - **/ -enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, - struct iavf_dma_mem *mem, - u64 size, u32 alignment) -{ - struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; - - if (!mem) - return IAVF_ERR_PARAM; - - mem->size = ALIGN(size, alignment); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, - (dma_addr_t *)&mem->pa, GFP_KERNEL); - if (mem->va) - return 0; - else - return IAVF_ERR_NO_MEMORY; -} - -/** - * iavf_free_dma_mem_d - OS specific memory free for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to free - **/ -enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, - struct iavf_dma_mem *mem) -{ - struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; - - if (!mem || !mem->va) - return IAVF_ERR_PARAM; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, (dma_addr_t)mem->pa); - return 0; -} - -/** - * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to fill out - * @size: size of memory requested - **/ -enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, - struct iavf_virt_mem *mem, u32 size) -{ - if (!mem) - return IAVF_ERR_PARAM; - - mem->size = size; - mem->va = kzalloc(size, GFP_KERNEL); - - if (mem->va) - return 0; - else - return IAVF_ERR_NO_MEMORY; -} - -/** - * iavf_free_virt_mem_d - OS specific memory free for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to free - **/ -enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, - struct iavf_virt_mem *mem) -{ - if (!mem) - return IAVF_ERR_PARAM; - - /* it's ok to kfree a NULL pointer */ - kfree(mem->va); - - return 0; -} - /** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ void iavf_schedule_reset(struct iavf_adapter *adapter) { - if (!(adapter->flags & - (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); - } + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); } /** @@ -265,7 +182,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_hw *hw = &adapter->hw; - /* 
handle non-queue interrupts, these reads clear the registers */ + /* handle non-queue interrupts */ rd32(hw, IAVF_VFINT_ICR01); rd32(hw, IAVF_VFINT_ICR0_ENA1); @@ -376,6 +293,7 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; } +#ifdef HAVE_IRQ_AFFINITY_NOTIFY /** * iavf_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed @@ -402,6 +320,7 @@ static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, * receive notifications. **/ static void iavf_irq_affinity_release(struct kref *ref) {} +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ /** * iavf_request_traffic_irqs - Initialize MSI-X interrupts @@ -425,19 +344,18 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) for (vector = 0; vector < q_vectors; vector++) { struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; - irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%u", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%u", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%u", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; @@ -452,17 +370,21 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) "Request_irq failed, error: %d\n", err); goto free_queue_irqs; } +#ifdef HAVE_IRQ_AFFINITY_NOTIFY /* register for affinity change notifications */ q_vector->affinity_notify.notify = iavf_irq_affinity_notify; q_vector->affinity_notify.release = iavf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so * it's safe to use as a hint for irq_set_affinity_hint. 
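/* Illustrative aside, not part of the driver: cpumask_local_spread() picks a distinct online CPU per vector, preferring NUMA-local ones. The toy stand-in below shows the round-robin spread it degenerates to on a single-node system; it is not the kernel helper itself. */
#include <stdio.h>

static int vector_to_cpu(int v_idx, int num_online_cpus)
{
	return v_idx % num_online_cpus;
}

int main(void)
{
	int v;

	for (v = 0; v < 8; v++)
		printf("vector %d -> CPU %d\n", v, vector_to_cpu(v, 4));
	return 0;
}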
*/ cpu = cpumask_local_spread(q_vector->v_idx, -1); irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); +#endif /* HAVE_IRQ_AFFINITY_HINT */ } return 0; @@ -471,8 +393,12 @@ free_queue_irqs: while (vector) { vector--; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY irq_set_affinity_notifier(irq_num, NULL); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT irq_set_affinity_hint(irq_num, NULL); +#endif free_irq(irq_num, &adapter->q_vectors[vector]); } return err; @@ -523,8 +449,12 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) for (vector = 0; vector < q_vectors; vector++) { irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY irq_set_affinity_notifier(irq_num, NULL); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT irq_set_affinity_hint(irq_num, NULL); +#endif free_irq(irq_num, &adapter->q_vectors[vector]); } } @@ -604,6 +534,24 @@ static void iavf_configure_rx(struct iavf_adapter *adapter) } } +#ifdef HAVE_VLAN_RX_REGISTER +/** + * iavf_vlan_rx_register - Register for RX vlan filtering, enable VLAN + * tag stripping + * @netdev: netdevice structure + * @grp: vlan group data + **/ +static void iavf_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + /* Since VLAN tag stripping is always enabled, just store vlgrp. */ + adapter->vsi.vlgrp = grp; + +} + +#endif /** * iavf_find_vlan - Search filter list for specific vlan filter * @adapter: board private structure @@ -676,23 +624,70 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) spin_unlock_bh(&adapter->mac_vlan_list_lock); } +/** + * iavf_restore_filters + * @adapter: board private structure + * + * Restore existing non MAC filters when VF netdev comes back up + **/ +static void iavf_restore_filters(struct iavf_adapter *adapter) +{ + /* re-add all VLAN filters */ +#ifdef HAVE_VLAN_RX_REGISTER + if (adapter->vsi.vlgrp) + iavf_vlan_rx_register(adapter->netdev, adapter->vsi.vlgrp); +#else /* HAVE_VLAN_RX_REGISTER */ + if (VLAN_ALLOWED(adapter)) { + u16 vid; + + for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) + iavf_add_vlan(adapter, vid); + } + +#endif /* HAVE_VLAN_RX_REGISTER */ +} + /** * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int iavf_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +#else +static int iavf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif { struct iavf_adapter *adapter = netdev_priv(netdev); if (!VLAN_ALLOWED(adapter)) return -EIO; + if (iavf_add_vlan(adapter, vid) == NULL) return -ENOMEM; + +#ifndef HAVE_VLAN_RX_REGISTER + set_bit(vid, adapter->vsi.active_vlans); +#endif /* HAVE_VLAN_RX_REGISTER */ return 0; } +#else +static void iavf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + if (!VLAN_ALLOWED(adapter)) + return; + + iavf_add_vlan(adapter, vid); +#ifndef HAVE_VLAN_RX_REGISTER + set_bit(vid, adapter->vsi.active_vlans); +#endif /* HAVE_VLAN_RX_REGISTER */ +} +#endif /** * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device @@ -700,17 +695,40 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, * @proto: unused protocol data * @vid: VLAN tag **/ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int 
iavf_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +#else +static int iavf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct iavf_adapter *adapter = netdev_priv(netdev); - if (VLAN_ALLOWED(adapter)) { - iavf_del_vlan(adapter, vid); - return 0; - } - return -EIO; + if (!VLAN_ALLOWED(adapter)) + return -EIO; + + iavf_del_vlan(adapter, vid); +#ifndef HAVE_VLAN_RX_REGISTER + clear_bit(vid, adapter->vsi.active_vlans); +#endif /* HAVE_VLAN_RX_REGISTER */ + + return 0; } +#else +static void iavf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + if (!VLAN_ALLOWED(adapter)) + return; + + iavf_del_vlan(adapter, vid); +#ifndef HAVE_VLAN_RX_REGISTER + clear_bit(vid, adapter->vsi.active_vlans); +#endif /* HAVE_VLAN_RX_REGISTER */ +} +#endif /** * iavf_find_filter - Search filter list for specific mac filter @@ -762,6 +780,7 @@ iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; + f->is_new_mac = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; @@ -853,6 +872,7 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) f->remove = true; adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } + return 0; } @@ -865,8 +885,10 @@ static void iavf_set_rx_mode(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); spin_lock_bh(&adapter->mac_vlan_list_lock); + __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); + spin_unlock_bh(&adapter->mac_vlan_list_lock); if (netdev->flags & IFF_PROMISC && @@ -909,9 +931,9 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter) **/ static void iavf_napi_disable_all(struct iavf_adapter *adapter) { - int q_idx; - struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + struct iavf_q_vector *q_vector; + int q_idx; for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; @@ -949,7 +971,12 @@ static void iavf_configure(struct iavf_adapter *adapter) **/ static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __IAVF_RUNNING; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_active_queues > 1) + adapter->netdev->features |= NETIF_F_MULTI_QUEUE; + +#endif + iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); @@ -970,8 +997,8 @@ void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct iavf_vlan_filter *vlf; - struct iavf_mac_filter *f; struct iavf_cloud_filter *cf; + struct iavf_mac_filter *f; if (adapter->state <= __IAVF_DOWN_PENDING) return; @@ -979,8 +1006,8 @@ void iavf_down(struct iavf_adapter *adapter) netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; - iavf_napi_disable_all(adapter); iavf_irq_disable(adapter); + iavf_napi_disable_all(adapter); spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1015,10 +1042,16 @@ void iavf_down(struct iavf_adapter *adapter) * here for this to complete. The watchdog is still running * and it will take care of this. 
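/* Illustrative aside, not part of the driver: iavf_down() above only marks work in aq_required because a single virtchnl operation may be outstanding at a time; the watchdog later drains the flags one request per pass (see iavf_process_aq_command() further down). Sketch of that pattern with invented flag values: */
#include <stdio.h>

#define AQ_DEL_MAC_FILTER	0x01u
#define AQ_DEL_VLAN_FILTER	0x02u
#define AQ_DISABLE_QUEUES	0x04u

/* Send at most one pending request, highest-priority first. */
static int process_one(unsigned int *aq_required)
{
	if (*aq_required & AQ_DEL_MAC_FILTER) {
		*aq_required &= ~AQ_DEL_MAC_FILTER;
		puts("sent: delete MAC filters");
		return 0;
	}
	if (*aq_required & AQ_DEL_VLAN_FILTER) {
		*aq_required &= ~AQ_DEL_VLAN_FILTER;
		puts("sent: delete VLAN filters");
		return 0;
	}
	if (*aq_required & AQ_DISABLE_QUEUES) {
		*aq_required &= ~AQ_DISABLE_QUEUES;
		puts("sent: disable queues");
		return 0;
	}
	return -1;	/* nothing pending */
}

int main(void)
{
	unsigned int aq_required = AQ_DEL_MAC_FILTER | AQ_DEL_VLAN_FILTER |
				   AQ_DISABLE_QUEUES;

	while (!process_one(&aq_required))
		;	/* the watchdog reschedules between passes */
	return 0;
}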
*/ - adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; + /* In case the queue configure or enable operations are still + * pending from when the interface was opened, make sure + * they're canceled here. + */ + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES; } mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); @@ -1036,34 +1069,36 @@ void iavf_down(struct iavf_adapter *adapter) static int iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) { - int err, vector_threshold; + int v_actual; /* We'll want at least 3 (vector_threshold): * 0) Other (Admin Queue and link, mostly) * 1) TxQ[0] Cleanup * 2) RxQ[0] Cleanup - */ - vector_threshold = MIN_MSIX_COUNT; - - /* The more we get, the more we will assign to Tx/Rx Cleanup + * + * The more we get, the more we will assign to Tx/Rx Cleanup * for the separate queues...where Rx Cleanup >= Tx Cleanup. * Right now, we simply care about how many we'll get; we'll * set them up later while requesting irq's. */ - err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, - vector_threshold, vectors); - if (err < 0) { - dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); + v_actual = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + MIN_MSIX_COUNT, vectors); + if (v_actual < 0) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts: %d\n", + v_actual); kfree(adapter->msix_entries); adapter->msix_entries = NULL; - return err; + return v_actual; + } + + if (CLIENT_ALLOWED(adapter)) { + adapter->num_iwarp_msix = (v_actual - 1) / 2; + adapter->num_msix_vectors = v_actual - adapter->num_iwarp_msix; + adapter->iwarp_base_vector = adapter->num_msix_vectors; + } else { + adapter->num_msix_vectors = v_actual; } - /* Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NONQ_VECS, or the number of - * vectors we were allocated. 
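/* Illustrative aside, not part of the driver: pci_enable_msix_range() returns how many vectors were actually granted within [min, max]. The sketch below reproduces the arithmetic iavf_acquire_msix_vectors() above applies when the RDMA client is allowed: roughly half of the non-AQ vectors go to iwarp, the remainder (plus the AQ vector) stay with the LAN driver. */
#include <stdio.h>

static void split_vectors(int v_actual, int client_allowed)
{
	int num_iwarp = 0;
	int num_lan = v_actual;

	if (client_allowed) {
		num_iwarp = (v_actual - 1) / 2;
		num_lan = v_actual - num_iwarp;
	}
	printf("%d granted -> %d LAN (incl. AQ), %d iwarp\n",
	       v_actual, num_lan, num_iwarp);
}

int main(void)
{
	split_vectors(9, 1);	/* e.g. nine vectors granted */
	split_vectors(3, 0);	/* bare minimum, no client */
	return 0;
}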
- */ - adapter->num_msix_vectors = err; return 0; } @@ -1103,9 +1138,11 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) */ if (adapter->num_req_queues) num_active_queues = adapter->num_req_queues; +#ifdef __TC_MQPRIO_MODE_MAX else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) num_active_queues = adapter->ch_config.total_qps; +#endif /* __TC_MQPRIO_MODE_MAX */ else num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, @@ -1129,16 +1166,17 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) tx_ring->queue_index = i; tx_ring->netdev = adapter->netdev; - tx_ring->dev = &adapter->pdev->dev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); tx_ring->count = adapter->tx_desc_count; tx_ring->itr_setting = IAVF_ITR_TX_DEF; + if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; - rx_ring->dev = &adapter->pdev->dev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); rx_ring->count = adapter->rx_desc_count; rx_ring->itr_setting = IAVF_ITR_RX_DEF; } @@ -1190,10 +1228,147 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) adapter->msix_entries[vector].entry = vector; err = iavf_acquire_msix_vectors(adapter, v_budget); - out: +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + /* Notify the stack of the (possibly) reduced Tx Queue count. */ + adapter->netdev->egress_subqueue_count = pairs; +#else /* CONFIG_NETDEVICES_MULTIQUEUE */ netif_set_real_num_rx_queues(adapter->netdev, pairs); netif_set_real_num_tx_queues(adapter->netdev, pairs); +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ + return err; +} + +/** + * iavf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) +{ + int q_idx = 0, num_q_vectors; + struct iavf_q_vector *q_vector; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), + GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + q_vector->adapter = adapter; + q_vector->vsi = &adapter->vsi; + q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); +#endif + netif_napi_add(adapter->netdev, &q_vector->napi, + iavf_napi_poll, NAPI_POLL_WEIGHT); + } + + return 0; +} + +/** + * iavf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
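/* Illustrative aside, not part of the driver: each MSI-X queue vector gets one q_vector context carrying its NAPI instance; iavf_alloc_q_vectors() above and iavf_free_q_vectors() just below allocate them as a single array and tear them down in one place. User-space sketch of that lifecycle with invented types: */
#include <stdio.h>
#include <stdlib.h>

struct demo_q_vector {
	int v_idx;	/* the NAPI context would live here too */
};

static struct demo_q_vector *alloc_q_vectors(int n)
{
	struct demo_q_vector *v = calloc(n, sizeof(*v));
	int i;

	if (!v)
		return NULL;
	for (i = 0; i < n; i++)
		v[i].v_idx = i;	/* netif_napi_add() would go here */
	return v;
}

int main(void)
{
	struct demo_q_vector *v = alloc_q_vectors(4);

	if (!v)
		return 1;
	printf("allocated 4 q_vectors\n");
	free(v);	/* netif_napi_del() analogue comes first */
	return 0;
}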
+ **/ +static void iavf_free_q_vectors(struct iavf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (!adapter->q_vectors) + return; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + } + kfree(adapter->q_vectors); + adapter->q_vectors = NULL; +} + +/** + * iavf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * iavf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) +{ + int err; + + err = iavf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + rtnl_lock(); + err = iavf_set_interrupt_capability(adapter); + rtnl_unlock(); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = iavf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + +#ifdef __TC_MQPRIO_MODE_MAX + /* If we've made it so far while ADQ flag being ON, then we haven't + * bailed out anywhere in middle. And ADQ isn't just enabled but actual + * resources have been allocated in the reset path. + * Now we can truly claim that ADQ is enabled. + */ + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) + dev_info(&adapter->pdev->dev, "ADQ Enabled, %u TCs created", + adapter->num_tc); +#endif /* __TC_MQPRIO_MODE_MAX */ + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; +err_alloc_q_vectors: + iavf_reset_interrupt_capability(adapter); +err_set_interrupt: + iavf_free_queues(adapter); +err_alloc_queues: return err; } @@ -1212,7 +1387,8 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", + dev_err(&adapter->pdev->dev, + "Cannot configure RSS, command %d pending\n", adapter->current_op); return -EBUSY; } @@ -1223,9 +1399,9 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) iavf_stat_str(hw, ret), iavf_aq_str(hw, hw->aq.asq_last_status)); return ret; - } + ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, adapter->rss_lut, adapter->rss_lut_size); if (ret) { @@ -1235,7 +1411,6 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) } return ret; - } /** @@ -1325,137 +1500,6 @@ static int iavf_init_rss(struct iavf_adapter *adapter) return ret; } -/** - * iavf_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. 
- **/ -static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) -{ - int q_idx = 0, num_q_vectors; - struct iavf_q_vector *q_vector; - - num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), - GFP_KERNEL); - if (!adapter->q_vectors) - return -ENOMEM; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = &adapter->q_vectors[q_idx]; - q_vector->adapter = adapter; - q_vector->vsi = &adapter->vsi; - q_vector->v_idx = q_idx; - q_vector->reg_idx = q_idx; - cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - netif_napi_add(adapter->netdev, &q_vector->napi, - iavf_napi_poll, NAPI_POLL_WEIGHT); - } - - return 0; -} - -/** - * iavf_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void iavf_free_q_vectors(struct iavf_adapter *adapter) -{ - int q_idx, num_q_vectors; - int napi_vectors; - - if (!adapter->q_vectors) - return; - - num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - napi_vectors = adapter->num_active_queues; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; - - if (q_idx < napi_vectors) - netif_napi_del(&q_vector->napi); - } - kfree(adapter->q_vectors); - adapter->q_vectors = NULL; -} - -/** - * iavf_reset_interrupt_capability - Reset MSIX setup - * @adapter: board private structure - * - **/ -void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) -{ - if (!adapter->msix_entries) - return; - - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; -} - -/** - * iavf_init_interrupt_scheme - Determine if MSIX is supported and init - * @adapter: board private structure to initialize - * - **/ -int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) -{ - int err; - - err = iavf_alloc_queues(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - rtnl_lock(); - err = iavf_set_interrupt_capability(adapter); - rtnl_unlock(); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } - - err = iavf_alloc_q_vectors(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; - } - - /* If we've made it so far while ADq flag being ON, then we haven't - * bailed out anywhere in middle. And ADq isn't just enabled but actual - * resources have been allocated in the reset path. - * Now we can truly claim that ADq is enabled. - */ - if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) - dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", - adapter->num_tc); - - dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", - (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", - adapter->num_active_queues); - - return 0; -err_alloc_q_vectors: - iavf_reset_interrupt_capability(adapter); -err_set_interrupt: - iavf_free_queues(adapter); -err_alloc_queues: - return err; -} - /** * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure @@ -1480,7 +1524,7 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) struct net_device *netdev = adapter->netdev; int err; - if (netif_running(netdev)) + if (!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) iavf_free_traffic_irqs(adapter); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); @@ -1500,13 +1544,257 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_map_rings_to_vectors(adapter); +err: + return err; +} +/** + * iavf_startup - first step of driver startup + * @adapter: board private structure + * + * Function process __IAVF_STARTUP driver state. + * When success the state is changed to __IAVF_INIT_VERSION_CHECK + * when fails the state is changed to __IAVF_INIT_FAILED + **/ +static void iavf_startup(struct iavf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct iavf_hw *hw = &adapter->hw; + int ret; + + WARN_ON(adapter->state != __IAVF_STARTUP); + + /* driver loaded, probe complete */ + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + ret = iavf_set_mac_type(hw); + if (ret) { + dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", ret); + goto err; + } + + ret = iavf_check_reset_complete(hw); + if (ret) { + dev_dbg(&pdev->dev, "Device is still in reset (%d), retrying\n", + ret); + goto err; + } + + hw->aq.num_arq_entries = IAVF_AQ_LEN; + hw->aq.num_asq_entries = IAVF_AQ_LEN; + hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + + ret = iavf_init_adminq(hw); + if (ret) { + dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", ret); + goto err; + } + ret = iavf_send_api_ver(adapter); + if (ret) { + dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret); + iavf_shutdown_adminq(hw); + goto err; + } + iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); + return; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_version_check - second step of driver startup + * @adapter: board private structure + * + * Function process __IAVF_INIT_VERSION_CHECK driver state. 
+ * When success the state is changed to __IAVF_INIT_GET_RESOURCES + * when fails the state is changed to __IAVF_INIT_FAILED + **/ +static void iavf_init_version_check(struct iavf_adapter *adapter) +{ + struct iavf_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); + + if (!iavf_asq_done(hw)) { + dev_err(&pdev->dev, "Admin queue command never completed\n"); + iavf_shutdown_adminq(hw); + iavf_change_state(adapter, __IAVF_STARTUP); + goto err; + } + + /* aq msg sent, awaiting reply */ + ret = iavf_verify_api_ver(adapter); + if (ret) { + if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK) + ret = iavf_send_api_ver(adapter); + else + dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", + adapter->pf_version.major, + adapter->pf_version.minor, + VIRTCHNL_VERSION_MAJOR, + VIRTCHNL_VERSION_MINOR); + goto err; + } + ret = iavf_send_vf_config_msg(adapter); + if (ret) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", + ret); + goto err; + } + iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); + return; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_get_resources - third step of driver startup + * @adapter: board private structure + * + * Function process __IAVF_INIT_GET_RESOURCES driver state and + * finishes driver initialization procedure. + * When success the state is changed to __IAVF_DOWN + * when fails the state is changed to __IAVF_INIT_FAILED + **/ +static void iavf_init_get_resources(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct iavf_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int ret, bufsz; + + WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); + /* aq msg sent, awaiting reply */ + if (!adapter->vf_res) { + bufsz = sizeof(struct virtchnl_vf_resource) + + (IAVF_MAX_VF_VSI * + sizeof(struct virtchnl_vsi_resource)); + adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + ret = iavf_get_vf_config(adapter); + if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { + ret = iavf_send_vf_config_msg(adapter); + goto err; + } else if (ret == IAVF_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. 
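/* Illustrative aside, not part of the driver: iavf_startup() and iavf_init_version_check() above, together with iavf_init_get_resources() below, form a watchdog-driven state machine. Each step advances the state on success and parks it in __IAVF_INIT_FAILED on error, from which the watchdog retries the step that failed. Compact sketch with simplified state names: */
#include <stdio.h>

enum vf_state {
	STARTUP, INIT_VERSION_CHECK, INIT_GET_RESOURCES, INIT_FAILED, DOWN
};

static enum vf_state step(enum vf_state s, int ok, enum vf_state *last)
{
	switch (s) {
	case STARTUP:
		*last = s;
		return ok ? INIT_VERSION_CHECK : INIT_FAILED;
	case INIT_VERSION_CHECK:
		*last = s;
		return ok ? INIT_GET_RESOURCES : INIT_FAILED;
	case INIT_GET_RESOURCES:
		*last = s;
		return ok ? DOWN : INIT_FAILED;
	case INIT_FAILED:
		return *last;	/* try again from the failed step */
	default:
		return s;
	}
}

int main(void)
{
	enum vf_state last = STARTUP;
	enum vf_state s = STARTUP;

	s = step(s, 1, &last);	/* -> INIT_VERSION_CHECK */
	s = step(s, 0, &last);	/* fails -> INIT_FAILED */
	s = step(s, 1, &last);	/* retries INIT_VERSION_CHECK */
	printf("state=%d last=%d\n", s, last);
	return 0;
}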
*/ + iavf_shutdown_adminq(hw); + dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); + return; + } + if (ret) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", ret); + goto err_alloc; + } + + if (iavf_process_config(adapter)) + goto err_alloc; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; + +#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC + /* force legacy Rx mode if SKIP_CPU_SYNC is not supported */ + adapter->flags |= IAVF_FLAG_LEGACY_RX; +#endif + netdev->netdev_ops = &iavf_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(netdev, &iavf_netdev_ops_ext); +#endif + iavf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ +#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->tx_desc_count = IAVF_DEFAULT_TXD; + adapter->rx_desc_count = IAVF_DEFAULT_RXD; + ret = iavf_init_interrupt_scheme(adapter); + if (ret) + goto err_sw_init; + iavf_map_rings_to_vectors(adapter); + if (adapter->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; + + ret = iavf_request_misc_irq(adapter); + if (ret) + goto err_sw_init; + + netif_carrier_off(netdev); + adapter->link_up = false; + + if (!adapter->netdev_registered) { + ret = register_netdev(netdev); + if (ret) + goto err_register; + } + + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + if (CLIENT_ALLOWED(adapter)) { + ret = iavf_lan_add_device(adapter); + if (ret) + dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", + ret); + } + dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); + if (netdev->features & NETIF_F_GRO) + dev_info(&pdev->dev, "GRO is enabled\n"); + + iavf_change_state(adapter, __IAVF_DOWN); + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); + + iavf_misc_irq_enable(adapter); + wake_up(&adapter->down_waitqueue); + + adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); + adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); + if (!adapter->rss_key || !adapter->rss_lut) + goto err_mem; if (RSS_AQ(adapter)) adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else - err = iavf_init_rss(adapter); + iavf_init_rss(adapter); + + return; +err_mem: + iavf_free_rss(adapter); +err_register: + iavf_free_misc_irq(adapter); +err_sw_init: + iavf_reset_interrupt_capability(adapter); +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1526,59 +1814,49 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_disable_queues(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { iavf_map_queues(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 
iavf_add_ether_addrs(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { iavf_add_vlans(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { iavf_del_ether_addrs(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { iavf_del_vlans(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { iavf_enable_vlan_stripping(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { iavf_disable_vlan_stripping(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { iavf_configure_queues(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { iavf_enable_queues(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { /* This message goes straight to the firmware, not the * PF, so we don't have to set current_op as we will * not get a response through the ARQ. */ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; - return 0; + return iavf_init_rss(adapter); } if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { iavf_get_hena(adapter); @@ -1596,24 +1874,21 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_rss_lut(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); return 0; } - - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); return 0; } - +#ifdef __TC_MQPRIO_MODE_MAX if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { iavf_enable_channels(adapter); return 0; @@ -1623,15 +1898,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_disable_channels(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { - iavf_add_cloud_filter(adapter); - return 0; - } - - if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { - iavf_del_cloud_filter(adapter); - return 0; - } +#endif /* __TC_MQPRIO_MODE_MAX */ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { iavf_del_cloud_filter(adapter); return 0; @@ -1644,245 +1911,30 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) } /** - * iavf_startup - first step of driver startup - * @adapter: board private structure + * iavf_send_reset_request - prepare driver and send reset request + * @adapter: pointer to iavf_adapter * - * Function process __IAVF_STARTUP driver state. - * When success the state is changed to __IAVF_INIT_VERSION_CHECK - * when fails it returns -EAGAIN + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. 
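/* Illustrative aside, not part of the driver: together with the __IAVF_RESETTING case in the watchdog further down, this implements a two-pass reset. The first watchdog pass sends the request and sets IAVF_FLAG_RESET_PENDING; the next pass handles the completed reset. Minimal sketch with simplified flag values: */
#include <stdio.h>

#define FLAG_RESET_NEEDED	0x1u
#define FLAG_RESET_PENDING	0x2u

static void watchdog_resetting(unsigned int *flags)
{
	if (!(*flags & FLAG_RESET_PENDING)) {
		puts("pass 1: sending reset request to PF");
		*flags |= FLAG_RESET_PENDING;
	} else {
		puts("pass 2: handling reset, rebuilding rings");
		*flags &= ~(FLAG_RESET_PENDING | FLAG_RESET_NEEDED);
	}
}

int main(void)
{
	unsigned int flags = FLAG_RESET_NEEDED;

	watchdog_resetting(&flags);
	watchdog_resetting(&flags);
	return 0;
}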
**/ -static int iavf_startup(struct iavf_adapter *adapter) +static void iavf_send_reset_request(struct iavf_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; - int err; - WARN_ON(adapter->state != __IAVF_STARTUP); + iavf_misc_irq_disable(adapter); + adapter->flags &= ~IAVF_FLAG_QUEUES_ENABLED; - /* driver loaded, probe complete */ - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - err = iavf_set_mac_type(hw); - if (err) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); - goto err; - } - - err = iavf_check_reset_complete(hw); - if (err) { - dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); - goto err; - } - hw->aq.num_arq_entries = IAVF_AQ_LEN; - hw->aq.num_asq_entries = IAVF_AQ_LEN; - hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - - err = iavf_init_adminq(hw); - if (err) { - dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); - goto err; - } - err = iavf_send_api_ver(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); - iavf_shutdown_adminq(hw); - goto err; - } - adapter->state = __IAVF_INIT_VERSION_CHECK; -err: - return err; -} - -/** - * iavf_init_version_check - second step of driver startup - * @adapter: board private structure - * - * Function process __IAVF_INIT_VERSION_CHECK driver state. - * When success the state is changed to __IAVF_INIT_GET_RESOURCES - * when fails it returns -EAGAIN - **/ -static int iavf_init_version_check(struct iavf_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct iavf_hw *hw = &adapter->hw; - int err = -EAGAIN; - - WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); - - if (!iavf_asq_done(hw)) { - dev_err(&pdev->dev, "Admin queue command never completed\n"); - iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; - goto err; - } - - /* aq msg sent, awaiting reply */ - err = iavf_verify_api_ver(adapter); - if (err) { - if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) - err = iavf_send_api_ver(adapter); - else - dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", - adapter->pf_version.major, - adapter->pf_version.minor, - VIRTCHNL_VERSION_MAJOR, - VIRTCHNL_VERSION_MINOR); - goto err; - } - err = iavf_send_vf_config_msg(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send config request (%d)\n", - err); - goto err; - } - adapter->state = __IAVF_INIT_GET_RESOURCES; - -err: - return err; -} - -/** - * iavf_init_get_resources - third step of driver startup - * @adapter: board private structure - * - * Function process __IAVF_INIT_GET_RESOURCES driver state and - * finishes driver initialization procedure. 
- * When success the state is changed to __IAVF_DOWN - * when fails it returns -EAGAIN - **/ -static int iavf_init_get_resources(struct iavf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct iavf_hw *hw = &adapter->hw; - int err = 0, bufsz; - - WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); - /* aq msg sent, awaiting reply */ - if (!adapter->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (IAVF_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource)); - adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); - if (!adapter->vf_res) - goto err; - } - err = iavf_get_vf_config(adapter); - if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { - err = iavf_send_vf_config_msg(adapter); - goto err; - } else if (err == IAVF_ERR_PARAM) { - /* We only get ERR_PARAM if the device is in a very bad - * state or if we've been disabled for previous bad - * behavior. Either way, we're done now. - */ - iavf_shutdown_adminq(hw); - dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); - return 0; - } - if (err) { - dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); - goto err_alloc; - } - - if (iavf_process_config(adapter)) - goto err_alloc; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - - adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; - - netdev->netdev_ops = &iavf_netdev_ops; - iavf_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - - /* MTU range: 68 - 9710 */ - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; - - if (!is_valid_ether_addr(adapter->hw.mac.addr)) { - dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", - adapter->hw.mac.addr); - eth_hw_addr_random(netdev); - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); - } else { - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - } - - adapter->tx_desc_count = IAVF_DEFAULT_TXD; - adapter->rx_desc_count = IAVF_DEFAULT_RXD; - err = iavf_init_interrupt_scheme(adapter); - if (err) - goto err_sw_init; - iavf_map_rings_to_vectors(adapter); - if (adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; - - err = iavf_request_misc_irq(adapter); - if (err) - goto err_sw_init; - - netif_carrier_off(netdev); - adapter->link_up = false; - - /* set the semaphore to prevent any callbacks after device registration - * up to time when state of driver will be set to __IAVF_DOWN + /* Restart the AQ here. If we have been reset but didn't + * detect it, or if the PF had to reinit, our AQ will be hosed. 
*/ - rtnl_lock(); - if (!adapter->netdev_registered) { - err = register_netdevice(netdev); - if (err) { - rtnl_unlock(); - goto err_register; - } - } - - adapter->netdev_registered = true; - - netif_tx_stop_all_queues(netdev); - if (CLIENT_ALLOWED(adapter)) { - err = iavf_lan_add_device(adapter); - if (err) { - rtnl_unlock(); - dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", - err); - } - } - dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); - if (netdev->features & NETIF_F_GRO) - dev_info(&pdev->dev, "GRO is enabled\n"); - - adapter->state = __IAVF_DOWN; - set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - rtnl_unlock(); + iavf_shutdown_adminq(hw); + iavf_init_adminq(hw); iavf_misc_irq_enable(adapter); - wake_up(&adapter->down_waitqueue); - adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); - adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); - if (!adapter->rss_key || !adapter->rss_lut) - goto err_mem; - if (RSS_AQ(adapter)) - adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; - else - iavf_init_rss(adapter); - - return err; -err_mem: - iavf_free_rss(adapter); -err_register: - iavf_free_misc_irq(adapter); -err_sw_init: - iavf_reset_interrupt_capability(adapter); -err_alloc: - kfree(adapter->vf_res); - adapter->vf_res = NULL; -err: - return err; + if (!iavf_request_reset(adapter)) + adapter->flags |= IAVF_FLAG_RESET_PENDING; } /** @@ -1901,9 +1953,63 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - adapter->state = __IAVF_COMM_FAILED; + iavf_change_state(adapter, __IAVF_COMM_FAILED); + + if (adapter->flags & IAVF_FLAG_RESET_NEEDED && + adapter->state != __IAVF_RESETTING) { + adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; + iavf_change_state(adapter, __IAVF_RESETTING); + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + if (CLIENT_ENABLED(adapter)) { + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | + IAVF_FLAG_CLIENT_NEEDS_CLOSE | + IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + IAVF_FLAG_SERVICE_CLIENT_REQUESTED); + cancel_delayed_work_sync(&adapter->client_task); + iavf_notify_client_close(&adapter->vsi, true); + } + } switch (adapter->state) { + case __IAVF_STARTUP: + iavf_startup(adapter); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_VERSION_CHECK: + iavf_init_version_check(adapter); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_GET_RESOURCES: + iavf_init_get_resources(adapter); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_FAILED: + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { + dev_err(&adapter->pdev->dev, + "Failed to communicate with PF; waiting before retry\n"); + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + clear_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, (5 * HZ)); + return; + } + /* Try again from the failed step */ + iavf_change_state(adapter, adapter->last_state); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); +
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); + return; case __IAVF_COMM_FAILED: reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; @@ -1912,17 +2018,12 @@ static void iavf_watchdog_task(struct work_struct *work) /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - queue_delayed_work(iavf_wq, &adapter->init_task, 10); - clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); - /* Don't reschedule the watchdog, since we've restarted - * the init task. When init_task contacts the PF and + /* When init_task contacts the PF and * gets everything set up again, it'll restart the * watchdog for us. Down, boy. Sit. Stay. Woof. */ - return; + iavf_change_state(adapter, __IAVF_STARTUP); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -1931,10 +2032,16 @@ static void iavf_watchdog_task(struct work_struct *work) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); - goto watchdog_done; + return; case __IAVF_RESETTING: + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) + iavf_send_reset_request(adapter); + else + iavf_handle_reset(adapter); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(2)); return; case __IAVF_DOWN: case __IAVF_DOWN_PENDING: @@ -1947,50 +2054,56 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_send_api_ver(adapter); } } else { - if (!iavf_process_aq_command(adapter) && + if (iavf_process_aq_command(adapter) && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return; default: - goto restart_watchdog; + break; } - /* check for hw reset */ - reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; - if (!reg_val) { - adapter->state = __IAVF_RESETTING; - adapter->flags |= IAVF_FLAG_RESET_PENDING; + /* check for hw reset */ + if (iavf_is_reset(hw)) { + iavf_schedule_reset(adapter); adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); - queue_work(iavf_wq, &adapter->reset_task); - goto watchdog_done; + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_work(iavf_wq, &adapter->watchdog_task.work); + return; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); -watchdog_done: - if (adapter->state == __IAVF_RUNNING || - adapter->state == __IAVF_COMM_FAILED) - iavf_detect_recover_hung(&adapter->vsi); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: + queue_work(iavf_wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); - queue_work(iavf_wq, &adapter->adminq_task); } +/** + * iavf_disable_vf - disable a VF that failed to reset + * @adapter: private adapter structure + * + * Helper function to shut down the VF when a reset never finishes. 
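Two small helpers referenced above, iavf_change_state() and iavf_is_reset(), are introduced elsewhere in this patch. Their plausible shapes, inferred from the call sites in these hunks (the __IAVF_INIT_FAILED case rewinds to last_state, and both reset checks replace the same ARQLEN register test), would be roughly:

static void iavf_change_state(struct iavf_adapter *adapter,
			      enum iavf_state_t state)
{
	if (adapter->state == state)
		return;

	/* remember the previous state so __IAVF_INIT_FAILED can retry it */
	adapter->last_state = adapter->state;
	adapter->state = state;
}

static bool iavf_is_reset(struct iavf_hw *hw)
{
	/* the ARQ enable bit reads back as zero while the VF is in reset */
	return !(rd32(hw, IAVF_VF_ARQLEN1) &
		 IAVF_VF_ARQLEN1_ARQENABLE_MASK);
}

Both are assumptions based on usage here, not the patch's verbatim definitions.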
+ **/ static void iavf_disable_vf(struct iavf_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct iavf_mac_filter *f, *ftmp; struct iavf_vlan_filter *fv, *fvtmp; struct iavf_cloud_filter *cf, *cftmp; + /* reset never finished */ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; @@ -1998,13 +2111,13 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - if (adapter->state == __IAVF_RUNNING) { + if (!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - netif_carrier_off(adapter->netdev); - netif_tx_disable(adapter->netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); adapter->link_up = false; - iavf_napi_disable_all(adapter); iavf_irq_disable(adapter); + iavf_napi_disable_all(adapter); iavf_free_traffic_irqs(adapter); iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); @@ -2013,12 +2126,14 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); /* Delete all of the filters */ - list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, + list) { list_del(&f->list); kfree(f); } - list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, + list) { list_del(&fv->list); kfree(fv); } @@ -2035,14 +2150,18 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - iavf_free_queues(adapter); iavf_free_q_vectors(adapter); + iavf_free_queues(adapter); kfree(adapter->vf_res); + adapter->vf_res = NULL; iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; adapter->state = __IAVF_DOWN; + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } @@ -2050,68 +2169,30 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) #define IAVF_RESET_WAIT_MS 10 #define IAVF_RESET_WAIT_COUNT 500 /** - * iavf_reset_task - Call-back task to handle hardware reset - * @work: pointer to work_struct + * iavf_handle_reset - Handle hardware reset + * @adapter: pointer to iavf_adapter * * During reset we need to shut down and reinitialize the admin queue * before we can use it to communicate with the PF again. We also clear * and reinit the rings because that context is lost as well. **/ -static void iavf_reset_task(struct work_struct *work) +static void iavf_handle_reset(struct iavf_adapter *adapter) { - struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - reset_task); - struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; - struct iavf_vlan_filter *vlf; - struct iavf_cloud_filter *cf; - struct iavf_mac_filter *f; - u32 reg_val; int i = 0, err; bool running; - - /* When device is being removed it doesn't make sense to run the reset - * task, just return in such a case. 
- */ - if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) - return; - - while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, - &adapter->crit_section)) - usleep_range(500, 1000); - if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | - IAVF_FLAG_CLIENT_NEEDS_CLOSE | - IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - IAVF_FLAG_SERVICE_CLIENT_REQUESTED); - cancel_delayed_work_sync(&adapter->client_task); - iavf_notify_client_close(&adapter->vsi, true); - } - iavf_misc_irq_disable(adapter); - if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { - adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; - /* Restart the AQ here. If we have been reset but didn't - * detect it, or if the PF had to reinit, our AQ will be hosed. - */ - iavf_shutdown_adminq(hw); - iavf_init_adminq(hw); - iavf_request_reset(adapter); - } - adapter->flags |= IAVF_FLAG_RESET_PENDING; + u32 reg_val; /* poll until we see the reset actually happen */ for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { - reg_val = rd32(hw, IAVF_VF_ARQLEN1) & - IAVF_VF_ARQLEN1_ARQENABLE_MASK; - if (!reg_val) + if (iavf_is_reset(hw)) break; usleep_range(5000, 10000); } if (i == IAVF_RESET_WAIT_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); - goto continue_reset; /* act like the reset happened */ + return; /* bail out, I'll be back */ } /* wait until the reset is complete and the PF is responding to us */ @@ -2126,32 +2207,32 @@ static void iavf_reset_task(struct work_struct *work) } pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); if (i == IAVF_RESET_WAIT_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - return; /* Do not attempt to reinit. It's dead, Jim. */ + return; } -continue_reset: + iavf_misc_irq_disable(adapter); + iavf_irq_disable(adapter); + /* We don't use netif_running() because it may be true prior to * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - running = ((adapter->state == __IAVF_RUNNING) || - (adapter->state == __IAVF_RESETTING)); + running = (adapter->last_state == __IAVF_RUNNING); if (running) { + netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); } - iavf_irq_disable(adapter); - adapter->state = __IAVF_RESETTING; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just @@ -2160,7 +2241,16 @@ continue_reset: iavf_free_all_rx_resources(adapter); iavf_free_all_tx_resources(adapter); + /* Set the queues_disabled flag when VF is going through reset + * to avoid a race condition especially for ADQ i.e. when a VF ADQ is + * configured, PF resets the VF to allocate ADQ resources. When this + * happens there's a possibility to hit a condition where VF is in + * running state but the queues haven't been enabled yet. So wait for + * virtchnl success message for enable queues and then unset this flag. + * Don't allow the link to come back up until that happens. 
+ */ adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; + /* kill and reinit the admin queue */ iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -2170,45 +2260,25 @@ continue_reset: err); adapter->aq_required = 0; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; } + if (RSS_AQ(adapter)) { + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; + } else { + err = iavf_init_rss(adapter); + if (err) + goto reset_err; + } adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; - spin_lock_bh(&adapter->mac_vlan_list_lock); - - /* re-add all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->add = true; - } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - /* check if TCs are running and re-add all cloud filters */ - spin_lock_bh(&adapter->cloud_filter_list_lock); - if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) { - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->add = true; - } - } - spin_unlock_bh(&adapter->cloud_filter_list_lock); - - adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); - /* We were running when the reset started, so we need to restore some * state here. */ @@ -2223,30 +2293,37 @@ continue_reset: if (err) goto reset_err; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err; - adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED; } iavf_configure(adapter); + /* iavf_up_complete() will switch device back + * to __IAVF_RUNNING + */ iavf_up_complete(adapter); - - iavf_irq_enable(adapter, true); + netdev->flags |= IFF_UP; } else { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); return; reset_err: + if (running) { + iavf_change_state(adapter, __IAVF_RUNNING); + netdev->flags |= IFF_UP; + } clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2282,8 +2359,12 @@ static void iavf_adminq_task(struct work_struct *work) if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); if (pending != 0) memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); @@ -2295,7 +2376,7 @@ static void 
iavf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); - if (val == 0xdeadbeef) /* indicates device in reset */ + if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { @@ -2468,6 +2549,9 @@ void iavf_free_all_rx_resources(struct iavf_adapter *adapter) iavf_free_rx_resources(&adapter->rx_rings[i]); } +#ifdef HAVE_SETUP_TC +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#ifdef __TC_MQPRIO_MODE_MAX /** * iavf_validate_tx_bandwidth - validate the max Tx bandwidth * @adapter: board private structure @@ -2478,32 +2562,46 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, { int speed = 0, ret = 0; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + if (ADV_LINK_SUPPORT(adapter)) { + if (SUPPORTED_SPEED(adapter->link_speed_mbps)) { + speed = adapter->link_speed_mbps; + goto validate_bw; + } else { + dev_err(&adapter->pdev->dev, "Unknown link speed\n"); + return -EINVAL; + } + } + +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: + case VIRTCHNL_LINK_SPEED_40GB: speed = 40000; break; - case IAVF_LINK_SPEED_25GB: + case VIRTCHNL_LINK_SPEED_25GB: speed = 25000; break; - case IAVF_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_20GB: speed = 20000; break; - case IAVF_LINK_SPEED_10GB: + case VIRTCHNL_LINK_SPEED_10GB: speed = 10000; break; - case IAVF_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_1GB: speed = 1000; break; - case IAVF_LINK_SPEED_100MB: + case VIRTCHNL_LINK_SPEED_100MB: speed = 100; break; default: break; } +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED +validate_bw: +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ if (max_tx_rate > speed) { - dev_err(&adapter->pdev->dev, - "Invalid tx rate specified\n"); + dev_err(&adapter->pdev->dev, "Invalid tx rate specified\n"); ret = -EINVAL; } @@ -2514,23 +2612,80 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, * iavf_validate_channel_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters + * @max_tc_allowed: maximum number of TCs allowed, either 4 or 16 depending on negotiated capabilities * * This function validates if the config provided by the user to * configure queue channels is valid or not. Returns 0 on a valid * config. **/ static int iavf_validate_ch_config(struct iavf_adapter *adapter, - struct tc_mqprio_qopt_offload *mqprio_qopt) + struct tc_mqprio_qopt_offload *mqprio_qopt, + u8 max_tc_allowed) { + u32 tc, qcount, non_power_2_qcount = 0; u64 total_max_rate = 0; int i, num_qps = 0; u64 tx_rate = 0; - int ret = 0; - if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || + if (mqprio_qopt->qopt.num_tc > max_tc_allowed || mqprio_qopt->qopt.num_tc < 1) return -EINVAL; + /* for ADQ there are a few rules on queue allocation for each TC + * 1. Number of queues for TC0 should always be a power of 2 + * 2. Number of queues for rest of TCs can be non-power of 2 + * 3. If the previous TC has non-power of 2 queues, then all the + * following TCs should be either + * a. same number of queues as that of the previous non-power + * of 2 or + * b. less than previous non-power of 2 and power of 2 + * ex: 1@0 2@1 3@3 4@6 - Invalid + * 1@0 2@1 3@3 3@6 - Valid + * 1@0 2@1 3@3 2@6 - Valid + * 1@0 2@1 3@3 1@6 - Valid + */ + for (tc = 0; tc < mqprio_qopt->qopt.num_tc; tc++) { + qcount = mqprio_qopt->qopt.count[tc]; + + /* case 1. 
check that the first TC always has a power-of-2 qcount in ADQ */ + if (!tc && !is_power_of_2(qcount)) { + dev_err(&adapter->pdev->dev, + "TC0:qcount[%d] must be a power of 2\n", + qcount); + return -EINVAL; + } + + /* case 2 & 3, check for non-power of 2 number of queues */ + if (tc && non_power_2_qcount) { + if (qcount > non_power_2_qcount) { + dev_err(&adapter->pdev->dev, + "TC%d qcount [%d] cannot be > non-power-of-2 qcount [%d]\n", + tc, qcount, non_power_2_qcount); + return -EINVAL; + } else if (qcount < non_power_2_qcount) { + /* it must be power of 2, otherwise fail */ + if (!is_power_of_2(qcount)) { + dev_err(&adapter->pdev->dev, + "TC%d qcount [%d] must be a power of 2 < non-power-of-2 qcount [%d]\n", + tc, qcount, non_power_2_qcount); + return -EINVAL; + } + } + } else if (tc && !is_power_of_2(qcount)) { + /* this is the first TC to have a non-power of 2 queue + * count and the code is going to enter this section + * only once. The qcount for this TC will serve as + * our reference/guide to allocate number of queues + * for all the further TCs as per section a. and b. in + * case 3 mentioned above. + */ + non_power_2_qcount = qcount; + dev_dbg(&adapter->pdev->dev, + "TC%d:count[%d] non power of 2\n", tc, + qcount); + } + } + for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { if (!mqprio_qopt->qopt.count[i] || mqprio_qopt->qopt.offset[i] != num_qps) @@ -2546,16 +2701,28 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > IAVF_MAX_REQ_QUEUES) + if (num_qps > adapter->num_active_queues) { + dev_err(&adapter->pdev->dev, + "Cannot support requested number of queues\n"); return -EINVAL; + } - ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); - return ret; + /* no point in validating TX bandwidth rate limit if the user hasn't + * specified any rate limit for any TCs, so validate only if it's set. + */ + if (total_max_rate) + return iavf_validate_tx_bandwidth(adapter, total_max_rate); + else + return 0; } /** * iavf_del_all_cloud_filters - delete all cloud filters * on the traffic classes + * @adapter: board private structure + * + * This function will loop through the list of cloud filters and + * delete them. 
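The four tc invocations in the comment above can be checked against a standalone restatement of the same rules. This userspace sketch mirrors the kernel loop; is_pow2() stands in for the kernel's is_power_of_2(), and everything here is illustrative rather than driver code:

#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int n)
{
	return n && !(n & (n - 1));
}

/* returns true when the per-TC queue counts satisfy ADQ rules 1-3 */
static bool adq_qcounts_valid(const unsigned int *count, int num_tc)
{
	unsigned int non_pow2 = 0;

	for (int tc = 0; tc < num_tc; tc++) {
		unsigned int q = count[tc];

		if (!tc && !is_pow2(q))
			return false;		/* rule 1: TC0 must be 2^n */
		if (tc && non_pow2) {
			if (q > non_pow2)
				return false;	/* rule 3: above the reference */
			if (q < non_pow2 && !is_pow2(q))
				return false;	/* rule 3b: smaller must be 2^n */
		} else if (tc && !is_pow2(q)) {
			non_pow2 = q;	/* first non-2^n TC becomes the reference */
		}
	}
	return true;
}

int main(void)
{
	unsigned int invalid[] = { 1, 2, 3, 4 };	/* 1@0 2@1 3@3 4@6 */
	unsigned int valid[]   = { 1, 2, 3, 2 };	/* 1@0 2@1 3@3 2@6 */

	printf("%s\n", adq_qcounts_valid(invalid, 4) ? "valid" : "invalid");
	printf("%s\n", adq_qcounts_valid(valid, 4) ? "valid" : "invalid");
	return 0;
}

Running it prints "invalid" then "valid", matching the Invalid/Valid annotations in the comment.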
**/ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) { @@ -2572,9 +2739,9 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) } /** * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure - * @type_date: tc offload data + * @type_data: tc offload data * * This function processes the config information provided by the * user to configure traffic classes/queue channels and packages the @@ -2589,6 +2756,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) struct virtchnl_vf_resource *vfres = adapter->vf_res; u8 num_tc = 0, total_qps = 0; int ret = 0, netdev_tc = 0; + u8 max_tc_allowed; u64 max_tx_rate; u16 mode; int i; @@ -2615,7 +2783,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) /* add queue channel */ if (mode == TC_MQPRIO_MODE_CHANNEL) { if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { - dev_err(&adapter->pdev->dev, "ADq not supported\n"); + dev_err(&adapter->pdev->dev, "ADQ not supported\n"); return -EOPNOTSUPP; } if (adapter->ch_config.state != __IAVF_TC_INVALID) { @@ -2623,7 +2791,18 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) return -EINVAL; } - ret = iavf_validate_ch_config(adapter, mqprio_qopt); + /* if the negotiated capability between VF and PF indicates that + * ADQ_V2 is enabled, it's OK to allow max_tc + * to be 16. This is needed to handle the case where the iAVF + * is newer but the PF is older or of a different generation + */ + if (ADQ_V2_ALLOWED(adapter)) + max_tc_allowed = VIRTCHNL_MAX_ADQ_V2_CHANNELS; + else + max_tc_allowed = VIRTCHNL_MAX_ADQ_CHANNELS; + + ret = iavf_validate_ch_config(adapter, mqprio_qopt, + max_tc_allowed); if (ret) return ret; /* Return if same TC config is requested */ @@ -2631,7 +2810,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) return 0; adapter->num_tc = num_tc; - for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < max_tc_allowed; i++) { if (i < num_tc) { adapter->ch_config.ch_info[i].count = mqprio_qopt->qopt.count[i]; @@ -2656,7 +2835,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) netdev_reset_tc(netdev); /* Report the tc mapping up the stack */ netdev_set_num_tc(adapter->netdev, num_tc); - for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < max_tc_allowed; i++) { u16 qcount = mqprio_qopt->qopt.count[i]; u16 qoffset = mqprio_qopt->qopt.offset[i]; @@ -2672,7 +2851,7 @@ exit: /** * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure - * @cls_flower: pointer to struct flow_cls_offload + * @f: pointer to struct flow_cls_offload * @filter: pointer to cloud filter structure */ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, @@ -2681,13 +2860,15 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; + struct virtchnl_filter *cf = &filter->f; + enum virtchnl_flow_type flow_type; u16 n_proto_mask = 0; u16 n_proto_key = 0; u8 field_flags = 0; u16 addr_type = 0; u16 n_proto = 0; + u8 ip_proto = 0; int i = 0; - struct virtchnl_filter *vf = &filter->f; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | @@ -2696,25 +2877,31 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { +#ifdef HAVE_TC_FLOWER_ENC + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | +#endif /* HAVE_TC_FLOWER_ENC */ + BIT(FLOW_DISSECTOR_KEY_PORTS))) { dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", dissector->used_keys); return -EOPNOTSUPP; } +#ifdef HAVE_TC_FLOWER_ENC if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); + if (match.mask->keyid != 0) field_flags |= IAVF_CLOUD_FIELD_TEN_ID; } +#endif /* HAVE_TC_FLOWER_ENC */ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); n_proto_mask = ntohs(match.mask->n_proto); @@ -2725,15 +2912,32 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, n_proto = n_proto_key & n_proto_mask; if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) return -EINVAL; - if (n_proto == ETH_P_IPV6) { - /* specify flow type as TCP IPv6 */ - vf->flow_type = VIRTCHNL_TCP_V6_FLOW; - } - if (match.key->ip_proto != IPPROTO_TCP) { - dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); + if (ADQ_V2_ALLOWED(adapter)) { + if (match.key->ip_proto != IPPROTO_TCP && + match.key->ip_proto != IPPROTO_UDP) { + dev_err(&adapter->pdev->dev, + "Only TCP or UDP transport is supported\n"); + return -EINVAL; + } + } else if (match.key->ip_proto != IPPROTO_TCP) { + dev_err(&adapter->pdev->dev, + "Only TCP transport is supported\n"); return -EINVAL; } + ip_proto = match.key->ip_proto; + + /* determine VIRTCHNL flow_type based on L3 and L4 protocol */ + if (n_proto == ETH_P_IP) + flow_type = (ip_proto == IPPROTO_TCP) ? + VIRTCHNL_TCP_V4_FLOW : + VIRTCHNL_UDP_V4_FLOW; + else /* means IPV6 */ + flow_type = (ip_proto == IPPROTO_TCP) ? 
+ VIRTCHNL_TCP_V6_FLOW : + VIRTCHNL_UDP_V6_FLOW; + cf->flow_type = flow_type; + filter->f.flow_type = flow_type; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { @@ -2748,7 +2952,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2758,7 +2962,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2767,18 +2971,18 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, is_multicast_ether_addr(match.key->dst)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) - vf->mask.tcp_spec.dst_mac[i] |= 0xff; - ether_addr_copy(vf->data.tcp_spec.dst_mac, + cf->mask.tcp_spec.dst_mac[i] |= 0xff; + ether_addr_copy(cf->data.tcp_spec.dst_mac, match.key->dst); } if (!is_zero_ether_addr(match.key->src)) if (is_valid_ether_addr(match.key->src) || is_multicast_ether_addr(match.key->src)) { - /* set the mask if a valid dst_mac address */ + /* set the mask if a valid src_mac address */ for (i = 0; i < ETH_ALEN; i++) - vf->mask.tcp_spec.src_mac[i] |= 0xff; - ether_addr_copy(vf->data.tcp_spec.src_mac, + cf->mask.tcp_spec.src_mac[i] |= 0xff; + ether_addr_copy(cf->data.tcp_spec.src_mac, match.key->src); } } @@ -2787,17 +2991,18 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id) { if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); - return IAVF_ERR_CONFIG; + return -EINVAL; } } - vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); - vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); + cf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); + cf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { @@ -2811,13 +3016,14 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); + if (match.mask->dst) { if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2827,21 +3033,21 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (match.key->dst) { - vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); - vf->data.tcp_spec.dst_ip[0] = match.key->dst; + cf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); + cf->data.tcp_spec.dst_ip[0] = match.key->dst; } if (match.key->src) { - vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); - vf->data.tcp_spec.src_ip[0] = match.key->src; + cf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); + cf->data.tcp_spec.src_ip[0] = match.key->src; } } @@ -2854,7 +3060,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, if (ipv6_addr_any(&match.mask->dst)) 
{ dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); - return IAVF_ERR_CONFIG; + return -EINVAL; } /* src and dest IPv6 address should not be LOOPBACK @@ -2864,55 +3070,51 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) field_flags |= IAVF_CLOUD_FIELD_IIP; for (i = 0; i < 4; i++) - vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, - sizeof(vf->data.tcp_spec.dst_ip)); + cf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); + memcpy(&cf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, + sizeof(cf->data.tcp_spec.dst_ip)); for (i = 0; i < 4; i++) - vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, - sizeof(vf->data.tcp_spec.src_ip)); + cf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); + memcpy(&cf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, + sizeof(cf->data.tcp_spec.src_ip)); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); - if (match.mask->src) { - if (match.mask->src == cpu_to_be16(0xffff)) { - field_flags |= IAVF_CLOUD_FIELD_IIP; - } else { - dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", - be16_to_cpu(match.mask->src)); - return IAVF_ERR_CONFIG; - } - } - if (match.mask->dst) { + if (match.key->dst) { if (match.mask->dst == cpu_to_be16(0xffff)) { - field_flags |= IAVF_CLOUD_FIELD_IIP; + cf->mask.tcp_spec.dst_port |= + cpu_to_be16(0xffff); + cf->data.tcp_spec.dst_port = match.key->dst; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } - if (match.key->dst) { - vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.dst_port = match.key->dst; - } if (match.key->src) { - vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.src_port = match.key->src; + if (match.mask->src == cpu_to_be16(0xffff)) { + cf->mask.tcp_spec.src_port |= + cpu_to_be16(0xffff); + cf->data.tcp_spec.src_port = match.key->src; + } else { + dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", + be16_to_cpu(match.mask->src)); + return -EINVAL; + } } } - vf->field_flags = field_flags; + cf->field_flags = field_flags; return 0; } @@ -2922,18 +3124,19 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, * @adapter: board private structure * @tc: traffic class index on the device * @filter: pointer to cloud filter structure + * + * Return 0 on success, negative on failure */ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, struct iavf_cloud_filter *filter) { if (tc == 0) return 0; - if (tc < adapter->num_tc) { - if (!filter->f.data.tcp_spec.dst_port) { - dev_err(&adapter->pdev->dev, - "Specify destination port to redirect to traffic class other than TC0\n"); - return -EINVAL; - } + if (tc < adapter->num_tc && (!ADQ_V2_ALLOWED(adapter)) && + !filter->f.data.tcp_spec.dst_port) { + dev_err(&adapter->pdev->dev, + "Specify destination port to redirect to traffic class other than TC0\n"); + return -EINVAL; + } /* redirect to a traffic class on the same device */ filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; @@ -2941,6 +3144,28 @@ static int iavf_handle_tclass(struct 
iavf_adapter *adapter, u32 tc, return 0; } +/* iavf_find_cf - Find the cloud filter in the list + * @adapter: Board private structure + * @cookie: filter specific cookie + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * cloud_filter_list_lock. + */ +static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, + unsigned long *cookie) +{ + struct iavf_cloud_filter *filter = NULL; + + if (!cookie) + return NULL; + + list_for_each_entry(filter, &adapter->cloud_filter_list, list) { + if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) + return filter; + } + return NULL; +} + /** * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure @@ -2964,23 +3189,33 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) { - if (--count == 0) - goto err; + if (--count == 0) { + kfree(filter); + return err; + } udelay(1); } - filter->cookie = cls_flower->cookie; + /* bail out here if filter already exists */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + if (iavf_find_cf(adapter, &cls_flower->cookie)) { + dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n"); + err = -EEXIST; + goto spin_unlock; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + /* set the mask to all zeroes to begin with */ memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; err = iavf_parse_cls_flower(adapter, cls_flower, filter); - if (err < 0) + if (err) goto err; err = iavf_handle_tclass(adapter, tc, filter); - if (err < 0) + if (err) goto err; /* add filter to the list */ @@ -2989,37 +3224,15 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, adapter->num_cloud_filters++; filter->add = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; +spin_unlock: spin_unlock_bh(&adapter->cloud_filter_list_lock); err: if (err) kfree(filter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } -/* iavf_find_cf - Find the cloud filter in the list - * @adapter: Board private structure - * @cookie: filter specific cookie - * - * Returns ptr to the filter object or NULL. Must be called while holding the - * cloud_filter_list_lock. 
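Moving iavf_find_cf() above its first use lets the add path reject duplicates under the cloud_filter_list_lock, as shown above. The delete path, whose body is outside this hunk, pairs with it the same way; a sketch of that pairing, where filter->del and IAVF_FLAG_AQ_DEL_CLOUD_FILTER mirror the add-side naming and are assumptions here:

static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *cls_flower)
{
	struct iavf_cloud_filter *filter;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		/* hand the delete off to the virtchnl machinery */
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;	/* no such filter */
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}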
- */ -static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, - unsigned long *cookie) -{ - struct iavf_cloud_filter *filter = NULL; - - if (!cookie) - return NULL; - - list_for_each_entry(filter, &adapter->cloud_filter_list, list) { - if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) - return filter; - } - return NULL; -} - /** * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure @@ -3046,8 +3259,8 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, /** * iavf_setup_tc_cls_flower - flower classifier offloads - * @netdev: net device to configure - * @type_data: offload data + * @adapter: board private structure + * @cls_flower: pointer to struct flow_cls_offload */ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) @@ -3063,7 +3276,7 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, case FLOW_CLS_STATS: return -EOPNOTSUPP; default: - return -EOPNOTSUPP; + return -EINVAL; } } @@ -3090,23 +3303,23 @@ static LIST_HEAD(iavf_block_cb_list); /** * iavf_setup_tc - configure multiple traffic classes - * @netdev: network interface device structure + * @dev: network interface device structure * @type: type of offload - * @type_date: tc offload data + * @type_data: tc offload data * * This function is the callback to ndo_setup_tc in the * netdev_ops. * * Returns 0 on success **/ -static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, +static int iavf_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(dev); switch (type) { case TC_SETUP_QDISC_MQPRIO: - return __iavf_setup_tc(netdev, type_data); + return __iavf_setup_tc(dev, type_data); case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &iavf_block_cb_list, @@ -3116,6 +3329,9 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, return -EOPNOTSUPP; } } +#endif /* __TC_MQPRIO_MODE_MAX */ +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +#endif /* HAVE_SETUP_TC */ /** * iavf_open - Called when a network interface is made active @@ -3134,15 +3350,16 @@ static int iavf_open(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); int err; - if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { - dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); - return -EIO; - } - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { + dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); + err = -EIO; + goto err_unlock; + } + if (adapter->state != __IAVF_DOWN) { err = -EBUSY; goto err_unlock; @@ -3169,6 +3386,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* Restore VLAN and Cloud filters that were removed with IFF_DOWN */ + iavf_restore_filters(adapter); + iavf_configure(adapter); iavf_up_complete(adapter); @@ -3208,19 +3428,21 @@ static int iavf_close(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); int status; - if (adapter->state <= __IAVF_DOWN_PENDING) - return 0; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); + if (adapter->state <= __IAVF_DOWN_PENDING) { + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + 
return 0; + } + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; iavf_down(adapter); - adapter->state = __IAVF_DOWN_PENDING; + iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -3240,10 +3462,25 @@ static int iavf_close(struct net_device *netdev) adapter->state == __IAVF_DOWN, msecs_to_jiffies(500)); if (!status) - netdev_warn(netdev, "Device resources not yet released\n"); + netdev_dbg(netdev, "Device resources not yet released\n"); return 0; } +/** + * iavf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog task. + **/ +static struct net_device_stats *iavf_get_stats(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + /** * iavf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure @@ -3254,14 +3491,36 @@ static int iavf_close(struct net_device *netdev) static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { struct iavf_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + IAVF_PACKET_HDR_PAD; - netdev->mtu = new_mtu; - if (CLIENT_ENABLED(adapter)) { - iavf_notify_client_l2_params(&adapter->vsi); - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; + if ((new_mtu < 68) || (max_frame > IAVF_MAX_RXBUFFER)) + return -EINVAL; + +#ifndef HAVE_NDO_FEATURES_CHECK + /* MTU < 576 causes problems with TSO */ + if (new_mtu < 576) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; +#ifdef HAVE_NDO_SET_FEATURES + } else { +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + if (netdev->wanted_features & NETIF_F_TSO) + netdev->features |= NETIF_F_TSO; + if (netdev->wanted_features & NETIF_F_TSO6) + netdev->features |= NETIF_F_TSO6; +#else + if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO) + netdev->features |= NETIF_F_TSO; + if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO6) + netdev->features |= NETIF_F_TSO6; +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_FEATURES */ } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); +#endif /* !HAVE_NDO_FEATURES_CHECK */ + netdev_info(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + iavf_schedule_reset(adapter); return 0; } @@ -3272,8 +3531,12 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int iavf_set_features(struct net_device *netdev, u32 features) +#else static int iavf_set_features(struct net_device *netdev, netdev_features_t features) +#endif { struct iavf_adapter *adapter = netdev_priv(netdev); @@ -3281,10 +3544,19 @@ static int iavf_set_features(struct net_device *netdev, * of VLAN offload */ if (!VLAN_ALLOWED(adapter)) { +#ifdef NETIF_F_HW_VLAN_CTAG_RX if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) +#else + if ((netdev->features ^ features) & NETIF_F_HW_VLAN_RX) +#endif return -EINVAL; +#ifdef NETIF_F_HW_VLAN_CTAG_RX } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) +#else + 
} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_RX) +#endif adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else @@ -3295,6 +3567,7 @@ static int iavf_set_features(struct net_device *netdev, return 0; } +#ifdef HAVE_NDO_FEATURES_CHECK /** * iavf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff @@ -3353,6 +3626,8 @@ out_err: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef HAVE_NDO_SET_FEATURES /** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device @@ -3360,34 +3635,83 @@ out_err: * * Returns fixed-up features bits **/ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 iavf_fix_features(struct net_device *netdev, u32 features) +#else static netdev_features_t iavf_fix_features(struct net_device *netdev, netdev_features_t features) +#endif { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + if (adapter->vf_res && + !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) +#ifdef NETIF_F_HW_VLAN_CTAG_RX features &= ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER); +#else + features &= ~(NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER); +#endif return features; } +#endif /* HAVE_NDO_SET_FEATURES */ static const struct net_device_ops iavf_netdev_ops = { +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif .ndo_open = iavf_open, .ndo_stop = iavf_close, - .ndo_start_xmit = iavf_xmit_frame, + .ndo_start_xmit = iavf_lan_xmit_frame, + .ndo_get_stats = iavf_get_stats, .ndo_set_rx_mode = iavf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = iavf_set_mac, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = iavf_change_mtu, +#else .ndo_change_mtu = iavf_change_mtu, +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ .ndo_tx_timeout = iavf_tx_timeout, +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = iavf_vlan_rx_register, +#endif .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, - .ndo_features_check = iavf_features_check, +#ifdef HAVE_SETUP_TC +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#ifdef __TC_MQPRIO_MODE_MAX +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC + .extended.ndo_setup_tc_rh = iavf_setup_tc, +#else + .ndo_setup_tc = iavf_setup_tc, +#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */ +#endif /* __TC_MQPRIO_MODE_MAX */ +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +#endif /* HAVE_SETUP_TC */ +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = iavf_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext iavf_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES .ndo_fix_features = iavf_fix_features, .ndo_set_features = iavf_set_features, - .ndo_setup_tc = iavf_setup_tc, +#endif /* HAVE_NDO_SET_FEATURES */ }; /** @@ -3401,12 +3725,13 @@ static int 
iavf_check_reset_complete(struct iavf_hw *hw) u32 rstat; int i; - for (i = 0; i < 100; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { rstat = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; + usleep_range(10, 20); } return -EBUSY; @@ -3439,7 +3764,7 @@ int iavf_process_config(struct iavf_adapter *adapter) } if (num_req_queues && - num_req_queues != adapter->vsi_res->num_queue_pairs) { + num_req_queues > adapter->vsi_res->num_queue_pairs) { /* Problem. The PF gave us fewer queues than what we had * negotiated in our request. Need a reset to see if we can't * get back to a working state. @@ -3448,7 +3773,7 @@ "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; iavf_schedule_reset(adapter); return -ENODEV; @@ -3457,41 +3782,79 @@ hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | +#ifdef NETIF_F_IPV6_CSUM NETIF_F_IPV6_CSUM | +#endif NETIF_F_HIGHDMA | +#ifdef NETIF_F_SOFT_FEATURES NETIF_F_SOFT_FEATURES | +#endif NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_SCTP_CRC | +#ifdef NETIF_F_RXHASH NETIF_F_RXHASH | +#endif +#ifdef HAVE_NDO_SET_FEATURES NETIF_F_RXCSUM | +#endif 0; +#ifdef HAVE_ENCAP_CSUM_OFFLOAD /* advertise to stack only if offloads for encapsulated packets is * supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | +#ifdef HAVE_GRE_ENCAP_OFFLOAD NETIF_F_GSO_GRE | +#ifdef NETIF_F_GSO_PARTIAL NETIF_F_GSO_GRE_CSUM | - NETIF_F_GSO_IPXIP4 | - NETIF_F_GSO_IPXIP6 | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL | +#endif + NETIF_F_GSO_UDP_TUNNEL_CSUM | +#ifdef NETIF_F_GSO_IPXIP4 + NETIF_F_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_IPXIP6 + NETIF_F_GSO_IPXIP6 | +#endif +#else /* NETIF_F_GSO_IPXIP4 */ +#ifdef NETIF_F_GSO_IPIP + NETIF_F_GSO_IPIP | +#endif +#ifdef NETIF_F_GSO_SIT + NETIF_F_GSO_SIT | +#endif +#endif /* NETIF_F_GSO_IPXIP4 */ +#endif /* HAVE_GRE_ENCAP_OFFLOAD */ 0; if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) +#ifndef NETIF_F_GSO_PARTIAL + hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM; +#else netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; +#endif /* !NETIF_F_GSO_PARTIAL */ +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ netdev->hw_enc_features |= hw_enc_features; } +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ + +#ifdef HAVE_NETDEV_VLAN_FEATURES /* record features VLANs can make use of */ +#ifdef NETIF_F_GSO_PARTIAL netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; +#else + netdev->vlan_features |= hw_enc_features; +#endif +#endif /* Write features and hw_features separately to avoid polluting * with, or dropping, features that are set when we registered. 
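Once the compat #ifdef layers in the hunk below resolve on a current kernel, the capability-to-feature mapping reduces to a few lines. A condensed view, where initializing hw_features from hw_enc_features is assumed from the elided context:

netdev_features_t hw_features = hw_enc_features;

if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
	hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
	hw_features |= NETIF_F_HW_TC;		/* cloud filters / tc offload */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
	hw_features |= NETIF_F_GSO_UDP_L4;	/* UDP segmentation offload */

netdev->hw_features |= hw_features;
netdev->features |= hw_features;

if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;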
@@ -3500,24 +3863,49 @@ int iavf_process_config(struct iavf_adapter *adapter) /* Enable VLAN features if supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) +#ifdef NETIF_F_HW_VLAN_CTAG_RX hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); +#else + hw_features |= (NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX); +#endif +#ifdef NETIF_F_HW_TC /* Enable cloud filter if ADQ is supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) hw_features |= NETIF_F_HW_TC; +#endif +#ifdef NETIF_F_GSO_UDP_L4 + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) + hw_features |= NETIF_F_GSO_UDP_L4; +#endif /* NETIF_F_GSO_UDP_L4 */ +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features |= get_netdev_hw_features(netdev); + set_netdev_hw_features(netdev, hw_features); +#else netdev->hw_features |= hw_features; +#endif +#endif /* HAVE_NDO_SET_FEATURES */ netdev->features |= hw_features; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) +#ifdef NETIF_F_HW_VLAN_CTAG_RX netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#else + netdev->features |= NETIF_F_HW_VLAN_FILTER; +#endif +#ifdef IFF_UNICAST_FLT netdev->priv_flags |= IFF_UNICAST_FLT; +#endif /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT if (netdev->wanted_features) { if (!(netdev->wanted_features & NETIF_F_TSO) || netdev->mtu < 576) @@ -3531,6 +3919,22 @@ int iavf_process_config(struct iavf_adapter *adapter) netdev->features &= ~NETIF_F_GRO; if (!(netdev->wanted_features & NETIF_F_GSO)) netdev->features &= ~NETIF_F_GSO; +#else + if (netdev_extended(netdev)->wanted_features) { + if (!(netdev_extended(netdev)->wanted_features & + NETIF_F_TSO) || netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO; + if (!(netdev_extended(netdev)->wanted_features & + NETIF_F_TSO6) || netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO6; + if (!(netdev_extended(netdev)->wanted_features & + NETIF_F_TSO_ECN)) + netdev->features &= ~NETIF_F_TSO_ECN; + if (!(netdev_extended(netdev)->wanted_features & NETIF_F_GRO)) + netdev->features &= ~NETIF_F_GRO; + if (!(netdev_extended(netdev)->wanted_features & NETIF_F_GSO)) + netdev->features &= ~NETIF_F_GSO; +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ } adapter->vsi.id = adapter->vsi_res->vsi_id; @@ -3551,58 +3955,6 @@ int iavf_process_config(struct iavf_adapter *adapter) return 0; } -/** - * iavf_init_task - worker thread to perform delayed initialization - * @work: pointer to work_struct containing our data - * - * This task completes the work that was begun in probe. Due to the nature - * of VF-PF communications, we may need to wait tens of milliseconds to get - * responses back from the PF. Rather than busy-wait in probe and bog down the - * whole system, we'll do it in a task so we can sleep. - * This task only runs during driver init. Once we've established - * communications with the PF driver and set up our netdev, the watchdog - * takes over. 
- **/ -static void iavf_init_task(struct work_struct *work) -{ - struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - init_task.work); - struct iavf_hw *hw = &adapter->hw; - - switch (adapter->state) { - case __IAVF_STARTUP: - if (iavf_startup(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_VERSION_CHECK: - if (iavf_init_version_check(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_GET_RESOURCES: - if (iavf_init_get_resources(adapter) < 0) - goto init_failed; - return; - default: - goto init_failed; - } - - queue_delayed_work(iavf_wq, &adapter->init_task, - msecs_to_jiffies(30)); - return; -init_failed: - if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { - dev_err(&adapter->pdev->dev, - "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; - iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; - queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); - return; - } - queue_delayed_work(iavf_wq, &adapter->init_task, HZ); -} - /** * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure @@ -3618,7 +3970,7 @@ static void iavf_shutdown(struct pci_dev *pdev) iavf_close(netdev); /* Prevent the watchdog from running. */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; #ifdef CONFIG_PM @@ -3639,7 +3991,12 @@ static void iavf_shutdown(struct pci_dev *pdev) * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit iavf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +#else static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +#endif { struct net_device *netdev; struct iavf_adapter *adapter = NULL; @@ -3662,7 +4019,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_request_regions(pdev, iavf_driver_name); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "pci_request_regions failed 0x%x\n", err); goto err_pci_reg; } @@ -3689,8 +4046,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __IAVF_STARTUP; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. 
*/ pci_save_state(pdev); @@ -3710,11 +4067,11 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; - /* set up the locks for the AQ, do this only once in probe + /* set up the spinlocks for the AQ, do this only once in probe * and destroy them only once in remove */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); + iavf_init_spinlock_d(&hw->aq.asq_spinlock); + iavf_init_spinlock_d(&hw->aq.arq_spinlock); spin_lock_init(&adapter->mac_vlan_list_lock); spin_lock_init(&adapter->cloud_filter_list_lock); @@ -3723,14 +4080,11 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); - INIT_WORK(&adapter->reset_task, iavf_reset_task); INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); - queue_delayed_work(iavf_wq, &adapter->init_task, + queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); - /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); @@ -3814,7 +4168,6 @@ static int iavf_resume(struct pci_dev *pdev) rtnl_lock(); err = iavf_set_interrupt_capability(adapter); if (err) { - rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } @@ -3825,8 +4178,7 @@ static int iavf_resume(struct pci_dev *pdev) return err; } - queue_work(iavf_wq, &adapter->reset_task); - + iavf_schedule_reset(adapter); netif_device_attach(netdev); return err; @@ -3842,24 +4194,32 @@ static int iavf_resume(struct pci_dev *pdev) * Hot-Plug event, or because the driver is going to be removed from * memory. 
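 * (Teardown below runs roughly in the reverse order of probe: the
 * netdev is unregistered first, then interrupts, rings and queue
 * vectors are freed, and the admin queue locks are destroyed last.)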
**/ +#ifdef HAVE_CONFIG_HOTPLUG +static void __devexit iavf_remove(struct pci_dev *pdev) +#else static void iavf_remove(struct pci_dev *pdev) +#endif { struct net_device *netdev = pci_get_drvdata(pdev); struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_vlan_filter *vlf, *vlftmp; - struct iavf_mac_filter *f, *ftmp; struct iavf_cloud_filter *cf, *cftmp; + struct iavf_mac_filter *f, *ftmp; struct iavf_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); - cancel_delayed_work_sync(&adapter->init_task); - cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); if (adapter->netdev_registered) { + /* will call iavf_close if the device was open previously */ unregister_netdev(netdev); adapter->netdev_registered = false; + /* wait for device down */ + wait_event_timeout(adapter->down_waitqueue, + adapter->state == __IAVF_DOWN, + msecs_to_jiffies(200)); } + if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_del_device(adapter); if (err) @@ -3867,8 +4227,9 @@ static void iavf_remove(struct pci_dev *pdev) err); } + dev_info(&adapter->pdev->dev, "Removing device\n"); /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_request_reset(adapter); @@ -3878,33 +4239,35 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } - iavf_free_all_tx_resources(adapter); - iavf_free_all_rx_resources(adapter); + iavf_misc_irq_disable(adapter); - iavf_free_misc_irq(adapter); - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); + cancel_work_sync(&adapter->adminq_task); cancel_delayed_work_sync(&adapter->watchdog_task); - cancel_work_sync(&adapter->adminq_task); - + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_q_vectors(adapter); iavf_free_rss(adapter); if (hw->aq.asq.count) iavf_shutdown_adminq(hw); /* destroy the locks only once, here */ - mutex_destroy(&hw->aq.arq_mutex); - mutex_destroy(&hw->aq.asq_mutex); + iavf_destroy_spinlock_d(&hw->aq.arq_spinlock); + iavf_destroy_spinlock_d(&hw->aq.asq_spinlock); iounmap(hw->hw_addr); pci_release_regions(pdev); - iavf_free_all_tx_resources(adapter); - iavf_free_all_rx_resources(adapter); + iavf_free_queues(adapter); kfree(adapter->vf_res); + adapter->vf_res = NULL; + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* If we got removed before an up/down sequence, we've got a filter * hanging out there that we need to get rid of. 
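 * (The list walks that follow drop any remaining MAC, VLAN and cloud
 * filters; note the mac_vlan_list_lock is already held at this point.)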
*/ @@ -3938,7 +4301,11 @@ static struct pci_driver iavf_driver = { .name = iavf_driver_name, .id_table = iavf_pci_tbl, .probe = iavf_probe, +#ifdef HAVE_CONFIG_HOTPLUG + .remove = __devexit_p(iavf_remove), +#else .remove = iavf_remove, +#endif #ifdef CONFIG_PM .suspend = iavf_suspend, .resume = iavf_resume, @@ -3961,7 +4328,7 @@ static int __init iavf_init_module(void) pr_info("%s\n", iavf_copyright); - iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + iavf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, iavf_driver_name); if (!iavf_wq) { pr_err("%s: Failed to create workqueue\n", iavf_driver_name); diff --git a/drivers/net/ethernet/intel/iavf/iavf_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h index a452ce90679a..87daf3fb21d1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_osdep.h +++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_OSDEP_H_ #define _IAVF_OSDEP_H_ @@ -9,15 +9,43 @@ #include #include #include +#include -/* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include +#include +#include + +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif +#include "kcompat.h" /* File to be the magic between shared code and * actual OS primitives */ -#define hw_dbg(hw, S, A...) do {} while (0) +#define hw_dbg(h, s, ...) do { \ + pr_debug("iavf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ +} while (0) + #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) @@ -25,7 +53,6 @@ #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define iavf_flush(a) readl((a)->hw_addr + IAVF_VFGEN_RSTAT) - /* memory allocation tracking */ struct iavf_dma_mem { void *va; @@ -34,13 +61,15 @@ struct iavf_dma_mem { }; #define iavf_allocate_dma_mem(h, m, unused, s, a) \ - iavf_allocate_dma_mem_d(h, m, s, a) + iavf_allocate_dma_mem_d(h, m, unused, s, a) + #define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m) struct iavf_virt_mem { void *va; u32 size; }; + #define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s) #define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m) @@ -52,4 +81,29 @@ do { \ (h)->bus.func, ##__VA_ARGS__); \ } while (0) +/* SW spinlock */ +struct iavf_spinlock { + struct mutex spinlock; +}; + +static inline void iavf_no_action(struct iavf_spinlock *sp) +{ + /* nothing */ +} + +/* the locks are initialized in _probe and destroyed in _remove + * so make sure NOT to implement init/destroy here, as to + * avoid the iavf_init_adminq code trying to reinitialize + * the persistent lock memory + */ +#define iavf_init_spinlock(_sp) iavf_no_action(_sp) +#define iavf_acquire_spinlock(_sp) iavf_acquire_spinlock_d(_sp) +#define iavf_release_spinlock(_sp) iavf_release_spinlock_d(_sp) +#define iavf_destroy_spinlock(_sp) iavf_no_action(_sp) + +#define IAVF_HTONL(a) htonl(a) + +#define iavf_memset(a, b, c, d) memset((a), (b), (c)) +#define iavf_memcpy(a, b, c, d) memcpy((a), (b), (c)) + #endif 
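/* Editor's note: an illustrative sketch, not part of the patch. Shared
 * code is expected to serialize admin queue access through the wrappers
 * above, which here resolve to the mutex-backed *_d helpers, e.g.:
 *
 *	iavf_acquire_spinlock(&hw->aq.asq_spinlock);
 *	...critical section touching the send queue...
 *	iavf_release_spinlock(&hw->aq.asq_spinlock);
 *
 * Init and destroy are deliberately no-ops here because probe/remove
 * own the lock lifetime, as the comment above explains.
 */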
/* _IAVF_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index edebfbbcffdc..d0be03842af7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -1,12 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_PROTOTYPE_H_ #define _IAVF_PROTOTYPE_H_ #include "iavf_type.h" #include "iavf_alloc.h" -#include +#include "virtchnl.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. These are @@ -23,10 +23,10 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, struct iavf_arq_event_info *e, u16 *events_pending); enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, - struct iavf_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct iavf_asq_cmd_details *cmd_details); + struct iavf_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct iavf_asq_cmd_details *cmd_details); bool iavf_asq_done(struct iavf_hw *hw); /* debug function for adminq */ @@ -34,20 +34,21 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, void *buffer, u16 buf_len); void iavf_idle_aq(struct iavf_hw *hw); -void iavf_resume_aq(struct iavf_hw *hw); bool iavf_check_asq_alive(struct iavf_hw *hw); enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading); -const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err); -const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err); enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); -enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid, +enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, + u16 seid, struct iavf_aqc_get_set_rss_key_data *key); -enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, +enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, + u16 seid, struct iavf_aqc_get_set_rss_key_data *key); +const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err); +const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err); enum iavf_status iavf_set_mac_type(struct iavf_hw *hw); @@ -58,12 +59,25 @@ static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) return iavf_ptype_lookup[ptype]; } +/* prototype for functions used for SW spinlocks */ +void iavf_init_spinlock(struct iavf_spinlock *sp); +void iavf_acquire_spinlock(struct iavf_spinlock *sp); +void iavf_release_spinlock(struct iavf_spinlock *sp); +void iavf_destroy_spinlock(struct iavf_spinlock *sp); + void iavf_vf_parse_hw_config(struct iavf_hw *hw, struct virtchnl_vf_resource *msg); enum iavf_status iavf_vf_reset(struct iavf_hw *hw); enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, - enum virtchnl_ops v_opcode, - enum iavf_status v_retval, - u8 *msg, u16 msglen, - struct iavf_asq_cmd_details *cmd_details); + enum virtchnl_ops v_opcode, + enum iavf_status v_retval, + u8 *msg, u16 msglen, + struct iavf_asq_cmd_details *cmd_details); +enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct iavf_asq_cmd_details *cmd_details); 
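/* Editor's note: an illustrative sketch, not part of the patch. The
 * admin queue prototypes above follow a simple request pattern;
 * assuming an initialized struct iavf_hw, a stats request could look
 * like this (opcode and payload chosen only as an example, and
 * IAVF_SUCCESS (0) passed as the v_retval placeholder):
 */
static inline enum iavf_status iavf_example_get_stats(struct iavf_hw *hw,
						      u16 vsi_id)
{
	struct virtchnl_queue_select vqs = {};

	vqs.vsi_id = vsi_id;
	/* NULL details: let the admin queue use default completion handling */
	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_GET_STATS, IAVF_SUCCESS,
				      (u8 *)&vqs, sizeof(vqs), NULL);
}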
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+			struct iavf_asq_cmd_details *cmd_details);
 #endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
index bf793332fc9d..5c59fdced0be 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
@@ -1,68 +1,92 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright (c) 2013, Intel Corporation. */
 
 #ifndef _IAVF_REGISTER_H_
 #define _IAVF_REGISTER_H_
 
-#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
 #define IAVF_VF_ARQH1_ARQH_SHIFT 0
-#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
-#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
-#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
-#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
+#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
 #define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
-#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
-#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
 #define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
-#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
 #define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
-#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
-#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
-#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
 #define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
-#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
-#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(0x1, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
-#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
-#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
-#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
-#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define IAVF_VFQF_HKEY_MAX_INDEX 12
-#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define IAVF_VFQF_HLUT_MAX_INDEX 15
-#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
-#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define IAVF_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define IAVF_VFINT_ICR01_QUEUE_0_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_QUEUE_0_SHIFT)
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define IAVF_VFINT_ICR01_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR01_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_ADMINQ_SHIFT)
+#define IAVF_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY_MAX_INDEX 12
+#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_VFQF_HLUT_MAX_INDEX 15
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+
 #endif /* _IAVF_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46e3d1f6b604..39464899dc01 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright (c) 2013, Intel Corporation.
*/ #ifndef _IAVF_STATUS_H_ #define _IAVF_STATUS_H_ diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h index 1058e68a02b4..816c5266ea24 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_trace.h +++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h @@ -1,9 +1,23 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ -/* Modeled on trace-events-sample.h */ +#ifndef CONFIG_TRACEPOINTS +#if !defined(_IAVF_TRACE_H_) +#define _IAVF_TRACE_H_ +/* If the Linux kernel tracepoints are not available then the iavf_trace* + * macros become nops. + */ -/* The trace subsystem name for iavf will be "iavf". +#define iavf_trace(trace_name, args...) +#define iavf_trace_enabled(trace_name) (0) +#endif /* !defined(_IAVF_TRACE_H_) */ +#else /* CONFIG_TRACEPOINTS */ +/* + * Modeled on trace-events-sample.h + */ + +/* + * The trace subsystem name for iavf will be "iavf". * * This file is named iavf_trace.h. * @@ -14,7 +28,8 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM iavf -/* See trace-events-sample.h for a detailed description of why this +/* + * See trace-events-sample.h for a detailed description of why this * guard clause is different from most normal include files. */ #if !defined(_IAVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) @@ -26,7 +41,7 @@ * iavf_trace() macro enables shared code to refer to trace points * like: * - * trace_iavf{,vf}_example(args...) + * trace_i40e{,vf}_example(args...) * * ... as: * @@ -40,7 +55,7 @@ * macro. * * Similarly, iavf_trace_enabled(trace_name) wraps references to - * trace_iavf{,vf}__enabled() functions. + * trace_i40e{,vf}__enabled() functions. */ #define _IAVF_TRACE_NAME(trace_name) (trace_ ## iavf ## _ ## trace_name) #define IAVF_TRACE_NAME(trace_name) _IAVF_TRACE_NAME(trace_name) @@ -49,7 +64,8 @@ #define iavf_trace_enabled(trace_name) IAVF_TRACE_NAME(trace_name##_enabled)() -/* Events common to PF and VF. Corresponding versions will be defined +/* + * Events common to PF and VF. Corresponding versions will be defined * for both, named trace_iavf_* and trace_iavf_*. The iavf_trace() * macro above will select the right trace point name for the driver * being built from shared code. @@ -65,7 +81,8 @@ DECLARE_EVENT_CLASS( TP_ARGS(ring, desc, buf), - /* The convention here is to make the first fields in the + /* + * The convention here is to make the first fields in the * TP_STRUCT match the TP_PROTO exactly. This enables the use * of the args struct generated by the tplist tool (from the * bcc-tools package) to be used for those fields. To access @@ -112,7 +129,7 @@ DECLARE_EVENT_CLASS( iavf_rx_template, TP_PROTO(struct iavf_ring *ring, - union iavf_32byte_rx_desc *desc, + union iavf_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb), @@ -140,7 +157,7 @@ DECLARE_EVENT_CLASS( DEFINE_EVENT( iavf_rx_template, iavf_clean_rx_irq, TP_PROTO(struct iavf_ring *ring, - union iavf_32byte_rx_desc *desc, + union iavf_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); @@ -148,7 +165,7 @@ DEFINE_EVENT( DEFINE_EVENT( iavf_rx_template, iavf_clean_rx_irq_rx, TP_PROTO(struct iavf_ring *ring, - union iavf_32byte_rx_desc *desc, + union iavf_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); @@ -193,7 +210,9 @@ DEFINE_EVENT( TP_ARGS(skb, ring)); -/* Events unique to the VF. */ +/* + * Events unique to the VF. 
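+ * (No VF-only events are defined at this point; one would be added
+ * with DEFINE_EVENT() against a suitable template, as the common
+ * events above demonstrate.)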
+ */ #endif /* _IAVF_TRACE_H_ */ /* This must be outside ifdef _IAVF_TRACE_H */ @@ -207,3 +226,4 @@ DEFINE_EVENT( #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE iavf_trace #include +#endif /* CONFIG_TRACEPOINTS */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 7a30d5d5ef53..026d17344a93 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #include - #include "iavf.h" #include "iavf_trace.h" #include "iavf_prototype.h" @@ -103,9 +102,9 @@ void iavf_free_tx_resources(struct iavf_ring *tx_ring) } /** - * iavf_get_tx_pending - how many Tx descriptors not processed + * iavf_get_tx_pending - how many tx descriptors not processed * @ring: the ring of descriptors - * @in_sw: is tx_pending being checked in SW or HW + * @in_sw: use SW variables * * Since there is no access to the ring head register * in XL710, we need to use our local copies @@ -172,7 +171,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi) */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = - iavf_get_tx_pending(tx_ring, true) ? packets : -1; + iavf_get_tx_pending(tx_ring, true) ? packets : -1; } } } @@ -208,7 +207,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - smp_rmb(); + read_barrier_depends(); iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ @@ -223,8 +222,8 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, total_bytes += tx_buf->bytecount; total_packets += tx_buf->gso_segs; - /* free the skb */ - napi_consume_skb(tx_buf->skb, napi_budget); + /* free the skb/XDP data */ + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -300,6 +299,9 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, tx_ring->arm_wb = true; } + if (ring_is_xdp(tx_ring)) + return !!budget; + /* notify netdev of completed buffers */ netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); @@ -379,19 +381,19 @@ static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) unsigned int divisor; switch (q_vector->adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: + case VIRTCHNL_LINK_SPEED_40GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; break; - case IAVF_LINK_SPEED_25GB: - case IAVF_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_25GB: + case VIRTCHNL_LINK_SPEED_20GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; break; default: - case IAVF_LINK_SPEED_10GB: + case VIRTCHNL_LINK_SPEED_10GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; break; - case IAVF_LINK_SPEED_1GB: - case IAVF_LINK_SPEED_100MB: + case VIRTCHNL_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_100MB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; break; } @@ -605,6 +607,32 @@ clear_counts: rc->total_packets = 0; } +/** + * iavf_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *old_buff) +{ + struct iavf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_bi[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < 
rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + /** * iavf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up @@ -714,6 +742,7 @@ void iavf_clean_rx_ring(struct iavf_ring *rx_ring) void iavf_free_rx_resources(struct iavf_ring *rx_ring) { iavf_clean_rx_ring(rx_ring); + rx_ring->xdp_prog = NULL; kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; @@ -733,6 +762,7 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring) int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { struct device *dev = rx_ring->dev; + int err = -ENOMEM; int bi_size; /* warn if we are about to overwrite the pointer */ @@ -741,11 +771,13 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) goto err; +#ifdef HAVE_NDO_GET_STATS64 u64_stats_init(&rx_ring->syncp); +#endif /* HAVE_NDO_GET_STATS64 */ /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union iavf_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -759,12 +791,11 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - return 0; err: kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; - return -ENOMEM; + return err; } /** @@ -861,12 +892,42 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct iavf_q_vector *q_vector = rx_ring->q_vector; +#ifdef HAVE_VLAN_RX_REGISTER + struct iavf_vsi *vsi = rx_ring->vsi; +#endif +#ifdef HAVE_VLAN_RX_REGISTER + if (vlan_tag & VLAN_VID_MASK) { + if (!vsi->vlgrp) + dev_kfree_skb_any(skb); + else + vlan_gro_receive(&q_vector->napi, vsi->vlgrp, + vlan_tag, skb); + } else { + napi_gro_receive(&q_vector->napi, skb); + } +#else /* HAVE_VLAN_RX_REGISTER */ +#ifdef NETIF_F_HW_VLAN_CTAG_RX if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) +#else + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_RX) && + (vlan_tag & VLAN_VID_MASK)) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); +#ifdef IAVF_ADD_PROBES +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) +#else + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_RX) && + (vlan_tag & VLAN_VID_MASK)) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ + rx_ring->vsi->back->rx_vlano++; +#endif /* IAVF_ADD_PROBES */ napi_gro_receive(&q_vector->napi, skb); +#endif /* HAVE_VLAN_RX_REGISTER */ } /** @@ -934,6 +995,44 @@ no_buffers: return true; } +#ifdef IAVF_ADD_PROBES +static void iavf_rx_extra_counters(struct iavf_vsi *vsi, u32 rx_error, + const struct iavf_rx_ptype_decoded decoded) +{ + bool ipv4; + + ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); + + if (ipv4 && + (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | + BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT)))) + vsi->back->rx_ip4_cso_err++; + + if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT)) { + if (decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_TCP) + vsi->back->rx_tcp_cso_err++; + else if (decoded.inner_prot == 
IAVF_RX_PTYPE_INNER_PROT_UDP) + vsi->back->rx_udp_cso_err++; + else if (decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_SCTP) + vsi->back->rx_sctp_cso_err++; + } + + if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4) + vsi->back->rx_ip4_cso++; + if (decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_TCP) + vsi->back->rx_tcp_cso++; + else if (decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_UDP) + vsi->back->rx_udp_cso++; + else if (decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_SCTP) + vsi->back->rx_sctp_cso++; +} + +#endif /* IAVF_ADD_PROBES */ +#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD) +#define IAVF_TUNNEL_SUPPORT +#endif /** * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about @@ -963,8 +1062,13 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi, skb_checksum_none_assert(skb); /* Rx csum enabled and ip headers found? */ +#ifdef HAVE_NDO_SET_FEATURES if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; +#else + if (!(vsi->back->flags & IAVF_FLAG_RX_CSUM_ENABLED)) + return; +#endif /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) @@ -973,12 +1077,19 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi, /* both known and outer_ip must be set for the below code to work */ if (!(decoded.known && decoded.outer_ip)) return; +#ifdef IAVF_ADD_PROBES + vsi->back->hw_csum_rx_outer++; +#endif ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); +#ifdef IAVF_ADD_PROBES + iavf_rx_extra_counters(vsi, rx_error, decoded); + +#endif /* IAVF_ADD_PROBES */ if (ipv4 && (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT)))) @@ -1007,11 +1118,10 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi, case IAVF_RX_PTYPE_INNER_PROT_UDP: case IAVF_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; - /* fall though */ + /* fall through */ default: break; } - return; checksum_fail: @@ -1024,7 +1134,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline int iavf_ptype_to_htype(u8 ptype) +static inline enum pkt_hash_types iavf_ptype_to_htype(u8 ptype) { struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1053,18 +1163,20 @@ static inline void iavf_rx_hash(struct iavf_ring *ring, struct sk_buff *skb, u8 rx_ptype) { +#ifdef NETIF_F_RXHASH u32 hash; const __le64 rss_mask = cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (ring->netdev->features & NETIF_F_RXHASH) + if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype)); } +#endif /* NETIF_F_RXHASH */ } /** @@ -1097,6 +1209,7 @@ void iavf_process_skb_fields(struct iavf_ring *rx_ring, * iavf_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed + * @rx_desc: pointer to the EOP Rx descriptor * * Also address the case where we are pulling data in on pages only * and as such no data is present in the skb header. 
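/* Editor's note: an illustrative sketch, not part of the patch. The Rx
 * hash handling above only forwards the hardware hash to the stack when
 * the netdev feature is enabled and the descriptor reports a valid RSS
 * result; a minimal version of that gate is:
 */
static inline void iavf_example_set_hash(struct net_device *netdev,
					 struct sk_buff *skb, u32 hash,
					 enum pkt_hash_types type)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;	/* the stack did not request hardware hashing */

	skb_set_hash(skb, hash, type);
}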
@@ -1106,8 +1219,24 @@ void iavf_process_skb_fields(struct iavf_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. **/ -static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) +static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb, + union iavf_rx_desc *rx_desc) { + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(iavf_test_staterr(rx_desc, + BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { + dev_kfree_skb_any(skb); + return true; + } + /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -1115,32 +1244,6 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) return false; } -/** - * iavf_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *old_buff) -{ - struct iavf_rx_buffer *new_buff; - u16 nta = rx_ring->next_to_alloc; - - new_buff = &rx_ring->rx_bi[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->dma = old_buff->dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - /** * iavf_page_is_reusable - check if any reuse is possible * @page: page struct to check @@ -1205,10 +1308,17 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE if (unlikely(!pagecnt_bias)) { page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } +#else + if (likely(!pagecnt_bias)) { + get_page(page); + rx_buffer->pagecnt_bias = 1; + } +#endif return true; } @@ -1233,12 +1343,11 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring, #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); + unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); #endif if (!size) return; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); @@ -1265,7 +1374,6 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, if (!size) return NULL; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); @@ -1286,7 +1394,7 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, * iavf_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from - * @size: size of buffer to add to skb + * @xdp: xdp_buff pointing to the data * * This function allocates an skb. 
It then populates it with the page * data from the current receive descriptor, taking care to set up the @@ -1294,13 +1402,15 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, */ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, - unsigned int size) + struct xdp_buff *xdp) { - void *va; + unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data; + #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IAVF_SKB_PAD + size); #endif unsigned int headlen; struct sk_buff *skb; @@ -1308,10 +1418,9 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, if (!rx_buffer) return NULL; /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch((void *)((u8 *)xdp->data + L1_CACHE_BYTES)); #endif /* allocate a skb to store the frags */ @@ -1324,10 +1433,12 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, /* Determine available headroom for copy */ headlen = size; if (headlen > IAVF_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, xdp->data, + IAVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen), xdp->data, + ALIGN(headlen, sizeof(long))); /* update all of the pointers */ size -= headlen; @@ -1350,43 +1461,46 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, return skb; } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC /** * iavf_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buffer: Rx buffer to pull data from - * @size: size of buffer to add to skb + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from + * @xdp: xdp_buff pointing to the data * * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. 
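 * (Because build_skb() wraps the page backing xdp->data_hard_start
 * directly, only skb metadata is initialized here; no payload bytes
 * are copied.)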
*/ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, - unsigned int size) + struct xdp_buff *xdp) { - void *va; + unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data; + #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IAVF_SKB_PAD + size); + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); #endif struct sk_buff *skb; if (!rx_buffer) return NULL; + /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(xdp->data + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ - skb = build_skb(va - IAVF_SKB_PAD, truesize); + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IAVF_SKB_PAD); + skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, size); /* buffer is used by skb, update page_offset */ @@ -1399,6 +1513,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, return skb; } +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ /** * iavf_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on @@ -1463,6 +1578,57 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring, return true; } +#define IAVF_XDP_PASS 0 +#define IAVF_XDP_CONSUMED BIT(0) +#define IAVF_XDP_TX BIT(1) +#define IAVF_XDP_REDIR BIT(2) + +/** + * iavf_run_xdp - run an XDP program + * @rx_ring: Rx ring being processed + * @xdp: XDP buffer containing the frame + **/ +static struct sk_buff *iavf_run_xdp(struct iavf_ring *rx_ring, + struct xdp_buff *xdp) +{ + int result = IAVF_XDP_PASS; + return (struct sk_buff *)ERR_PTR(-result); +} + +/** + * iavf_rx_buffer_flip - adjusted rx_buffer to point to an unused region + * @rx_ring: Rx ring + * @rx_buffer: Rx buffer to adjust + * @size: Size of adjustment + **/ +static void iavf_rx_buffer_flip(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; + + if (!rx_buffer) + return; + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = SKB_DATA_ALIGN(iavf_rx_offset(rx_ring) + size); + + if (!rx_buffer) + return; + rx_buffer->page_offset += truesize; +#endif +} + +static inline void iavf_xdp_ring_update_tail(struct iavf_ring *xdp_ring) +{ + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. 
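+	 * The wmb() orders the descriptor writes against the tail update
+	 * that follows, which is why the cheaper writel_relaxed() is
+	 * enough for the doorbell write itself.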
+ */ + wmb(); + writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); +} + /** * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on @@ -1480,7 +1646,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); + unsigned int xdp_xmit = 0; bool failure = false; + struct xdp_buff xdp; + +#ifdef HAVE_XDP_BUFF_RXQ + xdp.rxq = &rx_ring->xdp_rxq; +#endif while (likely(total_rx_packets < (unsigned int)budget)) { struct iavf_rx_buffer *rx_buffer; @@ -1522,12 +1694,40 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) rx_buffer = iavf_get_rx_buffer(rx_ring, size); /* retrieve a buffer from the ring */ - if (skb) + if (!skb) { + if (rx_buffer) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + xdp.data_hard_start = (void *)((u8 *)xdp.data - + iavf_rx_offset(rx_ring)); + xdp.data_end = (void *)((u8 *)xdp.data + size); + skb = iavf_run_xdp(rx_ring, &xdp); + } else { + break; + } + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IAVF_XDP_TX | IAVF_XDP_REDIR)) { + xdp_xmit |= xdp_res; + iavf_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + if (rx_buffer) + rx_buffer->pagecnt_bias++; + } + total_rx_bytes += size; + total_rx_packets++; + } else if (skb) { iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); - else if (ring_uses_build_skb(rx_ring)) - skb = iavf_build_skb(rx_ring, rx_buffer, size); - else - skb = iavf_construct_skb(rx_ring, rx_buffer, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = iavf_build_skb(rx_ring, rx_buffer, &xdp); +#endif + } else { + skb = iavf_construct_skb(rx_ring, rx_buffer, &xdp); + } /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1543,18 +1743,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) if (iavf_is_non_eop(rx_ring, rx_desc, skb)) continue; - /* ERR_MASK will only have valid bits if EOP set, and - * what we are doing here is actually checking - * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in - * the error field - */ - if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { - dev_kfree_skb_any(skb); - skb = NULL; - continue; - } - - if (iavf_cleanup_headers(rx_ring, skb)) { + if (iavf_cleanup_headers(rx_ring, skb, rx_desc)) { skb = NULL; continue; } @@ -1569,7 +1758,6 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* populate checksum, VLAN, and protocol */ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; @@ -1581,6 +1769,15 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) total_rx_packets++; } + if (xdp_xmit & IAVF_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IAVF_XDP_TX) { + struct iavf_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + + iavf_xdp_ring_update_tail(xdp_ring); + } rx_ring->skb = skb; u64_stats_update_begin(&rx_ring->syncp); @@ -1706,6 +1903,7 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) container_of(napi, struct iavf_q_vector, napi); struct iavf_vsi *vsi = q_vector->vsi; struct iavf_ring *ring; + u64 flags = vsi->back->flags; bool clean_complete = true; bool arm_wb = false; int budget_per_ring; @@ -1746,8 +1944,15 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) clean_complete = false; } +#ifndef HAVE_NETDEV_NAPI_LIST + /* if netdev is disabled we need to stop polling */ + if (!netif_running(vsi->netdev)) + clean_complete = true; + +#endif /* If work not completed, return budget and polling will return */ if (!clean_complete) { +#ifdef HAVE_IRQ_AFFINITY_NOTIFY int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, @@ -1767,6 +1972,7 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) /* Return budget-1 so that polling stops */ return budget - 1; } +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; @@ -1775,16 +1981,15 @@ tx_only: return budget; } - if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) + if (flags & IAVF_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; - /* Exit the polling mode, but don't re-enable interrupts if stack might - * poll us due to busy-polling - */ - if (likely(napi_complete_done(napi, work_done))) - iavf_update_enable_itr(vsi, q_vector); + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); - return min(work_done, budget - 1); + iavf_update_enable_itr(vsi, q_vector); + + return min_t(int, work_done, budget - 1); } /** @@ -1806,8 +2011,13 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, __be16 protocol = skb->protocol; u32 tx_flags = 0; +#ifdef NETIF_F_HW_VLAN_CTAG_RX if (protocol == htons(ETH_P_8021Q) && !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { +#else + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_TX)) { +#endif /* When HW VLAN acceleration is turned off by the user the * stack sets the protocol to 8021q so that the driver * can take any steps required to support the SW only @@ -1841,6 +2051,32 @@ out: return 0; } +#ifdef IAVF_ADD_PROBES +/** + * iavf_update_gso_counter - update TSO/USO counter + * @tx_buffer: Tx buffer with necessary data to update counter + */ +static void iavf_update_gso_counter(struct iavf_tx_buffer *tx_buffer) +{ + struct sk_buff *skb = tx_buffer->skb; + struct iavf_adapter *adapter = + netdev_priv(skb->dev); + + if (skb->csum_offset == offsetof(struct tcphdr, check)) + adapter->tcp_segs += tx_buffer->gso_segs; + else + adapter->udp_segs += tx_buffer->gso_segs; +} +#endif + +#ifndef HAVE_ENCAP_TSO_OFFLOAD +#define inner_ip_hdr(skb) 0 +#define inner_tcp_hdr(skb) 0 +#define inner_ipv6_hdr(skb) 0 +#define inner_tcp_hdrlen(skb) 0 +#define inner_tcp_hdrlen(skb) 0 +#define skb_inner_transport_header(skb) ((skb)->data) +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ /** * iavf_tso - set up the tso context descriptor * @first: pointer to first Tx buffer for xmit @@ -1889,14 +2125,32 
@@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, ip.v6->payload_len = 0; } +#ifdef HAVE_ENCAP_TSO_OFFLOAD if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | +#ifdef NETIF_F_GSO_PARTIAL SKB_GSO_GRE_CSUM | +#endif +#ifdef NETIF_F_GSO_IPXIP4 SKB_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_IPXIP6 SKB_GSO_IPXIP6 | +#endif +#else +#ifdef NETIF_F_GSO_IPIP + SKB_GSO_IPIP | +#endif +#ifdef NETIF_F_GSO_SIT + SKB_GSO_SIT | +#endif +#endif SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { +#ifndef NETIF_F_GSO_PARTIAL + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { +#else if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { +#endif l4.udp->len = 0; /* determine offset of outer transport header */ @@ -1921,21 +2175,36 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, } } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; - /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); - /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + /* compute length of TCP segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + } else { + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + /* compute length of UDP segmentation header */ + *hdr_len = sizeof(l4.udp) + l4_offset; + } /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; gso_segs = skb_shinfo(skb)->gso_segs; - /* update GSO size and bytecount with header size */ +#ifndef HAVE_NDO_FEATURES_CHECK + /* too small a TSO segment size causes problems */ + if (gso_size < 64) { + gso_size = 64; + gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64); + } +#endif + /* update gso size and bytecount with header size */ first->gso_segs = gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; @@ -1946,6 +2215,9 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT); +#ifdef IAVF_ADD_PROBES + iavf_update_gso_counter(first); +#endif return 1; } @@ -1987,6 +2259,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* compute outer L2 header size */ offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; +#ifdef HAVE_ENCAP_CSUM_OFFLOAD if (skb->encapsulation) { u32 tunnel = 0; /* define outer network header type */ @@ -2010,17 +2283,29 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, switch (l4_proto) { case IPPROTO_UDP: tunnel |= IAVF_TXD_CTX_UDP_TUNNELING; - *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= IAVF_TX_FLAGS_TUNNEL; break; +#ifdef HAVE_GRE_ENCAP_OFFLOAD case IPPROTO_GRE: tunnel |= IAVF_TXD_CTX_GRE_TUNNELING; - *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= IAVF_TX_FLAGS_TUNNEL; + /* There was a long-standing issue in GRE where GSO + * was not setting the outer transport header unless + * a GRE checksum was requested. This was fixed in + * the 4.6 version of the kernel. In the 4.7 kernel + * support for GRE over IPv6 was added to GSO. So we + * can assume this workaround for all IPv4 headers + * without impacting later versions of the GRE. 
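+			 * For IPv4 that outer transport header simply
+			 * starts ihl * 4 bytes past the outer IP header,
+			 * which is what the assignment below computes.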
+ */ + if (ip.v4->version == 4) + l4.hdr = ip.hdr + (ip.v4->ihl * 4); break; case IPPROTO_IPIP: case IPPROTO_IPV6: - *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= IAVF_TX_FLAGS_TUNNEL; l4.hdr = skb_inner_network_header(skb); break; +#endif default: if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; @@ -2029,6 +2314,11 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } +#ifdef IAVF_ADD_PROBES + if (*tx_flags & IAVF_TX_FLAGS_IPV4) + if (*tx_flags & IAVF_TX_FLAGS_TSO) + tx_ring->vsi->back->tx_ip4_cso++; +#endif /* compute outer L3 header size */ tunnel |= ((l4.hdr - ip.hdr) / 4) << IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT; @@ -2042,7 +2332,9 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* indicate if we need to offload outer UDP header */ if ((*tx_flags & IAVF_TX_FLAGS_TSO) && +#ifdef NETIF_F_GSO_PARTIAL !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && +#endif (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; @@ -2060,16 +2352,21 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, if (ip.v6->version == 6) *tx_flags |= IAVF_TX_FLAGS_IPV6; } +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ /* Enable IP checksum offloads */ if (*tx_flags & IAVF_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; +#ifdef IAVF_ADD_PROBES + tx_ring->vsi->back->tx_ip4_cso++; +#endif /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : IAVF_TX_DESC_CMD_IIPT_IPV4; +#ifdef NETIF_F_IPV6_CSUM } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; @@ -2078,6 +2375,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, if (l4.hdr != exthdr) ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); +#endif } /* compute inner L3 header size */ @@ -2089,18 +2387,29 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* enable checksum offloads */ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; +#ifdef IAVF_ADD_PROBES + tx_ring->vsi->back->tx_tcp_cso++; +#endif break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ +#ifdef HAVE_SCTP cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; offset |= (sizeof(struct sctphdr) >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; +#ifdef IAVF_ADD_PROBES + tx_ring->vsi->back->tx_sctp_cso++; +#endif +#endif /* HAVE_SCTP */ break; case IPPROTO_UDP: /* enable UDP checksum offload */ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; offset |= (sizeof(struct udphdr) >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; +#ifdef IAVF_ADD_PROBES + tx_ring->vsi->back->tx_udp_cso++; +#endif break; default: if (*tx_flags & IAVF_TX_FLAGS_TSO) @@ -2146,6 +2455,29 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } +/** + * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + /** * __iavf_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer @@ -2230,29 +2562,6 @@ bool __iavf_chk_linearize(struct sk_buff *skb) return false; } -/** - * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns -EBUSY if a stop is needed, else 0 - **/ -int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Memory barrier before checking head and tail */ - smp_mb(); - - /* Check again in a case another CPU has just made room available. */ - if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - /** * iavf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -2262,10 +2571,12 @@ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) * @hdr_len: size of the packet header * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc + * + * Returns 0 on success, negative error code on DMA failure. **/ -static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, - struct iavf_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline int iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, + struct iavf_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); @@ -2280,6 +2591,9 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> IAVF_TX_FLAGS_VLAN_SHIFT; +#ifdef IAVF_ADD_PROBES + tx_ring->vsi->back->tx_vlano++; +#endif } first->tx_flags = tx_flags; @@ -2361,6 +2675,9 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there @@ -2375,11 +2692,39 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, first->next_to_watch = tx_desc; /* notify HW of packet */ +#ifdef HAVE_SKB_XMIT_MORE if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - } - return; +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + /* We need this mmiowb on IA64/Altix systems where wmb() isn't + * guaranteed to synchronize I/O. + * + * Note that mmiowb() only provides a guarantee about ordering + * when in conjunction with a spin_unlock(). This barrier is + * used to guarantee the I/O ordering with respect to a spin + * lock in the networking core code. + */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } +#else + writel(i, tx_ring->tail); + +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + /* We need this mmiowb on IA64/Altix systems where wmb() isn't + * guaranteed to synchronize I/O. 
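__iavf_maybe_stop_tx() above is only the slow half of the check. A minimal sketch of the fast-path wrapper it pairs with, assuming it mirrors the other Intel drivers (the real inline lives in iavf_txrx.h and is not part of this hunk):

static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	/* common case: enough free descriptors, no barrier and no MMIO */
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;

	/* slow path: stop the queue first, then re-check under smp_mb() */
	return __iavf_maybe_stop_tx(tx_ring, size);
}

Stopping before the re-check is what closes the race with the Tx cleanup interrupt: if cleanup frees descriptors between the two checks, the queue is simply restarted and restart_queue is incremented.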
+ * + * Note that mmiowb() only provides a guarantee about ordering when in + * conjunction with a spin_unlock(). This barrier is used to guarantee + * the I/O ordering with respect to a spin lock in the networking core + * code. + */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ +#endif /* HAVE_SKB_XMIT_MORE */ + + return 0; dma_error: dev_info(tx_ring->dev, "TX DMA map failed\n"); @@ -2396,8 +2741,12 @@ dma_error: } tx_ring->next_to_use = i; + + return -EIO; } + + /** * iavf_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer @@ -2485,6 +2834,9 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); +#ifndef HAVE_TRANS_START_IN_QUEUE + tx_ring->netdev->trans_start = jiffies; +#endif return NETDEV_TX_OK; out_drop: @@ -2495,13 +2847,13 @@ out_drop: } /** - * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * iavf_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code **/ -netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t iavf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; @@ -2509,12 +2861,9 @@ netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* hardware can't handle really short frames, hardware padding works * beyond this point */ - if (unlikely(skb->len < IAVF_MIN_TX_LEN)) { - if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len)) - return NETDEV_TX_OK; - skb->len = IAVF_MIN_TX_LEN; - skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN); - } + if (skb_put_padto(skb, IAVF_MIN_TX_LEN)) + return NETDEV_TX_OK; return iavf_xmit_frame_ring(skb, tx_ring); } + diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index dd3348f9da9d..d1232452fa66 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. */ #ifndef _IAVF_TXRX_H_ #define _IAVF_TXRX_H_ @@ -16,7 +16,7 @@ #define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */ #define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define IAVF_ITR_100K 10 /* all values below must be even */ +#define IAVF_ITR_100K 10 /* all values below must be even */ #define IAVF_ITR_50K 20 #define IAVF_ITR_20K 50 #define IAVF_ITR_18K 60 @@ -26,8 +26,8 @@ #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK) #define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC)) -#define IAVF_ITR_RX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) -#define IAVF_ITR_TX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) +#define IAVF_ITR_RX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) +#define IAVF_ITR_TX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) /* 0x40 is the enable bit for interrupt rate limiting, and must be set if * the value of the rate limit is non-zero */ #define INTRL_ENA BIT(6) #define IAVF_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) -#define INTRL_USEC_TO_REG(set) ((set) ?
((set) >> 2) | INTRL_ENA : 0) + +/** + * iavf_intrl_usec_to_reg - convert interrupt rate limit to register + * @intrl: interrupt rate limit to convert + * + * This function converts a decimal interrupt rate limit to the appropriate + * register format expected by the firmware when setting interrupt rate limit. + */ +static inline u16 iavf_intrl_usec_to_reg(int intrl) +{ + if (intrl >> 2) + return ((intrl >> 2) | INTRL_ENA); + else + return 0; +} #define IAVF_INTRL_8K 125 /* 8000 ints/sec */ #define IAVF_INTRL_62K 16 /* 62500 ints/sec */ #define IAVF_INTRL_83K 12 /* 83333 ints/sec */ @@ -81,6 +95,7 @@ enum iavf_dyn_idx_t { BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + /* Supported Rx Buffer Sizes (a multiple of 128) */ #define IAVF_RXBUFFER_256 256 #define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ @@ -99,8 +114,12 @@ enum iavf_dyn_idx_t { #define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define iavf_rx_desc iavf_32byte_rx_desc +#ifdef HAVE_STRUCT_DMA_ATTRS +#define IAVF_RX_DMA_ATTR NULL +#else #define IAVF_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif /* Attempt to maximize the headroom available for incoming frames. We * use a 2K buffer for receives and need 1536/1534 to store the data for @@ -251,7 +270,7 @@ static inline unsigned int iavf_txd_use_count(unsigned int size) #define IAVF_TX_FLAGS_FCCRC BIT(6) #define IAVF_TX_FLAGS_FSO BIT(7) #define IAVF_TX_FLAGS_FD_SB BIT(9) -#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define IAVF_TX_FLAGS_TUNNEL BIT(10) #define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 #define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -260,6 +279,7 @@ static inline unsigned int iavf_txd_use_count(unsigned int size) struct iavf_tx_buffer { struct iavf_tx_desc *next_to_watch; union { + struct xdp_frame *xdpf; struct sk_buff *skb; void *raw_buf; }; @@ -294,7 +314,6 @@ struct iavf_tx_queue_stats { u64 tx_linearize; u64 tx_force_wb; int prev_pkt_ctr; - u64 tx_lost_interrupt; }; struct iavf_rx_queue_stats { @@ -328,6 +347,7 @@ struct iavf_ring { void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ + struct bpf_prog *xdp_prog; union { struct iavf_tx_buffer *tx_bi; struct iavf_rx_buffer *rx_bi; @@ -337,7 +357,7 @@ struct iavf_ring { u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; - /* high bit set means dynamic, use accessors routines to read/write. + /* high bit set means dynamic, use accessor routines to read/write. * hardware only supports 2us resolution for the ITR registers. * these values always store the USER setting, and must be converted * before programming to a register. @@ -362,10 +382,13 @@ struct iavf_ring { u16 flags; #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXR_FLAGS_XDP BIT(2) /* stats structs */ struct iavf_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 struct u64_stats_sync syncp; +#endif union { struct iavf_tx_queue_stats tx_stats; struct iavf_rx_queue_stats rx_stats; @@ -387,6 +410,11 @@ struct iavf_ring { * iavf_clean_rx_ring_irq() is called * for this ring. 
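The iavf_intrl_usec_to_reg() helper introduced above quietly rounds down to the register's 4 usec granularity; a couple of worked values make the behavior concrete (INTRL_ENA is BIT(6) = 0x40):

/* iavf_intrl_usec_to_reg(10) = (10 >> 2) | INTRL_ENA = 0x02 | 0x40 = 0x42
 * INTRL_REG_TO_USEC(0x42)    = (0x42 & ~0x40) << 2   = 8
 * iavf_intrl_usec_to_reg(3)  = 0  (below 4 usec: rate limiting disabled)
 * i.e. a requested 10 usec limit is actually programmed as 8 usec.
 */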
*/ + + struct iavf_channel *ch; +#ifdef HAVE_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#endif } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct iavf_ring *ring) @@ -404,13 +432,23 @@ static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring) ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; } -#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 -#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e -#define IAVF_ITR_ADAPTIVE_LATENCY 0x8000 -#define IAVF_ITR_ADAPTIVE_BULK 0x0000 +#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 +#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e +#define IAVF_ITR_ADAPTIVE_LATENCY 0x8000 +#define IAVF_ITR_ADAPTIVE_BULK 0x0000 #define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY)) +static inline bool ring_is_xdp(struct iavf_ring *ring) +{ + return !!(ring->flags & IAVF_TXR_FLAGS_XDP); +} + +static inline void set_ring_xdp(struct iavf_ring *ring) +{ + ring->flags |= IAVF_TXR_FLAGS_XDP; +} + struct iavf_ring_container { struct iavf_ring *ring; /* pointer to linked list of ring(s) */ unsigned long next_update; /* jiffies value of next update */ @@ -437,7 +475,7 @@ static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring) #define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring)) bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count); -netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +netdev_tx_t iavf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void iavf_clean_tx_ring(struct iavf_ring *tx_ring); void iavf_clean_rx_ring(struct iavf_ring *rx_ring); int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring); @@ -450,6 +488,14 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw); void iavf_detect_recover_hung(struct iavf_vsi *vsi); int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size); bool __iavf_chk_linearize(struct sk_buff *skb); +#ifdef HAVE_XDP_FRAME_STRUCT +int iavf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); +#else +int iavf_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp); +#endif +void iavf_xdp_flush(struct net_device *dev); + /** * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed @@ -513,7 +559,9 @@ static inline bool iavf_chk_linearize(struct sk_buff *skb, int count) /* we can support up to 8 data buffers for a single send */ return count != IAVF_MAX_BUFFER_TXD; } + /** + * txring_txq - Find the netdev Tx ring based on the iavf Tx ring * @ring: Tx ring to find the netdev equivalent of **/ static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring) diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 7190a40c540c..ea5008524237 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. 
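With the new IAVF_TXR_FLAGS_XDP flag, the Tx buffer union in iavf_txrx.h above can hold either an skb or an xdp_frame, so any cleanup path has to branch on the ring type. A hypothetical fragment showing the intended use of the flag helpers (iavf's actual XDP cleanup code is not part of this hunk):

/* an XDP ring carries xdp_frames; a LAN ring carries skbs */
if (ring_is_xdp(tx_ring))
	xdp_return_frame(tx_buf->xdpf);
else
	napi_consume_skb(tx_buf->skb, napi_budget);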
*/ #ifndef _IAVF_TYPE_H_ #define _IAVF_TYPE_H_ @@ -10,19 +10,22 @@ #include "iavf_adminq.h" #include "iavf_devids.h" -#define IAVF_RXQ_CTX_DBUFF_SHIFT 7 +#define IAVF_RXQ_CTX_DBUFF_SHIFT 7 /* IAVF_MASK is a macro used on 32 bit registers */ -#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift)) +#define IAVF_MASK(mask, shift) (mask << shift) #define IAVF_MAX_VSI_QP 16 -#define IAVF_MAX_VF_VSI 3 +#define IAVF_MAX_VF_VSI 4 #define IAVF_MAX_CHAINED_RX_BUFFERS 5 /* forward declaration */ struct iavf_hw; typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *); +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif /* Data type manipulation macros. */ #define IAVF_DESC_UNUSED(R) \ @@ -50,6 +53,8 @@ enum iavf_debug_mask { IAVF_DEBUG_FD = 0x00001000, IAVF_DEBUG_PACKAGE = 0x00002000, + IAVF_DEBUG_IWARP = 0x00F00000, + IAVF_DEBUG_AQ_MESSAGE = 0x01000000, IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000, IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000, @@ -61,6 +66,31 @@ enum iavf_debug_mask { IAVF_DEBUG_ALL = 0xFFFFFFFF }; +#define IAVF_MDIO_CLAUSE22_STCODE_MASK IAVF_MASK(1, \ + IAVF_GLGEN_MSCA_STCODE_SHIFT) +#define IAVF_MDIO_CLAUSE22_OPCODE_WRITE_MASK IAVF_MASK(1, \ + IAVF_GLGEN_MSCA_OPCODE_SHIFT) +#define IAVF_MDIO_CLAUSE22_OPCODE_READ_MASK IAVF_MASK(2, \ + IAVF_GLGEN_MSCA_OPCODE_SHIFT) + +#define IAVF_MDIO_CLAUSE45_STCODE_MASK IAVF_MASK(0, \ + IAVF_GLGEN_MSCA_STCODE_SHIFT) +#define IAVF_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK IAVF_MASK(0, \ + IAVF_GLGEN_MSCA_OPCODE_SHIFT) +#define IAVF_MDIO_CLAUSE45_OPCODE_WRITE_MASK IAVF_MASK(1, \ + IAVF_GLGEN_MSCA_OPCODE_SHIFT) +#define IAVF_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK IAVF_MASK(2, \ + IAVF_GLGEN_MSCA_OPCODE_SHIFT) +#define IAVF_MDIO_CLAUSE45_OPCODE_READ_MASK IAVF_MASK(3, \ + IAVF_GLGEN_MSCA_OPCODE_SHIFT) + +#define IAVF_PHY_COM_REG_PAGE 0x1E +#define IAVF_PHY_LED_LINK_MODE_MASK 0xF0 +#define IAVF_PHY_LED_MANUAL_ON 0x100 +#define IAVF_PHY_LED_PROV_REG_1 0xC430 +#define IAVF_PHY_LED_MODE_MASK 0xFFFF +#define IAVF_PHY_LED_MODE_ORIG 0x80000000 + /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we * know for sure exactly which hardware we're working with. 
This gives us the @@ -87,6 +117,7 @@ enum iavf_vsi_type { IAVF_VSI_MIRROR = 5, IAVF_VSI_SRIOV = 6, IAVF_VSI_FDIR = 7, + IAVF_VSI_IWARP = 8, IAVF_VSI_TYPE_UNKNOWN }; @@ -97,16 +128,39 @@ enum iavf_queue_type { IAVF_QUEUE_TYPE_UNKNOWN }; -#define IAVF_HW_CAP_MAX_GPIO 30 +#define IAVF_HW_CAP_MAX_GPIO 30 +enum iavf_acpi_programming_method { + IAVF_ACPI_PROGRAMMING_METHOD_HW_FVL = 0, + IAVF_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1 +}; + +#define IAVF_WOL_SUPPORT_MASK 0x1 +#define IAVF_ACPI_PROGRAMMING_METHOD_MASK 0x2 +#define IAVF_PROXY_SUPPORT_MASK 0x4 + /* Capabilities of a PF or a VF or the whole device */ struct iavf_hw_capabilities { + /* Cloud filter modes: + * Mode1: Filter on L4 port only + * Mode2: Filter for non-tunneled traffic + * Mode3: Filter for tunnel traffic + */ +#define IAVF_CLOUD_FILTER_MODE1 0x6 +#define IAVF_CLOUD_FILTER_MODE2 0x7 +#define IAVF_CLOUD_FILTER_MODE3 0x8 +#define IAVF_SWITCH_MODE_MASK 0xF + bool dcb; bool fcoe; + bool iwarp; u32 num_vsis; u32 num_rx_qp; u32 num_tx_qp; u32 base_queue; u32 num_msix_vectors_vf; + bool apm_wol_support; + enum iavf_acpi_programming_method acpi_prog_method; + bool proxy_support; }; struct iavf_mac_info { @@ -114,9 +168,29 @@ struct iavf_mac_info { u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; u16 max_fcoeq; }; +#define IAVF_NVM_EXEC_GET_AQ_RESULT 0x0 +#define IAVF_NVM_EXEC_FEATURES 0xe +#define IAVF_NVM_EXEC_STATUS 0xf + +/* NVMUpdate features API */ +#define IAVF_NVMUPD_FEATURES_API_VER_MAJOR 0 +#define IAVF_NVMUPD_FEATURES_API_VER_MINOR 14 +#define IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12 + +#define IAVF_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0) + +struct iavf_nvmupd_features { + u8 major; + u8 minor; + u16 size; + u8 features[IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN]; +}; + +#define IAVF_MODULE_SFF_DIAG_CAPAB 0x40 /* PCI bus types */ enum iavf_bus_type { iavf_bus_type_unknown = 0, @@ -165,6 +239,18 @@ struct iavf_bus_info { }; #define IAVF_MAX_USER_PRIORITY 8 +#define IAVF_TLV_STATUS_OPER 0x1 +#define IAVF_TLV_STATUS_SYNC 0x2 +#define IAVF_TLV_STATUS_ERR 0x4 +#define IAVF_CEE_OPER_MAX_APPS 3 +#define IAVF_APP_PROTOID_FCOE 0x8906 +#define IAVF_APP_PROTOID_ISCSI 0x0cbc +#define IAVF_APP_PROTOID_FIP 0x8914 +#define IAVF_APP_SEL_ETHTYPE 0x1 +#define IAVF_APP_SEL_TCPIP 0x2 +#define IAVF_CEE_APP_SEL_ETHTYPE 0x0 +#define IAVF_CEE_APP_SEL_TCPIP 0x1 + /* Port hardware description */ struct iavf_hw { u8 __iomem *hw_addr; @@ -187,6 +273,20 @@ struct iavf_hw { /* Admin Queue info */ struct iavf_adminq_info aq; + /* WoL and proxy support */ + u16 num_wol_proxy_filters; + u16 wol_proxy_vsi_seid; + +#define IAVF_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) +#define IAVF_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) +#define IAVF_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) +#define IAVF_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) +#define IAVF_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) + u64 flags; + + /* NVMUpdate features */ + struct iavf_nvmupd_features nvmupd_features; + /* debug mask */ u32 debug_mask; char err_str[16]; @@ -286,17 +386,14 @@ enum iavf_rx_desc_status_bits { IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4, IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ IAVF_RX_DESC_STATUS_FLM_SHIFT = 11, IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14, IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, 
IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel packets INT_UDP_0 is the right status for - * UDP header - */ IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; @@ -310,8 +407,7 @@ enum iavf_rx_desc_status_bits { IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT -#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK \ - BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum iavf_rx_desc_fltstat_values { IAVF_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -574,6 +670,9 @@ struct iavf_tx_context_desc { __le64 type_cmd_tso_mss; }; +#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define IAVF_TXD_CTX_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT) + #define IAVF_TXD_CTX_QW1_CMD_SHIFT 4 #define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT) @@ -621,6 +720,11 @@ enum iavf_filter_pctype { IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63, }; +#define IAVF_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + IAVF_TXD_FLTR_QW1_CMD_SHIFT) +#define IAVF_TXD_FLTR_QW1_ATR_MASK BIT_ULL(IAVF_TXD_FLTR_QW1_ATR_SHIFT) + + #define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30 #define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) @@ -685,4 +789,60 @@ struct iavf_eth_stats { u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ }; +#define IAVF_SR_PBA_FLAGS 0x15 +#define IAVF_SR_PBA_BLOCK_PTR 0x16 +#define IAVF_SR_BOOT_CONFIG_PTR 0x17 +#define IAVF_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 +#define IAVF_SR_NVM_MAP_VERSION 0x29 +#define IAVF_SR_NVM_IMAGE_VERSION 0x2A +#define IAVF_SR_NVM_STRUCTURE_VERSION 0x2B +#define IAVF_SR_OCP_CFG_WORD0 0x2B +#define IAVF_SR_OCP_ENABLED BIT(15) + +struct iavf_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + +/* Offsets into Alternate Ram */ +#define IAVF_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define IAVF_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define IAVF_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define IAVF_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define IAVF_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define IAVF_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define IAVF_ALT_BW_VALUE_MASK 0xFF +#define IAVF_ALT_BW_RELATIVE_MASK 0x40000000 +#define IAVF_ALT_BW_VALID_MASK 0x80000000 + +#define IAVF_DDP_TRACKID_RDONLY 0 +#define IAVF_DDP_TRACKID_INVALID 0xFFFFFFFF +#define SECTION_TYPE_RB_MMIO 0x00001800 +#define SECTION_TYPE_RB_AQ 0x00001801 +#define SECTION_TYPE_PROTO 0x80000002 +#define SECTION_TYPE_PCTYPE 0x80000003 +#define SECTION_TYPE_PTYPE 0x80000004 +struct iavf_profile_tlv_section_record { + u8 rtype; + u8 type; + u16 len; + u8 data[12]; +}; + +/* Generic AQ section in proflie */ +struct iavf_profile_aq_section { + u16 opcode; + u16 flags; + u8 param[16]; + u16 datalen; + u8 data[1]; +}; + #endif /* _IAVF_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index c46770eba320..8311e06588a6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright (c) 2013, Intel Corporation. 
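The status, error, and length fields enumerated above are all packed into the second quadword of the writeback descriptor, so consumers extract them with the matching shift/mask pairs. A minimal sketch, assuming the usual iavf_32byte_rx_desc writeback layout and the masks defined alongside the shifts:

u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
bool tsyn_valid = !!(qword & IAVF_RXD_QW1_STATUS_TSYNVALID_MASK);
u8 tsyn_index = (qword & IAVF_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT;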
*/ #include "iavf.h" #include "iavf_prototype.h" @@ -27,7 +27,8 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter, if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ - err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + err = iavf_aq_send_msg_to_pf(hw, op, VIRTCHNL_STATUS_SUCCESS, msg, len, + NULL); if (err) dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", op, iavf_stat_str(hw, err), @@ -91,18 +92,10 @@ int iavf_verify_api_ver(struct iavf_adapter *adapter) break; } - err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; - if (op != VIRTCHNL_OP_VERSION) { - dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - op); - err = -EIO; - goto out_alloc; - } - pf_vvi = (struct virtchnl_version_info *)event.msg_buf; adapter->pf_version = *pf_vvi; @@ -130,6 +123,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) u32 caps; caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_IWARP | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_RSS_AQ | VIRTCHNL_VF_OFFLOAD_RSS_REG | @@ -137,18 +131,29 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | - VIRTCHNL_VF_OFFLOAD_ADQ; +#ifdef __TC_MQPRIO_MODE_MAX + VIRTCHNL_VF_OFFLOAD_ADQ | + VIRTCHNL_VF_OFFLOAD_ADQ_V2 | +#endif /* __TC_MQPRIO_MODE_MAX */ + VIRTCHNL_VF_OFFLOAD_USO | +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | + VIRTCHNL_VF_CAP_ADV_LINK_SPEED; +#else + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) - return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, - (u8 *)&caps, sizeof(caps)); + return iavf_send_pf_msg(adapter, + VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); else - return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + return iavf_send_pf_msg(adapter, + VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** @@ -160,6 +165,14 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) **/ static void iavf_validate_num_queues(struct iavf_adapter *adapter) { + /* When ADQ is enabled PF allocates 16 queues to VF but enables only + * the specified number of queues it's been requested for (as per TC + * info). So this check should be skipped when ADQ is enabled. 
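A preprocessor subtlety in the capability block built above: VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM appears in both branches only because the last term of the OR chain must carry the statement's terminating semicolon either way. An equivalent, duplication-free shape (illustrative rewrite, not the driver's code):

caps |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
caps |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
#endif

Whatever the VF advertises here, the PF's GET_VF_RESOURCES reply carries the granted subset back in vf_res->vf_cap_flags, which is what the ADQ check below (and every later feature test) consults.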
+ */ + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) + return; + if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { struct virtchnl_vsi_resource *vsi_res; int i; @@ -242,8 +255,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; int pairs = adapter->num_active_queues; - int i, max_frame = IAVF_MAX_RXBUFFER; - size_t len; + int i, len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -252,16 +264,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; - len = struct_size(vqci, qpair, pairs); + len = sizeof(struct virtchnl_vsi_queue_config_info) + + (sizeof(struct virtchnl_queue_pair_info) * pairs); vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; - /* Limit maximum frame size when jumbo frames is not enabled */ - if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && - (adapter->netdev->mtu <= ETH_DATA_LEN)) - max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; - vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; @@ -277,7 +285,8 @@ void iavf_configure_queues(struct iavf_adapter *adapter) vqpi->rxq.queue_id = i; vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; - vqpi->rxq.max_pkt_size = max_frame; + vqpi->rxq.max_pkt_size = adapter->netdev->mtu + + IAVF_PACKET_HDR_PAD; vqpi->rxq.databuffer_size = ALIGN(adapter->rx_rings[i].rx_buf_len, BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); @@ -351,9 +360,8 @@ void iavf_map_queues(struct iavf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; + int v_idx, q_vectors, len; struct iavf_q_vector *q_vector; - int v_idx, q_vectors; - size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -365,7 +373,9 @@ void iavf_map_queues(struct iavf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NONQ_VECS; - len = struct_size(vimi, vecmap, adapter->num_msix_vectors); + len = sizeof(struct virtchnl_irq_map_info) + + (adapter->num_msix_vectors * + sizeof(struct virtchnl_vector_map)); vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; @@ -432,10 +442,9 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num) void iavf_add_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; + int len, i = 0, count = 0; struct iavf_mac_filter *f; - int i = 0, count = 0; bool more = false; - size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -457,13 +466,15 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) } adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; - len = struct_size(veal, list, count); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); - len = struct_size(veal, list, count); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); more = true; } @@ -489,7 +500,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, 
VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, + (u8 *)veal, len); kfree(veal); } @@ -503,9 +515,8 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; struct iavf_mac_filter *f, *ftmp; - int i = 0, count = 0; + int len, i = 0, count = 0; bool more = false; - size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -527,13 +538,15 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) } adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; - len = struct_size(veal, list, count); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); - len = struct_size(veal, list, count); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); more = true; } veal = kzalloc(len, GFP_ATOMIC); @@ -559,10 +572,52 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, + (u8 *)veal, len); kfree(veal); } +/** + * iavf_mac_add_ok + * @adapter: adapter structure + * + * Mark pending MAC filters as accepted based on the PF's response. + **/ +static void iavf_mac_add_ok(struct iavf_adapter *adapter) +{ + struct iavf_mac_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + f->is_new_mac = false; + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_mac_add_reject + * @adapter: adapter structure + * + * Remove rejected MAC filters from the list based on the PF's response.
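The is_new_mac flag these two handlers flip is easiest to read as a three-step lifecycle (the first step, setting the flag when a filter is created, happens outside this hunk, so this is the assumed flow):

/* assumed lifecycle of f->is_new_mac:
 *   iavf_add_ether_addrs():  filter sent to the PF, is_new_mac stays true
 *   PF ACK -> iavf_mac_add_ok():     is_new_mac = false, filter is kept
 *   PF NAK -> iavf_mac_add_reject(): is_new_mac filters unlinked and freed;
 *            a pending remove of the netdev's own address is cancelled, so
 *            a rejected batch can never drop the primary MAC
 */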
+ **/ +static void iavf_mac_add_reject(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct iavf_mac_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) + f->remove = false; + + if (f->is_new_mac) { + list_del(&f->list); + kfree(f); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + /** * iavf_add_vlans * @adapter: adapter structure @@ -734,15 +789,25 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + dev_info(&adapter->pdev->dev, + "%s is entering multicast promiscuous mode\n", + adapter->netdev->name); } if (!flags) { - adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | - IAVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | - IAVF_FLAG_AQ_RELEASE_ALLMULTI); - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + if (adapter->flags & IAVF_FLAG_PROMISC_ON) { + adapter->flags &= ~IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + + if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { + adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; + dev_info(&adapter->pdev->dev, + "%s is leaving multicast promiscuous mode\n", + adapter->netdev->name); + } } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -769,8 +834,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ - if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, - sizeof(vqs))) + if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs))) /* if the request failed, don't lock out others */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; } @@ -918,6 +983,8 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } +#define IAVF_MAX_SPEED_STRLEN 13 + /** * iavf_print_link_message - print link up or down * @adapter: adapter structure @@ -927,39 +994,115 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - char *speed = "Unknown "; + int link_speed_mbps; + char *speed; if (!adapter->link_up) { netdev_info(netdev, "NIC Link is Down\n"); return; } + speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + if (ADV_LINK_SUPPORT(adapter)) { + link_speed_mbps = adapter->link_speed_mbps; + goto print_link_msg; + } + +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: - speed = "40 G"; + case VIRTCHNL_LINK_SPEED_40GB: + link_speed_mbps = SPEED_40000; break; - case IAVF_LINK_SPEED_25GB: - speed = "25 G"; + case VIRTCHNL_LINK_SPEED_25GB: + link_speed_mbps = SPEED_25000; break; - case IAVF_LINK_SPEED_20GB: - speed = "20 G"; + case VIRTCHNL_LINK_SPEED_20GB: + link_speed_mbps = SPEED_20000; break; - case IAVF_LINK_SPEED_10GB: - speed = "10 G"; + case 
VIRTCHNL_LINK_SPEED_10GB: + link_speed_mbps = SPEED_10000; break; - case IAVF_LINK_SPEED_1GB: - speed = "1000 M"; + case VIRTCHNL_LINK_SPEED_5GB: + link_speed_mbps = SPEED_5000; break; - case IAVF_LINK_SPEED_100MB: - speed = "100 M"; + case VIRTCHNL_LINK_SPEED_2_5GB: + link_speed_mbps = SPEED_2500; + break; + case VIRTCHNL_LINK_SPEED_1GB: + link_speed_mbps = SPEED_1000; + break; + case VIRTCHNL_LINK_SPEED_100MB: + link_speed_mbps = SPEED_100; break; default: + link_speed_mbps = SPEED_UNKNOWN; break; } - netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED +print_link_msg: +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ + if (link_speed_mbps > SPEED_1000) { + if (link_speed_mbps == SPEED_2500) + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); + else + /* convert to Gbps inline */ + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", + link_speed_mbps / 1000, "Gbps"); + } else if (link_speed_mbps == SPEED_UNKNOWN) + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); + else + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", + link_speed_mbps, "Mbps"); + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); +#ifndef SPEED_25000 + netdev_info(netdev, "Ethtool won't report 25 Gbps Link Speed correctly on this Kernel, Time for an Upgrade\n"); +#endif + kfree(speed); } +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED +/** + * iavf_get_vpe_link_status + * @adapter: adapter structure + * @vpe: virtchnl_pf_event structure + * + * Helper function for determining the link status + **/ +static bool +iavf_get_vpe_link_status(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + return vpe->event_data.link_event_adv.link_status; + else + return vpe->event_data.link_event.link_status; +} + +/** + * iavf_set_adapter_link_speed_from_vpe + * @adapter: adapter structure for which we are setting the link speed + * @vpe: virtchnl_pf_event structure that contains the link speed we are setting + * + * Helper function for setting iavf_adapter link speed + **/ +static void +iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + adapter->link_speed_mbps = + vpe->event_data.link_event_adv.link_speed; + else + adapter->link_speed = vpe->event_data.link_event.link_speed; +} + +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ /** * iavf_enable_channel * @adapter: adapter structure @@ -970,7 +1113,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) void iavf_enable_channels(struct iavf_adapter *adapter) { struct virtchnl_tc_info *vti = NULL; - size_t len; + u16 len; int i; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -980,7 +1123,9 @@ void iavf_enable_channels(struct iavf_adapter *adapter) return; } - len = struct_size(vti, list, adapter->num_tc - 1); + len = ((adapter->num_tc - 1) * sizeof(struct virtchnl_channel_info)) + + sizeof(struct virtchnl_tc_info); + vti = kzalloc(len, GFP_KERNEL); if (!vti) return; @@ -1035,7 +1180,7 @@ static void iavf_print_cloud_filter(struct iavf_adapter *adapter, { switch (f->flow_type) { case VIRTCHNL_TCP_V4_FLOW: - dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 TCP: dst_port %hu src_port %hu\n", &f->data.tcp_spec.dst_mac, &f->data.tcp_spec.src_mac, ntohs(f->data.tcp_spec.vlan_id), @@ -1045,7 +1190,27 
@@ static void iavf_print_cloud_filter(struct iavf_adapter *adapter, ntohs(f->data.tcp_spec.src_port)); break; case VIRTCHNL_TCP_V6_FLOW: - dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 TCP: dst_port %hu src_port %hu\n", + &f->data.tcp_spec.dst_mac, + &f->data.tcp_spec.src_mac, + ntohs(f->data.tcp_spec.vlan_id), + &f->data.tcp_spec.dst_ip, + &f->data.tcp_spec.src_ip, + ntohs(f->data.tcp_spec.dst_port), + ntohs(f->data.tcp_spec.src_port)); + break; + case VIRTCHNL_UDP_V4_FLOW: + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 UDP: dst_port %hu src_port %hu\n", + &f->data.tcp_spec.dst_mac, + &f->data.tcp_spec.src_mac, + ntohs(f->data.tcp_spec.vlan_id), + &f->data.tcp_spec.dst_ip[0], + &f->data.tcp_spec.src_ip[0], + ntohs(f->data.tcp_spec.dst_port), + ntohs(f->data.tcp_spec.src_port)); + break; + case VIRTCHNL_UDP_V6_FLOW: + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 UDP: dst_port %hu src_port %hu\n", &f->data.tcp_spec.dst_mac, &f->data.tcp_spec.src_mac, ntohs(f->data.tcp_spec.vlan_id), &f->data.tcp_spec.dst_ip, &f->data.tcp_spec.src_ip, ntohs(f->data.tcp_spec.dst_port), ntohs(f->data.tcp_spec.src_port)); break; @@ -1095,7 +1260,7 @@ void iavf_add_cloud_filter(struct iavf_adapter *adapter) list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->add) { - memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); + *f = cf->f; cf->add = false; cf->state = __IAVF_CF_ADD_PENDING; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, @@ -1143,7 +1308,7 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter) list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { if (cf->del) { - memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); + *f = cf->f; cf->del = false; cf->state = __IAVF_CF_DEL_PENDING; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, @@ -1159,11 +1324,13 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter) * * Request that the PF reset this VF. No response is expected.
**/ -void iavf_request_reset(struct iavf_adapter *adapter) +int iavf_request_reset(struct iavf_adapter *adapter) { + enum iavf_status status; /* Don't check CURRENT_OP - this is always higher priority */ - iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + status = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; + return status; } /** @@ -1180,19 +1347,28 @@ void iavf_request_reset(struct iavf_adapter *adapter) **/ void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, - enum iavf_status v_retval, u8 *msg, u16 msglen) + enum iavf_status v_retval, + u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + bool link_up = iavf_get_vpe_link_status(adapter, vpe); +#else bool link_up = vpe->event_data.link_event.link_status; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: +#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED + iavf_set_adapter_link_speed_from_vpe(adapter, vpe); +#else adapter->link_speed = vpe->event_data.link_event.link_speed; +#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */ /* we've already got the right link status, bail */ if (adapter->link_up == link_up) @@ -1209,7 +1385,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (adapter->state != __IAVF_RUNNING) break; - /* For ADq enabled VF, we reconfigure VSIs and + /* For ADQ enabled VF, we reconfigure VSIs and * re-allocate queues. Hence wait till all * queues are enabled. */ @@ -1220,21 +1396,22 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, adapter->link_up = link_up; if (link_up) { - netif_tx_start_all_queues(netdev); - netif_carrier_on(netdev); + if (adapter->flags & + IAVF_FLAG_QUEUES_ENABLED) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } } else { netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } iavf_print_link_message(adapter); + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; break; case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); - if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { - adapter->flags |= IAVF_FLAG_RESET_PENDING; - dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); - queue_work(iavf_wq, &adapter->reset_task); - } + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); + adapter->flags |= IAVF_FLAG_RESET_PENDING; + iavf_schedule_reset(adapter); break; default: dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", @@ -1243,6 +1420,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } return; } + + /* In earlier versions of ADQ implementation, VF reset was initiated by + * PF in response to enable ADQ request from VF. However for performance + * of ADQ we need the response back and based on that additional configs + * will be done. So don't let the PF reset the VF instead let the VF + * reset itself. 
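The void-to-int change to iavf_request_reset() above lets callers notice when the reset request never reached the PF. A hypothetical caller-side use (the in-tree call sites are outside this hunk):

/* illustrative only: escalate rather than wait for a reset
 * that was never delivered
 */
if (iavf_request_reset(adapter))
	dev_warn(&adapter->pdev->dev,
		 "failed to send reset request to the PF\n");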
+ */ + if (ADQ_V2_ALLOWED(adapter) && !v_retval && + (v_opcode == VIRTCHNL_OP_ENABLE_CHANNELS || + v_opcode == VIRTCHNL_OP_DISABLE_CHANNELS)) { + struct virtchnl_tc_info *ch_info = + (struct virtchnl_tc_info *)msg; + if (ch_info->num_tc) { + dev_info(&adapter->pdev->dev, "Scheduling reset\n"); + iavf_schedule_reset(adapter); + } + } + if (v_retval) { switch (v_opcode) { case VIRTCHNL_OP_ADD_VLAN: @@ -1252,7 +1447,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); - /* restore administratively set MAC address */ + iavf_mac_add_reject(adapter); + /* restore administratively set mac address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); break; case VIRTCHNL_OP_DEL_VLAN: @@ -1291,6 +1487,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, v_retval)); iavf_print_cloud_filter(adapter, &cf->f); + if (msglen) + dev_err(&adapter->pdev->dev, + "%s\n", msg); list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; @@ -1321,32 +1520,38 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } switch (v_opcode) { - case VIRTCHNL_OP_ADD_ETH_ADDR: { + case VIRTCHNL_OP_ADD_ETH_ADDR: + if (!v_retval) + iavf_mac_add_ok(adapter); if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - } break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = (struct iavf_eth_stats *)msg; - netdev->stats.rx_packets = stats->rx_unicast + - stats->rx_multicast + - stats->rx_broadcast; - netdev->stats.tx_packets = stats->tx_unicast + - stats->tx_multicast + - stats->tx_broadcast; - netdev->stats.rx_bytes = stats->rx_bytes; - netdev->stats.tx_bytes = stats->tx_bytes; - netdev->stats.tx_errors = stats->tx_errors; - netdev->stats.rx_dropped = stats->rx_discards; - netdev->stats.tx_dropped = stats->tx_discards; + adapter->net_stats.rx_packets = stats->rx_unicast + + stats->rx_multicast + + stats->rx_broadcast; + adapter->net_stats.tx_packets = stats->tx_unicast + + stats->tx_multicast + + stats->tx_broadcast; + adapter->net_stats.rx_bytes = stats->rx_bytes; + adapter->net_stats.tx_bytes = stats->tx_bytes; + adapter->net_stats.tx_errors = stats->tx_errors; + adapter->net_stats.rx_dropped = stats->rx_discards; + adapter->net_stats.tx_dropped = stats->tx_discards; adapter->current_stats = *stats; } break; case VIRTCHNL_OP_GET_VF_RESOURCES: { + struct iavf_vlan_filter *vlf; + struct iavf_cloud_filter *cf; + struct iavf_mac_filter *f; + bool was_mac_changed; u16 len = sizeof(struct virtchnl_vf_resource) + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + memcpy(adapter->vf_res, msg, min(msglen, len)); iavf_validate_num_queues(adapter); iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); @@ -1355,25 +1560,76 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { /* refresh current mac address if changed */ - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } iavf_process_config(adapter); + + was_mac_changed = !ether_addr_equal(netdev->dev_addr, + adapter->hw.mac.addr); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (was_mac_changed && + ether_addr_equal(netdev->dev_addr, f->macaddr)) + ether_addr_copy(f->macaddr, + 
adapter->hw.mac.addr); + + f->add = true; + f->remove = false; + } + + /* re-add all VLAN filters */ + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->add = true; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + /* check if TCs are running and re-add all cloud filters */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) { + list_for_each_entry(cf, &adapter->cloud_filter_list, + list) { + cf->add = true; + } + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; } break; case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ - iavf_irq_enable(adapter, true); + if (adapter->state == __IAVF_RUNNING) { + iavf_irq_enable(adapter, true); + + /* If queues not enabled when handling link event, + * then set carrier on now + */ + if (adapter->link_up && !netif_carrier_ok(netdev)) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } + } + adapter->flags |= IAVF_FLAG_QUEUES_ENABLED; adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; break; case VIRTCHNL_OP_DISABLE_QUEUES: iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } + adapter->flags &= ~IAVF_FLAG_QUEUES_ENABLED; break; case VIRTCHNL_OP_VERSION: case VIRTCHNL_OP_CONFIG_IRQ_MAP: @@ -1392,14 +1648,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (msglen && CLIENT_ENABLED(adapter)) iavf_notify_client_message(&adapter->vsi, msg, msglen); break; - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: adapter->client_pending &= ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; - if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else @@ -1410,7 +1664,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_REQUEST_QUEUES: { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - if (vfres->num_queue_pairs != adapter->num_req_queues) { dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", @@ -1428,6 +1681,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (cf->state == __IAVF_CF_ADD_PENDING) cf->state = __IAVF_CF_ACTIVE; } + if (!v_retval) + dev_info(&adapter->pdev->dev, + "Cloud filters have been added\n"); } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { @@ -1442,12 +1698,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, adapter->num_cloud_filters--; } } + if (!v_retval) + dev_info(&adapter->pdev->dev, + "Cloud filters have been deleted\n"); } break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) - dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", - adapter->current_op, v_opcode); + dev_dbg(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); break; } /* switch v_opcode */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; diff --git a/drivers/net/ethernet/intel/iavf/kcompat.c b/drivers/net/ethernet/intel/iavf/kcompat.c new file mode 100644 index 000000000000..c116eec4b76d --- /dev/null +++ 
b/drivers/net/ethernet/intel/iavf/kcompat.c @@ -0,0 +1,2495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2013, Intel Corporation. */ + +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. 
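The backported _kc_vsnprintf() above keeps C99 semantics: the output is NUL-terminated even on truncation, and the return value is the length that would have been written given unlimited space. Illustrative use via the _kc_snprintf() wrapper:

char buf[8];
int n = _kc_snprintf(buf, sizeof(buf), "%d-%s", 42, "abcdef");
/* buf now holds "42-abcd" (7 chars + NUL) and n == 9, so callers
 * detect truncation by checking n >= sizeof(buf)
 */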
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
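Note the small %p extension in the 'p' case above: "%pM" routes the argument through the get_mac() helper (defined elsewhere in kcompat) so that pre-%pM kernels format MAC addresses the same way the modern printk core does. Assuming get_mac() mirrors the kernel's colon-separated output, usage looks like:

char mac_str[18]; /* "xx:xx:xx:xx:xx:xx" is 17 chars plus the NUL */
_kc_snprintf(mac_str, sizeof(mac_str), "%pM", netdev->dev_addr);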
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + 
return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? (size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
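+	 * The fast path below therefore only fires for linear, uncloned
+	 * buffers that already have at least "pad" bytes of tailroom.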
*/ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct adapter_struct *adapter = netdev_priv(netdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct adapter_struct *adapter = netdev_priv(netdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void 
_kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + struct adapter_struct *adapter = netdev_priv(netdev); + struct napi_struct *napi = &adapter->rx_ring[0].napi; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
		1 : 0;
+}
+#endif /* NAPI */
+#endif /* <= 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+	struct pci_dev *parent = pdev->bus->self;
+	u16 link_state;
+	int pos;
+
+	if (!parent)
+		return;
+
+	pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	if (pos) {
+		pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+		link_state &= ~state;
+		pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+	}
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_stop_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_wake_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_start_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+	va_list args;
+
+	printk(KERN_WARNING "------------[ cut here ]------------\n");
+	printk(KERN_WARNING "WARNING: at %s:%d\n", file, line);
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	dump_stack();
+}
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
+int
+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+	pci_power_t target_state;
+	int error;
+
+	target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+	pci_enable_wake(dev, target_state, true);
+
+	error = pci_set_power_state(dev, target_state);
+
+	if (error)
+		pci_enable_wake(dev, target_state, false);
+
+	return error;
+}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+	int err;
+
+	err = pci_enable_wake(dev, PCI_D3cold, enable);
+	if (err)
+		goto out;
+
+	err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+	return err;
+}
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
+{
+	u16 old_cmd, cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+	if (enable)
+		cmd = old_cmd | PCI_COMMAND_MASTER;
+	else
+		cmd = old_cmd & ~PCI_COMMAND_MASTER;
+	if (cmd != old_cmd) {
+		dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+			enable ? "enabling" : "disabling");
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
+	pdev->is_busmaster = enable;
+#endif
+}
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+	__kc_pci_set_master(dev, false);
+}
+#endif /* < 2.6.29 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
+{
+	int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *vfdev;
+
+	/* loop through all ethernet devices starting at PF dev */
+	vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+	while (vfdev) {
+		if (vfdev->is_virtfn && vfdev->physfn == dev)
+			num_vf++;
+
+		vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+	}
+
+#endif
+	return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+static const u32 _kc_flags_dup_features =
+	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+	return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+	if (data & ~supported)
+		return -EINVAL;
+
+	dev->features = ((dev->features & ~_kc_flags_dup_features) |
+			 (data & _kc_flags_dup_features));
+	return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+			 int off, int size, unsigned int truesize)
+{
+	skb_fill_page_desc(skb, i, page, off, size);
+	skb->len += size;
+	skb->data_len += size;
+	skb->truesize += truesize;
+}
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+
+	return 0;
+}
+#endif /* SLE_VERSION < 11,3,0 */
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return 0;
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+	return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+	return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_ENDPOINT ||
+	       type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+	int pos;
+	u16 pcie_flags_reg;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return false;
+	pci_read_config_word(dev, pos +
PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). 
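+	 * (Config reads that fail on PCI typically return all-ones, which a
+	 * caller could otherwise mistake for valid register contents.)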
+ */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
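+	 * NUD_PERMANENT is the only state this fallback can honor, since
+	 * there is no aging machinery behind it.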
+ */
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
+		pr_info("%s: FDB only supports static addresses\n", dev->name);
+		return err;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_del(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_del(dev, addr);
+
+	return err;
+}
+
+#endif /* HAVE_FDB_OPS */
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
+{
+	unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+	int pos;
+	struct pci_dev *vfdev;
+	unsigned short dev_id;
+
+	/* only search if we are a PF */
+	if (!dev->is_physfn)
+		return 0;
+
+	/* find SR-IOV capability */
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos)
+		return 0;
+
+	/*
+	 * determine the device ID for the VFs, the vendor ID will be the
+	 * same as the PF so there is no need to check for that one
+	 */
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+	while (vfdev) {
+		/*
+		 * It is considered assigned if it is a virtual function with
+		 * our dev as the physical function and the assigned bit is set
+		 */
+		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+			vfs_assigned++;
+
+		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+	}
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+	return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
+
+static const unsigned char __maybe_unused pcie_link_speed[] = {
+	PCI_SPEED_UNKNOWN,	/* 0 */
+	PCIE_SPEED_2_5GT,	/* 1 */
+	PCIE_SPEED_5_0GT,	/* 2 */
+	PCIE_SPEED_8_0GT,	/* 3 */
+	PCIE_SPEED_16_0GT,	/* 4 */
+	PCI_SPEED_UNKNOWN,	/* 5 */
+	PCI_SPEED_UNKNOWN,	/* 6 */
+	PCI_SPEED_UNKNOWN,	/* 7 */
+	PCI_SPEED_UNKNOWN,	/* 8 */
+	PCI_SPEED_UNKNOWN,	/* 9 */
+	PCI_SPEED_UNKNOWN,	/* A */
+	PCI_SPEED_UNKNOWN,	/* B */
+	PCI_SPEED_UNKNOWN,	/* C */
+	PCI_SPEED_UNKNOWN,	/* D */
+	PCI_SPEED_UNKNOWN,	/* E */
+	PCI_SPEED_UNKNOWN	/* F */
+};
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width)
+{
+	int ret;
+
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	while (dev) {
+		u16 lnksta;
+		enum pci_bus_speed next_speed;
+		enum pcie_link_width next_width;
+
+		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+		if (ret)
+			return ret;
+
+		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			     PCI_EXP_LNKSTA_NLW_SHIFT;
+
+		if (next_speed < *speed)
+			*speed = next_speed;
+
+		if (next_width < *width)
+			*width = next_width;
+
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev)
+{
+	int i;
+	u16 status;
+
+	/* Wait for Transaction Pending bit clean */
+	for (i = 0; i < 4; i++) {
+		if (i)
+			msleep((1 << (i - 1)) * 100);
+
+		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+		if (!(status & PCI_EXP_DEVSTA_TRPND))
+			return 1;
+	}
+
+	return 0;
+}
+#endif /* < RHEL 6.7 */
+#endif /* < 3.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags)
+{
+	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+	unsigned int len;
+	bool found;
+
+#define __KC_IP6_FH_F_FRAG	BIT(0)
+#define __KC_IP6_FH_F_AUTH	BIT(1)
+#define __KC_IP6_FH_F_SKIP_RH	BIT(2)
+
+	if (fragoff)
+		*fragoff = 0;
+
+	if (*offset) {
+		struct ipv6hdr _ip6, *ip6;
+
+		ip6 =
skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if 
(!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || 
	    !atomic_inc_not_zero(&sk->sk_refcnt))
+		return NULL;
+
+	clone = skb_clone(skb, GFP_ATOMIC);
+	if (!clone) {
+		sock_put(sk);
+		return NULL;
+	}
+
+	clone->sk = sk;
+	clone->destructor = __kc_sock_efree;
+
+	return clone;
+}
+
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps)
+{
+	struct sock_exterr_skb *serr;
+	struct sock *sk = skb->sk;
+	int err;
+
+	sock_hold(sk);
+
+	*skb_hwtstamps(skb) = *hwtstamps;
+
+	serr = SKB_EXT_ERR(skb);
+	memset(serr, 0, sizeof(*serr));
+	serr->ee.ee_errno = ENOMSG;
+	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
+	err = sock_queue_err_skb(sk, skb);
+	if (err)
+		kfree_skb(skb);
+
+	sock_put(sk);
+}
+#endif
+
+/* include headers needed for get_headlen function */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#include <scsi/fc/fc_fcoe.h>
+#endif
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev,
+			 unsigned char *data, unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 proto;
+	u8 nexthdr = 0;	/* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	proto = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+again:
+	switch (proto) {
+	/* handle any vlan tag if present */
+	case __constant_htons(ETH_P_8021AD):
+	case __constant_htons(ETH_P_8021Q):
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		proto = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+		goto again;
+	/* handle L3 protocols */
+	case __constant_htons(ETH_P_IP):
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+
+		hdr.network += hlen;
+		break;
+#ifdef NETIF_F_TSO6
+	case __constant_htons(ETH_P_IPV6):
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hdr.network += sizeof(struct ipv6hdr);
+		break;
+#endif /* NETIF_F_TSO6 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	case __constant_htons(ETH_P_FCOE):
+		hdr.network += FCOE_HEADER_LEN;
+		break;
+#endif
+	default:
+		return hdr.network - data;
+	}
+
+	/* finally sort out L4 */
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hdr.network += max_t(u8, sizeof(struct tcphdr),
+				     (hdr.network[12] & 0xF0) >> 2);
+
+		break;
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
+		hdr.network += sizeof(struct udphdr);
+		break;
+#ifdef HAVE_SCTP
+	case IPPROTO_SCTP:
+		hdr.network += sizeof(struct sctphdr);
+		break;
+#endif
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
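+	 * Either way, the min_t() below caps the result at max_len, so a
+	 * malformed frame can never report more header than was pulled in.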
+ */
+	return min_t(unsigned int, hdr.network - data, max_len);
+}
+
+#endif /* < 3.18.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+#ifdef HAVE_NET_GET_RANDOM_ONCE
+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+	BUG_ON(len > sizeof(__kc_netdev_rss_key));
+	net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
+	memcpy(buffer, __kc_netdev_rss_key, len);
+}
+#endif
+
+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
+				const unsigned long *maskp,
+				int nmaskbits)
+{
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
+	int n = 0;
+
+	if (len > 1) {
+		n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
+			   bitmap_scnprintf(buf, len, maskp, nmaskbits);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+#endif
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+      (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node)
+{
+	int cpu;
+
+	/* Wrap: we always want a cpu. */
+	i %= num_online_cpus();
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+	/* Kernels prior to 2.6.28 do not have for_each_cpu or
+	 * cpumask_of_node, so just use for_each_online_cpu()
+	 */
+	for_each_online_cpu(cpu)
+		if (i-- == 0)
+			return cpu;
+
+	return 0;
+#else
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+	} else {
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
+		}
+	}
+#endif /* KERNEL_VERSION >= 2.6.28 */
+	BUG();
+}
+#endif
+#endif
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) )
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+				     u8 *mac_addr __maybe_unused)
+{
+#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \
+      !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \
+     !defined(CONFIG_SPARC))
+	return -ENODEV;
+#else
+	const unsigned char *addr;
+	struct device_node *dp;
+
+	if (dev_is_pci(dev))
+		dp = pci_device_to_OF_node(to_pci_dev(dev));
+	else
+#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF)
+		dp = dev->of_node;
+#else
+		dp = NULL;
+#endif
+
+	addr = NULL;
+	if (dp)
+		addr = of_get_mac_address(dp);
+#ifdef CONFIG_SPARC
+	/* Kernel hasn't implemented arch_get_platform_mac_address, but we
+	 * should handle the SPARC case here since it was supported
+	 * originally. This is replaced by arch_get_platform_mac_address()
+	 * upstream.
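+	 * (idprom is the SPARC PROM descriptor from asm/idprom.h; its
+	 * id_ethaddr field carries the factory-assigned MAC address.)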
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 
2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? 
pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older 
kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !RHEL >= 8.2 */ +#endif /* 5.3.0 */ diff --git a/drivers/net/ethernet/intel/iavf/kcompat.h b/drivers/net/ethernet/intel/iavf/kcompat.h new file mode 100644 index 000000000000..b37a3f3ac77a --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/kcompat.h @@ -0,0 +1,6728 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2013, Intel Corporation. */ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else 
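+/* GCC_VERSION is not defined at all here, so conservatively treat the
+ * compiler as pre-C99 with no native _Bool type.
+ */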
+#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
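+				 * register 0x17 sits in the vendor-specific
+				 * window (16-31) of the clause-22 MII map.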
*/
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef BITS_PER_TYPE
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#endif
+
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+
+#ifndef DECLARE_BITMAP
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
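+ * As with the non-IRQ pair above, the usecs and max_frames members of
+ * a pair combine as "whichever threshold is crossed first"; for
+ * example (illustrative values only), usecs = 50 with max_frames = 32
+ * requests an interrupt after 32 packets or 50 microseconds,
+ * whichever comes first.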
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
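+ * A minimal sketch of filling the structure backported above (the
+ * field values are illustrative, not recommendations):
+ *
+ *	struct ethtool_pauseparam pp = { .cmd = ETHTOOL_SPAUSEPARAM,
+ *					 .autoneg = 0,
+ *					 .rx_pause = 1, .tx_pause = 0 };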
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
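+ *
+ * With the macros defined below, a backport check keyed to a specific
+ * Ubuntu kernel then reads like this (HAVE_SOME_UBUNTU_BACKPORT is a
+ * made-up example name):
+ *
+ *	#if UBUNTU_VERSION_CODE && \
+ *	    UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,45)
+ *	#define HAVE_SOME_UBUNTU_BACKPORT
+ *	#endif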
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) +#ifdef CONFIG_SUSE_KERNEL +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. 
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13)) +/* SLES15 SP2 Beta1 is 5.3.13 */ +#define SLE_VERSION_CODE SLE_VERSION(15,2,0) + +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. 
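+ * For instance, memcpy()/memset() are redirected below to the bounded
+ * *_s variants so the scanner sees an explicit destination size, and
+ * the atomic bit operations are restated as plain C that the tool can
+ * follow.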
+ */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \
+	int err; \
+	__diag_push(); \
+	__diag(ignored "-Wformat-nonliteral"); \
+	err = __trace_bprintk(ip, fmt, ##args); \
+	__diag_pop(); \
+	err; \
+})
+#endif /* GCC_VERSION < 5.1.0 */
+
+/* Newer kernels removed <linux/pci-aspm.h> */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) )
+#define HAVE_PCI_ASPM_H
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device *_kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+	do { \
+		INIT_LIST_HEAD(&(_tq)->list); \
+		(_tq)->sync = 0; \
+		(_tq)->routine = _routine; \
+		(_tq)->data = _data; \
+	} while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+		       ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. 
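+ * A capability probe built on the registers above might look like
+ * this sketch, where mdio_read()/mdio_write() stand in for whatever
+ * PHY accessors the driver provides:
+ *
+ *	int bmsr = mdio_read(dev, phy_addr, MII_BMSR);
+ *
+ *	if (bmsr & BMSR_ANEGCAPABLE)
+ *		mdio_write(dev, phy_addr, MII_ADVERTISE,
+ *			   ADVERTISE_ALL | ADVERTISE_CSMA);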
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if 
(debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+	pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+	pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+	pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+	pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+	pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+	pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+	pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+	pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+	pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+	struct hlist_node *next = n->next;
+	struct hlist_node **pprev = n->pprev;
+	*pprev = next;
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->next = NULL;
+	n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+	return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+	h->next = NULL;
+	h->pprev = NULL;
+}
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+	struct __kc_callback_head *next;
+	void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef 
CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { 
set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. 
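+ * The fallback below simply reports a single online node.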
This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
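+ * In the usual (simplified) pause autoneg resolution, symmetric pause
+ * is enabled only when both link partners advertise it, e.g.:
+ *
+ *	if ((local_adv & ADVERTISE_PAUSE_CAP) && (lpa & LPA_PAUSE_CAP))
+ *		enable_pause_frames();	(helper name illustrative)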
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08,
+	PCIE_LNK_X12		= 0x0C,
+	PCIE_LNK_X16		= 0x10,
+	PCIE_LNK_X32		= 0x20,
+	PCIE_LNK_WIDTH_UNKNOWN	= 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* >= RHEL 5.0 */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+        ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+          RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+         (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+	irq_handler_t new_handler = (irq_handler_t) handler;
+	return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) )
+#ifndef skb_checksum_help
+static inline int __kc_skb_checksum_help(struct sk_buff *skb)
+{
+	return skb_checksum_help(skb, 0);
+}
+#define skb_checksum_help(skb) __kc_skb_checksum_help((skb))
+#endif
+#endif /* < 2.6.19 && >= 2.6.11 */
+
+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/pci_hotplug.h>
+
+#define NEW_SKB_CSUM_HELP
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+	INIT_LIST_HEAD(&(_work)->entry); \
+	(_work)->pending = 0; \
+
(_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, 
offset, from, len) \
+	memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+	return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+	DUMP_PREFIX_NONE,
+	DUMP_PREFIX_ADDRESS,
+	DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x)	"0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+void _kc_print_hex_dump(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+		_kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full BIT(15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full BIT(15)
+#endif
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+static inline int compound_order(struct page *page)
+{
+	return 0;
+}
+
+#define __must_be_array(a) 0
+
+#ifndef SKB_WITH_OVERHEAD
+#define SKB_WITH_OVERHEAD(X) \
+	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a)	((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP	CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	int delta = 0;
+
+	if (headroom > (skb->data - skb->head))
+		delta = headroom - (skb->data - skb->head);
+
+	if (delta || skb_header_cloned(skb))
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+	/* used to look up the real NAPI polling routine */
+	int (*poll)(struct napi_struct *, int);
+	struct net_device *dev;
+	int weight;
+};
+#endif
+
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following defines only provide limited support for NAPI calls and
+ * should only be used by drivers which are not multi-queue enabled.
+ */
+
+#define napi_to_poll_dev(_napi) (_napi)->dev
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+				       int (*poll)(struct napi_struct *, int), int weight)
+{
+	dev->poll = __kc_adapter_clean;
+	dev->weight = weight;
+	napi->poll = poll;
+	napi->dev = dev;
+}
+#define netif_napi_add __kc_netif_napi_add
+
+#define netif_napi_del(_a) do {} while (0)
+#define napi_schedule_prep(_napi) netif_rx_schedule_prep((_napi)->dev)
+#define napi_schedule(_napi) netif_rx_schedule((_napi)->dev)
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+	struct net_device *dev = napi_to_poll_dev(n);
+
+	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+		/* No hurry. */
+		msleep(1);
+	}
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+	napi_gro_flush(napi);
+#endif
+	netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+				       int (*poll)(struct napi_struct *, int), int weight)
+{
+	dev->poll = poll;
+	dev->weight = weight;
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT	""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+	printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+	__builtin_constant_p(n) ? ( \
+	(n == 1) ? 0 : \
+	(1UL << ilog2(n))) : \
+	(1UL << (fls_long(n) - 1))
+#endif
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY	1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE	INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+		set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+		remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+		modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE	-1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+	if (value != PM_QOS_DEFAULT_VALUE) { \
+		printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+			pci_name(adapter->pdev)); \
+	} \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+	/* adapted from strict_strtoul() in 2.6.25 */
+	char *tail;
+	long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(buf);
+	if (!len)
+		return -EINVAL;
+	val = simple_strtol(buf, &tail, base);
+	if (tail == buf)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+	    ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+#else /* < 2.6.25 */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({		\
+	type __val = (val);			\
+	type __min = (min);			\
+	type __max = (max);			\
+	__val = __val < __min ? __min : __val;	\
+	__val > __max ? __max : __val; })
+#endif /* clamp_t */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#ifdef HAVE_PCI_ASPM_H
+#include <linux/pci-aspm.h>
+#endif
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS	0x00000c00	/* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+	ep->speed = (__u16)speed;
+	/* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+	/* no speed_hi before 2.6.27, and probably no need for it yet */
+	return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+       defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+	do { \
+		u16 pmc = 0; \
+		int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+		if (pm) { \
+			pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+				&pmc); \
+		} \
+		(dev)->power.can_wakeup = !!(pmc >> 11); \
+		(dev)->power.should_wakeup = (val && (pmc >> 11)); \
+	} while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifndef DMA_ATTR_WEAK_ORDERING
+#define DMA_ATTR_WEAK_ORDERING	0
+#endif
+
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *);
+void _kc_netif_tx_wake_all_queues(struct net_device *);
+void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+	if (netif_is_multiqueue((_ndev))) \
+		netif_stop_subqueue((_ndev), (_qi)); \
+	else \
+		netif_stop_queue((_ndev)); \
+	} while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+	if (netif_is_multiqueue((_ndev))) \
+		netif_start_subqueue((_ndev), (_qi)); \
+	else \
+		netif_start_queue((_ndev)); \
+	} while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
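
Aside: every gate in this header reduces to one trick. KERNEL_VERSION(a,b,c) packs a release into a single integer, (a << 16) + (b << 8) + c, so "is this backport needed?" becomes an integer comparison against LINUX_VERSION_CODE. A minimal standalone sketch of that idiom, compilable as plain userspace C — KC_VERSION, KC_BUILD_TARGET, the 2.6.27 cutoff and demo_tx_stop_all_queues() are all made-up stand-ins, not names from this patch:

/* Hedged sketch of the version-gate pattern used throughout this header. */
#include <stdio.h>

#define KC_VERSION(a, b, c)	(((a) << 16) + ((b) << 8) + (c))
#define KC_BUILD_TARGET		KC_VERSION(2, 6, 26)	/* pretend build target */

#if (KC_BUILD_TARGET < KC_VERSION(2, 6, 27))
/* older "kernel": degrade the multiqueue call to the single-queue one */
#define demo_tx_stop_all_queues(name)	printf("stopping the queue of %s\n", name)
#else
#define demo_tx_stop_all_queues(name)	printf("stopping all queues of %s\n", name)
#endif

int main(void)
{
	demo_tx_stop_all_queues("eth0");
	return 0;
}
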
+#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef 
CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+	printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+	synchronize_irq();
+#else /* < 2.5.28 */
+	synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2		48	/* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* RHEL >= 5.5 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)))
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+	return !(pbus->parent);
+}
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP  0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+	for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full	BIT(17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full	BIT(18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full	BIT(19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full	BIT(17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full	BIT(18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full	BIT(19)
+#endif
+
+static inline unsigned long dev_trans_start(struct net_device *dev)
+{
+	return dev->trans_start;
+}
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
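
Aside: many entries in this stretch only have to supply a missing flag bit. Wrapping the definition in #ifndef lets a kernel that already has the real value keep it, while older trees pick up the compat one, and BIT(n) is simply 1UL << n. A hedged userspace sketch of the idiom — DEMO_F_FCOE_MTU is a hypothetical stand-in for NETIF_F_FCOE_MTU, not a name from this patch:

#include <stdio.h>

#ifndef BIT
#define BIT(n)	(1UL << (n))
#endif

#ifndef DEMO_F_FCOE_MTU			/* hypothetical feature bit */
#define DEMO_F_FCOE_MTU	BIT(26)
#endif

int main(void)
{
	unsigned long features = DEMO_F_FCOE_MTU;

	if (features & DEMO_F_FCOE_MTU)	/* test the bit the usual way */
		printf("FCoE MTU bit set: 0x%lx\n", DEMO_F_FCOE_MTU);
	return 0;
}
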
+#define NETIF_F_FCOE_MTU	BIT(26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync()
+{
+	return 1;
+}
+#define pm_runtime_get_sync(dev)	_kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+	return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev)	_kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev)		do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev)		do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t)	do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev)		do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev)	do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target work arounds require at least this kernel rev support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+      (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif /* PORT_DA */
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
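
Aside: the redefinitions that follow make the dma_unmap_* bookkeeping explicit. DEFINE_DMA_UNMAP_ADDR/LEN expand to plain struct members and the accessors to field reads and writes, so a driver can record a mapping's bus address and length at map time and hand them back at unmap time. A self-contained sketch under simplified userspace types — every demo_* name here is hypothetical:

#include <stdio.h>

typedef unsigned long long demo_dma_addr_t;	/* stand-in for dma_addr_t */

#define DEMO_DEFINE_DMA_UNMAP_ADDR(n)		demo_dma_addr_t n
#define DEMO_DEFINE_DMA_UNMAP_LEN(n)		unsigned int n
#define demo_dma_unmap_addr(p, n)		((p)->n)
#define demo_dma_unmap_addr_set(p, n, v)	(((p)->n) = (v))
#define demo_dma_unmap_len(p, n)		((p)->n)
#define demo_dma_unmap_len_set(p, n, v)		(((p)->n) = (v))

struct demo_tx_buffer {
	DEMO_DEFINE_DMA_UNMAP_ADDR(dma);	/* expands to: demo_dma_addr_t dma; */
	DEMO_DEFINE_DMA_UNMAP_LEN(len);		/* expands to: unsigned int len;  */
};

int main(void)
{
	struct demo_tx_buffer buf;

	/* stash the mapping state at "map" time... */
	demo_dma_unmap_addr_set(&buf, dma, 0x1000ULL);
	demo_dma_unmap_len_set(&buf, len, 256);
	/* ...and read it back at "unmap" time */
	printf("unmap addr=0x%llx len=%u\n",
	       demo_dma_unmap_addr(&buf, dma), demo_dma_unmap_len(&buf, len));
	return 0;
}
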
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) 
_kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+						   unsigned int txq)
+{
+	dev->egress_subqueue_count = txq;
+	return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else /* HAVE_TX_MQ */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+						   unsigned int __always_unused txq)
+{
+	return 0;
+}
+#endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq) \
+	_kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+struct device_node;
+#else /* < 2.6.35 */
+#define HAVE_STRUCT_DEVICE_OF_NODE
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#include <linux/of.h>
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+enum {
+	WQ_UNBOUND = 0,
+	WQ_RESCUER = 0,
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+							    unsigned int length)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+	if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+		skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+		skb->dev = dev;
+	}
+	return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > 
KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. 
+ */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) do { \ + void __kc_kfree_rcu(struct rcu_head *rcu_head) \ + { \ + void *ptr = container_of(rcu_head, \ + typeof(*_ptr), \ + _rcu_head); \ + kfree(ptr); \ + } \ + call_rcu(&(_ptr)->_rcu_head, __kc_kfree_rcu); \ +} while (0) +#define HAVE_KFREE_RCU_BARRIER +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? 
pdev->dev.of_node : NULL;
+#else
+	return NULL;
+#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */
+}
+#endif /* CONFIG_OF */
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifndef dma_zalloc_coherent
+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag)	_kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+	return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+	frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag)	_kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+	return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag)	_kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+	return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+		_kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+					      const skb_frag_t *frag,
+					      size_t offset, size_t size,
+					      enum dma_data_direction dir)
+{
+	return dma_map_page(dev, skb_frag_page(frag),
+			    frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+	put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN	-1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN	0xff
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#ifndef HAVE_SKB_L4_RXHASH
+#define HAVE_SKB_L4_RXHASH
+#endif
+#define HAVE_IOMMU_PRESENT
+#define HAVE_PM_QOS_REQUEST_LIST_NEW
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than
+ * alloc_workqueue() to avoid compiler warning from -Wvarargs
+ */
+static inline struct 
workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include <linux/kconfig.h> +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include <linux/of_net.h> +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 
0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include <linux/mdio.h> +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. + */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. 
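+ * + * For example, an @adv of ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full + * is translated to MDIO_EEE_1000T | MDIO_EEE_10GT by the helper below. 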
+ */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include <linux/hashtable.h> +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... 
((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid compatibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context. 
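+ * + * Typical usage matches the upstream helper, e.g. + * mod_delayed_work(wq, &adapter->service_task, msecs_to_jiffies(100)); + * where service_task would be a struct delayed_work embedded in the + * driver's private structure (the names here are illustrative only). 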
+ */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + 
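+/* Note on the empty __devexit_p definition above: with the macro defined + * to nothing, a use such as .remove = __devexit_p(foo_remove) simply + * expands to .remove = (foo_remove), so driver code written for pre-3.8 + * kernels (where the hotplug section annotations still existed) compiles + * unchanged on 3.8 and later. + */ + 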
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
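+ * + * Either way, callers may use of_get_phy_mode() and of_get_mac_address() + * unconditionally: on the stubbed paths they simply return -ENODEV and + * NULL respectively, as the definitions below show. 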
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL >=7.3 && RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ +#endif /* RHEL >= 7.0 && RHEL < 8.0 */ + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#endif /* RHEL >= 8.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* <RHEL6.7 */ +#else /* >= 3.12.0 */ +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define dma_set_mask_and_coherent(_p, _m) 
__kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void 
__kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* !NETDEV_HW_ADDR_T_UNICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* !NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it gets or'd in + * it won't affect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! 
( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8<RHEL7.0 || RHEL7.2+) */ + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#endif /* RHEL_RELEASE_CODE < RHEL7.5 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3)) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include <linux/time64.h> +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include <linux/errqueue.h> +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && 
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
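+ * + * For example, __kc_skb_put_padto(skb, ETH_ZLEN) grows an undersized + * frame to the 60-byte Ethernet minimum; the eth_skb_pad() wrapper + * defined below does exactly that. 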
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include <linux/timecounter.h> +#else +#include <linux/clocksource.h> +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) || \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#else /* >= 4,1,0 */ +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! 
SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, 
+ int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_DEVLINK_SUPPORT +#endif /* UBUNTU 4,4,0,21, RHEL 7.4, SLES12 SP3 */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_DEVLINK_SUPPORT +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), 
name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_mem_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif /* !SLE_VERSION(12,3,0) */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <SLE_VERSION(12,3,0) */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \
+    SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \
+    RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))
+#define time_is_before_jiffies64(a)	time_after64(get_jiffies_64(), a)
+#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4))
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+	dst[0] = mask & ULONG_MAX;
+
+	if (sizeof(mask) > sizeof(unsigned long))
+		dst[1] = mask >> 32;
+}
+#endif /* <RHEL7.4 */
+#else /* >=4.9 */
+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+					   int __always_unused work_done)
+{
+	/* it was really hard to get napi_complete_done to be safe to call
+	 * recursively without running into our own kcompat, so just use
+	 * napi_complete
+	 */
+	napi_complete(napi);
+
+	/* true means that the stack is telling the driver to go-ahead and
+	 * re-enable interrupts
+	 */
+	return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0)))
+#define HAVE_STRUCT_DMA_ATTRS
+#endif /* (SLES == 12.3.0) */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif /* (SLES >= 12.3.0) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_STRUCT_DMA_ATTRS
+#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif
+#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
+     !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))))
+#ifndef dma_map_page_attrs
+#define dma_map_page_attrs __kc_dma_map_page_attrs
+static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev,
+						 struct page *page,
+						 size_t offset, size_t size,
+						 enum dma_data_direction dir,
+						 unsigned long __always_unused attrs)
+{
+	return dma_map_page(dev, page, offset, size, dir);
+}
+#endif
+
+#ifndef dma_unmap_page_attrs
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
+static inline void __kc_dma_unmap_page_attrs(struct device *dev,
+					     dma_addr_t addr, size_t size,
+					     enum dma_data_direction dir,
+					     unsigned long __always_unused attrs)
+{
+	dma_unmap_page(dev, addr, size, dir);
+}
+#endif
+
+static inline void __page_frag_cache_drain(struct page *page,
+					   unsigned int count)
+{
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	if (!page_ref_sub_and_test(page, count))
+		return;
+
+	init_page_count(page);
+#else
+	BUG_ON(count > 1);
+	if (!count)
+		return;
+#endif
+	__free_pages(page, compound_order(page));
+}
+#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#endif
+
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4))))
+#define page_frag_free __free_page_frag
+#endif
+#ifndef ETH_MIN_MTU
+#define ETH_MIN_MTU 68
+#endif /* ETH_MIN_MTU */
+#else /* >= 4.10 */
+#define HAVE_TC_FLOWER_ENC
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#define HAVE_NETDEV_TC_RESETS_XPS
+#define HAVE_XPS_QOS_SUPPORT
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+/* kernel 4.10 onwards, as part of the busy_poll rewrite, new states were
+ * added to NAPI:state.
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#else /* > 4.13 */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define 
xdp_buff _kc_xdp_buff
+struct _kc_bpf_prog {
+};
+#define bpf_prog _kc_bpf_prog
+#ifndef DIV_ROUND_DOWN_ULL
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+#endif /* DIV_ROUND_DOWN_ULL */
+#else /* > 4.14 */
+#define HAVE_XDP_SUPPORT
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_TCF_EXTS_HAS_ACTION
+#endif /* 4.14.0 */
+
+/*****************************************************************************/
+#ifndef ETHTOOL_GLINKSETTINGS
+
+#define __ETHTOOL_LINK_MODE_MASK_NBITS 32
+#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/**
+ * struct ethtool_link_ksettings
+ * @link_modes: supported and advertising, single item arrays
+ * @link_modes.supported: bitmask of supported link speeds
+ * @link_modes.advertising: bitmask of currently advertised speeds
+ * @base: base link details
+ * @base.speed: current link speed
+ * @base.port: current port type
+ * @base.duplex: current duplex mode
+ * @base.autoneg: current autonegotiation settings
+ *
+ * This struct and the following macros provide a way to support the old
+ * ethtool get/set_settings API on older kernels, but in the style of the new
+ * GLINKSETTINGS API. In this way, the same code can be used to support both
+ * APIs as seamlessly as possible.
+ *
+ * It should be noted the old API only has support up to the first 32 bits.
+ */
+struct ethtool_link_ksettings {
+	struct {
+		u32	speed;
+		u8	port;
+		u8	duplex;
+		u8	autoneg;
+	} base;
+	struct {
+		unsigned long supported[ETHTOOL_LINK_MASK_SIZE];
+		unsigned long advertising[ETHTOOL_LINK_MASK_SIZE];
+	} link_modes;
+};
+
+#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode
+#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode
+#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name
+#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode)
+
+/**
+ * ethtool_link_ksettings_zero_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ */
+#define ethtool_link_ksettings_zero_link_mode(ptr, name)\
+	(*((ptr)->link_modes.name) = 0x0)
+
+/**
+ * ethtool_link_ksettings_add_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to add
+ */
+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\
+	(*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_del_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to delete
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\
+	(*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_test_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to test
+ */
+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\
+	(!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode)))
+
+/**
+ * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd
+ * @ks: ethtool_link_ksettings struct
+ * @cmd: ethtool_cmd struct
+ *
+ * Convert an ethtool_link_ksettings structure into the older ethtool_cmd
+ * structure. We provide this in kcompat.h so that drivers can easily
+ * implement the older .{get|set}_settings as wrappers around the new api.
+ * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually
+ * a real function in the kernel.
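+ *
+ * A rough usage sketch (the foo_* names are illustrative only, not part of
+ * this header): a driver could implement the legacy .get_settings callback
+ * on top of these helpers as
+ *
+ *	static int foo_get_settings(struct net_device *netdev,
+ *				    struct ethtool_cmd *cmd)
+ *	{
+ *		struct ethtool_link_ksettings ks;
+ *
+ *		memset(&ks, 0, sizeof(ks));
+ *		ethtool_link_ksettings_add_link_mode(&ks, supported,
+ *						     100baseT_Full);
+ *		ks.base.speed = SPEED_100;
+ *		ks.base.duplex = DUPLEX_FULL;
+ *		ks.base.port = PORT_TP;
+ *		ks.base.autoneg = AUTONEG_ENABLE;
+ *		_kc_ethtool_ksettings_to_cmd(&ks, cmd);
+ *		return 0;
+ *	}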
+ */
+static inline void
+_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks,
+			     struct ethtool_cmd *cmd)
+{
+	cmd->supported = (u32)ks->link_modes.supported[0];
+	cmd->advertising = (u32)ks->link_modes.advertising[0];
+	ethtool_cmd_speed_set(cmd, ks->base.speed);
+	cmd->duplex = ks->base.duplex;
+	cmd->autoneg = ks->base.autoneg;
+	cmd->port = ks->base.port;
+}
+
+#endif /* !ETHTOOL_GLINKSETTINGS */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
+#define phy_speed_to_str _kc_phy_speed_to_str
+const char *_kc_phy_speed_to_str(int speed);
+#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */
+#include <linux/phy.h>
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#else /* RHEL >= 7.6 || SLES >= 15.1 */
+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src);
+#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks
+#else /* >= 4.15 */
+#define HAVE_NDO_BPF
+#define HAVE_XDP_BUFF_DATA_META
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0))
+#define pci_printk(level, pdev, fmt, arg...) \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
+
+#ifndef array_index_nospec
+static inline unsigned long _kc_array_index_mask_nospec(unsigned long index,
+							unsigned long size)
+{
+	/*
+	 * Always calculate and emit the mask even if the compiler
+	 * thinks the mask is not needed. The compiler does not take
+	 * into account the value of @index under speculation.
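+	 *
+	 * A worked example (values illustrative): with index == 4 and
+	 * size == 8, (index | (size - 1UL - index)) leaves the sign bit
+	 * clear, so the expression below evaluates to all ones and
+	 * array_index_nospec() passes the index through unchanged; with
+	 * index >= size the subtraction wraps, the sign bit is set, and
+	 * the resulting mask of 0 forces the returned index to 0.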
+	 */
+	OPTIMIZER_HIDE_VAR(index);
+	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+
+#define array_index_nospec(index, size)					\
+({									\
+	typeof(index) _i = (index);					\
+	typeof(size) _s = (size);					\
+	unsigned long _mask = _kc_array_index_mask_nospec(_i, _s);	\
+									\
+	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
+	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
+									\
+	(typeof(_i)) (_i & _mask);					\
+})
+#endif /* array_index_nospec */
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \
+     !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+			      struct tc_cls_common_offload *common)
+{
+	if (!tc_can_offload(dev))
+		return false;
+	if (common->chain_index)
+		return false;
+
+	return true;
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
+#endif /* sizeof_field */
+#else /* >= 4.16 */
+#include <linux/nospec.h>
+#define HAVE_XDP_BUFF_RXQ
+#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_VF_STATS_DROPPED
+#endif /* 4.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
+#include <linux/pci_regs.h>
+#include <linux/pci.h>
+#define PCIE_SPEED_16_0GT 0x17
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+void _kc_pcie_print_link_status(struct pci_dev *dev);
+#define pcie_print_link_status _kc_pcie_print_link_status
+#else /* >= 4.17.0 */
+#define HAVE_XDP_BUFF_IN_XDP_H
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0))
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+#include <linux/if_macvlan.h>
+#ifndef macvlan_supports_dest_filter
+#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter
+static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+	       macvlan->mode == MACVLAN_MODE_VEPA ||
+	       macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+#endif
+
+#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0)))
+#ifndef macvlan_accel_priv
+#define macvlan_accel_priv _kc_macvlan_accel_priv
+static inline void *_kc_macvlan_accel_priv(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->fwd_priv;
+}
+#endif
+
+#ifndef macvlan_release_l2fw_offload
+#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload
+static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	macvlan->fwd_priv = NULL;
+	return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif
+#endif /* !SLES || SLES < 15.1 */
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
+#include "kcompat_overflow.h"
+
+#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0))
+#define firmware_request_nowarn request_firmware_direct
+#endif /* !SLES || SLES < 15.1 */
+
+#else
+#include <linux/overflow.h>
+#include <net/xdp_sock.h>
+#define HAVE_XDP_FRAME_STRUCT
+#define HAVE_XDP_SOCK
+#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS
+#define NO_NDO_XDP_FLUSH
+#define HAVE_AF_XDP_SUPPORT
+#ifndef xdp_umem_get_data
+static
inline char *__kc_xdp_umem_get_data(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); +} + +#define xdp_umem_get_data __kc_xdp_umem_get_data +#endif /* !xdp_umem_get_data */ +#ifndef xdp_umem_get_dma +static inline dma_addr_t __kc_xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); +} + +#define xdp_umem_get_dma __kc_xdp_umem_get_dma +#endif /* !xdp_umem_get_dma */ +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#else /* >= 4.20.0 */ +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open 
_kc_dev_open
+#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
+#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */
+struct ptp_system_timestamp {
+	struct timespec64 pre_ts;
+	struct timespec64 post_ts;
+};
+
+static inline void
+ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts)
+{
+	;
+}
+
+static inline void
+ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts)
+{
+	;
+}
+#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#endif /* RHEL 8.1 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))
+#define HAVE_TC_INDIR_BLOCK
+#endif /* RHEL 8.2 */
+#else /* >= 5.0.0 */
+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM
+#define HAVE_GENEVE_TYPE
+#define HAVE_TC_INDIR_BLOCK
+#endif /* 5.0.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
+#define HAVE_NDO_FDB_ADD_EXTACK
+#else /* RHEL < 8.1 */
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#include <net/flow_dissector.h>
+
+struct flow_match {
+	struct flow_dissector	*dissector;
+	void			*mask;
+	void			*key;
+};
+
+struct flow_match_basic {
+	struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+	struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+	struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+#ifdef HAVE_TC_FLOWER_ENC
+struct flow_match_enc_keyid {
+	struct flow_dissector_key_keyid *key, *mask;
+};
+#endif
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+struct flow_match_vlan {
+	struct flow_dissector_key_vlan *key, *mask;
+};
+#endif
+
+struct flow_match_ipv4_addrs {
+	struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+	struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ports {
+	struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_rule {
+	struct flow_match	match;
+};
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+			   struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+			     struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			       struct flow_match_eth_addrs *out);
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+void flow_rule_match_vlan(const struct flow_rule *rule,
+			  struct flow_match_vlan *out);
+#endif
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv4_addrs *out);
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv6_addrs *out);
+void flow_rule_match_ports(const struct flow_rule *rule,
+			   struct flow_match_ports *out);
+#ifdef HAVE_TC_FLOWER_ENC
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+			       struct flow_match_ports *out);
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+				 struct flow_match_control *out);
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv4_addrs *out);
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv6_addrs *out);
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+			       struct flow_match_enc_keyid *out);
+#endif
+
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+	return (struct flow_rule *)&tc_flow_cmd->dissector;
+}
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+				       enum flow_dissector_key_id key)
+{
+	return dissector_uses_key(rule->match.dissector, key);
+}
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+
+#endif /* RHEL < 8.1 */
+#else /* >= 5.1.0 */
+#define HAVE_NDO_FDB_ADD_EXTACK
+#define NO_XDP_QUERY_XSK_UMEM
+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
+#define HAVE_TC_FLOWER_ENC_IP
+#endif /* 5.1.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0))
+#if (defined HAVE_SKB_XMIT_MORE) && \
+(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+#define netdev_xmit_more()	(skb->xmit_more)
+#else
+#define netdev_xmit_more()	(0)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+#ifndef eth_get_headlen
+static inline u32
+__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data,
+		     unsigned int len)
+{
+	return eth_get_headlen(data, len);
+}
+
+#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len)
+#endif /* !eth_get_headlen */
+#endif /* !RHEL >= 8.2 */
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif /* mmiowb */
+
+#else /* >= 5.2.0 */
+#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED
+#define SPIN_UNLOCK_IMPLIES_MMIOWB
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))
+#define flow_block_offload tc_block_offload
+#define flow_block_command tc_block_command
+#define flow_cls_offload tc_cls_flower_offload
+#define flow_block_binder_type tcf_block_binder_type
+#define flow_cls_common_offload tc_cls_common_offload
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE
+#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
+		TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+				   struct list_head *driver_list,
+				   tc_setup_cb_t *cb,
+				   void *cb_ident, void *cb_priv,
+				   bool ingress_only);
+
+#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+				   ingress_only) \
+	_kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+				       ingress_only)
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#else /* RHEL >= 8.2 */
+#define HAVE_FLOW_BLOCK_API
+#endif /* RHEL >= 8.2 */
+#else /* >= 5.3.0 */
+#define XSK_UMEM_RETURNS_XDP_DESC
+#define HAVE_FLOW_BLOCK_API
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \
+     !(SLE_VERSION_CODE >=
SLE_VERSION(15,2,0)))
+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
+{
+	return frag->page_offset;
+}
+
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+	frag->page_offset += delta;
+}
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */
+#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0))
+#define HAVE_NDO_XSK_WAKEUP
+#endif /* SLES15sp2 */
+#else /* >= 5.4.0 */
+#define HAVE_NDO_XSK_WAKEUP
+#endif /* 5.4.0 */
+
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/kcompat_overflow.h b/drivers/net/ethernet/intel/iavf/kcompat_overflow.h
new file mode 100644
index 000000000000..8de90dd3563f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/kcompat_overflow.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, Intel Corporation. */
+
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+/* The is_signed_type macro is redefined in a few places in various kernel
+ * headers. If this header is included at the same time as one of those, we
+ * will generate compilation warnings. Since we can't fix every old kernel,
+ * rename is_signed_type for this file to _kc_is_signed_type. This prevents
+ * the macro name collision, and should be safe since our drivers do not
+ * directly call the macro.
+ */
+#define _kc_is_signed_type(type)	(((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
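+ *
+ * A minimal usage sketch (variable names are illustrative, not taken
+ * from the driver): with all three operands of type size_t,
+ *
+ *	size_t bytes;
+ *
+ *	if (check_add_overflow(payload_len, header_len, &bytes))
+ *		return -EOVERFLOW;
+ *
+ * stores the (possibly wrapped) sum in bytes and returns true on
+ * overflow, false otherwise.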
+ */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. 
This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. 
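+ *
+ * For example, array3_size(2, 3, sizeof(u32)) yields 2 * 3 * 4 = 24
+ * bytes, whereas a product that does not fit in size_t yields SIZE_MAX.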
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+	if (check_mul_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(n, size, &bytes))
+		return SIZE_MAX;
+	if (check_add_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n)					\
+	__ab_c_size(n,							\
+		    sizeof(*(p)->member) + __must_be_array((p)->member),\
+		    sizeof(*(p)))
+
+#endif /* __LINUX_OVERFLOW_H */
diff --git a/drivers/net/ethernet/intel/iavf/kcompat_vfd.c b/drivers/net/ethernet/intel/iavf/kcompat_vfd.c
new file mode 100644
index 000000000000..03c2c5f8bba6
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/kcompat_vfd.c
@@ -0,0 +1,2550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2013, Intel Corporation. */
+
+#include "kcompat.h"
+#include "kcompat_vfd.h"
+
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
+const struct vfd_ops *vfd_ops = NULL;
+
+/**
+ * __get_pf_pdev - helper function to get the pdev
+ * @kobj:	kobject passed
+ * @pdev:	PCI device information struct
+ */
+static int __get_pf_pdev(struct kobject *kobj, struct pci_dev **pdev)
+{
+	struct device *dev;
+
+	if (!kobj->parent)
+		return -EINVAL;
+
+	/* get pdev */
+	dev = to_dev(kobj->parent);
+	*pdev = to_pci_dev(dev);
+
+	return 0;
+}
+
+/**
+ * __get_pdev_and_vfid - helper function to get the pdev and the vf id
+ * @kobj:	kobject passed
+ * @pdev:	PCI device information struct
+ * @vf_id:	VF id of the VF under consideration
+ */
+static int __get_pdev_and_vfid(struct kobject *kobj, struct pci_dev **pdev,
+			       int *vf_id)
+{
+	struct device *dev;
+
+	if (!kobj->parent->parent)
+		return -EINVAL;
+
+	/* get pdev */
+	dev = to_dev(kobj->parent->parent);
+	*pdev = to_pci_dev(dev);
+
+	/* get vf_id */
+	if (kstrtoint(kobj->name, 10, vf_id) != 0) {
+		dev_err(&(*pdev)->dev, "Failed to convert %s to vf_id\n",
+			kobj->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * __parse_bool_data - helper function to parse boolean data
+ * @pdev:	PCI device information struct
+ * @buff:	buffer with input data
+ * @attr_name:	name of the attribute
+ * @data:	pointer to output data
+ */
+static int __parse_bool_data(struct pci_dev *pdev, const char *buff,
+			     const char *attr_name, bool *data)
+{
+	if (sysfs_streq("on", buff)) {
+		*data = true;
+	} else if (sysfs_streq("off", buff)) {
+		*data = false;
+	} else {
+		dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * __parse_egress_ingress_input - helper function for ingress/egress_mirror attributes
+ * @pdev:	PCI device information struct
+ * @buff:	buffer with input data
+ * @attr_name:	name of the attribute
+ * @data_new:	pointer to input data merged with the old data
+ * @data_old:	pointer to old data of the attribute
+ *
+ * Get the input data for egress_mirror or ingress_mirror attribute in the form
+ * "add <number>" or "rem <number>".
+ * Set the output data to off if in "rem <number>", <number> matches old data.
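+ * For example, writing "add 3" sets *data_new to 3, while "rem 3" resets
+ * *data_new to the mirror-off value provided the current value is 3.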
+ *
+ */
+static int __parse_egress_ingress_input(struct pci_dev *pdev, const char *buff,
+					const char *attr_name, int *data_new,
+					int *data_old)
+{
+	int ret = 0;
+	char *p;
+
+	if (strstr(buff, "add")) {
+		p = strstr(buff, "add");
+
+		ret = kstrtoint(p + sizeof("add"), 10, data_new);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"add %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+	} else if (strstr(buff, "rem")) {
+		p = strstr(buff, "rem");
+
+		ret = kstrtoint(p + sizeof("rem"), 10, data_new);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"rem %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+
+		if (*data_new == *data_old) {
+			if (!strcmp(attr_name, "egress_mirror"))
+				*data_new = VFD_EGRESS_MIRROR_OFF;
+			else if (!strcmp(attr_name, "ingress_mirror"))
+				*data_new = VFD_INGRESS_MIRROR_OFF;
+		} else {
+			dev_err(&pdev->dev,
+				"rem %s: input doesn't match current value",
+				attr_name);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * __parse_add_rem_bitmap - helper function to parse bitmap data
+ * @pdev:	PCI device information struct
+ * @buff:	buffer with input data
+ * @attr_name:	name of the attribute
+ * @data_new:	pointer to input data merged with the old data
+ * @data_old:	pointer to old data of the attribute
+ *
+ * If passed add: set data_new to "data_old | data_input"
+ * If passed rem: set data_new to "data_old & ~data_input"
+ */
+static int __parse_add_rem_bitmap(struct pci_dev *pdev, const char *buff,
+				  const char *attr_name,
+				  unsigned long *data_new,
+				  unsigned long *data_old)
+{
+	int ret = 0;
+	char *p;
+
+	if (strstr(buff, "add")) {
+		p = strstr(buff, "add");
+		bitmap_zero(data_new, VLAN_N_VID);
+
+		ret = bitmap_parselist(p + sizeof("add"), data_new, VLAN_N_VID);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"add %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+
+		bitmap_or(data_new, data_new, data_old, VLAN_N_VID);
+	} else if (strstr(buff, "rem")) {
+		p = strstr(buff, "rem");
+		bitmap_zero(data_new, VLAN_N_VID);
+
+		ret = bitmap_parselist(p + sizeof("rem"), data_new, VLAN_N_VID);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"rem %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+
+		/* new = old & ~rem */
+		bitmap_andnot(data_new, data_old, data_new, VLAN_N_VID);
+	} else {
+		dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * __parse_promisc_input - helper function for promisc attributes
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ * @cmd:	return pointer to cmd into buff
+ * @subcmd:	return pointer to subcmd into buff
+ *
+ * Get the input data for promisc attributes in the form "add/rem mcast/ucast".
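+ * For example, "add ucast" parses with *cmd left pointing at "add ..."
+ * and *subcmd at "ucast"; any other command/subcommand pair, or trailing
+ * garbage, returns -EINVAL.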
+ */ +static int __parse_promisc_input(const char *buff, size_t count, + const char **cmd, const char **subcmd) +{ + size_t idx = 0; + + /* Remove start spaces */ + while (buff[idx] == ' ' && idx < count) + idx++; + + /* Parse cmd */ + if (strncmp(&buff[idx], "add", strlen("add")) == 0) { + *cmd = &buff[idx]; + idx += strlen("add"); + } else if (strncmp(&buff[idx], "rem", strlen("rem")) == 0) { + *cmd = &buff[idx]; + idx += strlen("rem"); + } else { + return -EINVAL; + } + + if (buff[idx++] != ' ') + return -EINVAL; + + /* Remove spaces between cmd */ + while (buff[idx] == ' ' && idx < count) + idx++; + + /* Parse subcmd */ + if (strncmp(&buff[idx], "ucast", strlen("ucast")) == 0) { + *subcmd = &buff[idx]; + idx += strlen("ucast"); + } else if (strncmp(&buff[idx], "mcast", strlen("mcast")) == 0) { + *subcmd = &buff[idx]; + idx += strlen("mcast"); + } else { + return -EINVAL; + } + + /* Remove spaces after subcmd */ + while ((buff[idx] == ' ' || buff[idx] == '\n') && idx < count) + idx++; + + if (idx != count) + return -EINVAL; + + return 0; +} + +/* Handlers for each VFd operation */ + +/** + * vfd_trunk_show - handler for trunk show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * + * Get current data from driver and copy to buffer + **/ +static ssize_t vfd_trunk_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + + DECLARE_BITMAP(data, VLAN_N_VID); + bitmap_zero(data, VLAN_N_VID); + + if (!vfd_ops->get_trunk) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_trunk(pdev, vf_id, data); + if (ret) + ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID); + + return ret; +} + +/** + * vfd_trunk_store - handler for trunk store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * Get current data from driver, compose new data based on input values + * depending on "add" or "rem" command, and pass new data to the driver to set. + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_trunk_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + unsigned long *data_old, *data_new; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_trunk || !vfd_ops->get_trunk) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_old) + return -ENOMEM; + data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_new) { + kfree(data_old); + return -ENOMEM; + } + + ret = vfd_ops->get_trunk(pdev, vf_id, data_old); + if (ret < 0) + goto err_free; + + ret = __parse_add_rem_bitmap(pdev, buff, "trunk", data_new, data_old); + if (ret) + goto err_free; + + if (!bitmap_equal(data_new, data_old, VLAN_N_VID)) + ret = vfd_ops->set_trunk(pdev, vf_id, data_new); + +err_free: + kfree(data_old); + kfree(data_new); + return ret ? 
ret : count; +} + +/** + * vfd_vlan_mirror_show - handler for vlan_mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * + * Get current data from driver and copy to buffer + **/ +static ssize_t vfd_vlan_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + + DECLARE_BITMAP(data, VLAN_N_VID); + bitmap_zero(data, VLAN_N_VID); + + if (!vfd_ops->get_vlan_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data); + if (ret) + ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID); + + return ret; +} + +/** + * vfd_vlan_mirror_store - handler for vlan_mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * Get current data from driver, compose new data based on input values + * depending on "add" or "rem" command, and pass new data to the driver to set. + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_vlan_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + unsigned long *data_old, *data_new; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_vlan_mirror || !vfd_ops->get_vlan_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_old) + return -ENOMEM; + data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long), + GFP_KERNEL); + if (!data_new) { + kfree(data_old); + return -ENOMEM; + } + + ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data_old); + if (ret < 0) + goto err_free; + + ret = __parse_add_rem_bitmap(pdev, buff, "vlan_mirror", + data_new, data_old); + if (ret) + goto err_free; + + if (!bitmap_equal(data_new, data_old, VLAN_N_VID)) + ret = vfd_ops->set_vlan_mirror(pdev, vf_id, data_new); + +err_free: + kfree(data_old); + kfree(data_new); + return ret ? 
ret : count; +} + +/** + * vfd_egress_mirror_show - handler for egress_mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_egress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + int data; + + if (!vfd_ops->get_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data == VFD_EGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * vfd_egress_mirror_store - handler for egress_mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_egress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_egress_mirror || !vfd_ops->get_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror", + &data_new, &data_old); + if (ret) + return ret; + if(data_new == vf_id) { + dev_err(&pdev->dev, "VF %d: Setting egress_mirror to itself is not allowed\n", vf_id); + return -EINVAL; + } + + if (data_new != data_old) + ret = vfd_ops->set_egress_mirror(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_ingress_mirror_show - handler for ingress_mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_ingress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + int data; + + if (!vfd_ops->get_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data == VFD_INGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * vfd_ingress_mirror_store - handler for ingress_mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_ingress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_ingress_mirror || !vfd_ops->get_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror", + &data_new, &data_old); + if (ret) + return ret; + if(data_new == vf_id) { + dev_err(&pdev->dev, "VF %d: Setting ingress_mirror to itself is not allowed\n", vf_id); + return -EINVAL; + } + + if (data_new != data_old) + ret = vfd_ops->set_ingress_mirror(pdev, vf_id, data_new); + + return ret ? 
ret : count; +} + +/** + * vfd_mac_anti_spoof_show - handler for mac_anti_spoof show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_mac_anti_spoof_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_mac_anti_spoof) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac_anti_spoof(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_mac_anti_spoof_store - handler for mac_anti_spoof store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_mac_anti_spoof_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_mac_anti_spoof || !vfd_ops->get_mac_anti_spoof) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac_anti_spoof(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "mac_anti_spoof", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_mac_anti_spoof(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_vlan_anti_spoof_show - handler for vlan_anti_spoof show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_vlan_anti_spoof_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_vlan_anti_spoof) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_anti_spoof(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_vlan_anti_spoof_store - handler for vlan_anti_spoof store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_vlan_anti_spoof_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_vlan_anti_spoof || !vfd_ops->get_vlan_anti_spoof) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_anti_spoof(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "vlan_anti_spoof", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_vlan_anti_spoof(pdev, vf_id, data_new); + + return ret ? 
ret : count; +} + +/** + * vfd_allow_untagged_show - handler for allow_untagged show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_allow_untagged_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_allow_untagged) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_allow_untagged_store - handler for allow_untagged store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_allow_untagged_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_allow_untagged || !vfd_ops->get_allow_untagged) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "allow_untagged", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_allow_untagged(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_loopback_show - handler for loopback show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_loopback_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_loopback) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_loopback(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_loopback_store - handler for loopback store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_loopback_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_loopback || !vfd_ops->get_loopback) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_loopback(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "loopback", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_loopback(pdev, vf_id, data_new); + + return ret ? 
ret : count; +} + +/** + * vfd_mac_show - handler for mac show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_mac_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buff) +{ + u8 macaddr[ETH_ALEN]; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->get_mac) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac(pdev, vf_id, macaddr); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%pM\n", macaddr); + + return ret; +} + +/** + * vfd_mac_store - handler for mac store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_mac_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + u8 macaddr[ETH_ALEN]; + u8 macaddr_old[ETH_ALEN]; + struct pci_dev *pdev; + int vf_id, ret = 0; + + if (!vfd_ops->set_mac || !vfd_ops->get_mac) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac(pdev, vf_id, macaddr_old); + if (ret < 0) + return ret; + + ret = sscanf(buff, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &macaddr[0], &macaddr[1], &macaddr[2], + &macaddr[3], &macaddr[4], &macaddr[5]); + + if (ret != 6) + return -EINVAL; + + if (!ether_addr_equal(macaddr, macaddr_old)) + ret = vfd_ops->set_mac(pdev, vf_id, macaddr); + + return ret ? ret : count; +} + +/** + * vfd_mac_list_show - handler for mac_list show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + * + * This function also frees the mac_list entries that the driver's + * get_mac_list callback allocated on its behalf. + * + **/ +static ssize_t vfd_mac_list_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + unsigned int mac_num_allowed, mac_num_list, mac_num_count; + const char *overflow_msg = "... 
and more\n"; + unsigned int mac_msg_len = 3 * ETH_ALEN; + struct list_head *pos, *n; + struct pci_dev *pdev; + int vf_id, ret; + char *written; + LIST_HEAD(mac_list); + + if (!vfd_ops->get_mac_list) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_mac_list(pdev, vf_id, &mac_list); + if (ret < 0) + goto err_free; + + mac_num_list = 0; + mac_num_count = 0; + list_for_each_safe(pos, n, &mac_list) + mac_num_list++; + + mac_num_allowed = (PAGE_SIZE - 1) / mac_msg_len; + if (mac_num_list > mac_num_allowed) + mac_num_allowed = (PAGE_SIZE - 1 - strlen(overflow_msg)) / + mac_msg_len; + + written = buff; + list_for_each_safe(pos, n, &mac_list) { + struct vfd_macaddr *mac = NULL; + + mac_num_count++; + mac = list_entry(pos, struct vfd_macaddr, list); + if (mac_num_count > mac_num_allowed) { + ret += scnprintf(written, PAGE_SIZE - ret, + "%s", overflow_msg); + goto err_free; + } else if (list_is_last(pos, &mac_list)) { + ret += scnprintf(written, PAGE_SIZE - ret, + "%pM\n", mac->mac); + } else { + ret += scnprintf(written, PAGE_SIZE - ret, + "%pM,", mac->mac); + } + written += mac_msg_len; + } + +err_free: + list_for_each_safe(pos, n, &mac_list) { + struct vfd_macaddr *mac = NULL; + + mac = list_entry(pos, struct vfd_macaddr, list); + list_del(pos); + kfree(mac); + } + return ret; +} + +/** + * vfd_mac_list_store - handler for mac_list store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * Parse the input MAC list into a local linked list and, depending on the + * "add" or "rem" command, pass that list to the driver to either add or + * remove the MACs. + * + * The entries allocated here for the local list are always freed before + * returning. + * + **/ +static ssize_t vfd_mac_list_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct list_head *pos, *n; + struct pci_dev *pdev; + u8 macaddr[ETH_ALEN]; + int vf_id, ret; + size_t shift; + bool add; + LIST_HEAD(mac_list_inp); + + if (!vfd_ops->add_macs_to_list || !vfd_ops->rem_macs_from_list) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + if (strstr(buff, "add")) { + shift = sizeof("add"); + add = true; + } else if (strstr(buff, "rem")) { + shift = sizeof("rem"); + add = false; + } else { + dev_err(&pdev->dev, "Invalid input string"); + ret = -EINVAL; + goto err_free; + } + + /* Get input data */ + for (;;) { + struct vfd_macaddr *mac_new; + + if (*(buff + shift) == ' ' || *(buff + shift) == ',') { + shift++; + continue; + } + + ret = sscanf(buff + shift, + "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &macaddr[0], &macaddr[1], &macaddr[2], + &macaddr[3], &macaddr[4], &macaddr[5]); + + if (ret != 6) + break; + + if (!is_valid_ether_addr(macaddr)) { + shift += 3 * ETH_ALEN; + continue; + } + + mac_new = kmalloc(sizeof(struct vfd_macaddr), GFP_KERNEL); + if (!mac_new) { + ret = -ENOMEM; + goto err_free; + } + + ether_addr_copy(mac_new->mac, macaddr); + list_add(&mac_new->list, &mac_list_inp); + + shift += 3 * ETH_ALEN; + } + + if (add) + ret = vfd_ops->add_macs_to_list(pdev, vf_id, &mac_list_inp); + else + ret = vfd_ops->rem_macs_from_list(pdev, vf_id, &mac_list_inp); + +err_free: + list_for_each_safe(pos, n, &mac_list_inp) { + struct vfd_macaddr *mac = NULL; + + mac = list_entry(pos, struct vfd_macaddr, list); + list_del(pos); + kfree(mac); + } + return ret ?
ret : count; +} + +/** + * vfd_promisc_show - handler for promisc show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_promisc_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u8 data; + + if (!vfd_ops->get_promisc) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_promisc(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data == VFD_PROMISC_UNICAST) + ret = scnprintf(buff, PAGE_SIZE, "ucast\n"); + else if (data == VFD_PROMISC_MULTICAST) + ret = scnprintf(buff, PAGE_SIZE, "mcast\n"); + else if (data == (VFD_PROMISC_UNICAST | VFD_PROMISC_MULTICAST)) + ret = scnprintf(buff, PAGE_SIZE, "ucast, mcast\n"); + else if (data == VFD_PROMISC_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_promisc_store - handler for promisc store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_promisc_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + u8 data_new, data_old; + struct pci_dev *pdev; + const char *subcmd; + const char *cmd; + int vf_id, ret; + + if (!vfd_ops->get_promisc || !vfd_ops->set_promisc) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_promisc(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_promisc_input(buff, count, &cmd, &subcmd); + if (ret) + goto promisc_err; + + if (strncmp(cmd, "add", strlen("add")) == 0) { + if (strncmp(subcmd, "ucast", strlen("ucast")) == 0) + data_new = data_old | VFD_PROMISC_UNICAST; + else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0) + data_new = data_old | VFD_PROMISC_MULTICAST; + else + goto promisc_err; + } else if (strncmp(cmd, "rem", strlen("rem")) == 0) { + if (strncmp(subcmd, "ucast", strlen("ucast")) == 0) + data_new = data_old & ~VFD_PROMISC_UNICAST; + else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0) + data_new = data_old & ~VFD_PROMISC_MULTICAST; + else + goto promisc_err; + } else { + goto promisc_err; + } + + if (data_new != data_old) + ret = vfd_ops->set_promisc(pdev, vf_id, data_new); + + return ret ? 
ret : count; + +promisc_err: + dev_err(&pdev->dev, "Invalid input string"); + return -EINVAL; +} + +/** + * vfd_vlan_strip_show - handler for vlan_strip show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_vlan_strip_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data; + + if (!vfd_ops->get_vlan_strip) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_vlan_strip_store - handler for vlan_strip store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_vlan_strip_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + bool data_new, data_old; + + if (!vfd_ops->set_vlan_strip || !vfd_ops->get_vlan_strip) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "vlan_strip", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_vlan_strip(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_link_state_show - handler for link_state show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_link_state_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + enum vfd_link_speed link_speed; + struct pci_dev *pdev; + int vf_id, ret = 0; + bool enabled; + + if (!vfd_ops->get_link_state) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_link_state(pdev, vf_id, &enabled, &link_speed); + if (ret < 0) + return ret; + + if (enabled) { + const char *speed_str; + + switch (link_speed) { + case VFD_LINK_SPEED_100MB: + speed_str = "100 Mbps"; + break; + case VFD_LINK_SPEED_1GB: + speed_str = "1 Gbps"; + break; + case VFD_LINK_SPEED_2_5GB: + speed_str = "2.5 Gbps"; + break; + case VFD_LINK_SPEED_5GB: + speed_str = "5 Gbps"; + break; + case VFD_LINK_SPEED_10GB: + speed_str = "10 Gbps"; + break; + case VFD_LINK_SPEED_40GB: + speed_str = "40 Gbps"; + break; + case VFD_LINK_SPEED_20GB: + speed_str = "20 Gbps"; + break; + case VFD_LINK_SPEED_25GB: + speed_str = "25 Gbps"; + break; + case VFD_LINK_SPEED_UNKNOWN: + speed_str = "unknown speed"; + break; + default: + dev_err(&pdev->dev, "Link speed is not supported"); + return -EOPNOTSUPP; + } + + ret = scnprintf(buff, PAGE_SIZE, "%s, %s\n", "up", speed_str); + } else { + ret = scnprintf(buff, PAGE_SIZE, "down\n"); + } + + return ret; +} + +/** + * vfd_link_state_store - handler for link_state store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_link_state_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u8 data; + + if (!vfd_ops->set_link_state) + return -EOPNOTSUPP; + 
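+ /* + * Note: the keywords accepted below mirror "ip link set DEV vf N state + * auto|enable|disable" and are mapped onto the VFD_LINKSTATE_* values + * consumed by the driver callback. + */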
+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + if (sysfs_streq("enable", buff)) { + data = VFD_LINKSTATE_ON; + } else if (sysfs_streq("disable", buff)) { + data = VFD_LINKSTATE_OFF; + } else if (sysfs_streq("auto", buff)) { + data = VFD_LINKSTATE_AUTO; + } else { + dev_err(&pdev->dev, "Invalid input string"); + return -EINVAL; + } + + ret = vfd_ops->set_link_state(pdev, vf_id, data); + + return ret ? ret : count; +} + +/** + * vfd_enable_show - handler for VF enable/disable show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + bool data; + + if (!vfd_ops->get_vf_enable) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vf_enable(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_enable_store - handler for VF enable/disable store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + * + * On success return count, indicating that we used the whole buffer. On + * failure return a negative error condition. + **/ +static ssize_t vfd_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + bool data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_vf_enable || !vfd_ops->get_vf_enable) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vf_enable(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "enable", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_vf_enable(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +/** + * vfd_max_tx_rate_show - handler for max_tx_rate show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_max_tx_rate_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + unsigned int max_tx_rate; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->get_max_tx_rate) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_max_tx_rate(pdev, vf_id, &max_tx_rate); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%u\n", max_tx_rate); + return ret; +} + +/** + * vfd_max_tx_rate_store - handler for max_tx_rate store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_max_tx_rate_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + unsigned int max_tx_rate; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_max_tx_rate) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = kstrtouint(buff, 10, &max_tx_rate); + if (ret) { + dev_err(&pdev->dev, + "Invalid argument, not a decimal number: %s", buff); + return ret; + } + + ret = vfd_ops->set_max_tx_rate(pdev, vf_id, &max_tx_rate); + + return ret ?
ret : count; +} + +/** + * vfd_min_tx_rate_show - handler for min_tx_rate show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_min_tx_rate_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + if (!vfd_ops->get_min_tx_rate) + return -EOPNOTSUPP; + + return vfd_ops->get_min_tx_rate(kobj, attr, buff); +} + +/** + * vfd_min_tx_rate_store - handler for min_tx_rate store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_min_tx_rate_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + if (!vfd_ops->set_min_tx_rate) + return -EOPNOTSUPP; + + return vfd_ops->set_min_tx_rate(kobj, attr, buff, count); +} + +/** + * vfd_spoofcheck_show - handler for spoofcheck show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_spoofcheck_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + if (!vfd_ops->get_spoofcheck) + return -EOPNOTSUPP; + + return vfd_ops->get_spoofcheck(kobj, attr, buff); +} + +/** + * vfd_spoofcheck_store - handler for spoofcheck store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_spoofcheck_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + if (!vfd_ops->set_spoofcheck) + return -EOPNOTSUPP; + + return vfd_ops->set_spoofcheck(kobj, attr, buff, count); +} + +/** + * vfd_trust_show - handler for trust show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_trust_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + bool data; + + if (!vfd_ops->get_trust_state) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_trust_state(pdev, vf_id, &data); + if (ret < 0) + return ret; + + if (data) + ret = scnprintf(buff, PAGE_SIZE, "on\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + + return ret; +} + +/** + * vfd_trust_store - handler for trust store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_trust_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + bool data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_trust_state || !vfd_ops->get_trust_state) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_trust_state(pdev, vf_id, &data_old); + if (ret < 0) + return ret; + + ret = __parse_bool_data(pdev, buff, "trust", &data_new); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_trust_state(pdev, vf_id, data_new); + + return ret ? 
ret : count; +} + +/** + * vfd_reset_stats_store - handler for reset stats store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_reset_stats_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int vf_id, reset, ret; + struct pci_dev *pdev; + + if (!vfd_ops->reset_stats) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + ret = kstrtoint(buff, 10, &reset); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + if (reset != 1) + return -EINVAL; + + ret = vfd_ops->reset_stats(pdev, vf_id); + + return ret ? ret : count; +} + +/** + * vfd_rx_bytes_show - handler for rx_bytes show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_rx_bytes_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_rx_bytes) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_rx_bytes(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_rx_dropped_show - handler for rx_dropped show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_rx_dropped_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_rx_dropped) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_rx_dropped(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_rx_packets_show - handler for rx_packets show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_rx_packets_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_rx_packets) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_rx_packets(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_bytes_show - handler for tx_bytes show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_bytes_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_bytes) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_bytes(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_dropped_show - handler for tx_dropped show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_dropped_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if 
(!vfd_ops->get_tx_dropped) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_dropped(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_packets_show - handler for tx_packets show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_packets_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_packets) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_packets(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_spoofed_show - handler for tx_spoofed show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_spoofed_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_spoofed) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_spoofed(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * vfd_tx_errors_show - handler for tx_errors show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_tx_errors_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret = 0; + u64 data; + + if (!vfd_ops->get_tx_errors) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_tx_errors(pdev, vf_id, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data); + + return ret; +} + +/** + * qos_share_show - handler for the bw_share show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t qos_share_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + u8 data = 0; + + if (!vfd_ops->get_vf_bw_share) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_vf_bw_share(pdev, vf_id, &data); + if (ret < 0) { + dev_err(&pdev->dev, "No bw share applied for VF %d\n", vf_id); + return ret; + } + + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * qos_share_store - handler for the bw_share store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t qos_share_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + int vf_id, ret; + u8 bw_share; + + if (!vfd_ops->set_vf_bw_share) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + /* parse the bw_share */ + ret = kstrtou8(buff, 10, &bw_share); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + /* check that the BW is between 1 and 100 */ + if (bw_share < 1 || bw_share > 
100) { + dev_err(&pdev->dev, "BW share has to be between 1-100\n"); + return -EINVAL; + } + ret = vfd_ops->set_vf_bw_share(pdev, vf_id, bw_share); + return ret ? ret : count; +} + +/** + * pf_qos_apply_store - handler for pf qos apply store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_qos_apply_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int ret, apply; + struct pci_dev *pdev; + + if (!vfd_ops->set_pf_qos_apply) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = kstrtoint(buff, 10, &apply); + if (ret) { + dev_err(&pdev->dev, + "Invalid input\n"); + return ret; + } + + if (apply != 1) + return -EINVAL; + + ret = vfd_ops->set_pf_qos_apply(pdev); + + return ret ? ret : count; +} + +/** + * pf_ingress_mirror_show - handler for PF ingress mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t pf_ingress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int ret, data; + + if (!vfd_ops->get_pf_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_ingress_mirror(pdev, &data); + if (ret < 0) + return ret; + + if (data == VFD_INGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * pf_ingress_mirror_store - handler for pf ingress mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_ingress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int ret; + + if (!vfd_ops->set_pf_ingress_mirror || !vfd_ops->get_pf_ingress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_ingress_mirror(pdev, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror", + &data_new, &data_old); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_pf_ingress_mirror(pdev, data_new); + + return ret ? 
ret : count; +} + +/** + * pf_egress_mirror_show - handler for PF egress mirror show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t pf_egress_mirror_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int ret, data; + + if (!vfd_ops->get_pf_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_egress_mirror(pdev, &data); + if (ret < 0) + return ret; + + if (data == VFD_EGRESS_MIRROR_OFF) + ret = scnprintf(buff, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buff, PAGE_SIZE, "%u\n", data); + + return ret; +} + +/** + * pf_egress_mirror_store - handler for pf egress mirror store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_egress_mirror_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int ret; + + if (!vfd_ops->set_pf_egress_mirror || !vfd_ops->get_pf_egress_mirror) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_egress_mirror(pdev, &data_old); + if (ret < 0) + return ret; + + ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror", + &data_new, &data_old); + if (ret) + return ret; + + if (data_new != data_old) + ret = vfd_ops->set_pf_egress_mirror(pdev, data_new); + + return ret ? ret : count; +} + +/** + * pf_tpid_show - handler for pf tpid show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t pf_tpid_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + u16 data; + int ret; + + if (!vfd_ops->get_pf_tpid) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = vfd_ops->get_pf_tpid(pdev, &data); + if (ret < 0) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%x\n", data); + + return ret; +} + +/** + * pf_tpid_store - handler for pf tpid store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t pf_tpid_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct pci_dev *pdev; + u16 data; + int ret; + + if (!vfd_ops->set_pf_tpid) + return -EOPNOTSUPP; + + ret = __get_pf_pdev(kobj, &pdev); + if (ret) + return ret; + + ret = kstrtou16(buff, 16, &data); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + ret = vfd_ops->set_pf_tpid(pdev, data); + + return ret ? 
ret : count; +} + +/** + * vfd_num_queues_show - handler for num_queues show function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer for data + **/ +static ssize_t vfd_num_queues_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + struct pci_dev *pdev; + int vf_id, ret; + int data; + + if (!vfd_ops->get_num_queues) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_num_queues(pdev, vf_id, &data); + if (ret) + return ret; + + ret = scnprintf(buff, PAGE_SIZE, "%d\n", data); + + return ret; +} + +/** + * vfd_num_queues_store - handler for num_queues store function + * @kobj: kobject being called + * @attr: struct kobj_attribute + * @buff: buffer with input data + * @count: size of buff + **/ +static ssize_t vfd_num_queues_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + int data_new, data_old; + struct pci_dev *pdev; + int vf_id, ret; + + if (!vfd_ops->set_num_queues) + return -EOPNOTSUPP; + + ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id); + if (ret) + return ret; + + ret = vfd_ops->get_num_queues(pdev, vf_id, &data_old); + if (ret) + return ret; + + ret = kstrtoint(buff, 10, &data_new); + if (ret) { + dev_err(&pdev->dev, "Invalid input\n"); + return ret; + } + + if (data_new < 1) { + dev_err(&pdev->dev, "VF queue count must be at least 1\n"); + return -EINVAL; + } + + if (data_new != data_old) + ret = vfd_ops->set_num_queues(pdev, vf_id, data_new); + + return ret ? ret : count; +} + +static struct kobj_attribute trunk_attribute = + __ATTR(trunk, 0644, vfd_trunk_show, vfd_trunk_store); +static struct kobj_attribute vlan_mirror_attribute = + __ATTR(vlan_mirror, 0644, vfd_vlan_mirror_show, vfd_vlan_mirror_store); +static struct kobj_attribute egress_mirror_attribute = + __ATTR(egress_mirror, 0644, + vfd_egress_mirror_show, vfd_egress_mirror_store); +static struct kobj_attribute ingress_mirror_attribute = + __ATTR(ingress_mirror, 0644, + vfd_ingress_mirror_show, vfd_ingress_mirror_store); +static struct kobj_attribute mac_anti_spoof_attribute = + __ATTR(mac_anti_spoof, 0644, + vfd_mac_anti_spoof_show, vfd_mac_anti_spoof_store); +static struct kobj_attribute vlan_anti_spoof_attribute = + __ATTR(vlan_anti_spoof, 0644, + vfd_vlan_anti_spoof_show, vfd_vlan_anti_spoof_store); +static struct kobj_attribute allow_untagged_attribute = + __ATTR(allow_untagged, 0644, + vfd_allow_untagged_show, vfd_allow_untagged_store); +static struct kobj_attribute loopback_attribute = + __ATTR(loopback, 0644, vfd_loopback_show, vfd_loopback_store); +static struct kobj_attribute mac_attribute = + __ATTR(mac, 0644, vfd_mac_show, vfd_mac_store); +static struct kobj_attribute mac_list_attribute = + __ATTR(mac_list, 0644, vfd_mac_list_show, vfd_mac_list_store); +static struct kobj_attribute promisc_attribute = + __ATTR(promisc, 0644, vfd_promisc_show, vfd_promisc_store); +static struct kobj_attribute vlan_strip_attribute = + __ATTR(vlan_strip, 0644, vfd_vlan_strip_show, vfd_vlan_strip_store); +static struct kobj_attribute link_state_attribute = + __ATTR(link_state, 0644, vfd_link_state_show, vfd_link_state_store); +static struct kobj_attribute max_tx_rate_attribute = + __ATTR(max_tx_rate, 0644, vfd_max_tx_rate_show, vfd_max_tx_rate_store); +static struct kobj_attribute min_tx_rate_attribute = + __ATTR(min_tx_rate, 0644, vfd_min_tx_rate_show, vfd_min_tx_rate_store); +static struct kobj_attribute spoofcheck_attribute = + __ATTR(spoofcheck, 0644, 
vfd_spoofcheck_show, vfd_spoofcheck_store); +static struct kobj_attribute trust_attribute = + __ATTR(trust, 0644, vfd_trust_show, vfd_trust_store); +static struct kobj_attribute reset_stats_attribute = + __ATTR(reset_stats, 0200, NULL, vfd_reset_stats_store); +static struct kobj_attribute enable_attribute = + __ATTR(enable, 0644, vfd_enable_show, vfd_enable_store); +static struct kobj_attribute num_queues_attribute = + __ATTR(num_queues, 0644, vfd_num_queues_show, vfd_num_queues_store); + +static struct attribute *s_attrs[] = { + &trunk_attribute.attr, + &vlan_mirror_attribute.attr, + &egress_mirror_attribute.attr, + &ingress_mirror_attribute.attr, + &mac_anti_spoof_attribute.attr, + &vlan_anti_spoof_attribute.attr, + &allow_untagged_attribute.attr, + &loopback_attribute.attr, + &mac_attribute.attr, + &mac_list_attribute.attr, + &promisc_attribute.attr, + &vlan_strip_attribute.attr, + &link_state_attribute.attr, + &max_tx_rate_attribute.attr, + &min_tx_rate_attribute.attr, + &spoofcheck_attribute.attr, + &trust_attribute.attr, + &reset_stats_attribute.attr, + &enable_attribute.attr, + &num_queues_attribute.attr, + NULL, +}; + +static struct attribute_group vfd_group = { + .attrs = s_attrs, +}; + +static struct kobj_attribute rx_bytes_attribute = + __ATTR(rx_bytes, 0444, vfd_rx_bytes_show, NULL); +static struct kobj_attribute rx_dropped_attribute = + __ATTR(rx_dropped, 0444, vfd_rx_dropped_show, NULL); +static struct kobj_attribute rx_packets_attribute = + __ATTR(rx_packets, 0444, vfd_rx_packets_show, NULL); +static struct kobj_attribute tx_bytes_attribute = + __ATTR(tx_bytes, 0444, vfd_tx_bytes_show, NULL); +static struct kobj_attribute tx_dropped_attribute = + __ATTR(tx_dropped, 0444, vfd_tx_dropped_show, NULL); +static struct kobj_attribute tx_packets_attribute = + __ATTR(tx_packets, 0444, vfd_tx_packets_show, NULL); +static struct kobj_attribute tx_spoofed_attribute = + __ATTR(tx_spoofed, 0444, vfd_tx_spoofed_show, NULL); +static struct kobj_attribute tx_errors_attribute = + __ATTR(tx_errors, 0444, vfd_tx_errors_show, NULL); + +static struct attribute *stats_attrs[] = { + &rx_bytes_attribute.attr, + &rx_dropped_attribute.attr, + &rx_packets_attribute.attr, + &tx_bytes_attribute.attr, + &tx_dropped_attribute.attr, + &tx_packets_attribute.attr, + &tx_spoofed_attribute.attr, + &tx_errors_attribute.attr, + NULL, +}; + +static struct attribute_group stats_group = { + .name = "stats", + .attrs = stats_attrs, +}; + +static struct kobj_attribute share_attribute = + __ATTR(share, 0644, qos_share_show, qos_share_store); + +static struct attribute *qos_attrs[] = { + &share_attribute.attr, + NULL, +}; + +static struct attribute_group qos_group = { + .name = "qos", + .attrs = qos_attrs, +}; + +static struct kobj_attribute apply_attribute = + __ATTR(apply, 0200, NULL, pf_qos_apply_store); + +static struct attribute *pf_qos_attrs[] = { + &apply_attribute.attr, + NULL, +}; + +static struct attribute_group pf_qos_group = { + .name = "qos", + .attrs = pf_qos_attrs, +}; + +static struct kobj_attribute pf_ingress_mirror_attribute = + __ATTR(ingress_mirror, 0644, pf_ingress_mirror_show, pf_ingress_mirror_store); +static struct kobj_attribute pf_egress_mirror_attribute = + __ATTR(egress_mirror, 0644, pf_egress_mirror_show, pf_egress_mirror_store); +static struct kobj_attribute pf_tpid_attribute = + __ATTR(tpid, 0644, pf_tpid_show, pf_tpid_store); + +static struct attribute *pf_attrs[] = { + &pf_ingress_mirror_attribute.attr, + &pf_egress_mirror_attribute.attr, + &pf_tpid_attribute.attr, + NULL, +}; + +static 
struct attribute_group pf_attr_group = { + .attrs = pf_attrs, +}; + +/** + * create_vfs_sysfs - create sysfs hierarchy for VF + * @pdev: PCI device information struct + * @vfd_obj: VF-d kobjects information struct + * + * Creates a kobject for Virtual Function and assigns attributes to it. + **/ +static int create_vfs_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj) +{ + struct kobject *vf_kobj; + char kname[4]; + int ret, i; + + for (i = 0; i < vfd_obj->num_vfs; i++) { + int length = snprintf(kname, sizeof(kname), "%d", i); + + if (length >= sizeof(kname)) { + dev_err(&pdev->dev, + "cannot request %d vfs, try again with smaller number of vfs\n", + i); + --i; + ret = -EINVAL; + goto err_vfs_sysfs; + } + + vf_kobj = kobject_create_and_add(kname, vfd_obj->sriov_kobj); + if (!vf_kobj) { + dev_err(&pdev->dev, + "failed to create VF kobj: %s\n", kname); + i--; + ret = -ENOMEM; + goto err_vfs_sysfs; + } + dev_info(&pdev->dev, "created VF %s sysfs", vf_kobj->name); + vfd_obj->vf_kobj[i] = vf_kobj; + + /* create VF sys attr */ + ret = sysfs_create_group(vfd_obj->vf_kobj[i], &vfd_group); + if (ret) { + dev_err(&pdev->dev, "failed to create VF sys attribute: %d", i); + goto err_vfs_sysfs; + } + + /* create VF stats sys attr */ + ret = sysfs_create_group(vfd_obj->vf_kobj[i], &stats_group); + if (ret) { + dev_err(&pdev->dev, "failed to create VF stats attribute: %d", i); + goto err_vfs_sysfs; + } + + /* create VF qos sys attr */ + ret = sysfs_create_group(vfd_obj->vf_kobj[i], &qos_group); + if (ret) { + dev_err(&pdev->dev, "failed to create VF qos attribute: %d", i); + goto err_vfs_sysfs; + } + } + + return 0; + +err_vfs_sysfs: + for (; i >= 0; i--) + kobject_put(vfd_obj->vf_kobj[i]); + return ret; +} + +/** + * create_vfd_sysfs - create sysfs hierarchy used by VF-d + * @pdev: PCI device information struct + * @num_alloc_vfs: number of VFs to allocate + * + * If the kobjects were not able to be created, NULL will be returned. 
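+ * + * On success the hierarchy is reachable through the device's sysfs + * directory, e.g. /sys/bus/pci/devices/<domain:bus:dev.fn>/sriov/, with one + * numbered directory per VF (sriov/0/mac, sriov/0/stats/rx_bytes, + * sriov/0/qos/share, ...) next to the PF-level qos/apply, ingress_mirror, + * egress_mirror and tpid attributes.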
+ **/ +struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs) +{ + struct vfd_objects *vfd_obj; + int ret; + + vfd_obj = kzalloc(sizeof(*vfd_obj) + + sizeof(struct kobject *)*num_alloc_vfs, GFP_KERNEL); + if (!vfd_obj) + return NULL; + + vfd_obj->num_vfs = num_alloc_vfs; + + vfd_obj->sriov_kobj = kobject_create_and_add("sriov", &pdev->dev.kobj); + if (!vfd_obj->sriov_kobj) + goto err_sysfs; + + dev_info(&pdev->dev, "created %s sysfs", vfd_obj->sriov_kobj->name); + + ret = create_vfs_sysfs(pdev, vfd_obj); + if (ret) + goto err_sysfs; + + /* create PF qos sys attr */ + ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_qos_group); + if (ret) { + dev_err(&pdev->dev, "failed to create PF qos sys attribute"); + goto err_sysfs; + } + + /* create PF attrs */ + ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_attr_group); + if (ret) { + dev_err(&pdev->dev, "failed to create PF attr sys attribute"); + goto err_sysfs; + } + return vfd_obj; + +err_sysfs: + kobject_put(vfd_obj->sriov_kobj); + kfree(vfd_obj); + return NULL; +} + +/** + * destroy_vfd_sysfs - destroy sysfs hierarchy used by VF-d + * @pdev: PCI device information struct + * @vfd_obj: VF-d kobjects information struct + **/ +void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj) +{ + int i; + + for (i = 0; i < vfd_obj->num_vfs; i++) { + dev_info(&pdev->dev, "deleting VF %s sysfs", + vfd_obj->vf_kobj[i]->name); + kobject_put(vfd_obj->vf_kobj[i]); + } + + dev_info(&pdev->dev, "deleting %s sysfs", vfd_obj->sriov_kobj->name); + kobject_put(vfd_obj->sriov_kobj); + kfree(vfd_obj); +} diff --git a/drivers/net/ethernet/intel/iavf/kcompat_vfd.h b/drivers/net/ethernet/intel/iavf/kcompat_vfd.h new file mode 100644 index 000000000000..e5f3918643c3 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/kcompat_vfd.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2013, Intel Corporation. 
*/ + +#ifndef _KCOMPAT_VFD_H_ +#define _KCOMPAT_VFD_H_ + +#define VFD_PROMISC_OFF 0x00 +#define VFD_PROMISC_UNICAST 0x01 +#define VFD_PROMISC_MULTICAST 0x02 + +#define VFD_LINKSTATE_OFF 0x00 +#define VFD_LINKSTATE_ON 0x01 +#define VFD_LINKSTATE_AUTO 0x02 + +#define VFD_EGRESS_MIRROR_OFF -1 +#define VFD_INGRESS_MIRROR_OFF -1 + +/** + * struct vfd_objects - VF-d kobjects information struct + * @num_vfs: number of VFs allocated + * @sriov_kobj: pointer to the top sriov kobject + * @vf_kobj: array of pointer to each VF's kobjects + */ +struct vfd_objects { + int num_vfs; + struct kobject *sriov_kobj; + struct kobject *vf_kobj[0]; +}; + +struct vfd_macaddr { + u8 mac[ETH_ALEN]; + struct list_head list; +}; + +#define VFD_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VFD_LINK_SPEED_100MB_SHIFT 0x1 +#define VFD_LINK_SPEED_1GB_SHIFT 0x2 +#define VFD_LINK_SPEED_10GB_SHIFT 0x3 +#define VFD_LINK_SPEED_40GB_SHIFT 0x4 +#define VFD_LINK_SPEED_20GB_SHIFT 0x5 +#define VFD_LINK_SPEED_25GB_SHIFT 0x6 +#define VFD_LINK_SPEED_5GB_SHIFT 0x7 + + +enum vfd_link_speed { + VFD_LINK_SPEED_UNKNOWN = 0, + VFD_LINK_SPEED_100MB = BIT(VFD_LINK_SPEED_100MB_SHIFT), + VFD_LINK_SPEED_1GB = BIT(VFD_LINK_SPEED_1GB_SHIFT), + VFD_LINK_SPEED_2_5GB = BIT(VFD_LINK_SPEED_2_5GB_SHIFT), + VFD_LINK_SPEED_5GB = BIT(VFD_LINK_SPEED_5GB_SHIFT), + VFD_LINK_SPEED_10GB = BIT(VFD_LINK_SPEED_10GB_SHIFT), + VFD_LINK_SPEED_40GB = BIT(VFD_LINK_SPEED_40GB_SHIFT), + VFD_LINK_SPEED_20GB = BIT(VFD_LINK_SPEED_20GB_SHIFT), + VFD_LINK_SPEED_25GB = BIT(VFD_LINK_SPEED_25GB_SHIFT), +}; + +struct vfd_ops { + int (*get_trunk)(struct pci_dev *pdev, int vf_id, unsigned long *buff); + int (*set_trunk)(struct pci_dev *pdev, int vf_id, + const unsigned long *buff); + int (*get_vlan_mirror)(struct pci_dev *pdev, int vf_id, + unsigned long *buff); + int (*set_vlan_mirror)(struct pci_dev *pdev, int vf_id, + const unsigned long *buff); + int (*get_egress_mirror)(struct pci_dev *pdev, int vf_id, int *data); + int (*set_egress_mirror)(struct pci_dev *pdev, int vf_id, + const int data); + int (*get_ingress_mirror)(struct pci_dev *pdev, int vf_id, int *data); + int (*set_ingress_mirror)(struct pci_dev *pdev, int vf_id, + const int data); + int (*get_mac_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_mac_anti_spoof)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_allow_untagged)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_allow_untagged)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_loopback)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_loopback)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_mac)(struct pci_dev *pdev, int vf_id, u8 *macaddr); + int (*set_mac)(struct pci_dev *pdev, int vf_id, const u8 *macaddr); + int (*get_mac_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*add_macs_to_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*rem_macs_from_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*get_promisc)(struct pci_dev *pdev, int vf_id, u8 *data); + int (*set_promisc)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_vlan_strip)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vlan_strip)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_link_state)(struct pci_dev *pdev, int vf_id, bool *enabled, + enum 
vfd_link_speed *link_speed); + int (*set_link_state)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_max_tx_rate)(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate); + int (*set_max_tx_rate)(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate); + int (*get_min_tx_rate)(struct kobject *, + struct kobj_attribute *, char *); + int (*set_min_tx_rate)(struct kobject *, struct kobj_attribute *, + const char *, size_t); + int (*get_spoofcheck)(struct kobject *, + struct kobj_attribute *, char *); + int (*set_spoofcheck)(struct kobject *, struct kobj_attribute *, + const char *, size_t); + int (*get_trust)(struct kobject *, + struct kobj_attribute *, char *); + int (*set_trust)(struct kobject *, struct kobj_attribute *, + const char *, size_t); + int (*get_vf_enable)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vf_enable)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_rx_bytes) (struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_rx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_rx_packets)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_bytes) (struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_packets)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_spoofed)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_errors)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*reset_stats)(struct pci_dev *pdev, int vf_id); + int (*set_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 bw_share); + int (*get_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 *bw_share); + int (*set_pf_qos_apply)(struct pci_dev *pdev); + int (*get_pf_ingress_mirror)(struct pci_dev *pdev, int *data); + int (*set_pf_ingress_mirror)(struct pci_dev *pdev, const int data); + int (*get_pf_egress_mirror)(struct pci_dev *pdev, int *data); + int (*set_pf_egress_mirror)(struct pci_dev *pdev, const int data); + int (*get_pf_tpid)(struct pci_dev *pdev, u16 *data); + int (*set_pf_tpid)(struct pci_dev *pdev, const u16 data); + int (*get_num_queues)(struct pci_dev *pdev, int vf_id, int *num_queues); + int (*set_num_queues)(struct pci_dev *pdev, int vf_id, const int num_queues); + int (*get_trust_state)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_trust_state)(struct pci_dev *pdev, int vf_id, bool data); +}; + +extern const struct vfd_ops *vfd_ops; + +#endif /* _KCOMPAT_VFD_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/virtchnl.h b/drivers/net/ethernet/intel/iavf/virtchnl.h new file mode 100644 index 000000000000..217798188400 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/virtchnl.h @@ -0,0 +1,949 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2013, Intel Corporation. */ + +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the drivers for all devices starting from our 40G product line + * + * Admin queue buffer usage: + * desc->opcode is always aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. + * + * All the VSI indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. 
Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. + * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. + */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +/* Backward compatibility */ +#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM +#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED + +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
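+ * For example, VIRTCHNL_OP_IWARP is only valid for a VF that negotiated + * VIRTCHNL_VF_OFFLOAD_IWARP, and the channel and cloud filter opcodes + * depend on the VIRTCHNL_VF_OFFLOAD_ADQ capability.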
+ */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + /* opcode 19 is reserved */ + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_ENABLE_CHANNELS = 30, + VIRTCHNL_OP_DISABLE_CHANNELS = 31, + VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, + VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, + /* opcodes 34, 35, 36, 37 and 38 are reserved */ +}; + +/* These macros are used to generate compilation errors if a structure/union + * is not exactly the correct length. It gives a divide by zero error if the + * structure/union is not of the correct size, otherwise it creates an enum + * that is never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } +#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures. */ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. 
The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF capability flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. + */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 +#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 +#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000 +#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000 + /* 0X80000000 is reserved */ + +/* Define below the capability flags that are not offloads */ +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. The + * crc_disable flag disables CRC stripping on the VF. 
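+ * (For illustration, assuming the VIRTCHNL_VF_OFFLOAD_CRC capability
+ * was negotiated via VIRTCHNL_OP_GET_VF_RESOURCES, a VF that wants to
+ * receive frames with the FCS intact would set, for every queue:
+ *
+ *	rxq_info.crc_disable = 1;
+ *
+ * where rxq_info is a local struct virtchnl_rxq_info of this sketch.)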
Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled;	/* deprecated with AVF 1.0 */
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u8 crc_disable;
+	u8 pad1[3];
+	u64 dma_ring_addr;
+	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+	u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct virtchnl_txq_info txq;
+	struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	u32 pad;
+	struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+	u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
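+ * (Hedged example: to enable queue pairs 0-3 on a VSI, a VF might fill
+ * the struct virtchnl_queue_select defined below as
+ *
+ *	struct virtchnl_queue_select vqs = {
+ *		.vsi_id = 5,
+ *		.rx_queues = 0xf,
+ *		.tx_queues = 0xf,
+ *	};
+ *
+ * the VSI id 5 is an arbitrary value chosen for this sketch.)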
+ * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + * NOTE: The VF is not required to enable/disable all queues in a single + * request. It may send multiple messages. + * PF drivers must correctly handle all VF requests. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct virtchnl_eth_stats in an external buffer. + */ + +struct virtchnl_eth_stats { + u64 rx_bytes; /* received bytes */ + u64 rx_unicast; /* received unicast pkts */ + u64 rx_multicast; /* received multicast pkts */ + u64 rx_broadcast; /* received broadcast pkts */ + u64 rx_discards; + u64 rx_unknown_protocol; + u64 tx_bytes; /* transmitted bytes */ + u64 tx_unicast; /* transmitted unicast pkts */ + u64 tx_multicast; /* transmitted multicast pkts */ + u64 tx_broadcast; /* transmitted broadcast pkts */ + u64 tx_discards; + u64 tx_errors; +}; + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. 
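+ *
+ * (Illustrative sketch, not part of this interface: a VF might spread
+ * lookup-table entries evenly over its queues before sending
+ * VIRTCHNL_OP_CONFIG_RSS_LUT, e.g.
+ *
+ *	for (i = 0; i < lut->lut_entries; i++)
+ *		lut->lut[i] = i % num_queue_pairs;
+ *
+ * where lut points at a heap-allocated struct virtchnl_rss_lut and
+ * num_queue_pairs comes from the VIRTCHNL_OP_GET_VF_RESOURCES reply.)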
+ */
+struct virtchnl_rss_key {
+	u16 vsi_id;
+	u16 key_len;
+	u8 key[1];	/* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+	u16 vsi_id;
+	u16 lut_entries;
+	u8 lut[1];	/* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+	u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* This is used by the PF driver to enforce how many channels can be
+ * supported. When the ADQ_V2 capability is negotiated, the PF allows up
+ * to 16 channels; otherwise it allows at most 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+	u16 count; /* number of queues in a channel */
+	u16 offset; /* queues in a channel start from 'offset' */
+	u32 pad;
+	u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+	u32 num_tc;
+	u32 pad;
+	struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+	u8 src_mac[ETH_ALEN];
+	u8 dst_mac[ETH_ALEN];
+	/* vlan_prio is part of this 16-bit field even from the OS
+	 * perspective: bits 11..0 carry the actual VLAN ID, and bits
+	 * 14..12 are reserved for vlan_prio. If VLAN priority offload is
+	 * added in the future, that information will be passed in bits
+	 * 14..12 of the "vlan_id" field.
+	 */
+	__be16 vlan_id;
+	__be16 pad; /* reserved for future use */
+	__be32 src_ip[4];
+	__be32 dst_ip[4];
+	__be16 src_port;
+	__be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+	struct virtchnl_l4_spec tcp_spec;
+	u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+	/* action types */
+	VIRTCHNL_ACTION_DROP = 0,
+	VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+	/* flow types */
+	VIRTCHNL_TCP_V4_FLOW = 0,
+	VIRTCHNL_TCP_V6_FLOW,
+	VIRTCHNL_UDP_V4_FLOW,
+	VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+	union virtchnl_flow_spec data;
+	union virtchnl_flow_spec mask;
+	enum virtchnl_flow_type flow_type;
+	enum virtchnl_action action;
+	u32 action_meta;
+	u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
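+ *
+ * (Sketch only: a VF's event handler might dispatch on the event code,
+ * e.g.
+ *
+ *	switch (vpe->event) {
+ *	case VIRTCHNL_EVENT_LINK_CHANGE:
+ *		netif_carrier_on(netdev);
+ *		break;
+ *	case VIRTCHNL_EVENT_RESET_IMPENDING:
+ *		schedule_work(&reset_task);
+ *		break;
+ *	default:
+ *		break;
+ *	}
+ *
+ * where vpe points at the struct virtchnl_pf_event defined below, and
+ * netdev and reset_task are assumptions of this example.)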
+ */
+enum virtchnl_event_codes {
+	VIRTCHNL_EVENT_UNKNOWN = 0,
+	VIRTCHNL_EVENT_LINK_CHANGE,
+	VIRTCHNL_EVENT_RESET_IMPENDING,
+	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO		0
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct virtchnl_pf_event {
+	enum virtchnl_event_codes event;
+	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities then use link_event, else use link_event_adv
+		 * to get the speed and link information. The ability to
+		 * understand new speeds is indicated by setting the
+		 * capability flag VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the
+		 * vf_cap_flags parameter of struct virtchnl_vf_resource,
+		 * and can be used to determine which link event struct to
+		 * use below.
+		 */
+		struct {
+			enum virtchnl_link_speed link_speed;
+			u8 link_status;
+		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+		} link_event_adv;
+	} event_data;
+
+	int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request the PF to map IWARP vectors to IWARP
+ * queues. The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have both an AEQ and a CEQ attached to it. However, since
+ * there is a single AEQ per VF IWARP instance, most vectors will have an
+ * INVALID_IDX for the AEQ and a valid idx for the CEQ.
+ * There will never be a case where multiple CEQs are attached to a single
+ * vector.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_iwarp_qv_info {
+	u32 v_idx; /* msix_vector */
+	u16 ceq_idx;
+	u16 aeq_idx;
+	u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+	u32 num_vectors;
+	struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+/* Since VF messages are limited by u16 size, precalculate the maximum
+ * possible values of nested elements in virtchnl structures that the
+ * virtual channel can possibly handle in a single message.
+ */
+enum virtchnl_vector_limits {
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
+		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+		sizeof(struct virtchnl_queue_pair_info),
+
+	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
+		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+		sizeof(struct virtchnl_vector_map),
+
+	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
+		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+		sizeof(struct virtchnl_ether_addr),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+		sizeof(u16),
+
+	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX =
+		((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
+		sizeof(struct virtchnl_iwarp_qv_info),
+
+	VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
+		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+		sizeof(struct virtchnl_channel_info),
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF.
+ * When the PF initiates a reset, it writes 0.
+ * When the reset is complete, it writes 1.
+ * When the PF detects that the VF has recovered, it writes 2.
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
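+ * (Worked example: 0xDEADBEEF & 0x3 == 3, which matches none of the
+ * states below, so a masked read of 3 simply means the hardware is
+ * still in reset and the register should be polled again.)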
+ */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. */ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + + if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs > + VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + + if (vimi->num_vectors == 0 || vimi->num_vectors > + VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + + if (veal->num_elements == 0 || veal->num_elements > + VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) { + err_msg_format = true; + break; + } + + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + + if (vfl->num_elements == 0 || vfl->num_elements > + VIRTCHNL_OP_ADD_DEL_VLAN_MAX) { + err_msg_format = true; + break; + } + + valid_len += vfl->num_elements * sizeof(u16); + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. 
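+		 * (For example, msglen == 0 is the only case rejected
+		 * here; any length in 1..65535 is accepted as-is and
+		 * left to the RDMA client code to validate.)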
+ */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + + if (qv->num_vectors == 0 || qv->num_vectors > + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) { + err_msg_format = true; + break; + } + + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + + if (vrk->key_len == 0) { + /* zero length is allowed as input */ + break; + } + + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries == 0) { + /* zero entries is allowed as input */ + break; + } + + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; + case VIRTCHNL_OP_ENABLE_CHANNELS: + valid_len = sizeof(struct virtchnl_tc_info); + if (msglen >= valid_len) { + struct virtchnl_tc_info *vti = + (struct virtchnl_tc_info *)msg; + + if (vti->num_tc == 0 || vti->num_tc > + VIRTCHNL_OP_ENABLE_CHANNELS_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vti->num_tc - 1) * + sizeof(struct virtchnl_channel_info); + } + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: + case VIRTCHNL_OP_DEL_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_STATUS_ERR_PARAM; + } + /* few more checks */ + if (err_msg_format || valid_len != msglen) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/virtchnl_inline_ipsec.h b/drivers/net/ethernet/intel/iavf/virtchnl_inline_ipsec.h new file mode 100644 index 000000000000..93880652d74f --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/virtchnl_inline_ipsec.h @@ -0,0 +1,420 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2013, Intel Corporation. 
 */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SELECTED_SA_DESTROY	0
+#define VIRTCHNL_IPSEC_ALL_SA_DESTROY		1
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_NO_ALG		0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using SHA-1 (160 bit) algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_3DES_CBC	15 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	16 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	17 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	18 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	19 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 20 /* ChaCha20-Poly1305 AEAD algorithm */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* Not all fields are always valid; if a field does not apply, every bit
+ * in it is set to 1.
+ */
+struct virtchnl_algo_cap {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+};
+
+/* The VF records the crypto capabilities received over virtchnl here. */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+};
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * The VF passes a virtchnl_ipsec_cap to the PF,
+ * and the PF returns its IPsec capabilities over virtchnl.
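+ *
+ * (Illustrative sketch, not part of this header: a VF could scan the
+ * returned capabilities for AES-GCM support roughly as
+ *
+ *	for (i = 0; i < cap->crypto_cap_num; i++)
+ *		for (j = 0; j < cap->cap[i].algo_cap_num; j++)
+ *			if (cap->cap[i].algo_cap_list[j].algo_type ==
+ *			    VIRTCHNL_AES_GCM)
+ *				have_gcm = true;
+ *
+ * where cap points at the struct virtchnl_ipsec_cap defined below and
+ * have_gcm is a local of this example.)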
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* type of udp_encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* SA index mode - !0:enable/0:disable */
+	u8 sa_index_sw_enabled;
+
+	/* auditing mode - !0:enable/0:disable */
+	u8 audit_enabled;
+
+	/* lifetime byte limit - !0:enable/0:disable */
+	u8 byte_limit_enabled;
+
+	/* drop on authentication failure - !0:enable/0:disable */
+	u8 drop_on_auth_fail_enabled;
+
+	/* anti-replay window check - !0:enable/0:disable */
+	u8 arw_check_enabled;
+
+	/* number of supported crypto capabilities */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+};
+
+/* uses desc_id to record the format of the rx descriptor */
+struct virtchnl_rx_desc_fmt {
+	u16 desc_id;
+};
+
+/* uses desc_id to record the format of the tx descriptor */
+struct virtchnl_tx_desc_fmt {
+	u8 desc_num;
+	u16 desc_ids[VIRTCHNL_IPSEC_MAX_TX_DESC_NUM];
+};
+
+/* configuration of a crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+};
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * The VF sends this SA configuration to the PF using virtchnl;
+ * the PF creates the SA as configured and returns a unique index (sa_idx)
+ * for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SA salt */
+	u32 salt;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
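+	 * (For illustration: a driver would typically set the soft limit
+	 * below the hard limit so it can rekey before traffic is dropped,
+	 * e.g.
+	 *
+	 *	cfg.byte_limit_soft = cfg.byte_limit_hard - rekey_margin;
+	 *
+	 * where rekey_margin is a hypothetical driver-chosen headroom.)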
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * The VF sends the index of the SA to update, plus the new ESN value;
+ * the PF updates the SA accordingly.
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * The VF sends the indices of the SAs to destroy; the PF destroys them
+ * accordingly. The flag indicates whether all SAs or just the selected
+ * SAs will be destroyed.
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* VIRTCHNL_IPSEC_SELECTED_SA_DESTROY: selected SA will be destroyed.
+	 * VIRTCHNL_IPSEC_ALL_SA_DESTROY: all SA will be destroyed.
+	 */
+	u8 flag;
+
+	u8 pad1; /* padding */
+	u16 pad2; /* padding */
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * The VF sends this SA read request to the PF using virtchnl;
+ * the PF reads the SA and returns the configuration of the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1.
Setting to 127 + * represents ARW window size of 128 + */ + u8 arw_size; + + /* reserved */ + u8 reserved1; + + /* top of anti-replay-window */ + u64 arw_top; + + /* anti-replay-window */ + u8 arw[16]; + + /* packets processed */ + u64 packets_processed; + + /* bytes processed */ + u64 bytes_processed; + + /* packets dropped */ + u32 packets_dropped; + + /* authentication failures */ + u32 auth_fails; + + /* ARW check failures */ + u32 arw_fails; + + /* type of esn - enable/disable */ + u8 esn; + + /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ + u8 virtchnl_direction; + + /* SA security parameter index */ + u32 spi; + + /* SA salt */ + u32 salt; + + /* high 32 bits of esn */ + u32 esn_hi; + + /* low 32 bits of esn */ + u32 esn_low; + + /* SA Domain. Used to logical separate an SADB into groups. + * PF drivers supporting a single group ignore this field. + */ + u16 sa_domain; + + /* SPD reference. Used to link an SA with its policy. + * PF drivers may ignore this field. + */ + u16 spd_ref; + + /* crypto configuration. Salt and keys are set to 0 */ + struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; +}; + +#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 3a6b3950eb0e..171f0b625407 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -934,7 +934,7 @@ void ice_deinit_hw(struct ice_hw *hw) */ enum ice_status ice_check_reset(struct ice_hw *hw) { - u32 cnt, reg = 0, grst_delay; + u32 cnt, reg = 0, grst_delay, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. @@ -956,13 +956,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw) return ICE_ERR_RESET_FAILED; } -#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \ - GLNVM_ULD_GLOBR_DONE_M) +#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ + GLNVM_ULD_PCIER_DONE_1_M |\ + GLNVM_ULD_CORER_DONE_M |\ + GLNVM_ULD_GLOBR_DONE_M |\ + GLNVM_ULD_POR_DONE_M |\ + GLNVM_ULD_POR_DONE_1_M |\ + GLNVM_ULD_PCIER_DONE_2_M) + + uld_mask = ICE_RESET_DONE_MASK; /* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { - reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK; - if (reg == ICE_RESET_DONE_MASK) { + reg = rd32(hw, GLNVM_ULD) & uld_mask; + if (reg == uld_mask) { ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. 
%d\n", cnt); break; diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 2353166c654e..c68709c7ef81 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -948,7 +948,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ice_sq_done(hw, cq)) break; - mdelay(1); + udelay(ICE_CTL_Q_SQ_CMD_USEC); total_delay++; } while (total_delay < cq->sq_cmd_timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 44945c2165d8..4df9da359135 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -31,8 +31,9 @@ enum ice_ctl_q { ICE_CTL_Q_MAILBOX, }; -/* Control Queue default settings */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ +/* Control Queue timeout settings - max delay 250ms */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 7e23034df955..62673e27af0e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2916,13 +2916,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) else return -EINVAL; - /* Tell the OS link is going down, the link will go back up when fw - * says it is ready asynchronously - */ - ice_print_link_msg(vsi, false); - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - /* Set the FC mode and only restart AN if link is up */ status = ice_set_fc(pi, &aq_failures, link_up); @@ -3368,10 +3361,17 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct ice_vsi *vsi = np->vsi; if (q_num < 0) { - int i; + int v_idx; - ice_for_each_q_vector(vsi, i) { - if (ice_set_q_coalesce(vsi, ec, i)) + ice_for_each_q_vector(vsi, v_idx) { + /* In some cases if DCB is configured the num_[rx|tx]q + * can be less than vsi->num_q_vectors. 
This check + * accounts for that so we don't report a false failure + */ + if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq) + goto set_complete; + + if (ice_set_q_coalesce(vsi, ec, v_idx)) return -EINVAL; } goto set_complete; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 152fbd556e9b..9138b19de87e 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -273,8 +273,14 @@ #define GLNVM_GENS_SR_SIZE_S 5 #define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5) #define GLNVM_ULD 0x000B6008 +#define GLNVM_ULD_PCIER_DONE_M BIT(0) +#define GLNVM_ULD_PCIER_DONE_1_M BIT(1) #define GLNVM_ULD_CORER_DONE_M BIT(3) #define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define GLNVM_ULD_POR_DONE_M BIT(5) +#define GLNVM_ULD_POR_DONE_1_M BIT(8) +#define GLNVM_ULD_PCIER_DONE_2_M BIT(9) +#define GLNVM_ULD_PE_DONE_M BIT(10) #define GLPCI_CNF2 0x000BE004 #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) #define PF_FUNC_RID 0x0009E880 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 214cd6eca405..2408f0de95fc 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3970,8 +3970,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) } ice_for_each_txq(vsi, i) { - vsi->tx_rings[i]->netdev = vsi->netdev; - err = ice_setup_tx_ring(vsi->tx_rings[i]); + struct ice_ring *ring = vsi->tx_rings[i]; + + if (!ring) + return -EINVAL; + + ring->netdev = vsi->netdev; + err = ice_setup_tx_ring(ring); if (err) break; } @@ -3996,8 +4001,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } ice_for_each_rxq(vsi, i) { - vsi->rx_rings[i]->netdev = vsi->netdev; - err = ice_setup_rx_ring(vsi->rx_rings[i]); + struct ice_ring *ring = vsi->rx_rings[i]; + + if (!ring) + return -EINVAL; + + ring->netdev = vsi->netdev; + err = ice_setup_rx_ring(ring); if (err) break; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index b45797f39b2f..e92a00a61755 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -317,8 +317,9 @@ void ice_free_vfs(struct ice_pf *pf) pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { - /* disable VF qp mappings */ + /* disable VF qp mappings and set VF disable state */ ice_dis_vf_mappings(&pf->vf[i]); + set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } @@ -1287,9 +1288,12 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf) if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) return; - /* verify if the VF is in either init or active before proceeding */ - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && - !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(ICE_VF_STATE_DIS, vf->vf_states)) return; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; @@ -1869,8 +1873,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; + struct ice_eth_stats stats = { 0 }; struct ice_pf *pf = vf->pf; - struct ice_eth_stats stats; struct ice_vsi *vsi; if 
(!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -1889,7 +1893,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - memset(&stats, 0, sizeof(struct ice_eth_stats)); ice_update_eth_stats(vsi); stats = vsi->eth_stats; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 8a6ef3514129..438b42ce2cd9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) dev_spec->module_plugged = true; if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { hw->phy.media_type = e1000_media_type_internal_serdes; - } else if (eth_flags->e100_base_fx) { + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { dev_spec->sgmii_active = true; hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e1000_base_t) { @@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; } - /* do not change link mode for 100BaseFX */ - if (dev_spec->eth_flags.e100_base_fx) - break; - /* change current link mode setting */ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; - if (hw->phy.media_type == e1000_media_type_copper) + if (dev_spec->sgmii_active) ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; else ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 3182b059bf55..8959418776f6 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev, advertising &= ~ADVERTISED_1000baseKX_Full; } } - if (eth_flags->e100_base_fx) { + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { supported |= SUPPORTED_100baseT_Full; advertising |= ADVERTISED_100baseT_Full; } diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile old mode 100644 new mode 100755 index 4fb0d9e3f2da..9112776ebdbd --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,20 +1,44 @@ -# SPDX-License-Identifier: GPL-2.0 -# Copyright(c) 1999 - 2018 Intel Corporation. +################################################################################ # -# Makefile for the Intel(R) 10GbE PCI Express ethernet driver +# Intel 10 Gigabit PCI Express Linux driver +# Copyright (c) 1999 - 2014 Intel Corporation. # +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +########################################################################### +# Driver files obj-$(CONFIG_IXGBE) += ixgbe.o +ixgbe-objs = ixgbe_main.o ixgbe_common.o ixgbe_api.o ixgbe_param.o \ + ixgbe_lib.o ixgbe_ethtool.o kcompat.o ixgbe_82598.o \ + ixgbe_82599.o ixgbe_x540.o ixgbe_x550.o ixgbe_sriov.o \ + ixgbe_mbx.o ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_82599.o \ + ixgbe_sysfs.o ixgbe_procfs.o ixgbe_phy.o ixgbe_dcb_nl.o \ + ixgbe_xsk.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ - ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ - ixgbe_xsk.o +ixgbe-${CONFIG_DCB} += ixgbe_dcb_nl.o -ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ - ixgbe_dcb_82599.o ixgbe_dcb_nl.o +ixgbe-${CONFIG_DEBUG_FS} += ixgbe_debugfs.o -ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o -ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o -ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o -ixgbe-$(CONFIG_IXGBE_IPSEC) += ixgbe_ipsec.o +ixgbe-${CONFIG_FCOE:m=y} += ixgbe_fcoe.o + +ixgbe-$(CONFIG_PTP_1588_CLOCK:m=y) += ixgbe_ptp.o + +ixgbe-${CONFIG_SYSFS} += ixgbe_sysfs.o \ No newline at end of file diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 39e73ad60352..a57e05b44c08 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,71 +1,102 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #ifndef _IXGBE_H_ #define _IXGBE_H_ -#include -#include +#include + #include #include -#include -#include +#include + +#ifdef SIOCETHTOOL +#include +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) #include -#include -#include - -#include -#include -#include - -#include "ixgbe_type.h" -#include "ixgbe_common.h" -#include "ixgbe_dcb.h" -#if IS_ENABLED(CONFIG_FCOE) -#define IXGBE_FCOE -#include "ixgbe_fcoe.h" -#endif /* IS_ENABLED(CONFIG_FCOE) */ -#ifdef CONFIG_IXGBE_DCA +#endif +/* Can't use IS_ENABLED until after kcompat is loaded */ +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define IXGBE_DCA #include #endif -#include "ixgbe_ipsec.h" +#include "ixgbe_dcb.h" +#include "kcompat.h" + +#ifdef HAVE_XDP_BUFF_RXQ #include +#endif -/* common prefix used by pr_<> macros */ -#undef pr_fmt -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#ifdef HAVE_NDO_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +#ifdef HAVE_SCTP +#include +#endif + +#ifdef HAVE_INCLUDE_LINUX_MDIO_H +#include +#endif + +#if IS_ENABLED(CONFIG_FCOE) +#include "ixgbe_fcoe.h" +#endif /* CONFIG_FCOE */ + +#include "ixgbe_api.h" + +#include "ixgbe_common.h" + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? 
\ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, \ + "%s: " fmt, __func__, ## args)) : NULL) + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ +#include +#include +#include +#endif /* TX/RX descriptor defines */ -#define IXGBE_DEFAULT_TXD 512 -#define IXGBE_DEFAULT_TX_WORK 256 -#define IXGBE_MAX_TXD 4096 -#define IXGBE_MIN_TXD 64 +#define IXGBE_DEFAULT_TXD 512 +#define IXGBE_DEFAULT_TX_WORK 256 +#define IXGBE_MAX_TXD 4096 +#define IXGBE_MIN_TXD 64 -#if (PAGE_SIZE < 8192) -#define IXGBE_DEFAULT_RXD 512 -#else -#define IXGBE_DEFAULT_RXD 128 -#endif -#define IXGBE_MAX_RXD 4096 -#define IXGBE_MIN_RXD 64 +#define IXGBE_DEFAULT_RXD 512 +#define IXGBE_MAX_RXD 4096 +#define IXGBE_MIN_RXD 64 + +#define IXGBE_ETH_P_LLDP 0x88CC /* flow control */ -#define IXGBE_MIN_FCRTL 0x40 +#define IXGBE_MIN_FCRTL 0x40 #define IXGBE_MAX_FCRTL 0x7FF80 -#define IXGBE_MIN_FCRTH 0x600 +#define IXGBE_MIN_FCRTH 0x600 #define IXGBE_MAX_FCRTH 0x7FFF0 -#define IXGBE_DEFAULT_FCPAUSE 0xFFFF -#define IXGBE_MIN_FCPAUSE 0 -#define IXGBE_MAX_FCPAUSE 0xFFFF +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF +#define IXGBE_MIN_FCPAUSE 0 +#define IXGBE_MAX_FCPAUSE 0xFFFF /* Supported Rx Buffer Sizes */ -#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ -#define IXGBE_RXBUFFER_1536 1536 -#define IXGBE_RXBUFFER_2K 2048 -#define IXGBE_RXBUFFER_3K 3072 -#define IXGBE_RXBUFFER_4K 4096 -#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_1536 1536 +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define IXGBE_RXBUFFER_7K 7168 +#define IXGBE_RXBUFFER_8K 8192 +#define IXGBE_RXBUFFER_15K 15360 +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ /* Attempt to maximize the headroom available for incoming frames. We * use a 2K buffer for receives and need 1536/1534 to store the data for @@ -127,13 +158,26 @@ static inline int ixgbe_skb_pad(void) * Since netdev_alloc_skb now allocates a page fragment we can use a value * of 256 and the resultant skb will have a truesize of 960 or less. */ -#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) /* How many Rx Buffers do we bundle into one write to the hardware ? 
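 * (Sketch of the idea, not code from this driver: the Rx refill path
 * typically advances the ring tail only once per bundle, e.g.
 *
 *	if (++refilled % IXGBE_RX_BUFFER_WRITE == 0)
 *		writel(ring->next_to_use, ring->tail);
 *
 * amortizing one MMIO write across 16 descriptors; refilled is a local
 * counter assumed by this example.)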
*/ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#ifdef HAVE_STRUCT_DMA_ATTRS +#define IXGBE_RX_DMA_ATTR NULL +#else #define IXGBE_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +/* assume the kernel supports 8021p to avoid stripping vlan tags */ +#ifdef IXGBE_DISABLE_8021P_SUPPORT +#ifndef HAVE_8021P_SUPPORT +#define HAVE_8021P_SUPPORT +#endif +#endif /* IXGBE_DISABLE_8021P_SUPPORT */ enum ixgbe_tx_flags { /* cmd_type flags */ @@ -145,52 +189,85 @@ enum ixgbe_tx_flags { IXGBE_TX_FLAGS_CC = 0x08, IXGBE_TX_FLAGS_IPV4 = 0x10, IXGBE_TX_FLAGS_CSUM = 0x20, - IXGBE_TX_FLAGS_IPSEC = 0x40, /* software defined flags */ - IXGBE_TX_FLAGS_SW_VLAN = 0x80, - IXGBE_TX_FLAGS_FCOE = 0x100, + IXGBE_TX_FLAGS_SW_VLAN = 0x40, + IXGBE_TX_FLAGS_FCOE = 0x80, }; /* VLAN info */ #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 -#define IXGBE_MAX_VF_MC_ENTRIES 30 -#define IXGBE_MAX_VF_FUNCTIONS 64 -#define IXGBE_MAX_VFTA_ENTRIES 128 -#define MAX_EMULATION_MAC_ADDRS 16 -#define IXGBE_MAX_PF_MACVLANS 15 -#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) -#define IXGBE_82599_VF_DEVICE_ID 0x10ED -#define IXGBE_X540_VF_DEVICE_ID 0x1515 +#define IXGBE_MAX_RX_DESC_POLL 10 +#define IXGBE_MAX_VF_MC_ENTRIES 30 +#define IXGBE_MAX_VF_FUNCTIONS 64 +#define IXGBE_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define IXGBE_MAX_PF_MACVLANS 15 + +/* must account for pools assigned to VFs. */ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = IXGBE_READ_REG(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ + u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } + +struct vf_stats { + u64 gprc; + u64 gorc; + u64 gptc; + u64 gotc; + u64 mprc; +}; struct vf_data_storage { struct pci_dev *vfdev; unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; bool clear_to_send; + struct vf_stats vfstats; + struct vf_stats last_vfstats; + struct vf_stats saved_rst_vfstats; bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; u16 tx_rate; u8 spoofchk_enabled; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN bool rss_query_enabled; +#endif u8 trusted; int xcast_mode; unsigned int vf_api; }; -enum ixgbevf_xcast_modes { - IXGBEVF_XCAST_MODE_NONE = 0, - IXGBEVF_XCAST_MODE_MULTI, - IXGBEVF_XCAST_MODE_ALLMULTI, - IXGBEVF_XCAST_MODE_PROMISC, -}; - struct vf_macvlans { struct list_head l; int vf; @@ -200,11 +277,17 @@ struct vf_macvlans { }; #define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ @@ -213,7 +296,12 @@ struct ixgbe_tx_buffer { unsigned long time_stamp; union { struct sk_buff *skb; +#ifdef HAVE_XDP_FRAME_STRUCT struct xdp_frame *xdpf; +#else + /* XDP uses address ptr on irq_clean */ + void *data; +#endif }; unsigned int bytecount; unsigned short gso_segs; @@ -226,22 +314,31 @@ struct ixgbe_tx_buffer { struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT union { struct { struct page *page; __u32 page_offset; __u16 pagecnt_bias; }; +#ifdef HAVE_AF_XDP_ZC_SUPPORT struct { void *addr; u64 handle; }; +#endif }; +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ }; struct ixgbe_queue_stats { u64 packets; u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -261,31 +358,30 @@ struct ixgbe_rx_queue_stats { }; #define IXGBE_TS_HDR_LEN 8 - enum ixgbe_ring_state_t { +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT __IXGBE_RX_3K_BUFFER, __IXGBE_RX_BUILD_SKB_ENABLED, +#endif __IXGBE_RX_RSC_ENABLED, __IXGBE_RX_CSUM_UDP_ZERO_ERR, +#if IS_ENABLED(CONFIG_FCOE) __IXGBE_RX_FCOE, +#endif __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, __IXGBE_TX_XDP_RING, +#ifdef HAVE_AF_XDP_ZC_SUPPORT __IXGBE_TX_DISABLED, +#endif }; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT #define ring_uses_build_skb(ring) \ test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) - -struct ixgbe_fwd_adapter { - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct net_device *netdev; - unsigned int tx_base_queue; - unsigned int rx_base_queue; - int pool; -}; +#endif #define check_for_tx_hang(ring) \ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) @@ -305,6 +401,10 @@ struct ixgbe_fwd_adapter { set_bit(__IXGBE_TX_XDP_RING, &(ring)->state) #define clear_ring_xdp(ring) \ clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state) +#define netdev_ring(ring) (ring->netdev) +#define ring_queue_index(ring) (ring->queue_index) + + struct ixgbe_ring { struct ixgbe_ring *next; /* pointer to next ring in q_vector */ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ @@ -332,10 +432,16 @@ struct ixgbe_ring { u16 next_to_use; u16 next_to_clean; +#ifdef HAVE_PTP_1588_CLOCK unsigned long last_rx_timestamp; +#endif union { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + u16 rx_buf_len; +#else u16 next_to_alloc; +#endif struct { u8 atr_sample_rate; u8 atr_count; @@ 
-344,16 +450,22 @@ struct ixgbe_ring { u8 dcb_tc; struct ixgbe_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 struct u64_stats_sync syncp; +#endif union { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; }; +#ifdef HAVE_XDP_BUFF_RXQ struct xdp_rxq_info xdp_rxq; +#ifdef HAVE_AF_XDP_ZC_SUPPORT struct xdp_umem *xsk_umem; struct zero_copy_allocator zca; /* ZC allocator anchor */ - u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ + u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ u16 rx_buf_len; +#endif +#endif } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -361,43 +473,49 @@ enum ixgbe_ring_f_enum { RING_F_VMDQ, /* SR-IOV uses the same ring feature */ RING_F_RSS, RING_F_FDIR, -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) RING_F_FCOE, -#endif /* IXGBE_FCOE */ - - RING_F_ARRAY_SIZE /* must be last in enum set */ +#endif /* CONFIG_FCOE */ + RING_F_ARRAY_SIZE /* must be last in enum set */ }; +#define IXGBE_MAX_DCB_INDICES 8 #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_RSS_INDICES_X550 63 #define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define IXGBE_MAX_L2A_QUEUES 4 -#define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 63 +#define IXGBE_MAX_FDIR_INDICES 63 +#if IS_ENABLED(CONFIG_FCOE) +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#endif /* CONFIG_FCOE */ +#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ u16 mask; /* Mask used for feature to ring mapping */ u16 offset; /* offset to start of feature */ -} ____cacheline_internodealigned_in_smp; +}; #define IXGBE_82599_VMDQ_8Q_MASK 0x78 #define IXGBE_82599_VMDQ_4Q_MASK 0x7C #define IXGBE_82599_VMDQ_2Q_MASK 0x7E +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT /* * FCoE requires that all Rx buffers be over 2200 bytes in length. Since * this is twice the size of a half page we need to double the page order * for FCoE enabled Rx queues. 
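 * (Worked example: with 4 KiB pages, a half page is 2048 bytes, below
 * the 2200-byte FCoE minimum, so such rings set __IXGBE_RX_3K_BUFFER
 * and use order-1 (8 KiB) pages with 3 KiB buffers, as the helpers
 * below compute.)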
*/ -static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring __maybe_unused *ring) { +#if MAX_SKB_FRAGS < 8 + return ALIGN(IXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) return IXGBE_RXBUFFER_3K; #if (PAGE_SIZE < 8192) @@ -405,9 +523,10 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) return IXGBE_MAX_2K_FRAME_BUILD_SKB; #endif return IXGBE_RXBUFFER_2K; +#endif } -static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring __maybe_unused *ring) { #if (PAGE_SIZE < 8192) if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) @@ -417,11 +536,14 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) } #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) +#endif #define IXGBE_ITR_ADAPTIVE_MIN_INC 2 #define IXGBE_ITR_ADAPTIVE_MIN_USECS 10 -#define IXGBE_ITR_ADAPTIVE_MAX_USECS 126 +#define IXGBE_ITR_ADAPTIVE_MAX_USECS 84 #define IXGBE_ITR_ADAPTIVE_LATENCY 0x80 #define IXGBE_ITR_ADAPTIVE_BULK 0x00 +#define IXGBE_ITR_ADAPTIVE_MASK_USECS (IXGBE_ITR_ADAPTIVE_LATENCY - \ + IXGBE_ITR_ADAPTIVE_MIN_INC) struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ @@ -437,35 +559,122 @@ struct ixgbe_ring_container { #define ixgbe_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ - ? 8 : 1) -#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS -/* MAX_Q_VECTORS of these are allocated, +#define IXGBE_IFNAMSIZ (IFNAMSIZ + 9) + +/* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. 
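The reworked adaptive-ITR constants just above are worth a quick sanity check. As I read them, IXGBE_ITR_ADAPTIVE_MASK_USECS = LATENCY - MIN_INC = 0x80 - 2 = 126 (0x7e), a mask that strips both the 0x80 latency flag bit and the low bit that the MIN_INC granularity makes meaningless, while the hard MAX_USECS clamp drops from 126 to 84. A minimal check (constant names shortened for the sketch):

    #include <stdio.h>

    #define ITR_ADAPTIVE_MIN_INC 2u
    #define ITR_ADAPTIVE_LATENCY 0x80u
    #define ITR_ADAPTIVE_MASK_USECS (ITR_ADAPTIVE_LATENCY - ITR_ADAPTIVE_MIN_INC)

    int main(void)
    {
        /* a value with the latency flag set plus an odd usecs count */
        unsigned int itr = ITR_ADAPTIVE_LATENCY | 97u;

        printf("mask         = %u (0x%02x)\n",
               ITR_ADAPTIVE_MASK_USECS, ITR_ADAPTIVE_MASK_USECS); /* 126, 0x7e */
        printf("masked usecs = %u\n", itr & ITR_ADAPTIVE_MASK_USECS); /* 96 */
        return 0;
    }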
*/ struct ixgbe_q_vector { struct ixgbe_adapter *adapter; -#ifdef CONFIG_IXGBE_DCA - int cpu; /* CPU for DCA */ -#endif - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ - u16 itr; /* Interrupt throttle rate written to EITR */ + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ struct ixgbe_ring_container rx, tx; struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT cpumask_t affinity_mask; - int numa_node; +#endif + int node; struct rcu_head rcu; /* to avoid race with update stats on free */ - char name[IFNAMSIZ + 9]; + char name[IXGBE_IFNAMSIZ]; + bool netpoll_rx; + +#ifdef HAVE_NDO_BUSY_POLL + atomic_t state; +#endif /* HAVE_NDO_BUSY_POLL */ /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; -#ifdef CONFIG_IXGBE_HWMON +#ifdef HAVE_NDO_BUSY_POLL +enum ixgbe_qv_state_t { + IXGBE_QV_STATE_IDLE = 0, + IXGBE_QV_STATE_NAPI, + IXGBE_QV_STATE_POLL, + IXGBE_QV_STATE_DISABLE +}; + +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == IXGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while napi had it */ +static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->rx.ring->stats.yields++; +#endif + return rc == IXGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_DISABLE); + + return rc == IXGBE_QV_STATE_IDLE; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +#ifdef IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 #define IXGBE_HWMON_TYPE_TEMP 1 @@ -480,13 +689,18 @@ struct 
hwmon_attr { }; struct hwmon_buff { +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS struct attribute_group group; const struct attribute_group *groups[2]; struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1]; struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4]; +#else + struct device *device; + struct hwmon_attr *hwmon_list; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ unsigned int n_hwmon; }; -#endif /* CONFIG_IXGBE_HWMON */ +#endif /* IXGBE_HWMON */ /* * microsecond values for various ITR rates shifted by 2 to fit itr register @@ -495,6 +709,7 @@ struct hwmon_buff { #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 +#define IXGBE_16K_ITR 248 #define IXGBE_12K_ITR 336 /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ @@ -504,6 +719,7 @@ static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); } +/* ixgbe_desc_unused - calculate if we have unused descriptors */ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) { u16 ntc = ring->next_to_clean; @@ -512,26 +728,25 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } -#define IXGBE_RX_DESC(R, i) \ +#define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBE_TX_DESC(R, i) \ +#define IXGBE_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBE_TX_CTXTDESC(R, i) \ +#define IXGBE_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ -#ifdef IXGBE_FCOE -/* Use 3K as the baby jumbo frame size for FCoE */ -#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 -#endif /* IXGBE_FCOE */ +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 +#if IS_ENABLED(CONFIG_FCOE) +/* use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* CONFIG_FCOE */ -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) -#define MAX_MSIX_VECTORS_82599 64 -#define MAX_Q_VECTORS_82599 64 -#define MAX_MSIX_VECTORS_82598 18 -#define MAX_Q_VECTORS_82598 16 +#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64 +#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16 struct ixgbe_mac_addr { u8 addr[ETH_ALEN]; @@ -543,24 +758,42 @@ struct ixgbe_mac_addr { #define IXGBE_MAC_STATE_MODIFIED 0x2 #define IXGBE_MAC_STATE_IN_USE 0x4 -#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 -#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 +#ifdef IXGBE_PROCFS +struct ixgbe_therm_proc_data { + struct ixgbe_hw *hw; + struct ixgbe_thermal_diode_data *sensor_data; +}; -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#endif /* IXGBE_PROCFS */ +/* + * Only for array allocations in our adapter struct. On 82598, there will be + * unused entries in the array, but that's not a big deal. Also, in 82599, + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. This is different than 82598, which is limited to 16. 
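Backing up to the busy-poll helpers added a little above: ixgbe_qv_lock_napi(), ixgbe_qv_lock_poll() and ixgbe_qv_disable() implement one lock-free ownership protocol over a single atomic word. Whoever moves the state away from IDLE first owns the vector; everyone else yields. A user-space model of the same protocol, using C11 atomics in place of the kernel's atomic_cmpxchg (all names invented for the sketch):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum qv_state { QV_IDLE, QV_NAPI, QV_POLL, QV_DISABLE };

    /* take ownership only if the vector is currently idle, exactly what
     * atomic_cmpxchg() does in the helpers above */
    static bool qv_lock(atomic_int *state, int owner)
    {
        int expected = QV_IDLE;

        return atomic_compare_exchange_strong(state, &expected, owner);
    }

    static void qv_unlock(atomic_int *state)
    {
        atomic_store(state, QV_IDLE); /* reset state to idle */
    }

    int main(void)
    {
        atomic_int state = QV_IDLE;

        printf("napi locks:       %d\n", qv_lock(&state, QV_NAPI));    /* 1 */
        printf("busy-poll yields: %d\n", qv_lock(&state, QV_POLL));    /* 0 */
        qv_unlock(&state);
        printf("disable locks:    %d\n", qv_lock(&state, QV_DISABLE)); /* 1 */
        return 0;
    }

The DISABLE state is deliberately never released here: once teardown wins the race, neither the NAPI path nor a busy-polling socket can reacquire the vector until it is re-initialised with init_lock.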
+ */ +#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599 +#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599 + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* default to trying for four seconds */ -#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) -#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ /* board specific private data structure */ struct ixgbe_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */ +#else unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ /* OS defined structs */ struct net_device *netdev; struct bpf_prog *xdp_prog; struct pci_dev *pdev; - struct mii_bus *mii_bus; unsigned long state; @@ -568,72 +801,98 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. */ u32 flags; -#define IXGBE_FLAG_MSI_ENABLED BIT(1) -#define IXGBE_FLAG_MSIX_ENABLED BIT(3) -#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4) -#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5) -#define IXGBE_FLAG_RX_PS_ENABLED BIT(6) -#define IXGBE_FLAG_DCA_ENABLED BIT(8) -#define IXGBE_FLAG_DCA_CAPABLE BIT(9) -#define IXGBE_FLAG_IMIR_ENABLED BIT(10) -#define IXGBE_FLAG_MQ_CAPABLE BIT(11) -#define IXGBE_FLAG_DCB_ENABLED BIT(12) -#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13) -#define IXGBE_FLAG_VMDQ_ENABLED BIT(14) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15) -#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16) -#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19) -#define IXGBE_FLAG_FCOE_CAPABLE BIT(20) -#define IXGBE_FLAG_FCOE_ENABLED BIT(21) -#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) -#define IXGBE_FLAG_SRIOV_ENABLED BIT(23) -#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) -#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) -#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) -#define IXGBE_FLAG_DCB_CAPABLE BIT(27) -#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28) +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#ifndef IXGBE_NO_LLI +#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4) +#endif + +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 6) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 7) +#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) +#else +#define IXGBE_FLAG_DCA_ENABLED (u32)0 +#define IXGBE_FLAG_DCA_CAPABLE (u32)0 +#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0 +#endif +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#if IS_ENABLED(CONFIG_FCOE) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 17) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 18) +#endif /* CONFIG_FCOE */ +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define 
IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define IXGBE_FLAG_MDD_ENABLED (u32)(1 << 29) +#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 30) +#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(31) + +/* preset defaults */ +#define IXGBE_FLAGS_82598_INIT (IXGBE_FLAG_MSI_CAPABLE | \ + IXGBE_FLAG_MSIX_CAPABLE | \ + IXGBE_FLAG_MQ_CAPABLE) + +#define IXGBE_FLAGS_82599_INIT (IXGBE_FLAGS_82598_INIT | \ + IXGBE_FLAG_SRIOV_CAPABLE) + +#define IXGBE_FLAGS_X540_INIT IXGBE_FLAGS_82599_INIT + +#define IXGBE_FLAGS_X550_INIT (IXGBE_FLAGS_82599_INIT | \ + IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE BIT(0) -#define IXGBE_FLAG2_RSC_ENABLED BIT(1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) -#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) -#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) -#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) -#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) -#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) -#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) -#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) -#define IXGBE_FLAG2_EEE_ENABLED BIT(15) -#define IXGBE_FLAG2_RX_LEGACY BIT(16) -#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) -#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) +#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 3) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 4) +#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 5) +#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 8) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 9) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 10) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG2_EEE_CAPABLE (u32)(1 << 14) +#define IXGBE_FLAG2_EEE_ENABLED (u32)(1 << 15) +#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED (u32)(1 << 16) +#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 17) +#define IXGBE_FLAG2_VLAN_PROMISC (u32)(1 << 18) +#define IXGBE_FLAG2_RX_LEGACY (u32)(1 << 19) /* Tx fast path data */ int num_tx_queues; u16 tx_itr_setting; u16 tx_work_limit; - u64 tx_ipsec; + +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + __be16 vxlan_port; +#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + __be16 geneve_port; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ /* Rx fast path data */ int num_rx_queues; u16 rx_itr_setting; - u64 rx_ipsec; - - /* Port number used to identify VXLAN traffic */ - __be16 vxlan_port; - __be16 geneve_port; /* XDP */ int num_xdp_queues; struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; - unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */ /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -644,8 +903,8 @@ struct ixgbe_adapter { /* RX */ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; - int num_rx_pools; /* == num_rx_queues in 82598 */ - int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ + int num_rx_pools; /* does not include pools assigned to VFs */ + int num_rx_queues_per_pool; u64 
hw_csum_rx_error; u64 hw_rx_no_dma_resources; u64 rsc_total_count; @@ -655,32 +914,48 @@ struct ixgbe_adapter { u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; - struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS]; + struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - /* DCB parameters */ +#ifdef HAVE_DCBNL_IEEE struct ieee_pfc *ixgbe_ieee_pfc; struct ieee_ets *ixgbe_ieee_ets; +#endif struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; - u8 hw_tcs; u8 dcb_set_bitmap; u8 dcbx_cap; +#ifndef HAVE_MQPRIO + u8 dcb_tc; +#endif enum ixgbe_fc_mode last_lfc_mode; int num_q_vectors; /* current number of q_vectors for device */ - int max_q_vectors; /* true count of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; struct msix_entry *msix_entries; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + +#ifdef ETHTOOL_TEST u32 test_icr; struct ixgbe_ring test_tx_ring; struct ixgbe_ring test_rx_ring; +#endif /* structs defined in ixgbe_hw.h */ struct ixgbe_hw hw; u16 msg_enable; struct ixgbe_hw_stats stats; +#ifndef IXGBE_NO_LLI + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; +#endif /* IXGBE_NO_LLI */ + u32 *config_space; u64 tx_busy; unsigned int tx_ring_count; unsigned int xdp_ring_count; @@ -688,6 +963,9 @@ struct ixgbe_adapter { u32 link_speed; bool link_up; + + bool cloud_mode; + unsigned long sfp_poll_time; unsigned long link_check_timeout; @@ -702,20 +980,27 @@ struct ixgbe_adapter { u32 atr_sample_rate; spinlock_t fdir_perfect_lock; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) struct ixgbe_fcoe fcoe; -#endif /* IXGBE_FCOE */ - u8 __iomem *io_addr; /* Mainly for iounmap use */ +#endif /* CONFIG_FCOE */ + u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; + u16 bd_number; + +#ifdef HAVE_BRIDGE_ATTRIBS u16 bridge_mode; +#endif - char eeprom_id[NVM_VER_SIZE]; + char eeprom_id[32]; u16 eeprom_cap; - + bool netdev_registered; u32 interrupt_event; +#ifdef HAVE_ETHTOOL_SET_PHYS_ID u32 led_reg; +#endif +#ifdef HAVE_PTP_1588_CLOCK struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; struct work_struct ptp_tx_work; @@ -724,7 +1009,6 @@ struct ixgbe_adapter { unsigned long ptp_tx_start; unsigned long last_overflow_check; unsigned long last_rx_ptp_check; - unsigned long last_rx_timestamp; spinlock_t tmreg_lock; struct cyclecounter hw_cc; struct timecounter hw_tc; @@ -732,34 +1016,48 @@ struct ixgbe_adapter { u32 tx_hwtstamp_timeouts; u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; - void (*ptp_setup_sdp)(struct ixgbe_adapter *); + void (*ptp_setup_sdp) (struct ixgbe_adapter *); +#endif /* HAVE_PTP_1588_CLOCK */ - /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); unsigned int num_vfs; + unsigned int max_vfs; struct vf_data_storage *vfinfo; int vf_rate_link_speed; struct vf_macvlans vf_mvs; struct vf_macvlans *mv_list; - +#ifdef CONFIG_PCI_IOV u32 timer_event_accumulator; u32 vferr_refcount; +#endif struct ixgbe_mac_addr *mac_table; - struct kobject *info_kobj; -#ifdef CONFIG_IXGBE_HWMON +#ifdef IXGBE_SYSFS +#ifdef IXGBE_HWMON +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS struct hwmon_buff *ixgbe_hwmon_buff; -#endif /* CONFIG_IXGBE_HWMON */ -#ifdef CONFIG_DEBUG_FS +#else + struct hwmon_buff ixgbe_hwmon_buff; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ +#endif /* IXGBE_HWMON */ +#else /* IXGBE_SYSFS */ +#ifdef IXGBE_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + u64 
old_lsc; + struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS]; + struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS]; +#endif /* IXGBE_PROCFS */ +#endif /* IXGBE_SYSFS */ + +#ifdef HAVE_IXGBE_DEBUG_FS struct dentry *ixgbe_dbg_adapter; -#endif /*CONFIG_DEBUG_FS*/ - +#endif /*HAVE_IXGBE_DEBUG_FS*/ u8 default_up; - /* Bitmask indicating in use pools */ - DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1); - +#ifdef HAVE_TC_SETUP_CLSU32 #define IXGBE_MAX_LINK_HANDLE 10 struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; unsigned long tables; +#endif /* NETIF_F_HW_TC */ /* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode @@ -770,9 +1068,17 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 *rss_key; -#ifdef CONFIG_IXGBE_IPSEC - struct ixgbe_ipsec *ipsec; -#endif /* CONFIG_IXGBE_IPSEC */ +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + unsigned int indices; +#endif +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; +#endif }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -782,17 +1088,20 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) case ixgbe_mac_82599EB: case ixgbe_mac_X540: return IXGBE_MAX_RSS_INDICES; + break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: return IXGBE_MAX_RSS_INDICES_X550; + break; default: return 0; + break; } } struct ixgbe_fdir_filter { - struct hlist_node fdir_node; + struct hlist_node fdir_node; union ixgbe_atr_input filter; u16 sw_idx; u64 action; @@ -807,50 +1116,61 @@ enum ixgbe_state_t { __IXGBE_SERVICE_SCHED, __IXGBE_SERVICE_INITED, __IXGBE_IN_SFP_INIT, +#ifdef HAVE_PTP_1588_CLOCK __IXGBE_PTP_RUNNING, __IXGBE_PTP_TX_IN_PROGRESS, +#endif __IXGBE_RESET_REQUESTED, }; struct ixgbe_cb { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT union { /* Union defining head/tail partner */ struct sk_buff *head; struct sk_buff *tail; }; +#endif dma_addr_t dma; - u16 append_cnt; - bool page_released; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + bool page_released; +#endif }; #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) -enum ixgbe_boards { - board_82598, - board_82599, - board_X540, - board_X550, - board_X550EM_x, - board_x550em_x_fw, - board_x550em_a, - board_x550em_a_fw, -}; +/* ESX ixgbe CIM IOCTL definition */ -extern const struct ixgbe_info ixgbe_82598_info; -extern const struct ixgbe_info ixgbe_82599_info; -extern const struct ixgbe_info ixgbe_X540_info; -extern const struct ixgbe_info ixgbe_X550_info; -extern const struct ixgbe_info ixgbe_X550EM_x_info; -extern const struct ixgbe_info ixgbe_x550em_x_fw_info; -extern const struct ixgbe_info ixgbe_x550em_a_info; -extern const struct ixgbe_info ixgbe_x550em_a_fw_info; -#ifdef CONFIG_IXGBE_DCB -extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; -#endif +#ifdef IXGBE_SYSFS +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +#endif /* IXGBE_SYSFS */ +#ifdef IXGBE_PROCFS +void ixgbe_procfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_procfs_init(struct ixgbe_adapter *adapter); +int ixgbe_procfs_topdir_init(void); +void ixgbe_procfs_topdir_exit(void); +#endif /* IXGBE_PROCFS */ +extern struct dcbnl_rtnl_ops 
ixgbe_dcbnl_ops; +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max); + +u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index); + +/* needed by ixgbe_main.c */ +int ixgbe_validate_mac_addr(u8 *mc_addr); +void ixgbe_check_options(struct ixgbe_adapter *adapter); +void ixgbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by ixgbe_ethtool.c */ +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME extern char ixgbe_driver_name[]; +#else +extern const char ixgbe_driver_name[]; +#endif extern const char ixgbe_driver_version[]; -#ifdef IXGBE_FCOE -extern char ixgbe_default_device_descr[]; -#endif /* IXGBE_FCOE */ int ixgbe_open(struct net_device *netdev); int ixgbe_close(struct net_device *netdev); @@ -863,110 +1183,129 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); int ixgbe_setup_tx_resources(struct ixgbe_ring *); void ixgbe_free_rx_resources(struct ixgbe_ring *); void ixgbe_free_tx_resources(struct ixgbe_ring *); -void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_disable_rx(struct ixgbe_adapter *adapter); -void ixgbe_disable_tx(struct ixgbe_adapter *adapter); +void ixgbe_configure_rx_ring(struct ixgbe_adapter *, + struct ixgbe_ring *); +void ixgbe_configure_tx_ring(struct ixgbe_adapter *, + struct ixgbe_ring *); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id); -#ifdef CONFIG_PCI_IOV -void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); -#endif -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 queue); -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 queue); -void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); +void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter); +void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter); void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, - struct ixgbe_ring *); +bool ixgbe_is_ixgbe(struct pci_dev *pcidev); +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, + struct ixgbe_adapter *, + struct ixgbe_ring *); void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, - struct ixgbe_tx_buffer *); + struct ixgbe_tx_buffer *); void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); -void ixgbe_write_eitr(struct ixgbe_q_vector *); -int ixgbe_poll(struct napi_struct *napi, int budget); -int ethtool_ioctl(struct ifreq *ifr); -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue); -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask); -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id, u8 queue); -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id); -void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *mask); +void ixgbe_configure_rscctl(struct 
ixgbe_adapter *adapter, + struct ixgbe_ring *); +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *); +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) +void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *, u32); +#endif int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_fdir_filter *input, u16 sw_idx); void ixgbe_set_rx_mode(struct net_device *netdev); -#ifdef CONFIG_IXGBE_DCB -void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); -#endif +int ixgbe_write_mc_addr_list(struct net_device *netdev); int ixgbe_setup_tc(struct net_device *dev, u8 tc); void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); void ixgbe_do_reset(struct net_device *netdev); -#ifdef CONFIG_IXGBE_HWMON -void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); -int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); -#endif /* CONFIG_IXGBE_HWMON */ -#ifdef IXGBE_FCOE +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector); +int ixgbe_poll(struct napi_struct *napi, int budget); +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter); +void ixgbe_disable_tx_queue(struct ixgbe_adapter *adapter); +void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter); +void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr); +#endif + +#if IS_ENABLED(CONFIG_FCOE) void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); -int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u8 *hdr_len); +int ixgbe_fso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len); int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); + struct scatterlist *sgl, unsigned int sgc); +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); + struct scatterlist *sgl, unsigned int sgc); +#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE int ixgbe_fcoe_enable(struct net_device *netdev); int ixgbe_fcoe_disable(struct net_device *netdev); -#ifdef CONFIG_IXGBE_DCB -u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); +#else +int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter); +void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter); +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_OPS_GETAPP +u8 ixgbe_fcoe_getapp(struct net_device *netdev); +#endif /* HAVE_DCBNL_OPS_GETAPP */ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); -#endif /* CONFIG_IXGBE_DCB */ -int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); -int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info); +#endif /* CONFIG_DCB */ u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); -#endif /* IXGBE_FCOE */ -#ifdef CONFIG_DEBUG_FS +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +#endif +#endif /* CONFIG_FCOE */ + +#ifdef HAVE_IXGBE_DEBUG_FS void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); void 
ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); void ixgbe_dbg_init(void); void ixgbe_dbg_exit(void); -#else -static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} -static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} -static inline void ixgbe_dbg_init(void) {} -static inline void ixgbe_dbg_exit(void) {} -#endif /* CONFIG_DEBUG_FS */ +#endif /* HAVE_IXGBE_DEBUG_FS */ + static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); } +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_IEEE +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame); +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ + +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); +void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 queue); +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 queue); +int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool); +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); +#ifndef HAVE_VLAN_RX_REGISTER +void ixgbe_vlan_mode(struct net_device *, u32); +#else +#ifdef CONFIG_PCI_IOV +int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan); +#endif +#endif + +#ifdef HAVE_PTP_1588_CLOCK void ixgbe_ptp_init(struct ixgbe_adapter *adapter); -void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); +void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); -void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -987,56 +1326,18 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, rx_ring->last_rx_timestamp = jiffies; } -int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); +#endif /* HAVE_PTP_1588_CLOCK */ #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); #endif - -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); -s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); -#ifdef CONFIG_IXGBE_IPSEC -void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter); -void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter); -void ixgbe_ipsec_restore(struct 
ixgbe_adapter *adapter); -void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); -int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd); -void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf); -int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); -int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); -#else -static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { } -static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { } -static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { } -static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) { } -static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd) { return 0; } -static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, - u32 vf) { } -static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, - u32 *mbuf, u32 vf) { return -EACCES; } -static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, - u32 *mbuf, u32 vf) { return -EACCES; } -#endif /* CONFIG_IXGBE_IPSEC */ - -static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter) -{ - return !!adapter->xdp_prog; -} +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index eee277c1bedf..60695063ca57 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ -#include -#include -#include - -#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_82598.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" #include "ixgbe_phy.h" #define IXGBE_82598_MAX_TX_QUEUES 32 @@ -13,32 +12,45 @@ #define IXGBE_82598_RAR_ENTRIES 16 #define IXGBE_82598_MC_TBL_SIZE 128 #define IXGBE_82598_VFT_TBL_SIZE 128 -#define IXGBE_82598_RX_PB_SIZE 512 +#define IXGBE_82598_RX_PB_SIZE 512 -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete); +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data); - +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); /** - * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout - * @hw: pointer to the HW structure + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure * - * The defaults for 82598 should be in the range of 50us to 50ms, - * however the hardware default for these parts is 500us to 1ms which is less - * than the 10ms recommended by the pci-e spec. To address this we need to - * increase the value to either 10ms to 250ms for capability version 1 config, - * or 16ms to 55ms for version 2. + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
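The completion-timeout fix described in this comment, whose body follows in the next hunk, boils down to a read-modify-write of the PCIe Device Control 2 word through the driver's config-space accessors (IXGBE_READ_PCIE_WORD/IXGBE_WRITE_PCIE_WORD in this tree). A self-contained sketch of that pattern, with a fake config space standing in for the device; the 0xC8 offset and 0x0005 encoding are believed to mirror ixgbe_type.h but should be treated as illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_DEVICE_CONTROL2      0xC8   /* config offset, illustrative */
    #define PCI_DEVICE_CONTROL2_16ms 0x0005 /* timeout range encoding, illustrative */

    static uint16_t cfg_space[0x100 / 2];   /* fake config space for the sketch */

    static uint16_t read_cfg_word(unsigned int off)
    {
        return cfg_space[off / 2];
    }

    static void write_cfg_word(unsigned int off, uint16_t v)
    {
        cfg_space[off / 2] = v;
    }

    int main(void)
    {
        /* read-modify-write, mirroring ixgbe_set_pcie_completion_timeout() */
        uint16_t devctl2 = read_cfg_word(PCI_DEVICE_CONTROL2);

        devctl2 |= PCI_DEVICE_CONTROL2_16ms; /* select the 16ms..55ms range */
        write_cfg_word(PCI_DEVICE_CONTROL2, devctl2);

        printf("DEVCTL2 = 0x%04x\n", read_cfg_word(PCI_DEVICE_CONTROL2));
        return 0;
    }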
**/ -static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) { u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); u16 pcie_devctl2; - if (ixgbe_removed(hw->hw_addr)) - return; - /* only take action if timeout value is defaulted to 0 */ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) goto out; @@ -57,125 +69,207 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) * directly in order to set the completion timeout value for * 16ms to 55ms */ - pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; - ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); + IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); } -static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - /* Call PHY identify routine to get the phy type */ - ixgbe_identify_phy_generic(hw); - - mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; - mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - return 0; -} - /** - * ixgbe_init_phy_ops_82598 - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during get_invariants because the PHY/SFP type was - * not known. Perform the SFP init if necessary. + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure * + * Initialize the function pointers and assign the MAC type for 82598. + * Does not touch the hardware. 
**/ -static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82598"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_82598; + + /* MAC */ + mac->ops.start_hw = ixgbe_start_hw_82598; + mac->ops.reset_hw = ixgbe_reset_hw_82598; + mac->ops.get_media_type = ixgbe_get_media_type_82598; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82598; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_82598; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; + mac->ops.set_vfta = ixgbe_set_vfta_82598; + mac->ops.set_vlvf = NULL; + mac->ops.clear_vfta = ixgbe_clear_vfta_82598; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_82598; + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* SFP+ Module */ + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; + + /* Link */ + mac->ops.check_link = ixgbe_check_mac_link_82598; + mac->ops.setup_link = ixgbe_setup_mac_link_82598; + mac->ops.flap_tx_laser = NULL; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; + mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = NULL; + + mac->ops.get_rtrup2tc = NULL; + + return ret_val; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
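The rewritten ixgbe_init_ops_82598 above is the clearest example of the shared-code style this patch adopts: each MAC generation fills a table of function pointers once at init, and generic code then dispatches through hw->mac.ops and hw->phy.ops without knowing which silicon it drives. A minimal self-contained model of that pattern (all names invented for the sketch):

    #include <stdio.h>

    struct mac_ops {
        int (*reset_hw)(void);
        int (*start_hw)(void);
    };

    /* 82598-specific implementations get wired in by the init routine */
    static int reset_hw_82598(void) { puts("82598 reset_hw"); return 0; }
    static int start_hw_82598(void) { puts("82598 start_hw"); return 0; }

    static void init_ops_82598(struct mac_ops *ops)
    {
        /* table is filled once; note no hardware access happens here */
        ops->reset_hw = reset_hw_82598;
        ops->start_hw = start_hw_82598;
    }

    int main(void)
    {
        struct mac_ops ops;

        init_ops_82598(&ops);
        ops.reset_hw();  /* generic code dispatches through the table */
        ops.start_hw();
        return 0;
    }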
+ * + **/ +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset; + DEBUGFUNC("ixgbe_init_phy_ops_82598"); + /* Identify the PHY */ phy->ops.identify(hw); /* Overwrite the link function pointers if copper PHY */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.setup_link = ixgbe_setup_copper_link_82598; mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; + ixgbe_get_copper_link_capabilities_generic; } switch (hw->phy.type) { case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: - phy->ops.reset = &ixgbe_reset_phy_nl; + phy->ops.reset = ixgbe_reset_phy_nl; /* Call SFP+ identify routine to get the SFP+ module type */ ret_val = phy->ops.identify_sfp(hw); - if (ret_val) - return ret_val; - if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + if (ret_val != IXGBE_SUCCESS) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } /* Check to see if SFP+ module is supported */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, - &list_offset, - &data_offset); - if (ret_val) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } break; default: break; } - return 0; +out: + return ret_val; } /** - * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure * - * Starts the hardware using the generic start_hw function. - * Disables relaxed ordering for archs other than SPARC - * Then set pcie completion timeout + * Starts the hardware using the generic start_hw function. 
+ * Disables relaxed ordering, then sets the PCIe completion timeout * **/ -static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { - s32 ret_val; + u32 regval; + u32 i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82598"); ret_val = ixgbe_start_hw_generic(hw); if (ret_val) return ret_val; + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + /* set the completion timeout for interface */ ixgbe_set_pcie_completion_timeout(hw); - return 0; + return ret_val; } /** - * ixgbe_get_link_capabilities_82598 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value + * ixgbe_get_link_capabilities_82598 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value * - * Determines the link capabilities by reading the AUTOC register. + * Determines the link capabilities by reading the AUTOC register. **/ -static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { + s32 status = IXGBE_SUCCESS; u32 autoc = 0; + DEBUGFUNC("ixgbe_get_link_capabilities_82598"); + /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not been @@ -213,26 +307,31 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, break; default: - return IXGBE_ERR_LINK_SETUP; + status = IXGBE_ERR_LINK_SETUP; + break; } - return 0; + return status; } /** - * ixgbe_get_media_type_82598 - Determines media type - * @hw: pointer to hardware structure + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure * - * Returns the media type (fiber, copper, backplane) + * Returns the media type (fiber, copper, backplane) **/ -static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) { + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82598"); + /* Detect if there is a copper PHY attached. 
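The relaxed-ordering loop added to ixgbe_start_hw_82598 above is a straightforward per-queue read-modify-write: read the DCA control register, clear the write-relaxed-ordering enable bits, write it back. Modeled standalone below; the register file and the bit position are stand-ins for the sketch, the real offsets live in ixgbe_type.h:

    #include <stdio.h>

    #define DCA_TXCTRL_DESC_WRO_EN (1u << 11) /* illustrative bit position */
    #define MAX_QUEUES             32

    static unsigned int txctrl[MAX_QUEUES];   /* fake IXGBE_DCA_TXCTRL(i) file */

    int main(void)
    {
        for (int i = 0; i < MAX_QUEUES; i++) {
            unsigned int regval = txctrl[i];   /* IXGBE_READ_REG  */
            regval &= ~DCA_TXCTRL_DESC_WRO_EN; /* clear write relaxed ordering */
            txctrl[i] = regval;                /* IXGBE_WRITE_REG */
        }
        puts("descriptor write-back relaxed ordering cleared on all Tx queues");
        return 0;
    }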
*/ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + goto out; default: break; } @@ -242,37 +341,41 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: /* Default device ID is mezzanine card KX/KX4 */ - return ixgbe_media_type_backplane; - + media_type = ixgbe_media_type_backplane; + break; case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598_DA_DUAL_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: case IXGBE_DEV_ID_82598EB_XF_LR: case IXGBE_DEV_ID_82598EB_SFP_LOM: - return ixgbe_media_type_fiber; - + media_type = ixgbe_media_type_fiber; + break; case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: - return ixgbe_media_type_cx4; - + media_type = ixgbe_media_type_cx4; + break; case IXGBE_DEV_ID_82598AT: case IXGBE_DEV_ID_82598AT2: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + break; default: - return ixgbe_media_type_unknown; + media_type = ixgbe_media_type_unknown; + break; } +out: + return media_type; } /** - * ixgbe_fc_enable_82598 - Enable flow control - * @hw: pointer to hardware structure + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure * - * Enable flow control according to the current settings. + * Enable flow control according to the current settings. **/ -static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) { + s32 ret_val = IXGBE_SUCCESS; u32 fctrl_reg; u32 rmcs_reg; u32 reg; @@ -281,18 +384,23 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) int i; bool link_up; + DEBUGFUNC("ixgbe_fc_enable_82598"); + /* Validate the water mark configuration */ - if (!hw->fc.pause_time) - return IXGBE_ERR_INVALID_LINK_SETTINGS; + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } /* Low water mark of zero causes XOFF floods */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { hw_dbg(hw, "Invalid water mark configuration\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } } } @@ -318,7 +426,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) } /* Negotiate the fc mode to use */ - hw->mac.ops.fc_autoneg(hw); + ixgbe_fc_autoneg(hw); /* Disable any previous flow control settings */ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); @@ -369,7 +477,9 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; } /* Set 802.3x based flow control settings. */ @@ -378,7 +488,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. 
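The threshold programming just below reduces to two small pieces of arithmetic worth seeing in isolation: the 16-bit pause time is replicated into both halves of each 32-bit FCTTV register by multiplying with 0x00010001 (two TCs per register), and the low water mark is shifted left by 10, which I read as scaling a kilobyte-granular value to bytes, before the XON-enable bit is ORed in. A worked sketch with example values; FCRTL_XONE follows ixgbe_type.h, the sample inputs are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define FCRTL_XONE 0x80000000u /* XON enable, per ixgbe_type.h */

    int main(void)
    {
        uint32_t pause_time = 0x680; /* example 16-bit pause time */
        uint32_t low_water = 0x40;   /* example water mark, KB units assumed */

        /* two traffic classes share one FCTTV register; the multiply
         * copies the pause time into both 16-bit halves */
        uint32_t fcttv = pause_time * 0x00010001u;
        uint32_t fcrtl = (low_water << 10) | FCRTL_XONE;

        printf("FCTTV = 0x%08x\n", fcttv); /* 0x06800680 */
        printf("FCRTL = 0x%08x\n", fcrtl); /* 0x80010000 */
        return 0;
    }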
*/ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; @@ -394,30 +504,33 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - return 0; +out: + return ret_val; } /** - * ixgbe_start_mac_link_82598 - Configures MAC link settings - * @hw: pointer to hardware structure - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed * - * Configures link settings based on values in the ixgbe_hw struct. - * Restarts the link. Performs autonegotiation if needed. + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. **/ -static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; - s32 status = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_mac_link_82598"); /* Restart link */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -435,7 +548,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; - msleep(100); + msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; @@ -445,35 +558,36 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, } /* Add delay to filter out noises during initial link setup */ - msleep(50); + msec_delay(50); return status; } /** - * ixgbe_validate_link_ready - Function looks for phy link - * @hw: pointer to hardware structure + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure * - * Function indicates success when phy link is available. If phy is not ready - * within 5 seconds of MAC indicating link, the function returns error. + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. 
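ixgbe_validate_link_ready, whose body follows in the next hunk, is the standard bounded-poll idiom this file uses everywhere: re-read a status register up to N times with a fixed sleep between reads, and treat running the loop to completion as failure. A standalone model; the 50-poll budget is an assumption that matches the 5-second comment at 100 ms per poll:

    #include <stdbool.h>
    #include <stdio.h>

    #define LINK_READY_POLLS 50 /* 50 polls x 100 ms = the 5 s budget */

    /* stand-in for the PHY autoneg-status read; pretend the link
     * settles after three polls */
    static bool phy_link_ready(int poll)
    {
        return poll >= 3;
    }

    int main(void)
    {
        int timeout;

        for (timeout = 0; timeout < LINK_READY_POLLS; timeout++) {
            if (phy_link_ready(timeout))
                break;
            /* the driver sleeps here: msec_delay(100) */
        }

        if (timeout == LINK_READY_POLLS)
            puts("error: link never became ready");
        else
            printf("link ready after %d polls\n", timeout);
        return 0;
    }

Note that the failure test compares the counter against the limit after the loop, which is why the driver can distinguish "broke out early" from "timed out" without an extra flag.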
**/ -static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) { u32 timeout; u16 an_reg; if (hw->device_id != IXGBE_DEV_ID_82598AT2) - return 0; + return IXGBE_SUCCESS; for (timeout = 0; timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { - hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); - if ((an_reg & MDIO_AN_STAT1_COMPLETE) && - (an_reg & MDIO_STAT1_LSTATUS)) + if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && + (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) break; - msleep(100); + msec_delay(100); } if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { @@ -481,19 +595,19 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) return IXGBE_ERR_LINK_SETUP; } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_check_mac_link_82598 - Get link/speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true is link is up, false otherwise - * @link_up_wait_to_complete: bool used to wait for link up or not + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true if link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not * - * Reads the links register to determine if link is up and the current speed + * Reads the links register to determine if link is up and the current speed **/ -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { @@ -501,19 +615,21 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 i; u16 link_reg, adapt_comp_reg; + DEBUGFUNC("ixgbe_check_mac_link_82598"); + /* - * SERDES PHY requires us to read link status from register 0xC79F. - * Bit 0 set indicates link is up/ready; clear indicates link down. - * 0xC00C is read to check that the XAUI lanes are active. Bit 0 - * clear indicates active; set indicates inactive. + * SERDES PHY requires us to read link status from undocumented + * register 0xC79F. Bit 0 set indicates link is up/ready; clear + * indicates link down. 0xC00C is read to check that the XAUI lanes + * are active. Bit 0 clear indicates active; set indicates inactive. 
*/ if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); - hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, &adapt_comp_reg); if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) { *link_up = true; @@ -521,12 +637,12 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, } else { *link_up = false; } - msleep(100); + msec_delay(100); hw->phy.ops.read_reg(hw, 0xC79F, - MDIO_MMD_PMAPMD, + IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, - MDIO_MMD_PMAPMD, + IXGBE_TWINAX_DEV, &adapt_comp_reg); } } else { @@ -536,20 +652,20 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, *link_up = false; } - if (!*link_up) - return 0; + if (*link_up == false) + goto out; } links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } - msleep(100); + msec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { @@ -564,37 +680,41 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, else *speed = IXGBE_LINK_SPEED_1GB_FULL; - if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && - (ixgbe_validate_link_ready(hw) != 0)) + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && + (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) *link_up = false; - return 0; +out: + return IXGBE_SUCCESS; } /** - * ixgbe_setup_mac_link_82598 - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed * - * Set the link speed in the AUTOC register and restarts link. + * Set the link speed in the AUTOC register and restarts link. **/ -static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - bool autoneg = false; + bool autoneg = false; + s32 status = IXGBE_SUCCESS; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc = curr_autoc; - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + DEBUGFUNC("ixgbe_setup_mac_link_82598"); /* Check to see if speed passed in is supported. 
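The "speed passed in is supported" check flagged here is just a bitmask intersection, performed at the top of the next hunk: the requested speed mask is ANDed with what get_link_capabilities reports, and an empty result is a link-setup error. Worked through below; the speed encodings are believed to match ixgbe_type.h but treat them as illustrative:

    #include <stdio.h>

    #define LINK_SPEED_UNKNOWN   0u
    #define LINK_SPEED_1GB_FULL  0x0020u
    #define LINK_SPEED_10GB_FULL 0x0080u

    int main(void)
    {
        unsigned int link_capabilities =
            LINK_SPEED_1GB_FULL | LINK_SPEED_10GB_FULL;
        unsigned int speed = LINK_SPEED_10GB_FULL;

        speed &= link_capabilities; /* drop anything this AUTOC mode can't do */
        if (speed == LINK_SPEED_UNKNOWN)
            puts("IXGBE_ERR_LINK_SETUP");
        else
            printf("requested speed ok: 0x%04x\n", speed);
        return 0;
    }

Because speeds are independent bits, a caller can also pass a mask of several speeds and let the hardware pick the best surviving one, which is what the KX4/KX support-bit selection that follows relies on.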
*/ - ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) - return IXGBE_ERR_LINK_SETUP; + status = IXGBE_ERR_LINK_SETUP; /* Set KX4/KX support according to speed requested */ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || @@ -608,28 +728,36 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } - /* Setup and restart the link based on the new values in - * ixgbe_hw This will write the AUTOC register based on the new - * stored values - */ - return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + if (status == IXGBE_SUCCESS) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw. This will write the AUTOC register based on the new + * stored values + */ + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); + } + + return status; } /** - * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true if waiting is needed to complete + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete * - * Sets the link speed in the AUTOC register in the MAC and restarts link. + * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { s32 status; + DEBUGFUNC("ixgbe_setup_copper_link_82598"); + /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -640,27 +768,29 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, } /** - * ixgbe_reset_hw_82598 - Performs hardware reset - * @hw: pointer to hardware structure + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure * - * Resets the hardware by resetting the transmit and receive units, masks and - * clears all interrupts, performing a PHY reset, and performing a link (MAC) - * reset. + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performs a PHY reset, and performs a link (MAC) + * reset. **/ -static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) { - s32 status; - s32 phy_status = 0; + s32 status = IXGBE_SUCCESS; + s32 phy_status = IXGBE_SUCCESS; u32 ctrl; u32 gheccr; u32 i; u32 autoc; u8 analog_val; + DEBUGFUNC("ixgbe_reset_hw_82598"); + /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; /* * Power up the Atlas Tx lanes if they are currently powered down.
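+ * (The Atlas Tx lanes are powered down for MAC loopback testing and a + * MAC reset does not power them back up, which is why they are restored + * here through explicit analog register writes.)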
@@ -702,7 +832,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) /* Init PHY and function pointers, perform SFP setup */ phy_status = hw->phy.ops.init(hw); if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return phy_status; + goto reset_hw_out; if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) goto mac_reset_top; @@ -717,21 +847,20 @@ mac_reset_top: ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); - usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } - msleep(50); + msec_delay(50); /* * Double resets are required for recovery from certain error @@ -744,7 +873,7 @@ mac_reset_top: } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* @@ -769,23 +898,26 @@ mac_reset_top: */ hw->mac.ops.init_rx_addrs(hw); - if (phy_status) +reset_hw_out: + if (phy_status != IXGBE_SUCCESS) status = phy_status; return status; } /** - * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq set index + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index **/ -static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_set_vmdq_82598"); + /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); @@ -796,20 +928,21 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) rar_high &= ~IXGBE_RAH_VIND_MASK; rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) **/ -static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; + UNREFERENCED_1PARAMETER(vmdq); /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { @@ -823,27 +956,31 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_set_vfta_82598 - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: 
VMDq output index that maps queue to VLAN id in VFTA - * @vlan_on: boolean flag to turn on/off VLAN in VFTA - * @vlvf_bypass: boolean flag - unused + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vlvf_bypass: boolean flag - unused * - * Turn on/off specified VLAN in the VLAN filter table. + * Turn on/off specified VLAN in the VLAN filter table. **/ -static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool vlvf_bypass) +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; u32 bits; u32 vftabyte; + UNREFERENCED_1PARAMETER(vlvf_bypass); + + DEBUGFUNC("ixgbe_set_vfta_82598"); + if (vlan > 4095) return IXGBE_ERR_PARAM; @@ -866,26 +1003,28 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ - bits |= BIT(bitindex); + bits |= (1 << bitindex); else /* Turn off this VLAN id */ - bits &= ~BIT(bitindex); + bits &= ~(1 << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_clear_vfta_82598 - Clear VLAN filter table - * @hw: pointer to hardware structure + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure * - * Clears the VLAN filer table, and the VMDq index associated with the filter + * Clears the VLAN filter table, and the VMDq index associated with the filter **/ -static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) { u32 offset; u32 vlanbyte; + DEBUGFUNC("ixgbe_clear_vfta_82598"); + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); @@ -894,101 +1033,107 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 0); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value * - * Performs read operation to Atlas analog register specified. + * Performs read operation to Atlas analog register specified. **/ -static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 atlas_ctl; + DEBUGFUNC("ixgbe_read_analog_reg8_82598"); + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); *val = (u8)atlas_ctl; - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write * - * Performs write operation to Atlas analog register specified. + * Performs write operation to Atlas analog register specified.
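+ * Both helpers above drive the same ATLASCTL protocol: the analog + * register index travels in bits 15:8 (reg << 8), the data byte in + * bits 7:0, and the flushed command must be followed by a 10 usec delay + * before the access completes.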
**/ -static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 atlas_ctl; + DEBUGFUNC("ixgbe_write_analog_reg8_82598"); + atlas_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @dev_addr: address to read from - * @byte_offset: byte offset to read from dev_addr - * @eeprom_data: value read + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr + * @eeprom_data: value read * - * Performs 8 byte read operation to SFP module's data over I2C interface. + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. **/ -static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, +STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u8 byte_offset, u8 *eeprom_data) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; u16 gssr; u32 i; + DEBUGFUNC("ixgbe_read_i2c_phy_82598"); + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; else gssr = IXGBE_GSSR_PHY0_SM; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; if (hw->phy.type == ixgbe_phy_nl) { /* - * phy SDA/SCL registers are at addresses 0xC30A to - * 0xC30D. These registers are used to talk to the SFP+ + * NetLogic phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ * module's EEPROM through the SDA/SCL (I2C) interface. */ sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - MDIO_MMD_PMAPMD, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - MDIO_MMD_PMAPMD, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) break; - usleep_range(10000, 20000); + msec_delay(10); } if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { @@ -999,7 +1144,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, /* Read data */ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - MDIO_MMD_PMAPMD, &sfp_data); + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { @@ -1012,49 +1157,153 @@ out: } /** - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read * - * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
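+ * Under the hood this goes through ixgbe_read_i2c_phy_82598(), which + * drives the NetLogic SDA/SCL bridge: it writes (dev_addr << 8) + + * byte_offset together with the EEPROM read command to the SDA/SCL + * address register, polls the status register out of the in-progress + * state (up to 100 tries, 10 ms apart), then fetches the result from + * the data register, whose upper byte carries the value.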
**/ -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, byte_offset, eeprom_data); } /** - * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @byte_offset: byte offset at address 0xA2 - * @sff8472_data: value read + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @sff8472_data: value read * - * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C **/ -static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, byte_offset, sff8472_data); } /** - * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple - * port devices. - * @hw: pointer to the HW structure + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure * - * Calls common function and corrects issue with some single port devices - * that enable LAN1 but not LAN0. + * Determines physical layer capabilities of the current configuration. **/ -static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + break; + default: + break; + } + + if 
(hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.identify_sfp(hw); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_sfp_type_sr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case ixgbe_sfp_type_lr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. + **/ +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u16 pci_gen = 0; u16 pci_ctrl2 = 0; + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); + ixgbe_set_lan_id_multi_port_pcie(hw); /* check if LAN0 is disabled */ @@ -1080,11 +1329,12 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) * @headroom: reserve n KB of headroom * @strategy: packet buffer allocation strategy **/ -static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy) { u32 rxpktsize = IXGBE_RXPBSIZE_64KB; - u8 i = 0; + u8 i = 0; + UNREFERENCED_1PARAMETER(headroom); if (!num_pb) return; @@ -1112,85 +1362,18 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } -static const struct ixgbe_mac_operations mac_ops_82598 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_82598, - .start_hw = &ixgbe_start_hw_82598, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_82598, - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, - .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, - .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, - .setup_link = &ixgbe_setup_mac_link_82598, - .set_rxpba = &ixgbe_set_rxpba_82598, - .check_link = &ixgbe_check_mac_link_82598, - .get_link_capabilities = &ixgbe_get_link_capabilities_82598, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_82598, - .clear_vmdq = &ixgbe_clear_vmdq_82598, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_82598, - .set_vfta = &ixgbe_set_vfta_82598, - .fc_enable = 
&ixgbe_fc_enable_82598, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, - .init_swfw_sync = NULL, - .get_thermal_sensor_data = NULL, - .init_thermal_sensor_thresh = NULL, - .prot_autoc_read = &prot_autoc_read_generic, - .prot_autoc_write = &prot_autoc_write_generic, - .enable_rx = &ixgbe_enable_rx_generic, - .disable_rx = &ixgbe_disable_rx_generic, -}; +/** + * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_82598"); -static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eerd_generic, - .write = &ixgbe_write_eeprom_generic, - .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, - .read_buffer = &ixgbe_read_eerd_buffer_generic, - .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, -}; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); -static const struct ixgbe_phy_operations phy_ops_82598 = { - .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_module_generic, - .init = &ixgbe_init_phy_ops_82598, - .reset = &ixgbe_reset_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .read_reg_mdi = &ixgbe_read_phy_reg_mdi, - .write_reg_mdi = &ixgbe_write_phy_reg_mdi, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -const struct ixgbe_info ixgbe_82598_info = { - .mac = ixgbe_mac_82598EB, - .get_invariants = &ixgbe_get_invariants_82598, - .mac_ops = &mac_ops_82598, - .eeprom_ops = &eeprom_ops_82598, - .phy_ops = &phy_ops_82598, - .mvals = ixgbe_mvals_8259X, -}; + return IXGBE_SUCCESS; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h new file mode 100644 index 000000000000..61c0de3c4916 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + +#ifndef _IXGBE_82598_H_ +#define _IXGBE_82598_H_ + +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass); +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); +#endif /* _IXGBE_82598_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 109f8de5a1c2..dad058ab4701 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,13 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#include -#include -#include - -#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_82599.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" #include "ixgbe_phy.h" -#include "ixgbe_mbx.h" #define IXGBE_82599_MAX_TX_QUEUES 128 #define IXGBE_82599_MAX_RX_QUEUES 128 @@ -16,64 +14,37 @@ #define IXGBE_82599_VFT_TBL_SIZE 128 #define IXGBE_82599_RX_PB_SIZE 512 -static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void -ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); -static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data); +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); -bool ixgbe_mng_enabled(struct ixgbe_hw *hw) -{ - u32 fwsm, manc, 
factps; - - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); - if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) - return false; - - manc = IXGBE_READ_REG(hw, IXGBE_MANC); - if (!(manc & IXGBE_MANC_RCV_TCO_EN)) - return false; - - factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); - if (factps & IXGBE_FACTPS_MNGCG) - return false; - - return true; -} - -static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; - /* enable the laser control functions for SFP+ fiber + DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); + + /* + * enable the laser control functions for SFP+ fiber * and MNG not enabled */ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && !ixgbe_mng_enabled(hw)) { mac->ops.disable_tx_laser = - &ixgbe_disable_tx_laser_multispeed_fiber; + ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = - &ixgbe_enable_tx_laser_multispeed_fiber; - mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; + ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; + } else { mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; @@ -82,27 +53,96 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; mac->ops.set_rate_select_speed = ixgbe_set_hard_rate_select_speed; } else { - if ((mac->ops.get_media_type(hw) == - ixgbe_media_type_backplane) && - (hw->phy.smart_speed == ixgbe_smart_speed_auto || - hw->phy.smart_speed == ixgbe_smart_speed_on) && - !ixgbe_verify_lesm_fw_enabled_82599(hw)) - mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; - else - mac->ops.setup_link = &ixgbe_setup_mac_link_82599; + if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) { + mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_82599; + } } } -static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) { - s32 ret_val; + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u32 esdp; + + DEBUGFUNC("ixgbe_init_phy_ops_82599"); + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. 
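+ * On this design the QSFP+ module sits behind a single I2C bus shared + * through the SDP pins (hence the qsfp_shared_i2c_bus flag), so the + * ESDP pin directions are programmed below and the byte-level I2C + * accessors are overridden with the 82599-specific versions.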
*/ + hw->phy.qsfp_shared_i2c_bus = TRUE; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; + } + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on PHY type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; + } +init_phy_ops_out: + return ret_val; +} + +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset, data_value; + DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); @@ -110,14 +150,16 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto setup_sfp_out; /* PHY config will finish before releasing the semaphore */ ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) - return IXGBE_ERR_SWFW_SYNC; + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto setup_sfp_out; + } if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; @@ -130,12 +172,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* - * Delay obtaining semaphore again to allow FW access, - * semaphore_delay is in ms usleep_range needs us. + /* Delay obtaining semaphore again to allow FW access + * prot_autoc_write uses the semaphore too. */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); + msec_delay(hw->eeprom.semaphore_delay); /* Restart DSP and set SFI mode */ ret_val = hw->mac.ops.prot_autoc_write(hw, @@ -143,54 +183,53 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) false); if (ret_val) { - hw_dbg(hw, " sfp module setup not complete\n"); - return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + hw_dbg(hw, "sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; } + } - return 0; +setup_sfp_out: + return ret_val; setup_sfp_err: /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* Delay obtaining semaphore again to allow FW access, - * semaphore_delay is in ms usleep_range needs us. 
- */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); - hw_err(hw, "eeprom read at offset %d failed\n", data_offset); - return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + /* Delay obtaining semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; } /** - * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read - * @hw: pointer to hardware structure - * @locked: Return the if we locked for this read. - * @reg_val: Value we read from AUTOC + * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: Return whether we locked for this read. + * @reg_val: Value we read from AUTOC * - * For this part (82599) we need to wrap read-modify-writes with a possible - * FW/SW lock. It is assumed this lock will be freed with the next - * prot_autoc_write_82599(). Note, that locked can only be true in cases - * where this function doesn't return an error. - **/ -static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, - u32 *reg_val) + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next + * prot_autoc_write_82599(). + */ +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { s32 ret_val; *locked = false; - /* If LESM is on then we need to hold the SW/FW semaphore. */ + /* If LESM is on then we need to hold the SW/FW semaphore. */ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) + if (ret_val != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; *locked = true; } *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); - return 0; + return IXGBE_SUCCESS; } /** @@ -198,14 +237,14 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, * @hw: pointer to hardware structure * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous proc_autoc_read_82599. + * previous prot_autoc_read_82599. * - * This part (82599) may need to hold a the SW/FW lock around all writes to + * This part (82599) may need to hold the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset.
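+ * + * The two helpers are meant to be used as a pair so the lock state + * travels from the read to the write; a minimal sketch of the pattern + * (illustrative only): + * + *	bool locked; + *	u32 autoc; + *	s32 ret; + * + *	ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc); + *	if (ret != IXGBE_SUCCESS) + *		return ret; + *	autoc |= IXGBE_AUTOC_AN_RESTART; + *	ret = hw->mac.ops.prot_autoc_write(hw, autoc, locked);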
- **/ -static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) + */ +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) { - s32 ret_val = 0; + s32 ret_val = IXGBE_SUCCESS; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) @@ -218,7 +257,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) + if (ret_val != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; locked = true; @@ -237,98 +276,120 @@ out: return ret_val; } -static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - ixgbe_init_mac_link_ops_82599(hw); - - mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; - mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - return 0; -} - /** - * ixgbe_init_phy_ops_82599 - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during get_invariants because the PHY/SFP type was - * not known. Perform the SFP init if necessary. + * ixgbe_init_ops_82599 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure * + * Initialize the function pointers and assign the MAC type for 82599. + * Does not touch the hardware. **/ -static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) + +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val; - u32 esdp; + u16 i; - if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { - /* Store flag indicating I2C bus access control unit. 
*/ - hw->phy.qsfp_shared_i2c_bus = true; + DEBUGFUNC("ixgbe_init_ops_82599"); - /* Initialize access to QSFP+ I2C bus */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0_DIR; - esdp &= ~IXGBE_ESDP_SDP1_DIR; - esdp &= ~IXGBE_ESDP_SDP0; - esdp &= ~IXGBE_ESDP_SDP0_NATIVE; - esdp &= ~IXGBE_ESDP_SDP1_NATIVE; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); + ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); - phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; - phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; - } + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_82599; + phy->ops.init = ixgbe_init_phy_ops_82599; - /* Identify the PHY or SFP module */ - ret_val = phy->ops.identify(hw); + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_82599; + mac->ops.get_media_type = ixgbe_get_media_type_82599; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82599; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; + mac->ops.start_hw = ixgbe_start_hw_82599; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.prot_autoc_read = prot_autoc_read_82599; + mac->ops.prot_autoc_write = prot_autoc_write_82599; - /* Setup function pointers based on detected SFP module and speeds */ + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; + mac->ops.check_link = ixgbe_check_mac_link_generic; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; ixgbe_init_mac_link_ops_82599(hw); - /* If copper media, overwrite with copper function pointers */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82599; - mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; - } + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - /* Set necessary function pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_tn: - phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - break; - default: - break; - } + 
mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + for (i = 0; i < 64; i++) + hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.read = ixgbe_read_eeprom_82599; + eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_thermal_sensor_data = + ixgbe_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + ixgbe_init_thermal_sensor_thresh_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; return ret_val; } /** - * ixgbe_get_link_capabilities_82599 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled * - * Determines the link capabilities by reading the AUTOC register. + * Determines the link capabilities by reading the AUTOC register. **/ -static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { + s32 status = IXGBE_SUCCESS; u32 autoc = 0; - /* Determine 1G link capabilities off of SFP+ type */ + DEBUGFUNC("ixgbe_get_link_capabilities_82599"); + + + /* Check if 1G SFP module. */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || @@ -337,13 +398,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; - return 0; + goto out; } /* * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. If AUTOC value has not been - * stored, use the current register value. + * which represents EEPROM defaults. If AUTOC value has not + * been stored, use the current register values. */ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; @@ -400,37 +461,46 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, break; default: - return IXGBE_ERR_LINK_SETUP; + status = IXGBE_ERR_LINK_SETUP; + goto out; + break; } if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; - /* QSFP must not enable auto-negotiation */ + /* QSFP must not enable full auto-negotiation + * Limited autoneg is enabled at 1G + */ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) *autoneg = false; else *autoneg = true; } - return 0; +out: + return status; } /** - * ixgbe_get_media_type_82599 - Get media type - * @hw: pointer to hardware structure + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure * - * Returns the media type (fiber, copper, backplane) + * Returns the media type (fiber, copper, backplane) **/ -static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) { + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82599"); + /* Detect if there is a copper PHY attached. 
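+ * The PHY type check takes precedence over the device ID switch below, + * so a detected copper PHY (TN or unknown copper) is always reported as + * copper media regardless of which board it sits on.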
*/ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + goto out; default: break; } @@ -443,46 +513,47 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ - return ixgbe_media_type_backplane; - + media_type = ixgbe_media_type_backplane; + break; case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: - return ixgbe_media_type_fiber; - + media_type = ixgbe_media_type_fiber; + break; case IXGBE_DEV_ID_82599_CX4: - return ixgbe_media_type_cx4; - + media_type = ixgbe_media_type_cx4; + break; case IXGBE_DEV_ID_82599_T3_LOM: - return ixgbe_media_type_copper; - - case IXGBE_DEV_ID_82599_LS: - return ixgbe_media_type_fiber_lco; - + media_type = ixgbe_media_type_copper; + break; case IXGBE_DEV_ID_82599_QSFP_SF_QP: - return ixgbe_media_type_fiber_qsfp; - + media_type = ixgbe_media_type_fiber_qsfp; + break; default: - return ixgbe_media_type_unknown; + media_type = ixgbe_media_type_unknown; + break; } +out: + return media_type; } /** * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 * @hw: pointer to hardware structure * - * Disables link, should be called during D3 power down sequence. + * Disables link during D3 power down sequence. * **/ -static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) +void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { u32 autoc2_reg; u16 ee_ctrl_2 = 0; - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); + ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { @@ -493,27 +564,32 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) } /** - * ixgbe_start_mac_link_82599 - Setup MAC link settings - * @hw: pointer to hardware structure - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_start_mac_link_82599 - Setup MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed * - * Configures link settings based on values in the ixgbe_hw struct. - * Restarts the link. Performs autonegotiation if needed. + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. **/ -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; - s32 status = 0; + s32 status = IXGBE_SUCCESS; bool got_lock = false; + DEBUGFUNC("ixgbe_start_mac_link_82599"); + + /* reset_pipeline requires us to hold this lock as it writes to + * AUTOC. 
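+ * (When LESM firmware is enabled it shares AUTOC with the driver, + * which is why the MAC_CSR SW/FW semaphore is taken around the link + * restart below and released once the write has landed.)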
+ */ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status) - return status; + IXGBE_GSSR_MAC_CSR_SM); + if (status != IXGBE_SUCCESS) + goto out; got_lock = true; } @@ -538,7 +614,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; - msleep(100); + msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; @@ -548,20 +624,21 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, } /* Add delay to filter out noises during initial link setup */ - msleep(50); + msec_delay(50); +out: return status; } /** - * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser - * @hw: pointer to hardware structure + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively shutting down the Tx - * laser on the PHY, effectively halting physical link. + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. **/ -static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -569,46 +646,48 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) if (ixgbe_check_reset_blocked(hw)) return; - /* Disable tx laser; allow 100us to go dark per spec */ + /* Disable Tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); - udelay(100); + usec_delay(100); } /** - * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser - * @hw: pointer to hardware structure + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively turning on the Tx - * laser on the PHY, effectively starting physical link. + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. **/ -static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* Enable tx laser; allow 100ms to light up */ + /* Enable Tx laser; allow 100ms to light up */ esdp_reg &= ~IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); - msleep(100); + msec_delay(100); } /** - * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser - * @hw: pointer to hardware structure + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure * - * When the driver changes the link speeds that it can support, - * it sets autotry_restart to true to indicate that we need to - * initiate a new autotry session with the link partner. To do - * so, we set the speed then disable and re-enable the tx laser, to - * alert the link partner that it also needs to restart autotry on its - * end. This is consistent with true clause 37 autoneg, which also - * involves a loss of signal. 
+ * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the Tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. **/ -static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { + DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); + /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return; @@ -627,8 +706,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) * * Set module link speed via RS0/RS1 rate select pins. */ -static void -ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -650,23 +729,25 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) } /** - * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed * - * Implements the Intel SmartSpeed algorithm. + * Implements the Intel SmartSpeed algorithm. **/ -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 i, j; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); + /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; @@ -691,7 +772,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); - if (status != 0) + if (status != IXGBE_SUCCESS) goto out; /* @@ -701,12 +782,12 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, * Table 9 in the AN MAS. */ for (i = 0; i < 5; i++) { - mdelay(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) + status = ixgbe_check_link(hw, &link_speed, &link_up, + false); + if (status != IXGBE_SUCCESS) goto out; if (link_up) @@ -726,7 +807,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, hw->phy.smart_speed_active = true; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); - if (status != 0) + if (status != IXGBE_SUCCESS) goto out; /* @@ -736,12 +817,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, * connect attempts as defined in the AN MAS table 73-7. 
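+ * In short, SmartSpeed first tries the full advertised speed set a + * bounded number of times; if autoneg never completes it retries with + * smart_speed_active set so the link can come up degraded at 1G rather + * than not at all, and the 'out' path logs when that downgrade occurs.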
*/ for (i = 0; i < 6; i++) { - mdelay(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) goto out; if (link_up) @@ -755,46 +835,48 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); + hw_dbg(hw, "Smartspeed has downgraded the link speed " + "from the maximum advertised\n"); return status; } /** - * ixgbe_setup_mac_link_82599 - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_setup_mac_link_82599 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed * - * Set the link speed in the AUTOC register and restarts link. + * Set the link speed in the AUTOC register and restarts link. **/ -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { bool autoneg = false; - s32 status; - u32 pma_pmd_1g, link_mode, links_reg, i; + s32 status = IXGBE_SUCCESS; + u32 pma_pmd_1g, link_mode; + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ + u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ + u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 links_reg; + u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - /* holds the value of AUTOC register at this current point in time */ - u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - /* holds the cached value of AUTOC register */ - u32 orig_autoc = 0; - /* temporary variable used for comparison purposes */ - u32 autoc = current_autoc; + DEBUGFUNC("ixgbe_setup_mac_link_82599"); /* Check to see if speed passed in is supported. 
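+ * As on 82598, the request is masked by the reported capabilities. + * Note that KR/KX4/KX support is taken from orig_autoc (the stored + * EEPROM default) when available, so earlier runtime writes to AUTOC do + * not shrink what can be advertised here.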
*/ - status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, - &autoneg); + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); if (status) - return status; + goto out; speed &= link_capabilities; - if (speed == IXGBE_LINK_SPEED_UNKNOWN) - return IXGBE_ERR_LINK_SETUP; + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { + status = IXGBE_ERR_LINK_SETUP; + goto out; + } /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) @@ -834,7 +916,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; - if (autoneg) + if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) autoc |= IXGBE_AUTOC_LMS_1G_AN; else autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; @@ -844,8 +926,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, if (autoc != current_autoc) { /* Restart link */ status = hw->mac.ops.prot_autoc_write(hw, autoc, false); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto out; /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { @@ -858,7 +940,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; - msleep(100); + msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = @@ -869,26 +951,29 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, } /* Add delay to filter out noises during initial link setup */ - msleep(50); + msec_delay(50); } +out: return status; } /** - * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true if waiting is needed to complete + * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete * - * Restarts link on PHY and MAC based on settings passed in. + * Restarts link on PHY and MAC based on settings passed in. **/ -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; + DEBUGFUNC("ixgbe_setup_copper_link_82599"); + /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -899,25 +984,28 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, } /** - * ixgbe_reset_hw_82599 - Perform hardware reset - * @hw: pointer to hardware structure + * ixgbe_reset_hw_82599 - Perform hardware reset + * @hw: pointer to hardware structure * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. 
**/ -static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; - u32 ctrl, i, autoc, autoc2; + u32 ctrl = 0; + u32 i, autoc, autoc2; u32 curr_lms; bool link_up = false; + DEBUGFUNC("ixgbe_reset_hw_82599"); + /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); @@ -928,7 +1016,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return status; + goto reset_hw_out; /* Setup SFP module if there is one present. */ if (hw->phy.sfp_setup_needed) { @@ -937,7 +1025,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) } if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return status; + goto reset_hw_out; /* Reset PHY */ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) @@ -948,7 +1036,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) mac_reset_top: /* - * Issue global reset to the MAC. Needs to be SW reset if link is up. + * Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when * mng is using it. If link is down or the flag to force full link * reset is set, then perform link reset. @@ -963,14 +1051,13 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); - usleep_range(1000, 1200); - /* Poll for reset bit to self-clear indicating reset is complete */ + /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -978,12 +1065,12 @@ mac_reset_top: hw_dbg(hw, "Reset polling failed to complete.\n"); } - msleep(50); + msec_delay(50); /* * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; @@ -1015,7 +1102,7 @@ mac_reset_top: * doesn't autoneg with out driver support we need to * leave LMS in the state it was before we MAC reset. * Likewise if we support WoL we don't want change the - * LMS state either. + * LMS state. */ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || hw->wol_enabled) @@ -1027,8 +1114,8 @@ mac_reset_top: status = hw->mac.ops.prot_autoc_write(hw, hw->mac.orig_autoc, false); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != @@ -1048,14 +1135,14 @@ mac_reset_top: * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ - hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES; + hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (is_valid_ether_addr(hw->mac.san_addr)) { + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; @@ -1072,8 +1159,9 @@ mac_reset_top: /* Store the alternative WWNN/WWPN prefix */ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, - &hw->mac.wwpn_prefix); + &hw->mac.wwpn_prefix); +reset_hw_out: return status; } @@ -1082,33 +1170,34 @@ mac_reset_top: * @hw: pointer to hardware structure * @fdircmd: current value of FDIRCMD register */ -static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) { int i; for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - return 0; - udelay(10); + return IXGBE_SUCCESS; + usec_delay(10); } return IXGBE_ERR_FDIR_CMD_INCOMPLETE; } /** - * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. - * @hw: pointer to hardware structure + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure **/ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { + s32 err; int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); u32 fdircmd; - s32 err; - fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); + /* * Before starting reinitialization process, * FDIRCMD.CMD must be zero. @@ -1151,7 +1240,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; - usleep_range(1000, 2000); + msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); @@ -1165,18 +1254,20 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) IXGBE_READ_REG(hw, IXGBE_FDIRMISS); IXGBE_READ_REG(hw, IXGBE_FDIRLEN); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register + * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register **/ -static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) { int i; + DEBUGFUNC("ixgbe_fdir_enable_82599"); + /* Prime the keys for hashing */ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); @@ -1200,7 +1291,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; - usleep_range(1000, 2000); + msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) @@ -1208,13 +1299,15 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) } /** - * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register, initially - * contains just the value 
of the Rx packet buffer allocation + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation **/ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) { + DEBUGFUNC("ixgbe_init_fdir_signature_82599"); + /* * Continue setup of fdirctrl register bits: * Move the flexible bytes to use the ethertype - shift 6 words @@ -1228,35 +1321,80 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register, initially - * contains just the value of the Rx packet buffer allocation + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + * @cloud_mode: true - cloud mode, false - other mode **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) { + UNREFERENCED_1PARAMETER(cloud_mode); + DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); + /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on - * Initialize the drop queue + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue to queue 127 * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + if (cloud_mode) + fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << + IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); - return 0; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue + * @hw: pointer to hardware structure + * @dropqueue: Rx queue index used for the dropped packets + **/ +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) +{ + u32 fdirctrl; + + DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); + /* Clear init done bit and drop queue field */ + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); + + /* Set drop queue */ + fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if ((hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_X550EM_a)) + fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + + /* write hashes and fdirctrl register, poll for completion */ + 
ixgbe_fdir_enable_82599(hw, fdirctrl); } /* @@ -1269,42 +1407,42 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) /** - * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash - * @input: input bitstream to compute the hash on - * @common: compressed common input dword + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + * @input: input bitstream to compute the hash on + * @common: compressed common input dword * - * This function is almost identical to the function above but contains - * several optimizations such as unwinding all of the loops, letting the - * compiler work out all of the conditional ifs since the keys are static - * defines, and computing two keys at once since the hashed dword stream - * will be the same for both keys. + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. 
**/ -static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common) +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(input.dword); + flow_vm_vlan = IXGBE_NTOHL(input.dword); /* generate common hash dword */ - hi_hash_dword = ntohl(common.dword); + hi_hash_dword = IXGBE_NTOHL(common.dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); @@ -1318,7 +1456,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); @@ -1351,28 +1489,31 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, } /** - * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter - * @hw: pointer to hardware structure - * @input: unique input dword - * @common: compressed common input dword - * @queue: queue index to direct traffic to + * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to * * Note that the tunnel bit in input must not be set when the hardware * tunneling support does not exist. **/ -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue) +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue) { u64 fdirhashcmd; u8 flow_type; bool tunnel; u32 fdircmd; + DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); + /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER */ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); flow_type = input.formatted.flow_type & @@ -1387,7 +1528,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, break; default: hw_dbg(hw, " Error on flow type input\n"); - return IXGBE_ERR_CONFIG; + return; } /* configure FDIRCMD register */ @@ -1403,33 +1544,33 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
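The 64-bit write trick described above relies on FDIRCMD sitting directly after FDIRHASH in register space, so a single 64-bit write programs both. A sketch of the packing, with made-up field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fdircmd = 0x00008001u;   /* made-up command/flow-type bits */
	uint32_t sig_hash = 0x12345678u;  /* made-up signature+bucket hash */

	/* low 32 bits land in FDIRHASH, high 32 bits in FDIRCMD */
	uint64_t fdirhashcmd = ((uint64_t)fdircmd << 32) | sig_hash;

	printf("FDIRHASH <- 0x%08x\n", (uint32_t)fdirhashcmd);
	printf("FDIRCMD  <- 0x%08x\n", (uint32_t)(fdirhashcmd >> 32));
	return 0;
}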
*/ fdirhashcmd = (u64)fdircmd << 32; - fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); + fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); - return 0; + return; } #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) /** - * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash - * @input: input bitstream to compute the hash on - * @input_mask: mask for the input bitstream + * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + * @input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream * - * This function serves two main purposes. First it applies the input_mask - * to the atr_input resulting in a cleaned up atr_input data stream. - * Secondly it computes the hash and stores it in the bkt_hash field at - * the end of the input byte stream. This way it will be available for - * future use without needing to recompute the hash. + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. **/ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *input_mask) @@ -1437,20 +1578,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 bucket_hash = 0; - __be32 hi_dword = 0; - int i; + u32 hi_dword = 0; + u32 i = 0; /* Apply masks to input data */ - for (i = 0; i <= 10; i++) - input->dword_stream[i] &= input_mask->dword_stream[i]; + for (i = 0; i < 14; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(input->dword_stream[0]); + flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); /* generate common hash dword */ - for (i = 1; i <= 10; i++) + for (i = 1; i <= 13; i++) hi_dword ^= input->dword_stream[i]; - hi_hash_dword = ntohl(hi_dword); + hi_hash_dword = IXGBE_NTOHL(hi_dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); @@ -1464,7 +1605,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); @@ -1476,24 +1617,23 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, * Limit hash to 13 bits since max bucket count is 8K. * Store result at the end of the input stream. 
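The hunks that follow rework ixgbe_get_fdirtcpm_82599(), whose job is the swap its comment describes: FDIRTCPM wants each 16-bit port mask bit-reversed, bit 15 into bit 0, bit 14 into bit 1, and so on. A standalone sketch of that reversal using the same mask-and-shift ladder; rbit16 is my name, not the driver's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t rbit16(uint16_t m)
{
	m = (uint16_t)(((m & 0x5555) << 1) | ((m & 0xAAAA) >> 1)); /* swap bits */
	m = (uint16_t)(((m & 0x3333) << 2) | ((m & 0xCCCC) >> 2)); /* swap pairs */
	m = (uint16_t)(((m & 0x0F0F) << 4) | ((m & 0xF0F0) >> 4)); /* swap nibbles */
	return (uint16_t)((m << 8) | (m >> 8));                    /* swap bytes */
}

int main(void)
{
	assert(rbit16(0x8000) == 0x0001);   /* bit 15 -> bit 0 */
	assert(rbit16(0xFF00) == 0x00FF);
	printf("0x%04x\n", rbit16(0xFFF0)); /* prints 0x0fff */
	return 0;
}

The driver folds the destination and source port masks into one 32-bit value before swapping, but the per-half effect is the same.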
*/ - input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); + input->formatted.bkt_hash = bucket_hash & 0x1FFF; } /** - * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks - * @input_mask: mask to be bit swapped + * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks + * @input_mask: mask to be bit swapped * - * The source and destination port masks for flow director are bit swapped - * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to - * generate a correctly swapped value we need to bit swap the mask and that - * is what is accomplished by this function. + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. **/ -static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) { - u32 mask = ntohs(input_mask->formatted.dst_port); - + u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; - mask |= ntohs(input_mask->formatted.src_port); + mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port); mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); @@ -1512,17 +1652,20 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) #define IXGBE_WRITE_REG_BE32(a, reg, value) \ - IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) #define IXGBE_STORE_AS_BE16(_value) \ - ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask) + union ixgbe_atr_input *input_mask, bool cloud_mode) { /* mask IPv6 since it is currently not supported */ u32 fdirm = IXGBE_FDIRM_DIPv6; u32 fdirtcpm; + u32 fdirip6m; + UNREFERENCED_1PARAMETER(cloud_mode); + DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); /* * Program the relevant mask registers. 
If src/dst_port or src/dst_addr @@ -1564,7 +1707,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, return IXGBE_ERR_CONFIG; } - switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { + switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { case 0x0000: /* mask VLAN ID */ fdirm |= IXGBE_FDIRM_VLANID; @@ -1585,7 +1728,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, return IXGBE_ERR_CONFIG; } - switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { + switch (input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: /* Mask Flex Bytes */ fdirm |= IXGBE_FDIRM_FLEX; @@ -1597,71 +1740,160 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, return IXGBE_ERR_CONFIG; } + if (cloud_mode) { + fdirm |= IXGBE_FDIRM_L3P; + fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + + switch (input_mask->formatted.inner_mac[0] & 0xFF) { + case 0x00: + /* Mask inner MAC, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; + case 0xFF: + break; + default: + hw_dbg(hw, " Error on inner_mac byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { + case 0x0: + /* Mask VXLAN ID */ + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + hw_dbg(hw, " Error on TNI/VNI byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tunnel_type & 0xFFFF) { + case 0x0: + /* Mask tunnel type, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + case 0xFFFF: + break; + default: + hw_dbg(hw, " Error on tunnel type byte mask\n"); + return IXGBE_ERR_CONFIG; + } + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); + + /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, + * FDIRSIP4M and FDIRDIP4M in cloud mode to allow + * L3/L3 packets to tunnel.
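Only three TNI/VNI mask shapes are representable in FDIRIP6M, which is why the switch above rejects everything except all-masked, 24-bit, and unmasked. A compilable sketch of that classification; the flag values are invented stand-ins for the FDIRIP6M bits:

#include <stdbool.h>
#include <stdint.h>

#define MASK_TNI_VNI     0x1u  /* invented stand-in for IXGBE_FDIRIP6M_TNI_VNI */
#define MASK_TNI_VNI_24  0x2u  /* invented stand-in for IXGBE_FDIRIP6M_TNI_VNI_24 */

static bool classify_vni_mask(uint32_t mask, uint32_t *reg)
{
	switch (mask) {
	case 0x00000000:            /* caller masks the whole VNI */
		*reg |= MASK_TNI_VNI;
		return true;
	case 0x00FFFFFF:            /* only the low 24 bits participate */
		*reg |= MASK_TNI_VNI_24;
		return true;
	case 0xFFFFFFFF:            /* fully unmasked: match all VNI bits */
		return true;
	default:                    /* anything else is unsupported */
		return false;
	}
}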
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); + break; + default: + break; + } + } + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); - /* store the TCP/UDP port masks, bit reversed from port layout */ - fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + if (!cloud_mode) { + /* store the TCP/UDP port masks, bit reversed from port + * layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); - /* write both the same so that UDP and TCP use the same mask */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* also use it for SCTP */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + break; + default: + break; + } - /* also use it for SCTP */ - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); - break; - default: - break; + /* store source and destination IP masks (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_mask->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_mask->formatted.dst_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF); } - - /* store source and destination IP masks (big-enian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, - ~input_mask->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, - ~input_mask->formatted.dst_ip[0]); - - return 0; + return IXGBE_SUCCESS; } s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, - u16 soft_id, u8 queue) + u16 soft_id, u8 queue, bool cloud_mode) { u32 fdirport, fdirvlan, fdirhash, fdircmd; + u32 addr_low, addr_high; + u32 cloud_type = 0; s32 err; + UNREFERENCED_1PARAMETER(cloud_mode); - /* currently IPv6 is not supported, must be programmed with 0 */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), - input->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), - input->formatted.src_ip[1]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), - input->formatted.src_ip[2]); + DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); + if (!cloud_mode) { + /* currently IPv6 is not supported, must be programmed with 0 */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); - /* record the source address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, + input->formatted.src_ip[0]); - /* record the first 32 bits of the destination address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + /* record the first 32 bits of the destination address + * (big-endian) */ + IXGBE_WRITE_REG_BE32(hw,
IXGBE_FDIRIPDA, + input->formatted.dst_ip[0]); - /* record source and destination port (little-endian)*/ - fdirport = ntohs(input->formatted.dst_port); - fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; - fdirport |= ntohs(input->formatted.src_port); - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } - /* record vlan (little-endian) and flex_bytes(big-endian) */ - fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes); + /* record VLAN (little-endian) and flex_bytes(big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; - fdirvlan |= ntohs(input->formatted.vlan_id); + fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + if (cloud_mode) { + if (input->formatted.tunnel_type != 0) + cloud_type = 0x80000000; + + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + cloud_type |= addr_high; + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); + } + /* configure FDIRHASH register */ - fdirhash = (__force u32)input->formatted.bkt_hash; + fdirhash = input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1676,6 +1908,8 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; if (queue == IXGBE_FDIR_DROP_QUEUE) fdircmd |= IXGBE_FDIRCMD_DROP; + if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; @@ -1687,7 +1921,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, return err; } - return 0; + return IXGBE_SUCCESS; } s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, @@ -1699,7 +1933,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, s32 err; /* configure FDIRHASH register */ - fdirhash = (__force u32)input->formatted.bkt_hash; + fdirhash = input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1723,102 +1957,179 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter + * @hw: pointer to hardware structure + * @input: input bitstream + * @input_mask: mask for the input bitstream + * @soft_id: software index for the filters + * @queue: queue index to direct traffic to + * @cloud_mode: unused * - * Performs read operation to Omer 
analog register specified. + * Note that the caller to this function must lock before calling, since the + * hardware writes must be protected from one another. **/ -static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask, + u16 soft_id, u8 queue, bool cloud_mode) +{ + s32 err = IXGBE_ERR_CONFIG; + UNREFERENCED_1PARAMETER(cloud_mode); + + DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); + + /* + * Check flow_type formatting, and bail out before we touch the hardware + * if there's a configuration issue + */ + switch (input->formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_IPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; + if (input->formatted.dst_port || input->formatted.src_port) { + hw_dbg(hw, " Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: + if (input->formatted.dst_port || input->formatted.src_port) { + hw_dbg(hw, " Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + /* fall through */ + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + break; + default: + hw_dbg(hw, " Error on flow type input\n"); + return err; + } + + /* program input mask into the HW */ + err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); + if (err) + return err; + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(input, input_mask); + + /* program filters to filter memory */ + return ixgbe_fdir_write_perfect_filter_82599(hw, input, + soft_id, queue, cloud_mode); +} + +/** + * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Omer analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 core_ctl; + DEBUGFUNC("ixgbe_read_analog_reg8_82599"); + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); *val = (u8)core_ctl; - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write * - * Performs write operation to Omer analog register specified. + * Performs write operation to Omer analog register specified. 
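Both analog-register helpers funnel through one CORECTL layout: the register index in bits 15:8 and the data byte in bits 7:0, plus a write-command flag on the read path. A sketch of the packing; the 0x00010000 flag value is my assumption for IXGBE_CORECTL_WRITE_CMD, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define CORECTL_WRITE_CMD 0x00010000u   /* assumed value, for illustration */

static uint32_t corectl_read_cmd(uint8_t reg)
{
	return CORECTL_WRITE_CMD | ((uint32_t)reg << 8);
}

static uint32_t corectl_write_word(uint8_t reg, uint8_t val)
{
	return ((uint32_t)reg << 8) | val;   /* matches (reg << 8) | val above */
}

int main(void)
{
	uint32_t w = corectl_write_word(0x2A, 0x7F);

	printf("word 0x%08x -> reg 0x%02x val 0x%02x\n",
	       w, (w >> 8) & 0xFFu, w & 0xFFu);
	printf("read cmd for reg 0x2A: 0x%08x\n", corectl_read_cmd(0x2A));
	return 0;
}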
**/ -static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 core_ctl; + DEBUGFUNC("ixgbe_write_analog_reg8_82599"); + core_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure + * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure * - * Starts the hardware using the generic start_hw function - * and the generation start_hw function. - * Then performs revision-specific operations, if any. + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. **/ -static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) { - s32 ret_val = 0; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82599"); ret_val = ixgbe_start_hw_generic(hw); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; - ret_val = ixgbe_start_hw_gen2(hw); - if (ret_val) - return ret_val; + ixgbe_start_hw_gen2(hw); /* We need to run link autotry after the driver loads */ hw->mac.autotry_restart = true; - return ixgbe_verify_fw_version_82599(hw); + if (ret_val == IXGBE_SUCCESS) + ret_val = ixgbe_verify_fw_version_82599(hw); +out: + return ret_val; } /** - * ixgbe_identify_phy_82599 - Get physical layer module - * @hw: pointer to hardware structure + * ixgbe_identify_phy_82599 - Get physical layer module + * @hw: pointer to hardware structure * - * Determines the physical layer module found on the current adapter. - * If PHY already detected, maintains current PHY type in hw struct, - * otherwise executes the PHY detection routine. + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. **/ -static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { s32 status; + DEBUGFUNC("ixgbe_identify_phy_82599"); + /* Detect PHY if not unknown - returns success if already detected. */ status = ixgbe_identify_phy_generic(hw); - if (status) { + if (status != IXGBE_SUCCESS) { /* 82599 10GBASE-T requires an external PHY */ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) return status; - status = ixgbe_identify_module_generic(hw); + else + status = ixgbe_identify_module_generic(hw); } /* Set PHY type none if no PHY detected */ if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.type = ixgbe_phy_none; - status = 0; + return IXGBE_SUCCESS; } /* Return error if SFP module has been detected but is not supported */ @@ -1829,172 +2140,295 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) } /** - * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 - * @hw: pointer to hardware structure - * @regval: register value to write to RXCTRL + * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type + * @hw: pointer to hardware structure * - * Enables the Rx DMA unit for 82599 + * Determines physical layer capabilities of the current configuration. 
**/ -static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) { + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read SFP module */ + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + break; + default: + goto out; + break; + } + +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. + * Call identify_sfp because the pluggable module may have changed */ + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); +out: + return physical_layer; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ + + DEBUGFUNC("ixgbe_enable_rx_dma_82599"); + /* * Workaround for 82599 silicon errata when enabling the Rx datapath. * If traffic is incoming before we enable the Rx unit, it could hang * the Rx DMA unit. Therefore, make sure the security engine is * completely disabled prior to enabling the Rx unit. 
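The hunk that follows swaps the generic Rx-buffer helpers for the security-path ones, but the errata ordering stays the same: quiesce the security engine, flip Rx, then re-enable. A sketch of just that sequencing, with every helper a no-op stand-in for the corresponding mac.ops hook:

#include <stdbool.h>

static void disable_sec_rx_path(void) {}  /* stand-ins for the mac.ops hooks */
static void enable_sec_rx_path(void)  {}
static void enable_rx(void)  {}
static void disable_rx(void) {}

/* Mirror of the 82599 workaround: Rx must never be enabled while the
 * security block could still pass traffic, or Rx DMA can hang. */
static void enable_rx_dma(bool rx_enable)
{
	disable_sec_rx_path();
	if (rx_enable)
		enable_rx();
	else
		disable_rx();
	enable_sec_rx_path();
}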
*/ - hw->mac.ops.disable_rx_buff(hw); + + hw->mac.ops.disable_sec_rx_path(hw); if (regval & IXGBE_RXCTRL_RXEN) - hw->mac.ops.enable_rx(hw); + ixgbe_enable_rx(hw); else - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); - hw->mac.ops.enable_rx_buff(hw); + hw->mac.ops.enable_sec_rx_path(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_verify_fw_version_82599 - verify fw version for 82599 - * @hw: pointer to hardware structure + * ixgbe_verify_fw_version_82599 - verify FW version for 82599 + * @hw: pointer to hardware structure * - * Verifies that installed the firmware version is 0.6 or higher - * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * Verifies that installed the firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. * - * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or - * if the FW version is not supported. + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. **/ -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; - u16 offset; - u16 fw_version = 0; + u16 fw_version; + + DEBUGFUNC("ixgbe_verify_fw_version_82599"); /* firmware check is only necessary for SFI devices */ - if (hw->phy.media_type != ixgbe_media_type_fiber) - return 0; + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = IXGBE_SUCCESS; + goto fw_version_out; + } /* get the offset to the Firmware Module block */ - offset = IXGBE_FW_PTR; - if (hw->eeprom.ops.read(hw, offset, &fw_offset)) - goto fw_version_err; - - if (fw_offset == 0 || fw_offset == 0xFFFF) + if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", IXGBE_FW_PTR); return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; /* get the offset to the Pass Through Patch Configuration block */ - offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; - if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) - goto fw_version_err; - - if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF) + if (hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; /* get the firmware version */ - offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; - if (hw->eeprom.ops.read(hw, offset, &fw_version)) - goto fw_version_err; + if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), &fw_version)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); + return IXGBE_ERR_EEPROM_VERSION; + } if (fw_version > 0x5) - status = 0; + status = IXGBE_SUCCESS; +fw_version_out: return status; - -fw_version_err: - hw_err(hw, "eeprom read at offset %d failed\n", offset); - return IXGBE_ERR_EEPROM_VERSION; } /** - * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. - * @hw: pointer to hardware structure + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. 
+ * @hw: pointer to hardware structure * - * Returns true if the LESM FW module is present and enabled. Otherwise - * returns false. Smart Speed must be disabled if LESM FW module is enabled. + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. **/ -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { + bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; + DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); + /* get the offset to the Firmware Module block */ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); - if (status || fw_offset == 0 || fw_offset == 0xFFFF) - return false; + if ((status != IXGBE_SUCCESS) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; /* get the offset to the LESM Parameters block */ status = hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_LESM_PARAMETERS_PTR), &fw_lesm_param_offset); - if (status || - fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF) - return false; + if ((status != IXGBE_SUCCESS) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; - /* get the lesm state word */ + /* get the LESM state word */ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + IXGBE_FW_LESM_STATE_1), &fw_lesm_state); - if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) - return true; + if ((status == IXGBE_SUCCESS) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = true; - return false; +out: + return lesm_enabled; } /** - * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using - * fastest available method + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method * - * @hw: pointer to hardware structure - * @offset: offset of word in EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM * - * Retrieves 16 bit word(s) read from EEPROM + * Retrieves 16 bit word(s) read from EEPROM **/ -static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; - /* If EEPROM is detected and can be addressed using 14 bits, - * use EERD otherwise use bit bang - */ - if (eeprom->type == ixgbe_eeprom_spi && - offset + (words - 1) <= IXGBE_EERD_MAX_ADDR) - return ixgbe_read_eerd_buffer_generic(hw, offset, words, data); - - return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, - data); -} - -/** - * ixgbe_read_eeprom_82599 - Read EEPROM word using - * fastest available method - * - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM - **/ -static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, - u16 offset, u16 *data) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ - if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR) - return ixgbe_read_eerd_generic(hw, offset, data); + if ((eeprom->type == 
ixgbe_eeprom_spi) && + (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, + data); + else + ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, + words, + data); - return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + return ret_val; +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_generic(hw, offset, data); + else + ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + + return ret_val; } /** @@ -2003,10 +2437,9 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, * @hw: pointer to hardware structure * * Reset pipeline by asserting Restart_AN together with LMS change to ensure - * full pipeline reset. Note - We must hold the SW/FW semaphore before writing - * to AUTOC, so this function assumes the semaphore is held. + * full pipeline reset. This function assumes the SW/FW lock is held. **/ -static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { s32 ret_val; u32 anlp1_reg = 0; @@ -2022,14 +2455,12 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; - /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); - /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { - usleep_range(4000, 8000); + msec_delay(4); anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) break; @@ -2041,7 +2472,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) goto reset_pipeline_out; } - ret_val = 0; + ret_val = IXGBE_SUCCESS; reset_pipeline_out: /* Write AUTOC register with original LMS field and Restart_AN */ @@ -2052,23 +2483,25 @@ reset_pipeline_out: } /** - * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: address to read from - * @data: value read + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. **/ -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) { u32 esdp; s32 status; s32 timeout = 200; - if (hw->phy.qsfp_shared_i2c_bus == true) { + DEBUGFUNC("ixgbe_read_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Acquire I2C bus ownership. 
*/ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; @@ -2080,12 +2513,13 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, if (esdp & IXGBE_ESDP_SDP1) break; - usleep_range(5000, 10000); + msec_delay(5); timeout--; } if (!timeout) { - hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + hw_dbg(hw, "Driver can't access resource," + " acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } @@ -2094,7 +2528,8 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: - if (hw->phy.qsfp_shared_i2c_bus == true) { + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; @@ -2106,23 +2541,25 @@ release_i2c_access: } /** - * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: address to write to - * @data: value to write + * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. **/ -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) { u32 esdp; s32 status; s32 timeout = 200; - if (hw->phy.qsfp_shared_i2c_bus == true) { + DEBUGFUNC("ixgbe_write_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Acquire I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; @@ -2134,12 +2571,13 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, if (esdp & IXGBE_ESDP_SDP1) break; - usleep_range(5000, 10000); + msec_delay(5); timeout--; } if (!timeout) { - hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + hw_dbg(hw, "Driver can't access resource," + " acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } @@ -2148,7 +2586,8 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: - if (hw->phy.qsfp_shared_i2c_bus == true) { + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Release I2C bus ownership.
*/ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; @@ -2158,99 +2597,3 @@ release_i2c_access: return status; } - -static const struct ixgbe_mac_operations mac_ops_82599 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_82599, - .start_hw = &ixgbe_start_hw_82599, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_82599, - .enable_rx_dma = &ixgbe_enable_rx_dma_82599, - .disable_rx_buff = &ixgbe_disable_rx_buff_generic, - .enable_rx_buff = &ixgbe_enable_rx_buff_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_device_caps = &ixgbe_get_device_caps_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, - .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, - .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, - .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, - .setup_link = &ixgbe_setup_mac_link_82599, - .set_rxpba = &ixgbe_set_rxpba_generic, - .check_link = &ixgbe_check_mac_link_generic, - .get_link_capabilities = &ixgbe_get_link_capabilities_82599, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_generic, - .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, - .clear_vmdq = &ixgbe_clear_vmdq_generic, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_generic, - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = &ixgbe_setup_sfp_modules_82599, - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, - .init_swfw_sync = NULL, - .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, - .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, - .prot_autoc_read = &prot_autoc_read_82599, - .prot_autoc_write = &prot_autoc_write_82599, - .enable_rx = &ixgbe_enable_rx_generic, - .disable_rx = &ixgbe_disable_rx_generic, -}; - -static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eeprom_82599, - .read_buffer = &ixgbe_read_eeprom_buffer_82599, - .write = &ixgbe_write_eeprom_generic, - .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, - .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, -}; - -static const struct ixgbe_phy_operations phy_ops_82599 = { - .identify = &ixgbe_identify_phy_82599, - .identify_sfp = &ixgbe_identify_module_generic, - .init = &ixgbe_init_phy_ops_82599, - .reset = &ixgbe_reset_phy_generic, - .read_reg = 
&ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -const struct ixgbe_info ixgbe_82599_info = { - .mac = ixgbe_mac_82599EB, - .get_invariants = &ixgbe_get_invariants_82599, - .mac_ops = &mac_ops_82599, - .eeprom_ops = &eeprom_ops_82599, - .phy_ops = &phy_ops_82599, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_8259X, -}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h new file mode 100644 index 000000000000..5219f0721df8 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_82599_H_ +#define _IXGBE_82599_H_ + +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); +#endif /* _IXGBE_82599_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_api.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.c new file mode 100644 index 000000000000..689bc8585933 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.c @@ -0,0 +1,1652 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" + +#define IXGBE_EMPTY_PARAM + +static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) +}; + +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X540) +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_x) +}; + +static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_a) +}; + +/** + * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + if (hw->mac.ops.get_rtrup2tc) + hw->mac.ops.get_rtrup2tc(hw, map); +} + +/** + * ixgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ixgbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_shared_code"); + + /* + * Set the mac type + */ + ixgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + status = ixgbe_init_ops_82598(hw); + break; + case ixgbe_mac_82599EB: + status = ixgbe_init_ops_82599(hw); + break; + case ixgbe_mac_X540: + status = ixgbe_init_ops_X540(hw); + break; + case ixgbe_mac_X550: + status = ixgbe_init_ops_X550(hw); + break; + case ixgbe_mac_X550EM_x: + status = ixgbe_init_ops_X550EM_x(hw); + break; + case ixgbe_mac_X550EM_a: + status = ixgbe_init_ops_X550EM_a(hw); + break; + default: + status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; + + /* NVM Update features structure initialization */ + hw->nvmupd_features.major = IXGBE_NVMUPD_FEATURES_API_VER_MAJOR; + hw->nvmupd_features.minor = IXGBE_NVMUPD_FEATURES_API_VER_MINOR; + hw->nvmupd_features.size = sizeof(hw->nvmupd_features); + memset(hw->nvmupd_features.features, 0x0, + IXGBE_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN * + sizeof(*hw->nvmupd_features.features)); + + hw->nvmupd_features.features[0] = + IXGBE_NVMUPD_FEATURE_REGISTER_ACCESS_SUPPORT; + + return status; +} + +/** + * ixgbe_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
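Reviewer note, not part of the patch: ixgbe_init_shared_code() above expects the caller to zero the ixgbe_hw structure and fill in the identification fields before the call. A minimal probe-path sketch follows; the surrounding adapter struct, the ioaddr mapping, and the pdev reads are hypothetical stand-ins for whatever the probing driver actually uses:

	struct ixgbe_hw *hw = &adapter->hw;	/* hypothetical adapter struct */
	s32 status;

	/* Shared code requires a zeroed hw struct with IDs filled in. */
	memset(hw, 0, sizeof(*hw));
	hw->hw_addr = ioaddr;			/* mapped BAR0 from probe (assumed) */
	hw->back = adapter;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	status = ixgbe_init_shared_code(hw);
	if (status != IXGBE_SUCCESS)
		return status;	/* e.g. IXGBE_ERR_DEVICE_NOT_SUPPORTED */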
+ **/ +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_mac_type\n"); + + if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported vendor id: %x", hw->vendor_id); + return IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->mvals = ixgbe_mvals_base; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + hw->mac.type = ixgbe_mac_82598EB; + break; + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_XAUI_LOM: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_CX4: + case IXGBE_DEV_ID_82599_T3_LOM: + hw->mac.type = ixgbe_mac_82599EB; + break; + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + hw->mac.type = ixgbe_mac_X540; + hw->mvals = ixgbe_mvals_X540; + break; + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + hw->mac.type = ixgbe_mac_X550; + hw->mvals = ixgbe_mvals_X550; + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->mac.type = ixgbe_mac_X550EM_x; + hw->mvals = ixgbe_mvals_X550EM_x; + break; + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->mac.type = ixgbe_mac_X550EM_a; + hw->mvals = ixgbe_mvals_X550EM_a; + break; + default: + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported device id: %x", + hw->device_id); + break; + } + + hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + return ret_val; +} + +/** + * ixgbe_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the hardware + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_reset_hw - Performs a hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performs a PHY reset, and performs a MAC reset + **/ +s32 ixgbe_reset_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and 
media type, + * clears all on chip counters, initializes receive address registers, + * multicast table, VLAN filter table, calls routine to setup link and + * flow control settings, and leaves transmit and receive units disabled + * and uninitialized. + **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_hw_cntrs - Clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), + ixgbe_media_type_unknown); +} + +/** + * ixgbe_get_mac_addr - Get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from the first Receive Address Register + * (RAR0) A reset of the adapter must have been performed prior to calling + * this function in order for the MAC address to have been loaded from the + * EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_san_mac_addr - Get SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + **/ +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_san_mac_addr - Write a SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Writes A SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word for device capabilities + * + * Reads the extra device capabilities from the EEPROM + **/ +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, + (hw, device_caps), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, + (hw, wwnn_prefix, wwpn_prefix), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, + (hw, bs), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_of_tx_queues - Get Tx queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_tx_queues; +} + +/** + * ixgbe_get_num_of_rx_queues - Get Rx queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_rx_queues; +} + +/** + * ixgbe_stop_adapter - Disable Rx/Tx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * ixgbe_identify_phy - Get PHY type + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. 
+ **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), + IXGBE_NOT_IMPLEMENTED); + } + + return status; +} + +/** + * ixgbe_reset_phy - Perform a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), + IXGBE_NOT_IMPLEMENTED); + } + return status; +} + +/** + * ixgbe_get_phy_firmware_version - Get PHY firmware version + * @hw: pointer to hardware structure + * @firmware_version: pointer to firmware version + **/ +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, + (hw, firmware_version), + IXGBE_NOT_IMPLEMENTED); + return status; +} + +/** + * ixgbe_read_phy_reg - Read PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: type of device you want to communicate with + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_phy_reg - Write PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: type of device you want to communicate with + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link - Restart PHY autoneg + * @hw: pointer to hardware structure + * + * Restarts autonegotiation on the PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_internal_phy - Configure integrated PHY + * @hw: pointer to hardware structure + * + * Reconfigure the integrated PHY in order to enable it to talk to the + * external PHY. + * Returns success if not implemented, since nothing needs to be done in this + * case. + */ +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), + IXGBE_SUCCESS); +} + +/** + * ixgbe_check_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: link speed + * @link_up: true when link is up + * + * Reads a PHY register to determine if link is up and the current speed for + * the PHY.
+ **/ +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, + link_up), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link_speed - Set auto advertise + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Sets the auto advertised capabilities + **/ +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_phy_power - Control the phy power state + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ +s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) +{ + return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_check_link - Get link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, + link_up, link_up_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_tx_laser - Disable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to disable the laser on SFI optics. + **/ +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); +} + +/** + * ixgbe_enable_tx_laser - Enable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to enable the laser on SFI optics. + **/ +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); +} + +/** + * ixgbe_flap_tx_laser - flap Tx laser to start autotry process + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support then + * flap the tx laser to alert the link partner to start autotry + * process on its end. + **/ +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); +} + +/** + * ixgbe_setup_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_mac_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. 
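To illustrate the link wrappers above, a hedged sketch (not part of the patch): a caller typically advertises the speeds it wants via ixgbe_setup_link() and then polls ixgbe_check_link(). The speed mask assumes the usual IXGBE_LINK_SPEED_* flags from ixgbe_type.h:

	ixgbe_link_speed speed;
	bool link_up;

	/* Advertise 1G and 10G and wait for autoneg to complete. */
	ixgbe_setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL |
			 IXGBE_LINK_SPEED_1GB_FULL, true);

	/* Read back the link state without waiting. */
	ixgbe_check_link(hw, &speed, &link_up, false);
	if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
		/* link negotiated at 10G */;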
+ **/ +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_link_capabilities - Returns link capabilities + * @hw: pointer to hardware structure + * @speed: link speed capabilities + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities of the current configuration. + **/ +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, + speed, autoneg), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_on - Turn on LEDs + * @hw: pointer to hardware structure + * @index: led number to turn on + * + * Turns on the software controllable LEDs. + **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_off - Turn off LEDs + * @hw: pointer to hardware structure + * @index: led number to turn off + * + * Turns off the software controllable LEDs. + **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_start - Blink LEDs + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Blink LED based on index. + **/ +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_stop - Stop blinking LEDs + * @hw: pointer to hardware structure + * @index: led number to stop + * + * Stop blinking LED based on index. + **/ +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_eeprom_params - Initialize EEPROM parameters + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), + IXGBE_NOT_IMPLEMENTED); +} + + +/** + * ixgbe_write_eeprom - Write word to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. + **/ +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word(s) to be written to the EEPROM + * @words: number of words + * + * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. 
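The checksum warning above is the key invariant of the EEPROM write helpers: every write must be followed by an update-checksum call before the image is valid again. A minimal sketch; the offset 0x10 and value are arbitrary illustrations, not real EEPROM map locations:

	u16 checksum;

	if (ixgbe_write_eeprom(hw, 0x10, 0xABCD) == IXGBE_SUCCESS) {
		/* Re-seal the image, then verify it. */
		ixgbe_update_eeprom_checksum(hw);
		if (ixgbe_validate_eeprom_checksum(hw, &checksum) !=
		    IXGBE_SUCCESS)
			/* checksum still bad: EEPROM content suspect */;
	}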
+ **/ +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom - Read word from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM + **/ +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit word(s) from EEPROM + * @words: number of words + * + * Reads 16 bit word(s) from EEPROM + **/ +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum + **/ +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, + (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is already in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, + (hw, addr, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, + enable_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_rar - Clear Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to clear + * + * Clears the ethernet address in a receive address register.
+ **/ +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vmdq - Associate a VMDq index with a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to associate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); + +} + +/** + * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address + * @hw: pointer to hardware structure + * @vmdq: VMDq default pool index + **/ +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, + (hw, vmdq), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to disassociate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. + * @hw: pointer to hardware structure + **/ +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) +{ + return hw->mac.num_rar_entries; +} + +/** + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new unicast addresses + * @addr_count: number of addresses + * @func: iterator function to walk the unicast address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, + addr_list, addr_count, func), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table.
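The @func iterator is the non-obvious part of this interface: the MAC layer does not assume any particular list layout, it calls back once per address. A sketch, assuming the shared-code typedef has the shape u8 *(*ixgbe_mc_addr_itr)(struct ixgbe_hw *, u8 **, u32 *) and the caller keeps a flat array of 6-byte addresses (ETH_ALEN comes from linux/if_ether.h):

	/* Walk a flat array of 6-byte MAC addresses. */
	static u8 *mc_flat_array_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
				     u32 *vmdq)
	{
		u8 *addr = *mc_addr_ptr;

		*vmdq = 0;			/* default pool */
		*mc_addr_ptr = addr + ETH_ALEN;	/* step to the next entry */
		return addr;
	}

	/* Caller side: replace the filter with two multicast MACs. */
	u8 mc_list[2 * ETH_ALEN];	/* filled in by the caller */

	ixgbe_update_mc_addr_list(hw, mc_list, 2, mc_flat_array_itr, true);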
+ **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, + mc_addr_list, mc_addr_count, func, clear), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filter table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * @vlvf_bypass: boolean flag indicating updating the default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, + vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating the default pool is okay + * + * Turn on/off specified bit in VLVF table. + **/ +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + u32 *vfta_delta, u32 vfta, bool vlvf_bypass) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, + vlan_on, vfta_delta, vfta, vlvf_bypass), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_toggle_txdctl - Toggle VF's queues + * @hw: pointer to hardware structure + * @vind: VMDq pool index + * + * Enable and disable each queue in VF. + */ +s32 ixgbe_toggle_txdctl(struct ixgbe_hw *hw, u32 vind) +{ + return ixgbe_call_func(hw, hw->mac.ops.toggle_txdctl, (hw, + vind), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Configures the flow control settings based on SW configuration. + **/ +s32 ixgbe_fc_enable(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control.
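For context, an illustrative sketch (assuming the fc.requested_mode field and the ixgbe_fc_full enum from ixgbe_type.h): ixgbe_setup_fc() is the init-time half and ixgbe_fc_enable() the post-link-up half of flow control configuration:

	/* At init: request symmetric (Tx+Rx) pause frames. */
	hw->fc.requested_mode = ixgbe_fc_full;
	ixgbe_setup_fc(hw);

	/* Later, once link is up, program the negotiated settings. */
	ixgbe_fc_enable(hw);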
+ **/ +s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW + * @hw: pointer to hardware structure + * @maj: driver major number to be sent to firmware + * @min: driver minor number to be sent to firmware + * @build: driver build number to be sent to firmware + * @ver: driver version number to be sent to firmware + * @len: length of driver_ver string + * @driver_ver: driver string + **/ +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver, u16 len, char *driver_ver) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, + build, ver, len, driver_ver), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + **/ +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_config - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, the dmac enable bit is cleared. + **/ +s32 ixgbe_dmac_config(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enables dmac. + **/ +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC and set high priority bit for + * FCOE TC. The dmac enable bit must be cleared before configuring. + **/ +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on the enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified.
+ * + **/ +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_source_address_pruning - Enable/Disable source address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to toggle source address pruning + **/ +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, enable, pool); +} + +/** + * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: VF pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + if (hw->mac.ops.set_ethertype_anti_spoofing) + hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); +} + +/** + * ixgbe_read_iosf_sb_reg - Read 32 bit register through IOSF Sideband + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: type of device you want to communicate with + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: type of device you want to communicate with + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mdd - Disable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_disable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_mdd) + hw->mac.ops.disable_mdd(hw); +} + +/** + * ixgbe_enable_mdd - Enable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_mdd) + hw->mac.ops.enable_mdd(hw); +} + +/** + * ixgbe_mdd_event - Handle malicious driver detection event + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + **/ +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + if (hw->mac.ops.mdd_event) + hw->mac.ops.mdd_event(hw, vf_bitmap); +} + +/** + * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver + * detection event + * @hw: pointer to hardware structure + * @vf: vf index + * + **/ +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) +{ + if (hw->mac.ops.restore_mdd_vf) + hw->mac.ops.restore_mdd_vf(hw, vf); +} + +/** + * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode + * @hw: pointer to hardware structure + * + **/ +bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.fw_recovery_mode) + return hw->mac.ops.fw_recovery_mode(hw); + return false; +} + +/** + *
ixgbe_enter_lplu - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). + **/ +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_handle_lasi - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If a high temperature + * failure alarm is raised then return an error, else if the link status + * changed then set up the internal/external PHY link. + * + * Return IXGBE_ERR_OVERTEMP if the interrupt is a high temperature + * failure alarm, else return the PHY access status. + */ +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_analog_reg8 - Reads 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation on the analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_analog_reg8 - Writes 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. + * @hw: pointer to hardware structure + * + * Initializes the Unicast Table Arrays to zero on device load. This + * is part of the Rx init addr execution path. + **/ +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error.
+ */ +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, + reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link_unlocked - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + */ +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link_unlocked - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, + u8 byte_offset, u8 eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
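As a usage sketch of the I2C EEPROM wrappers (offset 0 and the 0x03 identifier value follow SFF-8472 conventions; treat the constants here as illustrative, not as driver defines):

	u8 identifier = 0;

	/* Byte 0 of the SFP module map identifies the module type. */
	if (ixgbe_read_i2c_eeprom(hw, 0x0, &identifier) == IXGBE_SUCCESS &&
	    identifier == 0x3)
		/* 0x03 = SFP/SFP+ module present */;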
+ **/ +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_supported_physical_layer - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, + (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); +} + +/** + * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics + * @hw: pointer to hardware structure + * @regval: bitfield to write to the Rx DMA register + * + * Enables the Rx DMA unit of the device. + **/ +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, + (hw, regval), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path. + **/ +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, + (hw, mask), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_release_swfw_semaphore - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + if (hw->mac.ops.release_swfw_sync) + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore + * @hw: pointer to hardware structure + * + * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. + * Regardless of whether it succeeds or not, it then releases the semaphore. + * This function is called to recover from catastrophic failures that + * may have left the semaphore locked. + **/ +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); +} + +void ixgbe_disable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_rx) + hw->mac.ops.disable_rx(hw); +} + +void ixgbe_enable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_rx) + hw->mac.ops.enable_rx(hw); +} + +/** + * ixgbe_set_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the rate select.
+ */ +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + if (hw->mac.ops.set_rate_select_speed) + hw->mac.ops.set_rate_select_speed(hw, speed); +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_api.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.h new file mode 100644 index 000000000000..7387cacd374e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_API_H_ +#define _IXGBE_API_H_ + +#include "ixgbe_type.h" + +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); + +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw); + +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); + +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); + +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on); +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); + +s32 
ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); + +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear); +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); +s32 ixgbe_enable_mc(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc(struct ixgbe_hw *hw); +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); +s32 ixgbe_toggle_txdctl(struct ixgbe_hw *hw, u32 vind); +s32 ixgbe_fc_enable(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver, u16 len, char *driver_ver); +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw); +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask, bool cloud_mode); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common); +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte(struct 
ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue); +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw); +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); +s32 ixgbe_dmac_config(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int vf); +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, + int vf); +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data); +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data); +void ixgbe_disable_mdd(struct ixgbe_hw *hw); +void ixgbe_enable_mdd(struct ixgbe_hw *hw); +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); +bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw); +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); +void ixgbe_disable_rx(struct ixgbe_hw *hw); +void ixgbe_enable_rx(struct ixgbe_hw *hw); +s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); + +#endif /* _IXGBE_API_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c new file mode 100644 index 000000000000..121382abb143 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + + +#include "ixgbe.h" +#include "ixgbe_cna.h" +#include "ixgbe_vmdq.h" + +static int ixgbe_cna_open(struct net_device *cnadev) +{ + struct ixgbe_adapter *adapter = netdev_priv(cnadev); + + strscpy(cnadev->name, adapter->netdev->name, sizeof(cnadev->name)); + + DPRINTK(PROBE, INFO, "CNA pseudo device opened %s\n", cnadev->name); + return 0; +} + +static int ixgbe_cna_close(struct net_device *cnadev) +{ + struct ixgbe_adapter *adapter = netdev_priv(cnadev); + + DPRINTK(PROBE, INFO, "CNA pseudo device closed %s\n", cnadev->name); + return 0; +} + +static int ixgbe_cna_change_mtu(struct net_device *cnadev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(cnadev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", + cnadev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + cnadev->mtu = new_mtu; + + return 0; +} + +int ixgbe_cna_enable(struct ixgbe_adapter *adapter) +{ + struct net_device *cnadev; + struct net_device *netdev; + int err; + u64 wwpn; + u64 wwnn; + + netdev = adapter->netdev; + /* + * Unlike a regular net device, the CNA device does not have a + * privately allocated region, because we do not want to duplicate + * the ixgbe_adapter information. The CNA device still needs to + * access the ixgbe_adapter while allocating queues and the like, + * so cnadev->priv needs to point to netdev->priv. + */ + cnadev = alloc_etherdev_mq(0, MAX_TX_QUEUES); + if (!cnadev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + adapter->cnadev = cnadev; + SET_MODULE_OWNER(cnadev); + + cnadev->priv = adapter; + + cnadev->open = &ixgbe_cna_open; + cnadev->stop = &ixgbe_cna_close; + cnadev->change_mtu = &ixgbe_cna_change_mtu; + cnadev->do_ioctl = netdev->do_ioctl; + cnadev->hard_start_xmit = netdev->hard_start_xmit; +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + cnadev->vlan_rx_register = netdev->vlan_rx_register; + cnadev->vlan_rx_add_vid = netdev->vlan_rx_add_vid; + cnadev->vlan_rx_kill_vid = netdev->vlan_rx_kill_vid; +#endif + ixgbe_set_ethtool_ops(cnadev); + +#if IS_ENABLED(CONFIG_DCB) + cnadev->dcbnl_ops = netdev->dcbnl_ops; +#endif /* CONFIG_DCB */ + + cnadev->mtu = netdev->mtu; + cnadev->pdev = netdev->pdev; + cnadev->gso_max_size = GSO_MAX_SIZE; + cnadev->features = netdev->features | NETIF_F_CNA | NETIF_F_HW_VLAN_FILTER; + + /* set the MAC address to SAN mac address */ + if (ixgbe_validate_mac_addr(adapter->hw.mac.san_addr) == 0) + memcpy(cnadev->dev_addr, + adapter->hw.mac.san_addr, + cnadev->addr_len); + + cnadev->features |= NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU | + NETIF_F_FSO; + + cnadev->ndo_fcoe_ddp_setup = &ixgbe_fcoe_ddp_get; + cnadev->ndo_fcoe_ddp_done = &ixgbe_fcoe_ddp_put; + cnadev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + + netif_carrier_off(cnadev); + netif_tx_stop_all_queues(cnadev); + + VMKNETDDI_REGISTER_QUEUEOPS(cnadev, ixgbe_netqueue_ops); + + err = register_netdev(cnadev); + if (err) + goto err_register; + + DPRINTK(PROBE, INFO, "CNA pseudo device registered %s\n", netdev->name); + + return err; + +err_register: + DPRINTK(PROBE, INFO, "CNA pseudo device cannot be registered %s\n", + netdev->name); + free_netdev(cnadev); +err_alloc_etherdev: + DPRINTK(PROBE, INFO, "CNA cannot be enabled on %s\n", netdev->name); + adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +
adapter->ring_feature[RING_F_FCOE].indices = 0; + return err; +} + +void ixgbe_cna_disable(struct ixgbe_adapter *adapter) +{ + if (!(adapter->flags2 & IXGBE_FLAG2_CNA_ENABLED)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; + + if (adapter->cnadev) { + unregister_netdev(adapter->cnadev); + DPRINTK(PROBE, INFO, "CNA pseudo device unregistered %s\n", + adapter->cnadev->name); + + free_netdev(adapter->cnadev); + adapter->cnadev = NULL; + } +} + +/* ixgbe_cna.c */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h new file mode 100644 index 000000000000..07229f52f7a6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_CNA_H_ +#define _IXGBE_CNA_H_ + +int ixgbe_cna_enable(struct ixgbe_adapter *adapter); +void ixgbe_cna_disable(struct ixgbe_adapter *adapter); + +#endif /* _IXGBE_CNA_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 0bd1294ba517..f70f3563c066 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,50 +1,133 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/netdevice.h> - -#include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_api.h" -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); -static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); -static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); -static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count); -static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); -static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -static void ixgbe_release_eeprom(struct ixgbe_hw *hw); +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw); -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset); +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -static s32
ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); -static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); - -/* Base table for registers values that change by MAC */ -const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(8259X) -}; /** - * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow - * control - * @hw: pointer to hardware structure + * ixgbe_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_mac_info *mac = &hw->mac; + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_init_ops_generic"); + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; + /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ + if (eec & IXGBE_EEC_PRES) { + eeprom->ops.read = ixgbe_read_eerd_generic; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; + } else { + eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.read_buffer = + ixgbe_read_eeprom_buffer_bit_bang_generic; + } + eeprom->ops.write = ixgbe_write_eeprom_generic; + eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; + eeprom->ops.validate_checksum = + ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; + + /* MAC */ + mac->ops.init_hw = ixgbe_init_hw_generic; + mac->ops.reset_hw = NULL; + mac->ops.start_hw = ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; + mac->ops.get_media_type = NULL; + mac->ops.get_supported_physical_layer = NULL; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; + mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = ixgbe_get_bus_info_generic; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; + mac->ops.prot_autoc_read = prot_autoc_read_generic; + mac->ops.prot_autoc_write = prot_autoc_write_generic; + + /* LEDs */ + mac->ops.led_on = ixgbe_led_on_generic; + mac->ops.led_off = ixgbe_led_off_generic; + mac->ops.blink_led_start = ixgbe_blink_led_start_generic; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; + mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ixgbe_set_rar_generic; + mac->ops.clear_rar = ixgbe_clear_rar_generic; + mac->ops.insert_mac_addr = NULL; + mac->ops.set_vmdq = NULL; + mac->ops.clear_vmdq = NULL; + mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = ixgbe_enable_mc_generic; + mac->ops.disable_mc = ixgbe_disable_mc_generic; + mac->ops.clear_vfta = NULL; + mac->ops.set_vfta = NULL; + mac->ops.set_vlvf = NULL; + mac->ops.init_uta_tables = NULL; + mac->ops.enable_rx = ixgbe_enable_rx_generic; + mac->ops.disable_rx = ixgbe_disable_rx_generic; + mac->ops.toggle_txdctl = 
ixgbe_toggle_txdctl_generic; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_generic; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.fc_autoneg = ixgbe_fc_autoneg; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.setup_link = NULL; + mac->ops.check_link = NULL; + mac->ops.dmac_config = NULL; + mac->ops.dmac_update_tcs = NULL; + mac->ops.dmac_config_tcs = NULL; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. * - * There are several phys that do not support autoneg flow control. This - * function check the device id to see if the associated phy supports - * autoneg flow control. **/ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { @@ -52,12 +135,17 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) ixgbe_link_speed speed; bool link_up; + DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: /* flow control autoneg black list */ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: supported = false; break; default: @@ -92,39 +180,36 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) supported = true; break; default: - break; + supported = false; } default: break; } - if (!supported) - hw_dbg(hw, "Device %x does not support flow control autoneg\n", - hw->device_id); - return supported; } /** - * ixgbe_setup_fc_generic - Set up flow control - * @hw: pointer to hardware structure + * ixgbe_setup_fc_generic - Set up flow control + * @hw: pointer to hardware structure * - * Called at init time to set up flow control. + * Called at init time to set up flow control. **/ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { - s32 ret_val = 0; + s32 ret_val = IXGBE_SUCCESS; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; bool locked = false; - /* - * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
- */ + DEBUGFUNC("ixgbe_setup_fc_generic"); + + /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } /* @@ -143,17 +228,18 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) case ixgbe_media_type_backplane: /* some MAC's need RMW protection on AUTOC */ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; /* fall through - only backplane uses autoc */ + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); break; case ixgbe_media_type_copper: - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, &reg_cu); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu); break; default: break; } @@ -214,11 +300,14 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; } - if (hw->mac.type != ixgbe_mac_X540) { + if (hw->mac.type < ixgbe_mac_X540) { /* * Enable auto-negotiation between the MAC & PHY; * the MAC will advertise clause 37 flow control. @@ -240,32 +329,29 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) * */ if (hw->phy.media_type == ixgbe_media_type_backplane) { - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on, likewise reset_pipeline requries the lock as - * it also writes AUTOC. 
- */ + reg_bp |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); if (ret_val) - return ret_val; - + goto out; } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - ixgbe_device_supports_autoneg_fc(hw)) { - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, reg_cu); + (ixgbe_device_supports_autoneg_fc(hw))) { + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); } - hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); +out: return ret_val; } /** - * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure * - * Starts the hardware by filling the bus info structure and media type, clears - * all on chip counters, initializes receive address registers, multicast - * table, VLAN filter table, calls routine to set up link and flow control - * settings, and leaves transmit and receive units disabled and uninitialized + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized **/ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { @@ -273,11 +359,12 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) u32 ctrl_ext; u16 device_caps; + DEBUGFUNC("ixgbe_start_hw_generic"); + /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); - /* Identify the PHY */ - hw->phy.ops.identify(hw); + /* PHY ops initialization must be done in reset_hw() */ /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); @@ -291,18 +378,18 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); - /* Setup flow control if method for doing so */ - if (hw->mac.ops.setup_fc) { - ret_val = hw->mac.ops.setup_fc(hw); - if (ret_val) - return ret_val; + /* Setup flow control */ + ret_val = ixgbe_setup_fc(hw); + if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) { + hw_dbg(hw, "Flow control setup failed, returning %d\n", ret_val); + return ret_val; } - /* Cashe bit indicating need for crosstalk fix */ + /* Cache bit indicating need for crosstalk fix */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) hw->need_crosstalk_fix = false; @@ -317,22 +404,23 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) /* Clear adapter stopped flag */ hw->adapter_stopped = false; - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_start_hw_gen2 - Init sequence for common device family - * @hw: pointer to hw structure + * ixgbe_start_hw_gen2 - Init sequence for common device family + * @hw: pointer to hw structure * * Performs the init sequence common to the second generation * of 10 GbE devices. 
* Devices in the second generation: - * 82599 - * X540 + * 82599 + * X540 **/ -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +void ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; + u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { @@ -341,27 +429,41 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); - return 0; + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } } /** - * ixgbe_init_hw_generic - Generic hardware initialization - * @hw: pointer to hardware structure + * ixgbe_init_hw_generic - Generic hardware initialization + * @hw: pointer to hardware structure * - * Initialize the hardware by resetting the hardware, filling the bus info - * structure and media type, clears all on chip counters, initializes receive - * address registers, multicast table, VLAN filter table, calls routine to set - * up link and flow control settings, and leaves transmit and receive units - * disabled and uninitialized + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized **/ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { s32 status; + DEBUGFUNC("ixgbe_init_hw_generic"); + /* Reset the hardware */ status = hw->mac.ops.reset_hw(hw); - if (status == 0) { + if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) { /* Start the HW */ status = hw->mac.ops.start_hw(hw); } @@ -370,20 +472,25 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) if (hw->mac.ops.init_led_link_act) hw->mac.ops.init_led_link_act(hw); + if (status != IXGBE_SUCCESS) + hw_dbg(hw, "Failed to initialize HW, STATUS = %d\n", status); + return status; } /** - * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters - * @hw: pointer to hardware structure + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure * - * Clears all hardware statistics counters by reading them from the hardware - * Statistics counters are clear on read. + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
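+ *
+ * Because the counters are clear-on-read, a single pass is enough to zero
+ * them. An illustrative caller-side sketch (not part of the original patch;
+ * the clear_hw_cntrs hook is wired up by ixgbe_init_ops_generic above):
+ *
+ *	if (hw->mac.ops.clear_hw_cntrs)
+ *		hw->mac.ops.clear_hw_cntrs(hw);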
**/ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) { u16 i = 0; + DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); + IXGBE_READ_REG(hw, IXGBE_CRCERRS); IXGBE_READ_REG(hw, IXGBE_ILLERRC); IXGBE_READ_REG(hw, IXGBE_ERRBC); @@ -471,23 +578,27 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) - hw->phy.ops.identify(hw); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); + ixgbe_identify_phy(hw); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_read_pba_string_generic - Reads part number string from EEPROM - * @hw: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length * - * Reads the part number string from the EEPROM. + * Reads the part number string from the EEPROM. **/ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) @@ -498,6 +609,8 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u16 offset; u16 length; + DEBUGFUNC("ixgbe_read_pba_string_generic"); + if (pba_num == NULL) { hw_dbg(hw, "PBA string buffer was null\n"); return IXGBE_ERR_INVALID_ARGUMENT; @@ -552,7 +665,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, pba_num[offset] += 'A' - 0xA; } - return 0; + return IXGBE_SUCCESS; } ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); @@ -587,17 +700,17 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, } pba_num[offset * 2] = '\0'; - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_get_mac_addr_generic - Generic get MAC address - * @hw: pointer to hardware structure - * @mac_addr: Adapter MAC address + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address * - * Reads the adapter's MAC address from first Receive Address Register (RAR0) - * A reset of the adapter must be performed prior to calling this function - * in order for the MAC address to have been loaded from the EEPROM into RAR0 + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) { @@ -605,6 +718,8 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) u32 rar_low; u16 i; + DEBUGFUNC("ixgbe_get_mac_addr_generic"); + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); @@ -614,81 +729,102 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) for (i = 0; i < 2; i++) mac_addr[i+4] = (u8)(rar_high >> (i*8)); - return 0; -} - -enum ixgbe_bus_width 
ixgbe_convert_bus_width(u16 link_status) -{ - switch (link_status & IXGBE_PCI_LINK_WIDTH) { - case IXGBE_PCI_LINK_WIDTH_1: - return ixgbe_bus_width_pcie_x1; - case IXGBE_PCI_LINK_WIDTH_2: - return ixgbe_bus_width_pcie_x2; - case IXGBE_PCI_LINK_WIDTH_4: - return ixgbe_bus_width_pcie_x4; - case IXGBE_PCI_LINK_WIDTH_8: - return ixgbe_bus_width_pcie_x8; - default: - return ixgbe_bus_width_unknown; - } -} - -enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) -{ - switch (link_status & IXGBE_PCI_LINK_SPEED) { - case IXGBE_PCI_LINK_SPEED_2500: - return ixgbe_bus_speed_2500; - case IXGBE_PCI_LINK_SPEED_5000: - return ixgbe_bus_speed_5000; - case IXGBE_PCI_LINK_SPEED_8000: - return ixgbe_bus_speed_8000; - default: - return ixgbe_bus_speed_unknown; - } + return IXGBE_SUCCESS; } /** - * ixgbe_get_bus_info_generic - Generic set PCI bus info - * @hw: pointer to hardware structure + * ixgbe_set_pci_config_data_generic - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space * - * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + if (hw->bus.type == ixgbe_bus_type_unknown) + hw->bus.type = ixgbe_bus_type_pci_express; + + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ixgbe_bus_width_pcie_x1; + break; + case IXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ixgbe_bus_width_pcie_x2; + break; + case IXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ixgbe_bus_width_pcie_x4; + break; + case IXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = ixgbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = ixgbe_bus_width_unknown; + break; + } + + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = ixgbe_bus_speed_2500; + break; + case IXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = ixgbe_bus_speed_5000; + break; + case IXGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = ixgbe_bus_speed_8000; + break; + default: + hw->bus.speed = ixgbe_bus_speed_unknown; + break; + } + + mac->ops.set_lan_id(hw); +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ixgbe_hw structure. **/ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) { u16 link_status; - hw->bus.type = ixgbe_bus_type_pci_express; + DEBUGFUNC("ixgbe_get_bus_info_generic"); /* Get the negotiated link width and speed from PCI config space */ - link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); - hw->bus.width = ixgbe_convert_bus_width(link_status); - hw->bus.speed = ixgbe_convert_bus_speed(link_status); + ixgbe_set_pci_config_data_generic(hw, link_status); - hw->mac.ops.set_lan_id(hw); - - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices - * @hw: pointer to the HW structure + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure * - * Determines the LAN function id by reading memory-mapped registers - * and swaps the port value if requested. 
+ * Determines the LAN function id by reading memory-mapped registers and swaps + * the port value if requested, and set MAC instance for devices that share + * CS4227. **/ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; - u16 ee_ctrl_4; u32 reg; + u16 ee_ctrl_4; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); reg = IXGBE_READ_REG(hw, IXGBE_STATUS); bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; - bus->lan_id = bus->func; + bus->lan_id = (u8)bus->func; /* check for a port swap */ - reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; @@ -701,19 +837,21 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) } /** - * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units - * @hw: pointer to hardware structure + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure * - * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, - * disables transmit and receive units. The adapter_stopped flag is used by - * the shared code and drivers to determine if the adapter is in a stopped - * state and should not touch the hardware. + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. **/ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) { u32 reg_val; u16 i; + DEBUGFUNC("ixgbe_stop_adapter_generic"); + /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware @@ -721,7 +859,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) hw->adapter_stopped = true; /* Disable the receive unit */ - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -743,27 +881,27 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) /* flush all queues disables */ IXGBE_WRITE_FLUSH(hw); - usleep_range(1000, 2000); + msec_delay(2); /* - * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ return ixgbe_disable_pcie_master(hw); } /** - * ixgbe_init_led_link_act_generic - Store the LED index link/activity. - * @hw: pointer to hardware structure + * ixgbe_init_led_link_act_generic - Store the LED index link/activity. + * @hw: pointer to hardware structure * - * Store the index for the link active LED. This will be used to support - * blinking the LED. + * Store the index for the link active LED. This will be used to support + * blinking the LED. **/ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; u32 led_reg, led_mode; - u16 i; + u8 i; led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); @@ -772,38 +910,38 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == - IXGBE_LED_LINK_ACTIVE) { + IXGBE_LED_LINK_ACTIVE) { mac->led_link_act = i; - return 0; + return IXGBE_SUCCESS; } } - /* If LEDCTL register does not have the LED link active set, then use + /* + * If LEDCTL register does not have the LED link active set, then use * known MAC defaults. 
*/ switch (hw->mac.type) { - case ixgbe_mac_x550em_a: - mac->led_link_act = 0; - break; + case ixgbe_mac_X550EM_a: case ixgbe_mac_X550EM_x: mac->led_link_act = 1; break; default: mac->led_link_act = 2; } - - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_led_on_generic - Turns on the software controllable LEDs. - * @hw: pointer to hardware structure - * @index: led number to turn on + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on **/ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + DEBUGFUNC("ixgbe_led_on_generic"); + if (index > 3) return IXGBE_ERR_PARAM; @@ -813,18 +951,20 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_led_off_generic - Turns off the software controllable LEDs. - * @hw: pointer to hardware structure - * @index: led number to turn off + * ixgbe_led_off_generic - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off **/ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + DEBUGFUNC("ixgbe_led_off_generic"); + if (index > 3) return IXGBE_ERR_PARAM; @@ -834,15 +974,15 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_init_eeprom_params_generic - Initialize EEPROM params - * @hw: pointer to hardware structure + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params + * @hw: pointer to hardware structure * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) { @@ -850,6 +990,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) u32 eec; u16 eeprom_size; + DEBUGFUNC("ixgbe_init_eeprom_params_generic"); + if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->type = ixgbe_eeprom_none; /* Set default semaphore delay to 10ms which is a well @@ -862,7 +1004,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) * Check for EEPROM present first. 
* If not present leave as none */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (eec & IXGBE_EEC_PRES) { eeprom->type = ixgbe_eeprom_spi; @@ -872,43 +1014,50 @@ */ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = BIT(eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) eeprom->address_bits = 16; else eeprom->address_bits = 8; - hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", - eeprom->type, eeprom->word_size, eeprom->address_bits); + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to write - * @words: number of words - * @data: 16 bit word(s) to write to EEPROM + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of word(s) + * @data: 16 bit word(s) to write to EEPROM * - * Reads 16 bit word(s) from EEPROM through bit-bang method + * Writes 16 bit word(s) to EEPROM through bit-bang method **/ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; u16 i, count; + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } - if (offset + words > hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } /* * The EEPROM page size cannot be queried from the chip. We do lazy @@ -925,28 +1074,29 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? - IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); - if (status != 0) + if (status != IXGBE_SUCCESS) break; } +out: return status; } /** - * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @words: number of word(s) - * @data: 16 bit word(s) to be written to the EEPROM + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM * - * If ixgbe_eeprom_update_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
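+ *
+ * An illustrative sketch of the intended call pattern (the offset and data
+ * are hypothetical, not taken from this patch; update_checksum is the
+ * eeprom ops hook set up in ixgbe_init_ops_generic):
+ *
+ *	u16 word = 0x1234;
+ *
+ *	if (ixgbe_write_eeprom_buffer_bit_bang(hw, 0x2F, 1, &word) ==
+ *	    IXGBE_SUCCESS)
+ *		hw->eeprom.ops.update_checksum(hw);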
**/ -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; @@ -955,106 +1105,126 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 i; u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); + /* Prepare the EEPROM for writing */ status = ixgbe_acquire_eeprom(hw); - if (status) - return status; - if (ixgbe_ready_eeprom(hw) != 0) { + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI*/ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + msec_delay(10); + } + /* Done with writing - release the EEPROM */ ixgbe_release_eeprom(hw); - return IXGBE_ERR_EEPROM; } - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - - /* Send the WRITE ENABLE command (8 bit opcode) */ - ixgbe_shift_out_eeprom_bits(hw, - IXGBE_EEPROM_WREN_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); - - ixgbe_standby_eeprom(hw); - - /* Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, write_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - page_size = hw->eeprom.word_page_size; - - /* Send the data in burst via SPI */ - do { - word = data[i]; - word = (word >> 8) | (word << 8); - ixgbe_shift_out_eeprom_bits(hw, word, 16); - - if (page_size == 0) - break; - - /* do not wrap around page */ - if (((offset + i) & (page_size - 1)) == - (page_size - 1)) - break; - } while (++i < words); - - ixgbe_standby_eeprom(hw); - usleep_range(10000, 20000); - } - /* Done with writing - release the EEPROM */ - ixgbe_release_eeprom(hw); - - return 0; + return status; } /** - * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @data: 16 bit word to be written to the EEPROM + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM * - * If ixgbe_eeprom_update_checksum is not called after this 
function, the - * EEPROM will most likely contain an invalid checksum. + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. **/ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { + s32 status; + + DEBUGFUNC("ixgbe_write_eeprom_generic"); + hw->eeprom.ops.init_params(hw); - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } - return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + +out: + return status; } /** - * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @words: number of word(s) - * @data: read 16 bit words(s) from EEPROM + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM * - * Reads 16 bit word(s) from EEPROM through bit-bang method + * Reads 16 bit word(s) from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; u16 i, count; + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } - if (offset + words > hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } /* * We cannot hold synchronization semaphores for too long @@ -1063,28 +1233,29 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); - if (status) - return status; + if (status != IXGBE_SUCCESS) + break; } - return 0; +out: + return status; } /** - * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @words: number of word(s) - * @data: read 16 bit word(s) from EEPROM + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM * - * Reads 16 bit word(s) from EEPROM through bit-bang method + * Reads 16 bit word(s) from EEPROM through bit-bang method **/ -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; @@ -1092,84 +1263,106 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; u16 i; + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); + /* Prepare the EEPROM for reading */ status = ixgbe_acquire_eeprom(hw); - if (status) - return status; - if (ixgbe_ready_eeprom(hw) != 0) { + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. */ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ ixgbe_release_eeprom(hw); - return IXGBE_ERR_EEPROM; } - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - /* Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, read_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - /* Read the data. 
*/ - word_in = ixgbe_shift_in_eeprom_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); - } - - /* End this read operation */ - ixgbe_release_eeprom(hw); - - return 0; + return status; } /** - * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @data: read 16 bit value from EEPROM + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM * - * Reads 16 bit value from EEPROM through bit-bang method + * Reads 16 bit value from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { + s32 status; + + DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); + hw->eeprom.ops.init_params(hw); - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } - return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + +out: + return status; } /** - * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of word(s) - * @data: 16 bit word(s) from the EEPROM + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM * - * Reads a 16 bit word(s) from the EEPROM using the EERD register. + * Reads a 16 bit word(s) from the EEPROM using the EERD register. **/ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eerd; - s32 status; + s32 status = IXGBE_SUCCESS; u32 i; + DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } for (i = 0; i < words; i++) { eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | @@ -1178,34 +1371,36 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); - if (status == 0) { + if (status == IXGBE_SUCCESS) { data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> IXGBE_EEPROM_RW_REG_DATA); } else { hw_dbg(hw, "Eeprom read timed out\n"); - return status; + goto out; } } - - return 0; +out: + return status; } /** - * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be used as a scratch pad + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad * - * Discover EEPROM page size by writing marching data at given offset. - * This function is called only when we are writing a new large buffer - * at given offset so the data would be overwritten anyway. 
+ * Discover EEPROM page size by writing marching data at given offset. + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. **/ -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset) { u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; - s32 status; + s32 status = IXGBE_SUCCESS; u16 i; + DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) data[i] = i; @@ -1213,12 +1408,12 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, IXGBE_EEPROM_PAGE_SIZE_MAX, data); hw->eeprom.word_page_size = 0; - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto out; status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto out; /* * When writing in burst more than the actual page size @@ -1226,18 +1421,19 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, */ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; - hw_dbg(hw, "Detected EEPROM page size = %d words.\n", - hw->eeprom.word_page_size); - return 0; + hw_dbg(hw, "Detected EEPROM page size = %d words.", + hw->eeprom.word_page_size); +out: + return status; } /** - * ixgbe_read_eerd_generic - Read EEPROM word using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM * - * Reads a 16 bit word from the EEPROM using the EERD register. + * Reads a 16 bit word from the EEPROM using the EERD register. **/ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { @@ -1245,59 +1441,68 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) } /** - * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) write to the EEPROM + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of word(s) + * @data: word(s) write to the EEPROM * - * Write a 16 bit word(s) to the EEPROM using the EEWR register. + * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
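+ *
+ * Caller-side sketch (the offset and buffer are hypothetical; refreshing the
+ * checksum through ixgbe_update_eeprom_checksum() follows the API header,
+ * not this hunk):
+ *
+ *	u16 buf[2] = { 0x1234, 0x5678 };
+ *
+ *	if (ixgbe_write_eewr_buffer_generic(hw, 0x10, 2, buf) == IXGBE_SUCCESS)
+ *		ixgbe_update_eeprom_checksum(hw);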
**/ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eewr; - s32 status; + s32 status = IXGBE_SUCCESS; u16 i; + DEBUGFUNC("ixgbe_write_eewr_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } for (i = 0; i < words; i++) { eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | - (data[i] << IXGBE_EEPROM_RW_REG_DATA) | - IXGBE_EEPROM_RW_REG_START; + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status) { + if (status != IXGBE_SUCCESS) { hw_dbg(hw, "Eeprom write EEWR timed out\n"); - return status; + goto out; } IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status) { + if (status != IXGBE_SUCCESS) { hw_dbg(hw, "Eeprom write EEWR timed out\n"); - return status; + goto out; } } - return 0; +out: + return status; } /** - * ixgbe_write_eewr_generic - Write EEPROM word using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM * - * Write a 16 bit word to the EEPROM using the EEWR register. + * Write a 16 bit word to the EEPROM using the EEWR register. **/ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { @@ -1305,17 +1510,20 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) } /** - * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status - * @hw: pointer to hardware structure - * @ee_reg: EEPROM flag for polling + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling * - * Polls the status bit (bit 1) of the EERD or EEWR to determine when the - * read or write is done respectively. + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. **/ -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) { u32 i; u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { if (ee_reg == IXGBE_NVM_POLL_READ) @@ -1324,162 +1532,196 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) reg = IXGBE_READ_REG(hw, IXGBE_EEWR); if (reg & IXGBE_EEPROM_RW_REG_DONE) { - return 0; + status = IXGBE_SUCCESS; + break; } - udelay(5); + usec_delay(5); } - return IXGBE_ERR_EEPROM; + + if (i == IXGBE_EERD_EEWR_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "EEPROM read/write done polling timed out"); + + return status; } /** - * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang - * @hw: pointer to hardware structure + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure * - * Prepares EEPROM for access using bit-bang method. 
This function should - * be called before issuing a command to the EEPROM. + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. **/ -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u32 eec; u32 i; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_acquire_eeprom"); - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) + != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; - /* Request EEPROM Access */ - eec |= IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + if (status == IXGBE_SUCCESS) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - if (eec & IXGBE_EEC_GNT) - break; - udelay(5); + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_GNT) + break; + usec_delay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + hw_dbg(hw, "Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + if (status == IXGBE_SUCCESS) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + } } - - /* Release if grant not acquired */ - if (!(eec & IXGBE_EEC_GNT)) { - eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); - hw_dbg(hw, "Could not acquire EEPROM grant\n"); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return IXGBE_ERR_EEPROM; - } - - /* Setup EEPROM for Read/Write */ - /* Clear CS and SK */ - eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); - return 0; + return status; } /** - * ixgbe_get_eeprom_semaphore - Get hardware semaphore - * @hw: pointer to hardware structure + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure * - * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method **/ -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) { + s32 status = IXGBE_ERR_EEPROM; u32 timeout = 2000; u32 i; u32 swsm; + DEBUGFUNC("ixgbe_get_eeprom_semaphore"); + /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (!(swsm & IXGBE_SWSM_SMBI)) + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; break; - usleep_range(50, 100); + } + usec_delay(50); } if (i == timeout) { - hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); - /* this release is particularly important because our attempts + hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + 
* this release is particularly important because our attempts * above to get the semaphore may have succeeded, and if there * was a timeout, we should unconditionally clear the semaphore * bits to free the driver to make progress */ ixgbe_release_eeprom_semaphore(hw); - usleep_range(50, 100); - /* one last try + usec_delay(50); + /* + * one last try * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (swsm & IXGBE_SWSM_SMBI) { - hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); - return IXGBE_ERR_EEPROM; - } + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) + status = IXGBE_SUCCESS; } /* Now get the semaphore between SW/FW through the SWESMBI bit */ - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - /* Set the SW EEPROM semaphore bit to request access */ - swsm |= IXGBE_SWSM_SWESMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); - /* If we set the bit successfully then we got the - * semaphore. + /* + * If we set the bit successfully then we got the + * semaphore. + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (swsm & IXGBE_SWSM_SWESMBI) - break; - - usleep_range(50, 100); + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); } - /* Release semaphores and return error if SW EEPROM semaphore - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { - hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); - ixgbe_release_eeprom_semaphore(hw); - return IXGBE_ERR_EEPROM; - } - - return 0; + return status; } /** - * ixgbe_release_eeprom_semaphore - Release hardware semaphore - * @hw: pointer to hardware structure + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure * - * This function clears hardware semaphore bits. + * This function clears hardware semaphore bits. 
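The semaphore logic above is a two-stage handshake. SMBI arbitrates between device drivers: hardware sets the bit on read, so reading it as 0 both claims and confirms ownership. SWESMBI then arbitrates between software and firmware, and must be read back because firmware can refuse the grant. A compact sketch, assuming hypothetical read_swsm()/write_swsm()/delay_us() helpers and bit values mirroring IXGBE_SWSM_SMBI/IXGBE_SWSM_SWESMBI:

#include <stdbool.h>
#include <stdint.h>

#define SMBI    0x1     /* assumed IXGBE_SWSM_SMBI */
#define SWESMBI 0x2     /* assumed IXGBE_SWSM_SWESMBI */

uint32_t read_swsm(void);
void write_swsm(uint32_t val);
void delay_us(unsigned int us);

static bool get_eeprom_semaphore(unsigned int timeout)
{
        unsigned int i;

        /* Stage 1: reading SMBI as 0 sets it and grants ownership */
        for (i = 0; i < timeout; i++) {
                if (!(read_swsm() & SMBI))
                        break;
                delay_us(50);
        }
        if (i == timeout)
                return false;

        /* Stage 2: request SWESMBI and read back to confirm, since
         * firmware holding the lock keeps the bit from sticking */
        for (i = 0; i < timeout; i++) {
                write_swsm(read_swsm() | SWESMBI);
                if (read_swsm() & SWESMBI)
                        return true;
                delay_us(50);
        }

        write_swsm(read_swsm() & ~SMBI);  /* release SMBI on failure */
        return false;
}

The driver adds one wrinkle the sketch omits: on a stage-1 timeout it force-clears both bits and retries once, since its own earlier attempts may have left the semaphore set.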
**/ -static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) { u32 swsm; - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + DEBUGFUNC("ixgbe_release_eeprom_semaphore"); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); - IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); IXGBE_WRITE_FLUSH(hw); } /** - * ixgbe_ready_eeprom - Polls for EEPROM ready - * @hw: pointer to hardware structure + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure **/ -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u16 i; u8 spi_stat_reg; + DEBUGFUNC("ixgbe_ready_eeprom"); + /* * Read "Status Register" repeatedly until the LSB is cleared. The * EEPROM will signal that the command has been completed by clearing @@ -1493,9 +1735,9 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) break; - udelay(5); + usec_delay(5); ixgbe_standby_eeprom(hw); - } + }; /* * On some parts, SPI write time could vary from 0-20mSec on 3.3V @@ -1503,53 +1745,57 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) */ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { hw_dbg(hw, "SPI EEPROM Status error\n"); - return IXGBE_ERR_EEPROM; + status = IXGBE_ERR_EEPROM; } - return 0; + return status; } /** - * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state - * @hw: pointer to hardware structure + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure **/ -static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + DEBUGFUNC("ixgbe_standby_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* Toggle CS to flush commands */ eec |= IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); eec &= ~IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); } /** - * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. - * @hw: pointer to hardware structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out **/ -static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count) { u32 eec; u32 mask; u32 i; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. 
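ixgbe_ready_eeprom() above is a classic SPI status poll: clock out the RDSR opcode, read the status byte, and wait for the busy bit (LSB) to clear, toggling CS between attempts. A sketch over hypothetical bit-bang helpers; the opcode, status bit, and retry count are assumed mirrors of the driver's SPI defines:

#include <stdbool.h>
#include <stdint.h>

#define RDSR_OPCODE 0x05   /* assumed IXGBE_EEPROM_RDSR_OPCODE_SPI */
#define STATUS_RDY  0x01   /* assumed IXGBE_EEPROM_STATUS_RDY_SPI */
#define MAX_RETRY   5000   /* assumed IXGBE_EEPROM_MAX_RETRY_SPI */

void shift_out(uint16_t data, uint16_t bits);
uint16_t shift_in(uint16_t bits);
void standby(void);                /* toggle CS to end the command */
void delay_us(unsigned int us);

static bool spi_eeprom_ready(void)
{
        unsigned int i;

        for (i = 0; i < MAX_RETRY; i++) {
                shift_out(RDSR_OPCODE, 8);        /* read status register */
                if (!(shift_in(8) & STATUS_RDY))  /* LSB clear: idle */
                        return true;
                delay_us(5);
                standby();
        }
        return false;   /* SPI writes can take up to ~20 ms at 3.3 V */
}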
Determine the starting bit based on count */ - mask = BIT(count - 1); + mask = 0x01 << (count - 1); for (i = 0; i < count; i++) { /* @@ -1564,10 +1810,10 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, else eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); ixgbe_raise_eeprom_clk(hw, &eec); ixgbe_lower_eeprom_clk(hw, &eec); @@ -1577,25 +1823,27 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, * EEPROM */ mask = mask >> 1; - } + }; /* We leave the "DI" bit set to "0" when we leave this routine. */ eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); } /** - * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM - * @hw: pointer to hardware structure - * @count: number of bits to shift + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + * @count: number of bits to shift **/ -static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) { u32 eec; u32 i; u16 data = 0; + DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); + /* * In order to read a register from the EEPROM, we need to shift * 'count' bits in from the EEPROM. Bits are "shifted in" by raising @@ -1603,7 +1851,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) * the value of the "DO" bit. During this "shifting in" process the * "DI" bit should always be clear. */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); @@ -1611,7 +1859,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) data = data << 1; ixgbe_raise_eeprom_clk(hw, &eec); - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec &= ~(IXGBE_EEC_DI); if (eec & IXGBE_EEC_DO) @@ -1624,74 +1872,78 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) } /** - * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. - * @hw: pointer to hardware structure - * @eec: EEC register's current value + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value **/ -static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { + DEBUGFUNC("ixgbe_raise_eeprom_clk"); + /* * Raise the clock input to the EEPROM * (setting the SK bit), then delay */ *eec = *eec | IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); } /** - * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. - * @hw: pointer to hardware structure - * @eec: EEC's current value + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
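The shift-out/shift-in pair above is plain MSB-first bit-banging over the EEC register: present a bit on DI, pulse the clock, and sample DO while the clock is high. The same logic with the register accesses abstracted into hypothetical pin helpers:

#include <stdint.h>

void set_di(int bit);
int get_do(void);
void raise_clk(void);
void lower_clk(void);

static void shift_out_bits(uint16_t data, uint16_t count)
{
        uint16_t mask = 1u << (count - 1);   /* start at the MSB */

        while (mask) {
                set_di(!!(data & mask));     /* present the bit on DI */
                raise_clk();
                lower_clk();                 /* EEPROM latches on the edge */
                mask >>= 1;
        }
        set_di(0);   /* leave DI low on exit, as the driver does */
}

static uint16_t shift_in_bits(uint16_t count)
{
        uint16_t i, data = 0;

        for (i = 0; i < count; i++) {
                data <<= 1;
                raise_clk();
                if (get_do())                /* sample DO at clock high */
                        data |= 1;
                lower_clk();
        }
        return data;
}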
+ * @hw: pointer to hardware structure + * @eec: EEC's current value **/ -static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { + DEBUGFUNC("ixgbe_lower_eeprom_clk"); + /* * Lower the clock input to the EEPROM (clearing the SK bit), then * delay */ *eec = *eec & ~IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); } /** - * ixgbe_release_eeprom - Release EEPROM, release semaphores - * @hw: pointer to hardware structure + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure **/ -static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + DEBUGFUNC("ixgbe_release_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec |= IXGBE_EEC_CS; /* Pull CS high */ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); /* Stop requesting EEPROM access */ eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - /* - * Delay before attempt to obtain semaphore again to allow FW - * access. semaphore_delay is in ms we need us for usleep_range - */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); + /* Delay before attempt to obtain semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); } /** - * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum - * @hw: pointer to hardware structure + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum **/ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { @@ -1702,11 +1954,13 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) u16 pointer = 0; u16 word = 0; + DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); + /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (hw->eeprom.ops.read(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -1745,12 +1999,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) } /** - * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. **/ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val) @@ -1759,8 +2013,9 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 checksum; u16 read_checksum = 0; - /* - * Read the first word from the EEPROM. 
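The checksum convention behind ixgbe_calc_eeprom_checksum_generic() above: the 16-bit sum of words 0x00 through 0x3E, plus every valid pointed-to region, plus the stored checksum word must equal a fixed constant, so the routine returns that constant minus the running sum. A trimmed sketch with a hypothetical read_word() helper; 0xBABA is the assumed value of IXGBE_EEPROM_SUM:

#include <stdint.h>

#define EEPROM_SUM      0xBABA  /* assumed IXGBE_EEPROM_SUM */
#define CHECKSUM_OFFSET 0x3F    /* assumed IXGBE_EEPROM_CHECKSUM */

int read_word(uint16_t offset, uint16_t *word);

static int calc_checksum(uint16_t *checksum_out)
{
        uint16_t checksum = 0, word;
        uint16_t i;

        for (i = 0; i < CHECKSUM_OFFSET; i++) {  /* words 0x00-0x3E */
                if (read_word(i, &word))
                        return -1;               /* bail, as the hunk now does */
                checksum += word;                /* 16-bit wraparound is intended */
        }
        /* the real routine also walks each pointer region here */

        *checksum_out = (uint16_t)(EEPROM_SUM - checksum);
        return 0;
}

Note the hunk above also hardens this loop: a failed read now returns IXGBE_ERR_EEPROM instead of breaking out and checksumming garbage.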
If this times out or fails, do + DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ @@ -1796,16 +2051,17 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, } /** - * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum - * @hw: pointer to hardware structure + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure **/ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) { s32 status; u16 checksum; - /* - * Read the first word from the EEPROM. If this times out or fails, do + DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ @@ -1827,14 +2083,40 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) } /** - * ixgbe_set_rar_generic - Set Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: VMDq "set" or "pool" index - * @enable_addr: set flag that address is active + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. * - * Puts an ethernet address into a receive address register. + * Tests a MAC address to ensure it is a valid Individual Address. + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_validate_mac_addr"); + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (IXGBE_IS_BROADCAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * ixgbe_set_rar_generic - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. **/ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr) @@ -1842,9 +2124,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_set_rar_generic"); + /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", index); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -1871,32 +2156,30 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, if (enable_addr != 0) rar_high |= IXGBE_RAH_AV; - /* Record lower 32 bits of MAC address and then make - * sure that write is flushed to hardware before writing - * the upper 16 bits and setting the valid bit. 
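ixgbe_validate_mac_addr(), added above, reduces to three checks on the 6-byte address. A standalone sketch; note the multicast test (the I/G bit in the first octet) already covers broadcast, so the explicit broadcast compare mirrors the driver's ordering rather than adding coverage:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool mac_addr_valid(const uint8_t addr[6])
{
        static const uint8_t zero[6];
        static const uint8_t bcast[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

        if (addr[0] & 0x01)               /* multicast: I/G bit set */
                return false;
        if (!memcmp(addr, bcast, 6))      /* broadcast */
                return false;
        if (!memcmp(addr, zero, 6))       /* all-zero, unprogrammed */
                return false;
        return true;                      /* valid individual address */
}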
- */ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); - IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_clear_rar_generic - Remove Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write * - * Clears an ethernet address from a receive address register. + * Clears an ethernet address from a receive address register. **/ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_clear_rar_generic"); + /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", index); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -1908,52 +2191,58 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - /* Clear the address valid bit and upper 16 bits of the address - * before clearing the lower bits. This way we aren't updating - * a live filter. - */ - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_init_rx_addrs_generic - Initializes receive address filters. - * @hw: pointer to hardware structure + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. + * @hw: pointer to hardware structure * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. **/ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) { u32 i; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_init_rx_addrs_generic"); + /* * If the current mac address is valid, assume it is a software override * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (!is_valid_ether_addr(hw->mac.addr)) { + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { /* Get the MAC address from the RAR0 for later reference */ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); - hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); } else { /* Setup the receive address. 
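The RAR write above packs the 6-byte MAC across a register pair: bytes 0-3 little-endian into RAL, bytes 4-5 plus the Address Valid bit into RAH, with RAL written first so the filter only goes live on the RAH write. A sketch with hypothetical write_ral()/write_rah() accessors (RAH_AV assumed to mirror IXGBE_RAH_AV):

#include <stdint.h>

#define RAH_AV 0x80000000u   /* assumed IXGBE_RAH_AV */

void write_ral(uint32_t index, uint32_t val);
void write_rah(uint32_t index, uint32_t val);

static void set_rar(uint32_t index, const uint8_t addr[6], int enable)
{
        uint32_t rar_low  = (uint32_t)addr[0] |
                            ((uint32_t)addr[1] << 8) |
                            ((uint32_t)addr[2] << 16) |
                            ((uint32_t)addr[3] << 24);
        uint32_t rar_high = (uint32_t)addr[4] |
                            ((uint32_t)addr[5] << 8);

        if (enable)
                rar_high |= RAH_AV;   /* filter goes live on this write */

        write_ral(index, rar_low);
        write_rah(index, rar_high);
}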
*/ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); - hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } - /* clear VMDq pool/queue selection for RAR 0 */ + /* clear VMDq pool/queue selection for RAR 0 */ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); hw->addr_ctrl.overflow_promisc = 0; @@ -1975,28 +2264,133 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); - if (hw->mac.ops.init_uta_tables) - hw->mac.ops.init_uta_tables(hw); + ixgbe_init_uta_tables(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_mta_vector - Determines bit-vector in multicast table to set - * @hw: pointer to hardware structure - * @mc_addr: the multicast address + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * @vmdq: VMDq "set" or "pool" index * - * Extracts the 12 bits, from a multicast address, to determine which - * bit-vector to set in the multicast table. The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in - * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initialization - * to mc_filter_type. + * Adds it to unused receive address register or goes into promiscuous mode. **/ -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("ixgbe_add_uc_addr"); + + hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + hw_dbg(hw, "ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
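ixgbe_add_uc_addr() and the list update above implement a simple overflow policy: secondary unicast addresses consume spare RARs in order, and once the RARs run out the device falls back to unicast promiscuous mode instead of dropping filters. The core of that policy as a sketch, with hypothetical set_rar_filter()/set_unicast_promisc() helpers:

#include <stdbool.h>
#include <stdint.h>

#define NUM_RARS 128   /* illustrative; the real count is per-MAC */

void set_rar_filter(uint32_t rar, const uint8_t *addr);
void set_unicast_promisc(bool on);

static void add_secondary_addrs(const uint8_t (*addrs)[6], int count)
{
        uint32_t next_rar = 1;   /* RAR[0] holds the primary MAC */
        bool overflow = false;
        int i;

        for (i = 0; i < count; i++) {
                if (next_rar < NUM_RARS)
                        set_rar_filter(next_rar++, addrs[i]);
                else
                        overflow = true;   /* no filter left for this one */
        }
        set_unicast_promisc(overflow);
}

The real routine is more careful on the way out of promiscuous mode: it only clears FCTRL.UPE when overflow set it, never when user_set_promisc indicates the user asked for it.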
+ **/ +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + hw_dbg(hw, " Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + hw_dbg(hw, " Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + hw_dbg(hw, " Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } + + hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; + DEBUGFUNC("ixgbe_mta_vector"); + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); @@ -2012,6 +2406,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) break; default: /* Invalid mc_filter_type */ hw_dbg(hw, "MC filter type param set incorrectly\n"); + ASSERT(0); break; } @@ -2021,18 +2416,20 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) } /** - * ixgbe_set_mta - Set bit-vector in multicast table - * @hw: pointer to hardware structure - * @mc_addr: Multicast address + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @mc_addr: Multicast address * - * Sets the bit-vector in the multicast table. + * Sets the bit-vector in the multicast table. 
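The hash in ixgbe_mta_vector() above is just a bit-field selection: mc_filter_type (the MO field of MCSTCTRL) picks which 12 bits of the two high address bytes index the 4096-bit multicast table. As a standalone sketch:

#include <stdint.h>

static uint16_t mta_vector(int filter_type, const uint8_t mc_addr[6])
{
        uint16_t vector = 0;

        switch (filter_type) {
        case 0:   /* bits [47:36] */
                vector = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);
                break;
        case 1:   /* bits [46:35] */
                vector = (mc_addr[4] >> 3) | ((uint16_t)mc_addr[5] << 5);
                break;
        case 2:   /* bits [45:34] */
                vector = (mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6);
                break;
        case 3:   /* bits [43:32] */
                vector = mc_addr[4] | ((uint16_t)mc_addr[5] << 8);
                break;
        }
        return vector & 0xFFF;   /* 12-bit table index */
}

ixgbe_set_mta() then splits the vector into a register index (vector >> 5, one of 128 32-bit MTA registers) and a bit position within it (vector & 0x1F).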
**/ -static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector; u32 vector_bit; u32 vector_reg; + DEBUGFUNC("ixgbe_set_mta"); + hw->addr_ctrl.mta_in_use++; vector = ixgbe_mta_vector(hw, mc_addr); @@ -2049,40 +2446,46 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); } /** - * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses - * @hw: pointer to hardware structure - * @netdev: pointer to net device structure + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand * - * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unused receive address - * registers for the first multicast addresses, and hashes the rest into the - * multicast table. + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev) +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) { - struct netdev_hw_addr *ha; u32 i; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); /* * Set the new number of MC addresses that we are being requested to * use. */ - hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.num_mc_addrs = mc_addr_count; hw->addr_ctrl.mta_in_use = 0; /* Clear mta_shadow */ - hw_dbg(hw, " Clearing MTA\n"); - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + if (clear) { + hw_dbg(hw, " Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } - /* Update mta shadow */ - netdev_for_each_mc_addr(ha, netdev) { + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, ha->addr); + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); } /* Enable mta */ @@ -2095,67 +2498,77 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_enable_mc_generic - Enable multicast address in RAR - * @hw: pointer to hardware structure + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure * - * Enables multicast address in RAR and the use of the multicast hash table. + * Enables multicast address in RAR and the use of the multicast hash table. 
**/ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + DEBUGFUNC("ixgbe_enable_mc_generic"); + if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_disable_mc_generic - Disable multicast address in RAR - * @hw: pointer to hardware structure + * ixgbe_disable_mc_generic - Disable multicast address in RAR + * @hw: pointer to hardware structure * - * Disables multicast address in RAR and the use of the multicast hash table. + * Disables multicast address in RAR and the use of the multicast hash table. **/ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + DEBUGFUNC("ixgbe_disable_mc_generic"); + if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_fc_enable_generic - Enable flow control - * @hw: pointer to hardware structure + * ixgbe_fc_enable_generic - Enable flow control + * @hw: pointer to hardware structure * - * Enable flow control according to the current settings. + * Enable flow control according to the current settings. **/ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) { + s32 ret_val = IXGBE_SUCCESS; u32 mflcn_reg, fccfg_reg; u32 reg; u32 fcrtl, fcrth; int i; - /* Validate the water mark configuration. */ - if (!hw->fc.pause_time) - return IXGBE_ERR_INVALID_LINK_SETTINGS; + DEBUGFUNC("ixgbe_fc_enable_generic"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } /* Low water mark of zero causes XOFF floods */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { hw_dbg(hw, "Invalid water mark configuration\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } } } @@ -2211,8 +2624,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; } /* Set 802.3x based flow control settings. */ @@ -2220,8 +2636,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; @@ -2244,32 +2661,39 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - return 0; +out: + return ret_val; } /** - * ixgbe_negotiate_fc - Negotiate flow control - * @hw: pointer to hardware structure - * @adv_reg: flow control advertised settings - * @lp_reg: link partner's flow control settings - * @adv_sym: symmetric pause bit in advertisement - * @adv_asm: asymmetric pause bit in advertisement - * @lp_sym: symmetric pause bit in link partner advertisement - * @lp_asm: asymmetric pause bit in link partner advertisement + * ixgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement * - * Find the intersection between advertised settings and link partner's - * advertised settings + * Find the intersection between advertised settings and link partner's + * advertised settings **/ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { - if ((!(adv_reg)) || (!(lp_reg))) + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); return IXGBE_ERR_FC_NOT_NEGOTIATED; + } if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { /* @@ -2298,19 +2722,19 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, hw->fc.current_mode = ixgbe_fc_none; hw_dbg(hw, "Flow Control = NONE.\n"); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber - * @hw: pointer to hardware structure + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure * - * Enable flow control according on 1 gig fiber. + * Enable flow control according on 1 gig fiber. 
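ixgbe_negotiate_fc() above is the standard IEEE 802.3 pause resolution over the symmetric and asymmetric pause bits. Its decision table, as a sketch (the enum is illustrative and mirrors the driver's ixgbe_fc_* modes):

#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool adv_sym, bool adv_asm,
                               bool lp_sym, bool lp_asm,
                               enum fc_mode requested)
{
        if (adv_sym && lp_sym)   /* both symmetric: honor local request */
                return (requested == FC_FULL) ? FC_FULL : FC_RX_PAUSE;
        if (!adv_sym && adv_asm && lp_sym && lp_asm)
                return FC_TX_PAUSE;   /* we may send PAUSE, not honor it */
        if (adv_sym && adv_asm && !lp_sym && lp_asm)
                return FC_RX_PAUSE;   /* we honor PAUSE, partner sends only */
        return FC_NONE;
}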
**/ -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) { u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - s32 ret_val; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On multispeed fiber at 1g, bail out if @@ -2320,31 +2744,34 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + hw_dbg(hw, "Auto-Negotiation did not complete or timed out\n"); + goto out; + } pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, - pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE, - IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE); + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); +out: return ret_val; } /** - * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure * - * Enable flow control according to IEEE clause 37. + * Enable flow control according to IEEE clause 37. **/ -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) { u32 links2, anlp1_reg, autoc_reg, links; - s32 ret_val; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On backplane, bail out if @@ -2352,13 +2779,17 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) * - we are 82599 and link partner is not AN enabled */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + goto out; + } if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + hw_dbg(hw, "Link partner is not AN enabled\n"); + goto out; + } } /* * Read the 10g AN autoc and LP ability registers and resolve @@ -2371,25 +2802,26 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); +out: return ret_val; } /** - * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure * - * Enable flow control according to IEEE clause 37. + * Enable flow control according to IEEE clause 37. 
**/ -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) { u16 technology_ability_reg = 0; u16 lp_technology_ability_reg = 0; - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &technology_ability_reg); - hw->phy.ops.read_reg(hw, MDIO_AN_LPA, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &lp_technology_ability_reg); return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, @@ -2399,11 +2831,11 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) } /** - * ixgbe_fc_autoneg - Configure flow control - * @hw: pointer to hardware structure + * ixgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure * - * Compares our advertised flow control capabilities to those advertised by - * our link partner, and determines the proper flow control mode to use. + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. **/ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) { @@ -2411,24 +2843,30 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) ixgbe_link_speed speed; bool link_up; + DEBUGFUNC("ixgbe_fc_autoneg"); + /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. - * - * Since we're being called from an LSC, link is already known to be up. - * So use link_up_wait_to_complete=false. */ - if (hw->fc.disable_fc_autoneg) + if (hw->fc.disable_fc_autoneg) { + /* TODO: This should be just an informative log */ + ERROR_REPORT1(IXGBE_ERROR_CAUTION, + "Flow control autoneg is disabled"); goto out; + } hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); goto out; + } switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); @@ -2450,7 +2888,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) } out: - if (ret_val == 0) { + if (ret_val == IXGBE_SUCCESS) { hw->fc.fc_was_autonegged = true; } else { hw->fc.fc_was_autonegged = false; @@ -2458,46 +2896,46 @@ out: } } -/** +/* * ixgbe_pcie_timeout_poll - Return number of times to poll for completion * @hw: pointer to hardware structure * * System-wide timeout range is encoded in PCIe Device Control2 register. * - * Add 10% to specified maximum and return the number of times to poll for - * completion timeout, in units of 100 microsec. Never return less than - * 800 = 80 millisec. - **/ -static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) + * Add 10% to specified maximum and return the number of times to poll for + * completion timeout, in units of 100 microsec. Never return less than + * 800 = 80 millisec. 
+ */ +STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) { s16 devctl2; u32 pollcnt; - devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; switch (devctl2) { case IXGBE_PCIDEVCTRL2_65_130ms: - pollcnt = 1300; /* 130 millisec */ + pollcnt = 1300; /* 130 millisec */ break; case IXGBE_PCIDEVCTRL2_260_520ms: - pollcnt = 5200; /* 520 millisec */ + pollcnt = 5200; /* 520 millisec */ break; case IXGBE_PCIDEVCTRL2_1_2s: - pollcnt = 20000; /* 2 sec */ + pollcnt = 20000; /* 2 sec */ break; case IXGBE_PCIDEVCTRL2_4_8s: - pollcnt = 80000; /* 8 sec */ + pollcnt = 80000; /* 8 sec */ break; case IXGBE_PCIDEVCTRL2_17_34s: - pollcnt = 34000; /* 34 sec */ + pollcnt = 34000; /* 34 sec */ break; - case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ - case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ - case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ - case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ default: - pollcnt = 800; /* 80 millisec minimum */ + pollcnt = 800; /* 80 millisec minimum */ break; } @@ -2506,43 +2944,35 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) } /** - * ixgbe_disable_pcie_master - Disable PCI-express master access - * @hw: pointer to hardware structure + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure * - * Disables PCI-Express master access and verifies there are no pending - * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable - * bit hasn't caused the master requests to be disabled, else 0 - * is returned signifying master requests disabled. + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS + * is returned signifying master requests disabled. **/ -static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u32 i, poll; u16 value; + DEBUGFUNC("ixgbe_disable_pcie_master"); + /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); - /* Poll for bit to read as set */ - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) - break; - usleep_range(100, 120); - } - if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { - hw_dbg(hw, "GIO disable did not set - requesting resets\n"); - goto gio_disable_fail; - } - /* Exit if master requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || - ixgbe_removed(hw->hw_addr)) - return 0; + IXGBE_REMOVED(hw->hw_addr)) + goto out; /* Poll for master request bit to clear */ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - udelay(100); + usec_delay(100); if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) - return 0; + goto out; } /* @@ -2554,11 +2984,10 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * again to clear out any effects they may have had on our device. 
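The poll budget computed above follows one rule: take the upper bound of the completion-timeout range encoded in PCIe Device Control 2, express it in 100 microsecond polls, enforce an 800-poll (80 ms) floor, and add 10 percent margin. The same arithmetic in isolation:

/* timeout_ms is the upper bound of the encoded timeout range */
static unsigned int timeout_to_pollcnt(unsigned int timeout_ms)
{
        unsigned int pollcnt = timeout_ms * 10;   /* ms -> 100 us polls */

        if (pollcnt < 800)
                pollcnt = 800;            /* never below 80 ms */
        return (pollcnt * 11) / 10;       /* add the 10% margin */
}

For example, timeout_to_pollcnt(130) gives 1430 polls for the 65-130 ms range, matching the 1300-poll table entry above before the margin is applied.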
*/ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); -gio_disable_fail: hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) - return 0; + goto out; /* * Before proceeding, make sure that the PCIe block does not have @@ -2566,25 +2995,29 @@ gio_disable_fail: */ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { - udelay(100); - value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); - if (ixgbe_removed(hw->hw_addr)) - return 0; + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - return 0; + goto out; } - hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); - return IXGBE_ERR_MASTER_REQUESTS_PENDING; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PCIe transaction pending bit also did not clear.\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + +out: + return status; } /** - * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire * - * Acquires the SWFW semaphore through the GSSR register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { @@ -2594,6 +3027,8 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) u32 timeout = 200; u32 i; + DEBUGFUNC("ixgbe_acquire_swfw_sync"); + for (i = 0; i < timeout; i++) { /* * SW NVM semaphore bit is used for access to all @@ -2607,11 +3042,11 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) gssr |= swmask; IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); ixgbe_release_eeprom_semaphore(hw); - return 0; + return IXGBE_SUCCESS; } else { /* Resource is currently in use by FW or SW */ ixgbe_release_eeprom_semaphore(hw); - usleep_range(5000, 10000); + msec_delay(5); } } @@ -2619,23 +3054,25 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) if (gssr & (fwmask | swmask)) ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); - usleep_range(5000, 10000); + msec_delay(5); return IXGBE_ERR_SWFW_SYNC; } /** - * ixgbe_release_swfw_sync - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release * - * Releases the SWFW semaphore through the GSSR register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr; u32 swmask = mask; + DEBUGFUNC("ixgbe_release_swfw_sync"); + ixgbe_get_eeprom_semaphore(hw); gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); @@ -2646,47 +3083,21 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) } /** - * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * ixgbe_disable_sec_rx_path_generic - Stops the receive data path * @hw: pointer to hardware structure - * @reg_val: Value we read from AUTOC - * @locked: bool to indicate whether the SW/FW lock should 
be taken. Never - true in this the generic case. * - * The default case requires no protection so just to the register read. + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block **/ -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) { - *locked = false; - *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); - return 0; -} +#define IXGBE_MAX_SECRX_POLL 4000 -/** - * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write - * @hw: pointer to hardware structure - * @reg_val: value to write to AUTOC - * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous read. - **/ -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) -{ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); - return 0; -} - -/** - * ixgbe_disable_rx_buff_generic - Stops the receive data path - * @hw: pointer to hardware structure - * - * Stops the receive data path and waits for the HW to internally - * empty the Rx security block. - **/ -s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) -{ -#define IXGBE_MAX_SECRX_POLL 40 int i; int secrxreg; + DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg |= IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); @@ -2696,65 +3107,103 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) break; else /* Use interrupt-safe sleep just in case */ - udelay(1000); + usec_delay(10); } /* For informational purposes only */ if (i >= IXGBE_MAX_SECRX_POLL) - hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); - - return 0; + hw_dbg(hw, "Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + return IXGBE_SUCCESS; } /** - * ixgbe_enable_rx_buff - Enables the receive data path - * @hw: pointer to hardware structure + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: bool to indicate whether the SW/FW lock was taken + * @reg_val: Value we read from AUTOC * - * Enables the receive data path + * The default case requires no protection so just do the register read. + */ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + * + * The default case requires no protection so just do the register write. + */ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + UNREFERENCED_1PARAMETER(locked); + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_sec_rx_path_generic - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path.
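The security Rx disable above is a set-and-drain pattern: assert the disable bit, then poll the status register until the block reports it has emptied; the timeout is deliberately informational because initialization proceeds either way. A sketch with hypothetical register accessors (bit values assumed to mirror IXGBE_SECRXCTRL_RX_DIS and IXGBE_SECRXSTAT_SECRX_RDY):

#include <stdbool.h>
#include <stdint.h>

#define SECRX_DIS 0x2    /* assumed IXGBE_SECRXCTRL_RX_DIS */
#define SECRX_RDY 0x1    /* assumed IXGBE_SECRXSTAT_SECRX_RDY */
#define MAX_POLL  4000

uint32_t read_secrxctrl(void);
void write_secrxctrl(uint32_t val);
uint32_t read_secrxstat(void);
void delay_us(unsigned int us);

static bool disable_sec_rx_path(void)
{
        int i;

        write_secrxctrl(read_secrxctrl() | SECRX_DIS);

        for (i = 0; i < MAX_POLL; i++) {
                if (read_secrxstat() & SECRX_RDY)
                        return true;   /* block drained and quiesced */
                delay_us(10);
        }
        return false;   /* informational only; init continues */
}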
**/ -s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) { u32 secrxreg; + DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit - * @hw: pointer to hardware structure - * @regval: register value to write to RXCTRL + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL * - * Enables the Rx DMA unit + * Enables the Rx DMA unit **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { - if (regval & IXGBE_RXCTRL_RXEN) - hw->mac.ops.enable_rx(hw); - else - hw->mac.ops.disable_rx(hw); + DEBUGFUNC("ixgbe_enable_rx_dma_generic"); - return 0; + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); + + return IXGBE_SUCCESS; } /** - * ixgbe_blink_led_start_generic - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink **/ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; - bool link_up = false; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + bool link_up = 0; + u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; bool locked = false; - s32 ret_val; + + DEBUGFUNC("ixgbe_blink_led_start_generic"); if (index > 3) return IXGBE_ERR_PARAM; @@ -2767,19 +3216,18 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) if (!link_up) { ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; IXGBE_WRITE_FLUSH(hw); - - usleep_range(10000, 20000); + msec_delay(10); } led_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -2787,34 +3235,37 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** - * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. - * @hw: pointer to hardware structure - * @index: led number to stop blinking + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to stop blinking **/ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; bool locked = false; - s32 ret_val; + + DEBUGFUNC("ixgbe_blink_led_stop_generic"); if (index > 3) return IXGBE_ERR_PARAM; ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); @@ -2822,45 +3273,50 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** - * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM - * @hw: pointer to hardware structure - * @san_mac_offset: SAN MAC address offset + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset * - * This function will read the EEPROM location for the SAN MAC address - * pointer, and returns the value at that location. This is used in both - * get and set mac_addr routines. + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. **/ -static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, - u16 *san_mac_offset) +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) { s32 ret_val; + DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); + /* * First read the EEPROM pointer to see if the MAC addresses are * available. */ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); - if (ret_val) - hw_err(hw, "eeprom read at offset %d failed\n", - IXGBE_SAN_MAC_ADDR_PTR); + if (ret_val) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom at offset %d failed", + IXGBE_SAN_MAC_ADDR_PTR); + } return ret_val; } /** - * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM - * @hw: pointer to hardware structure - * @san_mac_addr: SAN MAC address + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address * - * Reads the SAN MAC address from the EEPROM, if it's available. This is - * per-port, so set_lan_id() must be called before reading the addresses. - * set_lan_id() is called by identify_sfp(), but this cannot be relied - * upon for non-SFP connections, so we must call it here. + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. **/ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) { @@ -2868,14 +3324,15 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) u8 i; s32 ret_val; + DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); + /* * First read the EEPROM pointer to see if the MAC addresses are * available. 
If they're not, no point in calling set_lan_id() here. */ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) - - goto san_mac_addr_clr; + goto san_mac_addr_out; /* make sure we know which port we need to program */ hw->mac.ops.set_lan_id(hw); @@ -2886,35 +3343,73 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) ret_val = hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); if (ret_val) { - hw_err(hw, "eeprom read at offset %d failed\n", - san_mac_offset); - goto san_mac_addr_clr; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + san_mac_offset); + goto san_mac_addr_out; } san_mac_addr[i * 2] = (u8)(san_mac_data); san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); san_mac_offset++; } - return 0; + return IXGBE_SUCCESS; -san_mac_addr_clr: - /* No addresses available in this EEPROM. It's not necessarily an +san_mac_addr_out: + /* + * No addresses available in this EEPROM. It's not an * error though, so just wipe the local address and return. */ for (i = 0; i < 6; i++) san_mac_addr[i] = 0xFF; - return ret_val; + return IXGBE_SUCCESS; } /** - * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count - * @hw: pointer to hardware structure + * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address * - * Read PCIe configuration space, and get the MSI-X vector count from - * the capabilities table. + * Write a SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + s32 ret_val; + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); + + /* Look for SAN mac address pointer. If not defined, return */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + return IXGBE_ERR_NO_SAN_ADDR_PTR; + + /* Make sure we know which port we need to write */ + hw->mac.ops.set_lan_id(hw); + /* Apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); + san_mac_offset++; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. 
**/ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) { - u16 msix_count; + u16 msix_count = 1; u16 max_msix_count; u16 pcie_offset; @@ -2927,16 +3422,17 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; default: - return 1; + return msix_count; } - msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); - if (ixgbe_removed(hw->hw_addr)) + DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); + msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); + if (IXGBE_REMOVED(hw->hw_addr)) msix_count = 0; msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; @@ -2950,45 +3446,117 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) } /** - * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to disassociate - * @vmdq: VMDq pool index to remove from the rar + * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is already in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("ixgbe_insert_mac_addr_generic"); + + /* swap bytes for HW little endian */ + addr_low = addr[0] | (addr[1] << 8) + | (addr[2] << 16) + | (addr[3] << 24); + addr_high = addr[4] | (addr[5] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top.
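The insertion helper above packs the six MAC bytes the same way the hardware stores them in a RAL/RAH register pair: four bytes little-endian into RAL and two into the low half of RAH, whose upper bits carry flags such as IXGBE_RAH_AV (hence the 0xFFFF mask in the comparison that follows). A standalone sketch of that packing:

#include <stdint.h>

static void pack_rar(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
{
	/* bytes 0-3, low byte first, as addr_low is built above */
	*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	/* bytes 4-5 occupy only the low 16 bits of RAH */
	*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}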
+ */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + + if (((IXGBE_RAH_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + ixgbe_set_vmdq(hw, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return IXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + ixgbe_clear_vmdq(hw, rar, 0); + + return rar; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar **/ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_clear_vmdq_generic"); + /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - if (ixgbe_removed(hw->hw_addr)) - return 0; + if (IXGBE_REMOVED(hw->hw_addr)) + goto done; if (!mpsar_lo && !mpsar_hi) - return 0; + goto done; if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { if (mpsar_lo) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - mpsar_lo = 0; + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); } if (mpsar_hi) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); - mpsar_hi = 0; + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); } } else if (vmdq < 32) { - mpsar_lo &= ~BIT(vmdq); + mpsar_lo &= ~(1 << vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { - mpsar_hi &= ~BIT(vmdq - 32); + mpsar_hi &= ~(1 << (vmdq - 32)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } @@ -2996,89 +3564,98 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0 && rar != hw->mac.san_mac_rar_index) hw->mac.ops.clear_rar(hw, rar); - - return 0; +done: + return IXGBE_SUCCESS; } /** - * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq pool index + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_set_vmdq_generic"); + /* Make sure we are using a 
valid rar index range */ if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= BIT(vmdq); + mpsar |= 1 << vmdq; IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= BIT(vmdq - 32); + mpsar |= 1 << (vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } - return 0; + return IXGBE_SUCCESS; } /** - * This function should only be involved in the IOV mode. - * In IOV mode, Default pool is next pool after the number of - * VFs advertized and not 0. - * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. + * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] * - * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @vmdq: VMDq pool index + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { u32 rar = hw->mac.san_mac_rar_index; + DEBUGFUNC("ixgbe_set_vmdq_san_mac"); + if (vmdq < 32) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array - * @hw: pointer to hardware structure + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure **/ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) { int i; + DEBUGFUNC("ixgbe_init_uta_tables_generic"); + hw_dbg(hw, " Clearing UTA\n"); + for (i = 0; i < 128; i++) IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vlvf_bypass: true to find vlanid only, false returns first empty slot if + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: true to find vlanid only, false returns first empty slot if * vlanid not found * - * return the VLVF index where this VLAN id should be placed + * + * return the VLVF index where this VLAN id should be placed * **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { s32 regindex, first_empty_slot; u32 bits; @@ -3113,28 +3690,30 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) * slot we found during our search, else error. */ if (!first_empty_slot) - hw_dbg(hw, "No space in VLVF.\n"); + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); - return first_empty_slot ? : IXGBE_ERR_NO_SPACE; + return first_empty_slot ? 
first_empty_slot : IXGBE_ERR_NO_SPACE; } /** - * ixgbe_set_vfta_generic - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF - * @vlvf_bypass: boolean flag indicating updating default pool is okay + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * @vlvf_bypass: boolean flag indicating updating default pool is okay * - * Turn on/off specified VLAN in the VLAN filter table. + * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { - u32 regidx, vfta_delta, vfta, bits; - s32 vlvf_index; + u32 regidx, vfta_delta, vfta; + s32 ret_val; - if ((vlan > 4095) || (vind > 63)) + DEBUGFUNC("ixgbe_set_vfta_generic"); + + if (vlan > 4095 || vind > 63) return IXGBE_ERR_PARAM; /* @@ -3150,18 +3729,62 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = BIT(vlan % 32); + vfta_delta = 1 << (vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); - /* vfta_delta represents the difference between the current value + /* + * vfta_delta represents the difference between the current value * of vfta and the value we want in the register. Since the diff - * is an XOR mask we can just update vfta using an XOR. + * is an XOR mask we can just update the vfta using an XOR */ vfta_delta &= vlan_on ? ~vfta : vfta; vfta ^= vfta_delta; /* Part 2 - * If VT Mode is set + * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF + */ + ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, + vfta, vlvf_bypass); + if (ret_val != IXGBE_SUCCESS) { + if (vlvf_bypass) + goto vfta_update; + return ret_val; + } + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vlvf_generic - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified bit in VLVF table. 
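As the register-indexing comment in ixgbe_set_vfta_generic() above spells out, a VLAN id addresses the 4096-bit VFTA as a register index (vlan / 32) plus a bit position within it (vlan % 32). A worked standalone example of the split (VLAN 100 is an arbitrary choice):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vlan = 100;
	uint32_t regidx = vlan / 32;              /* register 3 */
	uint32_t vfta_delta = 1u << (vlan % 32);  /* bit 4 -> 0x10 */

	printf("VFTA[%u], mask 0x%08x\n", regidx, vfta_delta);
	return 0;
}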
+ **/ +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass) +{ + u32 bits; + s32 vlvf_index; + + DEBUGFUNC("ixgbe_set_vlvf_generic"); + + if (vlan > 4095 || vind > 63) + return IXGBE_ERR_PARAM; + + /* If VT Mode is set * Either vlan_on * make sure the vlan is in VLVF * set the vind bit in the matching VLVFB @@ -3169,24 +3792,21 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * clear the pool bit and possibly the vind */ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) - goto vfta_update; + return IXGBE_SUCCESS; vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); - if (vlvf_index < 0) { - if (vlvf_bypass) - goto vfta_update; + if (vlvf_index < 0) return vlvf_index; - } bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ - bits |= BIT(vind % 32); + bits |= 1 << (vind % 32); if (vlan_on) goto vlvf_update; /* clear the pool bit */ - bits ^= BIT(vind % 32); + bits ^= 1 << (vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { @@ -3194,14 +3814,14 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * we run the risk of stray packets leaking into * the PF via the default pool */ - if (vfta_delta) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + if (*vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); /* disable VLVF and clear remaining bit from pool */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); - return 0; + return IXGBE_SUCCESS; } /* If there are still bits set in the VLVFB registers @@ -3218,31 +3838,28 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * been cleared. This will be indicated by "bits" being * zero. */ - vfta_delta = 0; + *vfta_delta = 0; vlvf_update: /* record pool change and enable VLAN ID if not already enabled */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); -vfta_update: - /* Update VFTA now that we are ready for traffic */ - if (vfta_delta) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); - - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_clear_vfta_generic - Clear VLAN filter table - * @hw: pointer to hardware structure + * ixgbe_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to hardware structure * - * Clears the VLAN filer table, and the VMDq index associated with the filter + * Clears the VLAN filter table, and the VMDq index associated with the filter **/ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) { u32 offset; + DEBUGFUNC("ixgbe_clear_vfta_generic"); + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); @@ -3252,18 +3869,74 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix - * @hw: pointer to hardware structure + * ixgbe_toggle_txdctl_generic - Toggle VF's queues + * @hw: pointer to hardware structure + * @vf_number: VF index * - * Contains the logic to identify if we need to verify link for the - * crosstalk fix + * Enable and disable each queue in VF.
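The toggle routine below infers how many Tx queues each VF owns from the GCR_EXT virtualization mode: 64, 32 and 16 pools map to 2, 4 and 8 queues respectively, consistent with a 128-queue part. A one-line sketch of that relationship (the 128-queue total is an assumption drawn from those three cases, not a constant taken from this driver):

/* queues per pool on a 128-queue device; num_pools is 16, 32 or 64 */
static inline unsigned int queues_per_pool(unsigned int num_pools)
{
	return 128u / num_pools;
}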
+ */ +s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number) +{ + u8 queue_count, i; + u32 offset, reg; + + if (vf_number > 63) + return IXGBE_ERR_PARAM; + + /* + * Determine number of queues by checking + * number of virtual functions + */ + reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) { + case IXGBE_GCR_EXT_VT_MODE_64: + queue_count = 2; + break; + case IXGBE_GCR_EXT_VT_MODE_32: + queue_count = 4; + break; + case IXGBE_GCR_EXT_VT_MODE_16: + queue_count = 8; + break; + default: + return IXGBE_ERR_CONFIG; + } + + /* Toggle queues */ + for (i = 0; i < queue_count; ++i) { + /* Calculate offset of current queue */ + offset = queue_count * vf_number + i; + + /* Enable queue */ + reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset)); + reg |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg); + IXGBE_WRITE_FLUSH(hw); + + /* Disable queue */ + reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset)); + reg &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg); + IXGBE_WRITE_FLUSH(hw); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix + * @hw: pointer to hardware structure + * + * Contains the logic to identify if we need to verify link for the + * crosstalk fix **/ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) { + /* Does FW say we need the fix */ if (!hw->need_crosstalk_fix) return false; @@ -3281,13 +3954,13 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) } /** - * ixgbe_check_mac_link_generic - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true when link is up - * @link_up_wait_to_complete: bool used to wait for link up or not + * ixgbe_check_mac_link_generic - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not * - * Reads the links register to determine if link is up and the current speed + * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) @@ -3295,6 +3968,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, u32 links_reg, links_orig; u32 i; + DEBUGFUNC("ixgbe_check_mac_link_generic"); + /* If Crosstalk fix enabled do the sanity check of making sure * the SFP+ cage is full. 
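Callers of this link check that only need a snapshot pass link_up_wait_to_complete as false, so the LINKS register is read once instead of being polled for up to max_link_up_time. A hedged caller sketch (fragment only, error handling trimmed; identifiers are the ones used elsewhere in this file):

ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
bool link_up = false;

/* single, non-blocking read of link state */
if (ixgbe_check_mac_link_generic(hw, &speed, &link_up, false) ==
    IXGBE_SUCCESS && link_up) {
	/* link is up; speed holds one of the IXGBE_LINK_SPEED_* values */
}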
*/ @@ -3307,7 +3982,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, IXGBE_ESDP_SDP2; break; case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP0; break; @@ -3320,7 +3995,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, if (!sfp_cage_full) { *link_up = false; *speed = IXGBE_LINK_SPEED_UNKNOWN; - return 0; + return IXGBE_SUCCESS; } } @@ -3331,18 +4006,18 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, if (links_orig != links_reg) { hw_dbg(hw, "LINKS changed from %08X to %08X\n", - links_orig, links_reg); + links_orig, links_reg); } if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } - msleep(100); + msec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { @@ -3354,52 +4029,53 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: - if ((hw->mac.type >= ixgbe_mac_X550) && - (links_reg & IXGBE_LINKS_SPEED_NON_STD)) - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: - if ((hw->mac.type >= ixgbe_mac_X550) && - (links_reg & IXGBE_LINKS_SPEED_NON_STD)) - *speed = IXGBE_LINK_SPEED_5GB_FULL; - else - *speed = IXGBE_LINK_SPEED_100_FULL; + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } break; case IXGBE_LINKS_SPEED_10_X550EM_A: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || - hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) *speed = IXGBE_LINK_SPEED_10_FULL; - } break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from - * the EEPROM - * @hw: pointer to hardware structure - * @wwnn_prefix: the alternative WWNN prefix - * @wwpn_prefix: the alternative WWPN prefix + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix * - * This function will read the EEPROM from the alternative SAN MAC address - * block to check the support for the alternative WWNN/WWPN prefix support. + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
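Both the WWN-prefix and FCoE-boot lookups below follow the same EEPROM convention: read a pointer word first, and treat 0x0000 or 0xFFFF as "block not present" rather than as a hard error. A sketch of the pattern (EXAMPLE_BLK_PTR_WORD is a placeholder offset, not a real ixgbe define):

u16 blk_ptr;

if (hw->eeprom.ops.read(hw, EXAMPLE_BLK_PTR_WORD, &blk_ptr))
	return IXGBE_ERR_EEPROM;	/* the read itself failed */
if (blk_ptr == 0x0000 || blk_ptr == 0xFFFF)
	return IXGBE_SUCCESS;		/* feature block simply absent */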
**/ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) + u16 *wwpn_prefix) { u16 offset, caps; u16 alt_san_mac_blk_offset; + DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); + /* clear output first */ *wwnn_prefix = 0xFFFF; *wwpn_prefix = 0xFFFF; @@ -3411,36 +4087,89 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, if ((alt_san_mac_blk_offset == 0) || (alt_san_mac_blk_offset == 0xFFFF)) - return 0; + goto wwn_prefix_out; /* check capability in alternative san mac address block */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; if (hw->eeprom.ops.read(hw, offset, &caps)) goto wwn_prefix_err; if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) - return 0; + goto wwn_prefix_out; /* get the corresponding prefix for WWNN/WWPN */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; - if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) - hw_err(hw, "eeprom read at offset %d failed\n", offset); + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + } offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) goto wwn_prefix_err; - return 0; +wwn_prefix_out: + return IXGBE_SUCCESS; wwn_prefix_err: - hw_err(hw, "eeprom read at offset %d failed\n", offset); - return 0; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_SUCCESS; } /** - * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for MAC anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing + * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) +{ + u16 offset, caps, flags; + s32 status; + + DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); + + /* clear output first */ + *bs = ixgbe_fcoe_bootstatus_unavailable; + + /* check if FCOE IBA block is present */ + offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; + status = hw->eeprom.ops.read(hw, offset, &caps); + if (status != IXGBE_SUCCESS) + goto out; + + if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) + goto out; + + /* check if iSCSI FCOE block is populated */ + status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); + if (status != IXGBE_SUCCESS) + goto out; + + if ((offset == 0) || (offset == 0xFFFF)) + goto out; + + /* read fcoe flags in iSCSI FCOE block */ + offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; + status = hw->eeprom.ops.read(hw, offset, &flags); + if (status != IXGBE_SUCCESS) + goto out; + + if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) + *bs = ixgbe_fcoe_bootstatus_enabled; + else + *bs = ixgbe_fcoe_bootstatus_disabled; + +out: + return status; +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) @@ -3454,17 +4183,17 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - 
pfvfspoof |= BIT(vf_target_shift); + pfvfspoof |= (1 << vf_target_shift); else - pfvfspoof &= ~BIT(vf_target_shift); + pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** - * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for VLAN anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing * **/ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) @@ -3478,106 +4207,43 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= BIT(vf_target_shift); + pfvfspoof |= (1 << vf_target_shift); else - pfvfspoof &= ~BIT(vf_target_shift); + pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** - * ixgbe_get_device_caps_generic - Get additional device capabilities - * @hw: pointer to hardware structure - * @device_caps: the EEPROM word with the extra device capabilities + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities * - * This function will read the EEPROM location for the device capabilities, - * and return the word through device_caps. + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. **/ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) { + DEBUGFUNC("ixgbe_get_device_caps_generic"); + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_set_rxpba_generic - Initialize RX packet buffer - * @hw: pointer to hardware structure - * @num_pb: number of packet buffers to allocate - * @headroom: reserve n KB of headroom - * @strategy: packet buffer allocation strategy - **/ -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, - int num_pb, - u32 headroom, - int strategy) -{ - u32 pbsize = hw->mac.rx_pb_size; - int i = 0; - u32 rxpktsize, txpktsize, txpbthresh; - - /* Reserve headroom */ - pbsize -= headroom; - - if (!num_pb) - num_pb = 1; - - /* Divide remaining packet buffer space amongst the number - * of packet buffers requested using supplied strategy. - */ - switch (strategy) { - case (PBA_STRATEGY_WEIGHTED): - /* pba_80_48 strategy weight first half of packet buffer with - * 5/8 of the packet buffer space. - */ - rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); - pbsize -= rxpktsize * (num_pb / 2); - rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; - for (; i < (num_pb / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* fall through - configure remaining packet buffers */ - case (PBA_STRATEGY_EQUAL): - /* Divide the remaining Rx packet buffer evenly among the TCs */ - rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; - for (; i < num_pb; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - break; - default: - break; - } - - /* - * Setup Tx packet buffer and threshold equally for all TCs - * TXPBTHRESH register is set in K so divide by 1024 and subtract - * 10 since the largest packet we support is just over 9K. 
- */ - txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; - txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; - for (i = 0; i < num_pb; i++) { - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); - IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); - } - - /* Clear unused TCs, if any, to zero buffer size*/ - for (; i < IXGBE_MAX_PB; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); - } -} - -/** - * ixgbe_calculate_checksum - Calculate checksum for buffer - * @buffer: pointer to EEPROM - * @length: size of EEPROM to calculate a checksum for - * - * Calculates the checksum for some buffer on a specified length. The - * checksum calculated is returned. + * ixgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. **/ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; + DEBUGFUNC("ixgbe_calculate_checksum"); + if (!buffer) return 0; @@ -3588,18 +4254,18 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) } /** - * ixgbe_hic_unlocked - Issue command to manageability block unlocked - * @hw: pointer to the HW structure - * @buffer: command to write and where the return status will be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion + * ixgbe_hic_unlocked - Issue command to manageability block unlocked + * @hw: pointer to the HW structure + * @buffer: command to write and where the return status will be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion * - * Communicates with the manageability block. On success return 0 - * else returns semaphore error when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. * - * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held - * by the caller. + * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held + * by the caller. **/ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) @@ -3607,8 +4273,10 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 hicr, i, fwsts; u16 dword_len; + DEBUGFUNC("ixgbe_hic_unlocked"); + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + hw_dbg(hw, "Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } @@ -3636,7 +4304,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, (__force u32)cpu_to_le32(buffer[i])); + i, IXGBE_CPU_TO_LE32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3645,51 +4313,62 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; - usleep_range(1000, 2000); + msec_delay(1); } - /* Check command successful completion. 
*/ - if ((timeout && i == timeout) || - !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + /* For each command except "Apply Update" perform + * status checks in the HICR register. + */ + if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) == + IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD) + return IXGBE_SUCCESS; - return 0; + /* Check command completion */ + if ((timeout && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { + ERROR_REPORT1(IXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + return IXGBE_SUCCESS; } /** - * ixgbe_host_interface_command - Issue command to manageability block - * @hw: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) * Needed because FW structures are big endian and decoding of * these fields can be 8 bit or 16 bit based on command. Decoding * is not easily understood without making a table of commands. * So we will leave this up to the caller to read back the data * in these cases. * - * Communicates with the manageability block. On success return 0 - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
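The manageability protocol used here protects each message with an 8-bit zero-sum checksum: the sender stores the value that makes the byte-sum of the whole message wrap to zero, which by convention is what ixgbe_calculate_checksum() above produces (its final negation step falls outside the visible hunk, so take that as a stated assumption). A standalone demo of the convention with made-up message bytes:

#include <assert.h>
#include <stdint.h>

static uint8_t calc_csum(const uint8_t *buf, unsigned int len)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0 - sum);	/* value that zeroes the total */
}

int main(void)
{
	uint8_t msg[4] = { 0x11, 0x04, 0x00, 0x00 };	/* csum byte last */

	msg[3] = calc_csum(msg, 3);
	assert((uint8_t)(msg[0] + msg[1] + msg[2] + msg[3]) == 0);
	return 0;
}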
**/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - u32 length, u32 timeout, - bool return_data) +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - union { - struct ixgbe_hic_hdr hdr; - u32 u32arr[1]; - } *bp = buffer; - u16 buf_len, dword_len; + struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer; + u16 buf_len; s32 status; u32 bi; + u32 dword_len; - if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + DEBUGFUNC("ixgbe_host_interface_command"); + + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); if (status) @@ -3707,16 +4386,31 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&bp->u32arr[bi]); + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); } - /* If there is any thing in data position pull it in */ - buf_len = bp->hdr.buf_len; + /* + * If there is anything in the data position pull it in + * Read Flash command requires reading buffer length from + * two bytes instead of one byte + */ + if (resp->cmd == 0x30 || resp->cmd == 0x31) { + for (; bi < dword_len + 2; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); + } + buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3) + & 0xF00) | resp->buf_len; + hdr_size += (2 << 2); + } else { + buf_len = resp->buf_len; + } if (!buf_len) goto rel_out; - if (length < round_up(buf_len, 4) + hdr_size) { + if (length < buf_len + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); status = IXGBE_ERR_HOST_INTERFACE_COMMAND; goto rel_out; @@ -3727,8 +4421,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&bp->u32arr[bi]); + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); } rel_out: @@ -3738,32 +4432,35 @@ rel_out: } /** - * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware - * @hw: pointer to the HW structure - * @maj: driver version major number - * @min: driver version minor number - * @build: driver version build number - * @sub: driver version sub build number - * @len: length of driver_ver string - * @driver_ver: driver string + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: unused + * @driver_ver: unused * - * Sends driver version number to firmware through the manageability - * block. On success return 0 - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * Sends driver version number to firmware through the manageability + * block.
On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, __always_unused u16 len, - __always_unused const char *driver_ver) + u8 build, u8 sub, u16 len, + const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; - s32 ret_val; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); + UNREFERENCED_2PARAMETER(len, driver_ver); fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = hw->bus.func; + fw_cmd.port_num = (u8)hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; @@ -3775,16 +4472,16 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, &fw_cmd, + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); - if (ret_val != 0) + if (ret_val != IXGBE_SUCCESS) continue; if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) - ret_val = 0; + ret_val = IXGBE_SUCCESS; else ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; @@ -3794,6 +4491,65 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, return ret_val; } +/** + * ixgbe_set_rxpba_generic - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* ixgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* fall through - configure remaining packet buffers */ + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. 
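Under PBA_STRATEGY_WEIGHTED above, the first half of the packet buffers receives 5/8 of the space: each of the first num_pb/2 buffers gets pbsize * 5 / (num_pb * 4), and whatever remains is split evenly. A worked standalone example (512 KB and 8 buffers are arbitrary inputs):

#include <stdio.h>

int main(void)
{
	unsigned int pbsize = 512, num_pb = 8;		/* KB, buffers */
	unsigned int big = (pbsize * 5) / (num_pb * 4);	/* 80 KB each */

	pbsize -= big * (num_pb / 2);			/* 192 KB left */
	printf("%u x %u KB + %u x %u KB\n",
	       num_pb / 2, big, num_pb / 2, pbsize / (num_pb / 2));
	return 0;	/* 4 x 80 KB + 4 x 48 KB = 512 KB total */
}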
*/ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + /** * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo * @hw: pointer to the hardware structure @@ -3822,23 +4578,25 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); - /* wait for a last completion before clearing buffers */ + /* Wait for a last completion before clearing buffers */ IXGBE_WRITE_FLUSH(hw); - usleep_range(3000, 6000); + msec_delay(3); - /* Before proceeding, make sure that the PCIe block does not have + /* + * Before proceeding, make sure that the PCIe block does not have * transactions pending. */ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { - usleep_range(100, 200); - value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); - if (ixgbe_removed(hw->hw_addr)) - break; + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - break; + goto out; } +out: /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, @@ -3846,20 +4604,20 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) /* Flush all writes and allow 20usec for all transactions to clear */ IXGBE_WRITE_FLUSH(hw); - udelay(20); + usec_delay(20); /* restore previous register values */ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); } -static const u8 ixgbe_emc_temp_data[4] = { +STATIC const u8 ixgbe_emc_temp_data[4] = { IXGBE_EMC_INTERNAL_DATA, IXGBE_EMC_DIODE1_DATA, IXGBE_EMC_DIODE2_DATA, IXGBE_EMC_DIODE3_DATA }; -static const u8 ixgbe_emc_therm_limit[4] = { +STATIC const u8 ixgbe_emc_therm_limit[4] = { IXGBE_EMC_INTERNAL_THERM_LIMIT, IXGBE_EMC_DIODE1_THERM_LIMIT, IXGBE_EMC_DIODE2_THERM_LIMIT, @@ -3867,71 +4625,60 @@ static const u8 ixgbe_emc_therm_limit[4] = { }; /** - * ixgbe_get_ets_data - Extracts the ETS bit data - * @hw: pointer to hardware structure - * @ets_cfg: extected ETS data - * @ets_offset: offset of ETS data + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure * - * Returns error code. 
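The thermal-sensor code below repeatedly pulls packed fields out of the ETS configuration word with the usual mask-and-shift idiom. A generic standalone sketch (the EX_* layout is a placeholder; the real masks and shifts are the IXGBE_ETS_* defines in ixgbe_type.h):

#include <stdint.h>

#define EX_TYPE_MASK	0xF000u		/* placeholder layout */
#define EX_TYPE_SHIFT	12
#define EX_NUM_MASK	0x0007u

static void decode_ets(uint16_t ets_cfg, uint8_t *type, uint8_t *nsensors)
{
	*type = (uint8_t)((ets_cfg & EX_TYPE_MASK) >> EX_TYPE_SHIFT);
	*nsensors = (uint8_t)(ets_cfg & EX_NUM_MASK);
}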
- **/ -static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, - u16 *ets_offset) -{ - s32 status; - - status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); - if (status) - return status; - - if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) - return IXGBE_NOT_IMPLEMENTED; - - status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); - if (status) - return status; - - if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) - return IXGBE_NOT_IMPLEMENTED; - - return 0; -} - -/** - * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data - * @hw: pointer to hardware structure - * - * Returns the thermal sensor data structure + * Returns the thermal sensor data structure **/ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) { - s32 status; + s32 status = IXGBE_SUCCESS; u16 ets_offset; u16 ets_cfg; u16 ets_sensor; u8 num_sensors; + u8 sensor_index; + u8 sensor_location; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; - /* Only support thermal sensors attached to physical port 0 */ - if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) - return IXGBE_NOT_IMPLEMENTED; + DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic"); - status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset); if (status) - return status; + goto out; + + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg); + if (status) + goto out; + + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); if (num_sensors > IXGBE_MAX_SENSORS) num_sensors = IXGBE_MAX_SENSORS; for (i = 0; i < num_sensors; i++) { - u8 sensor_index; - u8 sensor_location; - status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); if (status) - return status; + goto out; sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> IXGBE_ETS_DATA_INDEX_SHIFT); @@ -3944,11 +4691,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) IXGBE_I2C_THERMAL_SENSOR_ADDR, &data->sensor[i].temp); if (status) - return status; + goto out; } } - - return 0; +out: + return status; } /** @@ -3960,39 +4707,51 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) { - s32 status; + s32 status = IXGBE_SUCCESS; + u16 offset; u16 ets_offset; u16 ets_cfg; u16 ets_sensor; u8 low_thresh_delta; u8 num_sensors; + u8 sensor_index; + u8 sensor_location; u8 therm_limit; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic"); + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); - /* Only support thermal sensors attached to physical port 0 */ - if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) return IXGBE_NOT_IMPLEMENTED; - status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); - if (status) - 
return status; + offset = IXGBE_ETS_CFG; + if (hw->eeprom.ops.read(hw, offset, &ets_offset)) + goto eeprom_err; + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return IXGBE_NOT_IMPLEMENTED; + + offset = ets_offset; + if (hw->eeprom.ops.read(hw, offset, &ets_cfg)) + goto eeprom_err; + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) + return IXGBE_NOT_IMPLEMENTED; low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> IXGBE_ETS_LTHRES_DELTA_SHIFT); num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); - if (num_sensors > IXGBE_MAX_SENSORS) - num_sensors = IXGBE_MAX_SENSORS; for (i = 0; i < num_sensors; i++) { - u8 sensor_index; - u8 sensor_location; - - if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { - hw_err(hw, "eeprom read at offset %d failed\n", - ets_offset + 1 + i); + offset = ets_offset + 1 + i; + if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + offset); continue; } sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> @@ -4005,25 +4764,29 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) ixgbe_emc_therm_limit[sensor_index], IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); - if (sensor_location == 0) - continue; - - data->sensor[i].location = sensor_location; - data->sensor[i].caution_thresh = therm_limit; - data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; + if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } } + return status; - return 0; +eeprom_err: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_NOT_IMPLEMENTED; } /** - * ixgbe_get_orom_version - Return option ROM from EEPROM + * ixgbe_get_orom_version - Return option ROM from EEPROM * - * @hw: pointer to hardware structure - * @nvm_ver: pointer to output structure + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure * - * if valid option ROM version, nvm_ver->or_valid set to true - * else nvm_ver->or_valid is false. + * if valid option ROM version, nvm_ver->or_valid set to true + * else nvm_ver->or_valid is false. **/ void ixgbe_get_orom_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) @@ -4035,7 +4798,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw, hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); /* make sure offset is valid */ - if (offset == 0x0 || offset == NVM_INVALID_PTR) + if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) return; hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); @@ -4055,13 +4818,13 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw, } /** - * ixgbe_get_oem_prod_version Etrack ID from EEPROM + * ixgbe_get_oem_prod_version - Return OEM Product version * - * @hw: pointer to hardware structure - * @nvm_ver: pointer to output structure + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure * - * if valid OEM product version, nvm_ver->oem_valid set to true - * else nvm_ver->oem_valid is false. + * if valid OEM product version, nvm_ver->oem_valid set to true + * else nvm_ver->oem_valid is false. 
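The Etrack ID read below arrives as two 16-bit EEPROM words; presumably they combine as the high word shifted left 16 bits or'd with the low word (the combining line itself falls outside the visible hunk, so treat the shift width as an assumption). A standalone sketch:

#include <stdint.h>

static uint32_t combine_etk(uint16_t etk_id_h, uint16_t etk_id_l)
{
	return ((uint32_t)etk_id_h << 16) | etk_id_l;
}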
**/ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) @@ -4099,15 +4862,14 @@ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, } /** - * ixgbe_get_etk_id - Return Etrack ID from EEPROM + * ixgbe_get_etk_id - Return Etrack ID from EEPROM * - * @hw: pointer to hardware structure - * @nvm_ver: pointer to output structure + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure * - * word read errors will return 0xFFFF + * word read errors will return 0xFFFF **/ -void ixgbe_get_etk_id(struct ixgbe_hw *hw, - struct ixgbe_nvm_version *nvm_ver) +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 etk_id_l, etk_id_h; @@ -4128,15 +4890,32 @@ void ixgbe_get_etk_id(struct ixgbe_hw *hw, } } +/** + * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + return; +} + void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) { + u32 pfdtxgswc; u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { if (hw->mac.type != ixgbe_mac_82598EB) { - u32 pfdtxgswc; - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; @@ -4153,6 +4932,7 @@ void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) { + u32 pfdtxgswc; u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); @@ -4160,8 +4940,6 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) if (hw->mac.type != ixgbe_mac_82598EB) { if (hw->mac.set_lben) { - u32 pfdtxgswc; - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); @@ -4170,9 +4948,10 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) } } -/** ixgbe_mng_present - returns true when management capability is present +/** + * ixgbe_mng_present - returns true when management capability is present * @hw: pointer to hardware structure - **/ + */ bool ixgbe_mng_present(struct ixgbe_hw *hw) { u32 fwsm; @@ -4180,33 +4959,62 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw) if (hw->mac.type < ixgbe_mac_82599EB) return false; - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); } /** - * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_mng_enabled - Is the manageability engine enabled? + * @hw: pointer to hardware structure * - * Set the link speed in the MAC and/or PHY register and restarts link. - */ + * Returns true if the manageability engine is enabled. 
+ **/ +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return false; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.type <= ixgbe_mac_X540) { + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (factps & IXGBE_FACTPS_MNGCG) + return false; + } + + return true; +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. + **/ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 status = 0; + s32 status = IXGBE_SUCCESS; u32 speedcnt = 0; u32 i = 0; bool autoneg, link_up = false; + DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); + /* Mask off requested but non-supported speeds */ - status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); - if (status) + status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); + if (status != IXGBE_SUCCESS) return status; speed &= link_speed; @@ -4221,29 +5029,28 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.set_rate_select_speed(hw, + ixgbe_set_rate_select_speed(hw, IXGBE_LINK_SPEED_10GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects MAC link speed */ break; default: - hw_dbg(hw, "Unexpected media type\n"); + hw_dbg(hw, "Unexpected media type.\n"); break; } /* Allow module to change analog characteristics (1G->10G) */ - msleep(40); + msec_delay(40); - status = hw->mac.ops.setup_mac_link(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg_wait_to_complete); - if (status) + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) return status; /* Flap the Tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); + ixgbe_flap_tx_laser(hw); /* Wait for the controller to acquire link. 
Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is @@ -4251,12 +5058,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, */ for (i = 0; i < 5; i++) { /* Wait for the link partner to also set speed */ - msleep(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status) + status = ixgbe_check_link(hw, &link_speed, + &link_up, false); + if (status != IXGBE_SUCCESS) return status; if (link_up) @@ -4272,37 +5079,35 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.set_rate_select_speed(hw, - IXGBE_LINK_SPEED_1GB_FULL); + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects link speed */ break; default: - hw_dbg(hw, "Unexpected media type\n"); + hw_dbg(hw, "Unexpected media type.\n"); break; } /* Allow module to change analog characteristics (10G->1G) */ - msleep(40); + msec_delay(40); - status = hw->mac.ops.setup_mac_link(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg_wait_to_complete); - if (status) + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) return status; /* Flap the Tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); + ixgbe_flap_tx_laser(hw); /* Wait for the link partner to also set speed */ - msleep(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status) + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) return status; if (link_up) @@ -4332,14 +5137,14 @@ out: } /** - * ixgbe_set_soft_rate_select_speed - Set module link speed - * @hw: pointer to hardware structure - * @speed: link speed to set + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set * - * Set module link speed via the soft rate select. + * Set module link speed via the soft rate select. 
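+ *
+ * Minimal usage sketch (editor's addition, mirroring the multispeed-fiber
+ * caller above):
+ *
+ *	ixgbe_set_soft_rate_select_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
+ *	msec_delay(40);
+ *
+ * where the 40 ms pause gives the module time to retune its analog
+ * characteristics, matching the delays used by the caller.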
*/ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed) + ixgbe_link_speed speed) { s32 status; u8 rs, eeprom_data; @@ -4363,7 +5168,7 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, &eeprom_data); if (status) { hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); - return; + goto out; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; @@ -4373,7 +5178,7 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, eeprom_data); if (status) { hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); - return; + goto out; } /* Set RS1 */ @@ -4382,7 +5187,7 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, &eeprom_data); if (status) { hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); - return; + goto out; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; @@ -4392,6 +5197,8 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, eeprom_data); if (status) { hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); - return; + goto out; } +out: + return; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 4b531e8ae38a..6c8f6ea2226c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,24 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #ifndef _IXGBE_COMMON_H_ #define _IXGBE_COMMON_H_ #include "ixgbe_type.h" -#include "ixgbe.h" + +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +void ixgbe_start_hw_gen2(struct ixgbe_hw *hw); s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); -enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); -enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); @@ -44,64 +44,85 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func, bool clear); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); -s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); -s32 
ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); + s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); -s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); -s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); -s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); -s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on, bool vlvf_bypass); -s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); + +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); +s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vind); + +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); + +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver, u16 len, const char *str); u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, - u32 timeout, bool return_data); -s32 
ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); -s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout); +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity, u32 (*data)[FW_PHY_ACT_DATA_COUNT]); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + +extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, - u32 headroom, int strategy); - -extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; - #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 #define IXGBE_EMC_INTERNAL_DATA 0x00 #define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 @@ -114,8 +135,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); -void ixgbe_get_etk_id(struct ixgbe_hw *hw, - struct ixgbe_nvm_version *nvm_ver); + +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); void ixgbe_get_orom_version(struct ixgbe_hw *hw, @@ -127,80 +148,4 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); - -#define IXGBE_FAILED_READ_RETRIES 5 -#define IXGBE_FAILED_READ_REG 0xffffffffU -#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU -#define IXGBE_FAILED_READ_CFG_WORD 0xffffU - -u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); -void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); - -static inline bool ixgbe_removed(void __iomem *addr) -{ - return unlikely(!addr); -} - -static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) -{ - u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); - - if (ixgbe_removed(reg_addr)) - return; - writel(value, reg_addr + reg); -} -#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value)) - -#ifndef writeq -#define writeq writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel((u32)val, addr); - writel((u32)(val >> 32), addr + 4); -} -#endif - -static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) -{ - u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); - - if (ixgbe_removed(reg_addr)) - return; - writeq(value, reg_addr + reg); -} -#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value)) - -u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg); -#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg)) - -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ - ixgbe_write_reg((a), (reg) + ((offset) << 2), (value)) - -#define IXGBE_READ_REG_ARRAY(a, reg, offset) \ - ixgbe_read_reg((a), (reg) + ((offset) << 2)) - -#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS) - -#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev) - -#define hw_dbg(hw, format, arg...) \ - netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) -#define hw_err(hw, format, arg...) 
\ - netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) -#define e_dev_info(format, arg...) \ - dev_info(&adapter->pdev->dev, format, ## arg) -#define e_dev_warn(format, arg...) \ - dev_warn(&adapter->pdev->dev, format, ## arg) -#define e_dev_err(format, arg...) \ - dev_err(&adapter->pdev->dev, format, ## arg) -#define e_dev_notice(format, arg...) \ - dev_notice(&adapter->pdev->dev, format, ## arg) -#define e_info(msglvl, format, arg...) \ - netif_info(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_err(msglvl, format, arg...) \ - netif_err(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_warn(msglvl, format, arg...) \ - netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_crit(msglvl, format, arg...) \ - netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) #endif /* IXGBE_COMMON */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index d26cea5b43bd..a62250d95fa8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,14 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + -#include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /** - * ixgbe_ieee_credits - This calculates the ieee traffic class + * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class * credits from the configured bandwidth percentages. Credits * are the smallest unit programmable into the underlying * hardware. The IEEE 802.1Qaz specification do not use bandwidth * groups so this is much simplified from the CEE case. * @bw: bandwidth index by traffic class * @refill: refill credits index by traffic class * @max: max credits by traffic class - * @max_frame: maximum frame size + * @max_frame_size: maximum frame size */ -static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, - __u16 *max, int max_frame) +s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, + int max_frame_size) { int min_percent = 100; int min_credit, multiplier; int i; - min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / - DCB_CREDIT_QUANTUM; + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if (bw[i] < min_percent && bw[i]) min_percent = bw[i]; } @@ -36,52 +36,56 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, multiplier = (min_credit / min_percent) + 1; /* Find out the hw credits for each TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); if (val < min_credit) val = min_credit; - refill[i] = val; + refill[i] = (u16)val; - max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; + max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; } + return 0; } /** - * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits + * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits * @hw: pointer to hardware structure * @dcb_config: Struct containing DCB settings - * @max_frame: Maximum frame size + * @max_frame_size: Maximum frame size * @direction: Configuring either Tx or Rx * * This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by - * ixgbe_dcb_check_config(). + * ixgbe_dcb_check_config_cee(). */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config, - int max_frame, u8 direction) + u32 max_frame_size, u8 direction) { - struct tc_bw_alloc *p; - int min_credit; - int min_multiplier; - int min_percent = 100; + struct ixgbe_dcb_tc_path *p; + u32 min_multiplier = 0; + u16 min_percent = 100; + s32 ret_val = IXGBE_SUCCESS; /* Initialization values default for Tx settings */ - u32 credit_refill = 0; - u32 credit_max = 0; - u16 link_percentage = 0; - u8 bw_percent = 0; + u32 min_credit = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; u8 i; - if (!dcb_config) - return DCB_ERR_CONFIG; + if (dcb_config == NULL) { + ret_val = IXGBE_ERR_CONFIG; + goto out; + } - min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / - DCB_CREDIT_QUANTUM; + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; /* Find smallest link percentage */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; @@ -103,7 +107,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, min_multiplier = (min_credit / min_percent) + 1; /* Find out the link percentage for each TC first */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; @@ -118,7 +122,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, - MAX_CREDIT_REFILL); + (u32)IXGBE_DCB_MAX_CREDIT_REFILL); /* Refill at least minimum credit */ if (credit_refill < min_credit) @@ -127,7 +131,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ - credit_max = (link_percentage * MAX_CREDIT) / 100; + credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; /* * Adjustment based on rule checking, if the percentage @@ -137,87 +141,103 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, if (credit_max < min_credit) credit_max = min_credit; - if (direction == DCB_TX_CONFIG) { + if (direction == IXGBE_DCB_TX_CONFIG) { /* * Adjustment based on rule checking, if the * percentage of a TC is too small, the maximum * credit may not be enough to send out a TSO * packet in descriptor plane arbitration. 
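+ * Worked numbers (editor's sketch, not from this patch): a 32 KB TSO
+ * burst spans 32768 / 64 = 512 credit quanta, so the floor applied
+ * below is IXGBE_DCB_MIN_TSO_CREDIT = 513; by comparison, a 1518-byte
+ * max_frame_size yields a generic minimum of only
+ * ((1518 / 2) + 63) / 64 = 12 credits.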
*/ - if ((hw->mac.type == ixgbe_mac_82598EB) && - credit_max && - (credit_max < MINIMUM_CREDIT_FOR_TSO)) - credit_max = MINIMUM_CREDIT_FOR_TSO; + if (credit_max && (credit_max < + IXGBE_DCB_MIN_TSO_CREDIT) + && (hw->mac.type == ixgbe_mac_82598EB)) + credit_max = IXGBE_DCB_MIN_TSO_CREDIT; dcb_config->tc_config[i].desc_credits_max = - (u16)credit_max; + (u16)credit_max; } p->data_credits_max = (u16)credit_max; } - return 0; +out: + return ret_val; } -void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +/** + * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info + * @cfg: dcb configuration to unpack into hardware consumable fields + * @map: user priority to traffic class map + * @pfc_up: u8 to store user priority PFC bitmask + * + * This unpacks the dcb configuration PFC info which is stored per + * traffic class into a 8bit user priority bitmask that can be + * consumed by hardware routines. The priority to tc map must be + * updated before calling this routine to use current up-to maps. + */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; - int tc; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; - for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { - if (tc_config[tc].dcb_pfc != pfc_disabled) - *pfc_en |= BIT(tc); + /* + * If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. + */ + for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; } } -void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) refill[tc] = tc_config[tc].path[direction].data_credits_refill; } -void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) max[tc] = tc_config[tc].desc_credits_max; } -void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) bwgid[tc] = tc_config[tc].path[direction].bwg_id; } -void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, - u8 *ptype) +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *tsa) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) - ptype[tc] = tc_config[tc].path[direction].prio_type; + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; } u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, 
int direction, u8 up) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; - u8 prio_mask = BIT(up); + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ if (!tc) - return 0; + goto out; /* * Test from maximum TC to 1 and report the first match we find. If @@ -228,163 +248,456 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) break; } - +out: return tc; } -void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *map) { u8 up; - for (up = 0; up < MAX_USER_PRIORITY; up++) + for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); } /** - * ixgbe_dcb_hw_config - Config and enable DCB + * ixgbe_dcb_check_config_cee - Check DCB rules for valid configuration + * @dcb_config: Pointer to DCB config structure + * + * This function checks DCB rules for DCB settings. + * The following rules are checked: + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth + * Group must total 100%. + * 3. A Traffic Class should not be set to both Link Strict Priority + * and Group Strict Priority. + * 4. Link strict Bandwidth Groups can only have link strict traffic classes + * with zero bandwidth. + */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) +{ + struct ixgbe_dcb_tc_path *p; + s32 ret_val = IXGBE_SUCCESS; + u8 i, j, bw = 0, bw_id; + u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; + bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; + + memset(bw_sum, 0, sizeof(bw_sum)); + memset(link_strict, 0, sizeof(link_strict)); + + /* First Tx, then Rx */ + for (i = 0; i < 2; i++) { + /* Check each traffic class for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + p = &dcb_config->tc_config[j].path[i]; + + bw = p->bwg_percent; + bw_id = p->bwg_id; + + if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + if (p->tsa == ixgbe_dcb_tsa_strict) { + link_strict[i][bw_id] = true; + /* Link strict should have zero bandwidth */ + if (bw) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (!bw) { + /* + * Traffic classes without link strict + * should have non-zero bandwidth. + */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + bw_sum[i][bw_id] += bw; + } + + bw = 0; + + /* Check each bandwidth group for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { + bw += dcb_config->bw_percentage[i][j]; + /* + * Sum of bandwidth percentages of all traffic classes + * within a Bandwidth Group must total 100 except for + * link strict group (zero bandwidth). + */ + if (link_strict[i][j]) { + if (bw_sum[i][j]) { + /* + * Link strict group should have zero + * bandwidth.
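+ * (Rule 4 above: this group was flagged in
+ * link_strict[] because it contains a
+ * link-strict TC, so any nonzero bw_sum here
+ * is a configuration error.)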
+ */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && + bw_sum[i][j] != 0) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + + if (bw != IXGBE_DCB_BW_PERCENT) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + +err_config: + + return ret_val; +} + +/** + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of traffic classes to gather statistics for. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of traffic classes to gather statistics for. + * + * This function returns the CBFC status data for each of the Traffic Classes. + */ +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, + tsa, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class.
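+ *
+ * Call-sequence sketch (hypothetical, editor's addition): a CEE bring-up
+ * typically validates the config once and then programs all three
+ * arbiters from it, e.g.
+ *
+ *	if (ixgbe_dcb_check_config_cee(cfg) == IXGBE_SUCCESS) {
+ *		ixgbe_dcb_config_rx_arbiter_cee(hw, cfg);
+ *		ixgbe_dcb_config_tx_desc_arbiter_cee(hw, cfg);
+ *		ixgbe_dcb_config_tx_data_arbiter_cee(hw, cfg);
+ *	}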
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwgid, tsa); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwgid, tsa, + map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_pfc_cee - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tc_stats - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers; all queues belonging to the same + * traffic class use a single set of queue statistics counters.
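+ *
+ * Note (editor's reading of the body below, not asserted by this patch):
+ * the 82599-class path is invoked as
+ *
+ *	ixgbe_dcb_config_tc_stats_82599(hw, NULL);
+ *
+ * where the NULL config appears to select the default queue-to-TC
+ * statistics layout.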
+ */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tc_stats_82598(hw); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_hw_config_cee - Config and enable DCB * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure dcb settings and enable dcb mode. */ -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { + s32 ret = IXGBE_NOT_IMPLEMENTED; u8 pfc_en; - u8 ptype[MAX_TRAFFIC_CLASS]; - u8 bwgid[MAX_TRAFFIC_CLASS]; - u8 prio_tc[MAX_TRAFFIC_CLASS]; - u16 refill[MAX_TRAFFIC_CLASS]; - u16 max[MAX_TRAFFIC_CLASS]; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Unpack CEE standard containers */ - ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); - ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max(dcb_config, max); - ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); - ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); - ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); switch (hw->mac.type) { case ixgbe_mac_82598EB: - return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, - bwgid, ptype); + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, + refill, max, bwgid, tsa); + break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, - bwgid, ptype, prio_tc); + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_82599(hw, dcb_config); + ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, + refill, max, bwgid, + tsa, map); + + ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); + break; default: break; } - return 0; + + if (!ret && dcb_config->pfc_mode_enable) { + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; } /* Helper routines to abstract HW specifics from DCB netlink ops */ -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) { + int ret = IXGBE_ERR_PARAM; + switch (hw->mac.type) { case ixgbe_mac_82598EB: - return ixgbe_dcb_config_pfc_82598(hw, pfc_en); + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; default: break; } - return -EINVAL; + return ret; } -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) -{ - __u16 refill[IEEE_8021QAZ_MAX_TCS], 
max[IEEE_8021QAZ_MAX_TCS]; - __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; - int i; - - /* naively give each TC a bwg to map onto CEE hardware */ - __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; - - /* Map TSA onto CEE prio type */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - switch (ets->tc_tsa[i]) { - case IEEE_8021QAZ_TSA_STRICT: - prio_type[i] = 2; - break; - case IEEE_8021QAZ_TSA_ETS: - prio_type[i] = 0; - break; - default: - /* Hardware only supports priority strict or - * ETS transmission selection algorithms if - * we receive some other value from dcbnl - * throw an error - */ - return -EINVAL; - } - } - - ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); - return ixgbe_dcb_hw_ets_config(hw, refill, max, - bwg_id, prio_type, ets->prio_tc); -} - -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type, u8 *prio_tc) +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *tsa, u8 *map) { switch (hw->mac.type) { case ixgbe_mac_82598EB: - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, - prio_type); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwg_id, prio_type); + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, - bwg_id, prio_type, prio_tc); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwg_id, prio_type); + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, - prio_type, prio_tc); + tsa, map); break; default: break; } return 0; } - -static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) -{ - u32 reg, i; - - reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - for (i = 0; i < MAX_USER_PRIORITY; i++) - map[i] = IXGBE_RTRUP2TC_UP_MASK & - (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); -} - -void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) -{ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - ixgbe_dcb_read_rtrup2tc_82599(hw, map); - break; - default: - break; - } -} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 60cd5863bf5e..88217d1630ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,146 +1,144 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ -#ifndef _DCB_CONFIG_H_ -#define _DCB_CONFIG_H_ +#ifndef _IXGBE_DCB_H_ +#define _IXGBE_DCB_H_ -#include #include "ixgbe_type.h" -/* DCB data structures */ +/* DCB defines */ +/* DCB credit calculation defines */ +#define IXGBE_DCB_CREDIT_QUANTUM 64 +#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ +#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ +#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL) -#define IXGBE_MAX_PACKET_BUFFERS 8 -#define MAX_USER_PRIORITY 8 -#define MAX_BW_GROUP 8 -#define BW_PERCENT 100 +/* 513 for 32KB TSO packet */ +#define IXGBE_DCB_MIN_TSO_CREDIT \ + ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1) -#define DCB_TX_CONFIG 0 -#define DCB_RX_CONFIG 1 +/* DCB configuration defines */ +#define IXGBE_DCB_MAX_USER_PRIORITY 8 +#define IXGBE_DCB_MAX_BW_GROUP 8 +#define IXGBE_DCB_BW_PERCENT 100 -/* DCB error Codes */ -#define DCB_SUCCESS 0 -#define DCB_ERR_CONFIG -1 -#define DCB_ERR_PARAM -2 +#define IXGBE_DCB_TX_CONFIG 0 +#define IXGBE_DCB_RX_CONFIG 1 -/* Transmit and receive Errors */ -/* Error in bandwidth group allocation */ -#define DCB_ERR_BW_GROUP -3 -/* Error in traffic class bandwidth allocation */ -#define DCB_ERR_TC_BW -4 -/* Traffic class has both link strict and group strict enabled */ -#define DCB_ERR_LS_GS -5 -/* Link strict traffic class has non zero bandwidth */ -#define DCB_ERR_LS_BW_NONZERO -6 -/* Link strict bandwidth group has non zero bandwidth */ -#define DCB_ERR_LS_BWG_NONZERO -7 -/* Traffic class has zero bandwidth */ -#define DCB_ERR_TC_BW_ZERO -8 +/* DCB capability defines */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 -#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF - -struct dcb_pfc_tc_debug { - u8 tc; - u8 pause_status; - u64 pause_quanta; -}; - -enum strict_prio_type { - prio_none = 0, - prio_group, - prio_link -}; - -/* DCB capability definitions */ -#define IXGBE_DCB_PG_SUPPORT 0x00000001 -#define IXGBE_DCB_PFC_SUPPORT 0x00000002 -#define IXGBE_DCB_BCN_SUPPORT 0x00000004 -#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 -#define IXGBE_DCB_GSP_SUPPORT 0x00000010 - -#define IXGBE_DCB_8_TC_SUPPORT 0x80 - -struct dcb_support { - /* DCB capabilities */ - u32 capabilities; +struct ixgbe_dcb_support { + u32 capabilities; /* DCB capabilities */ /* Each bit represents a number of TCs configurable in the hw. - * If 8 traffic classes can be configured, the value is 0x80. - */ - u8 traffic_classes; - u8 pfc_traffic_classes; + * If 8 traffic classes can be configured, the value is 0x80. 
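+ * (Illustrative, assumed encoding: a part able to run in either 4-TC or
+ * 8-TC mode would report 0x88, i.e. bits 3 and 7 set.)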
*/ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +enum ixgbe_dcb_tsa { + ixgbe_dcb_tsa_ets = 0, + ixgbe_dcb_tsa_group_strict_cee, + ixgbe_dcb_tsa_strict }; /* Traffic class bandwidth allocation per direction */ -struct tc_bw_alloc { - u8 bwg_id; /* Bandwidth Group (BWG) ID */ - u8 bwg_percent; /* % of BWG's bandwidth */ - u8 link_percent; /* % of link bandwidth */ - u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ - u16 data_credits_refill; /* Credit refill amount in 64B granularity */ - u16 data_credits_max; /* Max credits for a configured packet buffer - * in 64B granularity.*/ - enum strict_prio_type prio_type; /* Link or Group Strict Priority */ +struct ixgbe_dcb_tc_path { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ }; -enum dcb_pfc_type { - pfc_disabled = 0, - pfc_enabled_full, - pfc_enabled_tx, - pfc_enabled_rx +enum ixgbe_dcb_pfc { + ixgbe_dcb_pfc_disabled = 0, + ixgbe_dcb_pfc_enabled, + ixgbe_dcb_pfc_enabled_txonly, + ixgbe_dcb_pfc_enabled_rxonly }; /* Traffic class configuration */ -struct tc_configuration { - struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ - enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ +struct ixgbe_dcb_tc_config { + struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ + enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */ u16 desc_credits_max; /* For Tx Descriptor arbitration */ u8 tc; /* Traffic class (TC) */ }; -struct dcb_num_tcs { +enum ixgbe_dcb_pba { + /* PBA[0-7] each use 64KB FIFO */ + ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, + /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ + ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED +}; + +struct ixgbe_dcb_num_tcs { u8 pg_tcs; u8 pfc_tcs; }; struct ixgbe_dcb_config { - struct dcb_support support; - struct dcb_num_tcs num_tcs; - struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; - u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ - bool pfc_mode_enable; + struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + struct ixgbe_dcb_support support; + struct ixgbe_dcb_num_tcs num_tcs; + u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + bool round_robin_enable; - u32 dcb_cfg_version; /* Not used...OS-specific? */ - u32 link_speed; /* For bandwidth allocation validation purpose */ + enum ixgbe_dcb_pba rx_pba_cfg; + + u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ + u32 link_speed; /* For bandwidth allocation validation purpose */ + bool vt_mode; }; /* DCB driver APIs */ -void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); -void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); -void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); -void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); -u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB rule checking */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *); /* DCB credits calculation */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, - struct ixgbe_dcb_config *, int, u8); +s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *, u32, u8); -/* DCB hw initialization */ -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, - u8 *bwg_id, u8 *prio_type, u8 *tc_prio); -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *); +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); -void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); -/* DCB definitions for credit calculation */ -#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ -#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ -#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ -#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ -#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); -#endif /* _DCB_CONFIG_H */ +/* DCB unpack routines */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *); +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); +#endif /* _IXGBE_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index 379ae747cdce..db550e5da7f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,29 +1,98 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 
Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + -#include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" +/** + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of traffic classes to gather statistics for. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); + +#if 0 + /* Can we get rid of these? If so, the tc_stats + * structure could be dropped as well. + */ + tc_stats_array[up]->in_overflow_discards = 0; + tc_stats_array[up]->out_overflow_discards = 0; +#endif + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of traffic classes to gather statistics for. + * + * This function returns the CBFC status data for each of the Traffic Classes. + */ +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); + } + + return IXGBE_SUCCESS; +} + /** * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class - * @prio_type: priority type indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class * * Configure Rx Data Arbiter and credits for each traffic class.
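+ *
+ * Packing sketch for the per-TC RT2CR write below (hypothetical numbers,
+ * editor's addition): with refill = 100 and max = 200 the register value
+ * becomes
+ *
+ *	reg = 100 | (200 << IXGBE_RT2CR_MCL_SHIFT);
+ *
+ * with IXGBE_RT2CR_LSP OR'ed in only for a link-strict TC.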
*/ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type) +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *tsa) { - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); @@ -39,13 +108,13 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; - credit_max = max[i]; + credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RT2CR_LSP; IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); @@ -62,7 +131,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, reg &= ~IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); - return 0; + return IXGBE_SUCCESS; } /** @@ -71,18 +140,16 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) { - u32 reg, max_credits; - u8 i; + u32 reg, max_credits; + u8 i; reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); @@ -96,22 +163,22 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; - reg |= refill[i]; + reg |= (u32)(refill[i]); reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_TDTQ2TCCR_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_TDTQ2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); } - return 0; + return IXGBE_SUCCESS; } /** @@ -120,15 +187,13 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class * * Configure Tx Data Arbiter and credits for each traffic class. 
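+ *
+ * Field-layout sketch for the per-TC TDPT2TCCR write below (values
+ * illustrative, editor's addition):
+ *
+ *	reg = refill[i]
+ *	    | ((u32)max[i] << IXGBE_TDPT2TCCR_MCL_SHIFT)
+ *	    | ((u32)bwg_id[i] << IXGBE_TDPT2TCCR_BWG_SHIFT);
+ *
+ * plus IXGBE_TDPT2TCCR_GSP or IXGBE_TDPT2TCCR_LSP for group- or
+ * link-strict TCs respectively.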
*/ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) { u32 reg; u8 i; @@ -142,15 +207,15 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_TDPT2TCCR_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_TDPT2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); @@ -161,7 +226,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, reg |= IXGBE_DTXCTL_ENDBUBD; IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); - return 0; + return IXGBE_SUCCESS; } /** @@ -174,7 +239,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { u32 fcrtl, reg; - u8 i; + u8 i; /* Enable Transmit Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); @@ -192,8 +257,8 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); /* Configure PFC Tx thresholds per TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (!(pfc_en & BIT(i))) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; @@ -206,15 +271,14 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) } /* Configure pause time */ - reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - return 0; + return IXGBE_SUCCESS; } /** @@ -224,11 +288,11 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. 
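+ *
+ * Worked value (from the loop below, editor's addition): for TC j = 2
+ * the two RQSMR registers written in that iteration both receive
+ * 0x01010101 * 2 = 0x02020202, steering their queues into the TC 2
+ * counter set.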
*/ -static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) { u32 reg = 0; - u8 i = 0; - u8 j = 0; + u8 i = 0; + u8 j = 0; /* Receive Queues stats setting - 8 queues per statistics reg */ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { @@ -239,37 +303,39 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) reg |= ((0x1010101) * j); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); } - /* Transmit Queues stats setting - 4 queues per statistics reg */ + /* Transmit Queues stats setting - 4 queues per statistics reg*/ for (i = 0; i < 8; i++) { reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); reg |= ((0x1010101) * i); IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); } - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_dcb_hw_config_82598 - Config and enable DCB * @hw: pointer to hardware structure - * @pfc_en: enabled pfc bitmask + * @link_speed: unused * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class * * Configure dcb settings and enable dcb mode. */ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type) +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) { - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_pfc_82598(hw, pfc_en); + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); ixgbe_dcb_config_tc_stats_82598(hw); - return 0; + return IXGBE_SUCCESS; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index fdca41abb44c..056700929d2d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,72 +1,69 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ -#ifndef _DCB_82598_CONFIG_H_ -#define _DCB_82598_CONFIG_H_ +#ifndef _IXGBE_DCB_82598_H_ +#define _IXGBE_DCB_82598_H_ /* DCB register definitions */ -#define IXGBE_DPMCS_MTSOS_SHIFT 16 -#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ -#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ -#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ -#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable */ -#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 -#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 -#define IXGBE_TDTQ2TCCR_GSP 0x40000000 -#define IXGBE_TDTQ2TCCR_LSP 0x80000000 +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 -#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 -#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 -#define IXGBE_TDPT2TCCR_GSP 0x40000000 -#define IXGBE_TDPT2TCCR_LSP 0x80000000 +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 -#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ -#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ -#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ -#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 +/* DCB driver APIs */ -/* DCB hardware-specific driver APIs */ +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8); -/* DCB PFC functions */ -s32 
ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); -/* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type); +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *); -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type); - -#endif /* _DCB_82598_CONFIG_H */ +/* DCB initialization */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *); +#endif /* _IXGBE_DCB_82958_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 7948849840a5..c226393a9c6c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -1,33 +1,100 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + -#include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82599.h" +/** + * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes (read low first to prevent missed carry) */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); + stats->qbtc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes (read low first to prevent missed carry) */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); + stats->qbrc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); + + /* Received Dropped Packet */ + stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. 
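
Aside on the qbtc/qbrc accumulation above: the low half is read before the high half so that, per the comment, a carry into the upper register between the two reads is not missed. A compilable sketch of the pattern, with read_reg() as an assumed stand-in for IXGBE_READ_REG():

    #include <stdint.h>

    /* Assumed register accessor; stands in for IXGBE_READ_REG(). */
    extern uint32_t read_reg(uint32_t offset);

    /* Combine a 64-bit counter split across two 32-bit registers,
     * low half first, as in the qbtc/qbrc updates above. */
    static inline uint64_t read_split_counter(uint32_t lo, uint32_t hi)
    {
        uint64_t val = read_reg(lo);              /* low 32 bits first */
        val |= (uint64_t)read_reg(hi) << 32;      /* then the high half */
        return val;
    }
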
+ */ +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); + } + + return IXGBE_SUCCESS; +} + /** * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * @prio_tc: priority to tc assignments indexed by priority + * @tsa: transmission selection algorithm indexed by traffic class + * @map: priority to tc assignments indexed by priority * * Configure Rx Packet Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc) +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) { - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; /* * Disable the arbiter before changing parameters @@ -36,21 +103,27 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - /* Map all traffic classes to their UP */ + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. + */ reg = 0; - for (i = 0; i < MAX_USER_PRIORITY; i++) - reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; - credit_max = max[i]; + credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTRPT4C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); @@ -63,7 +136,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - return 0; + return IXGBE_SUCCESS; } /** @@ -72,18 +145,15 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class. 
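
The Rx arbiter hunk above follows the usual bracket: set ARBDIS, reprogram the credit registers, then clear ARBDIS to resume arbitration. A sketch under assumed helpers; read_reg()/write_reg() are illustrative, and 0x40 mirrors the ARBDIS bit from the register headers:

    #include <stdint.h>

    #define ARBDIS 0x00000040u          /* arbiter disable bit */

    extern uint32_t read_reg(uint32_t offset);     /* assumed accessors */
    extern void write_reg(uint32_t offset, uint32_t val);

    /* Park the arbiter, reprogram credits, then re-enable it. */
    static void reprogram_arbiter(uint32_t ctl, void (*update_credits)(void))
    {
        write_reg(ctl, read_reg(ctl) | ARBDIS);   /* stop arbitration */
        update_credits();                         /* safe to change params */
        write_reg(ctl, read_reg(ctl) & ~ARBDIS);  /* resume arbitration */
    }
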
*/ -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa) { - u32 reg, max_credits; - u8 i; + u32 reg, max_credits; + u8 i; /* Clear the per-Tx queue credits; we use per-TC instead */ for (i = 0; i < 128; i++) { @@ -92,16 +162,16 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, } /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; - reg |= refill[i]; + reg |= (u32)(refill[i]); reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_RTTDT2C_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTTDT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); @@ -114,7 +184,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); - return 0; + return IXGBE_SUCCESS; } /** @@ -123,17 +193,14 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * @prio_tc: priority to tc assignments indexed by priority + * @tsa: transmission selection algorithm indexed by traffic class + * @map: priority to tc assignments indexed by priority * * Configure Tx Packet Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc) +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) { u32 reg; u8 i; @@ -147,22 +214,28 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, IXGBE_RTTPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - /* Map all traffic classes to their UP */ + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
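
The RTRUP2TC/RTTUP2TC values built from this map pack one 3-bit TC index per user priority, eight priorities to a 32-bit register. A self-contained sketch with a worked value for the identity mapping:

    #include <assert.h>
    #include <stdint.h>

    #define UP_SHIFT 3   /* 3 bits per user priority, per the UP_SHIFT defines */

    static uint32_t pack_up2tc(const uint8_t map[8])
    {
        uint32_t reg = 0;

        for (int i = 0; i < 8; i++)
            reg |= (uint32_t)(map[i] & 0x7) << (i * UP_SHIFT);
        return reg;
    }

    int main(void)
    {
        const uint8_t identity[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

        /* Priority 0 lands in bits 2:0, priority 7 in bits 23:21. */
        assert(pack_up2tc(identity) == 0x00FAC688u);
        return 0;
    }
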
+ */ reg = 0; - for (i = 0; i < MAX_USER_PRIORITY; i++) - reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_RTTPT2C_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTTPT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); @@ -176,18 +249,18 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_pfc_82599 - Configure priority flow control * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask - * @prio_tc: priority to tc assignments indexed by priority + * @map: priority to tc assignments indexed by priority * * Configure Priority Flow Control (PFC) for each traffic class. */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) { u32 i, j, fcrtl, reg; u8 max_tc = 0; @@ -200,8 +273,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) reg |= IXGBE_MFLCN_DPF; /* - * X540 & X550 supports per TC Rx priority flow control. - * So clear all TCs and only enable those that should be + * X540 supports per TC Rx priority flow control. So + * clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); @@ -214,9 +287,9 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); - for (i = 0; i < MAX_USER_PRIORITY; i++) { - if (prio_tc[i] > max_tc) - max_tc = prio_tc[i]; + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { + if (map[i] > max_tc) + max_tc = map[i]; } @@ -224,8 +297,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) for (i = 0; i <= max_tc; i++) { int enabled = 0; - for (j = 0; j < MAX_USER_PRIORITY; j++) { - if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { + for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { + if ((map[j] == i) && (pfc_en & (1 << j))) { enabled = 1; break; } @@ -236,7 +309,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { - /* In order to prevent Tx hangs when the internal Tx + /* + * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark * to the Rx packet buffer size - 24KB. 
This allows * the Tx switch to function even under heavy Rx @@ -249,99 +323,258 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } - for (; i < MAX_TRAFFIC_CLASS; i++) { + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); } /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ -static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) { u32 reg = 0; u8 i = 0; + u8 tc_count = 8; + bool vt_mode = false; - /* - * Receive Queues stats setting - * 32 RQSMR registers, each configuring 4 queues. - * Set all 16 queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) { - reg = 0x01010101 * (i / 4); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + if (dcb_config != NULL) { + tc_count = dcb_config->num_tcs.pg_tcs; + vt_mode = dcb_config->vt_mode; } - /* - * Transmit Queues stats setting - * 32 TQSM registers, each controlling 4 queues. - * Set all queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - * Tx queues are allocated non-uniformly to TCs: - * 32, 32, 16, 16, 8, 8, 8, 8. - */ - for (i = 0; i < 32; i++) { - if (i < 8) - reg = 0x00000000; - else if (i < 16) - reg = 0x01010101; - else if (i < 20) - reg = 0x02020202; - else if (i < 24) - reg = 0x03030303; - else if (i < 26) - reg = 0x04040404; - else if (i < 28) - reg = 0x05050505; - else if (i < 30) - reg = 0x06060606; + + if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) + return IXGBE_ERR_PARAM; + + if (tc_count == 8 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. 
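
On the 0x01010101 multiplier above: each RQSMR/TQSM register maps four queues with one byte-wide stat index per queue, so multiplying the index by 0x01010101 replicates it into all four byte lanes. A quick self-check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (uint32_t tc = 0; tc < 8; tc++) {
            uint32_t reg = 0x01010101u * tc;

            /* Every byte lane of the register carries the same index. */
            for (int lane = 0; lane < 4; lane++)
                assert(((reg >> (8 * lane)) & 0xffu) == tc);
        }
        return 0;
    }
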
+ * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + if (i % 8 > 3) + /* In 4 TC mode, odd 16-queue ranges are + * not used. + */ + continue; + reg = 0x01010101 * (i / 8); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 64, 32, 16, 16. + */ + for (i = 0; i < 32; i++) { + if (i < 16) + reg = 0x00000000; + else if (i < 24) + reg = 0x01010101; + else if (i < 28) + reg = 0x02020202; + else + reg = 0x03030303; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == true) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_82599 - Configure general DCB parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure general DCB parameters. + */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u32 reg; + u32 q; + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (dcb_config->num_tcs.pg_tcs == 8) { + /* Enable DCB for Rx with 8 TCs */ + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case 0: + case IXGBE_MRQC_RT4TCEN: + /* RSS disabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + break; + case IXGBE_MRQC_RSSEN: + case IXGBE_MRQC_RTRSS4TCEN: + /* RSS enabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS8TCEN; + break; + default: + /* + * Unsupported value, assume stale data, + * overwrite no RSS + */ + ASSERT(0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + } + } + if (dcb_config->num_tcs.pg_tcs == 4) { + /* We support both VT-on and VT-off with 4 TCs. */ + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT4TCEN; else - reg = 0x07070707; - IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS4TCEN; } + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); - return 0; + /* Enable DCB for Tx with 8 TCs */ + if (dcb_config->num_tcs.pg_tcs == 8) + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else { + /* We support both VT-on and VT-off with 4 TCs. 
*/ + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + if (dcb_config->vt_mode) + reg |= IXGBE_MTQC_VT_ENA; + } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < 128; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + return IXGBE_SUCCESS; } /** * ixgbe_dcb_hw_config_82599 - Configure and enable DCB * @hw: pointer to hardware structure - * @pfc_en: enabled pfc bitmask + * @link_speed: unused * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * @prio_tc: priority to tc assignments indexed by priority + * @tsa: transmission selection algorithm indexed by traffic class + * @map: priority to tc assignments indexed by priority * * Configure dcb settings and enable dcb mode. */ -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) { - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, - prio_type, prio_tc); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, - bwg_id, prio_type, prio_tc); - ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); - ixgbe_dcb_config_tc_stats_82599(hw); + UNREFERENCED_1PARAMETER(link_speed); - return 0; + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, + map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + + return IXGBE_SUCCESS; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index c6f084883cab..3abd7731abdc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,100 +1,97 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#ifndef _DCB_82599_CONFIG_H_ -#define _DCB_82599_CONFIG_H_ +#ifndef _IXGBE_DCB_82599_H_ +#define _IXGBE_DCB_82599_H_ /* DCB register definitions */ -#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, * 1 WSP - Weighted Strict Priority */ -#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, * 1 WRR - Weighted Round Robin */ -#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ -#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must * clear! 
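
Note on the MRQC/MTQC programming in the hunk above: MRQC is updated read-modify-write on its MRQE field, and the Tx side picks a TC/queue layout, adding VT_ENA only in the 4-TC VMDq case. A standalone sketch; the register encodings used here are assumptions, not taken from this tree:

    #include <stdint.h>

    #define MRQE_MASK    0xFu   /* assumed width of the MRQE field */
    #define MTQC_RT_ENA  0x1u   /* assumed: DCB transmit enable */
    #define MTQC_VT_ENA  0x2u   /* assumed: VMDq/VT enable */
    #define MTQC_4TC_4TQ 0x8u   /* assumed: 4 TCs, 4 queues per TC */
    #define MTQC_8TC_8TQ 0xCu   /* assumed: 8 TCs, 8 queues per TC */

    /* Read-modify-write of the MRQE field, as in the switch above. */
    static uint32_t set_mrqe(uint32_t mrqc, uint32_t mode)
    {
        return (mrqc & ~MRQE_MASK) | (mode & MRQE_MASK);
    }

    /* Tx-side layout selection mirroring the MTQC logic above. */
    static uint32_t mtqc_value(int num_tcs, int vt_mode)
    {
        uint32_t reg = MTQC_RT_ENA;

        if (num_tcs == 8) {
            reg |= MTQC_8TC_8TQ;
        } else {
            reg |= MTQC_4TC_4TQ;
            if (vt_mode)
                reg |= MTQC_VT_ENA;
        }
        return reg;
    }
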
*/ -#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ /* Receive UP2TC mapping */ -#define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_SHIFT 3 #define IXGBE_RTRUP2TC_UP_MASK 7 /* Transmit UP2TC mapping */ -#define IXGBE_RTTUP2TC_UP_SHIFT 3 +#define IXGBE_RTTUP2TC_UP_SHIFT 3 -#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ -#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ -#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet * buffers enable */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores * (RSS) enable */ /* RTRPCS Bit Masks */ -#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RTRPCS_RAC 0x00000004 -#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ /* RTTDT2C Bit Masks */ -#define IXGBE_RTTDT2C_MCL_SHIFT 12 -#define IXGBE_RTTDT2C_BWG_SHIFT 9 -#define IXGBE_RTTDT2C_GSP 0x40000000 -#define IXGBE_RTTDT2C_LSP 0x80000000 +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 -#define IXGBE_RTTPT2C_MCL_SHIFT 12 -#define IXGBE_RTTPT2C_BWG_SHIFT 9 -#define IXGBE_RTTPT2C_GSP 0x40000000 -#define IXGBE_RTTPT2C_LSP 0x80000000 +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 /* RTTPCS Bit Masks */ -#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, * 1 SP - Strict Priority */ -#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ -#define IXGBE_RTTPCS_ARBD_SHIFT 22 -#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ /* SECTXMINIFG DCB */ -#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */ +/* DCB driver APIs */ -/* DCB hardware-specific driver APIs */ +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *); -/* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); 
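
All of the *T2C shift and flag defines above feed a single assembly pattern in the arbiter setup: refill credits in the low bits, max credits and BWG id shifted in, and the GSP/LSP strict-priority flags chosen from the TSA. A compilable sketch; the enum names are illustrative, not the driver's:

    #include <stdint.h>

    #define MCL_SHIFT 12          /* max credit limit offset */
    #define BWG_SHIFT 9           /* bandwidth group offset */
    #define GSP 0x40000000u       /* group strict priority */
    #define LSP 0x80000000u       /* link strict priority */

    enum tsa { TSA_ETS, TSA_GROUP_STRICT, TSA_STRICT };   /* illustrative */

    /* Build one per-TC credit register; the hardware fields are narrower
     * than the C types (refill occupies the bits below MCL_SHIFT). */
    static uint32_t tc_credit_reg(uint16_t refill, uint16_t max,
                                  uint8_t bwg_id, enum tsa tsa)
    {
        uint32_t reg = refill;

        reg |= (uint32_t)max << MCL_SHIFT;
        reg |= (uint32_t)bwg_id << BWG_SHIFT;
        if (tsa == TSA_GROUP_STRICT)
            reg |= GSP;
        if (tsa == TSA_STRICT)
            reg |= LSP;
        return reg;
    }
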
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); -/* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *, + u8 *, u8 *); -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); +/* DCB initialization */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type, - u8 *prio_tc); - -#endif /* _DCB_82599_CONFIG_H */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, + u8 *, u8 *); +#endif /* _IXGBE_DCB_82959_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index c00332d2e02a..5bec2f9d710c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" + +#if IS_ENABLED(CONFIG_DCB) #include #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" -#include "ixgbe_sriov.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 @@ -13,40 +14,36 @@ #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 -#define BIT_LINKSPEED 0x80 +#define BIT_RESETLINK 0x40 +#define BIT_LINKSPEED 0x80 /* Responses for the DCB_C_SET_ALL command */ -#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ -#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ -#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ -static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) { struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; - struct tc_configuration *src = NULL; - struct tc_configuration *dst = NULL; + struct ixgbe_dcb_tc_config *src = NULL; + struct ixgbe_dcb_tc_config *dst = NULL; int i, j; - int tx = DCB_TX_CONFIG; - int rx = DCB_RX_CONFIG; + int tx = IXGBE_DCB_TX_CONFIG; + int rx = IXGBE_DCB_RX_CONFIG; int changes = 0; -#ifdef IXGBE_FCOE - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = ETH_P_FCOE, - }; - u8 up = dcb_getapp(adapter->netdev, &app); - if (up && !(up & BIT(adapter->fcoe.up))) +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->fcoe.up_set != adapter->fcoe.up) changes |= BIT_APP_UPCHG; -#endif +#endif /* CONFIG_FCOE */ for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; - if (dst->path[tx].prio_type != 
src->path[tx].prio_type) { - dst->path[tx].prio_type = src->path[tx].prio_type; + if (dst->path[tx].tsa != src->path[tx].tsa) { + dst->path[tx].tsa = src->path[tx].tsa; changes |= BIT_PG_TX; } @@ -61,14 +58,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) } if (dst->path[tx].up_to_tc_bitmap != - src->path[tx].up_to_tc_bitmap) { + src->path[tx].up_to_tc_bitmap) { dst->path[tx].up_to_tc_bitmap = src->path[tx].up_to_tc_bitmap; changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); } - if (dst->path[rx].prio_type != src->path[rx].prio_type) { - dst->path[rx].prio_type = src->path[rx].prio_type; + if (dst->path[rx].tsa != src->path[rx].tsa) { + dst->path[rx].tsa = src->path[rx].tsa; changes |= BIT_PG_RX; } @@ -83,7 +80,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) } if (dst->path[rx].up_to_tc_bitmap != - src->path[rx].up_to_tc_bitmap) { + src->path[rx].up_to_tc_bitmap) { dst->path[rx].up_to_tc_bitmap = src->path[rx].up_to_tc_bitmap; changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); @@ -92,6 +89,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { j = i - DCB_PG_ATTR_BW_ID_0; + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; changes |= BIT_PG_TX; @@ -104,8 +102,8 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { j = i - DCB_PFC_UP_ATTR_0; - if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { - dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; + if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) { + dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc; changes |= BIT_PFC; } } @@ -128,6 +126,7 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) @@ -135,10 +134,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) /* verify there is something to do, if not then exit */ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return 0; + goto out; - return !!ixgbe_setup_tc(netdev, - state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); + err = ixgbe_setup_tc(netdev, + state ? 
adapter->dcb_cfg.num_tcs.pg_tcs : 0); +out: + return !!err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, @@ -170,8 +171,13 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (tc < 0 || tc >= IXGBE_DCB_MAX_TRAFFIC_CLASS) { + netdev_err(netdev, "Traffic class out of range.\n"); + return; + } + if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; + adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) @@ -187,6 +193,12 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, { struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (bwg_id < 0 || bwg_id >= IXGBE_DCB_MAX_BW_GROUP) { + netdev_err(netdev, + "BWG index out of range.\n"); + return; + } + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; } @@ -196,8 +208,13 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (tc < 0 || tc >= IXGBE_DCB_MAX_TRAFFIC_CLASS) { + netdev_err(netdev, "Traffic class out of range.\n"); + return; + } + if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; + adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) @@ -213,6 +230,12 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, { struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (bwg_id < 0 || bwg_id >= IXGBE_DCB_MAX_BW_GROUP) { + netdev_err(netdev, + "BWG index out of range.\n"); + return; + } + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; } @@ -222,7 +245,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; + *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; @@ -242,7 +265,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; + *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; @@ -256,23 +279,22 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } -static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, - u8 setting) +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); - adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; - if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != - adapter->dcb_cfg.tc_config[priority].dcb_pfc) + adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; + if 
(adapter->temp_dcb_cfg.tc_config[tc].pfc != + adapter->dcb_cfg.tc_config[tc].pfc) adapter->temp_dcb_cfg.pfc_mode_enable = true; } -static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, - u8 *setting) +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; + u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); + *pfc = adapter->dcb_cfg.tc_config[tc].pfc; } static void ixgbe_dcbnl_devreset(struct net_device *dev) @@ -283,13 +305,21 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) usleep_range(1000, 2000); if (netif_running(dev)) +#ifdef HAVE_NET_DEVICE_OPS dev->netdev_ops->ndo_stop(dev); +#else + dev->stop(dev); +#endif ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(dev)) +#ifdef HAVE_NET_DEVICE_OPS dev->netdev_ops->ndo_open(dev); +#else + dev->open(dev); +#endif clear_bit(__IXGBE_RESETTING, &adapter->state); } @@ -300,83 +330,80 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ixgbe_hw *hw = &adapter->hw; int ret = DCB_NO_HW_CHG; - int i; + u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return DCB_NO_HW_CHG; + return ret; adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, - MAX_TRAFFIC_CLASS); + IXGBE_DCB_MAX_TRAFFIC_CLASS); if (!adapter->dcb_set_bitmap) - return DCB_NO_HW_CHG; + return ret; - if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { - u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; - u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + ixgbe_dcb_unpack_map_cee(dcb_cfg, IXGBE_DCB_TX_CONFIG, prio_tc); + + if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) { /* Priority to TC mapping in CEE case default to 1:1 */ - u8 prio_tc[MAX_USER_PRIORITY]; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; +#ifdef HAVE_MQPRIO + int i; +#endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif - ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, - DCB_RX_CONFIG); + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + IXGBE_DCB_TX_CONFIG); - ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max(dcb_cfg, max); - ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); - ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); - ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + IXGBE_DCB_RX_CONFIG); - ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, - prio_type, prio_tc); + ixgbe_dcb_hw_config_cee(hw, dcb_cfg); +#ifdef HAVE_MQPRIO for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_set_prio_tc_map(netdev, i, prio_tc[i]); - +#endif /* HAVE_MQPRIO */ ret = DCB_HW_CHG_RST; } if (adapter->dcb_set_bitmap & BIT_PFC) { if (dcb_cfg->pfc_mode_enable) { u8 pfc_en; - u8 prio_tc[MAX_USER_PRIORITY]; - - ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); - ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); - ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); + ixgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en); + ixgbe_dcb_config_pfc(hw, pfc_en, prio_tc); } else { hw->mac.ops.fc_enable(hw); } + /* This is known 
driver so disable MDD before updating SRRCTL */ + if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.disable_mdd(hw); ixgbe_set_rx_drop_en(adapter); - ret = DCB_HW_CHG; + if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.enable_mdd(hw); + + if (ret != DCB_HW_CHG_RST) + ret = DCB_HW_CHG; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* Reprogam FCoE hardware offloads when the traffic class * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. */ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = ETH_P_FCOE, - }; - u8 up = dcb_getapp(netdev, &app); - - adapter->fcoe.up = ffs(up) - 1; + adapter->fcoe.up_set = adapter->fcoe.up; ixgbe_dcbnl_devreset(netdev); ret = DCB_HW_CHG_RST; } -#endif +#endif /* CONFIG_FCOE */ adapter->dcb_set_bitmap = 0x00; return ret; @@ -384,7 +411,9 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { +#ifdef HAVE_DCBNL_IEEE struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif switch (capid) { case DCB_CAP_ATTR_PG: @@ -408,9 +437,11 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) case DCB_CAP_ATTR_BCN: *cap = false; break; +#ifdef HAVE_DCBNL_IEEE case DCB_CAP_ATTR_DCBX: *cap = adapter->dcbx_cap; break; +#endif default: *cap = false; break; @@ -419,9 +450,14 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) return 0; } +#ifdef NUMTCS_RETURNS_U8 +static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#else static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { @@ -432,18 +468,42 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) *num = adapter->dcb_cfg.num_tcs.pfc_tcs; break; default: - return -EINVAL; + rval = -EINVAL; + break; } } else { - return -EINVAL; + rval = -EINVAL; } - return 0; + return rval; } +#ifdef NUMTCS_RETURNS_U8 +static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#else static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#endif { - return -EINVAL; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + adapter->dcb_cfg.num_tcs.pg_tcs = num; + break; + case DCB_NUMTCS_ATTR_PFC: + adapter->dcb_cfg.num_tcs.pfc_tcs = num; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) @@ -458,8 +518,10 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; + return; } +#ifdef HAVE_DCBNL_OPS_GETAPP /** * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority * @netdev : the corresponding netdev @@ -467,35 +529,100 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) * @id: id is either ether type or TCP/UDP port number * * Returns : on success, returns a non-zero 802.1p user priority bitmap - * otherwise returns -EINVAL as the invalid user 
priority bitmap to indicate an + * otherwise returns 0 as the invalid user priority bitmap to indicate an * error. */ +#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +#else +static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +#endif { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; +#ifdef HAVE_DCBNL_IEEE struct dcb_app app = { - .selector = idtype, - .protocol = id, - }; + .selector = idtype, + .protocol = id, + }; - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return -EINVAL; + rval = dcb_getapp(netdev, &app); +#endif - return dcb_getapp(netdev, &app); + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) + rval = ixgbe_fcoe_getapp(netdev); +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return rval; } +/** + * ixgbe_dcbnl_setapp - set the DCBX application user priority + * @netdev: the corresponding netdev + * @idtype: identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * @up: the 802.1p user priority bitmap + * + * Returns : 0 on success or 1 on error + */ +#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT +static int ixgbe_dcbnl_setapp(struct net_device *netdev, + u8 idtype, u16 id, u8 up) +#else +static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, + u8 idtype, u16 id, u8 up) +#endif +{ + int err = 0; +#ifdef HAVE_DCBNL_IEEE + struct dcb_app app; + + app.selector = idtype; + app.protocol = id; + app.priority = up; + err = dcb_setapp(netdev, &app); +#endif + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->fcoe.up = up ? 
ffs(up) - 1 : IXGBE_FCOE_DEFUP; + } +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return err; +} +#endif /* HAVE_DCBNL_OPS_GETAPP */ + +#ifdef HAVE_DCBNL_IEEE static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; - ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; - /* No IEEE PFC settings available */ if (!my_ets) - return 0; + return -EINVAL; + ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; ets->cbs = my_ets->cbs; memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); @@ -509,7 +636,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i, err; + int i, err = 0; __u8 max_tc = 0; __u8 map_chg = 0; @@ -521,13 +648,13 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; - /* initialize UP2TC mappings to invalid value */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) adapter->ixgbe_ieee_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS; /* if possible update UP2TC mappings from HW */ - ixgbe_dcb_read_rtrup2tc(&adapter->hw, + if (adapter->hw.mac.ops.get_rtrup2tc) + adapter->hw.mac.ops.get_rtrup2tc(&adapter->hw, adapter->ixgbe_ieee_ets->prio_tc); } @@ -546,15 +673,17 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; - if (max_tc != adapter->hw_tcs) { + if (max_tc != netdev_get_num_tc(dev)) err = ixgbe_setup_tc(dev, max_tc); - if (err) - return err; - } else if (map_chg) { + else if (map_chg) ixgbe_dcbnl_devreset(dev); - } - return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); + if (err) + goto err_out; + + err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +err_out: + return err; } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, @@ -564,17 +693,16 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; int i; - pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; - /* No IEEE PFC settings available */ if (!my_pfc) - return 0; + return -EINVAL; + pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; pfc->pfc_en = my_pfc->pfc_en; pfc->mbc = my_pfc->mbc; pfc->delay = my_pfc->delay; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { pfc->requests[i] = adapter->stats.pxoffrxc[i]; pfc->indications[i] = adapter->stats.pxofftxc[i]; } @@ -603,14 +731,24 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, prio_tc = adapter->ixgbe_ieee_ets->prio_tc; memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); + /* Enable link flow control parameters if PFC is disabled */ if (pfc->pfc_en) - err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); + err = ixgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc); else err = hw->mac.ops.fc_enable(hw); + /* This is known driver so disable MDD before updating SRRCTL */ + if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.disable_mdd(hw); + ixgbe_set_rx_drop_en(adapter); + if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.enable_mdd(hw); + return err; } @@ -618,47 +756,30 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct 
dcb_app *app) { struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; + int err = -EINVAL; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - err = dcb_ieee_setapp(dev, app); - if (err) return err; -#ifdef IXGBE_FCOE - if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + err = dcb_ieee_setapp(dev, app); + +#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & BIT(adapter->fcoe.up)) - return 0; + if (app_mask & (1 << adapter->fcoe.up)) + return err; adapter->fcoe.up = app->priority; + adapter->fcoe.up_set = adapter->fcoe.up; ixgbe_dcbnl_devreset(dev); } #endif - - /* VF devices should use default UP when available */ - if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == 0) { - int vf; - - adapter->default_up = app->priority; - - for (vf = 0; vf < adapter->num_vfs; vf++) { - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - - if (!vfinfo->pf_qos) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - app->priority, vf); - } - } - return 0; } +#ifdef HAVE_DCBNL_IEEE_DELAPP static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) { @@ -670,39 +791,22 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, err = dcb_ieee_delapp(dev, app); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & BIT(adapter->fcoe.up)) - return 0; + if (app_mask & (1 << adapter->fcoe.up)) + return err; adapter->fcoe.up = app_mask ? - ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; + ffs(app_mask) - 1 : IXGBE_FCOE_DEFUP; ixgbe_dcbnl_devreset(dev); } #endif - /* IF default priority is being removed clear VF default UP */ - if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == 0 && adapter->default_up == app->priority) { - int vf; - long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); - int qos = app_mask ? find_first_bit(&app_mask, 8) : 0; - - adapter->default_up = qos; - - for (vf = 0; vf < adapter->num_vfs; vf++) { - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - - if (!vfinfo->pf_qos) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - qos, vf); - } - } - return err; } +#endif /* HAVE_DCBNL_IEEE_DELAPP */ static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { @@ -713,9 +817,8 @@ static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_ets ets = {0}; - struct ieee_pfc pfc = {0}; - int err = 0; + struct ieee_ets ets = { .ets_cap = 0 }; + struct ieee_pfc pfc = { .pfc_en = 0 }; /* no support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || @@ -736,7 +839,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { - u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; + u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG); adapter->dcb_set_bitmap |= mask; ixgbe_dcbnl_set_all(dev); @@ -746,19 +849,25 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) */ ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - err = ixgbe_setup_tc(dev, 0); + ixgbe_setup_tc(dev, 0); } - return err ? 
1 : 0; + return 0; } -const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { +#endif + +struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { +#ifdef HAVE_DCBNL_IEEE .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, .ieee_setapp = ixgbe_dcbnl_ieee_setapp, +#ifdef HAVE_DCBNL_IEEE_DELAPP .ieee_delapp = ixgbe_dcbnl_ieee_delapp, +#endif +#endif .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, @@ -778,7 +887,14 @@ const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { .setnumtcs = ixgbe_dcbnl_setnumtcs, .getpfcstate = ixgbe_dcbnl_getpfcstate, .setpfcstate = ixgbe_dcbnl_setpfcstate, +#ifdef HAVE_DCBNL_OPS_GETAPP .getapp = ixgbe_dcbnl_getapp, + .setapp = ixgbe_dcbnl_setapp, +#endif +#ifdef HAVE_DCBNL_IEEE .getdcbx = ixgbe_dcbnl_getdcbx, .setdcbx = ixgbe_dcbnl_setdcbx, +#endif }; + +#endif /* CONFIG_DCB */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 171cdc552961..abf9cf4b2073 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -1,18 +1,25 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ - -#include -#include +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" +#ifdef HAVE_IXGBE_DEBUG_FS +#include +#include + static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; -static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos, - char *dbg_buf) +/** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; char *buf; @@ -23,7 +30,8 @@ static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, return 0; buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, dbg_buf); + adapter->netdev->name, + ixgbe_dbg_reg_ops_buf); if (!buf) return -ENOMEM; @@ -38,20 +46,6 @@ static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, return len; } -/** - * ixgbe_dbg_reg_ops_read - read for reg_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, - ixgbe_dbg_reg_ops_buf); -} - /** * ixgbe_dbg_reg_ops_write - write into reg_ops datum * @filp: the opened file @@ -86,7 +80,8 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, u32 reg, value; int cnt; cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); - if (cnt == 2) { + /* check format and bounds check register access */ + if (cnt == 2 && reg <= IXGBE_HFDR) { IXGBE_WRITE_REG(&adapter->hw, reg, value); value = IXGBE_READ_REG(&adapter->hw, reg); e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); @@ -97,7 +92,8 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, u32 reg, value; int cnt; cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", ®); - if (cnt == 1) { + /* check 
format and bounds check register access */ + if (cnt == 1 && reg <= IXGBE_HFDR) { value = IXGBE_READ_REG(&adapter->hw, reg); e_dev_info("read 0x%08x = 0x%08x\n", reg, value); } else { @@ -128,11 +124,33 @@ static char ixgbe_dbg_netdev_ops_buf[256] = ""; * @count: the size of the user's buffer * @ppos: file position offset **/ -static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer, +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, size_t count, loff_t *ppos) { - return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, - ixgbe_dbg_netdev_ops_buf); + struct ixgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; } /** @@ -166,7 +184,16 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, ixgbe_dbg_netdev_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { +#ifdef HAVE_NET_DEVICE_OPS +#ifdef HAVE_TX_TIMEOUT_TXQUEUE + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, + UINT_MAX); +#else adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#endif +#else + adapter->netdev->tx_timeout(adapter->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ e_dev_info("tx_timeout called\n"); } else { e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); @@ -176,7 +203,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, return count; } -static const struct file_operations ixgbe_dbg_netdev_ops_fops = { +static struct file_operations ixgbe_dbg_netdev_ops_fops = { .owner = THIS_MODULE, .open = simple_open, .read = ixgbe_dbg_netdev_ops_read, @@ -190,21 +217,32 @@ static const struct file_operations ixgbe_dbg_netdev_ops_fops = { void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) { const char *name = pci_name(adapter->pdev); - + struct dentry *pfile; adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); - debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter, - adapter, &ixgbe_dbg_reg_ops_fops); - debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter, - adapter, &ixgbe_dbg_netdev_ops_fops); + if (adapter->ixgbe_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } } /** * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries - * @adapter: the adapter that is exiting + * @adapter: board private structure **/ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) { - debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + if (adapter->ixgbe_dbg_adapter) + debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); adapter->ixgbe_dbg_adapter = NULL; } @@ -214,6 +252,8 @@ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) void ixgbe_dbg_init(void) { ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); + if (ixgbe_dbg_root == NULL) + pr_err("init 
of debugfs failed\n"); } /** @@ -223,3 +263,5 @@ void ixgbe_dbg_exit(void) { debugfs_remove_recursive(ixgbe_dbg_root); } + +#endif /* HAVE_IXGBE_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 7c52ae8ac005..6e0c9e797874 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,108 +1,126 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ /* ethtool support for ixgbe */ -#include <linux/interrupt.h> #include <linux/types.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/vmalloc.h> #include <linux/highmem.h> -#include <linux/uaccess.h> + +#ifdef SIOCETHTOOL +#include <linux/uaccess.h> #include "ixgbe.h" +#ifdef ETHTOOL_GMODULEINFO #include "ixgbe_phy.h" +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO +#include <linux/net_tstamp.h> +#endif +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif #define IXGBE_ALL_RAR_ENTRIES 16 -enum {NETDEV_STATS, IXGBE_STATS}; - +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif +#ifdef ETHTOOL_GSTATS struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; - int type; int sizeof_stat; int stat_offset; }; -#define IXGBE_STAT(m) IXGBE_STATS, \ - sizeof(((struct ixgbe_adapter *)0)->m), \ - offsetof(struct ixgbe_adapter, m) -#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ - sizeof(((struct rtnl_link_stats64 *)0)->m), \ - offsetof(struct rtnl_link_stats64, m) +#define IXGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = { + IXGBE_NETDEV_STAT(rx_packets), + IXGBE_NETDEV_STAT(tx_packets), + IXGBE_NETDEV_STAT(rx_bytes), + IXGBE_NETDEV_STAT(tx_bytes), + IXGBE_NETDEV_STAT(rx_errors), + IXGBE_NETDEV_STAT(tx_errors), + IXGBE_NETDEV_STAT(rx_dropped), + IXGBE_NETDEV_STAT(tx_dropped), + IXGBE_NETDEV_STAT(multicast), + IXGBE_NETDEV_STAT(collisions), + IXGBE_NETDEV_STAT(rx_over_errors), + IXGBE_NETDEV_STAT(rx_crc_errors), + IXGBE_NETDEV_STAT(rx_frame_errors), + IXGBE_NETDEV_STAT(rx_fifo_errors), + IXGBE_NETDEV_STAT(rx_missed_errors), + IXGBE_NETDEV_STAT(tx_aborted_errors), + IXGBE_NETDEV_STAT(tx_carrier_errors), + IXGBE_NETDEV_STAT(tx_fifo_errors), + IXGBE_NETDEV_STAT(tx_heartbeat_errors), +}; -static const struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, - {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, - {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, - {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, - {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, - {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, - {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, - {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, - {"lsc_int", IXGBE_STAT(lsc_int)}, - {"tx_busy", IXGBE_STAT(tx_busy)}, - {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, - {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, - {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, - {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, - {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, - {"multicast", IXGBE_NETDEV_STAT(multicast)}, - {"broadcast", IXGBE_STAT(stats.bprc)}, - {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, - {"collisions", IXGBE_NETDEV_STAT(collisions)}, - {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, - {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, - {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, - {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, - 
{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, - {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, - {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, - {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, - {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, - {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, - {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, - {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, - {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, - {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, - {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, - {"tx_restart_queue", IXGBE_STAT(restart_queue)}, - {"rx_length_errors", IXGBE_STAT(stats.rlec)}, - {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, - {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, - {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, - {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, - {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, - {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, - {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, - {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)}, - {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, - {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, - {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, - {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, - {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, - {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, - {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, - {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)}, - {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)}, - {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)}, - {"tx_ipsec", IXGBE_STAT(tx_ipsec)}, - {"rx_ipsec", IXGBE_STAT(rx_ipsec)}, -#ifdef IXGBE_FCOE - {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, - {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, - {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, - {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, - {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, - {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, - {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, - {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, -#endif /* IXGBE_FCOE */ +#define IXGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct ixgbe_adapter, _stat), \ + .stat_offset = offsetof(struct ixgbe_adapter, _stat) \ +} +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + IXGBE_STAT("rx_pkts_nic", stats.gprc), + IXGBE_STAT("tx_pkts_nic", stats.gptc), + IXGBE_STAT("rx_bytes_nic", stats.gorc), + IXGBE_STAT("tx_bytes_nic", stats.gotc), + IXGBE_STAT("lsc_int", lsc_int), + IXGBE_STAT("tx_busy", tx_busy), + IXGBE_STAT("non_eop_descs", non_eop_descs), + IXGBE_STAT("broadcast", stats.bprc), + IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) , + IXGBE_STAT("tx_timeout_count", tx_timeout_count), + IXGBE_STAT("tx_restart_queue", restart_queue), + IXGBE_STAT("rx_length_errors", stats.rlec), + IXGBE_STAT("rx_long_length_errors", stats.roc), + IXGBE_STAT("rx_short_length_errors", stats.ruc), + IXGBE_STAT("tx_flow_control_xon", stats.lxontxc), + IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + IXGBE_STAT("alloc_rx_page", alloc_rx_page), + IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + 
IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + IXGBE_STAT("hw_rsc_aggregated", rsc_total_count), + IXGBE_STAT("hw_rsc_flushed", rsc_total_flush), +#ifdef HAVE_TX_MQ + IXGBE_STAT("fdir_match", stats.fdirmatch), + IXGBE_STAT("fdir_miss", stats.fdirmiss), + IXGBE_STAT("fdir_overflow", fdir_overflow), +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc), + IXGBE_STAT("fcoe_last_errors", stats.fclast), + IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc), + IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc), + IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc), + IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp), + IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff), + IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc), + IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc), +#endif /* CONFIG_FCOE */ + IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), +#ifdef HAVE_PTP_1588_CLOCK + IXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IXGBE_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +#endif /* HAVE_PTP_1588_CLOCK */ }; /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so @@ -110,64 +128,132 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { * used because we do not have a good way to get the max number of * rx queues with CONFIG_RPS disabled. */ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define IXGBE_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define IXGBE_NUM_RX_QUEUES adapter->indices +#define IXGBE_NUM_TX_QUEUES adapter->indices +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define IXGBE_NUM_TX_QUEUES 1 +#define IXGBE_NUM_RX_QUEUES ( \ + ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) +#endif /* HAVE_TX_MQ */ #define IXGBE_QUEUE_STATS_LEN ( \ - (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ - (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) + (IXGBE_NUM_TX_QUEUES + IXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats) #define IXGBE_PB_STATS_LEN ( \ - (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ - / sizeof(u64)) + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define IXGBE_VF_STATS_LEN \ + ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \ + (sizeof(struct vf_stats) / sizeof(u64))) #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ + IXGBE_NETDEV_STATS_LEN + \ IXGBE_PB_STATS_LEN + \ - IXGBE_QUEUE_STATS_LEN) + IXGBE_QUEUE_STATS_LEN + \ + IXGBE_VF_STATS_LEN) +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; -#define IXGBE_TEST_LEN 
sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) +#define IXGBE_PRIV_FLAGS_FD_ATR BIT(0) + "flow-director-atr", +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(1) "legacy-rx", -#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) - "vf-ipsec", +#endif }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ /* currently supported speeds for 10G */ -#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ - SUPPORTED_10000baseKX4_Full | \ - SUPPORTED_10000baseKR_Full) +#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full) -#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) +#define ixgbe_isbackplane(type) ((type == ixgbe_media_type_backplane)? true : false) -static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) +#ifdef ETHTOOL_GLINKSETTINGS +static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw, + struct ethtool_link_ksettings *cmd) { - if (!ixgbe_isbackplane(hw->phy.media_type)) - return SUPPORTED_10000baseT_Full; + if (!ixgbe_isbackplane(hw->phy.media_type)) { + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseT_Full); + return; + } switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_X550EM_X_KX4: - return SUPPORTED_10000baseKX4_Full; + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKX4_Full); + break; case IXGBE_DEV_ID_82598_BX: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_XFI: - return SUPPORTED_10000baseKR_Full; + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKR_Full); + break; default: - return SUPPORTED_10000baseKX4_Full | - SUPPORTED_10000baseKR_Full; + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKR_Full); + break; + } +} + +static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw, + struct ethtool_link_ksettings *cmd) +{ + if (!ixgbe_isbackplane(hw->phy.media_type)) { + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseT_Full); + return; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKX4_Full); + break; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_XFI: + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKR_Full); + break; + default: + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKR_Full); + break; } } @@ -178,55 +264,99 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; bool autoneg = false; - u32 supported, advertising; - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); 
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); - /* set the supported link speeds */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - supported |= ixgbe_get_supported_10gtypes(hw); - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - supported |= SUPPORTED_100baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_10_FULL) - supported |= SUPPORTED_10baseT_Full; - - /* default advertised speed if phy.autoneg_advertised isn't set */ - advertising = supported; - /* set the advertised speeds */ - if (hw->phy.autoneg_advertised) { - advertising = 0; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) - advertising |= ADVERTISED_10baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - advertising |= supported & ADVRTSD_MSK_10G; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { - if (supported & SUPPORTED_1000baseKX_Full) - advertising |= ADVERTISED_1000baseKX_Full; - else - advertising |= ADVERTISED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) { + ixgbe_set_supported_10gtypes(hw, cmd); + ixgbe_set_advertising_10gtypes(hw, cmd); + } +#ifdef HAVE_ETHTOOL_5G_BITS + if (supported_link & IXGBE_LINK_SPEED_5GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 5000baseT_Full); +#endif +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS + if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 2500baseT_Full); +#endif + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) { + if (ixgbe_isbackplane(hw->phy.media_type)) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } else { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); } + } + if (supported_link & IXGBE_LINK_SPEED_100_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (supported_link & IXGBE_LINK_SPEED_10_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + ixgbe_set_advertising_10gtypes(hw, cmd); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (ethtool_link_ksettings_test_link_mode + (cmd, supported, 1000baseKX_Full)) + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 1000baseKX_Full); + else + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 1000baseT_Full); + } +#ifdef HAVE_ETHTOOL_5G_BITS + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 5000baseT_Full); + } +#endif 
+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 2500baseT_Full); + } +#endif } else { if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - advertising = ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseT_Full); } } if (autoneg) { - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); cmd->base.autoneg = AUTONEG_ENABLE; - } else + } else { cmd->base.autoneg = AUTONEG_DISABLE; + } /* Determine the remaining settings based on the PHY type. */ switch (adapter->hw.phy.type) { @@ -235,13 +365,13 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_phy_x550em_ext_t: case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case ixgbe_phy_qt: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; break; case ixgbe_phy_nl: @@ -255,13 +385,15 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_phy_qsfp_active_unknown: case ixgbe_phy_qsfp_intel: case ixgbe_phy_qsfp_unknown: - /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_DA; break; case ixgbe_sfp_type_sr: @@ -272,61 +404,72 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_FIBRE; break; case ixgbe_sfp_type_not_present: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_NONE; break; case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, + TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + TP); cmd->base.port = PORT_TP; break; case ixgbe_sfp_type_unknown: default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_OTHER; break; } break; case ixgbe_phy_xaui: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_NONE; break; case ixgbe_phy_unknown: case ixgbe_phy_generic: case ixgbe_phy_sfp_unsupported: default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_OTHER; break; } /* Indicate pause support */ - supported |= SUPPORTED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); switch (hw->fc.requested_mode) { case ixgbe_fc_full: - advertising |= ADVERTISED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); break; case ixgbe_fc_rx_pause: - advertising |= ADVERTISED_Pause | - ADVERTISED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; case ixgbe_fc_tx_pause: - advertising |= ADVERTISED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; default: - advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, + Asym_Pause); } if (netif_carrier_ok(netdev)) { @@ -337,9 +480,11 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case IXGBE_LINK_SPEED_5GB_FULL: cmd->base.speed = SPEED_5000; break; +#ifdef SUPPORTED_2500baseX_Full case IXGBE_LINK_SPEED_2_5GB_FULL: cmd->base.speed = SPEED_2500; break; +#endif /* SUPPORTED_2500baseX_Full */ case IXGBE_LINK_SPEED_1GB_FULL: cmd->base.speed = SPEED_1000; break; @@ -358,14 +503,230 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + return 0; +} +#else /* ETHTOOL_GLINKSETTINGS */ +static __u32 ixgbe_backplane_type(struct ixgbe_hw *hw) +{ + __u32 mode = 0x00; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + mode = SUPPORTED_10000baseKX4_Full; + break; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_XFI: + mode = SUPPORTED_10000baseKR_Full; + break; + default: + mode = (SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full); + break; + } + return mode; +} + +static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed supported_link; + bool autoneg = false; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? + ixgbe_backplane_type(hw) : + SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? 
+ SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; + + /* default advertised speed if phy.autoneg_advertised isn't set */ + ecmd->advertising = ecmd->supported; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= (ecmd->supported & ADVERTISED_MASK_10G); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + } else { + if (hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + } + + if (autoneg) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + } else { + ecmd->autoneg = AUTONEG_DISABLE; + } + + ecmd->transceiver = XCVR_EXTERNAL; + + /* Determine the remaining settings based on the PHY type. */ + switch (adapter->hw.phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: + case ixgbe_phy_fw: + case ixgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ixgbe_phy_qt: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ixgbe_phy_nl: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case ixgbe_sfp_type_da_cu: + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case ixgbe_sfp_type_sr: + case ixgbe_sfp_type_lr: + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ixgbe_sfp_type_not_present: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ixgbe_sfp_type_unknown: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + break; + case ixgbe_phy_xaui: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case 
ixgbe_phy_unknown: + case ixgbe_phy_generic: + case ixgbe_phy_sfp_unsupported: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + + /* Indicate pause support */ + ecmd->supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + ecmd->advertising |= ADVERTISED_Pause; + break; + case ixgbe_fc_rx_pause: + ecmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + break; + case ixgbe_fc_tx_pause: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case IXGBE_LINK_SPEED_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_5000); + break; +#ifdef SUPPORTED_2500baseX_Full + case IXGBE_LINK_SPEED_2_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_2500); + break; +#endif /* SUPPORTED_2500baseX_Full */ + case IXGBE_LINK_SPEED_1GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case IXGBE_LINK_SPEED_100_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + case IXGBE_LINK_SPEED_10_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } return 0; } +#endif /* !ETHTOOL_GLINKSETTINGS */ +#ifdef ETHTOOL_GLINKSETTINGS static int ixgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { @@ -373,42 +734,50 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; s32 err = 0; - u32 supported, advertising; - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber)) { + if (hw->phy.media_type == ixgbe_media_type_copper || + hw->phy.multispeed_fiber) { /* * this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ - if (advertising & ~supported) + if (!bitmap_subset(cmd->link_modes.advertising, + cmd->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; /* only allow one speed at a time if no autoneg */ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { - if (advertising == - (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full)) + if (ethtool_link_ksettings_test_link_mode + (cmd, advertising, 10000baseT_Full) && + ethtool_link_ksettings_test_link_mode + (cmd, advertising, 1000baseT_Full)) return -EINVAL; } old = hw->phy.autoneg_advertised; advertised = 0; - if (advertising & ADVERTISED_10000baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (advertising & ADVERTISED_1000baseT_Full) +#ifdef HAVE_ETHTOOL_5G_BITS + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 5000baseT_Full)) + advertised |= IXGBE_LINK_SPEED_5GB_FULL; +#endif +#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 2500baseT_Full)) + advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; +#endif + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) advertised |= 
IXGBE_LINK_SPEED_1GB_FULL; - - if (advertising & ADVERTISED_100baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Full)) advertised |= IXGBE_LINK_SPEED_100_FULL; - - if (advertising & ADVERTISED_10baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Full)) advertised |= IXGBE_LINK_SPEED_10_FULL; if (old == advertised) @@ -428,14 +797,82 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, /* in this case we currently only support 10Gb/FULL */ u32 speed = cmd->base.speed; - if ((cmd->base.autoneg == AUTONEG_ENABLE) || - (advertising != ADVERTISED_10000baseT_Full) || + if (cmd->base.autoneg == AUTONEG_ENABLE || + (!ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) || (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) return -EINVAL; } return err; } +#else +static int ixgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (ecmd->advertising & ~ecmd->supported) + return -EINVAL; + + /* only allow one speed at a time if no autoneg */ + if (!ecmd->autoneg && hw->phy.multispeed_fiber) { + if (ecmd->advertising == + (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } + + old = hw->phy.autoneg_advertised; + advertised = 0; + if (ecmd->advertising & ADVERTISED_10000baseT_Full) + advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= IXGBE_LINK_SPEED_10_FULL; + + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true); + } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + } + else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); + + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} +#endif /* !ETHTOOL_GLINKSETTINGS */ static void ixgbe_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) @@ -471,18 +908,19 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return -EINVAL; - /* some devices do not support autoneg of link flow control */ + + /* some devices do not support autoneg of flow control */ if ((pause->autoneg == AUTONEG_ENABLE) && !ixgbe_device_supports_autoneg_fc(hw)) - return -EINVAL; + return -EINVAL; fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) fc.requested_mode = ixgbe_fc_full; - else if (pause->rx_pause && !pause->tx_pause) + else if (pause->rx_pause) 
fc.requested_mode = ixgbe_fc_rx_pause; - else if (!pause->rx_pause && pause->tx_pause) + else if (pause->tx_pause) fc.requested_mode = ixgbe_fc_tx_pause; else fc.requested_mode = ixgbe_fc_none; @@ -511,16 +949,16 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) adapter->msg_enable = data; } -static int ixgbe_get_regs_len(struct net_device *netdev) +static int ixgbe_get_regs_len(struct net_device __always_unused *netdev) { #define IXGBE_REGS_LEN 1145 return IXGBE_REGS_LEN * sizeof(u32); } -#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ +#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) -static void ixgbe_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) +static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -533,186 +971,186 @@ static void ixgbe_get_regs(struct net_device *netdev, hw->device_id; /* General Registers */ - regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); - regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); - regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); - regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); - regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + regs_buff[0] = IXGBE_R32_Q(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_R32_Q(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_R32_Q(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_R32_Q(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_R32_Q(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_R32_Q(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_R32_Q(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_R32_Q(hw, IXGBE_TCPTIMER); /* NVM Register */ - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); - regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw)); - regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); - regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); - regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); - regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); - regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); - regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); - regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw)); + regs_buff[8] = IXGBE_R32_Q(hw, IXGBE_EEC); + regs_buff[9] = IXGBE_R32_Q(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_R32_Q(hw, IXGBE_FLA); + regs_buff[11] = IXGBE_R32_Q(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_R32_Q(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_R32_Q(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_R32_Q(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_R32_Q(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_R32_Q(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_R32_Q(hw, IXGBE_GRC); /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead * read EICS which is a shadow but doesn't clear EICR */ - regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); - regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); - regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); - regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); - regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); - regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); - regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); - regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); - regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); - regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); - 
regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + regs_buff[18] = IXGBE_R32_Q(hw, IXGBE_EICS); + regs_buff[19] = IXGBE_R32_Q(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_R32_Q(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_R32_Q(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_R32_Q(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_R32_Q(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_R32_Q(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_R32_Q(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_R32_Q(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_R32_Q(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_R32_Q(hw, IXGBE_PBACL(0)); + regs_buff[29] = IXGBE_R32_Q(hw, IXGBE_GPIE); /* Flow Control */ - regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); - for (i = 0; i < 4; i++) - regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); + regs_buff[30] = IXGBE_R32_Q(hw, IXGBE_PFCTOP); + regs_buff[31] = IXGBE_R32_Q(hw, IXGBE_FCTTV(0)); + regs_buff[32] = IXGBE_R32_Q(hw, IXGBE_FCTTV(1)); + regs_buff[33] = IXGBE_R32_Q(hw, IXGBE_FCTTV(2)); + regs_buff[34] = IXGBE_R32_Q(hw, IXGBE_FCTTV(3)); for (i = 0; i < 8; i++) { switch (hw->mac.type) { case ixgbe_mac_82598EB: - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + regs_buff[35 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTL(i)); + regs_buff[43 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTH(i)); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + regs_buff[35 + i] = IXGBE_R32_Q(hw, + IXGBE_FCRTL_82599(i)); + regs_buff[43 + i] = IXGBE_R32_Q(hw, + IXGBE_FCRTH_82599(i)); break; default: break; } } - regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); - regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + regs_buff[51] = IXGBE_R32_Q(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_R32_Q(hw, IXGBE_TFCS); /* Receive DMA */ for (i = 0; i < 64; i++) - regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + regs_buff[53 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAL(i)); for (i = 0; i < 64; i++) - regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + regs_buff[117 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAH(i)); for (i = 0; i < 64; i++) - regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + regs_buff[181 + i] = IXGBE_R32_Q(hw, IXGBE_RDLEN(i)); for (i = 0; i < 64; i++) - regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + regs_buff[245 + i] = IXGBE_R32_Q(hw, IXGBE_RDH(i)); for (i = 0; i < 64; i++) - regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + regs_buff[309 + i] = IXGBE_R32_Q(hw, IXGBE_RDT(i)); for (i = 0; i < 64; i++) - regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + regs_buff[373 + i] = IXGBE_R32_Q(hw, IXGBE_RXDCTL(i)); for (i = 0; i < 16; i++) - regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + regs_buff[437 + i] = IXGBE_R32_Q(hw, IXGBE_SRRCTL(i)); for (i = 0; i < 16; i++) - regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + regs_buff[453 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_R32_Q(hw, IXGBE_RDRXCTL); for (i = 0; i < 8; i++) - regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + regs_buff[470 + i] = IXGBE_R32_Q(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_R32_Q(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_R32_Q(hw, IXGBE_DROPEN); /* Receive */ - 
regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + regs_buff[480] = IXGBE_R32_Q(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_R32_Q(hw, IXGBE_RFCTL); for (i = 0; i < 16; i++) - regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + regs_buff[482 + i] = IXGBE_R32_Q(hw, IXGBE_RAL(i)); for (i = 0; i < 16; i++) - regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); - regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); - regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); - regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); - regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); - regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + regs_buff[498 + i] = IXGBE_R32_Q(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_R32_Q(hw, IXGBE_PSRTYPE(0)); + regs_buff[515] = IXGBE_R32_Q(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_R32_Q(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_R32_Q(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_R32_Q(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_R32_Q(hw, IXGBE_VMD_CTL); for (i = 0; i < 8; i++) - regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + regs_buff[520 + i] = IXGBE_R32_Q(hw, IXGBE_IMIR(i)); for (i = 0; i < 8; i++) - regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); - regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + regs_buff[528 + i] = IXGBE_R32_Q(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_R32_Q(hw, IXGBE_IMIRVP); /* Transmit */ for (i = 0; i < 32; i++) - regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + regs_buff[537 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAL(i)); for (i = 0; i < 32; i++) - regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + regs_buff[569 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAH(i)); for (i = 0; i < 32; i++) - regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + regs_buff[601 + i] = IXGBE_R32_Q(hw, IXGBE_TDLEN(i)); for (i = 0; i < 32; i++) - regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + regs_buff[633 + i] = IXGBE_R32_Q(hw, IXGBE_TDH(i)); for (i = 0; i < 32; i++) - regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + regs_buff[665 + i] = IXGBE_R32_Q(hw, IXGBE_TDT(i)); for (i = 0; i < 32; i++) - regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + regs_buff[697 + i] = IXGBE_R32_Q(hw, IXGBE_TXDCTL(i)); for (i = 0; i < 32; i++) - regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + regs_buff[729 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAL(i)); for (i = 0; i < 32; i++) - regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); - regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + regs_buff[761 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_R32_Q(hw, IXGBE_DTXCTL); for (i = 0; i < 16; i++) - regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + regs_buff[794 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_R32_Q(hw, IXGBE_TIPG); for (i = 0; i < 8; i++) - regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); - regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + regs_buff[811 + i] = IXGBE_R32_Q(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_R32_Q(hw, IXGBE_MNGTXMAP); /* Wake Up */ - regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); - regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); - regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); - regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); - regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); - regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); - 
regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); - regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); - regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + regs_buff[820] = IXGBE_R32_Q(hw, IXGBE_WUC); + regs_buff[821] = IXGBE_R32_Q(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_R32_Q(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_R32_Q(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_R32_Q(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_R32_Q(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_R32_Q(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_R32_Q(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_R32_Q(hw, IXGBE_FHFT(0)); /* DCB */ - regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ - regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ + regs_buff[829] = IXGBE_R32_Q(hw, IXGBE_RMCS); /* same as FCCFG */ + regs_buff[831] = IXGBE_R32_Q(hw, IXGBE_PDPMCS); /* same as RTTPCS */ switch (hw->mac.type) { case ixgbe_mac_82598EB: - regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); - regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + regs_buff[830] = IXGBE_R32_Q(hw, IXGBE_DPMCS); + regs_buff[832] = IXGBE_R32_Q(hw, IXGBE_RUPPBMR); for (i = 0; i < 8; i++) regs_buff[833 + i] = - IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + IXGBE_R32_Q(hw, IXGBE_RT2CR(i)); for (i = 0; i < 8; i++) regs_buff[841 + i] = - IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + IXGBE_R32_Q(hw, IXGBE_RT2SR(i)); for (i = 0; i < 8; i++) regs_buff[849 + i] = - IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + IXGBE_R32_Q(hw, IXGBE_TDTQ2TCCR(i)); for (i = 0; i < 8; i++) regs_buff[857 + i] = - IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + IXGBE_R32_Q(hw, IXGBE_TDTQ2TCSR(i)); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); + regs_buff[830] = IXGBE_R32_Q(hw, IXGBE_RTTDCS); + regs_buff[832] = IXGBE_R32_Q(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) regs_buff[833 + i] = - IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); + IXGBE_R32_Q(hw, IXGBE_RTRPT4C(i)); for (i = 0; i < 8; i++) regs_buff[841 + i] = - IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); + IXGBE_R32_Q(hw, IXGBE_RTRPT4S(i)); for (i = 0; i < 8; i++) regs_buff[849 + i] = - IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); + IXGBE_R32_Q(hw, IXGBE_RTTDT2C(i)); for (i = 0; i < 8; i++) regs_buff[857 + i] = - IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); + IXGBE_R32_Q(hw, IXGBE_RTTDT2S(i)); break; default: break; @@ -720,10 +1158,10 @@ static void ixgbe_get_regs(struct net_device *netdev, for (i = 0; i < 8; i++) regs_buff[865 + i] = - IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ + IXGBE_R32_Q(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ for (i = 0; i < 8; i++) regs_buff[873 + i] = - IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ + IXGBE_R32_Q(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ /* Statistics */ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); @@ -757,10 +1195,8 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); - regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); - regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); - regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); for (i = 0; i < 8; i++) regs_buff[946 + i] 
= IXGBE_GET_STAT(adapter, rnbc[i]); regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); @@ -770,8 +1206,7 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); - regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); + regs_buff[961] = IXGBE_GET_STAT(adapter, tor); regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); @@ -793,103 +1228,258 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); /* MAC */ - regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); - regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); - regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); - regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); - regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); - regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); - regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); - regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); - regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); - regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); - regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); - regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); - regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); - regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); - regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); - regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); - regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); - regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); - regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); - regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); - regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); - regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); - regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); - regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); - regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); - regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); - regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); - regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + regs_buff[1038] = IXGBE_R32_Q(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_R32_Q(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_R32_Q(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG0); + regs_buff[1042] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_R32_Q(hw, IXGBE_PCS1GANA); + regs_buff[1044] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_R32_Q(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_R32_Q(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_R32_Q(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_R32_Q(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_R32_Q(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_R32_Q(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_R32_Q(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_R32_Q(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_R32_Q(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_R32_Q(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_R32_Q(hw, IXGBE_MLADD); + regs_buff[1057] = 
IXGBE_R32_Q(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_R32_Q(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_R32_Q(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_R32_Q(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_R32_Q(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_R32_Q(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_R32_Q(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_R32_Q(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_R32_Q(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_R32_Q(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_R32_Q(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_R32_Q(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_R32_Q(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_R32_Q(hw, IXGBE_ATLASCTL); /* Diagnostic */ - regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); + regs_buff[1071] = IXGBE_R32_Q(hw, IXGBE_RDSTATCTL); for (i = 0; i < 8; i++) - regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); - regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); + regs_buff[1072 + i] = IXGBE_R32_Q(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_R32_Q(hw, IXGBE_RDHMPN); for (i = 0; i < 4; i++) - regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); - regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); - regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + regs_buff[1081 + i] = IXGBE_R32_Q(hw, IXGBE_RIC_DW(i)); + regs_buff[1085] = IXGBE_R32_Q(hw, IXGBE_RDPROBE); + regs_buff[1095] = IXGBE_R32_Q(hw, IXGBE_TDHMPN); + for (i = 0; i < 4; i++) + regs_buff[1096 + i] = IXGBE_R32_Q(hw, IXGBE_TIC_DW(i)); + regs_buff[1100] = IXGBE_R32_Q(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_R32_Q(hw, IXGBE_TXBUFCTRL); + regs_buff[1102] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA0); + regs_buff[1103] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA1); + regs_buff[1104] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA2); + regs_buff[1105] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA3); + regs_buff[1106] = IXGBE_R32_Q(hw, IXGBE_RXBUFCTRL); + regs_buff[1107] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA0); + regs_buff[1108] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA1); + regs_buff[1109] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA2); + regs_buff[1110] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA3); for (i = 0; i < 8; i++) - regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); - regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); - for (i = 0; i < 4; i++) - regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); - regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); - regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); - for (i = 0; i < 4; i++) - regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); - regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); - for (i = 0; i < 4; i++) - regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); - for (i = 0; i < 8; i++) - regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); - regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); - regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); - regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); - regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); - regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); - regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); - regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); - regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); - regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); + regs_buff[1111 + i] = IXGBE_R32_Q(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_R32_Q(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_R32_Q(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_R32_Q(hw, IXGBE_MDFTC2); + regs_buff[1122] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = 
IXGBE_R32_Q(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_R32_Q(hw, IXGBE_MDFTS); + regs_buff[1125] = IXGBE_R32_Q(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_R32_Q(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_R32_Q(hw, IXGBE_PBRXECC); - /* 82599 X540 specific registers */ - regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); + /* 82599 X540 specific registers */ + regs_buff[1128] = IXGBE_R32_Q(hw, IXGBE_MFLCN); - /* 82599 X540 specific DCB registers */ - regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); + /* 82599 X540 specific DCB registers */ + regs_buff[1129] = IXGBE_R32_Q(hw, IXGBE_RTRUP2TC); + regs_buff[1130] = IXGBE_R32_Q(hw, IXGBE_RTTUP2TC); for (i = 0; i < 4; i++) - regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); - regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); - /* same as RTTQCNRM */ - regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); - /* same as RTTQCNRR */ + regs_buff[1131 + i] = IXGBE_R32_Q(hw, IXGBE_TXLLQ(i)); + regs_buff[1135] = IXGBE_R32_Q(hw, IXGBE_RTTBCNRM); + /* same as RTTQCNRM */ + regs_buff[1136] = IXGBE_R32_Q(hw, IXGBE_RTTBCNRD); + /* same as RTTQCNRR */ - /* X540 specific DCB registers */ - regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); - regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); + /* X540 specific DCB registers */ + regs_buff[1137] = IXGBE_R32_Q(hw, IXGBE_RTTQCNCR); + regs_buff[1138] = IXGBE_R32_Q(hw, IXGBE_RTTQCNTG); /* Security config registers */ - regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); - regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); - regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); - regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + regs_buff[1139] = IXGBE_R32_Q(hw, IXGBE_SECTXCTRL); + regs_buff[1140] = IXGBE_R32_Q(hw, IXGBE_SECTXSTAT); + regs_buff[1141] = IXGBE_R32_Q(hw, IXGBE_SECTXBUFFAF); + regs_buff[1142] = IXGBE_R32_Q(hw, IXGBE_SECTXMINIFG); + regs_buff[1143] = IXGBE_R32_Q(hw, IXGBE_SECRXCTRL); + regs_buff[1144] = IXGBE_R32_Q(hw, IXGBE_SECRXSTAT); + } static int ixgbe_get_eeprom_len(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - return adapter->hw.eeprom.word_size * 2; + return pci_resource_len(adapter->pdev, 0); +} + +static u8 ixgbe_nvmupd_get_module(u32 val) +{ + return (u8)(val & IXGBE_NVMUPD_MOD_PNT_MASK); +} + +static int ixgbe_nvmupd_validate_offset(struct ixgbe_adapter *adapter, + u32 offset) +{ + struct net_device *netdev = adapter->netdev; + + switch (offset) { + case IXGBE_STATUS: + case IXGBE_ESDP: + case IXGBE_MSCA: + case IXGBE_MSRWD: + case IXGBE_EEC: + case IXGBE_FLA: + case IXGBE_FLOP: + case IXGBE_SWSM: + case IXGBE_FWSM: + case IXGBE_FACTPS: + case IXGBE_GSSR: + case IXGBE_HICR: + case IXGBE_FWSTS: + return 0; + default: + if ((offset >= IXGBE_MAVTV(0) && offset <= IXGBE_MAVTV(7)) || + (offset >= IXGBE_RAL(0) && offset <= IXGBE_RAH(15))) + return 0; + } + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + switch (offset) { + case IXGBE_AUTOC: + case IXGBE_EERD: + case IXGBE_BARCTRL: + return 0; + default: + if (offset >= 0x00020000 && + offset <= ixgbe_get_eeprom_len(netdev)) + return 0; + } + break; + case ixgbe_mac_X540: + switch (offset) { + case IXGBE_EERD: + case IXGBE_EEWR: + case IXGBE_SRAMREL: + case IXGBE_BARCTRL: + return 0; + default: + if ((offset >= 0x00020000 && + offset <= ixgbe_get_eeprom_len(netdev))) + return 0; 
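+			/* Presumably the mapped FLASH window: offsets from
+			 * 0x00020000 up to the BAR 0 length are accepted here,
+			 * since ixgbe_get_eeprom_len() above now reports
+			 * pci_resource_len() rather than the EEPROM word size.
+			 */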
+ } + break; + case ixgbe_mac_X550: + switch (offset) { + case IXGBE_EEWR: + case IXGBE_SRAMREL: + case IXGBE_PHYCTL_82599: + case IXGBE_FWRESETCNT: + return 0; + default: + if (offset >= IXGBE_FLEX_MNG_PTR(0) && + offset <= IXGBE_FLEX_MNG_PTR(447)) + return 0; + } + break; + case ixgbe_mac_X550EM_x: + switch (offset) { + case IXGBE_PHYCTL_82599: + case IXGBE_NW_MNG_IF_SEL: + case IXGBE_FWRESETCNT: + case IXGBE_I2CCTL_X550: + return 0; + default: + if ((offset >= IXGBE_FLEX_MNG_PTR(0) && + offset <= IXGBE_FLEX_MNG_PTR(447)) || + (offset >= IXGBE_FUSES0_GROUP(0) && + offset <= IXGBE_FUSES0_GROUP(7))) + return 0; + } + break; + case ixgbe_mac_X550EM_a: + switch (offset) { + case IXGBE_PHYCTL_82599: + case IXGBE_NW_MNG_IF_SEL: + case IXGBE_FWRESETCNT: + case IXGBE_I2CCTL_X550: + case IXGBE_FLA_X550EM_a: + case IXGBE_SWSM_X550EM_a: + case IXGBE_FWSM_X550EM_a: + case IXGBE_SWFW_SYNC_X550EM_a: + case IXGBE_FACTPS_X550EM_a: + case IXGBE_EEC_X550EM_a: + return 0; + default: + if (offset >= IXGBE_FLEX_MNG_PTR(0) && + offset <= IXGBE_FLEX_MNG_PTR(447)) + return 0; + } + default: + break; + } + + return -ENOTTY; +} + +static int ixgbe_nvmupd_command(struct ixgbe_hw *hw, + struct ixgbe_nvm_access *nvm, + u8 *bytes) +{ + u32 command; + int ret_val = 0; + u8 module; + + command = nvm->command; + module = ixgbe_nvmupd_get_module(nvm->config); + + switch (command) { + case IXGBE_NVMUPD_CMD_REG_READ: + switch (module) { + case IXGBE_NVMUPD_EXEC_FEATURES: + if (nvm->data_size == hw->nvmupd_features.size) + memcpy(bytes, &hw->nvmupd_features, + hw->nvmupd_features.size); + else + ret_val = -ENOMEM; + break; + default: + if (ixgbe_nvmupd_validate_offset(hw->back, nvm->offset)) + return -ENOTTY; + + if (nvm->data_size == 1) + *((u8 *)bytes) = IXGBE_R8_Q(hw, nvm->offset); + else + *((u32 *)bytes) = IXGBE_R32_Q(hw, nvm->offset); + break; + } + break; + case IXGBE_NVMUPD_CMD_REG_WRITE: + if (ixgbe_nvmupd_validate_offset(hw->back, nvm->offset)) + return -ENOTTY; + + IXGBE_WRITE_REG(hw, nvm->offset, *((u32 *)bytes)); + break; + } + + return ret_val; } static int ixgbe_get_eeprom(struct net_device *netdev, @@ -899,24 +1489,34 @@ static int ixgbe_get_eeprom(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; u16 *eeprom_buff; int first_word, last_word, eeprom_len; + struct ixgbe_nvm_access *nvm; + u32 magic; int ret_val = 0; u16 i; if (eeprom->len == 0) return -EINVAL; + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + nvm = (struct ixgbe_nvm_access *)eeprom; + ret_val = ixgbe_nvmupd_command(hw, nvm, bytes); + return ret_val; + } + + /* normal ethtool get_eeprom support */ eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_len = last_word - first_word + 1; - eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL); + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, - eeprom_buff); + eeprom_buff); /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < eeprom_len; i++) @@ -933,14 +1533,24 @@ static int ixgbe_set_eeprom(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - u16 *eeprom_buff; - void *ptr; int max_len, first_word, last_word, ret_val = 0; - u16 i; + struct ixgbe_nvm_access *nvm; + u32 magic; + u16 *eeprom_buff, i; + void *ptr; if (eeprom->len == 
0) return -EINVAL; + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + nvm = (struct ixgbe_nvm_access *)eeprom; + ret_val = ixgbe_nvmupd_command(hw, nvm, bytes); + return ret_val; + } + + /* normal ethtool set_eeprom support */ + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EINVAL; @@ -965,7 +1575,7 @@ static int ixgbe_set_eeprom(struct net_device *netdev, ptr++; } - if ((eeprom->offset + eeprom->len) & 1) { + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { /* * need read/modify/write of last changed EEPROM word * only the first byte of the word is being modified @@ -986,8 +1596,8 @@ static int ixgbe_set_eeprom(struct net_device *netdev, cpu_to_le16s(&eeprom_buff[i]); ret_val = hw->eeprom.ops.write_buffer(hw, first_word, - last_word - first_word + 1, - eeprom_buff); + last_word - first_word + 1, + eeprom_buff); /* Update the checksum */ if (ret_val == 0) @@ -1003,30 +1613,34 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, ixgbe_driver_name, + sizeof(drvinfo->driver)); strlcpy(drvinfo->version, ixgbe_driver_version, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, adapter->eeprom_id, sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; +#endif } static void ixgbe_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; - struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; ring->rx_max_pending = IXGBE_MAX_RXD; ring->tx_max_pending = IXGBE_MAX_TXD; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; } static int ixgbe_set_ringparam(struct net_device *netdev, @@ -1073,7 +1687,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, /* allocate temporary buffer to store rings in */ i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, adapter->num_rx_queues); - temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring))); + temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); if (!temp_ring) { err = -ENOMEM; @@ -1140,10 +1754,12 @@ static int ixgbe_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) { memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); +#ifdef HAVE_XDP_BUFF_RXQ /* Clear copied XDP RX-queue info */ memset(&temp_ring[i].xdp_rxq, 0, sizeof(temp_ring[i].xdp_rxq)); +#endif temp_ring[i].count = new_rx_count; err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); @@ -1154,9 +1770,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } goto err_setup; } - } + for (i = 0; i < adapter->num_rx_queues; i++) { ixgbe_free_rx_resources(adapter->rx_ring[i]); @@ -1175,8 +1791,21 @@ clear_reset: return err; } +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ixgbe_get_stats_count(struct net_device *netdev) +{ + return IXGBE_STATS_LEN; +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) { +#ifdef HAVE_TX_MQ 
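+/* 'adapter' below is only needed on kernels that compile in TX multiqueue
+ * (HAVE_TX_MQ) but lack ndo_select_queue; the nested guards keep it from
+ * becoming an unused variable everywhere else.
+ */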
+#ifndef HAVE_NETDEV_SELECT_QUEUE + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif +#endif + switch (sset) { case ETH_SS_TEST: return IXGBE_TEST_LEN; @@ -1189,70 +1818,97 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) } } +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ static void ixgbe_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats __always_unused *stats, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *net_stats; +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif + u64 *queue_stat; + int stat_count, k; +#ifdef HAVE_NDO_GET_STATS64 unsigned int start; +#endif struct ixgbe_ring *ring; int i, j; - char *p = NULL; + char *p; ixgbe_update_stats(adapter); - net_stats = dev_get_stats(netdev, &temp); - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - switch (ixgbe_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *) net_stats + - ixgbe_gstrings_stats[i].stat_offset; - break; - case IXGBE_STATS: - p = (char *) adapter + - ixgbe_gstrings_stats[i].stat_offset; - break; - default: - data[i] = 0; - continue; - } - data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset; + data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset; + data[i] = (ixgbe_gstrings_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < netdev->num_tx_queues; j++) { + for (j = 0; j < IXGBE_NUM_TX_QUEUES; j++) { ring = adapter->tx_ring[j]; if (!ring) { - data[i] = 0; - data[i+1] = 0; - i += 2; + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif continue; } +#ifdef HAVE_NDO_GET_STATS64 do { start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; if (!ring) { - data[i] = 0; - data[i+1] = 0; - i += 2; + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif continue; } +#ifdef HAVE_NDO_GET_STATS64 do { start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif } - for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { data[i++] = adapter->stats.pxontxc[j]; data[i++] = adapter->stats.pxofftxc[j]; @@ -1261,56 +1917,111 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i++] = adapter->stats.pxonrxc[j]; data[i++] = adapter->stats.pxoffrxc[j]; } + stat_count = sizeof(struct vf_stats) / sizeof(u64); + for (j = 0; j < 
adapter->num_vfs; j++) { + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] += queue_stat[k]; + i += k; + } } static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; unsigned int i; switch (stringset) { case ETH_SS_TEST: - for (i = 0; i < IXGBE_TEST_LEN; i++) { - memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + memcpy(data, *ixgbe_gstrings_test, + IXGBE_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: + for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { memcpy(p, ixgbe_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < netdev->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); + for (i = 0; i < IXGBE_NUM_TX_QUEUES; i++) { + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { - sprintf(p, "rx_queue_%u_packets", i); + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_pb_%u_pxoff", i); p += ETH_GSTRING_LEN; } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "tx_pb_%u_pxon", i); + snprintf(p, ETH_GSTRING_LEN, "rx_pb_%u_pxon", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_pb_%u_pxoff", i); + snprintf(p, ETH_GSTRING_LEN, "rx_pb_%u_pxoff", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "rx_pb_%u_pxon", i); + for (i = 0; i < adapter->num_vfs; i++) { + snprintf(p, ETH_GSTRING_LEN, "VF %u Rx Packets", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_pb_%u_pxoff", i); + snprintf(p, ETH_GSTRING_LEN, "VF %u Rx Bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "VF %u Tx Packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "VF %u Tx Bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "VF %u MC Packets", i); p += ETH_GSTRING_LEN; } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT case ETH_SS_PRIV_FLAGS: memcpy(data, ixgbe_priv_flags_strings, IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; +#endif /* 
HAVE_ETHTOOL_GET_SSET_COUNT */ } } @@ -1320,13 +2031,12 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) bool link_up; u32 link_speed = 0; - if (ixgbe_removed(hw->hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { *data = 1; return 1; } *data = 0; - - hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); if (link_up) return *data; else @@ -1361,7 +2071,7 @@ struct ixgbe_reg_test { #define TABLE64_TEST_HI 6 /* default 82599 register test */ -static const struct ixgbe_reg_test reg_test_82599[] = { +static struct ixgbe_reg_test reg_test_82599[] = { { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1385,7 +2095,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = { }; /* default 82598 register test */ -static const struct ixgbe_reg_test reg_test_82598[] = { +static struct ixgbe_reg_test reg_test_82598[] = { { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1412,29 +2122,32 @@ static const struct ixgbe_reg_test reg_test_82598[] = { { .reg = 0 } }; + static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 pat, val, before; static const u32 test_pattern[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { *data = 1; return true; } for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { - before = ixgbe_read_reg(&adapter->hw, reg); - ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); - val = ixgbe_read_reg(&adapter->hw, reg); + before = IXGBE_READ_REG(&adapter->hw, reg); + IXGBE_WRITE_REG(&adapter->hw, reg, test_pattern[pat] & write); + val = IXGBE_READ_REG(&adapter->hw, reg); if (val != (test_pattern[pat] & write & mask)) { - e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", - reg, val, (test_pattern[pat] & write & mask)); + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); *data = reg; - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, before); return true; } - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, before); } return false; } @@ -1444,36 +2157,38 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, { u32 val, before; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { *data = 1; return true; } - before = ixgbe_read_reg(&adapter->hw, reg); - ixgbe_write_reg(&adapter->hw, reg, write & mask); - val = ixgbe_read_reg(&adapter->hw, reg); + before = IXGBE_READ_REG(&adapter->hw, reg); + IXGBE_WRITE_REG(&adapter->hw, reg, write & mask); + val = IXGBE_READ_REG(&adapter->hw, reg); if ((write & mask) != (val & mask)) { - e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, before); return true; } - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, 
before); return false; } -static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +static bool ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) { - const struct ixgbe_reg_test *test; + struct ixgbe_reg_test *test; + struct ixgbe_hw *hw = &adapter->hw; u32 value, before, after; u32 i, toggle; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { e_err(drv, "Adapter removed - register test blocked\n"); *data = 1; - return 1; + return true; } - switch (adapter->hw.mac.type) { + switch (hw->mac.type) { case ixgbe_mac_82598EB: toggle = 0x7FFFF3FF; test = reg_test_82598; @@ -1482,13 +2197,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: toggle = 0x7FFFF30F; test = reg_test_82599; break; default: *data = 1; - return 1; + return true; } /* @@ -1497,18 +2212,19 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) * tests. Some bits are read-only, some toggle, and some * are writeable on newer MACs. */ - before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); - value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); - ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); - after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; + before = IXGBE_READ_REG(hw, IXGBE_STATUS); + value = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; + IXGBE_WRITE_REG(hw, IXGBE_STATUS, toggle); + after = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; if (value != after) { - e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + e_err(drv, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", after, value); *data = 1; - return 1; + return true; } /* restore previous status */ - ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); + IXGBE_WRITE_REG(hw, IXGBE_STATUS, before); /* * Perform the remainder of the register test, looping through @@ -1521,61 +2237,63 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) switch (test->test_type) { case PATTERN_TEST: b = reg_pattern_test(adapter, data, - test->reg + (i * 0x40), - test->mask, - test->write); - break; - case SET_READ_TEST: - b = reg_set_and_check(adapter, data, test->reg + (i * 0x40), test->mask, test->write); break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; case WRITE_NO_TEST: - ixgbe_write_reg(&adapter->hw, - test->reg + (i * 0x40), + IXGBE_WRITE_REG(hw, test->reg + (i * 0x40), test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, - test->reg + (i * 4), - test->mask, - test->write); + test->reg + (i * 4), + test->mask, + test->write); break; case TABLE64_TEST_LO: b = reg_pattern_test(adapter, data, - test->reg + (i * 8), - test->mask, - test->write); + test->reg + (i * 8), + test->mask, + test->write); break; case TABLE64_TEST_HI: b = reg_pattern_test(adapter, data, - (test->reg + 4) + (i * 8), - test->mask, - test->write); + (test->reg + 4) + (i * 8), + test->mask, + test->write); break; } if (b) - return 1; + return true; } test++; } *data = 0; - return 0; + return false; } -static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +static bool ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) { struct ixgbe_hw *hw = &adapter->hw; - if (hw->eeprom.ops.validate_checksum(hw, NULL)) + + if (hw->eeprom.ops.validate_checksum(hw, NULL)) { *data = 1; - else + return true; + } else { *data = 0; 
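+		/* checksum is valid; as with ixgbe_reg_test() above, the bool
+		 * return of these self-test helpers is true on failure, with
+		 * the detail code left in *data for ethtool to report
+		 */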
- return *data; + return false; + } } -static irqreturn_t ixgbe_test_intr(int irq, void *data) +static irqreturn_t ixgbe_test_intr(int __always_unused irq, void *data) { struct net_device *netdev = (struct net_device *) data; struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -1591,6 +2309,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) u32 mask, i = 0, shared_int = true; u32 irq = adapter->pdev->irq; + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } *data = 0; /* Hook up test interrupt handler just for this test */ @@ -1599,21 +2321,21 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) return 0; } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { shared_int = false; - if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, + if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, netdev)) { *data = 1; return -1; } - } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, + } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, netdev->name, netdev)) { shared_int = false; - } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, + } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, netdev->name, netdev)) { *data = 1; return -1; } - e_info(hw, "testing %s interrupt\n", shared_int ? - "shared" : "unshared"); + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); @@ -1623,7 +2345,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { /* Interrupt to test */ - mask = BIT(i); + mask = 1 << i; if (!shared_int) { /* @@ -1707,10 +2429,10 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) */ /* first Rx */ - ixgbe_disable_rx(adapter); + ixgbe_disable_rx_queue(adapter); /* now Tx */ - ixgbe_disable_tx(adapter); + ixgbe_disable_tx_queue(adapter); ixgbe_reset(adapter); @@ -1722,7 +2444,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - struct ixgbe_hw *hw = &adapter->hw; u32 rctl, reg_data; int ret_val; int err; @@ -1730,7 +2451,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) /* Setup Tx descriptor ring and Tx buffers */ tx_ring->count = IXGBE_DEFAULT_TXD; tx_ring->queue_index = 0; - tx_ring->dev = &adapter->pdev->dev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; @@ -1743,7 +2464,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1757,9 +2478,12 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) /* Setup Rx Descriptor ring and Rx buffers */ rx_ring->count = IXGBE_DEFAULT_RXD; rx_ring->queue_index = 0; - rx_ring->dev = &adapter->pdev->dev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; +#endif err = ixgbe_setup_rx_resources(adapter, rx_ring); if (err) { @@ -1767,15 +2491,14 @@ static int 
ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) goto err_nomem; } - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(&adapter->hw); ixgbe_configure_rx_ring(adapter, rx_ring); - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); rctl |= IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); - - hw->mac.ops.enable_rx(hw); + ixgbe_enable_rx(&adapter->hw); return 0; @@ -1799,12 +2522,12 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); - /* X540 and X550 needs to set the MACC.FLU bit to force link up */ + /* X540 needs to set the MACC.FLU bit to force link up */ switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); @@ -1871,15 +2594,21 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, frame_size >>= 1; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else data = kmap(rx_buffer->page) + rx_buffer->page_offset; +#endif if (data[3] != 0xFF || data[frame_size + 10] != 0xBE || data[frame_size + 12] != 0xAF) match = false; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT kunmap(rx_buffer->page); +#endif return match; } @@ -1888,6 +2617,11 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, unsigned int size) { union ixgbe_adv_rx_desc *rx_desc; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buf_len; +#else + const int bufsz = ixgbe_rx_bufsz(rx_ring); +#endif u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ @@ -1933,7 +2667,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* sync Rx buffer for CPU read */ dma_sync_single_for_cpu(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_bufsz(rx_ring), + bufsz, DMA_FROM_DEVICE); /* verify contents of skb */ @@ -1945,7 +2679,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* sync Rx buffer for device write */ dma_sync_single_for_device(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_bufsz(rx_ring), + bufsz, DMA_FROM_DEVICE); /* increment Rx next to clean counter */ @@ -1957,8 +2691,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); } - netdev_tx_reset_queue(txring_txq(tx_ring)); - /* re-map buffers to ring, store next to clean values */ ixgbe_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; @@ -1971,7 +2703,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - int i, j, lc, good_cnt, ret_val = 0; + int i, j, lc, ret_val = 0; unsigned int size = 1024; netdev_tx_t tx_ret_val; struct sk_buff *skb; @@ -2001,6 +2733,8 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) lc = ((rx_ring->count / 64) * 2) + 1; for (j = 0; j <= lc; j++) { + unsigned int good_cnt; + /* reset count of good packets */ good_cnt = 0; @@ -2053,13 +2787,21 @@ out: return *data; } +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ixgbe_diag_test_count(struct net_device __always_unused *netdev) +{ + return IXGBE_TEST_LEN; +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ static void ixgbe_diag_test(struct 
net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); bool if_running = netif_running(netdev); + struct ixgbe_hw *hw = &adapter->hw; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { e_err(hw, "Adapter removed - test blocked\n"); data[0] = 1; data[1] = 1; @@ -2071,13 +2813,14 @@ static void ixgbe_diag_test(struct net_device *netdev, } set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - struct ixgbe_hw *hw = &adapter->hw; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { int i; for (i = 0; i < adapter->num_vfs; i++) { if (adapter->vfinfo[i].clear_to_send) { - netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); + e_warn(drv, "Please take active VFs " + "offline and restart the " + "adapter before running NIC " + "diagnostics\n"); data[0] = 1; data[1] = 1; data[2] = 1; @@ -2095,8 +2838,7 @@ static void ixgbe_diag_test(struct net_device *netdev, e_info(hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result - */ + * interfere with test result */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -2124,7 +2866,7 @@ static void ixgbe_diag_test(struct net_device *netdev, * loopback diagnostic. */ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { - e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); data[3] = 0; goto skip_loopback; } @@ -2189,7 +2931,7 @@ static void ixgbe_get_wol(struct net_device *netdev, wol->wolopts = 0; if (ixgbe_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) return; if (adapter->wol & IXGBE_WUFC_EX) @@ -2205,6 +2947,7 @@ static void ixgbe_get_wol(struct net_device *netdev, static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) @@ -2224,7 +2967,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & WAKE_MAGIC) adapter->wol |= IXGBE_WUFC_MAG; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + hw->wol_enabled = !!(adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); return 0; } @@ -2239,6 +2984,7 @@ static int ixgbe_nway_reset(struct net_device *netdev) return 0; } +#ifdef HAVE_ETHTOOL_SET_PHYS_ID static int ixgbe_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { @@ -2254,11 +3000,13 @@ static int ixgbe_set_phys_id(struct net_device *netdev, return 2; case ETHTOOL_ID_ON: - hw->mac.ops.led_on(hw, hw->mac.led_link_act); + if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) + return -EINVAL; break; case ETHTOOL_ID_OFF: - hw->mac.ops.led_off(hw, hw->mac.led_link_act); + if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) + return -EINVAL; break; case ETHTOOL_ID_INACTIVE: @@ -2269,12 +3017,42 @@ static int ixgbe_set_phys_id(struct net_device *netdev, return 0; } +#else +static int ixgbe_phys_id(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + u32 i; + + if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) + 
return -EOPNOTSUPP; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 400) { + if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) + return -EINVAL; + msleep_interruptible(200); + if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) + return -EINVAL; + msleep_interruptible(200); + } + + /* Restore LED settings */ + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + + return IXGBE_SUCCESS; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ static int ixgbe_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) ec->rx_coalesce_usecs = adapter->rx_itr_setting; @@ -2312,7 +3090,8 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); + e_info(probe, "rx-usecs value high enough " + "to re-enable RSC\n"); return true; } /* if interrupt rate is too high then disable RSC */ @@ -2328,9 +3107,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_q_vector *q_vector; int i; - u16 tx_itr_param, rx_itr_param, tx_itr_prev; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; bool need_reset = false; if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { @@ -2342,6 +3121,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev, tx_itr_prev = adapter->tx_itr_setting; } + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; @@ -2385,8 +3167,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev, /* check the old value and enable RSC if necessary */ need_reset |= ixgbe_update_rsc(adapter); + if (adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + ixgbe_dmac_config(&adapter->hw); + } + for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; if (q_vector->tx.count && !q_vector->rx.count) /* tx only */ q_vector->itr = tx_itr_param; @@ -2407,6 +3199,253 @@ static int ixgbe_set_coalesce(struct net_device *netdev, return 0; } +#ifndef HAVE_NDO_SET_FEATURES +static u32 ixgbe_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + /* LRO and RSC both depend on RX checksum to function */ + if (!data && (netdev->features & NETIF_F_LRO)) { + netdev->features &= ~NETIF_F_LRO; + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } + } + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & 
IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + if (!need_reset) + adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; + } else { + netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + ixgbe_clear_udp_tunnel_port(adapter, + IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef NETIF_F_IPV6_CSUM + u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + u32 feature_list = NETIF_F_IP_CSUM; +#endif + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (data) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + else + netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; + feature_list |= NETIF_F_GSO_UDP_TUNNEL; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + feature_list |= NETIF_F_SCTP_CSUM; + break; + default: + break; + } + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + + return 0; +} + +#ifdef NETIF_F_TSO +static int ixgbe_set_tso(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_TSO6 + u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; +#else + u32 feature_list = NETIF_F_TSO; +#endif + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + +#ifndef HAVE_NETDEV_VLAN_FEATURES + if (!data) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct net_device *v_netdev; + int i; + + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~feature_list; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + return 0; +} + +#endif /* NETIF_F_TSO */ +#ifdef ETHTOOL_GFLAGS +static int ixgbe_set_flags(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; + u32 changed = netdev->features ^ data; + bool need_reset = false; + int rc; + +#ifndef HAVE_VLAN_RX_REGISTER + if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + !(data & ETH_FLAG_RXVLAN)) + return -EINVAL; + +#endif + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + supported_flags |= ETH_FLAG_LRO; + +#ifdef ETHTOOL_GRXRINGS + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + supported_flags |= ETH_FLAG_NTUPLE; + default: + break; + } + +#endif +#ifdef NETIF_F_RXHASH + supported_flags |= ETH_FLAG_RXHASH; + +#endif + rc = ethtool_op_set_flags(netdev, data, supported_flags); + if (rc) + return rc; + +#ifndef HAVE_VLAN_RX_REGISTER + if (changed & ETH_FLAG_RXVLAN) + ixgbe_vlan_mode(netdev, netdev->features); +#endif + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + netdev->features & NETIF_F_RXCSUM) + vxlan_get_rx_port(netdev); + else + ixgbe_clear_udp_tunnel_port(adapter, + IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + /* if state changes we need 
to update adapter->flags and reset */ + if (!(netdev->features & NETIF_F_LRO)) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if (changed & ETH_FLAG_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } + } + +#ifdef ETHTOOL_GRXRINGS + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (netdev->features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + } + +#endif /* ETHTOOL_GRXRINGS */ + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2438,6 +3477,12 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, case IXGBE_ATR_FLOW_TYPE_UDPV4: fsp->flow_type = UDP_V4_FLOW; break; + case IXGBE_ATR_FLOW_TYPE_TCPV6: + fsp->flow_type = TCP_V6_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_UDPV6: + fsp->flow_type = UDP_V6_FLOW; + break; case IXGBE_ATR_FLOW_TYPE_SCTPV4: fsp->flow_type = SCTP_V4_FLOW; break; @@ -2451,14 +3496,26 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, return -EINVAL; } - fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; - fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; - fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; - fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; - fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; - fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; - fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; - fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; +#ifdef HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC + if (rule->filter.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) { + fsp->h_u.tcp_ip6_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip6_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip6_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip6_spec.pdst = mask->formatted.dst_port; + } else { +#endif + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = 
rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; +#ifdef HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC + } +#endif + fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; fsp->m_ext.vlan_tci = mask->formatted.vlan_id; fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; @@ -2509,11 +3566,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case UDP_V4_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2523,11 +3580,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, break; case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case UDP_V6_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -2543,7 +3600,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, } static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else u32 *rule_locs) +#endif { struct ixgbe_adapter *adapter = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -2561,7 +3622,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); break; case ETHTOOL_GRXCLSRLALL: - ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); + ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); break; case ETHTOOL_GRXFH: ret = ixgbe_get_rss_hash_opts(adapter, cmd); @@ -2580,7 +3642,8 @@ int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *rule, *parent; - int err = -EINVAL; + bool deleted = false; + s32 err; parent = NULL; rule = NULL; @@ -2595,24 +3658,32 @@ int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* if there is an old rule occupying our place remove it */ if (rule && (rule->sw_idx == sw_idx)) { - if (!input || (rule->filter.formatted.bkt_hash != - input->filter.formatted.bkt_hash)) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && + (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash))) { err = ixgbe_fdir_erase_perfect_filter_82599(hw, &rule->filter, sw_idx); + if (err) + return -EINVAL; } hlist_del(&rule->fdir_node); kfree(rule); adapter->fdir_filter_count--; + deleted = true; } - /* - * If no input this was a delete, err should be 0 if a rule was - * successfully found and removed from the list else -EINVAL + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. */ if (!input) - return err; + return deleted ? 
0 : -EINVAL; /* initialize node and set software index */ INIT_HLIST_NODE(&input->fdir_node); @@ -2640,6 +3711,12 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, case UDP_V4_FLOW: *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; break; + case TCP_V6_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + break; + case UDP_V6_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6; + break; case SCTP_V4_FLOW: *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; break; @@ -2671,6 +3748,24 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, return 1; } +static bool ixgbe_match_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input) +{ + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule = NULL; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + if (rule->filter.formatted.bkt_hash == + input->filter.formatted.bkt_hash && + rule->action == input->action) { + e_info(drv, "FDIR entry already exists\n"); + return true; + } + } + return false; +} + static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2686,7 +3781,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, return -EOPNOTSUPP; /* ring_cookie is masked into a set of queues and ixgbe pools or - * we use the drop index. + * we use drop index */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { queue = IXGBE_FDIR_DROP_QUEUE; @@ -2694,7 +3789,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); - if (!vf && (ring >= adapter->num_rx_queues)) + if (!vf && ring >= adapter->num_rx_queues) return -EINVAL; else if (vf && ((vf > adapter->num_vfs) || @@ -2737,15 +3832,46 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; +#ifdef HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC + /* full IPv6 address filtering is not supported */ + if (input->filter.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) { + if (fsp->m_u.tcp_ip6_spec.ip6src[0] || + fsp->m_u.tcp_ip6_spec.ip6src[1] || + fsp->m_u.tcp_ip6_spec.ip6src[2] || + fsp->m_u.tcp_ip6_spec.ip6src[3] || + fsp->m_u.tcp_ip6_spec.ip6dst[0] || + fsp->m_u.tcp_ip6_spec.ip6dst[1] || + fsp->m_u.tcp_ip6_spec.ip6dst[2] || + fsp->m_u.tcp_ip6_spec.ip6dst[3]) { + e_err(drv, "Error: IPv6 address filters are not supported\n"); + goto err_out; + } + input->filter.formatted.src_port = fsp->h_u.tcp_ip6_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip6_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + } else { +#endif + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = + fsp->m_u.tcp_ip4_spec.ip4src;
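+		/* IPv4 case: values come from h_u and user-supplied masks
+		 * from m_u of the ethtool flow spec (e.g. 'ethtool -U <dev>
+		 * flow-type tcp4 src-ip <ip> m <mask> ...'); all of these
+		 * fields are already in network byte order.
+		 */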
+ input->filter.formatted.dst_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = + fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = + fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = + fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = + fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = + fsp->m_u.tcp_ip4_spec.pdst; +#ifdef HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC + } +#endif if (fsp->flow_type & FLOW_EXT) { input->filter.formatted.vm_pool = @@ -2770,30 +3896,40 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, if (hlist_empty(&adapter->fdir_filter_list)) { /* save mask and program input mask into HW */ memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask, adapter->cloud_mode); if (err) { e_err(drv, "Error writing mask\n"); goto err_out_w_lock; } } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { - e_err(drv, "Only one mask supported per port\n"); + e_err(drv, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n"); goto err_out_w_lock; } /* apply mask and compute/store hash */ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); - /* program filters to filter memory */ - err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, queue); - if (err) + /* do not add the filter if an identical entry is already on the list */ + if (ixgbe_match_ethtool_fdir_entry(adapter, input)) goto err_out_w_lock; + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(adapter->netdev)) { + err = ixgbe_fdir_write_perfect_filter_82599(hw, + &input->filter, input->sw_idx, queue, + adapter->cloud_mode); + if (err) + goto err_out_w_lock; + } + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); - return err; + return 0; err_out_w_lock: spin_unlock(&adapter->fdir_perfect_lock); err_out: @@ -2815,6 +3951,19 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, return err; } +#ifdef ETHTOOL_SRXNTUPLE +/* + * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid + * a NULL pointer dereference: those kernels assumed this function was + * present whenever the NETIF_F_NTUPLE flag was defined. 
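+ * (The stub simply returns -EOPNOTSUPP; on current kernels perfect filters
+ * are inserted through the ETHTOOL_SRXCLSRLINS path in ixgbe_set_rxnfc().)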
+ */ +static int ixgbe_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd) +{ + return -EOPNOTSUPP; +} + +#endif #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, @@ -2901,7 +4050,8 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, if ((flags2 & UDP_RSS_FLAGS) && !(adapter->flags2 & UDP_RSS_FLAGS)) - e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); adapter->flags2 = flags2; @@ -2952,6 +4102,7 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) { if (adapter->hw.mac.type < ixgbe_mac_X550) @@ -2984,13 +4135,19 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) indir[i] = adapter->rss_indir_tbl[i] & rss_m; } +#ifdef HAVE_RXFH_HASHFUNC static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_RXFH_HASHFUNC if (hfunc) *hfunc = ETH_RSS_HASH_TOP; +#endif if (indir) ixgbe_get_reta(adapter, indir); @@ -3001,15 +4158,26 @@ static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +#ifdef HAVE_RXFH_HASHFUNC static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int ixgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); +#ifdef HAVE_RXFH_HASHFUNC if (hfunc) return -EINVAL; +#endif /* Fill out the redirection table */ if (indir) { @@ -3040,7 +4208,9 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; } +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef HAVE_ETHTOOL_GET_TS_INFO static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -3050,9 +4220,10 @@ static int ixgbe_get_ts_info(struct net_device *dev, info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); switch (adapter->hw.mac.type) { +#ifdef HAVE_PTP_1588_CLOCK case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); break; case ixgbe_mac_X540: @@ -3062,17 +4233,19 @@ static int ixgbe_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); break; +#endif /* HAVE_PTP_1588_CLOCK */ default: return ethtool_op_get_ts_info(dev, info); } +#ifdef HAVE_PTP_1588_CLOCK info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; 
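+	/* advertise the PHC backing this port so that userspace PTP stacks
+	 * (e.g. ptp4l, which issues ETHTOOL_GET_TS_INFO to find the matching
+	 * /dev/ptp* device) can bind to it
+	 */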
if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); @@ -3084,12 +4257,16 @@ static int ixgbe_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_TX_ON); return 0; +#endif /* HAVE_PTP_1588_CLOCK */ } +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_SCHANNELS static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) { unsigned int max_combined; - u8 tcs = adapter->hw_tcs; + u8 tcs = netdev_get_num_tc(adapter->netdev); if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { /* We only support one q_vector without MSI-X */ @@ -3113,7 +4290,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) /* support up to 64 queues with ATR */ max_combined = IXGBE_MAX_FDIR_INDICES; } else { - /* support up to 16 queues with RSS */ + /* support up to max allowed queues with RSS */ max_combined = ixgbe_max_rss_indices(adapter); } @@ -3146,7 +4323,7 @@ static void ixgbe_get_channels(struct net_device *dev, return; /* same thing goes for being DCB enabled */ - if (adapter->hw_tcs > 1) + if (netdev_get_num_tc(dev) > 1) return; /* if ATR is disabled we can exit */ @@ -3184,41 +4361,40 @@ static int ixgbe_set_channels(struct net_device *dev, count = max_rss_indices; adapter->ring_feature[RING_F_RSS].limit = count; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* cap FCoE limit at 8 */ if (count > IXGBE_FCRETA_SIZE) count = IXGBE_FCRETA_SIZE; adapter->ring_feature[RING_F_FCOE].limit = count; +#endif /* CONFIG_FCOE */ -#endif /* use setup TC to update any traffic class queue mapping */ - return ixgbe_setup_tc(dev, adapter->hw_tcs); + return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); } +#endif /* ETHTOOL_SCHANNELS */ +#ifdef ETHTOOL_GMODULEINFO static int ixgbe_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status; + u32 status; u8 sff8472_rev, addr_mode; bool page_swap = false; - if (hw->phy.type == ixgbe_phy_fw) - return -ENXIO; - /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &sff8472_rev); - if (status) + if (status != 0) return -EIO; /* addressing mode is not supported */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &addr_mode); - if (status) + if (status != 0) return -EIO; if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { @@ -3226,8 +4402,7 @@ static int ixgbe_get_module_info(struct net_device *dev, page_swap = true; } - if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap || - !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) { + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { /* We have a SFP, but it does not support SFF-8472 */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; @@ -3246,16 +4421,13 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 status = IXGBE_ERR_PHY_ADDR_INVALID; u8 databyte = 0xFF; int i = 0; if (ee->len == 0) return -EINVAL; - if (hw->phy.type == ixgbe_phy_fw) - return -ENXIO; - for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -3266,7 +4438,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, else status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); - if (status) + if (status != 0) return -EIO; data[i 
- ee->offset] = databyte; @@ -3274,6 +4446,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, return 0; } +#endif /* ETHTOOL_GMODULEINFO */ + +#ifdef ETHTOOL_GEEE static const struct { ixgbe_link_speed mac_speed; @@ -3341,6 +4516,9 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + if (!hw->mac.ops.setup_eee) + return -EOPNOTSUPP; + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) return -EOPNOTSUPP; @@ -3349,7 +4527,9 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EOPNOTSUPP; } +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -3357,7 +4537,8 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) struct ethtool_eee eee_data; s32 ret_val; - if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))) return -EOPNOTSUPP; memset(&eee_data, 0, sizeof(struct ethtool_eee)); @@ -3366,26 +4547,23 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) if (ret_val) return ret_val; - if (eee_data.eee_enabled && !edata->eee_enabled) { - if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { - e_err(drv, "Setting EEE tx-lpi is not supported\n"); - return -EINVAL; - } + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } - if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { - e_err(drv, - "Setting EEE Tx LPI timer is not supported\n"); - return -EINVAL; - } + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } - if (eee_data.advertised != edata->advertised) { - e_err(drv, - "Setting EEE advertised speeds is not supported\n"); - return -EINVAL; - } + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not supported\n"); + return -EINVAL; } if (eee_data.eee_enabled != edata->eee_enabled) { + if (edata->eee_enabled) { adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; hw->phy.eee_speeds_advertised = @@ -3404,89 +4582,235 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) return 0; } +#endif /* ETHTOOL_SEEE */ +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +/** + * ixgbe_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the ixgbe_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. 
+ **/ static u32 ixgbe_get_priv_flags(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u32 priv_flags = 0; + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + priv_flags |= IXGBE_PRIV_FLAGS_FD_ATR; +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; - - if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED) - priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; +#endif return priv_flags; } +/** + * ixgbe_set_priv_flags - set private flags + * @netdev: network interface device structure + * @priv_flags: bit flags to be set + **/ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC unsigned int flags2 = adapter->flags2; +#endif + unsigned int flags = adapter->flags; + + /* allow the user to control the state of the Flow + * Director ATR (Application Targeted Routing) feature + * of the driver + */ + flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + if (priv_flags & IXGBE_PRIV_FLAGS_FD_ATR) { + /* We cannot enable ATR if VMDq is enabled */ + if (flags & IXGBE_FLAG_VMDQ_ENABLED) + return -EINVAL; + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + return -EINVAL; + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + return -EINVAL; + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + return -EINVAL; + flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC flags2 &= ~IXGBE_FLAG2_RX_LEGACY; if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) flags2 |= IXGBE_FLAG2_RX_LEGACY; +#endif - flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED; - if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) - flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (flags != adapter->flags) { + adapter->flags = flags; - if (flags2 != adapter->flags2) { + /* ATR state change requires a reset */ + ixgbe_do_reset(netdev); +#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC + } +#else + } else if (flags2 != adapter->flags2) { adapter->flags2 = flags2; /* reset interface to repopulate queues */ if (netif_running(netdev)) ixgbe_reinit_locked(adapter); } +#endif return 0; } -static const struct ethtool_ops ixgbe_ethtool_ops = { - .get_drvinfo = ixgbe_get_drvinfo, - .get_regs_len = ixgbe_get_regs_len, - .get_regs = ixgbe_get_regs, - .get_wol = ixgbe_get_wol, - .set_wol = ixgbe_set_wol, - .nway_reset = ixgbe_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = ixgbe_get_eeprom_len, - .get_eeprom = ixgbe_get_eeprom, - .set_eeprom = ixgbe_set_eeprom, - .get_ringparam = ixgbe_get_ringparam, - .set_ringparam = ixgbe_set_ringparam, - .get_pauseparam = ixgbe_get_pauseparam, - .set_pauseparam = ixgbe_set_pauseparam, - .get_msglevel = ixgbe_get_msglevel, - .set_msglevel = ixgbe_set_msglevel, - .self_test = ixgbe_diag_test, - .get_strings = ixgbe_get_strings, - .set_phys_id = ixgbe_set_phys_id, - .get_sset_count = ixgbe_get_sset_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static struct ethtool_ops ixgbe_ethtool_ops = { +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = ixgbe_get_link_ksettings, + .set_link_ksettings = ixgbe_set_link_ksettings, +#else + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, +#endif + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .set_wol = ixgbe_set_wol, + .nway_reset = 
ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = ixgbe_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = ixgbe_diag_test, + .get_strings = ixgbe_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = ixgbe_set_phys_id, +#else + .phys_id = ixgbe_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = ixgbe_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = ixgbe_get_sset_count, + .get_priv_flags = ixgbe_get_priv_flags, + .set_priv_flags = ixgbe_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ .get_ethtool_stats = ixgbe_get_ethtool_stats, - .get_coalesce = ixgbe_get_coalesce, - .set_coalesce = ixgbe_set_coalesce, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = ixgbe_get_rx_csum, + .set_rx_csum = ixgbe_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ixgbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ixgbe_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = ixgbe_set_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = ixgbe_set_rx_ntuple, +#endif +#endif /* ETHTOOL_GRXRINGS */ +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_GEEE + .get_eee = ixgbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ixgbe_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef ETHTOOL_SCHANNELS + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, +#endif +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = ixgbe_get_ts_info, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) .get_rxfh_indir_size = ixgbe_rss_indir_size, .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, - .get_eee = ixgbe_get_eee, - .set_eee = ixgbe_set_eee, - .get_channels = ixgbe_get_channels, - .set_channels = ixgbe_set_channels, - .get_priv_flags = ixgbe_get_priv_flags, - .set_priv_flags = ixgbe_set_priv_flags, - .get_ts_info = ixgbe_get_ts_info, - .get_module_info = ixgbe_get_module_info, - .get_module_eeprom = ixgbe_get_module_eeprom, - .get_link_ksettings = ixgbe_get_link_ksettings, - .set_link_ksettings = ixgbe_set_link_ksettings, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ }; +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext ixgbe_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + 
.get_ts_info = ixgbe_get_ts_info, + .set_phys_id = ixgbe_set_phys_id, + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, + .set_rxfh = ixgbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_GEEE + .get_eee = ixgbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ixgbe_set_eee, +#endif /* ETHTOOL_SEEE */ +}; + +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ void ixgbe_set_ethtool_ops(struct net_device *netdev) { +#ifndef ETHTOOL_OPS_COMPAT netdev->ethtool_ops = &ixgbe_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &ixgbe_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ } +#endif /* SIOCETHTOOL */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ccd852ad62a4..79ed5068bb08 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" + +#if IS_ENABLED(CONFIG_FCOE) +#if IS_ENABLED(CONFIG_DCB) +#include "ixgbe_dcb_82599.h" +#endif /* CONFIG_DCB */ #include -#include -#include #include #include #include @@ -42,72 +45,76 @@ static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) */ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) { - int len; + int len = 0; struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; - struct ixgbe_fcoe_ddp *ddp; struct ixgbe_hw *hw; + struct ixgbe_fcoe_ddp *ddp; u32 fcbuff; if (!netdev) - return 0; + goto out_ddp_put; - if (xid >= netdev->fcoe_ddp_xid) - return 0; + if (xid > netdev->fcoe_ddp_xid) + goto out_ddp_put; adapter = netdev_priv(netdev); + hw = &adapter->hw; fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (!ddp->udl) - return 0; + goto out_ddp_put; - hw = &adapter->hw; len = ddp->len; - /* if no error then skip ddp context invalidation */ - if (!ddp->err) - goto skip_ddpinv; + /* if there is an error, force invalidation of the ddp context */ + if (ddp->err) { + switch (hw->mac.type) { + case ixgbe_mac_X550: + /* X550 does not require DDP FCoE lock */ - if (hw->mac.type == ixgbe_mac_X550) { - /* X550 does not require DDP FCoE lock */ + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), - (xid | IXGBE_FCFLTRW_WE)); + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); - /* program FCBUFF */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); - /* program FCDMARW */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), - (xid | IXGBE_FCDMARW_WE)); + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, + IXGBE_FCDDC(2, xid)); + break; + default: + /* other hardware requires DDP FCoE lock */ + spin_lock_bh(&fcoe->lock); - /* read FCBUFF
to check context invalidated */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), - (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); - } else { - /* other hardware requires DDP FCoE lock */ - spin_lock_bh(&fcoe->lock); - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, - (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, - (xid | IXGBE_FCDMARW_WE)); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, + (xid | IXGBE_FCFLTRW_WE)); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_WE)); - /* guaranteed to be invalidated after 100us */ - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, - (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); - spin_unlock_bh(&fcoe->lock); + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); + + spin_unlock_bh(&fcoe->lock); + break; } - if (fcbuff & IXGBE_FCBUFF_VALID) - usleep_range(100, 150); - -skip_ddpinv: + /* guaranteed to be invalidated after 100us */ + if (fcbuff & IXGBE_FCBUFF_VALID) + udelay(100); + } if (ddp->sgl) - dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); if (ddp->pool) { dma_pool_free(ddp->pool, ddp->udl, ddp->udp); @@ -116,6 +123,7 @@ skip_ddpinv: ixgbe_fcoe_clear_ddp(ddp); +out_ddp_put: return len; } @@ -149,11 +157,11 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; - if (!netdev || !sgl) + if (!netdev || !sgl || !sgc) return 0; adapter = netdev_priv(netdev); - if (xid >= netdev->fcoe_ddp_xid) { + if (xid > netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } @@ -167,7 +175,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, ddp = &fcoe->ddp[xid]; if (ddp->sgl) { e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", - xid, ddp->sgl, ddp->sgc); + xid, ddp->sgl, ddp->sgc); return 0; } ixgbe_fcoe_clear_ddp(ddp); @@ -185,7 +193,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* setup dma from scsi command sgl */ - dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { e_err(drv, "xid 0x%x DMA map error\n", xid); goto out_noddp; @@ -243,8 +251,9 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, lastsize = thisoff + thislen; /* - * lastsize can not be buffer len. + * lastsize can not be bufflen. * If it is then adding another buffer with lastsize = 1. + * Since lastsize is 1 there will be no HW access to this buffer. 
*/ if (lastsize == bufflen) { if (j >= IXGBE_BUFFCNT_MAX) { @@ -285,7 +294,8 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); } - if (hw->mac.type == ixgbe_mac_X550) { + switch (hw->mac.type) { + case ixgbe_mac_X550: /* X550 does not require DDP lock */ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), @@ -297,8 +307,14 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); - } else { - /* DDP lock for indirect DDP context access */ + /* + * TBD: SMAC and FCID info not available with current + * netdev APIs, add code to pull that from skb later + * and then program that here before enabling DDP context. + */ + break; + default: + /* other devices require DDP lock with direct DDP context access */ spin_lock_bh(&fcoe->lock); IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); @@ -311,6 +327,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); spin_unlock_bh(&fcoe->lock); + break; } return 1; @@ -320,7 +337,7 @@ out_noddp_free: ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: - dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); out_noddp: put_cpu(); return 0; @@ -346,6 +363,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); } +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET /** * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode * @netdev: the corresponding net_device @@ -362,11 +380,12 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, * Returns : 1 for success and 0 for no ddp */ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) + struct scatterlist *sgl, unsigned int sgc) { return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); } +#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ /** * ixgbe_fcoe_ddp - check ddp status and mark it done * @adapter: ixgbe adapter @@ -375,7 +394,7 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, * * This checks ddp status. * - * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates + * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates * not passing the skb to ULD, > 0 indicates is the length of data * being ddped. 
*/ @@ -383,14 +402,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - int rc = -EINVAL; - struct ixgbe_fcoe *fcoe; + struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_fcoe_ddp *ddp; struct fc_frame_header *fh; - struct fcoe_crc_eof *crc; + int rc = -EINVAL, ddp_max; __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); __le32 ddp_err; - int ddp_max; u32 fctl; u16 xid; @@ -399,35 +416,36 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else skb->ip_summed = CHECKSUM_UNNECESSARY; - if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) - fh = (struct fc_frame_header *)(skb->data + - sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); - else - fh = (struct fc_frame_header *)(skb->data + - sizeof(struct fcoe_hdr)); + /* verify header contains at least the FCOE header */ + BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN); + + fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); + + if (skb->protocol == htons(ETH_P_8021Q)) + fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN); fctl = ntoh24(fh->fh_f_ctl); if (fctl & FC_FC_EX_CTX) - xid = be16_to_cpu(fh->fh_ox_id); + xid = ntohs(fh->fh_ox_id); else - xid = be16_to_cpu(fh->fh_rx_id); + xid = ntohs(fh->fh_rx_id); ddp_max = IXGBE_FCOE_DDP_MAX; /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) ddp_max = IXGBE_FCOE_DDP_MAX_X550; - if (xid >= ddp_max) - return -EINVAL; - fcoe = &adapter->fcoe; + if (xid >= ddp_max) + goto ddp_out; + ddp = &fcoe->ddp[xid]; if (!ddp->udl) - return -EINVAL; + goto ddp_out; ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | IXGBE_RXDADV_ERR_FCERR); if (ddp_err) - return -EINVAL; + goto ddp_out; switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { /* return 0 to bypass going to ULD for DDPed data */ @@ -438,9 +456,9 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, break; /* unmap the sg list when FCPRSP is received */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): - dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); - ddp->err = (__force u32)ddp_err; + ddp->err = ddp_err; ddp->sgl = NULL; ddp->sgc = 0; /* fall through */ @@ -466,11 +484,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, */ if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && (fctl & FC_FC_END_SEQ)) { + struct fcoe_crc_eof *crc; skb_linearize(skb); - crc = skb_put(skb, sizeof(*crc)); + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); crc->fcoe_eof = FC_EOF_T; } - +ddp_out: return rc; } @@ -493,15 +512,17 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; - u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { - dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", - skb_shinfo(skb)->gso_type); +#ifdef NETIF_F_FSO + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { + dev_err(tx_ring->dev, "Wrong gso type %d:expecting " + "SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type); return -EINVAL; } +#endif /* resets the header to point fcoe/fc */ skb_set_network_header(skb, skb->mac_len); skb_set_transport_header(skb, skb->mac_len + @@ -611,7 +632,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, struct dma_pool *pool; char pool_name[32]; - snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); + snprintf(pool_name, 32, 
"ixgbe_fcoe_ddp_%d", cpu); pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, PAGE_SIZE); @@ -638,11 +659,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - int i, fcoe_q, fcoe_i, fcoe_q_h = 0; - int fcreta_size; + int i, fcoe_i; + u32 fcoe_q, fcoe_q_h = 0; u32 etqf; + int fcreta_size; - /* Minimal functionality for FCoE requires at least CRC offloads */ + /* Minimal funcionality for FCoE requires at least CRC offloads */ if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) return; @@ -655,7 +677,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - /* leave registers un-configured if FCoE is disabled */ + /* leave remaining registers unconfigued if FCoE is disabled */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return; @@ -672,9 +694,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & IXGBE_FCRETA_ENTRY_HIGH_MASK; } - fcoe_i = fcoe->offset + (i % fcoe->indices); - fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + if (fcoe_i >= MAX_RX_QUEUES) + fcoe_i = MAX_RX_QUEUES - 1; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; fcoe_q |= fcoe_q_h; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); @@ -729,7 +751,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) for_each_possible_cpu(cpu) ixgbe_fcoe_dma_pool_free(fcoe, cpu); - dma_unmap_single(&adapter->pdev->dev, + dma_unmap_single(pci_dev_to_dev(adapter->pdev), fcoe->extra_ddp_buffer_dma, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); @@ -750,7 +772,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct device *dev = &adapter->pdev->dev; + struct device *dev = pci_dev_to_dev(adapter->pdev); void *buffer; dma_addr_t dma; unsigned int cpu; @@ -761,8 +783,10 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) /* Extra buffer to be shared by all DDPs for HW work around */ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); - if (!buffer) + if (!buffer) { + e_err(drv, "failed to allocate extra DDP buffer\n"); return -ENOMEM; + } dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma)) { @@ -788,7 +812,11 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) return 0; } +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +#else static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +#endif { struct ixgbe_fcoe *fcoe = &adapter->fcoe; @@ -803,6 +831,7 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) } adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; @@ -810,7 +839,11 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) return 0; } +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +#else static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +#endif { struct ixgbe_fcoe *fcoe = &adapter->fcoe; @@ -823,6 +856,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) fcoe->ddp_pool = NULL; } +#ifdef 
HAVE_NETDEV_OPS_FCOE_ENABLE /** * ixgbe_fcoe_enable - turn on FCoE offload feature * @netdev: the corresponding netdev @@ -910,7 +944,28 @@ int ixgbe_fcoe_disable(struct net_device *netdev) return 0; } +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_OPS_GETAPP +/** + * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE + * @netdev: the corresponding net_device + * + * Finds out the corresponding user priority bitmap from the current + * traffic class that FCoE belongs to. Returns 0 as the invalid user + * priority bitmap to indicate an error. + * + * Returns : 802.1p user priority bitmap for FCoE + */ +u8 ixgbe_fcoe_getapp(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return 1 << adapter->fcoe.up; +} +#endif /* HAVE_DCBNL_OPS_GETAPP */ +#endif /* CONFIG_DCB */ +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN /** * ixgbe_fcoe_get_wwn - get world wide name for the node or the port * @netdev : ixgbe adapter @@ -925,6 +980,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev) */ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) { + int rc = -EINVAL; u16 prefix = 0xffff; struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_mac_info *mac = &adapter->hw.mac; @@ -949,94 +1005,12 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) ((u64) mac->san_addr[3] << 16) | ((u64) mac->san_addr[4] << 8) | ((u64) mac->san_addr[5]); - return 0; + rc = 0; } - return -EINVAL; -} - -/** - * ixgbe_fcoe_get_hbainfo - get FCoE HBA information - * @netdev : ixgbe adapter - * @info : HBA information - * - * Returns ixgbe HBA information - * - * Returns : 0 on success - */ -int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int i, pos; - u8 buf[8]; - - if (!info) - return -EINVAL; - - /* Don't return information on unsupported devices */ - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return -EINVAL; - - /* Manufacturer */ - snprintf(info->manufacturer, sizeof(info->manufacturer), - "Intel Corporation"); - - /* Serial Number */ - - /* Get the PCI-e Device Serial Number Capability */ - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); - if (pos) { - pos += 4; - for (i = 0; i < 8; i++) - pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); - - snprintf(info->serial_number, sizeof(info->serial_number), - "%02X%02X%02X%02X%02X%02X%02X%02X", - buf[7], buf[6], buf[5], buf[4], - buf[3], buf[2], buf[1], buf[0]); - } else - snprintf(info->serial_number, sizeof(info->serial_number), - "Unknown"); - - /* Hardware Version */ - snprintf(info->hardware_version, - sizeof(info->hardware_version), - "Rev %d", hw->revision_id); - /* Driver Name/Version */ - snprintf(info->driver_version, - sizeof(info->driver_version), - "%s v%s", - ixgbe_driver_name, - ixgbe_driver_version); - /* Firmware Version */ - strlcpy(info->firmware_version, adapter->eeprom_id, - sizeof(info->firmware_version)); - - /* Model */ - if (hw->mac.type == ixgbe_mac_82599EB) { - snprintf(info->model, - sizeof(info->model), - "Intel 82599"); - } else if (hw->mac.type == ixgbe_mac_X550) { - snprintf(info->model, - sizeof(info->model), - "Intel X550"); - } else { - snprintf(info->model, - sizeof(info->model), - "Intel X540"); - } - - /* Model Description */ - snprintf(info->model_description, - sizeof(info->model_description), - "%s", - ixgbe_default_device_descr); - - return 
0; + return rc; } +#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ /** * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to * @adapter: pointer to the device adapter structure @@ -1045,9 +1019,6 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, */ u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) { -#ifdef CONFIG_IXGBE_DCB return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); -#else - return 0; -#endif } +#endif /* CONFIG_FCOE */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 724f5382329f..32ce69ad50b7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,8 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#ifndef _IXGBE_FCOE_H -#define _IXGBE_FCOE_H +#ifndef _IXGBE_FCOE_H_ +#define _IXGBE_FCOE_H_ + +#if IS_ENABLED(CONFIG_FCOE) #include #include @@ -13,7 +15,7 @@ /* ddp user buffer */ #define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ #define IXGBE_FCPTR_ALIGN 16 -#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) #define IXGBE_FCBUFF_4KB 0x0 #define IXGBE_FCBUFF_8KB 0x1 #define IXGBE_FCBUFF_16KB 0x2 @@ -23,11 +25,16 @@ #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ #define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ -/* Default traffic class to use for FCoE */ -#define IXGBE_FCOE_DEFTC 3 +/* Default user priority to use for FCoE */ +#define IXGBE_FCOE_DEFUP 3 /* fcerr */ -#define IXGBE_FCERR_BADCRC 0x00100000 +#define IXGBE_FCERR_BADCRC 0x00100000 +#define IXGBE_FCERR_EOFSOF 0x00200000 +#define IXGBE_FCERR_NOFIRST 0x00300000 +#define IXGBE_FCERR_OOOSEQ 0x00400000 +#define IXGBE_FCERR_NODMA 0x00500000 +#define IXGBE_FCERR_PKTLOST 0x00600000 /* FCoE DDP for target mode */ #define __IXGBE_FCOE_TARGET 1 @@ -58,6 +65,8 @@ struct ixgbe_fcoe { dma_addr_t extra_ddp_buffer_dma; unsigned long mode; u8 up; + u8 up_set; }; +#endif /* CONFIG_FCOE */ #endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c new file mode 100644 index 000000000000..79e2d35b403a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#include "ixgbe_hv_vf.h" + +/** + * Hyper-V variant - just a stub. + * @hw: unused + * @mc_addr_list: unused + * @mc_addr_count: unused + * @next: unused + * @clear: unused + */ +s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. + * @hw: unused + * @xcast_mode: unused + */ +s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) +{ + UNREFERENCED_2PARAMETER(hw, xcast_mode); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. 
+ * @hw: unused + * @vlan: unused + * @vind: unused + * @vlan_on: unused + * @vlvf_bypass: unused + */ +s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + UNREFERENCED_3PARAMETER(hw, index, addr); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant; there is no mailbox communication. + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true if link is up, false otherwise + * @autoneg_wait_to_complete: unused + * + */ +s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + u32 links_reg; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* If we were hit with a reset, drop the link */ + if (!mbx->ops[0].check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Reserved for pre-x550 devices */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + * Hyper-V variant. + **/ +s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 reg; + + /* If we are on Hyper-V, we implement this functionality + * differently. + */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); + /* CRC == 4 */ + reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/ +int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) +{ + UNREFERENCED_1PARAMETER(hw); + + /* Hyper-V only supports api version ixgbe_mbox_api_10 */ + if (api != ixgbe_mbox_api_10) + return IXGBE_ERR_INVALID_ARGUMENT; + + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers, adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. + **/ +s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw) +{ + /* Set defaults for VF then override applicable Hyper-V + * specific functions + */ + ixgbe_init_ops_vf(hw); + + hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf; + hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf; + hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf; + hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf; + hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf; + hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode; + hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf; + hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf; + hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf; + + return IXGBE_SUCCESS; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h new file mode 100644 index 000000000000..f035a574e58c --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_HV_VF_H_ +#define _IXGBE_HV_VF_H_ + +/* On Hyper-V, to reset, we need to read from this offset + * from the PCI config space. This is the mechanism used on + * Hyper-V to support PF/VF communication. + */ +#define IXGBE_HV_RESET_OFFSET 0x201 + +#include "ixgbe_vf.h" + +s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete); +s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); +s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr, + bool clear); +s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode); +s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass); +s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api); + +extern s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw); +extern s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq, u32 enable_addr); +#endif /* _IXGBE_HV_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c deleted file mode 100644 index 113f6087c7c9..000000000000 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ /dev/null @@ -1,1296 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. 
*/ - -#include "ixgbe.h" -#include -#include -#include - -#define IXGBE_IPSEC_KEY_BITS 160 -static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; - -static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); - -/** - * ixgbe_ipsec_set_tx_sa - set the Tx SA registers - * @hw: hw specific details - * @idx: register index to write - * @key: key byte array - * @salt: salt bytes - **/ -static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx, - u32 key[], u32 salt) -{ - u32 reg; - int i; - - for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), - (__force u32)cpu_to_be32(key[3 - i])); - IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt)); - IXGBE_WRITE_FLUSH(hw); - - reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX); - reg &= IXGBE_RXTXIDX_IPS_EN; - reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE; - IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_ipsec_set_rx_item - set an Rx table item - * @hw: hw specific details - * @idx: register index to write - * @tbl: table selector - * - * Trigger the device to store into a particular Rx table the - * data that has already been loaded into the input register - **/ -static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx, - enum ixgbe_ipsec_tbl_sel tbl) -{ - u32 reg; - - reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX); - reg &= IXGBE_RXTXIDX_IPS_EN; - reg |= tbl << IXGBE_RXIDX_TBL_SHIFT | - idx << IXGBE_RXTXIDX_IDX_SHIFT | - IXGBE_RXTXIDX_WRITE; - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info - * @hw: hw specific details - * @idx: register index to write - * @spi: security parameter index - * @key: key byte array - * @salt: salt bytes - * @mode: rx decrypt control bits - * @ip_idx: index into IP table for related IP address - **/ -static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, - u32 key[], u32 salt, u32 mode, u32 ip_idx) -{ - int i; - - /* store the SPI (in bigendian) and IPidx */ - IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, - (__force u32)cpu_to_le32((__force u32)spi)); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx); - IXGBE_WRITE_FLUSH(hw); - - ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl); - - /* store the key, salt, and mode */ - for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), - (__force u32)cpu_to_be32(key[3 - i])); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt)); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode); - IXGBE_WRITE_FLUSH(hw); - - ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl); -} - -/** - * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info - * @hw: hw specific details - * @idx: register index to write - * @addr: IP address byte array - **/ -static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) -{ - int i; - - /* store the ip address */ - for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), - (__force u32)cpu_to_le32((__force u32)addr[i])); - IXGBE_WRITE_FLUSH(hw); - - ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl); -} - -/** - * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset - * @adapter: board private structure - **/ -static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 buf[4] = {0, 0, 0, 0}; - u16 idx; - - /* disable Rx and Tx SA lookup */ - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); - IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); - - /* scrub the tables 
- split the loops for the max of the IP table */ - for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) { - ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); - ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); - ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf); - } - for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) { - ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); - ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); - } -} - -/** - * ixgbe_ipsec_stop_data - * @adapter: board private structure - **/ -static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - bool link = adapter->link_up; - u32 t_rdy, r_rdy; - u32 limit; - u32 reg; - - /* halt data paths */ - reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); - reg |= IXGBE_SECTXCTRL_TX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - reg |= IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); - - /* If both Tx and Rx are ready there are no packets - * that we need to flush so the loopback configuration - * below is not necessary. - */ - t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & - IXGBE_SECTXSTAT_SECTX_RDY; - r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & - IXGBE_SECRXSTAT_SECRX_RDY; - if (t_rdy && r_rdy) - return; - - /* If the tx fifo doesn't have link, but still has data, - * we can't clear the tx sec block. Set the MAC loopback - * before block clear - */ - if (!link) { - reg = IXGBE_READ_REG(hw, IXGBE_MACC); - reg |= IXGBE_MACC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); - reg |= IXGBE_HLREG0_LPBK; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); - - IXGBE_WRITE_FLUSH(hw); - mdelay(3); - } - - /* wait for the paths to empty */ - limit = 20; - do { - mdelay(10); - t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & - IXGBE_SECTXSTAT_SECTX_RDY; - r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & - IXGBE_SECRXSTAT_SECRX_RDY; - } while (!(t_rdy && r_rdy) && limit--); - - /* undo loopback if we played with it earlier */ - if (!link) { - reg = IXGBE_READ_REG(hw, IXGBE_MACC); - reg &= ~IXGBE_MACC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); - reg &= ~IXGBE_HLREG0_LPBK; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); - - IXGBE_WRITE_FLUSH(hw); - } -} - -/** - * ixgbe_ipsec_stop_engine - * @adapter: board private structure - **/ -static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg; - - ixgbe_ipsec_stop_data(adapter); - - /* disable Rx and Tx SA lookup */ - IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); - - /* disable the Rx and Tx engines and full packet store-n-forward */ - reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); - reg |= IXGBE_SECTXCTRL_SECTX_DIS; - reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD; - IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - reg |= IXGBE_SECRXCTRL_SECRX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); - - /* restore the "tx security buffer almost full threshold" to 0x250 */ - IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250); - - /* Set minimum IFG between packets back to the default 0x1 */ - reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - reg = (reg & 0xfffffff0) | 0x1; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); - - /* final set for normal (no ipsec offload) processing */ - IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS); - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS); - - IXGBE_WRITE_FLUSH(hw); -} - 
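The ixgbe_ipsec_stop_data() helper removed above drains the Tx and Rx security blocks with a bounded poll: it re-reads the SECTXSTAT/SECRXSTAT ready bits every 10 ms, gives up after 20 attempts instead of spinning forever, and forces MAC loopback first when there is no link so queued Tx frames can still drain. A minimal standalone sketch of that bounded-poll idiom follows; read_sectxstat(), read_secrxstat(), delay_ms() and the *_RDY masks are illustrative stand-ins, not the driver's API.

#include <stdbool.h>

/* Illustrative stand-ins for the driver's MMIO helpers. */
extern unsigned int read_sectxstat(void);
extern unsigned int read_secrxstat(void);
extern void delay_ms(unsigned int ms);

#define SECTX_RDY 0x1u	/* placeholder ready bits */
#define SECRX_RDY 0x1u

/*
 * Bounded poll: retry with a fixed delay and a finite budget rather
 * than waiting forever on hardware that may never report empty.
 */
static bool wait_security_paths_empty(void)
{
	unsigned int limit = 20;
	unsigned int t_rdy, r_rdy;

	do {
		delay_ms(10);
		t_rdy = read_sectxstat() & SECTX_RDY;
		r_rdy = read_secrxstat() & SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);

	return t_rdy && r_rdy;
}

Returning a bool is a small liberty taken by the sketch; the removed driver code simply falls through once the budget expires and relies on the engine disable that follows.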
-/** - * ixgbe_ipsec_start_engine - * @adapter: board private structure - * - * NOTE: this increases power consumption whether being used or not - **/ -static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg; - - ixgbe_ipsec_stop_data(adapter); - - /* Set minimum IFG between packets to 3 */ - reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - reg = (reg & 0xfffffff0) | 0x3; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); - - /* Set "tx security buffer almost full threshold" to 0x15 so that the - * almost full indication is generated only after buffer contains at - * least an entire jumbo packet. - */ - reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); - reg = (reg & 0xfffffc00) | 0x15; - IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg); - - /* restart the data paths by clearing the DISABLE bits */ - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); - IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD); - - /* enable Rx and Tx SA lookup */ - IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN); - - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset - * @adapter: board private structure - * - * Reload the HW tables from the SW tables after they've been bashed - * by a chip reset. - * - * Any VF entries are removed from the SW and HW tables since either - * (a) the VF also gets reset on PF reset and will ask again for the - * offloads, or (b) the VF has been removed by a change in the num_vfs. - **/ -void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) - return; - - /* clean up and restart the engine */ - ixgbe_ipsec_stop_engine(adapter); - ixgbe_ipsec_clear_hw_tables(adapter); - ixgbe_ipsec_start_engine(adapter); - - /* reload the Rx and Tx keys */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - struct rx_sa *r = &ipsec->rx_tbl[i]; - struct tx_sa *t = &ipsec->tx_tbl[i]; - - if (r->used) { - if (r->mode & IXGBE_RXTXMOD_VF) - ixgbe_ipsec_del_sa(r->xs); - else - ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, - r->key, r->salt, - r->mode, r->iptbl_ind); - } - - if (t->used) { - if (t->mode & IXGBE_RXTXMOD_VF) - ixgbe_ipsec_del_sa(t->xs); - else - ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); - } - } - - /* reload the IP addrs */ - for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) { - struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i]; - - if (ipsa->used) - ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr); - } -} - -/** - * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index - * @ipsec: pointer to ipsec struct - * @rxtable: true if we need to look in the Rx table - * - * Returns the first unused index in either the Rx or Tx SA table - **/ -static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable) -{ - u32 i; - - if (rxtable) { - if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) - return -ENOSPC; - - /* search rx sa table */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - if (!ipsec->rx_tbl[i].used) - return i; - } - } else { - if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) - return -ENOSPC; - - /* search tx sa table */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - if (!ipsec->tx_tbl[i].used) - return i; - } - } - - return -ENOSPC; -} - -/** - * ixgbe_ipsec_find_rx_state - find the state that matches - * @ipsec: pointer to ipsec 
struct - * @daddr: inbound address to match - * @proto: protocol to match - * @spi: SPI to match - * @ip4: true if using an ipv4 address - * - * Returns a pointer to the matching SA state information - **/ -static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, - __be32 *daddr, u8 proto, - __be32 spi, bool ip4) -{ - struct rx_sa *rsa; - struct xfrm_state *ret = NULL; - - rcu_read_lock(); - hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, - (__force u32)spi) { - if (rsa->mode & IXGBE_RXTXMOD_VF) - continue; - if (spi == rsa->xs->id.spi && - ((ip4 && *daddr == rsa->xs->id.daddr.a4) || - (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, - sizeof(rsa->xs->id.daddr.a6)))) && - proto == rsa->xs->id.proto) { - ret = rsa->xs; - xfrm_state_hold(ret); - break; - } - } - rcu_read_unlock(); - return ret; -} - -/** - * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol - * @xs: pointer to xfrm_state struct - * @mykey: pointer to key array to populate - * @mysalt: pointer to salt value to populate - * - * This copies the protocol keys and salt to our own data tables. The - * 82599 family only supports the one algorithm. - **/ -static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, - u32 *mykey, u32 *mysalt) -{ - struct net_device *dev = xs->xso.dev; - unsigned char *key_data; - char *alg_name = NULL; - int key_len; - - if (!xs->aead) { - netdev_err(dev, "Unsupported IPsec algorithm\n"); - return -EINVAL; - } - - if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { - netdev_err(dev, "IPsec offload requires %d bit authentication\n", - IXGBE_IPSEC_AUTH_BITS); - return -EINVAL; - } - - key_data = &xs->aead->alg_key[0]; - key_len = xs->aead->alg_key_len; - alg_name = xs->aead->alg_name; - - if (strcmp(alg_name, aes_gcm_name)) { - netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", - aes_gcm_name); - return -EINVAL; - } - - /* The key bytes come down in a bigendian array of bytes, so - * we don't need to do any byteswapping. - * 160 accounts for 16 byte key and 4 byte salt - */ - if (key_len == IXGBE_IPSEC_KEY_BITS) { - *mysalt = ((u32 *)key_data)[4]; - } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) { - netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); - return -EINVAL; - } else { - netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n"); - *mysalt = 0; - } - memcpy(mykey, key_data, 16); - - return 0; -} - -/** - * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters - * @xs: pointer to transformer state struct - **/ -static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) -{ - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - u32 mfval, manc, reg; - int num_filters = 4; - bool manc_ipv4; - u32 bmcipval; - int i, j; - -#define MANC_EN_IPV4_FILTER BIT(24) -#define MFVAL_IPV4_FILTER_SHIFT 16 -#define MFVAL_IPV6_FILTER_SHIFT 24 -#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4)) - -#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4)) -#define IXGBE_BMCIPVAL 0x5060 -#define BMCIP_V4 0x2 -#define BMCIP_V6 0x3 -#define BMCIP_MASK 0x3 - - manc = IXGBE_READ_REG(hw, IXGBE_MANC); - manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); - mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); - bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL); - - if (xs->props.family == AF_INET) { - /* are there any IPv4 filters to check? 
*/ - if (manc_ipv4) { - /* the 4 ipv4 filters are all in MIPAF(3, i) */ - for (i = 0; i < num_filters; i++) { - if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i))) - continue; - - reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i)); - if (reg == xs->id.daddr.a4) - return 1; - } - } - - if ((bmcipval & BMCIP_MASK) == BMCIP_V4) { - reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3)); - if (reg == xs->id.daddr.a4) - return 1; - } - - } else { - /* if there are ipv4 filters, they are in the last ipv6 slot */ - if (manc_ipv4) - num_filters = 3; - - for (i = 0; i < num_filters; i++) { - if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i))) - continue; - - for (j = 0; j < 4; j++) { - reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j)); - if (reg != xs->id.daddr.a6[j]) - break; - } - if (j == 4) /* did we match all 4 words? */ - return 1; - } - - if ((bmcipval & BMCIP_MASK) == BMCIP_V6) { - for (j = 0; j < 4; j++) { - reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j)); - if (reg != xs->id.daddr.a6[j]) - break; - } - if (j == 4) /* did we match all 4 words? */ - return 1; - } - } - - return 0; -} - -/** - * ixgbe_ipsec_add_sa - program device with a security association - * @xs: pointer to transformer state struct - **/ -static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) -{ - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct ixgbe_hw *hw = &adapter->hw; - int checked, match, first; - u16 sa_idx; - int ret; - int i; - - if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { - netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", - xs->id.proto); - return -EINVAL; - } - - if (ixgbe_ipsec_check_mgmt_ip(xs)) { - netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); - return -EINVAL; - } - - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { - struct rx_sa rsa; - - if (xs->calg) { - netdev_err(dev, "Compression offload not supported\n"); - return -EINVAL; - } - - /* find the first unused index */ - ret = ixgbe_ipsec_find_empty_idx(ipsec, true); - if (ret < 0) { - netdev_err(dev, "No space for SA in Rx table!\n"); - return ret; - } - sa_idx = (u16)ret; - - memset(&rsa, 0, sizeof(rsa)); - rsa.used = true; - rsa.xs = xs; - - if (rsa.xs->id.proto & IPPROTO_ESP) - rsa.decrypt = xs->ealg || xs->aead; - - /* get the key and salt */ - ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); - if (ret) { - netdev_err(dev, "Failed to get key data for Rx SA table\n"); - return ret; - } - - /* get ip for rx sa table */ - if (xs->props.family == AF_INET6) - memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); - else - memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); - - /* The HW does not have a 1:1 mapping from keys to IP addrs, so - * check for a matching IP addr entry in the table. If the addr - * already exists, use it; else find an unused slot and add the - * addr. If one does not exist and there are no unused table - * entries, fail the request. - */ - - /* Find an existing match or first not used, and stop looking - * after we've checked all we know we have. 
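The search the comment above describes, and which the loop just below implements, is a common "match or first free" scan: walk the table once, return an existing matching slot if one exists, otherwise the first unused slot seen. A minimal userspace sketch of the pattern (the ip_entry type, table size, and 16-byte address width are illustrative, not the driver's exact types):

#include <stdbool.h>
#include <string.h>

#define TBL_SIZE 128

struct ip_entry {			/* illustrative stand-in for rx_ip_sa */
	unsigned char addr[16];
	bool used;
};

/* Return a matching slot if one exists, else the first free slot seen,
 * else -1.  num_used lets the scan stop early once every in-use entry
 * has been checked and some free slot has been found.
 */
static int match_or_first_free(const struct ip_entry *tbl, int num_used,
			       const unsigned char *addr)
{
	int checked = 0, first = -1, i;

	for (i = 0; i < TBL_SIZE && (checked < num_used || first < 0); i++) {
		if (tbl[i].used) {
			if (!memcmp(tbl[i].addr, addr, 16))
				return i;	/* reuse the existing entry */
			checked++;
		} else if (first < 0) {
			first = i;		/* remember the first empty slot */
		}
	}
	return first;				/* -1 means table full, no match */
}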
- */ - checked = 0; - match = -1; - first = -1; - for (i = 0; - i < IXGBE_IPSEC_MAX_RX_IP_COUNT && - (checked < ipsec->num_rx_sa || first < 0); - i++) { - if (ipsec->ip_tbl[i].used) { - if (!memcmp(ipsec->ip_tbl[i].ipaddr, - rsa.ipaddr, sizeof(rsa.ipaddr))) { - match = i; - break; - } - checked++; - } else if (first < 0) { - first = i; /* track the first empty seen */ - } - } - - if (ipsec->num_rx_sa == 0) - first = 0; - - if (match >= 0) { - /* addrs are the same, we should use this one */ - rsa.iptbl_ind = match; - ipsec->ip_tbl[match].ref_cnt++; - - } else if (first >= 0) { - /* no matches, but here's an empty slot */ - rsa.iptbl_ind = first; - - memcpy(ipsec->ip_tbl[first].ipaddr, - rsa.ipaddr, sizeof(rsa.ipaddr)); - ipsec->ip_tbl[first].ref_cnt = 1; - ipsec->ip_tbl[first].used = true; - - ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr); - - } else { - /* no match and no empty slot */ - netdev_err(dev, "No space for SA in Rx IP SA table\n"); - memset(&rsa, 0, sizeof(rsa)); - return -ENOSPC; - } - - rsa.mode = IXGBE_RXMOD_VALID; - if (rsa.xs->id.proto & IPPROTO_ESP) - rsa.mode |= IXGBE_RXMOD_PROTO_ESP; - if (rsa.decrypt) - rsa.mode |= IXGBE_RXMOD_DECRYPT; - if (rsa.xs->props.family == AF_INET6) - rsa.mode |= IXGBE_RXMOD_IPV6; - - /* the preparations worked, so save the info */ - memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); - - ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key, - rsa.salt, rsa.mode, rsa.iptbl_ind); - xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; - - ipsec->num_rx_sa++; - - /* hash the new entry for faster search in Rx path */ - hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, - (__force u32)rsa.xs->id.spi); - } else { - struct tx_sa tsa; - - if (adapter->num_vfs && - adapter->bridge_mode != BRIDGE_MODE_VEPA) - return -EOPNOTSUPP; - - /* find the first unused index */ - ret = ixgbe_ipsec_find_empty_idx(ipsec, false); - if (ret < 0) { - netdev_err(dev, "No space for SA in Tx table\n"); - return ret; - } - sa_idx = (u16)ret; - - memset(&tsa, 0, sizeof(tsa)); - tsa.used = true; - tsa.xs = xs; - - if (xs->id.proto & IPPROTO_ESP) - tsa.encrypt = xs->ealg || xs->aead; - - ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); - if (ret) { - netdev_err(dev, "Failed to get key data for Tx SA table\n"); - memset(&tsa, 0, sizeof(tsa)); - return ret; - } - - /* the preparations worked, so save the info */ - memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); - - ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt); - - xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; - - ipsec->num_tx_sa++; - } - - /* enable the engine if not already warmed up */ - if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) { - ixgbe_ipsec_start_engine(adapter); - adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED; - } - - return 0; -} - -/** - * ixgbe_ipsec_del_sa - clear out this specific SA - * @xs: pointer to transformer state struct - **/ -static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) -{ - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct ixgbe_hw *hw = &adapter->hw; - u32 zerobuf[4] = {0, 0, 0, 0}; - u16 sa_idx; - - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { - struct rx_sa *rsa; - u8 ipi; - - sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; - rsa = &ipsec->rx_tbl[sa_idx]; - - if (!rsa->used) { - netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", - sa_idx, xs->xso.offload_handle); - return; - } - - 
ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0); - hash_del_rcu(&rsa->hlist); - - /* if the IP table entry is referenced by only this SA, - * i.e. ref_cnt is only 1, clear the IP table entry as well - */ - ipi = rsa->iptbl_ind; - if (ipsec->ip_tbl[ipi].ref_cnt > 0) { - ipsec->ip_tbl[ipi].ref_cnt--; - - if (!ipsec->ip_tbl[ipi].ref_cnt) { - memset(&ipsec->ip_tbl[ipi], 0, - sizeof(struct rx_ip_sa)); - ixgbe_ipsec_set_rx_ip(hw, ipi, - (__force __be32 *)zerobuf); - } - } - - memset(rsa, 0, sizeof(struct rx_sa)); - ipsec->num_rx_sa--; - } else { - sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; - - if (!ipsec->tx_tbl[sa_idx].used) { - netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", - sa_idx, xs->xso.offload_handle); - return; - } - - ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0); - memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); - ipsec->num_tx_sa--; - } - - /* if there are no SAs left, stop the engine to save energy */ - if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) { - adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED; - ixgbe_ipsec_stop_engine(adapter); - } -} - -/** - * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload - * @skb: current data packet - * @xs: pointer to transformer state struct - **/ -static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) -{ - if (xs->props.family == AF_INET) { - /* Offload with IPv4 options is not supported yet */ - if (ip_hdr(skb)->ihl != 5) - return false; - } else { - /* Offload with IPv6 extension headers is not support yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - - return true; -} - -static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { - .xdo_dev_state_add = ixgbe_ipsec_add_sa, - .xdo_dev_state_delete = ixgbe_ipsec_del_sa, - .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok, -}; - -/** - * ixgbe_ipsec_vf_clear - clear the tables of data for a VF - * @adapter: board private structure - * @vf: VF id to be removed - **/ -void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_ipsec *ipsec = adapter->ipsec; - int i; - - if (!ipsec) - return; - - /* search rx sa table */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) { - if (!ipsec->rx_tbl[i].used) - continue; - if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF && - ipsec->rx_tbl[i].vf == vf) - ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs); - } - - /* search tx sa table */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) { - if (!ipsec->tx_tbl[i].used) - continue; - if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF && - ipsec->tx_tbl[i].vf == vf) - ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs); - } -} - -/** - * ixgbe_ipsec_vf_add_sa - translate VF request to SA add - * @adapter: board private structure - * @msgbuf: The message buffer - * @vf: the VF index - * - * Make up a new xs and algorithm info from the data sent by the VF. - * We only need to sketch in just enough to set up the HW offload. - * Put the resulting offload_handle into the return message to the VF. 
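The Rx delete path at the top of this hunk is the usual reference-counted release of a shared table slot: drop one reference, and only when the count reaches zero clear the software entry and the hardware copy. The bare pattern, with hypothetical names:

#include <string.h>

struct shared_slot {
	unsigned int ref_cnt;
	unsigned char data[16];
};

/* Returns 1 when the last reference was dropped and the slot cleared. */
static int shared_slot_put(struct shared_slot *s)
{
	if (!s->ref_cnt)
		return 0;		/* defensive: nothing holds this slot */
	if (--s->ref_cnt)
		return 0;		/* other users remain; leave it alone */
	memset(s, 0, sizeof(*s));	/* last user gone: clear SW state... */
	return 1;			/* ...caller can now zero the HW copy */
}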
- * - * Returns 0 or error value - **/ -int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) -{ - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct xfrm_algo_desc *algo; - struct sa_mbx_msg *sam; - struct xfrm_state *xs; - size_t aead_len; - u16 sa_idx; - u32 pfsa; - int err; - - sam = (struct sa_mbx_msg *)(&msgbuf[1]); - if (!adapter->vfinfo[vf].trusted || - !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) { - e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf); - err = -EACCES; - goto err_out; - } - - /* Tx IPsec offload doesn't seem to work on this - * device, so block these requests for now. - */ - if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { - err = -EOPNOTSUPP; - goto err_out; - } - - xs = kzalloc(sizeof(*xs), GFP_KERNEL); - if (unlikely(!xs)) { - err = -ENOMEM; - goto err_out; - } - - xs->xso.flags = sam->flags; - xs->id.spi = sam->spi; - xs->id.proto = sam->proto; - xs->props.family = sam->family; - if (xs->props.family == AF_INET6) - memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6)); - else - memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); - xs->xso.dev = adapter->netdev; - - algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); - if (unlikely(!algo)) { - err = -ENOENT; - goto err_xs; - } - - aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; - xs->aead = kzalloc(aead_len, GFP_KERNEL); - if (unlikely(!xs->aead)) { - err = -ENOMEM; - goto err_xs; - } - - xs->props.ealgo = algo->desc.sadb_alg_id; - xs->geniv = algo->uinfo.aead.geniv; - xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS; - xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS; - memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key)); - memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); - - /* set up the HW offload */ - err = ixgbe_ipsec_add_sa(xs); - if (err) - goto err_aead; - - pfsa = xs->xso.offload_handle; - if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { - sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; - ipsec->rx_tbl[sa_idx].vf = vf; - ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; - } else { - sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; - ipsec->tx_tbl[sa_idx].vf = vf; - ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; - } - - msgbuf[1] = xs->xso.offload_handle; - - return 0; - -err_aead: - kzfree(xs->aead); -err_xs: - kzfree(xs); -err_out: - msgbuf[1] = err; - return err; -} - -/** - * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete - * @adapter: board private structure - * @msgbuf: The message buffer - * @vf: the VF index - * - * Given the offload_handle sent by the VF, look for the related SA table - * entry and use its xs field to call for a delete of the SA. - * - * Note: We silently ignore requests to delete entries that are already - * set to unused because when a VF is set to "DOWN", the PF first - * gets a reset and clears all the VF's entries; then the VF's - * XFRM stack sends individual deletes for each entry, which the - * reset already removed. In the future it might be good to try to - * optimize this so not so many unnecessary delete messages are sent. 
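The add path above also shows the kernel's standard goto-ladder error handling: each allocation failure jumps to a label that unwinds only what already succeeded, in reverse order, and the error code is staged before the jump. The shape in isolation (a userspace sketch with hypothetical resources, not driver code):

#include <stdlib.h>

struct res { int dummy; };

static int build_two_resources(struct res **a_out, struct res **b_out)
{
	struct res *a, *b;
	int err;

	a = calloc(1, sizeof(*a));
	if (!a) {
		err = -1;
		goto err_out;		/* nothing allocated yet */
	}

	b = calloc(1, sizeof(*b));
	if (!b) {
		err = -1;
		goto err_a;		/* only 'a' needs freeing */
	}

	*a_out = a;
	*b_out = b;
	return 0;

err_a:
	free(a);
err_out:
	return err;
}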
- * - * Returns 0 or error value - **/ -int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) -{ - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct xfrm_state *xs; - u32 pfsa = msgbuf[1]; - u16 sa_idx; - - if (!adapter->vfinfo[vf].trusted) { - e_err(drv, "vf %d attempted to delete an SA\n", vf); - return -EPERM; - } - - if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { - struct rx_sa *rsa; - - sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; - if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { - e_err(drv, "vf %d SA index %d out of range\n", - vf, sa_idx); - return -EINVAL; - } - - rsa = &ipsec->rx_tbl[sa_idx]; - - if (!rsa->used) - return 0; - - if (!(rsa->mode & IXGBE_RXTXMOD_VF) || - rsa->vf != vf) { - e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx); - return -ENOENT; - } - - xs = ipsec->rx_tbl[sa_idx].xs; - } else { - struct tx_sa *tsa; - - sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; - if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { - e_err(drv, "vf %d SA index %d out of range\n", - vf, sa_idx); - return -EINVAL; - } - - tsa = &ipsec->tx_tbl[sa_idx]; - - if (!tsa->used) - return 0; - - if (!(tsa->mode & IXGBE_RXTXMOD_VF) || - tsa->vf != vf) { - e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx); - return -ENOENT; - } - - xs = ipsec->tx_tbl[sa_idx].xs; - } - - ixgbe_ipsec_del_sa(xs); - - /* remove the xs that was made-up in the add request */ - kzfree(xs); - - return 0; -} - -/** - * ixgbe_ipsec_tx - setup Tx flags for ipsec offload - * @tx_ring: outgoing context - * @first: current data packet - * @itd: ipsec Tx data for later use in building context descriptor - **/ -int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd) -{ - struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct xfrm_state *xs; - struct sec_path *sp; - struct tx_sa *tsa; - - sp = skb_sec_path(first->skb); - if (unlikely(!sp->len)) { - netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", - __func__, sp->len); - return 0; - } - - xs = xfrm_input_state(first->skb); - if (unlikely(!xs)) { - netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", - __func__, xs); - return 0; - } - - itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; - if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { - netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", - __func__, itd->sa_idx, xs->xso.offload_handle); - return 0; - } - - tsa = &ipsec->tx_tbl[itd->sa_idx]; - if (unlikely(!tsa->used)) { - netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", - __func__, itd->sa_idx); - return 0; - } - - first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC; - - if (xs->id.proto == IPPROTO_ESP) { - - itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | - IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) - itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; - - /* The actual trailer length is authlen (16 bytes) plus - * 2 bytes for the proto and the padlen values, plus - * padlen bytes of padding. This ends up not the same - * as the static value found in xs->props.trailer_len (21). - * - * ... but if we're doing GSO, don't bother as the stack - * doesn't add a trailer for those. - */ - if (!skb_is_gso(first->skb)) { - /* The "correct" way to get the auth length would be - * to use - * authlen = crypto_aead_authsize(xs->data); - * but since we know we only have one size to worry - * about * we can let the compiler use the constant - * and save us a few CPU cycles. 
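For the trailer math discussed above and computed just below: an ESP packet ends with padlen bytes of padding, a one-byte pad length, a one-byte next-header field, and then the ICV, so the trailer is authlen + 2 + padlen bytes, and the pad-length byte sits two bytes before the ICV. Spelled out as a helper (an authlen of 16 matches the 128-bit ICV used by this offload):

/* ESP trailer layout at the end of the payload:
 *   [ padding (padlen bytes) ][ pad length (1) ][ next header (1) ][ ICV ]
 * so the pad-length byte lives at offset skb->len - (authlen + 2).
 */
static unsigned int esp_trailer_len(unsigned int authlen, unsigned char padlen)
{
	return authlen + 2 + padlen;	/* e.g. 16 + 2 + padlen */
}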
- */ - const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; - struct sk_buff *skb = first->skb; - u8 padlen; - int ret; - - ret = skb_copy_bits(skb, skb->len - (authlen + 2), - &padlen, 1); - if (unlikely(ret)) - return 0; - itd->trailer_len = authlen + 2 + padlen; - } - } - if (tsa->encrypt) - itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; - - return 1; -} - -/** - * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor - * @rx_ring: receiving ring - * @rx_desc: receive data descriptor - * @skb: current data packet - * - * Determine if there was an ipsec encapsulation noticed, and if so set up - * the resulting status for later in the receive stack. - **/ -void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev); - __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | - IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct xfrm_offload *xo = NULL; - struct xfrm_state *xs = NULL; - struct ipv6hdr *ip6 = NULL; - struct iphdr *ip4 = NULL; - struct sec_path *sp; - void *daddr; - __be32 spi; - u8 *c_hdr; - u8 proto; - - /* Find the ip and crypto headers in the data. - * We can assume no vlan header in the way, b/c the - * hw won't recognize the IPsec packet and anyway the - * currently vlan device doesn't support xfrm offload. - */ - if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { - ip4 = (struct iphdr *)(skb->data + ETH_HLEN); - daddr = &ip4->daddr; - c_hdr = (u8 *)ip4 + ip4->ihl * 4; - } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { - ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); - daddr = &ip6->daddr; - c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); - } else { - return; - } - - switch (pkt_info & ipsec_pkt_types) { - case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): - spi = ((struct ip_auth_hdr *)c_hdr)->spi; - proto = IPPROTO_AH; - break; - case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): - spi = ((struct ip_esp_hdr *)c_hdr)->spi; - proto = IPPROTO_ESP; - break; - default: - return; - } - - xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); - if (unlikely(!xs)) - return; - - sp = secpath_set(skb); - if (unlikely(!sp)) - return; - - sp->xvec[sp->len++] = xs; - sp->olen++; - xo = xfrm_offload(skb); - xo->flags = CRYPTO_DONE; - xo->status = CRYPTO_SUCCESS; - - adapter->rx_ipsec++; -} - -/** - * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation - * @adapter: board private structure - **/ -void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ipsec *ipsec; - u32 t_dis, r_dis; - size_t size; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - /* If there is no support for either Tx or Rx offload - * we should not be advertising support for IPsec. 
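The Rx decode earlier in this hunk pulls the SPI from a different offset depending on the protocol: ESP keeps it in the first four bytes of its header, while AH places it after the next-header, length, and reserved fields. A userspace sketch of the two layouts (simplified from the uapi struct ip_esp_hdr and struct ip_auth_hdr; the returned value stays in network byte order, as in the driver):

#include <stdint.h>

struct esp_hdr {		/* cf. struct ip_esp_hdr */
	uint32_t spi;		/* SPI at offset 0 */
	uint32_t seq_no;
};

struct auth_hdr {		/* cf. struct ip_auth_hdr */
	uint8_t  nexthdr;
	uint8_t  hdrlen;
	uint16_t reserved;
	uint32_t spi;		/* SPI at offset 4 */
	uint32_t seq_no;
};

static uint32_t spi_from_crypto_hdr(const void *c_hdr, int is_esp)
{
	return is_esp ? ((const struct esp_hdr *)c_hdr)->spi
		      : ((const struct auth_hdr *)c_hdr)->spi;
}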
- */ - t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & - IXGBE_SECTXSTAT_SECTX_OFF_DIS; - r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & - IXGBE_SECRXSTAT_SECRX_OFF_DIS; - if (t_dis || r_dis) - return; - - ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); - if (!ipsec) - goto err1; - hash_init(ipsec->rx_sa_list); - - size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; - ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); - if (!ipsec->rx_tbl) - goto err2; - - size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; - ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); - if (!ipsec->tx_tbl) - goto err2; - - size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT; - ipsec->ip_tbl = kzalloc(size, GFP_KERNEL); - if (!ipsec->ip_tbl) - goto err2; - - ipsec->num_rx_sa = 0; - ipsec->num_tx_sa = 0; - - adapter->ipsec = ipsec; - ixgbe_ipsec_stop_engine(adapter); - ixgbe_ipsec_clear_hw_tables(adapter); - - adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; - - return; - -err2: - kfree(ipsec->ip_tbl); - kfree(ipsec->rx_tbl); - kfree(ipsec->tx_tbl); - kfree(ipsec); -err1: - netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); -} - -/** - * ixgbe_stop_ipsec_offload - tear down the ipsec offload - * @adapter: board private structure - **/ -void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ipsec *ipsec = adapter->ipsec; - - adapter->ipsec = NULL; - if (ipsec) { - kfree(ipsec->ip_tbl); - kfree(ipsec->rx_tbl); - kfree(ipsec->tx_tbl); - kfree(ipsec); - } -} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h deleted file mode 100644 index d2b64ff8eb4e..000000000000 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ /dev/null @@ -1,83 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. 
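Before leaving ixgbe_ipsec.c: the single err2 label in the init path above works because the table pointers inside the kzalloc'd ipsec struct are still NULL when a later allocation fails, and kfree(NULL) is a no-op, so one label can free all three tables regardless of which allocation failed. The same idea in plain C, where free(NULL) is equally well defined (names and sizes are illustrative):

#include <stdlib.h>

struct ctx { int *a, *b, *c; };

static struct ctx *ctx_alloc(void)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));	/* zeroes a, b, c */

	if (!ctx)
		return NULL;
	ctx->a = calloc(16, sizeof(int));
	if (!ctx->a)
		goto err;
	ctx->b = calloc(16, sizeof(int));
	if (!ctx->b)
		goto err;
	ctx->c = calloc(16, sizeof(int));
	if (!ctx->c)
		goto err;
	return ctx;
err:
	free(ctx->a);	/* free(NULL) is harmless, so no ordering games */
	free(ctx->b);
	free(ctx->c);
	free(ctx);
	return NULL;
}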
*/ - -#ifndef _IXGBE_IPSEC_H_ -#define _IXGBE_IPSEC_H_ - -#define IXGBE_IPSEC_MAX_SA_COUNT 1024 -#define IXGBE_IPSEC_MAX_RX_IP_COUNT 128 -#define IXGBE_IPSEC_BASE_RX_INDEX 0 -#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT -#define IXGBE_IPSEC_AUTH_BITS 128 - -#define IXGBE_RXTXIDX_IPS_EN 0x00000001 -#define IXGBE_RXIDX_TBL_SHIFT 1 -enum ixgbe_ipsec_tbl_sel { - ips_rx_ip_tbl = 0x01, - ips_rx_spi_tbl = 0x02, - ips_rx_key_tbl = 0x03, -}; - -#define IXGBE_RXTXIDX_IDX_SHIFT 3 -#define IXGBE_RXTXIDX_READ 0x40000000 -#define IXGBE_RXTXIDX_WRITE 0x80000000 - -#define IXGBE_RXMOD_VALID 0x00000001 -#define IXGBE_RXMOD_PROTO_ESP 0x00000004 -#define IXGBE_RXMOD_DECRYPT 0x00000008 -#define IXGBE_RXMOD_IPV6 0x00000010 -#define IXGBE_RXTXMOD_VF 0x00000020 - -struct rx_sa { - struct hlist_node hlist; - struct xfrm_state *xs; - __be32 ipaddr[4]; - u32 key[4]; - u32 salt; - u32 mode; - u8 iptbl_ind; - bool used; - bool decrypt; - u32 vf; -}; - -struct rx_ip_sa { - __be32 ipaddr[4]; - u32 ref_cnt; - bool used; -}; - -struct tx_sa { - struct xfrm_state *xs; - u32 key[4]; - u32 salt; - u32 mode; - bool encrypt; - bool used; - u32 vf; -}; - -struct ixgbe_ipsec_tx_data { - u32 flags; - u16 trailer_len; - u16 sa_idx; -}; - -struct ixgbe_ipsec { - u16 num_rx_sa; - u16 num_tx_sa; - struct rx_ip_sa *ip_tbl; - struct rx_sa *rx_tbl; - struct tx_sa *tx_tbl; - DECLARE_HASHTABLE(rx_sa_list, 10); -}; - -struct sa_mbx_msg { - __be32 spi; - u8 flags; - u8 proto; - u16 family; - __be32 addr[4]; - u32 key[5]; -}; -#endif /* _IXGBE_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index cc3196ae5aea..cb9e3fee75d9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,58 +1,83 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_sriov.h" -#ifdef CONFIG_IXGBE_DCB +#ifdef HAVE_TX_MQ /** - * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV + * ixgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq * @adapter: board private structure to initialize * - * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * Cache the descriptor ring offsets for VMDq to the assigned rings. It * will also try to cache the proper offsets if RSS/FCoE are enabled along * with VMDq. 
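One detail from the ixgbe_ipsec.h block above: because IXGBE_IPSEC_BASE_RX_INDEX is 0 and IXGBE_IPSEC_BASE_TX_INDEX equals the SA table size, a single offload_handle value encodes both the direction and the table slot, and the delete and Tx paths recover the index by subtracting the right base. Hedged helpers making that encoding explicit (constants copied from the header; the function names are illustrative, not driver API):

#include <stdint.h>
#include <stdbool.h>

#define MAX_SA_COUNT	1024
#define BASE_RX_INDEX	0
#define BASE_TX_INDEX	MAX_SA_COUNT

static unsigned long handle_encode(uint16_t sa_idx, bool is_rx)
{
	return sa_idx + (is_rx ? BASE_RX_INDEX : BASE_TX_INDEX);
}

static uint16_t handle_decode(unsigned long handle, bool *is_rx)
{
	*is_rx = handle < BASE_TX_INDEX;	/* Rx handles sit below the Tx base */
	return *is_rx ? handle - BASE_RX_INDEX : handle - BASE_TX_INDEX;
}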
* **/ -static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) +static bool ixgbe_cache_ring_dcb_vmdq(struct ixgbe_adapter *adapter) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; int i; - u16 reg_idx, pool; - u8 tcs = adapter->hw_tcs; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); - /* verify we have DCB queueing enabled before proceeding */ + /* verify we have DCB enabled before proceeding */ if (tcs <= 1) return false; /* verify we have VMDq enabled before proceeding */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; - /* start at VMDq register offset for SR-IOV enabled setups */ - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) { - pool++; - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* + * The bits on the 82598 are reversed compared to the other + * adapters. The DCB bits are the higher order bits and the + * lower bits belong to the VMDq pool. In order to sort + * this out we have to swap the bits to get the correct layout + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + reg_idx = ((i >> 3) | (i << 3)) & 0x3F; + adapter->rx_ring[i]->reg_idx = reg_idx; } - adapter->rx_ring[i]->reg_idx = reg_idx; - adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev; + for (i = 0; i < adapter->num_tx_queues; i++) { + reg_idx = ((i >> 4) | (i << 2)) & 0x1F; + adapter->tx_ring[i]->reg_idx = reg_idx; + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + } + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + + break; + default: + break; } - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->tx_ring[i]->reg_idx = reg_idx; - } - -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* nothing to do if FCoE is disabled */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return true; @@ -70,7 +95,6 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; adapter->rx_ring[i]->reg_idx = reg_idx; - adapter->rx_ring[i]->netdev = adapter->netdev; reg_idx++; } @@ -81,8 +105,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter 
*adapter) reg_idx++; } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ return true; } @@ -91,7 +115,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, unsigned int *tx, unsigned int *rx) { struct ixgbe_hw *hw = &adapter->hw; - u8 num_tcs = adapter->hw_tcs; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); *tx = 0; *rx = 0; @@ -106,7 +131,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 @@ -146,11 +171,11 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, **/ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { - u8 num_tcs = adapter->hw_tcs; - unsigned int tx_idx, rx_idx; int tc, offset, rss_i, i; + unsigned int tx_idx, rx_idx; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); - /* verify we have DCB queueing enabled before proceeding */ if (num_tcs <= 1) return false; @@ -161,7 +186,6 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { adapter->tx_ring[offset + i]->reg_idx = tx_idx; adapter->rx_ring[offset + i]->reg_idx = rx_idx; - adapter->rx_ring[offset + i]->netdev = adapter->netdev; adapter->tx_ring[offset + i]->dcb_tc = tc; adapter->rx_ring[offset + i]->dcb_tc = tc; } @@ -170,23 +194,24 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) return true; } -#endif +#endif /* HAVE_TX_MQ */ /** - * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq * @adapter: board private structure to initialize * - * SR-IOV doesn't use any descriptor rings but changes the default if - * no other mapping is used. + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. * - */ -static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) + **/ +static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; - u16 reg_idx, pool; + u16 reg_idx; int i; /* only proceed if VMDq is enabled */ @@ -194,34 +219,30 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) return false; /* start at VMDq register offset for SR-IOV enabled setups */ - pool = 0; reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* Allow first FCoE queue to be mapped as RSS */ if (fcoe->offset && (i > fcoe->offset)) break; -#endif +#endif /* CONFIG_FCOE */ /* If we are greater than indices move to next pool */ if ((reg_idx & ~vmdq->mask) >= rss->indices) { - pool++; reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); } adapter->rx_ring[i]->reg_idx = reg_idx; - adapter->rx_ring[i]->netdev = pool ?
NULL : adapter->netdev; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE uses a linear block of queues so just assigning 1:1 */ for (; i < adapter->num_rx_queues; i++, reg_idx++) { adapter->rx_ring[i]->reg_idx = reg_idx; - adapter->rx_ring[i]->netdev = adapter->netdev; } +#endif /* CONFIG_FCOE */ -#endif reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* Allow first FCoE queue to be mapped as RSS */ if (fcoe->offset && (i > fcoe->offset)) break; @@ -232,12 +253,11 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) adapter->tx_ring[i]->reg_idx = reg_idx; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE uses a linear block of queues so just assigning 1:1 */ for (; i < adapter->num_tx_queues; i++, reg_idx++) adapter->tx_ring[i]->reg_idx = reg_idx; - -#endif +#endif /* CONFIG_FCOE */ return true; } @@ -246,7 +266,7 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS * @adapter: board private structure to initialize * - * Cache the descriptor ring offsets for RSS to the assigned rings. + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. * **/ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) @@ -255,7 +275,6 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { adapter->rx_ring[i]->reg_idx = i; - adapter->rx_ring[i]->netdev = adapter->netdev; } for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) adapter->tx_ring[i]->reg_idx = reg_idx; @@ -278,19 +297,15 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) **/ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) { - /* start with default case */ - adapter->rx_ring[0]->reg_idx = 0; - adapter->tx_ring[0]->reg_idx = 0; - -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_cache_ring_dcb_sriov(adapter)) +#ifdef HAVE_TX_MQ + if (ixgbe_cache_ring_dcb_vmdq(adapter)) return; if (ixgbe_cache_ring_dcb(adapter)) return; #endif - if (ixgbe_cache_ring_sriov(adapter)) + if (ixgbe_cache_ring_vmdq(adapter)) return; ixgbe_cache_ring_rss(adapter); @@ -298,7 +313,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) { +#ifdef HAVE_XDP_SUPPORT return adapter->xdp_prog ? nr_cpu_ids : 0; +#else + return 0; +#endif } #define IXGBE_RSS_64Q_MASK 0x3F @@ -308,58 +327,75 @@ static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) #define IXGBE_RSS_2Q_MASK 0x1 #define IXGBE_RSS_DISABLED_MASK 0x0 -#ifdef CONFIG_IXGBE_DCB +#ifdef HAVE_TX_MQ /** - * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * ixgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When VMDq (Virtual Machine Device Queues) is enabled, allocate queues * and VM pools where appropriate. Also assign queues based on DCB * priorities and map accordingly..
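The pool arithmetic running through these hunks leans on the kernel's __ALIGN_MASK(x, mask), which is ((x + mask) & ~mask). With a VMDq queue mask like the 82599's four-queues-per-pool value (0x7C, assumed here purely for illustration), __ALIGN_MASK(1, ~mask) yields the pool stride, and aligning reg_idx by the same mask snaps it to the start of the next pool. A small standalone check of that arithmetic:

#include <stdio.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int vmdq_mask = 0x7C;	/* 4 queues per pool: low two bits free */

	/* pool stride: rounds 1 up to the pool size */
	printf("stride = %u\n", __ALIGN_MASK(1, ~vmdq_mask));		/* prints 4 */

	/* snap a mid-pool reg_idx to the start of the next pool */
	printf("next pool = %u\n", __ALIGN_MASK(5, ~vmdq_mask));	/* prints 8 */
	return 0;
}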
* **/ -static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) +static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter) { int i; u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; u16 vmdq_m = 0; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) u16 fcoe_i = 0; #endif - u8 tcs = adapter->hw_tcs; + u8 tcs = netdev_get_num_tc(adapter->netdev); - /* verify we have DCB queueing enabled before proceeding */ + /* verify we have DCB enabled before proceeding */ if (tcs <= 1) return false; /* verify we have VMDq enabled before proceeding */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; - /* limit VMDq instances on the PF by number of Tx queues */ - vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* 4 pools w/ 8TC per pool */ + vmdq_i = min_t(u16, vmdq_i, 4); + vmdq_m = 0x7; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: - /* Add starting offset to total pool count */ - vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; - /* 16 pools w/ 8 TC per pool */ - if (tcs > 4) { - vmdq_i = min_t(u16, vmdq_i, 16); - vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; - /* 32 pools w/ 4 TC per pool */ - } else { - vmdq_i = min_t(u16, vmdq_i, 32); - vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; +#endif /* CONFIG_FCOE */ + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + break; + default: + /* unknown hardware, only support one pool w/ one queue */ + vmdq_i = 1; + tcs = 1; + break; } -#ifdef IXGBE_FCOE - /* queues in the remaining pools are available for FCoE */ - fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; - -#endif - /* remove the starting offset from the pool count */ - vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; - /* save features for later use */ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; @@ -371,9 +407,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = 1; adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; - /* disable ATR as it is not supported when VMDq is enabled */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->num_rx_pools = vmdq_i; adapter->num_rx_queues_per_pool = tcs; @@ -381,7 +414,10 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_xdp_queues = 0; adapter->num_rx_queues = vmdq_i * tcs; -#ifdef IXGBE_FCOE + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *fcoe; @@ -409,8 +445,8 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) fcoe->offset = 0; } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ /* configure TC to queue mapping 
*/ for (i = 0; i < tcs; i++) netdev_set_tc_queue(adapter->netdev, i, 1, i); @@ -418,6 +454,17 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) return true; } +/** + * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * @adapter: board private structure to initialize + * + * When DCB (Data Center Bridging) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available, then abort DCB + * initialization. + * + * This function handles all combinations of DCB, RSS, and FCoE. + * + **/ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; @@ -426,14 +473,17 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) int tcs; /* Map queue offset and counts onto allocated tx queues */ - tcs = adapter->hw_tcs; + tcs = netdev_get_num_tc(dev); - /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) return false; /* determine the upper limit for our current DCB mode */ +#ifndef HAVE_NETDEV_SELECT_QUEUE + rss_i = adapter->indices; +#else rss_i = dev->num_tx_queues / tcs; +#endif if (adapter->hw.mac.type == ixgbe_mac_82598EB) { /* 8 TC w/ 4 queues per TC */ rss_i = min_t(u16, rss_i, 4); @@ -450,16 +500,17 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) /* set RSS mask and indices */ f = &adapter->ring_feature[RING_F_RSS]; - rss_i = min_t(int, rss_i, f->limit); + rss_i = min_t(u16, rss_i, f->limit); f->indices = rss_i; f->mask = rss_m; - /* disable ATR as it is not supported when multiple TCs are enabled */ + /* disable ATR as it is not supported when DCB is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; -#ifdef IXGBE_FCOE - /* FCoE enabled queues require special configuration indexed - * by feature specific indices and offset. Here we map FCoE +#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE * indices onto the DCB queue pairs allowing FCoE to own * configuration later. */ @@ -470,8 +521,8 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) f->indices = min_t(u16, rss_i, f->limit); f->offset = rss_i * tc; } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ for (i = 0; i < tcs; i++) netdev_set_tc_queue(dev, i, rss_i, rss_i * i); @@ -484,57 +535,82 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) #endif /** - * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices + * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When VMDq (Virtual Machine Device Queues) is enabled, allocate queues * and VM pools where appropriate. If RSS is available, then also try and * enable RSS and map accordingly.
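The netdev_set_tc_queue() loop above hands each traffic class a contiguous block of rss_i queues starting at offset rss_i * i. With 4 TCs and 16 RSS queues per TC (values assumed only to make the mapping concrete), the expansion looks like this:

#include <stdio.h>

int main(void)
{
	int tcs = 4, rss_i = 16, i;	/* assumed example values */

	/* mirrors netdev_set_tc_queue(dev, i, count, offset) */
	for (i = 0; i < tcs; i++)
		printf("tc %d -> count %d, offset %d\n", i, rss_i, rss_i * i);
	return 0;	/* tc 0 gets queues 0..15, tc 1 gets 16..31, ... */
}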
* **/ -static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) { u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; u16 vmdq_m = 0; u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; u16 rss_m = IXGBE_RSS_DISABLED_MASK; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) u16 fcoe_i = 0; #endif - /* only proceed if SR-IOV is enabled */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; - /* limit l2fwd RSS based on total Tx queue limit */ - rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + vmdq_i = min_t(u16, vmdq_i, 16); + /* 16 pool mode with 1 queue per pool */ + if ((vmdq_i > 4) || (rss_i == 1)) { + vmdq_m = 0x0F; + rss_i = 1; + /* 4 pool mode with 8 queues per pool */ + } else { + vmdq_m = 0x18; + rss_m = IXGBE_RSS_8Q_MASK; + rss_i = min_t(u16, rss_i, 8); + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: - /* Add starting offset to total pool count */ - vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; - /* double check we are limited to maximum pools */ - vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); - /* 64 pool mode with 2 queues per pool */ - if (vmdq_i > 32) { - vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; - rss_m = IXGBE_RSS_2Q_MASK; - rss_i = min_t(u16, rss_i, 2); - /* 32 pool mode with up to 4 queues per pool */ - } else { - vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; - rss_m = IXGBE_RSS_4Q_MASK; - /* We can support 4, 2, or 1 queues */ - rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ?
2 : 1; + } +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); #endif - /* remove the starting offset from the pool count */ - vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + break; + default: + /* unknown hardware, support one pool w/ one queue */ + vmdq_i = 1; + rss_i = 1; + break; + } /* save features for later use */ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; @@ -548,13 +624,17 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues_per_pool = rss_i; adapter->num_rx_queues = vmdq_i * rss_i; +#ifdef HAVE_TX_MQ adapter->num_tx_queues = vmdq_i * rss_i; +#else + adapter->num_tx_queues = vmdq_i; +#endif /* HAVE_TX_MQ */ adapter->num_xdp_queues = 0; /* disable ATR as it is not supported when VMDq is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* * FCoE can use rings from adjacent buffers to allow RSS * like behavior. To account for this we need to add the @@ -583,7 +663,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) /* attempt to reserve some queues for just FCoE */ fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); fcoe->offset = fcoe_i - fcoe->indices; - fcoe_i -= rss_i; } @@ -591,25 +670,12 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_tx_queues += fcoe_i; adapter->num_rx_queues += fcoe_i; } - -#endif - /* To support macvlan offload we have to use num_tc to - * restrict the queues that can be used by the device. - * By doing this we can avoid reporting a false number of - * queues. - */ - if (vmdq_i > 1) - netdev_set_num_tc(adapter->netdev, 1); - - /* populate TC0 for use by pool 0 */ - netdev_set_tc_queue(adapter->netdev, 0, - adapter->num_rx_queues_per_pool, 0); - +#endif /* CONFIG_FCOE */ return true; } /** - * ixgbe_set_rss_queues - Allocate queues for RSS + * ixgbe_set_rss_queues: Allocate queues for RSS * @adapter: board private structure to initialize * * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try @@ -627,7 +693,6 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - if (hw->mac.type < ixgbe_mac_X550) f->mask = IXGBE_RSS_16Q_MASK; else @@ -650,7 +715,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* * FCoE can exist on the same rings as standard network traffic * however it is preferred to avoid that if possible. In order @@ -678,17 +743,19 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) f->offset = fcoe_i - f->indices; rss_i = max_t(u16, fcoe_i, rss_i); } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ adapter->num_tx_queues = rss_i; +#endif adapter->num_xdp_queues = ixgbe_xdp_queues(adapter); return true; } -/** - * ixgbe_set_num_queues - Allocate queues for device, feature dependent +/* + * ixgbe_set_num_queues: Allocate queues for device, feature dependent * @adapter: board private structure to initialize * * This is the top level queue allocation routine. 
The order here is very @@ -704,18 +771,18 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_xdp_queues = 0; - adapter->num_rx_pools = 1; + adapter->num_rx_pools = adapter->num_rx_queues; adapter->num_rx_queues_per_pool = 1; -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_set_dcb_sriov_queues(adapter)) +#ifdef HAVE_TX_MQ + if (ixgbe_set_dcb_vmdq_queues(adapter)) return; if (ixgbe_set_dcb_queues(adapter)) return; #endif - if (ixgbe_set_sriov_queues(adapter)) + if (ixgbe_set_vmdq_queues(adapter)) return; ixgbe_set_rss_queues(adapter); @@ -734,6 +801,9 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int i, vectors, vector_threshold; + if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + /* We start by asking for one vector per queue pair with XDP queues * being stacked with TX queues. */ @@ -777,8 +847,7 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) if (vectors < 0) { /* A negative count of allocated vectors indicates an error in - * acquiring within the specified range of MSI-X vectors - */ + * acquiring within the specified range of MSI-X vectors */ e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", vectors); @@ -809,7 +878,6 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, ring->next = head->ring; head->ring = ring; head->count++; - head->next_update = jiffies + 1; } /** @@ -827,22 +895,26 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, - int v_count, int v_idx, - int txr_count, int txr_idx, - int xdp_count, int xdp_idx, - int rxr_count, int rxr_idx) + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int xdp_count, unsigned int xdp_idx, + unsigned int rxr_count, unsigned int rxr_idx) { struct ixgbe_q_vector *q_vector; struct ixgbe_ring *ring; - int node = NUMA_NO_NODE; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif int ring_count; - u8 tcs = adapter->hw_tcs; + /* note this will allocate space for the ring structure as well! 
*/ ring_count = txr_count + rxr_count + xdp_count; +#ifdef HAVE_IRQ_AFFINITY_HINT /* customize cpu for Flow Director mapping */ - if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) { u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; if (rss_i > 1 && adapter->atr_sample_rate) { if (cpu_online(v_idx)) { @@ -852,9 +924,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, } } +#endif /* allocate q_vector and rings */ q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count), GFP_KERNEL, node); + if (!q_vector) q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL); @@ -862,19 +936,29 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, return -ENOMEM; /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT if (cpu != -1) cpumask_set_cpu(cpu, &q_vector->affinity_mask); - q_vector->numa_node = node; +#endif + q_vector->node = node; -#ifdef CONFIG_IXGBE_DCA /* initialize CPU for DCA */ q_vector->cpu = -1; -#endif /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif +#ifdef HAVE_NDO_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); + +#endif /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; @@ -909,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, while (txr_count) { /* assign generic ring traits */ - ring->dev = &adapter->pdev->dev; + ring->dev = pci_dev_to_dev(adapter->pdev); ring->netdev = adapter->netdev; /* configure backlink on ring */ @@ -959,10 +1043,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* push pointer to next ring */ ring++; } - while (rxr_count) { /* assign generic ring traits */ - ring->dev = &adapter->pdev->dev; + ring->dev = pci_dev_to_dev(adapter->pdev); ring->netdev = adapter->netdev; /* configure backlink on ring */ @@ -978,16 +1061,18 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, if (adapter->hw.mac.type == ixgbe_mac_82599EB) set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); -#ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) { +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; - if ((rxr_idx >= f->offset) && - (rxr_idx < f->offset + f->indices)) - set_bit(__IXGBE_RX_FCOE, &ring->state); - } -#endif /* IXGBE_FCOE */ + if ((rxr_idx >= f->offset) && + (rxr_idx < f->offset + f->indices)) { + set_bit(__IXGBE_RX_FCOE, &ring->state); + } + } +#endif /* CONFIG_FCOE */ + /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; @@ -1031,13 +1116,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->rx_ring[ring->queue_index] = NULL; adapter->q_vector[v_idx] = NULL; +#ifdef HAVE_NDO_BUSY_POLL napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); - - /* - * ixgbe_get_stats64() might access the rings on this vector, - * we must wait a grace period before freeing it. 
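The comment being removed above is the rationale behind the kfree_rcu() call kept just below: lockless readers such as the stats path may still be walking the rings on this vector, so the q_vector can only be freed after an RCU grace period. The general shape of the pattern, as a kernel-style sketch with hypothetical names (kfree_rcu() and RCU_INIT_POINTER() are the real kernel APIs; my_vector and global_vector are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_vector {
	/* ... ring pointers walked by lockless readers ... */
	struct rcu_head rcu;	/* storage kfree_rcu() uses for the callback */
};

static struct my_vector __rcu *global_vector;

static void my_vector_free(struct my_vector *v)
{
	/* unpublish first so new readers cannot find the object */
	RCU_INIT_POINTER(global_vector, NULL);

	/* free only after a grace period; readers that already
	 * loaded the old pointer can finish walking it safely */
	kfree_rcu(v, rcu);
}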
- */ kfree_rcu(q_vector, rcu); } @@ -1050,22 +1132,20 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) **/ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) { - int q_vectors = adapter->num_q_vectors; - int rxr_remaining = adapter->num_rx_queues; - int txr_remaining = adapter->num_tx_queues; - int xdp_remaining = adapter->num_xdp_queues; - int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; - int err, i; - - /* only one q_vector if MSI-X is disabled. */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int xdp_remaining = adapter->num_xdp_queues; + unsigned int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; + int err; +#ifdef HAVE_AF_XDP_ZC_SUPPORT + int i; +#endif if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { for (; rxr_remaining; v_idx++) { err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 0, 0, 0, 0, 1, rxr_idx); - if (err) goto err_out; @@ -1097,6 +1177,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) xdp_idx += xqpv; } +#ifdef HAVE_AF_XDP_ZC_SUPPORT for (i = 0; i < adapter->num_rx_queues; i++) { if (adapter->rx_ring[i]) adapter->rx_ring[i]->ring_idx = i; @@ -1112,7 +1193,8 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) adapter->xdp_ring[i]->ring_idx = i; } - return 0; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ + return IXGBE_SUCCESS; err_out: adapter->num_tx_queues = 0; @@ -1147,7 +1229,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) ixgbe_free_q_vector(adapter, v_idx); } -static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; @@ -1167,7 +1249,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ -static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { int err; @@ -1181,7 +1263,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) */ /* Disable DCB unless we only have a single traffic class */ - if (adapter->hw_tcs > 1) { + if (netdev_get_num_tc(adapter->netdev) > 1) { e_dev_warn("Number of DCB TCs exceeds number of available queues. 
Disabling DCB support.\n"); netdev_reset_tc(adapter->netdev); @@ -1193,13 +1275,18 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->dcb_cfg.pfc_mode_enable = false; } - adapter->hw_tcs = 0; adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + /* Disable VMDq support */ + e_dev_warn("Disabling VMDq support\n"); + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + +#ifdef CONFIG_PCI_IOV /* Disable SR-IOV support */ e_dev_warn("Disabling SR-IOV support\n"); ixgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ /* Disable RSS */ e_dev_warn("Disabling RSS support\n"); @@ -1211,6 +1298,9 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; + if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) + return; + err = pci_enable_msi(adapter->pdev); if (err) e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", @@ -1239,26 +1329,29 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) /* Set interrupt mode */ ixgbe_set_interrupt_capability(adapter); + /* Allocate memory for queues */ err = ixgbe_alloc_q_vectors(adapter); if (err) { - e_dev_err("Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; + e_err(probe, "Unable to allocate memory for queue vectors\n"); + ixgbe_reset_interrupt_capability(adapter); + return err; } ixgbe_cache_ring_register(adapter); +#ifdef HAVE_XDP_SUPPORT e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues, adapter->num_xdp_queues); - +#else + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); +#endif set_bit(__IXGBE_DOWN, &adapter->state); - return 0; - -err_alloc_q_vectors: - ixgbe_reset_interrupt_capability(adapter); - return err; + return IXGBE_SUCCESS; } /** @@ -1279,7 +1372,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) } void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx) + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -1293,7 +1386,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 91b3780ddb04..1123fe5b12b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,78 +1,88 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ +/****************************************************************************** + Copyright (c)2006 - 2007 Myricom, Inc.
for some LRO specific code +******************************************************************************/ #include #include #include #include #include +#include #include #include -#include #include #include -#include #include #include -#include +#ifdef NETIF_F_TSO #include +#ifdef NETIF_F_TSO6 +#include #include -#include +#endif /* NETIF_F_TSO6 */ +#endif /* NETIF_F_TSO */ +#ifdef SIOCETHTOOL #include -#include -#include -#include +#endif + #include -#include +#include "ixgbe.h" +#ifdef HAVE_XDP_SUPPORT #include #include #include -#include -#include +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#include +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#include #include +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD +#include +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#ifdef NETIF_F_HW_TC #include #include #include -#include -#include -#include -#include +#endif /* NETIF_F_HW_TC */ -#include "ixgbe.h" -#include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" -#include "ixgbe_phy.h" #include "ixgbe_sriov.h" -#include "ixgbe_model.h" #include "ixgbe_txrx_common.h" +#ifdef HAVE_TC_SETUP_CLSU32 +#include "ixgbe_model.h" +#endif /* HAVE_TC_SETUP_CLSU32 */ -char ixgbe_driver_name[] = "ixgbe"; -static const char ixgbe_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Network Driver"; -#ifdef IXGBE_FCOE -char ixgbe_default_device_descr[] = - "Intel(R) 10 Gigabit Network Connection"; -#else -static char ixgbe_default_device_descr[] = - "Intel(R) 10 Gigabit Network Connection"; -#endif -#define DRV_VERSION "5.1.0-k" +#define DRV_HW_PERF + +#define FPGA + +#define DRIVERIOV + +#define BYPASS_TAG + +#define RELEASE_TAG + +#define DRV_VERSION "5.9.4" \ + DRIVERIOV DRV_HW_PERF FPGA \ + BYPASS_TAG RELEASE_TAG +#define DRV_SUMMARY "Intel(R) 10GbE PCI Express Linux Network Driver" const char ixgbe_driver_version[] = DRV_VERSION; -static const char ixgbe_copyright[] = - "Copyright (c) 1999-2016 Intel Corporation."; - -static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; - -static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, - [board_X540] = &ixgbe_X540_info, - [board_X550] = &ixgbe_X550_info, - [board_X550EM_x] = &ixgbe_X550EM_x_info, - [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, - [board_x550em_a] = &ixgbe_x550em_a_info, - [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, -}; +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +char ixgbe_driver_name[] = "ixgbe"; +#else +const char ixgbe_driver_name[] = "ixgbe"; +#endif +static const char ixgbe_driver_string[] = DRV_SUMMARY; +static const char ixgbe_copyright[] = "Copyright(c) 1999 - 2020 Intel Corporation."; +static const char ixgbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated. " + "Restart the computer. 
If the problem persists, " + "power off the system and replace the adapter"; /* ixgbe_pci_tbl - PCI Device ID Table * @@ -83,129 +93,118 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { * Class, Class Mask, private data (not used) } */ static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 0}, + {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_82598AF_SINGLE_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP_N), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), 0}, /* required last entry */ - {0, } + { .device = 0 } }; MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, void *p); static struct notifier_block dca_notifier = { - .notifier_call = ixgbe_notify_dca, - .next = NULL, - .priority = 0 + .notifier_call = ixgbe_notify_dca, + .next = NULL, + .priority = 0 }; -#endif - -#ifdef CONFIG_PCI_IOV -static unsigned int max_vfs; -module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, - "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. 
(Deprecated)"); -#endif /* CONFIG_PCI_IOV */ - -static unsigned int allow_unsupported_sfp; -module_param(allow_unsupported_sfp, uint, 0); -MODULE_PARM_DESC(allow_unsupported_sfp, - "Allow unsupported and untested SFP+ modules on 82599-based adapters"); - -#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +#endif /* CONFIG_DCA */ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + static struct workqueue_struct *ixgbe_wq; +static bool ixgbe_is_sfp(struct ixgbe_hw *hw); static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); -static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); -static const struct net_device_ops ixgbe_netdev_ops; - -static bool netif_is_ixgbe(struct net_device *dev) -{ - return dev && (dev->netdev_ops == &ixgbe_netdev_ops); -} - -static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, +static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_hw *hw, u32 reg, u16 *value) { + struct ixgbe_adapter *adapter = hw->back; struct pci_dev *parent_dev; struct pci_bus *parent_bus; + int pos; parent_bus = adapter->pdev->bus->parent; if (!parent_bus) - return -1; + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; parent_dev = parent_bus->self; if (!parent_dev) - return -1; + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - if (!pci_is_pcie(parent_dev)) - return -1; + pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP); + if (!pos) + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - pcie_capability_read_word(parent_dev, reg, value); + pci_read_config_word(parent_dev, pos + reg, value); if (*value == IXGBE_FAILED_READ_CFG_WORD && - ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) - return -1; - return 0; + ixgbe_check_cfg_remove(hw, parent_dev)) + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + return IXGBE_SUCCESS; } -static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) +/** + * ixgbe_get_parent_bus_info - Set PCI bus info beyond switch + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + * when the device is behind a switch. 
+ **/ +static s32 ixgbe_get_parent_bus_info(struct ixgbe_hw *hw) { - struct ixgbe_hw *hw = &adapter->hw; u16 link_status = 0; int err; @@ -214,40 +213,37 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) /* Get the negotiated link width and speed from PCI config space of the * parent, as this device is behind a switch */ - err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); + err = ixgbe_read_pci_cfg_word_parent(hw, 18, &link_status); - /* assume caller will handle error case */ + /* If the read fails, fallback to default */ if (err) - return err; + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); - hw->bus.width = ixgbe_convert_bus_width(link_status); - hw->bus.speed = ixgbe_convert_bus_speed(link_status); + ixgbe_set_pci_config_data_generic(hw, link_status); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * ixgbe_check_from_parent - determine whether to use parent for PCIe info * @hw: hw specific details * - * This function is used by probe to determine whether a device's PCI-Express - * bandwidth details should be gathered from the parent bus instead of from the - * device. Used to ensure that various locations all have the correct device ID - * checks. + * This function is used by probe to determine whether a device's PCIe info + * (speed, width, etc) should be obtained from the parent bus or directly. This + * is useful for specialized device configurations containing PCIe bridges. */ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) { switch (hw->device_id) { - case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599_SFP_SF_QP: return true; default: return false; } } -static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, - int expected_gts) +static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev; @@ -268,6 +264,51 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, pcie_print_link_status(pdev); } +/** + * ixgbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the physical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. + **/ +static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + /* Some cards can not use the generic count PCIe functions method, + * because they are behind a parent switch, so we hardcode these to + * the correct number of ports. + */ + if (ixgbe_pcie_from_parent(&adapter->hw)) { + physfns = 4; + } else { + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif + + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been directly + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. 
+ */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + } + + return physfns; +} + static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) { if (!test_bit(__IXGBE_DOWN, &adapter->state) && @@ -276,6 +317,15 @@ static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) queue_work(ixgbe_wq, &adapter->service_task); } +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + static void ixgbe_remove_adapter(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; @@ -288,21 +338,22 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw) ixgbe_service_event_schedule(adapter); } -static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) +static u32 +ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) { u8 __iomem *reg_addr; u32 value; int i; reg_addr = READ_ONCE(hw->hw_addr); - if (ixgbe_removed(reg_addr)) + if (IXGBE_REMOVED(reg_addr)) return IXGBE_FAILED_READ_REG; - /* Register read of 0xFFFFFFF can indicate the adapter has been removed, - * so perform several status register reads to determine if the adapter - * has been removed. + /* Register read of 0xFFFFFFFF can indicate the adapter has been + * removed, so perform several status register reads to determine if + * the adapter has been removed. */ - for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) { + for (i = 0; i < IXGBE_FAILED_READ_RETRIES; ++i) { value = readl(reg_addr + IXGBE_STATUS); if (value != IXGBE_FAILED_READ_REG) break; @@ -316,25 +367,39 @@ static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) return value; } -/** - * ixgbe_read_reg - Read from device register - * @hw: hw specific details - * @reg: offset of register to read - * - * Returns : value read or IXGBE_FAILED_READ_REG if removed - * - * This function is used to read device registers. It checks for device - * removal by confirming any read that returns all ones by checking the - * status register value for all ones. This function avoids reading from - * the hardware if a removal was previously detected in which case it - * returns IXGBE_FAILED_READ_REG (all ones). 
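
ixgbe_check_remove() treats a single all-ones read as ambiguous -- it can mean either a transient bus glitch or a surprise-removed adapter -- so it polls the STATUS register several times before declaring the device gone. A self-contained model of that retry policy (fake_status_read() and the constants are stand-ins for the example, not driver symbols):

#include <stdio.h>
#include <stdint.h>

#define FAILED_READ_REG 0xFFFFFFFFu
#define READ_RETRIES	5

/* stand-in for readl() on the STATUS register; returns all-ones while
 * the bus is (transiently or permanently) returning garbage */
static uint32_t fake_status_read(int *bad_reads_left)
{
	return (*bad_reads_left)-- > 0 ? FAILED_READ_REG : 0x00080000;
}

static int device_removed(int flaky_reads)
{
	uint32_t value = FAILED_READ_REG;
	int i;

	/* one all-ones read is ambiguous: retry the status register a
	 * few times before declaring the adapter removed */
	for (i = 0; i < READ_RETRIES && value == FAILED_READ_REG; i++)
		value = fake_status_read(&flaky_reads);

	return value == FAILED_READ_REG;
}

int main(void)
{
	printf("2 bad reads -> removed? %d\n", device_removed(2));	/* 0 */
	printf("9 bad reads -> removed? %d\n", device_removed(9));	/* 1 */
	return 0;
}
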
- */ -u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) +static u32 +ixgbe_validate_register_read(struct ixgbe_hw *_hw, u32 reg, bool quiet) { - u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); + int i; u32 value; + u8 __iomem *reg_addr; + struct ixgbe_adapter *adapter = _hw->back; - if (ixgbe_removed(reg_addr)) + reg_addr = READ_ONCE(_hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; + for (i = 0; i < IXGBE_DEAD_READ_RETRIES; ++i) { + value = readl(reg_addr + reg); + if (value != IXGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == IXGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg, bool quiet) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) return IXGBE_FAILED_READ_REG; if (unlikely(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { @@ -360,471 +425,11 @@ writes_completed: value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) value = ixgbe_check_remove(hw, reg); + if (unlikely(value == IXGBE_DEAD_READ_REG)) + value = ixgbe_validate_register_read(hw, reg, quiet); return value; } -static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) -{ - u16 value; - - pci_read_config_word(pdev, PCI_VENDOR_ID, &value); - if (value == IXGBE_FAILED_READ_CFG_WORD) { - ixgbe_remove_adapter(hw); - return true; - } - return false; -} - -u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) -{ - struct ixgbe_adapter *adapter = hw->back; - u16 value; - - if (ixgbe_removed(hw->hw_addr)) - return IXGBE_FAILED_READ_CFG_WORD; - pci_read_config_word(adapter->pdev, reg, &value); - if (value == IXGBE_FAILED_READ_CFG_WORD && - ixgbe_check_cfg_remove(hw, adapter->pdev)) - return IXGBE_FAILED_READ_CFG_WORD; - return value; -} - -#ifdef CONFIG_PCI_IOV -static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) -{ - struct ixgbe_adapter *adapter = hw->back; - u32 value; - - if (ixgbe_removed(hw->hw_addr)) - return IXGBE_FAILED_READ_CFG_DWORD; - pci_read_config_dword(adapter->pdev, reg, &value); - if (value == IXGBE_FAILED_READ_CFG_DWORD && - ixgbe_check_cfg_remove(hw, adapter->pdev)) - return IXGBE_FAILED_READ_CFG_DWORD; - return value; -} -#endif /* CONFIG_PCI_IOV */ - -void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) -{ - struct ixgbe_adapter *adapter = hw->back; - - if (ixgbe_removed(hw->hw_addr)) - return; - pci_write_config_word(adapter->pdev, reg, value); -} - -static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) -{ - BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); - - /* flush memory to make sure state is correct before next watchdog */ - smp_mb__before_atomic(); - clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); -} - -struct ixgbe_reg_info { - u32 ofs; - char *name; -}; - -static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { - - /* General Registers */ - {IXGBE_CTRL, "CTRL"}, - {IXGBE_STATUS, "STATUS"}, - {IXGBE_CTRL_EXT, "CTRL_EXT"}, - - /* Interrupt Registers */ - {IXGBE_EICR, "EICR"}, - - /* RX Registers */ - {IXGBE_SRRCTL(0), "SRRCTL"}, - {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, - {IXGBE_RDLEN(0), "RDLEN"}, - {IXGBE_RDH(0), "RDH"}, - {IXGBE_RDT(0), "RDT"}, - {IXGBE_RXDCTL(0), "RXDCTL"}, - {IXGBE_RDBAL(0), "RDBAL"}, - {IXGBE_RDBAH(0), "RDBAH"}, - - /* TX Registers */ - {IXGBE_TDBAL(0), 
"TDBAL"}, - {IXGBE_TDBAH(0), "TDBAH"}, - {IXGBE_TDLEN(0), "TDLEN"}, - {IXGBE_TDH(0), "TDH"}, - {IXGBE_TDT(0), "TDT"}, - {IXGBE_TXDCTL(0), "TXDCTL"}, - - /* List Terminator */ - { .name = NULL } -}; - - -/* - * ixgbe_regdump - register printout routine - */ -static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) -{ - int i; - char rname[16]; - u32 regs[64]; - - switch (reginfo->ofs) { - case IXGBE_SRRCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); - break; - case IXGBE_DCA_RXCTRL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - break; - case IXGBE_RDLEN(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); - break; - case IXGBE_RDH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); - break; - case IXGBE_RDT(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); - break; - case IXGBE_RXDCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - break; - case IXGBE_RDBAL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); - break; - case IXGBE_RDBAH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); - break; - case IXGBE_TDBAL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); - break; - case IXGBE_TDBAH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); - break; - case IXGBE_TDLEN(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); - break; - case IXGBE_TDH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); - break; - case IXGBE_TDT(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); - break; - case IXGBE_TXDCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); - break; - default: - pr_info("%-15s %08x\n", - reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); - return; - } - - i = 0; - while (i < 64) { - int j; - char buf[9 * 8 + 1]; - char *p = buf; - - snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); - for (j = 0; j < 8; j++) - p += sprintf(p, " %08x", regs[i++]); - pr_err("%-15s%s\n", rname, buf); - } - -} - -static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) -{ - struct ixgbe_tx_buffer *tx_buffer; - - tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", - n, ring->next_to_use, ring->next_to_clean, - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp); -} - -/* - * ixgbe_dump - Print registers, tx-rings and rx-rings - */ -static void ixgbe_dump(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_reg_info *reginfo; - int n = 0; - struct ixgbe_ring *ring; - struct ixgbe_tx_buffer *tx_buffer; - union ixgbe_adv_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; - struct ixgbe_ring *rx_ring; - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer_info; - int i = 0; - - if (!netif_msg_hw(adapter)) - return; - - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state " - "trans_start\n"); - pr_info("%-15s %016lX %016lX\n", - netdev->name, - netdev->state, - dev_trans_start(netdev)); - } - - /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - 
pr_info(" Register Name Value\n"); - for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; - reginfo->name; reginfo++) { - ixgbe_regdump(hw, reginfo); - } - - /* Print TX Ring Summary */ - if (!netdev || !netif_running(netdev)) - return; - - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info(" %s %s %s %s\n", - "Queue [NTU] [NTC] [bi(ntc)->dma ]", - "leng", "ntw", "timestamp"); - for (n = 0; n < adapter->num_tx_queues; n++) { - ring = adapter->tx_ring[n]; - ixgbe_print_buffer(ring, n); - } - - for (n = 0; n < adapter->num_xdp_queues; n++) { - ring = adapter->xdp_ring[n]; - ixgbe_print_buffer(ring, n); - } - - /* Print TX Rings */ - if (!netif_msg_tx_done(adapter)) - goto rx_ring_summary; - - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); - - /* Transmit Descriptor Formats - * - * 82598 Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 - * - * 82598 Advanced Transmit Descriptor (Write-Back Format) - * +--------------------------------------------------------------+ - * 0 | RSV [63:0] | - * +--------------------------------------------------------------+ - * 8 | RSV | STA | NXTSEQ | - * +--------------------------------------------------------------+ - * 63 36 35 32 31 0 - * - * 82599+ Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 - * - * 82599+ Advanced Transmit Descriptor (Write-Back Format) - * +--------------------------------------------------------------+ - * 0 | RSV [63:0] | - * +--------------------------------------------------------------+ - * 8 | RSV | STA | RSV | - * +--------------------------------------------------------------+ - * 63 36 35 32 31 0 - */ - - for (n = 0; n < adapter->num_tx_queues; n++) { - ring = adapter->tx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("%s%s %s %s %s %s\n", - "T [desc] [address 63:0 ] ", - "[PlPOIdStDDt Ln] [bi->dma ] ", - "leng", "ntw", "timestamp", "bi->skb"); - - for (i = 0; ring->desc && (i < ring->count); i++) { - tx_desc = IXGBE_TX_DESC(ring, i); - tx_buffer = &ring->tx_buffer_info[i]; - u0 = (struct my_u0 *)tx_desc; - if (dma_unmap_len(tx_buffer, len) > 0) { - const char *ring_desc; - - if (i == ring->next_to_use && - i == ring->next_to_clean) - ring_desc = " NTC/U"; - else if (i == ring->next_to_use) - ring_desc = " NTU"; - else if (i == ring->next_to_clean) - ring_desc = " NTC"; - else - ring_desc = ""; - pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s", - i, - le64_to_cpu((__force __le64)u0->a), - le64_to_cpu((__force __le64)u0->b), - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp, - tx_buffer->skb, - ring_desc); - - if (netif_msg_pktdata(adapter) && - tx_buffer->skb) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 
16, 1, - tx_buffer->skb->data, - dma_unmap_len(tx_buffer, len), - true); - } - } - } - - /* Print RX Rings Summary */ -rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - pr_info("Queue [NTU] [NTC]\n"); - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("%5d %5X %5X\n", - n, rx_ring->next_to_use, rx_ring->next_to_clean); - } - - /* Print RX Rings */ - if (!netif_msg_rx_status(adapter)) - return; - - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - - /* Receive Descriptor Formats - * - * 82598 Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * 82598 Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 16 15 4 3 0 - * +------------------------------------------------------+ - * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | - * | Packet | IP | | | | Type | Type | - * | Checksum | Ident | | | | | | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length | Extended Error | Extended Status | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - * - * 82599+ Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * 82599+ Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 17 16 4 3 0 - * +------------------------------------------------------+ - * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | - * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | - * |/ Flow Dir Flt ID | | | | | | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - */ - - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("%s%s%s\n", - "R [desc] [ PktBuf A0] ", - "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", - "<-- Adv Rx Read format"); - pr_info("%s%s%s\n", - "RWB[desc] [PcsmIpSHl PtRs] ", - "[vl er S cks ln] ---------------- [bi->skb ] ", - "<-- Adv Rx Write-Back format"); - - for (i = 0; i < rx_ring->count; i++) { - const char *ring_desc; - - if (i == rx_ring->next_to_use) - ring_desc = " NTU"; - else if (i == rx_ring->next_to_clean) - ring_desc = " NTC"; - else - ring_desc = ""; - - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - rx_desc = IXGBE_RX_DESC(rx_ring, i); - u0 = (struct my_u0 *)rx_desc; - if (rx_desc->wb.upper.length) { - /* Descriptor Done */ - pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", - i, - le64_to_cpu((__force __le64)u0->a), - le64_to_cpu((__force __le64)u0->b), - rx_buffer_info->skb, - ring_desc); - } else { - pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n", - i, - le64_to_cpu((__force __le64)u0->a), - le64_to_cpu((__force __le64)u0->b), - (u64)rx_buffer_info->dma, - rx_buffer_info->skb, 
- ring_desc); - - if (netif_msg_pktdata(adapter) && - rx_buffer_info->dma) { - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - page_address(rx_buffer_info->page) + - rx_buffer_info->page_offset, - ixgbe_rx_bufsz(rx_ring), true); - } - } - } - } -} - static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) { u32 ctrl_ext; @@ -845,7 +450,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } -/** +/* * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes @@ -873,7 +478,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -912,7 +517,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -950,7 +555,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->tx_ring[i]->state); - for (i = 0; i < adapter->num_xdp_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->xdp_ring[i]->state); @@ -965,9 +569,11 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; +#ifdef HAVE_DCBNL_IEEE if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); +#endif if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { ixgbe_update_xoff_rx_lfc(adapter); return; @@ -995,17 +601,16 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; tc = tx_ring->dcb_tc; - if (xoff[tc]) + if ((tc <= 7) && (xoff[tc])) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } - for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; tc = xdp_ring->dcb_tc; - if (xoff[tc]) + if (tc <= 7 && xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); - } + } } static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) @@ -1023,11 +628,12 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) return ((head <= tail) ? tail : tail + ring->count) - head; } -static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) { u32 tx_done = ixgbe_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + bool ret = false; clear_check_for_tx_hang(tx_ring); @@ -1039,20 +645,22 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) * bit is cleared if a pause frame is received to remove * false hang detection due to PFC or 802.3x frames. By * requiring this to fail twice we avoid races with - * pfc clearing the ARMED bit and conditions where we + * PFC clearing the ARMED bit and conditions where we * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. 
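
The comment above describes a two-strike scheme: a hang is reported only when the completion counter stalls across two consecutive checks, with the ARMED bit carrying the first strike, exactly the test_and_set_bit() semantics in ixgbe_check_tx_hang(). A compact userspace model of that state machine (struct hang_check is invented for the example; the driver keeps the flag in ring->state):

#include <stdio.h>
#include <stdbool.h>

struct hang_check {
	unsigned int tx_done_old;
	bool armed;
};

static bool check_tx_hang(struct hang_check *hc, unsigned int tx_done,
			  unsigned int tx_pending)
{
	if (hc->tx_done_old == tx_done && tx_pending) {
		/* no progress: arm, and report only on the second strike */
		bool was_armed = hc->armed;
		hc->armed = true;
		return was_armed;
	}
	/* progress was made: record it and disarm the countdown */
	hc->tx_done_old = tx_done;
	hc->armed = false;
	return false;
}

int main(void)
{
	struct hang_check hc = { 0, false };

	printf("%d\n", check_tx_hang(&hc, 10, 4));	/* progress: 0 */
	printf("%d\n", check_tx_hang(&hc, 10, 4));	/* armed:    0 */
	printf("%d\n", check_tx_hang(&hc, 10, 4));	/* hang:     1 */
	return 0;
}
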
*/ - if (tx_done_old == tx_done && tx_pending) + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ - return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, - &tx_ring->state); - /* update completed stats and continue */ - tx_ring->tx_stats.tx_done_old = tx_done; - /* reset the countdown */ - clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } - return false; + return ret; } /** @@ -1065,42 +673,42 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } } /** - * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure - * @queue_index: Tx queue to set - * @maxrate: desired maximum transmit bitrate + * @txqueue: specific tx queue **/ -static int ixgbe_tx_maxrate(struct net_device *netdev, - int queue_index, u32 maxrate) +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else +static void ixgbe_tx_timeout(struct net_device *netdev) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 bcnrc_val = ixgbe_link_mbps(adapter); + bool real_tx_hang = false; + int i; - if (!maxrate) - return 0; +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) + real_tx_hang = true; + } - /* Calculate the rate factor values to set */ - bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; - bcnrc_val /= maxrate; + if (real_tx_hang) { + ixgbe_tx_timeout_reset(adapter); + } else { + e_info(drv, "Fake Tx hang detected with timeout of %d " + "seconds\n", netdev->watchdog_timeo/HZ); - /* clear everything but the rate factor */ - bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | - IXGBE_RTTBCNRC_RF_DEC_MASK; - - /* enable the rate scheduler */ - bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; - - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); - - return 0; + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } } /** @@ -1115,7 +723,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; + unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; @@ -1146,14 +754,20 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; - if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) - total_ipsec++; /* free the skb */ +#ifdef HAVE_XDP_SUPPORT if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT xdp_return_frame(tx_buffer->xdpf); 
+#else + page_frag_free(tx_buffer->data); +#endif else napi_consume_skb(tx_buffer->skb, napi_budget); +#else + napi_consume_skb(tx_buffer->skb, napi_budget); +#endif /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1210,12 +824,12 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; - adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ struct ixgbe_hw *hw = &adapter->hw; - e_err(drv, "Detected Tx Unit Hang %s\n" + + e_err(drv, "Detected Tx Unit Hang%s\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" @@ -1223,7 +837,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, "tx_buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", - ring_is_xdp(tx_ring) ? "(XDP)" : "", + ring_is_xdp(tx_ring) ? " (XDP)" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), @@ -1236,9 +850,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, e_info(probe, "tx hang %d detected on queue %d, resetting adapter\n", - adapter->tx_timeout_count + 1, tx_ring->queue_index); + adapter->tx_timeout_count + 1, tx_ring->queue_index); - /* schedule immediate reset if we believe we hung */ ixgbe_tx_timeout_reset(adapter); /* the adapter is about to reset, no point in enabling stuff */ @@ -1247,30 +860,37 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (ring_is_xdp(tx_ring)) return !!budget; - netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) && (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
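
The wake test that follows only restarts a stopped queue once at least TX_WAKE_THRESHOLD descriptors are free, giving the queue hysteresis instead of thrashing stop/start on every completion. The free-slot count is the usual producer/consumer ring formula (this sketch assumes the ixgbe_desc_unused() definition used elsewhere in the Intel drivers; one slot is always kept in reserve so a full ring is distinguishable from an empty one):

#include <stdio.h>

static unsigned int desc_unused(unsigned int count, unsigned int ntu,
				unsigned int ntc)
{
	/* free slots between next_to_use (producer) and next_to_clean
	 * (consumer), minus the one reserved slot */
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* 512-entry ring, producer at 500, consumer at 10 */
	printf("%u\n", desc_unused(512, 500, 10));	/* 21 free  */
	/* producer at 10, consumer at 500 */
	printf("%u\n", desc_unused(512, 10, 500));	/* 489 free */
	return 0;
}
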
*/ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->queue_index) - && !test_bit(__IXGBE_DOWN, &adapter->state)) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->queue_index); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(netdev_ring(tx_ring), + ring_queue_index(tx_ring)) + && !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { + netif_wake_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); ++tx_ring->tx_stats.restart_queue; } +#else + if (netif_queue_stopped(netdev_ring(tx_ring)) && + !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { + netif_wake_queue(netdev_ring(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } +#endif } return !!budget; } -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, int cpu) @@ -1362,7 +982,7 @@ out_no_update: static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { - int i; + int v_idx; /* always use CB2 mode, difference is masked in the CB driver */ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) @@ -1372,9 +992,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_DISABLE); - for (i = 0; i < adapter->num_q_vectors; i++) { - adapter->q_vector[i]->cpu = -1; - ixgbe_update_dca(adapter->q_vector[i]); + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + adapter->q_vector[v_idx]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[v_idx]); } } @@ -1391,13 +1011,13 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) /* if we're already enabled, don't do it again */ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) break; - if (dca_add_requester(dev) == 0) { + if (dca_add_requester(dev) == IXGBE_SUCCESS) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_MODE_CB2); break; } - /* fall through - DCA is disabled. */ + /* fall through - DCA is disabled */ case DCA_PROVIDER_REMOVE: if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { dca_remove_requester(dev); @@ -1408,24 +1028,26 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) break; } - return 0; + return IXGBE_SUCCESS; } +#endif /* CONFIG_DCA */ -#endif /* CONFIG_IXGBE_DCA */ - +#ifdef NETIF_F_RXHASH #define IXGBE_RSS_L4_TYPES_MASK \ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX)) static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - u16 rss_type; + u16 rss_type; - if (!(ring->netdev->features & NETIF_F_RXHASH)) + if (!(netdev_ring(ring)->features & NETIF_F_RXHASH)) return; rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & @@ -1438,8 +1060,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } +#endif /* NETIF_F_RXHASH */ -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /** * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type * @ring: structure containing ring specific data @@ -1457,8 +1080,8 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ /** * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data @@ -1475,13 +1098,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, skb_checksum_none_assert(skb); /* Rx csum disabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) + if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) return; - /* check for VXLAN and Geneve packets */ + /* check for VXLAN or Geneve packet type */ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { encap_pkt = true; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) skb->encapsulation = 1; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + skb->ip_summed = CHECKSUM_NONE; } /* if IP and error */ @@ -1495,6 +1121,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + /* * 82599 errata, UDP frames with a 0 checksum can be marked as * checksum errors. @@ -1512,19 +1139,90 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, if (encap_pkt) { if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) return; - + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { skb->ip_summed = CHECKSUM_NONE; return; } +#ifdef HAVE_SKBUFF_CSUM_LEVEL /* If we checked the outer header let the stack know */ skb->csum_level = 1; +#endif /* HAVE_SKBUFF_CSUM_LEVEL */ } } +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} + +#else /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) { - return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; + unsigned int res; + + res = ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; + +#ifdef HAVE_XDP_SUPPORT + if (rx_ring->xdp_prog && res < XDP_PACKET_HEADROOM) + res = XDP_PACKET_HEADROOM; +#endif + + return res; } static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, @@ -1532,6 +1230,12 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, { struct page *page = bi->page; dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) @@ -1548,7 +1252,11 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, dma = dma_map_page_attrs(rx_ring->dev, page, 0, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else IXGBE_RX_DMA_ATTR); +#endif /* * if mapping failed free memory back to system since @@ -1564,13 +1272,18 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = ixgbe_rx_offset(rx_ring); +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE page_ref_add(page, USHRT_MAX - 1); bi->pagecnt_bias = USHRT_MAX; +#else + bi->pagecnt_bias = 1; +#endif rx_ring->rx_stats.alloc_rx_page++; return true; } +#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ /** * ixgbe_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on @@ -1581,7 +1294,9 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT u16 bufsz; +#endif /* nothing to do */ if (!cleaned_count) @@ -1590,10 +1305,16 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) rx_desc = IXGBE_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT bufsz = ixgbe_rx_bufsz(rx_ring); +#endif do { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + if (!ixgbe_alloc_mapped_skb(rx_ring, bi)) + break; +#else if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; @@ -1601,12 +1322,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, bufsz, DMA_FROM_DEVICE); +#endif /* * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); +#endif rx_desc++; bi++; @@ -1625,26 +1351,121 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i += rx_ring->count; - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = i; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
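
The removed block above and its replacement, ixgbe_release_rx_desc(), follow the same publication protocol: fill descriptors in normal memory, issue a write barrier, then write the tail doorbell, so the NIC can never fetch a half-written descriptor. A minimal sketch of the pattern; __sync_synchronize() stands in for the kernel's wmb() and the volatile pointer for the MMIO tail register:

#include <stdint.h>

static void publish_descriptors(volatile uint32_t *tail, uint32_t new_ntu)
{
	/* ... descriptor ring entries were filled in normal memory ... */

	__sync_synchronize();	/* order: descriptors first, doorbell second */
	*tail = new_ntu;	/* hardware may start fetching from here on */
}

int main(void)
{
	volatile uint32_t fake_tail = 0;

	publish_descriptors(&fake_tail, 42);
	return (int)fake_tail - 42;	/* exits 0 */
}
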
- */ - wmb(); - writel(i, rx_ring->tail); - } + if (rx_ring->next_to_use != i) + ixgbe_release_rx_desc(rx_ring, i); } -static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +/** + * ixgbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the head's pointer to tail intact. + **/ +static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = IXGBE_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + IXGBE_CB(tail)->head = NULL; + + return head; +} + +/** + * ixgbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet add its stats to the main + * skb. That is done via ixgbe_merge_active_tail. + **/ +static inline void ixgbe_add_active_tail(struct sk_buff *head, + struct sk_buff *tail) +{ + struct sk_buff *old_tail = IXGBE_CB(head)->tail; + + if (old_tail) { + ixgbe_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + IXGBE_CB(tail)->head = head; + IXGBE_CB(head)->tail = tail; +} + +/** + * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool ixgbe_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = IXGBE_CB(head)->tail; + + if (!tail) + return false; + + ixgbe_merge_active_tail(tail); + + IXGBE_CB(head)->tail = NULL; + + return true; +} + +#endif +#ifdef HAVE_VLAN_RX_REGISTER +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + u16 vlan_tag = IXGBE_CB(skb)->vid; + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + if (vlan_tag & VLAN_VID_MASK) { + /* by placing vlgrp at start of structure we can alias it */ + struct vlan_group **vlgrp = netdev_priv(skb->dev); + if (!*vlgrp) + dev_kfree_skb_any(skb); + else if (q_vector->netpoll_rx) + vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); + else + vlan_gro_receive(&q_vector->napi, + *vlgrp, vlan_tag, skb); + } else { +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ + if (q_vector->netpoll_rx) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + } +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ +} + +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifdef NETIF_F_GSO +static void ixgbe_set_rsc_gso_size(struct ixgbe_ring __maybe_unused *ring, struct sk_buff *skb) { - u16 hdr_len = skb_headlen(skb); + u16 hdr_len = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); /* set gso_size to avoid messing up TCP MSS */ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), @@ -1652,6 +1473,7 @@ static void 
ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } +#endif /* NETIF_F_GSO */ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { @@ -1662,12 +1484,35 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; rx_ring->rx_stats.rsc_flush++; +#ifdef NETIF_F_GSO ixgbe_set_rsc_gso_size(rx_ring, skb); +#endif /* gso_size is computed using append_cnt so always clear it last */ IXGBE_CB(skb)->append_cnt = 0; } +static void ixgbe_rx_vlan(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_CTAG_RX) && +#else + if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_RX) && +#endif + ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) +#ifndef HAVE_VLAN_RX_REGISTER + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + le16_to_cpu(rx_desc->wb.upper.vlan)); +#else + IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); + else + IXGBE_CB(skb)->vid = 0; +#endif +} + /** * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -1682,41 +1527,52 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct net_device *dev = rx_ring->netdev; +#ifdef HAVE_PTP_1588_CLOCK u32 flags = rx_ring->q_vector->adapter->flags; +#endif ixgbe_update_rsc_stats(rx_ring, skb); +#ifdef NETIF_F_RXHASH ixgbe_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ ixgbe_rx_checksum(rx_ring, rx_desc, skb); - +#ifdef HAVE_PTP_1588_CLOCK if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); - if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && - ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { - u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - } +#endif + ixgbe_rx_vlan(rx_ring, rx_desc, skb); - if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) - ixgbe_ipsec_rx(rx_ring, rx_desc, skb); - - /* record Rx queue, or update MACVLAN statistics */ - if (netif_is_ixgbe(dev)) - skb_record_rx_queue(skb, rx_ring->queue_index); - else - macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, - false); - - skb->protocol = eth_type_trans(skb, dev); + skb_record_rx_queue(skb, ring_queue_index(rx_ring)); + skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); } void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - napi_gro_receive(&q_vector->napi, skb); +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ixgbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + ixgbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif +#ifndef NETIF_F_GRO + + netdev_ring(rx_ring)->last_rx = jiffies; +#endif } /** @@ -1734,6 +1590,9 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#endif u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ @@ -1765,15 +1624,22 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, 
return false; /* place skb in next buffer to be received */ +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + ixgbe_add_active_tail(skb, next_skb); + IXGBE_CB(next_skb)->head = skb; +#else rx_ring->rx_buffer_info[ntc].skb = skb; +#endif rx_ring->rx_stats.non_eop_descs++; return true; } +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT /** * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being adjusted * * This function is an ixgbe specific version of __pskb_pull_tail. The @@ -1783,8 +1649,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. */ -static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, - struct sk_buff *skb) +static void ixgbe_pull_tail(struct sk_buff *skb) { skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; @@ -1826,7 +1691,24 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - if (ring_uses_build_skb(rx_ring)) { +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + IXGBE_RX_DMA_ATTR); +#endif + } else if (ring_uses_build_skb(rx_ring)) { unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; dma_sync_single_range_for_cpu(rx_ring->dev, @@ -1843,14 +1725,6 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, skb_frag_size(frag), DMA_FROM_DEVICE); } - - /* If the page was released, just unmap it. */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } } /** @@ -1859,7 +1733,7 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * - * Check if the skb is valid in the XDP case it will be an error pointer. + * Check if the skb is valid. In the XDP case it will be an error pointer. * Return true in this case to abort processing and advance to next * descriptor. * @@ -1875,37 +1749,31 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. **/ -bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, +bool ixgbe_cleanup_headers(struct ixgbe_ring __maybe_unused *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct net_device *netdev = rx_ring->netdev; - /* XDP packets use error pointer so abort at this point */ if (IS_ERR(skb)) return true; - /* Verify netdev is present, and that packet does not have any - * errors that would be unacceptable to the netdev. 
- */ - if (!netdev || - (unlikely(ixgbe_test_staterr(rx_desc, - IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && - !(netdev->features & NETIF_F_RXALL)))) { + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { dev_kfree_skb_any(skb); return true; } /* place header in linear portion of buffer */ if (!skb_headlen(skb)) - ixgbe_pull_tail(rx_ring, skb); + ixgbe_pull_tail(skb); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* do not attempt to pad FCoE Frames as this will disrupt DDP */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) return false; - #endif + /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -1958,7 +1826,11 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif return false; #else /* The last offset is a bit aggressive in that we assume the @@ -1972,6 +1844,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) return false; #endif +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. @@ -1980,6 +1853,15 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } +#else + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } +#endif return true; } @@ -1989,7 +1871,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into - * @size: size of data in rx_buffer + * @size: size of data * * This function will add the data contained in rx_buffer->page to the skb. 
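* (Editor's aside, hedged: on builds with PAGE_SIZE < 8192 the buffer below
* is managed as two truesize halves and "rx_buffer->page_offset ^= truesize"
* flips between them, e.g. 0 -> 2048 -> 0 on a 4K page, so one half can sit
* with the stack while the other is refilled.)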
* This is done either through a direct copy if the data in the buffer is @@ -2011,8 +1893,10 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : SKB_DATA_ALIGN(size); #endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); + #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else @@ -2020,10 +1904,10 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, #endif } -static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff **skb, - const unsigned int size) +static struct ixgbe_rx_buffer * +ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, + const unsigned int size) { struct ixgbe_rx_buffer *rx_buffer; @@ -2059,6 +1943,13 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, struct sk_buff *skb) { +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif if (ixgbe_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); @@ -2071,7 +1962,11 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else IXGBE_RX_DMA_ATTR); +#endif } __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); @@ -2143,12 +2038,18 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, return skb; } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { +#ifdef HAVE_XDP_BUFF_DATA_META unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#else + void *va = xdp->data; +#endif #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else @@ -2158,17 +2059,13 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, #endif struct sk_buff *skb; - /* Prefetch first cache line of first page. If xdp->data_meta - * is unused, this points extactly as xdp->data, otherwise we - * likely have a consumer accessing first few bytes of meta - * data, and then actual data. 
- */ - prefetch(xdp->data_meta); + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data_meta + L1_CACHE_BYTES); + prefetch(va + L1_CACHE_BYTES); #endif - /* build an skb to around the page buffer */ + /* build an skb around the page buffer */ skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -2176,8 +2073,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); +#ifdef HAVE_XDP_BUFF_DATA_META if (metasize) skb_metadata_set(skb, metasize); +#endif /* record DMA address if this is the start of a chain of buffers */ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) @@ -2193,13 +2092,20 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - struct xdp_buff *xdp) +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ + +static struct sk_buff * +ixgbe_run_xdp(struct ixgbe_adapter __maybe_unused *adapter, + struct ixgbe_ring __maybe_unused *rx_ring, + struct xdp_buff __maybe_unused *xdp) { - int err, result = IXGBE_XDP_PASS; + int result = IXGBE_XDP_PASS; +#ifdef HAVE_XDP_SUPPORT struct bpf_prog *xdp_prog; +#ifdef HAVE_XDP_FRAME_STRUCT struct xdp_frame *xdpf; +#endif + int err; u32 act; rcu_read_lock(); @@ -2208,26 +2114,33 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, if (!xdp_prog) goto xdp_out; +#ifdef HAVE_XDP_FRAME_STRUCT prefetchw(xdp->data_hard_start); /* xdp_frame write */ +#endif act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: - xdpf = convert_to_xdp_frame(xdp); +#ifdef HAVE_XDP_FRAME_STRUCT + xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) { result = IXGBE_XDP_CONSUMED; break; } result = ixgbe_xmit_xdp_ring(adapter, xdpf); +#else + result = ixgbe_xmit_xdp_ring(adapter, xdp); +#endif /* !HAVE_XDP_FRAME_STRUCT */ break; case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); - if (!err) + if (!err) { result = IXGBE_XDP_REDIR; - else + } else { result = IXGBE_XDP_CONSUMED; + } break; default: bpf_warn_invalid_xdp_action(act); @@ -2241,6 +2154,8 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, } xdp_out: rcu_read_unlock(); +#endif /* HAVE_XDP_SUPPORT */ + return ERR_PTR(-result); } @@ -2272,23 +2187,27 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the syste. * - * Returns amount of work completed + * Returns amount of work completed. 
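+ * (Editor's note, hedged: ixgbe_run_xdp() above encodes its verdict as
+ * ERR_PTR(-result), so even a successful XDP_TX or XDP_REDIRECT reaches
+ * this loop as an IS_ERR() skb; the loop treats that as "buffer already
+ * consumed" and never hands the error pointer up the stack.)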
**/ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, - const int budget) + int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct ixgbe_adapter *adapter = q_vector->adapter; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) int ddp_bytes; unsigned int mss = 0; -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); unsigned int xdp_xmit = 0; struct xdp_buff xdp; + xdp.data = NULL; + xdp.data_end = NULL; +#ifdef HAVE_XDP_BUFF_RXQ xdp.rxq = &rx_ring->xdp_rxq; +#endif while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; @@ -2319,7 +2238,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META xdp.data_meta = xdp.data; +#endif xdp.data_hard_start = xdp.data - ixgbe_rx_offset(rx_ring); xdp.data_end = xdp.data + size; @@ -2340,9 +2261,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_bytes += size; } else if (skb) { ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC } else if (ring_uses_build_skb(rx_ring)) { skb = ixgbe_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); +#endif } else { skb = ixgbe_construct_skb(rx_ring, rx_buffer, &xdp, rx_desc); @@ -2372,14 +2295,15 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* populate checksum, timestamp, VLAN, and protocol */ ixgbe_process_skb_fields(rx_ring, rx_desc, skb); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, + rx_desc, skb); /* include DDPed FCoE data */ if (ddp_bytes > 0) { if (!mss) { - mss = rx_ring->netdev->mtu - + mss = netdev_ring(rx_ring)->mtu - sizeof(struct fcoe_hdr) - sizeof(struct fc_frame_header) - sizeof(struct fcoe_crc_eof); @@ -2392,12 +2316,15 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (!ddp_bytes) { dev_kfree_skb_any(skb); +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; +#endif continue; } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ - ixgbe_rx_skb(q_vector, skb); + ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); /* update budget accounting */ total_rx_packets++; @@ -2426,6 +2353,196 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, return total_rx_packets; } +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +/** + * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a legacy approach to Rx interrupt + * handling. This version will perform better on systems with a low cost + * dma mapping API. + * + * Returns amount of work completed. 
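+ * (Editor's note, hedged: "legacy" here means the no-packet-split path,
+ * where every descriptor maps a full rx_buf_len skb up front; frames that
+ * span descriptors are stitched together with the frag_list helpers
+ * defined earlier in this patch.)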
+ **/ +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#if IS_ENABLED(CONFIG_FCOE) + int ddp_bytes; + unsigned int mss = 0; +#endif /* CONFIG_FCOE */ + u16 len = 0; + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + struct ixgbe_rx_buffer *rx_buffer; + union ixgbe_adv_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!rx_desc->wb.upper.length) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.length); + /* pull the header of the skb in */ + __skb_put(skb, len); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. Only unmap it when EOP is reached + */ + if (!IXGBE_CB(skb)->head) { + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + skb = ixgbe_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, + IXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + IXGBE_CB(skb)->dma = 0; + + if (ixgbe_close_active_frag_list(skb) && + !IXGBE_CB(skb)->append_cnt) { + /* if we got here without RSC the packet is invalid */ + dev_kfree_skb_any(skb); + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + +#if IS_ENABLED(CONFIG_FCOE) + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, + rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = netdev_ring(rx_ring)->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; +#endif + continue; + } + } + +#endif /* CONFIG_FCOE */ + ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_rx_packets; +} + +#endif /* 
CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbe_busy_poll_recv(struct napi_struct *napi) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int found = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbe_for_each_ring(ring, q_vector->rx) { + found = ixgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbe_qv_unlock_poll(q_vector); + + return found; +} + +#endif /* HAVE_NDO_BUSY_POLL */ /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2435,13 +2552,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, **/ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) { - struct ixgbe_q_vector *q_vector; int v_idx; u32 mask; /* Populate MSIX to EITR Select */ - if (adapter->num_vfs > 32) { - u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; + if (adapter->num_vfs >= 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); } @@ -2450,8 +2566,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) * corresponding register. */ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; - q_vector = adapter->q_vector[v_idx]; ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); @@ -2471,7 +2587,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2488,6 +2604,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } +static inline bool ixgbe_container_is_rx(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring_container *rc) +{ + return &q_vector->rx == rc; +} + /** * ixgbe_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information @@ -2524,35 +2646,24 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, goto clear_counts; packets = ring_container->total_packets; - - /* We have no packets to actually measure against. This means - * either one of the other queues on this vector is active or - * we are a Tx queue doing TSO with too high of an interrupt rate. - * - * When this occurs just tick up our delay by the minimum value - * and hope that this extra delay will prevent us from being called - * without any work on our queue. - */ - if (!packets) { - itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; - if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) - itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; - itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; - goto clear_counts; - } - bytes = ring_container->total_bytes; - /* If packets are less than 4 or bytes are less than 9000 assume - * insufficient data to use bulk rate limiting approach. We are - * likely latency driven. 
- */ - if (packets < 4 && bytes < 9000) { - itr = IXGBE_ITR_ADAPTIVE_LATENCY; - goto adjust_by_size; + if (ixgbe_container_is_rx(q_vector, ring_container)) { + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. + */ + if (packets && packets < 24 && bytes < 12112) { + itr = IXGBE_ITR_ADAPTIVE_LATENCY; + avg_wire_size = (bytes + packets * 24) * 2; + avg_wire_size = clamp_t(unsigned int, + avg_wire_size, 2560, 12800); + goto adjust_for_speed; + } } - /* Between 4 and 48 we can assume that our current interrupt delay + /* With less than 48 packets we can assume that our current interrupt delay * is only slightly too low. As such we should increase it by a small * fixed amount. */ @@ -2560,6 +2671,20 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += IXGBE_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; goto clear_counts; } @@ -2590,7 +2715,6 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, */ itr = IXGBE_ITR_ADAPTIVE_BULK; -adjust_by_size: /* If packet counts are 256 or greater we can assume we have a gross * overestimation of what the rate should be. Instead of trying to fine * tune it just use the formula below to try and dial in an exact value @@ -2624,7 +2748,7 @@ adjust_by_size: /* 16K ints/sec to 9.2K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; - } else if (avg_wire_size < 1968) { + } else if (avg_wire_size <= 1980) { /* 9.2K ints/sec to 8K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; @@ -2633,12 +2757,7 @@ adjust_by_size: avg_wire_size = 32256; } - /* If we are in low latency mode half our delay which doubles the rate - * to somewhere between 100K to 16K ints/sec - */ - if (itr & IXGBE_ITR_ADAPTIVE_LATENCY) - avg_wire_size >>= 1; - +adjust_for_speed: /* Resultant value is 256 times larger than it needs to be. This * gives us room to adjust the value as needed to either increase * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. @@ -2657,14 +2776,21 @@ adjust_by_size: case IXGBE_LINK_SPEED_2_5GB_FULL: case IXGBE_LINK_SPEED_1GB_FULL: case IXGBE_LINK_SPEED_10_FULL: - if (avg_wire_size > 8064) - avg_wire_size = 8064; itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC; break; } + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP workload the ack packets being received would set + * the interrupt rate as they are a latency specific workload.
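+ * (Worked example, editor-added and hedged: assuming
+ * IXGBE_ITR_ADAPTIVE_MIN_INC is 2us, a latency-mode candidate of 4us
+ * computed against a previous ring_container->itr of 10us is raised back
+ * to 10 - 2 = 8us by the clamp below, i.e. at most one 2us step down per
+ * update.)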
+ */ + if ((itr & IXGBE_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - IXGBE_ITR_ADAPTIVE_MIN_INC; clear_counts: /* write back value */ ring_container->itr = itr; @@ -2700,7 +2826,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2724,7 +2850,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) new_itr = min(q_vector->rx.itr, q_vector->tx.itr); /* Clear latency flag if set, shift into correct position */ - new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY; + new_itr &= IXGBE_ITR_ADAPTIVE_MASK_USECS; new_itr <<= 2; if (new_itr != q_vector->itr) { @@ -2762,7 +2888,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) * - We may have missed the interrupt so always have to * check if we got a LSC */ - if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && + if (!(eicr & IXGBE_EICR_GPI_SDP0) && !(eicr & IXGBE_EICR_LSC)) return; @@ -2790,7 +2916,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) default: if (adapter->hw.mac.type >= ixgbe_mac_X540) return; - if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) + if (!(eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw))) return; break; } @@ -2804,17 +2930,15 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && - (eicr & IXGBE_EICR_GPI_SDP1(hw))) { + (eicr & IXGBE_EICR_GPI_SDP1)) { e_crit(probe, "Fan has stopped, replace the adapter\n"); /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); } } static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) { - struct ixgbe_hw *hw = &adapter->hw; - if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) return; @@ -2824,8 +2948,7 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) * Need to check link state so complete overtemp check * on service task */ - if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || - (eicr & IXGBE_EICR_LSC)) && + if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && (!test_bit(__IXGBE_DOWN, &adapter->state))) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2833,7 +2956,7 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } return; - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2856,37 +2979,13 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) e_crit(drv, "%s\n", ixgbe_overheat_msg); } -static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) -{ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - if (hw->phy.type == ixgbe_phy_nl) - return true; - return false; - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - case ixgbe_media_type_fiber_qsfp: - return true; - default: - return false; - } - default: - return false; - } -} - static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; - u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); + u32 
eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); if (!ixgbe_is_sfp(hw)) return; - - /* Later MAC's use different SDP */ if (hw->mac.type >= ixgbe_mac_X540) eicr_mask = IXGBE_EICR_GPI_SDP0_X540; @@ -2901,9 +3000,9 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) } if (adapter->hw.mac.type == ixgbe_mac_82599EB && - (eicr & IXGBE_EICR_GPI_SDP1(hw))) { + (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; ixgbe_service_event_schedule(adapter); @@ -2925,8 +3024,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) } } -static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, - u64 qmask) +static void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { u32 mask; struct ixgbe_hw *hw = &adapter->hw; @@ -2940,7 +3038,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2954,35 +3052,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, /* skip the flush */ } -static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - mask = (qmask & 0xFFFFFFFF); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); - mask = (qmask >> 32); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); - break; - default: - break; - } - /* skip the flush */ -} - /** * ixgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure @@ -2992,7 +3061,6 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) { - struct ixgbe_hw *hw = &adapter->hw; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); /* don't reenable LSC while waiting for link */ @@ -3002,36 +3070,39 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP0(hw); + case ixgbe_mac_X550EM_a: + mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); break; case ixgbe_mac_X540: case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: mask |= IXGBE_EIMS_TS; break; default: break; } if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP1(hw); + mask |= IXGBE_EIMS_GPI_SDP1; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP1(hw); - mask |= IXGBE_EIMS_GPI_SDP2(hw); + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; /* fall through */ case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || 
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) - mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); + mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; +#ifdef HAVE_PTP_1588_CLOCK + mask |= IXGBE_EIMS_TIMESYNC; +#endif + break; default: break; @@ -3048,22 +3119,22 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, IXGBE_WRITE_FLUSH(&adapter->hw); } -static irqreturn_t ixgbe_msix_other(int irq, void *data) +static irqreturn_t ixgbe_msix_other(int __always_unused irq, void *data) { struct ixgbe_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; u32 eicr; /* - * Workaround for Silicon errata. Use clear-by-write instead - * of clear-by-read. Reading with EICS will return the + * Workaround for Silicon errata #26 on 82598. Use clear-by-write + * instead of clear-by-read. Reading with EICS will return the * interrupt causes without clearing, which later be done * with the write to EICR. */ eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidentally clear them if + * which should be masked here in order to not accidentally clear them if * the bits are high when ixgbe_msix_other is called. There is a race * condition otherwise which results in possible performance loss * especially if the ixgbe_msix_other interrupt is triggering @@ -3084,7 +3155,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -3093,28 +3164,36 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) IXGBE_EICR_GPI_SDP0_X540); } if (eicr & IXGBE_EICR_ECC) { - e_info(link, "Received ECC Err, initiating reset\n"); + e_info(link, "Received unrecoverable ECC Err, " + "initiating reset.\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } +#ifdef HAVE_TX_MQ /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { int reinit_count = 0; int i; for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = adapter->tx_ring[i]; - if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, - &ring->state)) + if (test_and_clear_bit( + __IXGBE_TX_FDIR_INIT_DONE, + &ring->state)) reinit_count++; } if (reinit_count) { - /* no more flow director interrupts until after init */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); - adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + /* no more flow director interrupts until + * after init + */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, + IXGBE_EIMC_FLOW_DIR); + adapter->flags2 |= + IXGBE_FLAG2_FDIR_REQUIRES_REINIT; ixgbe_service_event_schedule(adapter); } } +#endif ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_overtemp_event(adapter, eicr); break; @@ -3124,8 +3203,10 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); +#ifdef HAVE_PTP_1588_CLOCK if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter); + ixgbe_ptp_check_pps_event(adapter); +#endif /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@
-3134,7 +3215,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) +static irqreturn_t ixgbe_msix_clean_rings(int __always_unused irq, void *data) { struct ixgbe_q_vector *q_vector = data; @@ -3147,38 +3228,50 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) } /** - * ixgbe_poll - NAPI Rx polling callback - * @napi: structure for representing this polling device - * @budget: how many packets driver is allowed to clean + * ixgbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets * - * This function is used for legacy and MSI, NAPI mode + * This function will clean all queues associated with a q_vector. **/ int ixgbe_poll(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; int per_ring_budget, work_done = 0; bool clean_complete = true; -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_dca(q_vector); -#endif +#endif /* CONFIG_DCA */ ixgbe_for_each_ring(ring, q_vector->tx) { +#ifdef HAVE_AF_XDP_ZC_SUPPORT bool wd = ring->xsk_umem ? ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : ixgbe_clean_tx_irq(q_vector, ring, budget); - if (!wd) +#else + if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) +#endif clean_complete = false; } +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) + return budget; +#else /* Exit if we are called by netpoll */ if (budget <= 0) return budget; +#endif /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ @@ -3188,30 +3281,41 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) { +#ifdef HAVE_AF_XDP_ZC_SUPPORT int cleaned = ring->xsk_umem ? 
ixgbe_clean_rx_irq_zc(q_vector, ring, per_ring_budget) : ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); - +#else + int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ work_done += cleaned; if (cleaned >= per_ring_budget) clean_complete = false; } +#ifdef HAVE_NDO_BUSY_POLL + ixgbe_qv_unlock_napi(q_vector); + +#endif +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; + +#endif /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; - /* all work done, exit the polling mode */ if (likely(napi_complete_done(napi, work_done))) { - if (adapter->rx_itr_setting & 1) + if (adapter->rx_itr_setting == 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable_queues(adapter, - BIT_ULL(q_vector->v_idx)); + ((u64)1 << q_vector->v_idx)); } - return min(work_done, budget - 1); } @@ -3249,16 +3353,18 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, q_vector->name, q_vector); if (err) { - e_err(probe, "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + e_err(probe, "request_irq failed for MSIX interrupt '%s' " + "Error: %d\n", q_vector->name, err); goto free_queue_irqs; } +#ifdef HAVE_IRQ_AFFINITY_HINT /* If Flow Director is enabled, set interrupt affinity */ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { /* assign the mask for this irq */ irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); } +#endif /* HAVE_IRQ_AFFINITY_HINT */ } err = request_irq(adapter->msix_entries[vector].vector, @@ -3268,13 +3374,15 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) goto free_queue_irqs; } - return 0; + return IXGBE_SUCCESS; free_queue_irqs: while (vector) { vector--; +#ifdef HAVE_IRQ_AFFINITY_HINT irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); +#endif free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } @@ -3290,7 +3398,7 @@ free_queue_irqs: * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t ixgbe_intr(int irq, void *data) +static irqreturn_t ixgbe_intr(int __always_unused irq, void *data) { struct ixgbe_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; @@ -3324,18 +3432,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: - ixgbe_check_sfp_event(adapter, eicr); - /* Fall through */ case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: + if (eicr & IXGBE_EICR_ECC) { - e_info(link, "Received ECC Err, initiating reset\n"); + e_info(link, "Received unrecoverable ECC Err, " + "initiating reset.\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } + ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_overtemp_event(adapter, eicr); break; default: @@ -3343,8 +3452,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); +#ifdef HAVE_PTP_1588_CLOCK if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter); + ixgbe_ptp_check_pps_event(adapter); +#endif /* would disable interrupts here but EIAM disabled it */ napi_schedule_irqoff(&q_vector->napi); @@ -3374,10 +3485,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) if
(adapter->flags & IXGBE_FLAG_MSIX_ENABLED) err = ixgbe_request_msix_irqs(adapter); else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) - err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, netdev->name, adapter); else - err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, netdev->name, adapter); if (err) @@ -3406,9 +3517,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) if (!q_vector->rx.ring && !q_vector->tx.ring) continue; +#ifdef HAVE_IRQ_AFFINITY_HINT /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(entry->vector, NULL); +#endif free_irq(entry->vector, q_vector); } @@ -3429,7 +3542,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3483,23 +3596,33 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, u32 txdctl = IXGBE_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; +#ifdef HAVE_AF_XDP_ZC_SUPPORT ring->xsk_umem = NULL; if (ring_is_xdp(ring)) ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); - +#endif /* disable queue to avoid issues while updating state */ - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), - (tdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), tdba >> 32); IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_tx_desc)); + + /* disable head writeback */ + IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(reg_idx), 0); + + /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + /* * set WTHRESH to encourage burst writeback, it should not be set * higher than 1 when: @@ -3511,15 +3634,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * currently 40.
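* (Editor illustration, hedged: the test below therefore programs
* WTHRESH = 1, txdctl |= 1 << 16, whenever the vector can exceed roughly
* 100k interrupts/sec or has no ITR attached, and WTHRESH = 8 otherwise
* to batch descriptor writebacks.)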
*/ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) - txdctl |= 1u << 16; /* WTHRESH = 1 */ + txdctl |= (1 << 16); /* WTHRESH = 1 */ else - txdctl |= 8u << 16; /* WTHRESH = 8 */ + txdctl |= (8 << 16); /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ - txdctl |= (1u << 8) | /* HTHRESH = 1 */ + txdctl |= (1 << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ @@ -3536,7 +3659,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, struct ixgbe_q_vector *q_vector = ring->q_vector; if (q_vector) - netif_set_xps_queue(ring->netdev, + netif_set_xps_queue(adapter->netdev, &q_vector->affinity_mask, ring->queue_index); } @@ -3557,7 +3680,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, /* poll to verify queue is enabled */ do { - usleep_range(1000, 2000); + msleep(1); txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) @@ -3568,7 +3691,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 rttdcs, mtqc; - u8 tcs = adapter->hw_tcs; + u8 tcs = netdev_get_num_tc(adapter->netdev); if (hw->mac.type == ixgbe_mac_82598EB) return; @@ -3579,7 +3702,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); /* set transmit pool layout */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { mtqc = IXGBE_MTQC_VT_ENA; if (tcs > 4) mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; @@ -3631,6 +3754,13 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) u32 dmatxctl; u32 i; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_tx_queues > 1) + adapter->netdev->features |= NETIF_F_MULTI_QUEUE; + else + adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; + +#endif ixgbe_setup_mtqc(adapter); if (hw->mac.type != ixgbe_mac_82598EB) { @@ -3671,18 +3801,16 @@ static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -#ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) -#else -static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) -#endif { int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; +#ifdef HAVE_DCBNL_IEEE if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); +#endif /* * We should set the drop enable bit if: * SR-IOV is enabled @@ -3702,8 +3830,6 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) } } -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 - static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { @@ -3714,17 +3840,30 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, if (hw->mac.type == ixgbe_mac_82598EB) { u16 mask = adapter->ring_feature[RING_F_RSS].mask; + /* program one srrctl register per VMDq index */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + mask = adapter->ring_feature[RING_F_VMDQ].mask; + /* * if VMDq is not active we must program one srrctl register * per RSS queue since we have enabled RDRXCTL.MVMEN */ reg_idx &= mask; + + /* divide by the first bit of the mask to get the indices */ + if (reg_idx) + reg_idx /= ((~mask) + 1) & mask; } /* configure header buffer length, needed for RSC */ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + 
srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#else +#ifdef HAVE_AF_XDP_ZC_SUPPORT if (rx_ring->xsk_umem) { u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr - XDP_PACKET_HEADROOM; @@ -3742,10 +3881,14 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, else srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { +#else + if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } else { srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -3858,20 +4001,16 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { - u16 pool = adapter->num_rx_pools; - vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; - if ((i & 3) != 3) - continue; - - while (pool--) - IXGBE_WRITE_REG(hw, - IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), + if ((i & 3) == 3) { + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), vfreta); - vfreta = 0; + vfreta = 0; + } } } @@ -3908,17 +4047,13 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; int i, j; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) { - u16 pool = adapter->num_rx_pools; - - while (pool--) - IXGBE_WRITE_REG(hw, - IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), - *(adapter->rss_key + i)); - } + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), + *(adapter->rss_key + i)); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { @@ -3931,11 +4066,13 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) ixgbe_store_vfreta(adapter); } + static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 mrqc = 0, rss_field = 0, vfmrqc = 0; u32 rxcsum; + u32 mrqc = 0, rss_field = 0; + u32 vfmrqc = 0; /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); @@ -3946,9 +4083,9 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->ring_feature[RING_F_RSS].mask) mrqc = IXGBE_MRQC_RSSEN; } else { - u8 tcs = adapter->hw_tcs; + u8 tcs = netdev_get_num_tc(adapter->netdev); - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { if (tcs > 4) mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ else if (tcs > 1) @@ -3958,12 +4095,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) mrqc = IXGBE_MRQC_VMDQRSS32EN; else mrqc = IXGBE_MRQC_VMDQRSS64EN; - - /* Enable L3/L4 for Tx Switched packets only for X550, - * older devices do not support this feature - */ - if (hw->mac.type >= ixgbe_mac_X550) - mrqc |= IXGBE_MRQC_L3L4TXSWEN; } else { if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; @@ -3972,10 +4103,13 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) else mrqc = IXGBE_MRQC_RSSEN; } + + /* Enable L3/L4 for Tx Switched packets */ + mrqc |= IXGBE_MRQC_L3L4TXSWEN; } /* Perform hash on these packet types */ - rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 
| + rss_field = IXGBE_MRQC_RSS_FIELD_IPV4 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | IXGBE_MRQC_RSS_FIELD_IPV6 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; @@ -3987,7 +4121,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { - u16 pool = adapter->num_rx_pools; + unsigned int pf_pool = adapter->num_vfs; /* Enable VF RSS mode */ mrqc |= IXGBE_MRQC_MULTIPLE_RSS; @@ -3997,11 +4131,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; - - while (pool--) - IXGBE_WRITE_REG(hw, - IXGBE_PFVFMRQC(VMDQ_P(pool)), - vfmrqc); + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); } else { ixgbe_setup_reta(adapter); mrqc |= rss_field; @@ -4010,12 +4140,31 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) } /** - * ixgbe_configure_rscctl - enable RSC for the indicated ring + * ixgbe_clear_rscctl - disable RSC for the indicated ring * @adapter: address of board private structure * @ring: structure containing ring specific data **/ -static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl &= ~IXGBE_RSCCTL_RSCEN; + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); + + clear_ring_rsc_enabled(ring); +} + +/** + * ixgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u32 rscctrl; @@ -4031,11 +4180,27 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, * total size of max desc * buf_len is not greater * than 65536 */ +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#if (MAX_SKB_FRAGS >= 16) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (MAX_SKB_FRAGS >= 8) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +#elif (MAX_SKB_FRAGS >= 4) + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#else + rscctrl |= IXGBE_RSCCTL_MAXDESC_1; +#endif +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + if (ring->rx_buf_len <= IXGBE_RXBUFFER_4K) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + else if (ring->rx_buf_len <= IXGBE_RXBUFFER_8K) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; + else + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } -#define IXGBE_MAX_RX_DESC_POLL 10 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { @@ -4044,7 +4209,7 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && @@ -4052,13 +4217,13 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, return; do { - usleep_range(1000, 2000); + msleep(1); rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " - "the polling period\n", reg_idx); + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set 
within the polling period\n", reg_idx); } } @@ -4071,6 +4236,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; +#ifdef HAVE_AF_XDP_ZC_SUPPORT xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); if (ring->xsk_umem) { @@ -4084,6 +4250,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, MEM_TYPE_PAGE_SHARED, NULL)); } +#endif /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -4092,21 +4259,31 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), rdba >> 32); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); /* Force flushing of IXGBE_RDLEN to prevent MDD */ IXGBE_WRITE_FLUSH(hw); + /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + ring->next_to_alloc = 0; +#endif + ixgbe_configure_srrctl(adapter, ring); + /* In ESX, RSCCTL configuration is done on demand */ ixgbe_configure_rscctl(adapter, ring); - if (hw->mac.type == ixgbe_mac_82598EB) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: /* * enable cache line friendly hardware writes: * PTHRESH=32 descriptors (half the internal cache), @@ -4116,23 +4293,34 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT #if (PAGE_SIZE < 8192) - /* RXDCTL.RLPML does not work on 82599 */ - } else if (hw->mac.type != ixgbe_mac_82599EB) { rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN); - /* Limit the maximum frame size so we don't overrun the skb. - * This can happen in SRIOV mode when the MTU of the VF is - * higher than the MTU of the PF.
- */ + /* Limit the maximum frame size so we don't overrun the skb */ if (ring_uses_build_skb(ring) && !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | IXGBE_RXDCTL_RLPML_EN; #endif +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + /* If operating in IOV mode set RLPML */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + break; + rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + break; + default: + break; } +#ifdef HAVE_AF_XDP_ZC_SUPPORT if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) { u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr - XDP_PACKET_HEADROOM; @@ -4144,6 +4332,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ring->rx_buf_len = xsk_buf_len; } +#endif /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, sizeof(struct ixgbe_rx_buffer) * ring->count); @@ -4157,17 +4346,21 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef HAVE_AF_XDP_ZC_SUPPORT if (ring->xsk_umem) ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring)); else ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +#else + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ } static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int rss_i = adapter->ring_feature[RING_F_RSS].indices; - u16 pool = adapter->num_rx_pools; + int p; /* PSRTYPE must be initialized in non 82598 adapters */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | @@ -4184,51 +4377,129 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) else if (rss_i > 1) psrtype |= 1u << 29; - while (pool--) - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); + for (p = 0; p < adapter->num_rx_pools; p++) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); +} + +/** + * ixgbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter: the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings required to switch bridge modes. + **/ +static void ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int p; + u32 vmdctl; + + if (adapter->flags & IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing.
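+ * (Editor's summary, hedged: in VEPA mode all VF traffic is hairpinned
+ * through the external switch, so Tx loopback stays off while replication
+ * and source pruning are on; the VEB branch below inverts each of those
+ * settings so VF-to-VF traffic is switched internally.)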
+ */ + for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + } else { + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + } + } static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u16 pool = adapter->num_rx_pools; u32 reg_offset, vf_shift, vmolr; u32 gcr_ext, vmdctl; int i; - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return; - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; - vmdctl |= IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl |= IXGBE_VT_CTL_VT_ENABLE; + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; + if (adapter->num_vfs) + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - /* accept untagged packets until a vlan tag is - * specifically set for the VMDQ queue/pool - */ - vmolr = IXGBE_VMOLR_AUPE; - while (pool--) - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr); + for (i = 1; i < adapter->num_rx_pools; i++) { + int pool = VMDQ_P(i); - vf_shift = VMDQ_P(0) % 32; - reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + vmolr |= IXGBE_VMOLR_AUPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); + } - /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - if (adapter->bridge_mode == BRIDGE_MODE_VEB) - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; - /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ - hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + /* Enable only the PF pools for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - /* clear VLAN promisc flag so VFTA will be updated if necessary */ - adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + /* clear VLAN promisc flag so VFTA + * will be updated if necessary + */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + break; + default: + break; + } + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; /* * Set up VF register offsets for selected VT Mode, @@ -4248,15 +4519,21 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + /* configure default bridge settings */ + ixgbe_configure_bridge_mode(adapter); +#if IS_ENABLED(CONFIG_PCI_IOV) for (i = 0; i < adapter->num_vfs; i++) { /* configure spoof checking */ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, adapter->vfinfo[i].spoofchk_enabled); +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN /* Enable/Disable RSS query feature */ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, - adapter->vfinfo[i].rss_query_enabled); + adapter->vfinfo[i].rss_query_enabled); +#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ } +#endif /* CONFIG_PCI_IOV */ } static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) @@ -4267,14 +4544,25 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) struct ixgbe_ring *rx_ring; int i; u32 mhadd, hlreg0; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + int rx_buf_len; +#endif -#ifdef IXGBE_FCOE + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + max_frame += IXGBE_TS_HDR_LEN; + default: + break; + } + +#if IS_ENABLED(CONFIG_FCOE) /* adjust max frame to be able to do baby jumbo for FCoE */ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; - -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ /* adjust max frame to be at least the size of a standard frame */ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) @@ -4288,8 +4576,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && + (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) { + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. 
+ */ + } else if (max_frame <= IXGBE_RXBUFFER_3K) { + rx_buf_len = IXGBE_RXBUFFER_3K; + } else if (max_frame <= IXGBE_RXBUFFER_7K) { + rx_buf_len = IXGBE_RXBUFFER_7K; + } else if (max_frame <= IXGBE_RXBUFFER_15K) { + rx_buf_len = IXGBE_RXBUFFER_15K; + } else { + rx_buf_len = IXGBE_MAX_RXBUFFER; + } + +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + /* set jumbo enable since MHADD.MFS is keeping size locked at + * max_frame + */ hlreg0 |= IXGBE_HLREG0_JUMBOEN; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -4301,14 +4613,18 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) rx_ring = adapter->rx_ring[i]; clear_ring_rsc_enabled(rx_ring); - clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#if IS_ENABLED(CONFIG_FCOE) + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) continue; @@ -4323,6 +4639,20 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); #endif +#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ + + adapter->flags2 |= IXGBE_FLAG2_RX_LEGACY; +#endif /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + + rx_ring->rx_buf_len = rx_buf_len; +#if IS_ENABLED(CONFIG_FCOE) + + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state) && + (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ } } @@ -4332,22 +4662,9 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { - case ixgbe_mac_82598EB: - /* - * For VMDq support of different descriptor types or - * buffer sizes through the use of multiple SRRCTL - * registers, RDRXCTL.MVMEN must be set to 1 - * - * also, the manual doesn't mention it clearly but DCA hints - * will only use queue 0's tags unless this bit is set. Side - * effects of setting this bit are only that SRRCTL must be - * fully programmed [0..15] - */ - rdrxctl |= IXGBE_RDRXCTL_MVMEN; - break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; /* fall through */ @@ -4361,6 +4678,19 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; break; + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. 
Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; default: /* We should do nothing since we don't know this hardware */ return; @@ -4382,7 +4712,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 rxctrl, rfctl; /* disable receives while setting up the descriptors */ - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); @@ -4420,22 +4750,73 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) hw->mac.ops.enable_rx_dma(hw, rxctrl); } +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ +static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); /* add VID to filter table */ - if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); + if (hw->mac.ops.set_vfta) { +#ifndef HAVE_VLAN_RX_REGISTER + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); +#endif - set_bit(vid, adapter->active_vlans); + if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, pool_ndx, true, !!vid); + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && + adapter->hw.mac.type != ixgbe_mac_82598EB) { + int i; + + /* enable vlan id for all pools */ + for (i = 1; i < adapter->num_rx_pools; i++) + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), true, +#ifdef HAVE_VLAN_RX_REGISTER + false); +#else + true); +#endif + } + } +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* + * Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. + */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID return 0; +#endif } +#if defined(HAVE_VLAN_RX_REGISTER) && defined(CONFIG_PCI_IOV) +int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +#else static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +#endif { u32 vlvf; int idx; @@ -4468,7 +4849,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) * entry other than the PF. */ word = idx * 2 + (VMDQ_P(0) / 32); - bits = ~BIT(VMDQ_P(0) % 32); + bits = ~(1 << (VMDQ_P(0)) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* Disable the filter so this falls into the default pool. 
*/ @@ -4479,31 +4860,78 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) } } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + /* User is not allowed to remove vlan ID 0 */ + if (!vid) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#else + return; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); + +#endif /* HAVE_VLAN_RX_REGISTER */ /* remove VID from filter table */ - if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); + if (hw->mac.ops.set_vfta) { + if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, pool_ndx, false, true); + + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && + adapter->hw.mac.type != ixgbe_mac_82598EB) { + int i; + + /* remove vlan id from all pools */ + for (i = 1; i < adapter->num_rx_pools; i++) + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), false, + true); + } + } +#ifndef HAVE_VLAN_RX_REGISTER clear_bit(vid, adapter->active_vlans); - +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID return 0; +#endif } +#ifdef HAVE_8021P_SUPPORT /** - * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping + * ixgbe_vlan_strip_disable - helper to disable vlan tag stripping * @adapter: driver data */ -static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl; int i, j; + /* leave vlan tag stripping enabled for DCB */ + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return; + switch (hw->mac.type) { case ixgbe_mac_82598EB: vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); @@ -4514,13 +4942,10 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; - if (!netif_is_ixgbe(ring->netdev)) - continue; - j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl &= ~IXGBE_RXDCTL_VME; @@ -4532,11 +4957,12 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) } } +#endif /* HAVE_8021P_SUPPORT */ /** - * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * ixgbe_vlan_strip_enable - helper to enable vlan tag stripping * @adapter: driver data */ -static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl; @@ -4552,13 +4978,10 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct 
ixgbe_ring *ring = adapter->rx_ring[i]; - if (!netif_is_ixgbe(ring->netdev)) - continue; - j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl |= IXGBE_RXDCTL_VME; @@ -4570,6 +4993,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) } } +#ifndef HAVE_VLAN_RX_REGISTER static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -4578,7 +5002,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ + /* we need to keep the VLAN filter on in SRIOV */ vlnctrl |= IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); } else { @@ -4603,7 +5027,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); - vlvfb |= BIT(VMDQ_P(0) % 32); + vlvfb |= 1 << (VMDQ_P(0) % 32); IXGBE_WRITE_REG(hw, reg_offset, vlvfb); } @@ -4633,7 +5057,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) if (vlvf) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4642,7 +5066,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) /* remove PF from the pool */ word = i * 2 + VMDQ_P(0) / 32; - bits = ~BIT(VMDQ_P(0) % 32); + bits = ~(1 << (VMDQ_P(0) % 32)); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); } @@ -4664,7 +5088,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl, i; - /* Set VLAN filtering to enabled */ + /* configure vlan filtering */ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); vlnctrl |= IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); @@ -4683,15 +5107,135 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) ixgbe_scrub_vfta(adapter, i); } +#endif /* HAVE_VLAN_RX_REGISTER */ + +#ifdef HAVE_VLAN_RX_REGISTER +static void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) +#else +void ixgbe_vlan_mode(struct net_device *netdev, u32 features) +#endif +{ +#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif +#ifdef HAVE_8021P_SUPPORT + bool enable; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_disable(adapter); + + adapter->vlgrp = grp; + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); +#endif +#ifdef HAVE_8021P_SUPPORT +#ifdef HAVE_VLAN_RX_REGISTER + enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED)); +#else +#ifdef NETIF_F_HW_VLAN_CTAG_RX + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); +#else + enable = !!(features & NETIF_F_HW_VLAN_RX); +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_VLAN_RX_REGISTER */ + if (enable) + /* enable VLAN tag insert/strip */ + ixgbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + ixgbe_vlan_strip_disable(adapter); + +#endif /* HAVE_8021P_SUPPORT */ +} static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { u16 vid = 1; +#ifdef 
HAVE_VLAN_RX_REGISTER
+	ixgbe_vlan_mode(adapter->netdev, adapter->vlgrp);
+
+	/*
+	 * add vlan ID 0 and enable vlan tag stripping so we
+	 * always accept priority-tagged traffic
+	 */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+#else
+	ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
+#endif
+#ifndef HAVE_8021P_SUPPORT
+	ixgbe_vlan_strip_enable(adapter);
+#endif
+
+	if (adapter->vlgrp) {
+		for (; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+			ixgbe_vlan_rx_add_vid(adapter->netdev,
+					      htons(ETH_P_8021Q), vid);
+#else
+			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif
+		}
+	}
+#else /* !HAVE_VLAN_RX_REGISTER */
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+#else
+	ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
+#endif
 
 	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
+#else
+		ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif
+#endif /* HAVE_VLAN_RX_REGISTER */
+}
+
+#endif
+static u8 *ixgbe_addr_list_itr(struct ixgbe_hw __maybe_unused *hw, u8 **mc_addr_ptr, u32 *vmdq)
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	struct netdev_hw_addr *mc_ptr;
+#else
+	struct dev_mc_list *mc_ptr;
+#endif
+#ifdef CONFIG_PCI_IOV
+	struct ixgbe_adapter *adapter = hw->back;
+#endif /* CONFIG_PCI_IOV */
+	u8 *addr = *mc_addr_ptr;
+
+	/* VMDQ_P implicitly uses the adapter struct when CONFIG_PCI_IOV is
+	 * defined, so we have to wrap the pointer above correctly to prevent
+	 * a warning.
+	 */
+	*vmdq = VMDQ_P(0);
+
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+		*mc_addr_ptr = ha->addr;
+	}
+#else
+	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+	if (mc_ptr->next)
+		*mc_addr_ptr = mc_ptr->next->dmi_addr;
+#endif
+	else
+		*mc_addr_ptr = NULL;
+
+	return addr;
+}
 
 /**
@@ -4703,24 +5247,44 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  * 0 on no addresses written
  * X on writing X addresses to MTA
  **/
-static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+int ixgbe_write_mc_addr_list(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	struct netdev_hw_addr *ha;
+#endif
+	u8 *addr_list = NULL;
+	int addr_count = 0;
+
+	if (!hw->mac.ops.update_mc_addr_list)
+		return -ENOMEM;
 
 	if (!netif_running(netdev))
 		return 0;
 
-	if (hw->mac.ops.update_mc_addr_list)
-		hw->mac.ops.update_mc_addr_list(hw, netdev);
-	else
-		return -ENOMEM;
-
 #ifdef CONFIG_PCI_IOV
 	ixgbe_restore_vf_multicasts(adapter);
 #endif
 
-	return netdev_mc_count(netdev);
+	if (netdev_mc_empty(netdev)) {
+		hw->mac.ops.update_mc_addr_list(hw, NULL, 0,
+						ixgbe_addr_list_itr, false);
+	} else {
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+		ha = list_first_entry(&netdev->mc.list,
+				      struct netdev_hw_addr, list);
+		addr_list = ha->addr;
+#else
+		addr_list = netdev->mc_list->dmi_addr;
+#endif
+		addr_count = netdev_mc_count(netdev);
+
+		hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+						ixgbe_addr_list_itr, false);
+	}
+
+	return addr_count;
 }
 
 #ifdef CONFIG_PCI_IOV
@@ -4742,8 +5306,8 @@ void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
 			hw->mac.ops.clear_rar(hw, i);
 		}
 	}
-
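The ixgbe_addr_list_itr() walker above hands hw->mac.ops.update_mc_addr_list() one MAC address per call, recovering the containing list node from a bare address pointer and advancing to the next entry. A minimal standalone sketch of that contract, with simplified, assumed types (not the kernel's netdev_hw_addr/dev_mc_list structures):

#include <stddef.h>
#include <stdio.h>

struct mc_entry {
	unsigned char addr[6];
	struct mc_entry *next;
};

/* Return the current address and advance *cursor to the next entry's
 * address, or NULL at the end of the list (a poor man's container_of).
 */
static unsigned char *mc_list_itr(unsigned char **cursor)
{
	unsigned char *addr = *cursor;
	struct mc_entry *e;

	if (!addr)
		return NULL;
	e = (struct mc_entry *)((char *)addr - offsetof(struct mc_entry, addr));
	*cursor = e->next ? e->next->addr : NULL;
	return addr;
}

int main(void)
{
	struct mc_entry b = { { 0x33, 0x33, 0, 0, 0, 2 }, NULL };
	struct mc_entry a = { { 0x01, 0x00, 0x5e, 0, 0, 1 }, &b };
	unsigned char *cursor = a.addr, *p;

	while ((p = mc_list_itr(&cursor)))
		printf("first octets: %02x:%02x\n", p[0], p[1]);
	return 0;
}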
#endif + static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; @@ -4766,21 +5330,7 @@ static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) } } -static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - mac_table->state |= IXGBE_MAC_STATE_MODIFIED; - mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; - } - - ixgbe_sync_mac_table(adapter); -} - -static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) +int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; @@ -4809,7 +5359,7 @@ static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; - memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); + ether_addr_copy(mac_table->addr, hw->mac.addr); mac_table->pool = VMDQ_P(0); mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; @@ -4829,8 +5379,9 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, return -EINVAL; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { continue; + } ether_addr_copy(mac_table->addr, addr); mac_table->pool = pool; @@ -4846,6 +5397,20 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } +static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + } + + ixgbe_sync_mac_table(adapter); +} + int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, const u8 *addr, u16 pool) { @@ -4856,7 +5421,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, if (is_zero_ether_addr(addr)) return -EINVAL; - /* search table for addr, if found clear IN_USE flag and sync */ + /* search table for addr, if found clear IN USE flag and sync */ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { /* we can only delete an entry if it is in use */ if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) @@ -4879,6 +5444,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } +#ifdef HAVE_SET_RX_MODE static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -4898,6 +5464,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) return 0; } +#endif /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4912,33 +5479,62 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + u32 vlnctrl; +#endif +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) netdev_features_t features = netdev->features; +#endif int count; /* Check for Promiscuous and All Multicast modes */ fctrl = 
IXGBE_READ_REG(hw, IXGBE_FCTRL); +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +#endif /* set all bits that we expect to always be set */ - fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ fctrl |= IXGBE_FCTRL_BAM; fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ fctrl |= IXGBE_FCTRL_PMCF; /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); +#endif if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; +#ifdef HAVE_VLAN_RX_REGISTER + /* Only disable hardware filter vlans in promiscuous mode + * if SR-IOV and VMDQ are disabled - otherwise ensure + * that hardware VLAN filters remain enabled. + */ + if ((adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED))) + vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); +#endif +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + features &= ~NETIF_F_HW_VLAN_FILTER; +#endif } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } hw->addr_ctrl.user_set_promisc = false; +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + /* enable hardware vlan filtering */ + vlnctrl |= IXGBE_VLNCTRL_VFE; +#endif } +#ifdef HAVE_SET_RX_MODE /* * Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable @@ -4949,7 +5545,9 @@ void ixgbe_set_rx_mode(struct net_device *netdev) vmolr |= IXGBE_VMOLR_ROPE; } - /* Write addresses to the MTA, if the attempt fails +#endif + /* + * Write addresses to the MTA, if the attempt fails * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ @@ -4968,48 +5566,102 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); } - /* This is useful for sniffing bad packets. 
*/
-	if (features & NETIF_F_RXALL) {
-		/* UPE and MPE will be handled by normal PROMISC logic
-		 * in e1000e_set_rx_mode */
-		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
-			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
-			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
-
-		fctrl &= ~(IXGBE_FCTRL_DPF);
-		/* NOTE: VLAN filtering is disabled by setting PROMISC */
-	}
-
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
+#ifdef HAVE_8021P_SUPPORT
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+	if (features & NETIF_F_HW_VLAN_RX)
+#endif
 		ixgbe_vlan_strip_enable(adapter);
 	else
 		ixgbe_vlan_strip_disable(adapter);
+#endif /* HAVE_8021P_SUPPORT */
 
+#if defined(NETIF_F_HW_VLAN_CTAG_FILTER)
 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
 		ixgbe_vlan_promisc_disable(adapter);
 	else
 		ixgbe_vlan_promisc_enable(adapter);
+#elif defined(NETIF_F_HW_VLAN_FILTER) && !defined(HAVE_VLAN_RX_REGISTER)
+	if (features & NETIF_F_HW_VLAN_FILTER)
+		ixgbe_vlan_promisc_disable(adapter);
+	else
+		ixgbe_vlan_promisc_enable(adapter);
+#elif defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55)
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_q_vector *q_vector;
 	int q_idx;
 
-	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
-		napi_enable(&adapter->q_vector[q_idx]->napi);
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+#ifdef HAVE_NDO_BUSY_POLL
+		ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
+		napi_enable(&q_vector->napi);
+	}
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_q_vector *q_vector;
 	int q_idx;
 
-	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
-		napi_disable(&adapter->q_vector[q_idx]->napi);
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+		napi_disable(&q_vector->napi);
+#ifdef HAVE_NDO_BUSY_POLL
+		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
+			pr_info("QV %d locked\n", q_idx);
+			usleep_range(1000, 20000);
+		}
+#endif
+	}
 }
 
-static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
+#ifdef HAVE_DCBNL_IEEE
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+{
+	__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+	__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+	int i;
+
+	/* naively give each TC a bwg to map onto CEE hardware */
+	__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+	/* Map TSA onto CEE prio type */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			prio_type[i] = 2;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			prio_type[i] = 0;
+			break;
+		default:
+			/* Hardware only supports priority strict or
+			 * ETS transmission selection algorithms; if
+			 * we receive some other value from dcbnl,
+			 * throw an error
+			 */
+			return -EINVAL;
+		}
+	}
+
+	ixgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame);
+	return ixgbe_dcb_hw_config(hw, refill, max,
+				   bwg_id, prio_type, ets->prio_tc);
+}
+#endif /* HAVE_DCBNL_IEEE */
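ixgbe_dcb_hw_ets() above is the IEEE-to-CEE translation point: dcbnl hands the driver an ieee_ets table, and the hardware is programmed with CEE-style refill/max credit tables. A rough standalone sketch of the proportional credit split, with an assumed refill budget (the real ixgbe_dcb_calculate_tc_credits() also enforces per-TC minimums derived from max_frame):

#include <stdio.h>

#define MAX_TCS 8

/* Split a refill budget across traffic classes in proportion to their
 * ETS bandwidth percentages (tc_tx_bw entries sum to 100).
 */
static void calc_tc_credits(const unsigned char tc_tx_bw[MAX_TCS],
			    unsigned short refill[MAX_TCS], int budget)
{
	int i;

	for (i = 0; i < MAX_TCS; i++)
		refill[i] = (unsigned short)(budget * tc_tx_bw[i] / 100);
}

int main(void)
{
	unsigned char bw[MAX_TCS] = { 50, 30, 10, 10, 0, 0, 0, 0 };
	unsigned short refill[MAX_TCS];
	int i;

	calc_tc_credits(bw, refill, 512);	/* assumed credit budget */
	for (i = 0; i < MAX_TCS; i++)
		printf("TC%d refill=%u\n", i, refill[i]);
	return 0;
}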
+
+#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD)
+void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vxlanctrl;
@@ -5023,53 +5675,127 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
 	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
 		adapter->vxlan_port = 0;
-
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
 	if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
 		adapter->geneve_port = 0;
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
+}
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */
+
+#ifdef NETIF_F_GSO_PARTIAL
+/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */
+#ifndef NETIF_F_GSO_IPXIP4
+#define NETIF_F_GSO_IPXIP4 0
+#endif
+#ifndef NETIF_F_GSO_IPXIP6
+#define NETIF_F_GSO_IPXIP6 0
+#endif
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				    NETIF_F_GSO_GRE_CSUM | \
+				    NETIF_F_GSO_IPXIP4 | \
+				    NETIF_F_GSO_IPXIP6 | \
+				    NETIF_F_GSO_UDP_TUNNEL | \
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#endif /* NETIF_F_GSO_PARTIAL */
+
+static inline unsigned long ixgbe_tso_features(void)
+{
+	unsigned long features = 0;
+
+#ifdef NETIF_F_TSO
+	features |= NETIF_F_TSO;
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_TSO6
+	features |= NETIF_F_TSO6;
+#endif /* NETIF_F_TSO6 */
+#ifdef NETIF_F_GSO_PARTIAL
+	features |= NETIF_F_GSO_PARTIAL | IXGBE_GSO_PARTIAL_FEATURES;
+#endif
+
+	return features;
+}
 
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_configure_dcb - Configure DCB hardware
+/*
+ * ixgbe_configure_dcb - Configure DCB hardware support
  * @adapter: ixgbe adapter struct
  *
- * This is called by the driver on open to configure the DCB hardware.
- * This is also called by the gennetlink interface when reconfiguring
- * the DCB state.
+ * Called when the driver opens or needs to reconfigure DCB related bits.
  */
 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* The following workaround for 82598EB was originally hidden inside a
+	 * kcompat definition of netif_set_gso_max_size. This workaround is
+	 * necessary as the 82598EB hardware does not support TSO and DCB
+	 * unless the stack TSO maximum segment size can be reduced. Older
+	 * kernels do not support the requisite interface, and thus need TSO
+	 * disabled if we want to support DCB.
+	 */
 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
-		if (hw->mac.type == ixgbe_mac_82598EB)
-			netif_set_gso_max_size(adapter->netdev, 65536);
+		if (hw->mac.type == ixgbe_mac_82598EB) {
+#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE
+			netif_set_gso_max_size(netdev, 65536);
+#else
+			/* We previously disabled TSO, so we should enable it
+			 * now. */
+			netdev->features |= ixgbe_tso_features();
+#ifdef NETIF_F_GSO_PARTIAL
+			netdev->gso_partial_features =
+				IXGBE_GSO_PARTIAL_FEATURES;
+#endif
+#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */
+		}
 		return;
 	}
 
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		netif_set_gso_max_size(adapter->netdev, 32768);
-
-#ifdef IXGBE_FCOE
-	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
-		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE
+		netif_set_gso_max_size(netdev, 32768);
+#else
+		/* Simply disable TSO since we cannot change the maximum
+		 * segment size.
*/ + netdev->features &= ~ixgbe_tso_features(); +#ifdef NETIF_F_GSO_PARTIAL + netdev->gso_partial_features = 0; #endif +#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ + } - /* reconfigure the hardware */ - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_RX_CONFIG); - ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); - } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { - ixgbe_dcb_hw_ets(&adapter->hw, - adapter->ixgbe_ieee_ets, - max_frame); - ixgbe_dcb_hw_pfc_config(&adapter->hw, - adapter->ixgbe_ieee_pfc->pfc_en, - adapter->ixgbe_ieee_ets->prio_tc); +#if IS_ENABLED(CONFIG_FCOE) + if (netdev->features & NETIF_F_FCOE_MTU) + max_frame = max_t(int, max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + +#ifdef HAVE_DCBNL_IEEE + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { + if (adapter->ixgbe_ieee_ets) + ixgbe_dcb_hw_ets(&adapter->hw, + adapter->ixgbe_ieee_ets, + max_frame); + + if (adapter->ixgbe_ieee_pfc && adapter->ixgbe_ieee_ets) { + struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc; + u8 *tc = adapter->ixgbe_ieee_ets->prio_tc; + + ixgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc); + } + } else +#endif /* HAVE_DCBNL_IEEE */ + { + ixgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + IXGBE_DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + IXGBE_DCB_RX_CONFIG); + ixgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg); } /* Enable RSS Hash per TC */ @@ -5086,16 +5812,149 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); } } -#endif +#ifndef IXGBE_NO_LLI +static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter) +{ + u16 port; + + if (adapter->lli_etype) { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_BP_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), + (adapter->lli_etype | IXGBE_ETQF_FILTER_EN)); + } + + if (adapter->lli_port) { + port = swab16(adapter->lli_port); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_BP_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), + (IXGBE_FTQF_POOL_MASK_EN | + (IXGBE_FTQF_PRIORITY_MASK << + IXGBE_FTQF_PRIORITY_SHIFT) | + (IXGBE_FTQF_DEST_PORT_MASK << + IXGBE_FTQF_5TUPLE_MASK_SHIFT))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16)); + } + + if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_PSH_82599 | + IXGBE_IMIR_CTRL_SYN_82599 | + IXGBE_IMIR_CTRL_URG_82599 | + IXGBE_IMIR_CTRL_ACK_82599 | + IXGBE_IMIR_CTRL_RST_82599 | + IXGBE_IMIR_CTRL_FIN_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, + 0xfc000000); + break; + case ixgbe_mac_X540: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_PSH_82599)); + break; + default: + break; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), + (IXGBE_FTQF_POOL_MASK_EN | + (IXGBE_FTQF_PRIORITY_MASK << + IXGBE_FTQF_PRIORITY_SHIFT) | + (IXGBE_FTQF_5TUPLE_MASK_MASK << + IXGBE_FTQF_5TUPLE_MASK_SHIFT))); + + 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100);
+	}
+
+	if (adapter->lli_size) {
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
+				(IXGBE_IMIR_LLI_EN_82599 |
+				 IXGBE_IMIR_CTRL_BP_82599));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH,
+				adapter->lli_size);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
+				(IXGBE_FTQF_POOL_MASK_EN |
+				 (IXGBE_FTQF_PRIORITY_MASK <<
+				  IXGBE_FTQF_PRIORITY_SHIFT) |
+				 (IXGBE_FTQF_5TUPLE_MASK_MASK <<
+				  IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
+	}
+
+	if (adapter->lli_vlan_pri) {
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP,
+				(IXGBE_IMIRVP_PRIORITY_EN |
+				 adapter->lli_vlan_pri));
+	}
+}
+
+static void ixgbe_configure_lli(struct ixgbe_adapter *adapter)
+{
+	u16 port;
+
+	/* lli should only be enabled with MSI-X and MSI */
+	if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) &&
+	    !(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		return;
+	/* LLI not supported on X550 and X550EM_x */
+	if ((adapter->hw.mac.type == ixgbe_mac_X550) ||
+	    (adapter->hw.mac.type == ixgbe_mac_X550EM_x))
+		return;
+	/* LLI not supported on X550EM_a */
+	if (adapter->hw.mac.type == ixgbe_mac_X550EM_a)
+		return;
+	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+		ixgbe_configure_lli_82599(adapter);
+		return;
+	}
+
+	if (adapter->lli_port) {
+		/* use filter 0 for port */
+		port = swab16(adapter->lli_port);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0),
+				(port | IXGBE_IMIR_PORT_IM_EN));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0),
+				(IXGBE_IMIREXT_SIZE_BP |
+				 IXGBE_IMIREXT_CTRL_BP));
+	}
+
+	if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
+		/* use filter 1 for push flag */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1),
+				(IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1),
+				(IXGBE_IMIREXT_SIZE_BP |
+				 IXGBE_IMIREXT_CTRL_PSH));
+	}
+
+	if (adapter->lli_size) {
+		/* use filter 2 for size */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2),
+				(IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2),
+				(adapter->lli_size | IXGBE_IMIREXT_CTRL_BP));
+	}
+}
+
+#endif /* IXGBE_NO_LLI */
 
 /* Additional bittime to account for IXGBE framing */
 #define IXGBE_ETH_FRAMING 20
 
-/**
+/*
  * ixgbe_hpbthresh - calculate high water mark for flow control
  *
  * @adapter: board private structure to calculate for
- * @pb: packet buffer to calculate
+ * @pb - packet buffer to calculate
 */
 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 {
@@ -5107,20 +5966,20 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 	/* Calculate max LAN frame size */
 	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
 
-#ifdef IXGBE_FCOE
+#if IS_ENABLED(CONFIG_FCOE)
 	/* FCoE traffic class uses FCOE jumbo frames */
 	if ((dev->features & NETIF_F_FCOE_MTU) &&
 	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
-	    (pb == ixgbe_fcoe_get_tc(adapter)))
+	    (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
 		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-#endif
+#endif /* CONFIG_FCOE */
 
 	/* Calculate delay value for device */
 	switch (hw->mac.type) {
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
-	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_X550EM_a:
 		dv_id = IXGBE_DV_X540(link, tc);
 		break;
 	default:
@@ -5144,7 +6003,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 	 */
 	if (marker < 0) {
 		e_warn(drv, "Packet Buffer(%i) can not provide enough"
			    "headroom to support flow control."
 			    "Decrease MTU or number of traffic classes\n", pb);
 		marker = tc + 1;
 	}
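Both threshold helpers here share the same shape: convert a worst-case delay value in bit times into kilobytes and subtract it from the packet buffer size. A toy standalone version with made-up numbers, since the real IXGBE_DV()/IXGBE_LOW_DV() macros encode device-specific latencies:

#include <stdio.h>

/* bits -> kilobytes, rounded up (the IXGBE_BT2KB idea) */
static int bt2kb(int bits)
{
	return (bits + 8 * 1024 - 1) / (8 * 1024);
}

/* High water mark: packet buffer minus worst-case in-flight data,
 * falling back to one frame of headroom when the buffer is too small.
 */
static int high_water_kb(int rx_pba_kb, int delay_bits, int frame_kb)
{
	int marker = rx_pba_kb - bt2kb(delay_bits);

	return marker < 0 ? frame_kb + 1 : marker;
}

int main(void)
{
	/* assumed: 160 KB buffer, ~400 kbit delay value, 2 KB frames */
	printf("high water: %d KB\n", high_water_kb(160, 400000, 2));
	return 0;
}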
"Decrease MTU or number of traffic classes\n", pb); marker = tc + 1; } @@ -5152,13 +6011,13 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) return marker; } -/** +/* * ixgbe_lpbthresh - calculate low water mark for for flow control * * @adapter: board private structure to calculate for - * @pb: packet buffer to calculate + * @pb - packet buffer to calculate */ -static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) +static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int __maybe_unused pb) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *dev = adapter->netdev; @@ -5168,20 +6027,20 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate max LAN frame size */ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE traffic class uses FCOE jumbo frames */ if ((dev->features & NETIF_F_FCOE_MTU) && (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; -#endif +#endif /* CONFIG_FCOE */ /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -5199,12 +6058,13 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int num_tc = adapter->hw_tcs; + int num_tc = netdev_get_num_tc(adapter->netdev); int i; if (!num_tc) num_tc = 1; + for (i = 0; i < num_tc; i++) { hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); @@ -5214,7 +6074,7 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) hw->fc.low_water[i] = 0; } - for (; i < MAX_TRAFFIC_CLASS; i++) + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) hw->fc.high_water[i] = 0; } @@ -5222,7 +6082,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int hdrm; - u8 tc = adapter->hw_tcs; + u8 tc = netdev_get_num_tc(adapter->netdev); if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) @@ -5230,7 +6090,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) else hdrm = 0; - hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); ixgbe_pbthresh_setup(adapter); } @@ -5239,26 +6099,49 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; - u64 action; + u8 queue = 0; spin_lock(&adapter->fdir_perfect_lock); if (!hlist_empty(&adapter->fdir_filter_list)) - ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask, + adapter->cloud_mode); hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { - action = filter->action; - if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) - action = - (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + if (filter->action == IXGBE_FDIR_DROP_QUEUE) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && ring >= adapter->num_rx_queues) { + e_err(drv, + "FDIR restore failed w/o vf, ring:%u\n", + ring); + continue; + } else if 
(vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, + "FDIR restore failed vf:%hhu, ring:%u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[action]->reg_idx); + &filter->filter, + filter->sw_idx, + queue, + adapter->cloud_mode); } spin_unlock(&adapter->fdir_perfect_lock); @@ -5272,25 +6155,54 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) { u16 i = rx_ring->next_to_clean; struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + +#ifdef HAVE_AF_XDP_ZC_SUPPORT if (rx_ring->xsk_umem) { ixgbe_xsk_clean_rx_ring(rx_ring); goto skip_free; } +#endif /* Free all the Rx ring sk_buffs */ +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + while (i != rx_ring->next_to_use) { +#else while (i != rx_ring->next_to_alloc) { +#endif if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT if (IXGBE_CB(skb)->page_released) dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else IXGBE_RX_DMA_ATTR); +#endif +#else + /* We need to clean up RSC frag lists */ + skb = ixgbe_merge_active_tail(skb); + if (ixgbe_close_active_frag_list(skb)) + dma_unmap_single(rx_ring->dev, + IXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_CB(skb)->dma = 0; +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ dev_kfree_skb(skb); + rx_buffer->skb = NULL; } +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. 
*/ @@ -5304,10 +6216,23 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else IXGBE_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); - +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + if (rx_buffer->dma) { + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ i++; rx_buffer++; if (i == rx_ring->count) { @@ -5316,88 +6241,14 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) } } +#ifdef HAVE_AF_XDP_ZC_SUPPORT skip_free: +#endif +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; -} - -static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, - struct ixgbe_fwd_adapter *accel) -{ - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - int num_tc = netdev_get_num_tc(adapter->netdev); - struct net_device *vdev = accel->netdev; - int i, baseq, err; - - baseq = accel->pool * adapter->num_rx_queues_per_pool; - netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", - accel->pool, adapter->num_rx_pools, - baseq, baseq + adapter->num_rx_queues_per_pool); - - accel->rx_base_queue = baseq; - accel->tx_base_queue = baseq; - - /* record configuration for macvlan interface in vdev */ - for (i = 0; i < num_tc; i++) - netdev_bind_sb_channel_queue(adapter->netdev, vdev, - i, rss_i, baseq + (rss_i * i)); - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) - adapter->rx_ring[baseq + i]->netdev = vdev; - - /* Guarantee all rings are updated before we update the - * MAC address filter. - */ - wmb(); - - /* ixgbe_add_mac_filter will return an index if it succeeds, so we - * need to only treat it as an error value if it is negative. 
- */ - err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, - VMDQ_P(accel->pool)); - if (err >= 0) - return 0; - - /* if we cannot add the MAC rule then disable the offload */ - macvlan_release_l2fw_offload(vdev); - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) - adapter->rx_ring[baseq + i]->netdev = NULL; - - netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); - - /* unbind the queues and drop the subordinate channel config */ - netdev_unbind_sb_channel(adapter->netdev, vdev); - netdev_set_sb_channel(vdev, 0); - - clear_bit(accel->pool, adapter->fwd_bitmask); - kfree(accel); - - return err; -} - -static int ixgbe_macvlan_up(struct net_device *vdev, void *data) -{ - struct ixgbe_adapter *adapter = data; - struct ixgbe_fwd_adapter *accel; - - if (!netif_is_macvlan(vdev)) - return 0; - - accel = macvlan_accel_priv(vdev); - if (!accel) - return 0; - - ixgbe_fwd_ring_up(adapter, accel); - - return 0; -} - -static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) -{ - netdev_walk_all_upper_dev_rcu(adapter->netdev, - ixgbe_macvlan_up, adapter); +#endif } static void ixgbe_configure(struct ixgbe_adapter *adapter) @@ -5405,9 +6256,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; ixgbe_configure_pb(adapter); -#ifdef CONFIG_IXGBE_DCB ixgbe_configure_dcb(adapter); -#endif + /* * We must restore virtualization before VLANs or else * the VLVF registers will not be populated @@ -5415,50 +6265,73 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_virtualization(adapter); ixgbe_set_rx_mode(adapter->netdev); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ixgbe_restore_vlan(adapter); - ixgbe_ipsec_restore(adapter); +#endif - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - hw->mac.ops.disable_rx_buff(hw); - break; - default: - break; - } + if (adapter->hw.mac.type == ixgbe_mac_82599EB || + adapter->hw.mac.type == ixgbe_mac_X540) + hw->mac.ops.disable_sec_rx_path(hw); if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ixgbe_init_fdir_signature_82599(&adapter->hw, adapter->fdir_pballoc); } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { ixgbe_init_fdir_perfect_82599(&adapter->hw, - adapter->fdir_pballoc); + adapter->fdir_pballoc, adapter->cloud_mode); ixgbe_fdir_filter_restore(adapter); } - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - hw->mac.ops.enable_rx_buff(hw); - break; - default: - break; + if (adapter->hw.mac.type == ixgbe_mac_82599EB || + adapter->hw.mac.type == ixgbe_mac_X540) + hw->mac.ops.enable_sec_rx_path(hw); + + /* Enable EEE only when supported and enabled */ + if (hw->mac.ops.setup_eee && + (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { + bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); + + hw->mac.ops.setup_eee(hw, eee_enable); } -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) /* configure DCA */ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) ixgbe_setup_dca(adapter); -#endif /* CONFIG_IXGBE_DCA */ +#endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); - ixgbe_configure_dfwd(adapter); +} + +static bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->phy.type == ixgbe_phy_nl) + return true; + return false; + case 
ixgbe_mac_82599EB:
+		switch (hw->mac.ops.get_media_type(hw)) {
+		case ixgbe_media_type_fiber:
+		case ixgbe_media_type_fiber_qsfp:
+			return true;
+		default:
+			return false;
+		}
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_X550EM_a:
+		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
+			return true;
+		return false;
+	default:
+		return false;
+	}
+}
 
 /**
@@ -5468,7 +6341,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 {
 	/*
	 * We are assuming the worst case scenario here, and that
 	 * is that an SFP was inserted/removed after the reset
 	 * but before SFP detection was enabled. As such the best
 	 * solution is to just start searching as soon as we start
@@ -5490,27 +6363,72 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 {
 	u32 speed;
 	bool autoneg, link_up = false;
-	int ret = IXGBE_ERR_LINK_SETUP;
+	u32 ret = IXGBE_ERR_LINK_SETUP;
 
 	if (hw->mac.ops.check_link)
 		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
 	if (ret)
-		return ret;
+		goto link_cfg_out;
 
 	speed = hw->phy.autoneg_advertised;
-	if ((!speed) && (hw->mac.ops.get_link_capabilities))
+	if (!speed && hw->mac.ops.get_link_capabilities) {
 		ret = hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+		speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
+			   IXGBE_LINK_SPEED_2_5GB_FULL);
+	}
+
 	if (ret)
-		return ret;
+		goto link_cfg_out;
 
 	if (hw->mac.ops.setup_link)
 		ret = hw->mac.ops.setup_link(hw, speed, link_up);
-
+link_cfg_out:
 	return ret;
 }
 
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/ +static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + adapter->vfinfo[i].last_vfstats.gprc = + IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); + adapter->vfinfo[i].saved_rst_vfstats.gprc += + adapter->vfinfo[i].vfstats.gprc; + adapter->vfinfo[i].vfstats.gprc = 0; + adapter->vfinfo[i].last_vfstats.gptc = + IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); + adapter->vfinfo[i].saved_rst_vfstats.gptc += + adapter->vfinfo[i].vfstats.gptc; + adapter->vfinfo[i].vfstats.gptc = 0; + adapter->vfinfo[i].last_vfstats.gorc = + IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); + adapter->vfinfo[i].saved_rst_vfstats.gorc += + adapter->vfinfo[i].vfstats.gorc; + adapter->vfinfo[i].vfstats.gorc = 0; + adapter->vfinfo[i].last_vfstats.gotc = + IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); + adapter->vfinfo[i].saved_rst_vfstats.gotc += + adapter->vfinfo[i].vfstats.gotc; + adapter->vfinfo[i].vfstats.gotc = 0; + adapter->vfinfo[i].last_vfstats.mprc = + IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); + adapter->vfinfo[i].saved_rst_vfstats.mprc += + adapter->vfinfo[i].vfstats.mprc; + adapter->vfinfo[i].vfstats.mprc = 0; + } +} + static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -5532,7 +6450,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -5544,9 +6462,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); } - /* XXX: to interrupt immediately for EICS writes, enable this */ - /* gpie |= IXGBE_GPIE_EIMEN; */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { gpie &= ~IXGBE_GPIE_VTMODE_MASK; @@ -5564,26 +6479,25 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) } /* Enable Thermal over heat sensor interrupt */ - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP0_GPIEN_8259X; + gpie |= IXGBE_SDP0_GPIEN; break; default: break; } - } /* Enable fan failure interrupt */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - gpie |= IXGBE_SDP1_GPIEN(hw); + gpie |= IXGBE_SDP1_GPIEN; switch (hw->mac.type) { case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; + gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; break; case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: @@ -5610,17 +6524,18 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable the optics for 82599 SFP+ fiber */ if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); - - if (hw->phy.ops.set_phy_power) - hw->phy.ops.set_phy_power(hw, true); + ixgbe_set_phy_power(hw, true); smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); +#ifndef IXGBE_NO_LLI + ixgbe_configure_lli(adapter); +#endif if (ixgbe_is_sfp(hw)) { ixgbe_sfp_link_config(adapter); - } else { + } else if (!hw->phy.reset_disable) { err = ixgbe_non_sfp_link_config(hw); if (err) e_err(probe, "link_config FAILED %d\n", err); @@ -5640,12 +6555,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) e_crit(drv, "Fan has stopped, replace 
the adapter\n"); } + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; mod_timer(&adapter->service_timer, jiffies); + ixgbe_clear_vf_stats_counters(adapter); /* Set PF Reset Done bit so PF/VF Mail Ops can work */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; @@ -5656,7 +6575,11 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) { WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -5720,7 +6643,7 @@ static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter) return 32000ul; } -void ixgbe_disable_rx(struct ixgbe_adapter *adapter) +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter) { unsigned long wait_delay, delay_interval; struct ixgbe_hw *hw = &adapter->hw; @@ -5730,7 +6653,7 @@ void ixgbe_disable_rx(struct ixgbe_adapter *adapter) /* disable receives */ hw->mac.ops.disable_rx(hw); - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; /* disable all enabled Rx queues */ @@ -5792,14 +6715,14 @@ void ixgbe_disable_rx(struct ixgbe_adapter *adapter) "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); } -void ixgbe_disable_tx(struct ixgbe_adapter *adapter) +void ixgbe_disable_tx_queue(struct ixgbe_adapter *adapter) { unsigned long wait_delay, delay_interval; struct ixgbe_hw *hw = &adapter->hw; int i, wait_loop; u32 txdctl; - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; /* disable all enabled Tx queues */ @@ -5879,7 +6802,7 @@ dma_engine_disable: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5892,10 +6815,12 @@ dma_engine_disable: void ixgbe_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; +#ifdef HAVE_SET_RX_MODE struct net_device *netdev = adapter->netdev; +#endif int err; - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; /* lock SFP init bit to prevent race conditions with the watchdog */ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -5908,7 +6833,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) err = hw->mac.ops.init_hw(hw); switch (err) { - case 0: + case IXGBE_SUCCESS: case IXGBE_ERR_SFP_NOT_PRESENT: case IXGBE_ERR_SFP_NOT_SUPPORTED: break; @@ -5918,12 +6843,15 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated with " - "your hardware. If you are experiencing problems " - "please contact your Intel or hardware " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your Intel or hardware " "representative who provided you with this " "hardware.\n"); break; + case IXGBE_ERR_OVERTEMP: + e_crit(drv, "%s\n", ixgbe_overheat_msg); + break; default: e_dev_err("Hardware Error: %d\n", err); } @@ -5932,7 +6860,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) /* flush entries out of MAC table */ ixgbe_flush_sw_mac_table(adapter); +#ifdef HAVE_SET_RX_MODE __dev_uc_unsync(netdev, NULL); +#endif /* do not flush user set addresses */ ixgbe_mac_set_default_filter(adapter); @@ -5941,15 +6871,21 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) if (hw->mac.san_mac_rar_index) hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + +#ifdef HAVE_PTP_1588_CLOCK if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_reset(adapter); +#endif - if (hw->phy.ops.set_phy_power) { - if (!netif_running(adapter->netdev) && !adapter->wol) - hw->phy.ops.set_phy_power(hw, false); - else - hw->phy.ops.set_phy_power(hw, true); - } + if (!netif_running(adapter->netdev) && !adapter->wol) + ixgbe_set_phy_power(hw, false); + else + ixgbe_set_phy_power(hw, true); } /** @@ -5961,19 +6897,29 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) u16 i = tx_ring->next_to_clean; struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; +#ifdef HAVE_AF_XDP_ZC_SUPPORT if (tx_ring->xsk_umem) { ixgbe_xsk_clean_tx_ring(tx_ring); goto out; } +#endif while (i != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ +#ifdef HAVE_XDP_SUPPORT if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT xdp_return_frame(tx_buffer->xdpf); +#else + page_frag_free(tx_buffer->data); +#endif else dev_kfree_skb_any(tx_buffer->skb); +#else + dev_kfree_skb_any(tx_buffer->skb); +#endif /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -6017,7 +6963,10 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) if (!ring_is_xdp(tx_ring)) netdev_tx_reset_queue(txring_txq(tx_ring)); +#ifdef HAVE_AF_XDP_ZC_SUPPORT out: +#endif + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -6084,7 +7033,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_tx_disable(netdev); /* Disable Rx */ - ixgbe_disable_rx(adapter); + ixgbe_disable_rx_queue(adapter); /* synchronize_rcu() needed for pending XDP buffers to drain */ if (adapter->xdp_ring[0]) @@ -6094,8 +7043,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT); clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; del_timer_sync(&adapter->service_timer); @@ -6106,7 +7055,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* Mark all the VFs as inactive */ for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = false; + adapter->vfinfo[i].clear_to_send = 0; /* ping all the active vfs to let them know we are going down */ ixgbe_ping_all_vfs(adapter); @@ -6116,9 +7065,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /* disable transmits in the hardware now that interrupts are off */ - ixgbe_disable_tx(adapter); + ixgbe_disable_tx_queue(adapter); +#ifdef HAVE_PCI_ERS if 
(!pci_channel_offline(adapter->pdev)) +#endif ixgbe_reset(adapter); /* power down the optics for 82599 SFP+ fiber */ @@ -6131,9 +7082,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /** * ixgbe_eee_capable - helper function to determine EEE support on X550 - * @adapter: board private structure - */ -static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) + * @adapter: board private structure to initialize + **/ +static inline void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -6154,125 +7105,91 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) } } -/** - * ixgbe_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void ixgbe_tx_timeout(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - ixgbe_tx_timeout_reset(adapter); -} - -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; - struct tc_configuration *tc; - int j; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - case ixgbe_mac_82599EB: - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - default: - adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; - break; - } + struct ixgbe_dcb_tc_config *tc; + int j, bwg_pct; /* Configure DCB traffic classes */ - for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs; + for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[DCB_TX_CONFIG].bwg_id = 0; - tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); - tc->path[DCB_RX_CONFIG].bwg_id = 0; - tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); - tc->dcb_pfc = pfc_disabled; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + tc->pfc = ixgbe_dcb_pfc_disabled; } - /* Initialize default user to priority mapping, UPx->TC0 */ + /* reset back to TC 0 */ tc = &adapter->dcb_cfg.tc_config[0]; - tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + /* total of all TCs bandwidth needs to be 100 */ + bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal; adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.round_robin_enable = false; adapter->dcb_set_bitmap = 0x00; if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) adapter->dcbx_cap = DCB_CAP_DCBX_HOST | 
DCB_CAP_DCBX_VER_CEE; memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, sizeof(adapter->temp_dcb_cfg)); } -#endif +#endif /*CONFIG_DCB*/ /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize - * @ii: pointer to ixgbe_info for device * * ixgbe_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ -static int ixgbe_sw_init(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii) +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +#else +static int ixgbe_sw_init(struct ixgbe_adapter *adapter) +#endif { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - unsigned int rss, fdir; + int err; + unsigned int fdir; u32 fwsm; +#ifdef HAVE_TC_SETUP_CLSU32 int i; +#endif /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; - hw->revision_id = pdev->revision; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == IXGBE_FAILED_READ_CFG_BYTE && + ixgbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; - /* get_invariants needs the device IDs */ - ii->get_invariants(hw); - - /* Set common capability flags and settings */ - rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); - adapter->ring_feature[RING_F_RSS].limit = rss; - adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->max_q_vectors = MAX_Q_VECTORS_82599; - adapter->atr_sample_rate = 20; - fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); - adapter->ring_feature[RING_F_FDIR].limit = fdir; - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; - adapter->ring_feature[RING_F_VMDQ].limit = 1; -#ifdef CONFIG_IXGBE_DCA - adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; -#endif -#ifdef CONFIG_IXGBE_DCB - adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; -#endif -#ifdef IXGBE_FCOE - adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#ifdef CONFIG_IXGBE_DCB - /* Default traffic class to use for FCoE */ - adapter->fcoe.up = IXGBE_FCOE_DEFTC; -#endif /* CONFIG_IXGBE_DCB */ -#endif /* IXGBE_FCOE */ - + err = ixgbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } +#ifdef HAVE_TC_SETUP_CLSU32 /* initialize static ixgbe jump table entries */ adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), GFP_KERNEL); @@ -6282,50 +7199,80 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) adapter->jump_tables[i] = NULL; - - adapter->mac_table = kcalloc(hw->mac.num_rar_entries, - sizeof(struct ixgbe_mac_addr), +#endif /* HAVE_TC_SETUP_CLSU32 */ + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * + hw->mac.num_rar_entries, GFP_KERNEL); - if (!adapter->mac_table) - return -ENOMEM; + if (!adapter->mac_table) { + err = IXGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } - if (ixgbe_init_rss_key(adapter)) - return -ENOMEM; + if (ixgbe_init_rss_key(adapter)) { + err = IXGBE_ERR_OUT_OF_MEM; + e_err(probe, "rss_key allocation failed: %d\n", err); + goto out; + } - adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); - 
if (!adapter->af_xdp_zc_qps) - return -ENOMEM; + /* Set common capability flags and settings */ +#if IS_ENABLED(CONFIG_DCA) + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; +#endif +#if IS_ENABLED(CONFIG_DCB) + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +#endif +#if IS_ENABLED(CONFIG_FCOE) + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#if IS_ENABLED(CONFIG_DCB) + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFUP; + adapter->fcoe.up_set = IXGBE_FCOE_DEFUP; +#endif /* CONFIG_DCB */ +#endif /* CONFIG_FCOE */ + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: + adapter->flags |= IXGBE_FLAGS_82598_INIT; adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; - adapter->max_q_vectors = MAX_Q_VECTORS_82598; + adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; adapter->ring_feature[RING_F_FDIR].limit = 0; - adapter->atr_sample_rate = 0; - adapter->fdir_pballoc = 0; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) adapter->fcoe.up = 0; + adapter->fcoe.up_set = 0; #endif /* IXGBE_DCB */ -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ break; case ixgbe_mac_82599EB: + adapter->flags |= IXGBE_FLAGS_82599_INIT; if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; +#ifndef IXGBE_NO_SMART_SPEED + hw->phy.smart_speed = ixgbe_smart_speed_on; +#else + hw->phy.smart_speed = ixgbe_smart_speed_off; +#endif break; case ixgbe_mac_X540: - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + adapter->flags |= IXGBE_FLAGS_X540_INIT; + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: @@ -6335,67 +7282,78 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, default: break; } - /* fall through */ + /* fall through */ case ixgbe_mac_X550EM_x: -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; #endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) adapter->fcoe.up = 0; -#endif /* IXGBE_DCB */ -#endif /* IXGBE_FCOE */ - /* Fall Through */ + adapter->fcoe.up_set = 0; +#endif /* CONFIG_DCB */ +#endif /* CONFIG_FCOE */ + /* fall through */ case ixgbe_mac_X550: if (hw->mac.type == ixgbe_mac_X550) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -#ifdef CONFIG_IXGBE_DCA + ixgbe_set_eee_capable(adapter); + adapter->flags |= IXGBE_FLAGS_X550_INIT; +#if IS_ENABLED(CONFIG_DCA) adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; -#endif - adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; - break; +#endif /* CONFIG_DCA */ default: break; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE support exists, always init the FCoE lock */ spin_lock_init(&adapter->fcoe.lock); +#endif /* CONFIG_FCOE */ 
-#endif /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = 8; + adapter->dcb_cfg.num_tcs.pfc_tcs = 8; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + default: + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + break; + } ixgbe_init_dcb(adapter); -#endif - ixgbe_init_ipsec_offload(adapter); + +#endif /* CONFIG_DCB */ + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a || + hw->mac.type == ixgbe_mac_X540) + ixgbe_init_mbx_params_pf(hw); /* default flow control settings */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); - -#ifdef CONFIG_PCI_IOV - if (max_vfs > 0) - e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); - - /* assign number of SR-IOV VFs */ - if (hw->mac.type != ixgbe_mac_82598EB) { - if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { - max_vfs = 0; - e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); - } - } -#endif /* CONFIG_PCI_IOV */ - - /* enable itr by default in dynamic mode */ - adapter->rx_itr_setting = 1; - adapter->tx_itr_setting = 1; + hw->fc.disable_fc_autoneg = false; /* set default ring sizes */ adapter->tx_ring_count = IXGBE_DEFAULT_TXD; @@ -6404,17 +7362,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; - /* initialize eeprom parameters */ - if (ixgbe_init_eeprom_params_generic(hw)) { - e_dev_err("EEPROM initialization failed\n"); - return -EIO; - } - - /* PF holds first pool slot */ - set_bit(0, adapter->fwd_bitmask); set_bit(__IXGBE_DOWN, &adapter->state); - - return 0; +out: + return err; } /** @@ -6427,15 +7377,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; int orig_node = dev_to_node(dev); - int ring_node = NUMA_NO_NODE; + int node = -1; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; if (tx_ring->q_vector) - ring_node = tx_ring->q_vector->numa_node; + node = tx_ring->q_vector->node; - tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); + tx_ring->tx_buffer_info = vmalloc_node(size, node); if (!tx_ring->tx_buffer_info) tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) @@ -6445,7 +7395,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - set_dev_node(dev, ring_node); + set_dev_node(dev, node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, @@ -6457,8 +7407,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) if (!tx_ring->desc) goto err; - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; return 0; err: @@ -6483,6 +7431,7 @@ static int 
ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) int i, j = 0, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; @@ -6511,8 +7460,8 @@ err_setup_tx: /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: pointer to ixgbe_adapter - * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @adapter: board private structure + * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ @@ -6521,15 +7470,22 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); - int ring_node = NUMA_NO_NODE; + int node = -1; int size; +#ifdef HAVE_XDP_BUFF_RXQ +#ifdef HAVE_XDP_FRAME_STRUCT +#ifndef HAVE_AF_XDP_ZC_SUPPORT + int err; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_FRAME_STRUCT */ +#endif /* HAVE_XDP_BUFF_RXQ */ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; if (rx_ring->q_vector) - ring_node = rx_ring->q_vector->numa_node; + node = rx_ring->q_vector->node; - rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); + rx_ring->rx_buffer_info = vmalloc_node(size, node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) @@ -6539,7 +7495,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - set_dev_node(dev, ring_node); + set_dev_node(dev, node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, @@ -6551,14 +7507,24 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, if (!rx_ring->desc) goto err; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - +#ifdef HAVE_XDP_BUFF_RXQ /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, rx_ring->queue_index) < 0) goto err; +#ifndef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + goto err; + } +#endif /* HAVE_XDP_FRAME_STRUCT */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_BUFF_RXQ */ + rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -6592,7 +7558,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) goto err_setup_rx; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) err = ixgbe_setup_fcoe_ddp_resources(adapter); if (!err) #endif @@ -6623,7 +7589,6 @@ void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); - tx_ring->desc = NULL; } @@ -6638,8 +7603,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - if (adapter->tx_ring[i]->desc) - ixgbe_free_tx_resources(adapter->tx_ring[i]); + ixgbe_free_tx_resources(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) if (adapter->xdp_ring[i]->desc) ixgbe_free_tx_resources(adapter->xdp_ring[i]); @@ -6656,7 +7620,9 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) ixgbe_clean_rx_ring(rx_ring); rx_ring->xdp_prog = NULL; +#ifdef HAVE_XDP_BUFF_RXQ xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +#endif vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -6680,13 +7646,12 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) { int i; 
-#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) ixgbe_free_fcoe_ddp_resources(adapter); - #endif + for (i = 0; i < adapter->num_rx_queues; i++) - if (adapter->rx_ring[i]->desc) - ixgbe_free_rx_resources(adapter->rx_ring[i]); + ixgbe_free_rx_resources(adapter->rx_ring[i]); } /** @@ -6699,6 +7664,9 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +#endif if (adapter->xdp_prog) { int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + @@ -6715,6 +7683,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) } } +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + +#endif /* * For 82599EB we cannot allow legacy VFs to enable their receive * paths when MTU greater than 1500 is configured. So display a @@ -6722,7 +7696,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) */ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) +#else (new_mtu > ETH_DATA_LEN)) +#endif e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -6751,8 +7729,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) int ixgbe_open(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err, queues; + int err; /* disallow open during test */ if (test_bit(__IXGBE_TESTING, &adapter->state)) @@ -6777,31 +7754,40 @@ int ixgbe_open(struct net_device *netdev) goto err_req_irq; /* Notify the stack of the actual queue counts. */ - queues = adapter->num_tx_queues; - err = netif_set_real_num_tx_queues(netdev, queues); + err = netif_set_real_num_tx_queues(netdev, + adapter->num_rx_pools > 1 ? 1 : + adapter->num_tx_queues); if (err) goto err_set_queues; - queues = adapter->num_rx_queues; - err = netif_set_real_num_rx_queues(netdev, queues); + err = netif_set_real_num_rx_queues(netdev, + adapter->num_rx_pools > 1 ? 1 : + adapter->num_rx_queues); if (err) goto err_set_queues; +#ifdef HAVE_PTP_1588_CLOCK ixgbe_ptp_init(adapter); +#endif /* HAVE_PTP_1588_CLOCK*/ ixgbe_up_complete(adapter); +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD udp_tunnel_get_rx_info(netdev); - - return 0; +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + vxlan_get_rx_port(netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + return IXGBE_SUCCESS; err_set_queues: ixgbe_free_irq(adapter); err_req_irq: ixgbe_free_all_rx_resources(adapter); - if (hw->phy.ops.set_phy_power && !adapter->wol) - hw->phy.ops.set_phy_power(&adapter->hw, false); + if (!adapter->wol) + ixgbe_set_phy_power(&adapter->hw, false); err_setup_rx: ixgbe_free_all_tx_resources(adapter); err_setup_tx: @@ -6810,23 +7796,31 @@ err_setup_tx: return err; } +/** + * ixgbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. 
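+ *
+ * Note the teardown order used here: the interface is brought down
+ * first (entering low power link up mode when the PHY supports it),
+ * then the IRQ is released, and only then are the Rx/Tx ring
+ * resources freed.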
+ */ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) { +#ifdef HAVE_PTP_1588_CLOCK ixgbe_ptp_suspend(adapter); +#endif if (adapter->hw.phy.ops.enter_lplu) { adapter->hw.phy.reset_disable = true; ixgbe_down(adapter); - adapter->hw.phy.ops.enter_lplu(&adapter->hw); + ixgbe_enter_lplu(&adapter->hw); adapter->hw.phy.reset_disable = false; } else { ixgbe_down(adapter); } - ixgbe_free_irq(adapter); - ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); + ixgbe_free_all_tx_resources(adapter); } /** @@ -6844,7 +7838,9 @@ int ixgbe_close(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_PTP_1588_CLOCK ixgbe_ptp_stop(adapter); +#endif if (netif_device_present(netdev)) ixgbe_close_suspend(adapter); @@ -6857,12 +7853,21 @@ int ixgbe_close(struct net_device *netdev) } #ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int ixgbe_resume(struct device *dev) +#else static int ixgbe_resume(struct pci_dev *pdev) +#endif /* USE_LEGACY_PM_SUPPORT */ { - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct ixgbe_adapter *adapter; + struct net_device *netdev; u32 err; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; adapter->hw.hw_addr = adapter->io_addr; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -6888,6 +7893,7 @@ static int ixgbe_resume(struct pci_dev *pdev) IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); rtnl_lock(); + err = ixgbe_init_interrupt_scheme(adapter); if (!err && netif_running(netdev)) err = ixgbe_open(netdev); @@ -6895,12 +7901,82 @@ static int ixgbe_resume(struct pci_dev *pdev) if (!err) netif_device_attach(netdev); + rtnl_unlock(); return err; } + +#ifndef USE_LEGACY_PM_SUPPORT +/** + * ixgbe_freeze - quiesce the device (no IRQs or DMA) + * @dev: The port's device structure + */ +static int ixgbe_freeze(struct device *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) { + if (lplu_enabled) { + adapter->hw.phy.reset_disable = true; + ixgbe_down(adapter); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_down(adapter); + } + ixgbe_free_irq(adapter); + } + + ixgbe_reset_interrupt_capability(adapter); + rtnl_unlock(); + + return 0; +} + +/** + * ixgbe_thaw - un-quiesce the device + * @dev: The port's device structure + */ +static int ixgbe_thaw(struct device *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; + + ixgbe_set_interrupt_capability(adapter); + + if (netif_running(netdev)) { + u32 err = ixgbe_request_irq(adapter); + if (err) + return err; + + if (lplu_enabled) { + adapter->hw.phy.reset_disable = true; + ixgbe_up(adapter); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_up(adapter); + } + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* USE_LEGACY_PM_SUPPORT */ #endif /* CONFIG_PM */ +/* + * __ixgbe_shutdown is not used when power management + * is disabled on older kernels (<2.6.12), which causes a compile + * warning/error because it is defined but not used.
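+ * Hence the #if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER)
+ * guard that follows: the helper is compiled only when at least one
+ * of its callers (suspend or shutdown) is built in.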
+ */ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); @@ -6927,8 +8003,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return retval; #endif - if (hw->mac.ops.stop_link_on_d3) - hw->mac.ops.stop_link_on_d3(hw); + + /* this won't stop link if manageability or WoL is enabled */ + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_stop_mac_link_on_d3_82599(hw); if (wufc) { u32 fctrl; @@ -6962,7 +8040,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -6970,8 +8048,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) } *enable_wake = !!wufc; - if (hw->phy.ops.set_phy_power && !*enable_wake) - hw->phy.ops.set_phy_power(hw, false); + if (!*enable_wake) + ixgbe_set_phy_power(hw, false); ixgbe_release_hw_control(adapter); @@ -6980,12 +8058,21 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ #ifdef CONFIG_PM -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +#ifndef USE_LEGACY_PM_SUPPORT +static int ixgbe_suspend(struct device *dev) +#else +static int ixgbe_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +#endif /* USE_LEGACY_PM_SUPPORT */ { int retval; bool wake; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif retval = __ixgbe_shutdown(pdev, &wake); if (retval) @@ -7002,6 +8089,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) } #endif /* CONFIG_PM */ +#ifndef USE_REBOOT_NOTIFIER static void ixgbe_shutdown(struct pci_dev *pdev) { bool wake; @@ -7014,13 +8102,120 @@ static void ixgbe_shutdown(struct pci_dev *pdev) } } +#endif +#ifdef HAVE_NDO_GET_STATS64 +static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, + struct ixgbe_ring *ring) +{ + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } +} + +/** + * ixgbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ixgbe_get_stats for kernels which support it.
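+ *
+ * Per-ring counters are sampled with the u64_stats seqcount retry
+ * idiom so that a 32-bit reader never observes a torn 64-bit value;
+ * each ring is re-read until its sequence count is stable, roughly:
+ *
+ *	do {
+ *		start = u64_stats_fetch_begin_irq(&ring->syncp);
+ *		packets = ring->stats.packets;
+ *		bytes = ring->stats.bytes;
+ *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));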
+ */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 * +ixgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#endif +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + + ixgbe_get_ring_stats64(stats, ring); + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); + + ixgbe_get_ring_stats64(stats, ring); + } + rcu_read_unlock(); + + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + + return stats; +#endif +} +#else +/** + * ixgbe_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + ixgbe_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif /** * ixgbe_update_stats - Update the board statistics counters. 
* @adapter: board private structure **/ void ixgbe_update_stats(struct ixgbe_adapter *adapter) { - struct net_device *netdev = adapter->netdev; +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; u64 total_mpc = 0; @@ -7054,14 +8249,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hw_csum_rx_error += rx_ring->rx_stats.csum_err; bytes += rx_ring->stats.bytes; packets += rx_ring->stats.packets; + } adapter->non_eop_descs = non_eop_descs; adapter->alloc_rx_page = alloc_rx_page; adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; adapter->hw_csum_rx_error = hw_csum_rx_error; - netdev->stats.rx_bytes = bytes; - netdev->stats.rx_packets = packets; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; bytes = 0; packets = 0; @@ -7083,8 +8279,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } adapter->restart_queue = restart_queue; adapter->tx_busy = tx_busy; - netdev->stats.tx_bytes = bytes; - netdev->stats.tx_packets = packets; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); @@ -7109,7 +8305,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -7123,10 +8319,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || (hw->mac.type == ixgbe_mac_X550EM_x) || - (hw->mac.type == ixgbe_mac_x550em_a)) { + (hw->mac.type == ixgbe_mac_X550EM_a) || + (hw->mac.type == ixgbe_mac_X540)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -7151,8 +8347,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - /* OS2BMC stats are X540 and later */ + case ixgbe_mac_X550EM_a: + /* OS2BMC stats are X540 only*/ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); @@ -7169,16 +8365,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); +#ifdef HAVE_TX_MQ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); -#ifdef IXGBE_FCOE +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); - 
/* Add up per cpu counters for total ddp aloc fail */ + /* Add up per cpu counters for total ddp alloc fail */ if (adapter->fcoe.ddp_pool) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_fcoe_ddp_pool *ddp_pool; @@ -7192,7 +8391,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->fcoe_noddp = noddp; hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; } -#endif /* IXGBE_FCOE */ + +#endif /* CONFIG_FCOE */ break; default: break; @@ -7235,18 +8435,46 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); - /* Fill out the OS statistics structure */ - netdev->stats.multicast = hwstats->mprc; + net_stats->multicast = hwstats->mprc; /* Rx Errors */ - netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; - netdev->stats.rx_dropped = 0; - netdev->stats.rx_length_errors = hwstats->rlec; - netdev->stats.rx_crc_errors = hwstats->crcerrs; - netdev->stats.rx_missed_errors = total_mpc; + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + net_stats->rx_missed_errors = total_mpc; + + /* + * VF Stats Collection - skip while resetting because these + * are not clear on read and otherwise you'll sometimes get + * crazy values. + */ + if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \ + adapter->vfinfo[i].last_vfstats.gprc, \ + adapter->vfinfo[i].vfstats.gprc); + UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \ + adapter->vfinfo[i].last_vfstats.gptc, \ + adapter->vfinfo[i].vfstats.gptc); + UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \ + IXGBE_PVFGORC_MSB(i), \ + adapter->vfinfo[i].last_vfstats.gorc, \ + adapter->vfinfo[i].vfstats.gorc); + UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \ + IXGBE_PVFGOTC_MSB(i), \ + adapter->vfinfo[i].last_vfstats.gotc, \ + adapter->vfinfo[i].vfstats.gotc); + UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \ + adapter->vfinfo[i].last_vfstats.mprc, \ + adapter->vfinfo[i].vfstats.mprc); + } + } } +#ifdef HAVE_TX_MQ /** * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table * @adapter: pointer to the device adapter structure @@ -7271,7 +8499,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) adapter->fdir_overflow++; - if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { + if (ixgbe_reinit_fdir_tables_82599(hw) == IXGBE_SUCCESS) { for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &(adapter->tx_ring[i]->state)); @@ -7286,6 +8514,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) } } +#endif /* HAVE_TX_MQ */ /** * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts * @adapter: pointer to the device adapter structure @@ -7328,7 +8557,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= BIT_ULL(i); + eics |= ((u64)1 << i); } } @@ -7358,9 +8587,11 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } +#ifdef HAVE_DCBNL_IEEE if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); +#endif if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { 
hw->mac.ops.fc_enable(hw); ixgbe_set_rx_drop_en(adapter); @@ -7376,22 +8607,47 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) adapter->link_up = link_up; adapter->link_speed = link_speed; + + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); +#if IS_ENABLED(CONFIG_FCOE) + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + bool fcoe_en = !!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED); +#endif /* CONFIG_FCOE */ + + if (hw->mac.dmac_config.link_speed != link_speed || +#if IS_ENABLED(CONFIG_FCOE) + hw->mac.dmac_config.fcoe_tc != fcoe_tc || + hw->mac.dmac_config.fcoe_en != fcoe_en || +#endif /* CONFIG_FCOE */ + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; +#if IS_ENABLED(CONFIG_FCOE) + hw->mac.dmac_config.fcoe_en = fcoe_en; + hw->mac.dmac_config.fcoe_tc = fcoe_tc; +#endif /* CONFIG_FCOE */ + hw->mac.ops.dmac_config(hw); + } + } } static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) { -#ifdef CONFIG_IXGBE_DCB + u8 up = 0; +#ifdef HAVE_DCBNL_IEEE struct net_device *netdev = adapter->netdev; struct dcb_app app = { - .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = 0, }; - u8 up = 0; - - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) - up = dcb_ieee_getapp_mask(netdev, &app); + up = dcb_getapp(netdev, &app); +#endif +#if IS_ENABLED(CONFIG_FCOE) adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; +#else + adapter->default_up = up; #endif } @@ -7422,11 +8678,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); } break; - case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - case ixgbe_mac_82599EB: { + case ixgbe_mac_X550EM_a: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); @@ -7439,11 +8695,13 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) break; } +#ifdef HAVE_PTP_1588_CLOCK adapter->last_rx_ptp_check = jiffies; if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); +#endif switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed_str = "10 Gbps"; @@ -7473,16 +8731,17 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) (flow_tx ? 
"TX" : "None")))); netif_carrier_on(netdev); +#ifdef IFLA_VF_MAX ixgbe_check_vf_rate_limit(adapter); +#endif /* IFLA_VF_MAX */ + /* Turn on malicious driver detection */ + if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.enable_mdd(hw); - /* enable transmits */ - netif_tx_wake_all_queues(adapter->netdev); - + netif_tx_wake_all_queues(netdev); /* update the default user priority for VFs */ ixgbe_update_default_up(adapter); - - /* ping all the active vfs to let them know link has changed */ - ixgbe_ping_all_vfs(adapter); } /** @@ -7506,11 +8765,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; +#ifdef HAVE_PTP_1588_CLOCK if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); +#endif e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); /* ping all the active vfs to let them know link has changed */ ixgbe_ping_all_vfs(adapter); @@ -7548,16 +8810,15 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) return false; - /* resetting the PF is only needed for MAC before X550 */ + /* resetting the PF is only needed for MACs < X550 */ if (hw->mac.type >= ixgbe_mac_X550) return false; - for (i = 0; i < adapter->num_vfs; i++) { for (j = 0; j < q_per_pool; j++) { u32 h, t; - h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); - t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); + h = IXGBE_READ_REG(hw, IXGBE_PVFTDHn(q_per_pool, i, j)); + t = IXGBE_READ_REG(hw, IXGBE_PVFTDTn(q_per_pool, i, j)); if (h != t) return true; @@ -7581,13 +8842,42 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). */ - e_warn(drv, "initiating reset to clear Tx work after link loss\n"); + e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); } } } #ifdef CONFIG_PCI_IOV +static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -7601,7 +8891,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); if (gpc) /* If incrementing then no need for the check below */ return; - /* Check to see if a bad DMA write target from an errant or + /* + * Check to see if a bad DMA write target from an errant or * malicious VF has caused a PCIe error. If so then we can * issue a VFLR to the offending VF(s) and then resume without * requesting a full slot reset. 
@@ -7620,7 +8911,7 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) pci_read_config_word(vfdev, PCI_STATUS, &status_reg); if (status_reg != IXGBE_FAILED_READ_CFG_WORD && status_reg & PCI_STATUS_REC_MASTER_ABORT) - pcie_flr(vfdev); + ixgbe_issue_vf_flr(adapter, vfdev); } } @@ -7642,20 +8933,11 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) if (!ssvpc) return; - e_warn(drv, "%u Spoofed packets detected\n", ssvpc); -} -#else -static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) -{ + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); } -static void -ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) -{ -} #endif /* CONFIG_PCI_IOV */ - /** * ixgbe_watchdog_subtask - check and bring link up * @adapter: pointer to the device adapter structure @@ -7674,9 +8956,10 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) ixgbe_watchdog_link_is_up(adapter); else ixgbe_watchdog_link_is_down(adapter); - - ixgbe_check_for_bad_vf(adapter); +#ifdef CONFIG_PCI_IOV ixgbe_spoof_check(adapter); + ixgbe_check_for_bad_vf(adapter); +#endif /* CONFIG_PCI_IOV */ ixgbe_update_stats(adapter); ixgbe_watchdog_flush_tx(adapter); @@ -7698,7 +8981,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) if (adapter->sfp_poll_time && time_after(adapter->sfp_poll_time, jiffies)) - return; /* If not yet time to poll for SFP */ + return; /* If not yet time to poll for SFP */ /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -7746,12 +9029,13 @@ sfp_out: clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && - (adapter->netdev->reg_state == NETREG_REGISTERED)) { + adapter->netdev_registered) { e_dev_err("failed to initialize because an unsupported " "SFP+ module type was detected.\n"); e_dev_err("Reload the driver after installing a " "supported module.\n"); unregister_netdev(adapter->netdev); + adapter->netdev_registered = false; } } @@ -7763,7 +9047,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 cap_speed; - u32 speed; + u32 speed = 0; bool autoneg = false; if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) @@ -7777,12 +9061,15 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); - /* advertise highest capable link speed */ - if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) - speed = IXGBE_LINK_SPEED_10GB_FULL; - else - speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL); + /* Advertise highest capable link speed */ + if (cap_speed & IXGBE_LINK_SPEED_10GB_FULL) { + if (!autoneg) + speed = IXGBE_LINK_SPEED_10GB_FULL; + else + speed = cap_speed; + } else if (cap_speed == IXGBE_LINK_SPEED_1GB_FULL) { + speed = IXGBE_LINK_SPEED_1GB_FULL; + } if (hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, speed, true); @@ -7794,7 +9081,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_service_timer - Timer Call-back - * @t: pointer to timer_list structure + * @t: pointer to timer_list **/ static void ixgbe_service_timer(struct timer_list *t) { @@ -7815,21 +9102,14 @@ static void ixgbe_service_timer(struct timer_list *t) static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; u32 status; if (!(adapter->flags2 & 
IXGBE_FLAG2_PHY_INTERRUPT)) return; - adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; - - if (!hw->phy.ops.handle_lasi) - return; - - status = hw->phy.ops.handle_lasi(&adapter->hw); + status = ixgbe_handle_lasi(&adapter->hw); if (status != IXGBE_ERR_OVERTEMP) return; - e_crit(drv, "%s\n", ixgbe_overheat_msg); } @@ -7839,7 +9119,7 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) return; rtnl_lock(); - /* If we're already down, removing or resetting, just bail */ + /* If we're already down or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) { @@ -7847,7 +9127,6 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) return; } - ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; @@ -7860,19 +9139,10 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) * @adapter: the adapter private structure * * Check firmware errors in register FWSM - */ + **/ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 fwsm; - - /* read fwsm.ext_err_ind register and log errors */ - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); - - if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || - !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) - e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", - fwsm); if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); @@ -7891,7 +9161,7 @@ static void ixgbe_service_task(struct work_struct *work) struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, service_task); - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { if (!test_bit(__IXGBE_DOWN, &adapter->state)) { rtnl_lock(); ixgbe_down(adapter); @@ -7900,42 +9170,54 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } + if (ixgbe_check_fw_error(adapter)) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { unregister_netdev(adapter->netdev); + adapter->netdev_registered = false; + } ixgbe_service_event_complete(adapter); return; } +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; +#ifdef HAVE_UDP_ENC_RX_OFFLOAD udp_tunnel_get_rx_info(adapter->netdev); +#else + vxlan_get_rx_port(adapter->netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ rtnl_unlock(); } +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); ixgbe_check_overtemp_subtask(adapter); ixgbe_watchdog_subtask(adapter); +#ifdef HAVE_TX_MQ ixgbe_fdir_reinit_subtask(adapter); +#endif ixgbe_check_hang_subtask(adapter); - +#ifdef HAVE_PTP_1588_CLOCK if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); - if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) + if (unlikely(adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) ixgbe_ptp_rx_hang(adapter); ixgbe_ptp_tx_hang(adapter); } +#endif /* HAVE_PTP_1588_CLOCK */ ixgbe_service_event_complete(adapter); } static int 
ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u8 *hdr_len, - struct ixgbe_ipsec_tx_data *itd) + u8 *hdr_len) { +#ifdef NETIF_F_TSO u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; union { @@ -7948,7 +9230,6 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, unsigned char *hdr; } l4; u32 paylen, l4_offset; - u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -7961,10 +9242,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, if (err < 0) return err; - if (eth_p_mpls(first->protocol)) - ip.hdr = skb_inner_network_header(skb); - else - ip.hdr = skb_network_header(skb); + ip.hdr = skb_network_header(skb); l4.hdr = skb_checksum_start(skb); /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ @@ -7974,15 +9252,13 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); - int len = csum_start - trans_start; /* IP header will have to cancel out any data that - * is not a part of the outer IP header, so set to - * a reverse csum if needed, else init check to 0. + * is not a part of the outer IP header */ - ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? - csum_fold(csum_partial(trans_start, - len, 0)) : 0; + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; @@ -8003,7 +9279,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; @@ -8013,18 +9289,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - fceof_saidx |= itd->sa_idx; - type_tucmd |= itd->flags | itd->trailer_len; - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, mss_l4len_idx); return 1; +#else + return 0; +#endif /* NETIF_F_TSO */ } static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) @@ -8037,12 +9313,10 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) } static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd) + struct ixgbe_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { @@ -8083,10 +9357,7 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - fceof_saidx |= itd->sa_idx; - type_tucmd |= itd->flags | itd->trailer_len; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); } #define IXGBE_SET_FLAG(_input, _flag, _result) \ @@ -8094,7 +9365,7 @@ no_csum: ((u32)(_input & _flag) * (_result / _flag)) : \ ((u32)(_input & 
 	 ((u32)(_input & _flag) / (_flag / _result)))
 
-static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
+static u32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
 	/* set type for advanced descriptor with frame checksum insertion */
 	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
@@ -8113,9 +9384,6 @@ static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
 				   IXGBE_ADVTXD_MAC_TSTAMP);
 
-	/* insert frame checksum */
-	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
-
 	return cmd_type;
 }
 
@@ -8129,16 +9397,11 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 					IXGBE_TX_FLAGS_CSUM,
 					IXGBE_ADVTXD_POPTS_TXSM);
 
 	/* enable IPv4 checksum for TSO */
 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
 					IXGBE_TX_FLAGS_IPV4,
 					IXGBE_ADVTXD_POPTS_IXSM);
 
-	/* enable IPsec */
-	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
-					IXGBE_TX_FLAGS_IPSEC,
-					IXGBE_ADVTXD_POPTS_IPSEC);
-
 	/*
 	 * Check Context must be set if Tx switch is enabled, which it
 	 * always is for case where virtual functions are running
@@ -8191,7 +9454,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	dma_addr_t dma;
 	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
-	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
+	u32 cmd_type = ixgbe_tx_cmd_type(tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
@@ -8201,7 +9464,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	size = skb_headlen(skb);
 	data_len = skb->data_len;
 
-#ifdef IXGBE_FCOE
+#if IS_ENABLED(CONFIG_FCOE)
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 		if (data_len < sizeof(struct fcoe_crc_eof)) {
 			size -= sizeof(struct fcoe_crc_eof) - data_len;
@@ -8210,8 +9473,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 			data_len -= sizeof(struct fcoe_crc_eof);
 		}
 	}
+#endif /* CONFIG_FCOE */
 
-#endif
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
 
 	tx_buffer = first;
@@ -8257,7 +9520,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 		}
 		tx_desc->read.olinfo_status = 0;
 
-#ifdef IXGBE_FCOE
+#if IS_ENABLED(CONFIG_FCOE)
 		size = min_t(unsigned int, data_len, skb_frag_size(frag));
 #else
 		size = skb_frag_size(frag);
@@ -8279,8 +9542,9 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	/* set the timestamp */
 	first->time_stamp = jiffies;
 
-	skb_tx_timestamp(skb);
-
+#ifndef HAVE_TRANS_START_IN_QUEUE
+	netdev_ring(tx_ring)->trans_start = first->time_stamp;
+#endif
 	/*
 	 * Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
@@ -8302,8 +9566,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
+	skb_tx_timestamp(skb);
+
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 		writel(i, tx_ring->tail);
+#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB
+
+		/* The following mmiowb() is required on certain
+		 * architectures (IA64/Altix in particular) in order to
+		 * synchronize the I/O calls with respect to a spin lock. This
+		 * is because the wmb() on those architectures does not
+		 * guarantee anything for posted I/O writes.
+		 *
+		 * Note that the associated spin_unlock() is not within the
+		 * driver code, but in the networking core stack.
+		 */
+		mmiowb();
+#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */
 	}
 
 	return 0;
@@ -8346,8 +9625,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 		struct ipv6hdr *ipv6;
 	} hdr;
 	struct tcphdr *th;
-	unsigned int hlen;
 	struct sk_buff *skb;
+	unsigned int hlen;
 	__be16 vlan_id;
 	int l4_proto;
 
@@ -8362,15 +9641,18 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 	ring->atr_count++;
 
 	/* currently only IPv4/IPv6 with TCP is supported */
-	if ((first->protocol != htons(ETH_P_IP)) &&
-	    (first->protocol != htons(ETH_P_IPV6)))
+	if (first->protocol != htons(ETH_P_IP) &&
+	    first->protocol != htons(ETH_P_IPV6))
 		return;
 
 	/* snag network header to get L4 type and address */
 	skb = first->skb;
 	hdr.network = skb_network_header(skb);
+
+#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD)
 	if (unlikely(hdr.network <= skb->data))
 		return;
+
 	if (skb->encapsulation &&
 	    first->protocol == htons(ETH_P_IP) &&
 	    hdr.ipv4->protocol == IPPROTO_UDP) {
@@ -8380,15 +9662,18 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 			     VXLAN_HEADROOM))
 			return;
 
-		/* verify the port is recognized as VXLAN */
+		/* verify the port is recognized as VXLAN or GENEVE */
 		if (adapter->vxlan_port &&
 		    udp_hdr(skb)->dest == adapter->vxlan_port)
 			hdr.network = skb_inner_network_header(skb);
 
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
 		if (adapter->geneve_port &&
 		    udp_hdr(skb)->dest == adapter->geneve_port)
 			hdr.network = skb_inner_network_header(skb);
+#endif
 	}
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */
 
 	/* Make sure we have at least [minimum IPv4 header + TCP]
 	 * or [IPv6 header] bytes
@@ -8473,47 +9758,62 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 		break;
 	}
 
+#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD)
 	if (hdr.network != skb_network_header(skb))
 		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
 	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
 					      input, common, ring->queue_index);
 }
 
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#if IS_ENABLED(CONFIG_FCOE)
-#ifdef IXGBE_FCOE
+#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED)
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 			      struct net_device *sb_dev)
+#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+			      __always_unused struct net_device *sb_dev,
+			      select_queue_fallback_t fallback)
+#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+			      __always_unused void *accel,
+			      select_queue_fallback_t fallback)
+#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+			      __always_unused void *accel)
+#else
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */
 {
-	struct ixgbe_adapter *adapter;
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_ring_feature *f;
 	int txq;
 
-	if (sb_dev) {
-		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-		struct net_device *vdev = sb_dev;
-
-		txq = vdev->tc_to_txq[tc].offset;
-		txq += reciprocal_scale(skb_get_hash(skb),
-					vdev->tc_to_txq[tc].count);
-
-		return txq;
-	}
-
 	/*
 	 * only execute the code below if protocol is FCoE
 	 * or FIP and we have FCoE enabled on the adapter
 	 */
 	switch (vlan_get_protocol(skb)) {
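The preprocessor ladder a few lines up tracks how the ndo_select_queue() prototype drifted across kernel releases: v3.13 added an accel_priv argument, v3.14 added the fallback callback, v4.19 replaced accel_priv with a subordinate-device pointer, and v5.2 dropped the fallback again. Roughly, the shapes being reconciled (a sketch keyed off LINUX_VERSION_CODE for illustration only; the real driver keys off its kcompat HAVE_* probes, and example_select_queue is my name):

#include <linux/netdevice.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev)
{
	return netdev_pick_tx(dev, skb, sb_dev);   /* core picks, no fallback arg */
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev,
				select_queue_fallback_t fallback)
{
	return fallback(dev, skb, sb_dev);   /* fallback handed in by the core */
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	return fallback(dev, skb);
}
#else
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return __netdev_pick_tx(dev, skb);
}
#endif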
-	case htons(ETH_P_FCOE):
-	case htons(ETH_P_FIP):
+	case __constant_htons(ETH_P_FCOE):
+	case __constant_htons(ETH_P_FIP):
 		adapter = netdev_priv(dev);
-		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 			break;
 		/* fall through */
 	default:
+#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED)
 		return netdev_pick_tx(dev, skb, sb_dev);
+#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV)
+		return fallback(dev, skb, sb_dev);
+#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK)
+		return fallback(dev, skb);
+#else
+		return __netdev_pick_tx(dev, skb);
+#endif
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
@@ -8526,10 +9826,17 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return txq + f->offset;
 }
+#endif /* CONFIG_FCOE */
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
 
-#endif
+#ifdef HAVE_XDP_SUPPORT
+#ifdef HAVE_XDP_FRAME_STRUCT
 int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			struct xdp_frame *xdpf)
+#else
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+			struct xdp_buff *xdp)
+#endif
 {
 	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 	struct ixgbe_tx_buffer *tx_buffer;
@@ -8538,12 +9845,20 @@ int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 	dma_addr_t dma;
 	u16 i;
 
+#ifdef HAVE_XDP_FRAME_STRUCT
 	len = xdpf->len;
+#else
+	len = xdp->data_end - xdp->data;
+#endif
 	if (unlikely(!ixgbe_desc_unused(ring)))
 		return IXGBE_XDP_CONSUMED;
 
+#ifdef HAVE_XDP_FRAME_STRUCT
 	dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
+#else
+	dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
+#endif
 	if (dma_mapping_error(ring->dev, dma))
 		return IXGBE_XDP_CONSUMED;
 
@@ -8558,7 +9873,11 @@ int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 	dma_unmap_len_set(tx_buffer, len, len);
 	dma_unmap_addr_set(tx_buffer, dma, dma);
 
+#ifdef HAVE_XDP_FRAME_STRUCT
 	tx_buffer->xdpf = xdpf;
+#else
+	tx_buffer->data = xdp->data;
+#endif
 
 	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
@@ -8585,1812 +9904,7 @@ int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 	return IXGBE_XDP_TX;
 }
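ixgbe_xmit_xdp_ring() above is compiled in one of two shapes: kernels that have struct xdp_frame (v4.17 and later, where the verdict path converts the xdp_buff before transmission) and older ones that only ever see the raw struct xdp_buff, hence the HAVE_XDP_FRAME_STRUCT gate on every access. A sketch of the two accessors being reconciled (assumes a modern net/xdp.h; the function names are mine):

#include <linux/types.h>
#include <net/xdp.h>

/* with HAVE_XDP_FRAME_STRUCT the converted frame carries its own length */
static u32 example_xdp_frame_len(const struct xdp_frame *xdpf)
{
	return xdpf->len;
}

/* without it, only the buffer pointers exist and the length is derived */
static u32 example_xdp_buff_len(const struct xdp_buff *xdp)
{
	return xdp->data_end - xdp->data;
}

The same split explains the tx_buffer bookkeeping above: the new path stores the xdp_frame pointer so completion can hand it back to the memory allocator, while the old path can only remember the raw data pointer.

-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
-				  struct ixgbe_adapter *adapter,
-				  struct ixgbe_ring *tx_ring)
-{
-	struct ixgbe_tx_buffer *first;
-	int tso;
-	u32 tx_flags = 0;
-	unsigned short f;
-	u16 count = TXD_USE_COUNT(skb_headlen(skb));
-	struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
-	__be16 protocol = skb->protocol;
-	u8 hdr_len = 0;
-
-	/*
-	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
-	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
-	 *       + 2 desc gap to keep tail from touching head,
-	 *       + 1 desc for context descriptor,
-	 * otherwise try next time
-	 */
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(
-						&skb_shinfo(skb)->frags[f]));
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
-
-	/* record the location of the first descriptor for this packet */
-	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
-	first->skb = skb;
-	first->bytecount = skb->len;
-	first->gso_segs = 1;
-
-	/* if we have a HW VLAN tag being added default to the HW one */
-	if (skb_vlan_tag_present(skb)) {
-		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
-	/* else if it is a SW VLAN check the next protocol and store the tag */
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_hdr *vhdr, _vhdr;
-		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
-		if (!vhdr)
-			goto out_drop;
-
-		tx_flags |=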
ntohs(vhdr->h_vlan_TCI) << - IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; - } - protocol = vlan_get_protocol(skb); - - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - adapter->ptp_clock) { - if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, - &adapter->state)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - tx_flags |= IXGBE_TX_FLAGS_TSTAMP; - - /* schedule check for Tx timestamp */ - adapter->ptp_tx_skb = skb_get(skb); - adapter->ptp_tx_start = jiffies; - schedule_work(&adapter->ptp_tx_work); - } else { - adapter->tx_hwtstamp_skipped++; - } - } - -#ifdef CONFIG_PCI_IOV - /* - * Use the l2switch_enable flag - would be false if the DMA - * Tx switch had been disabled. - */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - tx_flags |= IXGBE_TX_FLAGS_CC; - -#endif - /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ - if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || - (skb->priority != TC_PRIO_CONTROL))) { - tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; - tx_flags |= (skb->priority & 0x7) << - IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; - if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { - struct vlan_ethhdr *vhdr; - - if (skb_cow_head(skb, 0)) - goto out_drop; - vhdr = (struct vlan_ethhdr *)skb->data; - vhdr->h_vlan_TCI = htons(tx_flags >> - IXGBE_TX_FLAGS_VLAN_SHIFT); - } else { - tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; - } - } - - /* record initial flags and protocol */ - first->tx_flags = tx_flags; - first->protocol = protocol; - -#ifdef IXGBE_FCOE - /* setup tx offload for FCoE */ - if ((protocol == htons(ETH_P_FCOE)) && - (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { - tso = ixgbe_fso(tx_ring, first, &hdr_len); - if (tso < 0) - goto out_drop; - - goto xmit_fcoe; - } - -#endif /* IXGBE_FCOE */ - -#ifdef CONFIG_IXGBE_IPSEC - if (xfrm_offload(skb) && - !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) - goto out_drop; -#endif - tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx); - if (tso < 0) - goto out_drop; - else if (!tso) - ixgbe_tx_csum(tx_ring, first, &ipsec_tx); - - /* add the ATR filter if ATR is on */ - if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) - ixgbe_atr(tx_ring, first); - -#ifdef IXGBE_FCOE -xmit_fcoe: -#endif /* IXGBE_FCOE */ - if (ixgbe_tx_map(tx_ring, first, hdr_len)) - goto cleanup_tx_timestamp; - - return NETDEV_TX_OK; - -out_drop: - dev_kfree_skb_any(first->skb); - first->skb = NULL; -cleanup_tx_timestamp: - if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - cancel_work_sync(&adapter->ptp_tx_work); - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - } - - return NETDEV_TX_OK; -} - -static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev, - struct ixgbe_ring *ring) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring; - - /* - * The minimum packet size for olinfo paylen is 17 so pad the skb - * in order to meet this minimum size requirement. - */ - if (skb_put_padto(skb, 17)) - return NETDEV_TX_OK; - - tx_ring = ring ? 
ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; - if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) - return NETDEV_TX_BUSY; - - return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); -} - -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - return __ixgbe_xmit_frame(skb, netdev, NULL); -} - -/** - * ixgbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_set_mac(struct net_device *netdev, void *p) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - - ixgbe_mac_set_default_filter(adapter); - - return 0; -} - -static int -ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 value; - int rc; - - if (adapter->mii_bus) { - int regnum = addr; - - if (devad != MDIO_DEVAD_NONE) - regnum |= (devad << 16) | MII_ADDR_C45; - - return mdiobus_read(adapter->mii_bus, prtad, regnum); - } - - if (prtad != hw->phy.mdio.prtad) - return -EINVAL; - rc = hw->phy.ops.read_reg(hw, addr, devad, &value); - if (!rc) - rc = value; - return rc; -} - -static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, - u16 addr, u16 value) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (adapter->mii_bus) { - int regnum = addr; - - if (devad != MDIO_DEVAD_NONE) - regnum |= (devad << 16) | MII_ADDR_C45; - - return mdiobus_write(adapter->mii_bus, prtad, regnum, value); - } - - if (prtad != hw->phy.mdio.prtad) - return -EINVAL; - return hw->phy.ops.write_reg(hw, addr, devad, value); -} - -static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return ixgbe_ptp_set_ts_config(adapter, req); - case SIOCGHWTSTAMP: - return ixgbe_ptp_get_ts_config(adapter, req); - case SIOCGMIIPHY: - if (!adapter->hw.phy.ops.read_reg) - return -EOPNOTSUPP; - /* fall through */ - default: - return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); - } -} - -/** - * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding - * netdev->dev_addrs - * @dev: network interface device structure - * - * Returns non-zero on failure - **/ -static int ixgbe_add_sanmac_netdev(struct net_device *dev) -{ - int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - - if (is_valid_ether_addr(hw->mac.san_addr)) { - rtnl_lock(); - err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - - /* update SAN MAC vmdq pool selection */ - hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); - } - return err; -} - -/** - * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding - * netdev->dev_addrs - * @dev: network interface device structure - * - * Returns non-zero on failure - **/ -static int ixgbe_del_sanmac_netdev(struct net_device *dev) -{ - int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; - - if 
(is_valid_ether_addr(mac->san_addr)) { - rtnl_lock(); - err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - } - return err; -} - -static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, - struct ixgbe_ring *ring) -{ - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; - } -} - -static void ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - rcu_read_lock(); - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->rx_packets += packets; - stats->rx_bytes += bytes; - } - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); - - ixgbe_get_ring_stats64(stats, ring); - } - for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); - - ixgbe_get_ring_stats64(stats, ring); - } - rcu_read_unlock(); - - /* following stats updated by ixgbe_watchdog_task() */ - stats->multicast = netdev->stats.multicast; - stats->rx_errors = netdev->stats.rx_errors; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; - stats->rx_missed_errors = netdev->stats.rx_missed_errors; -} - -#ifdef CONFIG_IXGBE_DCB -/** - * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. - * @adapter: pointer to ixgbe_adapter - * @tc: number of traffic classes currently enabled - * - * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm - * 802.1Q priority maps to a packet buffer that exists. - */ -static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg, rsave; - int i; - - /* 82598 have a static priority to TC mapping that can not - * be changed so no validation is needed. 
- */ - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - rsave = reg; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); - - /* If up2tc is out of bounds default to zero */ - if (up2tc > tc) - reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); - } - - if (reg != rsave) - IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); - - return; -} - -/** - * ixgbe_set_prio_tc_map - Configure netdev prio tc map - * @adapter: Pointer to adapter struct - * - * Populate the netdev user priority to tc map - */ -static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) -{ - struct net_device *dev = adapter->netdev; - struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; - struct ieee_ets *ets = adapter->ixgbe_ieee_ets; - u8 prio; - - for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { - u8 tc = 0; - - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) - tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); - else if (ets) - tc = ets->prio_tc[prio]; - - netdev_set_prio_tc_map(dev, prio, tc); - } -} - -#endif /* CONFIG_IXGBE_DCB */ -static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data) -{ - struct ixgbe_adapter *adapter = data; - struct ixgbe_fwd_adapter *accel; - int pool; - - /* we only care about macvlans... */ - if (!netif_is_macvlan(vdev)) - return 0; - - /* that have hardware offload enabled... */ - accel = macvlan_accel_priv(vdev); - if (!accel) - return 0; - - /* If we can relocate to a different bit do so */ - pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); - if (pool < adapter->num_rx_pools) { - set_bit(pool, adapter->fwd_bitmask); - accel->pool = pool; - return 0; - } - - /* if we cannot find a free pool then disable the offload */ - netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); - macvlan_release_l2fw_offload(vdev); - - /* unbind the queues and drop the subordinate channel config */ - netdev_unbind_sb_channel(adapter->netdev, vdev); - netdev_set_sb_channel(vdev, 0); - - kfree(accel); - - return 0; -} - -static void ixgbe_defrag_macvlan_pools(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - /* flush any stale bits out of the fwd bitmask */ - bitmap_clear(adapter->fwd_bitmask, 1, 63); - - /* walk through upper devices reassigning pools */ - netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool, - adapter); -} - -/** - * ixgbe_setup_tc - configure net_device for multiple traffic classes - * - * @dev: net device to configure - * @tc: number of traffic classes to enable - */ -int ixgbe_setup_tc(struct net_device *dev, u8 tc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - - /* Hardware supports up to 8 traffic classes */ - if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) - return -EINVAL; - - if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) - return -EINVAL; - - /* Hardware has to reinitialize queues and interrupts to - * match packet buffer alignment. Unfortunately, the - * hardware is not flexible enough to do this dynamically. 
- */ - if (netif_running(dev)) - ixgbe_close(dev); - else - ixgbe_reset(adapter); - - ixgbe_clear_interrupt_scheme(adapter); - -#ifdef CONFIG_IXGBE_DCB - if (tc) { - if (adapter->xdp_prog) { - e_warn(probe, "DCB is not supported with XDP\n"); - - ixgbe_init_interrupt_scheme(adapter); - if (netif_running(dev)) - ixgbe_open(dev); - return -EINVAL; - } - - netdev_set_num_tc(dev, tc); - ixgbe_set_prio_tc_map(adapter); - - adapter->hw_tcs = tc; - adapter->flags |= IXGBE_FLAG_DCB_ENABLED; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - adapter->last_lfc_mode = adapter->hw.fc.requested_mode; - adapter->hw.fc.requested_mode = ixgbe_fc_none; - } - } else { - netdev_reset_tc(dev); - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->hw_tcs = tc; - - adapter->temp_dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.pfc_mode_enable = false; - } - - ixgbe_validate_rtr(adapter, tc); - -#endif /* CONFIG_IXGBE_DCB */ - ixgbe_init_interrupt_scheme(adapter); - - ixgbe_defrag_macvlan_pools(dev); - - if (netif_running(dev)) - return ixgbe_open(dev); - - return 0; -} - -static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls) -{ - u32 hdl = cls->knode.handle; - u32 uhtid = TC_U32_USERHTID(cls->knode.handle); - u32 loc = cls->knode.handle & 0xfffff; - int err = 0, i, j; - struct ixgbe_jump_table *jump = NULL; - - if (loc > IXGBE_MAX_HW_ENTRIES) - return -EINVAL; - - if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) - return -EINVAL; - - /* Clear this filter in the link data it is associated with */ - if (uhtid != 0x800) { - jump = adapter->jump_tables[uhtid]; - if (!jump) - return -EINVAL; - if (!test_bit(loc - 1, jump->child_loc_map)) - return -EINVAL; - clear_bit(loc - 1, jump->child_loc_map); - } - - /* Check if the filter being deleted is a link */ - for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { - jump = adapter->jump_tables[i]; - if (jump && jump->link_hdl == hdl) { - /* Delete filters in the hardware in the child hash - * table associated with this link - */ - for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { - if (!test_bit(j, jump->child_loc_map)) - continue; - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_update_ethtool_fdir_entry(adapter, - NULL, - j + 1); - spin_unlock(&adapter->fdir_perfect_lock); - clear_bit(j, jump->child_loc_map); - } - /* Remove resources for this link */ - kfree(jump->input); - kfree(jump->mask); - kfree(jump); - adapter->jump_tables[i] = NULL; - return err; - } - } - - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); - spin_unlock(&adapter->fdir_perfect_lock); - return err; -} - -static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls) -{ - u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); - - if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - /* This ixgbe devices do not support hash tables at the moment - * so abort when given hash tables. 
- */ - if (cls->hnode.divisor > 0) - return -EINVAL; - - set_bit(uhtid - 1, &adapter->tables); - return 0; -} - -static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls) -{ - u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); - - if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - clear_bit(uhtid - 1, &adapter->tables); - return 0; -} - -#ifdef CONFIG_NET_CLS_ACT -struct upper_walk_data { - struct ixgbe_adapter *adapter; - u64 action; - int ifindex; - u8 queue; -}; - -static int get_macvlan_queue(struct net_device *upper, void *_data) -{ - if (netif_is_macvlan(upper)) { - struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); - struct upper_walk_data *data = _data; - struct ixgbe_adapter *adapter = data->adapter; - int ifindex = data->ifindex; - - if (vadapter && upper->ifindex == ifindex) { - data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; - data->action = data->queue; - return 1; - } - } - - return 0; -} - -static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, - u8 *queue, u64 *action) -{ - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - unsigned int num_vfs = adapter->num_vfs, vf; - struct upper_walk_data data; - struct net_device *upper; - - /* redirect to a SRIOV VF */ - for (vf = 0; vf < num_vfs; ++vf) { - upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); - if (upper->ifindex == ifindex) { - *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); - *action = vf + 1; - *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; - return 0; - } - } - - /* redirect to a offloaded macvlan netdev */ - data.adapter = adapter; - data.ifindex = ifindex; - data.action = 0; - data.queue = 0; - if (netdev_walk_all_upper_dev_rcu(adapter->netdev, - get_macvlan_queue, &data)) { - *action = data.action; - *queue = data.queue; - - return 0; - } - - return -EINVAL; -} - -static int parse_tc_actions(struct ixgbe_adapter *adapter, - struct tcf_exts *exts, u64 *action, u8 *queue) -{ - const struct tc_action *a; - int i; - - if (!tcf_exts_has_actions(exts)) - return -EINVAL; - - tcf_exts_for_each_action(i, a, exts) { - /* Drop action */ - if (is_tcf_gact_shot(a)) { - *action = IXGBE_FDIR_DROP_QUEUE; - *queue = IXGBE_FDIR_DROP_QUEUE; - return 0; - } - - /* Redirect to a VF or a offloaded macvlan */ - if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *dev = tcf_mirred_dev(a); - - if (!dev) - return -EINVAL; - return handle_redirect_action(adapter, dev->ifindex, - queue, action); - } - - return -EINVAL; - } - - return -EINVAL; -} -#else -static int parse_tc_actions(struct ixgbe_adapter *adapter, - struct tcf_exts *exts, u64 *action, u8 *queue) -{ - return -EINVAL; -} -#endif /* CONFIG_NET_CLS_ACT */ - -static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, - union ixgbe_atr_input *mask, - struct tc_cls_u32_offload *cls, - struct ixgbe_mat_field *field_ptr, - struct ixgbe_nexthdr *nexthdr) -{ - int i, j, off; - __be32 val, m; - bool found_entry = false, found_jump_field = false; - - for (i = 0; i < cls->knode.sel->nkeys; i++) { - off = cls->knode.sel->keys[i].off; - val = cls->knode.sel->keys[i].val; - m = cls->knode.sel->keys[i].mask; - - for (j = 0; field_ptr[j].val; j++) { - if (field_ptr[j].off == off) { - field_ptr[j].val(input, mask, (__force u32)val, - (__force u32)m); - input->filter.formatted.flow_type |= - field_ptr[j].type; - found_entry = true; - break; - } - } - if (nexthdr) { - if (nexthdr->off == cls->knode.sel->keys[i].off && - nexthdr->val == - (__force 
u32)cls->knode.sel->keys[i].val && - nexthdr->mask == - (__force u32)cls->knode.sel->keys[i].mask) - found_jump_field = true; - else - continue; - } - } - - if (nexthdr && !found_jump_field) - return -EINVAL; - - if (!found_entry) - return 0; - - mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; - - return 0; -} - -static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls) -{ - __be16 protocol = cls->common.protocol; - u32 loc = cls->knode.handle & 0xfffff; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_mat_field *field_ptr; - struct ixgbe_fdir_filter *input = NULL; - union ixgbe_atr_input *mask = NULL; - struct ixgbe_jump_table *jump = NULL; - int i, err = -EINVAL; - u8 queue; - u32 uhtid, link_uhtid; - - uhtid = TC_U32_USERHTID(cls->knode.handle); - link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); - - /* At the moment cls_u32 jumps to network layer and skips past - * L2 headers. The canonical method to match L2 frames is to use - * negative values. However this is error prone at best but really - * just broken because there is no way to "know" what sort of hdr - * is in front of the network layer. Fix cls_u32 to support L2 - * headers when needed. - */ - if (protocol != htons(ETH_P_IP)) - return err; - - if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { - e_err(drv, "Location out of range\n"); - return err; - } - - /* cls u32 is a graph starting at root node 0x800. The driver tracks - * links and also the fields used to advance the parser across each - * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map - * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h - * To add support for new nodes update ixgbe_model.h parse structures - * this function _should_ be generic try not to hardcode values here. - */ - if (uhtid == 0x800) { - field_ptr = (adapter->jump_tables[0])->mat; - } else { - if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return err; - if (!adapter->jump_tables[uhtid]) - return err; - field_ptr = (adapter->jump_tables[uhtid])->mat; - } - - if (!field_ptr) - return err; - - /* At this point we know the field_ptr is valid and need to either - * build cls_u32 link or attach filter. Because adding a link to - * a handle that does not exist is invalid and the same for adding - * rules to handles that don't exist. - */ - - if (link_uhtid) { - struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - - if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) - return err; - - if (!test_bit(link_uhtid - 1, &adapter->tables)) - return err; - - /* Multiple filters as links to the same hash table are not - * supported. To add a new filter with the same next header - * but different match/jump conditions, create a new hash table - * and link to it. 
- */ - if (adapter->jump_tables[link_uhtid] && - (adapter->jump_tables[link_uhtid])->link_hdl) { - e_err(drv, "Link filter exists for link: %x\n", - link_uhtid); - return err; - } - - for (i = 0; nexthdr[i].jump; i++) { - if (nexthdr[i].o != cls->knode.sel->offoff || - nexthdr[i].s != cls->knode.sel->offshift || - nexthdr[i].m != - (__force u32)cls->knode.sel->offmask) - return err; - - jump = kzalloc(sizeof(*jump), GFP_KERNEL); - if (!jump) - return -ENOMEM; - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) { - err = -ENOMEM; - goto free_jump; - } - mask = kzalloc(sizeof(*mask), GFP_KERNEL); - if (!mask) { - err = -ENOMEM; - goto free_input; - } - jump->input = input; - jump->mask = mask; - jump->link_hdl = cls->knode.handle; - - err = ixgbe_clsu32_build_input(input, mask, cls, - field_ptr, &nexthdr[i]); - if (!err) { - jump->mat = nexthdr[i].jump; - adapter->jump_tables[link_uhtid] = jump; - break; - } else { - kfree(mask); - kfree(input); - kfree(jump); - } - } - return 0; - } - - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) - return -ENOMEM; - mask = kzalloc(sizeof(*mask), GFP_KERNEL); - if (!mask) { - err = -ENOMEM; - goto free_input; - } - - if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { - if ((adapter->jump_tables[uhtid])->input) - memcpy(input, (adapter->jump_tables[uhtid])->input, - sizeof(*input)); - if ((adapter->jump_tables[uhtid])->mask) - memcpy(mask, (adapter->jump_tables[uhtid])->mask, - sizeof(*mask)); - - /* Lookup in all child hash tables if this location is already - * filled with a filter - */ - for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { - struct ixgbe_jump_table *link = adapter->jump_tables[i]; - - if (link && (test_bit(loc - 1, link->child_loc_map))) { - e_err(drv, "Filter exists in location: %x\n", - loc); - err = -EINVAL; - goto err_out; - } - } - } - err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); - if (err) - goto err_out; - - err = parse_tc_actions(adapter, cls->knode.exts, &input->action, - &queue); - if (err < 0) - goto err_out; - - input->sw_idx = loc; - - spin_lock(&adapter->fdir_perfect_lock); - - if (hlist_empty(&adapter->fdir_filter_list)) { - memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, mask); - if (err) - goto err_out_w_lock; - } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { - err = -EINVAL; - goto err_out_w_lock; - } - - ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); - err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, - input->sw_idx, queue); - if (!err) - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); - spin_unlock(&adapter->fdir_perfect_lock); - - if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) - set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); - - kfree(mask); - return err; -err_out_w_lock: - spin_unlock(&adapter->fdir_perfect_lock); -err_out: - kfree(mask); -free_input: - kfree(input); -free_jump: - kfree(jump); - return err; -} - -static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls_u32) -{ - switch (cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return ixgbe_configure_clsu32(adapter, cls_u32); - case TC_CLSU32_DELETE_KNODE: - return ixgbe_delete_clsu32(adapter, cls_u32); - case TC_CLSU32_NEW_HNODE: - case TC_CLSU32_REPLACE_HNODE: - return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); - case TC_CLSU32_DELETE_HNODE: - return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); - 
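For orientation in the cls_u32 plumbing being dropped above: a u32 filter handle packs three fields, and the masks used there (TC_U32_USERHTID() plus the 0xfffff cut that produces "loc") pull it apart the same way tc(8) prints handles as htid:hash:node, with 0x800 being the root hash table the code special-cases. A minimal decode along those lines (my helper, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* cls_u32 handle layout: 12 bits of hash-table id, 8 bits of bucket,
 * 12 bits of node id; the driver's "loc" keeps the low 20 bits */
static void decode_u32_handle(uint32_t h)
{
	uint32_t htid = (h & 0xfff00000u) >> 20;   /* TC_U32_USERHTID() */
	uint32_t loc = h & 0x000fffffu;            /* bucket + node */

	printf("htid=0x%x loc=0x%x\n", htid, loc);
}

int main(void)
{
	decode_u32_handle(0x800u << 20 | 0x1);     /* handle "800::1" */
	return 0;
}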
default: - return -EOPNOTSUPP; - } -} - -static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct ixgbe_adapter *adapter = cb_priv; - - if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) - return -EOPNOTSUPP; - - switch (type) { - case TC_SETUP_CLSU32: - return ixgbe_setup_tc_cls_u32(adapter, type_data); - default: - return -EOPNOTSUPP; - } -} - -static int ixgbe_setup_tc_mqprio(struct net_device *dev, - struct tc_mqprio_qopt *mqprio) -{ - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return ixgbe_setup_tc(dev, mqprio->num_tc); -} - -static LIST_HEAD(ixgbe_block_cb_list); - -static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - switch (type) { - case TC_SETUP_BLOCK: - return flow_block_cb_setup_simple(type_data, - &ixgbe_block_cb_list, - ixgbe_setup_tc_block_cb, - adapter, adapter, true); - case TC_SETUP_QDISC_MQPRIO: - return ixgbe_setup_tc_mqprio(dev, type_data); - default: - return -EOPNOTSUPP; - } -} - -#ifdef CONFIG_PCI_IOV -void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - rtnl_lock(); - ixgbe_setup_tc(netdev, adapter->hw_tcs); - rtnl_unlock(); -} - -#endif -void ixgbe_do_reset(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); -} - -static netdev_features_t ixgbe_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_LRO; - - /* Turn off LRO if not RSC capable */ - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) - features &= ~NETIF_F_LRO; - - if (adapter->xdp_prog && (features & NETIF_F_LRO)) { - e_dev_err("LRO is not supported with XDP\n"); - features &= ~NETIF_F_LRO; - } - - return features; -} - -static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) -{ - int rss = min_t(int, ixgbe_max_rss_indices(adapter), - num_online_cpus()); - - /* go back to full RSS if we're not running SR-IOV */ - if (!adapter->ring_feature[RING_F_VMDQ].offset) - adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED); - - adapter->ring_feature[RING_F_RSS].limit = rss; - adapter->ring_feature[RING_F_VMDQ].limit = 1; - - ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); -} - -static int ixgbe_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = netdev->features ^ features; - bool need_reset = false; - - /* Make sure RSC matches LRO, reset if change */ - if (!(features & NETIF_F_LRO)) { - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - need_reset = true; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && - !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - if (adapter->rx_itr_setting == 1 || - adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - need_reset = true; - } else if ((changed ^ features) & NETIF_F_LRO) { - e_info(probe, "rx-usecs set too low, " - "disabling RSC\n"); - } - } - - /* - * Check if Flow Director n-tuple support or hw_tc support was - * enabled or disabled. If the state changed, we need to reset. 
- */ - if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { - /* turn off ATR, enable perfect filters and reset */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - need_reset = true; - - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - } else { - /* turn off perfect filters, enable ATR and reset */ - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - need_reset = true; - - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - - /* We cannot enable ATR if SR-IOV is enabled */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || - /* We cannot enable ATR if we have 2 or more tcs */ - (adapter->hw_tcs > 1) || - /* We cannot enable ATR if RSS is disabled */ - (adapter->ring_feature[RING_F_RSS].limit <= 1) || - /* A sample rate of 0 indicates ATR disabled */ - (!adapter->atr_sample_rate)) - ; /* do nothing not supported */ - else /* otherwise supported and set the flag */ - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - } - - if (changed & NETIF_F_RXALL) - need_reset = true; - - netdev->features = features; - - if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) { - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } - } - - if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) { - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } - } - - if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) - ixgbe_reset_l2fw_offload(adapter); - else if (need_reset) - ixgbe_do_reset(netdev); - else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER)) - ixgbe_set_rx_mode(netdev); - - return 1; -} - -/** - * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports - * @dev: The port's netdev - * @ti: Tunnel endpoint information - **/ -static void ixgbe_add_udp_tunnel_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - __be16 port = ti->port; - u32 port_shift = 0; - u32 reg; - - if (ti->sa_family != AF_INET) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; - - if (adapter->vxlan_port == port) - return; - - if (adapter->vxlan_port) { - netdev_info(dev, - "VXLAN port %d set, not adding port %d\n", - ntohs(adapter->vxlan_port), - ntohs(port)); - return; - } - - adapter->vxlan_port = port; - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - if (adapter->geneve_port == port) - return; - - if (adapter->geneve_port) { - netdev_info(dev, - "GENEVE port %d set, not adding port %d\n", - ntohs(adapter->geneve_port), - ntohs(port)); - return; - } - - port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; - adapter->geneve_port = port; - break; - default: - return; - } - - reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); -} - -/** - * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports - * @dev: The port's netdev - * @ti: Tunnel endpoint information - **/ -static void ixgbe_del_udp_tunnel_port(struct net_device *dev, - struct 
udp_tunnel_info *ti) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - u32 port_mask; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN && - ti->type != UDP_TUNNEL_TYPE_GENEVE) - return; - - if (ti->sa_family != AF_INET) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; - - if (adapter->vxlan_port != ti->port) { - netdev_info(dev, "VXLAN port %d not found\n", - ntohs(ti->port)); - return; - } - - port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - if (adapter->geneve_port != ti->port) { - netdev_info(dev, "GENEVE port %d not found\n", - ntohs(ti->port)); - return; - } - - port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - break; - default: - return; - } - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; -} - -static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid, - u16 flags, - struct netlink_ext_ack *extack) -{ - /* guarantee we can provide a unique filter for the unicast address */ - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - u16 pool = VMDQ_P(0); - - if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) - return -ENOMEM; - } - - return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); -} - -/** - * ixgbe_configure_bridge_mode - set various bridge modes - * @adapter: the private structure - * @mode: requested bridge mode - * - * Configure some settings require for various bridge modes. - **/ -static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, - __u16 mode) -{ - struct ixgbe_hw *hw = &adapter->hw; - unsigned int p, num_pools; - u32 vmdctl; - - switch (mode) { - case BRIDGE_MODE_VEPA: - /* disable Tx loopback, rely on switch hairpin mode */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); - - /* must enable Rx switching replication to allow multicast - * packet reception on all VFs, and to enable source address - * pruning. - */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - vmdctl |= IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - - /* enable Rx source address pruning. Note, this requires - * replication to be enabled or else it does nothing. - */ - num_pools = adapter->num_vfs + adapter->num_rx_pools; - for (p = 0; p < num_pools; p++) { - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, - true, - p); - } - break; - case BRIDGE_MODE_VEB: - /* enable Tx loopback for internal VF/PF communication */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, - IXGBE_PFDTXGSWC_VT_LBEN); - - /* disable Rx switching replication unless we have SR-IOV - * virtual functions - */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - if (!adapter->num_vfs) - vmdctl &= ~IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - - /* disable Rx source address pruning, since we don't expect to - * be receiving external loopback of our transmitted frames. - */ - num_pools = adapter->num_vfs + adapter->num_rx_pools; - for (p = 0; p < num_pools; p++) { - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, - false, - p); - } - break; - default: - return -EINVAL; - } - - adapter->bridge_mode = mode; - - e_info(drv, "enabling bridge mode: %s\n", - mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); - - return 0; -} - -static int ixgbe_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags, - struct netlink_ext_ack *extack) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct nlattr *attr, *br_spec; - int rem; - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return -EOPNOTSUPP; - - br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); - if (!br_spec) - return -EINVAL; - - nla_for_each_nested(attr, br_spec, rem) { - int status; - __u16 mode; - - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - - if (nla_len(attr) < sizeof(mode)) - return -EINVAL; - - mode = nla_get_u16(attr); - status = ixgbe_configure_bridge_mode(adapter, mode); - if (status) - return status; - - break; - } - - return 0; -} - -static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, - u32 filter_mask, int nlflags) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return 0; - - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, - adapter->bridge_mode, 0, 0, nlflags, - filter_mask, NULL); -} - -static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(pdev); - struct ixgbe_fwd_adapter *accel; - int tcs = adapter->hw_tcs ? : 1; - int pool, err; - - if (adapter->xdp_prog) { - e_warn(probe, "L2FW offload is not supported with XDP\n"); - return ERR_PTR(-EINVAL); - } - - /* The hardware supported by ixgbe only filters on the destination MAC - * address. In order to avoid issues we only support offloading modes - * where the hardware can actually provide the functionality. - */ - if (!macvlan_supports_dest_filter(vdev)) - return ERR_PTR(-EMEDIUMTYPE); - - /* We need to lock down the macvlan to be a single queue device so that - * we can reuse the tc_to_txq field in the macvlan netdev to represent - * the queue mapping to our netdev. - */ - if (netif_is_multiqueue(vdev)) - return ERR_PTR(-ERANGE); - - pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); - if (pool == adapter->num_rx_pools) { - u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; - u16 reserved_pools; - - if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || - adapter->num_rx_pools > IXGBE_MAX_MACVLANS) - return ERR_PTR(-EBUSY); - - /* Hardware has a limited number of available pools. Each VF, - * and the PF require a pool. Check to ensure we don't - * attempt to use more then the available number of pools. - */ - if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) - return ERR_PTR(-EBUSY); - - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED; - - /* Try to reserve as many queues per pool as possible, - * we start with the configurations that support 4 queues - * per pools, followed by 2, and then by just 1 per pool. 
- */ - if (used_pools < 32 && adapter->num_rx_pools < 16) - reserved_pools = min_t(u16, - 32 - used_pools, - 16 - adapter->num_rx_pools); - else if (adapter->num_rx_pools < 32) - reserved_pools = min_t(u16, - 64 - used_pools, - 32 - adapter->num_rx_pools); - else - reserved_pools = 64 - used_pools; - - - if (!reserved_pools) - return ERR_PTR(-EBUSY); - - adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; - - /* Force reinit of ring allocation with VMDQ enabled */ - err = ixgbe_setup_tc(pdev, adapter->hw_tcs); - if (err) - return ERR_PTR(err); - - if (pool >= adapter->num_rx_pools) - return ERR_PTR(-ENOMEM); - } - - accel = kzalloc(sizeof(*accel), GFP_KERNEL); - if (!accel) - return ERR_PTR(-ENOMEM); - - set_bit(pool, adapter->fwd_bitmask); - netdev_set_sb_channel(vdev, pool); - accel->pool = pool; - accel->netdev = vdev; - - if (!netif_running(pdev)) - return accel; - - err = ixgbe_fwd_ring_up(adapter, accel); - if (err) - return ERR_PTR(err); - - return accel; -} - -static void ixgbe_fwd_del(struct net_device *pdev, void *priv) -{ - struct ixgbe_fwd_adapter *accel = priv; - struct ixgbe_adapter *adapter = netdev_priv(pdev); - unsigned int rxbase = accel->rx_base_queue; - unsigned int i; - - /* delete unicast filter associated with offloaded interface */ - ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, - VMDQ_P(accel->pool)); - - /* Allow remaining Rx packets to get flushed out of the - * Rx FIFO before we drop the netdev for the ring. - */ - usleep_range(10000, 20000); - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; - struct ixgbe_q_vector *qv = ring->q_vector; - - /* Make sure we aren't processing any packets and clear - * netdev to shut down the ring. - */ - if (netif_running(adapter->netdev)) - napi_synchronize(&qv->napi); - ring->netdev = NULL; - } - - /* unbind the queues and drop the subordinate channel config */ - netdev_unbind_sb_channel(pdev, accel->netdev); - netdev_set_sb_channel(accel->netdev, 0); - - clear_bit(accel->pool, adapter->fwd_bitmask); - kfree(accel); -} - -#define IXGBE_MAX_MAC_HDR_LEN 127 -#define IXGBE_MAX_NETWORK_HDR_LEN 511 - -static netdev_features_t -ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, - netdev_features_t features) -{ - unsigned int network_hdr_len, mac_hdr_len; - - /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; - if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) - return features & ~(NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_TSO | - NETIF_F_TSO6); - - network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); - if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) - return features & ~(NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_TSO | - NETIF_F_TSO6); - - /* We can only support IPV4 TSO in tunnels if we can mangle the - * inner IP ID field, so strip TSO if MANGLEID is not supported. - * IPsec offoad sets skb->encapsulation but still can handle - * the TSO, so it's the exception. 
- */ - if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { -#ifdef CONFIG_IXGBE_IPSEC - if (!secpath_exists(skb)) -#endif - features &= ~NETIF_F_TSO; - } - - return features; -} - -static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) -{ - int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct bpf_prog *old_prog; - bool need_reset; - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - return -EINVAL; - - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - return -EINVAL; - - /* verify ixgbe ring attributes are sufficient for XDP */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; - - if (ring_is_rsc_enabled(ring)) - return -EINVAL; - - if (frame_size > ixgbe_rx_bufsz(ring)) - return -EINVAL; - } - - if (nr_cpu_ids > MAX_XDP_QUEUES) - return -ENOMEM; - - old_prog = xchg(&adapter->xdp_prog, prog); - need_reset = (!!prog != !!old_prog); - - /* If transitioning XDP modes reconfigure rings */ - if (need_reset) { - int err = ixgbe_setup_tc(dev, adapter->hw_tcs); - - if (err) { - rcu_assign_pointer(adapter->xdp_prog, old_prog); - return -EINVAL; - } - } else { - for (i = 0; i < adapter->num_rx_queues; i++) - (void)xchg(&adapter->rx_ring[i]->xdp_prog, - adapter->xdp_prog); - } - - if (old_prog) - bpf_prog_put(old_prog); - - /* Kick start the NAPI context if there is an AF_XDP socket open - * on that queue id. This so that receiving will start. - */ - if (need_reset && prog) - for (i = 0; i < adapter->num_rx_queues; i++) - if (adapter->xdp_ring[i]->xsk_umem) - (void)ixgbe_xsk_wakeup(adapter->netdev, i, - XDP_WAKEUP_RX); - - return 0; -} - -static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - switch (xdp->command) { - case XDP_SETUP_PROG: - return ixgbe_xdp_setup(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = adapter->xdp_prog ? - adapter->xdp_prog->aux->id : 0; - return 0; - case XDP_SETUP_XSK_UMEM: - return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, - xdp->xsk.queue_id); - - default: - return -EINVAL; - } -} - -void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) -{ - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. - */ - wmb(); - writel(ring->next_to_use, ring->tail); -} - -static int ixgbe_xdp_xmit(struct net_device *dev, int n, - struct xdp_frame **frames, u32 flags) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ring *ring; - int drops = 0; - int i; - - if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) - return -ENETDOWN; - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) - return -EINVAL; - - /* During program transitions its possible adapter->xdp_prog is assigned - * but ring has not been configured yet. In this case simply abort xmit. - */ - ring = adapter->xdp_prog ? 
adapter->xdp_ring[smp_processor_id()] : NULL; - if (unlikely(!ring)) - return -ENXIO; - - if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) - return -ENXIO; - - for (i = 0; i < n; i++) { - struct xdp_frame *xdpf = frames[i]; - int err; - - err = ixgbe_xmit_xdp_ring(adapter, xdpf); - if (err != IXGBE_XDP_TX) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } - } - - if (unlikely(flags & XDP_XMIT_FLUSH)) - ixgbe_xdp_ring_update_tail(ring); - - return n - drops; -} - -static const struct net_device_ops ixgbe_netdev_ops = { - .ndo_open = ixgbe_open, - .ndo_stop = ixgbe_close, - .ndo_start_xmit = ixgbe_xmit_frame, - .ndo_set_rx_mode = ixgbe_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ixgbe_set_mac, - .ndo_change_mtu = ixgbe_change_mtu, - .ndo_tx_timeout = ixgbe_tx_timeout, - .ndo_set_tx_maxrate = ixgbe_tx_maxrate, - .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, - .ndo_do_ioctl = ixgbe_ioctl, - .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, - .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, - .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, - .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, - .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, - .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, - .ndo_get_vf_config = ixgbe_ndo_get_vf_config, - .ndo_get_stats64 = ixgbe_get_stats64, - .ndo_setup_tc = __ixgbe_setup_tc, -#ifdef IXGBE_FCOE - .ndo_select_queue = ixgbe_select_queue, - .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, - .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, - .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, - .ndo_fcoe_enable = ixgbe_fcoe_enable, - .ndo_fcoe_disable = ixgbe_fcoe_disable, - .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, - .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, -#endif /* IXGBE_FCOE */ - .ndo_set_features = ixgbe_set_features, - .ndo_fix_features = ixgbe_fix_features, - .ndo_fdb_add = ixgbe_ndo_fdb_add, - .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, - .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, - .ndo_dfwd_add_station = ixgbe_fwd_add, - .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, - .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, - .ndo_features_check = ixgbe_features_check, - .ndo_bpf = ixgbe_xdp, - .ndo_xdp_xmit = ixgbe_xdp_xmit, - .ndo_xsk_wakeup = ixgbe_xsk_wakeup, -}; - +#ifdef HAVE_AF_XDP_ZC_SUPPORT static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { @@ -10544,47 +10058,2089 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) if (xdp_ring) clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); } +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_SUPPORT */ -/** - * ixgbe_enumerate_functions - Get the number of ports this device has - * @adapter: adapter structure - * - * This function enumerates the phsyical functions co-located on a single slot, - * in order to determine how many ports a device has. This is most useful in - * determining the required GT/s of PCIe bandwidth necessary for optimal - * performance. 
- **/ -static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter __maybe_unused *adapter, + struct ixgbe_ring *tx_ring) { - struct pci_dev *entry, *pdev = adapter->pdev; - int physfns = 0; + struct ixgbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; - /* Some cards can not use the generic count PCIe functions method, - * because they are behind a parent switch, so we hardcode these with - * the correct number of functions. + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time */ - if (ixgbe_pcie_from_parent(&adapter->hw)) - physfns = 4; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); - list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { - /* don't count virtual functions */ - if (entry->is_virtfn) - continue; - - /* When the devices on the bus don't all match our device ID, - * we can't reliably determine the correct number of - * functions. This can occur if a function has been direct - * attached to a virtual machine using VT-d, for example. In - * this case, simply return -1 to indicate this. - */ - if ((entry->vendor != pdev->vendor) || - (entry->device != pdev->device)) - return -1; - - physfns++; + if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; } - return physfns; + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; + } + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_tx(skb)->hardware) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_tx(skb)->in_progress = 1; +#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + +#endif +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. 
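The ring math at the top of ixgbe_xmit_frame_ring reserves one descriptor per IXGBE_MAX_DATA_PER_TXD chunk of every fragment, plus a context descriptor and a two-descriptor gap to keep tail from touching head. A standalone sketch of that accounting, assuming the usual 2^14-byte chunk size (the constant here is illustrative, not read from the driver headers):

#include <stdio.h>

#define MAX_DATA_PER_TXD (1u << 14)	/* assumed bytes per descriptor */
#define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

/* Descriptors needed: head chunks + per-frag chunks + 1 context + 2 gap */
static unsigned int descs_needed(unsigned int headlen,
				 const unsigned int *frag_len, int nr_frags)
{
	unsigned int count = TXD_USE_COUNT(headlen);
	int f;

	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_len[f]);
	return count + 3;	/* matches the "count + 3" stop-tx check */
}

int main(void)
{
	unsigned int frags[2] = { 65536, 1200 };

	/* 65536 bytes span 4 chunks of 16384: expect 1 + 4 + 1 + 3 = 9 */
	printf("%u descriptors\n", descs_needed(1000, frags, 2));
	return 0;
}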
+ */ + if (adapter->flags & IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= IXGBE_TX_FLAGS_CC; + +#endif +#ifdef HAVE_TX_MQ + if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL))) { + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; +#if IS_ENABLED(CONFIG_FCOE) + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + ((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP)))) + tx_flags |= adapter->fcoe.up << + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + else +#endif /* CONFIG_FCOE */ + tx_flags |= skb->priority << + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + IXGBE_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + } + } + +#endif /* HAVE_TX_MQ */ + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + +#if IS_ENABLED(CONFIG_FCOE) + /* setup tx offload for FCoE */ + if ((protocol == htons(ETH_P_FCOE)) && + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { + tso = ixgbe_fso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + + goto xmit_fcoe; + } else if (protocol == htons(ETH_P_FIP)) { + /* FCoE stack has a bug where it does not set the network + * header offset for FIP frames sent resulting into MACLEN + * being set to ZERO in the Tx context descriptor. + * This will cause MDD events when trying to Tx such frames. + */ + if (!skb_network_offset(skb)) { + if (tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | + IXGBE_TX_FLAGS_SW_VLAN)) + skb_set_network_header(skb, + sizeof(struct ethhdr) + + sizeof(struct vlan_hdr)); + else + skb_set_network_header(skb, + sizeof(struct ethhdr)); + } + } +#endif /* CONFIG_FCOE */ + + tso = ixgbe_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + ixgbe_tx_csum(tx_ring, first); + + /* add the ATR filter if ATR is on */ + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, first); + +#if IS_ENABLED(CONFIG_FCOE) +xmit_fcoe: +#endif /* CONFIG_FCOE */ +#ifdef HAVE_PTP_1588_CLOCK + if (ixgbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; +#else + ixgbe_tx_map(tx_ring, first, hdr_len); +#endif + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +#ifdef HAVE_PTP_1588_CLOCK +cleanup_tx_tstamp: + if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } +#endif + + return NETDEV_TX_OK; +} + +static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev, + __always_unused struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
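__ixgbe_xmit_frame below both folds an out-of-range queue_mapping back into the configured Tx range and pads runts to the 17-byte olinfo-paylen minimum cited in its comment. The same two guards as a minimal sketch, with buffer handling simplified:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define MIN_TX_LEN 17	/* minimum olinfo paylen, per the driver comment */

/* Zero-pad a frame shorter than the hardware minimum; returns new length,
 * or 0 when the buffer cannot hold the padded frame and must be dropped. */
static size_t pad_runt(unsigned char *buf, size_t len, size_t cap)
{
	if (len >= MIN_TX_LEN)
		return len;
	if (cap < MIN_TX_LEN)
		return 0;
	memset(buf + len, 0, MIN_TX_LEN - len);
	return MIN_TX_LEN;
}

/* Fold any queue_mapping into the range of configured Tx queues. */
static unsigned int pick_ring(unsigned int queue_mapping, unsigned int num_tx)
{
	return queue_mapping >= num_tx ? queue_mapping % num_tx : queue_mapping;
}

int main(void)
{
	unsigned char frame[64] = { 0xde, 0xad };

	printf("len=%zu ring=%u\n", pad_runt(frame, 2, sizeof(frame)),
	       pick_ring(9, 8));	/* -> len=17 ring=1 */
	return 0;
}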
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + +#ifdef HAVE_TX_MQ + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) + return NETDEV_TX_BUSY; +#endif + + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + return __ixgbe_xmit_frame(skb, netdev, NULL); +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ixgbe_mac_set_default_filter(adapter); + + return 0; +} + +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) +/** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * dev->dev_addr_list + * @dev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = IXGBE_SUCCESS; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + if (is_valid_ether_addr(hw->mac.san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, hw->mac.san_addr, + NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + } + return err; +} + +/** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @dev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = IXGBE_SUCCESS; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */ + +static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, + u16 addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 value; + int rc; + + if (prtad != hw->phy.addr) + return -EINVAL; + + if (!hw->phy.ops.read_reg) + return -EOPNOTSUPP; + + rc = hw->phy.ops.read_reg(hw, addr, devad, &value); + if (!rc) + rc = value; + + return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (prtad != hw->phy.addr) + return -EINVAL; + + if (!hw->phy.ops.write_reg) + return -EOPNOTSUPP; + + return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int ixgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; + int prtad, devad, ret; + + prtad = (mii->phy_id 
& MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = ixgbe_mdio_read(netdev, prtad, devad, mii->reg_num); + if (ret < 0) + return ret; + mii->val_out = ret; + return 0; + } else { + return ixgbe_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ +#ifdef HAVE_PTP_1588_CLOCK + struct ixgbe_adapter *adapter = netdev_priv(netdev); + +#endif + switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, ifr); +#endif + case SIOCSHWTSTAMP: + return ixgbe_ptp_set_ts_config(adapter, ifr); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + return ixgbe_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbe_netpoll(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; + } + } else { + ixgbe_intr(0, adapter); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. + * @adapter: pointer to ixgbe_adapter + * @tc: number of traffic classes currently enabled + * + * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm + * 802.1Q priority maps to a packet buffer that exists. + */ +static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg, rsave; + int i; + + /* 82598 have a static priority to TC mapping that can not + * be changed so no validation is needed. 
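The MII ioctl path above unpacks a clause-45 address pair from mii->phy_id. A small sketch of that decode, using the MDIO_PHY_ID_PRTAD/MDIO_PHY_ID_DEVAD layout from linux/mdio.h (port address in bits 9:5, device address in bits 4:0):

#include <stdio.h>
#include <stdint.h>

#define MDIO_PHY_ID_PRTAD 0x03e0	/* port address, bits 9:5 */
#define MDIO_PHY_ID_DEVAD 0x001f	/* device address, bits 4:0 */

static void mdio_decode(uint16_t phy_id, int *prtad, int *devad)
{
	*prtad = (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
	*devad = phy_id & MDIO_PHY_ID_DEVAD;
}

int main(void)
{
	int prtad, devad;

	mdio_decode(0x00a1, &prtad, &devad);	/* port 5, device 1 */
	printf("prtad=%d devad=%d\n", prtad, devad);
	return 0;
}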
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + rsave = reg; + + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); + + /* If up2tc is out of bounds default to zero */ + if (up2tc > tc) + reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); + } + + if (reg != rsave) + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + return; +} + +/** + * ixgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter __maybe_unused *adapter) +{ +#ifdef HAVE_DCBNL_IEEE + struct net_device *dev = adapter->netdev; + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->ixgbe_ieee_ets; + u8 prio; + + for (prio = 0; prio < IXGBE_DCB_MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) + tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); + else if (ets) + tc = ets->prio_tc[prio]; + + netdev_set_prio_tc_map(dev, prio, tc); + } +#endif +} + +#ifdef NETIF_F_HW_TC +#ifdef HAVE_TC_SETUP_CLSU32 +static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + u32 hdl = cls->knode.handle; + u32 uhtid = TC_U32_USERHTID(cls->knode.handle); + u32 loc = cls->knode.handle & 0xfffff; + int err = 0, i, j; + struct ixgbe_jump_table *jump = NULL; + + if (loc > IXGBE_MAX_HW_ENTRIES) + return -EINVAL; + + if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) + return -EINVAL; + + /* Clear this filter in the link data it is associated with */ + if (uhtid != 0x800) { + jump = adapter->jump_tables[uhtid]; + if (!jump) + return -EINVAL; + if (!test_bit(loc - 1, jump->child_loc_map)) + return -EINVAL; + clear_bit(loc - 1, jump->child_loc_map); + } + + /* Check if the filter being deleted is a link */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + jump = adapter->jump_tables[i]; + if (jump && jump->link_hdl == hdl) { + /* Delete filters in the hardware in the child hash + * table associated with this link + */ + for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { + if (!test_bit(j, jump->child_loc_map)) + continue; + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, + NULL, + j + 1); + spin_unlock(&adapter->fdir_perfect_lock); + clear_bit(j, jump->child_loc_map); + } + /* Remove resources for this link */ + kfree(jump->input); + kfree(jump->mask); + kfree(jump); + adapter->jump_tables[i] = NULL; + return err; + } + } + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); + spin_unlock(&adapter->fdir_perfect_lock); + return err; +} + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ +#else +static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ +#endif + u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + /* This ixgbe devices do not support hash tables at the moment + * so abort when given hash tables. 
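The cls_u32 handles decoded in ixgbe_delete_clsu32 above pack a user hash-table id and a filter location into one 32-bit word; the driver treats user htid 0x800 as the root table and "handle & 0xfffff" as the location. A sketch of the split, following the TC_U32_USERHTID convention of keeping the htid in the top 12 bits:

#include <stdio.h>
#include <stdint.h>

#define U32_USERHTID(h)	(((h) & 0xfff00000u) >> 20)	/* top 12 bits */
#define U32_LOC(h)	((h) & 0x000fffffu)		/* hash + node id */
#define U32_ROOT_HTID	0x800u

int main(void)
{
	uint32_t handle = 0x800FF00Cu;	/* hypothetical root-table filter */

	printf("htid=0x%x%s loc=0x%x\n",
	       (unsigned)U32_USERHTID(handle),
	       U32_USERHTID(handle) == U32_ROOT_HTID ? " (root)" : "",
	       (unsigned)U32_LOC(handle));
	return 0;
}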
+ */ + if (cls->hnode.divisor > 0) + return -EINVAL; + + set_bit(uhtid - 1, &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + clear_bit(uhtid - 1, &adapter->tables); + return 0; +} + +#ifdef CONFIG_NET_CLS_ACT +#ifdef HAVE_TCF_MIRRED_REDIRECT +static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, + u8 *queue, u64 *action) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + struct net_device *upper; + + /* redirect to a SRIOV VF */ + for (vf = 0; vf < num_vfs; ++vf) { + upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); + if (upper->ifindex == ifindex) { + if (adapter->num_rx_pools > 1) + *queue = vf * 2; + else + *queue = vf * adapter->num_rx_queues_per_pool; + + *action = vf + 1; + *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + return 0; + } + } + return -EINVAL; +} +#endif + +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; +#if defined(HAVE_TCF_EXTS_TO_LIST) + LIST_HEAD(actions); +#elif defined(HAVE_TCF_EXTS_FOR_EACH_ACTION) + int i; +#endif + +#ifdef HAVE_TCF_EXTS_HAS_ACTION + if (!tcf_exts_has_actions(exts)) +#else + if (tc_no_actions(exts)) +#endif + return -EINVAL; + +#if defined(HAVE_TCF_EXTS_TO_LIST) + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { +#elif defined(HAVE_TCF_EXTS_FOR_EACH_ACTION) + tcf_exts_for_each_action(i, a, exts) { +#else + tc_for_each_action(a, exts) { +#endif + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = IXGBE_FDIR_DROP_QUEUE; + *queue = IXGBE_FDIR_DROP_QUEUE; + return 0; + } + +#ifdef HAVE_TCF_MIRRED_REDIRECT + /* Redirect to a VF or a offloaded macvlan */ +#ifdef HAVE_TCF_MIRRED_EGRESS_REDIRECT + if (is_tcf_mirred_egress_redirect(a)) { +#else + if (is_tcf_mirred_redirect(a)) { +#endif +#ifdef HAVE_TCF_MIRRED_DEV + struct net_device *dev = tcf_mirred_dev(a); + + if (!dev) + return -EINVAL; + return handle_redirect_action(adapter, dev->ifindex, + queue, action); +#else + int ifindex = tcf_mirred_ifindex(a); + + return handle_redirect_action(adapter, ifindex, + queue, action); +#endif + } +#endif + return -EINVAL; + } + + return -EINVAL; +} +#else +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif /* CONFIG_NET_CLS_ACT */ + +static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + struct tc_cls_u32_offload *cls, + struct ixgbe_mat_field *field_ptr, + struct ixgbe_nexthdr *nexthdr) +{ + int i, j, off; + __be32 val, m; + bool found_entry = false, found_jump_field = false; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + m = cls->knode.sel->keys[i].mask; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off) { + field_ptr[j].val(input, mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + if (nexthdr) { + if (nexthdr->off == cls->knode.sel->keys[i].off && + nexthdr->val == cls->knode.sel->keys[i].val && + nexthdr->mask == cls->knode.sel->keys[i].mask) + found_jump_field = true; + else + continue; + } + } + + if (nexthdr && !found_jump_field) + return -EINVAL; + + if (!found_entry) + return 0; + + 
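handle_redirect_action above encodes a VF redirect as a ring-cookie style action: the VF index plus one shifted into the upper 32 bits (so zero still means the PF), with the base queue derived from the pool layout. A standalone sketch of that encoding, assuming the 32-bit VF offset ethtool ring cookies use:

#include <stdio.h>
#include <stdint.h>

#define RING_VF_OFF 32	/* ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF */

/* Encode "redirect to VF vf": queue picks the pool's first ring, the
 * action cookie carries (vf + 1) in its upper word. */
static uint64_t encode_vf_redirect(unsigned int vf,
				   unsigned int queues_per_pool,
				   uint8_t *queue)
{
	*queue = (uint8_t)(vf * queues_per_pool);
	return ((uint64_t)(vf + 1)) << RING_VF_OFF;
}

int main(void)
{
	uint8_t queue;
	uint64_t action = encode_vf_redirect(3, 2, &queue);

	printf("action=0x%llx queue=%u\n",
	       (unsigned long long)action, (unsigned)queue);
	return 0;
}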
mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + return 0; +} + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + __be16 protocol = cls->common.protocol; +#else +static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ +#endif + u32 loc = cls->knode.handle & 0xfffff; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_mat_field *field_ptr; + struct ixgbe_fdir_filter *input = NULL; + union ixgbe_atr_input *mask = NULL; + struct ixgbe_jump_table *jump = NULL; + int i, err = -EINVAL; + u8 queue; + u32 uhtid, link_uhtid; + + uhtid = TC_U32_USERHTID(cls->knode.handle); + link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); + + /* At the moment cls_u32 jumps to network layer and skips past + * L2 headers. The canonical method to match L2 frames is to use + * negative values. However this is error prone at best but really + * just broken because there is no way to "know" what sort of hdr + * is in front of the network layer. Fix cls_u32 to support L2 + * headers when needed. + */ + if (protocol != htons(ETH_P_IP)) + return err; + + if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return err; + } + + /* cls u32 is a graph starting at root node 0x800. The driver tracks + * links and also the fields used to advance the parser across each + * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map + * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h + * To add support for new nodes update ixgbe_model.h parse structures + * this function _should_ be generic try not to hardcode values here. + */ + if (uhtid == 0x800) { + field_ptr = (adapter->jump_tables[0])->mat; + } else { + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + if (!adapter->jump_tables[uhtid]) + return err; + field_ptr = (adapter->jump_tables[uhtid])->mat; + } + + if (!field_ptr) + return err; + + /* At this point we know the field_ptr is valid and need to either + * build cls_u32 link or attach filter. Because adding a link to + * a handle that does not exist is invalid and the same for adding + * rules to handles that don't exist. + */ + + if (link_uhtid) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; + + if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + + if (!test_bit(link_uhtid - 1, &adapter->tables)) + return err; + + /* Multiple filters as links to the same hash table are not + * supported. To add a new filter with the same next header + * but different match/jump conditions, create a new hash table + * and link to it. 
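A few checks into ixgbe_configure_clsu32 above, the location bound is derived from fdir_pballoc as 1024 << pballoc entries with the last two slots reserved. A sketch of that exclusive bound; treat the pballoc encoding (0, 1, 2 for the growing packet-buffer splits) as an assumption about the hardware table sizing:

#include <stdio.h>
#include <stdbool.h>

/* Highest usable software index for a given flow-director allocation. */
static unsigned int fdir_max_loc(unsigned int pballoc)
{
	return (1024u << pballoc) - 2;	/* two trailing slots reserved */
}

static bool fdir_loc_ok(unsigned int loc, unsigned int pballoc)
{
	return loc < fdir_max_loc(pballoc);
}

int main(void)
{
	printf("pballoc=1: limit %u, loc 2046 is %s\n", fdir_max_loc(1),
	       fdir_loc_ok(2046, 1) ? "ok" : "out of range");
	return 0;
}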
+ */ + if (adapter->jump_tables[link_uhtid] && + (adapter->jump_tables[link_uhtid])->link_hdl) { + e_err(drv, "Link filter exists for link: %x\n", + link_uhtid); + return err; + } + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr[i].o != cls->knode.sel->offoff || + nexthdr[i].s != cls->knode.sel->offshift || + nexthdr[i].m != cls->knode.sel->offmask) + return err; + + jump = kzalloc(sizeof(*jump), GFP_KERNEL); + if (!jump) + return -ENOMEM; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) { + err = -ENOMEM; + goto free_jump; + } + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + jump->input = input; + jump->mask = mask; + jump->link_hdl = cls->knode.handle; + + err = ixgbe_clsu32_build_input(input, mask, cls, + field_ptr, &nexthdr[i]); + if (!err) { + jump->mat = nexthdr[i].jump; + adapter->jump_tables[link_uhtid] = jump; + break; + } + } + + return 0; + } + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { + if ((adapter->jump_tables[uhtid])->input) + memcpy(input, (adapter->jump_tables[uhtid])->input, + sizeof(*input)); + if ((adapter->jump_tables[uhtid])->mask) + memcpy(mask, (adapter->jump_tables[uhtid])->mask, + sizeof(*mask)); + + /* Lookup in all child hash tables if this location is already + * filled with a filter + */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + struct ixgbe_jump_table *link = adapter->jump_tables[i]; + + if (link && (test_bit(loc - 1, link->child_loc_map))) { + e_err(drv, "Filter exists in location: %x\n", + loc); + err = -EINVAL; + goto err_out; + } + } + } + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); + if (err) + goto err_out; + + err = parse_tc_actions(adapter, cls->knode.exts, &input->action, + &queue); + if (err < 0) + goto err_out; + + input->sw_idx = loc; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, mask, + adapter->cloud_mode); + if (err) + goto err_out_w_lock; + } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { + err = -EINVAL; + goto err_out_w_lock; + } + + ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, + input->sw_idx, queue, + adapter->cloud_mode); + if (!err) + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) + set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); + + kfree(mask); + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(mask); +free_input: + kfree(input); +free_jump: + kfree(jump); + return err; +} + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#ifdef HAVE_TCF_BLOCK +static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls_u32) +#else +static int ixgbe_setup_tc_cls_u32(struct net_device *dev, + struct tc_cls_u32_offload *cls_u32) +#endif /* HAVE_TCF_BLOCK */ +{ +#ifdef HAVE_TCF_BLOCK +#ifndef HAVE_TCF_MIRRED_DEV + if (cls_u32->common.chain_index) + return -EOPNOTSUPP; +#endif +#else + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!is_classid_clsact_ingress(cls_u32->common.classid) || + 
cls_u32->common.chain_index) + return -EOPNOTSUPP; +#endif + + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); + default: + return -EOPNOTSUPP; + + } +} +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +#endif /* HAVE_TC_SETUP_CLSU32 */ + +#ifdef HAVE_TCF_BLOCK +static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct ixgbe_adapter *adapter = cb_priv; + +#ifdef HAVE_TCF_MIRRED_DEV + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) +#else + if (!tc_can_offload(adapter->netdev)) +#endif + return -EOPNOTSUPP; + + switch (type) { +#ifdef HAVE_TC_SETUP_CLSU32 + case TC_SETUP_CLSU32: + return ixgbe_setup_tc_cls_u32(adapter, type_data); +#endif /* HAVE_TC_SETUP_CLSU32 */ + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(ixgbe_block_cb_list); + +#endif +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX +static int ixgbe_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return ixgbe_setup_tc(dev, mqprio->num_tc); +} +#endif + +#ifdef HAVE_TCF_BLOCK +#define IXGBE_SETUP_TC_DECLARE_ADAPTER +#endif /* HAVE_TCF_BLOCK */ + +#ifdef HAVE_TC_SETUP_CLSU32 +#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define IXGBE_SETUP_TC_DECLARE_ADAPTER +#endif /* !HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +#endif /* HAVE_TC_SETUP_CLSU32 */ + +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) +static int +__ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX) +static int +__ixgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, + u32 chain_index, __always_unused __be16 proto, + struct tc_to_netdev *tc) +#else +static int +__ixgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, + __always_unused __be16 proto, struct tc_to_netdev *tc) +#endif +{ +#ifdef IXGBE_SETUP_TC_DECLARE_ADAPTER + struct ixgbe_adapter *adapter = netdev_priv(dev); +#endif +#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + unsigned int type = tc->type; +#ifdef HAVE_NDO_SETUP_TC_CHAIN_INDEX + + if (chain_index) + return -EOPNOTSUPP; +#endif /* HAVE_NDO_SETUP_TC_CHAIN_INDEX */ +#ifdef HAVE_TC_SETUP_CLSU32 + + if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && + type == TC_SETUP_CLSU32) { + + switch (tc->cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, + proto, tc->cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, tc->cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, proto, + tc->cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, + tc->cls_u32); + default: + return -EINVAL; + } + } +#endif /* HAVE_TC_SETUP_CLSU32 */ +#endif /* !HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ + + switch (type) { +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) +#if defined(HAVE_TCF_BLOCK) + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &ixgbe_block_cb_list, + ixgbe_setup_tc_block_cb, + adapter, adapter, true); +#else +#ifdef HAVE_TC_SETUP_CLSU32 + 
case TC_SETUP_CLSU32: + return ixgbe_setup_tc_cls_u32(dev, type_data); +#endif /* HAVE_TC_SETUP_CLSU32 */ +#endif /* HAVE_TCF_BLOCK */ +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ + case TC_SETUP_QDISC_MQPRIO: +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) + return ixgbe_setup_tc_mqprio(dev, type_data); +#elif defined(TC_MQPRIO_HW_OFFLOAD_MAX) + return ixgbe_setup_tc_mqprio(dev, tc->mqprio); +#else + return ixgbe_setup_tc(dev, tc->tc); +#endif + default: + return -EOPNOTSUPP; + } +} + +#endif /* NETIF_F_HW_TC */ +/** + * ixgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int ixgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + /* Hardware supports up to 8 traffic classes */ + if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (tc && hw->mac.type == ixgbe_mac_82598EB && + tc < IXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ixgbe_close(dev); + else + ixgbe_reset(adapter); + + ixgbe_clear_interrupt_scheme(adapter); + + if (tc) { +#ifdef HAVE_XDP_SUPPORT + if (adapter->xdp_prog) { + e_warn(probe, "DCB is not supported with XDP\n"); + + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + ixgbe_open(dev); + return -EINVAL; + } +#endif /* IXGBE_XDP_ENABLED */ + + netdev_set_num_tc(dev, tc); + ixgbe_set_prio_tc_map(adapter); + + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->last_lfc_mode = adapter->hw.fc.requested_mode; + adapter->hw.fc.requested_mode = ixgbe_fc_none; + } + } else { + netdev_reset_tc(dev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + ixgbe_validate_rtr(adapter, tc); + + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + ixgbe_open(dev); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} +#endif + +void ixgbe_do_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); +} + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 ixgbe_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t ixgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + +#if IS_ENABLED(CONFIG_DCB) + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) +#ifdef NETIF_F_HW_VLAN_CTAG_RX + features |= NETIF_F_HW_VLAN_CTAG_RX; +#else + features |= NETIF_F_HW_VLAN_RX; +#endif +#endif /* CONFIG_DCB */ + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + 
features &= ~NETIF_F_LRO; + + if (adapter->xdp_prog && (features & NETIF_F_LRO)) { + e_dev_err("LRO is not supported with XDP\n"); + features &= ~NETIF_F_LRO; + } + + return features; +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int ixgbe_set_features(struct net_device *netdev, u32 features) +#else +static int ixgbe_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + netdev_features_t changed = netdev->features ^ features; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if (changed & NETIF_F_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } + } + + /* + * Check if Flow Director n-tuple support or hw_tc support was + * enabled or disabled. If the state changed, we need to reset. + */ +#ifdef NETIF_F_HW_TC + if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { +#else + if (features & NETIF_F_NTUPLE) { +#endif + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + } else { + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED || + /* We cannot enable ATR if we have 2 or more tcs */ + netdev_get_num_tc(netdev) > 1 || + /* We cannot enable ATR if RSS is disabled */ + adapter->ring_feature[RING_F_RSS].limit <= 1 || + /* A sample rate of 0 indicates ATR disabled */ + !adapter->atr_sample_rate) + ; /* do nothing not supported */ + else /* otherwise supported and set the flag */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + + netdev->features = features; + +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + features & NETIF_F_RXCSUM) { + if (!need_reset) + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE && + features & NETIF_F_RXCSUM) { + if (!need_reset) + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + + if (need_reset) + ixgbe_do_reset(netdev); +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER)) + ixgbe_set_rx_mode(netdev); +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + else if (changed & (NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER)) + ixgbe_set_rx_mode(netdev); +#endif + return 0; + +} +#endif /* HAVE_NDO_SET_FEATURES */ 
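ixgbe_fix_features above resolves dependent feature bits before the core commits them: LRO requires Rx checksumming, requires RSC-capable hardware, and is incompatible with an attached XDP program. The same dependency pass as a standalone sketch over plain flag bits (the bit values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define F_RXCSUM (1u << 0)
#define F_LRO    (1u << 1)

/* Clear LRO whenever one of its prerequisites is missing, mirroring
 * the driver's ndo_fix_features ordering. */
static uint32_t fix_features(uint32_t features, bool rsc_capable,
			     bool xdp_attached)
{
	if (!(features & F_RXCSUM))
		features &= ~F_LRO;	/* LRO requires Rx checksum */
	if (!rsc_capable)
		features &= ~F_LRO;	/* hardware cannot coalesce */
	if (xdp_attached && (features & F_LRO)) {
		fprintf(stderr, "LRO is not supported with XDP\n");
		features &= ~F_LRO;
	}
	return features;
}

int main(void)
{
	uint32_t f = fix_features(F_RXCSUM | F_LRO, true, true);

	printf("features=0x%x\n", f);	/* LRO stripped by the XDP rule */
	return 0;
}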
+ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +/** + * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +static void ixgbe_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port; + u32 port_shift = 0; + u32 reg; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port == port) + return; + + if (adapter->geneve_port) { + netdev_info(dev, + "GENEVE port %d set, not adding port %d\n", + ntohs(adapter->geneve_port), + ntohs(port)); + return; + } + + port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; + adapter->geneve_port = port; + break; + default: + return; + } + + reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); +} + +/** + * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +static void ixgbe_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + u32 port_mask; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN && + ti->type != UDP_TUNNEL_TYPE_GENEVE) + return; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port != ti->port) { + netdev_info(dev, "GENEVE port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + break; + default: + return; + } + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; +} +#elif defined(HAVE_VXLAN_RX_OFFLOAD) +/** + * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifiying us about + * @port: New UDP port number that VXLAN started listening to + */ +static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + if (sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "Hit Max num of VXLAN ports, not adding port %d\n", + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); +} + +/** + * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away + * @dev: The port's netdev 
+ * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + */ +static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (sa_family != AF_INET) + return; + + if (adapter->vxlan_port != port) { + netdev_info(dev, "Port %d was not found, not deleting\n", + ntohs(port)); + return; + } + + ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; +} +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef HAVE_NDO_GSO_CHECK +static bool +ixgbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) +{ + return vxlan_gso_check(skb); +} +#endif /* HAVE_NDO_GSO_CHECK */ + +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, +#ifdef HAVE_NDO_FDB_ADD_VID + u16 vid, +#endif +#ifdef HAVE_NDO_FDB_ADD_EXTACK + u16 flags, + struct netlink_ext_ack __always_unused *extack) +#else + u16 flags) +#endif +#else +static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +#endif /* USE_CONST_DEV_UC_CHAR */ +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 pool = VMDQ_P(0); + + if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) + return -ENOMEM; + } + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_NDO_FDB_ADD_VID + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +#else + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); +#endif /* HAVE_NDO_FDB_ADD_VID */ +#else + return ndo_dflt_fdb_add(ndm, dev, addr, flags); +#endif /* USE_CONST_DEV_UC_CHAR */ +} + +#ifdef HAVE_BRIDGE_ATTRIBS +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS) +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags) +#else +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +#endif /* HAVE_NDO_BRIDGE_SETLINK_EXTACK */ +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + return -EINVAL; + } + + adapter->bridge_mode = mode; + + /* re-configure settings related to bridge mode */ + ixgbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; +#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); +#elif defined(HAVE_NDO_FDB_ADD_VID) || \ + defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); +#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +} +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ + +#ifdef HAVE_NDO_FEATURES_CHECK +#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +#ifdef NETIF_F_GSO_PARTIAL +#define IXGBE_MAX_MAC_HDR_LEN 127 +#define IXGBE_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} +#else +static netdev_features_t +ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation) + return features; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + IXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + + return features; +} +#endif /* NETIF_F_GSO_PARTIAL */ +#endif /* HAVE_NDO_FEATURES_CHECK */ + +#ifdef HAVE_XDP_SUPPORT +static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + bool need_reset; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return -EINVAL; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return -EINVAL; + + /* verify ixgbe ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring_is_rsc_enabled(ring)) + return -EINVAL; + + if (frame_size > ixgbe_rx_bufsz(ring)) + return -EINVAL; + } + + if (nr_cpu_ids > MAX_XDP_QUEUES) + return -ENOMEM; + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* If transitioning XDP modes reconfigure rings */ + if (need_reset) { + int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); + + if (err) { + rcu_assign_pointer(adapter->xdp_prog, old_prog); + return -EINVAL; + } + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->xdp_ring[i]->xsk_umem) +#ifdef HAVE_NDO_XSK_WAKEUP + (void)ixgbe_xsk_wakeup(adapter->netdev, i, + XDP_WAKEUP_RX); +#else + (void)ixgbe_xsk_async_xmit(adapter->netdev, i); +#endif + +#endif + return 0; +} + +#ifdef HAVE_NDO_BPF +static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) +#else +static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +#endif +{ +#if defined(HAVE_XDP_QUERY_PROG) || defined(HAVE_AF_XDP_ZC_SUPPORT) + struct ixgbe_adapter *adapter = netdev_priv(dev); +#endif + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ixgbe_xdp_setup(dev, xdp->prog); +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = !!(adapter->xdp_prog); +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + return 0; +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT + case XDP_SETUP_XSK_UMEM: + return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, + xdp->xsk.queue_id); +#endif + default: + return -EINVAL; + } +} + +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. 
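The tail-update comment above states the classic producer-side ordering rule: descriptor writes must be globally visible before the tail register tells hardware to fetch them. A userspace analogue using a C11 release store in place of wmb()/writel, with a shared-memory consumer standing in for the NIC:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint64_t desc[256];		/* descriptor memory */
	_Atomic uint32_t tail;		/* stands in for the tail register */
};

/* Publish one descriptor: fill it, then bump tail with release ordering
 * so a consumer never observes the new tail before the descriptor. */
static void ring_publish(struct ring *r, uint32_t idx, uint64_t d)
{
	r->desc[idx & 255] = d;
	atomic_store_explicit(&r->tail, idx + 1, memory_order_release);
}

int main(void)
{
	static struct ring r;

	ring_publish(&r, 0, 0xabcdull);
	printf("tail=%u desc[0]=0x%llx\n", (unsigned)atomic_load(&r.tail),
	       (unsigned long long)r.desc[0]);
	return 0;
}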
+ */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +static int ixgbe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +#else +static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +#endif +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + int drops = 0; + int i; +#else + int err; +#endif + + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return -ENETDOWN; + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; +#endif + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return -ENXIO; + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) + return -ENXIO; +#endif + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = ixgbe_xmit_xdp_ring(adapter, xdpf); + if (err != IXGBE_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) + ixgbe_xdp_ring_update_tail(ring); + + return n - drops; +#else + err = ixgbe_xmit_xdp_ring(adapter, xdp); + if (err != IXGBE_XDP_TX) + return -ENOSPC; + + return 0; +#endif +} + +#ifndef NO_NDO_XDP_FLUSH +static void ixgbe_xdp_flush(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + /* Its possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. + */ + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return; + + ixgbe_xdp_ring_update_tail(ring); + + return; +} +#endif /* !NO_NDO_XDP_FLUSH */ +#endif /* HAVE_XDP_SUPPORT */ +#ifdef HAVE_NET_DEVICE_OPS +static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbe_open, + .ndo_stop = ixgbe_close, + .ndo_start_xmit = ixgbe_xmit_frame, +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + .ndo_select_queue = ixgbe_select_queue, +#else +#ifndef HAVE_MQPRIO + .ndo_select_queue = __netdev_pick_tx, +#endif +#endif /* CONFIG_FCOE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbe_set_mac, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = ixgbe_change_mtu, +#else + .ndo_change_mtu = ixgbe_change_mtu, +#endif + .ndo_tx_timeout = ixgbe_tx_timeout, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, +#endif + .ndo_do_ioctl = ixgbe_ioctl, +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. 
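The RHEL7 note above relies on a sized-struct convention: ndo_size records how large the ops table was at build time, so a consumer can test whether an extended member lies inside it before dereferencing. A generic sketch of that versioning trick; the struct and field names are invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct ops_v2 {
	size_t size;		/* set to sizeof(ops) by the provider */
	int (*open)(void);
	int (*new_ext_op)(int);	/* added in a later revision */
};

/* True when the provider was built with the field present in its table. */
#define OPS_HAS(ops, field) \
	((ops)->size >= offsetof(struct ops_v2, field) + \
			sizeof((ops)->field))

static int do_open(void) { return 0; }

int main(void)
{
	struct ops_v2 old = { .size = offsetof(struct ops_v2, new_ext_op),
			      .open = do_open };

	printf("ext op %s\n", OPS_HAS(&old, new_ext_op) ?
	       "available" : "absent");	/* absent: struct predates it */
	return 0;
}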
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) && IS_ENABLED(CONFIG_PCI_IOV) + .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, +#endif /* IFLA_VF_MAX */ +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = ixgbe_get_stats64, +#else + .ndo_get_stats = ixgbe_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ +#ifdef HAVE_SETUP_TC +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC + .extended.ndo_setup_tc_rh = __ixgbe_setup_tc, +#else +#ifdef NETIF_F_HW_TC + .ndo_setup_tc = __ixgbe_setup_tc, +#else + .ndo_setup_tc = ixgbe_setup_tc, +#endif /* NETIF_F_HW_TC */ +#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */ +#endif /* HAVE_SETUP_TC */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbe_netpoll, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = ixgbe_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ +#if IS_ENABLED(CONFIG_FCOE) + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, +#endif + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE + .ndo_fcoe_enable = ixgbe_fcoe_enable, + .ndo_fcoe_disable = ixgbe_fcoe_disable, +#endif +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, +#endif +#endif /* CONFIG_FCOE */ +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = &ixgbe_vlan_mode, +#endif +#ifdef HAVE_FDB_OPS + .ndo_fdb_add = ixgbe_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = ndo_dflt_fdb_del, + .ndo_fdb_dump = ndo_dflt_fdb_dump, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL + .extended.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, + .extended.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, +#else + .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, + .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, +#endif +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + .ndo_add_vxlan_port = ixgbe_add_vxlan_port, + .ndo_del_vxlan_port = ixgbe_del_vxlan_port, +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_NDO_GSO_CHECK + .ndo_gso_check = ixgbe_gso_check, +#endif /* HAVE_NDO_GSO_CHECK */ +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = ixgbe_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF + .ndo_bpf = ixgbe_xdp, +#else + .ndo_xdp = ixgbe_xdp, +#endif + .ndo_xdp_xmit = ixgbe_xdp_xmit, +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef 
HAVE_NDO_XSK_WAKEUP + .ndo_xsk_wakeup = ixgbe_xsk_wakeup, +#else + .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, +#endif +#endif +#ifndef NO_NDO_XDP_FLUSH + .ndo_xdp_flush = ixgbe_xdp_flush, +#endif /* !NO_NDO_XDP_FLUSH */ +#endif +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext ixgbe_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = ixgbe_set_features, + .ndo_fix_features = ixgbe_fix_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; +#endif /* HAVE_NET_DEVICE_OPS */ + +void ixgbe_assign_netdev_ops(struct net_device *dev) +{ +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops = &ixgbe_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(dev, &ixgbe_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = &ixgbe_open; + dev->stop = &ixgbe_close; + dev->hard_start_xmit = &ixgbe_xmit_frame; + dev->get_stats = &ixgbe_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &ixgbe_set_rx_mode; +#endif + dev->set_multicast_list = &ixgbe_set_rx_mode; + dev->set_mac_address = &ixgbe_set_mac; + dev->change_mtu = &ixgbe_change_mtu; + dev->do_ioctl = &ixgbe_ioctl; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &ixgbe_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + dev->vlan_rx_register = &ixgbe_vlan_mode; + dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &ixgbe_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + dev->select_queue = &ixgbe_select_queue; +#else + dev->select_queue = &__netdev_pick_tx; +#endif +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* HAVE_NET_DEVICE_OPS */ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(dev)->ndo_busy_poll = ixgbe_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + + ixgbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; } /** @@ -10639,7 +12195,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } break; case IXGBE_DEV_ID_82599EN_SFP: - /* Only these subdevices support WOL */ + /* Only these subdevices support WoL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: return true; @@ -10665,33 +12221,76 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, * * This function is used by probe and ethtool to determine the FW version to * format to display. The FW version is taken from the EEPROM/NVM. 
- */ + * + **/ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_nvm_version nvm_ver; + u16 eeprom_verh = 0, eeprom_verl = 0; + u16 offset = 0; + u32 etrack_id; - ixgbe_get_oem_prod_version(hw, &nvm_ver); - if (nvm_ver.oem_valid) { - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), - "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor, - nvm_ver.oem_release); - return; + /* Check for OEM Product Version block format */ + hw->eeprom.ops.read(hw, 0x1b, &offset); + + /* Make sure offset to OEM Product Version block is valid */ + if (!(offset == 0x0) && !(offset == 0xffff)) { + u16 mod_len = 0, cap = 0, prod_ver = 0, rel_num = 0; + u16 build, major, patch; + + /* Read product version block */ + hw->eeprom.ops.read(hw, offset, &mod_len); + hw->eeprom.ops.read(hw, offset + 0x1, &cap); + hw->eeprom.ops.read(hw, offset + 0x2, &prod_ver); + hw->eeprom.ops.read(hw, offset + 0x3, &rel_num); + + /* Only display the OEM product version if the block is valid */ + if (mod_len == 0x3 && (cap & 0xf) == 0x0) { + major = prod_ver >> 8; + build = prod_ver & 0xff; + patch = rel_num; + + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "%x.%x.%x", major, build, patch); + return; + } } - ixgbe_get_etk_id(hw, &nvm_ver); - ixgbe_get_orom_version(hw, &nvm_ver); + /* + * Save off the EEPROM version number and Option ROM version, which + * together make a unique identifier for the EEPROM + */ + hw->eeprom.ops.read(hw, 0x2e, &eeprom_verh); + hw->eeprom.ops.read(hw, 0x2d, &eeprom_verl); + etrack_id = (eeprom_verh << 16) | eeprom_verl; - if (nvm_ver.or_valid) { - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), - "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major, - nvm_ver.or_build, nvm_ver.or_patch); - return; + /* Check for SCSI block version format */ + hw->eeprom.ops.read(hw, 0x17, &offset); + + /* Make sure offset to SCSI block is valid */ + if (!(offset == 0x0) && !(offset == 0xffff)) { + u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; + u16 build, major, patch; + + hw->eeprom.ops.read(hw, offset + 0x84, &eeprom_cfg_blkh); + hw->eeprom.ops.read(hw, offset + 0x83, &eeprom_cfg_blkl); + + /* Only display the Option ROM version if one exists */ + if (eeprom_cfg_blkl && eeprom_cfg_blkh) { + major = eeprom_cfg_blkl >> 8; + build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); + patch = eeprom_cfg_blkh & 0x00ff; + + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x, %d.%d.%d", etrack_id, major, build, + patch); + return; + } } /* Set ETrack ID format */ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), - "0x%08x", nvm_ver.etk_id); + "0x%08x", etrack_id); } /** @@ -10705,82 +12304,130 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
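 * The HAVE_CONFIG_HOTPLUG guards below are assumed to come from a
 * compatibility header keyed on kernel version (the __devinit/__devexit
 * section annotations were removed around v3.8), e.g.:
 *
 *	#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
 *	#define HAVE_CONFIG_HOTPLUG
 *	#endif
 *
 * so the same source builds both the annotated and the plain
 * probe/remove prototypes.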
**/ -static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +#else +static int ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +#endif { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; - struct ixgbe_hw *hw; - const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; - int i, err, pci_using_dac, expected_gts; - unsigned int indices = MAX_TX_QUEUES; + struct ixgbe_hw *hw = NULL; + static int cards_found; + int err, pci_using_dac; + char *info_string; u8 part_str[IXGBE_PBANUM_LENGTH]; + enum ixgbe_mac_type mac_type = ixgbe_mac_unknown; +#ifdef HAVE_TX_MQ + unsigned int indices = MAX_TX_QUEUES; +#endif /* HAVE_TX_MQ */ bool disable_dev = false; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) u16 device_caps; #endif - u32 eec; - - /* Catch broken hardware that put the wrong VF device ID in - * the PCIe SR-IOV capability. - */ - if (pdev->is_virtfn) { - WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", - pci_name(pdev), pdev->vendor, pdev->device); - return -EINVAL; - } - +#ifndef NETIF_F_GSO_PARTIAL +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else + u32 hw_features; +#endif +#endif +#endif /* NETIF_F_GSO_PARTIAL */ + int i; err = pci_enable_device_mem(pdev); if (err) return err; - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } } pci_using_dac = 0; } err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "pci_request_selected_regions failed 0x%x\n", err); goto err_pci_reg; } + /* + * The mac_type is needed before the adapter is set up, so rather + * than maintain two devID -> MAC tables we dummy up an ixgbe_hw + * struct and use ixgbe_set_mac_type. + */ + hw = vmalloc(sizeof(struct ixgbe_hw)); + if (!hw) { + pr_info("Unable to allocate memory for early mac " + "check\n"); + } else { + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + ixgbe_set_mac_type(hw); + mac_type = hw->mac.type; + vfree(hw); + } + + /* + * Workaround for silicon errata on 82598. Disable L0s in the PCI switch + * port to which the 82598 is connected to prevent duplicate + * completions caused by L0s. We need the mac type so that we only + * do this on 82598 devices; ixgbe_set_mac_type does this for us if + * we set its device ID. 
+ */ + if (mac_type == ixgbe_mac_82598EB) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); + pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); - pci_save_state(pdev); - if (ii->mac == ixgbe_mac_82598EB) { -#ifdef CONFIG_IXGBE_DCB - /* 8 TC w/ 4 queues per TC */ - indices = 4 * MAX_TRAFFIC_CLASS; -#else +#ifdef HAVE_TX_MQ + if (mac_type == ixgbe_mac_82598EB) { +#if IS_ENABLED(CONFIG_DCB) + indices = IXGBE_MAX_DCB_INDICES * 4; +#else /* CONFIG_DCB */ indices = IXGBE_MAX_RSS_INDICES; -#endif +#endif /* !CONFIG_DCB */ } netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); +#else /* HAVE_TX_MQ */ + netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); +#endif /* HAVE_TX_MQ */ if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } - SET_NETDEV_DEV(netdev, &pdev->dev); + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); adapter = netdev_priv(netdev); - +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + adapter->indices = indices; +#endif +#endif adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); @@ -10790,42 +12437,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; } - netdev->netdev_ops = &ixgbe_netdev_ops; - ixgbe_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + ixgbe_assign_netdev_ops(netdev); - /* Setup hw api */ - hw->mac.ops = *ii->mac_ops; - hw->mac.type = ii->mac; - hw->mvals = ii->mvals; - if (ii->link_ops) - hw->link.ops = *ii->link_ops; + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); - /* EEPROM */ - hw->eeprom.ops = *ii->eeprom_ops; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - if (ixgbe_removed(hw->hw_addr)) { - err = -EIO; - goto err_ioremap; - } - /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ - if (!(eec & BIT(8))) - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; - - /* PHY */ - hw->phy.ops = *ii->phy_ops; - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - /* ixgbe_identify_phy_generic will set prtad and mmds properly */ - hw->phy.mdio.prtad = MDIO_PRTAD_NONE; - hw->phy.mdio.mmds = 0; - hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - hw->phy.mdio.dev = netdev; - hw->phy.mdio.mdio_read = ixgbe_mdio_read; - hw->phy.mdio.mdio_write = ixgbe_mdio_write; + adapter->bd_number = cards_found; + ixgbe_get_hw_control(adapter); /* setup the private structure */ - err = ixgbe_sw_init(adapter, ii); + err = ixgbe_sw_init(adapter); if (err) goto err_sw_init; @@ -10839,7 +12459,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: break; } /* - * If there is a fan on this device and it has failed log the - * failure. + * If we have a fan, this is as early as we know; warn if we + * have had a failure. 
*/ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) e_crit(probe, "Fan has stopped, replace the adapter\n"); } - if (allow_unsupported_sfp) - hw->allow_unsupported_sfp = allow_unsupported_sfp; + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + ixgbe_check_options(adapter); /* reset_hw fills in the perm_addr as well */ hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; - ixgbe_set_eee_capable(adapter); if (err == IXGBE_ERR_SFP_NOT_PRESENT) { - err = 0; + err = IXGBE_SUCCESS; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); - e_dev_err("Reload the driver after installing a supported module.\n"); + e_dev_err("failed to load because an unsupported SFP+ or QSFP " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); goto err_sw_init; } else if (err) { e_dev_err("HW Init failed: %d\n", err); goto err_sw_init; } @@ -10876,30 +12500,35 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_PCI_IOV - /* SR-IOV not supported on the 82598 */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - goto skip_sriov; - /* Mailbox */ - ixgbe_init_mbx_params_pf(hw); - hw->mbx.ops = ii->mbx_ops; - pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); - ixgbe_enable_sriov(adapter, max_vfs); -skip_sriov: +#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) + if (adapter->max_vfs > 0) { + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated.\n"); + e_dev_warn("Please use the pci sysfs interface instead. 
Ex:\n"); + e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x/sriov_numvfs\n", + adapter->max_vfs, + pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn) + ); + } #endif - netdev->features = NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXHASH | - NETIF_F_RXCSUM | - NETIF_F_HW_CSUM; + if (adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE) { + pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); + ixgbe_enable_sriov(adapter); + } -#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ - NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPXIP4 | \ - NETIF_F_GSO_IPXIP6 | \ - NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM) +#endif /* CONFIG_PCI_IOV */ + +#ifdef NETIF_F_GSO_PARTIAL + /* keep |= here to avoid conflict with features set in param.c */ + netdev->features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; netdev->features |= NETIF_F_GSO_PARTIAL | @@ -10908,21 +12537,12 @@ skip_sriov: if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; -#ifdef CONFIG_IXGBE_IPSEC -#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ - NETIF_F_HW_ESP_TX_CSUM | \ - NETIF_F_GSO_ESP) - - if (adapter->ipsec) - netdev->features |= IXGBE_ESP_FEATURES; -#endif /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_RXALL | - NETIF_F_HW_L2FW_DOFFLOAD; + NETIF_F_RXALL; if (hw->mac.type >= ixgbe_mac_82599EB) netdev->hw_features |= NETIF_F_NTUPLE | @@ -10933,11 +12553,7 @@ skip_sriov: netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->hw_enc_features |= netdev->vlan_features; - netdev->mpls_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM; - netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES; + netdev->mpls_features |= NETIF_F_HW_CSUM; /* set this bit last since it cannot be part of vlan_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | @@ -10947,58 +12563,191 @@ skip_sriov: netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; - /* MTU range: 68 - 9710 */ - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - netdev->dcbnl_ops = &ixgbe_dcbnl_ops; +#else /* NETIF_F_GSO_PARTIAL */ + /* keep |= here to avoid conflict with features set in param.c */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM; + +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IPV6_CSUM; #endif -#ifdef IXGBE_FCOE +#ifdef NETIF_F_HW_VLAN_CTAG_TX + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX; +#endif + +#ifdef NETIF_F_HW_VLAN_TX + netdev->features |= NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_FILTER | + NETIF_F_HW_VLAN_RX; +#endif + netdev->features |= ixgbe_tso_features(); +#ifdef NETIF_F_RXHASH + netdev->features |= NETIF_F_RXHASH; +#endif /* NETIF_F_RXHASH */ + netdev->features |= NETIF_F_RXCSUM; + +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = netdev->hw_features; +#else + hw_features = 
get_netdev_hw_features(netdev); +#endif + hw_features |= netdev->features; + + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + hw_features |= NETIF_F_LRO; + +#else +#ifdef NETIF_F_GRO + + /* this is only needed on kernels prior to 2.6.39 */ + netdev->features |= NETIF_F_GRO; +#endif /* NETIF_F_GRO */ +#endif /* HAVE_NDO_SET_FEATURES */ + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + netdev->features |= NETIF_F_SCTP_CSUM; +#ifdef HAVE_NDO_SET_FEATURES + hw_features |= NETIF_F_SCTP_CSUM | +#ifdef NETIF_F_HW_TC + NETIF_F_HW_TC | +#endif + NETIF_F_NTUPLE; +#endif + break; + default: + break; + } +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif + +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + netdev->hw_enc_features |= NETIF_F_SG; +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + } +#endif /* NETIF_F_GSO_PARTIAL */ + +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + if (netdev->features & NETIF_F_LRO) { + if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + ((adapter->rx_itr_setting == 1) || + (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { + e_dev_info("InterruptThrottleRate set too high, " + "disabling RSC\n"); + } + } +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +#ifdef IFF_SUPP_NOFCS + netdev->priv_flags |= IFF_SUPP_NOFCS; +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN); +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); +#endif + +#endif +#if IS_ENABLED(CONFIG_DCB) + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + netdev->dcbnl_ops = &ixgbe_dcbnl_ops; + +#endif /* CONFIG_DCB */ +#if IS_ENABLED(CONFIG_FCOE) +#ifdef NETIF_F_FSO if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { unsigned int fcoe_l; - if (hw->mac.ops.get_device_caps) { - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) - adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + e_dev_info("FCoE offload feature is not available. 
" + "Disabling FCoE offload feature\n"); + } else { + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + ixgbe_fcoe_ddp_enable(adapter); + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ } - fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; - netdev->features |= NETIF_F_FSO | - NETIF_F_FCOE_CRC; - +#ifdef HAVE_NETDEV_VLAN_FEATURES netdev->vlan_features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + } +#endif /* NETIF_F_FSO */ +#endif /* CONFIG_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_HIGHDMA; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ } -#endif /* IXGBE_FCOE */ - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) - netdev->hw_features |= NETIF_F_LRO; - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - netdev->features |= NETIF_F_LRO; if (ixgbe_check_fw_error(adapter)) { err = -EIO; goto err_sw_init; } - /* make sure the EEPROM is good */ - if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { + if (hw->eeprom.ops.validate_checksum && + (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); err = -EIO; goto err_sw_init; } - eth_platform_get_mac_address(&adapter->pdev->dev, - adapter->hw.mac.perm_addr); - memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); +#endif if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); @@ -11012,7 +12761,7 @@ skip_sriov: timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); - if (ixgbe_removed(hw->hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { err = -EIO; goto err_sw_init; } @@ -11024,63 +12773,20 @@ skip_sriov: if (err) goto err_sw_init; - for (i = 0; i < adapter->num_rx_queues; i++) - u64_stats_init(&adapter->rx_ring[i]->syncp); - for (i = 0; i < adapter->num_tx_queues; i++) - u64_stats_init(&adapter->tx_ring[i]->syncp); for (i = 0; i < adapter->num_xdp_queues; i++) u64_stats_init(&adapter->xdp_ring[i]->syncp); - /* WOL not supported for all devices */ adapter->wol = 0; hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); - hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, - pdev->subsystem_device); - if (hw->wol_enabled) + if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) adapter->wol = IXGBE_WUFC_MAG; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + hw->wol_enabled = !!(adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); - /* save off EEPROM version number */ ixgbe_set_fw_version(adapter); - /* pick up the PCI bus settings for reporting later */ - if (ixgbe_pcie_from_parent(hw)) - ixgbe_get_parent_bus_info(adapter); - else - hw->mac.ops.get_bus_info(hw); - - /* calculate the expected PCIe bandwidth required for optimal - * performance. Note that some older parts will never have enough - * bandwidth due to being older generation PCIe parts. We clamp these - * parts to ensure no warning is displayed if it can't be fixed. 
- */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); - break; - default: - expected_gts = ixgbe_enumerate_functions(adapter) * 10; - break; - } - - /* don't check link if we failed to enumerate functions */ - if (expected_gts > 0) - ixgbe_check_minimum_link(adapter, expected_gts); - - err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); - if (err) - strlcpy(part_str, "Unknown", sizeof(part_str)); - if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, - part_str); - else - e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, part_str); - - e_dev_info("%pM\n", netdev->dev_addr); - /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); if (err == IXGBE_ERR_EEPROM_VERSION) { @@ -11091,13 +12797,36 @@ skip_sriov: "problems please contact your Intel or hardware " "representative who provided you with this " "hardware.\n"); + } else if (err == IXGBE_ERR_OVERTEMP) { + e_crit(drv, "%s\n", ixgbe_overheat_msg); + goto err_register; + } else if (err) { + e_dev_err("HW init failed\n"); + goto err_register; } - strcpy(netdev->name, "eth%d"); + + /* pick up the PCI bus settings for reporting later */ + if (ixgbe_pcie_from_parent(hw)) + ixgbe_get_parent_bus_info(hw); + else + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); pci_set_drvdata(pdev, adapter); err = register_netdev(netdev); if (err) goto err_register; + adapter->netdev_registered = true; +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + +#endif /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) @@ -11105,38 +12834,115 @@ skip_sriov: /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); -#ifdef CONFIG_IXGBE_DCA - if (dca_add_requester(&pdev->dev) == 0) { - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); +#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) { + err = dca_add_requester(pci_dev_to_dev(pdev)); + switch (err) { + case IXGBE_SUCCESS: + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + break; + /* -19 is returned from the kernel when no provider is found */ + case -19: + e_info(rx_err, "No DCA provider found. 
Please " + "start ioatdma for DCA functionality.\n"); + break; + default: + e_info(probe, "DCA registration failed: %d\n", err); + break; + } } #endif + + /* print all messages at the end so that we use our eth%d name */ + ixgbe_check_minimum_link(adapter); + + /* First try to read PBA as a string */ + err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strscpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str); + else + e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + e_dev_info("%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + +#define INFO_STRING_LEN 255 + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) { + e_err(probe, "allocation for info string failed\n"); + goto no_info_string; + } + snprintf(info_string, INFO_STRING_LEN, + "Enabled Features: RxQ: %d TxQ: %d", + adapter->num_rx_queues, adapter->num_tx_queues); +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + strlcat(info_string, " FCoE", INFO_STRING_LEN); +#endif + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + strlcat(info_string, " FdirHash", INFO_STRING_LEN); + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + strlcat(info_string, " DCB", INFO_STRING_LEN); + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + strlcat(info_string, " DCA", INFO_STRING_LEN); + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + strlcat(info_string, " RSC", INFO_STRING_LEN); + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE) + strlcat(info_string, " vxlan_rx", INFO_STRING_LEN); + + /* end features printing */ + e_info(probe, "%s\n", info_string); + kfree(info_string); +no_info_string: +#ifdef CONFIG_PCI_IOV if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); + int i; for (i = 0; i < adapter->num_vfs; i++) ixgbe_vf_configuration(pdev, (i | 0x10000000)); } +#endif - /* firmware requires driver version to be 0xFFFFFFFF - * since os does not support feature - */ + /* Initialize the LED link active for LED blink support */ + if (hw->mac.ops.init_led_link_act) + hw->mac.ops.init_led_link_act(hw); + + /* firmware requires blank numerical version */ if (hw->mac.ops.set_fw_drv_ver) hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, sizeof(ixgbe_driver_version) - 1, ixgbe_driver_version); +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); - e_dev_info("%s\n", ixgbe_default_device_descr); +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + e_info(probe, "Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; -#ifdef CONFIG_IXGBE_HWMON +#ifdef IXGBE_SYSFS if (ixgbe_sysfs_init(adapter)) e_err(probe, "failed to allocate sysfs resources\n"); -#endif /* CONFIG_IXGBE_HWMON */ +#else +#ifdef IXGBE_PROCFS + if (ixgbe_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* IXGBE_PROCFS */ +#endif /* IXGBE_SYSFS */ +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_adapter_init(adapter); +#endif /* HAVE_IXGBE_DEBUG_FS */ /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && 
hw->mac.ops.setup_link) @@ -11144,21 +12950,29 @@ skip_sriov: IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); - ixgbe_mii_bus_init(hw); + if (hw->mac.ops.setup_eee && + (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { + bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); + + hw->mac.ops.setup_eee(hw, eee_enable); + } return 0; err_register: - ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); err_sw_init: + ixgbe_release_hw_control(adapter); +#ifdef CONFIG_PCI_IOV ixgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; - iounmap(adapter->io_addr); +#ifdef HAVE_TC_SETUP_CLSU32 kfree(adapter->jump_tables[0]); +#endif kfree(adapter->mac_table); kfree(adapter->rss_key); - bitmap_free(adapter->af_xdp_zc_qps); + iounmap(adapter->io_addr); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -11180,63 +12994,78 @@ err_dma: * Hot-Plug event, or because the driver is going to be removed from * memory. **/ +#ifdef HAVE_CONFIG_HOTPLUG +static void __devexit ixgbe_remove(struct pci_dev *pdev) +#else static void ixgbe_remove(struct pci_dev *pdev) +#endif { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; bool disable_dev; +#ifdef HAVE_TC_SETUP_CLSU32 int i; +#endif /* if !adapter then we already cleaned up in probe */ if (!adapter) return; - netdev = adapter->netdev; + netdev = adapter->netdev; +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_adapter_exit(adapter); +#endif /*HAVE_IXGBE_DEBUG_FS */ set_bit(__IXGBE_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); - if (adapter->mii_bus) - mdiobus_unregister(adapter->mii_bus); - -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - dca_remove_requester(&pdev->dev); + dca_remove_requester(pci_dev_to_dev(pdev)); IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_DISABLE); } +#endif /* CONFIG_DCA */ -#endif -#ifdef CONFIG_IXGBE_HWMON +#ifdef IXGBE_SYSFS ixgbe_sysfs_exit(adapter); -#endif /* CONFIG_IXGBE_HWMON */ +#else +#ifdef IXGBE_PROCFS + ixgbe_procfs_exit(adapter); +#endif /* IXGBE_PROCFS */ +#endif /* IXGBE-SYSFS */ +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /* remove the added san mac */ ixgbe_del_sanmac_netdev(netdev); +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + #ifdef CONFIG_PCI_IOV ixgbe_disable_sriov(adapter); -#endif - if (netdev->reg_state == NETREG_REGISTERED) +#endif /* CONFIG_PCI_IOV */ + if (adapter->netdev_registered) { unregister_netdev(netdev); + adapter->netdev_registered = false; + } - ixgbe_stop_ipsec_offload(adapter); +#if IS_ENABLED(CONFIG_FCOE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + ixgbe_fcoe_ddp_disable(adapter); +#endif +#endif /* CONFIG_FCOE */ ixgbe_clear_interrupt_scheme(adapter); - ixgbe_release_hw_control(adapter); -#ifdef CONFIG_DCB +#ifdef HAVE_DCBNL_IEEE kfree(adapter->ixgbe_ieee_pfc); kfree(adapter->ixgbe_ieee_ets); #endif iounmap(adapter->io_addr); pci_release_mem_regions(pdev); - - e_dev_info("complete\n"); - +#ifdef HAVE_TC_SETUP_CLSU32 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { if (adapter->jump_tables[i]) { kfree(adapter->jump_tables[i]->input); @@ -11244,10 +13073,9 @@ static void ixgbe_remove(struct pci_dev *pdev) } kfree(adapter->jump_tables[i]); } - +#endif /* HAVE_TC_SETUP_CLSU32 */ kfree(adapter->mac_table); kfree(adapter->rss_key); - 
bitmap_free(adapter->af_xdp_zc_qps); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -11255,8 +13083,70 @@ static void ixgbe_remove(struct pci_dev *pdev) if (disable_dev) pci_disable_device(pdev); + } +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD) { + ixgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 value; + + if (IXGBE_REMOVED(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef HAVE_PCI_ERS +#ifdef CONFIG_PCI_IOV +static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 value; + + if (IXGBE_REMOVED(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_DWORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_ERS */ + +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (IXGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +void ewarn(struct ixgbe_hw *hw, const char *st) +{ + struct ixgbe_adapter *adapter = hw->back; + + netif_warn(adapter, drv, adapter->netdev, "%s", st); +} + +#ifdef HAVE_PCI_ERS /** * ixgbe_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -11297,7 +13187,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) goto skip_bad_vf_detection; req_id = dw1 >> 16; @@ -11316,10 +13206,10 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, dw0, dw1, dw2, dw3); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - device_id = IXGBE_82599_VF_DEVICE_ID; + device_id = IXGBE_DEV_ID_82599_VF; break; case ixgbe_mac_X540: - device_id = IXGBE_X540_VF_DEVICE_ID; + device_id = IXGBE_DEV_ID_X540_VF; break; case ixgbe_mac_X550: device_id = IXGBE_DEV_ID_X550_VF; @@ -11327,7 +13217,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X550EM_x: device_id = IXGBE_DEV_ID_X550EM_X_VF; break; - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: device_id = IXGBE_DEV_ID_X550EM_A_VF; break; default: @@ -11349,10 +13239,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, * VFLR. Just clean up the AER in that case. 
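 * (ixgbe_issue_vf_flr() below is assumed to trigger the same Function
 *  Level Reset that the removed pcie_flr() call performed, by setting
 *  PCI_EXP_DEVCTL_BCR_FLR in the VF's PCIe capability.)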
*/ if (vfdev) { - pcie_flr(vfdev); + ixgbe_issue_vf_flr(adapter, vfdev); /* Free device reference count */ pci_dev_put(vfdev); } + + pci_aer_clear_nonfatal_status(pdev); } /* @@ -11412,6 +13304,10 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); pci_restore_state(pdev); + /* + * After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ pci_save_state(pdev); pci_wake_from_d3(pdev, false); @@ -11421,6 +13317,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_RECOVERED; } + pci_aer_clear_nonfatal_status(pdev); + return result; } @@ -11452,26 +13350,87 @@ static void ixgbe_io_resume(struct pci_dev *pdev) rtnl_unlock(); } +#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS static const struct pci_error_handlers ixgbe_err_handler = { +#else +static struct pci_error_handlers ixgbe_err_handler = { +#endif .error_detected = ixgbe_io_error_detected, .slot_reset = ixgbe_io_slot_reset, .resume = ixgbe_io_resume, }; +#endif /* HAVE_PCI_ERS */ + +struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw) +{ + return ((struct ixgbe_adapter *)hw->back)->netdev; +} +struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = + container_of(hw, struct ixgbe_adapter, hw); + return (struct ixgbe_msg *)&adapter->msg_enable; +} + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh ixgbe_driver_rh = { + .sriov_configure = ixgbe_pci_sriov_configure, +}; +#endif + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static const struct dev_pm_ops ixgbe_pm_ops = { + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, + .freeze = ixgbe_freeze, + .thaw = ixgbe_thaw, + .poweroff = ixgbe_suspend, + .restore = ixgbe_resume, +}; +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif static struct pci_driver ixgbe_driver = { .name = ixgbe_driver_name, .id_table = ixgbe_pci_tbl, .probe = ixgbe_probe, +#ifdef HAVE_CONFIG_HOTPLUG + .remove = __devexit_p(ixgbe_remove), +#else .remove = ixgbe_remove, +#endif #ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT + .driver = { + .pm = &ixgbe_pm_ops, + }, +#else .suspend = ixgbe_suspend, .resume = ixgbe_resume, +#endif /* USE_LEGACY_PM_SUPPORT */ #endif +#ifndef USE_REBOOT_NOTIFIER .shutdown = ixgbe_shutdown, +#endif +#if defined(HAVE_SRIOV_CONFIGURE) .sriov_configure = ixgbe_pci_sriov_configure, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &ixgbe_driver_rh, +#endif /* HAVE_SRIOV_CONFIGURE */ +#ifdef HAVE_PCI_ERS .err_handler = &ixgbe_err_handler +#endif }; +bool ixgbe_is_ixgbe(struct pci_dev *pcidev) +{ + if (pci_dev_driver(pcidev) != &ixgbe_driver) + return false; + else + return true; +} + /** * ixgbe_init_module - Driver Registration Routine * @@ -11490,20 +13449,32 @@ static int __init ixgbe_init_module(void) return -ENOMEM; } +#ifdef IXGBE_PROCFS + if (ixgbe_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_init(); +#endif /* HAVE_IXGBE_DEBUG_FS */ ret = pci_register_driver(&ixgbe_driver); if (ret) { destroy_workqueue(ixgbe_wq); +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_exit(); +#endif /* HAVE_IXGBE_DEBUG_FS */ +#ifdef IXGBE_PROCFS + ixgbe_procfs_topdir_exit(); +#endif return ret; - } +} +#if IS_ENABLED(CONFIG_DCA) -#ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif - return 0; + return ret; } module_init(ixgbe_init_module); @@ -11516,21 +13487,22 @@ module_init(ixgbe_init_module); **/ static 
void __exit ixgbe_exit_module(void) { -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); - +#ifdef IXGBE_PROCFS + ixgbe_procfs_topdir_exit(); +#endif + destroy_workqueue(ixgbe_wq); +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_exit(); - if (ixgbe_wq) { - destroy_workqueue(ixgbe_wq); - ixgbe_wq = NULL; - } +#endif /* HAVE_IXGBE_DEBUG_FS */ } -#ifdef CONFIG_IXGBE_DCA -static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, - void *p) +#if IS_ENABLED(CONFIG_DCA) +static int ixgbe_notify_dca(struct notifier_block __always_unused *nb, unsigned long event, + void __always_unused *p) { int ret_val; @@ -11539,9 +13511,8 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } - -#endif /* CONFIG_IXGBE_DCA */ - +#endif module_exit(ixgbe_exit_module); /* ixgbe_main.c */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 5679293e53f7..0a1f7b37d088 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,435 +1,1170 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#include -#include -#include "ixgbe.h" +#include "ixgbe_type.h" #include "ixgbe_mbx.h" +STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id); +STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id); + /** - * ixgbe_read_mbx - Reads a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to read + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read * - * returns SUCCESS if it successfully read message from buffer + * returns SUCCESS if it successfully read message from buffer **/ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + DEBUGFUNC("ixgbe_read_mbx"); + /* limit read to size of mailbox */ - if (size > mbx->size) + if (size > mbx->size) { + ERROR_REPORT3(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %u, changing to %u", + size, mbx->size); size = mbx->size; + } - if (!mbx->ops) - return IXGBE_ERR_MBX; + if (mbx->ops[mbx_id].read) + return mbx->ops[mbx_id].read(hw, msg, size, mbx_id); - return mbx->ops->read(hw, msg, size, mbx_id); + return IXGBE_ERR_CONFIG; } /** - * ixgbe_write_mbx - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write + * ixgbe_poll_mbx - Wait for message and read it from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read * - * returns SUCCESS if it successfully copied message into the buffer + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + DEBUGFUNC("ixgbe_poll_mbx"); + + if (!mbx->ops[mbx_id].read || !mbx->ops[mbx_id].check_for_msg || + !mbx->timeout) + return IXGBE_ERR_CONFIG; + + /* limit read to size of mailbox */ + if (size > mbx->size) { + ERROR_REPORT3(IXGBE_ERROR_ARGUMENT, + "Invalid 
mailbox message size %u, changing to %u", + size, mbx->size); + size = mbx->size; + } + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + return mbx->ops[mbx_id].read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox and wait for ACK + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ACK to that message within specified period **/ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; - if (size > mbx->size) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_write_mbx"); - if (!mbx->ops) - return IXGBE_ERR_MBX; + /* + * exit if either we can't write, release + * or there is no timeout defined + */ + if (!mbx->ops[mbx_id].write || !mbx->ops[mbx_id].check_for_ack || + !mbx->ops[mbx_id].release || !mbx->timeout) + return IXGBE_ERR_CONFIG; - return mbx->ops->write(hw, msg, size, mbx_id); + if (size > mbx->size) { + ret_val = IXGBE_ERR_PARAM; + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %u", size); + } else { + ret_val = mbx->ops[mbx_id].write(hw, msg, size, mbx_id); + } + + return ret_val; } /** - * ixgbe_check_for_msg - checks to see if someone sent us mail - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check + * ixgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check * - * returns SUCCESS if the Status bit was found or else ERR_MBX + * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; - if (!mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_check_for_msg"); - return mbx->ops->check_for_msg(hw, mbx_id); + if (mbx->ops[mbx_id].check_for_msg) + ret_val = mbx->ops[mbx_id].check_for_msg(hw, mbx_id); + + return ret_val; } /** - * ixgbe_check_for_ack - checks to see if someone sent us ACK - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check + * ixgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check * - * returns SUCCESS if the Status bit was found or else ERR_MBX + * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; - if (!mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_check_for_ack"); - return mbx->ops->check_for_ack(hw, mbx_id); + if (mbx->ops[mbx_id].check_for_ack) + ret_val = mbx->ops[mbx_id].check_for_ack(hw, mbx_id); + + return ret_val; } /** - * ixgbe_check_for_rst - checks to see if other side has reset - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check + * ixgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check * - * returns SUCCESS if the Status bit was found or else ERR_MBX + * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; - if (!mbx->ops) - return 
IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_check_for_rst"); - return mbx->ops->check_for_rst(hw, mbx_id); + if (mbx->ops[mbx_id].check_for_rst) + ret_val = mbx->ops[mbx_id].check_for_rst(hw, mbx_id); + + return ret_val; } /** - * ixgbe_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write + * ixgbe_clear_mbx - Clear Mailbox Memory + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write * - * returns SUCCESS if it successfully received a message notification + * Set VFMBMEM of given VF to 0x0. **/ -static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_clear_mbx"); + + if (mbx->ops[mbx_id].clear) + ret_val = mbx->ops[mbx_id].clear(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_poll_for_msg"); - while (mbx->ops->check_for_msg(hw, mbx_id)) { + if (!countdown || !mbx->ops[mbx_id].check_for_msg) + return IXGBE_ERR_CONFIG; + + while (countdown && mbx->ops[mbx_id].check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) - return IXGBE_ERR_MBX; - udelay(mbx->usec_delay); + break; + usec_delay(mbx->usec_delay); } - return 0; + if (countdown == 0) { + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%u mailbox message timedout", mbx_id); + return IXGBE_ERR_TIMEOUT; + } + + return IXGBE_SUCCESS; } /** - * ixgbe_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write + * ixgbe_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write * - * returns SUCCESS if it successfully received a message acknowledgement + * returns SUCCESS if it successfully received a message acknowledgment **/ -static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_poll_for_ack"); - while (mbx->ops->check_for_ack(hw, mbx_id)) { + if (!countdown || !mbx->ops[mbx_id].check_for_ack) + return IXGBE_ERR_CONFIG; + + while (countdown && mbx->ops[mbx_id].check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) - return IXGBE_ERR_MBX; - udelay(mbx->usec_delay); + break; + usec_delay(mbx->usec_delay); } - return 0; -} - -/** - * ixgbe_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification and - * copied it into the receive buffer. 
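 * (With this refactor the poll-then-read pairing lives in
 *  ixgbe_poll_mbx() above; a caller-side sketch, where mbx_id 0 is
 *  assumed to be the VF's single connection to the PF:
 *
 *	u32 msg[IXGBE_VFMAILBOX_SIZE];
 *
 *	if (ixgbe_poll_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, 0) ==
 *	    IXGBE_SUCCESS)
 *		msg now holds the reply, already copied out of VFMBMEM.)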
- **/ -static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; - - if (!mbx->ops) - return IXGBE_ERR_MBX; - - ret_val = ixgbe_poll_for_msg(hw, mbx_id); - if (ret_val) - return ret_val; - - /* if ack received read message */ - return mbx->ops->read(hw, msg, size, mbx_id); -} - -/** - * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; - - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops || !mbx->timeout) - return IXGBE_ERR_MBX; - - /* send msg */ - ret_val = mbx->ops->write(hw, msg, size, mbx_id); - if (ret_val) - return ret_val; - - /* if msg sent wait until we receive an ack */ - return ixgbe_poll_for_ack(hw, mbx_id); -} - -static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) -{ - u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); - - if (mbvficr & mask) { - IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); - return 0; + if (countdown == 0) { + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%u mailbox ack timedout", mbx_id); + return IXGBE_ERR_TIMEOUT; } - return IXGBE_ERR_MBX; + return IXGBE_SUCCESS; } /** - * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail - * @hw: pointer to the HW structure - * @vf_number: the VF index + * ixgbe_read_mailbox_vf - read VF's mailbox register + * @hw: pointer to the HW structure * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + * This function is used to read the mailbox register dedicated for VF without + * losing the read to clear status bits. 
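 * Why the cache is needed: the status bits are read-to-clear, so a
 * single register read made to test, say, PFACK would also consume a
 * concurrent PFSTS. Every read therefore goes through this merge
 * (a restatement of the function below):
 *
 *	v = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
 *	v |= hw->mbx.vf_mailbox;
 *	hw->mbx.vf_mailbox |= v & IXGBE_VFMAILBOX_R2C_BITS;
 *
 * and each bit stays visible until the matching ixgbe_clear_*_vf()
 * helper consumes it.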
**/ -static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC u32 ixgbe_read_mailbox_vf(struct ixgbe_hw *hw) { - s32 index = IXGBE_MBVFICR_INDEX(vf_number); - u32 vf_bit = vf_number % 16; + u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); - if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, - index)) { + vf_mailbox |= hw->mbx.vf_mailbox; + hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return vf_mailbox; +} + +STATIC void ixgbe_clear_msg_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); + + if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) { hw->mbx.stats.reqs++; - return 0; + hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS; } - - return IXGBE_ERR_MBX; } -/** - * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC void ixgbe_clear_ack_vf(struct ixgbe_hw *hw) { - s32 index = IXGBE_MBVFICR_INDEX(vf_number); - u32 vf_bit = vf_number % 16; + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); - if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, - index)) { + if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) { hw->mbx.stats.acks++; - return 0; + hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK; } - - return IXGBE_ERR_MBX; } -/** - * ixgbe_check_for_rst_pf - checks to see if the VF has reset - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC void ixgbe_clear_rst_vf(struct ixgbe_hw *hw) { - u32 reg_offset = (vf_number < 32) ? 0 : 1; - u32 vf_shift = vf_number % 32; - u32 vflre = 0; + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); - break; - default: - break; - } - - if (vflre & BIT(vf_shift)) { - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); + if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) { hw->mbx.stats.rsts++; - return 0; + hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI | + IXGBE_VFMAILBOX_RSTD); + } +} + +/** + * ixgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
+ **/ +STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); + + if (vf_mailbox & mask) + return IXGBE_SUCCESS; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_msg_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) + return IXGBE_SUCCESS; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_ack_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + /* TODO: should this be autocleared? */ + ixgbe_clear_ack_vf(hw); + return IXGBE_SUCCESS; } return IXGBE_ERR_MBX; } /** - * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock - * @hw: pointer to the HW structure - * @vf_number: the VF index + * ixgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check * - * return SUCCESS if we obtained the mailbox lock + * returns true if the PF has set the reset done bit or else false **/ -static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) { - u32 p2v_mailbox; + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_rst_vf"); - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); - - /* reserve mailbox for vf use */ - p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); - if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) - return 0; + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI | + IXGBE_VFMAILBOX_RSTD)) { + /* TODO: should this be autocleared? 
*/ + ixgbe_clear_rst_vf(hw); + return IXGBE_SUCCESS; + } return IXGBE_ERR_MBX; } /** - * ixgbe_write_mbx_pf - Places a message in the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index + * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure * - * returns SUCCESS if it successfully copied message into the buffer + * return SUCCESS if we obtained the mailbox lock **/ -static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 vf_number) +STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + s32 ret_val = IXGBE_ERR_MBX; + u32 vf_mailbox; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); + + if (!mbx->timeout) + return IXGBE_ERR_CONFIG; + + while (countdown--) { + /* Reserve mailbox for VF use */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_VFU; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* Verify that VF is the owner of the lock */ + if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) { + ret_val = IXGBE_SUCCESS; + break; + } + + /* Wait a bit before trying again */ + usec_delay(mbx->usec_delay); + } + + if (ret_val != IXGBE_SUCCESS) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Failed to obtain mailbox lock"); + ret_val = IXGBE_ERR_TIMEOUT; + } + + return ret_val; +} + +/** + * ixgbe_release_mbx_lock_dummy - release mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to read + **/ +STATIC void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_2PARAMETER(hw, mbx_id); + + DEBUGFUNC("ixgbe_release_mbx_lock_dummy"); +} + +/** + * ixgbe_release_mbx_lock_vf - release mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to read + **/ +STATIC void ixgbe_release_mbx_lock_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + u32 vf_mailbox; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("ixgbe_release_mbx_lock_vf"); + + /* Return ownership of the buffer */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox &= ~IXGBE_VFMAILBOX_VFU; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); +} + +/** + * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) { s32 ret_val; u16 i; + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_write_mbx_vf_legacy"); + /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + ret_val = ixgbe_obtain_mbx_lock_vf(hw); if (ret_val) return ret_val; /* flush msg and acks as we are overwriting the message buffer */ - ixgbe_check_for_msg_pf(hw, vf_number); - ixgbe_check_for_ack_pf(hw, vf_number); + ixgbe_check_for_msg_vf(hw, 0); + ixgbe_clear_msg_vf(hw); + ixgbe_check_for_ack_vf(hw, 0); + ixgbe_clear_ack_vf(hw); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); - - /* Interrupt VF to tell it a message has been sent and release buffer*/ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); /* update stats */ 
hw->mbx.stats.msgs_tx++; - return 0; + /* interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + + return IXGBE_SUCCESS; } /** - * ixgbe_read_mbx_pf - Read a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index + * ixgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write * - * This function copies a message from the mailbox buffer to the caller's - * memory buffer. The presumption is that the caller knows that there was - * a message due to a VF request so no polling for message is needed. + * returns SUCCESS if it successfully copied message into the buffer **/ -static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 vf_number) +STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + u32 vf_mailbox; + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("ixgbe_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_clear_msg_vf(hw); + ixgbe_clear_ack_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* interrupt the PF to tell it a message has been sent */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_REQ; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* if msg sent wait until we receive an ack */ + ixgbe_poll_for_ack(hw, mbx_id); + +out: + hw->mbx.ops[mbx_id].release(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_read_mbx_vf_legacy - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +STATIC s32 ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) { s32 ret_val; u16 i; + DEBUGFUNC("ixgbe_read_mbx_vf_legacy"); + UNREFERENCED_1PARAMETER(mbx_id); + /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + ret_val = ixgbe_obtain_mbx_lock_vf(hw); if (ret_val) return ret_val; - /* copy the message to the mailbox memory buffer */ + /* copy the message from the mailbox memory buffer */ for (i = 0; i < size; i++) - msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); - /* Acknowledge the message and release buffer */ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; - return 0; + return IXGBE_SUCCESS; } -#ifdef CONFIG_PCI_IOV /** - * ixgbe_init_mbx_params_pf - set initial values for pf mailbox - * @hw: pointer to the HW structure + * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read * - * Initializes the 
hw->mbx struct to correct values for pf mailbox
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			     u16 mbx_id)
+{
+	u32 vf_mailbox;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_read_mbx_vf");
+	UNREFERENCED_1PARAMETER(mbx_id);
+
+	/* check if there is a message from PF */
+	ret_val = ixgbe_check_for_msg_vf(hw, 0);
+	if (ret_val != IXGBE_SUCCESS)
+		return IXGBE_ERR_MBX_NOMSG;
+
+	ixgbe_clear_msg_vf(hw);
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+	/* Acknowledge receipt */
+	vf_mailbox = ixgbe_read_mailbox_vf(hw);
+	vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes a single set of the hw->mbx struct to correct values for the
+ * vf mailbox. The legacy set of functions is used here.
 */
-void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;

-	if (hw->mac.type != ixgbe_mac_82599EB &&
-	    hw->mac.type != ixgbe_mac_X550 &&
-	    hw->mac.type != ixgbe_mac_X550EM_x &&
-	    hw->mac.type != ixgbe_mac_x550em_a &&
-	    hw->mac.type != ixgbe_mac_X540)
-		return;
-
+	/* start mailbox as timed out and let the reset_hw call set the timeout
+	 * value to begin communications */
 	mbx->timeout = 0;
-	mbx->usec_delay = 0;
+	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	/* VF has only one mailbox connection, no need for more IDs */
+	mbx->ops[0].release = ixgbe_release_mbx_lock_dummy;
+	mbx->ops[0].read = ixgbe_read_mbx_vf_legacy;
+	mbx->ops[0].write = ixgbe_write_mbx_vf_legacy;
+	mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
+	mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
+	mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
+	mbx->ops[0].clear = NULL;

 	mbx->stats.msgs_tx = 0;
 	mbx->stats.msgs_rx = 0;
 	mbx->stats.reqs = 0;
 	mbx->stats.acks = 0;
 	mbx->stats.rsts = 0;
+}
+
+/**
+ * ixgbe_upgrade_mbx_params_vf - upgrade vf mailbox to extended operations
+ * @hw: pointer to the HW structure
+ *
+ * Reinitializes the hw->mbx struct with the extended set of vf mailbox
+ * operations and a working timeout.
+ */
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;

 	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	/* VF has only one mailbox connection, no need for more IDs */
+	mbx->ops[0].release = ixgbe_release_mbx_lock_vf;
+	mbx->ops[0].read = ixgbe_read_mbx_vf;
+	mbx->ops[0].write = ixgbe_write_mbx_vf;
+	mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
+	mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
+	mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
+	mbx->ops[0].clear = NULL;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
 }
-#endif /* CONFIG_PCI_IOV */

-const struct ixgbe_mbx_operations mbx_ops_generic = {
-	.read = ixgbe_read_mbx_pf,
-	.write = ixgbe_write_mbx_pf,
-	.read_posted = ixgbe_read_posted_mbx,
-	.write_posted = ixgbe_write_posted_mbx,
-	.check_for_msg = ixgbe_check_for_msg_pf,
-	.check_for_ack = ixgbe_check_for_ack_pf,
-	.check_for_rst = ixgbe_check_for_rst_pf,
-};
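
ixgbe_init_mbx_params_vf() and ixgbe_upgrade_mbx_params_vf() above give the VF a two-phase bring-up: start on the legacy ops that any PF understands, then swap in the extended ops once both ends have agreed on a new enough mailbox API. A compilable sketch of that switch-over (the negotiation is stubbed out; in the driver it happens through IXGBE_VF_API_NEGOTIATE, the version names only mirror the enum later in this patch, and treating 1.5 as the upgrade trigger is an assumption):

	#include <stdio.h>

	enum api_rev { api_10, api_11, api_12, api_13, api_14, api_15 };

	struct mbx_state { const char *ops; };

	static void init_params(struct mbx_state *m)    { m->ops = "legacy"; }
	static void upgrade_params(struct mbx_state *m) { m->ops = "extended"; }

	/* Stub: a real VF sends an API-negotiation message and parses the reply. */
	static enum api_rev negotiate(void) { return api_15; }

	int main(void)
	{
		struct mbx_state mbx;

		init_params(&mbx);            /* phase 1: legacy ops, always safe */
		if (negotiate() >= api_15)    /* phase 2: PF speaks the new API   */
			upgrade_params(&mbx);
		printf("using %s mailbox ops\n", mbx.ops);
		return 0;
	}
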
ixgbe_hw *hw, u16 vf_id)
+{
+	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+	u32 pfmbicr;
+
+	pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+	if (pfmbicr & (IXGBE_PFMBICR_VFREQ_VF1 << vf_shift))
+		hw->mbx.stats.reqs++;
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+			IXGBE_PFMBICR_VFREQ_VF1 << vf_shift);
+}
+
+STATIC void ixgbe_clear_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+	u32 pfmbicr;
+
+	pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+	if (pfmbicr & (IXGBE_PFMBICR_VFACK_VF1 << vf_shift))
+		hw->mbx.stats.acks++;
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+			IXGBE_PFMBICR_VFACK_VF1 << vf_shift);
+}
+
+STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+	u32 pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+	if (pfmbicr & mask)
+		return IXGBE_SUCCESS;
+
+	return IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if the VF has set the Request bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+
+	DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+	if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift,
+				    index))
+		return IXGBE_SUCCESS;
+
+	return IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+	if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift,
+				    index)) {
+		ret_val = IXGBE_SUCCESS;
+		/* TODO: should this be autocleared? */
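
The PF-side helpers above locate a VF's bits through the IXGBE_PFMBICR_INDEX/SHIFT pair, which is defined elsewhere (ixgbe_type.h). The sketch below assumes the layout implied by the VFREQ/VFACK masks later in this patch, i.e. 16 VFs per 32-bit PFMBICR register with request bits in the low half and ack bits in the high half; the macros here are illustrative stand-ins, not the driver's definitions:

	#include <stdio.h>

	#define PFMBICR_INDEX(vf) ((vf) >> 4)   /* which of the 4 registers */
	#define PFMBICR_SHIFT(vf) ((vf) & 0xF)  /* VF's bit inside it */
	#define PFMBICR_VFREQ_VF1 0x00000001u   /* request bit for VF 0 */
	#define PFMBICR_VFACK_VF1 0x00010000u   /* ack bit for VF 0 */

	int main(void)
	{
		unsigned vf;

		for (vf = 0; vf < 64; vf += 21)
			printf("vf %2u -> PFMBICR[%u] req 0x%08x ack 0x%08x\n",
			       vf, PFMBICR_INDEX(vf),
			       PFMBICR_VFREQ_VF1 << PFMBICR_SHIFT(vf),
			       PFMBICR_VFACK_VF1 << PFMBICR_SHIFT(vf));
		return 0;
	}
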
+		ixgbe_clear_ack_pf(hw, vf_id);
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if the VF has been reset (VFLR raised) or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	u32 vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id);
+	u32 index = IXGBE_PFVFLRE_INDEX(vf_id);
+	s32 ret_val = IXGBE_ERR_MBX;
+	u32 vflre = 0;
+
+	DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index));
+		break;
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_X550EM_a:
+	case ixgbe_mac_X540:
+		vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
+		break;
+	default:
+		break;
+	}
+
+	if (vflre & (1 << vf_shift)) {
+		ret_val = IXGBE_SUCCESS;
+		IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+	s32 ret_val = IXGBE_ERR_MBX;
+	u32 pf_mailbox;
+
+	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+	if (!mbx->timeout)
+		return IXGBE_ERR_CONFIG;
+
+	while (countdown--) {
+		/* Reserve mailbox for PF use */
+		pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+		pf_mailbox |= IXGBE_PFMAILBOX_PFU;
+		IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+		/* Verify that PF is the owner of the lock */
+		pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+		if (pf_mailbox & IXGBE_PFMAILBOX_PFU) {
+			ret_val = IXGBE_SUCCESS;
+			break;
+		}
+
+		/* Wait a bit before trying again */
+		usec_delay(mbx->usec_delay);
+	}
+
+	if (ret_val != IXGBE_SUCCESS) {
+		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+			      "Failed to obtain mailbox lock");
+		ret_val = IXGBE_ERR_TIMEOUT;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_release_mbx_lock_pf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ **/
+STATIC void ixgbe_release_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	u32 pf_mailbox;
+
+	DEBUGFUNC("ixgbe_release_mbx_lock_pf");
+
+	/* Return ownership of the buffer */
+	pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+	pf_mailbox &= ~IXGBE_PFMAILBOX_PFU;
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+}
+
+/**
+ * ixgbe_write_mbx_pf_legacy - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+				     u16 vf_id)
+{
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_write_mbx_pf_legacy");
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+	if (ret_val)
+		return ret_val;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	ixgbe_check_for_msg_pf(hw, vf_id);
+	ixgbe_clear_msg_pf(hw, vf_id);
+	ixgbe_check_for_ack_pf(hw, vf_id);
+	ixgbe_clear_ack_pf(hw, vf_id);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
+
+	/* Interrupt VF
to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_id) +{ + u32 pf_mailbox; + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id); + if (ret_val) + goto out; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_clear_msg_pf(hw, vf_id); + ixgbe_clear_ack_pf(hw, vf_id); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent */ + pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)); + pf_mailbox |= IXGBE_PFMAILBOX_STS; + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox); + + /* if msg sent wait until we receive an ack */ + ixgbe_poll_for_ack(hw, vf_id); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out: + hw->mbx.ops[vf_id].release(hw, vf_id); + + return ret_val; + +} + +/** + * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +STATIC s32 ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_id) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_pf_legacy"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id); + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/
+STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			     u16 vf_id)
+{
+	u32 pf_mailbox;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_read_mbx_pf");
+
+	/* check if there is a message from VF */
+	ret_val = ixgbe_check_for_msg_pf(hw, vf_id);
+	if (ret_val != IXGBE_SUCCESS)
+		return IXGBE_ERR_MBX_NOMSG;
+
+	ixgbe_clear_msg_pf(hw, vf_id);
+
+	/* copy the message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
+
+	/* Acknowledge the message and release buffer */
+	pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+	pf_mailbox |= IXGBE_PFMAILBOX_ACK;
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_mbx_pf - Clear Mailbox Memory
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Set VFMBMEM of given VF to 0x0.
+ **/
+STATIC s32 ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	u16 mbx_size = hw->mbx.size;
+	u16 i;
+
+	if (vf_id > 63)
+		return IXGBE_ERR_PARAM;
+
+	for (i = 0; i < mbx_size; ++i)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf_id - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes a single set of the hw->mbx struct to correct values for the
+ * pf mailbox. The legacy set of functions is used here.
+ */
+void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	mbx->ops[vf_id].release = ixgbe_release_mbx_lock_dummy;
+	mbx->ops[vf_id].read = ixgbe_read_mbx_pf_legacy;
+	mbx->ops[vf_id].write = ixgbe_write_mbx_pf_legacy;
+	mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
+	mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
+	mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
+	mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes all sets of the hw->mbx struct to correct values for the pf
+ * mailbox. One set corresponds to a single VF. It also initializes counters
+ * and general variables. A set of legacy functions is used by default.
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+	u16 i;
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	/* Ensure we are not calling this function from VF */
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+	    hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_X550EM_a &&
+	    hw->mac.type != ixgbe_mac_X540)
+		return;
+
+	/* Initialize common mailbox settings */
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	/* Initialize counters with zeroes */
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+
+	/* Regardless of the number of VFs enabled, we initialize params for
+	 * all 64 VFs.
+	 */
+	/* TODO: 1. Add a define for the max VF count and refactor SHARED to
+	 *	    get rid of the magic number for it (63 or 64 depending on
+	 *	    the use case).
+	 *	 2. Rewrite the code to dynamically allocate mbx->ops[vf_id]
+	 *	    for the actual number of VFs instead of the default
+	 *	    maximum of 64 (0..63).
+	 */
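
The per-VF ops table filled in above is a plain C function-pointer dispatch: callers always go through hw->mbx.ops[vf_id] rather than a fixed function, which is what lets one VF be upgraded to the new mailbox API while its neighbours stay on the legacy one. A self-contained sketch of the pattern, with stub backends in place of the real register code:

	#include <stdio.h>

	struct mbx_ops {
		int (*write)(unsigned vf_id);
	};

	static int write_legacy(unsigned vf)
	{
		printf("vf %u: legacy write\n", vf);
		return 0;
	}

	static int write_extended(unsigned vf)
	{
		printf("vf %u: extended write\n", vf);
		return 0;
	}

	int main(void)
	{
		struct mbx_ops ops[64];
		int i;

		for (i = 0; i < 64; i++)       /* init: everyone starts legacy */
			ops[i].write = write_legacy;
		ops[7].write = write_extended; /* VF 7 negotiated the new API */

		ops[3].write(3);               /* dispatch goes through the table */
		ops[7].write(7);
		return 0;
	}
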
+	for (i = 0; i < 64; i++)
+		ixgbe_init_mbx_params_pf_id(hw, i);
+}
+
+/**
+ * ixgbe_upgrade_mbx_params_pf - Upgrade initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes the hw->mbx struct to the new function set for improved
+ * stability and message handling.
+ */
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	/* Ensure we are not calling this function from VF */
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+	    hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_X550EM_a &&
+	    hw->mac.type != ixgbe_mac_X540)
+		return;
+
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	mbx->ops[vf_id].release = ixgbe_release_mbx_lock_pf;
+	mbx->ops[vf_id].read = ixgbe_read_mbx_pf;
+	mbx->ops[vf_id].write = ixgbe_write_mbx_pf;
+	mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
+	mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
+	mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
+	mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a148534d7256..c3c72a613a01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,47 +1,93 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2020 Intel Corporation.
*/ #ifndef _IXGBE_MBX_H_ #define _IXGBE_MBX_H_ #include "ixgbe_type.h" -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 +struct ixgbe_mbx_operations { + void (*init_params)(struct ixgbe_hw *hw); + void (*release)(struct ixgbe_hw *hw, u16 mbx_id); + s32 (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number); + s32 (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number); + s32 (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number); + s32 (*clear)(struct ixgbe_hw *hw, u16 vf_number); +}; -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + u32 acks; + u32 reqs; + u32 rsts; +}; -#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ +struct ixgbe_mbx_info { + /* + * PF: One set of operations for each VF to handle various API versions + * at the same time + * VF: Only the very first (0) set should be used + */ + struct ixgbe_mbx_operations ops[64]; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 vf_mailbox; + u16 size; +}; +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_PFMBICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_PFMBICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_PFMBICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_PFMBICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is IXGBE_PF_*. 
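
Concretely, a request carries its opcode in the low byte and any per-message extra info in bits 23:16, and the reply is the same word with one of the result bits or'd in. A runnable sketch using values from this header (IXGBE_VF_SET_VLAN as a representative opcode; the info-field usage shown is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define VT_MSGTYPE_SUCCESS 0x80000000u
	#define VT_MSGTYPE_FAILURE 0x40000000u
	#define VT_MSGINFO_SHIFT   16
	#define VT_MSGINFO_MASK    (0xFFu << VT_MSGINFO_SHIFT)
	#define VF_SET_VLAN        0x04u

	int main(void)
	{
		uint32_t req = VF_SET_VLAN | (1u << VT_MSGINFO_SHIFT);
		uint32_t reply = req | VT_MSGTYPE_SUCCESS;  /* PF says: ok */

		printf("opcode 0x%02x info %u result %s\n",
		       (unsigned)(reply & 0xFFu),
		       (unsigned)((reply & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT),
		       (reply & VT_MSGTYPE_SUCCESS) ? "success" : "failure");
		return 0;
	}
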
- * Message ACK's are the value or'd with 0xF0000000 + * Message results are the value or'd with 0xF0000000 */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this + * have succeeded + */ +#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this + * have failed + */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests + */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) /* definitions to support mailbox API version negotiation */ /* - * Each element denotes a version of the API; existing numbers may not + * each element denotes a version of the API; existing numbers may not * change; any additions must go at the end */ enum ixgbe_pfvf_api_rev { @@ -50,16 +96,18 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + /* API 1.4 is being used in the upstream for IPsec */ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ + ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; /* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ /* mailbox API, version 1.0 VF requests */ #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ @@ -69,41 +117,59 @@ enum ixgbe_pfvf_api_rev { /* mailbox API, version 1.1 VF requests */ #define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, +}; + /* GET_QUEUES return data indices within the mailbox */ #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ -/* mailbox API, version 1.2 VF requests */ -#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ -#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ - -#define 
IXGBE_VF_UPDATE_XCAST_MODE 0x0c - -/* mailbox API, version 1.4 VF requests */ -#define IXGBE_VF_IPSEC_ADD 0x0d -#define IXGBE_VF_IPSEC_DEL 0x0e - /* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 +#define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 +#define IXGBE_VF_MC_TYPE_WORD 3 -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +/* mailbox API, version 2.0 VF requests */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ -s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); -#ifdef CONFIG_PCI_IOV -void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); -#endif /* CONFIG_PCI_IOV */ +/* mailbox API, version 2.0 PF requests */ +#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ -extern const struct ixgbe_mbx_operations mbx_ops_generic; +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id); +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id); +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id); +s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 vf_number); +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); +void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id); +void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id); #endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 1e6cf220f543..d5f876a3f02a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ #ifndef _IXGBE_MODEL_H_ #define _IXGBE_MODEL_H_ @@ -29,8 +29,8 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.src_ip[0] = (__force __be32)val; - mask->formatted.src_ip[0] = (__force __be32)m; + input->filter.formatted.src_ip[0] = val; + mask->formatted.src_ip[0] = m; return 0; } @@ -38,8 +38,8 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.dst_ip[0] = (__force __be32)val; - mask->formatted.dst_ip[0] = (__force __be32)m; + input->filter.formatted.dst_ip[0] = val; + mask->formatted.dst_ip[0] = m; return 0; } @@ -55,10 +55,11 @@ static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.src_port = (__force __be16)(val & 0xffff); - mask->formatted.src_port = (__force __be16)(m & 0xffff); - input->filter.formatted.dst_port = (__force __be16)(val >> 16); - mask->formatted.dst_port = (__force __be16)(m >> 16); + input->filter.formatted.src_port = val & 0xffff; + mask->formatted.src_port = m & 0xffff; + + input->filter.formatted.dst_port = val >> 16; + mask->formatted.dst_port = m >> 16; return 0; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h new file mode 100644 index 000000000000..dbd04679ac42 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + + +/* glue for the OS independent part of ixgbe + * includes register access macros + */ + +#ifndef _IXGBE_OSDEP_H_ +#define _IXGBE_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "kcompat.h" + +#define IXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define IXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define IXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define IXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) + +#define usec_delay(_x) udelay(_x) + +#define STATIC static + +#define IOMEM __iomem + +#ifdef DBG +#define ASSERT(_x) BUG_ON(!(_x)) +#else +#define ASSERT(_x) do {} while (0) +#endif + +#define DEBUGFUNC(S) do {} while (0) + +#define IXGBE_SFP_DETECT_RETRIES 2 + +struct ixgbe_hw; +struct ixgbe_msg { + u16 msg_enable; +}; +struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw); +struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw); + +#ifdef DBG +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) +#else +#define hw_dbg(hw, format, arg...) do {} while (0) +#endif + +#define hw_err(hw, format, arg...) \ + netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) 
\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define IXGBE_DEAD_READ_RETRIES 10 +#define IXGBE_DEAD_READ_REG 0xdeadbeefU +#define IXGBE_FAILED_READ_REG 0xffffffffU +#define IXGBE_FAILED_READ_RETRIES 5 +#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define IXGBE_FAILED_READ_CFG_WORD 0xffffU +#define IXGBE_FAILED_READ_CFG_BYTE 0xffU + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ + IXGBE_WRITE_REG((a), (reg) + ((offset) << 2), (value)) + +#define IXGBE_READ_REG(h, r) ixgbe_read_reg(h, r, false) +#define IXGBE_R32_Q(h, r) ixgbe_read_reg(h, r, true) +#define IXGBE_R8_Q(h, r) readb(READ_ONCE(h->hw_addr) + r) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ + IXGBE_READ_REG((a), (reg) + ((offset) << 2))) + +#ifndef writeq +#define writeq(val, addr) do { writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); \ + } while (0); +#endif + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +u32 ixgbe_read_reg(struct ixgbe_hw *, u32 reg, bool quiet); +extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); +extern void ewarn(struct ixgbe_hw *hw, const char *str); + +#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word +#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word +#define IXGBE_EEPROM_GRANT_ATTEMPS 100 +#define IXGBE_HTONL(_i) htonl(_i) +#define IXGBE_NTOHL(_i) ntohl(_i) +#define IXGBE_NTOHS(_i) ntohs(_i) +#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define IXGBE_CPU_TO_LE16(_i) cpu_to_le16(_i) +#define IXGBE_LE32_TO_CPU(_i) le32_to_cpu(_i) +#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) +#define EWARN(H, W) ewarn(H, W) + +enum { + IXGBE_ERROR_SOFTWARE, + IXGBE_ERROR_POLLING, + IXGBE_ERROR_INVALID_STATE, + IXGBE_ERROR_UNSUPPORTED, + IXGBE_ERROR_ARGUMENT, + IXGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) do { \ + switch (level) { \ + case IXGBE_ERROR_SOFTWARE: \ + case IXGBE_ERROR_CAUTION: \ + case IXGBE_ERROR_POLLING: \ + netif_warn(ixgbe_hw_to_msg(hw), drv, ixgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case IXGBE_ERROR_INVALID_STATE: \ + case IXGBE_ERROR_UNSUPPORTED: \ + case IXGBE_ERROR_ARGUMENT: \ + netif_err(ixgbe_hw_to_msg(hw), hw, ixgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) + +#endif /* _IXGBE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h new file mode 100644 index 000000000000..6147aa3960bd --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
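
One helper above deserves a note: the writeq() fallback emulates a 64-bit MMIO store with two 32-bit writes, low dword first. A standalone illustration of that split, using plain memory instead of device registers so it can actually run (output shown assumes a little-endian host):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void write32(uint8_t *addr, uint32_t v)
	{
		memcpy(addr, &v, sizeof(v)); /* stands in for writel() */
	}

	/* Same idea as the fallback: low half first, then high half. */
	static void write64_split(uint8_t *addr, uint64_t v)
	{
		write32(addr, (uint32_t)v);
		write32(addr + 4, (uint32_t)(v >> 32));
	}

	int main(void)
	{
		uint8_t reg[8] = { 0 };
		int i;

		write64_split(reg, 0x1122334455667788ULL);
		for (i = 0; i < 8; i++)
			printf("%02x ", reg[i]);
		printf("\n");
		return 0;
	}
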
*/ + +#ifndef _IXGBE_OSDEP2_H_ +#define _IXGBE_OSDEP2_H_ + +static inline bool ixgbe_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define IXGBE_REMOVED(a) ixgbe_removed(a) + +static inline void IXGBE_WRITE_REG(struct ixgbe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return; +#ifdef DBG + { + struct net_device *netdev = ixgbe_hw_to_netdev(hw); + + switch (reg) { + case IXGBE_EIMS: + case IXGBE_EIMC: + case IXGBE_EIAM: + case IXGBE_EIAC: + case IXGBE_EICR: + case IXGBE_EICS: + netdev_info(netdev, + "%s: Reg - 0x%05X, value - 0x%08X\n", + __func__, reg, value); + } + } +#endif /* DBG */ + writel(value, reg_addr + reg); +} + +static inline void IXGBE_WRITE_REG64(struct ixgbe_hw *hw, u32 reg, u64 value) +{ + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return; + writeq(value, reg_addr + reg); +} + +#endif /* _IXGBE_OSDEP2_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_param.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_param.c new file mode 100644 index 000000000000..b025e208a552 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_param.c @@ -0,0 +1,1267 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#include +#include + +#include "ixgbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define IXGBE_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when ixgbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
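
To make the macro concrete, here is what the module_param_array branch of IXGBE_PARAM expands to for one hypothetical option named Foo (kernel-module context assumed; the __devinitdata attribute is dropped for brevity):

	#include <linux/module.h>

	#define IXGBE_MAX_NIC 32

	/* every slot starts at -1 (OPTION_UNSET) via the range initializer */
	static int Foo[IXGBE_MAX_NIC + 1] = { [0 ... IXGBE_MAX_NIC] = -1 };
	static unsigned int num_Foo;
	module_param_array_named(Foo, Foo, int, &num_Foo, 0);
	MODULE_PARM_DESC(Foo, "Example per-adapter option");

Board bd then tests num_Foo > bd to learn whether the user supplied a value for that adapter, exactly as ixgbe_check_options() does below.
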
+ */ + +#define IXGBE_PARAM(X, desc) \ + static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define IXGBE_PARAM(X, desc) \ + static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define IXGBE_INT_LEGACY 0 +#define IXGBE_INT_MSI 1 +#define IXGBE_INT_MSIX 2 + +IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + +#if IS_ENABLED(CONFIG_DCA) +/* DCA - Direct Cache Access (DCA) Control + * + * This option allows the device to hint to DCA enabled processors + * which CPU should have its cache warmed with the data being + * transferred over PCIe. This can increase performance by reducing + * cache misses. ixgbe hardware supports DCA for: + * tx descriptor writeback + * rx descriptor writeback + * rx data + * rx data header only (in packet split mode) + * + * enabling option 2 can cause cache thrash in some tests, particularly + * if the CPU is completely utilized + * + * Valid Range: 0 - 2 + * - 0 - disables DCA + * - 1 - enables DCA + * - 2 - enables DCA with rx data included + * + * Default Value: 2 + */ + +#define IXGBE_MAX_DCA 2 + +IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, " + "1=descriptor only, 2=descriptor and data"); +#endif /* CONFIG_DCA */ + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-16 + * - 0 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). + * - 1-16 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 0/1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. + * + * Default Value: 8 + */ + +#define IXGBE_DEFAULT_NUM_VMDQ 8 + +IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable (1 queue) " + "2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 63 + +IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/* + *Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. 
+*/ +IXGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 956-488281 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 +IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,956-488281), default 1"); +#define MAX_ITR IXGBE_MAX_INT_RATE +#define MIN_ITR IXGBE_MIN_INT_RATE + +#ifndef IXGBE_NO_LLI + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + +/* LLIPush (Low Latency Interrupt on TCP Push flag) + * + * Valid Range: 0,1 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)"); + +#define DEFAULT_LLIPUSH 0 +#define MAX_LLIPUSH 1 +#define MIN_LLIPUSH 0 + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +#endif /* IXGBE_NO_LLI */ +#ifdef HAVE_TX_MQ +/* Flow Director packet buffer allocation level + * + * Valid Range: 1-3 + * 1 = 8k hash/2k perfect, + * 2 = 16k hash/4k perfect, + * 3 = 32k hash/8k perfect + * + * Default Value: 0 + */ +IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n" + "\t\t\t1 = 8k hash filters or 2k perfect filters\n" + "\t\t\t2 = 16k hash filters or 4k perfect filters\n" + "\t\t\t3 = 32k hash filters or 8k perfect filters"); + +#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K + +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); + +#define IXGBE_MAX_ATR_SAMPLE_RATE 255 +#define IXGBE_MIN_ATR_SAMPLE_RATE 1 +#define IXGBE_ATR_SAMPLE_RATE_OFF 0 +#define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20 +#endif /* HAVE_TX_MQ */ + +#if IS_ENABLED(CONFIG_FCOE) +/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables FCoE Offload + * - 1 - enables FCoE Offload + * + * Default Value: 1 + */ +IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1"); +#endif /* CONFIG_FCOE */ + +/* Enable/disable Malicious Driver Detection + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +IXGBE_PARAM(MDD, "Malicious Driver Detection: (0,1), default 1 = on"); + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +IXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 0 = off"); + +/* Enable/disable support for untested SFP+ modules on 82599-based adapters + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default 
Value: 0 + */ +IXGBE_PARAM(allow_unsupported_sfp, "Allow unsupported and untested " + "SFP+ modules on 82599 based adapters, default 0 = Disable"); + +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ +IXGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000), default 0 = off"); + +/* Enable/disable support for VXLAN rx checksum offload + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 1 on hardware that supports it + */ +IXGBE_PARAM(vxlan_rx, + "VXLAN receive checksum offload (0,1), default 1 = Enable"); + + +struct ixgbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct ixgbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +#ifndef IXGBE_NO_LLI +#ifdef module_param_array +/** + * helper function to determine LLI support + * @adapter: board private structure + * @opt: pointer to option struct + * + * LLI is only supported for 82599 and X540 + * LLIPush is not supported on 82599 + **/ +#ifdef HAVE_CONFIG_HOTPLUG +static bool __devinit ixgbe_lli_supported(struct ixgbe_adapter *adapter, + struct ixgbe_option *opt) +#else +static bool ixgbe_lli_supported(struct ixgbe_adapter *adapter, + struct ixgbe_option *opt) +#endif +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (hw->mac.type == ixgbe_mac_82599EB) { + + if (LLIPush[adapter->bd_number] > 0) + goto not_supp; + + return true; + } + + if (hw->mac.type == ixgbe_mac_X540) + return true; + +not_supp: + DPRINTK(PROBE, INFO, "%s not supported on this HW\n", opt->name); + return false; +} +#endif /* module_param_array */ +#endif /* IXGBE_NO_LLI */ + +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit ixgbe_validate_option(struct net_device *netdev, + unsigned int *value, + struct ixgbe_option *opt) +#else +static int ixgbe_validate_option(struct net_device *netdev, + unsigned int *value, + struct ixgbe_option *opt) +#endif +{ + if (*value == OPTION_UNSET) { + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + netdev_info(netdev, "%s set to %d, %s\n", + opt->name, *value, opt->msg); + else + netdev_info(netdev, "%s set to %d\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct ixgbe_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(netdev, "%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) +#define PSTR_LEN 10 + +/** + * ixgbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. 
If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +#ifdef HAVE_CONFIG_HOTPLUG +void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) +#else +void ixgbe_check_options(struct ixgbe_adapter *adapter) +#endif +{ + unsigned int mdd; + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct ixgbe_ring_feature *feature = adapter->ring_feature; + unsigned int vmdq; + + if (bd >= IXGBE_MAX_NIC) { + netdev_notice(adapter->netdev, + "Warning: no configuration for board #%d\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); +#ifndef module_param_array + bd = IXGBE_MAX_NIC; +#endif + } + + { /* Interrupt Mode */ + unsigned int int_mode; + static struct ixgbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of " __MODULE_STRING(IXGBE_INT_MSIX), + .def = IXGBE_INT_MSIX, + .arg = { .r = { .min = IXGBE_INT_LEGACY, + .max = IXGBE_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + ixgbe_validate_option(adapter->netdev, + &int_mode, &opt); + switch (int_mode) { + case IXGBE_INT_MSIX: + if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) + netdev_info(adapter->netdev, + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case IXGBE_INT_MSI: + if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; + } + break; + case IXGBE_INT_LEGACY: + default: + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (*aflags & IXGBE_FLAG_MSIX_CAPABLE) { + *aflags |= IXGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; + } + } +#endif + } + { /* Multiple Queue Support */ + static struct ixgbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_MQ > bd) { +#endif + unsigned int mq = MQ[bd]; + ixgbe_validate_option(adapter->netdev, &mq, &opt); + if (mq) + *aflags |= IXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; +#ifdef module_param_array + } else { + *aflags |= IXGBE_FLAG_MQ_CAPABLE; + } +#endif + /* Check Interoperability */ + if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) && + !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. 
Disabling Multiple Queues.\n"); + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; + } + } +#if IS_ENABLED(CONFIG_DCA) + { /* Direct Cache Access (DCA) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Direct Cache Access (DCA)", + .err = "defaulting to Enabled", + .def = IXGBE_MAX_DCA, + .arg = { .r = { .min = OPTION_DISABLED, + .max = IXGBE_MAX_DCA} } + }; + unsigned int dca = opt.def; + +#ifdef module_param_array + if (num_DCA > bd) { +#endif + dca = DCA[bd]; + ixgbe_validate_option(adapter->netdev, &dca, &opt); + if (!dca) + *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; + + /* Check Interoperability */ + if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) { + DPRINTK(PROBE, INFO, "DCA is disabled\n"); + *aflags &= ~IXGBE_FLAG_DCA_ENABLED; + } + + if (dca == IXGBE_MAX_DCA) { + DPRINTK(PROBE, INFO, + "DCA enabled for rx data\n"); + adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; + } +#ifdef module_param_array + } else { + /* make sure to clear the capability flag if the + * option is disabled by default above */ + if (opt.def == OPTION_DISABLED) + *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; + } +#endif + if (dca == IXGBE_MAX_DCA) + adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; + } +#endif /* CONFIG_DCA */ + { /* Receive-Side Scaling (RSS) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 16} } + }; + unsigned int rss = RSS[bd]; + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = ixgbe_max_rss_indices(adapter); + +#ifdef module_param_array + if (num_RSS > bd) { +#endif + ixgbe_validate_option(adapter->netdev, &rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + else + feature[RING_F_FDIR].limit = rss; + + feature[RING_F_RSS].limit = rss; +#ifdef module_param_array + } else if (opt.def == 0) { + rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_RSS].limit = rss; + } +#endif + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. " + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = IXGBE_MAX_VMDQ_INDICES + } } + }; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* 82598 only supports up to 16 pools */ + opt.arg.r.max = 16; + break; + default: + break; + } + +#ifdef module_param_array + if (num_VMDQ > bd) { +#endif + vmdq = VMDQ[bd]; + + ixgbe_validate_option(adapter->netdev, &vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= IXGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = vmdq; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= IXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } +#endif + /* Check Interoperability */ + if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. 
" + "Disabling VMDQ.\n"); + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + unsigned int vfs = max_vfs[bd]; + if (ixgbe_validate_option(adapter->netdev, + &vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->max_vfs = vfs; + + if (vfs) + *aflags |= IXGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) { + adapter->max_vfs = 0; + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->max_vfs = opt.def; + *aflags |= IXGBE_FLAG_SRIOV_ENABLED; + } + } +#endif + + /* Check Interoperability */ + if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported on this " + "hardware. Disabling IOV.\n"); + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported while multiple " + "queues are disabled. " + "Disabling IOV.\n"); + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + +#ifdef module_param_array + if (num_VEPA > bd) { +#endif + unsigned int vepa = VEPA[bd]; + ixgbe_validate_option(adapter->netdev, &vepa, &opt); + if (vepa) + adapter->flags |= + IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } +#endif + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + ixgbe_validate_option(adapter->netdev, + &itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (1000000/itr) << 2; + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } +#ifndef IXGBE_NO_LLI + { /* Low Latency Interrupt TCP Port*/ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + +#ifdef 
module_param_array + if (num_LLIPort > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + ixgbe_validate_option(adapter->netdev, + &adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_port = opt.def; + } +#endif + } + { /* Low Latency Interrupt on Packet Size */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + +#ifdef module_param_array + if (num_LLISize > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + ixgbe_validate_option(adapter->netdev, + &adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_size = opt.def; + } +#endif + } + { /* Low Latency Interrupt on TCP Push flag */ + static struct ixgbe_option opt = { + .type = enable_option, + .name = "Low Latency Interrupt on TCP Push flag", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + +#ifdef module_param_array + if (num_LLIPush > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + unsigned int lli_push = LLIPush[bd]; + + ixgbe_validate_option(adapter->netdev, + &lli_push, &opt); + if (lli_push) + *aflags |= IXGBE_FLAG_LLI_PUSH; + else + *aflags &= ~IXGBE_FLAG_LLI_PUSH; +#ifdef module_param_array + } else { + *aflags &= ~IXGBE_FLAG_LLI_PUSH; + } +#endif + } + { /* Low Latency Interrupt EtherType */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + +#ifdef module_param_array + if (num_LLIEType > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + ixgbe_validate_option(adapter->netdev, + &adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_etype = opt.def; + } +#endif + } + { /* LLI VLAN Priority */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + +#ifdef module_param_array + if (num_LLIVLANP > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + ixgbe_validate_option(adapter->netdev, + &adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_vlan_pri = opt.def; + } +#endif + } +#endif /* IXGBE_NO_LLI */ +#ifdef HAVE_TX_MQ + { /* Flow Director packet buffer allocation */ + unsigned int fdir_pballoc_mode; + static struct ixgbe_option opt = { + .type = range_option, + .name = "Flow Director packet buffer allocation", + .err = "using default of " + __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC), + .def = IXGBE_DEFAULT_FDIR_PBALLOC, + .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K, +
.max = IXGBE_FDIR_PBALLOC_256K} } + }; + char pstring[PSTR_LEN]; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_NONE; + } else if (num_FdirPballoc > bd) { + fdir_pballoc_mode = FdirPballoc[bd]; + ixgbe_validate_option(adapter->netdev, + &fdir_pballoc_mode, &opt); + switch (fdir_pballoc_mode) { + case IXGBE_FDIR_PBALLOC_256K: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; + snprintf(pstring, PSTR_LEN, "256kB"); + break; + case IXGBE_FDIR_PBALLOC_128K: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K; + snprintf(pstring, PSTR_LEN, "128kB"); + break; + case IXGBE_FDIR_PBALLOC_64K: + default: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; + snprintf(pstring, PSTR_LEN, "64kB"); + break; + } + DPRINTK(PROBE, INFO, "Flow Director will be allocated " + "%s of packet buffer\n", pstring); + } else { + adapter->fdir_pballoc = opt.def; + } + + } + { /* Flow Director ATR Tx sample packet rate */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF, + .max = IXGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; + } else if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + ixgbe_validate_option(adapter->netdev, + &adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + { + *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_82599EB: { + struct ixgbe_option opt = { + .type = enable_option, + .name = "Enable/Disable FCoE offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; +#ifdef module_param_array + if (num_FCoE > bd) { +#endif + unsigned int fcoe = FCoE[bd]; + + ixgbe_validate_option(adapter->netdev, + &fcoe, &opt); + if (fcoe) + *aflags |= IXGBE_FLAG_FCOE_CAPABLE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= IXGBE_FLAG_FCOE_CAPABLE; + } +#endif + DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", + (*aflags & IXGBE_FLAG_FCOE_CAPABLE) ? + "en" : "dis"); + } + break; + default: + break; + } + } +#endif /* CONFIG_FCOE */ + { /* LRO - Set Large Receive Offload */ + struct ixgbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + struct net_device *netdev = adapter->netdev; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + opt.def = OPTION_DISABLED; + +#ifdef module_param_array + if (num_LRO > bd) { +#endif + unsigned int lro = LRO[bd]; + ixgbe_validate_option(adapter->netdev, &lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; +#ifdef module_param_array + } else { + netdev->features &= ~NETIF_F_LRO; + } +#endif + if ((netdev->features & NETIF_F_LRO) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. 
Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } + } + { /* + * allow_unsupported_sfp - Enable/Disable support for unsupported + * and untested SFP+ modules. + */ + struct ixgbe_option opt = { + .type = enable_option, + .name = "allow_unsupported_sfp", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; +#ifdef module_param_array + if (num_allow_unsupported_sfp > bd) { +#endif + unsigned int enable_unsupported_sfp = + allow_unsupported_sfp[bd]; + ixgbe_validate_option(adapter->netdev, + &enable_unsupported_sfp, &opt); + if (enable_unsupported_sfp) { + adapter->hw.allow_unsupported_sfp = true; + } else { + adapter->hw.allow_unsupported_sfp = false; + } +#ifdef module_param_array + } else { + adapter->hw.allow_unsupported_sfp = false; + } +#endif + } + { /* DMA Coalescing */ + struct ixgbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this hardware"; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if (adapter->rx_itr_setting || adapter->tx_itr_setting) + break; + opt.err = "interrupt throttling disabled also disables DMA coalescing"; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + break; + default: + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + } +#ifdef module_param_array + if (num_dmac_watchdog > bd) { +#endif + unsigned int dmac_wd = dmac_watchdog[bd]; + + ixgbe_validate_option(adapter->netdev, &dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = dmac_wd; +#ifdef module_param_array + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } +#endif + } + { /* VXLAN rx offload */ + struct ixgbe_option opt = { + .type = range_option, + .name = "vxlan_rx", + .err = "defaulting to 1 (enabled)", + .def = 1, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + const char *cmsg = "VXLAN rx offload not supported on this hardware"; + const u32 flag = IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + opt.err = cmsg; + opt.msg = cmsg; + opt.def = 0; + opt.arg.r.max = 0; + } +#ifdef module_param_array + if (num_vxlan_rx > bd) { +#endif + unsigned int enable_vxlan_rx = vxlan_rx[bd]; + + ixgbe_validate_option(adapter->netdev, + &enable_vxlan_rx, &opt); + if (enable_vxlan_rx) + adapter->flags |= flag; + else + adapter->flags &= ~flag; +#ifdef module_param_array + } else if (opt.def) { + adapter->flags |= flag; + } else { + adapter->flags &= ~flag; + } +#endif + } + + { /* MDD support */ + struct ixgbe_option opt = { + .type = enable_option, + .name = "Malicious Driver Detection", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED, + }; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: +#ifdef module_param_array + if (num_MDD > bd) { +#endif + mdd = MDD[bd]; + ixgbe_validate_option(adapter->netdev, + &mdd, &opt); + + if (mdd) { + *aflags |= IXGBE_FLAG_MDD_ENABLED; + } else { + *aflags &= ~IXGBE_FLAG_MDD_ENABLED; + } +#ifdef module_param_array + } else { + *aflags |= IXGBE_FLAG_MDD_ENABLED; + } +#endif + break; + default: + *aflags &= ~IXGBE_FLAG_MDD_ENABLED; + break; + } + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 2fb97967961c..c26e0be60b89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,38 +1,32 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/iopoll.h> -#include <linux/sched.h> - -#include "ixgbe.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" #include "ixgbe_phy.h" -static void ixgbe_i2c_start(struct ixgbe_hw *hw); -static void ixgbe_i2c_stop(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); -static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); -static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); -static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); -static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw); +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw); +STATIC void ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +STATIC void ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); /** - * ixgbe_out_i2c_byte_ack - Send I2C byte with ack - * @hw: pointer to the hardware structure - * @byte: byte to send + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send * - * Returns an error code on error. - **/ -static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) + * Returns an error code on error. + */ +STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) { s32 status; @@ -43,31 +37,27 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) } /** - * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack - * @hw: pointer to the hardware structure - * @byte: pointer to a u8 to receive the byte + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte * - * Returns an error code on error. 
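+ *
+ * (The ack itself is produced by the call below that clocks out a
+ * single 0 bit, ixgbe_clock_out_i2c_bit(hw, false), after the byte
+ * has been clocked in.)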
+ */ +STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) { - s32 status; - - status = ixgbe_clock_in_i2c_byte(hw, byte); - if (status) - return status; + ixgbe_clock_in_i2c_byte(hw, byte); /* ACK */ return ixgbe_clock_out_i2c_bit(hw, false); } /** - * ixgbe_ones_comp_byte_add - Perform one's complement addition - * @add1: addend 1 - * @add2: addend 2 + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1: addend 1 + * @add2: addend 2 * - * Returns one's complement 8-bit sum. - **/ -static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) + * Returns one's complement 8-bit sum. + */ +STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) { u16 sum = add1 + add2; @@ -76,17 +66,17 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) } /** - * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * @lock: true if to take and release semaphore + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * @lock: true if to take and release semaphore * - * Returns an error code on error. + * Returns an error code on error. */ -s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val, bool lock) +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 3; @@ -97,7 +87,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u8 reg_high; u8 csum; - reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ~csum; do { @@ -128,8 +118,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) goto fail; /* Get csum */ - if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) - goto fail; + ixgbe_clock_in_i2c_byte(hw, &csum_byte); /* NACK */ if (ixgbe_clock_out_i2c_bit(hw, false)) goto fail; @@ -143,28 +132,28 @@ fail: ixgbe_i2c_bus_clear(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); - retry++; if (retry < max_retry) - hw_dbg(hw, "I2C byte read combined error - Retry.\n"); + hw_dbg(hw, "I2C byte read combined error - Retrying.\n"); else hw_dbg(hw, "I2C byte read combined error.\n"); - } while (retry < max_retry); + retry++; + } while (retry <= max_retry); return IXGBE_ERR_I2C; } /** - * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * @lock: true if to take and release semaphore + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * @lock: true if to take and release semaphore * - * Returns an error code on error. + * Returns an error code on error. 
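+ *
+ * Worked example (illustrative register/value, not from any datasheet):
+ * for reg = 0x0123 and val = 0xBEEF the address phase sends
+ * reg_high = (0x0123 >> 7) & 0xFE = 0x02 to indicate write combined,
+ * and the one's complement accumulation over 0x02, 0x23, 0xBE and 0xEF
+ * folds to 0xD3; assuming the same final inversion as the read path,
+ * the checksum byte sent is ~0xD3 = 0x2C.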
*/ -s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 val, bool lock) +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 1; @@ -172,7 +161,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u8 reg_high; u8 csum; - reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ixgbe_ones_comp_byte_add(csum, val >> 8); csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); @@ -208,30 +197,70 @@ fail: ixgbe_i2c_bus_clear(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); - retry++; if (retry < max_retry) - hw_dbg(hw, "I2C byte write combined error - Retry.\n"); + hw_dbg(hw, "I2C byte write combined error - Retrying.\n"); else hw_dbg(hw, "I2C byte write combined error.\n"); - } while (retry < max_retry); + retry++; + } while (retry <= max_retry); return IXGBE_ERR_I2C; } /** - * ixgbe_probe_phy - Probe a single address for a PHY - * @hw: pointer to hardware structure - * @phy_addr: PHY address to probe + * ixgbe_init_phy_ops_generic - Inits PHY function ptrs + * @hw: pointer to the hardware structure * - * Returns true if PHY found + * Initialize the function pointers. **/ +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + DEBUGFUNC("ixgbe_init_phy_ops_generic"); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_generic; + phy->ops.reset = ixgbe_reset_phy_generic; + phy->ops.read_reg = ixgbe_read_phy_reg_generic; + phy->ops.write_reg = ixgbe_write_phy_reg_generic; + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; + phy->ops.setup_link = ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; + phy->ops.check_link = NULL; + phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; + phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; + phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; + phy->ops.identify_sfp = ixgbe_identify_module_generic; + phy->sfp_type = ixgbe_sfp_type_unknown; + phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; + phy->ops.write_i2c_byte_unlocked = + ixgbe_write_i2c_byte_generic_unlocked; + phy->ops.check_overtemp = ixgbe_tn_check_overtemp; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_probe_phy - Probe a single address for a PHY + * @hw: pointer to hardware structure + * @phy_addr: PHY address to probe + * + * Returns true if PHY found + */ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) { u16 ext_ability = 0; - hw->phy.mdio.prtad = phy_addr; - if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) + if (!ixgbe_validate_phy_addr(hw, phy_addr)) { + hw_dbg(hw, "Unable to validate PHY address 0x%04X\n", + phy_addr); return false; + } if (ixgbe_get_phy_id(hw)) return false; @@ -239,13 +268,11 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); if (hw->phy.type == ixgbe_phy_unknown) { - hw->phy.ops.read_reg(hw, - MDIO_PMA_EXTABLE, - MDIO_MMD_PMAPMD, - &ext_ability); + hw->phy.ops.read_reg(hw, 
IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & - (MDIO_PMA_EXTABLE_10GBT | - MDIO_PMA_EXTABLE_1000BT) + (IXGBE_MDIO_PHY_10GBASET_ABILITY | + IXGBE_MDIO_PHY_1000BASET_ABILITY)) hw->phy.type = ixgbe_phy_cu_unknown; else hw->phy.type = ixgbe_phy_generic; @@ -255,15 +282,17 @@ } /** - * ixgbe_identify_phy_generic - Get physical layer module - * @hw: pointer to hardware structure + * ixgbe_identify_phy_generic - Get physical layer module + * @hw: pointer to hardware structure * - * Determines the physical layer module found on the current adapter. + * Determines the physical layer module found on the current adapter. **/ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { - u32 phy_addr; - u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u16 phy_addr; + + DEBUGFUNC("ixgbe_identify_phy_generic"); if (!hw->phy.phy_semaphore_mask) { if (hw->bus.lan_id) @@ -273,21 +302,21 @@ } if (hw->phy.type != ixgbe_phy_unknown) - return 0; + return IXGBE_SUCCESS; if (hw->phy.nw_mng_if_sel) { phy_addr = (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; if (ixgbe_probe_phy(hw, phy_addr)) - return 0; + return IXGBE_SUCCESS; else return IXGBE_ERR_PHY_ADDR_INVALID; } for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { if (ixgbe_probe_phy(hw, phy_addr)) { - status = 0; + status = IXGBE_SUCCESS; break; } } @@ -296,8 +325,8 @@ * be found and the code will take this path. Caller has to * decide if it is an error or not. */ - if (status) - hw->phy.mdio.prtad = MDIO_PRTAD_NONE; + if (status != IXGBE_SUCCESS) + hw->phy.addr = 0; return status; } @@ -308,20 +337,23 @@ * * This function checks the MMNGC.MNG_VETO bit to see if there are * any constraints on link from manageability. For MAC's that don't * have this bit just return false since the link can not be blocked * via this method. 
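 *
 * Typical caller pattern (used by the reset and auto-negotiation
 * helpers later in this file): bail out before touching the PHY,
 * e.g. if (ixgbe_check_reset_blocked(hw)) return;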
**/ -bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) { u32 mmngc; + DEBUGFUNC("ixgbe_check_reset_blocked"); + /* If we don't have this bit, it can't be blocking */ if (hw->mac.type == ixgbe_mac_82598EB) return false; mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); if (mmngc & IXGBE_MMNGC_MNG_VETO) { - hw_dbg(hw, "MNG_VETO bit detected.\n"); + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, + "MNG_VETO bit detected.\n"); return true; } @@ -329,44 +361,77 @@ } /** - * ixgbe_get_phy_id - Get the phy type - * @hw: pointer to hardware structure + * ixgbe_validate_phy_addr - Determines if a phy address is valid + * @hw: pointer to hardware structure + * @phy_addr: PHY address * **/ -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) { - s32 status; + u16 phy_id = 0; + bool valid = false; + + DEBUGFUNC("ixgbe_validate_phy_addr"); + + hw->phy.addr = phy_addr; + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + hw_dbg(hw, "PHY ID HIGH is 0x%04X\n", phy_id); + + return valid; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; u16 phy_id_high = 0; u16 phy_id_low = 0; - status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, + DEBUGFUNC("ixgbe_get_phy_id"); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id_high); - if (!status) { + if (status == IXGBE_SUCCESS) { hw->phy.id = (u32)(phy_id_high << 16); - status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); } + hw_dbg(hw, "PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n", + phy_id_high, phy_id_low); + return status; } /** - * ixgbe_get_phy_type_from_id - Get the phy type - * @phy_id: hardware phy id + * ixgbe_get_phy_type_from_id - Get the phy type + * @phy_id: PHY ID information * **/ -static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) { enum ixgbe_phy_type phy_type; + DEBUGFUNC("ixgbe_get_phy_type_from_id"); + switch (phy_id) { case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; - case X550_PHY_ID2: - case X550_PHY_ID3: + case X550_PHY_ID: case X540_PHY_ID: phy_type = ixgbe_phy_aq; break; @@ -380,46 +445,51 @@ case X557_PHY_ID2: phy_type = ixgbe_phy_x550em_ext_t; break; + case IXGBE_M88E1500_E_PHY_ID: + case IXGBE_M88E1543_E_PHY_ID: + phy_type = ixgbe_phy_ext_1g_t; + break; default: phy_type = ixgbe_phy_unknown; break; } - return phy_type; } /** - * ixgbe_reset_phy_generic - Performs a PHY reset - * @hw: pointer to hardware structure + * ixgbe_reset_phy_generic - Performs a PHY reset + * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { u32 i; u16 ctrl = 0; - s32 status = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_reset_phy_generic"); if (hw->phy.type == ixgbe_phy_unknown) status = ixgbe_identify_phy_generic(hw); - if (status != 0 || hw->phy.type 
== ixgbe_phy_none) + goto out; /* Don't reset PHY if it's shut down due to overtemp. */ if (!hw->phy.reset_if_overtemp && (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) - return 0; + goto out; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) - return 0; + goto out; /* * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY */ - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_PHYXS, - MDIO_CTRL1_RESET); + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); /* * Poll for reset bit to self-clear indicating reset is complete. @@ -427,94 +497,123 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * 1.7 usec delay after the reset is complete. */ for (i = 0; i < 30; i++) { - msleep(100); + msec_delay(100); if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_TX_VENDOR_ALARMS_3, - MDIO_MMD_PMAPMD, &ctrl); - if (status) + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ctrl); + if (status != IXGBE_SUCCESS) return status; if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - udelay(2); + usec_delay(2); break; } } else { - status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_PHYXS, &ctrl); - if (status) + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + &ctrl); + if (status != IXGBE_SUCCESS) return status; - if (!(ctrl & MDIO_CTRL1_RESET)) { - udelay(2); + if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { + usec_delay(2); break; } } } - if (ctrl & MDIO_CTRL1_RESET) { - hw_dbg(hw, "PHY reset polling failed to complete.\n"); - return IXGBE_ERR_RESET_FAILED; + if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { + status = IXGBE_ERR_RESET_FAILED; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY reset polling failed to complete.\n"); } - return 0; +out: + return status; } /** - * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without - * the SWFW lock - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @device_type: 5 bit device type - * @phy_data: Pointer to read data from PHY register + * ixgbe_restart_auto_neg - Restart auto negotiation on the PHY + * @hw: pointer to hardware structure + **/ +void ixgbe_restart_auto_neg(struct ixgbe_hw *hw) +{ + u16 autoneg_reg; + + /* Check if PHY reset is blocked by MNG FW */ + if (ixgbe_check_reset_blocked(hw)) + return; + + /* Restart PHY auto-negotiation. */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + autoneg_reg |= IXGBE_MII_RESTART; + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); +} + +/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type + * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) + u16 *phy_data) { u32 i, data, command; /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - /* Check every 10 usec to see if the address cycle completed. 
+ /* + * Check every 10 usec to see if the address cycle completed. * The MDI Command bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; + break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address command did not complete.\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); + hw_dbg(hw, "PHY address command did not complete, returning IXGBE_ERR_PHY\n"); return IXGBE_ERR_PHY; } - /* Address cycle complete, setup and write the read + /* + * Address cycle complete, setup and write the read * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - /* Check every 10 usec to see if the address cycle + /* + * Check every 10 usec to see if the address cycle * completed. The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) @@ -522,27 +621,29 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY read command didn't complete\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); + hw_dbg(hw, "PHY read command didn't complete, returning IXGBE_ERR_PHY\n"); return IXGBE_ERR_PHY; } - /* Read operation is complete. Get the data + /* + * Read operation is complete. 
Get the data * from MSRWD */ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data >>= IXGBE_MSRWD_READ_DATA_SHIFT; *phy_data = (u16)(data); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register - * using the SWFW lock - this function is needed in most cases - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @device_type: 5 bit device type - * @phy_data: Pointer to read data from PHY register + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type + * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) @@ -550,24 +651,25 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 status; u32 gssr = hw->phy.phy_semaphore_mask; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { - status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, - phy_data); - hw->mac.ops.release_swfw_sync(hw, gssr); - } else { + DEBUGFUNC("ixgbe_read_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return IXGBE_ERR_SWFW_SYNC; - } + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, gssr); return status; } /** - * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register - * without SWFW lock - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register + * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) @@ -580,7 +682,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -591,7 +693,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) @@ -599,7 +701,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); return IXGBE_ERR_PHY; } @@ -609,17 +711,18 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - /* Check every 10 usec to see if the address cycle + /* + * Check every 10 
usec to see if the address cycle * completed. The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) @@ -627,20 +730,20 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY write cmd didn't complete\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); return IXGBE_ERR_PHY; } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register - * using SWFW lock- this function is needed in most cases - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) @@ -648,344 +751,53 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 status; u32 gssr = hw->phy.phy_semaphore_mask; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { - status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + DEBUGFUNC("ixgbe_write_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { - return IXGBE_ERR_SWFW_SYNC; + status = IXGBE_ERR_SWFW_SYNC; } return status; } -#define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr) - /** - * ixgbe_msca_cmd - Write the command register and poll for completion/timeout - * @hw: pointer to hardware structure - * @cmd: command register value to write - **/ -static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) -{ - IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd); - - return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd, - !(cmd & IXGBE_MSCA_MDI_COMMAND), 10, - 10 * IXGBE_MDIO_COMMAND_TIMEOUT); -} - -/** - * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags - * @hw: pointer to hardware structure - * @addr: address - * @regnum: register number - * @gssr: semaphore flags to acquire - **/ -static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr, - int regnum, u32 gssr) -{ - u32 hwaddr, cmd; - s32 data; - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) - return -EBUSY; - - hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; - if (regnum & MII_ADDR_C45) { - hwaddr |= regnum & GENMASK(21, 0); - cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; - } else { - hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; - cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | - IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND; - } - - data = ixgbe_msca_cmd(hw, cmd); - if (data < 0) - goto mii_bus_read_done; - - /* For a clause 45 access the address cycle just completed, we still - * need to do the read command, otherwise just get the data - */ - if (!(regnum & MII_ADDR_C45)) - goto do_mii_bus_read; - - cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND; - data = ixgbe_msca_cmd(hw, cmd); - if (data < 0) - goto mii_bus_read_done; - 
-do_mii_bus_read: - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); - -mii_bus_read_done: - hw->mac.ops.release_swfw_sync(hw, gssr); - return data; -} - -/** - * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags - * @hw: pointer to hardware structure - * @addr: address - * @regnum: register number - * @val: value to write - * @gssr: semaphore flags to acquire - **/ -static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr, - int regnum, u16 val, u32 gssr) -{ - u32 hwaddr, cmd; - s32 err; - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) - return -EBUSY; - - IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); - - hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; - if (regnum & MII_ADDR_C45) { - hwaddr |= regnum & GENMASK(21, 0); - cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; - } else { - hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; - cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | - IXGBE_MSCA_MDI_COMMAND; - } - - /* For clause 45 this is an address cycle, for clause 22 this is the - * entire transaction - */ - err = ixgbe_msca_cmd(hw, cmd); - if (err < 0 || !(regnum & MII_ADDR_C45)) - goto mii_bus_write_done; - - cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND; - err = ixgbe_msca_cmd(hw, cmd); - -mii_bus_write_done: - hw->mac.ops.release_swfw_sync(hw, gssr); - return err; -} - -/** - * ixgbe_mii_bus_read - Read a clause 22/45 register - * @hw: pointer to hardware structure - * @addr: address - * @regnum: register number - **/ -static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum) -{ - struct ixgbe_adapter *adapter = bus->priv; - struct ixgbe_hw *hw = &adapter->hw; - u32 gssr = hw->phy.phy_semaphore_mask; - - return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); -} - -/** - * ixgbe_mii_bus_write - Write a clause 22/45 register - * @hw: pointer to hardware structure - * @addr: address - * @regnum: register number - * @val: value to write - **/ -static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum, - u16 val) -{ - struct ixgbe_adapter *adapter = bus->priv; - struct ixgbe_hw *hw = &adapter->hw; - u32 gssr = hw->phy.phy_semaphore_mask; - - return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); -} - -/** - * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a - * @hw: pointer to hardware structure - * @addr: address - * @regnum: register number - **/ -static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr, - int regnum) -{ - struct ixgbe_adapter *adapter = bus->priv; - struct ixgbe_hw *hw = &adapter->hw; - u32 gssr = hw->phy.phy_semaphore_mask; - - gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; - return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); -} - -/** - * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a - * @hw: pointer to hardware structure - * @addr: address - * @regnum: register number - * @val: value to write - **/ -static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr, - int regnum, u16 val) -{ - struct ixgbe_adapter *adapter = bus->priv; - struct ixgbe_hw *hw = &adapter->hw; - u32 gssr = hw->phy.phy_semaphore_mask; - - gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; - return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); -} - -/** - * ixgbe_get_first_secondary_devfn - get first device downstream of root port - * @devfn: PCI_DEVFN of root port on domain 0, bus 0 - * - * Returns pci_dev pointer to 
PCI_DEVFN(0, 0) on subordinate side of root - * on domain 0, bus 0, devfn = 'devfn' - **/ -static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn) -{ - struct pci_dev *rp_pdev; - int bus; - - rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn); - if (rp_pdev && rp_pdev->subordinate) { - bus = rp_pdev->subordinate->number; - return pci_get_domain_bus_and_slot(0, bus, 0); - } - - return NULL; -} - -/** - * ixgbe_x550em_a_has_mii - is this the first ixgbe x550em_a PCI function? + * ixgbe_setup_phy_link_generic - Set and restart auto-neg * @hw: pointer to hardware structure * - * Returns true if hw points to lowest numbered PCI B:D.F x550_em_a device in - * the SoC. There are up to 4 MACs sharing a single MDIO bus on the x550em_a, - * but we only want to register one MDIO bus. - **/ -static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *func0_pdev; - - /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices - * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0 - * It's not valid for function 0 to be disabled and function 1 is up, - * so the lowest numbered ixgbe dev will be device 0 function 0 on one - * of those two root ports - */ - func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0)); - if (func0_pdev) { - if (func0_pdev == pdev) - return true; - else - return false; - } - func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0)); - if (func0_pdev == pdev) - return true; - - return false; -} - -/** - * ixgbe_mii_bus_init - mii_bus structure setup - * @hw: pointer to hardware structure - * - * Returns 0 on success, negative on failure - * - * ixgbe_mii_bus_init initializes a mii_bus structure in adapter - **/ -s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - struct pci_dev *pdev = adapter->pdev; - struct device *dev = &adapter->netdev->dev; - struct mii_bus *bus; - int err = -ENODEV; - - bus = devm_mdiobus_alloc(dev); - if (!bus) - return -ENOMEM; - - switch (hw->device_id) { - /* C3000 SoCs */ - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - case IXGBE_DEV_ID_X550EM_A_SFP_N: - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_SFP: - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - if (!ixgbe_x550em_a_has_mii(hw)) - goto ixgbe_no_mii_bus; - bus->read = &ixgbe_x550em_a_mii_bus_read; - bus->write = &ixgbe_x550em_a_mii_bus_write; - break; - default: - bus->read = &ixgbe_mii_bus_read; - bus->write = &ixgbe_mii_bus_write; - break; - } - - /* Use the position of the device in the PCI hierarchy as the id */ - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name, - pci_name(pdev)); - - bus->name = "ixgbe-mdio"; - bus->priv = adapter; - bus->parent = dev; - bus->phy_mask = GENMASK(31, 0); - - /* Support clause 22/45 natively. ixgbe_probe() sets MDIO_EMULATE_C22 - * unfortunately that causes some clause 22 frames to be sent with - * clause 45 addressing. We don't want that. 
- */ - hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; - - err = mdiobus_register(bus); - if (!err) { - adapter->mii_bus = bus; - return 0; - } - -ixgbe_no_mii_bus: - devm_mdiobus_free(dev, bus); - return err; -} - -/** - * ixgbe_setup_phy_link_generic - Set and restart autoneg - * @hw: pointer to hardware structure - * - * Restart autonegotiation and PHY and waits for completion. + * Restart auto-negotiation and PHY and waits for completion. **/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; + DEBUGFUNC("ixgbe_setup_phy_link_generic"); + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); - autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && (speed & IXGBE_LINK_SPEED_10GB_FULL)) - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, &autoneg_reg); + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); if (hw->mac.type == ixgbe_mac_X550) { /* Set or unset auto-negotiation 5G advertisement */ @@ -1009,45 +821,44 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, autoneg_reg); + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); - autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); + autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | + IXGBE_MII_100BASE_T_ADVERTISE_HALF); if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && (speed & IXGBE_LINK_SPEED_100_FULL)) - autoneg_reg |= ADVERTISE_100FULL; + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); - - /* Blocked by MNG FW so don't reset PHY */ - if (ixgbe_check_reset_blocked(hw)) - return 0; - - /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, &autoneg_reg); - - autoneg_reg |= MDIO_AN_CTRL1_RESTART; - - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + ixgbe_restart_auto_neg(hw); return status; } /** - * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: unused + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: unused **/ s32 
ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - /* Clear autoneg_advertised and set new values based on input link + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); + + /* + * Clear autoneg_advertised and set new values based on input link * speed. */ hw->phy.autoneg_advertised = 0; @@ -1071,50 +882,46 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; /* Setup link based on the new speed settings */ - if (hw->phy.ops.setup_link) - hw->phy.ops.setup_link(hw); + ixgbe_setup_phy_link(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_get_copper_speeds_supported - Get copper link speed from phy + * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy * @hw: pointer to hardware structure * * Determines the supported link capabilities by reading the PHY auto * negotiation register. - */ + **/ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { - u16 speed_ability; s32 status; + u16 speed_ability; - status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &speed_ability); if (status) return status; - if (speed_ability & MDIO_SPEED_10G) + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_100) + if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; switch (hw->mac.type) { - case ixgbe_mac_X550: - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; - break; case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; break; default: break; } - return 0; + return status; } /** @@ -1122,12 +929,14 @@ * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value - */ + **/ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); *autoneg = true; if (!hw->phy.speeds_supported) @@ -1138,24 +947,26 @@ } /** - * ixgbe_check_phy_link_tnx - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: link speed - * @link_up: status of link + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: current link speed + * @link_up: true if link is up, false otherwise * - * Reads the VS1 register to determine if link is up and the current speed for - * the PHY. + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. 
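+ *
+ * Minimal usage sketch (hypothetical caller, variable names assumed):
+ *   ixgbe_link_speed speed;
+ *   bool link_up;
+ *   status = ixgbe_check_phy_link_tnx(hw, &speed, &link_up);
+ *   if (status == IXGBE_SUCCESS && link_up)
+ *       speed now holds the PHY rate (IXGBE_LINK_SPEED_10GB_FULL
+ *       by default).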
**/ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up) { - s32 status; + s32 status = IXGBE_SUCCESS; u32 time_out; u32 max_time_out = 10; u16 phy_link = 0; u16 phy_speed = 0; u16 phy_data = 0; + DEBUGFUNC("ixgbe_check_phy_link_tnx"); + /* Initialize speed and link to default case */ *link_up = false; *speed = IXGBE_LINK_SPEED_10GB_FULL; @@ -1166,15 +977,14 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * be changed for other copper PHYs. */ for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); + usec_delay(10); status = hw->phy.ops.read_reg(hw, - MDIO_STAT1, - MDIO_MMD_VEND1, - &phy_data); - phy_link = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { *link_up = true; if (phy_speed == @@ -1188,41 +998,41 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, } /** - * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * ixgbe_setup_phy_link_tnx - Set and restart auto-neg * @hw: pointer to hardware structure * - * Restart autonegotiation and PHY and waits for completion. - * This function always returns success, this is nessary since - * it is called via a function pointer that could call other - * functions that could return an error. + * Restart auto-negotiation and PHY and waits for completion. **/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; + DEBUGFUNC("ixgbe_setup_phy_link_tnx"); + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); if (speed & IXGBE_LINK_SPEED_10GB_FULL) { /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { /* Set or unset auto-negotiation 1G advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - MDIO_MMD_AN, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; @@ -1230,44 +1040,70 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - MDIO_MMD_AN, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_100_FULL) { /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - autoneg_reg &= ~(ADVERTISE_100FULL | - ADVERTISE_100HALF); + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; if 
(hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - autoneg_reg |= ADVERTISE_100FULL; + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } - /* Blocked by MNG FW so don't reset PHY */ - if (ixgbe_check_reset_blocked(hw)) - return 0; - - /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, &autoneg_reg); - - autoneg_reg |= MDIO_AN_CTRL1_RESTART; - - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, autoneg_reg); - return 0; + ixgbe_restart_auto_neg(hw); + return status; } /** - * ixgbe_reset_phy_nl - Performs a PHY reset - * @hw: pointer to hardware structure + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) { @@ -1275,37 +1111,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) bool end_data = false; u16 list_offset, data_offset; u16 phy_data = 0; - s32 ret_val; + s32 ret_val = IXGBE_SUCCESS; u32 i; + DEBUGFUNC("ixgbe_reset_phy_nl"); + /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) - return 0; + goto out; - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); /* reset the PHY and poll for completion */ - hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - (phy_data | MDIO_CTRL1_RESET)); + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + (phy_data | IXGBE_MDIO_PHY_XS_RESET)); for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - &phy_data); - if ((phy_data & MDIO_CTRL1_RESET) == 0) + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) break; - usleep_range(10000, 20000); + msec_delay(10); } - if ((phy_data & MDIO_CTRL1_RESET) != 0) { + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { hw_dbg(hw, "PHY reset did not complete.\n"); - return IXGBE_ERR_PHY; + ret_val = IXGBE_ERR_PHY; + goto out; } /* Get init offsets */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); data_offset++; @@ -1323,24 +1164,25 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) case IXGBE_DELAY_NL: data_offset++; hw_dbg(hw, 
"DELAY: %d MS\n", edata); - usleep_range(edata * 1000, edata * 2000); + msec_delay(edata); break; case IXGBE_DATA_NL: hw_dbg(hw, "DATA:\n"); data_offset++; - ret_val = hw->eeprom.ops.read(hw, data_offset++, + ret_val = hw->eeprom.ops.read(hw, data_offset, &phy_offset); if (ret_val) goto err_eeprom; + data_offset++; for (i = 0; i < edata; i++) { ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); if (ret_val) goto err_eeprom; hw->phy.ops.write_reg(hw, phy_offset, - MDIO_MMD_PMAPMD, eword); + IXGBE_TWINAX_DEV, eword); hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, - phy_offset); + phy_offset); data_offset++; phy_offset++; } @@ -1355,53 +1197,65 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) hw_dbg(hw, "SOL\n"); } else { hw_dbg(hw, "Bad control value\n"); - return IXGBE_ERR_PHY; + ret_val = IXGBE_ERR_PHY; + goto out; } break; default: hw_dbg(hw, "Bad control type\n"); - return IXGBE_ERR_PHY; + ret_val = IXGBE_ERR_PHY; + goto out; } } +out: return ret_val; err_eeprom: - hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); return IXGBE_ERR_PHY; } /** - * ixgbe_identify_module_generic - Identifies module type - * @hw: pointer to hardware structure + * ixgbe_identify_module_generic - Identifies module type + * @hw: pointer to hardware structure * - * Determines HW type and calls appropriate function. + * Determines HW type and calls appropriate function. **/ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) { + s32 status = IXGBE_ERR_SFP_NOT_PRESENT; + + DEBUGFUNC("ixgbe_identify_module_generic"); + switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: - return ixgbe_identify_sfp_module_generic(hw); + status = ixgbe_identify_sfp_module_generic(hw); + break; + case ixgbe_media_type_fiber_qsfp: - return ixgbe_identify_qsfp_module_generic(hw); + status = ixgbe_identify_qsfp_module_generic(hw); + break; + default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + status = IXGBE_ERR_SFP_NOT_PRESENT; + break; } - return IXGBE_ERR_SFP_NOT_PRESENT; + return status; } /** - * ixgbe_identify_sfp_module_generic - Identifies SFP modules - * @hw: pointer to hardware structure + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure * - * Searches for and identifies the SFP module and assigns appropriate PHY type. + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
**/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; - s32 status; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; @@ -1412,241 +1266,258 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) u8 cable_spec = 0; u16 enforce_sfp = 0; + DEBUGFUNC("ixgbe_identify_sfp_module_generic"); + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; } - /* LAN ID is needed for sfp_type determination */ + /* LAN ID is needed for I2C access */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); - if (status) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, - &comp_codes_1g); - - if (status) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, - &comp_codes_10g); - - if (status) - goto err_read_i2c_eeprom; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_CABLE_TECHNOLOGY, - &cable_tech); - - if (status) - goto err_read_i2c_eeprom; - - /* ID Module - * ========= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CORE0 - 82599-specific - * 4 SFP_DA_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - * 7 SFP_act_lmt_DA_CORE0 - 82599-specific - * 8 SFP_act_lmt_DA_CORE1 - 82599-specific - * 9 SFP_1g_cu_CORE0 - 82599-specific - * 10 SFP_1g_cu_CORE1 - 82599-specific - * 11 SFP_1g_sx_CORE0 - 82599-specific - * 12 SFP_1g_sx_CORE1 - 82599-specific - */ - if (hw->mac.type == ixgbe_mac_82598EB) { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.sfp_type = ixgbe_sfp_type_da_cu; - else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_sr; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_lr; - else - hw->phy.sfp_type = ixgbe_sfp_type_unknown; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; } else { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core0; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if 
(comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; else - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core1; - } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { - hw->phy.ops.read_i2c_eeprom( - hw, IXGBE_SFF_CABLE_SPEC_COMP, - &cable_spec); - if (cable_spec & - IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } else { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core0; + ixgbe_sfp_type_da_cu_core0; else hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core1; + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; } else { - hw->phy.sfp_type = - ixgbe_sfp_type_unknown; + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { + status = IXGBE_SUCCESS; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { + status = IXGBE_SUCCESS; + goto out; + } + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = IXGBE_SUCCESS; + } else { + hw_dbg(hw, "SFP+ module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } } - } else if (comp_codes_10g & - (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_lx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_lx_core1; } else { - hw->phy.sfp_type = ixgbe_sfp_type_unknown; + status = IXGBE_SUCCESS; } } - if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = true; - - /* Determine if the SFP+ PHY is dual speed or not. */ - hw->phy.multispeed_fiber = false; - if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = true; - - /* Determine PHY vendor */ - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = identifier; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE0, - &oui_bytes[0]); - - if (status != 0) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE1, - &oui_bytes[1]); - - if (status != 0) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE2, - &oui_bytes[2]); - - if (status != 0) - goto err_read_i2c_eeprom; - - vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); - - switch (vendor_oui) { - case IXGBE_SFF_VENDOR_OUI_TYCO: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_tyco; - break; - case IXGBE_SFF_VENDOR_OUI_FTL: - if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = ixgbe_phy_sfp_ftl_active; - else - hw->phy.type = ixgbe_phy_sfp_ftl; - break; - case IXGBE_SFF_VENDOR_OUI_AVAGO: - hw->phy.type = ixgbe_phy_sfp_avago; - break; - case IXGBE_SFF_VENDOR_OUI_INTEL: - hw->phy.type = ixgbe_phy_sfp_intel; - break; - default: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_unknown; - else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_active_unknown; - else - hw->phy.type = ixgbe_phy_sfp_unknown; - break; - } - } - - /* Allow any DA cable vendor */ - if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | - IXGBE_SFF_DA_ACTIVE_CABLE)) - return 0; - - /* Verify supported 1G SFP modules */ - if (comp_codes_10g == 0 && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == 
ixgbe_sfp_type_1g_sx_core1)) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - - /* Anything else 82598-based is supported */ - if (hw->mac.type == ixgbe_mac_82598EB) - return 0; - - hw->mac.ops.get_device_caps(hw, &enforce_sfp); - if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { - /* Make sure we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_sfp_intel) - return 0; - if (hw->allow_unsupported_sfp) { - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); - return 0; - } - hw_dbg(hw, "SFP+ module not supported\n"); - hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - return 0; +out: + return status; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1657,16 +1528,77 @@ err_read_i2c_eeprom: return IXGBE_ERR_SFP_NOT_PRESENT; } +/** + * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current SFP. + */ +u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); + + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return physical_layer; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; + break; + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + + return physical_layer; +} + /** * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules * @hw: pointer to hardware 
structure * * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ -static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; - s32 status; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; @@ -1679,23 +1611,27 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) u8 device_tech = 0; bool active_cable = false; + DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; } - /* LAN ID is needed for sfp_type determination */ + /* LAN ID is needed for I2C access */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; } hw->phy.id = identifier; @@ -1703,13 +1639,13 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, &comp_codes_1g); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { @@ -1730,8 +1666,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) if (!active_cable) { /* check for active DA cables that pre-date - * SFF-8436 v3.6 - */ + * SFF-8436 v3.6 */ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_CONNECTOR, &connector); @@ -1763,7 +1698,8 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) } else { /* unsupported module type */ hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; } } @@ -1773,61 +1709,68 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) /* Determine if the QSFP+ PHY is dual speed or not. 
*/ hw->phy.multispeed_fiber = false; if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) hw->phy.multispeed_fiber = true; /* Determine PHY vendor for optical modules */ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { + IXGBE_SFF_10GBASELR_CAPABLE)) { status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, - &oui_bytes[0]); + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, - &oui_bytes[1]); + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, - &oui_bytes[2]); + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) hw->phy.type = ixgbe_phy_qsfp_intel; else hw->phy.type = ixgbe_phy_qsfp_unknown; - hw->mac.ops.get_device_caps(hw, &enforce_sfp); + ixgbe_get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { /* Make sure we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_qsfp_intel) - return 0; - if (hw->allow_unsupported_sfp) { - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); - return 0; + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = IXGBE_SUCCESS; + } else { + hw_dbg(hw, "QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } } - hw_dbg(hw, "QSFP module not supported\n"); - hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = IXGBE_SUCCESS; } - return 0; } - return 0; + +out: + return status; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1838,13 +1781,13 @@ err_read_i2c_eeprom: } /** - * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence - * @hw: pointer to hardware structure - * @list_offset: offset to the SFP ID list - * @data_offset: offset to the SFP data block + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block * - * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if - * so it returns the offsets to the phy init sequence block. + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. **/ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, @@ -1853,6 +1796,8 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 sfp_id; u16 sfp_type = hw->phy.sfp_type; + DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) return IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -1880,8 +1825,9 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, /* Read offset to PHY init contents */ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { - hw_err(hw, "eeprom read at %d failed\n", - IXGBE_PHY_INIT_OFFSET_NL); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + IXGBE_PHY_INIT_OFFSET_NL); return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; } @@ -1921,39 +1867,42 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, return IXGBE_ERR_SFP_NOT_SUPPORTED; } - return 0; + return IXGBE_SUCCESS; err_phy: - hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", *list_offset); return IXGBE_ERR_PHY; } /** - * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read * - * Performs byte read operation to SFP module's EEPROM over I2C interface. + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
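/*
 * A minimal sketch (not part of the patch) of the wrapper layering in the
 * EEPROM accessors that follow: ixgbe_read_i2c_eeprom_generic and the
 * SFF-8472 variant differ only in the fixed I2C device address they hand to
 * the byte-level routine — 0xA0 for the SFP ID EEPROM, 0xA2 for the
 * diagnostics page. read_i2c_byte() is a hypothetical stand-in for
 * hw->phy.ops.read_i2c_byte().
 */
#include <stdint.h>

#define EEPROM_DEV_ADDR		0xA0	/* IXGBE_I2C_EEPROM_DEV_ADDR */
#define EEPROM_DEV_ADDR2	0xA2	/* IXGBE_I2C_EEPROM_DEV_ADDR2 */

extern int read_i2c_byte(uint8_t offset, uint8_t dev_addr, uint8_t *data);

static int read_sfp_id_byte(uint8_t offset, uint8_t *data)
{
	return read_i2c_byte(offset, EEPROM_DEV_ADDR, data);	/* ID fields */
}

static int read_sfp_ddm_byte(uint8_t offset, uint8_t *data)
{
	return read_i2c_byte(offset, EEPROM_DEV_ADDR2, data);	/* DDM page */
}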
**/ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { + DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); + return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); } /** - * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: byte offset at address 0xA2 - * @sff8472_data: value read + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @sff8472_data: value read * - * Performs byte read operation to SFP module's SFF-8472 data over I2C + * Performs byte read operation to SFP module's SFF-8472 data over I2C **/ -s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR2, @@ -1961,16 +1910,18 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to write - * @eeprom_data: value to write + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write * - * Performs byte write operation to SFP module's EEPROM over I2C interface. + * Performs byte write operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data) { + DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); + return hw->phy.ops.write_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); @@ -1982,7 +1933,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, * @offset: eeprom offset to be read * @addr: I2C address to be read */ -static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) { if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && offset == IXGBE_SFF_IDENTIFIER && @@ -1992,32 +1943,33 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) } /** - * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: device address - * @data: value read - * @lock: true if to take and release semaphore + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * @lock: true if to take and release semaphore * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. - */ -static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ +STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data, bool lock) { s32 status; u32 max_retry = 10; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; - bool nack = true; + bool nack = 1; + *data = 0; + + DEBUGFUNC("ixgbe_read_i2c_byte_generic"); if (hw->mac.type >= ixgbe_mac_X550) max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; - *data = 0; - do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -2026,72 +1978,70 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, /* Device Address and write indication */ status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_start(hw); /* Device Address and read indication */ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; - status = ixgbe_clock_in_i2c_byte(hw, data); - if (status != 0) - goto fail; + ixgbe_clock_in_i2c_byte(hw, data); status = ixgbe_clock_out_i2c_bit(hw, nack); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return 0; + return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); if (lock) { hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(100); + msec_delay(100); } - retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read error - Retrying.\n"); else hw_dbg(hw, "I2C byte read error.\n"); + retry++; - } while (retry < max_retry); + } while (retry <= max_retry); return status; } /** - * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: device address - * @data: value read + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. - */ + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { @@ -2100,15 +2050,15 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: device address - * @data: value read + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. - */ + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { @@ -2117,17 +2067,17 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: device address - * @data: value to write - * @lock: true if to take and release semaphore + * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write + * @lock: true if to take and release semaphore * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. - */ -static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { s32 status; @@ -2135,49 +2085,52 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; - if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + DEBUGFUNC("ixgbe_write_i2c_byte_generic"); + + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != + IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; do { ixgbe_i2c_start(hw); status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, data); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return 0; + return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); - retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte write error - Retrying.\n"); else hw_dbg(hw, "I2C byte write error.\n"); - } while (retry < max_retry); + retry++; + } while (retry <= max_retry); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); @@ -2186,15 +2139,15 @@ fail: } /** - * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: device address - * @data: value to write + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. - */ + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
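/*
 * A minimal sketch (not part of the patch) of the retry discipline the byte
 * read/write paths above share: attempt the full transaction, clear the bus
 * on failure, and give up after a bounded number of attempts (looping while
 * retry <= max_retry, as the updated code does). try_xfer() and bus_clear()
 * are hypothetical stand-ins for the clock-out/ACK sequence and
 * ixgbe_i2c_bus_clear().
 */
extern int try_xfer(void);	/* one full I2C transaction, 0 on success */
extern void bus_clear(void);	/* nine SCL pulses to free a stuck SDA */

static int xfer_with_retry(unsigned int max_retry)
{
	unsigned int retry;
	int status = -1;

	for (retry = 0; retry <= max_retry; retry++) {
		status = try_xfer();
		if (status == 0)
			return 0;	/* transaction completed */
		bus_clear();		/* recover the bus before retrying */
	}
	return status;			/* last error wins */
}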
+ **/ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { @@ -2203,15 +2156,15 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: device address - * @data: value to write + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. - */ + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { @@ -2220,163 +2173,171 @@ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_i2c_start - Sets I2C start condition - * @hw: pointer to hardware structure + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure * - * Sets I2C start condition (High -> Low on SDA while SCL is High) - * Set bit-bang mode on X550 hardware. + * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. **/ -static void ixgbe_i2c_start(struct ixgbe_hw *hw) +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - i2cctl |= IXGBE_I2C_BB_EN(hw); + DEBUGFUNC("ixgbe_i2c_start"); + + i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for start condition (4.7us) */ - udelay(IXGBE_I2C_T_SU_STA); + usec_delay(IXGBE_I2C_T_SU_STA); ixgbe_set_i2c_data(hw, &i2cctl, 0); /* Hold time for start condition (4us) */ - udelay(IXGBE_I2C_T_HD_STA); + usec_delay(IXGBE_I2C_T_HD_STA); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); } /** - * ixgbe_i2c_stop - Sets I2C stop condition - * @hw: pointer to hardware structure + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure * - * Sets I2C stop condition (Low -> High on SDA while SCL is High) - * Disables bit-bang mode and negates data output enable on X550 - * hardware. + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
**/ -static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); - u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); - u32 bb_en_bit = IXGBE_I2C_BB_EN(hw); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_i2c_stop"); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for stop condition (4us) */ - udelay(IXGBE_I2C_T_SU_STO); + usec_delay(IXGBE_I2C_T_SU_STO); ixgbe_set_i2c_data(hw, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ - udelay(IXGBE_I2C_T_BUF); + usec_delay(IXGBE_I2C_T_BUF); if (bb_en_bit || data_oe_bit || clk_oe_bit) { i2cctl &= ~bb_en_bit; i2cctl |= data_oe_bit | clk_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } } /** - * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C - * @hw: pointer to hardware structure - * @data: data byte to clock in + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in * - * Clocks in one byte data via I2C data/clock + * Clocks in one byte data via I2C data/clock **/ -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +STATIC void ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { s32 i; - bool bit = false; + bool bit = 0; + + DEBUGFUNC("ixgbe_clock_in_i2c_byte"); *data = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); *data |= bit << i; } - - return 0; } /** - * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C - * @hw: pointer to hardware structure - * @data: data byte clocked out + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out * - * Clocks out one byte data via I2C data/clock + * Clocks out one byte data via I2C data/clock **/ -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) { - s32 status; + s32 status = IXGBE_SUCCESS; s32 i; u32 i2cctl; - bool bit = false; + bool bit; + + DEBUGFUNC("ixgbe_clock_out_i2c_byte"); for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = ixgbe_clock_out_i2c_bit(hw, bit); - if (status != 0) + if (status != IXGBE_SUCCESS) break; } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - i2cctl |= IXGBE_I2C_DATA_OUT(hw); - i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; } /** - * ixgbe_get_i2c_ack - Polls for I2C ACK - * @hw: pointer to hardware structure + * ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure * - * Clocks in/out one bit via I2C data/clock + * Clocks in/out one bit via I2C data/clock **/ -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); - s32 status = 0; + u32 data_oe_bit = 
IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; - bool ack = true; + bool ack = 1; + + DEBUGFUNC("ixgbe_get_i2c_ack"); if (data_oe_bit) { - i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); /* Poll for ACK. Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ack = ixgbe_get_i2c_data(hw, &i2cctl); - udelay(1); - if (ack == 0) + usec_delay(1); + if (!ack) break; } - if (ack == 1) { + if (ack) { hw_dbg(hw, "I2C ack was not received.\n"); status = IXGBE_ERR_I2C; } @@ -2384,212 +2345,233 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); return status; } /** - * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock - * @hw: pointer to hardware structure - * @data: read data value + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value * - * Clocks in one bit via I2C data/clock + * Clocks in one bit via I2C data/clock **/ -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +STATIC void ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_clock_in_i2c_bit"); if (data_oe_bit) { - i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); - - return 0; + usec_delay(IXGBE_I2C_T_LOW); } /** - * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock - * @hw: pointer to hardware structure - * @data: data value to write + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write * - * Clocks out one bit via I2C data/clock + * Clocks out one bit via I2C data/clock **/ -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_clock_out_i2c_bit"); status = ixgbe_set_i2c_data(hw, &i2cctl, data); - if (status == 0) { + if (status == IXGBE_SUCCESS) { ixgbe_raise_i2c_clk(hw, 
&i2cctl); /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time. */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); } else { - hw_dbg(hw, "I2C data was not set to %X\n", data); - return IXGBE_ERR_I2C; + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "I2C data was not set to %X\n", data); } - return 0; + return status; } + /** - * ixgbe_raise_i2c_clk - Raises the I2C SCL clock - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register * - * Raises the I2C clock line '0'->'1' - * Negates the I2C clock output enable on X550 hardware. + * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. **/ -static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; + DEBUGFUNC("ixgbe_raise_i2c_clk"); + if (clk_oe_bit) { *i2cctl |= clk_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); } for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ - udelay(IXGBE_I2C_T_RISE); + usec_delay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - if (i2cctl_r & IXGBE_I2C_CLK_IN(hw)) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } /** - * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register * - * Lowers the I2C clock line '1'->'0' - * Asserts the I2C clock output enable on X550 hardware. + * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. **/ -static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { + DEBUGFUNC("ixgbe_lower_i2c_clk"); - *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); - *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw); + *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ - udelay(IXGBE_I2C_T_FALL); + usec_delay(IXGBE_I2C_T_FALL); } /** - * ixgbe_set_i2c_data - Sets the I2C data bit - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * @data: I2C data value (0 or 1) to set + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set * - * Sets the I2C data bit - * Asserts the I2C data output enable on X550 hardware. 
+ * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. **/ -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_i2c_data"); if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT(hw); + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); + *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); *i2cctl &= ~data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ - udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); if (!data) /* Can't verify data in this case */ - return 0; + return IXGBE_SUCCESS; if (data_oe_bit) { *i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); } /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { - hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); - return IXGBE_ERR_I2C; + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "Error - I2C data was not set to %X.\n", + data); } - return 0; + return status; } /** - * ixgbe_get_i2c_data - Reads the I2C SDA data bit - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register * - * Returns the I2C data bit value - * Negates the I2C data output enable on X550 hardware. + * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. **/ -static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + bool data; + + DEBUGFUNC("ixgbe_get_i2c_data"); if (data_oe_bit) { *i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); - udelay(IXGBE_I2C_T_FALL); + usec_delay(IXGBE_I2C_T_FALL); } - if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) - return true; - return false; + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + data = 1; + else + data = 0; + + return data; } /** - * ixgbe_i2c_bus_clear - Clears the I2C bus - * @hw: pointer to hardware structure + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure * - * Clears the I2C bus by sending nine clock pulses. - * Used when data line is stuck low. + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
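/*
 * A minimal sketch (not part of the patch) of the nine-pulse recovery that
 * ixgbe_i2c_bus_clear performs below: clocking SCL nine times lets a slave
 * stuck mid-byte shift out its remaining bits and release SDA. raise_scl(),
 * lower_scl() and delay_us() are hypothetical stand-ins for
 * ixgbe_raise_i2c_clk(), ixgbe_lower_i2c_clk() and usec_delay().
 */
extern void raise_scl(void);
extern void lower_scl(void);
extern void delay_us(unsigned int us);

static void bus_clear_nine_pulses(void)
{
	int i;

	for (i = 0; i < 9; i++) {
		raise_scl();
		delay_us(4);	/* SCL high >= 4 us (IXGBE_I2C_T_HIGH) */
		lower_scl();
		delay_us(5);	/* SCL low >= 4.7 us (IXGBE_I2C_T_LOW) */
	}
}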
**/ -static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { u32 i2cctl; u32 i; + DEBUGFUNC("ixgbe_i2c_bus_clear"); + ixgbe_i2c_start(hw); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -2597,12 +2579,12 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) ixgbe_raise_i2c_clk(hw, &i2cctl); /* Min high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Min low period of clock is 4.7us*/ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); } ixgbe_i2c_start(hw); @@ -2612,45 +2594,50 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) } /** - * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. - * @hw: pointer to hardware structure + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. + * @hw: pointer to hardware structure * - * Checks if the LASI temp alarm status was triggered due to overtemp + * Checks if the LASI temp alarm status was triggered due to overtemp **/ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u16 phy_data = 0; + DEBUGFUNC("ixgbe_tn_check_overtemp"); + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) - return 0; + goto out; /* Check that the LASI temp alarm status was triggered */ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, - MDIO_MMD_PMAPMD, &phy_data); + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) - return 0; + goto out; - return IXGBE_ERR_OVERTEMP; + status = IXGBE_ERR_OVERTEMP; + ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); +out: + return status; } -/** ixgbe_set_copper_phy_power - Control power for copper phy - * @hw: pointer to hardware structure - * @on: true for on, false for off - **/ +/** + * ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) { u32 status; u16 reg; - /* Bail if we don't have copper phy */ - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return 0; - if (!on && ixgbe_mng_present(hw)) return 0; - status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); if (status) return status; @@ -2662,6 +2649,8 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; } - status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); return status; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 6544c4539c0d..acad0c740fda 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ #ifndef _IXGBE_PHY_H_ #define _IXGBE_PHY_H_ #include "ixgbe_type.h" -#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 -#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF /* EEPROM byte offsets */ #define IXGBE_SFF_IDENTIFIER 0x0 @@ -33,106 +34,112 @@ #define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 /* Bitmasks */ -#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 -#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 #define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 -#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 -#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 -#define IXGBE_SFF_1GBASET_CAPABLE 0x8 -#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 -#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 -#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 -#define IXGBE_SFF_ADDRESSING_MODE 0x4 -#define IXGBE_SFF_DDM_IMPLEMENTED 0x40 -#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 -#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 -#define IXGBE_I2C_EEPROM_READ_MASK 0x100 -#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 -#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 -#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 -#define IXGBE_CS4227 0xBE /* CS4227 address */ -#define IXGBE_CS4227_GLOBAL_ID_LSB 0 -#define IXGBE_CS4227_GLOBAL_ID_MSB 1 -#define IXGBE_CS4227_SCRATCH 2 -#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F -#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ -#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ -#define IXGBE_CS4227_RESET_PENDING 0x1357 -#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 -#define IXGBE_CS4227_RETRIES 15 -#define IXGBE_CS4227_EFUSE_STATUS 0x0181 -#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */ -#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */ -#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */ -#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ -#define IXGBE_CS4227_EEPROM_STATUS 0x5001 -#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 -#define IXGBE_CS4227_SPEED_1G 0x8000 -#define IXGBE_CS4227_SPEED_10G 0 -#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 -#define IXGBE_CS4227_EDC_MODE_SR 0x0004 -#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 -#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ -#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ -#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ -#define IXGBE_PE 0xE0 /* Port expander addr */ -#define IXGBE_PE_OUTPUT 1 /* Output reg offset */ -#define IXGBE_PE_CONFIG 3 /* Config reg offset */ -#define IXGBE_PE_BIT1 BIT(1) + +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define 
IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F +#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ +#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander address */ +#define IXGBE_PE_OUTPUT 1 /* Output register offset */ +#define IXGBE_PE_CONFIG 3 /* Config register offset */ +#define IXGBE_PE_BIT1 (1 << 1) /* Flow control defines */ -#define IXGBE_TAF_SYM_PAUSE 0x400 -#define IXGBE_TAF_ASM_PAUSE 0x800 +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 /* Bit-shift macros */ -#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 -#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 -#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ -#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 -#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 -#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 -#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 /* I2C SDA and SCL timing parameters for standard mode */ -#define IXGBE_I2C_T_HD_STA 4 -#define IXGBE_I2C_T_LOW 5 -#define IXGBE_I2C_T_HIGH 4 -#define IXGBE_I2C_T_SU_STA 5 -#define IXGBE_I2C_T_HD_DATA 5 -#define IXGBE_I2C_T_SU_DATA 1 -#define IXGBE_I2C_T_RISE 1 -#define IXGBE_I2C_T_FALL 1 -#define IXGBE_I2C_T_SU_STO 4 -#define IXGBE_I2C_T_BUF 5 +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 -#define IXGBE_SFP_DETECT_RETRIES 2 +#ifndef IXGBE_SFP_DETECT_RETRIES +#define IXGBE_SFP_DETECT_RETRIES 10 -#define IXGBE_TN_LASI_STATUS_REG 0x9005 -#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 +#endif /* IXGBE_SFP_DETECT_RETRIES */ +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 -/* SFP+ SFF-8472 Compliance code */ -#define IXGBE_SFF_SFF_8472_UNSUP 0x00 - -s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw); +/* SFP+ SFF-8472 Compliance */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 
phy_addr); +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +void ixgbe_restart_auto_neg(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); -s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, @@ -140,18 +147,24 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); -bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); /* PHY specific */ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); @@ -166,10 +179,9 @@ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); -s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val, bool lock); s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c new file mode 100644 index 000000000000..31bf98e7a40f --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c @@ -0,0 +1,917 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_type.h" + +#ifdef IXGBE_PROCFS +#ifndef IXGBE_SYSFS + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/proc_fs.h> +#include <linux/device.h> +#include <linux/netdevice.h> + +static struct proc_dir_entry *ixgbe_top_dir = NULL; + +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct ixgbe_adapter *adapter; +#endif + if (netdev == NULL) + return NULL; + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} + +bool ixgbe_thermal_present(struct ixgbe_adapter *adapter) +{ + s32 status; + if (adapter == NULL) + return false; + status = ixgbe_init_thermal_sensor_thresh_generic(&(adapter->hw)); + if (status != IXGBE_SUCCESS) + return false; + + return true; +} + +static int ixgbe_fwbanner(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%s\n", adapter->eeprom_id); +} + +static int ixgbe_porttype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + return snprintf(page, count, "%d\n", + test_bit(__IXGBE_DOWN, &adapter->state)); +} + +static int ixgbe_portspeed(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + int speed = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + speed = 1; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed = 10; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + speed = 100; + break; + } + return snprintf(page, count, "%d\n", speed); +} + +static int ixgbe_wqlflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int ixgbe_xflowctl(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_hw *hw; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int ixgbe_rxdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + 
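For reference: every handler in this new file uses the legacy procfs read_proc interface consumed by create_proc_read_entry(), which was removed from mainline around 3.10, so this code only builds against older kernels (hence the IXGBE_PROCFS guard). A minimal sketch of what the same fwbanner node would look like on a seq_file kernel, assuming proc_create_single_data() (available since 4.18) -- illustrative only, not part of this patch:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int ixgbe_fwbanner_show(struct seq_file *m, void *v)
{
	/* proc_create_single_data() routes its data argument to
	 * m->private via single_open() */
	struct ixgbe_adapter *adapter = m->private;

	if (!adapter)
		return -ENODEV;
	seq_printf(m, "%s\n", adapter->eeprom_id);
	return 0;
}

/* registration, e.g. from ixgbe_procfs_init():
 *	proc_create_single_data("fwbanner", 0444, adapter->info_dir,
 *				ixgbe_fwbanner_show, adapter);
 */

The seq_file form also drops the count/eof bookkeeping: the core handles buffer sizing, so the show callback cannot silently truncate the way a bare snprintf(page, count, ...) can.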
+static int ixgbe_rxerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int ixgbe_rxupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPR)); +} + +static int ixgbe_rxmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPRC)); +} + +static int ixgbe_rxbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPRC)); +} + +static int ixgbe_txupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPT)); +} + +static int ixgbe_txmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPTC)); +} + +static int ixgbe_txbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPTC)); +} + +static int ixgbe_txerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + 
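The rx/tx counter handlers above and below all share one shape: a single integer printed as one line of text. Given the layout built by ixgbe_procfs_init() later in this file (/proc/driver/ixgbe/<pci-id>/info/<node>), a node such as rxerrors can be read back from userspace as sketched here -- the PCI id string is whatever pci_name() reports for the port, and the helper name is purely illustrative:

#include <stdio.h>

/* e.g. ixgbe_read_proc_counter("0000:03:00.0", "rxerrors") */
static long ixgbe_read_proc_counter(const char *pci_id, const char *node)
{
	char path[128];
	long val;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/driver/ixgbe/%s/info/%s",
		 pci_id, node);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}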
+static int ixgbe_txdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int ixgbe_rxframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int ixgbe_rxbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int ixgbe_txframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int ixgbe_txbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} + +static int ixgbe_linkstat(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + int bitmask = 0; + u32 link_speed; + bool link_up = false; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + bitmask |= 1; + + if (hw->mac.ops.check_link) + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + else + /* always assume link is up, if no check link function */ + link_up = true; + if (link_up) + bitmask |= 2; + + if (adapter->old_lsc != adapter->lsc_int) { + bitmask |= 4; + adapter->old_lsc = adapter->lsc_int; + } + + return snprintf(page, count, "0x%X\n", bitmask); +} + +static int ixgbe_funcid(char *page, 
char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_hw *hw; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "0x%X\n", hw->bus.func); +} + +static int ixgbe_funcvers(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%s\n", ixgbe_driver_version); +} + +static int ixgbe_macburn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + (unsigned int)hw->mac.perm_addr[5]); +} + +static int ixgbe_macadmn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int ixgbe_maclla1(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_hw *hw; + int rc; + u16 eeprom_buff[6]; + u16 first_word = 0x37; + const u16 word_count = ARRAY_SIZE(eeprom_buff); + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + rc = hw->eeprom.ops.read_buffer(hw, first_word, 1, &first_word); + if (rc != 0) + return snprintf(page, count, "error: reading pointer to the EEPROM\n"); + + if (first_word != 0x0000 && first_word != 0xFFFF) { + rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count, + eeprom_buff); + if (rc != 0) + return snprintf(page, count, "error: reading buffer\n"); + } else { + memset(eeprom_buff, 0, sizeof(eeprom_buff)); + } + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + } + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); +} + +static int ixgbe_mtusize(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int 
ixgbe_featflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int bitmask = 0; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + if (adapter->netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +} + +static int ixgbe_lsominct(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int ixgbe_prommode(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int ixgbe_txdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int ixgbe_rxdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int ixgbe_rxqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->rx_ring[index]->count - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); +} + +static int ixgbe_txqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->tx_ring[index]->count - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of 
queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + diff/adapter->num_tx_queues); +} + +static int ixgbe_iovotype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "2\n"); +} + +static int ixgbe_funcnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->num_vfs); +} + +static int ixgbe_pciebnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->pdev->bus->number); +} + +static int ixgbe_therm_location(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->location); +} + + +static int ixgbe_therm_maxopthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->max_op_thresh); +} + + +static int ixgbe_therm_cautionthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->caution_thresh); +} + +static int ixgbe_therm_temp(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + s32 status; + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = ixgbe_get_thermal_sensor_data_generic(therm_data->hw); + if (status != IXGBE_SUCCESS) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + + +struct ixgbe_proc_type { + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct ixgbe_proc_type ixgbe_proc_entries[] = { + {"fwbanner", &ixgbe_fwbanner}, + {"porttype", &ixgbe_porttype}, + {"portspeed", &ixgbe_portspeed}, + {"wqlflag", &ixgbe_wqlflag}, + {"xflowctl", &ixgbe_xflowctl}, + {"rxdrops", &ixgbe_rxdrops}, + {"rxerrors", &ixgbe_rxerrors}, + {"rxupacks", &ixgbe_rxupacks}, + {"rxmpacks", &ixgbe_rxmpacks}, + {"rxbpacks", &ixgbe_rxbpacks}, + {"txdrops", &ixgbe_txdrops}, + {"txerrors", &ixgbe_txerrors}, + {"txupacks", &ixgbe_txupacks}, + {"txmpacks", &ixgbe_txmpacks}, + {"txbpacks", &ixgbe_txbpacks}, + 
{"rxframes", &ixgbe_rxframes}, + {"rxbytes", &ixgbe_rxbytes}, + {"txframes", &ixgbe_txframes}, + {"txbytes", &ixgbe_txbytes}, + {"linkstat", &ixgbe_linkstat}, + {"funcid", &ixgbe_funcid}, + {"funcvers", &ixgbe_funcvers}, + {"macburn", &ixgbe_macburn}, + {"macadmn", &ixgbe_macadmn}, + {"maclla1", &ixgbe_maclla1}, + {"mtusize", &ixgbe_mtusize}, + {"featflag", &ixgbe_featflag}, + {"lsominct", &ixgbe_lsominct}, + {"prommode", &ixgbe_prommode}, + {"txdscqsz", &ixgbe_txdscqsz}, + {"rxdscqsz", &ixgbe_rxdscqsz}, + {"txqavg", &ixgbe_txqavg}, + {"rxqavg", &ixgbe_rxqavg}, + {"iovotype", &ixgbe_iovotype}, + {"funcnbr", &ixgbe_funcnbr}, + {"pciebnbr", &ixgbe_pciebnbr}, + {"", NULL} +}; + +struct ixgbe_proc_type ixgbe_internal_entries[] = { + {"location", &ixgbe_therm_location}, + {"temp", &ixgbe_therm_temp}, + {"cautionthresh", &ixgbe_therm_cautionthresh}, + {"maxopthresh", &ixgbe_therm_maxopthresh}, + {"", NULL} +}; + +void ixgbe_del_proc_entries(struct ixgbe_adapter *adapter) +{ + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (ixgbe_top_dir == NULL) + return; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (ixgbe_internal_entries[index].read == NULL) + break; + + remove_proc_entry(ixgbe_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (ixgbe_proc_entries[index].read == NULL) + break; + remove_proc_entry(ixgbe_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), ixgbe_top_dir); +} + +/* called from ixgbe_main.c */ +void ixgbe_procfs_exit(struct ixgbe_adapter *adapter) +{ + ixgbe_del_proc_entries(adapter); +} + +int ixgbe_procfs_topdir_init() +{ + ixgbe_top_dir = proc_mkdir("driver/ixgbe", NULL); + if (ixgbe_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void ixgbe_procfs_topdir_exit() +{ + remove_proc_entry("driver/ixgbe", NULL); +} + +/* called from ixgbe_main.c */ +int ixgbe_procfs_init(struct ixgbe_adapter *adapter) +{ + int rc = 0; + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + for (i = 0; i < IXGBE_MAX_SENSORS; i++) + adapter->therm_dir[i] = NULL; + + if (ixgbe_top_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ixgbe_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ixgbe_proc_entries[index].read == NULL) + break; + if (!(create_proc_read_entry(ixgbe_proc_entries[index].name, + 0444, + adapter->info_dir, + ixgbe_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (ixgbe_thermal_present(adapter) == false) + goto exit; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == + 0) + continue; + + snprintf(buf, sizeof(buf), "sensor_%d", i); + adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir[i] == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if 
(ixgbe_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains the pointer the read func + * will need + */ + adapter->therm_data[i].hw = &adapter->hw; + adapter->therm_data[i].sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor[i]; + + if (!(create_proc_read_entry( + ixgbe_internal_entries[index].name, + 0444, + adapter->therm_dir[i], + ixgbe_internal_entries[index].read, + &adapter->therm_data[i]))) { + rc = -ENOMEM; + goto fail; + } + } + } + goto exit; + +fail: + ixgbe_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* !IXGBE_SYSFS */ +#endif /* IXGBE_PROCFS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 0be13a90ff79..7b3a2dc89819 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,9 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" #include <linux/ptp_classify.h> -#include <linux/clocksource.h> /* * The 82599 and the X540 do not have true 64bit nanosecond scale @@ -235,9 +234,9 @@ static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter) */ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter) { - u32 esdp, tsauxc, freqout, trgttiml, trgttimh, rem, tssdp; struct cyclecounter *cc = &adapter->hw_cc; struct ixgbe_hw *hw = &adapter->hw; + u32 esdp, tsauxc, freqout, trgttiml, trgttimh, rem, tssdp; u64 ns = 0, clock_edge = 0; struct timespec64 ts; unsigned long flags; @@ -272,7 +271,7 @@ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter) * cycle counter shift is small enough to avoid overflowing a 32bit * value. */ - freqout = div_u64(NS_PER_HALF_SEC << cc->shift, cc->mult); + freqout = (NS_PER_HALF_SEC << cc->shift) / cc->mult; /* Read the current clock time, and save the cycle counter value */ spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -411,7 +410,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, */ case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -495,7 +494,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) { struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); + container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; int neg_adj = 0; u64 rate = IXGBE_X550_BASE_PERIOD; @@ -568,7 +567,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp, switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -598,6 +597,20 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp, return 0; } +/** + * ixgbe_ptp_gettime + * @ptp: the ptp clock structure + * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value in ns, + * after converting it into a struct timespec64. 
+ */ +static int __maybe_unused ixgbe_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + return ixgbe_ptp_gettimex(ptp, ts, NULL); +} + /** * ixgbe_ptp_settime * @ptp: the ptp clock structure @@ -624,6 +637,31 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, return 0; } +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +static int ixgbe_ptp_gettime32(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = ixgbe_ptp_gettime(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + + return 0; +} + +static int ixgbe_ptp_settime32(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return ixgbe_ptp_settime(ptp, &ts64); +} +#endif + /** * ixgbe_ptp_feature_enable * @ptp: the ptp clock structure @@ -693,12 +731,14 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) * * this watchdog task periodically reads the timecounter * in order to prevent missing when the system time registers wrap - * around. This needs to be run approximately twice a minute. + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. */ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + - IXGBE_OVERFLOW_PERIOD); + IXGBE_OVERFLOW_PERIOD); unsigned long flags; if (timeout) { @@ -745,7 +785,7 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) } /* only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5 * HZ)) { + if (time_is_before_jiffies(rx_event + 5*HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; @@ -863,6 +903,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) return; } + /* check timeout last in case timestamp event just occurred */ if (timeout) { ixgbe_ptp_clear_tx_timestamp(adapter); adapter->tx_hwtstamp_timeouts++; @@ -929,7 +970,6 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, /* Read the tsyncrxctl register afterwards in order to prevent taking an * I/O hit on every packet. */ - tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; @@ -983,7 +1023,7 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) * mode, if required to support the specifically requested mode. */ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, - struct hwtstamp_config *config) + struct hwtstamp_config *config) { struct ixgbe_hw *hw = &adapter->hw; u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; @@ -1040,7 +1080,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: +#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL case HWTSTAMP_FILTER_NTP_ALL: +#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */ case HWTSTAMP_FILTER_ALL: /* The X550 controller is capable of timestamping all packets, * which allows it to accept any filter. 
@@ -1053,11 +1095,10 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, } /* fall through */ default: - /* - * register RXMTRL must be set in order to do V1 packets, + /* register RXMTRL must be set in order to do V1 packets, * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages and hardware does not support - * timestamping all packets => return error + * Delay_Req messages unless hardware supports timestamping all + * packets => return error */ adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); @@ -1080,7 +1121,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* enable timestamping all packets only if at least some * packets were requested. Otherwise, play nice and disable * timestamping @@ -1243,7 +1284,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) cc.shift = 2; } /* fallthrough */ - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; @@ -1317,7 +1358,7 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) spin_lock_irqsave(&adapter->tmreg_lock, flags); timecounter_init(&adapter->hw_tc, &adapter->hw_cc, - ktime_to_ns(ktime_get_real())); + ktime_get_real_ns()); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); adapter->last_overflow_check = jiffies; @@ -1361,8 +1402,17 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; +#else + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; +#endif /* HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL */ adapter->ptp_caps.settime64 = ixgbe_ptp_settime; +#else /* HAVE_PTP_CLOCK_INFO_GETTIME64 */ + adapter->ptp_caps.gettime = ixgbe_ptp_gettime32; + adapter->ptp_caps.settime = ixgbe_ptp_settime32; +#endif /* !HAVE_PTP_CLOCK_INFO_GETTIME64 */ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540; break; @@ -1378,13 +1428,22 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; +#else + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; +#endif /* HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL */ adapter->ptp_caps.settime64 = ixgbe_ptp_settime; +#else /* HAVE_PTP_CLOCK_INFO_GETTIME64 */ + adapter->ptp_caps.gettime = ixgbe_ptp_gettime32; + adapter->ptp_caps.settime = ixgbe_ptp_settime32; +#endif /* !HAVE_PTP_CLOCK_INFO_GETTIME64 */ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; @@ -1394,8 +1453,17 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef 
HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; +#else + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; +#endif /* HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL */ adapter->ptp_caps.settime64 = ixgbe_ptp_settime; +#else /* HAVE_PTP_CLOCK_INFO_GETTIME64 */ + adapter->ptp_caps.gettime = ixgbe_ptp_gettime32; + adapter->ptp_caps.settime = ixgbe_ptp_settime32; +#endif /* !HAVE_PTP_CLOCK_INFO_GETTIME64 */ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X550; break; @@ -1406,7 +1474,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) } adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, - &adapter->pdev->dev); + pci_dev_to_dev(adapter->pdev)); if (IS_ERR(adapter->ptp_clock)) { err = PTR_ERR(adapter->ptp_clock); adapter->ptp_clock = NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 537dfff585e0..8126029ba5b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + #include <linux/types.h> #include <linux/module.h> @@ -11,10 +12,6 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/ipv6.h> -#include <linux/if_bridge.h> -#ifdef NETIF_F_HW_VLAN_CTAG_TX -#include <linux/if_vlan.h> -#endif #include "ixgbe.h" #include "ixgbe_type.h" @@ -53,14 +50,19 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; int i; +#ifdef HAVE_XDP_SUPPORT if (adapter->xdp_prog) { e_warn(probe, "SRIOV is not supported with XDP\n"); return -EINVAL; } +#endif /* HAVE_XDP_SUPPORT */ + + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | - IXGBE_FLAG_VMDQ_ENABLED; + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; /* Allocate memory for per VF control structures */ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), @@ -68,35 +70,52 @@ if (!adapter->vfinfo) return -ENOMEM; + /* Initialize default switching mode VEB */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* set adapter->num_vfs only after allocating vfinfo to avoid + * NULL pointer issues when accessing adapter->vfinfo + */ adapter->num_vfs = num_vfs; ixgbe_alloc_vf_macvlans(adapter, num_vfs); + adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; - /* Initialize default switching mode VEB */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - adapter->bridge_mode = BRIDGE_MODE_VEB; + /* enable L2 switch and replication */ + adapter->flags |= IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + IXGBE_FLAG_SRIOV_REPLICATION_ENABLE; - /* limit trafffic classes based on VFs enabled */ - if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) { - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - } else if (num_vfs < 32) { + /* limit traffic classes based on VFs enabled */ + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && + (adapter->num_vfs < 16)) { + adapter->dcb_cfg.num_tcs.pg_tcs = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { adapter->dcb_cfg.num_tcs.pg_tcs = 4; adapter->dcb_cfg.num_tcs.pfc_tcs = 
4; } else { adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; } + adapter->dcb_cfg.vt_mode = true; + +#ifdef IXGBE_DISABLE_VF_MQ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; +#endif /* Disable RSC when in SR-IOV mode */ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | - IXGBE_FLAG2_RSC_ENABLED); + IXGBE_FLAG2_RSC_ENABLED); - for (i = 0; i < num_vfs; i++) { + for (i = 0; i < adapter->num_vfs; i++) { /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN /* We support VF RSS querying only for 82599 and x540 * devices at the moment. These devices share RSS * indirection table and RSS hash key with PF therefore @@ -104,6 +123,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, */ adapter->vfinfo[i].rss_query_enabled = 0; +#endif /* Untrust all VFs */ adapter->vfinfo[i].trusted = false; @@ -111,7 +131,10 @@ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } - e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs); + e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); + if (hw->mac.type < ixgbe_mac_X550) + e_dev_info("configure port vlans to keep your VFs secure\n"); + return 0; } @@ -150,13 +173,13 @@ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) /* Note this function is called when the user wants to enable SR-IOV * VFs using the now deprecated module parameter */ -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) { int pre_existing_vfs = 0; unsigned int num_vfs; pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !max_vfs) + if (!pre_existing_vfs && !adapter->max_vfs) return; /* If there are pre-existing VFs then we have to force @@ -175,10 +198,11 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) * The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the * physical function. If the user requests greater than * 63 VFs then it is an error - reset to default of zero. */ - num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT); + num_vfs = min_t(unsigned int, adapter->max_vfs, + IXGBE_MAX_VFS_DRV_LIMIT); err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) { @@ -195,16 +219,17 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) /* If we have gotten to this point then there is no memory available * to manage the VF devices - print message and bail. 
*/ - e_err(probe, "Unable to allocate memory for VF Data Storage - " - "SRIOV disabled\n"); + e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n"); ixgbe_disable_sriov(adapter); } -#endif /* #ifdef CONFIG_PCI_IOV */ +#endif /* CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; - int rss; + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie; + u32 vmdctl; /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; @@ -231,6 +256,11 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; + /* Turn off malicious driver detection */ + if ((hw->mac.ops.disable_mdd) && + (!(adapter->flags & IXGBE_FLAG_MDD_ENABLED))) + hw->mac.ops.disable_mdd(hw); + #ifdef CONFIG_PCI_IOV /* * If our VFs are assigned we cannot shut down SR-IOV @@ -244,70 +274,90 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); #endif + /* turn off device IOV mode */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* set default pool back to 0 */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + IXGBE_WRITE_FLUSH(hw); /* Disable VMDq flag so device will be set in VM mode */ - if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) { + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - rss = min_t(int, ixgbe_max_rss_indices(adapter), - num_online_cpus()); - } else { - rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); - } + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; adapter->ring_feature[RING_F_VMDQ].offset = 0; - adapter->ring_feature[RING_F_RSS].limit = rss; /* take a breather then clean up driver data */ msleep(100); return 0; } -static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +static int ixgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, int __maybe_unused num_vfs) { #ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int pre_existing_vfs = pci_num_vf(dev); - int err = 0, num_rx_pools, i, limit; + int err = 0, i; u8 num_tc; + if (!(adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (adapter->num_vfs == num_vfs) + return -EINVAL; + if (pre_existing_vfs && pre_existing_vfs != num_vfs) err = ixgbe_disable_sriov(adapter); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) - return num_vfs; + goto out; if (err) - return err; + goto err_out; - /* While the SR-IOV capability structure reports total VFs to be 64, - * we limit the actual number allocated as below based on two factors. + /* While the SR-IOV capability structure reports total VFs to be + * 64, we limit the actual number that can be allocated as below + * so that some transmit/receive resources can be reserved for the + * PF. The PCI bus driver already checks for other values out of + * range. * Num_TCs MAX_VFs * 1 63 * <=4 31 * >4 15 - * First, we reserve some transmit/receive resources for the PF. - * Second, VMDQ also uses the same pools that SR-IOV does. 
We need to - account for this, so that we don't accidentally allocate more VFs - than we have available pools. The PCI bus driver already checks for - other values out of range. */ - num_tc = adapter->hw_tcs; - num_rx_pools = bitmap_weight(adapter->fwd_bitmask, - adapter->num_rx_pools); - limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : - (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; - - if (num_vfs > (limit - num_rx_pools)) { - e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n", - num_tc, num_rx_pools - 1, limit - num_rx_pools); - return -EPERM; + num_tc = netdev_get_num_tc(adapter->netdev); + if (num_tc > 4) { + if (num_vfs > IXGBE_MAX_VFS_8TC) { + e_dev_err("Currently the device is configured with %d TCs, creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); + err = -EPERM; + goto err_out; + } + } else if ((num_tc > 1) && (num_tc <= 4)) { + if (num_vfs > IXGBE_MAX_VFS_4TC) { + e_dev_err("Currently the device is configured with %d TCs, creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); + err = -EPERM; + goto err_out; + } + } else { + if (num_vfs > IXGBE_MAX_VFS_1TC) { + e_dev_err("Currently the device is configured with %d TCs, creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); + err = -EPERM; + goto err_out; + } } err = __ixgbe_enable_sriov(adapter, num_vfs); if (err) - return err; + goto err_out; - for (i = 0; i < num_vfs; i++) + for (i = 0; i < adapter->num_vfs; i++) ixgbe_vf_configuration(dev, (i | 0x10000000)); /* reset before enabling SRIOV to avoid mailbox issues */ @@ -316,14 +366,17 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) err = pci_enable_sriov(dev, num_vfs); if (err) { e_dev_warn("Failed to enable PCI sriov: %d\n", err); - return err; + goto err_out; } ixgbe_get_vfs(adapter); +out: return num_vfs; -#else - return 0; + +err_out: + return err; #endif + return 0; } static int ixgbe_pci_sriov_disable(struct pci_dev *dev) @@ -332,15 +385,16 @@ int err; #ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; - int prev_num_vf = pci_num_vf(dev); #endif + if (!adapter->num_vfs && !pci_num_vf(dev)) + return -EINVAL; + err = ixgbe_disable_sriov(adapter); /* Only reinit if no error and state changed */ #ifdef CONFIG_PCI_IOV - if (!err && (current_flags != adapter->flags || - prev_num_vf != pci_num_vf(dev))) + if (!err && current_flags != adapter->flags) ixgbe_sriov_reinit(adapter); #endif @@ -364,39 +418,28 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; struct ixgbe_hw *hw = &adapter->hw; int i; - u32 vector_bit; - u32 vector_reg; - u32 mta_reg; u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); /* only so many hash values supported */ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); - /* - * salt away the number of multi cast addresses assigned + /* salt away the number of multi cast addresses assigned * to this VF for later use to restore when the PF multi cast * list changes */ vfinfo->num_vf_mc_hashes = entries; - /* - * VFs are limited to using the MTA hash table for their multicast - * addresses - */ - for (i = 0; i < entries; i++) { + /* VFs are limited to using the MTA hash table for their multicast + * addresses */ + for (i = 0; i < entries; i++) vfinfo->vf_mc_hashes[i] = hash_list[i]; - } - for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { - vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) 
& 0x7F; - vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= BIT(vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); - } vmolr |= IXGBE_VMOLR_ROMPE; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + /* Sync up the PF and VF in the same MTA table */ + ixgbe_write_mc_addr_list(adapter->netdev); + return 0; } @@ -408,7 +451,9 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) int i, j; u32 vector_bit; u32 vector_reg; - u32 mta_reg; + + /* Clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); for (i = 0; i < adapter->num_vfs; i++) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); @@ -417,11 +462,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) hw->addr_ctrl.mta_in_use++; vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= BIT(vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); } - if (vfinfo->num_vf_mc_hashes) vmolr |= IXGBE_VMOLR_ROMPE; else @@ -432,14 +474,14 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) /* Restore any VF macvlans */ ixgbe_full_sync_mac_table(adapter); } -#endif +#endif /* CONFIG_PCI_IOV */ -static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, - u32 vf) +int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; int err; +#ifndef HAVE_VLAN_RX_REGISTER /* If VLAN overlaps with one the PF is currently monitoring make * sure that we are able to allocate a VLVF entry. This may be * redundant but it guarantees PF will maintain visibility to @@ -450,8 +492,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, if (err) return err; } +#endif err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); +#ifndef HAVE_VLAN_RX_REGISTER if (add && !err) return err; @@ -463,14 +507,13 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, if (test_bit(vid, adapter->active_vlans) || (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ixgbe_update_pf_promisc_vlvf(adapter, vid); +#endif return err; } - -static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - int max_frame = msgbuf[1]; u32 max_frs; /* @@ -481,22 +524,22 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) * account before we can enable the VF for receive */ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; u32 reg_offset, vf_shift, vfre; s32 err = 0; -#ifdef CONFIG_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (dev->features & NETIF_F_FCOE_MTU) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); - #endif /* CONFIG_FCOE */ + switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: /* Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are * disabled @@ -522,9 +565,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* enable or disable receive depending on error */ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (err) - vfre &= 
~BIT(vf_shift); + vfre &= ~(1 << vf_shift); else - vfre |= BIT(vf_shift); + vfre |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); if (err) { @@ -533,12 +576,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) } } - /* MTU < 68 is an error and causes problems on some kernels */ - if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { - e_err(drv, "VF max_frame %d out of range\n", max_frame); - return -EINVAL; - } - /* pull current max frame size from hardware */ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); max_frs &= IXGBE_MHADD_MFS_MASK; @@ -554,10 +591,10 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return 0; } -static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - vmolr |= IXGBE_VMOLR_BAM; + vmolr |= IXGBE_VMOLR_BAM; if (aupe) vmolr |= IXGBE_VMOLR_AUPE; else @@ -565,6 +602,15 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); } +static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, + u16 vid, u16 qos, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +} + static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -578,7 +624,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) u32 vlvfb_mask, pool_mask, i; /* create mask for VF and other pools */ - pool_mask = ~BIT(VMDQ_P(0) % 32); + pool_mask = (u32)~BIT(VMDQ_P(0) % 32); vlvfb_mask = BIT(vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ @@ -596,7 +642,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) /* clear our bit from vlvfb */ vlvfb ^= vlvfb_mask; - /* create 64b mask to chedk to see if we should clear VLVF */ + /* create 64b mask to check to see if we should clear VLVF */ bits[word % 2] = vlvfb; bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); @@ -693,15 +739,16 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { - struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - u8 num_tcs = adapter->hw_tcs; + struct ixgbe_hw *hw = &adapter->hw; u32 reg_val; u32 queue; + u32 word; - /* remove VLAN filters beloning to this VF */ + /* remove VLAN filters belonging to this VF */ ixgbe_clear_vf_vlans(adapter, vf); /* add back PF assigned VLAN or VLAN 0 */ @@ -720,19 +767,11 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) else ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, adapter->default_up, vf); - - if (vfinfo->spoofchk_enabled) { - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); - } } /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; - /* clear any ipsec table info */ - ixgbe_ipsec_vf_clear(adapter, vf); - /* Flush and reset the mta with the new values */ ixgbe_set_rx_mode(adapter->netdev); @@ -742,64 +781,68 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) /* reset VF api back 
to unknown */ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; - /* Restart each queue for given VF */ - for (queue = 0; queue < q_per_pool; queue++) { - unsigned int reg_idx = (vf * q_per_pool) + queue; + /* + * Toggling VF's TX queues and clearing VF Mailbox Memory after VFLR + * should only affect X550 and above + */ + if (hw->mac.type >= ixgbe_mac_X550) { + /* Restart each queue for given VF */ + for (queue = 0; queue < q_per_pool; queue++) { + unsigned int reg_idx = (vf * q_per_pool) + queue; - reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); + reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); - /* Re-enabling only configured queues */ - if (reg_val) { - reg_val |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); - reg_val &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); + /* Re-enabling only configured queues */ + if (reg_val) { + reg_val |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), + reg_val); + reg_val &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), + reg_val); + } } + + /* Clear VF's mailbox memory */ + for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); + + IXGBE_WRITE_FLUSH(hw); } - - IXGBE_WRITE_FLUSH(hw); } -static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) +int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) { - struct ixgbe_hw *hw = &adapter->hw; - u32 word; - - /* Clear VF's mailbox memory */ - for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); - - IXGBE_WRITE_FLUSH(hw); -} - -static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr) -{ - s32 retval; + s32 retval = 0; ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); if (retval >= 0) - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, - ETH_ALEN); + memcpy(adapter->vfinfo[vf].vf_mac_addresses, + mac_addr, ETH_ALEN); else memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); return retval; } +#ifdef CONFIG_PCI_IOV int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { + unsigned char vf_mac_addr[6]; struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); - bool enable = ((event_mask & 0x10000000U) != 0); - if (enable) - eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } return 0; } +#endif /* CONFIG_PCI_IOV */ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, u32 qde) @@ -807,25 +850,25 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg; int i; for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { - u32 reg; - /* flush previous write */ IXGBE_WRITE_FLUSH(hw); - /* indicate to hardware that we want to set drop enable */ + /* drop enable should always be set in SRIOV mode*/ reg = IXGBE_QDE_WRITE | qde; reg |= i << IXGBE_QDE_IDX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); } + } static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { - struct ixgbe_ring_feature *vmdq = 
&adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; u32 reg, reg_offset, vf_shift; u32 msgbuf[4] = {0, 0, 0, 0}; @@ -838,8 +881,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reset the filters for the device */ ixgbe_vf_reset_event(adapter, vf); - ixgbe_vf_clear_mbx(adapter, vf); - /* set vf mac address */ if (!is_zero_ether_addr(vf_mac)) ixgbe_set_vf_mac(adapter, vf, vf_mac); @@ -849,7 +890,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= BIT(vf_shift); + reg |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ @@ -861,7 +902,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= BIT(vf_shift); + reg |= 1 << vf_shift; /* * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. * For more info take a look at ixgbe_set_vf_lpe @@ -870,23 +911,22 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; -#ifdef CONFIG_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (dev->features & NETIF_F_FCOE_MTU) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); - #endif /* CONFIG_FCOE */ + if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~BIT(vf_shift); + reg &= ~(1 << vf_shift); } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); /* enable VF mailbox for further messages */ adapter->vfinfo[vf].clear_to_send = true; - /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= BIT(vf_shift); + reg |= (1 << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* @@ -901,10 +941,10 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET; if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { - msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + msgbuf[0] |= IXGBE_VT_MSGTYPE_SUCCESS; memcpy(addr, vf_mac, ETH_ALEN); } else { - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + msgbuf[0] |= IXGBE_VT_MSGTYPE_FAILURE; } /* @@ -928,14 +968,15 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, } if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && - !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; e_warn(drv, - "VF %d attempted to override administratively set MAC address\n" - "Reload the VF driver to resume operations\n", - vf); + "VF %d attempted to set a new MAC address but it already has an administratively set MAC address %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); + e_warn(drv, "Check the VF driver and if it is not using the correct MAC address you may need to reload the VF driver\n"); return -1; } - return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } @@ -944,7 +985,8 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, { u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - u8 tcs = adapter->hw_tcs; + u8 
tcs = netdev_get_num_tc(adapter->netdev); + int err = 0; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -958,7 +1000,63 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, if (!vid && !add) return 0; - return ixgbe_set_vf_vlan(adapter, add, vid, vf); + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); + + if (err) + return err; + +#ifdef HAVE_VLAN_RX_REGISTER + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) { + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + if (err) + return err; + } + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + struct ixgbe_hw *hw = &adapter->hw; + u32 bits, vlvf; + s32 reg_ndx; + + reg_ndx = ixgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + bits &= ~(1 << VMDQ_P(0)); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && !bits) + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif /* CONFIG_PCI_IOV */ +#else /* HAVE_VLAN_RX_REGISTER */ + return 0; +#endif /* HAVE_VLAN_RX_REGISTER */ + return err; } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -1015,7 +1113,6 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1033,7 +1130,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, struct net_device *dev = adapter->netdev; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned int default_tc = 0; - u8 num_tcs = adapter->hw_tcs; + u8 num_tcs = netdev_get_num_tc(dev); /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { @@ -1041,7 +1138,6 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: break; default: return -1; @@ -1069,6 +1165,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, return 0; } +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u32 i, j; @@ -1082,9 +1179,8 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_14: - case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -1115,9 +1211,8 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_14: - case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: + case 
ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; } @@ -1127,6 +1222,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return 0; } +#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) @@ -1141,9 +1237,8 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; - /* Fall through */ + /* Fall through */ case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: break; default: return -EOPNOTSUPP; @@ -1219,8 +1314,8 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) } /* this is a message we already processed, do nothing */ - if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) - return 0; + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_SUCCESS | IXGBE_VT_MSGTYPE_FAILURE)) + return retval; /* flush the ack before we write any messages back */ IXGBE_WRITE_FLUSH(hw); @@ -1232,10 +1327,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) * until the vf completes a virtual function reset it should not be * allowed to start any configuration. */ + if (!adapter->vfinfo[vf].clear_to_send) { - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + msgbuf[0] |= IXGBE_VT_MSGTYPE_FAILURE; ixgbe_write_mbx(hw, msgbuf, 1, vf); - return 0; + return retval; } switch ((msgbuf[0] & 0xFFFF)) { @@ -1249,7 +1345,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); break; case IXGBE_VF_SET_LPE: - retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); + if (msgbuf[1] > IXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); + return -EINVAL; + } + retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); break; case IXGBE_VF_SET_MACVLAN: retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); @@ -1260,21 +1360,17 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_QUEUES: retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN case IXGBE_VF_GET_RETA: retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); break; case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; +#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; - case IXGBE_VF_IPSEC_ADD: - retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); - break; - case IXGBE_VF_IPSEC_DEL: - retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); - break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1283,9 +1379,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) /* notify the VF of the results of what it sent us */ if (retval) - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + msgbuf[0] |= IXGBE_VT_MSGTYPE_FAILURE; else - msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + msgbuf[0] |= IXGBE_VT_MSGTYPE_SUCCESS; msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; @@ -1297,18 +1393,65 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - u32 msg = IXGBE_VT_MSGTYPE_NACK; + u32 msg = IXGBE_VT_MSGTYPE_FAILURE; /* if device isn't clear to send it shouldn't be reading either */ if (!adapter->vfinfo[vf].clear_to_send) ixgbe_write_mbx(hw, &msg, 1, vf); } +#define Q_BITMAP_DEPTH 2 +static void
ixgbe_check_mdd_event(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vf_bitmap[Q_BITMAP_DEPTH] = { 0 }; + u32 j, i; + u32 ping; + + if (!hw->mac.ops.mdd_event) + return; + + /* Did we have a malicious event */ + hw->mac.ops.mdd_event(hw, vf_bitmap); + + /* Log any blocked queues and release lock */ + for (i = 0; i < Q_BITMAP_DEPTH; i++) { + for (j = 0; j < 32 && vf_bitmap[i]; j++) { + u32 vf; + + if (!(vf_bitmap[i] & (1 << j))) + continue; + + /* The VF that malicious event occurred on */ + vf = j + (i * 32); + + dev_warn(pci_dev_to_dev(adapter->pdev), + "Malicious event on VF %d tx:%x rx:%x\n", vf, + IXGBE_READ_REG(hw, IXGBE_LVMMC_TX), + IXGBE_READ_REG(hw, IXGBE_LVMMC_RX)); + + /* restart the vf */ + if (hw->mac.ops.restore_mdd_vf) { + hw->mac.ops.restore_mdd_vf(hw, vf); + + /* get the VF to rebuild its queues */ + adapter->vfinfo[vf].clear_to_send = 0; + ping = IXGBE_PF_CONTROL_MSG | + IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); + } + } + } +} + void ixgbe_msg_task(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vf; + if (adapter->flags & IXGBE_FLAG_MDD_ENABLED && adapter->vfinfo) + ixgbe_check_mdd_event(adapter); + for (vf = 0; vf < adapter->num_vfs; vf++) { /* process any reset requests */ if (!ixgbe_check_for_rst(hw, vf)) @@ -1336,6 +1479,7 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); } +#ifdef HAVE_NDO_SET_VF_TRUST static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -1347,6 +1491,7 @@ static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) ixgbe_write_mbx(hw, &ping, 1, vf); } +#endif void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1361,29 +1506,55 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) } } +#ifdef HAVE_NDO_SET_VF_TRUST +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + +#endif +#ifdef IFLA_VF_MAX int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - s32 retval; + s32 retval = 0; - if (vf >= adapter->num_vfs) + if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; if (is_valid_ether_addr(mac)) { - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + dev_info(pci_dev_to_dev(adapter->pdev), "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective."); + dev_info(pci_dev_to_dev(adapter->pdev), "Reload the VF driver to make this change effective.\n"); retval = ixgbe_set_vf_mac(adapter, vf, mac); if (retval >= 0) { + /* pf_set_mac is used in ESX5.1 and base driver but not in ESX5.5 */ adapter->vfinfo[vf].pf_set_mac = true; if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); } } else { - dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); } } else if (is_zero_ether_addr(mac)) { unsigned char *vf_mac_addr = @@ -1393,24 +1564,24 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (is_zero_ether_addr(vf_mac_addr)) return 0; - dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); + dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", + vf); retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); if (retval >= 0) { adapter->vfinfo[vf].pf_set_mac = false; memcpy(vf_mac_addr, mac, ETH_ALEN); } else { - dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); } } else { retval = -EINVAL; } - return retval; } -static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, - u16 vlan, u8 qos) +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, + int vf, u16 vlan, u8 qos) { struct ixgbe_hw *hw = &adapter->hw; int err; @@ -1429,16 +1600,13 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, if (hw->mac.type >= ixgbe_mac_X550) ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | IXGBE_QDE_HIDE_VLAN); - adapter->vfinfo[vf].pf_vlan = vlan; adapter->vfinfo[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, + dev_info(pci_dev_to_dev(adapter->pdev), "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); } out: @@ -1467,18 +1635,28 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) return err; } +#ifdef IFLA_VF_VLAN_INFO_MAX int ixgbe_ndo_set_vf_vlan(struct 
net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) +#else +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#endif { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); - if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) + /* VLAN IDs accepted range 0-4094 */ + if (vf < 0 || vf >= adapter->num_vfs || + vlan > VLAN_VID_MASK - 1 || qos > 7) return -EINVAL; + +#ifdef IFLA_VF_VLAN_INFO_MAX if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; +#endif if (vlan || qos) { - /* Check if there is already a port VLAN set, if so + /* + * Check if there is already a port VLAN set, if so * we have to delete the old one first before we * can set the new one. The usage model had * previously assumed the user would delete the @@ -1490,15 +1668,15 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, if (err) goto out; err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); + } else { err = ixgbe_disable_port_vlan(adapter, vf); } - out: return err; } -int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: @@ -1574,7 +1752,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; - dev_info(&adapter->pdev->dev, + dev_info(pci_dev_to_dev(adapter->pdev), "Link speed has been changed. VF Transmit rate is disabled\n"); } @@ -1586,14 +1764,20 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) } } -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int __always_unused min_tx_rate, int max_tx_rate) +#else +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ { struct ixgbe_adapter *adapter = netdev_priv(netdev); int link_speed; /* verify VF is active */ - if (vf >= adapter->num_vfs) + if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; /* verify link is up */ @@ -1605,9 +1789,6 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, if (link_speed != 10000) return -EINVAL; - if (min_tx_rate) - return -EINVAL; - /* rate limit cannot be less than 10Mbs or greater than link speed */ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) return -EINVAL; @@ -1622,12 +1803,14 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, return 0; } +#endif /* IFLA_VF_MAX */ +#if IS_ENABLED(CONFIG_PCI_IOV) int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - if (vf >= adapter->num_vfs) + if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].spoofchk_enabled = setting; @@ -1645,7 +1828,7 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), (IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_TX_ANTISPOOF | - ETH_P_LLDP)); + IXGBE_ETH_P_LLDP)); IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), (IXGBE_ETQF_FILTER_EN | @@ -1654,10 +1837,12 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); } - return 0; } +#endif /* CONFIG_PCI_IOV */ +#ifdef 
IFLA_VF_MAX +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { @@ -1670,7 +1855,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, adapter->hw.mac.type >= ixgbe_mac_X550) return -EOPNOTSUPP; - if (vf >= adapter->num_vfs) + if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].rss_query_enabled = setting; @@ -1678,42 +1863,37 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, return 0; } -int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (vf >= adapter->num_vfs) - return -EINVAL; - - /* nothing to do */ - if (adapter->vfinfo[vf].trusted == setting) - return 0; - - adapter->vfinfo[vf].trusted = setting; - - /* reset VF to reconfigure features */ - adapter->vfinfo[vf].clear_to_send = false; - ixgbe_ping_vf(adapter, vf); - - e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); - - return 0; -} - +#endif int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (vf >= adapter->num_vfs) + + if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; + ivi->vf = vf; memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; ivi->min_tx_rate = 0; +#else + ivi->tx_rate = adapter->vfinfo[vf].tx_rate; +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_TRUST ivi->trusted = adapter->vfinfo[vf].trusted; +#endif return 0; } +#endif /* IFLA_VF_MAX */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 3ec21923c89c..0c727b674ed6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,51 +1,71 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + #ifndef _IXGBE_SRIOV_H_ #define _IXGBE_SRIOV_H_ -/* ixgbe driver limit the max number of VFs could be enabled to - * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) +/* The ixgbe driver limits the max number of VFs that can be enabled to + * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) */ #define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) -#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VF_FUNCTIONS -#define IXGBE_MAX_VFS_4TC 32 -#define IXGBE_MAX_VFS_8TC 16 +#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VFS_DRV_LIMIT +#define IXGBE_MAX_VFS_4TC 31 +#define IXGBE_MAX_VFS_8TC 15 -#ifdef CONFIG_PCI_IOV void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); -#endif +int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); void ixgbe_msg_task(struct ixgbe_adapter *adapter); -int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr); void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +#ifdef IFLA_VF_MAX int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +#ifdef IFLA_VF_VLAN_INFO_MAX int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, - u8 qos, __be16 vlan_proto); -int ixgbe_link_mbps(struct ixgbe_adapter *adapter); + u8 qos, __be16 vlan_proto); +#else +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos); +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); -int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +#else +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); -void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +#endif /* IFLA_VF_MAX */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs); +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); #endif int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef IFLA_VF_MAX +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +#endif /* IFLA_VF_MAX */ +void ixgbe_dump_registers(struct ixgbe_adapter *adapter); -static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, - u16 vid, u16 qos, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; - - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); -} - +/* + * These are defined in ixgbe_type.h on behalf of the VF driver + * but we need them here unwrapped for the PF driver.
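+ * (As an illustrative sketch only, not part of this patch: the PF driver
+ * typically matches these bare IDs when walking the PCI bus for the VFs
+ * it owns, along the lines of
+ *	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ *			       IXGBE_DEV_ID_82599_VF, NULL);
+ * which is why the values need to be visible here without the VF-driver
+ * wrapping.)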
+ */ +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #endif /* _IXGBE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 204844288c16..576f117b5fde 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -1,30 +1,36 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_type.h" +#ifdef IXGBE_SYSFS + #include #include #include #include #include #include +#include +#ifdef IXGBE_HWMON #include +#endif +#ifdef IXGBE_HWMON /* hwmon callback functions */ -static ssize_t ixgbe_hwmon_show_location(struct device *dev, +static ssize_t ixgbe_hwmon_show_location(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, dev_attr); - return sprintf(buf, "loc%u\n", + return snprintf(buf, PAGE_SIZE, "loc%u\n", ixgbe_attr->sensor->location); } -static ssize_t ixgbe_hwmon_show_temp(struct device *dev, +static ssize_t ixgbe_hwmon_show_temp(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -40,10 +46,10 @@ static ssize_t ixgbe_hwmon_show_temp(struct device *dev, /* display millidegree */ value *= 1000; - return sprintf(buf, "%u\n", value); + return snprintf(buf, PAGE_SIZE, "%u\n", value); } -static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, +static ssize_t ixgbe_hwmon_show_cautionthresh(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -54,10 +60,10 @@ static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, /* display millidegree */ value *= 1000; - return sprintf(buf, "%u\n", value); + return snprintf(buf, PAGE_SIZE, "%u\n", value); } -static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, +static ssize_t ixgbe_hwmon_show_maxopthresh(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -68,7 +74,7 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, /* display millidegree */ value *= 1000; - return sprintf(buf, "%u\n", value); + return snprintf(buf, PAGE_SIZE, "%u\n", value); } /** @@ -83,12 +89,18 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, */ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, unsigned int offset, int type) { - int rc; unsigned int n_attr; struct hwmon_attr *ixgbe_attr; +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS n_attr = adapter->ixgbe_hwmon_buff->n_hwmon; ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr]; +#else + int rc; + + n_attr = adapter->ixgbe_hwmon_buff.n_hwmon; + ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr]; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ switch (type) { case IXGBE_HWMON_TYPE_LOC: @@ -112,8 +124,7 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, "temp%u_crit", offset + 1); break; default: - rc = -EPERM; - return rc; + return -EPERM; } /* These always the same regardless of type */ @@ -123,6 +134,8 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, ixgbe_attr->dev_attr.store = NULL; ixgbe_attr->dev_attr.attr.mode = 0444; ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; + +#ifdef 
HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS sysfs_attr_init(&ixgbe_attr->dev_attr.attr); adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr; @@ -130,10 +143,38 @@ ++adapter->ixgbe_hwmon_buff->n_hwmon; return 0; -} +#else + rc = device_create_file(pci_dev_to_dev(adapter->pdev), + &ixgbe_attr->dev_attr); -static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) + if (rc == 0) + ++adapter->ixgbe_hwmon_buff.n_hwmon; + + return rc; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ +} +#endif /* IXGBE_HWMON */ + +static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter __maybe_unused *adapter) { +#ifdef IXGBE_HWMON +#ifndef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + int i; + + if (adapter == NULL) + return; + + for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) { + device_remove_file(pci_dev_to_dev(adapter->pdev), + &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->ixgbe_hwmon_buff.hwmon_list); + + if (adapter->ixgbe_hwmon_buff.device) + hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device); +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ +#endif /* IXGBE_HWMON */ } /* called from ixgbe_main.c */ @@ -145,27 +186,52 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) /* called from ixgbe_main.c */ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) { + int rc = 0; +#ifdef IXGBE_HWMON +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS struct hwmon_buff *ixgbe_hwmon; struct device *hwmon_dev; +#else + struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; + int n_attrs; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ unsigned int i; - int rc = 0; +#endif /* IXGBE_HWMON */ +#ifdef IXGBE_HWMON /* If this method isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { - goto exit; + goto no_thermal; } /* Don't create thermal hwmon interface if no sensors present */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) - goto exit; + goto no_thermal; +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon), GFP_KERNEL); - if (ixgbe_hwmon == NULL) { + + if (!ixgbe_hwmon) { rc = -ENOMEM; goto exit; } + adapter->ixgbe_hwmon_buff = ixgbe_hwmon; +#else + /* + * Allocate space for the max number of attributes: + * max num sensors * values (loc, temp, max, caution) + */ + n_attrs = IXGBE_MAX_SENSORS * 4; + ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + + if (!ixgbe_hwmon->hwmon_list) { + rc = -ENOMEM; + goto err; + } +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ for (i = 0; i < IXGBE_MAX_SENSORS; i++) { /* @@ -178,18 +244,19 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) /* Bail if any hwmon attr struct fails to initialize */ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); if (rc) - goto exit; + goto err; rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); if (rc) - goto exit; + goto err; rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); if (rc) - goto exit; + goto err; rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); if (rc) - goto exit; + goto err; } +#ifdef
PTR_ERR(hwmon_dev); + goto exit; + } + +#else + ixgbe_hwmon->device = + hwmon_device_register(pci_dev_to_dev(adapter->pdev)); + + if (IS_ERR(ixgbe_hwmon->device)) { + rc = PTR_ERR(ixgbe_hwmon->device); + goto err; + } + +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ +no_thermal: +#endif /* IXGBE_HWMON */ + goto exit; + +err: + ixgbe_sysfs_del_adapter(adapter); exit: return rc; } - +#endif /* IXGBE_SYSFS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 6d01700b46bc..96462b46166b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -1,30 +1,27 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #ifndef _IXGBE_TXRX_COMMON_H_ #define _IXGBE_TXRX_COMMON_H_ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + #define IXGBE_XDP_PASS 0 #define IXGBE_XDP_CONSUMED BIT(0) #define IXGBE_XDP_TX BIT(1) #define IXGBE_XDP_REDIR BIT(2) -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ - IXGBE_TXD_CMD_RS) - +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf); -bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); -void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); -void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); +#else +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_buff *xdp); +#endif void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); -void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); - +#ifdef HAVE_AF_XDP_ZC_SUPPORT void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring); @@ -42,7 +39,25 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget); +#ifdef HAVE_NDO_XSK_WAKEUP int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +#else +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +#endif void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); +#endif +#endif /* HAVE_XDP_SUPPORT */ -#endif /* #define _IXGBE_TXRX_COMMON_H_ */ +bool ixgbe_cleanup_headers(struct ixgbe_ring __maybe_unused *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); + +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); +#endif /* _IXGBE_TXRX_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2be1c4c72435..02a633c189c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,192 +1,250 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ -#include <linux/types.h> -#include <linux/mdio.h> -#include <linux/netdevice.h> +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - IXGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - IXGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where a timeout occurred, a lock could not be obtained, or + * data was not received within the time limit. + * + * - IXGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - IXGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - IXGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - IXGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
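+ *
+ * As an illustrative usage sketch (not taken from this patch; the
+ * ERROR_REPORT* macros are defined elsewhere in the driver and their
+ * exact signature is assumed here), an unsupported SFP+ module would be
+ * reported under IXGBE_ERROR_UNSUPPORTED rather than
+ * IXGBE_ERROR_ARGUMENT, since the caller's argument was valid but the
+ * configuration is not:
+ *	ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ *		      "SFP+ module not supported\n");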
+ */ + +#include "ixgbe_osdep.h" + +/* Override this by setting IOMEM in your ixgbe_osdep.h header */ +#ifndef IOMEM +#define IOMEM +#endif + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 /* Device IDs */ -#define IXGBE_DEV_ID_82598 0x10B6 -#define IXGBE_DEV_ID_82598_BX 0x1508 -#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 -#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 -#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB -#define IXGBE_DEV_ID_82598AT 0x10C8 -#define IXGBE_DEV_ID_82598AT2 0x150B -#define IXGBE_DEV_ID_82598EB_CX4 0x10DD -#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC -#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 -#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 -#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 -#define IXGBE_DEV_ID_82599_KX4 0x10F7 -#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 -#define IXGBE_DEV_ID_82599_KR 0x1517 -#define IXGBE_DEV_ID_82599_T3_LOM 0x151C -#define IXGBE_DEV_ID_82599_CX4 0x10F9 -#define IXGBE_DEV_ID_82599_SFP 0x10FB -#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a -#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 -#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 -#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 -#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 -#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 -#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B #define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 #define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D #define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 #define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 #define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE -#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 -#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 -#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D -#define IXGBE_DEV_ID_82599EN_SFP 0x1557 -#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 -#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC -#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 -#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C -#define IXGBE_DEV_ID_82599_LS 0x154F -#define IXGBE_DEV_ID_X540T 0x1528 -#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A -#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 -#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 +/* Placeholder value, pending official value. */ +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 +#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA +#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 -#define IXGBE_DEV_ID_X550T 0x1563 -#define IXGBE_DEV_ID_X550T1 0x15D1 -#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA -#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB -#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC -#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD -#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 -#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 -#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 -#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 -#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 -#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 -#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 -#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE -#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 -#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_CAT(r,m) IXGBE_##r##m -/* VF Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 - -#define IXGBE_CAT(r, m) IXGBE_##r##_##m - -#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)]) +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) /* General Registers */ -#define IXGBE_CTRL 0x00000 -#define IXGBE_STATUS 0x00008 -#define IXGBE_CTRL_EXT 0x00018 -#define IXGBE_ESDP 0x00020 -#define IXGBE_EODSDP 0x00028 - -#define IXGBE_I2CCTL_8259X 0x00028 -#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL_82599 0x00028 +#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 #define IXGBE_I2CCTL_X550 0x15F5C #define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 #define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 -#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL) - -#define IXGBE_LEDCTL 0x00200 -#define IXGBE_FRTIMER 0x00048 -#define IXGBE_TCPTIMER 0x0004C -#define IXGBE_CORESPARE 0x00600 -#define IXGBE_EXVET 0x05078 +#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) +#define IXGBE_PHY_GPIO 0x00028 +#define IXGBE_MAC_GPIO 0x00030 +#define IXGBE_PHYINT_STATUS0 0x00100 +#define IXGBE_PHYINT_STATUS1 0x00104 +#define IXGBE_PHYINT_STATUS2 0x00108 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 /* NVM Registers */ -#define IXGBE_EEC_8259X 0x10010 -#define IXGBE_EEC_X540 IXGBE_EEC_8259X -#define IXGBE_EEC_X550 IXGBE_EEC_8259X 
-#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X +#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC +#define IXGBE_EEC_X550 IXGBE_EEC +#define IXGBE_EEC_X550EM_x IXGBE_EEC #define IXGBE_EEC_X550EM_a 0x15FF8 -#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC) -#define IXGBE_EERD 0x10014 -#define IXGBE_EEWR 0x10018 -#define IXGBE_FLA_8259X 0x1001C -#define IXGBE_FLA_X540 IXGBE_FLA_8259X -#define IXGBE_FLA_X550 IXGBE_FLA_8259X -#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X +#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) + +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 + +#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA +#define IXGBE_FLA_X550 IXGBE_FLA +#define IXGBE_FLA_X550EM_x IXGBE_FLA #define IXGBE_FLA_X550EM_a 0x15F68 -#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) -#define IXGBE_EEMNGCTL 0x10110 -#define IXGBE_EEMNGDATA 0x10114 -#define IXGBE_FLMNGCTL 0x10118 -#define IXGBE_FLMNGDATA 0x1011C -#define IXGBE_FLMNGCNT 0x10120 -#define IXGBE_FLOP 0x1013C -#define IXGBE_GRC_8259X 0x10200 -#define IXGBE_GRC_X540 IXGBE_GRC_8259X -#define IXGBE_GRC_X550 IXGBE_GRC_8259X -#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X +#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) +#define IXGBE_FLA_FL_SIZE_SHIFT_X540 17 +#define IXGBE_FLA_FL_SIZE_SHIFT_X550 12 +#define IXGBE_FLA_FL_SIZE_MASK_X540 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X540) +#define IXGBE_FLA_FL_SIZE_MASK_X550 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X550) + +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C + +#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC +#define IXGBE_GRC_X550 IXGBE_GRC +#define IXGBE_GRC_X550EM_x IXGBE_GRC #define IXGBE_GRC_X550EM_a 0x15F64 -#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) +#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) + +#define IXGBE_PHYDBG 0x10218 /* General Receive Control */ -#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ -#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ -#define IXGBE_VPDDIAG0 0x10204 -#define IXGBE_VPDDIAG1 0x10208 +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN_8259X 0x00000001 -#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN #define IXGBE_I2C_CLK_IN_X550 0x00004000 #define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 #define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 -#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) -#define IXGBE_I2C_CLK_OUT_8259X 0x00000002 -#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT #define IXGBE_I2C_CLK_OUT_X550 0x00000200 #define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 #define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 -#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) 
 /* General Receive Control */
-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
-#define IXGBE_VPDDIAG0 0x10204
-#define IXGBE_VPDDIAG1 0x10208
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
 /* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_8259X 0x00000001
-#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN
 #define IXGBE_I2C_CLK_IN_X550 0x00004000
 #define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
 #define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550
-#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
-#define IXGBE_I2C_CLK_OUT_8259X 0x00000002
-#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT
 #define IXGBE_I2C_CLK_OUT_X550 0x00000200
 #define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
 #define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550
-#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
-#define IXGBE_I2C_DATA_IN_8259X 0x00000004
-#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN
 #define IXGBE_I2C_DATA_IN_X550 0x00001000
 #define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
 #define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550
-#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
-#define IXGBE_I2C_DATA_OUT_8259X 0x00000008
-#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X
+#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT
 #define IXGBE_I2C_DATA_OUT_X550 0x00000400
 #define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
 #define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550
-#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
-#define IXGBE_I2C_DATA_OE_N_EN_8259X 0
-#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X
+#define IXGBE_I2C_DATA_OE_N_EN 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN
 #define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
 #define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
 #define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550
-#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
-#define IXGBE_I2C_BB_EN_8259X 0
-#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X
+#define IXGBE_I2C_BB_EN 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN
 #define IXGBE_I2C_BB_EN_X550 0x00000100
 #define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
 #define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
-#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
-#define IXGBE_I2C_CLK_OE_N_EN_8259X 0
-#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X
+#define IXGBE_I2C_CLK_OE_N_EN 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN
 #define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
 #define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
 #define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550
-#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
-
+#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
 #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -216,21 +274,21 @@ struct ixgbe_thermal_sensor_data {
 #define NVM_OROM_PATCH_MASK 0xFF
 #define NVM_OROM_SHIFT 8
-#define NVM_VER_MASK 0x00FF /* version mask */
-#define NVM_VER_SHIFT 8 /* version bit shift */
-#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
-#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
-#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
-#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
 #define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
-#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
-#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
-#define NVM_ETK_OFF_HI 0x2E /* version high order word */
-#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
 #define NVM_VER_INVALID 0xFFFF
 #define NVM_ETK_VALID 0x8000
 #define NVM_INVALID_PTR 0xFFFF
-#define NVM_VER_SIZE 32 /* version sting size */
+#define NVM_VER_SIZE 32 /* version string size */
 struct ixgbe_nvm_version {
 	u32 etk_id;
@@ -247,165 +305,180 @@ struct ixgbe_nvm_version {
 	u8 or_major;
 	u16 or_build;
 	u8 or_patch;
+
 };
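The NVM_ETK_* offsets above describe how the 32-bit eTrack ID is stored as two 16-bit EEPROM words, with bit 15 of the high word (NVM_ETK_VALID) acting as a validity flag. A self-contained sketch of the assembly; eeprom_read() is a stand-in for the driver's EEPROM read op, the contents are faked, and masking the valid flag out is an assumption of this demo.

#include <stdint.h>
#include <stdio.h>

#define DEMO_ETK_OFF_LOW 0x2D /* version low order word */
#define DEMO_ETK_OFF_HI 0x2E /* version high order word */
#define DEMO_ETK_SHIFT 16 /* high version word shift */
#define DEMO_ETK_VALID 0x8000

/* Hypothetical stand-in for hw->eeprom.ops.read(): returns the 16-bit
 * word at the given offset from a fake EEPROM image. */
static uint16_t eeprom_read(uint16_t offset)
{
	return offset == DEMO_ETK_OFF_HI ? 0x8001 : 0x86AA;
}

int main(void)
{
	uint16_t lo = eeprom_read(DEMO_ETK_OFF_LOW);
	uint16_t hi = eeprom_read(DEMO_ETK_OFF_HI);

	if (hi & DEMO_ETK_VALID) {
		/* Drop the valid flag, then glue the two words together. */
		uint32_t etk_id = ((uint32_t)(hi & ~DEMO_ETK_VALID)
				   << DEMO_ETK_SHIFT) | lo;
		printf("etrack id: 0x%08X\n", (unsigned)etk_id);
	}
	return 0;
}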
 /* Interrupt Registers */
-#define IXGBE_EICR 0x00800
-#define IXGBE_EICS 0x00808
-#define IXGBE_EIMS 0x00880
-#define IXGBE_EIMC 0x00888
-#define IXGBE_EIAC 0x00810
-#define IXGBE_EIAM 0x00890
-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
 /*
  * 82598 EITR is 16 bits but set the limits based on the max
- * supported by all ixgbe hardware. 82599 EITR is only 12 bits,
- * with the lower 3 always zero.
+ * supported by all ixgbe hardware
  */
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR 0x00000FF8
-#define IXGBE_MIN_EITR 8
-#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
-#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
-#define IXGBE_EITR_LLI_MOD 0x00008000
-#define IXGBE_EITR_CNT_WDIS 0x80000000
-#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
-#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
-#define IXGBE_EITRSEL 0x00894
-#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
-#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
-#define IXGBE_GPIE 0x00898
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD 0x00008000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL 0x00894
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
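The interrupt-rate bounds above follow from the EITR layout described in the comment: the throttle interval occupies bits 3..11 (the lower 3 bits are always zero, hence MIN_EITR of 8 and the 0xFF8 mask), in roughly 2.048 us units, which is where 10^9 / 2048 == 488281 comes from. A rough conversion sketch; demo_int_rate_to_eitr() is an illustrative helper, not the driver's logic.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_INT_RATE 488281
#define DEMO_MIN_INT_RATE 956
#define DEMO_MAX_EITR 0x00000FF8
#define DEMO_MIN_EITR 8

/* Convert an interrupts/sec target into an EITR register value:
 * interval (2.048 us units) = 10^9 / (2048 * rate), stored at bit 3. */
static uint32_t demo_int_rate_to_eitr(uint32_t rate)
{
	uint32_t eitr;

	if (rate > DEMO_MAX_INT_RATE)
		rate = DEMO_MAX_INT_RATE;
	if (rate < DEMO_MIN_INT_RATE)
		rate = DEMO_MIN_INT_RATE;

	eitr = ((1000000000u / 2048u) / rate) << 3;

	if (eitr > DEMO_MAX_EITR)
		eitr = DEMO_MAX_EITR;
	if (eitr < DEMO_MIN_EITR)
		eitr = DEMO_MIN_EITR;
	return eitr;
}

int main(void)
{
	/* 20000 ints/s -> interval 24 units -> EITR 0x0C0 */
	printf("20000 ints/s -> EITR 0x%03X\n",
	       (unsigned)demo_int_rate_to_eitr(20000));
	return 0;
}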
 /* Flow Control Registers */
-#define IXGBE_FCADBUL 0x03210
-#define IXGBE_FCADBUH 0x03214
-#define IXGBE_FCAMACL 0x04328
-#define IXGBE_FCAMACH 0x0432C
-#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_PFCTOP 0x03008
-#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTV 0x032A0
-#define IXGBE_FCCFG 0x03D00
-#define IXGBE_TFCS 0x0CE00
+#define IXGBE_FCADBUL 0x03210
+#define IXGBE_FCADBUH 0x03214
+#define IXGBE_FCAMACL 0x04328
+#define IXGBE_FCAMACH 0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_FCCFG 0x03D00
+#define IXGBE_TFCS 0x0CE00
 /* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
 (0x0D000 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
 (0x0D004 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
 (0x0D008 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
 (0x0D010 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
 (0x0D018 + (((_i) - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
- (0x0D028 + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
- (0x0D02C + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCDBU 0x03028
-#define IXGBE_RDDCC 0x02F20
-#define IXGBE_RXMEMWRAP 0x03190
-#define IXGBE_STARCTRL 0x03024
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RDDCC 0x02F20
+#define IXGBE_RXMEMWRAP 0x03190
+#define IXGBE_STARCTRL 0x03024
 /*
  * Split and Replication Receive Control Registers
  * 00-15 : 0x02100 + n*4
  * 16-64 : 0x01014 + n*0x40
  * 64-127: 0x0D014 + (n-64)*0x40
  */
-#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
- (0x0D014 + (((_i) - 64) * 0x40))))
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40))))
 /*
  * Rx DCA Control Register:
  * 00-15 : 0x02200 + n*4
 * 16-64 : 0x0100C + n*0x40
 * 64-127: 0x0D00C + (n-64)*0x40
 */
-#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
 (0x0D00C + (((_i) - 64) * 0x40))))
-#define IXGBE_RDRXCTL 0x02F00
-#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
- /* 8 of these 0x03C00 - 0x03C1C */
-#define IXGBE_RXCTRL 0x03000
-#define IXGBE_DROPEN 0x03D04
-#define IXGBE_RXPBSIZE_SHIFT 10
+#define IXGBE_RDRXCTL 0x02F00
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+#define IXGBE_RXPBSIZE_MASK 0x000FFC00
 /* Receive Registers */
-#define IXGBE_RXCSUM 0x05000
-#define IXGBE_RFCTL 0x05008
-#define IXGBE_DRECCCTL 0x02F08
-#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2 0x02F8C
+
 /* Multicast Table Array - 128 entries */
-#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
-#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
-#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
 /* Packet split receive type */
-#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
 /* array of 4096 1-bit vlan filters */
-#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
 /*array of 4096 4-bit vlan vmdq indices */
-#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-#define IXGBE_FCTRL 0x05080
-#define IXGBE_VLNCTRL 0x05088
-#define IXGBE_MCSTCTRL 0x05090
-#define IXGBE_MRQC 0x05818
-#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
-#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
-#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
-#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
-#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
-#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
-#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
-#define IXGBE_RQTC 0x0EC70
-#define IXGBE_MTQC 0x08120
-#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
-#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_PFFLPL 0x050B0
-#define IXGBE_PFFLPH 0x050B4
-#define IXGBE_VT_CTL 0x051B0
-#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
-#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
-#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
-#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
-#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
-#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
-#define IXGBE_QDE 0x2F04
-#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
-#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
-#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
-#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
-#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
-#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
-#define IXGBE_RXFECCERR0 0x051B8
-#define IXGBE_LLITHRESH 0x0EC90
-#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_IMIRVP 0x05AC0
-#define IXGBE_VMD_CTL 0x0581C
-#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
-#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
-#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC 0x0EC70
+#define IXGBE_MTQC 0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_PFFLPL 0x050B0
+#define IXGBE_PFFLPH 0x050B4
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR_INDEX(_i) ((_i) >> 4)
+#define IXGBE_PFMBICR_SHIFT(_i) ((_i) % 16)
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFVFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_PFVFLREC(_i) (0x00700 + ((_i) * 4))
+#define IXGBE_PFVFLRE_INDEX(_i) ((_i) >> 5)
+#define IXGBE_PFVFLRE_SHIFT(_i) ((_i) % 32)
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
+#define IXGBE_LMVM_RX 0x2FA4
+#define IXGBE_LMVM_TX 0x8124
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0 0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
 /* Registers for setting up RSS on X550 with SRIOV
  * _p - pool number (0..63)
@@ -416,950 +489,1095 @@ struct ixgbe_nvm_version {
 #define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
 /* Flow Director registers */
-#define IXGBE_FDIRCTRL 0x0EE00
-#define IXGBE_FDIRHKEY 0x0EE68
-#define IXGBE_FDIRSKEY 0x0EE6C
-#define IXGBE_FDIRDIP4M 0x0EE3C
-#define IXGBE_FDIRSIP4M 0x0EE40
-#define IXGBE_FDIRTCPM 0x0EE44
-#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
 #define IXGBE_FDIRSCTPM 0x0EE78
-#define IXGBE_FDIRIP6M 0x0EE74
-#define IXGBE_FDIRM 0x0EE70
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
 /* Flow Director Stats registers */
-#define IXGBE_FDIRFREE 0x0EE38
-#define IXGBE_FDIRLEN 0x0EE4C
-#define IXGBE_FDIRUSTAT 0x0EE50
-#define IXGBE_FDIRFSTAT 0x0EE54
-#define IXGBE_FDIRMATCH 0x0EE58
-#define IXGBE_FDIRMISS 0x0EE5C
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
 /* Flow Director Programming registers */
 #define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
-#define IXGBE_FDIRIPSA 0x0EE18
-#define IXGBE_FDIRIPDA 0x0EE1C
-#define IXGBE_FDIRPORT 0x0EE20
-#define IXGBE_FDIRVLAN 0x0EE24
-#define IXGBE_FDIRHASH 0x0EE28
-#define IXGBE_FDIRCMD 0x0EE2C
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
 /* Transmit DMA registers */
-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
-#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
-#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
-#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
-#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
-#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
-#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
-#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
-#define IXGBE_DTXCTL 0x07E00
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
-#define IXGBE_DMATXCTL 0x04A80
-#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
-#define IXGBE_PFDTXGSWC 0x08220
-#define IXGBE_DTXMXSZRQ 0x08100
-#define IXGBE_DTXTCPFLGL 0x04A88
-#define IXGBE_DTXTCPFLGH 0x04A8C
-#define IXGBE_LBDRPEN 0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
-#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
-#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
-#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
-#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
-#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
-#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
+#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
-#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
 /* Anti-spoofing defines */
-#define IXGBE_SPOOF_MACAS_MASK 0xFF
-#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
-#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
 #define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
 #define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
-#define IXGBE_PFVFSPOOF_REG_COUNT 8
-
-#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
 /* Tx DCA Control register : 128 of these (0-127) */
-#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
-#define IXGBE_TIPG 0x0CB00
-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_MNGTXMAP 0x0CD10
-#define IXGBE_TIPG_FIBER_DEFAULT 3
-#define IXGBE_TXPBSIZE_SHIFT 10
+#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
 /* Wake up registers */
-#define IXGBE_WUC 0x05800
-#define IXGBE_WUFC 0x05808
-#define IXGBE_WUS 0x05810
-#define IXGBE_IPAV 0x05838
-#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
-#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
-#define IXGBE_WUPL 0x05900
-#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */
+#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */
 #define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
-#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
-#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
- * Filter Table */
 /* masks for accessing VXLAN and GENEVE UDP ports */
-#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
-#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
-#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
+#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
-#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100))
+#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100))
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
-#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+/* Four Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+/* Six Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
+/* Eight Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
 /* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
-#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
-#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
 /* Definitions for power management and wakeup registers */
 /* Wake Up Control */
-#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
-#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
 /* Wake Up Filter Control */
-#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
-#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
-#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
-#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
-#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
-#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
-#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
+#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
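The 4/6/8-filter masks above are just an n-bit field shifted up to IXGBE_WUFC_FLX_OFFSET. A small sketch to sanity-check the constants; the helper is illustrative, not driver code.

#include <assert.h>
#include <stdint.h>

#define DEMO_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */

/* Derive the n-filter enable mask the same way the fixed
 * IXGBE_WUFC_FLX_FILTERS* constants are laid out. */
static uint32_t demo_wufc_flx_mask(unsigned int nfilters)
{
	return ((1u << nfilters) - 1) << DEMO_WUFC_FLX_OFFSET;
}

int main(void)
{
	assert(demo_wufc_flx_mask(4) == 0x000F0000); /* ..FLX_FILTERS */
	assert(demo_wufc_flx_mask(6) == 0x003F0000); /* ..FLX_FILTERS_6 */
	assert(demo_wufc_flx_mask(8) == 0x00FF0000); /* ..FLX_FILTERS_8 */
	return 0;
}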
 /* Wake Up Status */
-#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
-#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
-#define IXGBE_WUS_EX IXGBE_WUFC_EX
-#define IXGBE_WUS_MC IXGBE_WUFC_MC
-#define IXGBE_WUS_BC IXGBE_WUFC_BC
-#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
-#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
-#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
-#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
-#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
-#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
-#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
-#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
-#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
-#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
-#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK
+/* Proxy Status */
+#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */
+#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
-/* Wake Up Packet Length */
-#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+/* Proxying Filter Control */
+#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
 /* DCB registers */
-#define MAX_TRAFFIC_CLASS 8
-#define X540_TRAFFIC_CLASS 4
-#define DEF_TRAFFIC_CLASS 1
-#define IXGBE_RMCS 0x03D00
-#define IXGBE_DPMCS 0x07F40
-#define IXGBE_PDPMCS 0x0CD00
-#define IXGBE_RUPPBMR 0x050A0
-#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+/* Power Management */
+/* DMA Coalescing configuration */
+struct ixgbe_dmac_config {
+	u16 watchdog_timer; /* usec units */
+	bool fcoe_en;
+	u32 link_speed;
+	u8 fcoe_tc;
+	u8 num_tcs;
+};
+
+/*
+ * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed.
+ * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec; 70 * 1250 ==
+ * 87500 bytes [85KB]
+ */
+#define IXGBE_DMACRXT_10G 0x55
+#define IXGBE_DMACRXT_1G 0x09
+#define IXGBE_DMACRXT_100M 0x01
+
+/* DMA Coalescing registers */
+#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */
+#define IXGBE_DMACR 0x02400 /* Control register */
+#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */
+/* DMA Coalescing register fields */
+#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */
+#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */
+#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */
+#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000
+#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16
+#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */
+#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */
+#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */
+#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */
+#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */
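The 0x55 threshold can be sanity-checked from the comment's arithmetic: 10 Gb/s is 10,000 bits/us, i.e. 1250 bytes/us of line rate, so 70 us of traffic is 87500 bytes, which truncates to 85 KB == 0x55 in the kilobyte-granular threshold field. A quick check in plain C, no hardware access:

#include <assert.h>
#include <stdio.h>

#define DEMO_DMACRXT_10G 0x55

int main(void)
{
	/* 10 Gb/s == 10,000 bits/us == 1250 bytes/us of line rate. */
	unsigned int bytes_per_us = 10000 / 8;
	/* 70 us worth of traffic, expressed in kilobytes. */
	unsigned int threshold_kb = (70 * bytes_per_us) / 1024;

	assert(threshold_kb == 85); /* 87500 bytes -> 85 KB */
	assert(threshold_kb == DEMO_DMACRXT_10G);
	printf("10G DMAC Rx threshold: %u KB (0x%02X)\n",
	       threshold_kb, threshold_kb);
	return 0;
}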
+
+/* EEE registers */
+#define IXGBE_EEER 0x043A0 /* EEE register */
+#define IXGBE_EEE_STAT 0x04398 /* EEE Status */
+#define IXGBE_EEE_SU 0x04380 /* EEE Set up */
+#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26
+#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */
+#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */
+
+/* EEE register fields */
+#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */
+#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */
+#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */
+#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */
+#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */
 /* Security Control Registers */
-#define IXGBE_SECTXCTRL 0x08800
-#define IXGBE_SECTXSTAT 0x08804
-#define IXGBE_SECTXBUFFAF 0x08808
-#define IXGBE_SECTXMINIFG 0x08810
-#define IXGBE_SECRXCTRL 0x08D00
-#define IXGBE_SECRXSTAT 0x08D04
+#define IXGBE_SECTXCTRL 0x08800
+#define IXGBE_SECTXSTAT 0x08804
+#define IXGBE_SECTXBUFFAF 0x08808
+#define IXGBE_SECTXMINIFG 0x08810
+#define IXGBE_SECRXCTRL 0x08D00
+#define IXGBE_SECRXSTAT 0x08D04
 /* Security Bit Fields and Masks */
-#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
-#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
-#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
+#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
-#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
-#define IXGBE_SECTXSTAT_SECTX_OFF_DIS 0x00000002
-#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000004
+#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
-#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
-#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
+#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
-#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
-#define IXGBE_SECRXSTAT_SECRX_OFF_DIS 0x00000002
-#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000004
+#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
 /* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCAP 0x08A00
-#define IXGBE_LSECRXCAP 0x08F00
-#define IXGBE_LSECTXCTRL 0x08A04
-#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA 0x08A10
-#define IXGBE_LSECTXPN0 0x08A14
-#define IXGBE_LSECTXPN1 0x08A18
-#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL 0x08F04
-#define IXGBE_LSECRXSCL 0x08F08
-#define IXGBE_LSECRXSCH 0x08F0C
-#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
-#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
-#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
-#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
-#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
-#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
-#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
-#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
-#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
-#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
-#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
-#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
-#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
-#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
-#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
-#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
-#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
-#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
-#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
+#define IXGBE_LSECTXCAP 0x08A00
+#define IXGBE_LSECRXCAP 0x08F00
+#define IXGBE_LSECTXCTRL 0x08A04
+#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA 0x08A10
+#define IXGBE_LSECTXPN0 0x08A14
+#define IXGBE_LSECTXPN1 0x08A18
+#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL 0x08F04
+#define IXGBE_LSECRXSCL 0x08F08
+#define IXGBE_LSECRXSCH 0x08F0C
+#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
 /* LinkSec (MacSec) Bit Fields and Masks */
-#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
-#define IXGBE_LSECTXCAP_SUM_SHIFT 16
-#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
-#define IXGBE_LSECRXCAP_SUM_SHIFT 16
+#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT 16
+#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT 16
-#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
-#define IXGBE_LSECTXCTRL_DISABLE 0x0
-#define IXGBE_LSECTXCTRL_AUTH 0x1
-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
-#define IXGBE_LSECTXCTRL_AISCI 0x00000020
-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
-#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
+#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE 0x0
+#define IXGBE_LSECTXCTRL_AUTH 0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define IXGBE_LSECTXCTRL_AISCI 0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
-#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
-#define IXGBE_LSECRXCTRL_EN_SHIFT 2
-#define IXGBE_LSECRXCTRL_DISABLE 0x0
-#define IXGBE_LSECRXCTRL_CHECK 0x1
-#define IXGBE_LSECRXCTRL_STRICT 0x2
-#define IXGBE_LSECRXCTRL_DROP 0x3
-#define IXGBE_LSECRXCTRL_PLSH 0x00000040
-#define IXGBE_LSECRXCTRL_RP 0x00000080
-#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT 2
+#define IXGBE_LSECRXCTRL_DISABLE 0x0
+#define IXGBE_LSECRXCTRL_CHECK 0x1
+#define IXGBE_LSECRXCTRL_STRICT 0x2
+#define IXGBE_LSECRXCTRL_DROP 0x3
+#define IXGBE_LSECRXCTRL_PLSH 0x00000040
+#define IXGBE_LSECRXCTRL_RP 0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
 /* IpSec Registers */
-#define IXGBE_IPSTXIDX 0x08900
-#define IXGBE_IPSTXSALT 0x08904
-#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX 0x08E00
-#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI 0x08E14
-#define IXGBE_IPSRXIPIDX 0x08E18
-#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT 0x08E2C
-#define IXGBE_IPSRXMOD 0x08E30
+#define IXGBE_IPSTXIDX 0x08900
+#define IXGBE_IPSTXSALT 0x08904
+#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX 0x08E00
+#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI 0x08E14
+#define IXGBE_IPSRXIPIDX 0x08E18
+#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT 0x08E2C
+#define IXGBE_IPSRXMOD 0x08E30
-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
 /* DCB registers */
-#define IXGBE_RTRPCS 0x02430
-#define IXGBE_RTTDCS 0x04900
-#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTPCS 0x0CD00
-#define IXGBE_RTRUP2TC 0x03020
-#define IXGBE_RTTUP2TC 0x0C800
-#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDQSEL 0x04904
-#define IXGBE_RTTDT1C 0x04908
-#define IXGBE_RTTDT1S 0x0490C
-#define IXGBE_RTTQCNCR 0x08B00
-#define IXGBE_RTTQCNTG 0x04A90
-#define IXGBE_RTTBCNRD 0x0498C
-#define IXGBE_RTTQCNRR 0x0498C
-#define IXGBE_RTTDTECC 0x04990
-#define IXGBE_RTTDTECC_NO_BCN 0x00000100
-#define IXGBE_RTTBCNRC 0x04984
-#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTRPCS 0x02430
+#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS 0x0CD00
+#define IXGBE_RTRUP2TC 0x03020
+#define IXGBE_RTTUP2TC 0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL 0x04904
+#define IXGBE_RTTDT1C 0x04908
+#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTQCNCR 0x08B00
+#define IXGBE_RTTQCNTG 0x04A90
+#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTQCNRR 0x0498C
+#define IXGBE_RTTDTECC 0x04990
+#define IXGBE_RTTDTECC_NO_BCN 0x00000100
+
+#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
 #define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
 #define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
-#define IXGBE_RTTBCNRC_RF_INT_MASK \
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
 (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-#define IXGBE_RTTBCNRM 0x04980
-#define IXGBE_RTTQCNRM 0x04980
+#define IXGBE_RTTBCNRM 0x04980
+#define IXGBE_RTTQCNRM 0x04980
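IXGBE_RTTBCNRC packs its rate factor as a 14.14 fixed-point divisor: the integer part of link_speed / tx_rate above RF_INT_SHIFT, the fraction in the low 14 bits, and RS_ENA turning the rate scheduler on. A hedged sketch of composing the value; demo_rttbcnrc() is illustrative, and the real driver's computation may differ in rounding and overflow handling.

#include <stdint.h>
#include <stdio.h>

#define DEMO_RTTBCNRC_RS_ENA 0x80000000
#define DEMO_RTTBCNRC_RF_DEC_MASK 0x00003FFF
#define DEMO_RTTBCNRC_RF_INT_SHIFT 14
#define DEMO_RTTBCNRC_RF_INT_MASK \
	(DEMO_RTTBCNRC_RF_DEC_MASK << DEMO_RTTBCNRC_RF_INT_SHIFT)

/* Compose a 14.14 fixed-point rate-limit divisor with the
 * rate-scheduler enable bit set. */
static uint32_t demo_rttbcnrc(uint32_t link_mbps, uint32_t tx_rate_mbps)
{
	uint32_t rf_int = link_mbps / tx_rate_mbps;
	uint32_t rf_dec = link_mbps % tx_rate_mbps;
	uint32_t val = DEMO_RTTBCNRC_RS_ENA;

	rf_dec = (rf_dec << DEMO_RTTBCNRC_RF_INT_SHIFT) / tx_rate_mbps;
	val |= (rf_int << DEMO_RTTBCNRC_RF_INT_SHIFT) &
	       DEMO_RTTBCNRC_RF_INT_MASK;
	val |= rf_dec & DEMO_RTTBCNRC_RF_DEC_MASK;
	return val;
}

int main(void)
{
	/* Limit a queue to 3000 Mb/s on a 10G link: divisor 3.333... */
	printf("RTTBCNRC = 0x%08X\n", (unsigned)demo_rttbcnrc(10000, 3000));
	return 0;
}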
+/* FCoE DMA Context Registers */
 /* FCoE Direct DMA Context */
 #define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
-/* FCoE DMA Context Registers */
-#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
-#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
-#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
-#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
-#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */
-#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
-#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
-#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
-#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */
-#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */
-#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
-#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
-#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
-
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
 /* FCoE SOF/EOF */
-#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
-#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
-#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
-#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */
+#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */
+#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16
 /* FCoE Direct Filter Context */
 #define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
 #define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4))
-/* FCoE Filter Context Registers */
-#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
-#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
-#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */
-#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
-#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
 /* FCoE Receive Control */
-#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */
-#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
 /* FCoE Redirection */
-#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
-#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
-#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
-#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
-#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
-#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
 #define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
 /* Higher 7 bits for the queue index */
 #define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000
 #define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16
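On X550 an FCRETA entry appears to carry two 7-bit queue indices, one in the low byte and a second field at ENTRY_HIGH_SHIFT, which is how 32 table slots cover more queues. A small illustrative packing helper under that reading; this is not the driver's code.

#include <stdint.h>
#include <stdio.h>

#define DEMO_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
#define DEMO_FCRETA_ENTRY_HIGH_MASK 0x007F0000
#define DEMO_FCRETA_ENTRY_HIGH_SHIFT 16

/* Pack the low/high queue indices of one X550 FCoE redirection
 * table entry. */
static uint32_t demo_fcreta_entry(uint32_t queue_lo, uint32_t queue_hi)
{
	uint32_t entry = queue_lo & DEMO_FCRETA_ENTRY_MASK;

	entry |= (queue_hi << DEMO_FCRETA_ENTRY_HIGH_SHIFT) &
		 DEMO_FCRETA_ENTRY_HIGH_MASK;
	return entry;
}

int main(void)
{
	printf("FCRETA[n] = 0x%08X\n", (unsigned)demo_fcreta_entry(5, 37));
	return 0;
}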
 /* Stats registers */
-#define IXGBE_CRCERRS 0x04000
-#define IXGBE_ILLERRC 0x04004
-#define IXGBE_ERRBC 0x04008
-#define IXGBE_MSPDC 0x04010
-#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
-#define IXGBE_MLFC 0x04034
-#define IXGBE_MRFC 0x04038
-#define IXGBE_RLEC 0x04040
-#define IXGBE_LXONTXC 0x03F60
-#define IXGBE_LXONRXC 0x0CF60
-#define IXGBE_LXOFFTXC 0x03F68
-#define IXGBE_LXOFFRXC 0x0CF68
-#define IXGBE_LXONRXCNT 0x041A4
-#define IXGBE_LXOFFRXCNT 0x041A8
-#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
-#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
-#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
-#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
-#define IXGBE_PRC64 0x0405C
-#define IXGBE_PRC127 0x04060
-#define IXGBE_PRC255 0x04064
-#define IXGBE_PRC511 0x04068
-#define IXGBE_PRC1023 0x0406C
-#define IXGBE_PRC1522 0x04070
-#define IXGBE_GPRC 0x04074
-#define IXGBE_BPRC 0x04078
-#define IXGBE_MPRC 0x0407C
-#define IXGBE_GPTC 0x04080
-#define IXGBE_GORCL 0x04088
-#define IXGBE_GORCH 0x0408C
-#define IXGBE_GOTCL 0x04090
-#define IXGBE_GOTCH 0x04094
-#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
-#define IXGBE_RUC 0x040A4
-#define IXGBE_RFC 0x040A8
-#define IXGBE_ROC 0x040AC
-#define IXGBE_RJC 0x040B0
-#define IXGBE_MNGPRC 0x040B4
-#define IXGBE_MNGPDC 0x040B8
-#define IXGBE_MNGPTC 0x0CF90
-#define IXGBE_TORL 0x040C0
-#define IXGBE_TORH 0x040C4
-#define IXGBE_TPR 0x040D0
-#define IXGBE_TPT 0x040D4
-#define IXGBE_PTC64 0x040D8
-#define IXGBE_PTC127 0x040DC
-#define IXGBE_PTC255 0x040E0
-#define IXGBE_PTC511 0x040E4
-#define IXGBE_PTC1023 0x040E8
-#define IXGBE_PTC1522 0x040EC
-#define IXGBE_MPTC 0x040F0
-#define IXGBE_BPTC 0x040F4
-#define IXGBE_XEC 0x04120
-#define IXGBE_SSVPC 0x08780
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+#define IXGBE_SSVPC 0x08780
-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
 (0x08600 + ((_i) * 4)))
-#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
-#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
-#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
-#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
-#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
-#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
-#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
-#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
-#define IXGBE_O2BGPTC 0x041C4
-#define IXGBE_O2BSPC 0x087B0
-#define IXGBE_B2OSPC 0x041C0
-#define IXGBE_B2OGPRC 0x02F90
-#define IXGBE_PCRC8ECL 0x0E810
-#define IXGBE_PCRC8ECH 0x0E811
-#define IXGBE_PCRC8ECH_MASK 0x1F
-#define IXGBE_LDPCECL 0x0E820
-#define IXGBE_LDPCECH 0x0E821
-
-/* MII clause 22/28 definitions */
-#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
-
-#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */
-#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
-
-#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
-
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
-#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
-
-#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
-#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
-#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
-#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
-#define IXGBE_MII_RESTART 0x200
-#define IXGBE_MII_AUTONEG_LINK_UP 0x04
-#define IXGBE_MII_AUTONEG_REG 0x0
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_BUPRC 0x04180 +#define IXGBE_BMPRC 0x04184 +#define IXGBE_BBPRC 0x04188 +#define IXGBE_BUPTC 0x0418C +#define IXGBE_BMPTC 0x04190 +#define IXGBE_BBPTC 0x04194 +#define IXGBE_BCRCERRS 0x04198 +#define IXGBE_BXONRXC 0x0419C +#define IXGBE_BXOFFRXC 0x041E0 +#define IXGBE_BXONTXC 0x041E4 +#define IXGBE_BXOFFTXC 0x041E8 /* Management */ -#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MANC 0x05820 -#define IXGBE_MFVAL 0x05824 -#define IXGBE_MANC2H 0x05860 -#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MIPAF 0x058B0 -#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ -#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_LSWFW 0x15014 +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15F14 +#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ +#define IXGBE_BMCIPVAL 0x05060 +#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 +#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 /* Management Bit Fields and Masks */ +#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ #define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ +#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ +#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 /* Firmware Semaphore Register */ #define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_TS_ENABLED 0x1 #define IXGBE_FWSM_FW_MODE_PT 0x4 -#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE BIT(5) -#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 -#define IXGBE_FWSM_FW_VAL_BIT BIT(15) 
+#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE (1 << 5) +#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 +#define IXGBE_FWSM_FW_VAL_BIT (1 << 15) /* ARC Subsystem registers */ -#define IXGBE_HICR 0x15F00 -#define IXGBE_FWSTS 0x15F0C -#define IXGBE_HSMC0R 0x15F04 -#define IXGBE_HSMC1R 0x15F08 -#define IXGBE_SWSR 0x15F10 -#define IXGBE_HFDR 0x15FE8 -#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_FWRESETCNT 0x15F40 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ +#define IXGBE_FLEX_MNG_PTR(_i) (IXGBE_FLEX_MNG + ((_i) * 4)) -#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ /* Driver sets this bit when done to put command in RAM */ -#define IXGBE_HICR_C 0x02 -#define IXGBE_HICR_SV 0x04 /* Status Validity */ -#define IXGBE_HICR_FW_RESET_ENABLE 0x40 -#define IXGBE_HICR_FW_RESET 0x80 +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 /* PCI-E registers */ -#define IXGBE_GCR 0x11000 -#define IXGBE_GTV 0x11004 -#define IXGBE_FUNCTAG 0x11008 -#define IXGBE_GLT 0x1100C -#define IXGBE_GSCL_1 0x11010 -#define IXGBE_GSCL_2 0x11014 -#define IXGBE_GSCL_3 0x11018 -#define IXGBE_GSCL_4 0x1101C -#define IXGBE_GSCN_0 0x11020 -#define IXGBE_GSCN_1 0x11024 -#define IXGBE_GSCN_2 0x11028 -#define IXGBE_GSCN_3 0x1102C -#define IXGBE_FACTPS_8259X 0x10150 -#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X -#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X -#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_PCIEPIPEADR 0x11004 +#define IXGBE_PCIEPIPEDAT 0x11008 +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 +#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 +#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 +#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 +#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 +#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_GSCL_1_X550 0x11800 +#define IXGBE_GSCL_2_X550 0x11804 +#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550 0x11820 +#define IXGBE_GSCN_1_X550 0x11824 +#define IXGBE_GSCN_2_X550 0x11828 +#define IXGBE_GSCN_3_X550 0x1182C +#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 +#define IXGBE_FACTPS_X550 IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 #define IXGBE_FACTPS_X550EM_a 0x15FEC -#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS) +#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) -#define IXGBE_PCIEANACTL 0x11040 -#define 
IXGBE_SWSM_8259X 0x10140 -#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X -#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X -#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM +#define IXGBE_SWSM_X550 IXGBE_SWSM +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM #define IXGBE_SWSM_X550EM_a 0x15F70 -#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM) -#define IXGBE_FWSM_8259X 0x10148 -#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X -#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X -#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X +#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) + +#define IXGBE_FWSM 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM +#define IXGBE_FWSM_X550 IXGBE_FWSM +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM #define IXGBE_FWSM_X550EM_a 0x15F74 -#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM) -#define IXGBE_GSSR 0x10160 -#define IXGBE_MREVID 0x11064 -#define IXGBE_DCA_ID 0x11070 -#define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR -#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X -#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X -#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X +#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) + +#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC #define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 -#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) +#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) -/* PCIe registers 82599-specific */ -#define IXGBE_GCR_EXT 0x11050 -#define IXGBE_GSCL_5_82599 0x11030 -#define IXGBE_GSCL_6_82599 0x11034 -#define IXGBE_GSCL_7_82599 0x11038 -#define IXGBE_GSCL_8_82599 0x1103C -#define IXGBE_PHYADR_82599 0x11040 -#define IXGBE_PHYDAT_82599 0x11044 -#define IXGBE_PHYCTL_82599 0x11048 -#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_CIAA_8259X 0x11088 -#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X +/* PCI-E registers 82599-Specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 +#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 +#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 +#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA 0x11088 +#define IXGBE_CIAD 0x1108C +#define IXGBE_CIAA_82599 IXGBE_CIAA +#define IXGBE_CIAD_82599 IXGBE_CIAD +#define IXGBE_CIAA_X540 IXGBE_CIAA +#define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_GSCL_5_X550 0x11810 +#define IXGBE_GSCL_6_X550 0x11814 +#define IXGBE_GSCL_7_X550 0x11818 +#define IXGBE_GSCL_8_X550 0x1181C +#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 #define IXGBE_CIAA_X550 0x11508 -#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 -#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 -#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA) - -#define IXGBE_CIAD_8259X 0x1108C -#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X #define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_X550EM_x 
 #define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550
+#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
 #define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
-#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD)
-
-#define IXGBE_PICAUSE 0x110B0
-#define IXGBE_PIENA 0x110B8
-#define IXGBE_CDQ_MBR_82599 0x110B4
-#define IXGBE_PCIESPARE 0x110BC
-#define IXGBE_MISC_REG_82599 0x110F0
-#define IXGBE_ECC_CTRL_0_82599 0x11100
-#define IXGBE_ECC_CTRL_1_82599 0x11104
-#define IXGBE_ECC_STATUS_82599 0x110E0
-#define IXGBE_BAR_CTRL_82599 0x110F4
+#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA)
+#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD)
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
+#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
+#define IXGBE_MISC_REG_82599 0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599 0x110F4
 /* PCI Express Control */
-#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
-#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
-#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
-#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
-#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
-#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
-#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
-#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
 IXGBE_GCR_EXT_VT_MODE_64)
-
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
 /* Time Sync Registers */
-#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
-#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
-#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
-#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
-#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
-#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
-#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
-#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
-#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
-#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
-#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
-#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */
-#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
-#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
-#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
-#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
-#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
-#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
-#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
-#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
-#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
-#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
-#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
-#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
-#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
-#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
-#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
-#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
-#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
-#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */
+#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */
+#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
 /* Diagnostic Registers */
-#define IXGBE_RDSTATCTL 0x02C20
-#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
-#define IXGBE_RDHMPN 0x02F08
-#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
-#define IXGBE_RDPROBE 0x02F20
-#define IXGBE_RDMAM 0x02F30
-#define IXGBE_RDMAD 0x02F34
-#define IXGBE_TDSTATCTL 0x07C20
-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
-#define IXGBE_TDHMPN 0x07F08
-#define IXGBE_TDHMPN2 0x082FC
-#define IXGBE_TXDESCIC 0x082CC
-#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
-#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
-#define IXGBE_TDPROBE 0x07F20
-#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_RFVAL 0x050A4
-#define IXGBE_MDFTC1 0x042B8
-#define IXGBE_MDFTC2 0x042C0
-#define IXGBE_MDFTFIFO1 0x042C4
-#define IXGBE_MDFTFIFO2 0x042C8
-#define IXGBE_MDFTS 0x042CC
-#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
-#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
-#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
-#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
-#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
-#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
-#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
-#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
-#define IXGBE_PCIEECCCTL 0x1106C
-#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
-#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
-#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
-#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
-#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
-#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
-#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
-#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
-#define IXGBE_PCIEECCCTL0 0x11100
-#define IXGBE_PCIEECCCTL1 0x11104
-#define IXGBE_RXDBUECC 0x03F70
-#define IXGBE_TXDBUECC 0x0CF70
-#define IXGBE_RXDBUEST 0x03F74
-#define IXGBE_TXDBUEST 0x0CF74
-#define IXGBE_PBTXECC 0x0C300
-#define IXGBE_PBRXECC 0x03300
-#define IXGBE_GHECCR 0x110B0
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_RDMAM 0x02F30
+#define IXGBE_RDMAD 0x02F34
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TDHMPN2 0x082FC
+#define IXGBE_TXDESCIC 0x082CC
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
 /* MAC Registers */
-#define IXGBE_PCS1GCFIG 0x04200
-#define IXGBE_PCS1GLCTL 0x04208
-#define IXGBE_PCS1GLSTA 0x0420C
-#define IXGBE_PCS1GDBG0 0x04210
-#define IXGBE_PCS1GDBG1 0x04214
-#define IXGBE_PCS1GANA 0x04218
-#define IXGBE_PCS1GANLP 0x0421C
-#define IXGBE_PCS1GANNP 0x04220
-#define IXGBE_PCS1GANLPNP 0x04224
-#define IXGBE_HLREG0 0x04240
-#define IXGBE_HLREG1 0x04244
-#define IXGBE_PAP 0x04248
-#define IXGBE_MACA 0x0424C
-#define IXGBE_APAE 0x04250
-#define IXGBE_ARD 0x04254
-#define IXGBE_AIS 0x04258
-#define IXGBE_MSCA 0x0425C
-#define IXGBE_MSRWD 0x04260
-#define IXGBE_MLADD 0x04264
-#define IXGBE_MHADD 0x04268
-#define IXGBE_MAXFRS 0x04268
-#define IXGBE_TREG 0x0426C
-#define IXGBE_PCSS1 0x04288
-#define IXGBE_PCSS2 0x0428C
-#define IXGBE_XPCSS 0x04290
-#define IXGBE_MFLCN 0x04294
-#define IXGBE_SERDESC 0x04298
-#define IXGBE_MAC_SGMII_BUSY 0x04298
-#define IXGBE_MACS 0x0429C
-#define IXGBE_AUTOC 0x042A0
-#define IXGBE_LINKS 0x042A4
-#define IXGBE_LINKS2 0x04324
-#define IXGBE_AUTOC2 0x042A8
-#define IXGBE_AUTOC3 0x042AC
-#define IXGBE_ANLP1 0x042B0
-#define IXGBE_ANLP2 0x042B4
-#define IXGBE_MACC 0x04330
-#define IXGBE_ATLASCTL 0x04800
-#define IXGBE_MMNGC 0x042D0
-#define IXGBE_ANLPNP1 0x042D4
-#define IXGBE_ANLPNP2 0x042D8
-#define IXGBE_KRPCSFC 0x042E0
-#define IXGBE_KRPCSS 0x042E4
-#define IXGBE_FECS1 0x042E8
-#define IXGBE_FECS2 0x042EC
-#define IXGBE_SMADARCTL 0x14F10
-#define IXGBE_MPVC 0x04318
-#define IXGBE_SGMIIC 0x04314
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_MAXFRS 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_MFLCN 0x04294
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_LINKS2 0x04324
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
+#define IXGBE_ATLASCTL 0x04800
+#define IXGBE_MMNGC 0x042D0
+#define IXGBE_ANLPNP1 0x042D4
+#define IXGBE_ANLPNP2 0x042D8
+#define IXGBE_KRPCSFC 0x042E0
+#define IXGBE_KRPCSS 0x042E4
+#define IXGBE_FECS1 0x042E8
+#define IXGBE_FECS2 0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC 0x04318
+#define IXGBE_SGMIIC 0x04314
 /* Statistics Registers */
-#define IXGBE_RXNFGPC 0x041B0
-#define IXGBE_RXNFGBCL 0x041B4
-#define IXGBE_RXNFGBCH 0x041B8
-#define IXGBE_RXDGPC 0x02F50
-#define IXGBE_RXDGBCL 0x02F54
-#define IXGBE_RXDGBCH 0x02F58
-#define IXGBE_RXDDGPC 0x02F5C
-#define IXGBE_RXDDGBCL 0x02F60
-#define IXGBE_RXDDGBCH 0x02F64
-#define IXGBE_RXLPBKGPC 0x02F68
-#define IXGBE_RXLPBKGBCL 0x02F6C
-#define IXGBE_RXLPBKGBCH 0x02F70
-#define IXGBE_RXDLPBKGPC 0x02F74
-#define IXGBE_RXDLPBKGBCL 0x02F78
-#define IXGBE_RXDLPBKGBCH 0x02F7C
-#define IXGBE_TXDGPC 0x087A0
-#define IXGBE_TXDGBCL 0x087A4
-#define IXGBE_TXDGBCH 0x087A8
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
-#define IXGBE_RXDSTATCTRL 0x02F40
+#define IXGBE_RXDSTATCTRL 0x02F40
 /* Copper Pond 2 link timeout */
 #define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
 /* Omer CORECTL */
-#define IXGBE_CORECTL 0x014F00
+#define IXGBE_CORECTL 0x014F00
 /* BARCTRL */
-#define IXGBE_BARCTRL 0x110F4
-#define IXGBE_BARCTRL_FLSIZE 0x0700
-#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
-#define IXGBE_BARCTRL_CSRSIZE 0x2000
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
+#define IXGBE_BARCTRL_CSRSIZE_SHIFT 13
 /* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN 0x01
-#define IXGBE_RSCCTL_MAXDESC_1 0x00
-#define IXGBE_RSCCTL_MAXDESC_4 0x04
-#define IXGBE_RSCCTL_MAXDESC_8 0x08
-#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RSCCTL_TS_DIS 0x02
 /* RSCDBU Bit Masks */
-#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
-#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
 /* RDRXCTL Bit Masks */
-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
-#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
-#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */
-#define IXGBE_RDRXCTL_MVMEN 0x00000020
-#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
-#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
-#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
-#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
-#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
-#define IXGBE_RDRXCTL_MBINTEN 0x10000000
-#define IXGBE_RDRXCTL_MDP_EN 0x20000000
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080
+#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
 /* RQTC Bit Masks and Shifts */
-#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
-#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
-#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
-#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
-#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
-#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
-#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
-#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
-#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
+#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
 /* PSRTYPE.RQPL Bit masks and shift */
-#define IXGBE_PSRTYPE_RQPL_MASK 0x7
-#define IXGBE_PSRTYPE_RQPL_SHIFT 29
+#define IXGBE_PSRTYPE_RQPL_MASK 0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT 29
 /* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
-#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
 /* FACTPS */
-#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */
-#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
 /* MHADD Bit Masks */
-#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
-#define IXGBE_MHADD_MFS_SHIFT 16
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
 /* Extended Device Control */
-#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
-#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
-#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
-#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
 /* Direct Cache Access (DCA) definitions */
-#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
-#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
-#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
 /* MSCA Bit Masks */
-#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */
-#define IXGBE_MSCA_NP_ADDR_SHIFT 0
-#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */
-#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */
-#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
-#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
-#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
-#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
-#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
-#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
-#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
-#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
-#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
-#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
-#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
-#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */
-#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
-#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */
 /* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
 /* Atlas registers */
-#define IXGBE_ATLAS_PDN_LPBK 0x24
-#define IXGBE_ATLAS_PDN_10G 0xB
-#define IXGBE_ATLAS_PDN_1G 0xC
-#define IXGBE_ATLAS_PDN_AN 0xD
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
 /* Atlas bit masks */
-#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
-#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
-#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
-#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
-#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
 /* Omer bit masks */
-#define IXGBE_CORECTL_WRITE_CMD 0x00010000
-
-/* MDIO definitions */
+#define IXGBE_CORECTL_WRITE_CMD 0x00010000
+/* Device Type definitions for new protocol MDIO commands */
 #define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
-#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
 #define IXGBE_TWINAX_DEV 1
-#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
 #define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
 #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
-
-#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */
+#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */
+#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */
+#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
 #define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */
-#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */
-#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
-#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */
+#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */
+#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
 #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
-#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
 #define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */
 #define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */
 #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
@@ -1367,248 +1585,301 @@ struct ixgbe_nvm_version {
 #define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
 #define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */
 #define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
-#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* global fault msg */
+#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
 #define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */
 #define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
-/* autoneg vendor alarm int enable */
-#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */
 #define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
 #define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
 #define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
 #define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
-#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /*int dev fault enable */
-
+#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
-#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
-#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
-#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
-#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
+
+#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */
+#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */
+#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */
 /* MII clause 22/28 definitions */
-#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
-#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
-#define IXGBE_MII_AUTONEG_REG 0x0
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
-#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
-#define IXGBE_MAX_PHY_ADDR 32
+#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
 /* PHY IDs*/
-#define TN1010_PHY_ID 0x00A19410
-#define TNX_FW_REV 0xB
-#define X540_PHY_ID 0x01540200
+#define TN1010_PHY_ID 0x00A19410
+#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
 #define X550_PHY_ID2 0x01540223
 #define X550_PHY_ID3 0x01540221
-#define X557_PHY_ID 0x01540240
+#define X557_PHY_ID 0x01540240
 #define X557_PHY_ID2 0x01540250
-#define QT2022_PHY_ID 0x0043A400
-#define ATH_PHY_ID 0x03429050
-#define AQ_FW_REV 0x20
+#define AQ_FW_REV 0x20
+#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
+
+/* PHY Types */
+#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
+#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
 /* Special PHY Init Routine */
-#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
-#define IXGBE_PHY_INIT_END_NL 0xFFFF
-#define IXGBE_CONTROL_MASK_NL 0xF000
-#define IXGBE_DATA_MASK_NL 0x0FFF
-#define IXGBE_CONTROL_SHIFT_NL 12
-#define IXGBE_DELAY_NL 0
-#define IXGBE_DATA_NL 1
-#define IXGBE_CONTROL_NL 0x000F
-#define IXGBE_CONTROL_EOL_NL 0x0FFF
-#define IXGBE_CONTROL_SOL_NL 0x0000
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
 /* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */
-#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
-#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
-#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
-#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
-#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
-#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
 #define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
 #define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
 #define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
 #define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540
 #define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540
 #define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540
-#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
-#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
-#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
-#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME 0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
-#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
-#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
-#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
 /* Packet Buffer Initialization */
-#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
-#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/
-#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
+#define IXGBE_MAX_PACKET_BUFFERS 8
-#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
 #define IXGBE_MAX_PB 8
 /* Packet buffer allocation strategies */
 enum {
- PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
 #define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
- PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
 #define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
 };
 /* Transmit Flow Control status */
-#define IXGBE_TFCS_TXOFF 0x00000001
-#define IXGBE_TFCS_TXOFF0 0x00000100
-#define IXGBE_TFCS_TXOFF1 0x00000200
-#define IXGBE_TFCS_TXOFF2 0x00000400
-#define IXGBE_TFCS_TXOFF3 0x00000800
-#define IXGBE_TFCS_TXOFF4 0x00001000
-#define IXGBE_TFCS_TXOFF5 0x00002000
-#define IXGBE_TFCS_TXOFF6 0x00004000
-#define IXGBE_TFCS_TXOFF7 0x00008000
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
 /* TCP Timer */
-#define IXGBE_TCPTIMER_KS 0x00000100
-#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
-#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
-#define IXGBE_TCPTIMER_LOOP 0x00000800
-#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
 /* HLREG0 Bit Masks */
-#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
-#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
-#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
-#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
-#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
-#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
-#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
-#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
-#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
-#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
-#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
-#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
-#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
-#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
-#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
 /* VMD_CTL bitmasks */
-#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
-#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
 /* VT_CTL bitmasks */
-#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
-#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
-#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
-#define IXGBE_VT_CTL_POOL_SHIFT 7
-#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
 /* VMOLR bitmasks */
 #define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */
 #define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */
-#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
-#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
-#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
-#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
-#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
+#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
 /* VFRE bitmask */
-#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
 /* RDHMPN and TDHMPN bitmasks */
-#define IXGBE_RDHMPN_RDICADDR 0x007FF800
-#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
-#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
-#define IXGBE_TDHMPN_TDICADDR 0x003FF800
-#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
-#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
-#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
-#define IXGBE_RDMAM_DWORD_SHIFT 9
-#define IXGBE_RDMAM_DESC_COMP_FIFO 1
-#define IXGBE_RDMAM_DFC_CMD_FIFO 2
-#define IXGBE_RDMAM_TCN_STATUS_RAM 4
-#define IXGBE_RDMAM_WB_COLL_FIFO 5
-#define IXGBE_RDMAM_QSC_CNT_RAM 6
-#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
-#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
+#define IXGBE_RDMAM_DWORD_SHIFT 9
+#define IXGBE_RDMAM_DESC_COMP_FIFO 1
+#define IXGBE_RDMAM_DFC_CMD_FIFO 2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
+#define IXGBE_RDMAM_TCN_STATUS_RAM 4
+#define IXGBE_RDMAM_WB_COLL_FIFO 5
+#define IXGBE_RDMAM_QSC_CNT_RAM 6
+#define IXGBE_RDMAM_QSC_FCOE_RAM 7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
-#define IXGBE_TXDESCIC_READY 0x80000000
+#define IXGBE_TXDESCIC_READY 0x80000000
 /* Receive Checksum Control */
-#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
-#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
 /* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
-#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
 /* PAP bit masks*/
-#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
 /* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */
 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RMCS_RAC 0x00000004
-#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
-#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
-#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
-#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+#define IXGBE_RMCS_RAC 0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
 /* FCCFG Bit Masks */
-#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
-#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
+#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
 /* Interrupt register bitmasks */
 /* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
-#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
-#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
-#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
-#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
-#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
-#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */
-#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */
-#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */
-#define IXGBE_EICR_GPI_SDP0_X540 0x02000000
-#define IXGBE_EICR_GPI_SDP1_X540 0x04000000
-#define IXGBE_EICR_GPI_SDP2_X540 0x08000000
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
 #define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
 #define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
 #define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
@@ -1618,361 +1889,373 @@ enum {
 #define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540
 #define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540
 #define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540
-#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
-#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
-#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw),
EICR_GPI_SDP2) +#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) -#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ -#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ -#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ -#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ -#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ /* Extended Interrupt Cause Set */ -#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ -#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) -#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) -#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) -#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ -#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ /* Extended Interrupt Mask Set */ -#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define 
IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ -#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) -#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) -#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) -#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ -#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ /* Extended Interrupt Mask Clear */ -#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) -#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) -#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) -#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ -#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI 
IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ #define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_LSC | \ - IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ IXGBE_EIMS_OTHER) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ -#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ -#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ -#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ -#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ -#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ -#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ -#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check 
SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ -#define IXGBE_MAX_FTQF_FILTERS 128 -#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 -#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 -#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 -#define IXGBE_FTQF_PROTOCOL_SCTP 2 -#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 -#define IXGBE_FTQF_PRIORITY_SHIFT 2 -#define IXGBE_FTQF_POOL_MASK 0x0000003F -#define IXGBE_FTQF_POOL_SHIFT 8 -#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F -#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 -#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E -#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D -#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B -#define IXGBE_FTQF_DEST_PORT_MASK 0x17 -#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F -#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 -#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 /* Interrupt clear mask */ -#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF /* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_REG_NUM 25 -#define IXGBE_IVAR_REG_NUM_82599 64 -#define IXGBE_IVAR_TXRX_ENTRY 96 -#define IXGBE_IVAR_RX_ENTRY 64 -#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) -#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) -#define IXGBE_IVAR_TX_ENTRY 32 +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 -#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ -#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ -#define IXGBE_MSIX_VECTOR(_i) 
(0 + (_i)) +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ /* ETYPE Queue Filter/Select Bit Masks */ -#define IXGBE_MAX_ETQF_FILTERS 8 -#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ -#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ -#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ -#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ -#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ #define IXGBE_ETQF_POOL_SHIFT 20 -#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ -#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 -#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ -#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ /* * ETQF filter list: one static filter per filter consumer. This is - * to avoid filter collisions later. Add new filters - * here!! + * to avoid filter collisions later. Add new filters + * here!! * * Current filters: - * EAPOL 802.1x (0x888e): Filter 0 - * FCoE (0x8906): Filter 2 - * 1588 (0x88f7): Filter 3 - * FIP (0x8914): Filter 4 - * LLDP (0x88CC): Filter 5 - * LACP (0x8809): Filter 6 - * FC (0x8808): Filter 7 + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 */ -#define IXGBE_ETQF_FILTER_EAPOL 0 -#define IXGBE_ETQF_FILTER_FCOE 2 -#define IXGBE_ETQF_FILTER_1588 3 -#define IXGBE_ETQF_FILTER_FIP 4 -#define IXGBE_ETQF_FILTER_LLDP 5 -#define IXGBE_ETQF_FILTER_LACP 6 -#define IXGBE_ETQF_FILTER_FC 7 - +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 /* VLAN Control Bit Masks */ -#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ -#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ -#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ -#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ -#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ /* VLAN pool filtering masks */ -#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ -#define IXGBE_VLVF_ENTRIES 64 -#define IXGBE_VLVF_VLANID_MASK 0x00000FFF - +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF /* Per VF Port VLAN insertion rules */ -#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ -#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN 
*/ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ -#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ /* STATUS Bit Masks */ -#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ -#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ -#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */ -#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ -#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ /* ESDP Bit Masks */ -#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ -#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ -#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ -#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ -#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ -#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ -#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ -#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ -#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ -#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ -#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ -#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ -#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */ +#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ +#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */ +#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ + /* LEDCTL Bit Masks */ -#define IXGBE_LED_IVRT_BASE 0x00000040 -#define IXGBE_LED_BLINK_BASE 0x00000080 -#define IXGBE_LED_MODE_MASK_BASE 0x0000000F -#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) -#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) -#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) -#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) -#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) -#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8) +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) 
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) #define IXGBE_X557_MAX_LED_INDEX 3 #define IXGBE_X557_LED_PROVISIONING 0xC430 /* LED modes */ -#define IXGBE_LED_LINK_UP 0x0 -#define IXGBE_LED_LINK_10G 0x1 -#define IXGBE_LED_MAC 0x2 -#define IXGBE_LED_FILTER 0x3 -#define IXGBE_LED_LINK_ACTIVE 0x4 -#define IXGBE_LED_LINK_1G 0x5 -#define IXGBE_LED_ON 0xE -#define IXGBE_LED_OFF 0xF +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF /* AUTOC Bit Masks */ #define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 -#define IXGBE_AUTOC_KX4_SUPP 0x80000000 -#define IXGBE_AUTOC_KX_SUPP 0x40000000 -#define IXGBE_AUTOC_PAUSE 0x30000000 -#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 -#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 -#define IXGBE_AUTOC_RF 0x08000000 -#define IXGBE_AUTOC_PD_TMR 0x06000000 -#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 -#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 -#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 -#define IXGBE_AUTOC_FECA 0x00040000 -#define IXGBE_AUTOC_FECR 0x00020000 -#define IXGBE_AUTOC_KR_SUPP 0x00010000 -#define IXGBE_AUTOC_AN_RESTART 0x00001000 -#define IXGBE_AUTOC_FLU 0x00000001 -#define IXGBE_AUTOC_LMS_SHIFT 13 -#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define 
IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 -#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 -#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 -#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 -#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 -#define IXGBE_MACC_FLU 0x00000001 -#define IXGBE_MACC_FSV_10G 0x00030000 -#define IXGBE_MACC_FS 0x00040000 -#define IXGBE_MAC_RX2TX_LPBK 0x00000002 +#define IXGBE_MACC_FLU 0x00000001 +#define IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 -/* Veto Bit definition */ -#define IXGBE_MMNGC_MNG_VETO 0x00000001 +/* Veto Bit definition */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 /* LINKS Bit Masks */ -#define IXGBE_LINKS_KX_AN_COMP 0x80000000 -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED 0x20000000 -#define IXGBE_LINKS_MODE 0x18000000 -#define IXGBE_LINKS_RX_MODE 0x06000000 -#define IXGBE_LINKS_TX_MODE 0x01800000 -#define IXGBE_LINKS_XGXS_EN 0x00400000 -#define IXGBE_LINKS_SGMII_EN 0x02000000 -#define IXGBE_LINKS_PCS_1G_EN 0x00200000 -#define IXGBE_LINKS_1G_AN_EN 0x00100000 -#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 -#define IXGBE_LINKS_1G_SYNC 0x00040000 -#define IXGBE_LINKS_10G_ALIGN 0x00020000 -#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 -#define IXGBE_LINKS_TL_FAULT 0x00001000 -#define IXGBE_LINKS_SIGNAL 
0x00000F00 +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 -#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 -#define IXGBE_LINKS_SPEED_10_X550EM_A 0 -#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ -#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ -#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 /* PCS1GLSTA Bit Masks */ -#define IXGBE_PCS1GLSTA_LINK_OK 1 -#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 -#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 -#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 -#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 -#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 -#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 -#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 -#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 /* PCS1GLCTL Bit Masks */ -#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ -#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 -#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 -#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 -#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 -#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 /* ANLP1 Bit Masks */ -#define IXGBE_ANLP1_PAUSE 0x0C00 -#define IXGBE_ANLP1_SYM_PAUSE 0x0400 -#define IXGBE_ANLP1_ASM_PAUSE 0x0800 -#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 /* SW Semaphore Register bitmasks */ -#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ -#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ +#define 
IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ /* SW_FW_SYNC/GSSR definitions */ #define IXGBE_GSSR_EEP_SM 0x0001 @@ -1983,40 +2266,43 @@ enum { #define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 #define IXGBE_GSSR_SW_MNG_SM 0x0400 #define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ -#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ -#define IXGBE_GSSR_I2C_MASK 0x1800 -#define IXGBE_GSSR_NVM_PHY_MASK 0xF +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF /* FW Status register bitmask */ -#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ /* EEC Register */ -#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ -#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ -#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ -#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ -#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ -#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ -#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ -#define IXGBE_EEC_FWE_SHIFT 4 -#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ -#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ -#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ -#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ -#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ -#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ -#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ /* EEPROM Addressing bits based on type (0-small, 1-large) */ -#define IXGBE_EEC_ADDR_SIZE 0x00000400 -#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ -#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. 
*/ -#define IXGBE_EEC_SIZE_SHIFT 11 -#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 -#define IXGBE_EEPROM_OPCODE_BITS 8 +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define IXGBE_FLA_LOCKED 0x00000040 /* Part Number String Length */ -#define IXGBE_PBANUM_LENGTH 11 +#define IXGBE_PBANUM_LENGTH 11 /* Checksum and EEPROM pointers */ #define IXGBE_PBANUM_PTR_GUARD 0xFAFA @@ -2047,139 +2333,164 @@ enum { #define IXGBE_FW_PTR 0x0F #define IXGBE_PBANUM0_PTR 0x15 #define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_ALT_MAC_ADDR_PTR 0x37 #define IXGBE_FREE_SPACE_PTR 0X3E /* External Thermal Sensor Config */ -#define IXGBE_ETS_CFG 0x26 -#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 -#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 -#define IXGBE_ETS_TYPE_MASK 0x0038 -#define IXGBE_ETS_TYPE_SHIFT 3 -#define IXGBE_ETS_TYPE_EMC 0x000 -#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000 -#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 -#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 -#define IXGBE_ETS_DATA_LOC_SHIFT 10 -#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 -#define IXGBE_ETS_DATA_INDEX_SHIFT 8 -#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF -#define IXGBE_SAN_MAC_ADDR_PTR 0x28 -#define IXGBE_DEVICE_CAPS 0x2C -#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 -#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04 + +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 #define IXGBE_MAX_MSIX_VECTORS_82599 0x40 -#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 #define IXGBE_MAX_MSIX_VECTORS_82598 0x13 /* MSI-X capability fields masks */ -#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF /* Legacy EEPROM word offsets */ -#define IXGBE_ISCSI_BOOT_CAPS 0x0033 -#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 -#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 /* EEPROM Commands - SPI */ -#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ -#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 -#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ -#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ -#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ -#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ /* EEPROM reset Write Enable latch */ -#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 -#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ 
-#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ -#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ -#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ -#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ /* EEPROM Read Register */ -#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ -#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ -#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ -#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ -#define NVM_INIT_CTRL_3 0x38 -#define NVM_INIT_CTRL_3_LPLU 0x8 -#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 -#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 -#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 -#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ -#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 -#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ -#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */ +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS -#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ #endif #ifndef IXGBE_EERD_EEWR_ATTEMPTS /* Number of 5 microseconds we wait for EERD read and * EERW write to complete */ -#define IXGBE_EERD_EEWR_ATTEMPTS 100000 +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 #endif #ifndef IXGBE_FLUDONE_ATTEMPTS /* # attempts we wait for flush update to complete */ -#define IXGBE_FLUDONE_ATTEMPTS 20000 +#define IXGBE_FLUDONE_ATTEMPTS 20000 #endif -#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ -#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ -#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ -#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define 
IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ -#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 -#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 -#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 -#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 -#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7) -#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 -#define IXGBE_FW_LESM_STATE_1 0x1 -#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ -#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 -#define IXGBE_FW_PATCH_VERSION_4 0x7 -#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ -#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ -#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ -#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7) +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_LESM_2_STATES_ENABLED_MASK 0x1F +#define IXGBE_FW_LESM_2_STATES_ENABLED 0x12 +#define IXGBE_FW_LESM_STATE0_10G_ENABLED 0x6FFF +#define IXGBE_FW_LESM_STATE1_10G_ENABLED 0x4FFF +#define IXGBE_FW_LESM_STATE0_10G_DISABLED 0x0FFF +#define IXGBE_FW_LESM_STATE1_10G_DISABLED 0x2FFF +#define IXGBE_FW_LESM_PORT0_STATE0_OFFSET 0x2 +#define IXGBE_FW_LESM_PORT0_STATE1_OFFSET 0x3 +#define IXGBE_FW_LESM_PORT1_STATE0_OFFSET 0x6 +#define IXGBE_FW_LESM_PORT1_STATE1_OFFSET 0x7 +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. 
SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ -#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ -#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ -#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ +/* FW header offset */ +#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_X540_FW_MODULE_MASK 0x7FFF +/* 4KB multiplier */ +#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 +/* version word 2 (month & day) */ +#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 +/* version word 3 (silicon compatibility & year) */ +#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 +/* version word 4 (major & minor numbers) */ +#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ /* PCI Bus Info */ -#define IXGBE_PCI_DEVICE_STATUS 0xAA -#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 -#define IXGBE_PCI_LINK_STATUS 0xB2 -#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 -#define IXGBE_PCI_LINK_WIDTH 0x3F0 -#define IXGBE_PCI_LINK_WIDTH_1 0x10 -#define IXGBE_PCI_LINK_WIDTH_2 0x20 -#define IXGBE_PCI_LINK_WIDTH_4 0x40 -#define IXGBE_PCI_LINK_WIDTH_8 0x80 -#define IXGBE_PCI_LINK_SPEED 0xF -#define IXGBE_PCI_LINK_SPEED_2500 0x1 -#define IXGBE_PCI_LINK_SPEED_5000 0x2 -#define IXGBE_PCI_LINK_SPEED_8000 0x3 -#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E -#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 #define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf #define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 @@ -2195,51 +2506,60 @@ enum { /* Number of 100 microseconds we wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 +/* Check whether an address is multicast. This is a little-endian specific check. */ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + /* RAH */ -#define IXGBE_RAH_VIND_MASK 0x003C0000 -#define IXGBE_RAH_VIND_SHIFT 18 -#define IXGBE_RAH_AV 0x80000000 -#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF /* Header split receive */ -#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 -#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E -#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 #define IXGBE_RFCTL_RSC_DIS 0x00000020 -#define IXGBE_RFCTL_NFSW_DIS 0x00000040 -#define IXGBE_RFCTL_NFSR_DIS 0x00000080 -#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 -#define IXGBE_RFCTL_NFS_VER_SHIFT 8 -#define IXGBE_RFCTL_NFS_VER_2 0 -#define IXGBE_RFCTL_NFS_VER_3 1 -#define IXGBE_RFCTL_NFS_VER_4 2 -#define IXGBE_RFCTL_IPV6_DIS 0x00000400 -#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 -#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 -#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 -#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 /* Transmit Config masks */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ -#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ /* Enable short packet padding to 64 bytes */ -#define IXGBE_TX_PAD_ENABLE 0x00000400 -#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ /* This allows for 16K packets + 4k for vlan */ -#define IXGBE_MAX_FRAME_SZ 0x40040000 +#define IXGBE_MAX_FRAME_SZ 0x40040000 -#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ -#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ /* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
write-back flushing */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ #define IXGBE_TSAUXC_EN_CLK 0x00000004 #define IXGBE_TSAUXC_SYNCLK 0x00000008 @@ -2265,8 +2585,15 @@ enum { #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ #define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ +#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ +#define IXGBE_TSIM_SYS_WRAP 0x00000001 #define IXGBE_TSIM_TXTS 0x00000002 +#define IXGBE_TSIM_TADJ 0x00000080 + +#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP +#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS +#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 @@ -2275,61 +2602,59 @@ enum { #define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 #define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 -#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 -#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 -#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 -#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 -#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 -#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 -#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 -#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 -#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 -#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00 -#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 -#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ -#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ -#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ -#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ -#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ -#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ /* Receive Priority Flow Control Enable */ -#define IXGBE_FCTRL_RPFCE 0x00004000 -#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ -#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ -#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ -#define IXGBE_MFLCN_RPFCE 0x00000004 
/* Receive Priority FC Enable */ -#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ -#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */ - -#define IXGBE_MFLCN_RPFCE_SHIFT 4 +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ +#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ /* Multiple Receive Queue Control */ -#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ -#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ -#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ -#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ -#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ -#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ -#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ -#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ -#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 -#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 -#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 -#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 - -#define IXGBE_FWSM_TS_ENABLED 0x1 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 @@ -2337,296 +2662,377 @@ enum { #define IXGBE_QDE_IDX_MASK 0x00007F00 #define IXGBE_QDE_IDX_SHIFT 8 #define IXGBE_QDE_WRITE 0x00010000 +#define IXGBE_QDE_READ 0x00020000 -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert 
IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 /* Multiple Transmit Queue Command Register */ -#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ -#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ -#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ -#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ -#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ -#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ -#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ /* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ -#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* 
ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ #define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ -#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ -#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ -#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ -#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ -#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 /* overlap ERR_PE */ -#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 /* overlap ERR_OSE */ -#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000 -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 
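/*
 * Editor's aside: a minimal usage sketch, not part of the patch. It shows
 * how the Rx descriptor VLAN-tag masks above are typically consumed; the
 * helper name ixgbe_example_decode_vlan() is hypothetical.
 */
static inline void ixgbe_example_decode_vlan(u16 vlan_tag, u16 *vid, u8 *prio)
{
	*vid = vlan_tag & IXGBE_RXD_VLAN_ID_MASK;	/* VLAN ID, bits 11:0 */
	*prio = (vlan_tag & IXGBE_RXD_PRI_MASK) >> IXGBE_RXD_PRI_SHIFT;
}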
-#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 +#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ -#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ -#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */ +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ /* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 /* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 8) + * = (<< 2) + */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 /* RSS Hash results */ -#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 -#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 -#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 -#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 #define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 -#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 -#define 
IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 #define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 /* RSS Packet Types as indicated in the receive descriptor. */ -#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */ #define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ #define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ -#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ -#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 /* Masks to determine if packets should be dropped due to frame errors */ #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \ - IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \ - IXGBE_RXDADV_ERR_USE) + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + 
IXGBE_RXDADV_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE /* Multicast bit mask */ -#define IXGBE_MCSTCTRL_MFE 0x4 +#define IXGBE_MCSTCTRL_MFE 0x4 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 /* Vlan-specific macros */ -#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ -#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT - -/* SR-IOV specific macros */ -#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) -#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) -#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) -#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT /* Translated register #defines */ +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) +#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \ + (0x012300 + (((P) - 24) * 4))) +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ + : (0x0D000 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ + : (0x0D004 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ + : (0x0D008 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ + : (0x0D010 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ + : (0x0D018 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ + : (0x0D028 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? 
(0x01014 + (0x40 * (P))) \ + : (0x0D014 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) +#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) #define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) +#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \ + : (0x0D00C + (0x40 * ((P) - 64)))) +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) #define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) #define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) -#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \ +#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) -#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \ +#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, IXGBE_FDIR_PBALLOC_64K = 1, IXGBE_FDIR_PBALLOC_128K = 2, IXGBE_FDIR_PBALLOC_256K = 3, }; -#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 /* Flow Director register values */ -#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 -#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 -#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 -#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 -#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 -#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 -#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 -#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 -#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 #define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 #define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 #define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ #define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ -#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 -#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 -#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 -#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_FILTERMODE_MASK 
0x00E00000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 -#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 -#define IXGBE_FDIRM_VLANID 0x00000001 -#define IXGBE_FDIRM_VLANP 0x00000002 -#define IXGBE_FDIRM_POOL 0x00000004 -#define IXGBE_FDIRM_L4P 0x00000008 -#define IXGBE_FDIRM_FLEX 0x00000010 -#define IXGBE_FDIRM_DIPv6 0x00000020 +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 +#define IXGBE_FDIRM_L3P 0x00000040 -#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF -#define IXGBE_FDIRFREE_FREE_SHIFT 0 -#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 -#define IXGBE_FDIRFREE_COLL_SHIFT 16 -#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F -#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 -#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 -#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 -#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF -#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 -#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 -#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 -#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF -#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 -#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 -#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 -#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 -#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 -#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 -#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 +#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ +#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ +#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ +#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ +#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */ -#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 -#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 -#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 -#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 -#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 -#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 -#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 -#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 -#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 -#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 -#define IXGBE_FDIRCMD_IPV6 0x00000080 -#define IXGBE_FDIRCMD_CLEARHT 0x00000100 -#define IXGBE_FDIRCMD_DROP 0x00000200 -#define IXGBE_FDIRCMD_INT 0x00000400 -#define IXGBE_FDIRCMD_LAST 0x00000800 -#define IXGBE_FDIRCMD_COLLISION 0x00001000 -#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 -#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 -#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 -#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23 -#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 -#define IXGBE_FDIR_INIT_DONE_POLL 10 -#define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define 
IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 #define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 - -#define IXGBE_FDIR_DROP_QUEUE 127 +#define IXGBE_FDIR_DROP_QUEUE 127 /* Manageability Host Interface defines */ #define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ @@ -2635,12 +3041,13 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ #define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ #define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ +#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ /* CEM Support */ #define FW_CEM_HDR_LEN 0x4 #define FW_CEM_CMD_DRIVER_INFO 0xDD #define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_CMD_RESERVED 0X0 #define FW_CEM_UNUSED_VER 0x0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 @@ -2657,7 +3064,7 @@ enum ixgbe_fdir_pballoc_type { #define FW_DISABLE_RXEN_CMD 0xDE #define FW_DISABLE_RXEN_LEN 0x1 #define FW_PHY_MGMT_REQ_CMD 0x20 -#define FW_PHY_TOKEN_REQ_CMD 0x0A +#define FW_PHY_TOKEN_REQ_CMD 0xA #define FW_PHY_TOKEN_REQ_LEN 2 #define FW_PHY_TOKEN_REQ 0 #define FW_PHY_TOKEN_REL 1 @@ -2675,56 +3082,59 @@ enum ixgbe_fdir_pballoc_type { #define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) #define FW_PHY_ACT_INIT_PHY 1 #define FW_PHY_ACT_SETUP_LINK 2 -#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) -#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) -#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) -#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) -#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) -#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) -#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) -#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) -#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) -#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) -#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) +#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0) +#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1) +#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2) +#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3) +#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4) +#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5) +#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6) +#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7) +#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8) +#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9) 
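/*
 * Editor's aside: an illustrative sketch, not part of the patch. The
 * open-coded (1u << n) form behaves exactly like the kernel's BIT(n) but
 * also compiles where that macro is unavailable; callers still OR the
 * speed bits together. ixgbe_example_fw_speeds() is a hypothetical helper.
 */
static inline u32 ixgbe_example_fw_speeds(void)
{
	/* advertise 1G and 10G to the firmware-controlled PHY */
	return FW_PHY_ACT_LINK_SPEED_1G | FW_PHY_ACT_LINK_SPEED_10G;
}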
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10) #define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 -#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ - HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \ + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) #define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u #define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u #define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u #define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u -#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) -#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) -#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) -#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) -#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) +#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18) +#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19) +#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20) +#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0) #define FW_PHY_ACT_GET_LINK_INFO 3 -#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) -#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) -#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) -#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) -#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) -#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) -#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) -#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) +#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29) #define FW_PHY_ACT_FORCE_LINK_DOWN 4 -#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0) #define FW_PHY_ACT_PHY_SW_RESET 5 #define FW_PHY_ACT_PHY_HW_RESET 6 #define FW_PHY_ACT_GET_PHY_INFO 7 #define FW_PHY_ACT_UD_2 0x1002 -#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) -#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) -#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) -#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) -#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) -#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) +#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4) +#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3) +#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1) #define FW_PHY_ACT_RETRIES 50 #define FW_PHY_INFO_SPEED_MASK 0xFFFu #define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u #define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu /* Host Interface Command Structures */ + +#pragma pack(push, 1) + struct ixgbe_hic_hdr { u8 cmd; u8 buf_len; @@ -2745,7 +3155,7 @@ struct ixgbe_hic_hdr2_req { struct ixgbe_hic_hdr2_rsp { u8 cmd; u8 buf_lenl; - u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ u8 checksum; }; @@ -2787,8 +3197,8 @@ struct ixgbe_hic_read_shadow_ram { struct ixgbe_hic_write_shadow_ram { union ixgbe_hic_hdr2 hdr; - __be32 address; - __be16 length; + u32 address; + u16 length; u16 pad2; u16 data; u16 pad3; @@ -2816,7 +3226,7 @@ struct ixgbe_hic_internal_phy_req { u16 rsv1; __be32 write_data; u16 pad; -} __packed; +}; struct ixgbe_hic_internal_phy_resp { struct ixgbe_hic_hdr hdr; @@ -2836,20 +3246,53 @@ struct ixgbe_hic_phy_activity_resp { __be32 data[FW_PHY_ACT_DATA_COUNT]; }; +#pragma pack(pop) + +/* 
Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 vlan; + } fields; + } upper; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 buffer_addr; /* Address of descriptor's data buf */ __le32 cmd_type_len; __le32 olinfo_status; } read; struct { - __le64 rsvd; /* Reserved */ + __le64 rsvd; /* Reserved */ __le32 nxtseq_seed; __le32 status; } wb; }; +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 vlan; +}; + /* Receive Descriptor - Advanced */ union ixgbe_adv_rx_desc { struct { @@ -2884,68 +3327,76 @@ union ixgbe_adv_rx_desc { /* Context descriptors */ struct ixgbe_adv_tx_context_desc { __le32 vlan_macip_lens; - __le32 fceof_saidx; + __le32 seqnum_seed; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; /* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ -#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ -#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */ -#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ -#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ -#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ -#define 
IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec 
Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ -#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */ -#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */ -#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */ -#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ +/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 /* Autonegotiation advertised speeds */ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ @@ -2957,19 +3408,40 @@ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 #define IXGBE_LINK_SPEED_5GB_FULL 0x0800 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 -#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) -#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ - IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Physical layer type */ +typedef u64 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 +#define 
IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 +#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 /* Flow Control Data Sheet defined values * Calculation and defines taken from 802.1bb Annex O */ /* BitTimes (BT) conversion */ -#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) -#define IXGBE_B2BT(BT) (BT * 8) +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) /* Calculate Delay to respond to PFC */ #define IXGBE_PFC_D 672 @@ -2979,8 +3451,8 @@ typedef u32 ixgbe_link_speed; #define IXGBE_CABLE_DO 5000 /* Delay Optical */ /* Calculate Interface Delay X540 */ -#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ -#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ #define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ #define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) @@ -3026,8 +3498,8 @@ typedef u32 ixgbe_link_speed; (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) /* Software ATR hash keys */ -#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 -#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 /* Software ATR input stream values and masks */ #define IXGBE_ATR_HASH_MASK 0x7fff @@ -3038,14 +3510,22 @@ typedef u32 ixgbe_link_speed; #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 #define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 enum ixgbe_atr_flow_type { - IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, - IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, - IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, - IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, - IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, - IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, - IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, - IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, }; /* Flow Director ATR input struct. 
*/ @@ -3053,28 +3533,34 @@ union ixgbe_atr_input { /* * Byte layout in order, all values with MSB first: * - * vm_pool - 1 byte - * flow_type - 1 byte - * vlan_id - 2 bytes - * src_ip - 16 bytes - * dst_ip - 16 bytes - * src_port - 2 bytes - * dst_port - 2 bytes - * flex_bytes - 2 bytes - * bkt_hash - 2 bytes + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes */ struct { - u8 vm_pool; - u8 flow_type; + u8 vm_pool; + u8 flow_type; __be16 vlan_id; __be32 dst_ip[4]; __be32 src_ip[4]; + u8 inner_mac[6]; + __be16 tunnel_type; + __be32 tni_vni; __be16 src_port; __be16 dst_port; __be16 flex_bytes; __be16 bkt_hash; } formatted; - __be32 dword_stream[11]; + __be32 dword_stream[14]; }; /* Flow Director compressed ATR hash input struct */ @@ -3093,10 +3579,11 @@ union ixgbe_atr_hash_dword { __be32 dword; }; -#define IXGBE_MVALS_INIT(m) \ +#define IXGBE_MVALS_INIT(m) \ IXGBE_CAT(EEC, m), \ IXGBE_CAT(FLA, m), \ IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ IXGBE_CAT(FACTPS, m), \ IXGBE_CAT(SWSM, m), \ IXGBE_CAT(SWFW_SYNC, m), \ @@ -3119,10 +3606,21 @@ union ixgbe_atr_hash_dword { IXGBE_CAT(I2CCTL, m) enum ixgbe_mvals { - IXGBE_MVALS_INIT(IDX), + IXGBE_MVALS_INIT(_IDX), IXGBE_MVALS_IDX_LIMIT }; +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum ixgbe_fcoe_boot_status { + ixgbe_fcoe_bootstatus_disabled = 0, + ixgbe_fcoe_bootstatus_enabled = 1, + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi, @@ -3137,7 +3635,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540, ixgbe_mac_X550, ixgbe_mac_X550EM_x, - ixgbe_mac_x550em_a, + ixgbe_mac_X550EM_a, ixgbe_num_macs }; @@ -3167,7 +3665,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_active_unknown, ixgbe_phy_qsfp_intel, ixgbe_phy_qsfp_unknown, - ixgbe_phy_sfp_unsupported, + ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ ixgbe_phy_sgmii, ixgbe_phy_fw, ixgbe_phy_generic @@ -3176,15 +3674,15 @@ enum ixgbe_phy_type { /* * SFP+ module type IDs: * - * ID Module Type + * ID Module Type * ============= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CU_CORE0 - 82599-specific - * 4 SFP_DA_CU_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific */ enum ixgbe_sfp_type { ixgbe_sfp_type_da_cu = 0, @@ -3210,7 +3708,6 @@ enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, ixgbe_media_type_fiber_qsfp, - ixgbe_media_type_fiber_lco, ixgbe_media_type_copper, ixgbe_media_type_backplane, ixgbe_media_type_cx4, @@ -3237,6 +3734,8 @@ enum ixgbe_smart_speed { /* PCI bus types */ enum ixgbe_bus_type { ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, ixgbe_bus_type_pci_express, ixgbe_bus_type_internal, ixgbe_bus_type_reserved @@ -3244,27 +3743,27 @@ enum ixgbe_bus_type { /* PCI bus speeds */ enum ixgbe_bus_speed { - ixgbe_bus_speed_unknown = 0, - ixgbe_bus_speed_33 = 33, - ixgbe_bus_speed_66 = 66, - 
ixgbe_bus_speed_100 = 100, - ixgbe_bus_speed_120 = 120, - ixgbe_bus_speed_133 = 133, - ixgbe_bus_speed_2500 = 2500, - ixgbe_bus_speed_5000 = 5000, - ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, ixgbe_bus_speed_reserved }; /* PCI bus widths */ enum ixgbe_bus_width { - ixgbe_bus_width_unknown = 0, - ixgbe_bus_width_pcie_x1 = 1, - ixgbe_bus_width_pcie_x2 = 2, - ixgbe_bus_width_pcie_x4 = 4, - ixgbe_bus_width_pcie_x8 = 8, - ixgbe_bus_width_32 = 32, - ixgbe_bus_width_64 = 64, + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, ixgbe_bus_width_reserved }; @@ -3273,7 +3772,6 @@ struct ixgbe_addr_filter_info { u32 rar_used_count; u32 mta_in_use; u32 overflow_promisc; - bool uc_set_promisc; bool user_set_promisc; }; @@ -3283,15 +3781,15 @@ struct ixgbe_bus_info { enum ixgbe_bus_width width; enum ixgbe_bus_type type; - u8 func; + u16 func; u8 lan_id; - u8 instance_id; + u16 instance_id; }; /* Flow control parameters */ struct ixgbe_fc_info { - u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ - u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */ + u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ u16 pause_time; /* Flow Control Pause timer */ bool send_xon; /* Flow control send XON */ bool strict_ieee; /* Strict IEEE mode */ @@ -3352,8 +3850,6 @@ struct ixgbe_hw_stats { u64 mptc; u64 bptc; u64 xec; - u64 rqsmr[16]; - u64 tqsmr[8]; u64 qprc[16]; u64 qptc[16]; u64 qbrc[16]; @@ -3367,6 +3863,7 @@ struct ixgbe_hw_stats { u64 fdirmatch; u64 fdirmiss; u64 fccrc; + u64 fclast; u64 fcoerpdc; u64 fcoeprc; u64 fcoeptc; @@ -3374,12 +3871,43 @@ struct ixgbe_hw_stats { u64 fcoedwtc; u64 fcoe_noddp; u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; u64 b2ospc; u64 b2ogprc; u64 o2bgptc; u64 o2bspc; }; +/* NVM Update commands */ +#define IXGBE_NVMUPD_CMD_REG_READ 0x0000000B +#define IXGBE_NVMUPD_CMD_REG_WRITE 0x0000000C + +/* NVM Update features API */ +#define IXGBE_NVMUPD_FEATURES_API_VER_MAJOR 0 +#define IXGBE_NVMUPD_FEATURES_API_VER_MINOR 0 +#define IXGBE_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12 +#define IXGBE_NVMUPD_EXEC_FEATURES 0xe +#define IXGBE_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0) +#define IXGBE_NVMUPD_FEATURE_REGISTER_ACCESS_SUPPORT BIT(1) + +#define IXGBE_NVMUPD_MOD_PNT_MASK 0xFF + +struct ixgbe_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +struct ixgbe_nvm_features { + u8 major; + u8 minor; + u16 size; + u8 features[IXGBE_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN]; +}; + /* forward declaration */ struct ixgbe_hw; @@ -3405,19 +3933,22 @@ struct ixgbe_mac_operations { s32 (*start_hw)(struct ixgbe_hw *); s32 (*clear_hw_cntrs)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u64 (*get_supported_physical_layer)(struct ixgbe_hw *); s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); 
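Note that struct ixgbe_nvm_access above ends in a one-element data[1] array, the pre-C99 idiom for a variable-length payload: callers allocate the header plus data_size bytes in a single block. A hypothetical standalone helper (the function and type names here are invented for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Mirror of the request header, for illustration only. */
struct nvm_access_req {
	uint32_t command;
	uint32_t config;
	uint32_t offset;	/* in bytes */
	uint32_t data_size;	/* in bytes */
	uint8_t  data[1];	/* payload continues past the header */
};

/* Size the block as header + payload, using offsetof() so the one-byte
 * placeholder array is not counted twice. */
static struct nvm_access_req *nvm_req_alloc(uint32_t command, uint32_t offset,
					    uint32_t data_size)
{
	struct nvm_access_req *req =
		calloc(1, offsetof(struct nvm_access_req, data) + data_size);

	if (!req)
		return NULL;
	req->command = command;	/* e.g. IXGBE_NVMUPD_CMD_REG_READ above */
	req->offset = offset;
	req->data_size = data_size;
	return req;
}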
+ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *); void (*set_lan_id)(struct ixgbe_hw *); s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); s32 (*setup_sfp)(struct ixgbe_hw *); - s32 (*disable_rx_buff)(struct ixgbe_hw *); - s32 (*enable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct ixgbe_hw *); + s32 (*enable_sec_rx_path)(struct ixgbe_hw *); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); void (*init_swfw_sync)(struct ixgbe_hw *); @@ -3428,7 +3959,6 @@ struct ixgbe_mac_operations { void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); - void (*stop_link_on_d3)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); @@ -3436,8 +3966,8 @@ struct ixgbe_mac_operations { bool *); void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); - /* Packet Buffer Manipulation */ - void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); /* LED */ s32 (*led_on)(struct ixgbe_hw *, u32); @@ -3448,19 +3978,27 @@ struct ixgbe_mac_operations { /* RAR, Multicast, VLAN */ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr, bool clear); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); + s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32, + bool); s32 (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + s32 (*toggle_txdctl)(struct ixgbe_hw *hw, u32 vf_index); /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *); @@ -3472,19 +4010,23 @@ struct ixgbe_mac_operations { const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); - bool (*fw_recovery_mode)(struct ixgbe_hw *hw); + void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); void (*set_source_address_pruning)(struct ixgbe_hw *, bool, unsigned int); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); - - /* DMA Coalescing */ - s32 (*dmac_config)(struct ixgbe_hw *hw); s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); s32 
(*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); + void (*disable_mdd)(struct ixgbe_hw *hw); + void (*enable_mdd)(struct ixgbe_hw *hw); + void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); + void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); + bool (*fw_recovery_mode)(struct ixgbe_hw *hw); }; struct ixgbe_phy_operations { @@ -3500,11 +4042,13 @@ struct ixgbe_phy_operations { s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + void (*i2c_bus_clear)(struct ixgbe_hw *); s32 (*check_overtemp)(struct ixgbe_hw *); s32 (*set_phy_power)(struct ixgbe_hw *, bool on); s32 (*enter_lplu)(struct ixgbe_hw *); @@ -3530,185 +4074,166 @@ struct ixgbe_link_info { }; struct ixgbe_eeprom_info { - struct ixgbe_eeprom_operations ops; - enum ixgbe_eeprom_type type; - u32 semaphore_delay; - u16 word_size; - u16 address_bits; - u16 word_page_size; - u16 ctrl_word_3; + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; }; #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - enum ixgbe_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; /* prefix for World Wide Node Name (WWNN) */ - u16 wwnn_prefix; + u16 wwnn_prefix; /* prefix for World Wide Port Name (WWPN) */ - u16 wwpn_prefix; - u16 max_msix_vectors; + u16 wwpn_prefix; #define IXGBE_MAX_MTA 128 - u32 mta_shadow[IXGBE_MAX_MTA]; - s32 mc_filter_type; - u32 mcft_size; - u32 vft_size; - u32 num_rar_entries; - u32 rar_highwater; - u32 rx_pb_size; - u32 max_tx_queues; - u32 max_rx_queues; - u32 orig_autoc; - u32 orig_autoc2; - bool orig_link_settings_stored; - bool autotry_restart; - u8 flags; - u8 san_mac_rar_index; + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_autoc; + u8 san_mac_rar_index; + bool get_link_status; + u32 orig_autoc2; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; struct ixgbe_thermal_sensor_data thermal_sensor_data; - bool set_lben; - u8 led_link_act; + bool thermal_sensor_enabled; + struct ixgbe_dmac_config dmac_config; + bool set_lben; + u32 max_link_up_time; + u8 led_link_act; }; struct ixgbe_phy_info { - struct ixgbe_phy_operations ops; - struct mdio_if_info mdio; - enum ixgbe_phy_type type; - u32 id; - enum ixgbe_sfp_type sfp_type; - bool sfp_setup_needed; - u32 revision; - enum ixgbe_media_type media_type; - u32 phy_semaphore_mask; - bool reset_disable; - ixgbe_autoneg_advertised autoneg_advertised; - ixgbe_link_speed speeds_supported; - ixgbe_link_speed eee_speeds_supported; - ixgbe_link_speed eee_speeds_advertised; - enum ixgbe_smart_speed smart_speed; - bool 
smart_speed_active; - bool multispeed_fiber; - bool reset_if_overtemp; - bool qsfp_shared_i2c_bus; - u32 nw_mng_if_sel; + struct ixgbe_phy_operations ops; + enum ixgbe_phy_type type; + u32 addr; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + u32 phy_semaphore_mask; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; }; #include "ixgbe_mbx.h" -struct ixgbe_mbx_operations { - s32 (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct ixgbe_hw *, u16); - s32 (*check_for_ack)(struct ixgbe_hw *, u16); - s32 (*check_for_rst)(struct ixgbe_hw *, u16); -}; - -struct ixgbe_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct ixgbe_mbx_info { - const struct ixgbe_mbx_operations *ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 usec_delay; - u32 v2p_mailbox; - u16 size; -}; - struct ixgbe_hw { - u8 __iomem *hw_addr; - void *back; - struct ixgbe_mac_info mac; - struct ixgbe_addr_filter_info addr_ctrl; - struct ixgbe_fc_info fc; - struct ixgbe_phy_info phy; - struct ixgbe_link_info link; - struct ixgbe_eeprom_info eeprom; - struct ixgbe_bus_info bus; - struct ixgbe_mbx_info mbx; - const u32 *mvals; - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - bool adapter_stopped; - bool force_full_reset; - bool allow_unsupported_sfp; - bool wol_enabled; - bool need_crosstalk_fix; -}; - -struct ixgbe_info { - enum ixgbe_mac_type mac; - s32 (*get_invariants)(struct ixgbe_hw *); - const struct ixgbe_mac_operations *mac_ops; - const struct ixgbe_eeprom_operations *eeprom_ops; - const struct ixgbe_phy_operations *phy_ops; - const struct ixgbe_mbx_operations *mbx_ops; - const struct ixgbe_link_operations *link_ops; - const u32 *mvals; + u8 IOMEM *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + const u32 *mvals; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + bool need_crosstalk_fix; + /* NVM Update features */ + struct ixgbe_nvm_features nvmupd_features; }; +#define ixgbe_call_func(hw, func, params, error) \ + (func != NULL) ? 
func params : error

 /* Error Codes */
-#define IXGBE_ERR_EEPROM -1
-#define IXGBE_ERR_EEPROM_CHECKSUM -2
-#define IXGBE_ERR_PHY -3
-#define IXGBE_ERR_CONFIG -4
-#define IXGBE_ERR_PARAM -5
-#define IXGBE_ERR_MAC_TYPE -6
-#define IXGBE_ERR_UNKNOWN_PHY -7
-#define IXGBE_ERR_LINK_SETUP -8
-#define IXGBE_ERR_ADAPTER_STOPPED -9
-#define IXGBE_ERR_INVALID_MAC_ADDR -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
-#define IXGBE_ERR_RESET_FAILED -15
-#define IXGBE_ERR_SWFW_SYNC -16
-#define IXGBE_ERR_PHY_ADDR_INVALID -17
-#define IXGBE_ERR_I2C -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
-#define IXGBE_ERR_SFP_NOT_PRESENT -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
-#define IXGBE_ERR_EEPROM_VERSION -24
-#define IXGBE_ERR_NO_SPACE -25
-#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
-#define IXGBE_ERR_PBA_SECTION -31
-#define IXGBE_ERR_INVALID_ARGUMENT -32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_SUCCESS				0
+#define IXGBE_ERR_EEPROM			-1
+#define IXGBE_ERR_EEPROM_CHECKSUM		-2
+#define IXGBE_ERR_PHY				-3
+#define IXGBE_ERR_CONFIG			-4
+#define IXGBE_ERR_PARAM				-5
+#define IXGBE_ERR_MAC_TYPE			-6
+#define IXGBE_ERR_UNKNOWN_PHY			-7
+#define IXGBE_ERR_LINK_SETUP			-8
+#define IXGBE_ERR_ADAPTER_STOPPED		-9
+#define IXGBE_ERR_INVALID_MAC_ADDR		-10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED		-11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING	-12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS		-13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE		-14
+#define IXGBE_ERR_RESET_FAILED			-15
+#define IXGBE_ERR_SWFW_SYNC			-16
+#define IXGBE_ERR_PHY_ADDR_INVALID		-17
+#define IXGBE_ERR_I2C				-18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED		-19
+#define IXGBE_ERR_SFP_NOT_PRESENT		-20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT	-21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR		-22
+#define IXGBE_ERR_FDIR_REINIT_FAILED		-23
+#define IXGBE_ERR_EEPROM_VERSION		-24
+#define IXGBE_ERR_NO_SPACE			-25
+#define IXGBE_ERR_OVERTEMP			-26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED		-27
+#define IXGBE_ERR_FC_NOT_SUPPORTED		-28
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE	-30
+#define IXGBE_ERR_PBA_SECTION			-31
+#define IXGBE_ERR_INVALID_ARGUMENT		-32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND	-33
+#define IXGBE_ERR_OUT_OF_MEM			-34
+#define IXGBE_ERR_FEATURE_NOT_SUPPORTED		-36
+#define IXGBE_ERR_EEPROM_PROTECTED_REGION	-37
 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE	-38
 #define IXGBE_ERR_FW_RESP_INVALID	-39
 #define IXGBE_ERR_TOKEN_RETRY		-40
-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+#define IXGBE_ERR_MBX				-41
+#define IXGBE_ERR_MBX_NOMSG			-42
+#define IXGBE_ERR_TIMEOUT			-43
+
+#define IXGBE_NOT_IMPLEMENTED			0x7FFFFFFF

 #define IXGBE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
-#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV_MASK (3u << 6)
+#define IXGBE_FUSES0_300MHZ		(1 << 5)
+#define IXGBE_FUSES0_REV_MASK		(3 << 6)

 #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P) ? 0x8010 : 0x4010)
 #define IXGBE_KRM_LINK_S1(P)		((P) ? 0x8200 : 0x4200)
 #define IXGBE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
 #define IXGBE_KRM_AN_CNTL_1(P)		((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P)		((P) ? 0x8238 : 0x4238)
 #define IXGBE_KRM_AN_CNTL_8(P)		((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P)		((P) ? 0x9918 : 0x5918)
+#define IXGBE_KRM_PCS_KX_AN_LP(P)	((P) ?
0x991C : 0x591C) #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) #define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) @@ -3720,90 +4245,112 @@ struct ixgbe_info { #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) -#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) -#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28) -#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29) +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1) +#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2) 
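Stepping back to the ixgbe_call_func() helper defined just before the error-code table: it lets callers invoke optional ops entries without open-coding NULL checks, returning a caller-chosen error when the MAC-specific init left the pointer unset. A sketch of the intended usage (the wrapper itself is hypothetical; get_bus_info is one of the ops declared earlier):

/* Hypothetical wrapper, for illustration only.  The macro expands to
 * (func != NULL) ? func params : error. */
static s32 ixgbe_get_bus_info_safe(struct ixgbe_hw *hw)
{
	return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
			       IXGBE_NOT_IMPLEMENTED);
}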
+#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3) +#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29) +#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) -#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) -#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) -#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) -#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) -#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) -#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) -#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6) -#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15) -#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16) +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) -#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16) +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) -#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) -#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 -#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 #define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 #define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF #define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 -#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) #define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 -#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 -#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 #define IXGBE_NW_MNG_IF_SEL 0x00011178 -#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) -#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ -#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1) +#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2) +#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13) 
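One subtlety in the BIT(n)-to-(1 << n) conversions in this region: shifting 1 into bit 31 of a signed int is undefined behavior in C, and on typical ABIs the value sign-extends when widened, which is why several of the new masks use 1u. The patch is not fully consistent here (compare IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART, which uses 1u << 31, with IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN, which uses 1 << 31). A standalone illustration that sidesteps the undefined shift itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* What (1 << 31) usually yields once widened: the int sign bit
	 * set, then sign-extended to 64 bits.  The shift itself is UB,
	 * so INT32_MIN stands in for it here. */
	uint64_t from_signed = (uint64_t)INT32_MIN;
	/* (1u << 31) is well defined and zero-extends. */
	uint64_t from_unsigned = (uint64_t)(UINT32_C(1) << 31);

	/* Prints 0xffffffff80000000 vs 0x80000000 on typical ABIs. */
	printf("%#llx vs %#llx\n", (unsigned long long)from_signed,
	       (unsigned long long)from_unsigned);
	return 0;
}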
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */ +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) + +/* Code Command (Flash I/F Interface) */ +#define IXGBE_HOST_INTERFACE_FLASH_READ_CMD 0x30 +#define IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD 0x31 +#define IXGBE_HOST_INTERFACE_FLASH_WRITE_CMD 0x32 +#define IXGBE_HOST_INTERFACE_SHADOW_RAM_WRITE_CMD 0x33 +#define IXGBE_HOST_INTERFACE_FLASH_MODULE_UPDATE_CMD 0x34 +#define IXGBE_HOST_INTERFACE_FLASH_BLOCK_EREASE_CMD 0x35 +#define IXGBE_HOST_INTERFACE_SHADOW_RAM_DUMP_CMD 0x36 +#define IXGBE_HOST_INTERFACE_FLASH_INFO_CMD 0x37 +#define IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD 0x38 +#define IXGBE_HOST_INTERFACE_MASK_CMD 0x000000FF + +#include "ixgbe_osdep2.h" + #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index de563cfd294d..89d10e7c9e5a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,13 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_phy.h" #include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 @@ -16,55 +14,164 @@ #define IXGBE_X540_VFT_TBL_SIZE 128 #define IXGBE_X540_RX_PB_SIZE 384 -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); -static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); -enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) -{ - return ixgbe_media_type_copper; -} - -s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +/** + * ixgbe_init_ops_X540 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X540. + * Does not touch the hardware. 
+ **/ +s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + u16 i; - /* set_phy_power was set by default to NULL */ + DEBUGFUNC("ixgbe_init_ops_X540"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_eerd_X540; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; + eeprom->ops.write = ixgbe_write_eewr_X540; + eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_generic; + phy->ops.reset = NULL; phy->ops.set_phy_power = ixgbe_set_copper_phy_power; - mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; - mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_X540; + mac->ops.get_media_type = ixgbe_get_media_type_X540; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X540; + mac->ops.read_analog_reg8 = NULL; + mac->ops.write_analog_reg8 = NULL; + mac->ops.start_hw = ixgbe_start_hw_X540; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; + mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; - return 0; + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + mac->ops.setup_link = ixgbe_setup_mac_link_X540; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + mac->ops.check_link = ixgbe_check_mac_link_generic; + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* + * FWSM register + * ARC supported; valid only if manageability 
features are + * enabled. + */ + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + for (i = 0; i < 64; i++) + hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf; + + /* LEDs */ + mac->ops.blink_led_start = ixgbe_blink_led_start_X540; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; } /** - * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_get_link_capabilities_X540 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. **/ -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_media_type_X540 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return ixgbe_media_type_copper; +} + +/** + * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - return hw->phy.ops.setup_link_speed(hw, speed, - autoneg_wait_to_complete); + DEBUGFUNC("ixgbe_setup_mac_link_X540"); + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } /** - * ixgbe_reset_hw_X540 - Perform hardware reset - * @hw: pointer to hardware structure + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, and perform a reset. 
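Taken together, ixgbe_init_ops_X540() and the entry points it wires up give the usual bring-up order: install the ops, reset, then start. A hedged sketch of a caller (illustrative only; it assumes hw->hw_addr is already mapped and hw->mac.type is filled in, and elides most error handling):

/* Illustrative bring-up sequence, not part of the patch. */
static s32 x540_bring_up(struct ixgbe_hw *hw)
{
	s32 ret = ixgbe_init_ops_X540(hw);	/* wire up mac/phy/eeprom ops */

	if (ret != IXGBE_SUCCESS)
		return ret;
	ret = hw->mac.ops.reset_hw(hw);		/* ixgbe_reset_hw_X540 */
	if (ret != IXGBE_SUCCESS)
		return ret;
	return hw->mac.ops.start_hw(hw);	/* ixgbe_start_hw_X540 */
}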
**/ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { @@ -72,41 +179,43 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) u32 ctrl, i; u32 swfw_mask = hw->phy.phy_semaphore_mask; + DEBUGFUNC("ixgbe_reset_hw_X540"); + /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_dbg(hw, "semaphore failed with %d", status); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } - ctrl = IXGBE_CTRL_RST; ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Reset polling failed to complete.\n"); } - msleep(100); + msec_delay(100); /* * Double resets are required for recovery from certain error @@ -129,14 +238,14 @@ mac_reset_top: * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ - hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; + hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (is_valid_ether_addr(hw->mac.san_addr)) { + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; @@ -155,34 +264,65 @@ mac_reset_top: hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, &hw->mac.wwpn_prefix); +reset_hw_out: return status; } /** - * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure * - * Starts the hardware using the generic start_hw function - * and the generation start_hw function. - * Then performs revision-specific operations, if any. + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. **/ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { - s32 ret_val; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_X540"); ret_val = ixgbe_start_hw_generic(hw); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; - return ixgbe_start_hw_gen2(hw); + ixgbe_start_hw_gen2(hw); + +out: + return ret_val; } /** - * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params - * @hw: pointer to hardware structure + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { @@ -190,138 +330,157 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) u32 eec; u16 eeprom_size; + DEBUGFUNC("ixgbe_init_eeprom_params_X540"); + if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = BIT(eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); + eeprom->type, eeprom->word_size); } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_read_eerd_X540- Read EEPROM word using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM + * ixgbe_read_eerd_X540- Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM * - * Reads a 16 bit word from the EEPROM using the EERD register. + * Reads a 16 bit word from the EEPROM using the EERD register. **/ -static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_read_eerd_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - status = ixgbe_read_eerd_generic(hw, offset, data); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** - * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM + * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM * - * Reads a 16 bit word(s) from the EEPROM using the EERD register. + * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
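Note that ixgbe_get_supported_physical_layer_X540() above returns a bitmask rather than a single value, since the X540 PHY can report several abilities at once; callers therefore test individual IXGBE_PHYSICAL_LAYER_* bits. A small sketch of such a caller (the helper name is hypothetical):

/* Hypothetical caller: test bits, do not compare values. */
static bool x540_supports_10gbase_t(struct ixgbe_hw *hw)
{
	u64 layers = hw->mac.ops.get_supported_physical_layer(hw);

	return (layers & IXGBE_PHYSICAL_LAYER_10GBASE_T) != 0;
}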
**/ -static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** - * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM * - * Write a 16 bit word to the EEPROM using the EEWR register. + * Write a 16 bit word to the EEPROM using the EEWR register. **/ -static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) { - s32 status; + s32 status = IXGBE_SUCCESS; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_write_eewr_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - status = ixgbe_write_eewr_generic(hw, offset, data); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** - * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) write to the EEPROM + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM * - * Write a 16 bit word(s) to the EEPROM using the EEWR register. + * Write a 16 bit word(s) to the EEPROM using the EEWR register. **/ -static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** - * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum * - * This function does not use synchronization for EERD and EEWR. 
It can - * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. * - * @hw: pointer to hardware structure + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum **/ -static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { - u16 i; - u16 j; + u16 i, j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; - u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; - /* - * Do not use hw->eeprom.ops.read because we do not want to take + /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores here. Instead use * ixgbe_read_eerd_generic */ - /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < checksum_last_word; i++) { + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); + + /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the + * checksum itself + */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (ixgbe_read_eerd_generic(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; @@ -329,8 +488,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) checksum += word; } - /* - * Include all data from pointers 0x3, 0x6-0xE. This excludes the + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. */ for (i = ptr_start; i < IXGBE_FW_PTR; i++) { @@ -339,7 +497,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) if (ixgbe_read_eerd_generic(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } /* Skip pointer section if the pointer is invalid. */ @@ -350,7 +508,6 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) if (ixgbe_read_eerd_generic(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; - break; } /* Skip pointer section if length is invalid. */ @@ -373,20 +530,22 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) } /** - * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. **/ -static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, - u16 *checksum_val) +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); + /* Read the first word from the EEPROM. 
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails @@ -418,7 +577,8 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, * calculated checksum */ if (read_checksum != checksum) { - hw_dbg(hw, "Invalid EEPROM checksum"); + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); status = IXGBE_ERR_EEPROM_CHECKSUM; } @@ -440,11 +600,13 @@ out: * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ -static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) { s32 status; u16 checksum; + DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails @@ -456,7 +618,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return IXGBE_ERR_SWFW_SYNC; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) @@ -475,6 +637,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; } @@ -485,41 +648,43 @@ out: * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy * EEPROM from shadow RAM to the flash device. **/ -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) { u32 flup; s32 status; + DEBUGFUNC("ixgbe_update_flash_X540"); + status = ixgbe_poll_flash_update_done_X540(hw); if (status == IXGBE_ERR_EEPROM) { hw_dbg(hw, "Flash update time out\n"); - return status; + goto out; } - flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); status = ixgbe_poll_flash_update_done_X540(hw); - if (status == 0) + if (status == IXGBE_SUCCESS) hw_dbg(hw, "Flash update complete\n"); else hw_dbg(hw, "Flash update time out\n"); - if (hw->revision_id == 0) { - flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (flup & IXGBE_EEC_SEC1VAL) { flup |= IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); } status = ixgbe_poll_flash_update_done_X540(hw); - if (status == 0) + if (status == IXGBE_SUCCESS) hw_dbg(hw, "Flash update complete\n"); else hw_dbg(hw, "Flash update time out\n"); } - +out: return status; } @@ -530,18 +695,28 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) * Polls the FLUDONE (bit 26) of the EEC Register to determine when the * flash update is done. 
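For reference, the checksum convention implemented by the calc/validate/update helpers above is additive: the stored checksum word is chosen so that all covered words plus the checksum sum to the driver's IXGBE_EEPROM_SUM constant, 0xBABA in the upstream ixgbe sources (the constant itself falls outside these hunks, so treat that value as an assumption). A standalone sketch of the arithmetic:

#include <stdint.h>

/* Sketch: compute the checksum word over the covered words only. */
static uint16_t eeprom_checksum(const uint16_t *words, int count)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < count; i++)	/* covered words, checksum excluded */
		sum += words[i];
	/* Chosen so that sum + checksum == 0xBABA (mod 2^16). */
	return (uint16_t)(0xBABA - sum);
}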
**/ -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) { u32 i; u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - if (reg & IXGBE_EEC_FLUDONE) - return 0; - udelay(5); + reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (reg & IXGBE_EEC_FLUDONE) { + status = IXGBE_SUCCESS; + break; + } + msec_delay(5); } - return IXGBE_ERR_EEPROM; + + if (i == IXGBE_FLUDONE_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Flash update status polling timed out"); + + return status; } /** @@ -555,43 +730,50 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; - u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; u32 fwmask = swmask << 5; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; u32 timeout = 200; u32 hwmask = 0; u32 swfw_sync; u32 i; - if (swmask & IXGBE_GSSR_EEP_SM) - hwmask = IXGBE_GSSR_FLASH_SM; + DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); - /* SW only mask does not have FW bit pair */ + if (swmask & IXGBE_GSSR_EEP_SM) + hwmask |= IXGBE_GSSR_FLASH_SM; + + /* SW only mask doesn't have FW bit pair */ if (mask & IXGBE_GSSR_SW_MNG_SM) swmask |= IXGBE_GSSR_SW_MNG_SM; swmask |= swi2c_mask; fwmask |= swi2c_mask << 2; + if (hw->mac.type >= ixgbe_mac_X550) + timeout = 1000; + for (i = 0; i < timeout; i++) { /* SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits (not just NVM) */ - if (ixgbe_get_swfw_sync_semaphore(hw)) + if (ixgbe_get_swfw_sync_semaphore(hw)) { + hw_dbg(hw, "Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; + } - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) { swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), + swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 6000); - return 0; + return IXGBE_SUCCESS; } /* Firmware currently using resource (fwmask), hardware * currently using resource (hwmask), or other software * thread currently using resource (swmask) */ ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); + msec_delay(5); } /* If the resource is not released by the FW/HW the SW can assume that @@ -599,15 +781,17 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) * of the requested resource(s) while ignoring the corresponding FW/HW * bits in the SW_FW_SYNC register. 
 */
-	if (ixgbe_get_swfw_sync_semaphore(hw))
+	if (ixgbe_get_swfw_sync_semaphore(hw)) {
+		hw_dbg(hw, "Failed to get NVM semaphore and register semaphore while forcefully ignoring FW semaphore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
 		return IXGBE_ERR_SWFW_SYNC;
-	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+	}
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
 	if (swfw_sync & (fwmask | hwmask)) {
 		swfw_sync |= swmask;
-		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
+		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
 		ixgbe_release_swfw_sync_semaphore(hw);
-		usleep_range(5000, 6000);
-		return 0;
+		msec_delay(5);
+		return IXGBE_SUCCESS;
 	}
 	/* If the resource is not released by other SW the SW can assume that
 	 * the other SW malfunctions. In that case the SW should clear all SW
@@ -623,9 +807,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 		rmask |= IXGBE_GSSR_I2C_MASK;
 		ixgbe_release_swfw_sync_X540(hw, rmask);
 		ixgbe_release_swfw_sync_semaphore(hw);
+		hw_dbg(hw, "Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
 		return IXGBE_ERR_SWFW_SYNC;
 	}
 	ixgbe_release_swfw_sync_semaphore(hw);
+	hw_dbg(hw, "Returning error IXGBE_ERR_SWFW_SYNC\n");
 	return IXGBE_ERR_SWFW_SYNC;
 }
@@ -643,16 +829,18 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 	u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
 	u32 swfw_sync;

+	DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+
 	if (mask & IXGBE_GSSR_I2C_MASK)
 		swmask |= mask & IXGBE_GSSR_I2C_MASK;
 	ixgbe_get_swfw_sync_semaphore(hw);

-	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
 	swfw_sync &= ~swmask;
-	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);

 	ixgbe_release_swfw_sync_semaphore(hw);
-	usleep_range(5000, 6000);
+	msec_delay(2);
 }

 /**
@@ -660,76 +848,90 @@
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
- */
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+ **/
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
 {
+	s32 status = IXGBE_ERR_EEPROM;
 	u32 timeout = 2000;
 	u32 i;
 	u32 swsm;

+	DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
+
 	/* Get SMBI software semaphore between device drivers first */
 	for (i = 0; i < timeout; i++) {
-		/* If the SMBI bit is 0 when we read it, then the bit will be
+		/*
+		 * If the SMBI bit is 0 when we read it, then the bit will be
 		 * set and we have the semaphore
 		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
-		if (!(swsm & IXGBE_SWSM_SMBI))
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+		if (!(swsm & IXGBE_SWSM_SMBI)) {
+			status = IXGBE_SUCCESS;
 			break;
-		usleep_range(50, 100);
-	}
-
-	if (i == timeout) {
-		hw_dbg(hw,
-		       "Software semaphore SMBI between device drivers not granted.\n");
-		return IXGBE_ERR_EEPROM;
+		}
+		usec_delay(50);
 	}

 	/* Now get the semaphore between SW/FW through the REGSMP bit */
-	for (i = 0; i < timeout; i++) {
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
-		if (!(swsm & IXGBE_SWFW_REGSMP))
-			return 0;
+	if (status == IXGBE_SUCCESS) {
+		for (i = 0; i < timeout; i++) {
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
+			if (!(swsm & IXGBE_SWFW_REGSMP))
+				break;

-		usleep_range(50, 100);
+			usec_delay(50);
+		}
+
+		/*
+		 * Release semaphores and return error if SW NVM semaphore
+		 * was not
granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "REGSMP Software NVM semaphore not granted.\n"); + ixgbe_release_swfw_sync_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); } - /* Release semaphores and return error if SW NVM semaphore - * was not granted because we do not have access to the EEPROM - */ - hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); - ixgbe_release_swfw_sync_semaphore(hw); - return IXGBE_ERR_EEPROM; + return status; } /** - * ixgbe_release_nvm_semaphore - Release hardware semaphore + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. **/ -static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) { - u32 swsm; + u32 swsm; + + DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); swsm &= ~IXGBE_SWFW_REGSMP; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); IXGBE_WRITE_FLUSH(hw); } /** - * ixgbe_init_swfw_sync_X540 - Release hardware semaphore - * @hw: pointer to hardware structure + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure * - * This function reset hardware semaphore bits for a semaphore that may - * have be left locked due to a catastrophic failure. + * This function reset hardware semaphore bits for a semaphore that may + * have be left locked due to a catastrophic failure. **/ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) { @@ -747,8 +949,9 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) /* Acquire and release all software resources. */ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | - IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK; + IXGBE_GSSR_SW_MNG_SM; + rmask |= IXGBE_GSSR_I2C_MASK; ixgbe_acquire_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_X540(hw, rmask); } @@ -759,7 +962,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) * @index: led number to blink * * Devices that implement the version 2 interface: - * X540 + * X540 **/ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { @@ -768,15 +971,18 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) ixgbe_link_speed speed; bool link_up; + DEBUGFUNC("ixgbe_blink_led_start_X540"); + if (index > 3) return IXGBE_ERR_PARAM; - /* Link should be up in order for the blink bit in the LED control + /* + * Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. * This will be reversed when we stop the blinking. 
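The semaphore helpers above define a strict usage contract: acquire the SW/FW sync bits for the resource, touch it, release. The EEPROM accessors earlier in this patch follow exactly this shape; a condensed illustrative caller mirroring ixgbe_read_eerd_X540:

/* Illustrative only, not part of the patch. */
static s32 read_eeprom_word_locked(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) !=
	    IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	status = ixgbe_read_eerd_generic(hw, offset, data);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}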
*/ hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { + if (link_up == false) { macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); @@ -788,7 +994,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } /** @@ -797,7 +1003,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) * @index: led number to stop blinking * * Devices that implement the version 2 interface: - * X540 + * X540 **/ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { @@ -807,6 +1013,8 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) if (index > 3) return IXGBE_ERR_PARAM; + DEBUGFUNC("ixgbe_blink_led_stop_X540"); + /* Restore the LED to its default value. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -820,104 +1028,5 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } -static const struct ixgbe_mac_operations mac_ops_X540 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .start_hw = &ixgbe_start_hw_X540, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_X540, - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_device_caps = &ixgbe_get_device_caps_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, - .read_analog_reg8 = NULL, - .write_analog_reg8 = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .set_rxpba = &ixgbe_set_rxpba_generic, - .check_link = &ixgbe_check_mac_link_generic, - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_X540, - .blink_led_stop = &ixgbe_blink_led_stop_X540, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_generic, - .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, - .clear_vmdq = &ixgbe_clear_vmdq_generic, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_generic, - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = NULL, - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .disable_rx_buff = &ixgbe_disable_rx_buff_generic, - .enable_rx_buff = &ixgbe_enable_rx_buff_generic, - .get_thermal_sensor_data = NULL, - .init_thermal_sensor_thresh = NULL, - .prot_autoc_read = 
&prot_autoc_read_generic, - .prot_autoc_write = &prot_autoc_write_generic, - .enable_rx = &ixgbe_enable_rx_generic, - .disable_rx = &ixgbe_disable_rx_generic, -}; - -static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { - .init_params = &ixgbe_init_eeprom_params_X540, - .read = &ixgbe_read_eerd_X540, - .read_buffer = &ixgbe_read_eerd_buffer_X540, - .write = &ixgbe_write_eewr_X540, - .write_buffer = &ixgbe_write_eewr_buffer_X540, - .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, - .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, - .update_checksum = &ixgbe_update_eeprom_checksum_X540, -}; - -static const struct ixgbe_phy_operations phy_ops_X540 = { - .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_sfp_module_generic, - .init = NULL, - .reset = NULL, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, - .set_phy_power = &ixgbe_set_copper_phy_power, -}; - -static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X540) -}; - -const struct ixgbe_info ixgbe_X540_info = { - .mac = ixgbe_mac_X540, - .get_invariants = &ixgbe_get_invariants_X540, - .mac_ops = &mac_ops_X540, - .eeprom_ops = &eeprom_ops_X540, - .phy_ops = &phy_ops_X540, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X540, -}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index e246c0d2a427..82f60bd2d98a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -1,19 +1,37 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ #include "ixgbe_type.h" -s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); + bool link_up_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); + s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); + +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +#endif /* _IXGBE_X540_H_ */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9c42f741ed5e..df2f37f214f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,76 +1,299 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ +#include "ixgbe_x550.h" #include "ixgbe_x540.h" #include "ixgbe_type.h" +#include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" -static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); -static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); -static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); -static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); -static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw); -static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - struct ixgbe_link_info *link = &hw->link; - - /* Start with X540 invariants, since so simular */ - ixgbe_get_invariants_X540(hw); - - if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; - - link->addr = IXGBE_CS4227; - - return 0; -} - -static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - - /* Start with X540 invariants, since so similar */ - ixgbe_get_invariants_X540(hw); - - phy->ops.set_phy_power = NULL; - - return 0; -} - -static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - - /* Start with X540 invariants, since so simular */ - ixgbe_get_invariants_X540(hw); - - if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; - - return 0; -} - -static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - - /* Start with X540 invariants, since so similar */ - ixgbe_get_invariants_X540(hw); - - phy->ops.set_phy_power = NULL; - - return 0; -} - -/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control - * @hw: pointer to hardware structure +/** + * ixgbe_init_ops_X550 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X550. + * Does not touch the hardware. 
**/ -static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550"); + + ret_val = ixgbe_init_ops_X540(hw); + mac->ops.dmac_config = ixgbe_dmac_config_X550; + mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; + mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; + mac->ops.setup_eee = NULL; + mac->ops.set_source_address_pruning = + ixgbe_set_source_address_pruning_X550; + mac->ops.set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_X550; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + + mac->ops.disable_mdd = ixgbe_disable_mdd_X550; + mac->ops.enable_mdd = ixgbe_enable_mdd_X550; + mac->ops.mdd_event = ixgbe_mdd_event_X550; + mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; + mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550; + mac->ops.disable_rx = ixgbe_disable_rx_x550; + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->mac.ops.led_on = NULL; + hw->mac.ops.led_off = NULL; + break; + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + hw->mac.ops.led_on = ixgbe_led_on_t_X550em; + hw->mac.ops.led_off = ixgbe_led_off_t_X550em; + break; + default: + break; + } + return ret_val; +} + +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + **/ +STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write to register + * + * Returns status code + **/ +STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_read_pe - Read register from port expander + * @hw: pointer to hardware structure + * @reg: register number to read + * @value: pointer to receive read value + * + * Returns status code + **/ +STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +{ + s32 status; + + status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_write_pe - Write register to port expander + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write + * + * Returns status code + **/ +STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +{ + s32 status; + + status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, 
+ "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_reset_cs4227 - Reset CS4227 using port expander + * @hw: pointer to hardware structure + * + * This function assumes that the caller has acquired the proper semaphore. + * Returns error code + **/ +STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +{ + s32 status; + u32 retry; + u16 value; + u8 reg; + + /* Trigger hard reset. */ + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); + if (status != IXGBE_SUCCESS) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); + if (status != IXGBE_SUCCESS) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); + if (status != IXGBE_SUCCESS) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status != IXGBE_SUCCESS) + return status; + + usec_delay(IXGBE_CS4227_RESET_HOLD); + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); + if (status != IXGBE_SUCCESS) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status != IXGBE_SUCCESS) + return status; + + /* Wait for the reset to complete. */ + msec_delay(IXGBE_CS4227_RESET_DELAY); + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, + &value); + if (status == IXGBE_SUCCESS && + value == IXGBE_CS4227_EEPROM_LOAD_OK) + break; + msec_delay(IXGBE_CS4227_CHECK_DELAY); + } + if (retry == IXGBE_CS4227_RETRIES) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset did not complete."); + return IXGBE_ERR_PHY; + } + + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); + if (status != IXGBE_SUCCESS || + !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "CS4227 EEPROM did not load successfully."); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_cs4227 - Check CS4227 and reset as needed + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u16 value = 0; + u8 retry; + + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + msec_delay(IXGBE_CS4227_CHECK_DELAY); + continue; + } + + /* Get status of reset flow. */ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + + if (status == IXGBE_SUCCESS && + value == IXGBE_CS4227_RESET_COMPLETE) + goto out; + + if (status != IXGBE_SUCCESS || + value != IXGBE_CS4227_RESET_PENDING) + break; + + /* Reset is pending. Wait and check again. */ + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(IXGBE_CS4227_CHECK_DELAY); + } + + /* If still pending, assume other instance failed. */ + if (retry == IXGBE_CS4227_RETRIES) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return; + } + } + + /* Reset the CS4227. 
*/ + status = ixgbe_reset_cs4227(hw); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset failed: %d", status); + goto out; + } + + /* Reset takes so long, temporarily release semaphore in case the + * other driver instance is waiting for the reset indication. + */ + ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_PENDING); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(10); + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return; + } + + /* Record completion for next time. */ + status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_COMPLETE); + +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(hw->eeprom.semaphore_delay); +} + +/** + * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -84,231 +307,29 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) } /** - * ixgbe_read_cs4227 - Read CS4227 register - * @hw: pointer to hardware structure - * @reg: register number to write - * @value: pointer to receive value read - * - * Returns status code - */ -static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) -{ - return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); -} - -/** - * ixgbe_write_cs4227 - Write CS4227 register - * @hw: pointer to hardware structure - * @reg: register number to write - * @value: value to write to register - * - * Returns status code - */ -static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) -{ - return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); -} - -/** - * ixgbe_read_pe - Read register from port expander - * @hw: pointer to hardware structure - * @reg: register number to read - * @value: pointer to receive read value - * - * Returns status code - */ -static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) -{ - s32 status; - - status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); - if (status) - hw_err(hw, "port expander access failed with %d\n", status); - return status; -} - -/** - * ixgbe_write_pe - Write register to port expander - * @hw: pointer to hardware structure - * @reg: register number to write - * @value: value to write - * - * Returns status code - */ -static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) -{ - s32 status; - - status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, - value); - if (status) - hw_err(hw, "port expander access failed with %d\n", status); - return status; -} - -/** - * ixgbe_reset_cs4227 - Reset CS4227 using port expander + * ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * - * This function assumes that the caller has acquired the proper semaphore. * Returns error code */ -static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { - s32 status; - u32 retry; - u16 value; - u8 reg; + hw->mac.ops.set_lan_id(hw); - /* Trigger hard reset. 
*/ - status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status) - return status; - reg |= IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status) - return status; + ixgbe_read_mng_if_sel_x550em(hw); - status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); - if (status) - return status; - reg &= ~IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); - if (status) - return status; - - status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status) - return status; - reg &= ~IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status) - return status; - - usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100); - - status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status) - return status; - reg |= IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status) - return status; - - /* Wait for the reset to complete. */ - msleep(IXGBE_CS4227_RESET_DELAY); - for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, - &value); - if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK) - break; - msleep(IXGBE_CS4227_CHECK_DELAY); - } - if (retry == IXGBE_CS4227_RETRIES) { - hw_err(hw, "CS4227 reset did not complete\n"); - return IXGBE_ERR_PHY; - } - - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); - if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { - hw_err(hw, "CS4227 EEPROM did not load successfully\n"); - return IXGBE_ERR_PHY; - } - - return 0; -} - -/** - * ixgbe_check_cs4227 - Check CS4227 and reset as needed - * @hw: pointer to hardware structure - */ -static void ixgbe_check_cs4227(struct ixgbe_hw *hw) -{ - u32 swfw_mask = hw->phy.phy_semaphore_mask; - s32 status; - u16 value; - u8 retry; - - for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_err(hw, "semaphore failed with %d\n", status); - msleep(IXGBE_CS4227_CHECK_DELAY); - continue; - } - - /* Get status of reset flow. */ - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); - if (!status && value == IXGBE_CS4227_RESET_COMPLETE) - goto out; - - if (status || value != IXGBE_CS4227_RESET_PENDING) - break; - - /* Reset is pending. Wait and check again. */ - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(IXGBE_CS4227_CHECK_DELAY); - } - /* If still pending, assume other instance failed. */ - if (retry == IXGBE_CS4227_RETRIES) { - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_err(hw, "semaphore failed with %d\n", status); - return; - } - } - - /* Reset the CS4227. */ - status = ixgbe_reset_cs4227(hw); - if (status) { - hw_err(hw, "CS4227 reset failed: %d", status); - goto out; - } - - /* Reset takes so long, temporarily release semaphore in case the - * other driver instance is waiting for the reset indication. - */ - ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, - IXGBE_CS4227_RESET_PENDING); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(10000, 12000); - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_err(hw, "semaphore failed with %d", status); - return; - } - - /* Record completion for next time. 
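A condensed sketch of the scratch-register polling that ixgbe_check_cs4227() performs above, assuming the IXGBE_CS4227_* defines already used in this file; the helper itself is hypothetical:

    /* Poll the CS4227 scratch register until the peer driver instance
     * marks the reset complete, sleeping between attempts.
     */
    static bool example_cs4227_reset_done(struct ixgbe_hw *hw)
    {
        u16 value;
        u8 retry;

        for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
            if (ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value) == 0 &&
                value == IXGBE_CS4227_RESET_COMPLETE)
                return true;
            msec_delay(IXGBE_CS4227_CHECK_DELAY);
        }
        return false; /* still pending: this instance must reset the PHY */
    }

The scratch register acts as a tiny cross-instance handshake: RESET_PENDING tells the peer a reset is in flight, RESET_COMPLETE tells it not to reset again.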
*/ - status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, - IXGBE_CS4227_RESET_COMPLETE); - -out: - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(hw->eeprom.semaphore_delay); -} - -/** ixgbe_identify_phy_x550em - Get PHY type based on device id - * @hw: pointer to hardware structure - * - * Returns error code - */ -static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) -{ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - return ixgbe_identify_module_generic(hw); + return ixgbe_identify_sfp_module_X550em(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ - hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_A_SFP_N: - return ixgbe_identify_module_generic(hw); + return ixgbe_identify_sfp_module_X550em(hw); + break; case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; @@ -321,11 +342,6 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_A_10G_T: - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); case IXGBE_DEV_ID_X550EM_X_1G_T: @@ -334,8 +350,6 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: hw->phy.type = ixgbe_phy_fw; - hw->phy.ops.read_reg = NULL; - hw->phy.ops.write_reg = NULL; if (hw->bus.lan_id) hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; else @@ -344,81 +358,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) default: break; } - return 0; -} - -static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) -{ - return IXGBE_NOT_IMPLEMENTED; -} - -static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - return IXGBE_NOT_IMPLEMENTED; -} - -/** - * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - **/ -static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) -{ - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); -} - -/** - * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - **/ -static s32 -ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) -{ - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); -} - -/** - * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * - * Returns an error code on error. 
- **/ -static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) -{ - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); -} - -/** - * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * - * Returns an error code on error. - **/ -static s32 -ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) -{ - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); + return IXGBE_SUCCESS; } /** @@ -436,7 +376,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, } hic; u16 retries = FW_PHY_ACT_RETRIES; s32 rc; - u32 i; + u16 i; do { memset(&hic, 0, sizeof(hic)); @@ -444,22 +384,23 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; - hic.cmd.activity_id = cpu_to_le16(activity); - for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) - hic.cmd.data[i] = cpu_to_be32((*data)[i]); + hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity); + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]); - rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), IXGBE_HI_COMMAND_TIMEOUT, true); - if (rc) + if (rc != IXGBE_SUCCESS) return rc; if (hic.rsp.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) - (*data)[i] = be32_to_cpu(hic.rsp.data[i]); - return 0; + (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]); + return IXGBE_SUCCESS; } - usleep_range(20, 30); + usec_delay(20); --retries; } while (retries > 0); @@ -492,19 +433,18 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) s32 rc; u16 i; - if (hw->phy.id) - return 0; - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); if (rc) return rc; hw->phy.speeds_supported = 0; phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; - for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { if (phy_speeds & ixgbe_fw_map[i].fw_speed) hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; } + if (!hw->phy.autoneg_advertised) + hw->phy.autoneg_advertised = hw->phy.speeds_supported; hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; @@ -512,12 +452,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) return IXGBE_ERR_PHY_ADDR_INVALID; - - hw->phy.autoneg_advertised = hw->phy.speeds_supported; - hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; - return 0; + return IXGBE_SUCCESS; } /** @@ -545,7 +480,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) * * Returns error code */ -static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; @@ -553,6 +488,177 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); } +STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 
device_type, u16 *phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +STATIC s32 +ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +STATIC s32 +ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** +* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM"); + + /* Similar to X550 so start there. */ + ret_val = ixgbe_init_ops_X550(hw); + + /* Since this function eventually calls + * ixgbe_init_ops_540 by design, we are setting + * the pointers to NULL explicitly here to overwrite + * the values being set in the x540 function. + */ + /* Thermal sensor not supported in x550EM */ + mac->ops.get_thermal_sensor_data = NULL; + mac->ops.init_thermal_sensor_thresh = NULL; + mac->thermal_sensor_enabled = false; + + /* FCOE not supported in x550EM */ + mac->ops.get_san_mac_addr = NULL; + mac->ops.set_san_mac_addr = NULL; + mac->ops.get_wwn_prefix = NULL; + mac->ops.get_fcoe_boot_status = NULL; + + /* IPsec not supported in x550EM */ + mac->ops.disable_sec_rx_path = NULL; + mac->ops.enable_sec_rx_path = NULL; + + /* AUTOC register is not present in x550EM. 
 */
+	mac->ops.prot_autoc_read = NULL;
+	mac->ops.prot_autoc_write = NULL;
+
+	/* X550EM bus type is internal */
+	hw->bus.type = ixgbe_bus_type_internal;
+	mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
+
+	mac->ops.get_media_type = ixgbe_get_media_type_X550em;
+	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
+	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
+	mac->ops.reset_hw = ixgbe_reset_hw_X550em;
+	mac->ops.get_supported_physical_layer =
+				ixgbe_get_supported_physical_layer_X550em;
+
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+		mac->ops.setup_fc = ixgbe_setup_fc_generic;
+	else
+		mac->ops.setup_fc = ixgbe_setup_fc_X550em;
+
+	/* PHY */
+	phy->ops.init = ixgbe_init_phy_ops_X550em;
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X550EM_A_1G_T:
+	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+		mac->ops.setup_fc = NULL;
+		phy->ops.identify = ixgbe_identify_phy_fw;
+		phy->ops.set_phy_power = NULL;
+		phy->ops.get_firmware_version = NULL;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_1G_T:
+		mac->ops.setup_fc = NULL;
+		phy->ops.identify = ixgbe_identify_phy_x550em;
+		phy->ops.set_phy_power = NULL;
+		break;
+	default:
+		phy->ops.identify = ixgbe_identify_phy_x550em;
+	}
+
+	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+		phy->ops.set_phy_power = NULL;
+
+	/* EEPROM */
+	eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+	eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+	eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+	eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+	eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+
+	return ret_val;
+}
+
 /**
  * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
  * @hw: pointer to hardware structure
@@ -567,7 +673,8 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
 		return 0;
 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-		hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
+		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+			      "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
 		return IXGBE_ERR_INVALID_LINK_SETTINGS;
 	}
@@ -588,9 +695,9 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
 		break;
 	}
-	for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+	for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
 		if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
-			setup[0] |= ixgbe_fw_map[i].fw_speed;
+			setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed);
 	}
 	setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
@@ -602,11 +709,11 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
 		return rc;
 	if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
 		return IXGBE_ERR_OVERTEMP;
-	return 0;
+	return IXGBE_SUCCESS;
 }
 /**
- * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
  * @hw: pointer to hardware structure
  *
  * Called at init time to set up flow control.
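The init_ops hunks above deliberately set ops that X550EM hardware lacks (prot_autoc_*, setup_fc on some SKUs, set_phy_power) to NULL. A sketch of the guard callers are expected to apply before dispatching; the wrapper name is illustrative only:

    /* Hypothetical wrapper: an op left NULL by init_ops means
     * "not supported on this MAC", so check before the indirect call.
     */
    static s32 example_setup_fc(struct ixgbe_hw *hw)
    {
        if (!hw->mac.ops.setup_fc)
            return IXGBE_NOT_IMPLEMENTED;
        return hw->mac.ops.setup_fc(hw);
    }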
@@ -619,33 +726,334 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) return ixgbe_setup_fw_link(hw); } -/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params - * @hw: pointer to hardware structure +/** + * ixgbe_setup_eee_fw - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. + * Enable/disable EEE based on enable_eee flag. + * This function controls EEE for firmware-based PHY implementations. + */ +static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee) +{ + if (!!hw->phy.eee_speeds_advertised == enable_eee) + return IXGBE_SUCCESS; + if (enable_eee) + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + else + hw->phy.eee_speeds_advertised = 0; + return hw->phy.ops.setup_link(hw); +} + +/** +* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM_a. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM_a"); + + /* Start with generic X550EM init */ + ret_val = ixgbe_init_ops_X550EM(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + } else { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; + } + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; + + switch (mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + mac->ops.setup_fc = NULL; + mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; + break; + case ixgbe_media_type_backplane: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; + mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; + break; + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_eee = ixgbe_setup_eee_fw; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + break; + default: + break; + } + + return ret_val; +} + +/** +* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM_x. +* Does not touch the hardware. 
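One subtlety in ixgbe_setup_eee_fw() above: eee_speeds_advertised is a speed bitmap, not a bool, so it is normalized with !! before comparing against enable_eee. A short worked example (the speed bits are the ones wired up in this hunk):

    /* eee_speeds_advertised = IXGBE_LINK_SPEED_100_FULL |
     *                         IXGBE_LINK_SPEED_1GB_FULL   (nonzero bitmap)
     * !!bitmap == 1, so with enable_eee == true the function returns
     * IXGBE_SUCCESS immediately instead of re-running PHY link setup;
     * only a genuine on/off transition reaches phy.ops.setup_link().
     */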
+**/ +s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_link_info *link = &hw->link; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM_x"); + + /* Start with generic X550EM init */ + ret_val = ixgbe_init_ops_X550EM(hw); + + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; + link->ops.read_link = ixgbe_read_i2c_combined_generic; + link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked; + link->ops.write_link = ixgbe_write_i2c_combined_generic; + link->ops.write_link_unlocked = + ixgbe_write_i2c_combined_generic_unlocked; + link->addr = IXGBE_CS4227; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { + mac->ops.setup_fc = NULL; + mac->ops.setup_eee = NULL; + mac->ops.init_led_link_act = NULL; + } + + return ret_val; +} + +/** + * ixgbe_dmac_config_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. **/ -static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) +{ + u32 reg, high_pri_tc; + + DEBUGFUNC("ixgbe_dmac_config_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + /* Disable DMA Coalescing if the watchdog timer is 0 */ + if (!hw->mac.dmac_config.watchdog_timer) + goto out; + + ixgbe_dmac_config_tcs_X550(hw); + + /* Configure DMA Coalescing Control Register */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + + /* Set the watchdog timer in units of 40.96 usec */ + reg &= ~IXGBE_DMACR_DMACWT_MASK; + reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; + + reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; + /* If fcoe is enabled, set high priority traffic class */ + if (hw->mac.dmac_config.fcoe_en) { + high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; + reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & + IXGBE_DMACR_HIGH_PRI_TC_MASK); + } + reg |= IXGBE_DMACR_EN_MNG_IND; + + /* Enable DMA coalescing after configuration */ + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_config_tcs_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC. The dmac enable bit must + * be cleared before configuring. 
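The watchdog conversion in ixgbe_dmac_config_X550() above is worth a worked example, assuming the timer value is supplied in microseconds (the units are not restated in this hunk):

    /* The DMACWT field counts 40.96 us units, and
     * x * 100 / 4096 == x / 40.96 in integer math:
     *
     *   watchdog_timer = 8192 us
     *   field value    = 8192 * 100 / 4096 = 200
     *   effective time = 200 * 40.96 us    = 8192 us
     */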
+ **/ +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) +{ + u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; + + DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); + + /* Configure DMA coalescing enabled */ + switch (hw->mac.dmac_config.link_speed) { + case IXGBE_LINK_SPEED_10_FULL: + case IXGBE_LINK_SPEED_100_FULL: + pb_headroom = IXGBE_DMACRXT_100M; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + pb_headroom = IXGBE_DMACRXT_1G; + break; + default: + pb_headroom = IXGBE_DMACRXT_10G; + break; + } + + maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> + IXGBE_MHADD_MFS_SHIFT) / 1024); + + /* Set the per Rx packet buffer receive threshold */ + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { + reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); + reg &= ~IXGBE_DMCTH_DMACRXT_MASK; + + if (tc < hw->mac.dmac_config.num_tcs) { + /* Get Rx PB size */ + rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); + rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> + IXGBE_RXPBSIZE_SHIFT; + + /* Calculate receive buffer threshold in kilobytes */ + if (rx_pb_size > pb_headroom) + rx_pb_size = rx_pb_size - pb_headroom; + else + rx_pb_size = 0; + + /* Minimum of MFS shall be set for DMCTH */ + reg |= (rx_pb_size > maxframe_size_kb) ? + rx_pb_size : maxframe_size_kb; + } + IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_update_tcs_X550 + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enables dmac. + **/ +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + ixgbe_dmac_config_tcs_X550(hw); + + /* Enable DMA coalescing after configuration */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
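The per-TC threshold computed in ixgbe_dmac_config_tcs_X550() above is max(Rx buffer minus link-speed headroom, max frame size), all in kilobytes. A worked example with illustrative sizes; the IXGBE_DMACRXT_* headroom values below are assumptions, not values from this patch:

    /*   RXPBSIZE for the TC   = 160 KB
     *   pb_headroom (assumed) =  32 KB
     *   MAXFRS = 9216 bytes   ->   9 KB after the / 1024
     *   DMCTH threshold       = max(160 - 32, 9) = 128 KB
     */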
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
 {
 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 	u32 eec;
 	u16 eeprom_size;
+	DEBUGFUNC("ixgbe_init_eeprom_params_X550");
+
 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
 		eeprom->semaphore_delay = 10;
 		eeprom->type = ixgbe_flash;
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = BIT(eeprom_size +
-					IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = 1 << (eeprom_size +
+					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
-		       eeprom->type, eeprom->word_size);
+			  eeprom->type, eeprom->word_size);
 	}
-	return 0;
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+					   unsigned int pool)
+{
+	u64 pfflp;
+
+	/* max rx pool is 63 */
+	if (pool > 63)
+		return;
+
+	pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+	pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+	if (enable)
+		pfflp |= (1ULL << pool);
+	else
+		pfflp &= ~(1ULL << pool);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+	IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing_X550 - Configure Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+					    bool enable, int vf)
+{
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+	u32 pfvfspoof;
+
+	DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
+
+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+	if (enable)
+		pfvfspoof |= (1 << vf_target_shift);
+	else
+		pfvfspoof &= ~(1 << vf_target_shift);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 /**
@@ -653,751 +1061,55 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
  * @hw: pointer to hardware structure
  * @ctrl: pointer to location to receive final IOSF control value
  *
- * Return: failing status on timeout
+ * Returns failing status on timeout
  *
  * Note: ctrl can be NULL if the IOSF control register value is not needed
- */
-static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+ **/
+STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
 {
-	u32 i, command;
+	u32 i, command = 0;
 	/* Check every 10 usec to see if the address cycle completed.
 	 * The SB IOSF BUSY bit will clear when the operation is
-	 * complete.
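Worked examples of the bit arithmetic in the two pool/VF functions added above (pool and VF numbers illustrative):

    /* Source address pruning, pool = 40: bit 40 of the 64-bit pfflp
     * value lives in the high register, so after the read-modify-write
     * the low 32 bits go back to PFFLPL and bits 63:32 (with bit 8 of
     * PFFLPH now set) go back to PFFLPH.
     *
     * Ethertype anti-spoofing, vf = 21:
     *   vf_target_reg   = 21 >> 3 = 2            (PFVFSPOOF(2))
     *   vf_target_shift = 21 % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT
     *                   = 5 + shift              (one bit per VF in the lane)
     */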
+ * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); - if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) break; - udelay(10); + usec_delay(10); } if (ctrl) *ctrl = command; if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { - hw_dbg(hw, "IOSF wait timed out\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); return IXGBE_ERR_PHY; } - return 0; -} - -/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the - * IOSF device - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @phy_data: Pointer to read data from the register - **/ -static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *data) -{ - u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; - u32 command, error; - s32 ret; - - ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); - if (ret) - return ret; - - ret = ixgbe_iosf_wait(hw, NULL); - if (ret) - goto out; - - command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | - (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); - - /* Write IOSF control register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); - - ret = ixgbe_iosf_wait(hw, &command); - - if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { - error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> - IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; - hw_dbg(hw, "Failed to read, error %x\n", error); - return IXGBE_ERR_PHY; - } - - if (!ret) - *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); - -out: - hw->mac.ops.release_swfw_sync(hw, gssr); - return ret; + return IXGBE_SUCCESS; } /** - * ixgbe_get_phy_token - Get the token for shared PHY access - * @hw: Pointer to hardware structure - */ -static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) -{ - struct ixgbe_hic_phy_token_req token_cmd; - s32 status; - - token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; - token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; - token_cmd.hdr.cmd_or_resp.cmd_resv = 0; - token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - token_cmd.port_number = hw->bus.lan_id; - token_cmd.command_type = FW_PHY_TOKEN_REQ; - token_cmd.pad = 0; - status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (status) - return status; - if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) - return 0; - if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) - return IXGBE_ERR_FW_RESP_INVALID; - - return IXGBE_ERR_TOKEN_RETRY; -} - -/** - * ixgbe_put_phy_token - Put the token for shared PHY access - * @hw: Pointer to hardware structure - */ -static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) -{ - struct ixgbe_hic_phy_token_req token_cmd; - s32 status; - - token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; - token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; - token_cmd.hdr.cmd_or_resp.cmd_resv = 0; - token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - token_cmd.port_number = hw->bus.lan_id; - token_cmd.command_type = FW_PHY_TOKEN_REL; - token_cmd.pad = 0; - status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (status) - return status; - if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) - return 0; - return IXGBE_ERR_FW_RESP_INVALID; -} - -/** - * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit 
device type - * @data: Data to write to the register - **/ -static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - __always_unused u32 device_type, - u32 data) -{ - struct ixgbe_hic_internal_phy_req write_cmd; - - memset(&write_cmd, 0, sizeof(write_cmd)); - write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; - write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; - write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - write_cmd.port_number = hw->bus.lan_id; - write_cmd.command_type = FW_INT_PHY_REQ_WRITE; - write_cmd.address = cpu_to_be16(reg_addr); - write_cmd.write_data = cpu_to_be32(data); - - return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), - IXGBE_HI_COMMAND_TIMEOUT, false); -} - -/** - * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @data: Pointer to read data from the register - **/ -static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - __always_unused u32 device_type, - u32 *data) -{ - union { - struct ixgbe_hic_internal_phy_req cmd; - struct ixgbe_hic_internal_phy_resp rsp; - } hic; - s32 status; - - memset(&hic, 0, sizeof(hic)); - hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; - hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; - hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - hic.cmd.port_number = hw->bus.lan_id; - hic.cmd.command_type = FW_INT_PHY_REQ_READ; - hic.cmd.address = cpu_to_be16(reg_addr); - - status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), - IXGBE_HI_COMMAND_TIMEOUT, true); - - /* Extract the register value from the response. */ - *data = be32_to_cpu(hic.rsp.read_data); - - return status; -} - -/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Reads a 16 bit word(s) from the EEPROM using the hostif. - **/ -static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; - u32 current_word = 0; - u16 words_to_read; - s32 status; - u32 i; - - /* Take semaphore for the entire operation. 
*/ - status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) { - hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); - return status; - } - - while (words) { - if (words > FW_MAX_READ_BUFFER_SIZE / 2) - words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; - else - words_to_read = words; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = (__force u32)cpu_to_be32((offset + - current_word) * 2); - buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); - buffer.pad2 = 0; - buffer.pad3 = 0; - - status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT); - if (status) { - hw_dbg(hw, "Host interface command failed\n"); - goto out; - } - - for (i = 0; i < words_to_read; i++) { - u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + - 2 * i; - u32 value = IXGBE_READ_REG(hw, reg); - - data[current_word] = (u16)(value & 0xffff); - current_word++; - i++; - if (i < words_to_read) { - value >>= 16; - data[current_word] = (u16)(value & 0xffff); - current_word++; - } - } - words -= words_to_read; - } - -out: - hw->mac.ops.release_swfw_sync(hw, mask); - return status; -} - -/** ixgbe_checksum_ptr_x550 - Checksum one pointer region - * @hw: pointer to hardware structure - * @ptr: pointer offset in eeprom - * @size: size of section pointed by ptr, if 0 first word will be used as size - * @csum: address of checksum to update - * - * Returns error status for any failure - **/ -static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, - u16 size, u16 *csum, u16 *buffer, - u32 buffer_size) -{ - u16 buf[256]; - s32 status; - u16 length, bufsz, i, start; - u16 *local_buffer; - - bufsz = ARRAY_SIZE(buf); - - /* Read a chunk at the pointer location */ - if (!buffer) { - status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); - if (status) { - hw_dbg(hw, "Failed to read EEPROM image\n"); - return status; - } - local_buffer = buf; - } else { - if (buffer_size < ptr) - return IXGBE_ERR_PARAM; - local_buffer = &buffer[ptr]; - } - - if (size) { - start = 0; - length = size; - } else { - start = 1; - length = local_buffer[0]; - - /* Skip pointer section if length is invalid. 
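For the (removed) buffered-read loop above, the word-unpacking step is worth spelling out; the register value here is illustrative:

    /* Each 32-bit FLEX_MNG read carries two 16-bit EEPROM words:
     *   value = 0xBEEF0123
     *   data[current_word]     = 0x0123  (low half, first word)
     *   data[current_word + 1] = 0xBEEF  (high half, after value >>= 16)
     */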
*/ - if (length == 0xFFFF || length == 0 || - (ptr + length) >= hw->eeprom.word_size) - return 0; - } - - if (buffer && ((u32)start + (u32)length > buffer_size)) - return IXGBE_ERR_PARAM; - - for (i = start; length; i++, length--) { - if (i == bufsz && !buffer) { - ptr += bufsz; - i = 0; - if (length < bufsz) - bufsz = length; - - /* Read a chunk at the pointer location */ - status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, - bufsz, buf); - if (status) { - hw_dbg(hw, "Failed to read EEPROM image\n"); - return status; - } - } - *csum += local_buffer[i]; - } - return 0; -} - -/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum - * @hw: pointer to hardware structure - * @buffer: pointer to buffer containing calculated checksum - * @buffer_size: size of buffer - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, - u32 buffer_size) -{ - u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; - u16 *local_buffer; - s32 status; - u16 checksum = 0; - u16 pointer, i, size; - - hw->eeprom.ops.init_params(hw); - - if (!buffer) { - /* Read pointer area */ - status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, - IXGBE_EEPROM_LAST_WORD + 1, - eeprom_ptrs); - if (status) { - hw_dbg(hw, "Failed to read EEPROM image\n"); - return status; - } - local_buffer = eeprom_ptrs; - } else { - if (buffer_size < IXGBE_EEPROM_LAST_WORD) - return IXGBE_ERR_PARAM; - local_buffer = buffer; - } - - /* For X550 hardware include 0x0-0x41 in the checksum, skip the - * checksum word itself - */ - for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) - if (i != IXGBE_EEPROM_CHECKSUM) - checksum += local_buffer[i]; - - /* Include all data from pointers 0x3, 0x6-0xE. This excludes the - * FW, PHY module, and PCIe Expansion/Option ROM pointers. - */ - for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { - if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) - continue; - - pointer = local_buffer[i]; - - /* Skip pointer section if the pointer is invalid. */ - if (pointer == 0xFFFF || pointer == 0 || - pointer >= hw->eeprom.word_size) - continue; - - switch (i) { - case IXGBE_PCIE_GENERAL_PTR: - size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; - break; - case IXGBE_PCIE_CONFIG0_PTR: - case IXGBE_PCIE_CONFIG1_PTR: - size = IXGBE_PCIE_CONFIG_SIZE; - break; - default: - size = 0; - break; - } - - status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, - buffer, buffer_size); - if (status) - return status; - } - - checksum = (u16)IXGBE_EEPROM_SUM - checksum; - - return (s32)checksum; -} - -/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum - * @hw: pointer to hardware structure - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) -{ - return ixgbe_calc_checksum_X550(hw, NULL, 0); -} - -/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. 
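A worked example of the complement step at the end of the (removed) ixgbe_calc_checksum_X550() above; IXGBE_EEPROM_SUM is 0xBABA in ixgbe_type.h:

    /* If the covered words sum to 0xB000, the stored checksum word is
     *   0xBABA - 0xB000 = 0x0ABA (mod 2^16),
     * so re-summing every word including the checksum yields 0xBABA,
     * which is exactly what validate_checksum re-derives and compares.
     */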
-static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
-	const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
-	struct ixgbe_hic_read_shadow_ram buffer;
-	s32 status;
-
-	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
-	buffer.hdr.req.buf_lenh = 0;
-	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
-	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
-	/* convert offset from words to bytes */
-	buffer.address = (__force u32)cpu_to_be32(offset * 2);
-	/* one word */
-	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
-
-	status = hw->mac.ops.acquire_swfw_sync(hw, mask);
-	if (status)
-		return status;
-
-	status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
-				    IXGBE_HI_COMMAND_TIMEOUT);
-	if (!status) {
-		*data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-						  FW_NVM_DATA_OFFSET);
-	}
-
-	hw->mac.ops.release_swfw_sync(hw, mask);
-	return status;
-}
-
-/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum_val: calculated checksum
- *
- * Performs checksum calculation and validates the EEPROM checksum. If the
- * caller does not need checksum_val, the value can be NULL.
- **/
-static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
-					       u16 *checksum_val)
-{
-	s32 status;
-	u16 checksum;
-	u16 read_checksum = 0;
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = hw->eeprom.ops.read(hw, 0, &checksum);
-	if (status) {
-		hw_dbg(hw, "EEPROM read failed\n");
-		return status;
-	}
-
-	status = hw->eeprom.ops.calc_checksum(hw);
-	if (status < 0)
-		return status;
-
-	checksum = (u16)(status & 0xffff);
-
-	status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
-					   &read_checksum);
-	if (status)
-		return status;
-
-	/* Verify read checksum from EEPROM is the same as
-	 * calculated checksum
-	 */
-	if (read_checksum != checksum) {
-		status = IXGBE_ERR_EEPROM_CHECKSUM;
-		hw_dbg(hw, "Invalid EEPROM checksum");
-	}
-
-	/* If the user cares, return the calculated checksum */
-	if (checksum_val)
-		*checksum_val = checksum;
-
-	return status;
-}
-
-/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word to write to the EEPROM
- *
- * Write a 16 bit word to the EEPROM using the hostif.
- **/
-static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
-					   u16 data)
-{
-	s32 status;
-	struct ixgbe_hic_write_shadow_ram buffer;
-
-	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
-	buffer.hdr.req.buf_lenh = 0;
-	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
-	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
-	/* one word */
-	buffer.length = cpu_to_be16(sizeof(u16));
-	buffer.data = data;
-	buffer.address = cpu_to_be32(offset * 2);
-
-	status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
-					      IXGBE_HI_COMMAND_TIMEOUT, false);
-	return status;
-}
-
-/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word to write to the EEPROM
- *
- * Write a 16 bit word to the EEPROM using the hostif.
- **/
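(Another illustrative sketch, not part of the patch: the buffered read earlier in this hunk pulls two 16-bit NVM words out of each 32-bit IXGBE_FLEX_MNG register, low half first. A standalone model of that unpacking loop:)

#include <stdint.h>
#include <stdio.h>

/* Unpack nwords 16-bit words from an array of 32-bit register values,
 * low half of each register first, mirroring the loop in
 * ixgbe_read_ee_hostif_buffer_X550().
 */
static void unpack_words(const uint32_t *regs, uint16_t *words, int nwords)
{
	int i, w = 0;

	for (i = 0; w < nwords; i++) {
		uint32_t value = regs[i];

		words[w++] = (uint16_t)(value & 0xffff);
		if (w < nwords)
			words[w++] = (uint16_t)(value >> 16);
	}
}

int main(void)
{
	uint32_t regs[2] = { 0xbeefcafe, 0x0000f00d };
	uint16_t words[3];

	unpack_words(regs, words, 3);
	/* prints: cafe beef f00d */
	printf("%04x %04x %04x\n", words[0], words[1], words[2]);
	return 0;
}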
-static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
-	s32 status = 0;
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
-		status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		hw_dbg(hw, "write ee hostif failed to get semaphore");
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
- * @hw: pointer to hardware structure
- *
- * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
- **/
-static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
-{
-	s32 status = 0;
-	union ixgbe_hic_hdr2 buffer;
-
-	buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
-	buffer.req.buf_lenh = 0;
-	buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
-	buffer.req.checksum = FW_DEFAULT_CHECKSUM;
-
-	status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
-					      IXGBE_HI_COMMAND_TIMEOUT, false);
-	return status;
-}
-
 /**
- * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
+ * of the IOSF device
  * @hw: pointer to hardware structure
- *
- * Sets bus link width and speed to unknown because X550em is
- * not a PCI device.
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
  **/
-static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
-{
-	hw->bus.type = ixgbe_bus_type_internal;
-	hw->bus.width = ixgbe_bus_width_unknown;
-	hw->bus.speed = ixgbe_bus_speed_unknown;
-
-	hw->mac.ops.set_lan_id(hw);
-
-	return 0;
-}
-
-/**
- * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
- * @hw: pointer to hardware structure
- *
- * Returns true if in FW NVM recovery mode.
- */
-static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
-{
-	u32 fwsm;
-
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
-	return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
-}
-
-/** ixgbe_disable_rx_x550 - Disable RX unit
- * @hw: pointer to hardware structure
- *
- * Disables the Rx DMA unit for x550
- **/
-static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
-{
-	u32 rxctrl, pfdtxgswc;
-	s32 status;
-	struct ixgbe_hic_disable_rxen fw_cmd;
-
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	if (rxctrl & IXGBE_RXCTRL_RXEN) {
-		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
-		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
-			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
-			hw->mac.set_lben = true;
-		} else {
-			hw->mac.set_lben = false;
-		}
-
-		fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
-		fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
-		fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
-		fw_cmd.port_number = hw->bus.lan_id;
-
-		status = ixgbe_host_interface_command(hw, &fw_cmd,
-					sizeof(struct ixgbe_hic_disable_rxen),
-					IXGBE_HI_COMMAND_TIMEOUT, true);
-
-		/* If we fail - disable RX using register write */
-		if (status) {
-			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-			if (rxctrl & IXGBE_RXCTRL_RXEN) {
-				rxctrl &= ~IXGBE_RXCTRL_RXEN;
-				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
-			}
-		}
-	}
-}
-
-/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
- * @hw: pointer to hardware structure
- *
- * After writing EEPROM to shadow RAM using EEWR register, software calculates
- * checksum and updates the EEPROM and instructs the hardware to update
- * the flash.
- **/
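(A last illustrative sketch, not part of the patch: the update path below is ordering-sensitive — read word 0 first to fail fast, rewrite the data and the checksum word in shadow RAM, and only then ask the firmware to dump shadow RAM to flash. A stubbed standalone model, with the constants again standing in for the IXGBE_EEPROM_* values:)

#include <stdint.h>
#include <stdio.h>

#define CHECKSUM_WORD	0x3F
#define EEPROM_SUM	0xBABA
#define NWORDS		0x42

static uint16_t shadow[NWORDS];		/* model of the shadow RAM */

static uint16_t calc(void)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NWORDS; i++)
		if (i != CHECKSUM_WORD)
			sum += shadow[i];
	return (uint16_t)(EEPROM_SUM - sum);
}

static int flash_dump(void)
{
	/* stands in for the FW_SHADOW_RAM_DUMP_CMD host interface command */
	puts("shadow RAM -> flash");
	return 0;
}

int main(void)
{
	shadow[0x10] = 0x1234;		/* the data write */
	shadow[CHECKSUM_WORD] = calc();	/* then the checksum word */
	return flash_dump();		/* and only then the flash dump */
}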
-static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u16 checksum = 0;
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
-	if (status) {
-		hw_dbg(hw, "EEPROM read failed\n");
-		return status;
-	}
-
-	status = ixgbe_calc_eeprom_checksum_X550(hw);
-	if (status < 0)
-		return status;
-
-	checksum = (u16)(status & 0xffff);
-
-	status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
-					    checksum);
-	if (status)
-		return status;
-
-	status = ixgbe_update_flash_X550(hw);
-
-	return status;
-}
-
-/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @words: number of words
- * @data: word(s) to write to the EEPROM
- *
- * Write 16 bit word(s) to the EEPROM using the hostif.
- **/
-static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-					     u16 offset, u16 words,
-					     u16 *data)
-{
-	s32 status = 0;
-	u32 i = 0;
-
-	/* Take semaphore for the entire operation. */
-	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	if (status) {
-		hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
-		return status;
-	}
-
-	for (i = 0; i < words; i++) {
-		status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
-							 data[i]);
-		if (status) {
-			hw_dbg(hw, "Eeprom buffered write failed\n");
-			break;
-		}
-	}
-
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
-	return status;
-}
-
-/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
- * IOSF device
- *
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Data to write to the register
- **/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-					u32 device_type, u32 data)
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+				 u32 device_type, u32 data)
 {
 	u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
 	u32 command, error;
 	s32 ret;
 
-	ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
-	if (ret)
+	ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
+	if (ret != IXGBE_SUCCESS)
 		return ret;
 
 	ret = ixgbe_iosf_wait(hw, NULL);
-	if (ret)
+	if (ret != IXGBE_SUCCESS)
 		goto out;
 
 	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
@@ -1414,201 +1126,397 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
 	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
 		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
 			IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
-		hw_dbg(hw, "Failed to write, error %x\n", error);
-		return IXGBE_ERR_PHY;
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+			      "Failed to write, error %x\n", error);
+		ret = IXGBE_ERR_PHY;
 	}
 
 out:
-	hw->mac.ops.release_swfw_sync(hw, gssr);
+	ixgbe_release_swfw_semaphore(hw, gssr);
 	return ret;
 }
 
 /**
- * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
- * @hw: pointer to hardware structure
- *
- * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
+ * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to read data from the register
 **/
-static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+				u32 device_type, u32 *data)
 {
+	u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+	u32 command, error;
+	s32 ret;
+
+	ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
+	if (ret != IXGBE_SUCCESS)
+		return ret;
+
+	ret = ixgbe_iosf_wait(hw, NULL);
+	if (ret != IXGBE_SUCCESS)
+		goto out;
+
+	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+	/* Write IOSF control register */
+	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+	ret = ixgbe_iosf_wait(hw, &command);
+
+	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+			IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+			      "Failed to read, error %x\n", error);
+		ret = IXGBE_ERR_PHY;
+	}
+
+	if (ret == IXGBE_SUCCESS)
+		*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+out:
+	ixgbe_release_swfw_semaphore(hw, gssr);
+	return ret;
+}
+
+/**
+ * ixgbe_get_phy_token - Get the token for shared phy access
+ * @hw: Pointer to hardware structure
+ */
+
+s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+	struct ixgbe_hic_phy_token_req token_cmd;
 	s32 status;
-	u32 reg_val;
 
-	/* Disable training protocol FSM. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-			IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+	token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+	token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+	token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+	token_cmd.port_number = hw->bus.lan_id;
+	token_cmd.command_type = FW_PHY_TOKEN_REQ;
+	token_cmd.pad = 0;
+	status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
+					      sizeof(token_cmd),
+					      IXGBE_HI_COMMAND_TIMEOUT,
+					      true);
+	if (status) {
+		hw_dbg(hw, "Issuing host interface command failed with Status = %d\n",
+		       status);
+		return status;
+	}
+	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+		return IXGBE_SUCCESS;
+	if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
+		hw_dbg(hw, "Host interface command returned 0x%08x, returning IXGBE_ERR_FW_RESP_INVALID\n",
+		       token_cmd.hdr.cmd_or_resp.ret_status);
+		return IXGBE_ERR_FW_RESP_INVALID;
+	}
+
+	hw_dbg(hw, "Returning IXGBE_ERR_TOKEN_RETRY\n");
+	return IXGBE_ERR_TOKEN_RETRY;
+}
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared phy access
+ * @hw: Pointer to hardware structure
+ */
+
+s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+	struct ixgbe_hic_phy_token_req token_cmd;
+	s32 status;
+
+	token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+	token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+	token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+	token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+	token_cmd.port_number = hw->bus.lan_id;
+	token_cmd.command_type = FW_PHY_TOKEN_REL;
+	token_cmd.pad = 0;
+	status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
+					      sizeof(token_cmd),
+					      IXGBE_HI_COMMAND_TIMEOUT,
+					      true);
+	if (status)
+		return status;
+	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+		return IXGBE_SUCCESS;
-	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-
IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; + hw_dbg(hw, "Put PHY Token host interface command failed"); + return IXGBE_ERR_FW_RESP_INVALID; +} - /* Disable Flex from training TXFFE. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; +/** + * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register + * of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + s32 status; + UNREFERENCED_1PARAMETER(device_type); - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; + memset(&write_cmd, 0, sizeof(write_cmd)); + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); + write_cmd.write_data = IXGBE_CPU_TO_BE32(data); - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; - - /* Enable override for coefficients. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; - - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); return status; } /** - * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the - * internal PHY - * @hw: pointer to hardware structure + * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register **/ -static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) { + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; s32 status; - u32 link_ctrl; + UNREFERENCED_1PARAMETER(device_type); - /* Restart auto-negotiation. 
*/ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); - if (status) { - hw_dbg(hw, "Auto-negotiation did not complete\n"); - return status; - } + status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); - link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); - - if (hw->mac.type == ixgbe_mac_x550em_a) { - u32 flx_mask_st20; - - /* Indicate to FW that AN restart has been asserted */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); - - if (status) { - hw_dbg(hw, "Auto-negotiation did not complete\n"); - return status; - } - - flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); - } + /* Extract the register value from the response. */ + *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data); return status; } -/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. - * @hw: pointer to hardware structure - * @speed: the link speed to force +/** + * ixgbe_disable_mdd_X550 + * @hw: pointer to hardware structure * - * Configures the integrated KR PHY to use iXFI mode. Used to connect an - * internal and external PHY at a specific speed, without autonegotiation. + * Disable malicious driver detection **/ -static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) { - struct ixgbe_mac_info *mac = &hw->mac; - s32 status; - u32 reg_val; + u32 reg; - /* iXFI is only supported with X552 */ - if (mac->type != ixgbe_mac_X550EM_x) - return IXGBE_ERR_LINK_SETUP; + DEBUGFUNC("ixgbe_disable_mdd_X550"); - /* Disable AN and force speed to 10G Serial. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + /* Disable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + /* Disable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} - /* Select forced link speed for internal PHY. 
*/ - switch (*speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; +/** + * ixgbe_enable_mdd_X550 + * @hw: pointer to hardware structure + * + * Enable malicious driver detection + **/ +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_enable_mdd_X550"); + + /* Enable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Enable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_restore_mdd_vf_X550 + * @hw: pointer to hardware structure + * @vf: vf index + * + * Restore VF that was disabled during malicious driver detection event + **/ +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) +{ + u32 idx, reg, num_qs, start_q, bitmask; + + DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); + + /* Map VF to queues */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + num_qs = 8; /* 16 VFs / pools */ + bitmask = 0x000000FF; break; - case IXGBE_LINK_SPEED_1GB_FULL: - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + num_qs = 4; /* 32 VFs / pools */ + bitmask = 0x0000000F; + break; + default: /* 64 VFs / pools */ + num_qs = 2; + bitmask = 0x00000003; + break; + } + start_q = vf * num_qs; + + /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ + idx = start_q / 32; + reg = 0; + reg |= (bitmask << (start_q % 32)); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); +} + +/** + * ixgbe_mdd_event_X550 + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + * Handle malicious driver detection event. + **/ +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + u32 wqbr; + u32 i, j, reg, q, shift, vf, idx; + + DEBUGFUNC("ixgbe_mdd_event_X550"); + + /* figure out pool size for mapping to vf's */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + shift = 3; /* 16 VFs / pools */ + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + shift = 2; /* 32 VFs / pools */ break; default: - /* Other link speeds are not supported by internal KR PHY. */ - return IXGBE_ERR_LINK_SETUP; + shift = 1; /* 64 VFs / pools */ + break; } - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; + /* Read WQBR_TX and WQBR_RX and check for malicious queues */ + for (i = 0; i < 4; i++) { + wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); + wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); - /* Additional configuration needed for x550em_x */ - if (hw->mac.type == ixgbe_mac_X550EM_x) { - status = ixgbe_setup_ixfi_x550em_x(hw); - if (status) - return status; + if (!wqbr) + continue; + + /* Get malicious queue */ + for (j = 0; j < 32 && wqbr; j++) { + + if (!(wqbr & (1 << j))) + continue; + + /* Get queue from bitmask */ + q = j + (i * 32); + + /* Map queue to vf */ + vf = (q >> shift); + + /* Set vf bit in vf_bitmap */ + idx = vf / 32; + vf_bitmap[idx] |= (1 << (vf % 32)); + wqbr &= ~(1 << j); + } } - - /* Toggle port SW reset by AN reset. 
*/ - status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; } /** - * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported - * @hw: pointer to hardware structure - * @linear: true if SFP module is linear + * ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) */ -static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) { + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_X550em"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_XFI: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + media_type = ixgbe_media_type_backplane; + hw->phy.type = ixgbe_phy_sgmii; + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: true if SFP module is linear + */ +STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); + switch (hw->phy.sfp_type) { case ixgbe_sfp_type_not_present: return IXGBE_ERR_SFP_NOT_PRESENT; @@ -1633,323 +1541,111 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) return IXGBE_ERR_SFP_NOT_SUPPORTED; } - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. + * ixgbe_identify_sfp_module_X550em - Identifies SFP modules * @hw: pointer to hardware structure - * @speed: the link speed to force - * @autoneg_wait_to_complete: unused * - * Configures the extern PHY and the integrated KR PHY for SFP support. - */ -static s32 -ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) { s32 status; - u16 reg_slice, reg_val; - bool setup_linear = false; + bool linear; - /* Check if SFP module is supported and linear */ - status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); - /* If no SFP module present, then return success. Return success since - * there is no reason to configure CS4227 and SFP not present error is - * not accepted in the setup MAC link flow. - */ - if (status == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; + status = ixgbe_identify_module_generic(hw); - if (status) + if (status != IXGBE_SUCCESS) return status; - /* Configure internal PHY for KR/KX. 
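 *
 * (Illustrative note, not from the original patch: the line-side write
 * that follows picks the CS4227 EDC mode from the module type checked
 * above,
 *
 *	setup_linear  -> IXGBE_CS4227_EDC_MODE_CX1 (linear modules)
 *	!setup_linear -> IXGBE_CS4227_EDC_MODE_SR  (limiting, e.g. SR optics)
 *
 * with the low bit of the register value set in both cases.)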
*/ - ixgbe_setup_kr_speed_x550em(hw, speed); - - /* Configure CS4227 LINE side to proper mode. */ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); - if (setup_linear) - reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - - status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, - reg_val); + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); return status; } /** - * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode + * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops * @hw: pointer to hardware structure - * @speed: the link speed to force - * - * Configures the integrated PHY for native SFI mode. Used to connect the - * internal PHY directly to an SFP cage, without autonegotiation. - **/ -static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) { - struct ixgbe_mac_info *mac = &hw->mac; s32 status; - u32 reg_val; + bool linear; - /* Disable all AN and force speed to 10G Serial. */ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) + DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + if (status != IXGBE_SUCCESS) return status; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; - /* Select forced link speed for internal PHY. */ - switch (*speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; - break; - default: - /* Other link speeds are not supported by internal PHY. */ - return IXGBE_ERR_LINK_SETUP; + return IXGBE_SUCCESS; +} + +/** +* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the +* internal PHY +* @hw: pointer to hardware structure +**/ +STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 link_ctrl; + + /* Restart auto-negotiation. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); + + if (status) { + hw_dbg(hw, "Auto-negotiation did not complete\n"); + return status; } - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); - /* Toggle port SW reset by AN reset. 
*/
-	status = ixgbe_restart_an_internal_phy_x550em(hw);
+	if (hw->mac.type == ixgbe_mac_X550EM_a) {
+		u32 flx_mask_st20;
+
+		/* Indicate to FW that AN restart has been asserted */
+		status = hw->mac.ops.read_iosf_sb_reg(hw,
+				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
+
+		if (status) {
+			hw_dbg(hw, "Auto-negotiation did not complete\n");
+			return status;
+		}
+
+		flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
+		status = hw->mac.ops.write_iosf_sb_reg(hw,
+				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
+	}
 
 	return status;
 }
 
-/**
- * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
- * @hw: pointer to hardware structure
- * @speed: link speed
- * @autoneg_wait_to_complete: unused
- *
- * Configure the integrated PHY for native SFP support.
- */
-static s32
-ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			   __always_unused bool autoneg_wait_to_complete)
-{
-	bool setup_linear = false;
-	u32 reg_phy_int;
-	s32 ret_val;
-
-	/* Check if SFP module is supported and linear */
-	ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
-
-	/* If no SFP module present, then return success. Return success since
-	 * SFP not present error is not accepted in the setup MAC link flow.
-	 */
-	if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
-		return 0;
-
-	if (ret_val)
-		return ret_val;
-
-	/* Configure internal PHY for native SFI based on module type */
-	ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
-				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
-	if (ret_val)
-		return ret_val;
-
-	reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
-	if (!setup_linear)
-		reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
-
-	ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
-				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
-	if (ret_val)
-		return ret_val;
-
-	/* Setup SFI internal link. */
-	return ixgbe_setup_sfi_x550a(hw, &speed);
-}
-
-/**
- * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
- * @hw: pointer to hardware structure
- * @speed: link speed
- * @autoneg_wait_to_complete: unused
- *
- * Configure the integrated PHY for SFP support.
- */
-static s32
-ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			       __always_unused bool autoneg_wait_to_complete)
-{
-	u32 reg_slice, slice_offset;
-	bool setup_linear = false;
-	u16 reg_phy_ext;
-	s32 ret_val;
-
-	/* Check if SFP module is supported and linear */
-	ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
-
-	/* If no SFP module present, then return success. Return success since
-	 * SFP not present error is not accepted in the setup MAC link flow.
-	 */
-	if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
-		return 0;
-
-	if (ret_val)
-		return ret_val;
-
-	/* Configure internal PHY for KR/KX. */
-	ixgbe_setup_kr_speed_x550em(hw, speed);
-
-	if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
-		return IXGBE_ERR_PHY_ADDR_INVALID;
-
-	/* Get external PHY SKU id */
-	ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
-				       IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
-	if (ret_val)
-		return ret_val;
-
-	/* When configuring quad port CS4223, the MAC instance is part
-	 * of the slice offset.
-	 */
-	if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
-		slice_offset = (hw->bus.lan_id +
-				(hw->bus.instance_id << 1)) << 12;
-	else
-		slice_offset = hw->bus.lan_id << 12;
-
-	/* Configure CS4227/CS4223 LINE side to proper mode.
*/ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; - - ret_val = hw->phy.ops.read_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - if (ret_val) - return ret_val; - - reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | - (IXGBE_CS4227_EDC_MODE_SR << 1)); - - if (setup_linear) - reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; - else - reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; - - ret_val = hw->phy.ops.write_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); - if (ret_val) - return ret_val; - - /* Flush previous write with a read */ - return hw->phy.ops.read_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -} - -/** - * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait: true when waiting for completion is needed - * - * Setup internal/external PHY link speed based on link speed, then set - * external PHY auto advertised link speed. - * - * Returns error status for any failure - **/ -static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait) -{ - s32 status; - ixgbe_link_speed force_speed; - - /* Setup internal/external PHY link speed to iXFI (10G), unless - * only 1G is auto advertised then setup KX link. - */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - force_speed = IXGBE_LINK_SPEED_10GB_FULL; - else - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If X552 and internal link mode is XFI, then setup XFI internal link. - */ - if (hw->mac.type == ixgbe_mac_X550EM_x && - !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - status = ixgbe_setup_ixfi_x550em(hw, &force_speed); - - if (status) - return status; - } - - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -} - -/** ixgbe_check_link_t_X550em - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true when link is up - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Check that both the MAC and X557 external PHY have link. - **/ -static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, - bool link_up_wait_to_complete) -{ - u32 status; - u16 i, autoneg_status; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; - - status = ixgbe_check_mac_link_generic(hw, speed, link_up, - link_up_wait_to_complete); - - /* If check link fails or MAC link is not up, then return */ - if (status || !(*link_up)) - return status; - - /* MAC link is up, so check external PHY link. - * Link status is latching low, and can only be used to detect link - * drop, and not the current status of the link without performing - * back-to-back reads. 
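 *
 * (Illustrative note, not from the original patch: a latched-low bit
 * clears on link drop and stays cleared until read, so the first read
 * only reports whether the link dropped since the last read, and the
 * second read reflects the current state:
 *
 *	read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &autoneg_status);  latched value
 *	read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &autoneg_status);  current value
 * )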
- */ - for (i = 0; i < 2; i++) { - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - - if (status) - return status; - } - - /* If external PHY link is not up, then indicate link not up */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) - *link_up = false; - - return 0; -} - /** * ixgbe_setup_sgmii - Set up link for sgmii * @hw: pointer to hardware structure - * @speed: unused - * @autoneg_wait_to_complete: unused + * @speed: new link speed + * @autoneg_wait: true when waiting for completion is needed */ -static s32 -ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) +STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; @@ -1987,14 +1683,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, return rc; rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); if (rc) return rc; @@ -2005,22 +1695,25 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); if (rc) return rc; rc = ixgbe_restart_an_internal_phy_x550em(hw); - return rc; + if (rc) + return rc; + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** - * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs + * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation * @hw: pointer to hardware structure - * @speed: the link speed to force + * @speed: new link speed * @autoneg_wait: true when waiting for completion is needed */ -static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, +STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { struct ixgbe_mac_info *mac = &hw->mac; @@ -2082,101 +1775,22 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, if (rc) return rc; - ixgbe_restart_an_internal_phy_x550em(hw); + rc = ixgbe_restart_an_internal_phy_x550em(hw); return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** - * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. */ -static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; - u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; - ixgbe_link_speed speed; - bool link_up; - - /* AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. 
- */ - if (hw->fc.disable_fc_autoneg) - goto out; - - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) - goto out; - - /* Check if auto-negotiation has completed */ - status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); - if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { - status = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - - /* Negotiate the flow control */ - status = ixgbe_negotiate_fc(hw, info[0], info[0], - FW_PHY_ACT_GET_LINK_INFO_FC_RX, - FW_PHY_ACT_GET_LINK_INFO_FC_TX, - FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, - FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); - -out: - if (!status) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - } -} - -/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers - * @hw: pointer to hardware structure - **/ -static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; - switch (mac->ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - mac->ops.setup_fc = NULL; - mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; - break; - case ixgbe_media_type_copper: - if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && - hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - break; - } - mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; - mac->ops.setup_fc = ixgbe_fc_autoneg_fw; - mac->ops.setup_link = ixgbe_setup_sgmii_fw; - mac->ops.check_link = ixgbe_check_mac_link_generic; - break; - case ixgbe_media_type_backplane: - mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; - mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; - break; - default: - break; - } -} + DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); -/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers - * @hw: pointer to hardware structure - **/ -static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - mac->ops.setup_fc = ixgbe_setup_fc_x550em; - - switch (mac->ops.get_media_type(hw)) { + switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: /* CS4227 does not support autoneg, so disable the laser control * functions for SFP+ fiber @@ -2185,28 +1799,34 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_SFP_N: - mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; - break; - case IXGBE_DEV_ID_X550EM_A_SFP: - mac->ops.setup_mac_link = - ixgbe_setup_mac_link_sfp_x550a; - break; - default: - mac->ops.setup_mac_link = - ixgbe_setup_mac_link_sfp_x550em; - break; - } mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; + + if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || + (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + else + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; break; case ixgbe_media_type_copper: if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) break; - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - mac->ops.setup_fc = ixgbe_setup_fc_generic; - mac->ops.check_link = ixgbe_check_link_t_X550em; + if (hw->mac.type == ixgbe_mac_X550EM_a) { + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == 
IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = + ixgbe_check_mac_link_generic; + } else { + mac->ops.setup_link = + ixgbe_setup_mac_link_t_X550em; + } + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; + } break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || @@ -2216,40 +1836,20 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) default: break; } - - /* Additional modification for X550em_a devices */ - if (hw->mac.type == ixgbe_mac_x550em_a) - ixgbe_init_mac_link_ops_X550em_a(hw); } -/** ixgbe_setup_sfp_modules_X550em - Setup SFP module - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) -{ - s32 status; - bool linear; - - /* Check if SFP module is supported */ - status = ixgbe_supported_sfp_modules_X550em(hw, &linear); - if (status) - return status; - - ixgbe_init_mac_link_ops_X550em(hw); - hw->phy.ops.reset = NULL; - - return 0; -} - -/** ixgbe_get_link_capabilities_x550em - Determines link capabilities +/** + * ixgbe_get_link_capabilities_x550em - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: true when autoneg or autotry is enabled - **/ -static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) + */ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { + DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); + if (hw->phy.type == ixgbe_phy_fw) { *autoneg = true; *speed = hw->phy.speeds_supported; @@ -2258,15 +1858,17 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, /* SFP */ if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ *autoneg = false; + /* Check if 1G SFP module. 
*/
 		if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
-		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
+		    || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
 		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
-			return 0;
+			return IXGBE_SUCCESS;
 		}
 
 		/* Link capabilities are based on SFP */
@@ -2276,29 +1878,27 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
 		else
 			*speed = IXGBE_LINK_SPEED_10GB_FULL;
 	} else {
+		*autoneg = true;
+
 		switch (hw->phy.type) {
-		case ixgbe_phy_x550em_kx4:
-			*speed = IXGBE_LINK_SPEED_1GB_FULL |
-				 IXGBE_LINK_SPEED_2_5GB_FULL |
-				 IXGBE_LINK_SPEED_10GB_FULL;
-			break;
 		case ixgbe_phy_x550em_xfi:
 			*speed = IXGBE_LINK_SPEED_1GB_FULL |
 				 IXGBE_LINK_SPEED_10GB_FULL;
+			*autoneg = false;
 			break;
 		case ixgbe_phy_ext_1g_t:
 		case ixgbe_phy_sgmii:
 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
 			break;
 		case ixgbe_phy_x550em_kr:
-			if (hw->mac.type == ixgbe_mac_x550em_a) {
+			if (hw->mac.type == ixgbe_mac_X550EM_a) {
 				/* check different backplane modes */
 				if (hw->phy.nw_mng_if_sel &
-				    IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+					IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
 					*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
 					break;
 				} else if (hw->device_id ==
-					   IXGBE_DEV_ID_X550EM_A_KR_L) {
+						IXGBE_DEV_ID_X550EM_A_KR_L) {
 					*speed = IXGBE_LINK_SPEED_1GB_FULL;
 					break;
 				}
@@ -2309,24 +1909,24 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
 					 IXGBE_LINK_SPEED_1GB_FULL;
 			break;
 		}
-		*autoneg = true;
 	}
-	return 0;
+
+	return IXGBE_SUCCESS;
 }
 
 /**
  * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
  * @hw: pointer to hardware structure
  * @lsc: pointer to boolean flag which indicates whether external Base T
- *	 PHY interrupt is lsc
+ *	PHY interrupt is lsc
 *
 * Determine if external Base T PHY interrupt cause is high temperature
 * failure alarm or link status change.
 *
 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
 * failure alarm, else return PHY access status.
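 *
 * Usage sketch (illustrative, not from the original patch), following the
 * LASI handler pattern used with this helper elsewhere in the driver:
 *
 *	bool lsc;
 *	s32 rc = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
 *
 *	if (rc)
 *		return rc;	(rc may be IXGBE_ERR_OVERTEMP)
 *	if (lsc && hw->phy.ops.setup_internal_link)
 *		return hw->phy.ops.setup_internal_link(hw);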
- **/ -static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + */ +STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) { u32 status; u16 reg; @@ -2335,27 +1935,29 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) /* Vendor alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) + if (status != IXGBE_SUCCESS || + !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) return status; /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | - IXGBE_MDIO_GLOBAL_ALARM_1_INT))) + if (status != IXGBE_SUCCESS || + !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status) + if (status != IXGBE_SUCCESS) return status; /* If high temperature failure, then return over temp error and exit */ @@ -2363,13 +1965,13 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) /* power down the PHY in case the PHY FW didn't already */ ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; - } - if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { /* device fault alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status) + + if (status != IXGBE_SUCCESS) return status; /* if device fault was due to high temp alarm handle and exit */ @@ -2382,23 +1984,24 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) /* Vendor alarm 2 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, - MDIO_MMD_AN, ®); + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + if (status != IXGBE_SUCCESS || + !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) return status; /* link connect/disconnect event occurred */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, - MDIO_MMD_AN, ®); + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - if (status) + if (status != IXGBE_SUCCESS) return status; /* Indicate LSC */ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) *lsc = true; - return 0; + return IXGBE_SUCCESS; } /** @@ -2409,8 +2012,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) * Base T PHY * * Returns PHY access status - **/ -static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + */ +STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) { u32 status; u16 reg; @@ -2429,97 +2032,77 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) * as the internal link to the external PHY. Hence adding a check here * to avoid enabling LASI interrupts for X553 devices. 
*/ - if (hw->mac.type != ixgbe_mac_x550em_a) { + if (hw->mac.type != ixgbe_mac_X550EM_a) { status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, ®); - if (status) + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); + + if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, reg); - if (status) + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); + + if (status != IXGBE_SUCCESS) return status; } /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status) + + if (status != IXGBE_SUCCESS) return status; reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); - if (status) + + if (status != IXGBE_SUCCESS) return status; /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status) + + if (status != IXGBE_SUCCESS) return status; reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | IXGBE_MDIO_GLOBAL_ALARM_1_INT); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); - if (status) + + if (status != IXGBE_SUCCESS) return status; /* Enable chip-wide vendor alarm */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); - if (status) + + if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, - MDIO_MMD_VEND1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); return status; } -/** - * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt - * @hw: pointer to hardware structure - * - * Handle external Base T PHY interrupt. If high temperature - * failure alarm then return error, else if link status change - * then setup internal/external PHY link - * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. - **/ -static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - bool lsc; - u32 status; - - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - if (status) - return status; - - if (lsc && phy->ops.setup_internal_link) - return phy->ops.setup_internal_link(hw); - - return 0; -} - /** * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. * @hw: pointer to hardware structure @@ -2527,7 +2110,7 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) * * Configures the integrated KR PHY. 
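 *
 * Call sketch (illustrative, not from the original patch), matching the
 * copper setup path removed later in this hunk:
 *
 *	speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL;
 *	status = ixgbe_setup_kr_speed_x550em(hw, speed);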
 **/
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
 				       ixgbe_link_speed speed)
 {
 	s32 status;
@@ -2555,11 +2138,11 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
 			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
 			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
 
-	if (hw->mac.type == ixgbe_mac_x550em_a) {
+	if (hw->mac.type == ixgbe_mac_X550EM_a) {
 		/* Set lane mode to KR auto negotiation */
 		status = hw->mac.ops.read_iosf_sb_reg(hw,
-				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+			    IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+			    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
 
 		if (status)
 			return status;
@@ -2571,545 +2154,13 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
 		reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
 
 		status = hw->mac.ops.write_iosf_sb_reg(hw,
-				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+			    IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+			    IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
 	}
 
 	return ixgbe_restart_an_internal_phy_x550em(hw);
 }
 
-/**
- * ixgbe_setup_kr_x550em - Configure the KR PHY
- * @hw: pointer to hardware structure
- **/
-static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
-{
-	/* leave link alone for 2.5G */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
-		return 0;
-
-	if (ixgbe_check_reset_blocked(hw))
-		return 0;
-
-	return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
-}
-
-/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
- * @hw: address of hardware structure
- * @link_up: address of boolean to indicate link status
- *
- * Returns error code if unable to get link status.
- **/
-static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
-{
-	u32 ret;
-	u16 autoneg_status;
-
-	*link_up = false;
-
-	/* read this twice back to back to indicate current status */
-	ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
-				   &autoneg_status);
-	if (ret)
-		return ret;
-
-	ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
-				   &autoneg_status);
-	if (ret)
-		return ret;
-
-	*link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
-
-	return 0;
-}
-
-/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
- * @hw: pointer to hardware structure
- *
- * Configures the link between the integrated KR PHY and the external X557 PHY.
- * The driver will call this function when it gets a link status change
- * interrupt from the X557 PHY. This function configures the link speed
- * between the PHYs to match the link speed of the BASE-T link.
- *
- * A return of a non-zero value indicates an error, and the base driver should
- * not report link up.
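 *
 * (Illustrative note, not from the original patch: the body below maps
 * the X557's negotiated BASE-T speed onto a forced iXFI speed,
 *
 *	IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL -> IXGBE_LINK_SPEED_10GB_FULL
 *	IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL  -> IXGBE_LINK_SPEED_1GB_FULL
 *
 * and rejects anything else with IXGBE_ERR_INVALID_LINK_SETTINGS.)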
- **/
-static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
-{
-	ixgbe_link_speed force_speed;
-	bool link_up;
-	u32 status;
-	u16 speed;
-
-	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
-		return IXGBE_ERR_CONFIG;
-
-	if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
-	      !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
-		speed = IXGBE_LINK_SPEED_10GB_FULL |
-			IXGBE_LINK_SPEED_1GB_FULL;
-		return ixgbe_setup_kr_speed_x550em(hw, speed);
-	}
-
-	/* If link is not up, then there is no setup necessary so return */
-	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
-	if (status)
-		return status;
-
-	if (!link_up)
-		return 0;
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
-				      MDIO_MMD_AN,
-				      &speed);
-	if (status)
-		return status;
-
-	/* If link is not still up, then no setup is necessary so return */
-	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
-	if (status)
-		return status;
-
-	if (!link_up)
-		return 0;
-
-	/* clear everything but the speed and duplex bits */
-	speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
-
-	switch (speed) {
-	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
-		force_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
-		force_speed = IXGBE_LINK_SPEED_1GB_FULL;
-		break;
-	default:
-		/* Internal PHY does not support anything else */
-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
-	}
-
-	return ixgbe_setup_ixfi_x550em(hw, &force_speed);
-}
-
-/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
- * @hw: pointer to hardware structure
- **/
-static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
-{
-	s32 status;
-
-	status = ixgbe_reset_phy_generic(hw);
-
-	if (status)
-		return status;
-
-	/* Configure Link Status Alarm and Temperature Threshold interrupts */
-	return ixgbe_enable_lasi_ext_t_x550em(hw);
-}
-
-/**
- * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs.
- * @hw: pointer to hardware structure
- * @led_idx: led number to turn on
- **/
-static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
-{
-	u16 phy_data;
-
-	if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
-		return IXGBE_ERR_PARAM;
-
-	/* To turn on the LED, set mode to ON. */
-	hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
-			     MDIO_MMD_VEND1, &phy_data);
-	phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
-	hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
-			      MDIO_MMD_VEND1, phy_data);
-
-	return 0;
-}
-
-/**
- * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs.
- * @hw: pointer to hardware structure
- * @led_idx: led number to turn off
- **/
-static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
-{
-	u16 phy_data;
-
-	if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
-		return IXGBE_ERR_PARAM;
-
-	/* To turn off the LED, set mode to OFF. */
-	hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
-			     MDIO_MMD_VEND1, &phy_data);
-	phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
-	hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
-			      MDIO_MMD_VEND1, phy_data);
-
-	return 0;
-}
-
-/**
- * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
- * @hw: pointer to the HW structure
- * @maj: driver version major number
- * @min: driver version minor number
- * @build: driver version build number
- * @sub: driver version sub build number
- * @len: length of driver_ver string
- * @driver_ver: driver string
- *
- * Sends driver version number to firmware through the manageability
- * block.
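 *
 * Call sketch (illustrative, not from the original patch; the version
 * numbers and string are placeholders):
 *
 *	static const char ver[] = "5.1.0-k";
 *
 *	status = ixgbe_set_fw_drv_ver_x550(hw, 5, 1, 0, 0, sizeof(ver), ver);
 *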
On success return 0 - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. - **/ -static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, u16 len, - const char *driver_ver) -{ - struct ixgbe_hic_drv_info2 fw_cmd; - s32 ret_val; - int i; - - if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) - return IXGBE_ERR_INVALID_ARGUMENT; - - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; - fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; - fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; - fw_cmd.ver_maj = maj; - fw_cmd.ver_min = min; - fw_cmd.ver_build = build; - fw_cmd.ver_sub = sub; - fw_cmd.hdr.checksum = 0; - memcpy(fw_cmd.driver_string, driver_ver, len); - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); - - for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (ret_val) - continue; - - if (fw_cmd.hdr.cmd_or_resp.ret_status != - FW_CEM_RESP_STATUS_SUCCESS) - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - return 0; - } - - return ret_val; -} - -/** ixgbe_get_lcd_x550em - Determine lowest common denominator - * @hw: pointer to hardware structure - * @lcd_speed: pointer to lowest common link speed - * - * Determine lowest common link speed with link partner. - **/ -static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed *lcd_speed) -{ - u16 an_lp_status; - s32 status; - u16 word = hw->eeprom.ctrl_word_3; - - *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; - - status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, - MDIO_MMD_AN, - &an_lp_status); - if (status) - return status; - - /* If link partner advertised 1G, return 1G */ - if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { - *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; - return status; - } - - /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ - if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || - (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) - return status; - - /* Link partner not capable of lower speeds, return 10G */ - *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; - return status; -} - -/** - * ixgbe_setup_fc_x550em - Set up flow control - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) -{ - bool pause, asm_dir; - u32 reg_val; - s32 rc = 0; - - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; - } - - /* 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* Determine PAUSE and ASM_DIR bits. */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - pause = false; - asm_dir = false; - break; - case ixgbe_fc_tx_pause: - pause = false; - asm_dir = true; - break; - case ixgbe_fc_rx_pause: - /* Rx Flow control is enabled and Tx Flow control is - * disabled by software override. 
Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE, as such we fall - * through to the fc_full statement. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - /* Fallthrough */ - case ixgbe_fc_full: - pause = true; - asm_dir = true; - break; - default: - hw_err(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - rc = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - ®_val); - if (rc) - return rc; - - reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - if (pause) - reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - if (asm_dir) - reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - rc = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - reg_val); - - /* This device does not fully support AN. */ - hw->fc.disable_fc_autoneg = true; - break; - case IXGBE_DEV_ID_X550EM_X_XFI: - hw->fc.disable_fc_autoneg = true; - break; - default: - break; - } - return rc; -} - -/** - * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - **/ -static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) -{ - u32 link_s1, lp_an_page_low, an_cntl_1; - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; - ixgbe_link_speed speed; - bool link_up; - - /* AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. 
- */ - if (hw->fc.disable_fc_autoneg) { - hw_err(hw, "Flow control autoneg is disabled"); - goto out; - } - - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - hw_err(hw, "The link is down"); - goto out; - } - - /* Check at auto-negotiation has completed */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_S1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); - - if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); - status = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - - /* Read the 10g AN autoc and LP ability registers and resolve - * local flow control settings accordingly - */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); - - if (status) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); - goto out; - } - - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); - - if (status) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); - goto out; - } - - status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, - IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, - IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, - IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); - -out: - if (!status) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - } -} - -/** - * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings - * @hw: pointer to hardware structure - **/ -static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) -{ - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; -} - -/** ixgbe_enter_lplu_x550em - Transition to low power states - * @hw: pointer to hardware structure - * - * Configures Low Power Link Up on transition to low power states - * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting - * the X557 PHY immediately prior to entering LPLU. - **/ -static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) -{ - u16 an_10g_cntl_reg, autoneg_reg, speed; - s32 status; - ixgbe_link_speed lcd_speed; - u32 save_autoneg; - bool link_up; - - /* If blocked by MNG FW, then don't restart AN */ - if (ixgbe_check_reset_blocked(hw)) - return 0; - - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status) - return status; - - status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, - &hw->eeprom.ctrl_word_3); - if (status) - return status; - - /* If link is down, LPLU disabled in NVM, WoL disabled, or - * manageability disabled, then force link down by entering - * low power mode. - */ - if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || - !(hw->wol_enabled || ixgbe_mng_present(hw))) - return ixgbe_set_copper_phy_power(hw, false); - - /* Determine LCD */ - status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); - if (status) - return status; - - /* If no valid LCD link speed, then force link down and exit. 
*/ - if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) - return ixgbe_set_copper_phy_power(hw, false); - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - MDIO_MMD_AN, - &speed); - if (status) - return status; - - /* If no link now, speed is invalid so take link down */ - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status) - return ixgbe_set_copper_phy_power(hw, false); - - /* clear everything but the speed bits */ - speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; - - /* If current speed is already LCD, then exit. */ - if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && - (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || - ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && - (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) - return status; - - /* Clear AN completed indication */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, - MDIO_MMD_AN, - &autoneg_reg); - if (status) - return status; - - status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, - &an_10g_cntl_reg); - if (status) - return status; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, - &autoneg_reg); - if (status) - return status; - - save_autoneg = hw->phy.autoneg_advertised; - - /* Setup link at least common link speed */ - status = hw->mac.ops.setup_link(hw, lcd_speed, false); - - /* restore autoneg from before setting lplu speed */ - hw->phy.autoneg_advertised = save_autoneg; - - return status; -} - /** * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs * @hw: pointer to hardware structure @@ -3120,7 +2171,7 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) s32 rc; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) - return 0; + return IXGBE_SUCCESS; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); if (rc) @@ -3151,16 +2202,17 @@ static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) ixgbe_shutdown_fw_phy(hw); return IXGBE_ERR_OVERTEMP; } - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure * - * Read NW_MNG_IF_SEL register and save field values. - */ -static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) + * Read NW_MNG_IF_SEL register and save field values, and check for valid field + * values. + **/ +STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) { /* Save NW management interface connected on board. This is used * to determine internal PHY mode. @@ -3170,33 +2222,72 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set * PHY address. This register field was has only been used for X552. */ - if (hw->mac.type == ixgbe_mac_x550em_a && + if (hw->mac.type == ixgbe_mac_X550EM_a && hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { - hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; + hw->phy.addr = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; } + + return IXGBE_SUCCESS; } -/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init - * @hw: pointer to hardware structure +/** + * ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure * - * Initialize any function pointers that were not able to be - * set during init_shared_code because the PHY/SFP type was - * not known. Perform the SFP init if necessary. 
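A short sketch of how the firmware overtemp hook wired up above is consumed by callers (the helper name is illustrative, not part of this patch):

static bool example_overtemp_tripped(struct ixgbe_hw *hw)
{
        /* ixgbe_check_overtemp_fw() has already shut the FW-controlled
         * PHY down by the time it returns IXGBE_ERR_OVERTEMP.
         */
        return hw->phy.ops.check_overtemp &&
               hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP;
}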
- **/ -static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + */ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; - hw->mac.ops.set_lan_id(hw); + DEBUGFUNC("ixgbe_init_phy_ops_X550em"); + hw->mac.ops.set_lan_id(hw); ixgbe_read_mng_if_sel_x550em(hw); if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); + phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + phy->ops.read_reg_mdi = NULL; + phy->ops.write_reg_mdi = NULL; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + phy->ops.check_overtemp = ixgbe_check_overtemp_fw; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + + break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; + hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + phy->ops.read_reg_mdi = NULL; + phy->ops.write_reg_mdi = NULL; + default: + break; } /* Identify the PHY or SFP module */ @@ -3222,6 +2313,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; + case ixgbe_phy_ext_1g_t: + /* link is managed by FW */ + phy->ops.setup_link = NULL; + phy->ops.reset = NULL; + break; case ixgbe_phy_x550em_xfi: /* link is managed by HW */ phy->ops.setup_link = NULL; @@ -3229,21 +2325,16 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_ext_t: - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now. 
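Because the 1G firmware-managed parts above deliberately clear the MDIO accessors, callers must tolerate NULL PHY register ops. A defensive sketch (helper name illustrative; the IXGBE_NOT_IMPLEMENTED sentinel is assumed from ixgbe_type.h):

static s32 example_safe_phy_read(struct ixgbe_hw *hw, u32 reg_addr,
                                 u32 device_type, u16 *phy_data)
{
        /* On X550EM_A_1G_T(_L), read_reg is NULL: the PHY is FW-managed. */
        if (!hw->phy.ops.read_reg)
                return IXGBE_NOT_IMPLEMENTED;

        return hw->phy.ops.read_reg(hw, reg_addr, device_type, phy_data);
}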
*/ phy->ops.setup_internal_link = ixgbe_setup_internal_phy_t_x550em; - /* setup SW LPLU only for first revision */ - if (hw->mac.type == ixgbe_mac_X550EM_x && - !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & - IXGBE_FUSES0_REV_MASK)) + /* setup SW LPLU only for first revision of X550EM_x */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + !(IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; @@ -3256,105 +2347,17 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.setup_link = ixgbe_setup_fw_link; phy->ops.reset = ixgbe_reset_phy_fw; break; - case ixgbe_phy_ext_1g_t: - phy->ops.setup_link = NULL; - phy->ops.read_reg = NULL; - phy->ops.write_reg = NULL; - phy->ops.reset = NULL; - break; default: break; } - return ret_val; } -/** ixgbe_get_media_type_X550em - Get media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - * - */ -static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - /* Detect if there is a copper PHY attached. */ - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - hw->phy.type = ixgbe_phy_sgmii; - /* Fallthrough */ - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_XFI: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_X550EM_X_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP_N: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - media_type = ixgbe_media_type_copper; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } - return media_type; -} - -/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. - ** @hw: pointer to hardware structure - **/ -static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) -{ - s32 status; - u16 reg; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, - MDIO_MMD_PMAPMD, - ®); - if (status) - return status; - - /* If PHY FW reset completed bit is set then this is the first - * SW instance after a power on so the PHY FW must be un-stalled. 
- */ - if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - MDIO_MMD_VEND1, - ®); - if (status) - return status; - - reg &= ~IXGBE_MDIO_POWER_UP_STALL; - - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - MDIO_MMD_VEND1, - reg); - if (status) - return status; - } - - return status; -} - /** * ixgbe_set_mdio_speed - Set MDIO clock speed * @hw: pointer to hardware structure */ -static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) { u32 hlreg0; @@ -3364,6 +2367,7 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_QSFP: /* Config MDIO clock speed before the first MDIO PHY access */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 &= ~IXGBE_HLREG0_MDCSPD; @@ -3381,14 +2385,15 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) } } -/** ixgbe_reset_hw_X550em - Perform hardware reset - ** @hw: pointer to hardware structure - ** - ** Resets the hardware by resetting the transmit and receive units, masks - ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) - ** reset. - **/ -static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +/** + * ixgbe_reset_hw_X550em - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + */ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; @@ -3397,25 +2402,40 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) bool link_up = false; u32 swfw_mask = hw->phy.phy_semaphore_mask; + DEBUGFUNC("ixgbe_reset_hw_X550em"); + /* Call adapter stop to disable Tx/Rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) + if (status != IXGBE_SUCCESS) { + hw_dbg(hw, "Failed to stop adapter, STATUS = %d\n", status); return status; - + } /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); + ixgbe_set_mdio_speed(hw); + /* PHY ops must be identified and initialized prior to reset */ status = hw->phy.ops.init(hw); + + if (status) + hw_dbg(hw, "Failed to initialize PHY ops, STATUS = %d\n", + status); + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || - status == IXGBE_ERR_PHY_ADDR_INVALID) + status == IXGBE_ERR_PHY_ADDR_INVALID) { + hw_dbg(hw, "Returning from reset HW due to PHY init failure\n"); return status; + } /* start the external PHY */ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = ixgbe_init_ext_t_x550em(hw); - if (status) + if (status) { + hw_dbg(hw, "Failed to start the external PHY, STATUS = %d\n", + status); return status; + } } /* Setup SFP module if there is one present. */ @@ -3428,8 +2448,10 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) return status; /* Reset PHY */ - if (!hw->phy.reset_disable && hw->phy.ops.reset) - hw->phy.ops.reset(hw); + if (!hw->phy.reset_disable && hw->phy.ops.reset) { + if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP) + return IXGBE_ERR_OVERTEMP; + } mac_reset_top: /* Issue global reset to the MAC. Needs to be SW reset if link is up. @@ -3438,7 +2460,6 @@ mac_reset_top: * reset is set, then perform link reset. 
*/ ctrl = IXGBE_CTRL_LNK_RST; - if (!hw->force_full_reset) { hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) @@ -3446,23 +2467,22 @@ mac_reset_top: } status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_dbg(hw, "semaphore failed with %d", status); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -3470,11 +2490,11 @@ mac_reset_top: hw_dbg(hw, "Reset polling failed to complete.\n"); } - msleep(50); + msec_delay(50); /* Double resets are required for recovery from certain error - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; @@ -3496,72 +2516,1617 @@ mac_reset_top: if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ixgbe_setup_mux_ctl(hw); + if (status != IXGBE_SUCCESS) + hw_dbg(hw, "Reset HW failed, STATUS = %d\n", status); + return status; } -/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype - * anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for Ethertype anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing - **/ -static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, - bool enable, int vf) +/** + * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. + * @hw: pointer to hardware structure + */ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) { - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; - u32 pfvfspoof; + u32 status; + u16 reg; - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - if (enable) - pfvfspoof |= BIT(vf_target_shift); - else - pfvfspoof &= ~BIT(vf_target_shift); + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); -} + if (status != IXGBE_SUCCESS) + return status; -/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning - * @hw: pointer to hardware structure - * @enable: enable or disable source address pruning - * @pool: Rx pool to set source address pruning for - **/ -static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, - bool enable, - unsigned int pool) -{ - u64 pfflp; + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled. 
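A condensed, control-flow-only sketch of the double-reset contract in ixgbe_reset_hw_X550em() above (register pokes elided; function name illustrative):

static void example_double_reset_flow(struct ixgbe_hw *hw)
{
mac_reset_top:
        /* ...issue IXGBE_CTRL_RST/LNK_RST, poll IXGBE_CTRL_RST_MASK,
         * then stall...
         */
        if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
                hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
                goto mac_reset_top;     /* second pass after the stall */
        }
}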
+ */ + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); - /* max rx pool is 63 */ - if (pool > 63) - return; + if (status != IXGBE_SUCCESS) + return status; - pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); - pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + reg &= ~IXGBE_MDIO_POWER_UP_STALL; - if (enable) - pfflp |= (1ULL << pool); - else - pfflp &= ~(1ULL << pool); + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); - IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); - IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); + if (status != IXGBE_SUCCESS) + return status; + } + + return status; } /** - * ixgbe_setup_fc_backplane_x550em_a - Set up flow control - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. + * ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure **/ -static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { - s32 status = 0; - u32 an_cntl = 0; + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + return IXGBE_SUCCESS; + + if (ixgbe_check_reset_blocked(hw)) + return 0; + + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: unused + * + * Configure the external PHY and the integrated KR PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_slice, reg_val; + bool setup_linear = false; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227, and an SFP-not-present error + * is not expected in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + /* Configure CS4227 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + return ret_val; +} + +/** + * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated PHY for native SFI mode. Used to connect the + * internal PHY directly to an SFP cage, without autonegotiation. + **/ +STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* Disable all AN and force speed to 10G Serial.
*/ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: unused + * + * Configure the the integrated PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_phy_ext; + bool setup_linear = false; + u32 reg_slice, reg_phy_int, slice_offset; + + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { + /* Configure internal PHY for native SFI based on module type */ + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; + if (!setup_linear) + reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; + + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* Setup SFI internal link. */ + ret_val = ixgbe_setup_sfi_x550a(hw, &speed); + } else { + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { + /* Find Address */ + hw_dbg(hw, "Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); + return IXGBE_ERR_PHY_ADDR_INVALID; + } + + /* Get external PHY SKU id */ + ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_SKU_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | + (IXGBE_CS4227_EDC_MODE_SR << 1)); + + if (setup_linear) + reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->phy.ops.write_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); + + /* Flush previous write with a read */ + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + } + return ret_val; +} + +/** + * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure + * + * iXfI configuration needed for ixgbe_mac_X550EM_x devices. + **/ +STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* Disable training protocol FSM. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Disable Flex from training TXFFE. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Enable override for coefficients. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + return status; +} + +/** + * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. 
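The CS4227/CS4223 slice addressing used above reduces to a single expression; a sketch with an illustrative helper name:

static u32 example_cs4227_slice(const struct ixgbe_hw *hw, bool quad_port)
{
        /* Quad-port CS4223 folds the MAC instance into the slice offset. */
        u32 unit = quad_port ? hw->bus.lan_id + (hw->bus.instance_id << 1)
                             : hw->bus.lan_id;

        return IXGBE_CS4227_LINE_SPARE24_LSB + (unit << 12);
}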
+ **/ +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; + + /* Disable AN and force speed to 10G Serial. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Additional configuration needed for x550em_x */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + status = ixgbe_setup_ixfi_x550em_x(hw); + if (status != IXGBE_SUCCESS) + return status; + } + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. + */ +STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +{ + u32 ret; + u16 autoneg_status; + + *link_up = false; + + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link + * @hw: point to hardware structure + * + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. 
+ */ +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If link is down, there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + if (!link_up) + return IXGBE_SUCCESS; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status != IXGBE_SUCCESS) + return status; + + /* If link is still down - no setup is required so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + if (!link_up) + return IXGBE_SUCCESS; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); + } else { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } +} + +/** + * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY to use internal loopback mode. + **/ +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set near-end loopback clocks. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set loopback enable. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Training bypass. 
*/ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; + + DEBUGFUNC("ixgbe_read_ee_hostif_X550"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.pad2 = 0; + buffer.data = 0; + buffer.pad3 = 0; + + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + } + + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} + +/** + * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. 
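A usage sketch for the single-word hostif read above (offsets are 16-bit word offsets; the helper converts to a byte address internally; wrapper name illustrative):

static s32 example_read_lplu_word(struct ixgbe_hw *hw, u16 *ctrl_word_3)
{
        /* NVM_INIT_CTRL_3 holds the LPLU/D10GMP bits consumed later in
         * this file.
         */
        return ixgbe_read_ee_hostif_X550(hw, NVM_INIT_CTRL_3, ctrl_word_3);
}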
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); + buffer.pad2 = 0; + buffer.data = 0; + buffer.pad3 = 0; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, true); + if (status != IXGBE_SUCCESS) { + hw_dbg(hw, "for offset %04x failed with status %d\n", + offset, status); + return status; + } + + if (buffer.hdr.rsp.buf_lenh_status != FW_CEM_RESP_STATUS_SUCCESS) { + hw_dbg(hw, "for offset %04x host interface return status %02x\n", + offset, buffer.hdr.rsp.buf_lenh_status); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
+ **/ +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_ee_hostif_X550"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + hw_dbg(hw, "write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u32 i = 0; + + DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status != IXGBE_SUCCESS) { + hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + goto out; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + + if (status != IXGBE_SUCCESS) { + hw_dbg(hw, "Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + + return status; +} + +/** + * ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns error status for any failure + */ +STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return IXGBE_SUCCESS; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* + * For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** + * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. 
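The checksum arithmetic above reduces to one invariant: the stored word is chosen so that the 16-bit sum of every covered word plus the checksum itself equals IXGBE_EEPROM_SUM (0xBABA). Verification is then just (sketch, helper name illustrative):

static bool example_eeprom_checksum_ok(u16 sum_of_covered_words,
                                       u16 stored_checksum)
{
        return (u16)(sum_of_covered_words + stored_checksum) ==
               (u16)IXGBE_EEPROM_SUM;
}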
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** + * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + union ixgbe_hic_hdr2 buffer; + + DEBUGFUNC("ixgbe_update_flash_X550"); + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
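Putting the helpers above together, a safe single-word NVM update follows this sequence (wrapper name illustrative):

static s32 example_update_nvm_word(struct ixgbe_hw *hw, u16 offset, u16 data)
{
        s32 status = ixgbe_write_ee_hostif_X550(hw, offset, data);

        if (status)
                return status;

        /* Recomputes and writes the checksum word, then calls
         * ixgbe_update_flash_X550() to commit shadow RAM to flash.
         */
        return ixgbe_update_eeprom_checksum_X550(hw);
}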
+ **/ +u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_X550EM_a) { + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + physical_layer = + IXGBE_PHYSICAL_LAYER_2500BASE_KX; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + physical_layer = + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + } + } + /* fall through */ + case ixgbe_phy_x550em_xfi: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_kx4: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_ext_t: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + case ixgbe_phy_fw: + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T; + break; + case ixgbe_phy_sgmii: + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_ext_1g_t: + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + default: + break; + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); + + return physical_layer; +} + +/** + * ixgbe_get_bus_info_x550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. 
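The value returned above is a bitmask of IXGBE_PHYSICAL_LAYER_* flags, so callers test individual bits rather than comparing for equality; sketch (helper name illustrative):

static bool example_supports_10gbase_t(struct ixgbe_hw *hw)
{
        u64 layers = ixgbe_get_supported_physical_layer_X550em(hw);

        return (layers & IXGBE_PHYSICAL_LAYER_10GBASE_T) != 0;
}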
+ **/ +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + + DEBUGFUNC("ixgbe_get_bus_info_x550em"); + + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_rx_x550 - Disable RX unit + * @hw: pointer to hardware structure + * + * Disables the Rx unit for x550 + **/ +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + DEBUGFUNC("ixgbe_disable_rx_x550"); + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + +/** + * ixgbe_enter_lplu_t_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the + * X557 PHY immediately prior to entering LPLU. + **/ +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* SW LPLU not required on later HW revisions. */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + (IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + return IXGBE_SUCCESS; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability + * disabled, then force link down by entering low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, FALSE); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no valid LCD link speed, then force link down and exit.
*/ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, FALSE); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return ixgbe_set_copper_phy_power(hw, false); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. */ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Setup link at lowest common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, false); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + +/** + * ixgbe_get_lcd_t_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner. + **/ +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** + * ixgbe_setup_fc_X550em - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 pause, asm_dir, reg_val; + + DEBUGFUNC("ixgbe_setup_fc_X550em"); /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full.
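+ *
+ * (Summary table, paraphrasing the switch below and IEEE 802.3
+ * Annex 28B; it adds no new behaviour:
+ *
+ *	requested_mode		PAUSE	ASM_DIR
+ *	ixgbe_fc_none		0	0
+ *	ixgbe_fc_tx_pause	0	1
+ *	ixgbe_fc_rx_pause	1	1
+ *	ixgbe_fc_full		1	1
+ *
+ * rx_pause advertises the same bits as full because Rx-only PAUSE
+ * cannot be advertised on its own; the Tx side is disabled later.)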
+ */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = 0; + asm_dir = 0; + break; + case ixgbe_fc_tx_pause: + pause = 0; + asm_dir = 1; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + pause = 1; + asm_dir = 1; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (ret_val != IXGBE_SUCCESS) + goto out; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; + break; + default: + break; + } + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) +{ + u32 link_s1, lp_an_page_low, an_cntl_1; + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
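+ *
+ * (Hedged sketch of the resolution step, based on the generic
+ * ixgbe_negotiate_fc() behaviour and IEEE 802.3 Annex 28B rather than
+ * on code added by this patch:
+ *
+ *	local SYM and LP SYM                -> full (rx_pause if only
+ *	                                       Rx pause was requested)
+ *	local ASM only, LP SYM and ASM      -> tx_pause
+ *	local SYM and ASM, LP ASM only      -> rx_pause
+ *	any other combination               -> none)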
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + /* Check if auto-negotiation has completed */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_S1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); + + if (status != IXGBE_SUCCESS || + (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); + + if (status != IXGBE_SUCCESS) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + goto out; + } + + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); + + if (status != IXGBE_SUCCESS) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + goto out; + } + + status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, + IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); + +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings + * @hw: pointer to hardware structure + * + * Flow control is not auto-negotiated on this media; the requested + * mode is applied directly as the current mode. + **/ +void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) +{ + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; +} + +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up.
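+ *
+ * (An observation for clarity rather than new behaviour: unlike the
+ * backplane variant above, this path reads the local and the link
+ * partner pause bits out of a single firmware word, which is why
+ * info[0] is passed twice to ixgbe_negotiate_fc() below, once with
+ * the local FC_RX/FC_TX masks and once with the LP_FC_RX/LP_FC_TX
+ * masks.)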
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status != IXGBE_SUCCESS || + !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_setup_fc_backplane_x550em_a - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 an_cntl = 0; + + DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } @@ -3576,7 +4141,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); - if (status) { + if (status != IXGBE_SUCCESS) { hw_dbg(hw, "Auto-Negotiation did not complete\n"); return status; } @@ -3618,7 +4183,8 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; break; default: - hw_err(hw, "Flow control param set incorrectly\n"); + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } @@ -3637,7 +4203,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @state: set mux if 1, clear if 0 */ -static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) { u32 esdp; @@ -3658,11 +4224,13 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore and sets the I2C MUX - */ -static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) + **/ +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { s32 status; + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); + status = ixgbe_acquire_swfw_sync_X540(hw, mask); if (status) return status; @@ -3670,7 +4238,7 @@ static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 1); - return 0; + return IXGBE_SUCCESS; } /** @@ -3679,9 +4247,11 @@ static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore and sets the I2C MUX - */ -static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) + **/ +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { + DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); + if (mask & 
IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 0); @@ -3689,51 +4259,68 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) } /** - * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore + * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * - * Acquires the SWFW semaphore and get the shared PHY token as needed + * Acquires the SWFW semaphore and gets the shared phy token as needed */ -static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; int retries = FW_PHY_TOKEN_RETRIES; - s32 status; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); while (--retries) { - status = 0; + status = IXGBE_SUCCESS; if (hmask) status = ixgbe_acquire_swfw_sync_X540(hw, hmask); - if (status) + if (status) { + hw_dbg(hw, "Could not acquire SWFW semaphore, Status = %d\n", + status); return status; + } if (!(mask & IXGBE_GSSR_TOKEN_SM)) - return 0; + return IXGBE_SUCCESS; status = ixgbe_get_phy_token(hw); - if (!status) - return 0; + if (status == IXGBE_ERR_TOKEN_RETRY) + hw_dbg(hw, "Could not acquire PHY token, Status = %d\n", + status); + + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); - if (status != IXGBE_ERR_TOKEN_RETRY) + + if (status != IXGBE_ERR_TOKEN_RETRY) { + hw_dbg(hw, "Unable to retry acquiring the PHY token, Status = %d\n", + status); return status; - msleep(FW_PHY_TOKEN_DELAY); + } } + hw_dbg(hw, "Semaphore acquisition retries failed!: PHY ID = 0x%08X\n", + hw->phy.id); return status; } /** - * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore + * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * - * Release the SWFW semaphore and puts the shared PHY token as needed + * Releases the SWFW semaphore and puts the shared phy token as needed */ -static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); + if (mask & IXGBE_GSSR_TOKEN_SM) ixgbe_put_phy_token(hw); @@ -3742,7 +4329,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) } /** - * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * ixgbe_read_phy_reg_x550a - Reads specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type @@ -3751,12 +4338,14 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) * Reads a value from a specified PHY register using the SWFW lock and PHY * Token. The PHY Token is needed since the MDIO is shared between to MAC * instances.
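 *
 * (Assumed usage sketch, not taken from this patch: callers do not
 * manage the token themselves; they go through the phy ops, e.g.
 *
 *	u16 val;
 *	s32 err = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
 *				       IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &val);
 *
 * which resolves to ixgbe_read_phy_reg_x550a() on these parts and so
 * takes both the SWFW semaphore and the PHY token around the access.)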
- */ -static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) + **/ +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) { - u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_read_phy_reg_x550a"); if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return IXGBE_ERR_SWFW_SYNC; @@ -3777,321 +4366,280 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, * * Writes a value to specified PHY register using the SWFW lock and PHY Token. * The PHY Token is needed since the MDIO is shared between to MAC instances. - */ -static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) + **/ +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) { - u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; - if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_write_phy_reg_x550a"); - status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); - hw->mac.ops.release_swfw_sync(hw, mask); + if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, mask); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } return status; } -#define X550_COMMON_MAC \ - .init_hw = &ixgbe_init_hw_generic, \ - .start_hw = &ixgbe_start_hw_X540, \ - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ - .get_mac_addr = &ixgbe_get_mac_addr_generic, \ - .get_device_caps = &ixgbe_get_device_caps_generic, \ - .stop_adapter = &ixgbe_stop_adapter_generic, \ - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ - .read_analog_reg8 = NULL, \ - .write_analog_reg8 = NULL, \ - .set_rxpba = &ixgbe_set_rxpba_generic, \ - .check_link = &ixgbe_check_mac_link_generic, \ - .blink_led_start = &ixgbe_blink_led_start_X540, \ - .blink_led_stop = &ixgbe_blink_led_stop_X540, \ - .set_rar = &ixgbe_set_rar_generic, \ - .clear_rar = &ixgbe_clear_rar_generic, \ - .set_vmdq = &ixgbe_set_vmdq_generic, \ - .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ - .clear_vmdq = &ixgbe_clear_vmdq_generic, \ - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ - .enable_mc = &ixgbe_enable_mc_generic, \ - .disable_mc = &ixgbe_disable_mc_generic, \ - .clear_vfta = &ixgbe_clear_vfta_generic, \ - .set_vfta = &ixgbe_set_vfta_generic, \ - .fc_enable = &ixgbe_fc_enable_generic, \ - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ - .init_uta_tables = &ixgbe_init_uta_tables_generic, \ - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ - .set_source_address_pruning = \ - &ixgbe_set_source_address_pruning_X550, \ - .set_ethertype_anti_spoofing = \ - &ixgbe_set_ethertype_anti_spoofing_X550, \ - .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ - .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ - .get_thermal_sensor_data = NULL, \ - .init_thermal_sensor_thresh = NULL, \ - .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \ - .enable_rx = &ixgbe_enable_rx_generic, \ - .disable_rx = &ixgbe_disable_rx_x550, \ +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle 
external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + bool lsc; + u32 status; -static const struct ixgbe_mac_operations mac_ops_X550 = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_generic, - .led_off = ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .get_media_type = &ixgbe_get_media_type_X540, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .setup_sfp = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .prot_autoc_read = prot_autoc_read_generic, - .prot_autoc_write = prot_autoc_write_generic, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, -}; + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); -static const struct ixgbe_mac_operations mac_ops_X550EM_x = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X550em, - .get_media_type = &ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, - .get_bus_info = &ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, - .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .setup_fc = NULL, /* defined later */ - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, -}; + if (status != IXGBE_SUCCESS) + return status; -static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { - X550_COMMON_MAC - .led_on = NULL, - .led_off = NULL, - .init_led_link_act = NULL, - .reset_hw = &ixgbe_reset_hw_X550em, - .get_media_type = &ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, - .get_bus_info = &ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, - .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .setup_fc = NULL, - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, -}; + if (lsc) + return ixgbe_setup_internal_phy(hw); -static const struct ixgbe_mac_operations mac_ops_x550em_a = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = ixgbe_reset_hw_X550em, - .get_media_type = ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = 
NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = ixgbe_get_link_capabilities_X550em, - .get_bus_info = ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, - .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_x550em, - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, -}; + return IXGBE_SUCCESS; +} -static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_generic, - .led_off = ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = ixgbe_reset_hw_X550em, - .get_media_type = ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = NULL, /* defined later */ - .get_link_capabilities = ixgbe_get_link_capabilities_X550em, - .get_bus_info = ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, - .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_x550em, - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, -}; +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. + * + * Returns error status for any failure + **/ +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + ixgbe_link_speed force_speed; -#define X550_COMMON_EEP \ - .read = &ixgbe_read_ee_hostif_X550, \ - .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ - .write = &ixgbe_write_ee_hostif_X550, \ - .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ - .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ - .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ - .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); -static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { - X550_COMMON_EEP - .init_params = &ixgbe_init_eeprom_params_X550, -}; + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; -static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { - X550_COMMON_EEP - .init_params = &ixgbe_init_eeprom_params_X540, -}; + /* If X552 and internal link mode is XFI, then setup XFI internal link. 
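+ * (Clarification only, no behavioural content beyond the code below:
+ * when NW_MNG_IF_SEL does not report INT_PHY_MODE on X550EM_x, the
+ * MAC-to-PHY link runs iXFI, a forced-speed interface, so the internal
+ * side is pinned to the single speed chosen above before the external
+ * X557 PHY autonegotiates with the link partner.)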
+ */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); -#define X550_COMMON_PHY \ - .identify_sfp = &ixgbe_identify_module_generic, \ - .reset = NULL, \ - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ - .setup_link = &ixgbe_setup_phy_link_generic, \ - .set_phy_power = NULL, + if (status != IXGBE_SUCCESS) + return status; + } -static const struct ixgbe_phy_operations phy_ops_X550 = { - X550_COMMON_PHY - .check_overtemp = &ixgbe_tn_check_overtemp, - .init = NULL, - .identify = &ixgbe_identify_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, -}; + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); +} -static const struct ixgbe_phy_operations phy_ops_X550EM_x = { - X550_COMMON_PHY - .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, -}; +/** + * ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. + **/ +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 status; + u16 i, autoneg_status = 0; -static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { - X550_COMMON_PHY - .check_overtemp = NULL, - .init = ixgbe_init_phy_ops_X550em, - .identify = ixgbe_identify_phy_x550em, - .read_reg = NULL, - .write_reg = NULL, - .read_reg_mdi = NULL, - .write_reg_mdi = NULL, -}; + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; -static const struct ixgbe_phy_operations phy_ops_x550em_a = { - X550_COMMON_PHY - .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_x550a, - .write_reg = &ixgbe_write_phy_reg_x550a, - .read_reg_mdi = &ixgbe_read_phy_reg_mdi, - .write_reg_mdi = &ixgbe_write_phy_reg_mdi, -}; + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); -static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { - X550_COMMON_PHY - .check_overtemp = ixgbe_check_overtemp_fw, - .init = ixgbe_init_phy_ops_X550em, - .identify = ixgbe_identify_phy_fw, - .read_reg = NULL, - .write_reg = NULL, - .read_reg_mdi = NULL, - .write_reg_mdi = NULL, -}; + /* If check link fails or MAC link is not up, then return */ + if (status != IXGBE_SUCCESS || !(*link_up)) + return status; -static const struct ixgbe_link_operations link_ops_x550em_x = { - .read_link = &ixgbe_read_i2c_combined_generic, - .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, - .write_link = &ixgbe_write_i2c_combined_generic, - .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, -}; + /* MAC link is up, so check external PHY link. + * X557 PHY. 
Link status is latching low, and can only be used to detect + * link drop, and not the current status of the link without performing + * back-to-back reads. + */ + for (i = 0; i < 2; i++) { + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); -static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550) -}; + if (status != IXGBE_SUCCESS) + return status; + } -static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550EM_x) -}; + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; -static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550EM_a) -}; + return IXGBE_SUCCESS; +} -const struct ixgbe_info ixgbe_X550_info = { - .mac = ixgbe_mac_X550, - .get_invariants = &ixgbe_get_invariants_X540, - .mac_ops = &mac_ops_X550, - .eeprom_ops = &eeprom_ops_X550, - .phy_ops = &phy_ops_X550, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550, -}; +/** + * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; -const struct ixgbe_info ixgbe_X550EM_x_info = { - .mac = ixgbe_mac_X550EM_x, - .get_invariants = &ixgbe_get_invariants_X550_x, - .mac_ops = &mac_ops_X550EM_x, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_X550EM_x, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550EM_x, - .link_ops = &link_ops_x550em_x, -}; + status = ixgbe_reset_phy_generic(hw); -const struct ixgbe_info ixgbe_x550em_x_fw_info = { - .mac = ixgbe_mac_X550EM_x, - .get_invariants = ixgbe_get_invariants_X550_x_fw, - .mac_ops = &mac_ops_X550EM_x_fw, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_x_fw, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550EM_x, -}; + if (status != IXGBE_SUCCESS) + return status; -const struct ixgbe_info ixgbe_x550em_a_info = { - .mac = ixgbe_mac_x550em_a, - .get_invariants = &ixgbe_get_invariants_X550_a, - .mac_ops = &mac_ops_x550em_a, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_a, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_x550em_a, -}; + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + +/** + * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_on_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + /* Some designs have the LEDs wired to the MAC */ + return ixgbe_led_on_generic(hw, led_idx); +} + +/** + * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. 
+ * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_off_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn off the LED, set mode to OFF. */ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + /* Some designs have the LEDs wired to the MAC */ + return ixgbe_led_off_generic(hw, led_idx); +} + +/** + * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success returns IXGBE_SUCCESS, + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val = IXGBE_SUCCESS; + int i; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_x550"); + + if ((len == 0) || (driver_ver == NULL) || + (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode + * @hw: pointer to hardware structure + * + * Returns true if in FW NVM recovery mode. + **/ +bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) +{ + u32 fwsm; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + + return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); +} -const struct ixgbe_info ixgbe_x550em_a_fw_info = { - .mac = ixgbe_mac_x550em_a, - .get_invariants = ixgbe_get_invariants_X550_a_fw, - .mac_ops = &mac_ops_x550em_a_fw, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_a_fw, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_x550em_a, -}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h new file mode 100644 index 000000000000..7ff921608b0d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation.
*/ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); + +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, +u16 *data); +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf); +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver, u16 len, const char *str); +s32 ixgbe_get_phy_token(struct ixgbe_hw *); +s32 ixgbe_put_phy_token(struct ixgbe_hw *); +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 
ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw); +#endif /* _IXGBE_X550_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d6feaacfbf89..8ae8f1bab5b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -1,23 +1,78 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2018 Intel Corporation. */ - -#include -#include -#include +/* Copyright(c) 1999 - 2020 Intel Corporation. */ #include "ixgbe.h" + +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#include +#endif +#ifdef HAVE_XDP_BUFF_RXQ +#include +#endif + #include "ixgbe_txrx_common.h" +#ifdef HAVE_AF_XDP_ZC_SUPPORT struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { bool xdp_on = READ_ONCE(adapter->xdp_prog); int qid = ring->ring_idx; - if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) + if (!xdp_on || !adapter->xsk_umems || + qid >= adapter->num_xsk_umems || !adapter->xsk_umems[qid]) return NULL; - return xdp_get_umem_from_qid(adapter->netdev, qid); + return adapter->xsk_umems[qid]; +} + +static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter) +{ + if (adapter->xsk_umems) + return 0; + + adapter->num_xsk_umems_used = 0; + adapter->num_xsk_umems = adapter->num_rx_queues; + adapter->xsk_umems = kcalloc(adapter->num_xsk_umems, + sizeof(*adapter->xsk_umems), + GFP_KERNEL); + if (!adapter->xsk_umems) { + adapter->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + int err; + + err = ixgbe_alloc_xsk_umems(adapter); + if (err) + return err; + + adapter->xsk_umems[qid] = umem; + adapter->num_xsk_umems_used++; + + return 0; +} + +static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid) +{ + adapter->xsk_umems[qid] = NULL; + adapter->num_xsk_umems_used--; + + if (adapter->num_xsk_umems_used == 0) { + kfree(adapter->xsk_umems); + adapter->xsk_umems = NULL; + adapter->num_xsk_umems = 0; + } } static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter, @@ -66,7 +121,6 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid) { - struct net_device *netdev = adapter->netdev; struct xdp_umem_fq_reuse *reuseq; bool if_running; int err; @@ -74,9 +128,12 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, if (qid >= adapter->num_rx_queues) return -EINVAL; - if (qid >= netdev->real_num_rx_queues
|| - qid >= netdev->real_num_tx_queues) - return -EINVAL; + if (adapter->xsk_umems) { + if (qid >= adapter->num_xsk_umems) + return -EINVAL; + if (adapter->xsk_umems[qid]) + return -EBUSY; + } reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); if (!reuseq) @@ -89,18 +146,24 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, return err; if_running = netif_running(adapter->netdev) && - ixgbe_enabled_xdp_adapter(adapter); + READ_ONCE(adapter->xdp_prog); if (if_running) ixgbe_txrx_ring_disable(adapter, qid); - set_bit(qid, adapter->af_xdp_zc_qps); + err = ixgbe_add_xsk_umem(adapter, umem, qid); + if (err) + return err; if (if_running) { ixgbe_txrx_ring_enable(adapter, qid); /* Kick start the NAPI context so that receiving will start */ +#ifdef HAVE_NDO_XSK_WAKEUP err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); +#else + err = ixgbe_xsk_async_xmit(adapter->netdev, qid); +#endif if (err) return err; } @@ -110,21 +173,20 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) { - struct xdp_umem *umem; bool if_running; - umem = xdp_get_umem_from_qid(adapter->netdev, qid); - if (!umem) + if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || + !adapter->xsk_umems[qid]) return -EINVAL; if_running = netif_running(adapter->netdev) && - ixgbe_enabled_xdp_adapter(adapter); + READ_ONCE(adapter->xdp_prog); if (if_running) ixgbe_txrx_ring_disable(adapter, qid); - clear_bit(qid, adapter->af_xdp_zc_qps); - ixgbe_xsk_umem_dma_unmap(adapter, umem); + ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]); + ixgbe_remove_xsk_umem(adapter, qid); if (if_running) ixgbe_txrx_ring_enable(adapter, qid); @@ -143,25 +205,20 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { - struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; - u64 offset; u32 act; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - offset = xdp->data - xdp->data_hard_start; - - xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); - + xdp->handle += xdp->data - xdp->data_hard_start; switch (act) { case XDP_PASS: break; case XDP_TX: - xdpf = convert_to_xdp_frame(xdp); + xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) { result = IXGBE_XDP_CONSUMED; break; @@ -206,6 +263,8 @@ ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *obi) { + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; u16 nta = rx_ring->next_to_alloc; struct ixgbe_rx_buffer *nbi; @@ -215,9 +274,14 @@ static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - nbi->dma = obi->dma; - nbi->addr = obi->addr; - nbi->handle = obi->handle; + nbi->dma = obi->dma & mask; + nbi->dma += hr; + + nbi->addr = (void *)((unsigned long)obi->addr & mask); + nbi->addr += hr; + + nbi->handle = obi->handle & mask; + nbi->handle += rx_ring->xsk_umem->headroom; obi->addr = NULL; obi->skb = NULL; @@ -248,8 +312,7 @@ void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); bi->addr += hr; - bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, - rx_ring->xsk_umem->headroom); + bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; } static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, @@ -275,9 +338,9 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); + bi->handle = handle + umem->headroom; - xsk_umem_discard_addr(umem); + xsk_umem_release_addr(umem); return true; } @@ -302,9 +365,9 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); + bi->handle = handle + umem->headroom; - xsk_umem_discard_addr_rq(umem); + xsk_umem_release_addr_rq(umem); return true; } @@ -523,7 +586,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, total_rx_packets++; ixgbe_process_skb_fields(rx_ring, rx_desc, skb); - ixgbe_rx_skb(q_vector, skb); + ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); } if (xdp_xmit & IXGBE_XDP_REDIR) @@ -546,14 +609,6 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; - if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { - if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) - xsk_set_rx_need_wakeup(rx_ring->xsk_umem); - else - xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); - - return (int)total_rx_packets; - } return failure ? 
budget : (int)total_rx_packets; } @@ -578,9 +633,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbe_tx_buffer *tx_bi; bool work_done = true; +#ifdef XSK_UMEM_RETURNS_XDP_DESC struct xdp_desc desc; +#endif dma_addr_t dma; u32 cmd_type; +#ifndef XSK_UMEM_RETURNS_XDP_DESC + u32 len; +#endif while (budget-- > 0) { if (unlikely(!ixgbe_desc_unused(xdp_ring)) || @@ -589,6 +649,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) break; } +#ifdef XSK_UMEM_RETURNS_XDP_DESC if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; @@ -600,7 +661,17 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; tx_bi->bytecount = desc.len; tx_bi->xdpf = NULL; - tx_bi->gso_segs = 1; +#else + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + break; + + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); + + tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; + tx_bi->bytecount = len; + tx_bi->xdpf = NULL; +#endif tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); tx_desc->read.buffer_addr = cpu_to_le64(dma); @@ -609,10 +680,17 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; +#ifdef XSK_UMEM_RETURNS_XDP_DESC cmd_type |= desc.len | IXGBE_TXD_CMD; tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.olinfo_status = cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT); +#else + cmd_type |= len | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); +#endif xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) @@ -640,17 +718,19 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) { - u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; unsigned int total_packets = 0, total_bytes = 0; + u32 i = tx_ring->next_to_clean, xsk_frames = 0; + unsigned int budget = q_vector->tx.work_limit; struct xdp_umem *umem = tx_ring->xsk_umem; union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_bi; - u32 xsk_frames = 0; + bool xmit_done; - tx_bi = &tx_ring->tx_buffer_info[ntc]; - tx_desc = IXGBE_TX_DESC(tx_ring, ntc); + tx_bi = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; - while (ntc != ntu) { + do { if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; @@ -663,21 +743,26 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, xsk_frames++; tx_bi->xdpf = NULL; + total_bytes += tx_bi->bytecount; tx_bi++; tx_desc++; - ntc++; - if (unlikely(ntc == tx_ring->count)) { - ntc = 0; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; tx_bi = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); } /* issue prefetch for next Tx descriptor */ prefetch(tx_desc); - } - tx_ring->next_to_clean = ntc; + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; @@ -689,13 +774,15 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); - if 
(xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) - xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - - return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); + xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); + return budget > 0 && xmit_done; } -int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +#ifdef HAVE_NDO_XSK_WAKEUP +int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 __maybe_unused flags) +#else +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) +#endif { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; @@ -709,7 +796,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) if (qid >= adapter->num_xdp_queues) return -ENXIO; - if (!adapter->xdp_ring[qid]->xsk_umem) + if (!adapter->xsk_umems || !adapter->xsk_umems[qid]) return -ENXIO; ring = adapter->xdp_ring[qid]; @@ -747,3 +834,4 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring) if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); } +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat.c b/drivers/net/ethernet/intel/ixgbe/kcompat.c new file mode 100644 index 000000000000..e1b604236cbc --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/kcompat.c @@ -0,0 +1,2964 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#include "ixgbe.h" +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = 
*s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) 
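+/* Note: like C99 snprintf(), this returns the number of characters that
+ * would have been written, which can be >= size on truncation; callers
+ * that want the count actually stored should use the scnprintf()-style
+ * wrapper provided further below. */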
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void 
_kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + 
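+	/* free the config space image saved by _kc_pci_save_state() above,
+	 * then mirror the old free_netdev() teardown inline */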
struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +int ixgbe_dcb_netlink_register(void) +{ + return 0; +} + +int ixgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if 
(skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; 
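+	/* whitelist which PCIe capability registers exist on this device:
+	 * v1 capability structures omit registers that v2 (PCIe r3.0) made
+	 * mandatory, so presence depends on the capability version and port
+	 * type, as in the upstream pcie_capability_reg_implemented() */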
+ + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* < RHEL 6.7 */ +#endif /* < 3.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +bool _kc_pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, + int crs_timeout) +{ + int delay = 1; + + if (!pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) + return false; + + /* some broken boards return 0 or ~0 if a slot is empty: */ + if (*l == 0xffffffff || *l == 0x00000000 || + *l == 0x0000ffff || *l == 0xffff0000) + return false; + + /* Configuration Request Retry Status */ + while (*l == 0xffff0001) { + if (!crs_timeout) + return true; + + msleep(delay); + delay *= 2; + if (!pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) + return false; + /* Card hasn't responded in 60 seconds? Must be stuck. */ + if (delay > crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ 
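+	/* a crs_timeout of 0 means: report presence from the first
+	 * vendor-ID read rather than polling a device stuck in CRS */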
+ u32 v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* < RHEL 7.0 */ +#endif /* < 3.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags) +{ + unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); + u8 nexthdr = ipv6_hdr(skb)->nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + 
int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + 
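+/* Illustrative sketch (not compiled): how a driver's ndo_set_rx_mode
+ * handler would typically drive the __kc_hw_addr_sync_dev() /
+ * __kc_hw_addr_unsync_dev() helpers above. The example_* callbacks and
+ * helpers are hypothetical names, not part of this file.
+ */
+#if 0
+static int example_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+	/* program one unicast filter into the hardware */
+	return example_hw_add_filter(netdev, addr);
+}
+
+static int example_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+	/* remove one unicast filter from the hardware */
+	return example_hw_del_filter(netdev, addr);
+}
+
+static void example_set_rx_mode(struct net_device *netdev)
+{
+	/* flush stale entries, then push new ones to the hardware; a
+	 * non-zero return means one of the sync callbacks failed */
+	__kc_hw_addr_sync_dev(&netdev->uc, netdev, example_uc_sync,
+			      example_uc_unsync);
+}
+#endif /* 0 */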
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include <scsi/fc/fc_fcoe.h> /* FCOE_HEADER_LEN */ +#endif +#ifdef HAVE_SCTP +#include <linux/sctp.h> /* struct sctphdr */ +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + 
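+		/* UDP and UDP-Lite share the same fixed 8-byte header, so a
+		 * plain sizeof() advance is sufficient here */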
hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fill _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. 
+ * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a whitelist of supported fields the SKB can + * parse. If new functionality is added, make sure to keep this format (i.e. only + * check for fields that are explicitly wanted). + * + * Current whitelist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and the function returns true. + */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include <asm/idprom.h> +#include <asm/prom.h> +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 
2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? 
pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older 
kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !RHEL >= 8.2 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat.h b/drivers/net/ethernet/intel/ixgbe/kcompat.h new file mode 100644 index 000000000000..2596852ca443 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/kcompat.h @@ -0,0 +1,7336 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct ixgbe_adapter +#define adapter_q_vector ixgbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef cpu_online +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
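+ * (Illustrative only: a driver could, for instance, use a smaller
+ * tx_coalesce_usecs_irq than tx_coalesce_usecs so that completions are
+ * reaped more aggressively while the interrupt handler is already
+ * running.)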
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
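+ *
+ * With the macros defined below, the 3.13.0-45-generic kernel shown in
+ * the first example yields
+ * UBUNTU_VERSION_CODE == ((~0xFF & KERNEL_VERSION(3,13,0)) << 8) + 45,
+ * which equals UBUNTU_VERSION(3,13,0,45).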
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) +#ifdef CONFIG_SUSE_KERNEL +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. 
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13)) +/* SLES15 SP2 Beta1 is 5.3.13 */ +#define SLE_VERSION_CODE SLE_VERSION(15,2,0) + +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. 
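+ * For illustration: on a 4.14-based kernel from any other distribution
+ * the check below undefines __TC_MQPRIO_MODE_MAX and the ADQ paths
+ * compile out, while RHEL, SLES and Ubuntu kernels are left untouched.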
+ */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. + */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \
+ int err; \
+ __diag_push(); \
+ __diag(ignored "-Wformat-nonliteral"); \
+ err = __trace_bprintk(ip, fmt, ##args); \
+ __diag_pop(); \
+ err; \
+})
+#endif /* GCC_VERSION < 5.1.0 */
+
+/* Newer kernels removed <linux/pci-aspm.h> */
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3)))
+#define HAVE_PCI_ASPM_H
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation.
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 
_kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct device {
+ struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev(struct device *dev)
+{
+ return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return (struct device *) pdev;
+}
+#define pdev_printk(lvl, pdev, fmt, args...) \
+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...) \
+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...) \
+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...) \
+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous!
we ignore the 'gfp' argument (the pci_alloc_consistent() API
+ * underneath has no gfp parameter) */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+ pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.17 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+ struct __kc_callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef
CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu dma_sync_single
+#define dma_sync_single_for_device dma_sync_single
+#define dma_sync_single_range_for_cpu dma_sync_single_range
+#define dma_sync_single_range_for_device dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } while (0)
+
+#endif /* < 2.6.8 */
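
The PCI_EXP_* constants above mirror the PCIe capability register layout, so a driver can read the capability by hand on kernels that predate the symbolic names. A hedged sketch (pdev is a hypothetical struct pci_dev *):

	static u16 demo_read_devctl(struct pci_dev *pdev)
	{
		int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		u16 devctl = 0;

		if (pos)
			pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &devctl);
		return devctl;
	}
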
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+ return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return m * (HZ / MSEC_PER_SEC);
+#else
+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current)) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY BIT(0)
+#define WAKE_UCAST BIT(1)
+#define WAKE_MCAST BIT(2)
+#define WAKE_BCAST BIT(3)
+#define WAKE_ARP BIT(4)
+#define WAKE_MAGIC BIT(5)
+#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+ int offset, int len, void *buffer)
+{
+ int hlen = skb_headlen(skb);
+
+ if (hlen - offset >= len)
+ return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_copy_bits(skb, offset, buffer, len) < 0)
+ return NULL;
+
+ return buffer;
+#else
+ return NULL;
+#endif
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
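
The jiffies/msecs converters above select a branch at compile time so common HZ values avoid expensive division. An illustrative round trip with HZ == 100:

	static void demo_sleep_25ms(void)
	{
		/* _kc_msecs_to_jiffies(25) == (25 + 10 - 1) / 10 == 3 jiffies;
		 * _kc_jiffies_to_msecs(3) == 30 ms: rounding up on the way in
		 * guarantees the sleep is never shorter than requested. */
		unsigned long t = _kc_msecs_to_jiffies(25);

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(t);
	}
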
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+ sizeof(array[0]), array }; \
+ module_param_call(name, param_array_set, param_array_get, \
+ &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels.  This is needed
+ * to support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+ return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return m * (HZ / KC_USEC_PER_SEC);
+#else
+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymmetrically */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+ return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
+#define SUPPORTED_Pause BIT(13)
+#define SUPPORTED_Asym_Pause BIT(14)
+#define ADVERTISED_Pause BIT(13)
+#define ADVERTISED_Asym_Pause BIT(14)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef kfree_rcu
+/* this is placed here due to a lack of rcu_barrier in previous kernels */
+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node */
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+ (_timer)->function = _function; \
+ (_timer)->data = _data; \
+ init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) do{}while(0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
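
A sketch of the setup_timer() fallback above with the old (unsigned long data) callback convention; my_priv and the field names are illustrative only:

	struct my_priv {
		struct timer_list watchdog;
	};

	static void my_watchdog_fn(unsigned long data)
	{
		struct my_priv *priv = (struct my_priv *)data;

		(void)priv;	/* kick hardware, re-arm, etc. */
	}

	static void my_start_watchdog(struct my_priv *priv)
	{
		setup_timer(&priv->watchdog, my_watchdog_fn,
			    (unsigned long)priv);
		mod_timer(&priv->watchdog, jiffies + HZ);	/* ~1 s */
	}
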
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+ const u16 *a = (const u16 *) addr1;
+ const u16 *b = (const u16 *) addr2;
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
+#define mutex_lock(x) down_interruptible(x)
+#define mutex_unlock(x) up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) \
+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if (likely(size >= len))
+ return 0;
+ return _kc_skb_pad(skb, len - size);
+}
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
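
Typical use of the skb_padto() backport above: pad runt frames to the 60-byte Ethernet minimum before posting them to hardware that does not pad in the MAC. A hedged sketch; whether a failed pad consumes the skb follows the usual convention of this era and should be verified against _kc_skb_pad():

	static int my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* assume skb consumed on failure */
		/* skb->len >= ETH_ZLEN here; hand the frame to hardware */
		return NETDEV_TX_OK;
	}
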
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0x00,
+ PCIE_LNK_X1 = 0x01,
+ PCIE_LNK_X2 = 0x02,
+ PCIE_LNK_X4 = 0x04,
+ PCIE_LNK_X8 = 0x08,
+ PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X16 = 0x10,
+ PCIE_LNK_X32 = 0x20,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* < RHEL 5.0 */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+ irq_handler_t new_handler = (irq_handler_t) handler;
+ return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) )
+#ifndef skb_checksum_help
+static inline int __kc_skb_checksum_help(struct sk_buff *skb)
+{
+ return skb_checksum_help(skb, 0);
+}
+#define skb_checksum_help(skb) __kc_skb_checksum_help((skb))
+#endif
+#endif /* < 2.6.19 && >= 2.6.11 */
+
+/* pci_restore_state and pci_save_state handle MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+ return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/pci_hotplug.h>
+
+#define NEW_SKB_CSUM_HELP
+#endif /* < 2.6.19 */
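
The request_irq() shim above lets a driver keep one modern two-argument handler across kernel generations; the wrapper casts it to whatever signature the running kernel expects. Illustrative sketch (my_isr and my_request are made-up names):

	static irqreturn_t my_isr(int irq, void *data)
	{
		struct net_device *netdev = data;

		(void)netdev;	/* ack the device, schedule NAPI, ... */
		return IRQ_HANDLED;
	}

	static int my_request(struct pci_dev *pdev, struct net_device *netdev)
	{
		/* expands to _kc_request_irq(), which performs the cast */
		return request_irq(pdev->irq, my_isr, IRQF_SHARED,
				   "my_drv", netdev);
	}
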
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ (_work)->func = (void (*)(void *))_func; \
+ (_work)->data = _work; \
+ init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+ PCI_VENDOR_ID_##ven, (dev), \
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* remove compiler warning with b=b, for unused variable */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+ return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) \
+ do { \
+ if (vg) vg->vlan_devices[id] = dev; \
+ } while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+ pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+ pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev) \
+ pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#define devm_kzalloc(dev, size, flags) kzalloc(size, flags)
+#define devm_kfree(dev, p) kfree(p)
+#else /* 2.6.21 */
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+ return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
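
PCI_VDEVICE() from above collapses the vendor/device/subsystem boilerplate of an ID-table entry. A minimal sketch (0x10d3 is just an example device ID, and my_pci_tbl is an illustrative name):

	static const struct pci_device_id my_pci_tbl[] = {
		{ PCI_VDEVICE(INTEL, 0x10d3) },	/* 0x8086:0x10d3, any subsystem */
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, my_pci_tbl);
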
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+ do { \
+ skb->tail = skb->data; \
+ } while (0)
+#define skb_set_tail_pointer(skb, offset) \
+ do { \
+ skb->tail = skb->data + offset; \
+ } while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+ memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+ memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x) "0123456789abcdef"[x]
+#endif
+#include
+void _kc_print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full BIT(15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full BIT(15)
+#endif
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+static inline int compound_order(struct page *page)
+{
+ return 0;
+}
+
+#define __must_be_array(a) 0
+
+#ifndef SKB_WITH_OVERHEAD
+#define SKB_WITH_OVERHEAD(X) \
+ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#undef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+ int delta = 0;
+
+ if (headroom > (skb->data - skb->head))
+ delta = headroom - (skb->data - skb->head);
+
+ if (delta || skb_header_cloned(skb))
+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+ GFP_ATOMIC);
+ return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
+#endif /* < 2.6.23 */
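
The __kc_skb_cow_head() backport above only reallocates when the requested headroom is missing or the header is shared, which is why TX paths call it before editing headers in place. Illustrative sketch (my_rewrite_proto is a made-up name):

	static int my_rewrite_proto(struct sk_buff *skb)
	{
		if (skb_cow_head(skb, 0))
			return -ENOMEM;	/* header could not be privatized */
		/* the header area is now safe to modify */
		eth_hdr(skb)->h_proto = htons(ETH_P_IP);
		return 0;
	}
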
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+ /* used to look up the real NAPI polling routine */
+ int (*poll)(struct napi_struct *, int);
+ struct net_device *dev;
+ int weight;
+};
+#endif
+
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware, and thus we have a driver
+ * define list which determines which drivers support multiple queues, and
+ * thus need these stronger defines. If a driver does not support multi-queue
+ * functionality, you don't need to add it to this list.
+ */
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ struct net_device *poll_dev = napi_to_poll_dev(napi);
+ poll_dev->poll = __kc_adapter_clean;
+ poll_dev->priv = napi;
+ poll_dev->weight = weight;
+ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+ set_bit(__LINK_STATE_START, &poll_dev->state);
+ dev_hold(poll_dev);
+ napi->poll = poll;
+ napi->weight = weight;
+ napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+ struct net_device *poll_dev = napi_to_poll_dev(napi);
+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+ dev_put(poll_dev);
+ memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+ return netif_running(napi->dev) &&
+ netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+ if (napi_schedule_prep(napi))
+ __netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+ struct net_device *dev = napi_to_poll_dev(n);
+
+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+ /* No hurry. */
+ msleep(1);
+ }
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+ napi_gro_flush(napi);
+#endif
+ netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ dev->poll = poll;
+ dev->weight = weight;
+ napi->poll = poll;
+ napi->weight = weight;
+ napi->dev = dev;
+}
+#define netif_napi_add __kc_netif_napi_add
+
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
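
With the NAPI emulation above, driver code can be written against the post-2.6.24 interface everywhere; on old kernels netif_napi_add() quietly wires the poll routine into a backing pseudo-netdev. A sketch (my_poll, netdev and adapter are illustrative):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		/* ... clean at most 'budget' RX descriptors, count in 'done' ... */
		if (done < budget)
			napi_complete(napi);
		return done;
	}

	/* in probe/open:
	 *	netif_napi_add(netdev, &adapter->napi, my_poll, 64);
	 *	napi_enable(&adapter->napi);
	 */
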
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+ printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+ __builtin_constant_p(n) ? ( \
+ (n == 1) ? 1 : \
+ (1UL << ilog2(n))) : \
+ (1UL << (fls_long(n) - 1))
+#endif
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+ set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+ remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+ modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+ if (value != PM_QOS_DEFAULT_VALUE) { \
+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+ pci_name(adapter->pdev)); \
+ } \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+ /* adapted from strict_strtoul() in 2.6.25 */
+ char *tail;
+ long val;
+ size_t len;
+
+ *res = 0;
+ len = strlen(buf);
+ if (!len)
+ return -EINVAL;
+ val = simple_strtol(buf, &tail, base);
+ if (tail == buf)
+ return -EINVAL;
+ if ((*tail == '\0') ||
+ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+ *res = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+#endif
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#ifndef IXGBE_PROCFS
+#define IXGBE_PROCFS
+#endif /* IXGBE_PROCFS */
+#endif /* >= 2.6.0 */
+
+#else /* < 2.6.25 */
+
+#if IS_ENABLED(CONFIG_SYSFS)
+#ifndef IXGBE_SYSFS
+#define IXGBE_SYSFS
+#endif /* IXGBE_SYSFS */
+#endif /* CONFIG_SYSFS */
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef IXGBE_HWMON
+#define IXGBE_HWMON
+#endif /* IXGBE_HWMON */
+#endif /* CONFIG_HWMON */
+
+#endif /* < 2.6.25 */
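
The strict_strtol() fallback above, unlike simple_strtol(), rejects trailing garbage (allowing only a final newline), which suits sysfs store handlers. A hedged sketch (my_store is an illustrative name):

	static ssize_t my_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
	{
		long val;

		if (strict_strtol(buf, 10, &val))
			return -EINVAL;
		/* ... apply val ... */
		return count;
	}
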
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({ \
+ type __val = (val); \
+ type __min = (min); \
+ type __max = (max); \
+ __val = __val < __min ? __min : __val; \
+ __val > __max ? __max : __val; })
+#endif /* clamp_t */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#ifdef HAVE_PCI_ASPM_H
+#include <linux/pci-aspm.h>
+#endif
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+ __u32 speed)
+{
+ ep->speed = (__u16)speed;
+ /* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+ /* no speed_hi before 2.6.27, and probably no need for it yet */
+ return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+ defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+ do { \
+ u16 pmc = 0; \
+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+ if (pm) { \
+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+ &pmc); \
+ } \
+ (dev)->power.can_wakeup = !!(pmc >> 11); \
+ (dev)->power.should_wakeup = (val && (pmc >> 11)); \
+ } while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifndef DMA_ATTR_WEAK_ORDERING
+#define DMA_ATTR_WEAK_ORDERING 0
+#endif
+
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *);
+void _kc_netif_tx_wake_all_queues(struct net_device *);
+void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_stop_subqueue((_ndev), (_qi)); \
+ else \
+ netif_stop_queue((_ndev)); \
+ } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_start_subqueue((_ndev), (_qi)); \
+ else \
+ netif_start_queue((_ndev)); \
+ } while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
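
The dma_mapping_error() remap above (degrading to pci_dma_mapping_error() before 2.6.27) keeps a single error-check idiom in the TX path. Illustrative sketch (my_map_tx is a made-up name):

	static int my_map_tx(struct device *dev, struct sk_buff *skb)
	{
		dma_addr_t dma = dma_map_single(dev, skb->data, skb->len,
						DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;	/* drop rather than requeue */
		}
		/* ... post a descriptor carrying 'dma' ... */
		return NETDEV_TX_OK;
	}
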
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+void __kc_warn_slowpath(const char *file, const int line,
+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif /* WARN */
+#undef HAVE_IXGBE_DEBUG_FS
+#undef HAVE_IGB_DEBUG_FS
+#else /* < 2.6.27 */
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+ __u32 speed)
+{
+ ep->speed = (__u16)(speed & 0xFFFF);
+ ep->speed_hi = (__u16)(speed >> 16);
+}
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_IXGBE_DEBUG_FS
+#define HAVE_IGB_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
+ pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+
+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+
+#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
+#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
+
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+ pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled() (1)
+#endif /* pcie_aspm_enabled */
+
+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
+
+#ifndef PCI_EXP_LNKSTA_CLS
+#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
+#endif
+#ifndef PCI_EXP_LNKSTA_NLW
+#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
+#endif
+
+#ifndef pci_clear_master
+void _kc_pci_clear_master(struct pci_dev *dev);
+#define pci_clear_master(dev) _kc_pci_clear_master(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_MLW
+#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
+#endif
+
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
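
The WARN() fallback above evaluates to the (unlikely-annotated) condition, so it can gate an error path and log in a single expression. A sketch; my_adapter and num_rings are illustrative:

	static int my_check_ring(struct my_adapter *adapter, int ring)
	{
		if (WARN(ring >= adapter->num_rings,
			 "ring index %d out of range\n", ring))
			return -EINVAL;
		return 0;
	}
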
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+ synchronize_irq();
+#else /* < 2.5.28 */
+ synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* RHEL >= 5.5 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)))
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+ return !(pbus->parent);
+}
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP 0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+ for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full BIT(17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full BIT(18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full BIT(19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full BIT(17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full BIT(18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full BIT(19)
+#endif
+
+static inline unsigned long dev_trans_start(struct net_device *dev)
+{
+ return dev->trans_start;
+}
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
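
dev_trans_start() above papers over the last-TX timestamp moving between structures, so a watchdog can use it uniformly. Sketch (my_adapter, netdev and reset_task are illustrative; the 5*HZ timeout is arbitrary):

	static void my_check_tx_hang(struct my_adapter *adapter)
	{
		struct net_device *netdev = adapter->netdev;

		if (netif_carrier_ok(netdev) &&
		    time_after(jiffies, dev_trans_start(netdev) + 5 * HZ))
			schedule_work(&adapter->reset_task);
	}
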
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU BIT(26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync(void)
+{
+ return 1;
+}
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+ return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev) do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t) do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev) do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target workarounds require at least this kernel rev support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif /* PORT_DA */
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5))
+#define HAVE_ETHTOOL_GSRSSH
+#define HAVE_RHEL6_SRIOV_CONFIGURE
+#define HAVE_RXFH_NONCONST
+#endif /* RHEL > 6.5 */
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef dev_is_pci
+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
+#endif
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+ list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (dev->reg_state != NETREG_REGISTERED)
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ printk(level "%s: " format, pci_name(pdev), ##args); \
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ struct device *dev = pci_dev_to_dev(pdev); \
+ dev_printk(level, dev, "%s: " format, \
+ netdev_name(netdev), ##args); \
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...) \
+ dev_printk(level, (netdev)->dev.parent, \
+ "%s: " format, \
+ netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...) \
+ netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...) \
+ netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...) \
+ netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...) \
+ netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...) \
+ netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...) \
+ netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...) \
+ netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
+ netdev_name(__dev), ##args); \
+} while (0)
+#else /* DEBUG */
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+ 0; \
+})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...) \
+ netif_level(dbg, priv, type, dev, fmt, ##args)
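
The netdev_printk() fallbacks above pick the best device-name source each kernel generation offers, so driver code only ever uses the level helpers. A sketch (netdev, adapter and speed are illustrative; netif_dbg() also requires the msg_enable bits that netif_msg_probe() tests):

	netdev_info(netdev, "link up, %u Mbps\n", speed);
	netif_dbg(adapter, probe, netdev, "probe completed\n");
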
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr) \
+ do { \
+ static struct lock_class_key __key; \
+ (attr)->key = &__key; \
+ } while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended(void)
+{
+ return false;
+}
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
+{
+ return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#ifndef pci_bus_speed
+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
+enum _kc_pci_bus_speed {
+ _KC_PCIE_SPEED_2_5GT = 0x14,
+ _KC_PCIE_SPEED_5_0GT = 0x15,
+ _KC_PCIE_SPEED_8_0GT = 0x16,
+ _KC_PCI_SPEED_UNKNOWN = 0xff,
+};
+#define pci_bus_speed _kc_pci_bus_speed
+#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT
+#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT
+#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT
+#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN
+#endif /* pci_bus_speed */
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ dev = dev->physfn;
+#endif /* CONFIG_PCI_IOV */
+#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */
+ return dev;
+}
+#endif /* !RHEL >= 6.4 */
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq)
+{
+ dev->egress_subqueue_count = txq;
+ return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#else /* HAVE_TX_MQ */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+ unsigned int __always_unused txq)
+{
+ return 0;
+}
+#endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq) \
+ _kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+struct device_node;
+#else /* < 2.6.35 */
+#define HAVE_STRUCT_DEVICE_OF_NODE
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#include <linux/of.h>
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+enum {
+ WQ_UNBOUND = 0,
+ WQ_RESCUER = 0,
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+ if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+ skb->dev = dev;
+ }
+ return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)))
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+#endif
+
+#define u64_stats_update_begin(a) do { } while(0)
+#define u64_stats_update_end(a) do { } while(0)
+#define u64_stats_fetch_begin(a) do { } while(0)
+#define u64_stats_fetch_retry_bh(a,b) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
+#define HAVE_8021P_SUPPORT
+#endif
+
+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0)))
+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
+{
+ return;
+}
+#endif
+
+#else /* < 2.6.36 */
+
+#define msleep(x) do { if (x > 20) \
+ msleep(x); \
+ else \
+ usleep_range(1000 * x, 2000 * x); \
+ } while (0)
+
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#define HAVE_NON_CONST_PCI_DRIVER_NAME
+#ifndef netif_set_real_num_tx_queues
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq)
+{
+ netif_set_real_num_tx_queues(dev, txq);
+ return 0;
+}
+#define netif_set_real_num_tx_queues(dev, txq) \
+ _kc_netif_set_real_num_tx_queues(dev, txq)
+#endif
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
+ unsigned int __always_unused rxq)
+{
+ return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+ __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN BIT(7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN BIT(8)
+#endif /* ETH_FLAG_RXVLAN */
+
+#define WQ_MEM_RECLAIM WQ_RESCUER
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+ WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+ void *addr = vmalloc_node(size, node);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+ void *addr = vmalloc(size);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \
+ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0)))
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb) ||
+ skb->protocol != cpu_to_be16(ETH_P_8021Q))
+ return skb->protocol;
+
+ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+ return 0;
+
+ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#endif /* !RHEL5.7+ || RHEL6.0 */
+
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP BIT(0)
+#define SKBTX_IN_PROGRESS BIT(2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+
+#ifndef device_wakeup_enable
+#define
device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define 
IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) 
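+/* Usage sketch (hypothetical caller, not part of this patch): with the
+ * backported bits above, an ethtool get_settings handler on an affected
+ * kernel can report 20G link modes the same way as upstream, e.g.:
+ *
+ *	ecmd->supported   |= SUPPORTED_20000baseKR2_Full;
+ *	ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+ */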
+#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE 
&& SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 
0
+#endif
+
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+	memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#include
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on one define
+ */
+#define SUPPORTED_40000baseKR4_Full BIT(23)
+#define SUPPORTED_40000baseCR4_Full BIT(24)
+#define SUPPORTED_40000baseSR4_Full BIT(25)
+#define SUPPORTED_40000baseLR4_Full BIT(26)
+#define ADVERTISED_40000baseKR4_Full BIT(23)
+#define ADVERTISED_40000baseCR4_Full BIT(24)
+#define ADVERTISED_40000baseSR4_Full BIT(25)
+#define ADVERTISED_40000baseLR4_Full BIT(26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) )
+
+#ifndef mod_delayed_work
+/**
+ * __mod_delayed_work - modify delay or queue delayed work
+ * @wq: workqueue to use
+ * @dwork: delayed work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Return: %true if @dwork was pending and was rescheduled;
+ *	   %false if it wasn't pending
+ *
+ * Note: the dwork parameter was declared as a void *
+ * to avoid compatibility problems with early 2.6 kernels
+ * where struct delayed_work is not declared. Unlike the original
+ * implementation, flags are not preserved, and it should not be
+ * used in interrupt context.
+ */
+static inline bool __mod_delayed_work(struct workqueue_struct *wq,
+				      void *dwork,
+				      unsigned long delay)
+{
+	bool ret = cancel_delayed_work(dwork);
+
+	queue_delayed_work(wq, dwork, delay);
+	return ret;
+}
+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
+#endif /* mod_delayed_work */
+
+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
+#else /* >= 3.7.0 */
+#include
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+	return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT 0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+							 u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+	vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+	skb->vlan_tci = vlan_tci;
+	return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+	__kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_NLATTR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags);
+#endif /* HAVE_NDO_FDB_ADD_NLATTR */
+#if defined(HAVE_FDB_DEL_NLATTR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr);
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr);
+#endif /* HAVE_FDB_DEL_NLATTR */
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+#endif
+
+/* The definitions for these functions when CONFIG_OF_NET is defined are
+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have
+ * backports for when CONFIG_OF_NET is true. These are separated and
+ * duplicated in order to cover all cases so that all kernels get either the
+ * real definitions (when CONFIG_OF_NET is defined) or the stub definitions
+ * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real
+ * definitions).
+ */
+#ifndef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))
+#define HAVE_RHEL7_PCI_DRIVER_RH
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#define HAVE_RHEL7_PCI_RESET_NOTIFY
+#endif /* RHEL >= 7.2 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define HAVE_GENEVE_RX_OFFLOAD
+#endif /* RHEL < 7.5 */
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */
+#endif /* RHEL >= 7.3 */
+
+/* new hooks added to net_device_ops_extended in RHEL7.4 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* RHEL >= 7.4 */
+#else /* RHEL >= 8.0 */
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#endif /* RHEL >= 8.0 */
+#endif /* RHEL >= 7.0 */
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#define netdev_notifier_info_to_dev(ptr) ptr
+#ifndef time_in_range64
+#define time_in_range64(a, b, c) \
+	(time_after_eq64(a, b) && \
+	 time_before_eq64(a, c))
+#endif /* time_in_range64 */
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev);
+#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction
+#endif /* <RHEL6.7 */
+#else /* >= 3.12.0 */
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_VXLAN_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+#endif /* >= 3.12.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
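+/* Usage sketch (hypothetical caller, not part of this patch): the backport
+ * below lets drivers keep the single-call 3.13+ DMA mask idiom on older
+ * kernels, e.g. in a probe() routine:
+ *
+ *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ *	if (err)
+ *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ */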
+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
+#ifndef u64_stats_init
+#define u64_stats_init(a) do { } while(0)
+#endif
+#undef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \
+     !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)))
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+	dev = pci_physfn(dev);
+	if (pci_is_root_bus(dev->bus))
+		return NULL;
+
+	return dev->bus->self;
+}
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+#ifndef list_next_entry
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+#ifndef list_prev_entry
+#define list_prev_entry(pos, member) \
+	list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) )
+#define devm_kcalloc(dev, cnt, size, flags) \
+	devm_kzalloc(dev, cnt * size, flags)
+#endif /* > 2.6.20 */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+bool _kc_pci_device_is_present(struct pci_dev *pdev);
+#define pci_device_is_present _kc_pci_device_is_present
+#endif /* <RHEL7.0 */
+#else /* >= 3.13.0 */
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+
+#ifndef U16_MAX
+#define U16_MAX ((u16)~0U)
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#ifndef U64_MAX
+#define U64_MAX ((u64)~0ULL)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
+#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+
+/* it isn't expected that this would be a #define unless we made it so */
+#ifndef skb_set_hash
+
+#define PKT_HASH_TYPE_NONE	0
+#define PKT_HASH_TYPE_L2	1
+#define PKT_HASH_TYPE_L3	2
+#define PKT_HASH_TYPE_L4	3
+
+enum _kc_pkt_hash_types {
+	_KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE,
+	_KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2,
+	_KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3,
+	_KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4,
+};
+#define pkt_hash_types _kc_pkt_hash_types
+
+#define skb_set_hash __kc_skb_set_hash
+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
+				     u32 __maybe_unused hash,
+				     int __maybe_unused type)
+{
+#ifdef HAVE_SKB_L4_RXHASH
+	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+#ifdef NETIF_F_RXHASH
+	skb->rxhash = hash;
+#endif
+}
+#endif /* !skb_set_hash */
+
+#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#ifndef HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif
+
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
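+/* Usage sketch (hypothetical driver code): HAVE_UDP_ENC_TUNNEL is what a
+ * driver would test before wiring up the pre-4.8 VXLAN offload hooks in its
+ * net_device_ops, e.g.:
+ *
+ *	#ifdef HAVE_UDP_ENC_TUNNEL
+ *	.ndo_add_vxlan_port = foo_add_vxlan_port,
+ *	.ndo_del_vxlan_port = foo_del_vxlan_port,
+ *	#endif
+ */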
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+
+#ifndef HAVE_VXLAN_CHECKS
+#define HAVE_VXLAN_CHECKS
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */
+
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+#define HAVE_NDO_DFWD_OPS
+#endif
+
+#ifndef pci_enable_msix_range
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	*(u32 *)dst = *(const u32 *)src;
+	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+	u16 *a = (u16 *)dst;
+	const u16 *b = (const u16 *)src;
+
+	a[0] = b[0];
+	a[1] = b[1];
+	a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef OPTIMIZER_HIDE_VAR
+#ifdef __GNUC__
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#else
+#include
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0)))
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+#ifdef NETIF_F_RXHASH
+	return skb->rxhash;
+#else
+	return 0;
+#endif /* NETIF_F_RXHASH */
+}
+#endif /* !(RHEL > 7.0) && !(SLES >= 10.4) */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define request_firmware_direct request_firmware
+#endif /* !RHEL || RHEL < 7.5 */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#define HAVE_SKBUFF_RXHASH
+#endif /* < 2.6.35 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+     !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp)
+
+#else /* >= 3.15.0 */
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	__kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* !NETDEV_HW_ADDR_T_UNICAST */
+	__kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+		int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	__kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* !NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* if someone backports this, hopefully they backport it as a #define.
+ * declare it as zero on older kernels so that if it gets or'd in
+ * it won't affect anything, therefore preventing core driver changes
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
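+/* Usage sketch (hypothetical driver code): the __dev_uc_sync()/__dev_mc_sync()
+ * backports above mirror the 3.16+ API and are meant to be called from an
+ * ndo_set_rx_mode handler, e.g.:
+ *
+ *	static int foo_uc_sync(struct net_device *netdev, const unsigned char *addr)
+ *	{
+ *		return foo_add_mac_filter(netdev_priv(netdev), addr);
+ *	}
+ *
+ *	static void foo_set_rx_mode(struct net_device *netdev)
+ *	{
+ *		__dev_uc_sync(netdev, foo_uc_sync, foo_uc_unsync);
+ *	}
+ */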
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
+
+#else
+#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \
+      ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) )
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+    !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+	return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+	return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define ktime_get_ts64 ktime_get_ts
+#define ktime_get_real_ts64 ktime_get_real_ts
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL6.8 <= RHEL < 7.0) && !(RHEL >= 7.2) */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+     RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+static inline void ktime_get_real_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get_real());
+}
+
+static inline void ktime_get_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get());
+}
+#endif
+
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#endif /* RHEL_RELEASE_CODE < RHEL7.5 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3))
+static inline u64 ktime_get_ns(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+	return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime());
+}
+#endif /* RHEL < 7.3 */
+
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include <linux/rhashtable.h>
+#define HAVE_RHASHTABLE
+#endif /* 3.17.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+#include <linux/errqueue.h>
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data,
+			 unsigned int max_len);
+#else
+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
+#endif /* !RHEL >= 8.2 */
+
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+/* RHEL 7.3 backported xmit_more */
+#if
(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+	tc->nsec += delta;
+}
+
+static inline struct net_device *
+of_find_net_device_by_node(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))))
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+      (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node);
+#define cpumask_local_spread _kc_cpumask_local_spread
+#endif
+#ifdef HAVE_RHASHTABLE
+#define rhashtable_loopup_fast(ht, key, params)		\
+	do {						\
+		(void)params;				\
+		rhashtable_lookup((ht), (key));		\
+	} while (0)
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+#define rhashtable_insert_fast(ht, obj, params)			\
+	do {							\
+		(void)params;					\
+		rhashtable_insert((ht), (obj), GFP_KERNEL);	\
+	} while (0)
+
+#define rhashtable_remove_fast(ht, obj, params)			\
+	do {							\
+		(void)params;					\
+		rhashtable_remove((ht), (obj), GFP_KERNEL);	\
+	} while (0)
+
+#else /* >= 3,19,0 */
+#define rhashtable_insert_fast(ht, obj, params)		\
+	do {						\
+		(void)params;				\
+		rhashtable_insert((ht), (obj));		\
+	} while (0)
+
+#define rhashtable_remove_fast(ht, obj, params)		\
+	do {						\
+		(void)params;				\
+		rhashtable_remove((ht), (obj));		\
+	} while (0)
+
+#endif /* 3,19,0 */
+#endif /* HAVE_RHASHTABLE */
+#else /* >= 4,1,0 */
+#define HAVE_NDO_GET_PHYS_PORT_NAME
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_PASSTHRU_FEATURES_CHECK
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#define HAVE_NDO_SET_TX_MAXRATE
+#endif /* 4,1,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
+#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+     !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \
+     !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+	return page->pfmemalloc;
+#else
+	return false;
+#endif
+}
+#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) && !SLES12sp1+ */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+#define ETHTOOL_RX_FLOW_SPEC_RING	0x00000000FFFFFFFFULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF	0x000000FF00000000ULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+	return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+}
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+	return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+	       ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+}
+#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */
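+/* Illustrative sketch, not from the original patch: how the ring_cookie
+ * helpers above are typically used when parsing an ethtool flow-director
+ * rule.  The fsp variable (a struct ethtool_rx_flow_spec *) is hypothetical.
+ */
+#if 0 /* illustration only */
+	u64 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+	u64 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+#endif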
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
+#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+        RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+       RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+	return bus->self && bus->self->ari_enabled;
+}
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#define HAVE_VF_STATS
+#endif /* (RHEL7.2+) */
+#endif /* !(RHEL6.8+ || RHEL7.2+) */
+#else
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+	return false;
+}
+#endif /* 2.6.27 */
+#else
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#define HAVE_VF_STATS
+#endif /* 4.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))
+/**
+ * _kc_flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct _kc_flow_dissector_key_ipv4_addrs {
+	__be32 src;
+	__be32 dst;
+};
+
+/**
+ * _kc_flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct _kc_flow_dissector_key_ipv6_addrs {
+	struct in6_addr src;
+	struct in6_addr dst;
+};
+
+/**
+ * _kc_flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct _kc_flow_dissector_key_addrs {
+	union {
+		struct _kc_flow_dissector_key_ipv4_addrs v4addrs;
+		struct _kc_flow_dissector_key_ipv6_addrs v6addrs;
+	};
+};
+
+/**
+ * _kc_flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * @src: source port number
+ * @dst: destination port number
+ */
+struct _kc_flow_dissector_key_ports {
+	union {
+		__be32 ports;
+		struct {
+			__be16 src;
+			__be16 dst;
+		};
+	};
+};
+
+/**
+ * _kc_flow_dissector_key_basic:
+ * @n_proto: Network header protocol (eg. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (eg. TCP/UDP)
+ * @padding: padding for alignment
+ */
+struct _kc_flow_dissector_key_basic {
+	__be16 n_proto;
+	u8 ip_proto;
+	u8 padding;
+};
+
+struct _kc_flow_keys {
+	struct _kc_flow_dissector_key_basic basic;
+	struct _kc_flow_dissector_key_ports ports;
+	struct _kc_flow_dissector_key_addrs addrs;
+};
+
+/* These are all the include files for kernels inside this #ifdef block that
+ * have any reference to the in kernel definition of struct flow_keys. The
+ * reason for putting them here is to make 100% sure that these files do not get
+ * included after re-defining flow_keys to _kc_flow_keys. This is done to
+ * prevent any possible ABI issues that this structure re-definition could cause.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \
+     RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \
+     SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#include <net/flow_keys.h>
+#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0))
+#include <net/flow_dissector.h>
+#endif /* 4.2.0 */
+#include
+#include
+#include
+#include
+
+#define flow_keys _kc_flow_keys
+bool
+_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+			       struct flow_keys *flow,
+			       unsigned int __always_unused flags);
+#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys
+#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */
+#endif /* 4.3.0 */
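+/* Illustrative sketch, not from the original patch: dissecting a flow with
+ * the flow_keys compat above.  On pre-4.3 kernels this resolves to
+ * _kc_skb_flow_dissect_flow_keys() and the _kc_flow_keys layout; the skb and
+ * hash variables are hypothetical.
+ */
+#if 0 /* illustration only */
+	struct flow_keys keys;
+
+	if (skb_flow_dissect_flow_keys(skb, &keys, 0) &&
+	    keys.basic.ip_proto == IPPROTO_TCP)
+		hash = (__force u32)keys.ports.ports;
+#endif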
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_NDO_SET_VF_TRUST
+#endif /* (RHEL_RELEASE >= 7.3) */
+#ifndef CONFIG_64BIT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#include <asm-generic/io-64-nonatomic-lo-hi.h>	/* 32-bit readq/writeq */
+#else /* 3.3.0 => 4.3.x */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+#include <asm-generic/int-ll64.h>
+#endif /* 2.6.26 => 3.3.0 */
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+	const volatile u32 __iomem *p = addr;
+	u32 low, high;
+
+	low = readl(p);
+	high = readl(p + 1);
+
+	return low + ((u64)high << 32);
+}
+#define readq readq
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#define writeq writeq
+#endif
+#endif /* < 3.3.0 */
+#endif /* !CONFIG_64BIT */
+#else /* < 4.4.0 */
+#define HAVE_NDO_SET_VF_TRUST
+
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h>	/* 32-bit readq/writeq */
+#endif /* !CONFIG_64BIT */
+#endif /* 4.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
+/* protect against a likely backport */
+#ifndef NETIF_F_CSUM_MASK
+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
+#endif /* NETIF_F_CSUM_MASK */
+#ifndef NETIF_F_SCTP_CRC
+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
+#endif /* NETIF_F_SCTP_CRC */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+				     u8 *mac_addr __maybe_unused);
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#else /* 4.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_GENEVE_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
+#define HAVE_NETDEV_UPPER_INFO
+#endif /* 4.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3))
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+	return skb->head + skb->csum_start;
+#else /* < 2.6.22 */
+	return skb_transport_header(skb);
+#endif
+}
+#endif
+
+#if !(UBUNTU_VERSION_CODE && \
+      UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \
+    !(RHEL_RELEASE_CODE && \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
+    !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+static inline void napi_consume_skb(struct
sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct 
pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			     pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif /* !SLE_VERSION(12,3,0) */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <SLES12sp3 */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \
+    SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \
+    RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))
+#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
+#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4))
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+	dst[0] = mask & ULONG_MAX;
+
+	if (sizeof(mask) > sizeof(unsigned long))
+		dst[1] = mask >> 32;
+}
+#endif /* <RHEL7.4 */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \
+     !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16)))
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+	switch (ethertype) {
+	case htons(ETH_P_8021Q):
+#ifdef ETH_P_8021AD
+	case htons(ETH_P_8021AD):
+#endif
+		return true;
+	default:
+		return false;
+	}
+}
+#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.13.0-16 */
+#else /* >=4.9 */
+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+					   int __always_unused work_done)
+{
+	/* it was really hard to get napi_complete_done to be safe to call
+	 * recursively without running into our own kcompat, so just use
+	 * napi_complete
+	 */
+	napi_complete(napi);
+
+	/* true means that the stack is telling the driver to go-ahead and
+	 * re-enable interrupts
+	 */
+	return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE && \
(SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define 
HAVE_PTP_CLOCK_DO_AUX_WORK
+#endif /* 4.13.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0))
+#ifdef ETHTOOL_GLINKSETTINGS
+#ifndef ethtool_link_ksettings_del_link_mode
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)	\
+	__clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+#endif
+#endif /* ETHTOOL_GLINKSETTINGS */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#endif
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
+#endif
+
+#define TIMER_DATA_TYPE		unsigned long
+#define TIMER_FUNC_TYPE		void (*)(TIMER_DATA_TYPE)
+
+#define timer_setup(timer, callback, flags)				\
+	__setup_timer((timer), (TIMER_FUNC_TYPE)(callback),		\
+		      (TIMER_DATA_TYPE)(timer), (flags))
+
+#define from_timer(var, callback_timer, timer_fieldname) \
+	container_of(callback_timer, typeof(*var), timer_fieldname)
+
+#ifndef xdp_do_flush_map
+#define xdp_do_flush_map() do {} while (0)
+#endif
+struct _kc_xdp_buff {
+	void *data;
+	void *data_end;
+	void *data_hard_start;
+};
+#define xdp_buff _kc_xdp_buff
+struct _kc_bpf_prog {
+};
+#define bpf_prog _kc_bpf_prog
+#ifndef DIV_ROUND_DOWN_ULL
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+#endif /* DIV_ROUND_DOWN_ULL */
+#else /* > 4.14 */
+#define HAVE_XDP_SUPPORT
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_TCF_EXTS_HAS_ACTION
+#endif /* 4.14.0 */
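+/* Illustrative sketch, not from the original patch: with the timer_setup()
+ * and from_timer() fallbacks above, the same callback body works before and
+ * after the timer API change, because the fallback passes the timer's own
+ * address as the callback data.  The my_adapter type is hypothetical.
+ */
+#if 0 /* illustration only */
+static void my_watchdog(struct timer_list *t)
+{
+	struct my_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+	/* in probe: */
+	timer_setup(&adapter->watchdog_timer, my_watchdog, 0);
+#endif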
+
+/*****************************************************************************/
+#ifndef ETHTOOL_GLINKSETTINGS
+
+#define __ETHTOOL_LINK_MODE_MASK_NBITS 32
+#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/**
+ * struct ethtool_link_ksettings
+ * @link_modes: supported and advertising, single item arrays
+ * @link_modes.supported: bitmask of supported link speeds
+ * @link_modes.advertising: bitmask of currently advertised speeds
+ * @base: base link details
+ * @base.speed: current link speed
+ * @base.port: current port type
+ * @base.duplex: current duplex mode
+ * @base.autoneg: current autonegotiation settings
+ *
+ * This struct and the following macros provide a way to support the old
+ * ethtool get/set_settings API on older kernels, but in the style of the new
+ * GLINKSETTINGS API. In this way, the same code can be used to support both
+ * APIs as seamlessly as possible.
+ *
+ * It should be noted the old API only has support up to the first 32 bits.
+ */
+struct ethtool_link_ksettings {
+	struct {
+		u32 speed;
+		u8 port;
+		u8 duplex;
+		u8 autoneg;
+	} base;
+	struct {
+		unsigned long supported[ETHTOOL_LINK_MASK_SIZE];
+		unsigned long advertising[ETHTOOL_LINK_MASK_SIZE];
+	} link_modes;
+};
+
+#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode
+#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode
+#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name
+#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode)
+
+/**
+ * ethtool_link_ksettings_zero_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ */
+#define ethtool_link_ksettings_zero_link_mode(ptr, name)\
+	(*((ptr)->link_modes.name) = 0x0)
+
+/**
+ * ethtool_link_ksettings_add_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to add
+ */
+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\
+	(*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_del_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to delete
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\
+	(*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_test_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to test
+ */
+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\
+	(!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode)))
+
+/**
+ * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd
+ * @ks: ethtool_link_ksettings struct
+ * @cmd: ethtool_cmd struct
+ *
+ * Convert an ethtool_link_ksettings structure into the older ethtool_cmd
+ * structure. We provide this in kcompat.h so that drivers can easily
+ * implement the older .{get|set}_settings as wrappers around the new api.
+ * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually
+ * a real function in the kernel.
+ */
+static inline void
+_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks,
+			     struct ethtool_cmd *cmd)
+{
+	cmd->supported = (u32)ks->link_modes.supported[0];
+	cmd->advertising = (u32)ks->link_modes.advertising[0];
+	ethtool_cmd_speed_set(cmd, ks->base.speed);
+	cmd->duplex = ks->base.duplex;
+	cmd->autoneg = ks->base.autoneg;
+	cmd->port = ks->base.port;
+}
+
+#endif /* !ETHTOOL_GLINKSETTINGS */
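+/* Illustrative sketch, not from the original patch: a legacy .get_settings
+ * implemented as a wrapper around a ksettings-style helper, using the
+ * conversion above.  my_get_link_ksettings is a hypothetical driver helper.
+ */
+#if 0 /* illustration only */
+static int my_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct ethtool_link_ksettings ks;
+
+	my_get_link_ksettings(netdev, &ks);
+	_kc_ethtool_ksettings_to_cmd(&ks, ecmd);
+	return 0;
+}
+#endif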
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
+#define phy_speed_to_str _kc_phy_speed_to_str
+const char *_kc_phy_speed_to_str(int speed);
+#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */
+#include <linux/phy.h>
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#else /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
+#endif /* RHEL >= 7.6 || SLES >= 15.1 */
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src);
+#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks
+#else /* >= 4.15 */
+#define HAVE_NDO_BPF
+#define HAVE_XDP_BUFF_DATA_META
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \
+       SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
+/* The return value of the strscpy() and strlcpy() functions is different.
+ * This could potentially be hazardous in the future.
+ * To avoid this, the result is forced to void, so the function cannot be
+ * used via its return value.
+ * A return value is required in kernels 4.3 through 4.15.
+ */
+#define strscpy(...) (void)(strlcpy(__VA_ARGS__))
+#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */
+
+#define pci_printk(level, pdev, fmt, arg...) \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
+
+#ifndef array_index_nospec
+static inline unsigned long _kc_array_index_mask_nospec(unsigned long index,
+							unsigned long size)
+{
+	/*
+	 * Always calculate and emit the mask even if the compiler
+	 * thinks the mask is not needed. The compiler does not take
+	 * into account the value of @index under speculation.
+	 */
+	OPTIMIZER_HIDE_VAR(index);
+	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+
+#define array_index_nospec(index, size)					\
+({									\
+	typeof(index) _i = (index);					\
+	typeof(size) _s = (size);					\
+	unsigned long _mask = _kc_array_index_mask_nospec(_i, _s);	\
+									\
+	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
+	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
+									\
+	(typeof(_i)) (_i & _mask);					\
+})
+#endif /* array_index_nospec */
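+/* Illustrative sketch, not from the original patch: clamping a user-derived
+ * index with the array_index_nospec() fallback above before using it under
+ * speculation.  The queues array, num_queues and idx are hypothetical.
+ */
+#if 0 /* illustration only */
+	if (idx >= num_queues)
+		return -EINVAL;
+	idx = array_index_nospec(idx, num_queues);
+	ring = queues[idx];
+#endif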
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \
+     !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+			      struct tc_cls_common_offload *common)
+{
+	if (!tc_can_offload(dev))
+		return false;
+	if (common->chain_index)
+		return false;
+
+	return true;
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
+#endif /* sizeof_field */
+#else /* >= 4.16 */
+#include <linux/nospec.h>
+#define HAVE_XDP_BUFF_RXQ
+#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_VF_STATS_DROPPED
+#endif /* 4.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
+#include
+#include
+#define PCIE_SPEED_16_0GT 0x17
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+void _kc_pcie_print_link_status(struct pci_dev *dev);
+#define pcie_print_link_status _kc_pcie_print_link_status
+#else /* >= 4.17.0 */
+#define HAVE_XDP_BUFF_IN_XDP_H
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if IS_ENABLED(CONFIG_NET_DEVLINK) && \
+    (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0))
+#include <net/devlink.h>
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) && \
+     (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)))
+enum devlink_port_flavour {
+	DEVLINK_PORT_FLAVOUR_PHYSICAL,
+	DEVLINK_PORT_FLAVOUR_CPU,
+	DEVLINK_PORT_FLAVOUR_DSA,
+	DEVLINK_PORT_FLAVOUR_PCI_PF,
+	DEVLINK_PORT_FLAVOUR_PCI_VF,
+};
+#endif /* <4.18.0 && <SLES15sp1 && <RHEL7.7 */
+
+struct _kc_devlink_port_attrs {
+	u8 split:1;
+	enum devlink_port_flavour flavour;
+	struct netdev_phys_item_id switch_id;
+	struct {
+		u32 port_number;
+		u32 split_subport_number;
+	} phys;
+};
+#endif /* CONFIG_NET_DEVLINK && <5.9.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0))
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+#include <linux/if_macvlan.h>
+#ifndef macvlan_supports_dest_filter
+#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter
+static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+	       macvlan->mode == MACVLAN_MODE_VEPA ||
+	       macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+#endif
+
+#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0)))
+#ifndef macvlan_accel_priv
+#define macvlan_accel_priv _kc_macvlan_accel_priv
+static inline void *_kc_macvlan_accel_priv(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->fwd_priv;
+}
+#endif
+
+#ifndef macvlan_release_l2fw_offload
+#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload
+static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	macvlan->fwd_priv = NULL;
+	return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif
+#endif /* !SLES || SLES < 15.1 */
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
"kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) +#define firmware_request_nowarn request_firmware_direct + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + if (attrs->split) + devlink_port_split_set(devlink_port, attrs->phys.port_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#define HAVE_AF_XDP_SUPPORT +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) +#define HAVE_DEVLINK_REGIONS +#endif +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#ifndef metadata_dst_free +#include + +static inline void __kc_metadata_dst_free(struct metadata_dst *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#endif /* !metadata_dst_free */ +#endif /* RHEL >= 7.3 || SLES >= 12.2 */ +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | 
NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open + +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct 
flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. + */ + struct flow_action action; +#endif +}; + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define devlink_params_publish(devlink) do { } while (0) +#define devlink_params_unpublish(devlink) do { } while (0) +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && 
(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+#ifndef eth_get_headlen
+static inline u32
+__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data,
+		     unsigned int len)
+{
+	return eth_get_headlen(data, len);
+}
+
+#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len)
+#endif /* !eth_get_headlen */
+#endif /* !RHEL >= 8.2 */
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif /* mmiowb */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2))
+#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set)
+static inline void
+_kc_devlink_port_attrs_set(struct devlink_port *devlink_port,
+			   struct _kc_devlink_port_attrs *attrs)
+{
+	devlink_port_attrs_set(devlink_port, attrs->flavour,
+			       attrs->phys.port_number, attrs->split,
+			       attrs->phys.split_subport_number);
+}
+
+#define devlink_port_attrs_set _kc_devlink_port_attrs_set
+#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */
+#endif /* <RHEL8.2 */
+#else /* >= 5.2.0 */
+#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED
+#define SPIN_UNLOCK_IMPLIES_MMIOWB
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))
+#define flow_block_offload tc_block_offload
+#define flow_block_command tc_block_command
+#define flow_cls_offload tc_cls_flower_offload
+#define flow_block_binder_type tcf_block_binder_type
+#define flow_cls_common_offload tc_cls_common_offload
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE
+#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
+	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+				   struct list_head *driver_list,
+				   tc_setup_cb_t *cb,
+				   void *cb_ident, void *cb_priv,
+				   bool ingress_only);
+
+#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+				   ingress_only) \
+	_kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+				       ingress_only)
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#else /* RHEL >= 8.2 */
+#define HAVE_FLOW_BLOCK_API
+#endif /* RHEL >= 8.2 */
+
+#ifndef ETH_P_LLDP
+#define ETH_P_LLDP	0x88CC
+#endif /* !ETH_P_LLDP */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2))
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+static inline void
+devlink_flash_update_begin_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_end_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_status_notify(struct devlink __always_unused *devlink,
+				   const char __always_unused *status_msg,
+				   const char __always_unused *component,
+				   unsigned long __always_unused done,
+				   unsigned long __always_unused total)
+{
+}
+#endif /* CONFIG_NET_DEVLINK */
+#endif /* <RHEL8.2 */
+#else /* >= 5.3.0 */
+#define XSK_UMEM_RETURNS_XDP_DESC
+#define HAVE_XSK_UMEM_HAS_ADDRS
+#define HAVE_FLOW_BLOCK_API
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE <
KERNEL_VERSION(5,4,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0))) +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} + +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */ +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#define cpu_latency_qos_update_request pm_qos_update_request +#define cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) +#define cpu_latency_qos_remove_request pm_qos_remove_request + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif + +#ifdef HAVE_DEVLINK_REGIONS +#if IS_ENABLED(CONFIG_NET_DEVLINK) +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *data); +}; + +#ifndef devlink_region_create +static inline struct devlink_region * +_kc_devlink_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, u64 region_size) +{ + return devlink_region_create(devlink, ops->name, region_max_snapshots, + region_size); +} + +#define devlink_region_create _kc_devlink_region_create +#endif /* devlink_region_create */ +#endif /* CONFIG_NET_DEVLINK */ +#define HAVE_DEVLINK_SNAPSHOT_CREATE_DESTRUCTOR +#endif /* HAVE_DEVLINK_REGIONS */ 
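+/* Usage sketch (hypothetical caller, not part of this patch): with the
+ * wrapper above, a driver can use the 5.7-style ops-based API on older
+ * kernels too, e.g.:
+ *
+ *	static const struct devlink_region_ops my_region_ops = {
+ *		.name = "my-region",
+ *		.destructor = kfree,
+ *	};
+ *	region = devlink_region_create(devlink, &my_region_ops, 1, size);
+ *
+ * On pre-5.7 kernels the macro falls back to the old name-based call and
+ * ops->destructor is simply not used.
+ */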
+#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#else /* >= 5.8.0 */ +#undef HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#define HAVE_XDP_QUERY_PROG +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* 5.9.0 */ + +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c b/drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c new file mode 100644 index 000000000000..2c3d84d3b545 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c @@ -0,0 +1,1148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +/* + * net/core/ethtool.c - Ethtool ioctl handler + * Copyright (c) 2003 Matthew Wilcox + * + * This file is where we call all the ethtool_ops commands to get + * the information ethtool needs. We fall back to calling do_ioctl() + * for drivers which haven't been converted to ethtool_ops yet. + * + * It's GPL, stupid. + * + * Modification by sfeldma@pobox.com to work as backward compat + * solution for pre-ethtool_ops kernels. + * - copied struct ethtool_ops from ethtool.h + * - defined SET_ETHTOOL_OPS + * - put in some #ifndef NETIF_F_xxx wrappers + * - changes refs to dev->ethtool_ops to ethtool_ops + * - changed dev_ethtool to ethtool_ioctl + * - remove EXPORT_SYMBOL()s + * - added _kc_ prefix in built-in ethtool_op_xxx ops. 
+ */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <asm/uaccess.h> + +#include "kcompat.h" + +#undef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#undef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#undef SPEED_10000 +#define SPEED_10000 10000 + +#undef ethtool_ops +#define ethtool_ops _kc_ethtool_ops + +struct _kc_ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); + int (*self_test_count)(struct net_device *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*phys_id)(struct net_device *, u32); + int (*get_stats_count)(struct net_device *); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, + u64 *); +} *ethtool_ops = NULL; + +#undef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) + +/* + * Some useful ethtool_ops methods that are device independent. If we find that + * all drivers want to do the same thing here, we can turn these into dev_() + * function calls. + */ + +#undef ethtool_op_get_link +#define ethtool_op_get_link _kc_ethtool_op_get_link +u32 _kc_ethtool_op_get_link(struct net_device *dev) +{ + return netif_carrier_ok(dev) ?
1 : 0; +} + +#undef ethtool_op_get_tx_csum +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) +{ +#ifdef NETIF_F_IP_CSUM + return (dev->features & NETIF_F_IP_CSUM) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tx_csum +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_IP_CSUM + if (data) +#ifdef NETIF_F_IPV6_CSUM + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#else + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; +#endif +#endif + + return 0; +} + +#undef ethtool_op_get_sg +#define ethtool_op_get_sg _kc_ethtool_op_get_sg +u32 _kc_ethtool_op_get_sg(struct net_device *dev) +{ +#ifdef NETIF_F_SG + return (dev->features & NETIF_F_SG) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_sg +#define ethtool_op_set_sg _kc_ethtool_op_set_sg +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_SG + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; +#endif + + return 0; +} + +#undef ethtool_op_get_tso +#define ethtool_op_get_tso _kc_ethtool_op_get_tso +u32 _kc_ethtool_op_get_tso(struct net_device *dev) +{ +#ifdef NETIF_F_TSO + return (dev->features & NETIF_F_TSO) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tso +#define ethtool_op_set_tso _kc_ethtool_op_set_tso +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_TSO + if (data) + dev->features |= NETIF_F_TSO; + else + dev->features &= ~NETIF_F_TSO; +#endif + + return 0; +} + +/* Handlers for each ethtool command */ + +static int ethtool_get_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd = { ETHTOOL_GSET }; + int err; + + if (!ethtool_ops->get_settings) + return -EOPNOTSUPP; + + err = ethtool_ops->get_settings(dev, &cmd); + if (err < 0) + return err; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + return 0; +} + +static int ethtool_set_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd; + + if (!ethtool_ops->set_settings) + return -EOPNOTSUPP; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + return ethtool_ops->set_settings(dev, &cmd); +} + +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) +{ + struct ethtool_drvinfo info; + struct ethtool_ops *ops = ethtool_ops; + + if (!ops->get_drvinfo) + return -EOPNOTSUPP; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GDRVINFO; + ops->get_drvinfo(dev, &info); + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + if (ops->get_regs_len) + info.regdump_len = ops->get_regs_len(dev); + if (ops->get_eeprom_len) + info.eedump_len = ops->get_eeprom_len(dev); + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int ethtool_get_regs(struct net_device *dev, char *useraddr) +{ + struct ethtool_regs regs; + struct ethtool_ops *ops = ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(&regs, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (regs.len > reglen) + regs.len = reglen; + + regbuf = kmalloc(reglen, GFP_USER); + if (!regbuf) + return
-ENOMEM; + + ops->get_regs(dev, &regs, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, &regs, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + +out: + kfree(regbuf); + return ret; +} + +static int ethtool_get_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; + + if (!ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol; + + if (!ethtool_ops->set_wol) + return -EOPNOTSUPP; + + if (copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + return ethtool_ops->set_wol(dev, &wol); +} + +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + + if (!ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_msglevel(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_msglevel) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_msglevel(dev, edata.data); + return 0; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, void *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + + if (!ethtool_ops->get_link) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_link(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->get_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + ret = -EFAULT; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + goto out; + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->set_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); +
if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ethtool_ops->get_coalesce(dev, &coalesce); + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + return ethtool_ops->set_coalesce(dev, &coalesce); +} + +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + ethtool_ops->get_ringparam(dev, &ringparam); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + return ethtool_ops->set_ringparam(dev, &ringparam); +} + +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + return ethtool_ops->set_pauseparam(dev, &pauseparam); +} + +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GRXCSUM }; + + if (!ethtool_ops->get_rx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_rx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_rx_csum(dev, edata.data); + return 0; +} + +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTXCSUM }; + + if (!ethtool_ops->get_tx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tx_csum(dev, edata.data); +} 
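+/* Usage sketch (hypothetical driver code, not part of this patch): on
+ * kernels that predate netdev->ethtool_ops, a driver registers its ops
+ * through the compat SET_ETHTOOL_OPS above and forwards SIOCETHTOOL from
+ * its do_ioctl handler to ethtool_ioctl() later in this file, e.g.:
+ *
+ *	static struct ethtool_ops mydrv_ethtool_ops = {
+ *		.get_link = ethtool_op_get_link,
+ *		.get_sg = ethtool_op_get_sg,
+ *		.set_sg = ethtool_op_set_sg,
+ *	};
+ *	SET_ETHTOOL_OPS(netdev, &mydrv_ethtool_ops);
+ */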
+ +static int ethtool_get_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GSG }; + + if (!ethtool_ops->get_sg) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_sg(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_sg(dev, edata.data); +} + +static int ethtool_get_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTSO }; + + if (!ethtool_ops->get_tso) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tso(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tso(dev, edata.data); +} + +static int ethtool_self_test(struct net_device *dev, char *useraddr) +{ + struct ethtool_test test; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->self_test || !ops->self_test_count) + return -EOPNOTSUPP; + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = ops->self_test_count(dev); + data = kmalloc(test.len * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->self_test(dev, &test, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct net_device *dev, void *useraddr) +{ + struct ethtool_gstrings gstrings; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_strings) + return -EOPNOTSUPP; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } + + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_strings(dev, gstrings.string_set, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_phys_id(struct net_device *dev, void *useraddr) +{ + struct ethtool_value id; + + if (!ethtool_ops->phys_id) + return -EOPNOTSUPP; + + if (copy_from_user(&id, useraddr, sizeof(id))) + return -EFAULT; + + return ethtool_ops->phys_id(dev, id.data); +} + +static int ethtool_get_stats(struct net_device *dev, void *useraddr) +{ + struct ethtool_stats stats; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->get_ethtool_stats || !ops->get_stats_count) + return -EOPNOTSUPP; + + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + stats.n_stats = ops->get_stats_count(dev); + 
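+	/* stats.n_stats is overwritten with the driver's own count before
+	 * sizing the buffer below, so the allocation never depends on the
+	 * value copied in from user space.
+	 */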
data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_ethtool_stats(dev, &stats, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &stats, sizeof(stats))) + goto out; + useraddr += sizeof(stats); + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +/* The main entry point in this file. Called from net/core/dev.c */ + +#define ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr) +{ + struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + void *useraddr = (void *) ifr->ifr_data; + u32 ethcmd; + + /* + * XXX: This can be pushed down into the ethtool_* handlers that + * need it. Keep existing behavior for the moment. + */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!dev || !netif_device_present(dev)) + return -ENODEV; + + if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GSET: + return ethtool_get_settings(dev, useraddr); + case ETHTOOL_SSET: + return ethtool_set_settings(dev, useraddr); + case ETHTOOL_GDRVINFO: + return ethtool_get_drvinfo(dev, useraddr); + case ETHTOOL_GREGS: + return ethtool_get_regs(dev, useraddr); + case ETHTOOL_GWOL: + return ethtool_get_wol(dev, useraddr); + case ETHTOOL_SWOL: + return ethtool_set_wol(dev, useraddr); + case ETHTOOL_GMSGLVL: + return ethtool_get_msglevel(dev, useraddr); + case ETHTOOL_SMSGLVL: + return ethtool_set_msglevel(dev, useraddr); + case ETHTOOL_NWAY_RST: + return ethtool_nway_reset(dev); + case ETHTOOL_GLINK: + return ethtool_get_link(dev, useraddr); + case ETHTOOL_GEEPROM: + return ethtool_get_eeprom(dev, useraddr); + case ETHTOOL_SEEPROM: + return ethtool_set_eeprom(dev, useraddr); + case ETHTOOL_GCOALESCE: + return ethtool_get_coalesce(dev, useraddr); + case ETHTOOL_SCOALESCE: + return ethtool_set_coalesce(dev, useraddr); + case ETHTOOL_GRINGPARAM: + return ethtool_get_ringparam(dev, useraddr); + case ETHTOOL_SRINGPARAM: + return ethtool_set_ringparam(dev, useraddr); + case ETHTOOL_GPAUSEPARAM: + return ethtool_get_pauseparam(dev, useraddr); + case ETHTOOL_SPAUSEPARAM: + return ethtool_set_pauseparam(dev, useraddr); + case ETHTOOL_GRXCSUM: + return ethtool_get_rx_csum(dev, useraddr); + case ETHTOOL_SRXCSUM: + return ethtool_set_rx_csum(dev, useraddr); + case ETHTOOL_GTXCSUM: + return ethtool_get_tx_csum(dev, useraddr); + case ETHTOOL_STXCSUM: + return ethtool_set_tx_csum(dev, useraddr); + case ETHTOOL_GSG: + return ethtool_get_sg(dev, useraddr); + case ETHTOOL_SSG: + return ethtool_set_sg(dev, useraddr); + case ETHTOOL_GTSO: + return ethtool_get_tso(dev, useraddr); + case ETHTOOL_STSO: + return ethtool_set_tso(dev, useraddr); + case ETHTOOL_TEST: + return ethtool_self_test(dev, useraddr); + case ETHTOOL_GSTRINGS: + return ethtool_get_strings(dev, useraddr); + case ETHTOOL_PHYS_ID: + return ethtool_phys_id(dev, useraddr); + case ETHTOOL_GSTATS: + return ethtool_get_stats(dev, useraddr); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +#define mii_if_info _kc_mii_if_info +struct _kc_mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + + unsigned int full_duplex : 1; /* is full duplex? */ + unsigned int force_media : 1; /* is autoneg. disabled?
*/ + + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int phy_id, int location); + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); +}; + +struct ethtool_cmd; +struct mii_ioctl_data; + +#undef mii_link_ok +#define mii_link_ok _kc_mii_link_ok +#undef mii_nway_restart +#define mii_nway_restart _kc_mii_nway_restart +#undef mii_ethtool_gset +#define mii_ethtool_gset _kc_mii_ethtool_gset +#undef mii_ethtool_sset +#define mii_ethtool_sset _kc_mii_ethtool_sset +#undef mii_check_link +#define mii_check_link _kc_mii_check_link +extern int _kc_mii_link_ok (struct mii_if_info *mii); +extern int _kc_mii_nway_restart (struct mii_if_info *mii); +extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, + struct ethtool_cmd *ecmd); +extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, + struct ethtool_cmd *ecmd); +extern void _kc_mii_check_link (struct mii_if_info *mii); +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +#undef generic_mii_ioctl +#define generic_mii_ioctl _kc_generic_mii_ioctl +extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_changed); +#endif /* > 2.4.6 */ + + +struct _kc_pci_dev_ext { + struct pci_dev *dev; + void *pci_drvdata; + struct pci_driver *driver; +}; + +struct _kc_net_dev_ext { + struct net_device *dev; + unsigned int carrier; +}; + + +/**************************************/ +/* mii support */ + +int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u32 advert, bmcr, lpa, nego; + + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); + + /* only supports twisted-pair */ + ecmd->port = PORT_MII; + + /* only supports internal transceiver */ + ecmd->transceiver = XCVR_INTERNAL; + + /* this isn't fully supported at higher layers */ + ecmd->phy_address = mii->phy_id; + + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + if (advert & ADVERTISE_10HALF) + ecmd->advertising |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + ecmd->advertising |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); + if (bmcr & BMCR_ANENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + + nego = mii_nway_result(advert & lpa); + if (nego == LPA_100FULL || nego == LPA_100HALF) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + if (nego == LPA_100FULL || nego == LPA_10FULL) { + ecmd->duplex = DUPLEX_FULL; + mii->full_duplex = 1; + } else { + ecmd->duplex = DUPLEX_HALF; + mii->full_duplex = 0; + } + } else { + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + } + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if (ecmd->phy_address != mii->phy_id) + return -EINVAL; + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* ignore supported, maxtxpkt, maxrxpkt */ + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 bmcr, advert, tmp; + + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) == 0) + return -EINVAL; + + /* advertise only what has been requested */ + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + tmp |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + tmp |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + tmp |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + tmp |= ADVERTISE_100FULL; + if (advert != tmp) { + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); + mii->advertising = tmp; + } + + /* turn on autonegotiation, and force a renegotiate */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); + + mii->force_media = 0; + } else { + u32 bmcr, tmp; + + /* turn off auto negotiation, set speed and duplexity */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); + if (ecmd->speed == SPEED_100) + tmp |= BMCR_SPEED100; + if (ecmd->duplex == DUPLEX_FULL) { + tmp |= BMCR_FULLDPLX; + mii->full_duplex = 1; + } else + mii->full_duplex = 0; + if (bmcr != tmp) + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); + + mii->force_media = 1; + } + return 0; +} + +int _kc_mii_link_ok (struct mii_if_info *mii) +{ + /* first, a dummy read, needed to latch some MII phys */ + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) + return 1; + return 0; +} + +int _kc_mii_nway_restart (struct mii_if_info *mii) +{ + int bmcr; + int r = -EINVAL; + + /* if autoneg is off, it's an error */ + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); + r = 0; + } + + return r; +} + +void _kc_mii_check_link (struct mii_if_info *mii) +{ + int cur_link = mii_link_ok(mii); + int prev_link = netif_carrier_ok(mii->dev); + + if (cur_link && !prev_link) + netif_carrier_on(mii->dev); + else if (prev_link && !cur_link) + netif_carrier_off(mii->dev); +} + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_chg_out) +{ + int rc = 0; + unsigned int duplex_changed = 0; + + if (duplex_chg_out) + *duplex_chg_out = 0; + + mii_data->phy_id &= mii_if->phy_id_mask; + mii_data->reg_num &= mii_if->reg_num_mask; + + switch(cmd) { + case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ + case SIOCGMIIPHY: + mii_data->phy_id = mii_if->phy_id; + /* fall through */ + +
case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ + case SIOCGMIIREG: + mii_data->val_out = + mii_if->mdio_read(mii_if->dev, mii_data->phy_id, + mii_data->reg_num); + break; + + case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ + case SIOCSMIIREG: { + u16 val = mii_data->val_in; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (mii_data->phy_id == mii_if->phy_id) { + switch(mii_data->reg_num) { + case MII_BMCR: { + unsigned int new_duplex = 0; + if (val & (BMCR_RESET|BMCR_ANENABLE)) + mii_if->force_media = 0; + else + mii_if->force_media = 1; + if (mii_if->force_media && + (val & BMCR_FULLDPLX)) + new_duplex = 1; + if (mii_if->full_duplex != new_duplex) { + duplex_changed = 1; + mii_if->full_duplex = new_duplex; + } + break; + } + case MII_ADVERTISE: + mii_if->advertising = val; + break; + default: + /* do nothing */ + break; + } + } + + mii_if->mdio_write(mii_if->dev, mii_data->phy_id, + mii_data->reg_num, val); + break; + } + + default: + rc = -EOPNOTSUPP; + break; + } + + if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) + *duplex_chg_out = 1; + + return rc; +} +#endif /* > 2.4.6 */ + diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat_overflow.h b/drivers/net/ethernet/intel/ixgbe/kcompat_overflow.h new file mode 100644 index 000000000000..3a306462a24e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/kcompat_overflow.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include <linux/compiler.h> + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro.
+ */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. 
+ */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. 
+ * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile old mode 100644 new mode 100755 index 186a4bb24fde..d7f93a677521 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -6,9 +6,12 @@ obj-$(CONFIG_IXGBEVF) += ixgbevf.o -ixgbevf-objs := vf.o \ - mbx.o \ - ethtool.o \ - ixgbevf_main.o -ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o +ixgbevf-objs := ixgbe_vf.o \ + ixgbe_mbx.o \ + ixgbevf_ethtool.o \ + ixgbevf_main.o \ + kcompat.o + +ixgbevf-${CONFIG_PCI_HYPERV:m=y} += ixgbe_hv_vf.o + diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h deleted file mode 100644 index 6bace746eaac..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ /dev/null @@ -1,302 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ - -#ifndef _IXGBEVF_DEFINES_H_ -#define _IXGBEVF_DEFINES_H_ - -/* Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 - -#define IXGBE_DEV_ID_82599_VF_HV 0x152E -#define IXGBE_DEV_ID_X540_VF_HV 0x1530 -#define IXGBE_DEV_ID_X550_VF_HV 0x1564 -#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 - -#define IXGBE_VF_IRQ_CLEAR_MASK 7 -#define IXGBE_VF_MAX_TX_QUEUES 8 -#define IXGBE_VF_MAX_RX_QUEUES 8 - -/* DCB define */ -#define IXGBE_VF_MAX_TRAFFIC_CLASS 8 - -/* Link speed */ -typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 -#define IXGBE_LINK_SPEED_100_FULL 0x0008 - -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 - -/* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ - -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ - -/* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 - -/* DCA Control */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ - -/* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 - -/* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 -#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 - -/* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define 
IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ -#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */ - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 - -/* RSS Hash results */ -#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 -#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 -#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 -#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 -#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 - -#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) - -#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) - -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) - -/* Transmit Descriptor - Advanced */ -union ixgbe_adv_tx_desc { - struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le32 cmd_type_len; - __le32 olinfo_status; - } read; - struct { - __le64 rsvd; /* Reserved */ - __le32 nxtseq_seed; - __le32 status; - } wb; -}; - -/* Receive Descriptor - Advanced */ -union ixgbe_adv_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - union { - __le32 data; - struct { - __le16 pkt_info; /* RSS, Pkt type */ - __le16 hdr_info; /* Splithdr, hdrlen */ - } hs_rss; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -/* Context descriptors */ -struct ixgbe_adv_tx_context_desc { - __le32 vlan_macip_lens; - __le32 fceof_saidx; - __le32 type_tucmd_mlhl; - __le32 mss_l4len_idx; -}; - -/* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 
0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ -#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - -/* Interrupt register bitmasks */ - -#define IXGBE_EITR_CNT_WDIS 0x80000000 -#define IXGBE_MAX_EITR 0x00000FF8 -#define IXGBE_MIN_EITR 8 - -/* Error Codes */ -#define IXGBE_ERR_INVALID_MAC_ADDR -1 -#define IXGBE_ERR_RESET_FAILED -2 -#define IXGBE_ERR_INVALID_ARGUMENT -3 - -/* Transmit Config masks */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ -#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ -#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ - -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ - -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ - -#endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c deleted file mode 100644 index 54459b69c948..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ /dev/null @@ -1,998 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ - -/* ethtool support for ixgbevf */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ixgbevf.h" - -#define IXGBE_ALL_RAR_ENTRIES 16 - -enum {NETDEV_STATS, IXGBEVF_STATS}; - -struct ixgbe_stats { - char stat_string[ETH_GSTRING_LEN]; - int type; - int sizeof_stat; - int stat_offset; -}; - -#define IXGBEVF_STAT(_name, _stat) { \ - .stat_string = _name, \ - .type = IXGBEVF_STATS, \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \ - .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ -} - -#define IXGBEVF_NETDEV_STAT(_net_stat) { \ - .stat_string = #_net_stat, \ - .type = NETDEV_STATS, \ - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ - .stat_offset = offsetof(struct net_device_stats, _net_stat) \ -} - -static struct ixgbe_stats ixgbevf_gstrings_stats[] = { - IXGBEVF_NETDEV_STAT(rx_packets), - IXGBEVF_NETDEV_STAT(tx_packets), - IXGBEVF_NETDEV_STAT(rx_bytes), - IXGBEVF_NETDEV_STAT(tx_bytes), - IXGBEVF_STAT("tx_busy", tx_busy), - IXGBEVF_STAT("tx_restart_queue", restart_queue), - IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), - IXGBEVF_NETDEV_STAT(multicast), - IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), - IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), - IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), - IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), - IXGBEVF_STAT("tx_ipsec", tx_ipsec), - IXGBEVF_STAT("rx_ipsec", rx_ipsec), -}; - -#define IXGBEVF_QUEUE_STATS_LEN ( \ - (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ - ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \ - ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ - (sizeof(struct ixgbevf_stats) / sizeof(u64))) -#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) - -#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) -static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", - "Link test (on/offline)" -}; - -#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) - -static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0) - "legacy-rx", -}; - -#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings) - -static int ixgbevf_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - ethtool_link_ksettings_zero_link_mode(cmd, supported); - ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); - cmd->base.autoneg = AUTONEG_DISABLE; - cmd->base.port = -1; - - if (adapter->link_up) { - __u32 speed = SPEED_10000; - - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - speed = SPEED_10000; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - speed = SPEED_1000; - break; - case IXGBE_LINK_SPEED_100_FULL: - speed = SPEED_100; - break; - } - - cmd->base.speed = speed; - cmd->base.duplex = DUPLEX_FULL; - } else { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - } - - return 0; -} - -static u32 ixgbevf_get_msglevel(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - return adapter->msg_enable; -} - -static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - adapter->msg_enable = data; -} - -#define 
IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) - -static int ixgbevf_get_regs_len(struct net_device *netdev) -{ -#define IXGBE_REGS_LEN 45 - return IXGBE_REGS_LEN * sizeof(u32); -} - -static void ixgbevf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, - void *p) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u32 regs_len = ixgbevf_get_regs_len(netdev); - u8 i; - - memset(p, 0, regs_len); - - /* generate a number suitable for ethtool's register version */ - regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; - - /* General Registers */ - regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); - regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); - regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); - - /* Interrupt */ - /* don't read EICR because it can clear interrupt causes, instead - * read EICS which is a shadow but doesn't clear EICR - */ - regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); - regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); - regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); - regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); - regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); - regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); - regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); - regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); - - /* Receive DMA */ - for (i = 0; i < 2; i++) - regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); - for (i = 0; i < 2; i++) - regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); - for (i = 0; i < 2; i++) - regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); - for (i = 0; i < 2; i++) - regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); - for (i = 0; i < 2; i++) - regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); - for (i = 0; i < 2; i++) - regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); - for (i = 0; i < 2; i++) - regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); - - /* Receive */ - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); - - /* Transmit */ - for (i = 0; i < 2; i++) - regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); - for (i = 0; i < 2; i++) - regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); - for (i = 0; i < 2; i++) - regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); - for (i = 0; i < 2; i++) - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); - for (i = 0; i < 2; i++) - regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); - for (i = 0; i < 2; i++) - regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); - for (i = 0; i < 2; i++) - regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); - for (i = 0; i < 2; i++) - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); -} - -static void ixgbevf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgbevf_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); - - drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN; -} - -static void ixgbevf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - 
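- /* report the compile-time descriptor limits and the current ring sizes */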
- ring->rx_max_pending = IXGBEVF_MAX_RXD; - ring->tx_max_pending = IXGBEVF_MAX_TXD; - ring->rx_pending = adapter->rx_ring_count; - ring->tx_pending = adapter->tx_ring_count; -} - -static int ixgbevf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; - u32 new_rx_count, new_tx_count; - int i, j, err = 0; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD); - new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - - new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD); - new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - /* if nothing to do return success */ - if ((new_tx_count == adapter->tx_ring_count) && - (new_rx_count == adapter->rx_ring_count)) - return 0; - - while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (!netif_running(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->count = new_tx_count; - for (i = 0; i < adapter->num_xdp_queues; i++) - adapter->xdp_ring[i]->count = new_tx_count; - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->count = new_rx_count; - adapter->tx_ring_count = new_tx_count; - adapter->xdp_ring_count = new_tx_count; - adapter->rx_ring_count = new_rx_count; - goto clear_reset; - } - - if (new_tx_count != adapter->tx_ring_count) { - tx_ring = vmalloc(array_size(sizeof(*tx_ring), - adapter->num_tx_queues + - adapter->num_xdp_queues)); - if (!tx_ring) { - err = -ENOMEM; - goto clear_reset; - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - /* clone ring and setup updated count */ - tx_ring[i] = *adapter->tx_ring[i]; - tx_ring[i].count = new_tx_count; - err = ixgbevf_setup_tx_resources(&tx_ring[i]); - if (err) { - while (i) { - i--; - ixgbevf_free_tx_resources(&tx_ring[i]); - } - - vfree(tx_ring); - tx_ring = NULL; - - goto clear_reset; - } - } - - for (j = 0; j < adapter->num_xdp_queues; i++, j++) { - /* clone ring and setup updated count */ - tx_ring[i] = *adapter->xdp_ring[j]; - tx_ring[i].count = new_tx_count; - err = ixgbevf_setup_tx_resources(&tx_ring[i]); - if (err) { - while (i) { - i--; - ixgbevf_free_tx_resources(&tx_ring[i]); - } - - vfree(tx_ring); - tx_ring = NULL; - - goto clear_reset; - } - } - } - - if (new_rx_count != adapter->rx_ring_count) { - rx_ring = vmalloc(array_size(sizeof(*rx_ring), - adapter->num_rx_queues)); - if (!rx_ring) { - err = -ENOMEM; - goto clear_reset; - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - /* clone ring and setup updated count */ - rx_ring[i] = *adapter->rx_ring[i]; - - /* Clear copied XDP RX-queue info */ - memset(&rx_ring[i].xdp_rxq, 0, - sizeof(rx_ring[i].xdp_rxq)); - - rx_ring[i].count = new_rx_count; - err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); - if (err) { - while (i) { - i--; - ixgbevf_free_rx_resources(&rx_ring[i]); - } - - vfree(rx_ring); - rx_ring = NULL; - - goto clear_reset; - } - } - } - - /* bring interface down to prepare for update */ - ixgbevf_down(adapter); - - /* Tx */ - if (tx_ring) { - for (i = 0; i < adapter->num_tx_queues; i++) { - ixgbevf_free_tx_resources(adapter->tx_ring[i]); - *adapter->tx_ring[i] = tx_ring[i]; - } - adapter->tx_ring_count = new_tx_count; - - for (j = 
0; j < adapter->num_xdp_queues; i++, j++) { - ixgbevf_free_tx_resources(adapter->xdp_ring[j]); - *adapter->xdp_ring[j] = tx_ring[i]; - } - adapter->xdp_ring_count = new_tx_count; - - vfree(tx_ring); - tx_ring = NULL; - } - - /* Rx */ - if (rx_ring) { - for (i = 0; i < adapter->num_rx_queues; i++) { - ixgbevf_free_rx_resources(adapter->rx_ring[i]); - *adapter->rx_ring[i] = rx_ring[i]; - } - adapter->rx_ring_count = new_rx_count; - - vfree(rx_ring); - rx_ring = NULL; - } - - /* restore interface using new values */ - ixgbevf_up(adapter); - -clear_reset: - /* free Tx resources if Rx error is encountered */ - if (tx_ring) { - for (i = 0; - i < adapter->num_tx_queues + adapter->num_xdp_queues; i++) - ixgbevf_free_tx_resources(&tx_ring[i]); - vfree(tx_ring); - } - - clear_bit(__IXGBEVF_RESETTING, &adapter->state); - return err; -} - -static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) -{ - switch (stringset) { - case ETH_SS_TEST: - return IXGBEVF_TEST_LEN; - case ETH_SS_STATS: - return IXGBEVF_STATS_LEN; - case ETH_SS_PRIV_FLAGS: - return IXGBEVF_PRIV_FLAGS_STR_LEN; - default: - return -EINVAL; - } -} - -static void ixgbevf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *net_stats; - unsigned int start; - struct ixgbevf_ring *ring; - int i, j; - char *p; - - ixgbevf_update_stats(adapter); - net_stats = dev_get_stats(netdev, &temp); - for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { - switch (ixgbevf_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *)net_stats + - ixgbevf_gstrings_stats[i].stat_offset; - break; - case IXGBEVF_STATS: - p = (char *)adapter + - ixgbevf_gstrings_stats[i].stat_offset; - break; - default: - data[i] = 0; - continue; - } - - data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } - - /* populate Tx queue data */ - for (j = 0; j < adapter->num_tx_queues; j++) { - ring = adapter->tx_ring[j]; - if (!ring) { - data[i++] = 0; - data[i++] = 0; - continue; - } - - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - data[i] = ring->stats.packets; - data[i + 1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - i += 2; - } - - /* populate XDP queue data */ - for (j = 0; j < adapter->num_xdp_queues; j++) { - ring = adapter->xdp_ring[j]; - if (!ring) { - data[i++] = 0; - data[i++] = 0; - continue; - } - - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - data[i] = ring->stats.packets; - data[i + 1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - i += 2; - } - - /* populate Rx queue data */ - for (j = 0; j < adapter->num_rx_queues; j++) { - ring = adapter->rx_ring[j]; - if (!ring) { - data[i++] = 0; - data[i++] = 0; - continue; - } - - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - data[i] = ring->stats.packets; - data[i + 1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - i += 2; - } -} - -static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - char *p = (char *)data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *ixgbe_gstrings_test, - IXGBEVF_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbevf_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_xdp_queues; i++) { - sprintf(p, "xdp_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "xdp_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } - break; - case ETH_SS_PRIV_FLAGS: - memcpy(data, ixgbevf_priv_flags_strings, - IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); - break; - } -} - -static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data) -{ - struct ixgbe_hw *hw = &adapter->hw; - bool link_up; - u32 link_speed = 0; - *data = 0; - - hw->mac.ops.check_link(hw, &link_speed, &link_up, true); - if (!link_up) - *data = 1; - - return *data; -} - -/* ethtool register test data */ -struct ixgbevf_reg_test { - u16 reg; - u8 array_len; - u8 test_type; - u32 mask; - u32 write; -}; - -/* In the hardware, registers are laid out either singly, in arrays - * spaced 0x40 bytes apart, or in contiguous tables. We assume - * most tests take place on arrays or single registers (handled - * as a single-element array) and special-case the tables. - * Table tests are always pattern tests. - * - * We also make provision for some required setup steps by specifying - * registers to be written without any read-back testing. 
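- * - * For example, an entry with array_len = 2 and test_type = PATTERN_TEST - * probes reg + 0x00 and reg + 0x40, while a TABLE32_TEST entry with - * array_len = n walks reg, reg + 4, ..., reg + 4 * (n - 1).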
- */ - -#define PATTERN_TEST 1 -#define SET_READ_TEST 2 -#define WRITE_NO_TEST 3 -#define TABLE32_TEST 4 -#define TABLE64_TEST_LO 5 -#define TABLE64_TEST_HI 6 - -/* default VF register test */ -static const struct ixgbevf_reg_test reg_test_vf[] = { - { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, - { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, - { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, - { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, - { .reg = 0 } -}; - -static const u32 register_test_patterns[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF -}; - -static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data, - int reg, u32 mask, u32 write) -{ - u32 pat, val, before; - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - *data = 1; - return true; - } - for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { - before = ixgbevf_read_reg(&adapter->hw, reg); - ixgbe_write_reg(&adapter->hw, reg, - register_test_patterns[pat] & write); - val = ixgbevf_read_reg(&adapter->hw, reg); - if (val != (register_test_patterns[pat] & write & mask)) { - hw_dbg(&adapter->hw, - "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", - reg, val, - register_test_patterns[pat] & write & mask); - *data = reg; - ixgbe_write_reg(&adapter->hw, reg, before); - return true; - } - ixgbe_write_reg(&adapter->hw, reg, before); - } - return false; -} - -static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data, - int reg, u32 mask, u32 write) -{ - u32 val, before; - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - *data = 1; - return true; - } - before = ixgbevf_read_reg(&adapter->hw, reg); - ixgbe_write_reg(&adapter->hw, reg, write & mask); - val = ixgbevf_read_reg(&adapter->hw, reg); - if ((write & mask) != (val & mask)) { - pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", - reg, (val & mask), write & mask); - *data = reg; - ixgbe_write_reg(&adapter->hw, reg, before); - return true; - } - ixgbe_write_reg(&adapter->hw, reg, before); - return false; -} - -static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) -{ - const struct ixgbevf_reg_test *test; - u32 i; - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - dev_err(&adapter->pdev->dev, - "Adapter removed - register test blocked\n"); - *data = 1; - return 1; - } - test = reg_test_vf; - - /* Perform the register test, looping through the test table - * until we either fail or reach the null entry. 
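- * Each failing check stores the offending register (or 1 if the - * adapter has been removed) in *data and makes the test return 1.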
- */ - while (test->reg) { - for (i = 0; i < test->array_len; i++) { - bool b = false; - - switch (test->test_type) { - case PATTERN_TEST: - b = reg_pattern_test(adapter, data, - test->reg + (i * 0x40), - test->mask, - test->write); - break; - case SET_READ_TEST: - b = reg_set_and_check(adapter, data, - test->reg + (i * 0x40), - test->mask, - test->write); - break; - case WRITE_NO_TEST: - ixgbe_write_reg(&adapter->hw, - test->reg + (i * 0x40), - test->write); - break; - case TABLE32_TEST: - b = reg_pattern_test(adapter, data, - test->reg + (i * 4), - test->mask, - test->write); - break; - case TABLE64_TEST_LO: - b = reg_pattern_test(adapter, data, - test->reg + (i * 8), - test->mask, - test->write); - break; - case TABLE64_TEST_HI: - b = reg_pattern_test(adapter, data, - test->reg + 4 + (i * 8), - test->mask, - test->write); - break; - } - if (b) - return 1; - } - test++; - } - - *data = 0; - return *data; -} - -static void ixgbevf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - bool if_running = netif_running(netdev); - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - dev_err(&adapter->pdev->dev, - "Adapter removed - test blocked\n"); - data[0] = 1; - data[1] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - return; - } - set_bit(__IXGBEVF_TESTING, &adapter->state); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - hw_dbg(&adapter->hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result - */ - if (ixgbevf_link_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (if_running) - /* indicate we're in test mode */ - ixgbevf_close(netdev); - else - ixgbevf_reset(adapter); - - hw_dbg(&adapter->hw, "register testing starting\n"); - if (ixgbevf_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - ixgbevf_reset(adapter); - - clear_bit(__IXGBEVF_TESTING, &adapter->state); - if (if_running) - ixgbevf_open(netdev); - } else { - hw_dbg(&adapter->hw, "online testing starting\n"); - /* Online tests */ - if (ixgbevf_link_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* Online tests aren't run; pass by default */ - data[0] = 0; - - clear_bit(__IXGBEVF_TESTING, &adapter->state); - } - msleep_interruptible(4 * 1000); -} - -static int ixgbevf_nway_reset(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) - ixgbevf_reinit_locked(adapter); - - return 0; -} - -static int ixgbevf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - /* only valid if in constant ITR mode */ - if (adapter->rx_itr_setting <= 1) - ec->rx_coalesce_usecs = adapter->rx_itr_setting; - else - ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - - /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) - return 0; - - /* only valid if in constant ITR mode */ - if (adapter->tx_itr_setting <= 1) - ec->tx_coalesce_usecs = adapter->tx_itr_setting; - else - ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; - - return 0; -} - -static int ixgbevf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbevf_q_vector *q_vector; - int num_vectors, i; - u16 tx_itr_param, 
rx_itr_param; - - /* don't accept Tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && - adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) - return -EINVAL; - - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || - (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) - return -EINVAL; - - if (ec->rx_coalesce_usecs > 1) - adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; - else - adapter->rx_itr_setting = ec->rx_coalesce_usecs; - - if (adapter->rx_itr_setting == 1) - rx_itr_param = IXGBE_20K_ITR; - else - rx_itr_param = adapter->rx_itr_setting; - - if (ec->tx_coalesce_usecs > 1) - adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; - else - adapter->tx_itr_setting = ec->tx_coalesce_usecs; - - if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_12K_ITR; - else - tx_itr_param = adapter->tx_itr_setting; - - num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (i = 0; i < num_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (q_vector->tx.count && !q_vector->rx.count) - /* Tx only */ - q_vector->itr = tx_itr_param; - else - /* Rx only or mixed */ - q_vector->itr = rx_itr_param; - ixgbevf_write_eitr(q_vector); - } - - return 0; -} - -static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) -{ - struct ixgbevf_adapter *adapter = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = adapter->num_rx_queues; - return 0; - default: - hw_dbg(&adapter->hw, "Command parameters not supported\n"); - return -EOPNOTSUPP; - } -} - -static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) - return IXGBEVF_X550_VFRETA_SIZE; - - return IXGBEVF_82599_RETA_SIZE; -} - -static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) -{ - return IXGBEVF_RSS_HASH_KEY_SIZE; -} - -static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - int err = 0; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - - if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { - if (key) - memcpy(key, adapter->rss_key, - ixgbevf_get_rxfh_key_size(netdev)); - - if (indir) { - int i; - - for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) - indir[i] = adapter->rss_indir_tbl[i]; - } - } else { - /* If neither indirection table nor hash key was requested - * - just return a success avoiding taking any locks. 
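- * Otherwise the values live on the PF and must be fetched over the - * mailbox, which means taking mbx_lock below.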
- */ - if (!indir && !key) - return 0; - - spin_lock_bh(&adapter->mbx_lock); - if (indir) - err = ixgbevf_get_reta_locked(&adapter->hw, indir, - adapter->num_rx_queues); - - if (!err && key) - err = ixgbevf_get_rss_key_locked(&adapter->hw, key); - - spin_unlock_bh(&adapter->mbx_lock); - } - - return err; -} - -static u32 ixgbevf_get_priv_flags(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - u32 priv_flags = 0; - - if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) - priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX; - - return priv_flags; -} - -static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - unsigned int flags = adapter->flags; - - flags &= ~IXGBEVF_FLAGS_LEGACY_RX; - if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX) - flags |= IXGBEVF_FLAGS_LEGACY_RX; - - if (flags != adapter->flags) { - adapter->flags = flags; - - /* reset interface to repopulate queues */ - if (netif_running(netdev)) - ixgbevf_reinit_locked(adapter); - } - - return 0; -} - -static const struct ethtool_ops ixgbevf_ethtool_ops = { - .get_drvinfo = ixgbevf_get_drvinfo, - .get_regs_len = ixgbevf_get_regs_len, - .get_regs = ixgbevf_get_regs, - .nway_reset = ixgbevf_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = ixgbevf_get_ringparam, - .set_ringparam = ixgbevf_set_ringparam, - .get_msglevel = ixgbevf_get_msglevel, - .set_msglevel = ixgbevf_set_msglevel, - .self_test = ixgbevf_diag_test, - .get_sset_count = ixgbevf_get_sset_count, - .get_strings = ixgbevf_get_strings, - .get_ethtool_stats = ixgbevf_get_ethtool_stats, - .get_coalesce = ixgbevf_get_coalesce, - .set_coalesce = ixgbevf_set_coalesce, - .get_rxnfc = ixgbevf_get_rxnfc, - .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, - .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, - .get_rxfh = ixgbevf_get_rxfh, - .get_link_ksettings = ixgbevf_get_link_ksettings, - .get_priv_flags = ixgbevf_get_priv_flags, - .set_priv_flags = ixgbevf_set_priv_flags, -}; - -void ixgbevf_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &ixgbevf_ethtool_ops; -} diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c deleted file mode 100644 index 5170dd9d8705..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ /dev/null @@ -1,673 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ - -#include "ixgbevf.h" -#include -#include - -#define IXGBE_IPSEC_KEY_BITS 160 -static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; - -/** - * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA - * @adapter: board private structure - * @xs: xfrm info to be sent to the PF - * - * Returns: positive offload handle from the PF, or negative error code - **/ -static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, - struct xfrm_state *xs) -{ - u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 }; - struct ixgbe_hw *hw = &adapter->hw; - struct sa_mbx_msg *sam; - int ret; - - /* send the important bits to the PF */ - sam = (struct sa_mbx_msg *)(&msgbuf[1]); - sam->flags = xs->xso.flags; - sam->spi = xs->id.spi; - sam->proto = xs->id.proto; - sam->family = xs->props.family; - - if (xs->props.family == AF_INET6) - memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6)); - else - memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4)); - memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key)); - - msgbuf[0] = IXGBE_VF_IPSEC_ADD; - - spin_lock_bh(&adapter->mbx_lock); - - ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); - if (ret) - goto out; - - ret = hw->mbx.ops.read_posted(hw, msgbuf, 2); - if (ret) - goto out; - - ret = (int)msgbuf[1]; - if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0) - ret = -1; - -out: - spin_unlock_bh(&adapter->mbx_lock); - - return ret; -} - -/** - * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA - * @adapter: board private structure - * @pfsa: sa index returned from PF when created, -1 for all - * - * Returns: 0 on success, or negative error code - **/ -static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 msgbuf[2]; - int err; - - memset(msgbuf, 0, sizeof(msgbuf)); - msgbuf[0] = IXGBE_VF_IPSEC_DEL; - msgbuf[1] = (u32)pfsa; - - spin_lock_bh(&adapter->mbx_lock); - - err = hw->mbx.ops.write_posted(hw, msgbuf, 2); - if (err) - goto out; - - err = hw->mbx.ops.read_posted(hw, msgbuf, 2); - if (err) - goto out; - -out: - spin_unlock_bh(&adapter->mbx_lock); - return err; -} - -/** - * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset - * @adapter: board private structure - * - * Reload the HW tables from the SW tables after they've been bashed - * by a chip reset. While we're here, make sure any stale VF data is - * removed, since we go through reset when num_vfs changes. 
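- * Any failure to re-add an SA is reported via netdev_err() but does - * not stop the loop, so the remaining entries are still restored.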
- **/ -void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) -{ - struct ixgbevf_ipsec *ipsec = adapter->ipsec; - struct net_device *netdev = adapter->netdev; - int i; - - if (!(adapter->netdev->features & NETIF_F_HW_ESP)) - return; - - /* reload the Rx and Tx keys */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - struct rx_sa *r = &ipsec->rx_tbl[i]; - struct tx_sa *t = &ipsec->tx_tbl[i]; - int ret; - - if (r->used) { - ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs); - if (ret < 0) - netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n", - i, ret); - } - - if (t->used) { - ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs); - if (ret < 0) - netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n", - i, ret); - } - } -} - -/** - * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index - * @ipsec: pointer to IPsec struct - * @rxtable: true if we need to look in the Rx table - * - * Returns the first unused index in either the Rx or Tx SA table - **/ -static -int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable) -{ - u32 i; - - if (rxtable) { - if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) - return -ENOSPC; - - /* search rx sa table */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - if (!ipsec->rx_tbl[i].used) - return i; - } - } else { - if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) - return -ENOSPC; - - /* search tx sa table */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - if (!ipsec->tx_tbl[i].used) - return i; - } - } - - return -ENOSPC; -} - -/** - * ixgbevf_ipsec_find_rx_state - find the state that matches - * @ipsec: pointer to IPsec struct - * @daddr: inbound address to match - * @proto: protocol to match - * @spi: SPI to match - * @ip4: true if using an IPv4 address - * - * Returns a pointer to the matching SA state information - **/ -static -struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, - __be32 *daddr, u8 proto, - __be32 spi, bool ip4) -{ - struct xfrm_state *ret = NULL; - struct rx_sa *rsa; - - rcu_read_lock(); - hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, - (__force u32)spi) { - if (spi == rsa->xs->id.spi && - ((ip4 && *daddr == rsa->xs->id.daddr.a4) || - (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, - sizeof(rsa->xs->id.daddr.a6)))) && - proto == rsa->xs->id.proto) { - ret = rsa->xs; - xfrm_state_hold(ret); - break; - } - } - rcu_read_unlock(); - return ret; -} - -/** - * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol - * @xs: pointer to xfrm_state struct - * @mykey: pointer to key array to populate - * @mysalt: pointer to salt value to populate - * - * This copies the protocol keys and salt to our own data tables. The - * 82599 family only supports the one algorithm. 
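- * A key of exactly 160 bits is used with a zero salt, longer keys - * carry the 32-bit salt in bytes 16-19, and anything shorter is - * rejected.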
- **/ -static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, - u32 *mykey, u32 *mysalt) -{ - struct net_device *dev = xs->xso.dev; - unsigned char *key_data; - char *alg_name = NULL; - int key_len; - - if (!xs->aead) { - netdev_err(dev, "Unsupported IPsec algorithm\n"); - return -EINVAL; - } - - if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { - netdev_err(dev, "IPsec offload requires %d bit authentication\n", - IXGBE_IPSEC_AUTH_BITS); - return -EINVAL; - } - - key_data = &xs->aead->alg_key[0]; - key_len = xs->aead->alg_key_len; - alg_name = xs->aead->alg_name; - - if (strcmp(alg_name, aes_gcm_name)) { - netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", - aes_gcm_name); - return -EINVAL; - } - - /* The key bytes come down in a big endian array of bytes, so - * we don't need to do any byte swapping. - * 160 accounts for 16 byte key and 4 byte salt - */ - if (key_len > IXGBE_IPSEC_KEY_BITS) { - *mysalt = ((u32 *)key_data)[4]; - } else if (key_len == IXGBE_IPSEC_KEY_BITS) { - *mysalt = 0; - } else { - netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); - return -EINVAL; - } - memcpy(mykey, key_data, 16); - - return 0; -} - -/** - * ixgbevf_ipsec_add_sa - program device with a security association - * @xs: pointer to transformer state struct - **/ -static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) -{ - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; - u16 sa_idx; - int ret; - - if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { - netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", - xs->id.proto); - return -EINVAL; - } - - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { - struct rx_sa rsa; - - if (xs->calg) { - netdev_err(dev, "Compression offload not supported\n"); - return -EINVAL; - } - - /* find the first unused index */ - ret = ixgbevf_ipsec_find_empty_idx(ipsec, true); - if (ret < 0) { - netdev_err(dev, "No space for SA in Rx table!\n"); - return ret; - } - sa_idx = (u16)ret; - - memset(&rsa, 0, sizeof(rsa)); - rsa.used = true; - rsa.xs = xs; - - if (rsa.xs->id.proto & IPPROTO_ESP) - rsa.decrypt = xs->ealg || xs->aead; - - /* get the key and salt */ - ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); - if (ret) { - netdev_err(dev, "Failed to get key data for Rx SA table\n"); - return ret; - } - - /* get ip for rx sa table */ - if (xs->props.family == AF_INET6) - memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); - else - memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); - - rsa.mode = IXGBE_RXMOD_VALID; - if (rsa.xs->id.proto & IPPROTO_ESP) - rsa.mode |= IXGBE_RXMOD_PROTO_ESP; - if (rsa.decrypt) - rsa.mode |= IXGBE_RXMOD_DECRYPT; - if (rsa.xs->props.family == AF_INET6) - rsa.mode |= IXGBE_RXMOD_IPV6; - - ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); - if (ret < 0) - return ret; - rsa.pfsa = ret; - - /* the preparations worked, so save the info */ - memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); - - xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; - - ipsec->num_rx_sa++; - - /* hash the new entry for faster search in Rx path */ - hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, - (__force u32)rsa.xs->id.spi); - } else { - struct tx_sa tsa; - - /* find the first unused index */ - ret = ixgbevf_ipsec_find_empty_idx(ipsec, false); - if (ret < 0) { - netdev_err(dev, "No space for SA in Tx table\n"); - return ret; - } - sa_idx = (u16)ret; - - memset(&tsa, 0, sizeof(tsa)); - 
tsa.used = true; - tsa.xs = xs; - - if (xs->id.proto & IPPROTO_ESP) - tsa.encrypt = xs->ealg || xs->aead; - - ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); - if (ret) { - netdev_err(dev, "Failed to get key data for Tx SA table\n"); - memset(&tsa, 0, sizeof(tsa)); - return ret; - } - - ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); - if (ret < 0) - return ret; - tsa.pfsa = ret; - - /* the preparations worked, so save the info */ - memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); - - xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; - - ipsec->num_tx_sa++; - } - - return 0; -} - -/** - * ixgbevf_ipsec_del_sa - clear out this specific SA - * @xs: pointer to transformer state struct - **/ -static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) -{ - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; - u16 sa_idx; - - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { - sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; - - if (!ipsec->rx_tbl[sa_idx].used) { - netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", - sa_idx, xs->xso.offload_handle); - return; - } - - ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa); - hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist); - memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa)); - ipsec->num_rx_sa--; - } else { - sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; - - if (!ipsec->tx_tbl[sa_idx].used) { - netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", - sa_idx, xs->xso.offload_handle); - return; - } - - ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa); - memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); - ipsec->num_tx_sa--; - } -} - -/** - * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload - * @skb: current data packet - * @xs: pointer to transformer state struct - **/ -static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) -{ - if (xs->props.family == AF_INET) { - /* Offload with IPv4 options is not supported yet */ - if (ip_hdr(skb)->ihl != 5) - return false; - } else { - /* Offload with IPv6 extension headers is not supported yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - - return true; -} - -static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { - .xdo_dev_state_add = ixgbevf_ipsec_add_sa, - .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, - .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, -}; - -/** - * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload - * @tx_ring: outgoing context - * @first: current data packet - * @itd: ipsec Tx data for later use in building context descriptor - **/ -int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first, - struct ixgbevf_ipsec_tx_data *itd) -{ - struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; - struct xfrm_state *xs; - struct sec_path *sp; - struct tx_sa *tsa; - u16 sa_idx; - - sp = skb_sec_path(first->skb); - if (unlikely(!sp->len)) { - netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", - __func__, sp->len); - return 0; - } - - xs = xfrm_input_state(first->skb); - if (unlikely(!xs)) { - netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", - __func__, xs); - return 0; - } - - sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; - if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { - netdev_err(tx_ring->netdev, "%s: bad 
sa_idx=%d handle=%lu\n", - __func__, sa_idx, xs->xso.offload_handle); - return 0; - } - - tsa = &ipsec->tx_tbl[sa_idx]; - if (unlikely(!tsa->used)) { - netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", - __func__, sa_idx); - return 0; - } - - itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX; - - first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM; - - if (xs->id.proto == IPPROTO_ESP) { - itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | - IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) - itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; - - /* The actual trailer length is authlen (16 bytes) plus - * 2 bytes for the proto and the padlen values, plus - * padlen bytes of padding. This ends up different - * from the static value found in xs->props.trailer_len (21). - * - * ... but if we're doing GSO, don't bother as the stack - * doesn't add a trailer for those. - */ - if (!skb_is_gso(first->skb)) { - /* The "correct" way to get the auth length would be - * to use - * authlen = crypto_aead_authsize(xs->data); - * but since we know we only have one size to worry - * about, we can let the compiler use the constant - * and save us a few CPU cycles. - */ - const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; - struct sk_buff *skb = first->skb; - u8 padlen; - int ret; - - ret = skb_copy_bits(skb, skb->len - (authlen + 2), - &padlen, 1); - if (unlikely(ret)) - return 0; - itd->trailer_len = authlen + 2 + padlen; - } - } - if (tsa->encrypt) - itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; - - return 1; -} - -/** - * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor - * @rx_ring: receiving ring - * @rx_desc: receive data descriptor - * @skb: current data packet - * - * Determine whether an IPsec encapsulation was noticed, and if so set up - * the resulting status for later in the receive stack. - **/ -void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev); - __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | - IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; - struct xfrm_offload *xo = NULL; - struct xfrm_state *xs = NULL; - struct ipv6hdr *ip6 = NULL; - struct iphdr *ip4 = NULL; - struct sec_path *sp; - void *daddr; - __be32 spi; - u8 *c_hdr; - u8 proto; - - /* Find the IP and crypto headers in the data. - * We can assume no VLAN header in the way, b/c the - * hw won't recognize the IPsec packet and anyway the - * VLAN device currently doesn't support xfrm offload. 
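- * That lets us find the crypto header with simple arithmetic from - * skb->data instead of walking a chain of extension headers.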
- */ - if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { - ip4 = (struct iphdr *)(skb->data + ETH_HLEN); - daddr = &ip4->daddr; - c_hdr = (u8 *)ip4 + ip4->ihl * 4; - } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { - ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); - daddr = &ip6->daddr; - c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); - } else { - return; - } - - switch (pkt_info & ipsec_pkt_types) { - case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): - spi = ((struct ip_auth_hdr *)c_hdr)->spi; - proto = IPPROTO_AH; - break; - case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): - spi = ((struct ip_esp_hdr *)c_hdr)->spi; - proto = IPPROTO_ESP; - break; - default: - return; - } - - xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); - if (unlikely(!xs)) - return; - - sp = secpath_set(skb); - if (unlikely(!sp)) - return; - - sp->xvec[sp->len++] = xs; - sp->olen++; - xo = xfrm_offload(skb); - xo->flags = CRYPTO_DONE; - xo->status = CRYPTO_SUCCESS; - - adapter->rx_ipsec++; -} - -/** - * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation - * @adapter: board private structure - **/ -void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) -{ - struct ixgbevf_ipsec *ipsec; - size_t size; - - switch (adapter->hw.api_version) { - case ixgbe_mbox_api_14: - break; - default: - return; - } - - ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); - if (!ipsec) - goto err1; - hash_init(ipsec->rx_sa_list); - - size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; - ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); - if (!ipsec->rx_tbl) - goto err2; - - size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; - ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); - if (!ipsec->tx_tbl) - goto err2; - - ipsec->num_rx_sa = 0; - ipsec->num_tx_sa = 0; - - adapter->ipsec = ipsec; - - adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops; - -#define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \ - NETIF_F_HW_ESP_TX_CSUM | \ - NETIF_F_GSO_ESP) - - adapter->netdev->features |= IXGBEVF_ESP_FEATURES; - adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES; - - return; - -err2: - kfree(ipsec->rx_tbl); - kfree(ipsec->tx_tbl); - kfree(ipsec); -err1: - netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); -} - -/** - * ixgbevf_stop_ipsec_offload - tear down the IPsec offload - * @adapter: board private structure - **/ -void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) -{ - struct ixgbevf_ipsec *ipsec = adapter->ipsec; - - adapter->ipsec = NULL; - if (ipsec) { - kfree(ipsec->rx_tbl); - kfree(ipsec->tx_tbl); - kfree(ipsec); - } -} diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h deleted file mode 100644 index 3740725041c3..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ - -#ifndef _IXGBEVF_IPSEC_H_ -#define _IXGBEVF_IPSEC_H_ - -#define IXGBE_IPSEC_MAX_SA_COUNT 1024 -#define IXGBE_IPSEC_BASE_RX_INDEX 0 -#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT -#define IXGBE_IPSEC_AUTH_BITS 128 - -#define IXGBE_RXMOD_VALID 0x00000001 -#define IXGBE_RXMOD_PROTO_ESP 0x00000004 -#define IXGBE_RXMOD_DECRYPT 0x00000008 -#define IXGBE_RXMOD_IPV6 0x00000010 - -struct rx_sa { - struct hlist_node hlist; - struct xfrm_state *xs; - __be32 ipaddr[4]; - u32 key[4]; - u32 salt; - u32 mode; - u32 pfsa; - bool used; - bool decrypt; -}; - -struct rx_ip_sa { - __be32 ipaddr[4]; - u32 ref_cnt; - bool used; -}; - -struct tx_sa { - struct xfrm_state *xs; - u32 key[4]; - u32 salt; - u32 pfsa; - bool encrypt; - bool used; -}; - -struct ixgbevf_ipsec_tx_data { - u32 flags; - u16 trailer_len; - u16 pfsa; -}; - -struct ixgbevf_ipsec { - u16 num_rx_sa; - u16 num_tx_sa; - struct rx_sa *rx_tbl; - struct tx_sa *tx_tbl; - DECLARE_HASHTABLE(rx_sa_list, 10); -}; - -struct sa_mbx_msg { - __be32 spi; - u8 flags; - u8 proto; - u16 family; - __be32 addr[4]; - u32 key[5]; -}; -#endif /* _IXGBEVF_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbevf/ixgbe_common.h new file mode 100644 index 000000000000..205cbce3ae1d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_common.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" + +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); + +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +void ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct 
ixgbe_hw *hw, u32 ee_reg); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func, bool clear); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); + +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); + +s32 ixgbe_validate_mac_addr(u8 *mac_addr); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); + +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); +s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vind); + +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); + +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver, u16 len, const char *str); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout); +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *); +s32 
ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]); +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + +extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); +bool ixgbe_mng_present(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +#endif /* _IXGBE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_hv_vf.c b/drivers/net/ethernet/intel/ixgbevf/ixgbe_hv_vf.c new file mode 100644 index 000000000000..26b3dbdd39e9 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_hv_vf.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#include "ixgbe_hv_vf.h" + +/** + * ixgbevf_hv_update_mc_addr_list_vf - Hyper-V variant - just a stub. + * @hw: unused + * @mc_addr_list: unused + * @mc_addr_count: unused + * @next: unused + * @clear: unused + */ +s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * ixgbevf_hv_update_xcast_mode - Hyper-V variant - just a stub. + * @hw: unused + * @xcast_mode: unused + */ +s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) +{ + UNREFERENCED_2PARAMETER(hw, xcast_mode); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub. + * @hw: unused + * @vlan: unused + * @vind: unused + * @vlan_on: unused + * @vlvf_bypass: unused + */ +s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + UNREFERENCED_3PARAMETER(hw, index, addr); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * ixgbevf_hv_check_mac_link_vf - Hyper-V variant; there is no mailbox communication. 
+ * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true if link is up, false otherwise + * @autoneg_wait_to_complete: unused + * + */ +s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + u32 links_reg; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* If we were hit with a reset, drop the link */ + if (!mbx->ops[0].check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down, there is no point in checking whether the PF is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Reserved for pre-x550 devices */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return 0; +} + +/** + * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + * Hyper-V variant. + **/ +s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 reg; + + /* If we are on Hyper-V, we implement this functionality + * differently. + */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); + /* CRC == 4 */ + reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + + return 0; +} + +/** + * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + * Hyper-V version - only ixgbe_mbox_api_10 supported. + **/ +int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) +{ + UNREFERENCED_1PARAMETER(hw); + + /* Hyper-V only supports api version ixgbe_mbox_api_10 */ + if (api != ixgbe_mbox_api_10) + return IXGBE_ERR_INVALID_ARGUMENT; + + return 0; +} + +/** + * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers; adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. 
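+ * The generic VF ops are installed first; only the entry points that + * would require PF mailbox traffic are then replaced with Hyper-V + * safe versions.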
+ **/ +s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw) +{ + /* Set defaults for VF then override applicable Hyper-V + * specific functions + */ + ixgbe_init_ops_vf(hw); + + hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf; + hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf; + hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf; + hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf; + hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf; + hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode; + hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf; + hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf; + hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf; + + return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_hv_vf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbe_hv_vf.h new file mode 100644 index 000000000000..9458851b3060 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_hv_vf.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_HV_VF_H_ +#define _IXGBE_HV_VF_H_ + +/* On Hyper-V, to reset, we need to read from this offset + * from the PCI config space. This is the mechanism used on + * Hyper-V to support PF/VF communication. + */ +#define IXGBE_HV_RESET_OFFSET 0x201 + +#include "ixgbe_vf.h" + +s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw); +s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete); +s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); +s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr, + bool clear); +s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode); +s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass); +s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api); + +extern s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw); +extern s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq, u32 enable_addr); +#endif /* _IXGBE_HV_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbevf/ixgbe_mbx.c new file mode 100644 index 000000000000..37e10a41e5ad --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_mbx.c @@ -0,0 +1,1045 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + +#include "ixgbe_type.h" +#include "ixgbe_mbx.h" + +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id); +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id); + +/** + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* limit read to size of mailbox */ + if (size > mbx->size) { + ERROR_REPORT3(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %u, changing to %u", + size, mbx->size); + size = mbx->size; + } + + if (mbx->ops[mbx_id].read) + return mbx->ops[mbx_id].read(hw, msg, size, mbx_id); + + return IXGBE_ERR_CONFIG; +} + +/** + * ixgbe_poll_mbx - Wait for message and read it from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + if (!mbx->ops[mbx_id].read || !mbx->ops[mbx_id].check_for_msg || + !mbx->timeout) + return IXGBE_ERR_CONFIG; + + /* limit read to size of mailbox */ + if (size > mbx->size) { + ERROR_REPORT3(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %u, changing to %u", + size, mbx->size); + size = mbx->size; + } + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + /* if a message was received read it, otherwise we timed out */ + if (!ret_val) + return mbx->ops[mbx_id].read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox and wait for ACK + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ACK to that message within the specified period + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + /* + * exit if we can't write or release, + * or if there is no timeout defined + */ + if (!mbx->ops[mbx_id].write || !mbx->ops[mbx_id].check_for_ack || + !mbx->ops[mbx_id].release || !mbx->timeout) + return IXGBE_ERR_CONFIG; + + if (size > mbx->size) { + ret_val = IXGBE_ERR_PARAM; + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %u", size); + } else { + ret_val = mbx->ops[mbx_id].write(hw, msg, size, mbx_id); + } + + return ret_val; +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops[mbx_id].check_for_msg) + return IXGBE_ERR_CONFIG; + + while (countdown && mbx->ops[mbx_id].check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) { + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%u mailbox message timed out", mbx_id); + return IXGBE_ERR_TIMEOUT; + } + + return 0; +} + +/** + * ixgbe_poll_for_ack -
Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops[mbx_id].check_for_ack) + return IXGBE_ERR_CONFIG; + + while (countdown && mbx->ops[mbx_id].check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) { + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%u mailbox ack timed out", mbx_id); + return IXGBE_ERR_TIMEOUT; + } + + return 0; +} + +/** + * ixgbe_read_mailbox_vf - read VF's mailbox register + * @hw: pointer to the HW structure + * + * This function is used to read the mailbox register dedicated to the VF + * without losing the read-to-clear status bits. + **/ +static u32 ixgbe_read_mailbox_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + vf_mailbox |= hw->mbx.vf_mailbox; + hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return vf_mailbox; +} + +static void ixgbe_clear_msg_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); + + if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) { + hw->mbx.stats.reqs++; + hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS; + } +} + +static void ixgbe_clear_ack_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); + + if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) { + hw->mbx.stats.acks++; + hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK; + } +} + +static void ixgbe_clear_rst_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); + + if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) { + hw->mbx.stats.rsts++; + hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI | + IXGBE_VFMAILBOX_RSTD); + } +} + +/** + * ixgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read-to-clear bits within + * the V2P mailbox. + **/ +static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 vf_mailbox = ixgbe_read_mailbox_vf(hw); + + if (vf_mailbox & mask) + return 0; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_1PARAMETER(mbx_id); + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) + return 0; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_1PARAMETER(mbx_id); + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + /* TODO: should this be autocleared?
*/ + ixgbe_clear_ack_vf(hw); + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set a reset bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_1PARAMETER(mbx_id); + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI | + IXGBE_VFMAILBOX_RSTD)) { + /* TODO: should this be autocleared? */ + ixgbe_clear_rst_vf(hw); + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * returns SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + s32 ret_val = IXGBE_ERR_MBX; + u32 vf_mailbox; + + if (!mbx->timeout) + return IXGBE_ERR_CONFIG; + + while (countdown--) { + /* Reserve mailbox for VF use */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_VFU; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* Verify that VF is the owner of the lock */ + if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) { + ret_val = 0; + break; + } + + /* Wait a bit before trying again */ + udelay(mbx->udelay); + } + + if (ret_val != 0) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Failed to obtain mailbox lock"); + ret_val = IXGBE_ERR_TIMEOUT; + } + + return ret_val; +} + +/** + * ixgbe_release_mbx_lock_dummy - release mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to read + **/ +static void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, u16 mbx_id) +{ + UNREFERENCED_2PARAMETER(hw, mbx_id); +} + +/** + * ixgbe_release_mbx_lock_vf - release mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to read + **/ +static void ixgbe_release_mbx_lock_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + u32 vf_mailbox; + + UNREFERENCED_1PARAMETER(mbx_id); + + /* Return ownership of the buffer */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox &= ~IXGBE_VFMAILBOX_VFU; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); +} + +/** + * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + return ret_val; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_vf(hw, 0); + ixgbe_clear_msg_vf(hw); + ixgbe_check_for_ack_vf(hw, 0); + ixgbe_clear_ack_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + + return 0; +} + +/** + * ixgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + *
returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + u32 vf_mailbox; + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_clear_msg_vf(hw); + ixgbe_clear_ack_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* interrupt the PF to tell it a message has been sent */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_REQ; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* if msg sent wait until we receive an ack */ + ixgbe_poll_for_ack(hw, mbx_id); + +out: + hw->mbx.ops[mbx_id].release(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_read_mbx_vf_legacy - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +static s32 ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + return ret_val; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return 0; +} + +/** + * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + u32 vf_mailbox; + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + /* check if there is a message from PF */ + ret_val = ixgbe_check_for_msg_vf(hw, 0); + if (ret_val != 0) + return IXGBE_ERR_MBX_NOMSG; + + ixgbe_clear_msg_vf(hw); + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt */ + vf_mailbox = ixgbe_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_ACK; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return 0; +} + +/** + * ixgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes a single set of the hw->mbx struct to correct values for the vf + * mailbox. The set of legacy functions is being used here. + */ +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + /* VF has only one mailbox connection, no need
for more IDs */ + mbx->ops[0].release = ixgbe_release_mbx_lock_dummy; + mbx->ops[0].read = ixgbe_read_mbx_vf_legacy; + mbx->ops[0].write = ixgbe_write_mbx_vf_legacy; + mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf; + mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf; + mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf; + mbx->ops[0].clear = NULL; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +/** + * ixgbe_upgrade_mbx_params_vf - upgrade values for vf mailbox + * @hw: pointer to the HW structure + * + * Re-initializes the hw->mbx struct with the upgraded set of vf mailbox functions + */ +void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + /* VF has only one mailbox connection, no need for more IDs */ + mbx->ops[0].release = ixgbe_release_mbx_lock_vf; + mbx->ops[0].read = ixgbe_read_mbx_vf; + mbx->ops[0].write = ixgbe_write_mbx_vf; + mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf; + mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf; + mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf; + mbx->ops[0].clear = NULL; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +static void ixgbe_clear_msg_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id); + s32 index = IXGBE_PFMBICR_INDEX(vf_id); + u32 pfmbicr; + + pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)); + + if (pfmbicr & (IXGBE_PFMBICR_VFREQ_VF1 << vf_shift)) + hw->mbx.stats.reqs++; + + IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index), + IXGBE_PFMBICR_VFREQ_VF1 << vf_shift); +} + +static void ixgbe_clear_ack_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id); + s32 index = IXGBE_PFMBICR_INDEX(vf_id); + u32 pfmbicr; + + pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)); + + if (pfmbicr & (IXGBE_PFMBICR_VFACK_VF1 << vf_shift)) + hw->mbx.stats.acks++; + + IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index), + IXGBE_PFMBICR_VFACK_VF1 << vf_shift); +} + +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)); + + if (pfmbicr & mask) + return 0; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * returns SUCCESS if the VF has set the Request bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id); + s32 index = IXGBE_PFMBICR_INDEX(vf_id); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift, + index)) + return 0; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id); + s32 index = IXGBE_PFMBICR_INDEX(vf_id); + s32 ret_val = IXGBE_ERR_MBX; + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift, + index)) { + ret_val = 0; + /* TODO: should this be autocleared?
*/ + ixgbe_clear_ack_pf(hw, vf_id); + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * returns SUCCESS if a VFLR (reset) event was detected for the VF or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u32 vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id); + u32 index = IXGBE_PFVFLRE_INDEX(vf_id); + s32 ret_val = IXGBE_ERR_MBX; + u32 vflre = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index)); + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index)); + break; + default: + break; + } + + if (vflre & (1 << vf_shift)) { + ret_val = 0; + IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * returns SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + s32 ret_val = IXGBE_ERR_MBX; + u32 pf_mailbox; + + if (!mbx->timeout) + return IXGBE_ERR_CONFIG; + + while (countdown--) { + /* Reserve mailbox for PF use */ + pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)); + pf_mailbox |= IXGBE_PFMAILBOX_PFU; + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox); + + /* Verify that PF is the owner of the lock */ + pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)); + if (pf_mailbox & IXGBE_PFMAILBOX_PFU) { + ret_val = 0; + break; + } + + /* Wait a bit before trying again */ + udelay(mbx->udelay); + } + + if (ret_val != 0) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Failed to obtain mailbox lock"); + ret_val = IXGBE_ERR_TIMEOUT; + } + + return ret_val; +} + +/** + * ixgbe_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_id: the VF index + **/ +static void ixgbe_release_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u32 pf_mailbox; + + /* Return ownership of the buffer */ + pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)); + pf_mailbox &= ~IXGBE_PFMAILBOX_PFU; + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox); +} + +/** + * ixgbe_write_mbx_pf_legacy - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_id) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id); + if (ret_val) + return ret_val; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_id); + ixgbe_clear_msg_pf(hw, vf_id); + ixgbe_check_for_ack_pf(hw, vf_id); + ixgbe_clear_ack_pf(hw, vf_id); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + return 0; +} + +/** + *
ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_id) +{ + u32 pf_mailbox; + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id); + if (ret_val) + goto out; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_clear_msg_pf(hw, vf_id); + ixgbe_clear_ack_pf(hw, vf_id); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent */ + pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)); + pf_mailbox |= IXGBE_PFMAILBOX_STS; + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox); + + /* if msg sent wait until we receive an ack */ + ixgbe_poll_for_ack(hw, vf_id); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out: + hw->mbx.ops[vf_id].release(hw, vf_id); + + return ret_val; + +} + +/** + * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +static s32 ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_id) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id); + if (ret_val != 0) + return ret_val; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return 0; +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_id: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_id) +{ + u32 pf_mailbox; + s32 ret_val; + u16 i; + + /* check if there is a message from VF */ + ret_val = ixgbe_check_for_msg_pf(hw, vf_id); + if (ret_val != 0) + return IXGBE_ERR_MBX_NOMSG; + + ixgbe_clear_msg_pf(hw, vf_id); + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i); + + /* Acknowledge the message and release buffer */ + pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)); + pf_mailbox |= IXGBE_PFMAILBOX_ACK; + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return 0; +} + +/** + * ixgbe_clear_mbx_pf - Clear Mailbox Memory + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * Set VFMBMEM of given VF to 0x0. 
+ **/ +static s32 ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + u16 mbx_size = hw->mbx.size; + u16 i; + + if (vf_id > 63) + return IXGBE_ERR_PARAM; + + for (i = 0; i < mbx_size; ++i) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0); + + return 0; +} + +/** + * ixgbe_init_mbx_params_pf_id - set initial values for pf mailbox + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * Initializes a single set of the hw->mbx struct to correct values for the pf + * mailbox. The set of legacy functions is being used here. + */ +void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + mbx->ops[vf_id].release = ixgbe_release_mbx_lock_dummy; + mbx->ops[vf_id].read = ixgbe_read_mbx_pf_legacy; + mbx->ops[vf_id].write = ixgbe_write_mbx_pf_legacy; + mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf; + mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf; + mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf; + mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf; +} + +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes all sets of the hw->mbx struct to correct values for pf + * mailbox. One set corresponds to a single VF. It also initializes counters + * and general variables. A set of legacy functions is used by default. + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + u16 i; + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* Ensure we are not calling this function from VF */ + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a && + hw->mac.type != ixgbe_mac_X540) + return; + + /* Initialize common mailbox settings */ + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + mbx->size = IXGBE_VFMAILBOX_SIZE; + + /* Initialize counters with zeroes */ + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + /* Regardless of the VF count, we initialize params for all 64 VFs. */ + /* TODO: 1. Add a define for max VF and refactor SHARED to get rid + * of magic number for that (63 or 64 depending on use case.) + * 2. rewrite the code to dynamically allocate mbx->ops[vf_id] for + * a certain number of VFs instead of default maximum value of 64 (0..63) + */ + for (i = 0; i < 64; i++) + ixgbe_init_mbx_params_pf_id(hw, i); +} + +/** + * ixgbe_upgrade_mbx_params_pf - Upgrade initial values for pf mailbox + * @hw: pointer to the HW structure + * @vf_id: the VF index + * + * Initializes the hw->mbx struct to the new function set for improved + * stability and handling of messages.
+ */ +void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* Ensure we are not calling this function from VF */ + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops[vf_id].release = ixgbe_release_mbx_lock_pf; + mbx->ops[vf_id].read = ixgbe_read_mbx_pf; + mbx->ops[vf_id].write = ixgbe_write_mbx_pf; + mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf; + mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf; + mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf; + mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbevf/ixgbe_mbx.h new file mode 100644 index 000000000000..6677f29d8735 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_mbx.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +struct ixgbe_mbx_operations { + void (*init_params)(struct ixgbe_hw *hw); + void (*release)(struct ixgbe_hw *hw, u16 mbx_id); + s32 (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number); + s32 (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number); + s32 (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number); + s32 (*clear)(struct ixgbe_hw *hw, u16 vf_number); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + /* + * PF: One set of operations for each VF to handle various API versions + * at the same time + * VF: Only the very first (0) set should be used + */ + struct ixgbe_mbx_operations ops[64]; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 vf_mailbox; + u16 size; +}; + +#include "ixgbe_vf.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + 
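For context, a minimal illustrative sketch of how callers drive the lock-and-notify handshake implemented by the REQ/ACK/VFU/PFU bits above. This is not part of the patch: the example_* helpers are hypothetical, and the IXGBE_VF_API_NEGOTIATE / IXGBE_VT_MSGTYPE_* constants they reference are defined a few lines below in this header.

/* Illustrative sketch only: VF side. Request a mailbox API version and
 * check the PF's reply; the reply echoes the request word or'd with an
 * IXGBE_VT_MSGTYPE_* result flag.
 */
static s32 example_vf_negotiate_api(struct ixgbe_hw *hw, int api)
{
	u32 msg[3] = { IXGBE_VF_API_NEGOTIATE, (u32)api, 0 };
	s32 ret_val;

	/* the VF always talks through mailbox id 0 */
	ret_val = ixgbe_write_mbx(hw, msg, 3, 0);
	if (!ret_val)
		ret_val = ixgbe_poll_mbx(hw, msg, 3, 0);
	if (ret_val)
		return ret_val;

	msg[0] &= ~IXGBE_VT_MSGINFO_MASK;
	if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS))
		return 0;

	return IXGBE_ERR_MBX;
}

/* Illustrative sketch only: PF side. Poll one VF's mailbox through the
 * per-VF ops set and read a pending request; dispatching on the low 16
 * bits of msg[0] is left out.
 */
static void example_pf_service_vf(struct ixgbe_hw *hw, u16 vf_id)
{
	u32 msg[IXGBE_VFMAILBOX_SIZE];

	if (hw->mbx.ops[vf_id].check_for_msg(hw, vf_id))
		return; /* nothing pending from this VF */

	/* ixgbe_read_mbx() bounds the size and calls ops[vf_id].read() */
	if (!ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf_id)) {
		/* handle the request, then reply via ixgbe_write_mbx() */
	}
}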
+#define IXGBE_PFMBICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_PFMBICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_PFMBICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_PFMBICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. + * Message results are the request value or'd with one of the result flags + * in the 0xF0000000 nibble + */ +#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this + * have succeeded + */ +#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this + * have failed + */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests + */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + /* API 1.4 is being used in the upstream for IPsec */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ + ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw); +void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id); +void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id); + +#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbevf/ixgbe_type.h new file mode 100644 index 000000000000..78db8895ff01 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_type.h @@ -0,0 +1,4038 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - IXGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - IXGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where a timeout occurred, a lock could not be obtained, or + * data was not received within the time limit. + * + * - IXGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - IXGBE_ERROR_SOFTWARE + * This category is intended for errors where software state prevents an + * operation. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - IXGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - IXGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */ + +#include "ixgbevf_osdep.h" + +/* Override this by setting __iomem in your ixgbe_osdep.h header */ + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_82599_VF_HV 0x152E +#define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X540_VF_HV 0x1530 +#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 +/* Placeholder value, pending official value. 
*/ +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 +#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA +#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 +#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + +#define IXGBE_CAT(r,m) IXGBE_##r##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL_82599 0x00028 +#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) +#define IXGBE_PHY_GPIO 0x00028 +#define IXGBE_MAC_GPIO 0x00030 +#define IXGBE_PHYINT_STATUS0 0x00100 +#define IXGBE_PHYINT_STATUS1 0x00104 +#define IXGBE_PHYINT_STATUS2 0x00108 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC +#define IXGBE_EEC_X550 IXGBE_EEC +#define IXGBE_EEC_X550EM_x IXGBE_EEC +#define IXGBE_EEC_X550EM_a 0x15FF8 +#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) + +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 + +#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA +#define IXGBE_FLA_X550 IXGBE_FLA +#define IXGBE_FLA_X550EM_x IXGBE_FLA +#define IXGBE_FLA_X550EM_a 0x15F68 +#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) +#define IXGBE_FLA_FL_SIZE_SHIFT_X540 17 +#define IXGBE_FLA_FL_SIZE_SHIFT_X550 12 +#define IXGBE_FLA_FL_SIZE_MASK_X540 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X540) +#define IXGBE_FLA_FL_SIZE_MASK_X550 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X550) + +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C + +#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC +#define IXGBE_GRC_X550 IXGBE_GRC +#define IXGBE_GRC_X550EM_x IXGBE_GRC +#define IXGBE_GRC_X550EM_a 0x15F64 +#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) + +#define IXGBE_PHYDBG 0x10218 + +/* General Receive Control */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM 
*/ + +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) + +#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) +#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + +#define NVM_OROM_OFFSET 0x17 +#define NVM_OROM_BLK_LOW 0x83 +#define NVM_OROM_BLK_HI 0x84 +#define NVM_OROM_PATCH_MASK 0xFF +#define NVM_OROM_SHIFT 8 + +#define NVM_VER_MASK 0x00FF /* version mask */ +#define NVM_VER_SHIFT 8 /* version bit shift */ +#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ +#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ +#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ +#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ +#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ +#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ +#define NVM_ETK_OFF_LOW 0x2D /* version low order word */ +#define NVM_ETK_OFF_HI 0x2E /* version high order word */ +#define NVM_ETK_SHIFT 16 /* high version word shift */ +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETK_VALID 0x8000 +#define NVM_INVALID_PTR 0xFFFF +#define NVM_VER_SIZE 32 /* version string size */ + +struct ixgbe_nvm_version { + u32 etk_id; + u8 nvm_major; + u16 nvm_minor; + u8
nvm_id; + + bool oem_valid; + u8 oem_major; + u8 oem_minor; + u16 oem_release; + + bool or_valid; + u8 or_major; + u16 or_build; + u8 or_patch; + +}; + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* 82599 EITR is only 12 bits, with the lower 3 always zero */ +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware + */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +/* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 +#define IXGBE_RXPBSIZE_MASK 0x000FFC00 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +#define IXGBE_DRECCCTL2 0x02F8C + +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +/* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) +#define IXGBE_PFMBICR_INDEX(_i) ((_i) >> 4) +#define IXGBE_PFMBICR_SHIFT(_i) ((_i) % 16) +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFVFLRE(_i) ((((_i) & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_PFVFLREC(_i) (0x00700 + ((_i) * 4)) +#define IXGBE_PFVFLRE_INDEX(_i) ((_i) >> 5) +#define IXGBE_PFVFLRE_SHIFT(_i) ((_i) % 32) +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_LVMMC_RX 0x2FA8 +#define IXGBE_LVMMC_TX 0x8108 +#define IXGBE_LMVM_RX 0x2FA4 +#define IXGBE_LMVM_TX 0x8124 +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define 
IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 +#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 +/* 16 of these (0-15) */ +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ +#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ +#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ + +/* masks for accessing VXLAN and GENEVE UDP ports */ +#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ +#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 + +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +/* Ext Flexible Host Filter Table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) +#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) + +/* Four Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +/* Six Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 +/* Eight Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 
/* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ +#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */ +/* Mask for Ext. flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 +#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS +#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK +/* Proxy Status */ +#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ +#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */ +#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ +#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ +#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ +#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ + +/* Proxying Filter Control */ +#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ +#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ +#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ +#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */ +#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ +#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */ + +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define 
IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Power Management */ +/* DMA Coalescing configuration */ +struct ixgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* + * DMA Coalescing threshold Rx PB TC[n] value in Kilobytes by link speed. + * At 10Gbps: 10,000 bits / usec = 1250 bytes / usec; 70 usec * 1250 + * bytes / usec == 87500 bytes [~85KB], hence IXGBE_DMACRXT_10G = 0x55 (85). + */ +#define IXGBE_DMACRXT_10G 0x55 +#define IXGBE_DMACRXT_1G 0x09 +#define IXGBE_DMACRXT_100M 0x01 + +/* DMA Coalescing registers */ +#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ +#define IXGBE_DMACR 0x02400 /* Control register */ +#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ +/* DMA Coalescing register fields */ +#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ +#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ +#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ +#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 +#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 +#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ +#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ +#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ +#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ +#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ + +/* EEE registers */ +#define IXGBE_EEER 0x043A0 /* EEE register */ +#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ +#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ +#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 +#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ +#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ + +/* EEE register fields */ +#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ +#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ +#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ +#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ +#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
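For illustration only (not part of this patch): the DMA coalescing registers and masks above are applied with plain register writes. A minimal sketch in C, assuming the driver's usual IXGBE_READ_REG()/IXGBE_WRITE_REG() accessors and a caller-populated struct ixgbe_dmac_config; the helper name ixgbe_dmac_sketch() and the choice of the 10G threshold are assumptions, not code from the patch.

/* Hypothetical helper: program per-TC Rx thresholds and the watchdog
 * timer, then set the coalescing enable bit.
 */
static void ixgbe_dmac_sketch(struct ixgbe_hw *hw,
			      struct ixgbe_dmac_config *cfg)
{
	u32 dmacr;
	u8 tc;

	/* Per-TC Rx packet-buffer threshold in KB (here the 10G value) */
	for (tc = 0; tc < cfg->num_tcs; tc++)
		IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc),
				IXGBE_DMACRXT_10G & IXGBE_DMCTH_DMACRXT_MASK);

	/* Watchdog timer (usec) occupies the low 16 bits of DMACR */
	dmacr = cfg->watchdog_timer & IXGBE_DMACR_DMACWT_MASK;
	dmacr |= IXGBE_DMACR_DMAC_EN;
	IXGBE_WRITE_REG(hw, IXGBE_DMACR, dmacr);
}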
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTQCNCR 0x08B00 
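A usage note for the DCB arbiter bits just defined (an editor's sketch, not code from the patch): credit registers such as IXGBE_RTTDT2C()/IXGBE_RTTDT2S() are conventionally reprogrammed with the Tx descriptor-plane arbiter quiesced via IXGBE_RTTDCS_ARBDIS. Assuming the driver's usual IXGBE_READ_REG()/IXGBE_WRITE_REG() accessors:

/* Sketch: bracket a credit update with arbiter disable/re-enable */
u32 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);

rttdcs |= IXGBE_RTTDCS_ARBDIS;	/* stop the Tx descriptor-plane arbiter */
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

/* ... write IXGBE_RTTDT2C(i)/IXGBE_RTTDT2S(i) credit values here ... */

rttdcs &= ~IXGBE_RTTDCS_ARBDIS;	/* restart the arbiter */
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);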
+#define IXGBE_RTTQCNTG 0x04A90 +#define IXGBE_RTTBCNRD 0x0498C +#define IXGBE_RTTQCNRR 0x0498C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 + +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM 0x04980 +#define IXGBE_RTTQCNRM 0x04980 + +/* FCoE DMA Context Registers */ +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ +#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ +#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq.
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_BUPRC 0x04180 +#define IXGBE_BMPRC 0x04184 +#define IXGBE_BBPRC 0x04188 +#define IXGBE_BUPTC 0x0418C +#define IXGBE_BMPTC 0x04190 +#define IXGBE_BBPTC 0x04194 +#define IXGBE_BCRCERRS 0x04198 +#define IXGBE_BXONRXC 0x0419C +#define IXGBE_BXOFFRXC 0x041E0 +#define IXGBE_BXONTXC 0x041E4 +#define IXGBE_BXOFFTXC 0x041E8 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15F14 +#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ +#define IXGBE_BMCIPVAL 0x05060 +#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 +#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 + +/* Management Bit Fields and Masks */ +#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ +#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ +#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ +#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 + +/* Firmware Semaphore Register */ +#define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_TS_ENABLED 0x1 +#define IXGBE_FWSM_FW_MODE_PT 0x4 +#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 +#define IXGBE_FWSM_FW_VAL_BIT (1 << 15) + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_FWRESETCNT 0x15F40 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ +#define IXGBE_FLEX_MNG_PTR(_i) 
(IXGBE_FLEX_MNG + ((_i) * 4)) + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_PCIEPIPEADR 0x11004 +#define IXGBE_PCIEPIPEDAT 0x11008 +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 +#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 +#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 +#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 +#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 +#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_GSCL_1_X550 0x11800 +#define IXGBE_GSCL_2_X550 0x11804 +#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550 0x11820 +#define IXGBE_GSCN_1_X550 0x11824 +#define IXGBE_GSCN_2_X550 0x11828 +#define IXGBE_GSCN_3_X550 0x1182C +#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 +#define IXGBE_FACTPS_X550 IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 +#define IXGBE_FACTPS_X550EM_a 0x15FEC +#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) + +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM +#define IXGBE_SWSM_X550 IXGBE_SWSM +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM +#define IXGBE_SWSM_X550EM_a 0x15F70 +#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) + +#define IXGBE_FWSM 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM +#define IXGBE_FWSM_X550 IXGBE_FWSM +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM +#define IXGBE_FWSM_X550EM_a 0x15F74 +#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) + +#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 +#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) + +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 + +/* PCI-E registers 82599-Specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 +#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 +#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 +#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA 0x11088 +#define IXGBE_CIAD 0x1108C 
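To make the *_BY_MAC() aliasing above concrete (a hedged sketch, not part of the patch): SWSM, FWSM and SWFW_SYNC keep their legacy offsets on 82599/X540/X550/X550EM_x but move on X550EM_a (0x15F70/0x15F74/0x15F78), so MAC-agnostic code resolves them through the wrappers instead of switching on hw->mac.type. The helper name below is hypothetical, and IXGBE_READ_REG() is assumed to be the driver's usual register accessor.

/* Hypothetical helper: read a MAC-dependent register via _BY_MAC() */
static bool ixgbe_fw_valid_sketch(struct ixgbe_hw *hw)
{
	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));

	/* IXGBE_FWSM_FW_VAL_BIT is defined with the Firmware Semaphore
	 * fields earlier in this header
	 */
	return !!(fwsm & IXGBE_FWSM_FW_VAL_BIT);
}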
+#define IXGBE_CIAA_82599 IXGBE_CIAA +#define IXGBE_CIAD_82599 IXGBE_CIAD +#define IXGBE_CIAA_X540 IXGBE_CIAA +#define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_GSCL_5_X550 0x11810 +#define IXGBE_GSCL_6_X550 0x11814 +#define IXGBE_GSCL_7_X550 0x11818 +#define IXGBE_GSCL_8_X550 0x1181C +#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 +#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) +#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) +#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency 
Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */ +#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 
0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 +#define IXGBE_BARCTRL_CSRSIZE_SHIFT 13 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C +#define IXGBE_RSCCTL_TS_DIS 0x02 + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080 +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define 
IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + +/* FACTPS */ +#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageability Clock Gated */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot) */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift */ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */ +#define
IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ +#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ +#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ +#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ +#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define 
IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ +#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ +#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide flag */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */ +#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ + +#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ +#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ +#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 
100Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define X550_PHY_ID 0x01540220 +#define X550_PHY_ID2 0x01540223 +#define X550_PHY_ID3 0x01540221 +#define X557_PHY_ID 0x01540240 +#define X557_PHY_ID2 0x01540250 +#define AQ_FW_REV 0x20 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 + +/* PHY Types */ +#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0 +#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define 
IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_MAX_PACKET_BUFFERS 8 + +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ +#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ +#define 
IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_FCOE_RAM 7 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_QSC_RSC_RAM 0xB +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 +#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 +#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 +#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +/* Deficit Fixed Prio ena */ +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define 
IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define 
IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define 
IXGBE_ETQS_RX_QUEUE_SHIFT 16
+#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
+ */
+#define IXGBE_ETQF_FILTER_EAPOL 0
+#define IXGBE_ETQF_FILTER_FCOE 2
+#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_LLDP 5
+#define IXGBE_ETQF_FILTER_LACP 6
+#define IXGBE_ETQF_FILTER_FC 7
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
+#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
+
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
+#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */
+#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
+#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */
+#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
+
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8)
+#define IXGBE_X557_MAX_LED_INDEX 3
+#define IXGBE_X557_LED_PROVISIONING 0xC430
+
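/*
 * Editorial sketch, not part of the patch: programming one LED with the
 * per-index LEDCTL helpers above, in the style of ixgbe_led_on_generic().
 * IXGBE_READ_REG()/IXGBE_WRITE_REG() and the IXGBE_LEDCTL register offset
 * are defined elsewhere in this header; "led" is the LED index (0-3) and
 * "mode" is one of the LED mode values defined just below.
 */
static inline void ixgbe_example_set_led_mode(struct ixgbe_hw *hw,
					      unsigned int led, u32 mode)
{
	u32 ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	ledctl &= ~IXGBE_LED_MODE_MASK(led);		/* clear the 4-bit mode field */
	ledctl |= mode << IXGBE_LED_MODE_SHIFT(led);	/* install the new mode */
	ledctl &= ~IXGBE_LED_BLINK(led);		/* and stop any blinking */
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
}

+/* LED modes */
+#define IXGBE_LED_LINK_UP 0x0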
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA 0x00040000
+#define IXGBE_AUTOC_FECR 0x00020000
+#define IXGBE_AUTOC_KR_SUPP 0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
+
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_SGMII_EN 0x02000000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
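/*
 * Editorial sketch, not part of the patch: a minimal link-up test in the
 * style of ixgbe_check_mac_link_generic(), using the LINKS masks above.
 * IXGBE_READ_REG() and the IXGBE_LINKS register offset are defined
 * elsewhere in this header.
 */
static inline bool ixgbe_example_link_up(struct ixgbe_hw *hw)
{
	u32 links = IXGBE_READ_REG(hw, IXGBE_LINKS);

	return !!(links & IXGBE_LINKS_UP);	/* bit 30: link is up */
}

+#define IXGBE_LINKS_KX_AN_IDLE 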
0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 
0x00003FFF /* EERD allows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* FLA Register */
+#define IXGBE_FLA_LOCKED 0x00000040
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
+#define IXGBE_PCIE_CONFIG_SIZE 0x08
+#define IXGBE_EEPROM_LAST_WORD 0x41
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR 0x3E
+
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
+
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
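/*
 * Editorial sketch, not part of the patch: how the EERD shift/bit values
 * above combine to read one EEPROM word, in the style of
 * ixgbe_read_eerd_generic().  IXGBE_READ_REG()/IXGBE_WRITE_REG(), the
 * IXGBE_EERD register offset and the IXGBE_ERR_EEPROM error code are
 * defined elsewhere in this header; udelay() is from <linux/delay.h>.
 */
static inline s32 ixgbe_example_read_eerd(struct ixgbe_hw *hw, u16 offset,
					  u16 *data)
{
	u32 eerd = ((u32)offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		   IXGBE_EEPROM_RW_REG_START;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);

	/* poll the done bit, 5 us per attempt (see IXGBE_EERD_EEWR_ATTEMPTS below) */
	for (i = 0; i < 100000; i++) {
		eerd = IXGBE_READ_REG(hw, IXGBE_EERD);
		if (eerd & IXGBE_EEPROM_RW_REG_DONE) {
			*data = (u16)(eerd >> IXGBE_EEPROM_RW_REG_DATA);
			return 0;
		}
		udelay(5);
	}
	return IXGBE_ERR_EEPROM;
}

+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */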
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */
+#endif
+
+/* Number of 5-microsecond intervals we wait for an EERD read or
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+
+/* # attempts we wait for flush update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7)
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_LESM_2_STATES_ENABLED_MASK 0x1F
+#define IXGBE_FW_LESM_2_STATES_ENABLED 0x12
+#define IXGBE_FW_LESM_STATE0_10G_ENABLED 0x6FFF
+#define IXGBE_FW_LESM_STATE1_10G_ENABLED 0x4FFF
+#define IXGBE_FW_LESM_STATE0_10G_DISABLED 0x0FFF
+#define IXGBE_FW_LESM_STATE1_10G_DISABLED 0x2FFF
+#define IXGBE_FW_LESM_PORT0_STATE0_OFFSET 0x2
+#define IXGBE_FW_LESM_PORT0_STATE1_OFFSET 0x3
+#define IXGBE_FW_LESM_PORT1_STATE0_OFFSET 0x6
+#define IXGBE_FW_LESM_PORT1_STATE1_OFFSET 0x7
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. 
SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ + +/* FW header offset */ +#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_X540_FW_MODULE_MASK 0x7FFF +/* 4KB multiplier */ +#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 +/* version word 2 (month & day) */ +#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 +/* version word 3 (silicon compatibility & year) */ +#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 +/* version word 4 (major & minor numbers) */ +#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define IXGBE_PCIDEVCTRL2_50_100us 0x1 +#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define IXGBE_PCIDEVCTRL2_1_2s 0xa +#define IXGBE_PCIDEVCTRL2_4_8s 0xd +#define IXGBE_PCIDEVCTRL2_17_34s 0xe + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_RSC_DIS 0x00000020 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_TT0 0x00000001 +#define IXGBE_TSAUXC_EN_TT1 0x00000002 +#define IXGBE_TSAUXC_ST0 0x00000010 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 + +#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 +#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 +#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ +#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ + +#define IXGBE_TSIM_SYS_WRAP 0x00000001 +#define IXGBE_TSIM_TXTS 0x00000002 +#define IXGBE_TSIM_TADJ 0x00000080 + +#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP +#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS +#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 
0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ +#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_WRITE 0x00010000 +#define IXGBE_QDE_READ 0x00020000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert 
Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32 VFs */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64 VFs */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
+#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
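/*
 * Editorial sketch, not part of the patch: the test an Rx clean-up path
 * applies to the write-back status/error word of an advanced receive
 * descriptor ("staterr" is le32_to_cpu(rx_desc->wb.upper.status_error);
 * see the descriptor layouts later in this file).
 */
static inline bool ixgbe_example_rx_frame_ok(u32 staterr)
{
	if (!(staterr & IXGBE_RXD_STAT_DD))	/* hardware not done with it yet */
		return false;
	/* any bit set in RDESC.ERRORS (bits 31:20) marks a bad frame */
	return !(staterr & IXGBE_RXDADV_ERR_MASK);
}

+#define 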
IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 8) + * = (<< 2) + */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */ +#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ +#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT +/* Translated register #defines */ +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) +#define IXGBE_PVTEITR(P) (((P) < 24) ? 
(0x00820 + ((P) * 4)) : \ + (0x012300 + (((P) - 24) * 4))) +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ + : (0x0D000 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ + : (0x0D004 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ + : (0x0D008 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ + : (0x0D010 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ + : (0x0D018 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ + : (0x0D028 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ + : (0x0D014 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) +#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) +#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \ + : (0x0D00C + (0x40 * ((P) - 64)))) +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + +#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif +enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, +}; + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 
/* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+#define IXGBE_FDIRM_L3P 0x00000040
+
+#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */
+#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */
+#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */
+#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */
+#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
+#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
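/*
 * Editorial sketch, not part of the patch: composing the FDIRCMD word that
 * commits a perfect-match TCP Flow Director filter steering traffic to
 * "queue", roughly as ixgbe_fdir_write_perfect_filter_82599() does (the
 * preceding FDIRHASH and tuple-register writes are elided).  The
 * IXGBE_FDIRCMD register offset is defined elsewhere in this header.
 */
static inline void ixgbe_example_fdir_add_tcp(struct ixgbe_hw *hw, u8 queue)
{
	u32 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW |
		      IXGBE_FDIRCMD_FILTER_UPDATE |
		      IXGBE_FDIRCMD_LAST |
		      IXGBE_FDIRCMD_QUEUE_EN |
		      IXGBE_FDIRCMD_L4TYPE_TCP |
		      ((u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
}

+#define FW_CEM_CMD_DRIVER_INFO 0xDD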
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 1024
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0xA
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0)
+#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1)
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3)
+#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4)
+#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5)
+#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6)
+#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7)
+#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8)
+#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9)
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18)
+#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19)
+#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20)
+#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+
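/*
 * Editorial sketch, not part of the patch: building the first data word of
 * a FW_PHY_ACT_SETUP_LINK firmware PHY activity, in the style of
 * ixgbe_setup_fw_link() on X550EM_a parts: OR together the advertised
 * speeds, then add the pause mode and the autoneg flag.
 */
static inline u32 ixgbe_example_fw_setup_link_word(void)
{
	return FW_PHY_ACT_LINK_SPEED_10G | FW_PHY_ACT_LINK_SPEED_1G |
	       (FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
		FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) |
	       FW_PHY_ACT_SETUP_LINK_AN;
}

+/* Host 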
Interface Command Structures */ + +#pragma pack(push, 1) + +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. of dword2 */ +}; + +struct ixgbe_hic_drv_info2 { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; +}; + +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + __be16 address; + u16 rsv1; + __be32 write_data; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + __be32 read_data; +}; + +struct ixgbe_hic_phy_activity_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad; + __le16 activity_id; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +struct ixgbe_hic_phy_activity_resp { + struct ixgbe_hic_hdr hdr; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +#pragma pack(pop) + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 vlan; + } fields; + } upper; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 vlan; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* 
Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF 
index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ +/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0002 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Physical layer type */ +typedef u64 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 +#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define IXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define IXGBE_CABLE_DC 5556 /* Delay Copper */ +#define IXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) + +/* Calculate Interface Delay 82598, 82599 */ +#define IXGBE_PHY_D 12800 +#define IXGBE_MAC_D 4096 +#define IXGBE_XAUI_D (2 * 1024) + +#define IXGBE_ID (IXGBE_MAC_D + 
IXGBE_XAUI_D + IXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define IXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define IXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate 82599, 82598 delay value in bit times */ +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * IXGBE_B2BT(_max_frame_tc) + \ + (36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. 
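+ * The dword_stream member below overlays the formatted struct so the
+ * bucket/signature hash code can walk the same 56 bytes of input as
+ * fourteen 32-bit words (an explanatory note; the layout itself is
+ * defined by the struct that follows).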
 */
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first
+	 * (matching the formatted struct below):
+	 *
+	 * vm_pool	- 1 byte
+	 * flow_type	- 1 byte
+	 * vlan_id	- 2 bytes
+	 * dst_ip	- 16 bytes
+	 * src_ip	- 16 bytes
+	 * inner_mac	- 6 bytes
+	 * tunnel_type	- 2 bytes
+	 * tni_vni	- 4 bytes
+	 * src_port	- 2 bytes
+	 * dst_port	- 2 bytes
+	 * flex_bytes	- 2 bytes
+	 * bkt_hash	- 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		u8 inner_mac[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	__be32 dword_stream[14];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+#define IXGBE_MVALS_INIT(m)		\
+	IXGBE_CAT(EEC, m),		\
+	IXGBE_CAT(FLA, m),		\
+	IXGBE_CAT(GRC, m),		\
+	IXGBE_CAT(SRAMREL, m),		\
+	IXGBE_CAT(FACTPS, m),		\
+	IXGBE_CAT(SWSM, m),		\
+	IXGBE_CAT(SWFW_SYNC, m),	\
+	IXGBE_CAT(FWSM, m),		\
+	IXGBE_CAT(SDP0_GPIEN, m),	\
+	IXGBE_CAT(SDP1_GPIEN, m),	\
+	IXGBE_CAT(SDP2_GPIEN, m),	\
+	IXGBE_CAT(EICR_GPI_SDP0, m),	\
+	IXGBE_CAT(EICR_GPI_SDP1, m),	\
+	IXGBE_CAT(EICR_GPI_SDP2, m),	\
+	IXGBE_CAT(CIAA, m),		\
+	IXGBE_CAT(CIAD, m),		\
+	IXGBE_CAT(I2C_CLK_IN, m),	\
+	IXGBE_CAT(I2C_CLK_OUT, m),	\
+	IXGBE_CAT(I2C_DATA_IN, m),	\
+	IXGBE_CAT(I2C_DATA_OUT, m),	\
+	IXGBE_CAT(I2C_DATA_OE_N_EN, m),	\
+	IXGBE_CAT(I2C_BB_EN, m),	\
+	IXGBE_CAT(I2C_CLK_OE_N_EN, m),	\
+	IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+	IXGBE_MVALS_INIT(_IDX),
+	IXGBE_MVALS_IDX_LIMIT
+};
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */ +enum ixgbe_fcoe_boot_status { + ixgbe_fcoe_bootstatus_disabled = 0, + ixgbe_fcoe_bootstatus_enabled = 1, + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_82599_vf, + ixgbe_mac_X540, + ixgbe_mac_X540_vf, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, + ixgbe_mac_X550EM_a, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, + ixgbe_mac_X550EM_a_vf, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_xfi, + ixgbe_phy_x550em_ext_t, + ixgbe_phy_ext_1g_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, + ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ixgbe_phy_sgmii, + ixgbe_phy_fw, + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_1g_sx_core0 = 11, + ixgbe_sfp_type_1g_sx_core1 = 12, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_fiber_qsfp, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, + 
ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u16 func; + u8 lan_id; + u16 instance_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); +}; + +/* Error Codes */ +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define 
IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_OUT_OF_MEM -34 +#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 +#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 +#define IXGBE_ERR_MBX -41 +#define IXGBE_ERR_MBX_NOMSG -42 +#define IXGBE_ERR_TIMEOUT -43 + +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ (1 << 5) +#define IXGBE_FUSES0_REV_MASK (3 << 6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918) +#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) + +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1) +#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3) +#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29) +#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) + +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) + +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK	0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT	31
+#define IXGBE_SB_IOSF_CTRL_BUSY	(1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY	0
+
+#define IXGBE_NW_MNG_IF_SEL		0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT	(1u << 1)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE	(1u << 2)
+#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO	(1u << 13)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M	(1u << 17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M	(1u << 18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G	(1u << 19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G	(1u << 20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G	(1u << 21)
+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE	(1u << 25)
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE	(1 << 24) /* X552 reg field only */
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT	3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD	\
+	(0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
+
+/* Code Command (Flash I/F Interface) */
+#define IXGBE_HOST_INTERFACE_FLASH_READ_CMD		0x30
+#define IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD	0x31
+#define IXGBE_HOST_INTERFACE_FLASH_WRITE_CMD		0x32
+#define IXGBE_HOST_INTERFACE_SHADOW_RAM_WRITE_CMD	0x33
+#define IXGBE_HOST_INTERFACE_FLASH_MODULE_UPDATE_CMD	0x34
+#define IXGBE_HOST_INTERFACE_FLASH_BLOCK_EREASE_CMD	0x35
+#define IXGBE_HOST_INTERFACE_SHADOW_RAM_DUMP_CMD	0x36
+#define IXGBE_HOST_INTERFACE_FLASH_INFO_CMD		0x37
+#define IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD		0x38
+#define IXGBE_HOST_INTERFACE_MASK_CMD			0x000000FF
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_vf.c b/drivers/net/ethernet/intel/ixgbevf/ixgbe_vf.c
new file mode 100644
index 000000000000..3519cb22c932
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_vf.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2020 Intel Corporation. */
+
+#include "ixgbe_vf.h"
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG	IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG	IXGBE_READ_REG
+#endif
+
+/**
+ * ixgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers; adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
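+ *
+ * Typical usage (an illustrative sketch, not a mandated sequence): probe
+ * code calls this once, then drives the device only through the assigned
+ * pointers, e.g. hw->mac.ops.reset_hw(hw) followed by
+ * hw->mac.ops.init_hw(hw).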
+ **/
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+	u16 i;
+
+	/* MAC */
+	hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+	hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+	hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+	/* Cannot clear stats on VF */
+	hw->mac.ops.clear_hw_cntrs = NULL;
+	hw->mac.ops.get_media_type = NULL;
+	hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+	hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+	hw->mac.ops.get_bus_info = NULL;
+	hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version;
+
+	/* Link */
+	hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+	hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+	hw->mac.ops.get_link_capabilities = NULL;
+
+	/* RAR, Multicast, VLAN */
+	hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+	hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+	hw->mac.ops.init_rx_addrs = NULL;
+	hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+	hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode;
+	hw->mac.ops.enable_mc = NULL;
+	hw->mac.ops.disable_mc = NULL;
+	hw->mac.ops.clear_vfta = NULL;
+	hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+	hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf;
+
+	hw->mac.max_tx_queues = 1;
+	hw->mac.max_rx_queues = 1;
+
+	for (i = 0; i < 64; i++)
+		hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_vf;
+
+	return 0;
+}
+
+/* ixgbe_virt_clr_reg - Set registers to default (power on) state.
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+	int i;
+	u32 vfsrrctl;
+	u32 vfdca_rxctrl;
+	u32 vfdca_txctrl;
+
+	/* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+	vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+	/* DCA_RXCTRL default value */
+	vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+		       IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+		       IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+	/* DCA_TXCTRL default value */
+	vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+		       IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+		       IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+	for (i = 0; i < 7; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+	}
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by clearing the adapter_stopped flag. The VF cannot
+ * clear counters or program bus info, receive address, multicast, or VLAN
+ * filter state; the PF owns those, so no register access is needed here.
+ **/
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	return 0;
+}
+
+/**
+ * ixgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the hardware by starting it and then caching the permanent
+ * MAC address supplied by the PF.
+ **/
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+	s32 status = hw->mac.ops.start_hw(hw);
+
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ * ixgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, then
+ * masks and clears all interrupts.
+ **/
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/* reset the API version */
+	hw->api_version = ixgbe_mbox_api_10;
+	ixgbe_init_mbx_params_vf(hw);
+
+	hw_dbg(hw, "Issuing a function level reset to MAC\n");
+
+	IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+	IXGBE_WRITE_FLUSH(hw);
+
+	msleep(50);
+
+	/* we cannot reset while the RSTI / RSTD bits are asserted */
+	while (!mbx->ops[0].check_for_rst(hw, 0) && timeout) {
+		timeout--;
+		udelay(5);
+	}
+
+	if (!timeout)
+		return IXGBE_ERR_RESET_FAILED;
+
+	/* Reset VF registers to initial values */
+	ixgbe_virt_clr_reg(hw);
+
+	/* mailbox timeout can now become active */
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+	msgbuf[0] = IXGBE_VF_RESET;
+	ixgbe_write_mbx(hw, msgbuf, 1, 0);
+
+	msleep(10);
+
+	/*
+	 * Set our "perm_addr" based on info provided by the PF, and also
+	 * set up the mc_filter_type, which is piggybacked on the MAC
+	 * address in word 3.
+	 */
+	ret_val = ixgbe_poll_mbx(hw, msgbuf,
+				 IXGBE_VF_PERMADDR_MSG_LEN, 0);
+	if (ret_val)
+		return ret_val;
+
+	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
+		return IXGBE_ERR_INVALID_MAC_ADDR;
+
+	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
+		memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within the ixgbe_hw struct, clears
+ * interrupts, and disables the transmit and receive units. The
+ * adapter_stopped flag is used by the shared code and drivers to determine
+ * whether the adapter is in a stopped state and should not touch the
+ * hardware.
+ **/
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+	u32 reg_val;
+	u16 i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+	IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts, flush previous writes */
+	IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+	/* Disable the transmit unit. Each queue must be disabled.
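+	 * The IXGBE_TXDCTL_SWFLSH bit additionally requests a software
+	 * flush, so descriptors still pending in a queue are flushed as
+	 * that queue is disabled.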
+	 */
+	for (i = 0; i < hw->mac.max_tx_queues; i++)
+		IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+		reg_val &= ~IXGBE_RXDCTL_ENABLE;
+		IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+	}
+	/* Clear packet split and pool config */
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+	/* flush all queue disables */
+	IXGBE_WRITE_FLUSH(hw);
+	msleep(2);
+
+	return 0;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which bit-vector
+ * to set in the multicast table. The hardware uses 12 bits from incoming
+ * Rx multicast addresses to determine the bit-vector to check in the MTA.
+ * Which of the 4 combinations of 12 bits the hardware uses is selected by
+ * the MO field of MCSTCTRL; the MO field is set during initialization to
+ * mc_filter_type.
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:	/* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:	/* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:	/* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:	/* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:	/* Invalid mc_filter_type */
+		hw_dbg(hw, "MC filter type param set incorrectly\n");
+		break;
+	}
+
+	/* vector can only be 12 bits wide or the boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+static s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
+				      u32 *retmsg, u16 size)
+{
+	s32 retval = ixgbe_write_mbx(hw, msg, size, 0);
+
+	if (retval)
+		return retval;
+
+	return ixgbe_poll_mbx(hw, retmsg, size, 0);
+}
+
+/**
+ * ixgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ **/
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+		     u32 enable_addr)
+{
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+	UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
+
+	memset(msgbuf, 0, 12);
+	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, 6);
+	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* if NACKed, the address was rejected; fall back to "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
+		ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+		return IXGBE_ERR_MBX;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller-supplied function to return the next address in the list
+ * @clear: unused
+ *
+ * Updates the Multicast Table Array.
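+ *
+ * For illustration (not normative): with three addresses, the VF sends a
+ * single IXGBE_VF_SET_MULTICAST message whose first word carries the count
+ * in the IXGBE_VT_MSGINFO field and whose payload packs three 12-bit MTA
+ * vectors, one per 16-bit word, as computed by ixgbe_mta_vector().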
+ **/
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+				 u32 mc_addr_count, ixgbe_mc_addr_itr next,
+				 bool clear)
+{
+	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+	u16 *vector_list = (u16 *)&msgbuf[1];
+	u32 vector;
+	u32 cnt, i;
+	u32 vmdq;
+
+	UNREFERENCED_1PARAMETER(clear);
+
+	/* Each entry in the list uses one 16-bit word. We have 30
+	 * 16-bit words available in our HW msg buffer (minus 1 for the
+	 * msg type), so that is 30 hash values if we pack them tightly.
+	 * If there are more than 30 MC addresses to add, punt the extras
+	 * for now and add code to handle more than 30 later. It would be
+	 * unusual for a server to request that many multicast addresses,
+	 * except in large enterprise network environments.
+	 */
+
+	hw_dbg(hw, "MC Addr Count = %d\n", mc_addr_count);
+
+	cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+	for (i = 0; i < cnt; i++) {
+		vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+		hw_dbg(hw, "Hash value = 0x%03X\n", vector);
+		vector_list[i] = (u16)vector;
+	}
+
+	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+					  IXGBE_VFMAILBOX_SIZE);
+}
+
+/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of the VF.
+ **/
+s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+	u32 msgbuf[2];
+	s32 err;
+
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_12:
+		/* the new modes were introduced in API version 1.3 */
+		if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+			return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+		/* Fall through */
+	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_15:
+		break;
+	default:
+		return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+	}
+
+	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+	msgbuf[1] = xcast_mode;
+
+	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
+		return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+	return 0;
+}
+
+/**
+ * ixgbe_set_vfta_vf - Set/Unset VLAN filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12-bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
+ *
+ * Turns the specified VLAN on or off in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+		      bool vlan_on, bool vlvf_bypass)
+{
+	u32 msgbuf[2];
+	s32 ret_val;
+	UNREFERENCED_2PARAMETER(vind, vlvf_bypass);
+
+	msgbuf[0] = IXGBE_VF_SET_VLAN;
+	msgbuf[1] = vlan;
+	/* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
+	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+	if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_SUCCESS))
+		return 0;
+
+	return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+	UNREFERENCED_1PARAMETER(hw);
+	return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
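+ * Like its TX counterpart, this is a fixed upper bound
+ * (IXGBE_VF_MAX_RX_QUEUES); the count the PF actually grants is
+ * negotiated separately via ixgbevf_get_queues().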
+ **/
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+	UNREFERENCED_1PARAMETER(hw);
+	return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: the MAC address
+ **/
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+		mac_addr[i] = hw->mac.perm_addr[i];
+
+	return 0;
+}
+
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+	u32 msgbuf[3], msgbuf_chk;
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/*
+	 * If index is one, this is the start of a new list and the PF
+	 * needs to be told so it can do its own list management.
+	 * If it is zero, that tells the PF to just clear all of this
+	 * VF's macvlans and there is no new list.
+	 */
+	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+	msgbuf_chk = msgbuf[0];
+	if (addr)
+		memcpy(msg_addr, addr, 6);
+
+	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+	if (!ret_val) {
+		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
+			return IXGBE_ERR_OUT_OF_MEM;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * The VF cannot change the link speed itself; the AUTOC register is owned
+ * by the PF, so this is a stub that reports success.
+ **/
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			    bool autoneg_wait_to_complete)
+{
+	UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
+	return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			    bool *link_up, bool autoneg_wait_to_complete)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u32 links_reg;
+	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+	/* If we were hit with a reset, drop the link */
+	if (!mbx->ops[0].check_for_rst(hw, 0) || !mbx->timeout)
+		mac->get_link_status = true;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down, there is no point in checking whether
+	 * the PF is up
+	 */
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	if (!(links_reg & IXGBE_LINKS_UP))
+		goto out;
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+	 * before the link status is correct
+	 */
+	if (mac->type == ixgbe_mac_82599_vf) {
+		int i;
+
+		for (i = 0; i < 5; i++) {
+			udelay(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+			if (!(links_reg & IXGBE_LINKS_UP))
+				goto out;
+		}
+	}
+
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		if (hw->mac.type >= ixgbe_mac_X550_vf) {
+			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+		}
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		if (hw->mac.type == ixgbe_mac_X550_vf) {
+			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+				*speed = IXGBE_LINK_SPEED_5GB_FULL;
+		}
+		break;
+	case IXGBE_LINKS_SPEED_10_X550EM_A:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		/* this encoding is reserved on older MACs */
+		if (hw->mac.type >= ixgbe_mac_X550_vf)
+			*speed = IXGBE_LINK_SPEED_10_FULL;
+		break;
+	default:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+	}
+
+	if (hw->api_version < ixgbe_mbox_api_15) {
+		u32 in_msg = 0;
+
+		/* if the read failed it could just be a mailbox collision;
+		 * it is best to wait until we are called again, and not
+		 * report an error
+		 */
+		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+			goto out;
+
+		if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+			/* msg is not CTS and is a NACK; we must have lost
+			 * CTS status
+			 */
+			if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
+				ret_val = IXGBE_ERR_MBX;
+			goto out;
+		}
+
+		/* the PF is talking; if we timed out in the past, we need
+		 * to reinit
+		 */
+		if (!mbx->timeout) {
+			ret_val = IXGBE_ERR_TIMEOUT;
+			goto out;
+		}
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
+	mac->get_link_status = false;
+
+out:
+	*link_up = !mac->get_link_status;
+	return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+	s32 retval;
+
+	msgbuf[0] = IXGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+
+	retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+	if (retval)
+		return retval;
+	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
+		return IXGBE_ERR_MBX;
+
+	return 0;
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+	int err;
+	u32 msg[3];
+
+	/* Negotiate the mailbox API version */
+	msg[0] = IXGBE_VF_API_NEGOTIATE;
+	msg[1] = api;
+	msg[2] = 0;
+
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/* Store value and return 0 on success */
+		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) {
+			hw->api_version = api;
+			return 0;
+		}
+
+		err = IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	return err;
+}
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc)
+{
+	int err;
+	u32 msg[5];
+
+	/* do nothing if API doesn't support ixgbevf_get_queues */
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_11:
+	case ixgbe_mbox_api_12:
+	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_15:
+		break;
+	default:
+		return 0;
+	}
+
+	/* Fetch queue configuration from the PF */
+	msg[0] = IXGBE_VF_GET_QUEUES;
+	msg[1] = msg[2] = msg[3] = msg[4] = 0;
+
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/*
+		 * if we didn't get a SUCCESS there must have been
+		 * some sort of mailbox error, so we should treat it
+		 * as such
+		 */
+		if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS))
+			return IXGBE_ERR_MBX;
+
+		/* record and validate values from message */
+		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+		if (hw->mac.max_tx_queues == 0 ||
+		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+		if (hw->mac.max_rx_queues == 0 ||
+		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+			hw->mac.max_rx_queues =
IXGBE_VF_MAX_RX_QUEUES; + + *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; + /* in case of unknown state assume we cannot tag frames */ + if (*num_tcs > hw->mac.max_rx_queues) + *num_tcs = 1; + + *default_tc = msg[IXGBE_VF_DEF_QUEUE]; + /* default to queue 0 on out-of-bounds queue number */ + if (*default_tc >= hw->mac.max_tx_queues) + *default_tc = 0; + } + + return err; +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbe_vf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbe_vf.h new file mode 100644 index 000000000000..a7413cb30d2c --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbe_vf.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _IXGBE_VF_H_ +#define _IXGBE_VF_H_ + +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 + +/* DCB define */ +#define IXGBE_VF_MAX_TRAFFIC_CLASS 8 + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +/* define IXGBE_VFPBACL still says TBD in EAS */ +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) + +struct ixgbe_hw; + +#include "ixgbe_type.h" + +#include "ixgbe_mbx.h" + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u32 (*get_supported_physical_layer)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); + + /* Link */ + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 
(*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+				       bool *);
+
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+	s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+				   ixgbe_mc_addr_itr, bool);
+	s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+	s32 (*set_rlpml)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations ops;
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	enum ixgbe_mac_type type;
+
+	s32 mc_filter_type;
+
+	bool get_link_status;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 max_msix_vectors;
+};
+
+struct ixgbe_hw {
+	void *back;
+
+	u8 __iomem *hw_addr;
+
+	struct ixgbe_mac_info mac;
+	struct ixgbe_mbx_info mbx;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8 revision_id;
+	bool adapter_stopped;
+
+	int api_version;
+};
+
+struct ixgbevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			    bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			    bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+		     u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+				 u32 mc_addr_count, ixgbe_mc_addr_itr,
+				 bool clear);
+s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+		      bool vlan_on, bool vlvf_bypass);
+s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc);
+
+#include "ixgbevf_osdep2.h"
+
+#endif /* _IXGBE_VF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index ecab686574b6..9552fc80649a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,31 +1,55 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2020 Intel Corporation.
*/ #ifndef _IXGBEVF_H_ #define _IXGBEVF_H_ -#include -#include -#include -#include +#include #include -#include -#include -#include +#include -#include "vf.h" -#include "ipsec.h" +#ifdef SIOCETHTOOL +#include +#endif +#ifdef NETIF_F_HW_VLAN_TX +#include +#endif +#ifdef HAVE_NDO_GET_STATS64 +#include +#endif +#ifdef HAVE_XDP_SUPPORT +#include +#endif /* HAVE_XDP_SUPPORT */ + +#include "kcompat.h" + +#include "ixgbe_type.h" +#include "ixgbe_vf.h" +#if IS_ENABLED(CONFIG_PCI_HYPERV) +#include "ixgbe_hv_vf.h" +#endif + +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +#ifdef HAVE_NDO_BUSY_POLL +#define BP_EXTENDED_STATS +#endif +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, \ + "%s: " fmt, __func__, ## args)) : NULL) #define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) /* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer - */ + * so a DMA handle can be stored along with the buffer */ struct ixgbevf_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; @@ -56,6 +80,11 @@ struct ixgbevf_rx_buffer { struct ixgbevf_stats { u64 packets; u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif }; struct ixgbevf_tx_queue_stats { @@ -76,6 +105,7 @@ enum ixgbevf_ring_state_t { __IXGBEVF_RX_BUILD_SKB_ENABLED, __IXGBEVF_TX_DETECT_HANG, __IXGBEVF_HANG_CHECK_ARMED, + __IXGBEVF_RX_CSUM_UDP_ZERO_ERR, __IXGBEVF_TX_XDP_RING, __IXGBEVF_TX_XDP_RING_PRIMED, }; @@ -89,39 +119,44 @@ enum ixgbevf_ring_state_t { struct ixgbevf_ring { struct ixgbevf_ring *next; - struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */ - struct net_device *netdev; + struct ixgbevf_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ struct bpf_prog *xdp_prog; - struct device *dev; + struct device *dev; /* device for DMA mapping */ void *desc; /* descriptor ring memory */ - dma_addr_t dma; /* phys. address of descriptor ring */ - unsigned int size; /* length in bytes */ - u16 count; /* amount of descriptors */ - u16 next_to_use; - u16 next_to_clean; - u16 next_to_alloc; - union { struct ixgbevf_tx_buffer *tx_buffer_info; struct ixgbevf_rx_buffer *rx_buffer_info; }; unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + struct sk_buff *skb; + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + struct ixgbevf_stats stats; - struct u64_stats_sync syncp; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif union { struct ixgbevf_tx_queue_stats tx_stats; struct ixgbevf_rx_queue_stats rx_stats; }; +#ifdef HAVE_XDP_BUFF_RXQ struct xdp_rxq_info xdp_rxq; - u64 hw_csum_rx_error; - u8 __iomem *tail; - struct sk_buff *skb; - - /* holds the special value that gets the hardware register offset - * associated with this ring, which is different for DCB and RSS modes - */ - u16 reg_idx; - int queue_index; /* needed for multiqueue queue management */ +#endif } ____cacheline_internodealigned_in_smp; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -136,19 +171,19 @@ struct ixgbevf_ring { #define IXGBEVF_RSS_HASH_KEY_SIZE 40 #define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */ -#define IXGBEVF_DEFAULT_TXD 1024 -#define IXGBEVF_DEFAULT_RXD 512 -#define IXGBEVF_MAX_TXD 4096 -#define IXGBEVF_MIN_TXD 64 -#define IXGBEVF_MAX_RXD 4096 -#define IXGBEVF_MIN_RXD 64 +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 /* Supported Rx Buffer Sizes */ -#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 -#define IXGBEVF_RXBUFFER_3072 3072 +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_3072 3072 -#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -164,7 +199,6 @@ struct ixgbevf_ring { #define IXGBE_TX_FLAGS_VLAN BIT(1) #define IXGBE_TX_FLAGS_TSO BIT(2) #define IXGBE_TX_FLAGS_IPV4 BIT(3) -#define IXGBE_TX_FLAGS_IPSEC BIT(4) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 @@ -225,42 +259,144 @@ struct ixgbevf_ring_container { #define ixgbevf_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -/* MAX_MSIX_Q_VECTORS of these are allocated, +/* MAX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. 
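+ * Each q_vector owns one NAPI context and one Rx and one Tx ring
+ * container; the rings themselves live in the trailing ring[0] flexible
+ * array member, sized when the q_vector is allocated.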
 */
 struct ixgbevf_q_vector {
 	struct ixgbevf_adapter *adapter;
-	/* index of q_vector within array, also used for finding the bit in
-	 * EICR and friends that represents the vector for this ring
-	 */
-	u16 v_idx;
-	u16 itr; /* Interrupt throttle rate written to EITR */
+	u16 v_idx;	/* index of q_vector within array, also used for
+			 * finding the bit in EICR and friends that
+			 * represents the vector for this ring */
+	u16 itr;	/* Interrupt throttle rate written to EITR */
 	struct napi_struct napi;
+#ifndef HAVE_NETDEV_NAPI_LIST
+	struct net_device poll_dev;
+#endif
 	struct ixgbevf_ring_container rx, tx;
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
+	bool netpoll_rx;
 
-	/* for dynamic allocation of rings associated with this q_vector */
-	struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
-#ifdef CONFIG_NET_RX_BUSY_POLL
+#ifdef HAVE_NDO_BUSY_POLL
 	unsigned int state;
 #define IXGBEVF_QV_STATE_IDLE		0
 #define IXGBEVF_QV_STATE_NAPI		1    /* NAPI owns this QV */
 #define IXGBEVF_QV_STATE_POLL		2    /* poll owns this QV */
 #define IXGBEVF_QV_STATE_DISABLED	4    /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_OWNED	(IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED	(IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
 #define IXGBEVF_QV_STATE_NAPI_YIELD	8    /* NAPI yielded this QV */
 #define IXGBEVF_QV_STATE_POLL_YIELD	16   /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
-			  IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
-			      IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_YIELD	(IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND	(IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
 	spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+#endif /* HAVE_NDO_BUSY_POLL */
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
 
-/* microsecond values for various ITR rates shifted by 2 to fit itr register
+#ifdef HAVE_NDO_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+
+	spin_lock_init(&q_vector->lock);
+	q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_LOCKED) {
+		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->tx.ring->stats.yields++;
+#endif
+	} else {
+		/* we don't care if someone yielded */
+		q_vector->state = IXGBEVF_QV_STATE_NAPI;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+				   IXGBEVF_QV_STATE_NAPI_YIELD));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
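+/* Editorial sketch, not part of the driver: NAPI and socket busy-poll race
+ * for ownership of a q_vector through the state word above, with the loser
+ * leaving a *_YIELD mark so contention can be counted. A hypothetical
+ * busy-poll callback would bracket its Rx cleanup with the poll lock:
+ *
+ *	if (!ixgbevf_qv_lock_poll(q_vector))
+ *		return LL_FLUSH_BUSY;	(NAPI holds the vector; back off)
+ *	ixgbevf_for_each_ring(ring, q_vector->rx)
+ *		found += ixgbevf_clean_rx_irq(q_vector, ring, 4);
+ *	ixgbevf_qv_unlock_poll(q_vector);
+ *
+ * The lock helpers and ixgbevf_for_each_ring() are real; the cleanup call
+ * and the budget of 4 are illustrative assumptions.
+ */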
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBEVF_QV_LOCKED)) { + q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->rx.ring->stats.yields++; +#endif + } else { + /* preserve yield marks */ + q_vector->state |= IXGBEVF_QV_STATE_POLL; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); + return q_vector->state & IXGBEVF_QV_USER_PEND; +} + +/* false if QV is currently owned */ +static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_OWNED) + rc = false; + q_vector->state |= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +#endif /* HAVE_NDO_BUSY_POLL */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ #define IXGBE_MIN_RSC_ITR 24 @@ -268,14 +404,6 @@ struct ixgbevf_q_vector { #define IXGBE_20K_ITR 200 #define IXGBE_12K_ITR 336 -/* Helper macros to switch between ints/sec and what the register uses. - * And yes, it's the same math going both ways. The lowest value - * supported by all of the ixgbe hardware is 8. - */ -#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ - ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) -#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG - /* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */ static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc, const u32 stat_err_bits) @@ -291,92 +419,102 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; } -static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) -{ - writel(value, ring->tail); -} - -#define IXGBEVF_RX_DESC(R, i) \ +#define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_DESC(R, i) \ +#define IXGBEVF_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_CTXTDESC(R, i) \ +#define IXGBEVF_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ +#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 2 +#define MAX_Q_VECTORS 2 -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#ifdef HAVE_STRUCT_DMA_ATTRS +#define IXGBEVF_RX_DMA_ATTR NULL +#else #define IXGBEVF_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif /* board specific private data structure */ struct ixgbevf_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; +#else /* this field must be first, see ixgbevf_process_skb_fields */ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ - struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct pci_dev *pdev; - /* Interrupt Throttle Rate */ - u16 rx_itr_setting; + unsigned long state; + + /* XDP */ + u16 xdp_ring_count; + u16 num_xdp_queues; + + /* Tx hotpath*/ + u16 tx_ring_count; + u16 num_tx_queues; u16 tx_itr_setting; + /* Rx hotpath */ + u16 rx_ring_count; + u16 num_rx_queues; + u16 rx_itr_setting; + + /* Rings, Tx first since it is accessed in hotpath */ + struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ + struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES]; + struct ixgbevf_ring *rx_ring[MAX_RX_QUEUES]; /* One per active queue */ + + /* interrupt vector accounting */ + struct ixgbevf_q_vector *q_vector[MAX_Q_VECTORS]; + int num_q_vectors; + struct msix_entry *msix_entries; + /* interrupt masks */ u32 eims_enable_mask; u32 eims_other; - /* XDP */ - int num_xdp_queues; - struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES]; - - /* TX */ - int num_tx_queues; - struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ + /* stats */ + u64 tx_busy; u64 restart_queue; - u32 tx_timeout_count; - u64 tx_ipsec; - - /* RX */ - int num_rx_queues; - struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ - u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; - int num_msix_vectors; + u64 hw_csum_rx_error; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 alloc_rx_page; - u64 rx_ipsec; - struct msix_entry *msix_entries; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif - /* OS defined structs */ - struct net_device *netdev; - struct bpf_prog *xdp_prog; - struct pci_dev *pdev; + u32 tx_timeout_count; /* structs defined in ixgbe_vf.h */ struct ixgbe_hw hw; - u16 msg_enable; - /* Interrupt Throttle Rate */ - u32 eitr_param; - struct ixgbevf_hw_stats stats; - unsigned long state; - u64 tx_busy; - unsigned int tx_ring_count; - unsigned int xdp_ring_count; - unsigned int rx_ring_count; + 
u32 *config_space; - u8 __iomem *io_addr; /* Mainly for iounmap use */ + u16 msg_enable; + + u8 __iomem *io_addr; u32 link_speed; bool link_up; + bool dev_closed; struct timer_list service_timer; struct work_struct service_task; @@ -387,11 +525,16 @@ struct ixgbevf_adapter { u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; -#define IXGBEVF_FLAGS_LEGACY_RX BIT(1) + bool irqs_ready; +#define IXGBE_FLAG_RX_CSUM_ENABLED BIT(1) +#define IXGBEVF_FLAGS_LEGACY_RX BIT(2) +#define IXGBEVF_FLAG_RSS_FIELD_IPV4_UDP BIT(4) +#define IXGBEVF_FLAG_RSS_FIELD_IPV6_UDP BIT(5) +}; -#ifdef CONFIG_XFRM - struct ixgbevf_ipsec *ipsec; -#endif /* CONFIG_XFRM */ +struct ixgbevf_info { + enum ixgbe_mac_type mac; + unsigned int flags; }; enum ixbgevf_state_t { @@ -406,40 +549,15 @@ enum ixbgevf_state_t { __IXGBEVF_QUEUE_RESET_REQUESTED, }; -enum ixgbevf_boards { - board_82599_vf, - board_82599_vf_hv, - board_X540_vf, - board_X540_vf_hv, - board_X550_vf, - board_X550_vf_hv, - board_X550EM_x_vf, - board_X550EM_x_vf_hv, - board_x550em_a_vf, +#ifdef HAVE_VLAN_RX_REGISTER +struct ixgbevf_cb { + u16 vid; /* VLAN tag */ }; +#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb) -enum ixgbevf_xcast_modes { - IXGBEVF_XCAST_MODE_NONE = 0, - IXGBEVF_XCAST_MODE_MULTI, - IXGBEVF_XCAST_MODE_ALLMULTI, - IXGBEVF_XCAST_MODE_PROMISC, -}; - -extern const struct ixgbevf_info ixgbevf_82599_vf_info; -extern const struct ixgbevf_info ixgbevf_X540_vf_info; -extern const struct ixgbevf_info ixgbevf_X550_vf_info; -extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; -extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; -extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; - -extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; -extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; -extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; -extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; -extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; - -/* needed by ethtool.c */ -extern const char ixgbevf_driver_name[]; +#endif +/* needed by ixgbevf_ethtool.c */ +extern char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; int ixgbevf_open(struct net_device *netdev); @@ -451,45 +569,34 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter); void ixgbevf_set_ethtool_ops(struct net_device *netdev); int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring); -int ixgbevf_setup_tx_resources(struct ixgbevf_ring *); -void ixgbevf_free_rx_resources(struct ixgbevf_ring *); -void ixgbevf_free_tx_resources(struct ixgbevf_ring *); +int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring); +void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring); +void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring); void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT int ethtool_ioctl(struct ifreq *ifr); +#endif -extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); - -#ifdef CONFIG_IXGBEVF_IPSEC -void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter); -void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter); -void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter); -void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); -int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first, - struct ixgbevf_ipsec_tx_data *itd); -#else -static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter 
*adapter) -{ } -static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) -{ } -static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { } -static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) { } -static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first, - struct ixgbevf_ipsec_tx_data *itd) -{ return 0; } -#endif /* CONFIG_IXGBEVF_IPSEC */ +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); -#define ixgbevf_hw_to_netdev(hw) \ - (((struct ixgbevf_adapter *)(hw)->back)->netdev) +static inline u32 __er32(struct ixgbe_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} -#define hw_dbg(hw, format, arg...) \ - netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg) +static inline void __ew32(struct ixgbe_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} +#define er32(reg) IXGBE_READ_REG(hw, IXGBE_##reg) +#define ew32(reg,val) IXGBE_WRITE_REG(hw, IXGBE_##reg, (val)) +#define e1e_flush() er32(STATUS) + +static inline struct netdev_queue *txring_txq(const struct ixgbevf_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} #endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_ethtool.c new file mode 100644 index 000000000000..cab35a9c38a6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_ethtool.c @@ -0,0 +1,1644 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +/* ethtool support for ixgbe */ + +#include +#include +#include +#include +#include +#include +#ifdef SIOCETHTOOL +#include + +#include "ixgbevf.h" + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#define IXGBE_ALL_RAR_ENTRIES 16 + +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif +#ifdef ETHTOOL_GSTATS + +enum {NETDEV_STATS, IXGBEVF_STATS}; + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define IXGBEVF_STAT(_name, _stat) { \ + .stat_string = _name, \ + .type = IXGBEVF_STATS, \ + .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \ + .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ +} + +#define IXGBEVF_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + IXGBEVF_NETDEV_STAT(rx_packets), + IXGBEVF_NETDEV_STAT(tx_packets), + IXGBEVF_NETDEV_STAT(rx_bytes), + IXGBEVF_NETDEV_STAT(tx_bytes), + IXGBEVF_STAT("tx_busy", tx_busy), + IXGBEVF_STAT("tx_restart_queue", restart_queue), + IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), + IXGBEVF_NETDEV_STAT(multicast), + IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), + IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), + IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +#define IXGBEVF_QUEUE_STATS_LEN ( \ + (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ + ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \ + ((struct ixgbevf_adapter 
*)netdev_priv(netdev))->num_rx_queues) * \ + (sizeof(struct ixgbevf_stats) / sizeof(u64))) +#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) + +#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Link test (on/offline)" +}; +#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + +#if defined(HAVE_ETHTOOL_GET_SSET_COUNT) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) +static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings) +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT && HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static int ixgbevf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +#else +static int ixgbevf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +#endif +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = -1; +#else + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + ecmd->port = -1; +#endif + + if (adapter->link_up) { + __u32 speed = SPEED_10000; + + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed = SPEED_10000; + break; + case IXGBE_LINK_SPEED_5GB_FULL: + speed = SPEED_5000; + break; +#ifdef SUPPORTED_2500baseX_Full + case IXGBE_LINK_SPEED_2_5GB_FULL: + speed = SPEED_2500; + break; +#endif /* SUPPORTED_2500baseX_Full */ + case IXGBE_LINK_SPEED_1GB_FULL: + speed = SPEED_1000; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed = SPEED_100; + break; + case IXGBE_LINK_SPEED_10_FULL: + speed = SPEED_10; + break; + } + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + cmd->base.speed = speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } +#else + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } +#endif + + return 0; +} + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static int ixgbevf_set_link_ksettings(struct net_device __always_unused *netdev, + const struct ethtool_link_ksettings __always_unused *cmd) +#else +static int ixgbevf_set_settings(struct net_device __always_unused *netdev, + struct ethtool_cmd __always_unused *ecmd) +#endif +{ + return -EINVAL; +} + +#ifndef HAVE_NDO_SET_FEATURES +static u32 ixgbevf_get_rx_csum(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); +} + +static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + if (data) + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) { + if (!adapter->dev_closed) { + ixgbevf_reinit_locked(adapter); + } + } else { + ixgbevf_reset(adapter); + } + + return 0; +} + +static u32 ixgbevf_get_tx_csum(struct net_device 
*netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +static int ixgbevf_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#else + netdev->features |= NETIF_F_IP_CSUM; + else + netdev->features &= ~NETIF_F_IP_CSUM; +#endif + + return 0; +} +#endif + +#ifndef HAVE_NDO_SET_FEATURES +#ifdef NETIF_F_TSO +static int ixgbevf_set_tso(struct net_device *netdev, u32 data) +{ +#ifndef HAVE_NETDEV_VLAN_FEATURES + struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + if (data) { + netdev->features |= NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + netdev->features |= NETIF_F_TSO6; +#endif + } else { + netif_tx_stop_all_queues(netdev); + netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + netdev->features &= ~NETIF_F_TSO6; +#endif +#ifndef HAVE_NETDEV_VLAN_FEATURES +#ifdef NETIF_F_HW_VLAN_TX + /* disable TSO on all VLANs if they're present */ + if (adapter->vlgrp) { + int i; + struct net_device *v_netdev; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = + vlan_group_get_device(adapter->vlgrp, i); + if (v_netdev) { + v_netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + v_netdev->features &= ~NETIF_F_TSO6; +#endif + vlan_group_set_device(adapter->vlgrp, i, + v_netdev); + } + } + } +#endif +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + netif_tx_start_all_queues(netdev); + } + return 0; +} +#endif /* NETIF_F_TSO */ +#endif + +static u32 ixgbevf_get_msglevel(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ixgbevf_get_regs_len(struct net_device __always_unused *netdev) +{ +#define IXGBE_REGS_LEN 45 + return IXGBE_REGS_LEN * sizeof(u32); +} + +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbevf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + + /* generate a number suitable for ethtool's register version */ + regs->version = (1u << 24) | hw->revision_id << 16 | hw->device_id; + + /* IXGBE_VFCTRL is a Write Only register, so just return 0 */ + regs_buff[0] = 0x0; + + /* General Registers */ + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + + /* Receive DMA */ + for (i = 0; i < 2; i++) + regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); 
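+	/* Editorial note: each loop below snapshots one register for both of
+	 * the VF's two queues, so the hard-coded regs_buff[] offsets advance
+	 * in steps of two (14..15 RDBAL, 16..17 RDBAH, and so on) and must
+	 * stay in step with the 45-word layout behind ixgbevf_get_regs_len().
+	 */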
+ for (i = 0; i < 2; i++) + regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); + for (i = 0; i < 2; i++) + regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); + for (i = 0; i < 2; i++) + regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); + + /* Receive */ + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); + + /* Transmit */ + for (i = 0; i < 2; i++) + regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); + for (i = 0; i < 2; i++) + regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); + for (i = 0; i < 2; i++) + regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); + +} + +static int ixgbevf_get_eeprom(struct net_device __always_unused *netdev, + struct ethtool_eeprom __always_unused *eeprom, + u8 __always_unused *bytes) +{ + return -EOPNOTSUPP; +} + +static int ixgbevf_set_eeprom(struct net_device __always_unused *netdev, + struct ethtool_eeprom __always_unused *eeprom, + u8 __always_unused *bytes) +{ + return -EOPNOTSUPP; +} + +static void ixgbevf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbevf_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +#if defined(HAVE_ETHTOOL_GET_SSET_COUNT) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + + drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN; +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT && HAVE_SWIOTLB_SKIP_CPU_SYNC */ +} + +static void ixgbevf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IXGBEVF_MAX_RXD; + ring->tx_max_pending = IXGBEVF_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int ixgbevf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; + u32 new_rx_count, new_tx_count; + int i, j, err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + return 0; + + while (test_and_set_bit(__IXGBEVF_RESETTING, 
&adapter->state)) + msleep(1); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_xdp_queues; i++) + adapter->xdp_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (new_tx_count != adapter->tx_ring_count) { + tx_ring = vmalloc((adapter->num_tx_queues + + adapter->num_xdp_queues) * sizeof(*tx_ring)); + if (!tx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + /* clone ring and setup updated count */ + tx_ring[i] = *adapter->tx_ring[i]; + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_tx_resources(&tx_ring[i]); + } + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; + } + } + + for (j = 0; j < adapter->num_xdp_queues; i++, j++) { + /* clone ring and setup updated count */ + tx_ring[i] = *adapter->xdp_ring[j]; + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_tx_resources(&tx_ring[i]); + } + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; + } + } + } + + if (new_rx_count != adapter->rx_ring_count) { + rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring)); + if (!rx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + /* clone ring and setup updated count */ + rx_ring[i] = *adapter->rx_ring[i]; +#ifdef HAVE_XDP_BUFF_RXQ + + /* Clear copied XDP RX-queue info */ + memset(&rx_ring[i].xdp_rxq, 0, + sizeof(rx_ring[i].xdp_rxq)); +#endif /* HAVE_XDP_BUFF_RXQ */ + + rx_ring[i].count = new_rx_count; + err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_rx_resources(&rx_ring[i]); + } + + vfree(rx_ring); + rx_ring = NULL; + + goto clear_reset; + } + } + } + + /* bring interface down to prepare for update */ + ixgbevf_down(adapter); + + /* Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbevf_free_tx_resources(adapter->tx_ring[i]); + *adapter->tx_ring[i] = tx_ring[i]; + } + adapter->tx_ring_count = new_tx_count; + + for (j = 0; j < adapter->num_xdp_queues; i++, j++) { + ixgbevf_free_tx_resources(adapter->xdp_ring[j]); + *adapter->xdp_ring[j] = tx_ring[i]; + } + adapter->xdp_ring_count = new_tx_count; + + vfree(tx_ring); + tx_ring = NULL; + } + + /* Rx */ + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbevf_free_rx_resources(adapter->rx_ring[i]); + *adapter->rx_ring[i] = rx_ring[i]; + } + adapter->rx_ring_count = new_rx_count; + + vfree(rx_ring); + rx_ring = NULL; + } + + /* restore interface using new values */ + ixgbevf_up(adapter); + +clear_reset: + /* free Tx resources if Rx error is encountered */ + if (tx_ring) { + for (i = 0; + i < adapter->num_tx_queues + adapter->num_xdp_queues; i++) + ixgbevf_free_tx_resources(&tx_ring[i]); + vfree(tx_ring); + } + + clear_bit(__IXGBEVF_RESETTING, &adapter->state); + return err; +} + +static void ixgbevf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 temp; + const 
struct rtnl_link_stats64 *net_stats; + unsigned int start; +#else + struct net_device_stats *net_stats; +#ifdef HAVE_NET_DEVICE_OPS + const struct net_device_ops *ops = netdev->netdev_ops; +#endif /* HAVE_NET_DEVICE_OPS */ +#endif /* HAVE_NDO_GET_STATS64 */ + struct ixgbevf_ring *ring; + int i, j; + char *p; + + ixgbevf_update_stats(adapter); +#ifdef HAVE_NDO_GET_STATS64 + net_stats = dev_get_stats(netdev, &temp); +#else +#ifdef HAVE_NET_DEVICE_OPS + net_stats = ops->ndo_get_stats(netdev); +#else + net_stats = netdev->get_stats(netdev); +#endif /* HAVE_NET_DEVICE_OPS */ +#endif /* HAVE_NDO_GET_STATS64 */ + for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { + switch (ixgbe_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *)net_stats + + ixgbe_gstrings_stats[i].stat_offset; + break; + case IXGBEVF_STATS: + p = (char *)adapter + + ixgbe_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + /* populate Tx queue data */ + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + + /* populate XDP queue data */ + for (j = 0; j < adapter->num_xdp_queues; j++) { + ring = adapter->xdp_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + i += 2; + } + + /* populate Rx queue data */ + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } +} + +static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ixgbe_gstrings_test, + IXGBEVF_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; 
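+			/* Editorial note: ethtool pairs these labels with
+			 * the values emitted by ixgbevf_get_ethtool_stats()
+			 * purely by position, so the order here (packets,
+			 * bytes, then the optional busy-poll counters) must
+			 * match the order in which data[] is filled there.
+			 */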
+#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, + "xdp_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "xdp_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + break; +#if defined(HAVE_ETHTOOL_GET_SSET_COUNT) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + case ETH_SS_PRIV_FLAGS: + memcpy(data, ixgbevf_priv_flags_strings, + IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT && HAVE_SWIOTLB_SKIP_CPU_SYNC */ + } +} + +static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ixgbevf_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
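+ *
+ * (Editorial example: the entry { IXGBE_VFRDBAL(0), 2, PATTERN_TEST,
+ * 0xFFFFFF80, 0xFFFFFF80 } below makes the harness touch both queue
+ * copies of RDBAL, at reg and reg + 0x40, writing each canned pattern
+ * through the write value and expecting to read back
+ * pattern & write & mask.)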
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default VF register test */ +static struct ixgbevf_reg_test reg_test_vf[] = { + { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, + { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { 0, 0, 0, 0, 0 } +}; + +static int +reg_pattern_test(struct ixgbevf_adapter *adapter, u32 r, u32 m, u32 w, + u64 *data) +{ + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + struct ixgbe_hw *hw = &adapter->hw; + u32 pat, val, before; + + if (IXGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + before = IXGBE_READ_REG(hw, r); + IXGBE_WRITE_REG(hw, r, _test[pat] & w); + val = IXGBE_READ_REG(hw, r); + if (val != (_test[pat] & w & m)) { + DPRINTK(DRV, ERR, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + r, val, _test[pat] & w & m); + *data = r; + IXGBE_WRITE_REG(hw, r, before); + return 1; + } + IXGBE_WRITE_REG(hw, r, before); + } + return 0; +} + +static int +reg_set_and_check(struct ixgbevf_adapter *adapter, u32 r, u32 m, u32 w, + u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 val, before; + + if (IXGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + before = IXGBE_READ_REG(hw, r); + IXGBE_WRITE_REG(hw, r, w & m); + val = IXGBE_READ_REG(hw, r); + if ((w & m) != (val & m)) { + DPRINTK(DRV, ERR, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + r, (val & m), (w & m)); + *data = r; + IXGBE_WRITE_REG(hw, r, before); + return 1; + } + IXGBE_WRITE_REG(hw, r, before); + return 0; +} + +static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbevf_reg_test *test; + int rc; + u32 i; + + if (IXGBE_REMOVED(hw->hw_addr)) { + DPRINTK(DRV, ERR, "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } + test = reg_test_vf; + + /* + * Perform the register test, looping through the test table + * until we either fail or reach the null entry. 
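+ * The all-zero sentinel entry { 0, 0, 0, 0, 0 } at the end of
+ * reg_test_vf[] is what terminates the while (test->reg) walk below.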
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + rc = 0; + switch (test->test_type) { + case PATTERN_TEST: + rc = reg_pattern_test(adapter, + test->reg + (i * 0x40), + test->mask, + test->write, + data); + break; + case SET_READ_TEST: + rc = reg_set_and_check(adapter, + test->reg + (i * 0x40), + test->mask, + test->write, + data); + break; + case WRITE_NO_TEST: + IXGBE_WRITE_REG(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + rc = reg_pattern_test(adapter, + test->reg + (i * 4), + test->mask, + test->write, + data); + break; + case TABLE64_TEST_LO: + rc = reg_pattern_test(adapter, + test->reg + (i * 8), + test->mask, + test->write, + data); + break; + case TABLE64_TEST_HI: + rc = reg_pattern_test(adapter, + test->reg + 4 + (i * 8), + test->mask, + test->write, + data); + break; + } + if (rc) + return rc; + } + test++; + } + + *data = 0; + return *data; +} + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) +{ + switch(stringset) { + case ETH_SS_TEST: + return IXGBEVF_TEST_LEN; + case ETH_SS_STATS: + return IXGBEVF_STATS_LEN; +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + case ETH_SS_PRIV_FLAGS: + return IXGBEVF_PRIV_FLAGS_STR_LEN; +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ + default: + return -EINVAL; + } +} +#else +static int ixgbevf_diag_test_count(struct net_device __always_unused *netdev) +{ + return IXGBEVF_TEST_LEN; +} + +static int ixgbevf_get_stats_count(struct net_device *netdev) +{ + return IXGBEVF_STATS_LEN; +} +#endif + +static void ixgbevf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + DPRINTK(DRV, ERR, "Adapter removed - test blocked\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + data[1] = 1; + return; + } + set_bit(__IXGBEVF_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + DPRINTK(HW, INFO, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + ixgbevf_close(netdev); + else + ixgbevf_reset(adapter); + + DPRINTK(HW, INFO, "register testing starting\n"); + if (ixgbevf_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbevf_reset(adapter); + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + if (if_running) + ixgbevf_open(netdev); + } else { + DPRINTK(HW, INFO, "online testing starting\n"); + /* Online tests */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int ixgbevf_nway_reset(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + + return 0; +} + +static int ixgbevf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 
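+	/* Editorial example: settings above 1 are stored pre-shifted for the
+	 * EITR register, whose low 3 bits are reserved. A request of 50 usecs
+	 * is stored as 50 << 2 == 200 by ixgbevf_set_coalesce() and reported
+	 * back here as 200 >> 2 == 50.
+	 */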
+ + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ixgbevf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + + /* don't accept tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count + && ec->tx_coalesce_usecs) + return -EINVAL; + + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbevf_write_eitr(q_vector); + } + + return 0; +} + +#ifdef ETHTOOL_GRXRINGS +#define UDP_RSS_FLAGS (IXGBEVF_FLAG_RSS_FIELD_IPV4_UDP | \ + IXGBEVF_FLAG_RSS_FIELD_IPV6_UDP) +static int ixgbevf_set_rss_hash_opt(struct ixgbevf_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 flags = adapter->flags; + + + if (hw->mac.type < ixgbe_mac_X550) + return -EOPNOTSUPP; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IXGBEVF_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IXGBEVF_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IXGBEVF_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IXGBEVF_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + 
return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + u32 vfmrqc; + + vfmrqc = IXGBE_READ_REG(hw, IXGBE_VFMRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + DPRINTK(DRV, WARNING, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + vfmrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + vfmrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | + IXGBE_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IXGBEVF_FLAG_RSS_FIELD_IPV4_UDP) + vfmrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IXGBEVF_FLAG_RSS_FIELD_IPV6_UDP) + vfmrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); + } + + return 0; +} + +static int ixgbevf_get_rss_hash_opts(struct ixgbevf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ixgbevf */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + if (adapter->flags & IXGBEVF_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + if (adapter->flags & IXGBEVF_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ixgbevf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (cmd->cmd == ETHTOOL_SRXFH) + ret = ixgbevf_set_rss_hash_opt(adapter, cmd); + + return ret; +} + +static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + __always_unused void *rule_locs) +#else + __always_unused u32 *rule_locs) +#endif +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + int ret; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_rx_queues; + break; + case ETHTOOL_GRXFH: + ret = ixgbevf_get_rss_hash_opts(adapter, info); + if (ret) + return ret; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* ETHTOOL_GRXRINGS */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +/** + * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. + * @hw: pointer to the HW structure + * @reta: buffer to fill with RETA contents. + * @num_rx_queues: Number of Rx queues configured for this port + * + * The "reta" buffer should be big enough to contain 32 registers. + * + * Returns: 0 on success. + * if API doesn't support this operation - (-EOPNOTSUPP). + */ +static int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, + int num_rx_queues) +{ + int err, i, j; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u32 *hw_reta = &msgbuf[1]; + u32 mask = 0; + + /* We have to use a mailbox for 82599 and x540 devices only. + * For these devices RETA has 128 entries. 
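+ * (Editorial note: 128 entries at 16 per DWORD is why the code below
+ * fetches dwords = IXGBEVF_82599_RETA_SIZE / 16 = 8 mailbox words and
+ * unpacks entry j of word i as (hw_reta[i] >> (2 * j)) & mask; the 2-bit
+ * packing is spelled out in the next sentence.)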
+ * Also these VFs support up to 4 RSS queues. Therefore PF will compress + * 16 RETA entries in each DWORD giving 2 bits to each entry. + */ + int dwords = IXGBEVF_82599_RETA_SIZE / 16; + + /* We support the RSS querying for 82599 and x540 devices only. + * Thus return an error if API doesn't support RETA querying or querying + * is not supported for this device type. + */ + switch (hw->api_version) { + case ixgbe_mbox_api_15: + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type < ixgbe_mac_X550_vf) + break; + /* fall through */ + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_GET_RETA; + + err = ixgbe_write_mbx(hw, msgbuf, 1, 0); + + if (err) + return err; + + err = ixgbe_poll_mbx(hw, msgbuf, dwords + 1, 0); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. + */ + if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS)) + return IXGBE_ERR_MBX; + + /* ixgbevf doesn't support more than 2 queues at the moment */ + if (num_rx_queues > 1) + mask = 0x1; + + for (i = 0; i < dwords; i++) + for (j = 0; j < 16; j++) + reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask; + + return 0; +} + +/** + * ixgbevf_get_rss_key_locked - get the RSS Random Key + * @hw: pointer to the HW structure + * @rss_key: buffer to fill with RSS Hash Key contents. + * + * The "rss_key" buffer should be big enough to contain 10 registers. + * + * Returns: 0 on success. + * if API doesn't support this operation - (-EOPNOTSUPP). + */ +static int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) +{ + int err; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + + /* We currently support the RSS Random Key retrieval for 82599 and x540 + * devices only. + * + * Thus return an error if API doesn't support RSS Random Key retrieval + * or if the operation is not supported for this device type. + */ + switch (hw->api_version) { + case ixgbe_mbox_api_15: + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type < ixgbe_mac_X550_vf) + break; + /* fall through */ + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_GET_RSS_KEY; + err = ixgbe_write_mbx(hw, msgbuf, 1, 0); + + if (err) + return err; + + err = ixgbe_poll_mbx(hw, msgbuf, 11, 0); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. 
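+ * (Editorial note: IXGBE_VT_MSGTYPE_SUCCESS and IXGBE_VT_MSGTYPE_FAILURE
+ * are the PF's ACK/NAK flags; a reply carrying neither is treated as a
+ * transport error and surfaces as IXGBE_ERR_MBX below.)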
+ */ + if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS)) + return IXGBE_ERR_MBX; + + memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); + + return 0; +} + +static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) + return IXGBEVF_X550_VFRETA_SIZE; + + return IXGBEVF_82599_RETA_SIZE; +} + +static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) +{ + return IXGBEVF_RSS_HASH_KEY_SIZE; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#else +static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int err = 0; + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + +#endif + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { + if (key) + memcpy(key, adapter->rss_key, + ixgbevf_get_rxfh_key_size(netdev)); + + if (indir) { + int i; + + for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + } + } else { + /* If neither indirection table nor hash key was requested + * - just return a success avoiding taking any locks. + */ + if (!indir && !key) + return 0; + + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + } + + return err; +} + +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#if defined(HAVE_ETHTOOL_GET_SSET_COUNT) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) +static u32 ixgbevf_get_priv_flags(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) + priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IXGBEVF_FLAGS_LEGACY_RX; + if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX) + flags |= IXGBEVF_FLAGS_LEGACY_RX; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + } + + return 0; +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT && HAVE_SWIOTLB_SKIP_CPU_SYNC */ +static struct ethtool_ops ixgbevf_ethtool_ops = { +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + .get_link_ksettings = ixgbevf_get_link_ksettings, + .set_link_ksettings = ixgbevf_set_link_ksettings, +#else + .get_settings = ixgbevf_get_settings, + .set_settings = ixgbevf_set_settings, +#endif + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom = ixgbevf_get_eeprom, + .set_eeprom = ixgbevf_set_eeprom, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = ethtool_op_get_ts_info, +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, + .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, + .get_rxfh = ixgbevf_get_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + 
.get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = ixgbevf_get_rx_csum, + .set_rx_csum = ixgbevf_set_rx_csum, + .get_tx_csum = ixgbevf_get_tx_csum, + .set_tx_csum = ixgbevf_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ixgbevf_set_tso, +#endif +#endif + .self_test = ixgbevf_diag_test, +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + .get_sset_count = ixgbevf_get_sset_count, +#else + .self_test_count = ixgbevf_diag_test_count, + .get_stats_count = ixgbevf_get_stats_count, +#endif + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = ixgbevf_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = ixgbevf_get_rxnfc, + .set_rxnfc = ixgbevf_set_rxnfc, +#endif +#if defined(HAVE_ETHTOOL_GET_SSET_COUNT) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + .get_priv_flags = ixgbevf_get_priv_flags, + .set_priv_flags = ixgbevf_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT && HAVE_SWIOTLB_SKIP_CPU_SYNC */ +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext ixgbevf_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = ethtool_op_get_ts_info, +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, + .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, + .get_rxfh = ixgbevf_get_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +void ixgbevf_set_ethtool_ops(struct net_device *netdev) +{ +#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &ixgbevf_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &ixgbevf_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} +#endif /* SIOCETHTOOL */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 076f2da36f27..ac1e9a2fe61b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,14 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ******************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include -#include #include #include #include @@ -17,42 +14,78 @@ #include #include #include -#include #include -#include +#include +#ifdef NETIF_F_TSO #include +#ifdef NETIF_F_TSO6 +#include #include +#endif +#endif +#ifdef SIOCETHTOOL #include -#include +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) #include -#include -#include -#include -#include -#include -#include +#endif #include "ixgbevf.h" -const char ixgbevf_driver_name[] = "ixgbevf"; -static const char ixgbevf_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; +#ifdef HAVE_XDP_SUPPORT +#include +#include +#include -#define DRV_VERSION "4.1.0-k" +#endif /* HAVE_XDP_SUPPORT */ +#define RELEASE_TAG + +#define DRV_VERSION __stringify(4.9.3) RELEASE_TAG +#define DRV_SUMMARY __stringify(Intel(R) 10GbE PCI Express Virtual Function Driver) const char ixgbevf_driver_version[] = DRV_VERSION; -static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2018 Intel Corporation."; +char ixgbevf_driver_name[] = "ixgbevf"; +static const char ixgbevf_driver_string[] = DRV_SUMMARY; +static const char ixgbevf_copyright[] = "Copyright(c) 1999 - 2020 Intel Corporation."; + +static struct ixgbevf_info ixgbevf_82599_vf_info = { + .mac = ixgbe_mac_82599_vf, + .flags = 0, +}; + +static struct ixgbevf_info ixgbevf_X540_vf_info = { + .mac = ixgbe_mac_X540_vf, + .flags = 0, +}; + +static struct ixgbevf_info ixgbevf_X550_vf_info = { + .mac = ixgbe_mac_X550_vf, + .flags = 0, +}; + +static struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .flags = 0, +}; + +static struct ixgbevf_info ixgbevf_X550EM_a_vf_info = { + .mac = ixgbe_mac_X550EM_a_vf, + .flags = 0, +}; + +enum ixgbevf_boards { + board_82599_vf, + board_X540_vf, + board_X550_vf, + board_X550EM_x_vf, + board_X550EM_a_vf, +}; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, - [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info, [board_X540_vf] = &ixgbevf_X540_vf_info, - [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info, [board_X550_vf] = &ixgbevf_X550_vf_info, - [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, - [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, - [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, + [board_X550EM_a_vf] = &ixgbevf_X550EM_a_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -63,39 +96,45 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static const struct pci_device_id ixgbevf_pci_tbl[] = { +static struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv }, +#if IS_ENABLED(CONFIG_PCI_HYPERV) + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf }, +#endif {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv }, +#if IS_ENABLED(CONFIG_PCI_HYPERV) + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf }, +#endif {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, +#if IS_ENABLED(CONFIG_PCI_HYPERV) + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), 
board_X550_vf }, +#endif {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, +#if IS_ENABLED(CONFIG_PCI_HYPERV) + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf }, +#endif + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_X550EM_a_vf }, +#if IS_ENABLED(CONFIG_PCI_HYPERV) + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF_HV), board_X550EM_a_vf }, +#endif /* required last entry */ - {0, } + { .device = 0 } }; MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -static struct workqueue_struct *ixgbevf_wq; +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) { if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) - queue_work(ixgbevf_wq, &adapter->service_task); + schedule_work(&adapter->service_task); } static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) @@ -119,82 +158,123 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; - if (!hw->hw_addr) + if (IXGBE_REMOVED(hw->hw_addr)) return; hw->hw_addr = NULL; - dev_err(&adapter->pdev->dev, "Adapter removed\n"); + DPRINTK(DRV, ERR, "Adapter removed\n"); if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) ixgbevf_service_event_schedule(adapter); } static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) { - u32 value; + u32 value; - /* The following check not only optimizes a bit by not - * performing a read on the status register when the - * register just read was a status register read that - * returned IXGBE_FAILED_READ_REG. It also blocks any - * potential recursion. - */ - if (reg == IXGBE_VFSTATUS) { - ixgbevf_remove_adapter(hw); - return; - } - value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); - if (value == IXGBE_FAILED_READ_REG) - ixgbevf_remove_adapter(hw); + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned IXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. 
+ */ + if (reg == IXGBE_VFSTATUS) { + ixgbevf_remove_adapter(hw); + return; + } + value = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbevf_remove_adapter(hw); } -u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) +static u32 ixgbevf_validate_register_read(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); + int i; u32 value; + u8 __iomem *reg_addr; + reg_addr = READ_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; + for (i = 0; i < IXGBE_DEAD_READ_RETRIES; ++i) { + value = readl(reg_addr + reg); + if (value != IXGBE_DEAD_READ_REG) + break; + } + if (value == IXGBE_DEAD_READ_REG) + pr_info("%s: register %x read unchanged\n", __func__, reg); + else + pr_info("%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); if (IXGBE_REMOVED(reg_addr)) return IXGBE_FAILED_READ_REG; value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) ixgbevf_check_remove(hw, reg); + if (unlikely(value == IXGBE_DEAD_READ_REG)) + value = ixgbevf_validate_register_read(hw, reg); return value; } -/** - * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors - * @adapter: pointer to adapter struct - * @direction: 0 for Rx, 1 for Tx, -1 for other causes - * @queue: queue to map the corresponding interrupt to - * @msix_vector: the vector to map to the corresponding queue - **/ -static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, - u8 queue, u8 msix_vector) -{ - u32 ivar, index; - struct ixgbe_hw *hw = &adapter->hw; - - if (direction == -1) { - /* other causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); - ivar &= ~0xFF; - ivar |= msix_vector; - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); - } else { - /* Tx or Rx causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - index = ((16 * (queue & 1)) + (8 * direction)); - ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); - } -} - static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) { return ring->stats.packets; } +#if IS_ENABLED(CONFIG_PCI_HYPERV) +/** + * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: Unused in this implementation + * @enable_addr: Unused in this implementation + * + * We don't really allow setting the device MAC address. However, + * if the address being set is the permanent MAC address we will + * permit that. + **/ +s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq, u32 enable_addr) +{ + if (ether_addr_equal(addr, hw->mac.perm_addr)) + return 0; + + return -EOPNOTSUPP; +} + +/** + * Hyper-V variant; the VF/PF communication is through the PCI + * config space. 
+ * @hw: pointer to hardware structure + */ +s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = hw->back; +#if IS_ENABLED(CONFIG_PCI_MMCONFIG) + int i; + + for (i = 0; i < 6; i++) + pci_read_config_byte(adapter->pdev, + (i + IXGBE_HV_RESET_OFFSET), + &hw->mac.perm_addr[i]); + return 0; +#else + + dev_err(&adapter->pdev->dev, + "PCI_MMCONFIG needs to be enabled for Hyper-V\n"); + return -EOPNOTSUPP; +#endif +} + +#endif static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) { struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); @@ -218,7 +298,8 @@ static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) clear_check_for_tx_hang(tx_ring); - /* Check for a hung queue, but be thorough. This verifies + /* + * Check for a hung queue, but be thorough. This verifies * that a transmit has been completed since the previous * check AND there is at least one packet pending. The * ARMED bit is set to indicate a potential hang. @@ -249,14 +330,20 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) /** * ixgbevf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure + * @txqueue: specific tx queue **/ +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else static void ixgbevf_tx_timeout(struct net_device *netdev) +#endif { struct ixgbevf_adapter *adapter = netdev_priv(netdev); ixgbevf_tx_timeout_reset(adapter); } + /** * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: board private structure @@ -269,7 +356,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; + unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean; @@ -300,14 +387,16 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; - if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) - total_ipsec++; /* free the skb */ +#ifdef HAVE_XDP_SUPPORT if (ring_is_xdp(tx_ring)) page_frag_free(tx_buffer->data); else napi_consume_skb(tx_buffer->skb, napi_budget); +#else + napi_consume_skb(tx_buffer->skb, napi_budget); +#endif /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -364,7 +453,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; - adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; @@ -378,7 +466,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, " next_to_use <%x>\n" " next_to_clean <%x>\n" "tx_buffer_info[next_to_clean]\n" - " next_to_watch <%p>\n" + " next_to_watch <%p>\n" " eop_desc->wb.status <%x>\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", @@ -403,6 +491,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, if (ring_is_xdp(tx_ring)) return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { @@ -410,7 +501,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, * sees the new next_to_clean. */ smp_mb(); - +#ifdef HAVE_TX_MQ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && !test_bit(__IXGBEVF_DOWN, &adapter->state)) { @@ -418,11 +509,68 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; } +#else + if (netif_queue_stopped(tx_ring->netdev) && + !test_bit(__IXGBEVF_DOWN, &adapter->state)) { + netif_wake_queue(tx_ring->netdev); + ++tx_ring->tx_stats.restart_queue; + } +#endif } return !!budget; } +#ifdef HAVE_VLAN_RX_REGISTER +static void ixgbevf_rx_vlan(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && +#else + if ((dev->features & NETIF_F_HW_VLAN_RX) && +#endif + ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) + IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); + else + IXGBE_CB(skb)->vid = 0; +} + +/** + * ixgbevf_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + u16 vlan_tag = IXGBE_CB(skb)->vid; +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + struct vlan_group **vlgrp = &adapter->vlgrp; + + if (vlan_tag & VLAN_VID_MASK && *vlgrp != NULL) { + if (q_vector->netpoll_rx) + vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); + else + vlan_gro_receive(&q_vector->napi, + *vlgrp, vlan_tag, skb); + } else { +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ + if (q_vector->netpoll_rx) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + } +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ +} +#endif /* HAVE_VLAN_RX_REGISTER */ + /** * ixgbevf_rx_skb - Helper function to determine proper Rx method * @q_vector: structure containing interrupt and ring information @@ -431,9 +579,32 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb) { - napi_gro_receive(&q_vector->napi, skb); +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ixgbevf_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif /* HAVE_NDO_BUSY_POLL */ +#ifdef HAVE_VLAN_RX_REGISTER + ixgbevf_receive_skb(q_vector, skb); +#else +#ifndef HAVE_NDO_BUSY_POLL + if (q_vector->netpoll_rx) + netif_rx(skb); + else +#endif + napi_gro_receive(&q_vector->napi, skb); + +#endif +#ifndef NETIF_F_GRO + q_vector->adapter->netdev->last_rx = jiffies; +#endif } +#ifdef NETIF_F_RXHASH #define IXGBE_RSS_L4_TYPES_MASK \ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ @@ -459,13 +630,14 @@ static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } +#endif /* NETIF_F_RXHASH */ /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containig ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - **/ + */ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -487,6 +659,15 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, return; if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. + */ + if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && + test_bit(__IXGBEVF_RX_CSUM_UDP_ZERO_ERR, &ring->state)) + return; + ring->rx_stats.csum_err++; return; } @@ -504,14 +685,18 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, * This function checks the ring, descriptor, and packet information in * order to populate the checksum, VLAN, protocol, and other fields within * the skb. - **/ + */ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { +#ifdef NETIF_F_RXHASH ixgbevf_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ ixgbevf_rx_checksum(rx_ring, rx_desc, skb); - +#ifdef HAVE_VLAN_RX_REGISTER + ixgbevf_rx_vlan(rx_ring, rx_desc, skb); +#else if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); unsigned long *active_vlans = netdev_priv(rx_ring->netdev); @@ -519,9 +704,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, if (test_bit(vid & VLAN_VID_MASK, active_vlans)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - - if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) - ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); +#endif skb->protocol = eth_type_trans(skb, rx_ring->netdev); } @@ -551,6 +734,13 @@ static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct sk_buff *skb) { +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif if (ixgbevf_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ ixgbevf_reuse_rx_page(rx_ring, rx_buffer); @@ -562,7 +752,11 @@ static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else IXGBEVF_RX_DMA_ATTR); +#endif __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } @@ -608,6 +802,12 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, { struct page *page = bi->page; dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) @@ -623,7 +823,12 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ixgbevf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); + 
DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + IXGBEVF_RX_DMA_ATTR); +#endif /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use @@ -709,7 +914,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, * such as IA-64). */ wmb(); - ixgbevf_write_tail(rx_ring, i); + writel(i, rx_ring->tail); } } @@ -730,7 +935,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. - **/ + */ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -743,7 +948,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, if (unlikely(ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { struct net_device *netdev = rx_ring->netdev; - if (!(netdev->features & NETIF_F_RXALL)) { dev_kfree_skb_any(skb); return true; @@ -757,13 +961,12 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, return false; } -/** - * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring +/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter - **/ + */ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *old_buff) { @@ -799,8 +1002,13 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif return false; + #else #define IXGBEVF_LAST_OFFSET \ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) @@ -809,7 +1017,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) return false; #endif - +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. @@ -819,6 +1027,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) rx_buffer->pagecnt_bias = USHRT_MAX; } +#else /* !HAVE_PAGE_COUNT_BULK_UPDATE */ + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } + +#endif /* !HAVE_PAGE_COUNT_BULK_UPDATE */ return true; } @@ -830,7 +1048,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) * @size: size of buffer to be added * * This function will add the data contained in rx_buffer->page to the skb. 
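+ * The truesize added below accounts for the whole buffer the fragment
+ * occupies (half a page, or the padded aligned size on PAGE_SIZE >= 8192
+ * systems), not just @size, so socket memory accounting stays accurate.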
- **/ + */ static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct sk_buff *skb, @@ -843,6 +1061,7 @@ static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) : SKB_DATA_ALIGN(size); #endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); #if (PAGE_SIZE < 8192) @@ -850,6 +1069,7 @@ static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, #else rx_buffer->page_offset += truesize; #endif + } static @@ -930,12 +1150,18 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { +#ifdef HAVE_XDP_BUFF_DATA_META unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#else + void *va = xdp->data; +#endif /* HAVE_XDP_BUFF_DATA_META */ #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; #else @@ -950,9 +1176,9 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, * have a consumer accessing first few bytes of meta data, * and then actual data. */ - prefetch(xdp->data_meta); + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data_meta + L1_CACHE_BYTES); + prefetch(va + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ @@ -963,8 +1189,10 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); +#ifdef HAVE_XDP_BUFF_DATA_META if (metasize) skb_metadata_set(skb, metasize); +#endif /* HAVE_XDP_BUFF_DATA_META */ /* update buffer offset */ #if (PAGE_SIZE < 8192) @@ -976,10 +1204,15 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, return skb; } +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + #define IXGBEVF_XDP_PASS 0 #define IXGBEVF_XDP_CONSUMED 1 #define IXGBEVF_XDP_TX 2 +#ifdef HAVE_XDP_SUPPORT static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, struct xdp_buff *xdp) { @@ -1020,7 +1253,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); - context_desc->fceof_saidx = 0; + context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); @@ -1057,11 +1290,14 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, return IXGBEVF_XDP_TX; } -static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring, - struct xdp_buff *xdp) +#endif /* HAVE_XDP_SUPPORT */ +static struct sk_buff * +ixgbevf_run_xdp(struct ixgbevf_adapter __maybe_unused *adapter, + struct ixgbevf_ring __maybe_unused *rx_ring, + struct xdp_buff __maybe_unused *xdp) { int result = IXGBEVF_XDP_PASS; +#ifdef HAVE_XDP_SUPPORT struct ixgbevf_ring *xdp_ring; struct bpf_prog *xdp_prog; u32 act; @@ -1092,6 +1328,8 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, } xdp_out: rcu_read_unlock(); +#endif /* HAVE_XDP_SUPPORT */ + return ERR_PTR(-result); } @@ -1113,8 +1351,8 @@ static void ixgbevf_rx_buffer_flip(struct 
ixgbevf_ring *rx_ring, } static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, - struct ixgbevf_ring *rx_ring, - int budget) + struct ixgbevf_ring *rx_ring, + int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct ixgbevf_adapter *adapter = q_vector->adapter; @@ -1123,7 +1361,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, bool xdp_xmit = false; struct xdp_buff xdp; + xdp.data = NULL; + xdp.data_end = NULL; +#ifdef HAVE_XDP_BUFF_RXQ xdp.rxq = &rx_ring->xdp_rxq; +#endif /* HAVE_XDP_BUFF_RXQ */ while (likely(total_rx_packets < budget)) { struct ixgbevf_rx_buffer *rx_buffer; @@ -1147,13 +1389,15 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, */ rmb(); + /* retrieve a buffer from the ring */ rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); - /* retrieve a buffer from the ring */ if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META xdp.data_meta = xdp.data; +#endif /* HAVE_XDP_BUFF_DATA_META */ xdp.data_hard_start = xdp.data - ixgbevf_rx_offset(rx_ring); xdp.data_end = xdp.data + size; @@ -1173,9 +1417,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, total_rx_bytes += size; } else if (skb) { ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC } else if (ring_uses_build_skb(rx_ring)) { skb = ixgbevf_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ } else { skb = ixgbevf_construct_skb(rx_ring, rx_buffer, &xdp, rx_desc); @@ -1208,7 +1454,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * source pruning. */ if ((skb->pkt_type == PACKET_BROADCAST || - skb->pkt_type == PACKET_MULTICAST) && + skb->pkt_type == PACKET_MULTICAST) && ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); @@ -1238,7 +1484,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * know there are new descriptors to fetch. 
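+ * (wmb() orders the descriptor writes in coherent memory ahead of the
+ * MMIO doorbell below, so the device cannot fetch stale descriptors)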
*/ wmb(); - ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use); + writel(xdp_ring->next_to_use, xdp_ring->tail); } u64_stats_update_begin(&rx_ring->syncp); @@ -1247,6 +1493,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, u64_stats_update_end(&rx_ring->syncp); q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; + q_vector->netpoll_rx = false; return total_rx_packets; } @@ -1275,10 +1522,16 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (budget <= 0) return budget; +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + if (!ixgbevf_qv_lock_napi(q_vector)) + return budget; +#endif /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling - */ + * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) per_ring_budget = max(budget/q_vector->rx.count, 1); else @@ -1292,29 +1545,34 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) clean_complete = false; } +#ifdef HAVE_NDO_BUSY_POLL + ixgbevf_qv_unlock_napi(q_vector); + +#endif +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; + +#endif /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; + /* all work done, exit the polling mode */ + napi_complete_done(napi, work_done); + if (adapter->rx_itr_setting == 1) + ixgbevf_set_itr(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, + BIT(q_vector->v_idx)); - /* Exit the polling mode, but don't re-enable interrupts if stack might - * poll us due to busy-polling - */ - if (likely(napi_complete_done(napi, work_done))) { - if (adapter->rx_itr_setting == 1) - ixgbevf_set_itr(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && - !test_bit(__IXGBEVF_REMOVING, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, - BIT(q_vector->v_idx)); - } - - return min(work_done, budget - 1); + return 0; } /** * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information - **/ + */ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; @@ -1322,7 +1580,8 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; - /* set the WDIS bit to not clear the timer bits and cause an + /* + * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; @@ -1330,6 +1589,71 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbevf_busy_poll_recv(struct napi_struct *napi) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int found = 0; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbevf_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbevf_for_each_ring(ring, q_vector->rx) { + found = ixgbevf_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + 
ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbevf_qv_unlock_poll(q_vector); + + return found; +} +#endif /* HAVE_NDO_BUSY_POLL */ + +/** + * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + ivar &= ~0xFF; + ivar |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); + } else { + /* tx or rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); + } +} + /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -1340,17 +1664,16 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) { struct ixgbevf_q_vector *q_vector; - int q_vectors, v_idx; + int v_idx; - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; adapter->eims_enable_mask = 0; - /* Populate the IVAR table and set the ITR values to the + /* + * Populate the IVAR table and set the ITR values to the * corresponding register. */ - for (v_idx = 0; v_idx < q_vectors; v_idx++) { + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { struct ixgbevf_ring *ring; - q_vector = adapter->q_vector[v_idx]; ixgbevf_for_each_ring(ring, q_vector->rx) @@ -1360,13 +1683,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); if (q_vector->tx.ring && !q_vector->rx.ring) { - /* Tx only vector */ + /* tx only vector */ if (adapter->tx_itr_setting == 1) q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { - /* Rx or Rx/Tx vector */ + /* rx or rx/tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else @@ -1397,13 +1720,15 @@ enum latency_range { * @q_vector: structure containing interrupt and ring information * @ring_container: structure containing ring performance data * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
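+ * bytes_perint below is measured in bytes per usec, i.e. MB/s, so the
+ * 10 and 20 cutoffs in the switch correspond to the 10MB/s and 20MB/s
+ * boundaries in the rate table in the function body.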
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ixgbevf_param.c) **/ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring_container *ring_container) @@ -1417,32 +1742,33 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, if (packets == 0) return; /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) * 100-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? */ timepassed_us = q_vector->itr >> 2; if (timepassed_us == 0) return; bytes_perint = bytes / timepassed_us; /* bytes/usec */ switch (itr_setting) { case lowest_latency: if (bytes_perint > 10) itr_setting = low_latency; break; case low_latency: if (bytes_perint > 20) itr_setting = bulk_latency; else if (bytes_perint <= 10) itr_setting = lowest_latency; break; case bulk_latency: if (bytes_perint <= 20) itr_setting = low_latency; break; } @@ -1491,7 +1817,7 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) } } -static irqreturn_t ixgbevf_msix_other(int irq, void *data) +static irqreturn_t ixgbevf_msix_other(int __always_unused irq, void *data) { struct ixgbevf_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; @@ -1510,7 +1836,7 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data) * @irq: unused * @data: pointer to our q_vector struct for this interrupt vector **/ -static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) +static irqreturn_t ixgbevf_msix_clean_rings(int __always_unused irq, void *data) { struct ixgbevf_q_vector *q_vector = data; @@ -1531,11 +1857,10 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; unsigned int ri = 0, ti = 0; int vector, err; - for (vector = 0; vector < q_vectors; vector++) { + for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; @@ -1556,9 +1881,9 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, q_vector->name, q_vector); if (err) { - hw_dbg(&adapter->hw, - "request_irq failed for MSIX interrupt Error: %d\n", - err); + DPRINTK(PROBE, ERR, + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); goto free_queue_irqs; } } @@ -1566,11 +1891,13 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { - hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", - err); + DPRINTK(PROBE, ERR, + "request_irq for msix_other failed: %d\n", err); goto free_queue_irqs; } + adapter->irqs_ready = true; + return 0; free_queue_irqs: @@ -1581,7 +1908,7 @@ free_queue_irqs: } /* This failure is non-recoverable - it indicates the system is * out of MSIX vector resources and the VF driver cannot run - * without them. Set the number of msix vectors to zero + * without them. Set the number of q vectors to zero
The error * will be returned to the user indicating device open failed. * Any further attempts to force the driver to open will also @@ -1589,7 +1916,7 @@ free_queue_irqs: * reload it again. If the system has recovered some MSIX * vectors then it may succeed. */ - adapter->num_msix_vectors = 0; + adapter->num_q_vectors = 0; return err; } @@ -1602,36 +1929,40 @@ free_queue_irqs: **/ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) { - int err = ixgbevf_request_msix_irqs(adapter); + int err = 0; + + err = ixgbevf_request_msix_irqs(adapter); if (err) - hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); + DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); return err; } static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) { - int i, q_vectors; + int vector; if (!adapter->msix_entries) return; - q_vectors = adapter->num_msix_vectors; - i = q_vectors - 1; + if (!adapter->irqs_ready) + return; - free_irq(adapter->msix_entries[i].vector, adapter); - i--; + adapter->irqs_ready = false; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; - for (; i >= 0; i--) { /* free only the irqs that were actually requested */ - if (!adapter->q_vector[i]->rx.ring && - !adapter->q_vector[i]->tx.ring) + if (!q_vector->rx.ring && !q_vector->tx.ring) continue; - free_irq(adapter->msix_entries[i].vector, - adapter->q_vector[i]); + free_irq(entry->vector, q_vector); } + + free_irq(adapter->msix_entries[vector].vector, adapter); } /** @@ -1641,7 +1972,7 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int i; + int vector; IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); @@ -1649,8 +1980,10 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) IXGBE_WRITE_FLUSH(hw); - for (i = 0; i < adapter->num_msix_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector].vector); } /** @@ -1695,6 +2028,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0); + /* enable relaxed ordering */ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx), (IXGBE_DCA_TXCTRL_DESC_RRO_EN | @@ -1709,15 +2043,21 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, ring->next_to_clean = 0; ring->next_to_use = 0; - /* In order to avoid issues WTHRESH + PTHRESH should always be equal + /* set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when ITR is 0 as it could cause false TX hangs + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal * to or less than the number of on chip descriptors, which is * currently 40. 
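+ * (with the values programmed below, WTHRESH(8) + PTHRESH(32) = 40,
+ * exactly that limit)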
*/ - txdctl |= (8 << 16); /* WTHRESH = 8 */ + if (!ring->q_vector || (ring->q_vector->itr < 8)) + txdctl |= (1 << 16); /* WTHRESH = 1 */ + else + txdctl |= (8 << 16); /* WTHRESH = 8 */ /* Setting PTHRESH to 32 both improves performance */ - txdctl |= (1u << 8) | /* HTHRESH = 1 */ - 32; /* PTHRESH = 32 */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ + 32; /* PTHRESH = 32 */ /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, @@ -1730,11 +2070,11 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, /* poll to verify queue is enabled */ do { - usleep_range(1000, 2000); + msleep(1); txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) - hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); + DPRINTK(HW, DEBUG, "Could not enable Tx Queue %d\n", reg_idx); } /** @@ -1754,8 +2094,6 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); } -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 - static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring, int index) { @@ -1806,15 +2144,16 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); - /* the hardware may take up to 100us to really disable the Rx queue */ + /* the hardware may take up to 100us to really disable the rx queue */ do { udelay(10); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) - pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", - reg_idx); + DPRINTK(PROBE, ERR, + "RXDCTL.ENABLE queue %d not cleared while polling\n", + reg_idx); } static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, @@ -1833,8 +2172,9 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) - pr_err("RXDCTL.ENABLE queue %d not set while polling\n", - reg_idx); + DPRINTK(PROBE, ERR, + "RXDCTL.ENABLE queue %d not set while polling\n", + reg_idx); } /** @@ -1842,7 +2182,7 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, * @adapter: device handle * * Allocates and initializes the RSS key if it is not allocated. 
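+ * (the key is IXGBEVF_RSS_HASH_KEY_SIZE bytes, the same buffer that
+ * ixgbevf_get_rxfh() later copies back out to ethtool)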
- **/ + */ static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter) { u32 *rss_key; @@ -1868,7 +2208,7 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) /* Fill out hash function seeds */ for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) @@ -1884,12 +2224,12 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) } /* Perform hash on these packet types */ - vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 | - IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP | - IXGBE_VFMRQC_RSS_FIELD_IPV6 | - IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP; + vfmrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; - vfmrqc |= IXGBE_VFMRQC_RSSEN; + vfmrqc |= IXGBE_MRQC_RSSEN; IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); } @@ -1963,6 +2303,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring) { @@ -1986,6 +2327,7 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, } } +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ /** * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * @adapter: board private structure @@ -2003,12 +2345,10 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_setup_vfmrqc(adapter); spin_lock_bh(&adapter->mbx_lock); - /* notify the PF of our intent to use this size of frame */ ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); spin_unlock_bh(&adapter->mbx_lock); if (ret) - dev_err(&adapter->pdev->dev, - "Failed to set MTU at %d\n", netdev->mtu); + DPRINTK(HW, DEBUG, "Failed to set MTU at %d\n", netdev->mtu); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -2016,86 +2356,207 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ixgbevf_set_rx_buffer_len(adapter, rx_ring); +#endif ixgbevf_configure_rx_ring(adapter, rx_ring); } } -static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) +#ifdef HAVE_VLAN_RX_REGISTER +static void ixgbevf_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int err; + int i, j; + u32 ctrl; + + adapter->vlgrp = grp; + + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl); + } +} +#endif /* HAVE_VLAN_RX_REGISTER */ + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX +static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ +static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else +static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif +{ + struct 
ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; +#ifndef HAVE_NETDEV_VLAN_FEATURES + struct net_device *v_netdev; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + int ret_val; spin_lock_bh(&adapter->mbx_lock); /* add VID to filter table */ - err = hw->mac.ops.set_vfta(hw, vid, 0, true); + ret_val = hw->mac.ops.set_vfta(hw, vid, 0, true, false); spin_unlock_bh(&adapter->mbx_lock); - /* translate error return types so error makes sense */ - if (err == IXGBE_ERR_MBX) - return -EIO; + if (ret_val) { + netdev_err(netdev, "VF could not set VLAN %d\n", vid); + } else { +#ifndef HAVE_NETDEV_VLAN_FEATURES - if (err == IXGBE_ERR_INVALID_ARGUMENT) - return -EACCES; + /* + * Copy feature flags from netdev to the vlan netdev for this + * vid. This allows things like TSO to bubble down to our + * vlan device. + */ + if (adapter->vlgrp) { + v_netdev = vlan_group_get_device(adapter->vlgrp, vid); + if (v_netdev) { + v_netdev->features |= adapter->netdev->features; + vlan_group_set_device(adapter->vlgrp, + vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifndef HAVE_VLAN_RX_REGISTER + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); +#endif + } - set_bit(vid, adapter->active_vlans); - - return err; +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return ret_val; +#endif } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int err; + +#ifndef HAVE_NETDEV_VLAN_FEATURES + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + ixgbevf_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + ixgbevf_irq_enable(adapter); +#endif spin_lock_bh(&adapter->mbx_lock); /* remove VID from filter table */ - err = hw->mac.ops.set_vfta(hw, vid, 0, false); + hw->mac.ops.set_vfta(hw, vid, 0, false, false); spin_unlock_bh(&adapter->mbx_lock); +#ifndef HAVE_VLAN_RX_REGISTER clear_bit(vid, adapter->active_vlans); - - return err; +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif } static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) { u16 vid; +#ifdef HAVE_VLAN_RX_REGISTER + ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp); + + if (adapter->vlgrp) { + for (vid = 0; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbevf_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), + vid); +#else + ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ + } + } +#else /* !HAVE_VLAN_RX_REGISTER */ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgbevf_vlan_rx_add_vid(adapter->netdev, - htons(ETH_P_8021Q), vid); +#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbevf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), + vid); +#else + ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); +#endif +#endif /* HAVE_VLAN_RX_REGISTER */ +} +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX*/ + +static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw __maybe_unused *hw, u8 **mc_addr_ptr, + u32 *vmdq) +{ +#ifdef 
NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *mc_ptr; +#else + struct dev_mc_list *mc_ptr; +#endif + u8 *addr = *mc_addr_ptr; + *vmdq = 0; + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } +#else + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); + if (mc_ptr->next) + *mc_addr_ptr = mc_ptr->next->dmi_addr; +#endif + else + *mc_addr_ptr = NULL; + + return addr; } +#ifdef NETDEV_HW_ADDR_T_UNICAST static int ixgbevf_write_uc_addr_list(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int count = 0; - if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); - return -ENOSPC; - } - if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; - netdev_for_each_uc_addr(ha, netdev) { hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); udelay(200); } } else { - /* If the list is empty then send message to PF driver to - * clear all MAC VLANs on this VF. + /* + * If the list is empty then send message to PF driver to + * clear all macvlans on this VF. */ hw->mac.ops.set_uc_addr(hw, 0, NULL); } @@ -2103,6 +2564,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) return count; } +#endif /** * ixgbevf_set_rx_mode - Multicast and unicast set * @netdev: network interface device structure @@ -2118,6 +2580,8 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; unsigned int flags = netdev->flags; int xcast_mode; + u8 *addr_list = NULL; + int addr_count = 0; /* request the most inclusive mode we need */ if (flags & IFF_PROMISC) @@ -2129,14 +2593,30 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) else xcast_mode = IXGBEVF_XCAST_MODE_NONE; + /* reprogram multicast list */ + addr_count = netdev_mc_count(netdev); + if (addr_count) +#ifdef NETDEV_HW_ADDR_T_MULTICAST + { + struct netdev_hw_addr *ha; + + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; + } +#else + addr_list = netdev->mc_list->dmi_addr; +#endif spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.update_xcast_mode(hw, xcast_mode); - /* reprogram multicast list */ - hw->mac.ops.update_mc_addr_list(hw, netdev); + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + ixgbevf_addr_list_itr, false); +#ifdef NETDEV_HW_ADDR_T_UNICAST ixgbevf_write_uc_addr_list(netdev); +#endif spin_unlock_bh(&adapter->mbx_lock); } @@ -2144,24 +2624,27 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) { int q_idx; - struct ixgbevf_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; - napi_enable(&q_vector->napi); + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { +#ifdef HAVE_NDO_BUSY_POLL + ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&adapter->q_vector[q_idx]->napi); } } static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) { int q_idx; - struct ixgbevf_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; - napi_disable(&q_vector->napi); + for (q_idx = 0; q_idx < 
adapter->num_q_vectors; q_idx++) { + napi_disable(&adapter->q_vector[q_idx]->napi); +#ifdef HAVE_NDO_BUSY_POLL + while(!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif /* HAVE_NDO_BUSY_POLL */ } } @@ -2214,9 +2697,10 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) ixgbevf_set_rx_mode(adapter->netdev); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ixgbevf_restore_vlan(adapter); - ixgbevf_ipsec_restore(adapter); +#endif ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); } @@ -2262,15 +2746,13 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - static const int api[] = { - ixgbe_mbox_api_14, - ixgbe_mbox_api_13, - ixgbe_mbox_api_12, - ixgbe_mbox_api_11, - ixgbe_mbox_api_10, - ixgbe_mbox_api_unknown - }; - int err, idx = 0; + int api[] = { ixgbe_mbox_api_15, + ixgbe_mbox_api_13, + ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown }; + int err = 0, idx = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2281,6 +2763,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) idx++; } + if (hw->api_version >= ixgbe_mbox_api_15) + ixgbe_upgrade_mbx_params_vf(hw); + spin_unlock_bh(&adapter->mbx_lock); } @@ -2289,14 +2774,21 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_tx_queues > 1) + netdev->features |= NETIF_F_MULTI_QUEUE; + +#endif ixgbevf_configure_msix(adapter); spin_lock_bh(&adapter->mbx_lock); if (is_valid_ether_addr(hw->mac.addr)) - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, + 0, IXGBE_RAH_AV); else - hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); + hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, + 0, IXGBE_RAH_AV); spin_unlock_bh(&adapter->mbx_lock); @@ -2332,6 +2824,12 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) { u16 i = rx_ring->next_to_clean; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif /* Free Rx ring sk_buff */ if (rx_ring->skb) { @@ -2359,7 +2857,11 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) rx_buffer->dma, ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else IXGBEVF_RX_DMA_ATTR); +#endif __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); @@ -2387,10 +2889,14 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ +#ifdef HAVE_XDP_SUPPORT if (ring_is_xdp(tx_ring)) page_frag_free(tx_buffer->data); else dev_kfree_skb_any(tx_buffer->skb); +#else + dev_kfree_skb_any(tx_buffer->skb); +#endif /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -2430,10 +2936,13 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) } } + /* reset BQL for queue */ + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - } 
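As context for the two BQL touch points added above (netdev_tx_completed_queue() in ixgbevf_clean_tx_irq() and netdev_tx_reset_queue() in ixgbevf_clean_tx_ring()), here is a minimal sketch of how the three Byte Queue Limits hooks pair up in any driver; xmit_frame, clean_ring and reset_ring are illustrative stand-ins, not ixgbevf functions, and the matching netdev_tx_sent_queue() call belongs on the transmit path, which this section does not show:

	#include <linux/netdevice.h>

	/* transmit path: credit the bytes just posted to the hardware ring */
	static netdev_tx_t xmit_frame(struct sk_buff *skb,
				      struct netdev_queue *txq)
	{
		netdev_tx_sent_queue(txq, skb->len);
		return NETDEV_TX_OK;
	}

	/* completion path: release the credit once the hardware is done */
	static void clean_ring(struct netdev_queue *txq, unsigned int pkts,
			       unsigned int bytes)
	{
		netdev_tx_completed_queue(txq, pkts, bytes);
	}

	/* ring flush: drop outstanding credit so the queue cannot stall on
	 * stale byte counts after the rings are reinitialized
	 */
	static void reset_ring(struct netdev_queue *txq)
	{
		netdev_tx_reset_queue(txq);
	}

Every netdev_tx_sent_queue() must eventually be balanced by a completed or reset call, which is why both hunks skip the XDP rings: those never report sent bytes to BQL in the first place.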
/** @@ -2472,7 +2981,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) return; /* do nothing if already down */ - /* disable all enabled Rx queues */ + /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); @@ -2493,7 +3002,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) /* disable transmits in the hardware now that interrupts are off */ for (i = 0; i < adapter->num_tx_queues; i++) { u8 reg_idx = adapter->tx_ring[i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } @@ -2505,7 +3013,9 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) IXGBE_TXDCTL_SWFLSH); } +#ifdef HAVE_PCI_ERS if (!pci_channel_offline(adapter->pdev)) +#endif ixgbevf_reset(adapter); ixgbevf_clean_all_tx_rings(adapter); @@ -2531,8 +3041,10 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + if (IXGBE_REMOVED(hw->hw_addr)) + return; if (hw->mac.ops.reset_hw(hw)) { - hw_dbg(hw, "PF still resetting\n"); + DPRINTK(PROBE, ERR, "PF still resetting\n"); } else { hw->mac.ops.init_hw(hw); ixgbevf_negotiate_api(adapter); @@ -2566,18 +3078,16 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, vector_threshold, vectors); if (vectors < 0) { - dev_err(&adapter->pdev->dev, - "Unable to allocate MSI-X interrupts\n"); + DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); kfree(adapter->msix_entries); adapter->msix_entries = NULL; return vectors; } - /* Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + /* Adjust for only the vectors we'll use, which is the number of * vectors we were allocated. */ - adapter->num_msix_vectors = vectors; + adapter->num_q_vectors = vectors - NON_Q_VECTORS; return 0; } @@ -2625,13 +3135,15 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; adapter->num_rx_queues = rss; +#ifdef HAVE_TX_MQ adapter->num_tx_queues = rss; +#endif adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0; default: break; @@ -2645,7 +3157,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. - **/ + */ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { int vector, v_budget; @@ -2662,8 +3174,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) + if (!adapter->msix_entries) { return -ENOMEM; + } for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; @@ -2793,6 +3306,12 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx, /* update q_vector Rx values */ ixgbevf_add_ring(ring, &q_vector->rx); + /* 82599 errata, UDP frames with a 0 checksum + * can be marked as checksum errors. 
+ */ + if (adapter->hw.mac.type == ixgbe_mac_82599_vf) + set_bit(__IXGBEVF_RX_CSUM_UDP_ZERO_ERR, &ring->state); + /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; @@ -2808,6 +3327,12 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx, /* push pointer to next ring */ ring++; } +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + + napi_hash_add(&q_vector->napi); +#endif +#endif return 0; } @@ -2837,9 +3362,12 @@ static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx) adapter->rx_ring[ring->queue_index] = NULL; adapter->q_vector[v_idx] = NULL; +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); - /* ixgbevf_get_stats() might access the rings on this vector, + /* ixgbevf_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. */ kfree_rcu(q_vector, rcu); @@ -2854,7 +3382,7 @@ static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx) **/ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) { - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int q_vectors = adapter->num_q_vectors; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; int xdp_remaining = adapter->num_xdp_queues; @@ -2864,7 +3392,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { for (; rxr_remaining; v_idx++, q_vectors--) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); - err = ixgbevf_alloc_q_vector(adapter, v_idx, 0, 0, 0, 0, rqpv, rxr_idx); if (err) @@ -2901,10 +3428,13 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) return 0; err_out: - while (v_idx) { - v_idx--; + adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) ixgbevf_free_q_vector(adapter, v_idx); - } return -ENOMEM; } @@ -2919,12 +3449,14 @@ err_out: **/ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) { - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int v_idx = adapter->num_q_vectors; - while (q_vectors) { - q_vectors--; - ixgbevf_free_q_vector(adapter, q_vectors); - } + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbevf_free_q_vector(adapter, v_idx); } /** @@ -2949,6 +3481,7 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) **/ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; int err; /* Number of supported queues */ @@ -2956,21 +3489,26 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) err = ixgbevf_set_interrupt_capability(adapter); if (err) { - hw_dbg(&adapter->hw, - "Unable to setup interrupt capabilities\n"); + dev_err(&pdev->dev, "Unable to setup interrupt capabilities\n"); goto err_set_interrupt; } err = ixgbevf_alloc_q_vectors(adapter); if (err) { - hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); + dev_err(&pdev->dev, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } - hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n", - (adapter->num_rx_queues > 1) ?
"Enabled" : "Disabled", - adapter->num_rx_queues, adapter->num_tx_queues, - adapter->num_xdp_queues); +#ifdef HAVE_XDP_SUPPORT + dev_info(&pdev->dev, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n", + (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues, + adapter->num_xdp_queues); +#else + dev_info(&pdev->dev, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); +#endif /* HAVE_XDP_SUPPORT */ set_bit(__IXGBEVF_DOWN, &adapter->state); @@ -3000,13 +3538,14 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) /** * ixgbevf_sw_init - Initialize general software structures + * (struct ixgbevf_adapter) * @adapter: board private structure to initialize * * ixgbevf_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ -static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) +static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; @@ -3014,18 +3553,35 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) int err; /* PCI config space info */ + hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; - hw->revision_id = pdev->revision; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; - hw->mbx.ops.init_params(hw); +#if IS_ENABLED(CONFIG_PCI_HYPERV) + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_VF_HV: + case IXGBE_DEV_ID_X540_VF_HV: + case IXGBE_DEV_ID_X550_VF_HV: + case IXGBE_DEV_ID_X550EM_X_VF_HV: + case IXGBE_DEV_ID_X550EM_A_VF_HV: + ixgbevf_hv_init_ops_vf(hw); + break; + default: + ixgbe_init_ops_vf(hw); + break; + } +#else + ixgbe_init_ops_vf(hw); +#endif + ixgbe_init_mbx_params_vf(hw); if (hw->mac.type >= ixgbe_mac_X550_vf) { err = ixgbevf_init_rss_key(adapter); if (err) - goto out; + return err; } /* assume legacy case in which PF would only give VF 2 queues */ @@ -3037,13 +3593,14 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) err = hw->mac.ops.reset_hw(hw); if (err) { - dev_info(&pdev->dev, + dev_info(pci_dev_to_dev(pdev), "PF still in reset state. 
Is the PF interface up?\n"); } else { err = hw->mac.ops.init_hw(hw); if (err) { - pr_err("init_shared_code failed: %d\n", err); - goto out; + dev_err(pci_dev_to_dev(pdev), + "init_shared_code failed: %d\n", err); + return err; } ixgbevf_negotiate_api(adapter); err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); @@ -3070,11 +3627,14 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; - set_bit(__IXGBEVF_DOWN, &adapter->state); - return 0; + /* enable rx csum by default */ + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; -out: - return err; + adapter->irqs_ready = false; + + set_bit(__IXGBEVF_DOWN, &adapter->state); + + return 0; } #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ @@ -3091,8 +3651,8 @@ out: { \ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ - u64 current_counter = (current_counter_msb << 32) | \ - current_counter_lsb; \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ if (current_counter < last_counter) \ counter += 0x1000000000LL; \ last_counter = current_counter; \ @@ -3106,6 +3666,7 @@ out: void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + u64 restart_queue = 0, tx_busy = 0; u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; u64 alloc_rx_page = 0, hw_csum_rx_error = 0; int i; @@ -3140,6 +3701,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; adapter->alloc_rx_page = alloc_rx_page; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbevf_ring *tx_ring = adapter->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; } /** @@ -3163,6 +3733,7 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) return; rtnl_lock(); + /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_REMOVING, &adapter->state) || @@ -3185,7 +3756,7 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. - **/ + */ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -3206,7 +3777,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) } /* get one bit for every active Tx/Rx interrupt vector */ - for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring)
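The UPDATE_VF_COUNTER_36bit macro above deserves a word: the VF's wide hardware counters are only 36 bits (a 4-bit MSB register on top of a 32-bit LSB register), so the driver detects roll-over by comparing against the last snapshot and crediting one full 2^36 epoch whenever the new reading is smaller. A standalone sketch of that pattern follows; the function name is illustrative only, and u64 is assumed from linux/types.h:

	/* Sketch of the 36-bit roll-over logic in UPDATE_VF_COUNTER_36bit. */
	static u64 vf_counter_36bit(u64 counter, u64 *last, u64 current)
	{
		if (current < *last)			/* hardware counter wrapped */
			counter += 0x1000000000ULL;	/* add 1 << 36 */
		*last = current;
		counter &= 0xFFFFFFF000000000ULL;	/* keep accumulated epochs */
		counter |= current;			/* splice in low 36 bits */
		return counter;
	}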
- "1 Gbps" : - (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? - "100 Mbps" : - "unknown speed"); + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed_str = "10 Gbps"; + break; + case IXGBE_LINK_SPEED_5GB_FULL: + speed_str = "5 Gbps"; + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + speed_str = "2.5 Gbps"; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed_str = "1 Gbps"; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; + case IXGBE_LINK_SPEED_10_FULL: + speed_str = "10 Mbps"; + break; + default: + speed_str = "unknown speed"; + break; + } + e_info(drv, "NIC Link is Up %s\n", speed_str); netif_carrier_on(netdev); } @@ -3284,17 +3872,18 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) if (!netif_carrier_ok(netdev)) return; - dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); + e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); } /** * ixgbevf_watchdog_subtask - worker thread to bring link up - * @adapter: board private structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) { + /* if interface is down do nothing */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) @@ -3327,6 +3916,7 @@ static void ixgbevf_service_task(struct work_struct *work) ixgbevf_down(adapter); rtnl_unlock(); } + ixgbevf_service_event_complete(adapter); return; } @@ -3355,8 +3945,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) if (!tx_ring->desc) return; - dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } @@ -3381,13 +3971,12 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: Tx descriptor ring (for a specific queue) to setup + * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) { - struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; @@ -3411,7 +4000,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); + dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } @@ -3433,7 +4022,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); + DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } @@ -3485,11 +4074,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, if (!rx_ring->desc) goto err; +#ifdef HAVE_XDP_BUFF_RXQ /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, rx_ring->queue_index) < 0) goto err; +#endif /* HAVE_XDP_BUFF_RXQ */ rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -3518,7 +4109,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, "Allocation for Rx 
@@ -3518,7 +4109,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); + DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } @@ -3541,12 +4132,14 @@ void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) ixgbevf_clean_rx_ring(rx_ring); rx_ring->xdp_prog = NULL; +#ifdef HAVE_XDP_BUFF_RXQ xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +#endif /* HAVE_XDP_BUFF_RXQ */ vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; - dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } @@ -3586,21 +4179,21 @@ int ixgbevf_open(struct net_device *netdev) /* A previous failure to open the device because of a lack of * available MSIX vector resources may have reset the number - * of msix vectors variable to zero. The only way to recover + * of q vectors variable to zero. The only way to recover * is to unload/reload the driver and hope that the system has * been able to recover some MSIX vector resources. */ - if (!adapter->num_msix_vectors) + if (!adapter->num_q_vectors) return -ENOMEM; if (hw->adapter_stopped) { ixgbevf_reset(adapter); /* if adapter is still stopped then PF isn't up and - * the VF can't start. - */ + * the vf can't start. */ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; - pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); + DPRINTK(DRV, ERR, "Unable to start - perhaps the PF " + "Driver isn't up yet\n"); goto err_setup_reset; } } @@ -3662,10 +4255,10 @@ err_setup_reset: */ static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) { - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); } /** @@ -3702,18 +4295,22 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; - /* Hardware has to reinitialize queues and interrupts to + /* + * Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically.
*/ rtnl_lock(); + /* disable running interface */ if (netif_running(dev)) ixgbevf_close(dev); + /* free and reallocate queues */ ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_init_interrupt_scheme(adapter); + /* reenable running interface */ if (netif_running(dev)) ixgbevf_open(dev); @@ -3721,8 +4318,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, - u32 vlan_macip_lens, u32 fceof_saidx, - u32 type_tucmd, u32 mss_l4len_idx) + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -3736,16 +4333,18 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); + context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, - u8 *hdr_len, - struct ixgbevf_ipsec_tx_data *itd) + u8 *hdr_len) { +#ifndef NETIF_F_TSO + return 0; +#else u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; union { @@ -3758,7 +4357,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, unsigned char *hdr; } l4; u32 paylen, l4_offset; - u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3768,13 +4366,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, return 0; err = skb_cow_head(skb, 0); - if (err < 0) + if (err) return err; - if (eth_p_mpls(first->protocol)) - ip.hdr = skb_inner_network_header(skb); - else - ip.hdr = skb_network_header(skb); + ip.hdr = skb_network_header(skb); l4.hdr = skb_checksum_start(skb); /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ @@ -3784,17 +4379,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); - int len = csum_start - trans_start; + /* IP header will have to cancel out any data that - * is not a part of the outer IP header, so set to - * a reverse csum if needed, else init check to 0. + * is not a part of the outer IP header */ - ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
- csum_fold(csum_partial(trans_start, - len, 0)) : 0; + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | @@ -3809,7 +4402,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, l4_offset = l4.hdr - skb->data; /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; @@ -3824,18 +4417,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); - fceof_saidx |= itd->pfsa; - type_tucmd |= itd->flags | itd->trailer_len; - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, - mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + type_tucmd, mss_l4len_idx); return 1; +#endif } static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) @@ -3848,12 +4439,10 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) } static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first, - struct ixgbevf_ipsec_tx_data *itd) + struct ixgbevf_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3866,7 +4455,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): - /* validate that this is actually an SCTP request */ + /* validate this is actually an SCTP request */ if (((first->protocol == htons(ETH_P_IP)) && (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || ((first->protocol == htons(ETH_P_IPV6)) && @@ -3887,16 +4476,13 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - skb_network_offset(skb); + no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - fceof_saidx |= itd->pfsa; - type_tucmd |= itd->flags | itd->trailer_len; - - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - fceof_saidx, type_tucmd, 0); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@ -3906,7 +4492,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - /* set HW VLAN bit if VLAN is present */ + /* set HW vlan bit if vlan is present */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
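The switch on skb->csum_offset in ixgbevf_tx_csum() above works because, for CHECKSUM_PARTIAL skbs, the stack records where the L4 checksum field sits inside the transport header, and that offset happens to be distinct for TCP (16), UDP (6) and SCTP (8), so the protocol can be identified without reparsing headers. An illustrative, hypothetical helper showing the same dispatch, assuming the usual linux/tcp.h, linux/udp.h and linux/sctp.h header definitions:

	/* Illustrative only: csum_offset alone identifies the L4 protocol. */
	static bool example_csum_offload_ok(const struct sk_buff *skb)
	{
		switch (skb->csum_offset) {
		case offsetof(struct tcphdr, check):	/* 16 */
		case offsetof(struct udphdr, check):	/* 6 */
		case offsetof(struct sctphdr, checksum):	/* 8 */
			return true;	/* hardware can insert the checksum */
		default:
			return false;	/* fall back to software checksum */
		}
	}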
@@ -3930,15 +4516,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - /* enable IPsec */ - if (tx_flags & IXGBE_TX_FLAGS_IPSEC) - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); - - /* use index 1 context for TSO/FSO/FCOE/IPSEC */ - if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) + /* use index 1 context for TSO/FSO/FCOE */ + if (tx_flags & IXGBE_TX_FLAGS_TSO) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); - /* Check Context must be set if Tx switch is enabled, which it + /* + * Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running */ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); @@ -3946,6 +4529,35 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, tx_desc->read.olinfo_status = olinfo_status; } +static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbevf_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + + return 0; +} + +static inline int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) +{ + if (likely(ixgbevf_desc_unused(tx_ring) >= size)) + return 0; + return __ixgbevf_maybe_stop_tx(tx_ring, size); +} + static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, const u8 hdr_len) @@ -4025,16 +4637,20 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); tx_desc->read.cmd_type_len = cmd_type; + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + /* set the timestamp */ first->time_stamp = jiffies; - skb_tx_timestamp(skb); - - /* Force memory writes to complete before letting h/w know there +#ifndef HAVE_TRANS_START_IN_QUEUE + tx_ring->netdev->trans_start = first->time_stamp; +#endif + /* + * Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). * - * We also need this memory barrier (wmb) to make certain all of the + * We also need this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written. */ wmb(); @@ -4048,8 +4664,27 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, tx_ring->next_to_use = i; - /* notify HW of packet */ - ixgbevf_write_tail(tx_ring, i); + ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* software timestamp */ + skb_tx_timestamp(skb); + + if (!netdev_xmit_more() || netif_xmit_stopped(txring_txq(tx_ring))) { + writel(i, tx_ring->tail); +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + + /* The following mmiowb() is required on certain + * architectures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. + * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. + */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } return; dma_error:
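The netdev_xmit_more() test added above is the standard doorbell-coalescing idiom: while the stack signals that more frames are queued behind this one, the tail (doorbell) MMIO write is deferred, so one register write can cover a whole burst; the netif_xmit_stopped() escape hatch guarantees the hardware is still kicked if the queue just stopped. A condensed sketch of the idiom, mirroring the calls used in this hunk (the helper name is hypothetical):

	/* Illustrative doorbell batching; one MMIO write per xmit burst. */
	static void example_kick_hw(struct ixgbevf_ring *tx_ring, u16 next_to_use)
	{
		if (!netdev_xmit_more() ||
		    netif_xmit_stopped(txring_txq(tx_ring)))
			writel(next_to_use, tx_ring->tail);
		/* else: a later frame in the same burst writes the tail */
	}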
@@ -4083,35 +4718,6 @@ dma_error: tx_ring->next_to_use = i; } -static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. - */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. - */ - if (likely(ixgbevf_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - - return 0; -} - -static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) -{ - if (likely(ixgbevf_desc_unused(tx_ring) >= size)) - return 0; - return __ixgbevf_maybe_stop_tx(tx_ring, size); -} - static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, struct ixgbevf_ring *tx_ring) { @@ -4119,33 +4725,29 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); - struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; -#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; -#endif + __be16 protocol = skb->protocol; u8 hdr_len = 0; - u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); - - if (!dst_mac || is_link_local_ether_addr(dst_mac)) { - dev_kfree_skb_any(skb); + /* + * if this is an LLDP ether frame then drop it - VFs do not + * forward LLDP frames. + */ + if (ntohs(skb->protocol) == 0x88CC) { + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time */ -#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - count += TXD_USE_COUNT(skb_frag_size(frag)); - } -#else - count += skb_shinfo(skb)->nr_frags; -#endif if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; @@ -4157,30 +4759,36 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb); tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == __constant_htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + IXGBE_TX_FLAGS_VLAN_SHIFT; } +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ /* record initial flags and protocol */ first->tx_flags = tx_flags; - first->protocol = vlan_get_protocol(skb); + first->protocol = protocol; -#ifdef CONFIG_IXGBEVF_IPSEC - if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) - goto out_drop; -#endif - tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); + tso = ixgbevf_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) - ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); + ixgbevf_tx_csum(tx_ring, first); ixgbevf_tx_map(tx_ring, first, hdr_len); - ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: @@ -4190,7 +4798,8 @@ out_drop: return NETDEV_TX_OK; }
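The descriptor budget computed above is worth spelling out: TXD_USE_COUNT() rounds each buffer up to IXGBE_MAX_DATA_PER_TXD-sized descriptors, and the "+ 3" headroom covers the 2-descriptor gap that keeps the tail from touching the head plus 1 context descriptor. A condensed sketch of the same arithmetic (hypothetical helper, assuming the driver's TXD_USE_COUNT macro and skb helpers from linux/skbuff.h):

	/* Illustrative descriptor budget for one skb. */
	static u16 example_desc_budget(const struct sk_buff *skb)
	{
		u16 count = TXD_USE_COUNT(skb_headlen(skb));
		unsigned short f;

		/* one or more descriptors per fragment */
		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
			count += TXD_USE_COUNT(
				skb_frag_size(&skb_shinfo(skb)->frags[f]));

		/* +2 gap, +1 context descriptor; compare against
		 * ixgbevf_desc_unused() before accepting the frame
		 */
		return count + 3;
	}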
-static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring; @@ -4203,13 +4812,14 @@ static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *ne /* The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (skb->len < 17) { - if (skb_padto(skb, 17)) - return NETDEV_TX_OK; - skb->len = 17; - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; +#ifdef HAVE_TX_MQ tx_ring = adapter->tx_ring[skb->queue_mapping]; +#else + tx_ring = adapter->tx_ring[0]; +#endif return ixgbevf_xmit_frame_ring(skb, tx_ring); } @@ -4232,7 +4842,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) spin_lock_bh(&adapter->mbx_lock); - err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); + err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0, IXGBE_RAH_AV); spin_unlock_bh(&adapter->mbx_lock); @@ -4260,6 +4870,12 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int ret; +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + +#endif /* prevent MTU being changed to a size unsupported by XDP */ if (adapter->xdp_prog) { dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); @@ -4267,16 +4883,17 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) } spin_lock_bh(&adapter->mbx_lock); + /* notify the PF of our intent to use this size of frame */ ret = hw->mac.ops.set_rlpml(hw, max_frame); spin_unlock_bh(&adapter->mbx_lock); if (ret) return -EINVAL; - hw_dbg(hw, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); - /* must set new MTU before calling down or up */ + /* set new MTU */ netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -4285,7 +4902,44 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) +#ifdef ETHTOOL_OPS_COMPAT +static int ixgbevf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCETHTOOL: + return ethtool_ioctl(ifr); + default: + return -EOPNOTSUPP; + } +} +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * ixgbevf_netpoll - polling 'interrupt' handler + * + * Used by things like netconsole to send skbs without having to + * re-enable interrupts. It's not called while + * the interrupt routine is executing.
+ * @netdev: network interface device structure + **/ +static void ixgbevf_netpoll(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return; + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + +#ifndef USE_REBOOT_NOTIFIER +static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t __maybe_unused state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -4322,7 +4976,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) u32 err; pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call + /* + * pci_restore_state clears dev->state_saved so call * pci_save_state to restore it. */ pci_save_state(pdev); @@ -4352,13 +5007,15 @@ static int ixgbevf_resume(struct pci_dev *pdev) return err; } - #endif /* CONFIG_PM */ + static void ixgbevf_shutdown(struct pci_dev *pdev) { ixgbevf_suspend(pdev, PMSG_SUSPEND); } +#endif /* USE_REBOOT_NOTIFIER */ +#ifdef HAVE_NDO_GET_STATS64 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, const struct ixgbevf_ring *ring) { @@ -4376,8 +5033,13 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, } } -static void ixgbevf_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void ixgbevf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *ixgbevf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; @@ -4385,20 +5047,21 @@ static void ixgbevf_get_stats(struct net_device *netdev, const struct ixgbevf_ring *ring; int i; - ixgbevf_update_stats(adapter); - stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { ring = adapter->rx_ring[i]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - bytes = ring->stats.bytes; - packets = ring->stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->rx_bytes += bytes; - stats->rx_packets += packets; + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->rx_bytes += bytes; + stats->rx_packets += packets; + } } for (i = 0; i < adapter->num_tx_queues; i++) { @@ -4411,7 +5074,57 @@ static void ixgbevf_get_stats(struct net_device *netdev, ixgbevf_get_tx_ring_stats(stats, ring); } rcu_read_unlock(); +#ifndef HAVE_VOID_NDO_GET_STATS64 + + return stats; +#endif } +#else /* HAVE_NDO_GET_STATS64 */ +/** + * ixgbevf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ **/ +static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif + const struct ixgbevf_ring *ring; + unsigned long bytes = 0, packets = 0; + int i; + + net_stats->multicast = adapter->stats.vfmprc - + adapter->stats.base_vfmprc; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + bytes += ring->stats.bytes; + packets += ring->stats.packets; + } + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + bytes = 0; + packets = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + bytes += ring->stats.bytes; + packets += ring->stats.packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + /* only return the current stats */ + return net_stats; +} +#endif /* HAVE_NDO_GET_STATS64 */ +#ifdef NETIF_F_GSO_PARTIAL #define IXGBEVF_MAX_MAC_HDR_LEN 127 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511 @@ -4446,7 +5159,9 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +#endif /* NETIF_F_GSO_PARTIAL */ +#ifdef HAVE_XDP_SUPPORT static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) { int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; @@ -4488,41 +5203,130 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return 0; } +#ifdef HAVE_NDO_BPF static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) +#else +static int ixgbevf_xdp(struct net_device *dev, struct netdev_xdp *xdp) +#endif /* HAVE_NDO_BPF */ { +#ifdef HAVE_XDP_QUERY_PROG struct ixgbevf_adapter *adapter = netdev_priv(dev); +#endif switch (xdp->command) { case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); +#ifdef HAVE_XDP_QUERY_PROG case XDP_QUERY_PROG: +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = !!(adapter->xdp_prog); +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; +#endif default: return -EINVAL; } } +#endif /* HAVE_XDP_SUPPORT */ +#ifdef HAVE_NET_DEVICE_OPS static const struct net_device_ops ixgbevf_netdev_ops = { +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, .ndo_set_rx_mode = ixgbevf_set_rx_mode, - .ndo_get_stats64 = ixgbevf_get_stats, +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = ixgbevf_get_stats64, +#else + .ndo_get_stats = ixgbevf_get_stats, +#endif .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbevf_set_mac, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = ixgbevf_change_mtu, +#else .ndo_change_mtu = ixgbevf_change_mtu, +#endif +#ifdef ETHTOOL_OPS_COMPAT + .ndo_do_ioctl = ixgbevf_ioctl, +#endif .ndo_tx_timeout = ixgbevf_tx_timeout, +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = ixgbevf_vlan_rx_register, +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = ixgbevf_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbevf_netpoll, +#endif +#if defined(NETIF_F_GSO_PARTIAL) .ndo_features_check = ixgbevf_features_check, +#elif defined(HAVE_PASSTHRU_FEATURES_CHECK) + .ndo_features_check = passthru_features_check, +#endif +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF .ndo_bpf = ixgbevf_xdp, +#else + .ndo_xdp = ixgbevf_xdp, +#endif /* HAVE_NDO_BPF */ +#endif /* HAVE_XDP_SUPPORT */ }; +#endif /* HAVE_NET_DEVICE_OPS */ static void ixgbevf_assign_netdev_ops(struct net_device *dev) { +#ifdef HAVE_NET_DEVICE_OPS dev->netdev_ops = &ixgbevf_netdev_ops; +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = ixgbevf_open; + dev->stop = ixgbevf_close; + + dev->hard_start_xmit = ixgbevf_xmit_frame; + + dev->get_stats = ixgbevf_get_stats; + dev->set_multicast_list = ixgbevf_set_rx_mode; + dev->set_mac_address = ixgbevf_set_mac; + dev->change_mtu = ixgbevf_change_mtu; +#ifdef ETHTOOL_OPS_COMPAT + dev->do_ioctl = ixgbevf_ioctl; +#endif +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = ixgbevf_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + dev->vlan_rx_register = ixgbevf_vlan_rx_register; + dev->vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = ixgbevf_netpoll; +#endif +#endif /* HAVE_NET_DEVICE_OPS */ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(dev)->ndo_busy_poll = ixgbevf_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } @@ -4538,25 +5342,38 @@ static void ixgbevf_assign_netdev_ops(struct net_device *dev) * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ -static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit ixgbevf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbevf_adapter *adapter = NULL; +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + unsigned int min_mtu, max_mtu; +#endif struct ixgbe_hw *hw = NULL; - const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; int err, pci_using_dac; + const struct ixgbevf_info *ei = ixgbevf_info_tbl[ent->driver_data]; bool disable_dev = false; +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else + u32 hw_features; +#endif +#endif /* HAVE_NDO_SET_FEATURES */ err = pci_enable_device(pdev); if (err) return err; - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(pci_dev_to_dev(pdev), + DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + dev_err(pci_dev_to_dev(pdev), "No usable DMA configuration, aborting\n"); goto err_dma; } pci_using_dac = 0; @@ -4564,14 +5381,20 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_request_regions(pdev, ixgbevf_driver_name); if (err) { - dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + dev_err(pci_dev_to_dev(pdev), + "pci_request_regions failed 0x%x\n", err); goto err_pci_reg; } + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), - MAX_TX_QUEUES); +#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), MAX_TX_QUEUES); +#else + netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter)); +#endif if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -4585,13 +5408,19 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); - /* call save state here in standalone driver because it relies on + hw->mac.type = ei->mac; + adapter->flags = ei->flags; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); +#endif hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); adapter->io_addr = hw->hw_addr; @@ -4602,89 +5431,141 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbevf_assign_netdev_ops(netdev); - /* Setup HW API */ - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); - hw->mac.type = ii->mac; - - memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, - sizeof(struct ixgbe_mbx_operations)); - /* setup the private structure */ err = ixgbevf_sw_init(adapter); if (err) goto err_sw_init; - /* The HW MAC address was set and/or determined in sw_init */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - pr_err("invalid MAC address\n"); - err = -EIO; - goto err_sw_init; - } +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + netdev->features = NETIF_F_SG | +#ifdef NETIF_F_TSO + NETIF_F_TSO | +#ifdef NETIF_F_TSO6 + NETIF_F_TSO6 | +#endif /* NETIF_F_TSO6 */ +#endif /* NETIF_F_TSO */ + NETIF_F_RXCSUM | + 
NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; - netdev->hw_features = NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; +#else /* !defined(NETIF_F_HW_VLAN_TX) || !defined(NETIF_F_HW_VLAN_CTAG_TX) */ + netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM; -#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ - NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPXIP4 | \ - NETIF_F_GSO_IPXIP6 | \ - NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM) +#endif/* defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) */ +#ifdef NETIF_F_GSO_PARTIAL - netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; - netdev->hw_features |= NETIF_F_GSO_PARTIAL | - IXGBEVF_GSO_PARTIAL_FEATURES; + netdev->gso_partial_features = NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | +#ifdef NETIF_F_GSO_IPXIP4 + NETIF_F_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_IPXIP6 + NETIF_F_GSO_IPXIP6 | +#endif +#else + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | +#endif + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->features = netdev->hw_features; + netdev->features |= netdev->gso_partial_features | + NETIF_F_GSO_PARTIAL; +#endif /* NETIF_F_GSO_PARTIAL */ +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = netdev->hw_features; +#else + hw_features = get_netdev_hw_features(netdev); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + + hw_features |= netdev->features; +#else +#ifdef NETIF_F_GRO + netdev->features |= NETIF_F_GRO; +#endif /* NETIF_F_GRO */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif /* HAVE_NDO_SET_FEATURES */ if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; - netdev->mpls_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM; - netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES; +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= netdev->features; +#ifdef NETIF_F_GSO_PARTIAL + netdev->vlan_features |= NETIF_F_TSO_MANGLEID; +#endif +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_MPLS_FEATURES + netdev->mpls_features |= NETIF_F_HW_CSUM; +#endif +#ifdef HAVE_SKB_INNER_NETWORK_HEADER netdev->hw_enc_features |= netdev->vlan_features; +#endif /* set this bit last since it cannot be part of vlan_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | +#ifdef NETIF_F_HW_VLAN_CTAG_TX + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_TX + netdev->features |= NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; +#endif + /* The HW MAC address was set and/or determined in sw_init */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - netdev->priv_flags |= IFF_UNICAST_FLT; + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_info(pci_dev_to_dev(pdev), + "ixgbevf: invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU /* MTU range: 68 - 1504 or 9710 */ - netdev->min_mtu = ETH_MIN_MTU; + min_mtu = ETH_MIN_MTU; switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: - netdev->max_mtu = 
IXGBE_MAX_JUMBO_FRAME_SIZE - - (ETH_HLEN + ETH_FCS_LEN); + case ixgbe_mbox_api_15: + max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN); break; default: if (adapter->hw.mac.type != ixgbe_mac_82599_vf) - netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - - (ETH_HLEN + ETH_FCS_LEN); + max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN); else - netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; + max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; break; } +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = min_mtu; + netdev->extended->max_mtu = max_mtu; +#else + netdev->min_mtu = min_mtu; + netdev->max_mtu = max_mtu; +#endif +#endif + + timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); + if (IXGBE_REMOVED(hw->hw_addr)) { err = -EIO; goto err_sw_init; } - - timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); - INIT_WORK(&adapter->service_task, ixgbevf_service_task); set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); @@ -4693,7 +5574,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - strcpy(netdev->name, "eth%d"); + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); if (err) @@ -4701,27 +5582,35 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); - ixgbevf_init_ipsec_offload(adapter); + netif_tx_stop_all_queues(netdev); ixgbevf_init_last_counter_stats(adapter); - /* print the VF info */ - dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); - dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); + /* print the MAC address */ + dev_info(&pdev->dev, "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); switch (hw->mac.type) { case ixgbe_mac_X550_vf: - dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); + DPRINTK(PROBE, INFO, "Intel(R) X550 Virtual Function\n"); break; case ixgbe_mac_X540_vf: - dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + DPRINTK(PROBE, INFO, "Intel(R) X540 Virtual Function\n"); break; case ixgbe_mac_82599_vf: default: - dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); + DPRINTK(PROBE, INFO, "Intel(R) 82599 Virtual Function\n"); break; } +#ifdef NETIF_F_GRO + if (netdev->features & NETIF_F_GRO) + DPRINTK(PROBE, INFO, "GRO is enabled\n"); +#endif + + DPRINTK(PROBE, INFO, "%s\n", ixgbevf_driver_string); return 0; err_register: @@ -4751,7 +5640,7 @@ err_dma: * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ -static void ixgbevf_remove(struct pci_dev *pdev) +static void __devexit ixgbevf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter; @@ -4768,19 +5657,19 @@ static void ixgbevf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); - ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); pci_release_regions(pdev); - hw_dbg(&adapter->hw, "Remove complete\n"); + DPRINTK(PROBE, INFO, "Remove complete\n"); kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); + pci_disable_pcie_error_reporting(pdev); + if (disable_dev) pci_disable_device(pdev); } @@ -4792,7 +5681,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) * * This function is called after a PCI bus error affecting * this device has been detected. - **/ + */ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -4827,7 +5716,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the ixgbevf_resume routine. - **/ + */ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4856,7 +5745,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the ixgbevf_resume routine. - **/ + */ static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4869,25 +5758,83 @@ static void ixgbevf_io_resume(struct pci_dev *pdev) rtnl_unlock(); } +#if defined(HAVE_RHEL7_PCI_RESET_NOTIFY) || \ + defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) +static void ixgbevf_io_reset_notify(struct pci_dev *pdev, bool prepare) +{ + if (prepare) + ixgbevf_suspend(pdev, PMSG_SUSPEND); +#ifdef CONFIG_PM + else + ixgbevf_resume(pdev); +#endif +} + +#endif +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +static void pci_io_reset_prepare(struct pci_dev *pdev) +{ + ixgbevf_suspend(pdev, PMSG_SUSPEND); +} + +static void pci_io_reset_done(struct pci_dev *pdev) +{ +#ifdef CONFIG_PM + ixgbevf_resume(pdev); +#endif +} + +#endif +struct net_device *ixgbevf_hw_to_netdev(const struct ixgbe_hw *hw) +{ + return ((struct ixgbevf_adapter *)hw->back)->netdev; +} + +struct ixgbevf_msg *ixgbevf_hw_to_msg(const struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = + container_of(hw, struct ixgbevf_adapter, hw); + return (struct ixgbevf_msg *)&adapter->msg_enable; +} + /* PCI Error Recovery (ERS) */ -static const struct pci_error_handlers ixgbevf_err_handler = { +static struct pci_error_handlers ixgbevf_err_handler = { .error_detected = ixgbevf_io_error_detected, .slot_reset = ixgbevf_io_slot_reset, +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY + .reset_notify = ixgbevf_io_reset_notify, +#endif +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE + .reset_prepare = pci_io_reset_prepare, + .reset_done = pci_io_reset_done, +#endif .resume = ixgbevf_io_resume, }; +#ifdef HAVE_RHEL7_PCI_RESET_NOTIFY +static struct pci_driver_rh ixgbevf_driver_rh = { + .size = sizeof(struct pci_driver_rh), + .reset_notify = ixgbevf_io_reset_notify, +}; + +#endif static struct pci_driver ixgbevf_driver = 
{ - .name = ixgbevf_driver_name, - .id_table = ixgbevf_pci_tbl, - .probe = ixgbevf_probe, - .remove = ixgbevf_remove, + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = __devexit_p(ixgbevf_remove), #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = ixgbevf_suspend, - .resume = ixgbevf_resume, + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, +#endif +#ifndef USE_REBOOT_NOTIFIER + .shutdown = ixgbevf_shutdown, +#endif + .err_handler = &ixgbevf_err_handler, +#ifdef HAVE_RHEL7_PCI_RESET_NOTIFY + .pci_driver_rh = &ixgbevf_driver_rh, #endif - .shutdown = ixgbevf_shutdown, - .err_handler = &ixgbevf_err_handler }; /** @@ -4898,17 +5845,14 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { - pr_info("%s - version %s\n", ixgbevf_driver_string, + int ret; + pr_info("ixgbevf: %s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); - pr_info("%s\n", ixgbevf_copyright); - ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); - if (!ixgbevf_wq) { - pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); - return -ENOMEM; - } + pr_info("ixgbevf: %s\n", ixgbevf_copyright); - return pci_register_driver(&ixgbevf_driver); + ret = pci_register_driver(&ixgbevf_driver); + return ret; } module_init(ixgbevf_init_module); @@ -4922,26 +5866,8 @@ module_init(ixgbevf_init_module); static void __exit ixgbevf_exit_module(void) { pci_unregister_driver(&ixgbevf_driver); - if (ixgbevf_wq) { - destroy_workqueue(ixgbevf_wq); - ixgbevf_wq = NULL; - } } -#ifdef DEBUG -/** - * ixgbevf_get_hw_dev_name - return device name string - * used by hardware layer to print debugging information - * @hw: pointer to private hardware struct - **/ -char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) -{ - struct ixgbevf_adapter *adapter = hw->back; - - return adapter->netdev->name; -} - -#endif module_exit(ixgbevf_exit_module); /* ixgbevf_main.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_osdep.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_osdep.h new file mode 100644 index 000000000000..27b4cd18d7e6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_osdep.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + + +/* glue for the OS independent part of ixgbe + * includes register access macros + */ + +#ifndef _IXGBEVF_OSDEP_H_ +#define _IXGBEVF_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "kcompat.h" + + +#ifdef DBG +#define hw_dbg(hw, S, A...) netdev_dbg(ixgbevf_hw_to_netdev(hw), S, ## A) +#else +#define hw_dbg(hw, S, A...) 
do {} while (0) +#endif + +#define IXGBE_REMOVED(a) unlikely(!(a)) +#define IXGBE_DEAD_READ_RETRIES 10 +#define IXGBE_DEAD_READ_REG 0xdeadbeefU +#define IXGBE_FAILED_READ_REG 0xffffffffU + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ + IXGBE_WRITE_REG((a), (reg) + ((offset) << 2), (value)) + +#define IXGBE_READ_REG(a, reg) ixgbe_read_reg(a, reg) +#define IXGBE_READ_REG_ARRAY(a, reg, offset) \ + IXGBE_READ_REG((a), (reg) + ((offset) << 2)) + +#ifndef writeq +#define writeq(val, addr) writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); +#endif + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_VFSTATUS) + +struct ixgbe_hw; +struct ixgbevf_msg { + u16 msg_enable; +}; +struct net_device *ixgbevf_hw_to_netdev(const struct ixgbe_hw *hw); +struct ixgbevf_msg *ixgbevf_hw_to_msg(const struct ixgbe_hw *hw); + +extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); +#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word +#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word +#define IXGBE_EEPROM_GRANT_ATTEMPS 100 + +enum { + IXGBE_ERROR_SOFTWARE, + IXGBE_ERROR_POLLING, + IXGBE_ERROR_INVALID_STATE, + IXGBE_ERROR_UNSUPPORTED, + IXGBE_ERROR_ARGUMENT, + IXGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) do { \ + switch (level) { \ + case IXGBE_ERROR_SOFTWARE: \ + case IXGBE_ERROR_CAUTION: \ + case IXGBE_ERROR_POLLING: \ + netif_warn(ixgbevf_hw_to_msg(hw), drv, ixgbevf_hw_to_netdev(hw),\ + format, ## arg); \ + break; \ + case IXGBE_ERROR_INVALID_STATE: \ + case IXGBE_ERROR_UNSUPPORTED: \ + case IXGBE_ERROR_ARGUMENT: \ + netif_err(ixgbevf_hw_to_msg(hw), hw, ixgbevf_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) +#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ + uninitialized_var(_t); \ +} while (0) + +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) + +#endif /* _IXGBEVF_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_osdep2.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_osdep2.h new file mode 100644 index 000000000000..cc24ec4e9c1c --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_osdep2.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. 
*/ + +#ifndef _IXGBEVF_OSDEP2_H_ +#define _IXGBEVF_OSDEP2_H_ + +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg); + +static inline void IXGBE_WRITE_REG(struct ixgbe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return; +#ifdef DBG + { + struct net_device *netdev = ixgbevf_hw_to_netdev(hw); + + switch (reg) { + case IXGBE_EIMS: + case IXGBE_EIMC: + case IXGBE_EIAM: + case IXGBE_EIAC: + case IXGBE_EICR: + case IXGBE_EICS: + netdev_info(netdev, + "%s: Reg - 0x%05X, value - 0x%08X\n", + __func__, reg, value); + } + } +#endif /* DBG */ + writel(value, reg_addr + reg); +} + +static inline void IXGBE_WRITE_REG64(struct ixgbe_hw *hw, u32 reg, u64 value) +{ + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return; + writeq(value, reg_addr + reg); +} + +#endif /* _IXGBEVF_OSDEP2_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/kcompat.c b/drivers/net/ethernet/intel/ixgbevf/kcompat.c new file mode 100644 index 000000000000..438f5e106ce6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/kcompat.c @@ -0,0 +1,2719 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#include "ixgbevf.h" +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = 
*s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) 
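+/* C99-style snprintf backport: as with _kc_vsnprintf() above, the
+ * return value is the length that would have been written, which can
+ * exceed @size when the output is truncated. */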
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void 
_kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct adapter_struct *adapter = netdev_priv(netdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void 
_kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) 
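+/* Minimal WARN() fallback for old kernels: print the usual "cut here"
+ * banner with the file/line origin and the formatted message, then
+ * dump the stack. */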
+{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? "enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to 
+ pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case 
PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). 
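+		 * Callers can therefore rely on seeing 0 rather than
+		 * all-ones on failure, mirroring the word-sized
+		 * accessor above.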
+ */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
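+	 * Note that, unlike the add path above, the delete path
+	 * rejects any entry whose state is not NUD_PERMANENT outright.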
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ 
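+	/* Presence is inferred from a vendor ID config read; the
+	 * crs_timeout of 0 means we do not wait for a device that is
+	 * still replying with CRS completions. */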
+ u32 v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + 
int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + 
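+/* Usage sketch (hypothetical driver code, not part of this file): the
+ * address sync helpers above are meant to back a driver's
+ * ndo_set_rx_mode handler, roughly as follows:
+ *
+ *	static int foo_uc_sync(struct net_device *netdev, const u8 *addr)
+ *	{
+ *		return foo_add_mac_filter(netdev_priv(netdev), addr);
+ *	}
+ *
+ *	static void foo_set_rx_mode(struct net_device *netdev)
+ *	{
+ *		__kc_hw_addr_sync_dev(&netdev->uc, netdev,
+ *				      foo_uc_sync, foo_uc_unsync);
+ *	}
+ */
+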
+/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + 
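+		/* UDP and UDP-Lite both carry a fixed 8-byte header */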
hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. 
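+ *
+ * Illustrative call (hypothetical caller; none of these names come
+ * from this file):
+ *
+ *	struct _kc_flow_keys keys;
+ *
+ *	if (_kc_skb_flow_dissect_flow_keys(skb, &keys, 0))
+ *		add_arfs_filter(keys.addrs, keys.ports,
+ *				keys.basic.ip_proto);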
+ * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a whitelist of supported fields the SKB can + * parse. If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current whitelist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. + */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
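+	 * On SPARC, idprom points at the kernel's copy of the PROM
+	 * IDPROM block, whose id_ethaddr field holds the factory
+	 * Ethernet address.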
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 
2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? 
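/* Worked example of the bandwidth math above (integer arithmetic, exactly as
 * the macros compute it): PCIE_SPEED2MBS_ENC() yields the per-lane data rate
 * in Mb/s net of encoding overhead (128b/130b for 8/16 GT/s, 8b/10b for
 * 2.5/5 GT/s), so one 8 GT/s lane gives 8000 * 128 / 130 = 7876 Mb/s and a
 * x8 link gives 8 * 7876 = 63008 Mb/s. The surrounding
 * _kc_pcie_print_link_status() then renders that value as "63.008 Gb/s"
 * via bw / 1000 and bw % 1000.
 */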
pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older 
kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !RHEL >= 8.2 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ diff --git a/drivers/net/ethernet/intel/ixgbevf/kcompat.h b/drivers/net/ethernet/intel/ixgbevf/kcompat.h new file mode 100644 index 000000000000..00cdebd06223 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/kcompat.h @@ -0,0 +1,7245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define CONFIG_IGBVF_NAPI +#define NAPI + +#define adapter_struct ixgbevf_adapter +#define adapter_q_vector ixgbevf_q_vector +#define CONFIG_IXGBEVF_MSIX + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct 
msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef cpu_online +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val
+
+#define IS_ENABLED(option) \
+ (config_enabled(option) || config_enabled(option##_MODULE))
+
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X1
+#define PCI_EXP_LNKSTA_NLW_X1 0x0010
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X2
+#define PCI_EXP_LNKSTA_NLW_X2 0x0020
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X4
+#define PCI_EXP_LNKSTA_NLW_X4 0x0040
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X8
+#define PCI_EXP_LNKSTA_NLW_X8 0x0080
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef IP_OFFSET
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+#endif
+
+/*****************************************************************************/
+/* Installations with an ethtool version without eeprom, adapter id, or
+ * statistics support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+ u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char reserved1[32];
+ char reserved2[16];
+ u32 n_stats;
+ u32 testinfo_len;
+ u32 eedump_len;
+ u32 regdump_len;
+};
+
+struct ethtool_stats {
+ u32 cmd;
+ u32 n_stats;
+ u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+ u32 cmd; /* ETHTOOL_GSTRINGS */
+ u32 string_set; /* string set id e.g.
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */
+ u32 tx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay in-memory statistics
+ * block updates. Some drivers do not have an in-memory
+ * statistic block, and in such cases this value is ignored.
+ * This value must not be zero.
+ */
+ u32 stats_block_coalesce_usecs;
+
+ /* Adaptive RX/TX coalescing is an algorithm implemented by
+ * some drivers to improve latency under low packet rates and
+ * improve throughput under high packet rates. Some drivers
+ * only implement one of RX or TX adaptive coalescing. Anything
+ * not implemented by the driver causes these values to be
+ * silently ignored.
+ */
+ u32 use_adaptive_rx_coalesce;
+ u32 use_adaptive_tx_coalesce;
+
+ /* When the packet rate (measured in packets per second)
+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+ * used.
+ */
+ u32 pkt_rate_low;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_max_coalesced_frames_low;
+ u32 tx_coalesce_usecs_low;
+ u32 tx_max_coalesced_frames_low;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+ /* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ u32 pkt_rate_high;
+ u32 rx_coalesce_usecs_high;
+ u32 rx_max_coalesced_frames_high;
+ u32 tx_coalesce_usecs_high;
+ u32 tx_max_coalesced_frames_high;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ u32 rx_max_pending;
+ u32 rx_mini_max_pending;
+ u32 rx_jumbo_max_pending;
+ u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ u32 rx_pending;
+ u32 rx_mini_pending;
+ u32 rx_jumbo_pending;
+ u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ u32 autoneg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters.
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
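 *
 * Worked example of the version-code arithmetic defined below, for
 * 3.13.0-45-generic:
 *
 *   KERNEL_VERSION(3,13,0) = (3 << 16) + (13 << 8) + 0 = 199936
 *   ~0xFF & 199936         = 199936 (the sublevel byte is already zero)
 *   (199936 << 8) + 45     = 51183661 = UBUNTU_VERSION_CODE
 *
 * UBUNTU_VERSION(3,13,0,45) expands to (KERNEL_VERSION(3,13,0) << 8) + 45,
 * which is the same 51183661, so the two values compare directly.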
+ *
+ * Ex:
+ *
+ * $uname -r
+ * 3.13.0-45-generic
+ * ABI is 45
+ *
+ *
+ * $uname -r
+ * 3.16.0-23-generic
+ * ABI is 23
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+/* Ubuntu does not provide an actual release version macro, so we use the
+ * kernel version plus the ABI to generate a unique version code specific to
+ * Ubuntu. In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in
+ * order to ignore differences in sublevel which are not important since we
+ * have the ABI value. Otherwise, it becomes impossible to correlate ABI to
+ * version for ordering checks.
+ */
+#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \
+ UTS_UBUNTU_RELEASE_ABI)
+
+#if UTS_UBUNTU_RELEASE_ABI > 255
+#error UTS_UBUNTU_RELEASE_ABI is too large...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
+
+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
+/* Our version code scheme does not make sense for kernels older than 3.x,
+ * and we have no support in kcompat for this scenario. Thus, treat this as a
+ * non-Ubuntu kernel. It might be better to error out here.
+ */
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#endif
+
+#endif
+
+/* Note that the 3rd digit is always zero, and will be ignored. This is
+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
+ */
+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
+
+/* SuSE version macros are the same as Linux kernel version macro */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#endif
+#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#ifdef CONFIG_SUSE_KERNEL
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
+/* SLES11 SP2 GA is 3.0.13-0.27 */
+#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
+/* SLES11 SP3 GA is 3.0.76-0.11 */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))
+ #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0))
+ /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
+ #define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+ #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0))
+ /* most SLES11sp3 update kernels */
+ #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+ #else
+ /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
+ #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+ #endif
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))
+/* SLES12 GA is 3.12.28-4
+ * kernel updates 3.12.xx-<33 through 52>[.yy] */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49))
+/* SLES12 SP1 GA is 3.12.49-11
+ * updates 3.12.xx-60.yy where xx={51..} */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \
+ (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
+ SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \
+ SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0)))
+/* SLES12 SP2 GA is 4.4.21-69.
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13)) +/* SLES15 SP2 Beta1 is 5.3.13 */ +#define SLE_VERSION_CODE SLE_VERSION(15,2,0) + +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. 
+ */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. + */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \
+ int err; \
+ __diag_push(); \
+ __diag(ignored "-Wformat-nonliteral"); \
+ err = __trace_bprintk(ip, fmt, ##args); \
+ __diag_pop(); \
+ err; \
+})
+#endif /* GCC_VERSION < 5.1.0 */
+
+/* Newer kernels removed <linux/pci-aspm.h> */
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3)))
+#define HAVE_PCI_ASPM_H
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation.
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if 
(debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
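/* Usage sketch for the netif_msg_init() fallback above (illustrative only;
 * struct example_adapter and the debug parameter are hypothetical stand-ins
 * for a driver's private struct and module parameter): debug < 0 selects the
 * default bits, 0 silences the driver, and a positive N sets the low N bits.
 */
#define EXAMPLE_DEFAULT_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

struct example_adapter {
        u32 msg_enable;
};

static void example_init_msg_level(struct example_adapter *adapter, int debug)
{
        adapter->msg_enable = netif_msg_init(debug, EXAMPLE_DEFAULT_MSG);

        if (netif_msg_drv(adapter))
                printk(KERN_DEBUG "driver messages enabled\n");
}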
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct device {
+ struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+ return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return (struct device *) pdev;
+}
+#define pdev_printk(lvl, pdev, fmt, args...) \
+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...) \
+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...) \
+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...) \
+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+ pci_set_dma_mask(to_pci_dev(dev),(mask))
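/* Illustrative note on the compat mapping above: pci_alloc_consistent() takes
 * no gfp argument (historically it allocates as if GFP_ATOMIC), so on these
 * kernels the gfp flag passed to dma_alloc_coherent() is silently dropped,
 * as the "dangerous" comment warns. A hypothetical caller:
 */
static void *example_alloc_ring(struct device *dev, size_t size,
                                dma_addr_t *dma)
{
        /* GFP_KERNEL is ignored by the macro on < 2.5.0 kernels */
        return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}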
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+	if (strchr(dev->name, '%'))
+		return "(unregistered net_device)";
+	return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({ \
+	uint32_t __base = (base); \
+	uint32_t __rem; \
+	__rem = ((uint64_t)(n)) % __base; \
+	(n) = ((uint64_t)(n)) / __base; \
+	__rem; \
+ })
+#elif BITS_PER_LONG == 32
+uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({ \
+	uint32_t __base = (base); \
+	uint32_t __rem; \
+	if (likely(((n) >> 32) == 0)) { \
+		__rem = (uint32_t)(n) % __base; \
+		(n) = (uint32_t)(n) / __base; \
+	} else \
+		__rem = _kc__div64_32(&(n), __base); \
+	__rem; \
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
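+/*
+ * Illustrative sketch (not part of the original header): do_div() divides a
+ * 64-bit value in place and evaluates to the 32-bit remainder, which is why
+ * the backport above modifies n itself:
+ *
+ *	u64 bytes = 1000000007ULL;
+ *	u32 rem = do_div(bytes, 1000);	// bytes == 1000000, rem == 7
+ */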
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+	pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu dma_sync_single
+#define dma_sync_single_for_device dma_sync_single
+#define dma_sync_single_range_for_cpu dma_sync_single_range
+#define dma_sync_single_range_for_device dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+	if (nbits <= BITS_PER_LONG)
+		*dst = 0UL;
+	else {
+		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+		memset(dst, 0, len);
+	}
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+	return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
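+/*
+ * Illustrative sketch (not part of the original header): the raw offsets
+ * above stand in for the pci_regs.h names missing before 2.6.8, so capability
+ * reads keep their usual form; pdev and devctl below are hypothetical.
+ *
+ *	u16 devctl;
+ *	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ *
+ *	if (pos)
+ *		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &devctl);
+ */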
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+			schedule_timeout((x * HZ)/1000 + 2); \
+		} while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+	return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return m * (HZ / MSEC_PER_SEC);
+#else
+	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+	while (timeout && !signal_pending(current)) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		timeout = schedule_timeout(timeout);
+	}
+	return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY BIT(0)
+#define WAKE_UCAST BIT(1)
+#define WAKE_MCAST BIT(2)
+#define WAKE_BCAST BIT(3)
+#define WAKE_ARP BIT(4)
+#define WAKE_MAGIC BIT(5)
+#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+					   int offset, int len, void *buffer)
+{
+	int hlen = skb_headlen(skb);
+
+	if (hlen - offset >= len)
+		return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_copy_bits(skb, offset, buffer, len) < 0)
+		return NULL;
+
+	return buffer;
+#else
+	return NULL;
+#endif
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+	static struct kparam_array __param_arr_##name \
+	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+	    sizeof(array[0]), array }; \
+	module_param_call(name, param_array_set, param_array_get, \
+			  &__param_arr_##name, perm)
+#endif /* module_param_array_named */
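+/*
+ * Illustrative sketch (not part of the original header): the conversion
+ * helpers above round up so a requested delay is never shortened; e.g. with
+ * HZ=100 (10 ms per jiffy), _kc_msecs_to_jiffies(15) returns 2 jiffies,
+ * which _kc_jiffies_to_msecs() reports back as 20 ms.
+ */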
+/*
+ * num_online_nodes() is broken on all kernels older than 2.6.10.  This is
+ * needed to support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+	return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+	return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+	return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+	return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+	return m * (HZ / KC_USEC_PER_SEC);
+#else
+	return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
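+/*
+ * Illustrative sketch (not part of the original header): the bits above are
+ * what a driver ORs into its MII advertisement register when enabling
+ * flow-control autonegotiation; the variable adv is hypothetical.
+ *
+ *	u16 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;	// 0x0C00
+ */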
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymmetrically */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+	return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
+#define SUPPORTED_Pause BIT(13)
+#define SUPPORTED_Asym_Pause BIT(14)
+#define ADVERTISED_Pause BIT(13)
+#define ADVERTISED_Asym_Pause BIT(14)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+	dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+	dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef kfree_rcu
+/* this is placed here due to a lack of rcu_barrier in previous kernels */
+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node */
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+	(_timer)->function = _function; \
+	(_timer)->data = _data; \
+	init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) do {} while (0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08,
+	PCIE_LNK_X12 = 0x0C,
+	PCIE_LNK_X16 = 0x10,
+	PCIE_LNK_X32 = 0x20,
+	PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* >= RHEL 5.0 */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+	((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+	  RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+	 (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+	irq_handler_t new_handler = (irq_handler_t) handler;
+	return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) )
+#ifndef skb_checksum_help
+static inline int __kc_skb_checksum_help(struct sk_buff *skb)
+{
+	return skb_checksum_help(skb, 0);
+}
+#define skb_checksum_help(skb) __kc_skb_checksum_help((skb))
+#endif
+#endif /* < 2.6.19 && >= 2.6.11 */
+
+/* pci_restore_state and pci_save_state handle MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while (0)
+#undef pci_save_state
+int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/pci_hotplug.h>
+
+#define NEW_SKB_CSUM_HELP
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+	INIT_LIST_HEAD(&(_work)->entry); \
+	(_work)->pending = 0; \
+	(_work)->func = (void (*)(void *))_func; \
+	(_work)->data = _work; \
+	init_timer(&(_work)->timer); \
+} while (0)
+#endif
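+/*
+ * Illustrative sketch (not part of the original header): before 2.6.20 a
+ * work_struct callback received a void * context rather than the work item,
+ * so the compat INIT_WORK above stashes the work item in ->data and lets a
+ * modern-style handler keep working; my_task and my_work are hypothetical.
+ *
+ *	static void my_task(struct work_struct *work) { ... }
+ *	...
+ *	INIT_WORK(&my_work, my_task);
+ *	schedule_work(&my_work);
+ */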
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+	PCI_VENDOR_ID_##ven, (dev), \
+	PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* remove compiler warning with b=b, for unused variable */
+#define set_dev_node(a, b) do { (b) = (b); } while (0)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+	return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) \
+	do { \
+		if (vg) vg->vlan_devices[id] = dev; \
+	} while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+				   pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+	pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev) \
+	pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#define devm_kzalloc(dev, size, flags) kzalloc(size, flags)
+#define devm_kfree(dev, p) kfree(p)
+#else /* 2.6.21 */
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+	return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+	do { \
+		skb->tail = skb->data; \
+	} while (0)
+#define skb_set_tail_pointer(skb, offset) \
+	do { \
+		skb->tail = skb->data + offset; \
+	} while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+	memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+	memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
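+/*
+ * Illustrative sketch (not part of the original header): the accessor macros
+ * above map the 2.6.22+ helpers onto the old skb->h/skb->nh unions, so shared
+ * code like the following compiles either way; skb is hypothetical.
+ *
+ *	struct iphdr *iph = ip_hdr(skb);
+ *	struct tcphdr *th = tcp_hdr(skb);
+ *	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ */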
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+	return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+	DUMP_PREFIX_NONE,
+	DUMP_PREFIX_ADDRESS,
+	DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x) "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+void _kc_print_hex_dump(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+	_kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full BIT(15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full BIT(15)
+#endif
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+static inline int compound_order(struct page *page)
+{
+	return 0;
+}
+
+#define __must_be_array(a) 0
+
+#ifndef SKB_WITH_OVERHEAD
+#define SKB_WITH_OVERHEAD(X) \
+	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	int delta = 0;
+
+	if (headroom > (skb->data - skb->head))
+		delta = headroom - (skb->data - skb->head);
+
+	if (delta || skb_header_cloned(skb))
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+	/* used to look up the real NAPI polling routine */
+	int (*poll)(struct napi_struct *, int);
+	struct net_device *dev;
+	int weight;
+};
+#endif
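+/*
+ * Illustrative sketch (not part of the original header): ACCESS_ONCE(),
+ * defined above, forces exactly one load or store through a volatile cast,
+ * which matters when a value is shared with an interrupt handler; the names
+ * ring and tail are hypothetical.
+ *
+ *	unsigned int tail = ACCESS_ONCE(ring->tail);
+ */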
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware, and thus we have a driver
+ * define list which determines which drivers support multiple queues, and
+ * thus need these stronger defines. If a driver does not support multi-queue
+ * functionality, you don't need to add it to this list.
+ */
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+					  int (*poll)(struct napi_struct *, int), int weight)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	poll_dev->poll = __kc_adapter_clean;
+	poll_dev->priv = napi;
+	poll_dev->weight = weight;
+	set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+	set_bit(__LINK_STATE_START, &poll_dev->state);
+	dev_hold(poll_dev);
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+	dev_put(poll_dev);
+	memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+	return netif_running(napi->dev) &&
+	       netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+	if (napi_schedule_prep(napi))
+		__netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+	struct net_device *dev = napi_to_poll_dev(n);
+
+	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+		/* No hurry. */
+		msleep(1);
+	}
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+	napi_gro_flush(napi);
+#endif
+	netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+				       int (*poll)(struct napi_struct *, int), int weight)
+{
+	dev->poll = poll;
+	dev->weight = weight;
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+	printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+	__builtin_constant_p(n) ? ( \
+		(n == 1) ? 0 : \
+		(1UL << ilog2(n))) : \
+		(1UL << (fls_long(n) - 1))
+#endif
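+/*
+ * Illustrative sketch (not part of the original header): worked values for
+ * the two helpers defined above:
+ *
+ *	DMA_BIT_MASK(32)         == 0x00000000ffffffffULL
+ *	rounddown_pow_of_two(60) == 32
+ */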
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+	set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+	remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+	modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+	if (value != PM_QOS_DEFAULT_VALUE) { \
+		printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+		       pci_name(adapter->pdev)); \
+	} \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+	/* adapted from strict_strtoul() in 2.6.25 */
+	char *tail;
+	long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(buf);
+	if (!len)
+		return -EINVAL;
+	val = simple_strtol(buf, &tail, base);
+	if (tail == buf)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+	    ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+#else /* < 2.6.25 */
+
+#endif /* < 2.6.25 */
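+/*
+ * Illustrative sketch (not part of the original header): unlike
+ * simple_strtol(), the backported strict_strtol() above rejects trailing
+ * garbage, accepting at most one trailing newline:
+ *
+ *	long v;
+ *	strict_strtol("42\n", 10, &v);	// returns 0, v == 42
+ *	strict_strtol("42abc", 10, &v);	// returns -EINVAL
+ */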
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({ \
+	type __val = (val); \
+	type __min = (min); \
+	type __max = (max); \
+	__val = __val < __min ? __min : __val; \
+	__val > __max ? __max : __val; })
+#endif /* clamp_t */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#ifdef HAVE_PCI_ASPM_H
+#include <linux/pci-aspm.h>
+#endif
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+	ep->speed = (__u16)speed;
+	/* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+	/* no speed_hi before 2.6.27, and probably no need for it yet */
+	return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+       defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+	do { \
+		u16 pmc = 0; \
+		int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+		if (pm) { \
+			pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+					     &pmc); \
+		} \
+		(dev)->power.can_wakeup = !!(pmc >> 11); \
+		(dev)->power.should_wakeup = (val && (pmc >> 11)); \
+	} while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifndef DMA_ATTR_WEAK_ORDERING
+#define DMA_ATTR_WEAK_ORDERING 0
+#endif
+
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *);
+void _kc_netif_tx_wake_all_queues(struct net_device *);
+void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+	if (netif_is_multiqueue((_ndev))) \
+		netif_stop_subqueue((_ndev), (_qi)); \
+	else \
+		netif_stop_queue((_ndev)); \
+	} while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+	if (netif_is_multiqueue((_ndev))) \
+		netif_start_subqueue((_ndev), (_qi)); \
+	else \
+		netif_start_queue((_ndev)); \
+	} while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef 
CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+	printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+	synchronize_irq();
+#else /* < 2.5.28 */
+	synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* RHEL >= 5.5 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)))
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+	return !(pbus->parent);
+}
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP  0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+	for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full BIT(17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full BIT(18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full BIT(19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full BIT(17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full BIT(18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full BIT(19)
+#endif
+
+static inline unsigned long dev_trans_start(struct net_device *dev)
+{
+	return dev->trans_start;
+}
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
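+/*
+ * Illustrative sketch (not part of the original header): the fallback above
+ * lets common code iterate the secondary unicast list with one macro, though
+ * on these older kernels the entries are struct dev_addr_list rather than
+ * struct netdev_hw_addr; netdev and uclist are hypothetical.
+ *
+ *	struct dev_addr_list *uclist;
+ *
+ *	netdev_for_each_uc_addr(uclist, netdev)
+ *		;	// program uclist->da_addr into the hardware filter
+ */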
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU BIT(26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync()
+{
+	return 1;
+}
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+	return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev) do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t) do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev) do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target workarounds require at least this kernel rev support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+      (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif /* PORT_DA */
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) 
_kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > 
KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. 
+ */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? 
pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct 
workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 
0
+#endif
+
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+	memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#include
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on one define
+ */
+#define SUPPORTED_40000baseKR4_Full	BIT(23)
+#define SUPPORTED_40000baseCR4_Full	BIT(24)
+#define SUPPORTED_40000baseSR4_Full	BIT(25)
+#define SUPPORTED_40000baseLR4_Full	BIT(26)
+#define ADVERTISED_40000baseKR4_Full	BIT(23)
+#define ADVERTISED_40000baseCR4_Full	BIT(24)
+#define ADVERTISED_40000baseSR4_Full	BIT(25)
+#define ADVERTISED_40000baseLR4_Full	BIT(26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
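+
+/* Minimal usage sketch (illustration only): how a driver's get_eee handler
+ * might feed an MMD EEE Capability register through the helper above.
+ * KCOMPAT_EXAMPLES, struct example_hw and example_read_mmd are hypothetical
+ * names, not part of this header, so the sketch never actually builds.
+ */
+#ifdef KCOMPAT_EXAMPLES
+static int example_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct example_hw *hw = netdev_priv(netdev);		/* hypothetical */
+	u16 eee_cap = example_read_mmd(hw, MDIO_MMD_PCS,	/* hypothetical */
+				       MDIO_PCS_EEE_ABLE);
+
+	/* translate MMD EEE Capability (3.20) bits to SUPPORTED_* bits */
+	edata->supported = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
+	return 0;
+}
+#endif /* KCOMPAT_EXAMPLES */
+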
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
+	__kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+#endif /* mmd_eee_adv_to_ethtool_adv_t */
+
+#ifndef ethtool_adv_to_mmd_eee_adv_t
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+	u16 reg = 0;
+
+	if (adv & ADVERTISED_100baseT_Full)
+		reg |= MDIO_EEE_100TX;
+	if (adv & ADVERTISED_1000baseT_Full)
+		reg |= MDIO_EEE_1000T;
+	if (adv & ADVERTISED_10000baseT_Full)
+		reg |= MDIO_EEE_10GT;
+	if (adv & ADVERTISED_1000baseKX_Full)
+		reg |= MDIO_EEE_1000KX;
+	if (adv & ADVERTISED_10000baseKX4_Full)
+		reg |= MDIO_EEE_10GKX4;
+	if (adv & ADVERTISED_10000baseKR_Full)
+		reg |= MDIO_EEE_10GKR;
+
+	return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+#endif /* ethtool_adv_to_mmd_eee_adv_t */
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	BUG_ON(!pos);
+	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+	return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x) (x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \
+    ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) )
+#define ptp_clock_register(caps, args...)
ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) )
+
+#ifndef mod_delayed_work
+/**
+ * __mod_delayed_work - modify delay or queue delayed work
+ * @wq: workqueue to use
+ * @dwork: delayed work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Return: %true if @dwork was pending and was rescheduled;
+ * %false if it wasn't pending
+ *
+ * Note: the dwork parameter is declared as a void * to avoid
+ * compatibility problems with early 2.6 kernels, where struct
+ * delayed_work is not declared. Unlike the original implementation,
+ * flags are not preserved, and it must not be used in interrupt
+ * context.
+ */
+static inline bool __mod_delayed_work(struct workqueue_struct *wq,
+				      void *dwork,
+				      unsigned long delay)
+{
+	bool ret = cancel_delayed_work(dwork);
+	queue_delayed_work(wq, dwork, delay);
+	return ret;
+}
+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
+#endif /* mod_delayed_work */
+
+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
+#else /* >= 3.7.0 */
+#include
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+	return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT 0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+							 u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+	vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+	skb->vlan_tci = vlan_tci;
+	return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+	__kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_NLATTR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags);
+#endif /* HAVE_NDO_FDB_ADD_NLATTR */
+#if defined(HAVE_FDB_DEL_NLATTR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr);
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr);
+#endif /* HAVE_FDB_DEL_NLATTR */
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+#endif
+
+/* The definitions for these functions when CONFIG_OF_NET is defined are
+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already
+ * have backports for when CONFIG_OF_NET is true. These are separated and
+ * duplicated in order to cover all cases, so that all kernels get either
+ * the real definitions (when CONFIG_OF_NET is defined) or the stub
+ * definitions (when CONFIG_OF_NET is not defined, or the kernel is too old
+ * to have real definitions).
+ */
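+
+/* Minimal usage sketch (illustration only): PCI_DEVID packs the bus number
+ * and devfn into the 16-bit requester ID, which is convenient as a lookup
+ * key for virtual functions. KCOMPAT_EXAMPLES is a hypothetical guard so
+ * this never builds as part of the header.
+ */
+#ifdef KCOMPAT_EXAMPLES
+static u16 example_vf_requester_id(struct pci_dev *vfdev)
+{
+	return PCI_DEVID(vfdev->bus->number, vfdev->devfn);
+}
+#endif /* KCOMPAT_EXAMPLES */
+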
+#ifndef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))
+#define HAVE_RHEL7_PCI_DRIVER_RH
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#define HAVE_RHEL7_PCI_RESET_NOTIFY
+#endif /* RHEL >= 7.2 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define HAVE_GENEVE_RX_OFFLOAD
+#endif /* RHEL < 7.5 */
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */
+#endif /* RHEL >= 7.3 */
+
+/* new hooks added to net_device_ops_extended in RHEL7.4 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* RHEL >= 7.4 */
+#else /* RHEL >= 8.0 */
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#endif /* RHEL >= 8.0 */
+#endif /* RHEL >= 7.0 */
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#define netdev_notifier_info_to_dev(ptr) ptr
+#ifndef time_in_range64
+#define time_in_range64(a, b, c) \
+	(time_after_eq64(a, b) && \
+	 time_before_eq64(a, c))
+#endif /* time_in_range64 */
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev);
+#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction
+#endif /* <RHEL6.7 */
+#else /* >= 3.12.0 */
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_VXLAN_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+#endif /* >= 3.12.0 */
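+
+/* Minimal usage sketch (illustration only, assuming the backport above):
+ * a probe path can report the negotiated PCIe link the same way on every
+ * kernel. KCOMPAT_EXAMPLES is a hypothetical guard.
+ */
+#ifdef KCOMPAT_EXAMPLES
+static void example_report_link(struct pci_dev *pdev)
+{
+	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	if (!pcie_get_minimum_link(pdev, &speed, &width))
+		dev_info(&pdev->dev, "PCIe link speed 0x%x, width x%d\n",
+			 speed, width);
+}
+#endif /* KCOMPAT_EXAMPLES */
+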
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
+#ifndef u64_stats_init
+#define u64_stats_init(a) do { } while(0)
+#endif
+#undef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \
+     !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)))
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+	dev = pci_physfn(dev);
+	if (pci_is_root_bus(dev->bus))
+		return NULL;
+
+	return dev->bus->self;
+}
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+#ifndef list_next_entry
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+#ifndef list_prev_entry
+#define list_prev_entry(pos, member) \
+	list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) )
+#define devm_kcalloc(dev, cnt, size, flags) \
+	devm_kzalloc(dev, cnt * size, flags)
+#endif /* > 2.6.20 */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+bool _kc_pci_device_is_present(struct pci_dev *pdev);
+#define pci_device_is_present _kc_pci_device_is_present
+#endif /* <RHEL7.0 */
+#else /* >= 3.13.0 */
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+
+#ifndef U16_MAX
+#define U16_MAX ((u16)~0U)
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#ifndef U64_MAX
+#define U64_MAX ((u64)~0ULL)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
+#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+
+/* it isn't expected that this would be a #define unless we made it so */
+#ifndef skb_set_hash
+
+#define PKT_HASH_TYPE_NONE	0
+#define PKT_HASH_TYPE_L2	1
+#define PKT_HASH_TYPE_L3	2
+#define PKT_HASH_TYPE_L4	3
+
+enum _kc_pkt_hash_types {
+	_KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE,
+	_KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2,
+	_KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3,
+	_KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4,
+};
+#define pkt_hash_types _kc_pkt_hash_types
+
+#define skb_set_hash __kc_skb_set_hash
+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
+				     u32 __maybe_unused hash,
+				     int __maybe_unused type)
+{
+#ifdef HAVE_SKB_L4_RXHASH
+	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+#ifdef NETIF_F_RXHASH
+	skb->rxhash = hash;
+#endif
+}
+#endif /* !skb_set_hash */
+
+#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#ifndef HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif
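+
+/* Minimal usage sketch (illustration only): with the shim above, an RX
+ * cleanup path can always report the hardware RSS hash through
+ * skb_set_hash(); on pre-3.14 kernels it lands in skb->rxhash (and
+ * l4_rxhash where present). KCOMPAT_EXAMPLES is a hypothetical guard.
+ */
+#ifdef KCOMPAT_EXAMPLES
+static void example_rx_hash(struct sk_buff *skb, u32 rss_hash, bool hash_is_l4)
+{
+	skb_set_hash(skb, rss_hash,
+		     hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+#endif /* KCOMPAT_EXAMPLES */
+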
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+
+#ifndef HAVE_VXLAN_CHECKS
+#define HAVE_VXLAN_CHECKS
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */
+
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+#define HAVE_NDO_DFWD_OPS
+#endif
+
+#ifndef pci_enable_msix_range
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	*(u32 *)dst = *(const u32 *)src;
+	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+	u16 *a = (u16 *)dst;
+	const u16 *b = (const u16 *)src;
+
+	a[0] = b[0];
+	a[1] = b[1];
+	a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef OPTIMIZER_HIDE_VAR
+#ifdef __GNUC__
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#else
+#include
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0)))
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+#ifdef NETIF_F_RXHASH
+	return skb->rxhash;
+#else
+	return 0;
+#endif /* NETIF_F_RXHASH */
+}
+#endif /* !RHEL > 7.0 && !SLES >= 10.4 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define request_firmware_direct request_firmware
+#endif /* !RHEL || RHEL < 7.5 */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) )
+#define HAVE_SKBUFF_RXHASH
+#endif /* >= 2.6.35 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+     !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp)
+
+#else /* >= 3.15.0 */
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
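+
+/* Minimal usage sketch (illustration only): the fallbacks above let drivers
+ * pair the modern barrier names with atomic bit operations on pre-3.16
+ * kernels, at the cost of a full smp_mb(). KCOMPAT_EXAMPLES is a
+ * hypothetical guard.
+ */
+#ifdef KCOMPAT_EXAMPLES
+static void example_request_reset(unsigned long *state)
+{
+	set_bit(0, state);	/* e.g. a "reset requested" flag bit */
+	/* order the flag write before any later wakeup or doorbell write */
+	smp_mb__after_atomic();
+}
+#endif /* KCOMPAT_EXAMPLES */
+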
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
+				   int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+				   int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+				      int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	__kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
+				   int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+				   int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+				      int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	__kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* if someone backports this, hopefully they backport it as a #define.
+ * Declare it as zero on older kernels so that if it gets or'd in
+ * it won't affect anything, therefore preventing core driver changes.
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
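+
+/* Minimal usage sketch (illustration only): with the shims above, an
+ * .ndo_set_rx_mode handler can use the modern sync/unsync interface on
+ * every kernel. KCOMPAT_EXAMPLES and the example_*_filter helpers are
+ * hypothetical.
+ */
+#ifdef KCOMPAT_EXAMPLES
+static int example_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+	return example_add_filter(netdev, addr);	/* hypothetical */
+}
+
+static int example_uc_unsync(struct net_device *netdev,
+			     const unsigned char *addr)
+{
+	return example_del_filter(netdev, addr);	/* hypothetical */
+}
+
+static void example_set_rx_mode(struct net_device *netdev)
+{
+	/* add newly requested unicast filters, drop stale ones */
+	__dev_uc_sync(netdev, example_uc_sync, example_uc_unsync);
+}
+#endif /* KCOMPAT_EXAMPLES */
+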
+#else
+#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \
+      ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) )
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+    !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+	return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+	return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define ktime_get_ts64 ktime_get_ts
+#define ktime_get_real_ts64 ktime_get_real_ts
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL 6.8..7.0) && !(RHEL >= 7.2) */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+     RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+static inline void ktime_get_real_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get_real());
+}
+
+static inline void ktime_get_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get());
+}
+#endif
+
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#endif /* RHEL_RELEASE_CODE < RHEL7.5 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3))
+static inline u64 ktime_get_ns(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+	return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime());
+}
+#endif /* RHEL < 7.3 */
+
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include
+#define HAVE_RHASHTABLE
+#endif /* 3.17.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+#include
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data,
+			 unsigned int max_len);
+#else
+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
+#endif /* !RHEL >= 8.2 */
+
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+/* RHEL 7.3 backported xmit_more */
+#if
+
+/* RHEL 7.3 backported xmit_more */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_SKB_XMIT_MORE
+#endif /* >= RH 7.3 */
+
+#undef GENMASK
+#define GENMASK(h, l) \
+    (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+    (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#else /* 3.18.0 */
+#define HAVE_SKBUFF_CSUM_LEVEL
+#define HAVE_SKB_XMIT_MORE
+#define HAVE_SKB_INNER_PROTOCOL_TYPE
+#endif /* 3.18.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
+#else
+#define HAVE_NDO_FEATURES_CHECK
+#endif /* 3.18.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) )
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); })
+#endif
+#endif /* 3.18.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+/* netdev_phys_port_id renamed to netdev_phys_item_id */
+#define netdev_phys_item_id netdev_phys_port_id
+
+static inline void _kc_napi_complete_done(struct napi_struct *napi,
+                                          int __always_unused work_done) {
+    napi_complete(napi);
+}
+/* don't use our backport if the distro kernels already have it */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+#define napi_complete_done _kc_napi_complete_done
+#endif
+
+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
+                                const unsigned long *maskp, int nmaskbits);
+#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf
+
+#ifndef NETDEV_RSS_KEY_LEN
+#define NETDEV_RSS_KEY_LEN (13 * 4)
+#endif
+#if (!(RHEL_RELEASE_CODE && \
+       ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))))
+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
+#endif /* RHEL_RELEASE_CODE */
+void __kc_netdev_rss_key_fill(void *buffer, size_t len);
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+#ifndef dev_alloc_pages
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE -1
+#endif
+#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order))
+#endif
+#ifndef dev_alloc_page
+#define dev_alloc_page() dev_alloc_pages(0)
+#endif
+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
+/**
+ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+    tc->nsec += delta;
+}
+
+static inline struct net_device *
+of_find_net_device_by_node(struct device_node __always_unused *np)
+{
+    return NULL;
+}
+
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))))
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+      (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node);
+#define cpumask_local_spread _kc_cpumask_local_spread
+#endif
+#ifdef HAVE_RHASHTABLE
+#define rhashtable_lookup_fast(ht, key, params) \
+    do { \
+        (void)params; \
+        rhashtable_lookup((ht), (key)); \
+    } while (0)
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+#define rhashtable_insert_fast(ht, obj, params) \
+    do { \
+        (void)params; \
+        rhashtable_insert((ht), (obj), GFP_KERNEL); \
+    } while (0)
+
+#define rhashtable_remove_fast(ht, obj, params) \
+    do { \
+        (void)params; \
+        rhashtable_remove((ht), (obj), GFP_KERNEL); \
+    } while (0)
+
+#else /* >= 3,19,0 */
+#define rhashtable_insert_fast(ht, obj, params) \
+    do { \
+        (void)params; \
+        rhashtable_insert((ht), (obj)); \
+    } while (0)
+
+#define rhashtable_remove_fast(ht, obj, params) \
+    do { \
+        (void)params; \
+        rhashtable_remove((ht), (obj)); \
+    } while (0)
+
+#endif /* 3,19,0 */
+#endif /* HAVE_RHASHTABLE */
+#else /* >= 4,1,0 */
+#define HAVE_NDO_GET_PHYS_PORT_NAME
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_PASSTHRU_FEATURES_CHECK
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#define HAVE_NDO_SET_TX_MAXRATE
+#endif /* 4,1,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
+#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+     !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \
+     !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+    return page->pfmemalloc;
+#else
+    return false;
+#endif
+}
+#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) && !SLES12sp1+ */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+    return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+}
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+    return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+           ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+}
+#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */
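+
+/* Example: decoding an ethtool ntuple ring_cookie with the two helpers
+ * above.  Sketch only (under #if 0, not compiled); by ethtool convention a
+ * VF value of zero means the rule targets the PF itself.
+ */
+#if 0
+static void example_parse_ring_cookie(u64 ring_cookie, u32 *queue, u32 *vf)
+{
+    *queue = (u32)ethtool_get_flow_spec_ring(ring_cookie);
+    *vf = (u32)ethtool_get_flow_spec_ring_vf(ring_cookie);
+}
+#endif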
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
+#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+        RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+       RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+    return bus->self && bus->self->ari_enabled;
+}
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#define HAVE_VF_STATS
+#endif /* (RHEL7.2+) */
+#endif /* !(RHEL6.8+ || RHEL7.2+) */
+#else
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+    return false;
+}
+#endif /* 2.6.27 */
+#else
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#define HAVE_VF_STATS
+#endif /* 4.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))
+/**
+ * _kc_flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct _kc_flow_dissector_key_ipv4_addrs {
+    __be32 src;
+    __be32 dst;
+};
+
+/**
+ * _kc_flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct _kc_flow_dissector_key_ipv6_addrs {
+    struct in6_addr src;
+    struct in6_addr dst;
+};
+
+/**
+ * _kc_flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct _kc_flow_dissector_key_addrs {
+    union {
+        struct _kc_flow_dissector_key_ipv4_addrs v4addrs;
+        struct _kc_flow_dissector_key_ipv6_addrs v6addrs;
+    };
+};
+
+/**
+ * _kc_flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * @src: source port number
+ * @dst: destination port number
+ */
+struct _kc_flow_dissector_key_ports {
+    union {
+        __be32 ports;
+        struct {
+            __be16 src;
+            __be16 dst;
+        };
+    };
+};
+
+/**
+ * _kc_flow_dissector_key_basic:
+ * @n_proto: Network header protocol (eg. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (eg. TCP/UDP)
+ * @padding: padding for alignment
+ */
+struct _kc_flow_dissector_key_basic {
+    __be16 n_proto;
+    u8 ip_proto;
+    u8 padding;
+};
+
+struct _kc_flow_keys {
+    struct _kc_flow_dissector_key_basic basic;
+    struct _kc_flow_dissector_key_ports ports;
+    struct _kc_flow_dissector_key_addrs addrs;
+};
+
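+/* Example: hashing a packet's ports via the backported flow-keys dissector
+ * declared just below.  Sketch only (under #if 0, not compiled); on these
+ * older kernels skb_flow_dissect_flow_keys() resolves to
+ * _kc_skb_flow_dissect_flow_keys().
+ */
+#if 0
+static bool example_get_l4_ports(const struct sk_buff *skb,
+                                 __be16 *src, __be16 *dst)
+{
+    struct flow_keys keys;
+
+    if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
+        return false;
+
+    *src = keys.ports.src;
+    *dst = keys.ports.dst;
+    return true;
+}
+#endif
+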
+/* These are all the include files for kernels inside this #ifdef block that
+ * have any reference to the in kernel definition of struct flow_keys. The
+ * reason for putting them here is to make 100% sure that these files do not get
+ * included after re-defining flow_keys to _kc_flow_keys. This is done to
+ * prevent any possible ABI issues that this structure re-definition could cause.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \
+     RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \
+     SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#include <net/flow_keys.h>
+#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0))
+#include <net/flow_dissector.h>
+#endif /* 4.2.0 */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#define flow_keys _kc_flow_keys
+bool
+_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+                               struct flow_keys *flow,
+                               unsigned int __always_unused flags);
+#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys
+#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */
+#endif /* 4.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_NDO_SET_VF_TRUST
+#endif /* (RHEL_RELEASE >= 7.3) */
+#ifndef CONFIG_64BIT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#include <asm-generic/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
+#else /* 3.3.0 => 4.3.x */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+#include <asm-generic/int-ll64.h>
+#endif /* 2.6.26 => 3.3.0 */
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+    const volatile u32 __iomem *p = addr;
+    u32 low, high;
+
+    low = readl(p);
+    high = readl(p + 1);
+
+    return low + ((u64)high << 32);
+}
+#define readq readq
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+    writel(val, addr);
+    writel(val >> 32, addr + 4);
+}
+#define writeq writeq
+#endif
+#endif /* < 3.3.0 */
+#endif /* !CONFIG_64BIT */
+#else /* < 4.4.0 */
+#define HAVE_NDO_SET_VF_TRUST
+
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
+#endif /* !CONFIG_64BIT */
+#endif /* 4.4.0 */
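+
+/* Example: the readq()/writeq() fallbacks above let a driver read a 64-bit
+ * statistics register the same way on 32-bit and 64-bit builds.  Sketch only
+ * (under #if 0, not compiled); "hw_addr" and the 0x100 offset are made-up
+ * values for illustration.
+ */
+#if 0
+static u64 example_read_stat64(void __iomem *hw_addr)
+{
+    /* on 32-bit kernels without a native readq() this becomes two
+     * readl() calls, low word first (lo-hi ordering, as defined above)
+     */
+    return readq(hw_addr + 0x100);
+}
+#endif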
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
+/* protect against a likely backport */
+#ifndef NETIF_F_CSUM_MASK
+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
+#endif /* NETIF_F_CSUM_MASK */
+#ifndef NETIF_F_SCTP_CRC
+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
+#endif /* NETIF_F_SCTP_CRC */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+                                     u8 *mac_addr __maybe_unused);
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#else /* 4.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_GENEVE_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
+#define HAVE_NETDEV_UPPER_INFO
+#endif /* 4.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3))
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+    return skb->head + skb->csum_start;
+#else /* < 2.6.22 */
+    return skb_transport_header(skb);
+#endif
+}
+#endif
+
+#if !(UBUNTU_VERSION_CODE && \
+      UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \
+    !(RHEL_RELEASE_CODE && \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
+    !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+static inline void napi_consume_skb(struct sk_buff *skb,
+                                    int __always_unused budget)
+{
+    dev_consume_skb_any(skb);
+}
+
+#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */
+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
+    !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+    *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+#endif
+#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
+    !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0)))
+static inline void page_ref_inc(struct page *page)
+{
+    get_page(page);
+}
+#else
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#endif
+#ifndef IPV4_USER_FLOW
+#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
+#endif
+
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_TC_SETUP_CLSFLOWER
+#define HAVE_TC_FLOWER_ENC
+#endif
+
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \
+     (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))
+#define HAVE_TC_SETUP_CLSU32
+#endif
+
+#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))
+#define HAVE_TC_SETUP_CLSFLOWER
+#endif
+
+#else /* >= 4.6.0 */
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_PTP_CROSSTIMESTAMP
+#define HAVE_TC_SETUP_CLSFLOWER
+#define HAVE_TC_SETUP_CLSU32
+#endif /* 4.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
+#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)))
+#define HAVE_NETIF_TRANS_UPDATE
+#endif /* SLES12sp3+ || RHEL7.4+ */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
+    (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_25G_BITS
+#define HAVE_ETHTOOL_50G_BITS
+#define HAVE_ETHTOOL_100G_BITS
+#endif /* RHEL7.3+ || SLES12sp3+ */
+#else /* 4.7.0 */
+#define HAVE_NETIF_TRANS_UPDATE
+#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
+#define HAVE_ETHTOOL_25G_BITS
+#define HAVE_ETHTOOL_50G_BITS
+#define HAVE_ETHTOOL_100G_BITS
+#define HAVE_TCF_MIRRED_REDIRECT
+#endif /* 4.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+enum udp_parsable_tunnel_type {
+    UDP_TUNNEL_TYPE_VXLAN,
+    UDP_TUNNEL_TYPE_GENEVE,
+};
+struct udp_tunnel_info {
+    unsigned short type;
+    sa_family_t sa_family;
+    __be16 port;
+};
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))
+#define HAVE_TCF_EXTS_TO_LIST
+#endif
+
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0))
+#define tc_no_actions(_exts) true
+#define tc_for_each_action(_a, _exts) while (0)
+#endif
+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\
+    !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_io_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+    return pci_request_selected_regions(pdev,
+                pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+    return pci_release_selected_regions(pdev,
+                pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_mem_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+    return pci_request_selected_regions(pdev,
+                pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+    return pci_release_selected_regions(pdev,
+                pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif /* !SLE_VERSION(12,3,0) */
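+
+/* Example: probe-time use of the pci_request_mem_regions() backport above.
+ * Sketch only (under #if 0, not compiled); KBUILD_MODNAME stands in for the
+ * driver name and the BAR mapping step is elided.
+ */
+#if 0
+static int example_claim_bars(struct pci_dev *pdev)
+{
+    int err;
+
+    err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
+    if (err)
+        return err;
+
+    /* ... pci_iomap()/ioremap the BARs here ... */
+
+    pci_release_mem_regions(pdev);
+    return 0;
+}
+#endif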
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <12.3.0 */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \
+     SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \
+     RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))
+#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
+#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4))
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+    dst[0] = mask & ULONG_MAX;
+
+    if (sizeof(mask) > sizeof(unsigned long))
+        dst[1] = mask >> 32;
+}
+#endif /* <RHEL7.4 */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \
+     !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16)))
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+    switch (ethertype) {
+    case htons(ETH_P_8021Q):
+#ifdef ETH_P_8021AD
+    case htons(ETH_P_8021AD):
+#endif
+        return true;
+    default:
+        return false;
+    }
+}
+#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.13.0-16 */
+#else /* >=4.9 */
+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+                                           int __always_unused work_done)
+{
+    /* it was really hard to get napi_complete_done to be safe to call
+     * recursively without running into our own kcompat, so just use
+     * napi_complete
+     */
+    napi_complete(napi);
+
+    /* true means that the stack is telling the driver to go-ahead and
+     * re-enable interrupts
+     */
+    return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE &&
(SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define 
HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. 
+ */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_NDO_BPF +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. 
+     */
+    OPTIMIZER_HIDE_VAR(index);
+    return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+
+#define array_index_nospec(index, size) \
+({ \
+    typeof(index) _i = (index); \
+    typeof(size) _s = (size); \
+    unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \
+ \
+    BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+    BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+ \
+    (typeof(_i)) (_i & _mask); \
+})
+#endif /* array_index_nospec */
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \
+     !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+                              struct tc_cls_common_offload *common)
+{
+    if (!tc_can_offload(dev))
+        return false;
+    if (common->chain_index)
+        return false;
+
+    return true;
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
+#endif /* sizeof_field */
+#else /* >= 4.16 */
+#include <linux/nospec.h>
+#define HAVE_XDP_BUFF_RXQ
+#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_VF_STATS_DROPPED
+#endif /* 4.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
+#include <linux/pci_regs.h>
+#include <linux/pci.h>
+#define PCIE_SPEED_16_0GT 0x17
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+void _kc_pcie_print_link_status(struct pci_dev *dev);
+#define pcie_print_link_status _kc_pcie_print_link_status
+#else /* >= 4.17.0 */
+#define HAVE_XDP_BUFF_IN_XDP_H
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if IS_ENABLED(CONFIG_NET_DEVLINK) && \
+    (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0))
+#include <net/devlink.h>
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) && \
+     (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)))
+enum devlink_port_flavour {
+    DEVLINK_PORT_FLAVOUR_PHYSICAL,
+    DEVLINK_PORT_FLAVOUR_CPU,
+    DEVLINK_PORT_FLAVOUR_DSA,
+    DEVLINK_PORT_FLAVOUR_PCI_PF,
+    DEVLINK_PORT_FLAVOUR_PCI_VF,
+};
+#endif /* <4.18.0 && <SLES15sp1 && <RHEL7.7 */
+
+struct _kc_devlink_port_attrs {
+    u8 split:1,
+       splittable:1;
+    u32 lanes;
+    enum devlink_port_flavour flavour;
+    struct netdev_phys_item_id switch_id;
+    union {
+        struct {
+            u32 port_number;
+            u32 split_subport_number;
+        } phys;
+        struct {
+            u16 pf;
+        } pci_pf;
+        struct {
+            u16 pf;
+            u16 vf;
+        } pci_vf;
+    };
+};
+#define devlink_port_attrs _kc_devlink_port_attrs
+#endif /* CONFIG_NET_DEVLINK && <5.9.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0))
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+#include <linux/if_macvlan.h>
+#ifndef macvlan_supports_dest_filter
+#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter
+static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev)
+{
+    struct macvlan_dev *macvlan = netdev_priv(dev);
+
+    return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+           macvlan->mode == MACVLAN_MODE_VEPA ||
+           macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+#endif
+
+#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0)))
+#ifndef macvlan_accel_priv
+#define macvlan_accel_priv _kc_macvlan_accel_priv
+static inline void *_kc_macvlan_accel_priv(struct net_device *dev)
+{
+    struct macvlan_dev *macvlan = netdev_priv(dev);
+
+    return macvlan->fwd_priv;
+}
+#endif
+
+#ifndef macvlan_release_l2fw_offload
+#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload
+static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev)
+{
+    struct macvlan_dev *macvlan = netdev_priv(dev);
+
+    macvlan->fwd_priv = NULL;
+    return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif
+#endif /* !SLES || SLES < 15.1 */
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
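+
+/* Example: the macvlan helpers above during teardown of an L2 forwarding
+ * offload.  Sketch only (under #if 0, not compiled); "sdev" is the upper
+ * macvlan netdev a driver would receive via ndo_dfwd_del_station().
+ */
+#if 0
+static void example_dfwd_del_station(struct net_device *sdev)
+{
+    if (!macvlan_supports_dest_filter(sdev))
+        return;
+
+    /* drops fwd_priv and re-adds the MAC as a plain unicast filter */
+    if (macvlan_release_l2fw_offload(sdev))
+        pr_warn("failed to release l2fw offload\n");
+}
+#endif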
"kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) +#define firmware_request_nowarn request_firmware_direct + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + if (attrs->split) + devlink_port_split_set(devlink_port, attrs->phys.port_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#define HAVE_AF_XDP_SUPPORT +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) +#define HAVE_DEVLINK_REGIONS +#endif +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#ifndef metadata_dst_free +#include + +static inline void __kc_metadata_dst_free(struct metadata_dst *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#endif /* !metadata_dst_free */ +#endif /* RHEL >= 7.3 || SLES >= 12.2 */ +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | 
NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open + +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct 
flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. + */ + struct flow_action action; +#endif +}; + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define devlink_params_publish(devlink) do { } while (0) +#define devlink_params_unpublish(devlink) do { } while (0) +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && 
(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+#ifndef eth_get_headlen
+static inline u32
+__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data,
+                     unsigned int len)
+{
+    return eth_get_headlen(data, len);
+}
+
+#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len)
+#endif /* !eth_get_headlen */
+#endif /* !RHEL >= 8.2 */
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif /* mmiowb */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2))
+#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set)
+static inline void
+_kc_devlink_port_attrs_set(struct devlink_port *devlink_port,
+                           struct _kc_devlink_port_attrs *attrs)
+{
+    devlink_port_attrs_set(devlink_port, attrs->flavour,
+                           attrs->phys.port_number, attrs->split,
+                           attrs->phys.split_subport_number);
+}
+
+#define devlink_port_attrs_set _kc_devlink_port_attrs_set
+#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */
+#endif /* <RHEL8.2 */
+
+#else /* >= 5.2.0 */
+#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED
+#define SPIN_UNLOCK_IMPLIES_MMIOWB
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))
+#define flow_block_offload tc_block_offload
+#define flow_block_command tc_block_command
+#define flow_cls_offload tc_cls_flower_offload
+#define flow_block_binder_type tcf_block_binder_type
+#define flow_cls_common_offload tc_cls_common_offload
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE
+#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
+        TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+                                   struct list_head *driver_list,
+                                   tc_setup_cb_t *cb,
+                                   void *cb_ident, void *cb_priv,
+                                   bool ingress_only);
+
+#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+                                   ingress_only) \
+    _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+                                   ingress_only)
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#else /* RHEL >= 8.2 */
+#define HAVE_FLOW_BLOCK_API
+#endif /* RHEL >= 8.2 */
+
+#ifndef ETH_P_LLDP
+#define ETH_P_LLDP 0x88CC
+#endif /* !ETH_P_LLDP */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2))
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+static inline void
+devlink_flash_update_begin_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_end_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_status_notify(struct devlink __always_unused *devlink,
+                                   const char __always_unused *status_msg,
+                                   const char __always_unused *component,
+                                   unsigned long __always_unused done,
+                                   unsigned long __always_unused total)
+{
+}
+#endif /* CONFIG_NET_DEVLINK */
+#endif /* <RHEL8.2 */
+
+#else /* >= 5.3.0 */
+#define XSK_UMEM_RETURNS_XDP_DESC
+#define HAVE_XSK_UMEM_HAS_ADDRS
+#define HAVE_FLOW_BLOCK_API
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE <
KERNEL_VERSION(5,4,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0))) +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} + +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */ +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#define cpu_latency_qos_update_request pm_qos_update_request +#define cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) +#define cpu_latency_qos_remove_request pm_qos_remove_request + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif + +#ifdef HAVE_DEVLINK_REGIONS +#if IS_ENABLED(CONFIG_NET_DEVLINK) +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *data); +}; + +#ifndef devlink_region_create +static inline struct devlink_region * +_kc_devlink_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, u64 region_size) +{ + return devlink_region_create(devlink, ops->name, region_max_snapshots, + region_size); +} + +#define devlink_region_create _kc_devlink_region_create +#endif /* devlink_region_create */ +#endif /* CONFIG_NET_DEVLINK */ +#define HAVE_DEVLINK_SNAPSHOT_CREATE_DESTRUCTOR +#endif /* HAVE_DEVLINK_REGIONS */ 
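+
+/* Example: creating a devlink region through the ops-based shim above.  A
+ * sketch only (under #if 0, not compiled); the region name, snapshot count
+ * and size are illustrative values.
+ */
+#if 0
+static const struct devlink_region_ops example_region_ops = {
+    .name = "example-flash",
+    .destructor = kfree,
+};
+
+static struct devlink_region *example_region_create(struct devlink *devlink)
+{
+    /* on kernels before 5.7 this goes through _kc_devlink_region_create(),
+     * which forwards ops->name to the old name-based API
+     */
+    return devlink_region_create(devlink, &example_region_ops, 8, 65536);
+}
+#endif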
+#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#else /* >= 5.8.0 */ +#undef HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#define HAVE_XDP_QUERY_PROG +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* 5.9.0 */ + +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/kcompat_ethtool.c b/drivers/net/ethernet/intel/ixgbevf/kcompat_ethtool.c new file mode 100644 index 000000000000..2c3d84d3b545 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/kcompat_ethtool.c @@ -0,0 +1,1148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +/* + * net/core/ethtool.c - Ethtool ioctl handler + * Copyright (c) 2003 Matthew Wilcox + * + * This file is where we call all the ethtool_ops commands to get + * the information ethtool needs. We fall back to calling do_ioctl() + * for drivers which haven't been converted to ethtool_ops yet. + * + * It's GPL, stupid. + * + * Modification by sfeldma@pobox.com to work as backward compat + * solution for pre-ethtool_ops kernels. + * - copied struct ethtool_ops from ethtool.h + * - defined SET_ETHTOOL_OPS + * - put in some #ifndef NETIF_F_xxx wrappers + * - changes refs to dev->ethtool_ops to ethtool_ops + * - changed dev_ethtool to ethtool_ioctl + * - remove EXPORT_SYMBOL()s + * - added _kc_ prefix in built-in ethtool_op_xxx ops. 
+ */ + +/* include targets below are reconstructed; inferred from the APIs this file uses */ +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> + +#include "kcompat.h" + +#undef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#undef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#undef SPEED_10000 +#define SPEED_10000 10000 + +#undef ethtool_ops +#define ethtool_ops _kc_ethtool_ops + +struct _kc_ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); + int (*self_test_count)(struct net_device *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*phys_id)(struct net_device *, u32); + int (*get_stats_count)(struct net_device *); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, + u64 *); +} *ethtool_ops = NULL; + +#undef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) + +/* + * Some useful ethtool_ops methods that are device independent. If we find that + * all drivers want to do the same thing here, we can turn these into dev_() + * function calls. + */ + +#undef ethtool_op_get_link +#define ethtool_op_get_link _kc_ethtool_op_get_link +u32 _kc_ethtool_op_get_link(struct net_device *dev) +{ + return netif_carrier_ok(dev) ?
1 : 0; +} + +#undef ethtool_op_get_tx_csum +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) +{ +#ifdef NETIF_F_IP_CSUM + return (dev->features & NETIF_F_IP_CSUM) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tx_csum +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_IP_CSUM + if (data) +#ifdef NETIF_F_IPV6_CSUM + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#else + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; +#endif +#endif + + return 0; +} + +#undef ethtool_op_get_sg +#define ethtool_op_get_sg _kc_ethtool_op_get_sg +u32 _kc_ethtool_op_get_sg(struct net_device *dev) +{ +#ifdef NETIF_F_SG + return (dev->features & NETIF_F_SG) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_sg +#define ethtool_op_set_sg _kc_ethtool_op_set_sg +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_SG + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; +#endif + + return 0; +} + +#undef ethtool_op_get_tso +#define ethtool_op_get_tso _kc_ethtool_op_get_tso +u32 _kc_ethtool_op_get_tso(struct net_device *dev) +{ +#ifdef NETIF_F_TSO + return (dev->features & NETIF_F_TSO) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tso +#define ethtool_op_set_tso _kc_ethtool_op_set_tso +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_TSO + if (data) + dev->features |= NETIF_F_TSO; + else + dev->features &= ~NETIF_F_TSO; +#endif + + return 0; +} + +/* Handlers for each ethtool command */ + +static int ethtool_get_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd = { ETHTOOL_GSET }; + int err; + + if (!ethtool_ops->get_settings) + return -EOPNOTSUPP; + + err = ethtool_ops->get_settings(dev, &cmd); + if (err < 0) + return err; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + return 0; +} + +static int ethtool_set_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd; + + if (!ethtool_ops->set_settings) + return -EOPNOTSUPP; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + return ethtool_ops->set_settings(dev, &cmd); +} + +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) +{ + struct ethtool_drvinfo info; + struct ethtool_ops *ops = ethtool_ops; + + if (!ops->get_drvinfo) + return -EOPNOTSUPP; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GDRVINFO; + ops->get_drvinfo(dev, &info); + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + if (ops->get_regs_len) + info.regdump_len = ops->get_regs_len(dev); + if (ops->get_eeprom_len) + info.eedump_len = ops->get_eeprom_len(dev); + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int ethtool_get_regs(struct net_device *dev, char *useraddr) +{ + struct ethtool_regs regs; + struct ethtool_ops *ops = ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(&regs, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (regs.len > reglen) + regs.len = reglen; + + regbuf = kmalloc(reglen, GFP_USER); + if (!regbuf) + return
-ENOMEM; + + ops->get_regs(dev, &regs, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, &regs, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + +out: + kfree(regbuf); + return ret; +} + +static int ethtool_get_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; + + if (!ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol; + + if (!ethtool_ops->set_wol) + return -EOPNOTSUPP; + + if (copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + return ethtool_ops->set_wol(dev, &wol); +} + +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + + if (!ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_msglevel(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_msglevel) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_msglevel(dev, edata.data); + return 0; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, void *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + + if (!ethtool_ops->get_link) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_link(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->get_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + ret = -EFAULT; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + goto out; + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->set_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); +
if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ethtool_ops->get_coalesce(dev, &coalesce); + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce; + + if (!ethtool_ops->set_coalesce) + return -EOPNOTSUPP; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + return ethtool_ops->set_coalesce(dev, &coalesce); +} + +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + ethtool_ops->get_ringparam(dev, &ringparam); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam; + + if (!ethtool_ops->set_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + return ethtool_ops->set_ringparam(dev, &ringparam); +} + +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam; + + if (!ethtool_ops->set_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + return ethtool_ops->set_pauseparam(dev, &pauseparam); +} + +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GRXCSUM }; + + if (!ethtool_ops->get_rx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_rx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_rx_csum(dev, edata.data); + return 0; +} + +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTXCSUM }; + + if (!ethtool_ops->get_tx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tx_csum(dev, edata.data); +}
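The handlers above all follow one contract: check the corresponding _kc_ethtool_ops callback, then round-trip the ethtool struct through copy_from_user/copy_to_user. As a usage sketch only (not part of this patch; the mydrv_* names are hypothetical), a driver built on this shim would register its table with the backported SET_ETHTOOL_OPS and forward SIOCETHTOOL from its own ioctl handler to ethtool_ioctl():

/* Hypothetical driver glue for this compat layer; illustrative only. */
static void mydrv_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strncpy(info->driver, "mydrv", sizeof(info->driver) - 1);
	strncpy(info->version, "1.0", sizeof(info->version) - 1);
}

static struct ethtool_ops mydrv_ethtool_ops = {
	.get_drvinfo = mydrv_get_drvinfo,
	.get_link    = ethtool_op_get_link,	/* _kc_ helper defined above */
};

static int mydrv_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != SIOCETHTOOL)
		return -EOPNOTSUPP;
	/* SET_ETHTOOL_OPS here stores the table in the file-scope
	 * ethtool_ops pointer that ethtool_ioctl() dispatches through.
	 */
	SET_ETHTOOL_OPS(dev, &mydrv_ethtool_ops);
	return ethtool_ioctl(ifr);
}

On pre-ethtool_ops kernels the networking core never consults dev->ethtool_ops, which is why the dispatch has to happen in the driver's own ioctl path.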
+ +static int ethtool_get_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GSG }; + + if (!ethtool_ops->get_sg) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_sg(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_sg(dev, edata.data); +} + +static int ethtool_get_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTSO }; + + if (!ethtool_ops->get_tso) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tso(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tso(dev, edata.data); +} + +static int ethtool_self_test(struct net_device *dev, char *useraddr) +{ + struct ethtool_test test; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->self_test || !ops->self_test_count) + return -EOPNOTSUPP; + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = ops->self_test_count(dev); + data = kmalloc(test.len * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->self_test(dev, &test, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct net_device *dev, void *useraddr) +{ + struct ethtool_gstrings gstrings; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_strings) + return -EOPNOTSUPP; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } + + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_strings(dev, gstrings.string_set, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_phys_id(struct net_device *dev, void *useraddr) +{ + struct ethtool_value id; + + if (!ethtool_ops->phys_id) + return -EOPNOTSUPP; + + if (copy_from_user(&id, useraddr, sizeof(id))) + return -EFAULT; + + return ethtool_ops->phys_id(dev, id.data); +} + +static int ethtool_get_stats(struct net_device *dev, void *useraddr) +{ + struct ethtool_stats stats; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->get_ethtool_stats || !ops->get_stats_count) + return -EOPNOTSUPP; + + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + stats.n_stats = ops->get_stats_count(dev); + 
data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_ethtool_stats(dev, &stats, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &stats, sizeof(stats))) + goto out; + useraddr += sizeof(stats); + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +/* The main entry point in this file. Called from net/core/dev.c */ + +#define ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr) +{ + struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + void *useraddr = (void *) ifr->ifr_data; + u32 ethcmd; + + /* + * XXX: This can be pushed down into the ethtool_* handlers that + * need it. Keep existing behavior for the moment. + */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!dev || !netif_device_present(dev)) + return -ENODEV; + + if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GSET: + return ethtool_get_settings(dev, useraddr); + case ETHTOOL_SSET: + return ethtool_set_settings(dev, useraddr); + case ETHTOOL_GDRVINFO: + return ethtool_get_drvinfo(dev, useraddr); + case ETHTOOL_GREGS: + return ethtool_get_regs(dev, useraddr); + case ETHTOOL_GWOL: + return ethtool_get_wol(dev, useraddr); + case ETHTOOL_SWOL: + return ethtool_set_wol(dev, useraddr); + case ETHTOOL_GMSGLVL: + return ethtool_get_msglevel(dev, useraddr); + case ETHTOOL_SMSGLVL: + return ethtool_set_msglevel(dev, useraddr); + case ETHTOOL_NWAY_RST: + return ethtool_nway_reset(dev); + case ETHTOOL_GLINK: + return ethtool_get_link(dev, useraddr); + case ETHTOOL_GEEPROM: + return ethtool_get_eeprom(dev, useraddr); + case ETHTOOL_SEEPROM: + return ethtool_set_eeprom(dev, useraddr); + case ETHTOOL_GCOALESCE: + return ethtool_get_coalesce(dev, useraddr); + case ETHTOOL_SCOALESCE: + return ethtool_set_coalesce(dev, useraddr); + case ETHTOOL_GRINGPARAM: + return ethtool_get_ringparam(dev, useraddr); + case ETHTOOL_SRINGPARAM: + return ethtool_set_ringparam(dev, useraddr); + case ETHTOOL_GPAUSEPARAM: + return ethtool_get_pauseparam(dev, useraddr); + case ETHTOOL_SPAUSEPARAM: + return ethtool_set_pauseparam(dev, useraddr); + case ETHTOOL_GRXCSUM: + return ethtool_get_rx_csum(dev, useraddr); + case ETHTOOL_SRXCSUM: + return ethtool_set_rx_csum(dev, useraddr); + case ETHTOOL_GTXCSUM: + return ethtool_get_tx_csum(dev, useraddr); + case ETHTOOL_STXCSUM: + return ethtool_set_tx_csum(dev, useraddr); + case ETHTOOL_GSG: + return ethtool_get_sg(dev, useraddr); + case ETHTOOL_SSG: + return ethtool_set_sg(dev, useraddr); + case ETHTOOL_GTSO: + return ethtool_get_tso(dev, useraddr); + case ETHTOOL_STSO: + return ethtool_set_tso(dev, useraddr); + case ETHTOOL_TEST: + return ethtool_self_test(dev, useraddr); + case ETHTOOL_GSTRINGS: + return ethtool_get_strings(dev, useraddr); + case ETHTOOL_PHYS_ID: + return ethtool_phys_id(dev, useraddr); + case ETHTOOL_GSTATS: + return ethtool_get_stats(dev, useraddr); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +#define mii_if_info _kc_mii_if_info +struct _kc_mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + + unsigned int full_duplex : 1; /* is full duplex? */ + unsigned int force_media : 1; /* is autoneg. disabled?
*/ + + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int phy_id, int location); + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); +}; + +struct ethtool_cmd; +struct mii_ioctl_data; + +#undef mii_link_ok +#define mii_link_ok _kc_mii_link_ok +#undef mii_nway_restart +#define mii_nway_restart _kc_mii_nway_restart +#undef mii_ethtool_gset +#define mii_ethtool_gset _kc_mii_ethtool_gset +#undef mii_ethtool_sset +#define mii_ethtool_sset _kc_mii_ethtool_sset +#undef mii_check_link +#define mii_check_link _kc_mii_check_link +extern int _kc_mii_link_ok (struct mii_if_info *mii); +extern int _kc_mii_nway_restart (struct mii_if_info *mii); +extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, + struct ethtool_cmd *ecmd); +extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, + struct ethtool_cmd *ecmd); +extern void _kc_mii_check_link (struct mii_if_info *mii); +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +#undef generic_mii_ioctl +#define generic_mii_ioctl _kc_generic_mii_ioctl +extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_changed); +#endif /* > 2.4.6 */ + + +struct _kc_pci_dev_ext { + struct pci_dev *dev; + void *pci_drvdata; + struct pci_driver *driver; +}; + +struct _kc_net_dev_ext { + struct net_device *dev; + unsigned int carrier; +}; + + +/**************************************/ +/* mii support */ + +int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u32 advert, bmcr, lpa, nego; + + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); + + /* only supports twisted-pair */ + ecmd->port = PORT_MII; + + /* only supports internal transceiver */ + ecmd->transceiver = XCVR_INTERNAL; + + /* this isn't fully supported at higher layers */ + ecmd->phy_address = mii->phy_id; + + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + if (advert & ADVERTISE_10HALF) + ecmd->advertising |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + ecmd->advertising |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); + if (bmcr & BMCR_ANENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + + nego = mii_nway_result(advert & lpa); + if (nego == LPA_100FULL || nego == LPA_100HALF) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + if (nego == LPA_100FULL || nego == LPA_10FULL) { + ecmd->duplex = DUPLEX_FULL; + mii->full_duplex = 1; + } else { + ecmd->duplex = DUPLEX_HALF; + mii->full_duplex = 0; + } + } else { + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + } + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if (ecmd->phy_address != mii->phy_id) + return -EINVAL; + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* ignore supported, maxtxpkt, maxrxpkt */ + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 bmcr, advert, tmp; + + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) == 0) + return -EINVAL; + + /* advertise only what has been requested */ + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + tmp |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + tmp |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + tmp |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + tmp |= ADVERTISE_100FULL; + if (advert != tmp) { + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); + mii->advertising = tmp; + } + + /* turn on autonegotiation, and force a renegotiate */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); + + mii->force_media = 0; + } else { + u32 bmcr, tmp; + + /* turn off auto negotiation, set speed and duplexity */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); + if (ecmd->speed == SPEED_100) + tmp |= BMCR_SPEED100; + if (ecmd->duplex == DUPLEX_FULL) { + tmp |= BMCR_FULLDPLX; + mii->full_duplex = 1; + } else + mii->full_duplex = 0; + if (bmcr != tmp) + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); + + mii->force_media = 1; + } + return 0; +} + +int _kc_mii_link_ok (struct mii_if_info *mii) +{ + /* first, a dummy read, needed to latch some MII phys */ + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) + return 1; + return 0; +} + +int _kc_mii_nway_restart (struct mii_if_info *mii) +{ + int bmcr; + int r = -EINVAL; + + /* if autoneg is off, it's an error */ + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); + r = 0; + } + + return r; +} + +void _kc_mii_check_link (struct mii_if_info *mii) +{ + int cur_link = mii_link_ok(mii); + int prev_link = netif_carrier_ok(mii->dev); + + if (cur_link && !prev_link) + netif_carrier_on(mii->dev); + else if (prev_link && !cur_link) + netif_carrier_off(mii->dev); +} + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_chg_out) +{ + int rc = 0; + unsigned int duplex_changed = 0; + + if (duplex_chg_out) + *duplex_chg_out = 0; + + mii_data->phy_id &= mii_if->phy_id_mask; + mii_data->reg_num &= mii_if->reg_num_mask; + + switch(cmd) { + case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ + case SIOCGMIIPHY: + mii_data->phy_id = mii_if->phy_id; + /* fall through */ + +
case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ + case SIOCGMIIREG: + mii_data->val_out = + mii_if->mdio_read(mii_if->dev, mii_data->phy_id, + mii_data->reg_num); + break; + + case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ + case SIOCSMIIREG: { + u16 val = mii_data->val_in; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (mii_data->phy_id == mii_if->phy_id) { + switch(mii_data->reg_num) { + case MII_BMCR: { + unsigned int new_duplex = 0; + if (val & (BMCR_RESET|BMCR_ANENABLE)) + mii_if->force_media = 0; + else + mii_if->force_media = 1; + if (mii_if->force_media && + (val & BMCR_FULLDPLX)) + new_duplex = 1; + if (mii_if->full_duplex != new_duplex) { + duplex_changed = 1; + mii_if->full_duplex = new_duplex; + } + break; + } + case MII_ADVERTISE: + mii_if->advertising = val; + break; + default: + /* do nothing */ + break; + } + } + + mii_if->mdio_write(mii_if->dev, mii_data->phy_id, + mii_data->reg_num, val); + break; + } + + default: + rc = -EOPNOTSUPP; + break; + } + + if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) + *duplex_chg_out = 1; + + return rc; +} +#endif /* > 2.4.6 */ + diff --git a/drivers/net/ethernet/intel/ixgbevf/kcompat_overflow.h b/drivers/net/ethernet/intel/ixgbevf/kcompat_overflow.h new file mode 100644 index 000000000000..3a306462a24e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/kcompat_overflow.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2020 Intel Corporation. */ + +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include <linux/compiler.h> + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro.
+ */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. 
+ */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. 
+ * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c deleted file mode 100644 index 6bc1953263b9..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ /dev/null @@ -1,336 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ - -#include "mbx.h" -#include "ixgbevf.h" - -/** - * ixgbevf_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * - * returns 0 if it successfully received a message notification - **/ -static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - while (countdown && mbx->ops.check_for_msg(hw)) { - countdown--; - udelay(mbx->udelay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - - return countdown ? 0 : IXGBE_ERR_MBX; -} - -/** - * ixgbevf_poll_for_ack - Wait for message acknowledgment - * @hw: pointer to the HW structure - * - * returns 0 if it successfully received a message acknowledgment - **/ -static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - while (countdown && mbx->ops.check_for_ack(hw)) { - countdown--; - udelay(mbx->udelay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - - return countdown ? 0 : IXGBE_ERR_MBX; -} - -/** - * ixgbevf_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully received a message notification and - * copied it into the receive buffer. 
- **/ -static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - if (!mbx->ops.read) - goto out; - - ret_val = ixgbevf_poll_for_msg(hw); - - /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size); -out: - return ret_val; -} - -/** - * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; - - /* send msg */ - ret_val = mbx->ops.write(hw, msg, size); - - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = ixgbevf_poll_for_ack(hw); -out: - return ret_val; -} - -/** - * ixgbevf_read_v2p_mailbox - read v2p mailbox - * @hw: pointer to the HW structure - * - * This function is used to read the v2p mailbox without losing the read to - * clear status bits. - **/ -static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) -{ - u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); - - v2p_mailbox |= hw->mbx.v2p_mailbox; - hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; - - return v2p_mailbox; -} - -/** - * ixgbevf_check_for_bit_vf - Determine if a status bit was set - * @hw: pointer to the HW structure - * @mask: bitmask for bits to be tested and cleared - * - * This function is used to check for the read to clear bits within - * the V2P mailbox. 
- **/ -static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) -{ - u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw); - s32 ret_val = IXGBE_ERR_MBX; - - if (v2p_mailbox & mask) - ret_val = 0; - - hw->mbx.v2p_mailbox &= ~mask; - - return ret_val; -} - -/** - * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail - * @hw: pointer to the HW structure - * - * returns 0 if the PF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { - ret_val = 0; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd - * @hw: pointer to the HW structure - * - * returns 0 if the PF has set the ACK bit or else ERR_MBX - **/ -static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { - ret_val = 0; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * ixgbevf_check_for_rst_vf - checks to see if the PF has reset - * @hw: pointer to the HW structure - * - * returns true if the PF has set the reset done bit or else false - **/ -static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | - IXGBE_VFMAILBOX_RSTI))) { - ret_val = 0; - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock - * @hw: pointer to the HW structure - * - * return 0 if we obtained the mailbox lock - **/ -static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); - - /* reserve mailbox for VF use */ - if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) - ret_val = 0; - - return ret_val; -} - -/** - * ixgbevf_write_mbx_vf - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully copied message into the buffer - **/ -static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - s32 ret_val; - u16 i; - - /* lock the mailbox to prevent PF/VF race condition */ - ret_val = ixgbevf_obtain_mbx_lock_vf(hw); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the message buffer */ - ixgbevf_check_for_msg_vf(hw); - ixgbevf_check_for_ack_vf(hw); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - - /* Drop VFU and interrupt the PF to tell it a message has been sent */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); - -out_no_write: - return ret_val; -} - -/** - * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully read message from buffer - **/ -static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - s32 ret_val = 0; - u16 i; - - /* lock the mailbox to prevent PF/VF race condition */ - ret_val = ixgbevf_obtain_mbx_lock_vf(hw); - if (ret_val) - goto out_no_read; - - /* copy the message from the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = 
IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); - - /* Acknowledge receipt and release mailbox, then we're done */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return ret_val; -} - -/** - * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for VF mailbox - */ -static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - - /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications - */ - mbx->timeout = 0; - mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; - - mbx->size = IXGBE_VFMAILBOX_SIZE; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - - return 0; -} - -const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .read = ixgbevf_read_mbx_vf, - .write = ixgbevf_write_mbx_vf, - .read_posted = ixgbevf_read_posted_mbx, - .write_posted = ixgbevf_write_posted_mbx, - .check_for_msg = ixgbevf_check_for_msg_vf, - .check_for_ack = ixgbevf_check_for_ack_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, -}; - -/* Mailbox operations when running on Hyper-V. - * On Hyper-V, PF/VF communication is not through the - * hardware mailbox; this communication is through - * a software mediated path. - * Most mail box operations are noop while running on - * Hyper-V. - */ -const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, -}; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h deleted file mode 100644 index 853796c8ef0e..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ /dev/null @@ -1,113 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ - -#ifndef _IXGBE_MBX_H_ -#define _IXGBE_MBX_H_ - -#include "vf.h" - -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 - -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 - -/* Define mailbox register bits */ -#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ -#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ - -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) - -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - -#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - -/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is true if it is IXGBE_PF_*. 
- * Message ACK's are the value or'd with 0xF0000000 - */ -/* Messages below or'd with this are the ACK */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 -/* Messages below or'd with this are the NACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 -/* Indicates that VF is still clear to send requests */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 -#define IXGBE_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) - -/* definitions to support mailbox API version negotiation */ - -/* each element denotes a version of the API; existing numbers may not - * change; any additions must go at the end - */ -enum ixgbe_pfvf_api_rev { - ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ - ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ - ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ - ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ - ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ - ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ - /* This value should always be last */ - ixgbe_mbox_api_unknown, /* indicates that API version is not known */ -}; - -/* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ - -/* mailbox API, version 1.0 VF requests */ -#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ -#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ - -/* mailbox API, version 1.1 VF requests */ -#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ - -/* GET_QUEUES return data indices within the mailbox */ -#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ -#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ -#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ -#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ - -/* mailbox API, version 1.2 VF requests */ -#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ -#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ - -#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c - -/* mailbox API, version 1.4 VF requests */ -#define IXGBE_VF_IPSEC_ADD 0x0d -#define IXGBE_VF_IPSEC_DEL 0x0e - -/* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 -/* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 - -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ - -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ - -/* forward declaration of the HW struct */ -struct ixgbe_hw; - -#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h deleted file mode 100644 index 68d16ae5b65a..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ - -#ifndef _IXGBEVF_REGS_H_ -#define _IXGBEVF_REGS_H_ - -#define IXGBE_VFCTRL 0x00000 -#define IXGBE_VFSTATUS 0x00008 -#define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFFRTIMER 0x00048 -#define IXGBE_VFRXMEMWRAP 0x03190 -#define IXGBE_VTEICR 0x00100 -#define IXGBE_VTEICS 0x00104 -#define IXGBE_VTEIMS 0x00108 -#define IXGBE_VTEIMC 0x0010C -#define IXGBE_VTEIAC 0x00110 -#define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) -#define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) -#define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) -#define IXGBE_VFGPRC 0x0101C -#define IXGBE_VFGPTC 0x0201C -#define IXGBE_VFGORC_LSB 0x01020 -#define IXGBE_VFGORC_MSB 0x01024 -#define IXGBE_VFGOTC_LSB 0x02020 -#define IXGBE_VFGOTC_MSB 0x02024 -#define IXGBE_VFMPRC 0x01034 -#define IXGBE_VFMRQC 0x3000 -#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) -#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) - -/* VFMRQC bits */ -#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 - -#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) - -#endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c deleted file mode 100644 index d5ce49636548..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ /dev/null @@ -1,1018 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ - -#include "vf.h" -#include "ixgbevf.h" - -/* On Hyper-V, to reset, we need to read from this offset - * from the PCI config space. This is the mechanism used on - * Hyper-V to support PF/VF communication. 
- */ -#define IXGBE_HV_RESET_OFFSET 0x201 - -static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, - u32 *retmsg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 retval = mbx->ops.write_posted(hw, msg, size); - - if (retval) - return retval; - - return mbx->ops.read_posted(hw, retmsg, size); -} - -/** - * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware by filling the bus info structure and media type, clears - * all on chip counters, initializes receive address registers, multicast - * table, VLAN filter table, calls routine to set up link and flow control - * settings, and leaves transmit and receive units disabled and uninitialized - **/ -static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw) -{ - /* Clear adapter stopped flag */ - hw->adapter_stopped = false; - - return 0; -} - -/** - * ixgbevf_init_hw_vf - virtual function hardware initialization - * @hw: pointer to hardware structure - * - * Initialize the hardware by resetting the hardware and then starting - * the hardware - **/ -static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) -{ - s32 status = hw->mac.ops.start_hw(hw); - - hw->mac.ops.get_mac_addr(hw, hw->mac.addr); - - return status; -} - -/** - * ixgbevf_reset_hw_vf - Performs hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks and - * clears all interrupts. - **/ -static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 timeout = IXGBE_VF_INIT_TIMEOUT; - s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; - u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; - u8 *addr = (u8 *)(&msgbuf[1]); - - /* Call adapter stop to disable tx/rx and clear interrupts */ - hw->mac.ops.stop_adapter(hw); - - /* reset the api version */ - hw->api_version = ixgbe_mbox_api_10; - - IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); - IXGBE_WRITE_FLUSH(hw); - - /* we cannot reset while the RSTI / RSTD bits are asserted */ - while (!mbx->ops.check_for_rst(hw) && timeout) { - timeout--; - udelay(5); - } - - if (!timeout) - return IXGBE_ERR_RESET_FAILED; - - /* mailbox timeout can now become active */ - mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; - - msgbuf[0] = IXGBE_VF_RESET; - mbx->ops.write_posted(hw, msgbuf, 1); - - mdelay(10); - - /* set our "perm_addr" based on info provided by PF - * also set up the mc_filter_type which is piggy backed - * on the mac address in word 3 - */ - ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); - if (ret_val) - return ret_val; - - /* New versions of the PF may NACK the reset return message - * to indicate that no MAC address has yet been assigned for - * the VF. - */ - if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && - msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) - return IXGBE_ERR_INVALID_MAC_ADDR; - - if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) - ether_addr_copy(hw->mac.perm_addr, addr); - - hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; - - return 0; -} - -/** - * Hyper-V variant; the VF/PF communication is through the PCI - * config space. 
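The reset handshake in ixgbevf_reset_hw_vf() above relies on a fixed reply layout: IXGBE_VF_PERMADDR_MSG_LEN (4) words, with the permanent MAC packed right after the header word and the multicast filter type in word IXGBE_VF_MC_TYPE_WORD. A sketch of the unpacking, mirroring the code above (kernel u8/u32/s32 types assumed):

	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];	/* words 0..3 from the PF */
	u8 *addr = (u8 *)&msgbuf[1];		/* 6 MAC bytes span words 1-2 */

	/* word 0 comes back as IXGBE_VF_RESET | ACK when the MAC is valid,
	 * or | NACK when the PF has not assigned one yet; word 3 selects
	 * which address bits feed the multicast hash (see ixgbevf_mta_vector
	 * below)
	 */
	s32 mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];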
- * @hw: pointer to private hardware struct - */ -static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) -{ -#if IS_ENABLED(CONFIG_PCI_MMCONFIG) - struct ixgbevf_adapter *adapter = hw->back; - int i; - - for (i = 0; i < 6; i++) - pci_read_config_byte(adapter->pdev, - (i + IXGBE_HV_RESET_OFFSET), - &hw->mac.perm_addr[i]); - return 0; -#else - pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n"); - return -EOPNOTSUPP; -#endif -} - -/** - * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units - * @hw: pointer to hardware structure - * - * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, - * disables transmit and receive units. The adapter_stopped flag is used by - * the shared code and drivers to determine if the adapter is in a stopped - * state and should not touch the hardware. - **/ -static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) -{ - u32 number_of_queues; - u32 reg_val; - u16 i; - - /* Set the adapter_stopped flag so other driver functions stop touching - * the hardware - */ - hw->adapter_stopped = true; - - /* Disable the receive unit by stopping each queue */ - number_of_queues = hw->mac.max_rx_queues; - for (i = 0; i < number_of_queues; i++) { - reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); - if (reg_val & IXGBE_RXDCTL_ENABLE) { - reg_val &= ~IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); - } - } - - IXGBE_WRITE_FLUSH(hw); - - /* Clear interrupt mask to stop interrupts from being generated */ - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); - - /* Clear any pending interrupts */ - IXGBE_READ_REG(hw, IXGBE_VTEICR); - - /* Disable the transmit unit. Each queue must be disabled. */ - number_of_queues = hw->mac.max_tx_queues; - for (i = 0; i < number_of_queues; i++) { - reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); - if (reg_val & IXGBE_TXDCTL_ENABLE) { - reg_val &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val); - } - } - - return 0; -} - -/** - * ixgbevf_mta_vector - Determines bit-vector in multicast table to set - * @hw: pointer to hardware structure - * @mc_addr: the multicast address - * - * Extracts 12 bits from a multicast address to determine which - * bit-vector to set in the multicast table. The hardware uses 12 bits from - * incoming Rx multicast addresses to determine the bit-vector to check in - * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initialization - * to mc_filter_type.
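As a worked instance of the extraction described above (implemented by the switch in the function that follows), take mc_filter_type 0 and the multicast address 01:00:5e:00:01:81:

	u8 mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 };

	/* type 0 uses bits [47:36]: drop the low nibble of byte 4 and let
	 * byte 5 supply the upper 8 of the 12 hash bits
	 */
	u32 vector = ((mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4)) & 0xFFF;
	/* vector == (0x01 >> 4) | (0x81 << 4) == 0x810 */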
- **/ -static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 vector = 0; - - switch (hw->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ - vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); - break; - case 1: /* use bits [46:35] of the address */ - vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); - break; - case 2: /* use bits [45:34] of the address */ - vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); - break; - case 3: /* use bits [43:32] of the address */ - vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); - break; - default: /* Invalid mc_filter_type */ - break; - } - - /* vector can only be 12 bits or the boundary will be exceeded */ - vector &= 0xFFF; - return vector; -} - -/** - * ixgbevf_get_mac_addr_vf - Read device MAC address - * @hw: pointer to the HW structure - * @mac_addr: pointer to storage for retrieved MAC address - **/ -static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) -{ - ether_addr_copy(mac_addr, hw->mac.perm_addr); - - return 0; -} - -static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) -{ - u32 msgbuf[3], msgbuf_chk; - u8 *msg_addr = (u8 *)(&msgbuf[1]); - s32 ret_val; - - memset(msgbuf, 0, sizeof(msgbuf)); - /* If index is one then this is the start of a new list and needs - * indication to the PF so it can do its own list management. - * If it is zero then that tells the PF to just clear all of - * this VF's macvlans and there is no new list. - */ - msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; - msgbuf[0] |= IXGBE_VF_SET_MACVLAN; - msgbuf_chk = msgbuf[0]; - - if (addr) - ether_addr_copy(msg_addr, addr); - - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - ARRAY_SIZE(msgbuf)); - if (!ret_val) { - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) - return -ENOMEM; - } - - return ret_val; -} - -static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) -{ - return -EOPNOTSUPP; -} - -/** - * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. - * @hw: pointer to hardware structure - * @reta: buffer to fill with RETA contents. - * @num_rx_queues: Number of Rx queues configured for this port - * - * The "reta" buffer should be big enough to contain 32 registers. - * - * Returns: 0 on success. - * if API doesn't support this operation - (-EOPNOTSUPP). - */ -int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) -{ - int err, i, j; - u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; - u32 *hw_reta = &msgbuf[1]; - u32 mask = 0; - - /* We have to use a mailbox for 82599 and x540 devices only. - * For these devices RETA has 128 entries. - * Also these VFs support up to 4 RSS queues. Therefore PF will compress - * 16 RETA entries in each DWORD giving 2 bits to each entry. - */ - int dwords = IXGBEVF_82599_RETA_SIZE / 16; - - /* We support the RSS querying for 82599 and x540 devices only. - * Thus return an error if API doesn't support RETA querying or querying - * is not supported for this device type.
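The dwords arithmetic above is worth spelling out: a 128-entry RETA at 2 bits per entry packs 16 entries into each 32-bit word, so the PF reply is 8 payload words plus the header:

	/* IXGBEVF_82599_RETA_SIZE == 128; 16 two-bit entries per dword */
	int dwords = 128 / 16;	/* 8 payload words */
	/* read_posted() below therefore fetches dwords + 1 == 9 words */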
- */ - switch (hw->api_version) { - case ixgbe_mbox_api_14: - case ixgbe_mbox_api_13: - case ixgbe_mbox_api_12: - if (hw->mac.type < ixgbe_mac_X550_vf) - break; - /* fall through */ - default: - return -EOPNOTSUPP; - } - - msgbuf[0] = IXGBE_VF_GET_RETA; - - err = hw->mbx.ops.write_posted(hw, msgbuf, 1); - - if (err) - return err; - - err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1); - - if (err) - return err; - - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - /* If the operation has been refused by a PF return -EPERM */ - if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) - return -EPERM; - - /* If we didn't get an ACK there must have been - * some sort of mailbox error so we should treat it - * as such. - */ - if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK)) - return IXGBE_ERR_MBX; - - /* ixgbevf doesn't support more than 2 queues at the moment */ - if (num_rx_queues > 1) - mask = 0x1; - - for (i = 0; i < dwords; i++) - for (j = 0; j < 16; j++) - reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask; - - return 0; -} - -/** - * ixgbevf_get_rss_key_locked - get the RSS Random Key - * @hw: pointer to the HW structure - * @rss_key: buffer to fill with RSS Hash Key contents. - * - * The "rss_key" buffer should be big enough to contain 10 registers. - * - * Returns: 0 on success. - * if API doesn't support this operation - (-EOPNOTSUPP). - */ -int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) -{ - int err; - u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; - - /* We currently support the RSS Random Key retrieval for 82599 and x540 - * devices only. - * - * Thus return an error if API doesn't support RSS Random Key retrieval - * or if the operation is not supported for this device type. - */ - switch (hw->api_version) { - case ixgbe_mbox_api_14: - case ixgbe_mbox_api_13: - case ixgbe_mbox_api_12: - if (hw->mac.type < ixgbe_mac_X550_vf) - break; - /* fall through */ - default: - return -EOPNOTSUPP; - } - - msgbuf[0] = IXGBE_VF_GET_RSS_KEY; - err = hw->mbx.ops.write_posted(hw, msgbuf, 1); - - if (err) - return err; - - err = hw->mbx.ops.read_posted(hw, msgbuf, 11); - - if (err) - return err; - - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - /* If the operation has been refused by a PF return -EPERM */ - if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK)) - return -EPERM; - - /* If we didn't get an ACK there must have been - * some sort of mailbox error so we should treat it - * as such. 
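One worked instance of the RETA unpacking loop above, with a hypothetical reply word from the PF:

	u32 hw_word = 0x0000AAAA;	/* hypothetical PF reply word */
	int j = 5;

	u32 entry = (hw_word >> (2 * j)) & 0x3;	/* bits 11:10 -> entry == 2 */
	/* the driver masks further with 0x1 since these VFs use at most
	 * two RSS queues, folding every entry onto queue 0 or 1
	 */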
- */ - if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK)) - return IXGBE_ERR_MBX; - - memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); - - return 0; -} - -/** - * ixgbevf_set_rar_vf - set device MAC address - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: Unused in this implementation - **/ -static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, - u32 vmdq) -{ - u32 msgbuf[3]; - u8 *msg_addr = (u8 *)(&msgbuf[1]); - s32 ret_val; - - memset(msgbuf, 0, sizeof(msgbuf)); - msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; - ether_addr_copy(msg_addr, addr); - - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - ARRAY_SIZE(msgbuf)); - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - /* if nacked the address was rejected, use "perm_addr" */ - if (!ret_val && - (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { - ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); - return IXGBE_ERR_MBX; - } - - return ret_val; -} - -/** - * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: Unused in this implementation - * - * We don't really allow setting the device MAC address. However, - * if the address being set is the permanent MAC address we will - * permit that. - **/ -static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, - u32 vmdq) -{ - if (ether_addr_equal(addr, hw->mac.perm_addr)) - return 0; - - return -EOPNOTSUPP; -} - -/** - * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses - * @hw: pointer to the HW structure - * @netdev: pointer to net device structure - * - * Updates the Multicast Table Array. - **/ -static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, - struct net_device *netdev) -{ - struct netdev_hw_addr *ha; - u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; - u16 *vector_list = (u16 *)&msgbuf[1]; - u32 cnt, i; - - /* Each entry in the list uses one 16-bit word. We have 30 - * 16-bit words available in our HW msg buffer (minus 1 for the - * msg type). That's 30 hash values if we pack 'em right. If - * there are more than 30 MC addresses to add then punt the - * extras for now and then add code to handle more than 30 later. - * It would be unusual for a server to request that many multicast - * addresses except in large enterprise network environments. - */ - - cnt = netdev_mc_count(netdev); - if (cnt > 30) - cnt = 30; - msgbuf[0] = IXGBE_VF_SET_MULTICAST; - msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; - - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == cnt) - break; - if (is_link_local_ether_addr(ha->addr)) - continue; - - vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); - } - - return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - IXGBE_VFMAILBOX_SIZE); -} - -/** - * Hyper-V variant - just a stub. - * @hw: unused - * @netdev: unused - */ -static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, - struct net_device *netdev) -{ - return -EOPNOTSUPP; -} - -/** - * ixgbevf_update_xcast_mode - Update Multicast mode - * @hw: pointer to the HW structure - * @xcast_mode: new multicast mode - * - * Updates the Multicast Mode of VF.
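The 30-address limit in ixgbevf_update_mc_addr_list_vf() above falls straight out of the mailbox geometry; a back-of-the-envelope check, assuming the usual 16-dword (64-byte) VF mailbox (the macro names here are illustrative):

	#define VF_MBX_DWORDS	16	/* IXGBE_VFMAILBOX_SIZE */
	#define VF_MC_SLOTS	((VF_MBX_DWORDS - 1) * 2) /* two u16s per dword */
	/* VF_MC_SLOTS == 30, matching the clamp on netdev_mc_count() above */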
- **/ -static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) -{ - u32 msgbuf[2]; - s32 err; - - switch (hw->api_version) { - case ixgbe_mbox_api_12: - /* promisc was introduced in API version 1.3 */ - if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) - return -EOPNOTSUPP; - /* Fall through */ - case ixgbe_mbox_api_14: - case ixgbe_mbox_api_13: - break; - default: - return -EOPNOTSUPP; - } - - msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; - msgbuf[1] = xcast_mode; - - err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - ARRAY_SIZE(msgbuf)); - if (err) - return err; - - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) - return -EPERM; - - return 0; -} - -/** - * Hyper-V variant - just a stub. - * @hw: unused - * @xcast_mode: unused - */ -static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) -{ - return -EOPNOTSUPP; -} - -/** - * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address - * @hw: pointer to the HW structure - * @vlan: 12 bit VLAN ID - * @vind: unused by VF drivers - * @vlan_on: if true then set bit, else clear bit - **/ -static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) -{ - u32 msgbuf[2]; - s32 err; - - msgbuf[0] = IXGBE_VF_SET_VLAN; - msgbuf[1] = vlan; - /* Setting the 8-bit field MSG INFO to TRUE indicates "add" */ - msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - - err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - ARRAY_SIZE(msgbuf)); - if (err) - goto mbx_err; - - /* remove extra bits from the message */ - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT); - - if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK)) - err = IXGBE_ERR_INVALID_ARGUMENT; - -mbx_err: - return err; -} - -/** - * Hyper-V variant - just a stub. - * @hw: unused - * @vlan: unused - * @vind: unused - * @vlan_on: unused - */ -static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) -{ - return -EOPNOTSUPP; -} - -/** - * ixgbevf_setup_mac_link_vf - Setup MAC link settings - * @hw: pointer to hardware structure - * @speed: Unused in this implementation - * @autoneg: Unused in this implementation - * @autoneg_wait_to_complete: Unused in this implementation - * - * Do nothing and return success. VF drivers are not allowed to change - * global settings. Maintained for driver compatibility.
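ixgbevf_set_vfta_vf() above encodes the add/remove flag in the MSGINFO byte rather than in a second message word. Adding VLAN 100, for example:

	u32 msgbuf[2];

	msgbuf[0] = IXGBE_VF_SET_VLAN | (1 << IXGBE_VT_MSGINFO_SHIFT);
	msgbuf[1] = 100;	/* the 12-bit VLAN ID */
	/* msgbuf[0] == 0x00010004; success comes back as
	 * IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK once MSGINFO is masked off
	 */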
- **/ -static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) -{ - return 0; -} - -/** - * ixgbevf_check_mac_link_vf - Get link/speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true if link is up, false otherwise - * @autoneg_wait_to_complete: unused - * - * Reads the links register to determine if link is up and the current speed - **/ -static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, - bool autoneg_wait_to_complete) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - struct ixgbe_mac_info *mac = &hw->mac; - s32 ret_val = 0; - u32 links_reg; - u32 in_msg = 0; - - /* If we were hit with a reset, drop the link */ - if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) - mac->get_link_status = true; - - if (!mac->get_link_status) - goto out; - - /* if link status is down no point in checking to see if pf is up */ - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - - /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs - * before the link status is correct - */ - if (mac->type == ixgbe_mac_82599_vf) { - int i; - - for (i = 0; i < 5; i++) { - udelay(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - } - } - - switch (links_reg & IXGBE_LINKS_SPEED_82599) { - case IXGBE_LINKS_SPEED_10G_82599: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - case IXGBE_LINKS_SPEED_1G_82599: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case IXGBE_LINKS_SPEED_100_82599: - *speed = IXGBE_LINK_SPEED_100_FULL; - break; - } - - /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error - */ - if (mbx->ops.read(hw, &in_msg, 1)) - goto out; - - if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { - /* msg is not CTS and is a NACK; we must have lost CTS status */ - if (in_msg & IXGBE_VT_MSGTYPE_NACK) - ret_val = -1; - goto out; - } - - /* the pf is talking, if we timed out in the past we reinit */ - if (!mbx->timeout) { - ret_val = -1; - goto out; - } - - /* if we passed all the tests above then the link is up and we no - * longer need to check for link - */ - mac->get_link_status = false; - -out: - *link_up = !mac->get_link_status; - return ret_val; -} - -/** - * Hyper-V variant; there is no mailbox communication.
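The VFLINKS speed decode in ixgbevf_check_mac_link_vf() above reduces to a small mapping; a table-driven sketch of the same decode (illustrative only, the driver keeps the switch):

	static const struct {
		u32 bits;		/* IXGBE_LINKS_SPEED_82599 field */
		ixgbe_link_speed speed;
	} vf_speed_map[] = {
		{ IXGBE_LINKS_SPEED_10G_82599, IXGBE_LINK_SPEED_10GB_FULL },
		{ IXGBE_LINKS_SPEED_1G_82599,  IXGBE_LINK_SPEED_1GB_FULL },
		{ IXGBE_LINKS_SPEED_100_82599, IXGBE_LINK_SPEED_100_FULL },
	};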
- * @hw: pointer to private hardware struct - * @speed: pointer to link speed - * @link_up: true if link is up, false otherwise - * @autoneg_wait_to_complete: unused - */ -static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, - bool autoneg_wait_to_complete) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - struct ixgbe_mac_info *mac = &hw->mac; - u32 links_reg; - - /* If we were hit with a reset, drop the link */ - if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) - mac->get_link_status = true; - - if (!mac->get_link_status) - goto out; - - /* if link status is down no point in checking to see if pf is up */ - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - - /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs - * before the link status is correct - */ - if (mac->type == ixgbe_mac_82599_vf) { - int i; - - for (i = 0; i < 5; i++) { - udelay(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - } - } - - switch (links_reg & IXGBE_LINKS_SPEED_82599) { - case IXGBE_LINKS_SPEED_10G_82599: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - case IXGBE_LINKS_SPEED_1G_82599: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case IXGBE_LINKS_SPEED_100_82599: - *speed = IXGBE_LINK_SPEED_100_FULL; - break; - } - - /* if we passed all the tests above then the link is up and we no - * longer need to check for link - */ - mac->get_link_status = false; - -out: - *link_up = !mac->get_link_status; - return 0; -} - -/** - * ixgbevf_set_rlpml_vf - Set the maximum receive packet length - * @hw: pointer to the HW structure - * @max_size: value to assign to max frame size - **/ -static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) -{ - u32 msgbuf[2]; - s32 ret_val; - - msgbuf[0] = IXGBE_VF_SET_LPE; - msgbuf[1] = max_size; - - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - ARRAY_SIZE(msgbuf)); - if (ret_val) - return ret_val; - if ((msgbuf[0] & IXGBE_VF_SET_LPE) && - (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) - return IXGBE_ERR_MBX; - - return 0; -} - -/** - * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length - * @hw: pointer to the HW structure - * @max_size: value to assign to max frame size - * Hyper-V variant. - **/ -static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) -{ - u32 reg; - - /* If we are on Hyper-V, we implement this functionality - * differently.
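One concrete instance of the length math in the Hyper-V variant below, assuming a 1514-byte maximum frame (1500-byte MTU plus the 14-byte Ethernet header):

	u16 max_size = 1514;
	u32 limit = (max_size + 4) | IXGBE_RXDCTL_RLPML_EN; /* +4 for the CRC */
	/* the length field works out to 1518, programmed into VFRXDCTL(0) */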
- */ - reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); - /* CRC == 4 */ - reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); - - return 0; -} - -/** - * ixgbevf_negotiate_api_version_vf - Negotiate supported API version - * @hw: pointer to the HW structure - * @api: integer containing requested API version - **/ -static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) -{ - int err; - u32 msg[3]; - - /* Negotiate the mailbox API version */ - msg[0] = IXGBE_VF_API_NEGOTIATE; - msg[1] = api; - msg[2] = 0; - - err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg)); - if (!err) { - msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - /* Store value and return 0 on success */ - if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { - hw->api_version = api; - return 0; - } - - err = IXGBE_ERR_INVALID_ARGUMENT; - } - - return err; -} - -/** - * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version - * @hw: pointer to the HW structure - * @api: integer containing requested API version - * Hyper-V version - only ixgbe_mbox_api_10 supported. - **/ -static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) -{ - /* Hyper-V only supports api version ixgbe_mbox_api_10 */ - if (api != ixgbe_mbox_api_10) - return IXGBE_ERR_INVALID_ARGUMENT; - - return 0; -} - -int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, - unsigned int *default_tc) -{ - int err; - u32 msg[5]; - - /* do nothing if API doesn't support ixgbevf_get_queues */ - switch (hw->api_version) { - case ixgbe_mbox_api_11: - case ixgbe_mbox_api_12: - case ixgbe_mbox_api_13: - case ixgbe_mbox_api_14: - break; - default: - return 0; - } - - /* Fetch queue configuration from the PF */ - msg[0] = IXGBE_VF_GET_QUEUE; - msg[1] = msg[2] = msg[3] = msg[4] = 0; - - err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg)); - if (!err) { - msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - /* if we didn't get an ACK there must have been - * some sort of mailbox error so we should treat it - * as such - */ - if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK)) - return IXGBE_ERR_MBX; - - /* record and validate values from message */ - hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; - if (hw->mac.max_tx_queues == 0 || - hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) - hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; - - hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; - if (hw->mac.max_rx_queues == 0 || - hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) - hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; - - *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; - /* in case of unknown state assume we cannot tag frames */ - if (*num_tcs > hw->mac.max_rx_queues) - *num_tcs = 1; - - *default_tc = msg[IXGBE_VF_DEF_QUEUE]; - /* default to queue 0 on out-of-bounds queue number */ - if (*default_tc >= hw->mac.max_tx_queues) - *default_tc = 0; - } - - return err; -} - -static const struct ixgbe_mac_operations ixgbevf_mac_ops = { - .init_hw = ixgbevf_init_hw_vf, - .reset_hw = ixgbevf_reset_hw_vf, - .start_hw = ixgbevf_start_hw_vf, - .get_mac_addr = ixgbevf_get_mac_addr_vf, - .stop_adapter = ixgbevf_stop_hw_vf, - .setup_link = ixgbevf_setup_mac_link_vf, - .check_link = ixgbevf_check_mac_link_vf, - .negotiate_api_version = ixgbevf_negotiate_api_version_vf, - .set_rar = ixgbevf_set_rar_vf, - .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, - .update_xcast_mode = ixgbevf_update_xcast_mode, - .set_uc_addr = ixgbevf_set_uc_addr_vf, - .set_vfta = ixgbevf_set_vfta_vf, -
.set_rlpml = ixgbevf_set_rlpml_vf, -}; - -static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { - .init_hw = ixgbevf_init_hw_vf, - .reset_hw = ixgbevf_hv_reset_hw_vf, - .start_hw = ixgbevf_start_hw_vf, - .get_mac_addr = ixgbevf_get_mac_addr_vf, - .stop_adapter = ixgbevf_stop_hw_vf, - .setup_link = ixgbevf_setup_mac_link_vf, - .check_link = ixgbevf_hv_check_mac_link_vf, - .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf, - .set_rar = ixgbevf_hv_set_rar_vf, - .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, - .update_xcast_mode = ixgbevf_hv_update_xcast_mode, - .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, - .set_vfta = ixgbevf_hv_set_vfta_vf, - .set_rlpml = ixgbevf_hv_set_rlpml_vf, -}; - -const struct ixgbevf_info ixgbevf_82599_vf_info = { - .mac = ixgbe_mac_82599_vf, - .mac_ops = &ixgbevf_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_82599_vf_hv_info = { - .mac = ixgbe_mac_82599_vf, - .mac_ops = &ixgbevf_hv_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_X540_vf_info = { - .mac = ixgbe_mac_X540_vf, - .mac_ops = &ixgbevf_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_X540_vf_hv_info = { - .mac = ixgbe_mac_X540_vf, - .mac_ops = &ixgbevf_hv_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_X550_vf_info = { - .mac = ixgbe_mac_X550_vf, - .mac_ops = &ixgbevf_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_X550_vf_hv_info = { - .mac = ixgbe_mac_X550_vf, - .mac_ops = &ixgbevf_hv_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { - .mac = ixgbe_mac_X550EM_x_vf, - .mac_ops = &ixgbevf_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { - .mac = ixgbe_mac_X550EM_x_vf, - .mac_ops = &ixgbevf_hv_mac_ops, -}; - -const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { - .mac = ixgbe_mac_x550em_a_vf, - .mac_ops = &ixgbevf_mac_ops, -}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h deleted file mode 100644 index d1e9e306653b..000000000000 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ /dev/null @@ -1,194 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ - -#ifndef __IXGBE_VF_H__ -#define __IXGBE_VF_H__ - -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/if_ether.h> -#include <linux/netdevice.h> - -#include "defines.h" -#include "regs.h" -#include "mbx.h" - -struct ixgbe_hw; - -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); -struct ixgbe_mac_operations { - s32 (*init_hw)(struct ixgbe_hw *); - s32 (*reset_hw)(struct ixgbe_hw *); - s32 (*start_hw)(struct ixgbe_hw *); - s32 (*clear_hw_cntrs)(struct ixgbe_hw *); - enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); - s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*stop_adapter)(struct ixgbe_hw *); - s32 (*get_bus_info)(struct ixgbe_hw *); - s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); - - /* Link */ - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); - s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, - bool *); - - /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); - s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); - s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*update_xcast_mode)(struct ixgbe_hw *, int); - s32 (*enable_mc)(struct ixgbe_hw *); - s32 (*disable_mc)(struct ixgbe_hw *); - s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); - s32 (*set_rlpml)(struct ixgbe_hw *, u16); -}; - -enum ixgbe_mac_type { - ixgbe_mac_unknown = 0, - ixgbe_mac_82599_vf, - ixgbe_mac_X540_vf, - ixgbe_mac_X550_vf, - ixgbe_mac_X550EM_x_vf, - ixgbe_mac_x550em_a_vf, - ixgbe_num_macs -}; - -struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - u8 addr[6]; - u8 perm_addr[6]; - - enum ixgbe_mac_type type; - - s32 mc_filter_type; - - bool get_link_status; - u32 max_tx_queues; - u32 max_rx_queues; - u32 max_msix_vectors; -}; - -struct ixgbe_mbx_operations { - s32 (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16); - s32 (*check_for_msg)(struct ixgbe_hw *); - s32 (*check_for_ack)(struct ixgbe_hw *); - s32 (*check_for_rst)(struct ixgbe_hw *); -}; - -struct ixgbe_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 udelay; - u32 v2p_mailbox; - u16 size; -}; - -struct ixgbe_hw { - void *back; - - u8 __iomem *hw_addr; - - struct ixgbe_mac_info mac; - struct ixgbe_mbx_info mbx; - - u16 device_id; - u16 subsystem_vendor_id; - u16 subsystem_device_id; - u16 vendor_id; - - u8 revision_id; - bool adapter_stopped; - - int api_version; -}; - -struct ixgbevf_hw_stats { - u64 base_vfgprc; - u64 base_vfgptc; - u64 base_vfgorc; - u64 base_vfgotc; - u64 base_vfmprc; - - u64 last_vfgprc; - u64 last_vfgptc; - u64 last_vfgorc; - u64 last_vfgotc; - u64 last_vfmprc; - - u64 vfgprc; - u64 vfgptc; - u64 vfgorc; - u64 vfgotc; - u64 vfmprc; - - u64 saved_reset_vfgprc; - u64 saved_reset_vfgptc; - u64 saved_reset_vfgorc; - u64 saved_reset_vfgotc; - u64 saved_reset_vfmprc; -}; - -struct ixgbevf_info { - enum ixgbe_mac_type mac; - const struct ixgbe_mac_operations *mac_ops; -}; - -#define IXGBE_FAILED_READ_REG 0xffffffffU - -#define IXGBE_REMOVED(a) unlikely(!(a)) - -static inline
void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) -{ - u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); - - if (IXGBE_REMOVED(reg_addr)) - return; - writel(value, reg_addr + reg); -} - -#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) - -u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); -#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) - -static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset, u32 value) -{ - ixgbe_write_reg(hw, reg + (offset << 2), value); -} - -#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) - -static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset) -{ - return ixgbevf_read_reg(hw, reg + (offset << 2)); -} - -#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) - -int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, - unsigned int *default_tc); -int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); -int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key); -#endif /* __IXGBE_VF_H__ */ diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e49820675c8c..a10ae28ebc8a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -388,6 +388,8 @@ struct mvneta_pcpu_stats { struct u64_stats_sync syncp; u64 rx_packets; u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; u64 tx_packets; u64 tx_bytes; }; @@ -706,6 +708,8 @@ mvneta_get_stats64(struct net_device *dev, struct mvneta_pcpu_stats *cpu_stats; u64 rx_packets; u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; u64 tx_packets; u64 tx_bytes; @@ -714,19 +718,20 @@ mvneta_get_stats64(struct net_device *dev, start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); rx_packets = cpu_stats->rx_packets; rx_bytes = cpu_stats->rx_bytes; + rx_dropped = cpu_stats->rx_dropped; + rx_errors = cpu_stats->rx_errors; tx_packets = cpu_stats->tx_packets; tx_bytes = cpu_stats->tx_bytes; } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; + stats->rx_dropped += rx_dropped; + stats->rx_errors += rx_errors; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; } - stats->rx_errors = dev->stats.rx_errors; - stats->rx_dropped = dev->stats.rx_dropped; - stats->tx_dropped = dev->stats.tx_dropped; } @@ -1703,8 +1708,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, static void mvneta_rx_error(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); u32 status = rx_desc->status; + /* update per-cpu counter */ + u64_stats_update_begin(&stats->syncp); + stats->rx_errors++; + u64_stats_update_end(&stats->syncp); + switch (status & MVNETA_RXD_ERR_CODE_MASK) { case MVNETA_RXD_ERR_CRC: netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", @@ -1965,7 +1976,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* Check errors only for FIRST descriptor */ if (rx_status & MVNETA_RXD_ERR_SUMMARY) { mvneta_rx_error(pp, rx_desc); - dev->stats.rx_errors++; /* leave the descriptor untouched */ continue; } @@ -1976,11 +1986,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi, skb_size = max(rx_copybreak, rx_header_size); rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size); if (unlikely(!rxq->skb)) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id); - dev->stats.rx_dropped++; + 
rxq->skb_alloc_err++; + + u64_stats_update_begin(&stats->syncp); + stats->rx_dropped++; + u64_stats_update_end(&stats->syncp); continue; } copy_size = min(skb_size, rx_bytes); @@ -2137,7 +2153,6 @@ err_drop_frame_ret_pool: mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, rx_desc->buf_phys_addr); err_drop_frame: - dev->stats.rx_errors++; mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ continue; @@ -2789,11 +2804,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) /* For the case where the last mvneta_poll did not process all * RX packets */ - rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); - cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : port->cause_rx_tx; + rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); if (rx_queue) { rx_queue = rx_queue - 1; if (pp->bm_priv) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 111b3b8239e1..ef44c6979a31 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3674,7 +3674,7 @@ static int mvpp2_open(struct net_device *dev) valid = true; } - if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { + if (priv->hw_version == MVPP22 && port->link_irq) { err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, dev->name, port); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 65482f004e50..10327d728f0d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mlx4.h" @@ -48,6 +49,7 @@ struct mlx4_device_context { static LIST_HEAD(intf_list); static LIST_HEAD(dev_list); static DEFINE_MUTEX(intf_mutex); +static struct work_struct load_mlx4_en_work; static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) { @@ -87,6 +89,26 @@ static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *pr } } +static void load_mlx4(struct work_struct *work) +{ + request_module("mlx4_en"); +} + +int mlx4_load_mlx4_en(void) +{ + int load_mlx4_en = 0; + mutex_lock(&intf_mutex); + if (list_empty(&intf_list)) + load_mlx4_en = 1; + mutex_unlock(&intf_mutex); + if (load_mlx4_en) + { + INIT_WORK(&load_mlx4_en_work, load_mlx4); + queue_work(mlx4_wq, &load_mlx4_en_work); + } + return 0; +} + int mlx4_register_interface(struct mlx4_interface *intf) { struct mlx4_priv *priv; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d44ac666e730..58546231416b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1141,6 +1141,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev, } } mlx4_set_port_mask(dev); + + mlx4_load_mlx4_en(); err = mlx4_register_device(dev); if (err) { mlx4_err(dev, "Failed to register device\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index d787bc0a4155..e09bc3858d57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { - if (!MLX5_CAP_GEN(mdev, tls)) + if (!MLX5_CAP_GEN(mdev, tls_tx)) return false; if (!MLX5_CAP_GEN(mdev, log_max_dek)) diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f1a7bc46f1c0..55ceabf077b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -371,6 +371,7 @@ enum { struct mlx5e_sq_wqe_info { u8 opcode; + u8 num_wqebbs; /* Auxiliary data for different opcodes. */ union { @@ -760,7 +761,7 @@ enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, MLX5E_STATE_XDP_TX_ENABLED, - MLX5E_STATE_XDP_OPEN, + MLX5E_STATE_XDP_ACTIVE, }; struct mlx5e_rqt { @@ -816,7 +817,7 @@ struct mlx5e_xsk { struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; - int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; + int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif @@ -1058,6 +1059,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); void mlx5e_free_rx_descs(struct mlx5e_rq *rq); +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 68d593074f6c..d48292ccda29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -122,6 +122,22 @@ enum { #endif }; +#define MLX5E_TTC_NUM_GROUPS 3 +#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ + MLX5E_TTC_GROUP2_SIZE +\ + MLX5E_TTC_GROUP3_SIZE) + +#define MLX5E_INNER_TTC_NUM_GROUPS 3 +#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ + MLX5E_INNER_TTC_GROUP2_SIZE +\ + MLX5E_INNER_TTC_GROUP3_SIZE) + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_table { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 1d6b58860da6..20b907dc1e29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, struct mlx5e_err_ctx *err_ctx) { - if (!reporter) { - netdev_err(priv->netdev, err_str); - return err_ctx->recover(&err_ctx->ctx); - } + netdev_err(priv->netdev, err_str); + + if (!reporter) + return err_ctx->recover(err_ctx->ctx); + return devlink_health_report(reporter, err_str, err_ctx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index d3693fa547ac..e54f70d9af22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -10,8 +10,7 @@ static inline bool cqe_syndrome_needs_recover(u8 syndrome) { - return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR || - syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || + return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || syndrome == 
MLX5_CQE_SYNDROME_WR_FLUSH_ERR; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index f777994f3005..fce6eccdcf8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -73,6 +73,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000, [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, + [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, [MLX5E_400GAUI_8] = 400000, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 633b117eb13e..99c7cdd0404a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { + pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", + i, port_buffer->buffer[i].size); return -ENOMEM; + } port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; port_buffer->buffer[i].xon = @@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu, return 0; } +static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en) +{ + u32 g_rx_pause, g_tx_pause; + int err; + + err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause); + if (err) + return err; + + /* If global pause enabled, set all active buffers to lossless. + * Otherwise, check PFC setting. 
+ */ + if (g_rx_pause || g_tx_pause) + *pfc_en = 0xff; + else + err = mlx5_query_port_pfc(mdev, pfc_en, NULL); + + return err; +} + #define MINIMUM_MAX_MTU 9216 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, @@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { update_prio2buffer = true; - err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL); + err = fill_pfc_en(priv->mdev, &curr_pfc_en); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index b860569d4247..9fa4b98001d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) goto out; mlx5e_reset_icosq_cc_pc(icosq); - mlx5e_free_rx_descs(rq); + mlx5e_free_rx_in_progress_descs(rq); clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); mlx5e_activate_icosq(icosq); mlx5e_activate_rq(rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 745ab6cd7c30..362f01bc8372 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -144,10 +144,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) int ret; - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, - fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, + NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); if (!(*out_ttl)) *out_ttl = ip6_dst_hoplimit(dst); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 7c8796d9743f..f07b1399744e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -179,6 +179,16 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) } } +static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) +{ + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + mlx5_wq_ll_reset(&rq->mpwqe.wq); + rq->mpwqe.actual_wq_head = 0; + } else { + mlx5_wq_cyc_reset(&rq->wqe.wq); + } +} + /* SW parser related functions */ struct mlx5e_swp_spec { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 36ac1e3816b9..d7587f40ecae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + + if (priv->channels.params.xdp_prog) + set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); } static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) { + if (priv->channels.params.xdp_prog) + clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); - /* let other device's napi(s) see our new state */ + /* Let other device's napi(s) and XSK wakeups see our new state. 
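The TX_ENABLED/XDP_ACTIVE split above follows the usual flag-then-synchronize teardown pattern: the writer clears the bit and only then calls synchronize_rcu(), so any XSK wakeup that already observed the old value finishes before the queues go away. A minimal sketch of the two sides (mirroring mlx5e_xsk_wakeup() further down):

	/* reader, wakeup path: refuse unless XDP is genuinely active */
	if (unlikely(!test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state)))
		return -ENETDOWN;

	/* writer, teardown: clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
	 * synchronize_rcu(); ...then tear down the XDP SQs
	 */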
*/ synchronize_rcu(); } @@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); } -static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv) +static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv) { - set_bit(MLX5E_STATE_XDP_OPEN, &priv->state); -} - -static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv) -{ - clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state); -} - -static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv) -{ - return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state); + return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); } static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 631af8dee517..c28cbae42331 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) { clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); napi_synchronize(&c->napi); + synchronize_rcu(); /* Sync with the XSK wakeup. */ mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 87827477d38c..fe2d596cb361 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) struct mlx5e_channel *c; u16 ix; - if (unlikely(!mlx5e_xdp_is_open(priv))) + if (unlikely(!mlx5e_xdp_is_active(priv))) return -ENETDOWN; if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix))) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index a3efa29a4629..63116be6b1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -38,8 +38,8 @@ enum { enum { MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2, }; struct mlx5e_ktls_offload_context_tx { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 778dab1af8fc..52a56622034a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct tx_sync_info { u64 rcd_sn; - s32 sync_len; + u32 sync_len; int nr_frags; skb_frag_t frags[MAX_SKB_FRAGS]; }; @@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval { static enum mlx5e_ktls_sync_retval tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, - u32 tcp_seq, struct tx_sync_info *info) + u32 tcp_seq, int datalen, struct tx_sync_info *info) { struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; struct tls_record_info *record; int remaining, i = 0; unsigned long flags; + bool ends_before; spin_lock_irqsave(&tx_ctx->lock, flags); record = tls_get_record(tx_ctx, tcp_seq, 
&info->rcd_sn); @@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, goto out; } - if (unlikely(tcp_seq < tls_record_start_seq(record))) { - ret = tls_record_is_start_marker(record) ? - MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + /* There are the following cases: + * 1. packet ends before start marker: bypass offload. + * 2. packet starts before start marker and ends after it: drop, + * not supported, breaks contract with kernel. + * 3. packet ends before tls record info starts: drop, + * this packet was already acknowledged and its record info + * was released. + */ + ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record)); + + if (unlikely(tls_record_is_start_marker(record))) { + ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + goto out; + } else if (ends_before) { + ret = MLX5E_KTLS_SYNC_FAIL; goto out; } @@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, u8 num_wqebbs; int i = 0; - ret = tx_sync_info_get(priv_tx, seq, &info); + ret = tx_sync_info_get(priv_tx, seq, datalen, &info); if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { stats->tls_skip_no_sync_data++; @@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, goto err_out; } - if (unlikely(info.sync_len < 0)) { - if (likely(datalen <= -info.sync_len)) - return MLX5E_KTLS_SYNC_DONE; - - stats->tls_drop_bypass_req++; - goto err_out; - } - stats->tls_ooo++; tx_post_resync_params(sq, priv_tx, info.rcd_sn); @@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, if (unlikely(contig_wqebbs_room < num_wqebbs)) mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); - tx_post_resync_params(sq, priv_tx, info.rcd_sn); - for (; i < info.nr_frags; i++) { unsigned int orig_fsz, frag_offset = 0, n = 0; skb_frag_t *f = &info.frags[i]; @@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, enum mlx5e_ktls_sync_retval ret = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); - if (likely(ret == MLX5E_KTLS_SYNC_DONE)) + switch (ret) { + case MLX5E_KTLS_SYNC_DONE: *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); - else if (ret == MLX5E_KTLS_SYNC_FAIL) + break; + case MLX5E_KTLS_SYNC_SKIP_NO_DATA: + if (likely(!skb->decrypted)) + goto out; + WARN_ON_ONCE(1); + /* fall-through */ + default: /* MLX5E_KTLS_SYNC_FAIL */ goto err_out; - else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ - goto out; + } } priv_tx->expected_seq = seq + datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 71384ad1a443..ef1ed15a53b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, int datalen; u32 skb_seq; - if (MLX5_CAP_GEN(sq->channel->mdev, tls)) { + if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) { skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 95601269fa2e..c6776f308d5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1027,18 +1027,11 @@ static bool ext_link_mode_requested(const unsigned long *adver) return 
bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static bool ext_speed_requested(u32 speed) -{ -#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000 - return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED); -} - -static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed) +static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported) { bool ext_link_mode = ext_link_mode_requested(adver); - bool ext_speed = ext_speed_requested(speed); - return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed; + return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported; } int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, @@ -1065,8 +1058,8 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext = ext_requested(autoneg, adver, speed), ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; @@ -1643,7 +1636,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, break; case MLX5_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 15b7f0f1427c..73d3dc07331f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -904,22 +904,6 @@ del_rules: return err; } -#define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ - MLX5E_TTC_GROUP2_SIZE +\ - MLX5E_TTC_GROUP3_SIZE) - -#define MLX5E_INNER_TTC_NUM_GROUPS 3 -#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ - MLX5E_INNER_TTC_GROUP2_SIZE +\ - MLX5E_INNER_TTC_GROUP3_SIZE) - static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, bool use_ipv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2a56e66f58d8..ee7c753e9ea0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -723,6 +723,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) if (!in) return -ENOMEM; + if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY) + mlx5e_rqwq_reset(rq); + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); MLX5_SET(modify_rq_in, in, rq_state, curr_state); @@ -821,6 +824,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) return -ETIMEDOUT; } +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq; + u16 head; + int i; + + if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return; + + wq = &rq->mpwqe.wq; + head = wq->head; + + /* Outstanding UMR WQEs (in progress) start at wq->head */ + for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { + rq->dealloc_wqe(rq, head); + head = mlx5_wq_ll_get_wqe_next_ix(wq, head); + } + + rq->mpwqe.actual_wq_head = wq->head; + rq->mpwqe.umr_in_progress = 0; + 
rq->mpwqe.umr_completed = 0; +} + void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { __be16 wqe_ix_be; @@ -828,14 +854,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - u16 head = wq->head; - int i; - /* Outstanding UMR WQEs (in progress) start at wq->head */ - for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { - rq->dealloc_wqe(rq, head); - head = mlx5_wq_ll_get_wqe_next_ix(wq, head); - } + mlx5e_free_rx_in_progress_descs(rq); while (!mlx5_wq_ll_is_empty(wq)) { struct mlx5e_rx_wqe_ll *wqe; @@ -1693,11 +1713,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - struct mlx5e_priv *priv = c->priv; int err, tc; for (tc = 0; tc < params->num_tc; tc++) { - int txq_ix = c->ix + tc * priv->max_nch; + int txq_ix = c->ix + tc * params->num_channels; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, params, &cparam->sq, &c->sq[tc], tc); @@ -2878,26 +2897,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } -static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) +static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) { - int i, tc; + int i, ch; - for (i = 0; i < priv->max_nch; i++) - for (tc = 0; tc < priv->profile->max_tc; tc++) - priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch; -} + ch = priv->channels.num; -static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) -{ - struct mlx5e_channel *c; - struct mlx5e_txqsq *sq; - int i, tc; + for (i = 0; i < ch; i++) { + int tc; + + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + struct mlx5e_channel *c = priv->channels.c[i]; + struct mlx5e_txqsq *sq = &c->sq[tc]; - for (i = 0; i < priv->channels.num; i++) { - c = priv->channels.c[i]; - for (tc = 0; tc < c->num_tc; tc++) { - sq = &c->sq[tc]; priv->txq2sq[sq->txq_ix] = sq; + priv->channel_tc2realtxq[i][tc] = i + tc * ch; } } } @@ -2912,7 +2926,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, num_rxqs); - mlx5e_build_tx2sq_maps(priv); + mlx5e_build_txq_maps(priv); mlx5e_activate_channels(&priv->channels); mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); @@ -3008,12 +3022,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool is_xdp = priv->channels.params.xdp_prog; int err; set_bit(MLX5E_STATE_OPENED, &priv->state); - if (is_xdp) - mlx5e_xdp_set_open(priv); err = mlx5e_open_channels(priv, &priv->channels); if (err) @@ -3028,8 +3039,6 @@ int mlx5e_open_locked(struct net_device *netdev) return 0; err_clear_state_opened_flag: - if (is_xdp) - mlx5e_xdp_set_closed(priv); clear_bit(MLX5E_STATE_OPENED, &priv->state); return err; } @@ -3061,8 +3070,6 @@ int mlx5e_close_locked(struct net_device *netdev) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - if (priv->channels.params.xdp_prog) - mlx5e_xdp_set_closed(priv); clear_bit(MLX5E_STATE_OPENED, &priv->state); netif_carrier_off(priv->netdev); @@ -4379,16 +4386,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return 0; } -static int mlx5e_xdp_update_state(struct mlx5e_priv *priv) -{ - if (priv->channels.params.xdp_prog) - mlx5e_xdp_set_open(priv); - else - mlx5e_xdp_set_closed(priv); - - return 0; -} - static int mlx5e_xdp_set(struct 
net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4428,7 +4425,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) mlx5e_set_rq_type(priv->mdev, &new_channels.params); old_prog = priv->channels.params.xdp_prog; - err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto unlock; } else { @@ -5028,7 +5025,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); - mlx5e_build_tc2txq_maps(priv); mlx5e_health_create_reporters(priv); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 82cffb3a9964..1d295a7afc8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, /* fill sq frag edge with nops to avoid wqe wrapping two pages */ for (; wi < edge_wi; wi++) { wi->opcode = MLX5_OPCODE_NOP; + wi->num_wqebbs = 1; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } } @@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; + sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS; sq->db.ico_wqe[pi].umr.rq = rq; sq->pc += MLX5E_UMR_WQEBBS; @@ -628,17 +630,14 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.ico_wqe[ci]; + sqcc += wi->num_wqebbs; - if (likely(wi->opcode == MLX5_OPCODE_UMR)) { - sqcc += MLX5E_UMR_WQEBBS; + if (likely(wi->opcode == MLX5_OPCODE_UMR)) wi->umr.rq->mpwqe.umr_completed++; - } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) { - sqcc++; - } else { + else if (unlikely(wi->opcode != MLX5_OPCODE_NOP)) netdev_WARN_ONCE(cq->channel->netdev, "Bad OPCODE in ICOSQ WQE info: 0x%x\n", wi->opcode); - } } while (!last_wqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 7e6ebd0505cc..a05158472ed1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -297,6 +297,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; #endif s->tx_cqes += sq_stats->cqes; + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); } } } @@ -1601,7 +1604,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, - priv->channel_tc2txq[i][tc]); + i + tc * max_nch); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index f90a9f8e0fc6..1f9107d83848 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -586,7 +586,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; - ft_attr->max_fte = MLX5E_NUM_TT; + ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; ft_attr->prio = 
MLX5E_TC_PRIO; } @@ -1615,8 +1615,11 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) flow_flag_clear(flow, DUP); - mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); - kvfree(flow->peer_flow); + if (refcount_dec_and_test(&flow->peer_flow->refcnt)) { + mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); + kfree(flow->peer_flow); + } + flow->peer_flow = NULL; } @@ -3443,6 +3446,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } + if (!(attr->action & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action"); + return -EOPNOTSUPP; + } + if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { NL_SET_ERR_MSG_MOD(extack, "current firmware doesn't support split rule for port mirroring"); @@ -3942,6 +3951,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, u32 rate_mbps; int err; + vport_num = rpriv->rep->vport; + if (vport_num >= MLX5_VPORT_ECPF) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress rate limit is supported only for Eswitch ports connected to VFs"); + return -EOPNOTSUPP; + } + esw = priv->mdev->priv.eswitch; /* rate is given in bytes/sec. * First convert to bits/sec and then round to the nearest mbit/secs. @@ -3950,8 +3966,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, * 1 mbit/sec. */ rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; - vport_num = rpriv->rep->vport; - err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 67dc4f0921b6..dee12f17f9c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -93,7 +93,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, if (txq_ix >= num_channels) txq_ix = priv->txq2sq[txq_ix]->ch_ix; - return priv->channel_tc2txq[txq_ix][up]; + return priv->channel_tc2realtxq[txq_ix][up]; } static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 257a7c9f7a14..800d34ed8a96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + sq->db.ico_wqe[pi].num_wqebbs = 1; nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 60fddf8afc99..c6ed4b7f4f97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2319,25 +2319,17 @@ out: int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) { - int err = 0; - if (!esw) return -EOPNOTSUPP; if (!ESW_ALLOWED(esw)) return -EPERM; - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - err = -EOPNOTSUPP; - goto out; - } + if (esw->mode != MLX5_ESWITCH_LEGACY) + return -EOPNOTSUPP; *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 
1 : 0; - -out: - mutex_unlock(&esw->state_lock); - return err; + return 0; } int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 9004a07e457a..5acfdea3a75a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -858,7 +858,7 @@ out: */ #define ESW_SIZE (16 * 1024 * 1024) const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, - 64 * 1024, 4 * 1024 }; + 64 * 1024, 128 }; static int get_sz_from_pool(struct mlx5_eswitch *esw) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index c76da309506b..72232e570af7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context) mutex_lock(&fpga_xfrm->lock); if (!--fpga_xfrm->num_rules) { mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); + kfree(fpga_xfrm->sa_ctx); fpga_xfrm->sa_ctx = NULL; } mutex_unlock(&fpga_xfrm->lock); @@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) return 0; - if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { + if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 791e14ac26f4..86e6bbb57482 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1555,16 +1555,16 @@ struct match_list_head { struct match_list first; }; -static void free_match_list(struct match_list_head *head) +static void free_match_list(struct match_list_head *head, bool ft_locked) { if (!list_empty(&head->list)) { struct match_list *iter, *match_tmp; list_del(&head->first.list); - tree_put_node(&head->first.g->node, false); + tree_put_node(&head->first.g->node, ft_locked); list_for_each_entry_safe(iter, match_tmp, &head->list, list) { - tree_put_node(&iter->g->node, false); + tree_put_node(&iter->g->node, ft_locked); list_del(&iter->list); kfree(iter); } @@ -1573,7 +1573,8 @@ static void free_match_list(struct match_list_head *head) static int build_match_list(struct match_list_head *match_head, struct mlx5_flow_table *ft, - const struct mlx5_flow_spec *spec) + const struct mlx5_flow_spec *spec, + bool ft_locked) { struct rhlist_head *tmp, *list; struct mlx5_flow_group *g; @@ -1598,7 +1599,7 @@ static int build_match_list(struct match_list_head *match_head, curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { - free_match_list(match_head); + free_match_list(match_head, ft_locked); err = -ENOMEM; goto out; } @@ -1778,7 +1779,7 @@ search_again_locked: version = atomic_read(&ft->node.version); /* Collect all fgs which has a matching match_criteria */ - err = build_match_list(&match_head, ft, spec); + err = build_match_list(&match_head, ft, spec, take_write); if (err) { if (take_write) up_write_ref_node(&ft->node, false); @@ -1792,7 +1793,7 @@ search_again_locked: rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest, dest_num, version); - free_match_list(&match_head); + free_match_list(&match_head, take_write); 
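	/* try_add_to_existing_fg() returned either a rule handle or an ERR_PTR. Only -ENOENT (no flow group matched) and -EAGAIN (the table version changed underneath us) lead back to search_again_locked, this time with the write lock taken; take_write is forwarded to free_match_list() above so that a final put on a group does not try to retake a lock this context already holds. */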
if (!IS_ERR(rule) || (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { if (take_write) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index a19790dee7b2..13e86f0b42f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -239,7 +239,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (MLX5_CAP_GEN(dev, tls)) { + if (MLX5_CAP_GEN(dev, tls_tx)) { err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 50ab88d80033..c96a0e501007 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1197,6 +1197,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) if (err) goto err_load; + if (boot) { + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1214,6 +1220,9 @@ out: return err; err_reg_dev: + if (boot) + mlx5_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: mlx5_unload(dev); err_load: if (boot) @@ -1353,10 +1362,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) request_module_nowait(MLX5_IB_MOD); - err = mlx5_devlink_register(devlink, &pdev->dev); - if (err) - goto clean_load; - err = mlx5_crdump_enable(dev); if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); @@ -1364,9 +1369,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_save_state(pdev); return 0; -clean_load: - mlx5_unload_one(dev, true); - err_load_one: mlx5_pci_close(dev); pci_init_err: @@ -1567,6 +1569,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 004c56c2fc0c..b2dfa2b5366f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -930,7 +930,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn, action->rewrite.data = (void *)ops; action->rewrite.num_of_actions = i; - action->rewrite.chunk->byte_size = i * sizeof(*ops); ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index bd1699e62142..e1a647dde978 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, /* We need to copy the refcount since this ste * may have been traversed several times */ - 
refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount)); + new_ste->refcount = cur_ste->refcount; /* Link old STEs rule_mem list to the new ste */ mlx5dr_rule_update_rule_member(cur_ste, new_ste); @@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, if (!rule_mem) return -ENOMEM; + INIT_LIST_HEAD(&rule_mem->list); + INIT_LIST_HEAD(&rule_mem->use_ste_list); + rule_mem->ste = ste; list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 51803eef13dd..095ec7b1399d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Mellanox Technologies. */ +#include <linux/smp.h> #include "dr_types.h" #define QUEUE_SIZE 128 @@ -557,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, int ret; send_info.write.addr = (uintptr_t)action->rewrite.data; - send_info.write.length = action->rewrite.chunk->byte_size; + send_info.write.length = action->rewrite.num_of_actions * + DR_MODIFY_ACTION_SIZE; send_info.write.lkey = 0; send_info.remote_addr = action->rewrite.chunk->mr_addr; send_info.rkey = action->rewrite.chunk->rkey; @@ -729,7 +731,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - vector = smp_processor_id() % mlx5_comp_vectors_count(mdev); + vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn); if (err) { kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 3cbf74b44d1f..841abe75652c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -340,7 +340,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) if (dst->next_htbl) dst->next_htbl->pointing_ste = dst; - refcount_set(&dst->refcount, refcount_read(&src->refcount)); + dst->refcount = src->refcount; INIT_LIST_HEAD(&dst->rule_list); list_splice_tail_init(&src->rule_list, &dst->rule_list); @@ -557,7 +557,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste) bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste) { - return !refcount_read(&ste->refcount); + return !ste->refcount; } /* Init one ste as a pattern for ste data array */ @@ -681,14 +681,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, htbl->ste_arr = chunk->ste_arr; htbl->hw_ste_arr = chunk->hw_ste_arr; htbl->miss_list = chunk->miss_list; - refcount_set(&htbl->refcount, 0); + htbl->refcount = 0; for (i = 0; i < chunk->num_of_entries; i++) { struct mlx5dr_ste *ste = &htbl->ste_arr[i]; ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; ste->htbl = htbl; - refcount_set(&ste->refcount, 0); + ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); INIT_LIST_HEAD(&ste->rule_list); @@ -705,7 +705,7 @@ out_free_htbl: int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) { - if (refcount_read(&htbl->refcount)) + if (htbl->refcount) return -EBUSY; mlx5dr_icm_free_chunk(htbl->chunk); @@ -2257,7 +2257,9 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_domain *dmn = sb->dmn; struct
mlx5dr_cmd_caps *caps; + u8 *bit_mask = sb->bit_mask; u8 *tag = hw_ste->tag; + bool source_gvmi_set; DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); @@ -2278,7 +2280,8 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, if (!vport_cap) return -EINVAL; - if (vport_cap->vport_gvmi) + source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); + if (vport_cap->vport_gvmi && source_gvmi_set) MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi); misc->source_eswitch_owner_vhca_id = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 1cb3769d4e3c..31737dfca4ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -117,7 +117,7 @@ struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste { u8 *hw_ste; /* refcount: indicates the num of rules that using this ste */ - refcount_t refcount; + u32 refcount; /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; @@ -149,7 +149,7 @@ struct mlx5dr_ste_htbl_ctrl { struct mlx5dr_ste_htbl { u8 lu_type; u16 byte_mask; - refcount_t refcount; + u32 refcount; struct mlx5dr_icm_chunk *chunk; struct mlx5dr_ste *ste_arr; u8 *hw_ste_arr; @@ -200,13 +200,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl); static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl) { - if (refcount_dec_and_test(&htbl->refcount)) + htbl->refcount--; + if (!htbl->refcount) mlx5dr_ste_htbl_free(htbl); } static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) { - refcount_inc(&htbl->refcount); + htbl->refcount++; } /* STE utils */ @@ -248,14 +249,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste, struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher) { - if (refcount_dec_and_test(&ste->refcount)) + ste->refcount--; + if (!ste->refcount) mlx5dr_ste_free(ste, matcher, nic_matcher); } /* initial as 0, increased only when ste appears in a new rule */ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste) { - refcount_inc(&ste->refcount); + ste->refcount++; } void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3d587d0bdbbe..1e32e2443f73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { enum mlx5_flow_destination_type type = dst->dest_attr.type; - u32 id; if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { err = -ENOSPC; goto free_actions; } - switch (type) { - case MLX5_FLOW_DESTINATION_TYPE_COUNTER: - id = dst->dest_attr.counter_id; + if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; - tmp_action = - mlx5dr_action_create_flow_counter(id); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - break; + switch (type) { case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: tmp_action = create_ft_action(dev, dst); if (!tmp_action) { @@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } + if (fte->action.action & 
MLX5_FLOW_CONTEXT_ACTION_COUNT) { + list_for_each_entry(dst, &fte->node.children, node.list) { + u32 id; + + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -ENOSPC; + goto free_actions; + } + + id = dst->dest_attr.counter_id; + tmp_action = + mlx5dr_action_create_flow_counter(id); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + } + params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index dd2315ce4441..41e35b341b70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -96,6 +96,13 @@ err_db_free: return err; } +void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq) +{ + wq->wqe_ctr = 0; + wq->cur_sz = 0; + mlx5_wq_cyc_update_db_record(wq); +} + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) @@ -194,6 +201,19 @@ err_db_free: return err; } +static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq) +{ + struct mlx5_wqe_srq_next_seg *next_seg; + int i; + + for (i = 0; i < wq->fbc.sz_m1; i++) { + next_seg = mlx5_wq_ll_get_wqe(wq, i); + next_seg->next_wqe_index = cpu_to_be16(i + 1); + } + next_seg = mlx5_wq_ll_get_wqe(wq, i); + wq->tail_next = &next_seg->next_wqe_index; +} + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) @@ -201,9 +221,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; - struct mlx5_wqe_srq_next_seg *next_seg; int err; - int i; err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -222,13 +240,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); - for (i = 0; i < fbc->sz_m1; i++) { - next_seg = mlx5_wq_ll_get_wqe(wq, i); - next_seg->next_wqe_index = cpu_to_be16(i + 1); - } - next_seg = mlx5_wq_ll_get_wqe(wq, i); - wq->tail_next = &next_seg->next_wqe_index; - + mlx5_wq_ll_init_list(wq); wq_ctrl->mdev = mdev; return 0; @@ -239,6 +251,15 @@ err_db_free: return err; } +void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq) +{ + wq->head = 0; + wq->wqe_ctr = 0; + wq->cur_sz = 0; + mlx5_wq_ll_init_list(wq); + mlx5_wq_ll_update_db_record(wq); +} + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) { mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 55791f71a778..5efc038440df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -80,10 +80,12 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); +void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl); +void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq); int 
mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c index 544344ac4894..79057af4fe99 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c @@ -6,6 +6,7 @@ #include #include #include +#include <linux/vmalloc.h> #include #include "mlxfw_mfa2.h" #include "mlxfw_mfa2_file.h" @@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_size = be32_to_cpu(comp->size); comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len; - comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL); + comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size); if (!comp_data) return ERR_PTR(-ENOMEM); comp_data->comp.data_size = comp_size; @@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len; return &comp_data->comp; err_out: - kfree(comp_data); + vfree(comp_data); return ERR_PTR(err); } @@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp) const struct mlxfw_mfa2_comp_data *comp_data; comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp); - kfree(comp_data); + vfree(comp_data); } void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file) diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 471b0ca6d69a..55dfba990e6e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -204,8 +204,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) err_register_netdev: mlxsw_m->ports[local_port] = NULL; - free_netdev(dev); err_dev_addr_get: + free_netdev(dev); err_alloc_etherdev: mlxsw_core_port_fini(mlxsw_m->core, local_port); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 615455a21567..f3d1f9411d10 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1318,36 +1318,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, mbox->mapaddr); } -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, - const struct pci_device_id *id) +static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id, + u32 *p_sys_status) { unsigned long end; - char mrsr_pl[MLXSW_REG_MRSR_LEN]; - int err; + u32 val; - mlxsw_reg_mrsr_pack(mrsr_pl); - err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); - if (err) - return err; if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); return 0; } - /* We must wait for the HW to become responsive once again. */ + /* We must wait for the HW to become responsive.
*/ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); - + val = mlxsw_pci_read32(mlxsw_pci, FW_READY); if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) return 0; cond_resched(); } while (time_before(jiffies, end)); + + *p_sys_status = val & MLXSW_PCI_FW_READY_MASK; + return -EBUSY; } +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + char mrsr_pl[MLXSW_REG_MRSR_LEN]; + u32 sys_status; + int err; + + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status); + if (err) { + dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n", + sys_status); + return err; + } + + mlxsw_reg_mrsr_pack(mrsr_pl); + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); + if (err) + return err; + + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status); + if (err) { + dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n", + sys_status); + return err; + } + + return 0; +} + static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) { int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5494cf93f34c..8e42ebdbd487 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5421,6 +5421,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1, + MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP, __MLXSW_REG_HTGT_TRAP_GROUP_MAX, MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index dcf9562bce8a..a806c6190bb1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -812,23 +812,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } - if (eth_skb_pad(skb)) { this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); return NETDEV_TX_OK; @@ -1167,6 +1161,9 @@ static void update_stats_cache(struct work_struct *work) periodic_hw_stats.update_dw.work); if (!netif_carrier_ok(mlxsw_sp_port->dev)) + /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as + * necessary when port goes down. 
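+ * The work requeues itself unconditionally, so stats collection resumes on its own once the carrier comes back up.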
+ */ goto out; mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, @@ -4176,6 +4173,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; } +static void +mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -4197,6 +4203,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, } else { netdev_info(mlxsw_sp_port->dev, "link down\n"); netif_carrier_off(mlxsw_sp_port->dev); + mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); } } @@ -4398,8 +4405,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -4483,6 +4490,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) rate = 19 * 1024; burst_size = 12; break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: + rate = 360; + burst_size = 7; + break; default: continue; } @@ -4522,6 +4533,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: priority = 5; tc = 5; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 150b3a144b83..3d3cca596116 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -8,6 +8,7 @@ #include #include #include +#include <linux/mutex.h> #include #include @@ -25,6 +26,7 @@ struct mlxsw_sp_acl { struct mlxsw_sp_fid *dummy_fid; struct rhashtable ruleset_ht; struct list_head rules; + struct mutex rules_lock; /* Protects rules list */ struct { struct delayed_work dw; unsigned long interval; /* ms */ @@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, goto err_ruleset_block_bind; } + mutex_lock(&mlxsw_sp->acl->rules_lock); list_add_tail(&rule->list, &mlxsw_sp->acl->rules); + mutex_unlock(&mlxsw_sp->acl->rules_lock); block->rule_count++; block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker; return 0; @@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; ruleset->ht_key.block->rule_count--; + mutex_lock(&mlxsw_sp->acl->rules_lock); list_del(&rule->list); + mutex_unlock(&mlxsw_sp->acl->rules_lock); if (!ruleset->ht_key.chain_index && mlxsw_sp_acl_ruleset_is_singular(ruleset)) mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, @@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl) struct mlxsw_sp_acl_rule *rule; int err; - /* Protect internal structures from changes */ - rtnl_lock(); + mutex_lock(&acl->rules_lock); list_for_each_entry(rule, &acl->rules, list) { err =
mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp, rule); if (err) goto err_rule_update; } - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return 0; err_rule_update: - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return err; } @@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); + mutex_init(&acl->rules_lock); err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; @@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) return 0; err_acl_ops_init: + mutex_destroy(&acl->rules_lock); mlxsw_sp_fid_put(fid); err_fid_get: rhashtable_destroy(&acl->ruleset_ht); @@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); + mutex_destroy(&acl->rules_lock); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 49933818c6f5..2dc0978428e6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled, start_again: err = devlink_dpipe_entry_ctx_prepare(dump_ctx); if (err) - return err; + goto err_ctx_prepare; j = 0; for (; i < rif_count; i++) { struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); @@ -247,6 +247,7 @@ start_again: return 0; err_entry_append: err_entry_get: +err_ctx_prepare: rtnl_unlock(); devlink_dpipe_entry_clear(&entry); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index b607919c8ad0..498de6ef6870 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -123,9 +123,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, u8 prio = act->vlan.prio; u16 vid = act->vlan.vid; - return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, - act->id, vid, - proto, prio, extack); + err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, + act->id, vid, + proto, prio, extack); + if (err) + return err; + break; } default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 54275624718b..336e5ecc68f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table, return 0; err_erif_unresolve: - list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list, - vif_node) + list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list, + vif_node) mlxsw_sp_mr_route_evif_unresolve(mr_table, erve); err_irif_unresolve: - list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list, - vif_node) + list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list, + vif_node) mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve); mr_vif->rif = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index bdf53cf350f6..dc63583c4948 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static u64 +mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->backlog[tclass_num] + + xstats->backlog[tclass_num + 8]; +} + +static u64 +mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->tail_drop[tclass_num] + + xstats->tail_drop[tclass_num + 8]; +} + static void mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, u8 prio_bitmap, u64 *tx_packets, @@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; - red_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; stats_base->drops = red_base->prob_drop + red_base->pdrop; @@ -369,7 +383,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; marks = xstats->ecn - xstats_base->prob_mark; - pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - + xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; @@ -402,9 +417,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; - drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + drops = xstats->wred_drop[tclass_num] + + mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - stats_base->drops; - backlog = xstats->backlog[tclass_num]; + backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num); _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); stats_ptr->qstats->overlimits += overlimits; @@ -575,9 +591,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, tx_packets = stats->tx_packets - stats_base->tx_packets; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - drops += xstats->tail_drop[i]; + drops += mlxsw_sp_xstats_tail_drop(xstats, i); drops += xstats->wred_drop[i]; - backlog += xstats->backlog[i]; + backlog += mlxsw_sp_xstats_backlog(xstats, i); } drops = drops - stats_base->drops; @@ -613,7 +629,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->drops = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i); stats_base->drops += xstats->wred_drop[i]; } @@ -650,6 +666,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) return 0; + if (!p->child_handle) { + /* This is an invisible FIFO replacing the original Qdisc. + * Ignore it--the original Qdisc's destroy will follow. + */ + return 0; + } + /* See if the grafted qdisc is already offloaded on any tclass. If so, * unoffload it. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 39d600c8b92d..efdf8cb5114c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5637,8 +5637,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_fib6_rt_should_ignore(rt)) return; + /* Multipath routes are first added to the FIB trie and only then + * notified. If we vetoed the addition, we will get a delete + * notification for a route we do not have. Therefore, do not warn if + * route was not found. + */ fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); - if (WARN_ON(!fib6_entry)) + if (!fib6_entry) return; /* If not all the nexthops are deleted, then only reduce the nexthop @@ -6980,6 +6985,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { rif = mlxsw_sp->router->rifs[i]; + if (rif && rif->ops && + rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB) + continue; if (rif && rif->dev && rif->dev != dev && !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, mlxsw_sp->mac_mask)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 1c14c051ee52..63e7a058b7c6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its * bytes as being sent. diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index a41a90c589db..45cc840d8e2e 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -157,21 +157,47 @@ static int msg_enable; */ /** - * ks_rdreg8 - read 8 bit register from device + * ks_check_endian - Check whether endianness of the bus is correct * @ks : The chip information - * @offset: The register address * - * Read a 8bit register from the chip, returning the result + * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit + * bus. To maintain optimum performance, the bus endianness should be set + * such that it matches the endianness of the CPU. */ -static u8 ks_rdreg8(struct ks_net *ks, int offset) + +static int ks_check_endian(struct ks_net *ks) { - u16 data; - u8 shift_bit = offset & 0x03; - u8 shift_data = (offset & 1) << 3; - ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit); - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); - data = ioread16(ks->hw_addr); - return (u8)(data >> shift_data); + u16 cider; + + /* + * Read CIDER register first, however read it the "wrong" way around. 
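+ * CIDER holds the chip's fixed, non-zero ID, so the value a correct read returns is known in advance, which is what makes this probe conclusive.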
+ * If the endian strap on the KS8851-16MLL is incorrect and the chip + * is operating in different endianness than the CPU, then the meaning + * of BE[3:0] byte-enable bits is also swapped such that: + * BE[3,2,1,0] becomes BE[1,0,3,2] + * + * Luckily for us, the byte-enable bits are the top four MSbits of + * the address register and the CIDER register is at offset 0xc0. + * Hence, by reading address 0xc0c0, which is not impacted by endian + * swapping, we assert either BE[3:2] or BE[1:0] while reading the + * CIDER register. + * + * If the bus configuration is correct, reading 0xc0c0 asserts + * BE[3:2] and this read returns 0x0000, because to read a register + * with bottom two LSbits of address set to 0, BE[1:0] must be + * asserted. + * + * If the bus configuration is NOT correct, reading 0xc0c0 asserts + * BE[1:0] and this read returns non-zero 0x8872 value. + */ + iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd); + cider = ioread16(ks->hw_addr); + if (!cider) + return 0; + + netdev_err(ks->netdev, "incorrect EESK endian strap setting\n"); + + return -EINVAL; } /** @@ -189,22 +215,6 @@ static u16 ks_rdreg16(struct ks_net *ks, int offset) return ioread16(ks->hw_addr); } -/** - * ks_wrreg8 - write 8bit register value to chip - * @ks: The chip information - * @offset: The register address - * @value: The value to write - * - */ -static void ks_wrreg8(struct ks_net *ks, int offset, u8 value) -{ - u8 shift_bit = (offset & 0x03); - u16 value_write = (u16)(value << ((offset & 1) << 3)); - ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit); - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); - iowrite16(value_write, ks->hw_addr); -} - /** * ks_wrreg16 - write 16bit register value to chip * @ks: The chip information @@ -324,8 +334,7 @@ static void ks_read_config(struct ks_net *ks) u16 reg_data = 0; /* Regardless of bus width, 8 bit read should always work.*/ - reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF; - reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8; + reg_data = ks_rdreg16(ks, KS_CCR); /* addr/data bus are multiplexed */ ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED; @@ -429,7 +438,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) /* 1. set sudo DMA mode */ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); /* 2. read prepend data */ /** @@ -446,7 +455,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) ks_inblk(ks, buf, ALIGN(len, 4)); /* 4.
reset sudo DMA Mode */ - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); } /** @@ -548,14 +557,17 @@ static irqreturn_t ks_irq(int irq, void *pw) { struct net_device *netdev = pw; struct ks_net *ks = netdev_priv(netdev); + unsigned long flags; u16 status; + spin_lock_irqsave(&ks->statelock, flags); /*this should be the first in IRQ handler */ ks_save_cmd_reg(ks); status = ks_rdreg16(ks, KS_ISR); if (unlikely(!status)) { ks_restore_cmd_reg(ks); + spin_unlock_irqrestore(&ks->statelock, flags); return IRQ_NONE; } @@ -581,6 +593,7 @@ static irqreturn_t ks_irq(int irq, void *pw) ks->netdev->stats.rx_over_errors++; /* this should be the last in IRQ handler*/ ks_restore_cmd_reg(ks); + spin_unlock_irqrestore(&ks->statelock, flags); return IRQ_HANDLED; } @@ -650,6 +663,7 @@ static int ks_net_stop(struct net_device *netdev) /* shutdown RX/TX QMU */ ks_disable_qmu(ks); + ks_disable_int(ks); /* set powermode to soft power down to save power */ ks_set_powermode(ks, PMECR_PM_SOFTDOWN); @@ -679,13 +693,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) ks->txh.txw[1] = cpu_to_le16(len); /* 1. set sudo-DMA mode */ - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); /* 2. write status/lenth info */ ks_outblk(ks, ks->txh.txw, 4); /* 3. write pkt data */ ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4)); /* 4. reset sudo-DMA mode */ - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */ ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE); /* 6. wait until TXQCR_METFE is auto-cleared */ @@ -706,10 +720,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { netdev_tx_t retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); + unsigned long flags; - disable_irq(netdev->irq); - ks_disable_int(ks); - spin_lock(&ks->statelock); + spin_lock_irqsave(&ks->statelock, flags); /* Extra space are required: * 4 byte for alignment, 4 for status/length, 4 for CRC @@ -723,9 +736,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb(skb); } else retv = NETDEV_TX_BUSY; - spin_unlock(&ks->statelock); - ks_enable_int(ks); - enable_irq(netdev->irq); + spin_unlock_irqrestore(&ks->statelock, flags); return retv; } @@ -1251,6 +1262,10 @@ static int ks8851_probe(struct platform_device *pdev) goto err_free; } + err = ks_check_endian(ks); + if (err) + goto err_free; + netdev->irq = platform_get_irq(pdev, 0); if ((int)netdev->irq < 0) { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 672ea1342add..da1fd0e08c36 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1979,14 +1979,18 @@ static struct ptp_clock_info ocelot_ptp_clock_info = { static int ocelot_init_timestamp(struct ocelot *ocelot) { + struct ptp_clock *ptp_clock; + ocelot->ptp_info = ocelot_ptp_clock_info; - ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); - if (IS_ERR(ocelot->ptp_clock)) - return PTR_ERR(ocelot->ptp_clock); + ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); + if (IS_ERR(ptp_clock)) + return PTR_ERR(ptp_clock); /* Check if PHC support is missing at the configuration level */ - if (!ocelot->ptp_clock) + if (!ptp_clock) return 0; + ocelot->ptp_clock = ptp_clock; + ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG); ocelot_write(ocelot, 0xffffffff, 
ANA_TABLES_PTP_ID_LOW); ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH); @@ -2213,6 +2217,8 @@ void ocelot_deinit(struct ocelot *ocelot) destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); ocelot_ace_deinit(); + if (ocelot->ptp_clock) + ptp_clock_unregister(ocelot->ptp_clock); for (i = 0; i < ocelot->num_phys_ports; i++) { port = ocelot->ports[i]; diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index aac115136720..337156232501 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -112,6 +112,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) if (err != 4) break; + /* At this point the IFH was read correctly, so it is safe to + * presume that there is no error. The err needs to be reset + * otherwise a frame could come in CPU queue between the while + * condition and the check for error later on. And in that case + * the new frame is just removed and not processed. + */ + err = 0; + ocelot_parse_ifh(ifh, &info); dev = ocelot->ports[info.port]->dev; diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index b339125b2f09..05e760444a92 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); + spin_lock_init(&lp->lock); + for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (skb == NULL) { @@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev) return 0; } +/* Wait for the SONIC to become idle. */ +static void sonic_quiesce(struct net_device *dev, u16 mask) +{ + struct sonic_local * __maybe_unused lp = netdev_priv(dev); + int i; + u16 bits; + + for (i = 0; i < 1000; ++i) { + bits = SONIC_READ(SONIC_CMD) & mask; + if (!bits) + return; + if (irqs_disabled() || in_interrupt()) + udelay(20); + else + usleep_range(100, 200); + } + WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits); +} /* * Close the SONIC device @@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev) /* * stop the SONIC, disable interrupts */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev) * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev) * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. - * Until some mutual exclusion is added, this code will not work with SMP. However, - * MIPS Jazz machines and m68k Macs were all uni-processor machines. 
*/ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; - int entry = lp->next_tx; + int entry; + unsigned long flags; netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); @@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_irqsave(&lp->lock, flags); + + entry = lp->next_tx; + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ @@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); - /* - * Must set tx_skb[entry] only after clearing status, and - * before clearing EOL and before stopping queue - */ wmb(); lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; @@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; } @@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; + unsigned long flags; + + /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() + * with sonic_send_packet() so that the two functions can share state. + * Secondly, it makes sonic_interrupt() re-entrant, as that is required + * by macsonic which must use two IRQs with different priority levels. + */ + spin_lock_irqsave(&lp->lock, flags); + + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + if (!status) { + spin_unlock_irqrestore(&lp->lock, flags); - if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) return IRQ_NONE; + } do { + SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ + if (status & SONIC_INT_PKTRX) { netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ - SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } if (status & SONIC_INT_TXDN) { @@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) int td_status; int freed_some = 0; - /* At this point, cur_tx is the index of a TD that is one of: - * unallocated/freed (status set & tx_skb[entry] clear) - * allocated and sent (status set & tx_skb[entry] set ) - * allocated and not yet sent (status clear & tx_skb[entry] set ) - * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) + /* The state of a Transmit Descriptor may be inferred + * from { tx_skb[entry], td_status } as follows. 
+ * { clear, clear } => the TD has never been used + * { set, clear } => the TD was handed to SONIC + * { set, set } => the TD was handed back + * { clear, set } => the TD is available for re-use */ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); @@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; - if (td_status & 0x0001) { + if (td_status & SONIC_TCR_PTX) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { - lp->stats.tx_errors++; - if (td_status & 0x0642) + if (td_status & (SONIC_TCR_EXD | + SONIC_TCR_EXC | SONIC_TCR_BCM)) lp->stats.tx_aborted_errors++; - if (td_status & 0x0180) + if (td_status & + (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) lp->stats.tx_carrier_errors++; - if (td_status & 0x0020) + if (td_status & SONIC_TCR_OWC) lp->stats.tx_window_errors++; - if (td_status & 0x0004) + if (td_status & SONIC_TCR_FU) lp->stats.tx_fifo_errors++; } @@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ } /* @@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (status & SONIC_INT_RFO) { netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", __func__); - lp->stats.rx_fifo_errors++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ } if (status & SONIC_INT_RDE) { netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ } if (status & SONIC_INT_RBAE) { netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ } /* counter overruns; all counters are 16bit wide */ - if (status & SONIC_INT_FAE) { + if (status & SONIC_INT_FAE) lp->stats.rx_frame_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ - } - if (status & SONIC_INT_CRC) { + if (status & SONIC_INT_CRC) lp->stats.rx_crc_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ - } - if (status & SONIC_INT_MP) { + if (status & SONIC_INT_MP) lp->stats.rx_missed_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */ - } /* transmit error */ if (status & SONIC_INT_TXER) { - if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) - netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ + u16 tcr = SONIC_READ(SONIC_TCR); + + netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", + __func__, tcr); + + if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | + SONIC_TCR_FU | SONIC_TCR_BCM)) { + /* Aborted transmission. Try again. */ + netif_stop_queue(dev); + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + } } /* bus retry */ @@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. 
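The reworked sonic_interrupt() above also changes how events are acknowledged: instead of writing each ISR bit back after its handler ran, the whole status word is cleared once at the top of the service loop and the ISR is re-read afterwards, so an event that fires mid-service cannot be acked unseen. A hedged sketch of the idiom, with hypothetical read_isr()/write_isr() accessors standing in for SONIC_READ()/SONIC_WRITE():

    #define MY_IMR_DEFAULT  0x0fff          /* illustrative interrupt mask */
    #define MY_INT_RX       0x0001

    extern u16 read_isr(void);              /* hypothetical accessor */
    extern void write_isr(u16 bits);        /* hypothetical accessor */
    extern void handle_rx(void);

    static void service_events(void)
    {
            u16 status = read_isr() & MY_IMR_DEFAULT;

            while (status) {
                    /* Ack the whole batch first: anything that arrives
                     * while we handle it stays pending and is caught by
                     * the re-read below, instead of being cleared after
                     * the fact and lost.
                     */
                    write_isr(status);

                    if (status & MY_INT_RX)
                            handle_rx();

                    status = read_isr() & MY_IMR_DEFAULT;
            }
    }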
*/ SONIC_WRITE(SONIC_IMR, 0); - SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ } - /* load CAM done */ - if (status & SONIC_INT_LCD) - SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ - } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + } while (status); + + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_HANDLED; } +/* Return the array index corresponding to a given Receive Buffer pointer. */ +static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, + unsigned int last) +{ + unsigned int i = last; + + do { + i = (i + 1) & SONIC_RRS_MASK; + if (addr == lp->rx_laddr[i]) + return i; + } while (i != last); + + return -ENOENT; +} + +/* Allocate and map a new skb to be used as a receive buffer. */ +static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, + struct sk_buff **new_skb, dma_addr_t *new_addr) +{ + *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (!*new_skb) + return false; + + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(*new_skb, 2); + + *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (!*new_addr) { + dev_kfree_skb(*new_skb); + *new_skb = NULL; + return false; + } + + return true; +} + +/* Place a new receive resource in the Receive Resource Area and update RWP. */ +static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, + dma_addr_t old_addr, dma_addr_t new_addr) +{ + unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); + unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); + u32 buf; + + /* The resources in the range [RRP, RWP) belong to the SONIC. This loop + * scans the other resources in the RRA, those in the range [RWP, RRP). + */ + do { + buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | + sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); + + if (buf == old_addr) + break; + + entry = (entry + 1) & SONIC_RRS_MASK; + } while (entry != end); + + WARN_ONCE(buf != old_addr, "failed to find resource!\n"); + + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); + + entry = (entry + 1) & SONIC_RRS_MASK; + + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); +} + /* * We have a good packet(s), pass it/them up the network stack. */ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); - int status; int entry = lp->cur_rx; + int prev_entry = lp->eol_rx; + bool rbe = false; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { - struct sk_buff *used_skb; - struct sk_buff *new_skb; - dma_addr_t new_laddr; - u16 bufadr_l; - u16 bufadr_h; - int pkt_len; + u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); - status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); - if (status & SONIC_RCR_PRX) { - /* Malloc up new buffer. 
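The two new receive helpers above, index_from_addr() and sonic_update_rra(), share one idiom: walk a power-of-two ring starting just past a known position, wrapping with a mask, until a match is found or the scan comes back to its starting point. A generic sketch of that scan, assuming the ring size is a power of two and RING_MASK is size minus one:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define RING_MASK (16 - 1)      /* illustrative: a 16-entry ring */

    static int ring_find(const dma_addr_t *ring, dma_addr_t addr,
                         unsigned int last)
    {
            unsigned int i = last;

            do {
                    i = (i + 1) & RING_MASK;        /* wrap around */
                    if (ring[i] == addr)
                            return i;
            } while (i != last);

            return -ENOENT;         /* full lap, no match */
    }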
*/ - new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); - if (new_skb == NULL) { - lp->stats.rx_dropped++; - break; - } - /* provide 16 byte IP header alignment unless DMA requires otherwise */ - if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) - skb_reserve(new_skb, 2); + /* If the RD has LPKT set, the chip has finished with the RB */ + if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { + struct sk_buff *new_skb; + dma_addr_t new_laddr; + u32 addr = (sonic_rda_get(dev, entry, + SONIC_RD_PKTPTR_H) << 16) | + sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); + int i = index_from_addr(lp, addr, entry); - new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), - SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!new_laddr) { - dev_kfree_skb(new_skb); - printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + if (i < 0) { + WARN_ONCE(1, "failed to find buffer!\n"); break; } - /* now we have a new skb to replace it, pass the used one up the stack */ - dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); - used_skb = lp->rx_skb[entry]; - pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); - skb_trim(used_skb, pkt_len); - used_skb->protocol = eth_type_trans(used_skb, dev); - netif_rx(used_skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { + struct sk_buff *used_skb = lp->rx_skb[i]; + int pkt_len; - /* and insert the new skb */ - lp->rx_laddr[entry] = new_laddr; - lp->rx_skb[entry] = new_skb; + /* Pass the used buffer up the stack */ + dma_unmap_single(lp->device, addr, SONIC_RBSIZE, + DMA_FROM_DEVICE); - bufadr_l = (unsigned long)new_laddr & 0xffff; - bufadr_h = (unsigned long)new_laddr >> 16; - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); - } else { - /* This should only happen, if we enable accepting broken packets. */ - lp->stats.rx_errors++; - if (status & SONIC_RCR_FAER) - lp->stats.rx_frame_errors++; - if (status & SONIC_RCR_CRCR) - lp->stats.rx_crc_errors++; - } - if (status & SONIC_RCR_LPKT) { - /* - * this was the last packet out of the current receive buffer - * give the buffer back to the SONIC + pkt_len = sonic_rda_get(dev, entry, + SONIC_RD_PKTLEN); + skb_trim(used_skb, pkt_len); + used_skb->protocol = eth_type_trans(used_skb, + dev); + netif_rx(used_skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + + lp->rx_skb[i] = new_skb; + lp->rx_laddr[i] = new_laddr; + } else { + /* Failed to obtain a new buffer so re-use it */ + new_laddr = addr; + lp->stats.rx_dropped++; + } + /* If RBE is already asserted when RWP advances then + * it's safe to clear RBE after processing this packet. */ - lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); - if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); - if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { - netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ - } - } else - printk(KERN_ERR "%s: rx desc without RCR_LPKT. 
Shouldn't happen !?\n", - dev->name); + rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; + sonic_update_rra(dev, lp, addr, new_laddr); + } /* * give back the descriptor */ - sonic_rda_put(dev, entry, SONIC_RD_LINK, - sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); + sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); - sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, - sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); - lp->eol_rx = entry; - lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; + + prev_entry = entry; + entry = (entry + 1) & SONIC_RDS_MASK; } + + lp->cur_rx = entry; + + if (prev_entry != lp->eol_rx) { + /* Advance the EOL flag to put descriptors back into service */ + sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | + sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); + lp->eol_rx = prev_entry; + } + + if (rbe) + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* * If any worth-while packets have been received, netif_rx() * has done a mark_bh(NET_BH) for us and will work on them @@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { + unsigned long flags; + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ @@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev) i++; } SONIC_WRITE(SONIC_CDC, 16); - /* issue Load CAM command */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); + + /* LCAM and TXP commands can't be used simultaneously */ + spin_lock_irqsave(&lp->lock, flags); + sonic_quiesce(dev, SONIC_CR_TXP); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); + sonic_quiesce(dev, SONIC_CR_LCAM); + spin_unlock_irqrestore(&lp->lock, flags); } } @@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev) */ static int sonic_init(struct net_device *dev) { - unsigned int cmd; struct sonic_local *lp = netdev_priv(dev); int i; @@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + /* While in reset mode, clear CAM Enable register */ + SONIC_WRITE(SONIC_CE, 0); + /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); + sonic_quiesce(dev, SONIC_CR_ALL); /* * initialize the receive resource area @@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev) } /* initialize all RRA registers */ - lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - - SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_REA, lp->rra_end); - SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); + SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 
2 : 1)); @@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) - break; - } - - netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), i); + sonic_quiesce(dev, SONIC_CR_RRRA); /* * Initialize the receive descriptors so that they @@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev) * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); - - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) - break; - } - netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); + sonic_quiesce(dev, SONIC_CR_LCAM); /* * enable receiver, disable loopback * and enable all interrupts */ - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); - - cmd = SONIC_READ(SONIC_CMD); - if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) - printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, SONIC_READ(SONIC_CMD)); diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h index 2b27f7049acb..1df6d2f06cc4 100644 --- a/drivers/net/ethernet/natsemi/sonic.h +++ b/drivers/net/ethernet/natsemi/sonic.h @@ -110,6 +110,9 @@ #define SONIC_CR_TXP 0x0002 #define SONIC_CR_HTX 0x0001 +#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \ + SONIC_CR_RXEN | SONIC_CR_TXP) + /* * SONIC data configuration bits */ @@ -175,6 +178,7 @@ #define SONIC_TCR_NCRS 0x0100 #define SONIC_TCR_CRLS 0x0080 #define SONIC_TCR_EXC 0x0040 +#define SONIC_TCR_OWC 0x0020 #define SONIC_TCR_PMB 0x0008 #define SONIC_TCR_FU 0x0004 #define SONIC_TCR_BCM 0x0002 @@ -274,8 +278,9 @@ #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ #define SONIC_NUM_TDS 16 /* number of transmit descriptors */ -#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) -#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) +#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1) +#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1) +#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1) #define SONIC_RBSIZE 1520 /* size of one resource buffer */ @@ -312,8 +317,6 @@ struct sonic_local { u32 rda_laddr; /* logical DMA address of RDA */ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */ - unsigned int rra_end; - unsigned int cur_rwp; unsigned int cur_rx; unsigned int cur_tx; /* first unacked transmit packet */ unsigned int eol_rx; @@ -322,6 +325,7 @@ struct sonic_local { int msg_enable; struct device *device; /* generic device */ struct net_device_stats stats; + spinlock_t lock; }; #define TX_TIMEOUT (3 * HZ) @@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev); as far as we can tell. */ /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put() is a much better name. 
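The hunk just below converts sonic_buf_put()/sonic_buf_get() from plain pointer dereferences to __raw_writew()/__raw_readw(). A plain store to descriptor memory may be merged, split or reordered by the compiler, while the __raw_* accessors guarantee a single native-endian 16-bit access, which is what memory shared with the chip's DMA engine needs. A reduced sketch of the idea, with generic names rather than the driver's:

    #include <asm/io.h>
    #include <linux/types.h>

    static inline void desc_put(u16 *base, int offset, u16 val)
    {
            __raw_writew(val, base + offset);   /* one 16-bit store, no byte swap */
    }

    static inline u16 desc_get(u16 *base, int offset)
    {
            return __raw_readw(base + offset);  /* one 16-bit load, no byte swap */
    }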
*/ -static inline void sonic_buf_put(void* base, int bitmode, +static inline void sonic_buf_put(u16 *base, int bitmode, int offset, __u16 val) { if (bitmode) #ifdef __BIG_ENDIAN - ((__u16 *) base + (offset*2))[1] = val; + __raw_writew(val, base + (offset * 2) + 1); #else - ((__u16 *) base + (offset*2))[0] = val; + __raw_writew(val, base + (offset * 2) + 0); #endif else - ((__u16 *) base)[offset] = val; + __raw_writew(val, base + (offset * 1) + 0); } -static inline __u16 sonic_buf_get(void* base, int bitmode, +static inline __u16 sonic_buf_get(u16 *base, int bitmode, int offset) { if (bitmode) #ifdef __BIG_ENDIAN - return ((volatile __u16 *) base + (offset*2))[1]; + return __raw_readw(base + (offset * 2) + 1); #else - return ((volatile __u16 *) base + (offset*2))[0]; + return __raw_readw(base + (offset * 2) + 0); #endif else - return ((volatile __u16 *) base)[offset]; + return __raw_readw(base + (offset * 1) + 0); } /* Inlines that you should actually use for reading/writing DMA buffers */ @@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry, (entry * SIZEOF_SONIC_RR) + offset); } +static inline u16 sonic_rr_addr(struct net_device *dev, int entry) +{ + struct sonic_local *lp = netdev_priv(dev); + + return lp->rra_laddr + + entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); +} + +static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr) +{ + struct sonic_local *lp = netdev_priv(dev); + + return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR * + SONIC_BUS_SCALE(lp->dma_bitmode)); +} + static const char version[] = "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c index 9f8a1f69c0c4..23ebddfb9532 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/cls.c +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c @@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink, u8 mask, val; int err; - if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) { - err = -EOPNOTSUPP; + if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) goto err_delete; - } tos_off = proto == htons(ETH_P_IP) ? 16 : 20; @@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink, if ((iter->val & cmask) == (val & cmask) && iter->band != knode->res->classid) { NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter"); - err = -EOPNOTSUPP; goto err_delete; } } if (!match) { match = kzalloc(sizeof(*match), GFP_KERNEL); - if (!match) { - err = -ENOMEM; - goto err_delete; - } - + if (!match) + return -ENOMEM; list_add(&match->list, &alink->dscp_map); } match->handle = knode->handle; @@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink, err_delete: nfp_abm_u32_knode_delete(alink, knode); - return err; + return -EOPNOTSUPP; } static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type, diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 7c4a15e967df..5defd31d481c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. 
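In the nfp flower hunk that follows, the stats-id allocator is reordered so the id is built from the current memory unit before the unit counter advances, and a row of unallocated entries is consumed only once every unit in it has been used. A generic fragment sketching the corrected order; row, unit and make_id() are illustrative stand-ins for init_unalloc, active_mem_unit and the FIELD_PREP encoding:

    /* Inside the allocator, after the free-entry check: hand out
     * (row - 1, unit) first, then advance, wrapping the unit counter
     * and consuming the row only when it is exhausted.
     */
    u32 id = make_id(row - 1, unit);    /* hypothetical encoder */

    if (++unit == total_units) {
            row--;
            unit = 0;
    }
    return id;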
*/ if (priv->stats_ids.init_unalloc > 0) { - if (priv->active_mem_unit == priv->total_mem_units) { - priv->stats_ids.init_unalloc--; - priv->active_mem_unit = 0; - } - *stats_context_id = FIELD_PREP(NFP_FL_STAT_ID_STAT, priv->stats_ids.init_unalloc - 1) | FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, priv->active_mem_unit); - priv->active_mem_unit++; + + if (++priv->active_mem_unit == priv->total_mem_units) { + priv->stats_ids.init_unalloc--; + priv->active_mem_unit = 0; + } + return 0; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 5bfdda19f64d..d8745f87f065 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -862,7 +862,7 @@ struct ionic_rxq_comp { #define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40 #define IONIC_RXQ_COMP_CSUM_F_CALC 0x80 u8 pkt_type_color; -#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x0f +#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x7f }; enum ionic_pkt_type { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 20faa8d24c9f..134640412d7b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1364,12 +1364,9 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, static int ionic_lif_rss_init(struct ionic_lif *lif) { - u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; unsigned int tbl_sz; unsigned int i; - netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); - lif->rss_types = IONIC_RSS_TYPE_IPV4 | IONIC_RSS_TYPE_IPV4_TCP | IONIC_RSS_TYPE_IPV4_UDP | @@ -1382,12 +1379,18 @@ static int ionic_lif_rss_init(struct ionic_lif *lif) for (i = 0; i < tbl_sz; i++) lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); - return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL); + return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); } -static int ionic_lif_rss_deinit(struct ionic_lif *lif) +static void ionic_lif_rss_deinit(struct ionic_lif *lif) { - return ionic_lif_rss_config(lif, 0x0, NULL, NULL); + int tbl_sz; + + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + memset(lif->rss_ind_tbl, 0, tbl_sz); + memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); + + ionic_lif_rss_config(lif, 0x0, NULL, NULL); } static void ionic_txrx_disable(struct ionic_lif *lif) @@ -1710,6 +1713,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); goto err_out_free_qcqs; } + netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); list_add_tail(&lif->list, &ionic->lifs); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 0dacf2c18c09..3e613058e225 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -44,8 +44,8 @@ /* Add/subtract the Adjustment_Value when making a Drift adjustment */ #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 #define QED_TIMESTAMP_MASK BIT(16) -/* Param mask for Hardware to detect/timestamp the unicast PTP packets */ -#define QED_PTP_UCAST_PARAM_MASK 0xF +/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */ +#define QED_PTP_UCAST_PARAM_MASK 0x70F static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index c303a92d5b06..1f27f9866b80 100644 --- 
a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -163,6 +163,8 @@ struct qede_rdma_dev { struct list_head entry; struct list_head rdma_event_list; struct workqueue_struct *rdma_wq; + struct kref refcnt; + struct completion event_comp; bool exp_recovery; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9a6a9a008714..c8bdbf057d5a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev, netif_addr_lock_bh(ndev); mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { + if (mc_count <= 64) { netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a220cc7c947a..ba53612ae0df 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->rx_buf_seg_size = roundup_pow_of_two(size); } else { rxq->rx_buf_seg_size = PAGE_SIZE; + edev->ndev->features &= ~NETIF_F_GRO_HW; } /* Allocate the parallel driver ring for Rx buffers */ @@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); if (!edev->gro_disable) qede_set_tpa_param(rxq); err: @@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } - - edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); } static int qede_set_real_num_queues(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index ffabc2d2f082..2d873ae8a234 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -59,6 +59,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev) static int qede_rdma_create_wq(struct qede_dev *edev) { INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list); + kref_init(&edev->rdma_info.refcnt); + init_completion(&edev->rdma_info.event_comp); + edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq"); if (!edev->rdma_info.rdma_wq) { DP_NOTICE(edev, "qedr: Could not create workqueue\n"); @@ -83,8 +86,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev) } } +static void qede_rdma_complete_event(struct kref *ref) +{ + struct qede_rdma_dev *rdma_dev = + container_of(ref, struct qede_rdma_dev, refcnt); + + /* no more events will be added after this */ + complete(&rdma_dev->event_comp); +} + static void qede_rdma_destroy_wq(struct qede_dev *edev) { + /* Avoid race with add_event flow, make sure it finishes before + * we start accessing the list and cleaning up the work + */ + kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); + wait_for_completion(&edev->rdma_info.event_comp); + qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); } @@ -310,15 +328,24 @@ static void qede_rdma_add_event(struct qede_dev *edev, if (!edev->rdma_info.qedr_dev) return; + /* We don't want the cleanup flow to start while we're allocating and + * scheduling the work + */ + if (!kref_get_unless_zero(&edev->rdma_info.refcnt)) + return; /* already being destroyed */ + 
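The qede_rdma changes above close a shutdown race with a reference count plus a completion: every event producer takes a reference around queue_work(), and teardown drops the initial reference, then waits until the last producer is gone before draining and destroying the workqueue. A self-contained sketch of the pattern under illustrative names (evt_ctx and friends are not qede symbols):

    #include <linux/completion.h>
    #include <linux/kref.h>
    #include <linux/workqueue.h>

    struct evt_ctx {
            struct kref refcnt;             /* kref_init() at setup */
            struct completion done;         /* init_completion() at setup */
            struct workqueue_struct *wq;
    };

    static void evt_ctx_release(struct kref *ref)
    {
            struct evt_ctx *c = container_of(ref, struct evt_ctx, refcnt);

            complete(&c->done);     /* no producer can enter any more */
    }

    static void evt_post(struct evt_ctx *c, struct work_struct *w)
    {
            if (!kref_get_unless_zero(&c->refcnt))
                    return;         /* teardown already started */
            queue_work(c->wq, w);
            kref_put(&c->refcnt, evt_ctx_release);
    }

    static void evt_teardown(struct evt_ctx *c)
    {
            kref_put(&c->refcnt, evt_ctx_release);  /* drop initial ref */
            wait_for_completion(&c->done);          /* wait out producers */
            destroy_workqueue(c->wq);
    }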
event_node = qede_rdma_get_free_event_node(edev); if (!event_node) - return; + goto out; event_node->event = event; event_node->ptr = edev; INIT_WORK(&event_node->work, qede_rdma_handle_event); queue_work(edev->rdma_info.rdma_wq, &event_node->work); + +out: + kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); } void qede_rdma_dev_event_open(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b4b8ba00ee01..986f26578d34 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) int err; for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { @@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) ql_free_large_buffers(qdev); return -ENOMEM; } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; /* * We save some space to copy the ethhdr from first * buffer @@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) return -ENOMEM; } + lrg_buf_cb->skb = skb; dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index a496390b8632..07f9067affc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, break; } entry += p_hdr->size; + cond_resched(); } p_dev->ahw->reset.seq_index = index; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index afa10a163da1..f34ae8c75bc5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, addr += 16; reg_read -= 16; ret += 16; + cond_resched(); } out: mutex_unlock(&adapter->ahw->mem_lock); @@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; + cond_resched(); } fw_dump->clr = 1; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 06de59521fc4..fbf4cbcf1a65 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -13,25 +13,6 @@ #include "rmnet_vnd.h" #include "rmnet_private.h" -/* Locking scheme - - * The shared resource which needs to be protected is realdev->rx_handler_data. - * For the writer path, this is using rtnl_lock(). The writer paths are - * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These - * paths are already called with rtnl_lock() acquired in. There is also an - * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For - * dereference here, we will need to use rtnl_dereference(). Dev list writing - * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). 
- * For the reader path, the real_dev->rx_handler_data is called in the TX / RX - * path. We only need rcu_read_lock() for these scenarios. In these cases, - * the rcu_read_lock() is held in __dev_queue_xmit() and - * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() - * to get the relevant information. For dev list reading, we again acquire - * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). - * We also use unregister_netdevice_many() to free all rmnet devices in - * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in - * same context. - */ - /* Local Definitions and Declarations */ static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = { @@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev) return rtnl_dereference(real_dev->rx_handler_data); } -static int rmnet_unregister_real_device(struct net_device *real_dev, - struct rmnet_port *port) +static int rmnet_unregister_real_device(struct net_device *real_dev) { + struct rmnet_port *port = rmnet_get_port_rtnl(real_dev); + if (port->nr_rmnet_devs) return -EINVAL; @@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, kfree(port); - /* release reference on real_dev */ - dev_put(real_dev); - netdev_dbg(real_dev, "Removed from rmnet\n"); return 0; } @@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev) return -EBUSY; } - /* hold on to real dev for MAP data */ - dev_hold(real_dev); - for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) INIT_HLIST_HEAD(&port->muxed_ep[entry]); @@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev) return 0; } -static void rmnet_unregister_bridge(struct net_device *dev, - struct rmnet_port *port) +static void rmnet_unregister_bridge(struct rmnet_port *port) { - struct rmnet_port *bridge_port; - struct net_device *bridge_dev; + struct net_device *bridge_dev, *real_dev, *rmnet_dev; + struct rmnet_port *real_port; if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) return; - /* bridge slave handling */ + rmnet_dev = port->rmnet_dev; if (!port->nr_rmnet_devs) { - bridge_dev = port->bridge_ep; + /* bridge device */ + real_dev = port->bridge_ep; + bridge_dev = port->dev; - bridge_port = rmnet_get_port_rtnl(bridge_dev); - bridge_port->bridge_ep = NULL; - bridge_port->rmnet_mode = RMNET_EPMODE_VND; + real_port = rmnet_get_port_rtnl(real_dev); + real_port->bridge_ep = NULL; + real_port->rmnet_mode = RMNET_EPMODE_VND; } else { + /* real device */ bridge_dev = port->bridge_ep; - bridge_port = rmnet_get_port_rtnl(bridge_dev); - rmnet_unregister_real_device(bridge_dev, bridge_port); + port->bridge_ep = NULL; + port->rmnet_mode = RMNET_EPMODE_VND; } + + netdev_upper_dev_unlink(bridge_dev, rmnet_dev); + rmnet_unregister_real_device(bridge_dev); } static int rmnet_newlink(struct net *src_net, struct net_device *dev, @@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, int err = 0; u16 mux_id; + if (!tb[IFLA_LINK]) { + NL_SET_ERR_MSG_MOD(extack, "link not specified"); + return -EINVAL; + } + real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev || !dev) return -ENODEV; @@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (err) goto err1; + err = netdev_upper_dev_link(real_dev, dev, extack); + if (err < 0) + goto err2; + port->rmnet_mode = mode; + port->rmnet_dev = dev; hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); @@ 
-173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, return 0; +err2: + unregister_netdevice(dev); + rmnet_vnd_dellink(mux_id, port, ep); err1: - rmnet_unregister_real_device(real_dev, port); + rmnet_unregister_real_device(real_dev); err0: kfree(ep); return err; @@ -183,77 +177,74 @@ err0: static void rmnet_dellink(struct net_device *dev, struct list_head *head) { struct rmnet_priv *priv = netdev_priv(dev); - struct net_device *real_dev; + struct net_device *real_dev, *bridge_dev; + struct rmnet_port *real_port, *bridge_port; struct rmnet_endpoint *ep; - struct rmnet_port *port; - u8 mux_id; + u8 mux_id = priv->mux_id; real_dev = priv->real_dev; - if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + if (!rmnet_is_real_dev_registered(real_dev)) return; - port = rmnet_get_port_rtnl(real_dev); + real_port = rmnet_get_port_rtnl(real_dev); + bridge_dev = real_port->bridge_ep; + if (bridge_dev) { + bridge_port = rmnet_get_port_rtnl(bridge_dev); + rmnet_unregister_bridge(bridge_port); + } - mux_id = rmnet_vnd_get_mux(dev); - - ep = rmnet_get_endpoint(port, mux_id); + ep = rmnet_get_endpoint(real_port, mux_id); if (ep) { hlist_del_init_rcu(&ep->hlnode); - rmnet_unregister_bridge(dev, port); - rmnet_vnd_dellink(mux_id, port, ep); + rmnet_vnd_dellink(mux_id, real_port, ep); kfree(ep); } - rmnet_unregister_real_device(real_dev, port); + netdev_upper_dev_unlink(real_dev, dev); + rmnet_unregister_real_device(real_dev); unregister_netdevice_queue(dev, head); } -static void rmnet_force_unassociate_device(struct net_device *dev) +static void rmnet_force_unassociate_device(struct net_device *real_dev) { - struct net_device *real_dev = dev; struct hlist_node *tmp_ep; struct rmnet_endpoint *ep; struct rmnet_port *port; unsigned long bkt_ep; LIST_HEAD(list); - if (!rmnet_is_real_dev_registered(real_dev)) - return; + port = rmnet_get_port_rtnl(real_dev); - ASSERT_RTNL(); - - port = rmnet_get_port_rtnl(dev); - - rcu_read_lock(); - rmnet_unregister_bridge(dev, port); - - hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { - unregister_netdevice_queue(ep->egress_dev, &list); - rmnet_vnd_dellink(ep->mux_id, port, ep); - - hlist_del_init_rcu(&ep->hlnode); - kfree(ep); + if (port->nr_rmnet_devs) { + /* real device */ + rmnet_unregister_bridge(port); + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + unregister_netdevice_queue(ep->egress_dev, &list); + netdev_upper_dev_unlink(real_dev, ep->egress_dev); + rmnet_vnd_dellink(ep->mux_id, port, ep); + hlist_del_init_rcu(&ep->hlnode); + kfree(ep); + } + rmnet_unregister_real_device(real_dev); + unregister_netdevice_many(&list); + } else { + rmnet_unregister_bridge(port); } - - rcu_read_unlock(); - unregister_netdevice_many(&list); - - rmnet_unregister_real_device(real_dev, port); } static int rmnet_config_notify_cb(struct notifier_block *nb, unsigned long event, void *data) { - struct net_device *dev = netdev_notifier_info_to_dev(data); + struct net_device *real_dev = netdev_notifier_info_to_dev(data); - if (!dev) + if (!rmnet_is_real_dev_registered(real_dev)) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: - netdev_dbg(dev, "Kernel unregister\n"); - rmnet_force_unassociate_device(dev); + netdev_dbg(real_dev, "Kernel unregister\n"); + rmnet_force_unassociate_device(real_dev); break; default: @@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (!dev) return -ENODEV; - real_dev = __dev_get_by_index(dev_net(dev), - nla_get_u32(tb[IFLA_LINK])); - 
- if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + real_dev = priv->real_dev; + if (!rmnet_is_real_dev_registered(real_dev)) return -ENODEV; port = rmnet_get_port_rtnl(real_dev); if (data[IFLA_RMNET_MUX_ID]) { mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); + if (rmnet_get_endpoint(port, mux_id)) { + NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); + return -EINVAL; + } ep = rmnet_get_endpoint(port, priv->mux_id); if (!ep) return -ENODEV; @@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = { .fill_info = rmnet_fill_info, }; -/* Needs either rcu_read_lock() or rtnl lock */ -struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev) { if (rmnet_is_real_dev_registered(real_dev)) - return rcu_dereference_rtnl(real_dev->rx_handler_data); + return rcu_dereference_bh(real_dev->rx_handler_data); else return NULL; } @@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, struct rmnet_port *port, *slave_port; int err; - port = rmnet_get_port(real_dev); + port = rmnet_get_port_rtnl(real_dev); /* If there is more than one rmnet dev attached, it's probably being * used for muxing. Skip the bridging in that case @@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (port->nr_rmnet_devs > 1) return -EINVAL; + if (port->rmnet_mode != RMNET_EPMODE_VND) + return -EINVAL; + if (rmnet_is_real_dev_registered(slave_dev)) return -EBUSY; @@ -424,9 +419,17 @@ err = rmnet_register_real_device(slave_dev); if (err) return -EBUSY; - slave_port = rmnet_get_port(slave_dev); + err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, + extack); + if (err) { + rmnet_unregister_real_device(slave_dev); + return err; + } + + slave_port = rmnet_get_port_rtnl(slave_dev); slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; slave_port->bridge_ep = real_dev; + slave_port->rmnet_dev = rmnet_dev; port->rmnet_mode = RMNET_EPMODE_BRIDGE; port->bridge_ep = slave_dev; @@ -438,16 +441,9 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, struct net_device *slave_dev) { - struct rmnet_priv *priv = netdev_priv(rmnet_dev); - struct net_device *real_dev = priv->real_dev; - struct rmnet_port *port, *slave_port; + struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev); - port = rmnet_get_port(real_dev); - port->rmnet_mode = RMNET_EPMODE_VND; - port->bridge_ep = NULL; - - slave_port = rmnet_get_port(slave_dev); - rmnet_unregister_real_device(slave_dev, slave_port); + rmnet_unregister_bridge(port); netdev_dbg(slave_dev, "removed from rmnet as slave\n"); return 0; @@ -473,8 +469,8 @@ static int __init rmnet_init(void) static void __exit rmnet_exit(void) { - unregister_netdevice_notifier(&rmnet_dev_notifier); rtnl_link_unregister(&rmnet_link_ops); + unregister_netdevice_notifier(&rmnet_dev_notifier); } module_init(rmnet_init) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index cd0a6bcbe74a..be515982d628 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -28,6 +28,7 @@ struct rmnet_port { u8 rmnet_mode; struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; struct net_device *bridge_ep; + struct net_device *rmnet_dev; }; extern struct rtnl_link_ops rmnet_link_ops; @@ -65,7 +66,7 @@ struct rmnet_priv { struct rmnet_priv_stats stats; }; -struct rmnet_port *rmnet_get_port(struct net_device *real_dev); +struct
rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev); struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); int rmnet_add_bridge(struct net_device *rmnet_dev, struct net_device *slave_dev, diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 1b74bc160402..29a7bfa2584d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, static void rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) { + if (skb_mac_header_was_set(skb)) + skb_push(skb, skb->mac_len); + if (bridge_dev) { skb->dev = bridge_dev; dev_queue_xmit(skb); @@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) return RX_HANDLER_PASS; dev = skb->dev; - port = rmnet_get_port(dev); + port = rmnet_get_port_rcu(dev); switch (port->rmnet_mode) { case RMNET_EPMODE_VND: @@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb) skb->dev = priv->real_dev; mux_id = priv->mux_id; - port = rmnet_get_port(skb->dev); + port = rmnet_get_port_rcu(skb->dev); if (!port) goto drop; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 509dfc895a33..26ad40f19c64 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, return 0; } -u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev) -{ - struct rmnet_priv *priv; - - priv = netdev_priv(rmnet_dev); - return priv->mux_id; -} - int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) { netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index 54cbaf3c3bc4..14d77c709d4a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, struct rmnet_endpoint *ep); void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); -u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev); void rmnet_vnd_setup(struct net_device *dev); #endif /* _RMNET_VND_H_ */ diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index c33c438850cc..3bc6d1ef29ec 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -680,6 +680,7 @@ struct rtl8169_private { struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; + int eee_adv; const char *fw_name; struct rtl_fw *rtl_fw; @@ -1516,6 +1517,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) rtl_lock_config_regs(tp); device_set_wakeup_enable(tp_to_dev(tp), wolopts); + tp->dev->wol_enabled = wolopts ? 
1 : 0; } static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -2074,6 +2076,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) } ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); out: pm_runtime_put_noidle(d); return ret; @@ -2104,10 +2110,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { static void rtl_enable_eee(struct rtl8169_private *tp) { struct phy_device *phydev = tp->phydev; - int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + int adv; - if (supported > 0) - phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported); + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); } static void rtl8169_get_mac_version(struct rtl8169_private *tp) @@ -3919,7 +3931,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -4118,7 +4130,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: r8168dp_hw_jumbo_enable(tp); break; - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: r8168e_hw_jumbo_enable(tp); break; default: @@ -4144,7 +4156,7 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: r8168dp_hw_jumbo_disable(tp); break; - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: r8168e_hw_jumbo_disable(tp); break; default: @@ -6800,7 +6812,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); /* fall through */ - case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; default: @@ -6891,6 +6903,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp) if (!tp->phydev) { mdiobus_unregister(new_bus); return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n"); + mdiobus_unregister(new_bus); + return -EUNATCH; } /* PHY will be woken up in rtl_open() */ @@ -7063,6 +7082,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->pci_dev = pdev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 
0 : 1; + tp->eee_adv = -1; /* Get the *optional* external "ether_clk" used on some boards */ rc = rtl_get_ether_clk(tp); @@ -7147,12 +7167,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; @@ -7170,25 +7188,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; if (rtl_chip_supports_csum_v2(tp)) { - dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + dev->hw_features |= NETIF_F_IPV6_CSUM; + dev->features |= NETIF_F_IPV6_CSUM; + } + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! + */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; } - /* RTL8168e-vl and one RTL8168c variant are known to have a - * HW issue with TSO. 
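The r8169 hunk above relies on the netdev feature split to ship SG/TSO disabled but user-enableable: a bit set only in hw_features can be toggled with ethtool -K, while a bit also set in features is additionally active by default. A two-line probe-time fragment illustrating the distinction (dev being the struct net_device, as in the patch):

    dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;   /* opt-in via ethtool -K */
    dev->features    |= NETIF_F_IP_CSUM;            /* enabled by default */

Running, say, `ethtool -K eth0 tso on` would then flip the TSO bit, at the user's own risk as the comment in the patch puts it.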
- */ - if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_22) { - dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); - dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); - dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); - } - dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7ba35a0bdb29..8aa1b1bda96d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) if (cd->tsu) { add_tsu_reg(ARSTR); add_tsu_reg(TSU_CTRST); - add_tsu_reg(TSU_FWEN0); - add_tsu_reg(TSU_FWEN1); - add_tsu_reg(TSU_FCM); - add_tsu_reg(TSU_BSYSL0); - add_tsu_reg(TSU_BSYSL1); - add_tsu_reg(TSU_PRISL0); - add_tsu_reg(TSU_PRISL1); - add_tsu_reg(TSU_FWSL0); - add_tsu_reg(TSU_FWSL1); + if (cd->dual_port) { + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAGM0); - add_tsu_reg(TSU_QTAGM1); - add_tsu_reg(TSU_FWSR); - add_tsu_reg(TSU_FWINMK); - add_tsu_reg(TSU_ADQT0); - add_tsu_reg(TSU_ADQT1); - add_tsu_reg(TSU_VTAG0); - add_tsu_reg(TSU_VTAG1); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } add_tsu_reg(TSU_ADSBSY); add_tsu_reg(TSU_TEN); add_tsu_reg(TSU_POST1); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c56fcbb37066..38767d797914 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2279,7 +2279,7 @@ static int __init sxgbe_cmdline_opt(char *str) if (!str || !*str) return -EINVAL; while ((opt = strsep(&str, ",")) != NULL) { - if (!strncmp(opt, "eee_timer:", 6)) { + if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2fef7402233e..82391abbd42b 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -519,6 +519,7 @@ efx_copy_channel(const struct efx_channel *old_channel) if (tx_queue->channel) tx_queue->channel = channel; tx_queue->buffer = NULL; + tx_queue->cb_page = NULL; memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); } diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index af15a737c675..59b4f16896a8 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, u32 nic_major, u32 nic_minor, s32 correction) { + u32 sync_timestamp; ktime_t kt = { 0 }; + s16 delta; if (!(nic_major & 0x80000000)) { WARN_ON_ONCE(nic_major >> 16); - /* Use the top bits from the latest sync event. */ - nic_major &= 0xffff; - nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000); + + /* Medford provides 48 bits of timestamp, so we must get the top + * 16 bits from the timesync event state. + * + * We only have the lower 16 bits of the time now, but we do + * have a full resolution timestamp at some point in past. 
As + * long as the difference between the (real) now and the sync + * is less than 2^15, then we can reconstruct the difference + * between those two numbers using only the lower 16 bits of + * each. + * + * Put another way + * + * a - b = ((a mod k) - b) mod k + * + * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know + * (a mod k) and b, so can calculate the delta, a - b. + * + */ + sync_timestamp = last_sync_timestamp_major(efx); + + /* Because delta is s16 this does an implicit mask down to + * 16 bits which is what we need, assuming + * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that + * we can deal with the (unlikely) case of sync timestamps + * arriving from the future. + */ + delta = nic_major - sync_timestamp; + + /* Recover the fully specified time now, by applying the offset + * to the (fully specified) sync time. + */ + nic_major = sync_timestamp + delta; kt = ptp->nic_to_kernel_time(nic_major, nic_minor, correction); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 8d88e4083456..7b65e79d6ae9 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -936,7 +936,7 @@ static void smc911x_phy_configure(struct work_struct *work) if (lp->ctl_rspeed != 100) my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); - if (!lp->ctl_rfduplx) + if (!lp->ctl_rfduplx) my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); /* Update our Auto-Neg Advertisement Register */ diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index f9e6744d8fd6..8bd2912bf713 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -847,8 +847,8 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv, enum dma_data_direction dma_dir = page_pool_get_dma_dir(rx_ring->page_pool); - dma_handle = page_pool_get_dma_addr(page) + - NETSEC_RXBUF_HEADROOM; + dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom + + sizeof(*xdpf); dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len, dma_dir); tx_desc.buf_type = TYPE_NETSEC_XDP_TX; @@ -928,7 +928,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) struct netsec_rx_pkt_info rx_info; enum dma_data_direction dma_dir; struct bpf_prog *xdp_prog; - struct sk_buff *skb = NULL; u16 xdp_xmit = 0; u32 xdp_act = 0; int done = 0; @@ -942,7 +941,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); struct netsec_desc *desc = &dring->desc[idx]; struct page *page = virt_to_page(desc->addr); - u32 xdp_result = XDP_PASS; + u32 xdp_result = NETSEC_XDP_PASS; + struct sk_buff *skb = NULL; u16 pkt_len, desc_len; dma_addr_t dma_handle; struct xdp_buff xdp; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 6e984d5a729f..38d39c4b5ac8 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev, phy_ethtool_get_wol(ndev->phydev, wol); } +static int __ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + if (!ndev->phydev || + (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) + return -EOPNOTSUPP; + + return phy_ethtool_set_wol(ndev->phydev, wol); +} + static int ave_ethtool_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { int ret; - if (!ndev->phydev || - (wol->wolopts & (WAKE_ARP | 
WAKE_MAGICSECURE))) - return -EOPNOTSUPP; - - ret = phy_ethtool_set_wol(ndev->phydev, wol); + ret = __ave_ethtool_set_wol(ndev, wol); if (!ret) device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); @@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev) /* set wol initial state disabled */ wol.wolopts = 0; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (!phy_interface_is_rgmii(phydev)) phy_set_max_speed(phydev, SPEED_100); @@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev) ave_ethtool_get_wol(ndev, &wol); wol.wolopts = priv->wolopts; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (ndev->phydev) { ret = phy_resume(ndev->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 912bbb6515b2..bc82cdf36cc3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -364,9 +364,8 @@ struct dma_features { unsigned int arpoffsel; }; -/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ -#define BUF_SIZE_16KiB 16384 -/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ +/* RX Buffer size must be multiple of 4/8/16 bytes */ +#define BUF_SIZE_16KiB 16368 #define BUF_SIZE_8KiB 8188 #define BUF_SIZE_4KiB 4096 #define BUF_SIZE_2KiB 2048 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 306da8f6b7d5..33ce139f090f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) struct device *dev = dwmac->dev; const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; struct meson8b_dwmac_clk_configs *clk_configs; + static const struct clk_div_table div_table[] = { + { .div = 2, .val = 2, }, + { .div = 3, .val = 3, }, + { .div = 4, .val = 4, }, + { .div = 5, .val = 5, }, + { .div = 6, .val = 6, }, + { .div = 7, .val = 7, }, + }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); if (!clk_configs) @@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; - clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED | - CLK_DIVIDER_ALLOW_ZERO | - CLK_DIVIDER_ROUND_CLOSEST; + clk_configs->m250_div.table = div_table; + clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO | + CLK_DIVIDER_ROUND_CLOSEST; clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, &clk_divider_ops, &clk_configs->m250_div.hw); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 7ec895407d23..e0a5fe83d8e0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos) dll_lock = rgmii_readl(ethqos, SDC4_STATUS); if (dll_lock & SDC4_STATUS_DLL_LOCK) break; + retry--; } while (retry > 0); if (!retry) dev_err(ðqos->pdev->dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index e2e469c37a4d..9f9aaa47a8dc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1411,7 
+1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev) ret = rk_gmac_clk_init(plat_dat); if (ret) - return ret; + goto err_remove_config_dt; ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 6e47be63a43c..e9e0867ec139 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) /* default */ break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; break; case PHY_INTERFACE_MODE_RMII: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index a299da3971b4..102d637bc84a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) * rate, which then uses the auto-reparenting feature of the * clock driver, and enabling/disabling the clock. */ - if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(gmac->interface)) { clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); clk_prepare_enable(gmac->tx_clk); gmac->clk_enabled = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 3d69da112625..bc9b01376e80 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -24,6 +24,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw, struct net_device *dev) { + struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); int mtu = dev->mtu; @@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw, * Broadcom tags can look like invalid LLC/SNAP packets and cause the * hardware to truncate packets on reception. 
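* Likewise, when the normal (non-enhanced) descriptor format is in
* use, the RX path already accounts for the FCS itself, so ACS is
* kept disabled there as well to avoid the MAC stripping those
* bytes a second time.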
*/ - if (netdev_uses_dsa(dev)) + if (netdev_uses_dsa(dev) || !priv->plat->enh_desc) value &= ~GMAC_CONTROL_ACS; if (mtu > 1500) @@ -208,7 +209,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, reg++; } - while (reg <= perfect_addr_number) { + while (reg < perfect_addr_number) { writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); writel(0, ioaddr + GMAC_ADDR_LOW(reg)); reg++; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 9d08a934fe4f..ff751ab3d765 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -343,6 +343,8 @@ #define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) #define XGMAC_RxPBL GENMASK(21, 16) #define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RBSZ GENMASK(14, 1) +#define XGMAC_RBSZ_SHIFT 1 #define XGMAC_RXST BIT(0) #define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x))) #define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index f70ca5300b82..4af7271cea56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -489,7 +489,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) u32 value; value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); - value |= bfsize << 1; + value &= ~XGMAC_RBSZ; + value |= bfsize << XGMAC_RBSZ_SHIFT; writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f826365c979d..89a6ae2b17e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -45,7 +45,7 @@ #include "dwxgmac2.h" #include "hwif.h" -#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) +#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ @@ -105,6 +105,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS +static const struct net_device_ops stmmac_netdev_ops; static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif @@ -1108,7 +1109,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; - if (mtu >= BUF_SIZE_4KiB) + if (mtu >= BUF_SIZE_8KiB) + ret = BUF_SIZE_16KiB; + else if (mtu >= BUF_SIZE_4KiB) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; @@ -1292,19 +1295,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; int ret = -ENOMEM; - int bfsize = 0; int queue; int i; - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); - if (bfsize < 0) - bfsize = 0; - - if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); - - priv->dma_buf_sz = bfsize; - /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, "SKB addresses:\nskb\t\tskb data\tdma data\n"); @@ -1346,8 +1339,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) } } - buf_sz = bfsize; - return 0; err_init_rx_buffers: @@ -1502,10 +1493,8 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv) rx_q->dma_erx, rx_q->dma_rx_phy); kfree(rx_q->buf_pool); 
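/* page_pool_destroy() now tracks in-flight pages and defers the
 * final release itself, so the separate
 * page_pool_request_shutdown() call is no longer needed:
 */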
- if (rx_q->page_pool) { - page_pool_request_shutdown(rx_q->page_pool); + if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); - } } } @@ -2656,6 +2645,7 @@ static void stmmac_hw_teardown(struct net_device *dev) static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + int bfsize = 0; u32 chan; int ret; @@ -2675,7 +2665,16 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); + if (bfsize < 0) + bfsize = 0; + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); + + priv->dma_buf_sz = bfsize; + buf_sz = bfsize; + priv->rx_copybreak = STMMAC_RX_COPYBREAK; ret = alloc_dma_desc_resources(priv); @@ -3105,6 +3104,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3332,6 +3332,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3749,12 +3750,24 @@ static void stmmac_set_rx_mode(struct net_device *dev) static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); + int txfifosz = priv->plat->tx_fifo_size; + + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + txfifosz /= priv->plat->tx_queues_to_use; if (netif_running(dev)) { netdev_err(priv->dev, "must be stopped to change its MTU\n"); return -EBUSY; } + new_mtu = STMMAC_ALIGN(new_mtu); + + /* If condition true, FIFO is too small or MTU too large */ + if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) + return -EINVAL; + dev->mtu = new_mtu; netdev_update_features(dev); @@ -4163,10 +4176,40 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); +/* Use network device events to rename debugfs file entries. 
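+ * On NETDEV_CHANGENAME the per-device directory is moved with
+ * debugfs_rename() so that its path keeps tracking the interface
+ * name.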
+ */ +static int stmmac_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct stmmac_priv *priv = netdev_priv(dev); + + if (dev->netdev_ops != &stmmac_netdev_ops) + goto done; + + switch (event) { + case NETDEV_CHANGENAME: + if (priv->dbgfs_dir) + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, + priv->dbgfs_dir, + stmmac_fs_dir, + dev->name); + break; + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block stmmac_notifier = { + .notifier_call = stmmac_device_event, +}; + static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + rtnl_lock(); + /* Create per netdev entries */ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); @@ -4177,6 +4220,8 @@ static void stmmac_init_fs(struct net_device *dev) /* Entry to report the DMA HW features */ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, &stmmac_dma_cap_fops); + + rtnl_unlock(); } static void stmmac_exit_fs(struct net_device *dev) @@ -4684,14 +4729,14 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); -#ifdef CONFIG_DEBUG_FS - stmmac_exit_fs(ndev); -#endif stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(ndev); +#endif phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); @@ -4719,6 +4764,7 @@ int stmmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + u32 chan; if (!ndev || !netif_running(ndev)) return 0; @@ -4732,6 +4778,9 @@ int stmmac_suspend(struct device *dev) stmmac_disable_all_queues(priv); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); + /* Stop TX/RX DMA */ stmmac_stop_all_dma(priv); @@ -4907,6 +4956,7 @@ static int __init stmmac_init(void) /* Create debugfs main directory if it doesn't exist yet */ if (!stmmac_fs_dir) stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + register_netdevice_notifier(&stmmac_notifier); #endif return 0; @@ -4915,6 +4965,7 @@ static int __init stmmac_init(void) static void __exit stmmac_exit(void) { #ifdef CONFIG_DEBUG_FS + unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(stmmac_fs_dir); #endif } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 170c3a052b14..5150551c28be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -320,7 +320,7 @@ out: static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, struct device_node *np, struct device *dev) { - bool mdio = true; + bool mdio = !of_phy_is_fixed_link(np); static const struct of_device_id need_mdio_ids[] = { { .compatible = "snps,dwc-qos-ethernet-4.10" }, {}, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index ac3f658105c0..ba03a2d77434 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -80,7 +80,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, if (attr->max_size && (attr->max_size > size)) size = attr->max_size; - skb = 
netdev_alloc_skb_ip_align(priv->dev, size); + skb = netdev_alloc_skb(priv->dev, size); if (!skb) return NULL; @@ -244,6 +244,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + unsigned char *src = tpriv->packet->src; + unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@ -260,15 +262,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, goto out; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (tpriv->packet->dst) { - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (dst) { + if (!ether_addr_equal_unaligned(ehdr->h_dest, dst)) goto out; } if (tpriv->packet->sarc) { - if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) + if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest)) goto out; - } else if (tpriv->packet->src) { - if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) + } else if (src) { + if (!ether_addr_equal_unaligned(ehdr->h_source, src)) goto out; } @@ -624,6 +626,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv) return -EOPNOTSUPP; if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) return -EOPNOTSUPP; + if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) + return -EOPNOTSUPP; while (--tries) { /* We only need to check the mc_addr for collisions */ @@ -666,6 +670,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv) if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) + return -EOPNOTSUPP; if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) return -EOPNOTSUPP; @@ -710,7 +716,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb, struct ethhdr *ehdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) + if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr)) goto out; if (ehdr->h_proto != htons(ETH_P_PAUSE)) goto out; @@ -847,12 +853,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb, if (tpriv->vlan_id) { if (skb->vlan_proto != htons(proto)) goto out; - if (skb->vlan_tci != tpriv->vlan_id) + if (skb->vlan_tci != tpriv->vlan_id) { + /* Means filter did not work. 
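+ * Record the failure and complete immediately rather than letting
+ * the test sit waiting for the receive timeout.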
*/ + tpriv->ok = false; + complete(&tpriv->comp); goto out; + } } ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst)) goto out; ihdr = ip_hdr(skb); @@ -1287,16 +1297,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1363,7 +1376,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1408,16 +1422,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1489,7 +1506,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1542,7 +1560,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb, struct arphdr *ahdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src)) goto out; ahdr = arp_hdr(skb); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index f9a9a9d82233..1d135b02ea02 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -579,6 +579,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, { int ret = 0; + /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + switch (cls->command) { case FLOW_CLS_REPLACE: ret = tc_add_flow(priv, cls); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 834afca3a019..137632b09c72 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -22,6 +22,7 @@ config TI_DAVINCI_EMAC depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST select TI_DAVINCI_MDIO select PHYLIB + select GENERIC_ALLOCATOR ---help--- This driver supports TI's DaVinci Ethernet . 
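A recurring change in the stmmac selftest hunks above is the switch from ether_addr_equal() to ether_addr_equal_unaligned(). The sketch below illustrates the distinction; dest_matches() is a hypothetical helper invented for this example, while the comparison helpers and skb accessors are the usual kernel API:

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    /* ether_addr_equal() requires both MAC pointers to be u16-aligned;
     * ether_addr_equal_unaligned() makes no alignment assumption. An
     * Ethernet header reached through skb_mac_header() is not
     * guaranteed to keep that alignment, hence the _unaligned variant
     * in the validation callbacks above (illustrative sketch only).
     */
    static bool dest_matches(const struct sk_buff *skb, const u8 *want)
    {
    	const struct ethhdr *ehdr = (struct ethhdr *)skb_mac_header(skb);

    	return ether_addr_equal_unaligned(ehdr->h_dest, want);
    }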
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f298d714efd6..d7a953c647b4 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -890,8 +890,8 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) { struct cpsw_common *cpsw = dev_id; - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); writel(0, &cpsw->wr_regs->rx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); if (cpsw->quirk_irq) { disable_irq_nosync(cpsw->irqs_table[0]); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 84025dcc78d5..e7c24396933e 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -779,6 +779,7 @@ void cpsw_ale_start(struct cpsw_ale *ale) void cpsw_ale_stop(struct cpsw_ale *ale) { del_timer_sync(&ale->timer); + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); } @@ -862,6 +863,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; } + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); return ale; } diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 37ba708ac781..6614fa3089b2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si) struct cpdma_chan *chan = si->chan; struct cpdma_ctlr *ctlr = chan->ctlr; int len = si->len; - int swlen = len; struct cpdma_desc __iomem *desc; dma_addr_t buffer; u32 mode; @@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si) if (si->data_dma) { buffer = si->data_dma; dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir); - swlen |= CPDMA_DMA_EXT_MAP; } else { buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir); ret = dma_mapping_error(ctlr->dev, buffer); @@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si) writel_relaxed(mode | len, &desc->hw_mode); writel_relaxed((uintptr_t)si->token, &desc->sw_token); writel_relaxed(buffer, &desc->sw_buffer); - writel_relaxed(swlen, &desc->sw_len); + writel_relaxed(si->data_dma ? 
len | CPDMA_DMA_EXT_MAP : len, + &desc->sw_len); desc_read(desc, sw_len); __cpdma_chan_submit(chan, desc); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 276292bca334..53fb8141f1a6 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -375,10 +375,14 @@ struct temac_local { int tx_bd_next; int tx_bd_tail; int rx_bd_ci; + int rx_bd_tail; /* DMA channel control setup */ u32 tx_chnl_ctrl; u32 rx_chnl_ctrl; + u8 coalesce_count_rx; + + struct delayed_work restart_work; }; /* Wrappers for temac_ior()/temac_iow() function pointers above */ diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 21c1b4322ea7..eb480204cdbe 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -367,6 +368,8 @@ static int temac_dma_bd_init(struct net_device *ndev) skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) + goto out; lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr); lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); @@ -387,12 +390,13 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->tx_bd_next = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; + lp->rx_bd_tail = RX_BD_NUM - 1; /* Enable RX DMA transfers */ wmb(); lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); lp->dma_out(lp, RX_TAILDESC_PTR, - lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail)); /* Prepare for TX DMA transfer */ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); @@ -788,6 +792,9 @@ static void temac_start_xmit_done(struct net_device *ndev) stat = be32_to_cpu(cur_p->app0); } + /* Matches barrier in temac_start_xmit */ + smp_mb(); + netif_wake_queue(ndev); } @@ -830,9 +837,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; if (temac_check_tx_bd_space(lp, num_frag + 1)) { - if (!netif_queue_stopped(ndev)) - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; + if (netif_queue_stopped(ndev)) + return NETDEV_TX_BUSY; + + netif_stop_queue(ndev); + + /* Matches barrier in temac_start_xmit_done */ + smp_mb(); + + /* Space might have just been freed - check again */ + if (temac_check_tx_bd_space(lp, num_frag)) + return NETDEV_TX_BUSY; + + netif_wake_queue(ndev); } cur_p->app0 = 0; @@ -850,12 +867,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb_headlen(skb), DMA_TO_DEVICE); cur_p->len = cpu_to_be32(skb_headlen(skb)); + if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) { + dev_kfree_skb_any(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } cur_p->phys = cpu_to_be32(skb_dma_addr); ptr_to_txbd((void *)skb, cur_p); for (ii = 0; ii < num_frag; ii++) { - lp->tx_bd_tail++; - if (lp->tx_bd_tail >= TX_BD_NUM) + if (++lp->tx_bd_tail >= TX_BD_NUM) lp->tx_bd_tail = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; @@ -863,6 +884,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_frag_address(frag), skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) { + if (--lp->tx_bd_tail < 0) + lp->tx_bd_tail = TX_BD_NUM - 1; + cur_p = 
&lp->tx_bd_v[lp->tx_bd_tail]; + while (--ii >= 0) { + --frag; + dma_unmap_single(ndev->dev.parent, + be32_to_cpu(cur_p->phys), + skb_frag_size(frag), + DMA_TO_DEVICE); + if (--lp->tx_bd_tail < 0) + lp->tx_bd_tail = TX_BD_NUM - 1; + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + } + dma_unmap_single(ndev->dev.parent, + be32_to_cpu(cur_p->phys), + skb_headlen(skb), DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } cur_p->phys = cpu_to_be32(skb_dma_addr); cur_p->len = cpu_to_be32(skb_frag_size(frag)); cur_p->app0 = 0; @@ -884,31 +926,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +static int ll_temac_recv_buffers_available(struct temac_local *lp) +{ + int available; + + if (!lp->rx_skb[lp->rx_bd_ci]) + return 0; + available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; + if (available <= 0) + available += RX_BD_NUM; + return available; +} static void ll_temac_recv(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); - struct sk_buff *skb, *new_skb; - unsigned int bdstat; - struct cdmac_bd *cur_p; - dma_addr_t tail_p, skb_dma_addr; - int length; unsigned long flags; + int rx_bd; + bool update_tail = false; spin_lock_irqsave(&lp->rx_lock, flags); - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; - cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; + /* Process all received buffers, passing them on network + * stack. After this, the buffer descriptors will be in an + * un-allocated stage, where no skb is allocated for it, and + * they are therefore not available for TEMAC/DMA. + */ + do { + struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci]; + struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci]; + unsigned int bdstat = be32_to_cpu(bd->app0); + int length; - bdstat = be32_to_cpu(cur_p->app0); - while ((bdstat & STS_CTRL_APP0_CMPLT)) { + /* While this should not normally happen, we can end + * here when GFP_ATOMIC allocations fail, and we + * therefore have un-allocated buffers. + */ + if (!skb) + break; - skb = lp->rx_skb[lp->rx_bd_ci]; - length = be32_to_cpu(cur_p->app4) & 0x3FFF; + /* Loop over all completed buffer descriptors */ + if (!(bdstat & STS_CTRL_APP0_CMPLT)) + break; - dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), + dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys), XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + /* The buffer is not valid for DMA anymore */ + bd->phys = 0; + bd->len = 0; + length = be32_to_cpu(bd->app4) & 0x3FFF; skb_put(skb, length); skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); @@ -923,43 +990,102 @@ static void ll_temac_recv(struct net_device *ndev) * (back) for proper IP checksum byte order * (be16). */ - skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF); + skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); + /* The skb buffer is now owned by network stack above */ + lp->rx_skb[lp->rx_bd_ci] = NULL; ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; - new_skb = netdev_alloc_skb_ip_align(ndev, - XTE_MAX_JUMBO_FRAME_SIZE); - if (!new_skb) { - spin_unlock_irqrestore(&lp->rx_lock, flags); - return; + rx_bd = lp->rx_bd_ci; + if (++lp->rx_bd_ci >= RX_BD_NUM) + lp->rx_bd_ci = 0; + } while (rx_bd != lp->rx_bd_tail); + + /* DMA operations will halt when the last buffer descriptor is + * processed (ie. the one pointed to by RX_TAILDESC_PTR). + * When that happens, no more interrupt events will be + * generated. 
No IRQ_COAL or IRQ_DLY, and not even an + * IRQ_ERR. To avoid stalling, we schedule a delayed work + * when there is a potential risk of that happening. The work + * will call this function, and thus re-schedule itself until + * enough buffers are available again. + */ + if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx) + schedule_delayed_work(&lp->restart_work, HZ / 1000); + + /* Allocate new buffers for those buffer descriptors that were + * passed to network stack. Note that GFP_ATOMIC allocations + * can fail (e.g. when a larger burst of GFP_ATOMIC + * allocations occurs), so while we try to allocate all + * buffers in the same interrupt where they were processed, we + * continue with what we could get in case of allocation + * failure. Allocation of remaining buffers will be retried + * in following calls. + */ + while (1) { + struct sk_buff *skb; + struct cdmac_bd *bd; + dma_addr_t skb_dma_addr; + + rx_bd = lp->rx_bd_tail + 1; + if (rx_bd >= RX_BD_NUM) + rx_bd = 0; + bd = &lp->rx_bd_v[rx_bd]; + + if (bd->phys) + break; /* All skb's allocated */ + + skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); + if (!skb) { + dev_warn(&ndev->dev, "skb alloc failed\n"); + break; } - cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); - skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data, + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); - cur_p->phys = cpu_to_be32(skb_dma_addr); - cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); - lp->rx_skb[lp->rx_bd_ci] = new_skb; + if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, + skb_dma_addr))) { + dev_kfree_skb_any(skb); + break; + } - lp->rx_bd_ci++; - if (lp->rx_bd_ci >= RX_BD_NUM) - lp->rx_bd_ci = 0; + bd->phys = cpu_to_be32(skb_dma_addr); + bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); + bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); + lp->rx_skb[rx_bd] = skb; - cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; - bdstat = be32_to_cpu(cur_p->app0); + lp->rx_bd_tail = rx_bd; + update_tail = true; + } + + /* Move tail pointer when buffers have been allocated */ + if (update_tail) { + lp->dma_out(lp, RX_TAILDESC_PTR, + lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail); } - lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); spin_unlock_irqrestore(&lp->rx_lock, flags); } +/* Function scheduled to ensure a restart in case of DMA halt + * condition caused by running out of buffer descriptors. 
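+ * It is scheduled from ll_temac_recv() whenever fewer descriptors
+ * remain available to the hardware than the RX coalesce count
+ * (ll_temac_recv_buffers_available() counts them, handling
+ * wrap-around: e.g. with RX_BD_NUM = 64, rx_bd_ci = 5 and
+ * rx_bd_tail = 3 it returns 1 + 3 - 5 + 64 = 63).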
+ */ +static void ll_temac_restart_work_func(struct work_struct *work) +{ + struct temac_local *lp = container_of(work, struct temac_local, + restart_work.work); + struct net_device *ndev = lp->ndev; + + ll_temac_recv(ndev); +} + static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) { struct net_device *ndev = _ndev; @@ -1052,6 +1178,8 @@ static int temac_stop(struct net_device *ndev) dev_dbg(&ndev->dev, "temac_close()\n"); + cancel_delayed_work_sync(&lp->restart_work); + free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); @@ -1184,6 +1312,7 @@ static int temac_probe(struct platform_device *pdev) lp->dev = &pdev->dev; lp->options = XTE_OPTION_DEFAULTS; spin_lock_init(&lp->rx_lock); + INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); /* Setup mutex for synchronization of indirect register access */ if (pdata) { @@ -1290,6 +1419,7 @@ static int temac_probe(struct platform_device *pdev) */ lp->tx_chnl_ctrl = 0x10220000; lp->rx_chnl_ctrl = 0xff070000; + lp->coalesce_count_rx = 0x07; /* Finished with the DMA node; drop the reference */ of_node_put(dma_np); @@ -1321,11 +1451,14 @@ static int temac_probe(struct platform_device *pdev) (pdata->tx_irq_count << 16); else lp->tx_chnl_ctrl = 0x10220000; - if (pdata->rx_irq_timeout || pdata->rx_irq_count) + if (pdata->rx_irq_timeout || pdata->rx_irq_count) { lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) | (pdata->rx_irq_count << 16); - else + lp->coalesce_count_rx = pdata->rx_irq_count; + } else { lp->rx_chnl_ctrl = 0xff070000; + lp->coalesce_count_rx = 0x07; + } } /* Error handle returned DMA RX and TX interrupts */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 676006f32f91..479325eeaf8a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1790,10 +1790,6 @@ static int axienet_probe(struct platform_device *pdev) /* Check for these resources directly on the Ethernet node. 
*/ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "unable to get DMA memory resource\n"); - goto free_netdev; - } lp->dma_regs = devm_ioremap_resource(&pdev->dev, res); lp->rx_irq = platform_get_irq(pdev, 1); lp->tx_irq = platform_get_irq(pdev, 0); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 6fc04ffb22c2..d4e095d0e8f1 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -517,25 +517,14 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location, return ret; } -static int ixp4xx_mdio_register(void) +static int ixp4xx_mdio_register(struct eth_regs __iomem *regs) { int err; if (!(mdio_bus = mdiobus_alloc())) return -ENOMEM; - if (cpu_is_ixp43x()) { - /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */ - if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) - return -ENODEV; - mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; - } else { - /* All MII PHY accesses use NPE-B Ethernet registers */ - if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) - return -ENODEV; - mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; - } - + mdio_regs = regs; __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control); spin_lock_init(&mdio_lock); mdio_bus->name = "IXP4xx MII Bus"; @@ -1374,7 +1363,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static int eth_init_one(struct platform_device *pdev) +static int ixp4xx_eth_probe(struct platform_device *pdev) { struct port *port; struct net_device *dev; @@ -1384,7 +1373,7 @@ static int eth_init_one(struct platform_device *pdev) char phy_id[MII_BUS_ID_SIZE + 3]; int err; - if (!(dev = alloc_etherdev(sizeof(struct port)))) + if (!(dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct port)))) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); @@ -1394,20 +1383,51 @@ switch (port->id) { case IXP4XX_ETH_NPEA: + /* If the MDIO bus is not up yet, defer probe */ + if (!mdio_bus) + return -EPROBE_DEFER; port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT; regs_phys = IXP4XX_EthA_BASE_PHYS; break; case IXP4XX_ETH_NPEB: + /* + * On all except IXP43x, NPE-B is used for the MDIO bus. + * If there is no NPE-B in the feature set, bail out, else + * register the MDIO bus. + */ + if (!cpu_is_ixp43x()) { + if (!(ixp4xx_read_feature_bits() & + IXP4XX_FEATURE_NPEB_ETH0)) + return -ENODEV; + /* Else register the MDIO bus on NPE-B */ + if ((err = ixp4xx_mdio_register(IXP4XX_EthB_BASE_VIRT))) + return err; + } + if (!mdio_bus) + return -EPROBE_DEFER; port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; regs_phys = IXP4XX_EthB_BASE_PHYS; break; case IXP4XX_ETH_NPEC: + /* + * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access, + * if there is no NPE-C, no bus, nothing works, so bail out. 
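+ * Ports that probe before the MDIO bus has been registered
+ * return -EPROBE_DEFER instead, and the driver core retries
+ * them once the bus is up.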
+ */ + if (cpu_is_ixp43x()) { + if (!(ixp4xx_read_feature_bits() & + IXP4XX_FEATURE_NPEC_ETH)) + return -ENODEV; + /* Else register the MDIO bus on NPE-C */ + if ((err = ixp4xx_mdio_register(IXP4XX_EthC_BASE_VIRT))) + return err; + } + if (!mdio_bus) + return -EPROBE_DEFER; port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; regs_phys = IXP4XX_EthC_BASE_PHYS; break; default: - err = -ENODEV; - goto err_free; + return -ENODEV; } dev->netdev_ops = &ixp4xx_netdev_ops; @@ -1416,10 +1436,8 @@ static int eth_init_one(struct platform_device *pdev) netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT); - if (!(port->npe = npe_request(NPE_ID(port->id)))) { - err = -EIO; - goto err_free; - } + if (!(port->npe = npe_request(NPE_ID(port->id)))) + return -EIO; port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); if (!port->mem_res) { @@ -1465,12 +1483,10 @@ err_free_mem: release_resource(port->mem_res); err_npe_rel: npe_release(port->npe); -err_free: - free_netdev(dev); return err; } -static int eth_remove_one(struct platform_device *pdev) +static int ixp4xx_eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct phy_device *phydev = dev->phydev; @@ -1478,45 +1494,21 @@ static int eth_remove_one(struct platform_device *pdev) unregister_netdev(dev); phy_disconnect(phydev); + ixp4xx_mdio_remove(); npe_port_tab[NPE_ID(port->id)] = NULL; npe_release(port->npe); release_resource(port->mem_res); - free_netdev(dev); return 0; } static struct platform_driver ixp4xx_eth_driver = { .driver.name = DRV_NAME, - .probe = eth_init_one, - .remove = eth_remove_one, + .probe = ixp4xx_eth_probe, + .remove = ixp4xx_eth_remove, }; - -static int __init eth_init_module(void) -{ - int err; - - /* - * FIXME: we bail out on device tree boot but this really needs - * to be fixed in a nicer way: this registers the MDIO bus before - * even matching the driver infrastructure, we should only probe - * detected hardware. 
- */ - if (of_have_populated_dt()) - return -ENODEV; - if ((err = ixp4xx_mdio_register())) - return err; - return platform_driver_register(&ixp4xx_eth_driver); -} - -static void __exit eth_cleanup_module(void) -{ - platform_driver_unregister(&ixp4xx_eth_driver); - ixp4xx_mdio_remove(); -} +module_platform_driver(ixp4xx_eth_driver); MODULE_AUTHOR("Krzysztof Halasa"); MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:ixp4xx_eth"); -module_init(eth_init_module); -module_exit(eth_cleanup_module); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b517c1af9de0..91a1059517f5 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device) /* create platform_device */ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource, ARRAY_SIZE(fjes_resource)); + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + device->driver_data = plat_dev; return 0; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 3ab24fdccd3b..730ab57201bd 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -853,7 +853,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, if (dst) return dst; } - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, + NULL); + if (IS_ERR(dst)) { netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); return ERR_PTR(-ENETUNREACH); } @@ -1843,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head) if (!net_eq(dev_net(geneve->dev), net)) unregister_netdevice_queue(geneve->dev, head); } - - WARN_ON_ONCE(!list_empty(&gn->sock_list)); } static void __net_exit geneve_exit_batch_net(struct list_head *net_list) @@ -1859,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list) /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) { + const struct geneve_net *gn = net_generic(net, geneve_net_id); + + WARN_ON_ONCE(!list_empty(&gn->sock_list)); + } } static struct pernet_operations geneve_net_ops = { diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index ecfe26215935..3a53d222bfcc 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -38,7 +38,6 @@ struct pdp_ctx { struct hlist_node hlist_addr; union { - u64 tid; struct { u64 tid; u16 flow; @@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(&rt->dst); } - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { @@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev) } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); -static void gtp_hashtable_free(struct gtp_dev *gtp); static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); +static void gtp_destructor(struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); +} + static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; - if (!data[IFLA_GTP_PDP_HASHSIZE]) + 
if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; - else + } else { hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); + if (!hashsize) + hashsize = 1024; + } err = gtp_hashtable_new(gtp, hashsize); if (err < 0) @@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); return 0; out_hashtable: - gtp_hashtable_free(gtp); + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); out_encap: gtp_encap_disable(gtp); return err; @@ -693,8 +704,13 @@ out_encap: static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); + struct pdp_ctx *pctx; + int i; + + for (i = 0; i < gtp->hash_size; i++) + hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); - gtp_hashtable_free(gtp); list_del_rcu(>p->list); unregister_netdevice_queue(dev, head); } @@ -751,12 +767,12 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) int i; gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (gtp->addr_hash == NULL) return -ENOMEM; gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (gtp->tid_hash == NULL) goto err1; @@ -772,20 +788,6 @@ err1: return -ENOMEM; } -static void gtp_hashtable_free(struct gtp_dev *gtp) -{ - struct pdp_ctx *pctx; - int i; - - for (i = 0; i < gtp->hash_size; i++) - hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) - pdp_context_delete(pctx); - - synchronize_rcu(); - kfree(gtp->addr_hash); - kfree(gtp->tid_hash); -} - static struct sock *gtp_encap_enable_socket(int fd, int type, struct gtp_dev *gtp) { @@ -802,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, return NULL; } - if (sock->sk->sk_protocol != IPPROTO_UDP) { + sk = sock->sk; + if (sk->sk_protocol != IPPROTO_UDP || + sk->sk_type != SOCK_DGRAM || + (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } - lock_sock(sock->sk); - if (sock->sk->sk_user_data) { + lock_sock(sk); + if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); - goto out_sock; + goto out_rel_sock; } - sk = sock->sk; sock_hold(sk); tuncfg.sk_user_data = gtp; @@ -824,8 +828,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); -out_sock: +out_rel_sock: release_sock(sock->sk); +out_sock: sockfd_put(sock); return sk; } @@ -926,24 +931,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) } } -static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) +static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { + struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; - struct pdp_ctx *pctx; + unsigned int version; bool found = false; __be32 ms_addr; ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + version = nla_get_u32(info->attrs[GTPA_VERSION]); - hlist_for_each_entry_rcu(pctx, >p->addr_hash[hash_ms], hlist_addr) { - if (pctx->ms_addr_ip4.s_addr == ms_addr) { - found = true; - break; - } - } + pctx = ipv4_pdp_find(gtp, ms_addr); + if (pctx) + found = true; + if (version == GTP_V0) + pctx_tid = 
gtp0_pdp_find(gtp, + nla_get_u64(info->attrs[GTPA_TID])); + else if (version == GTP_V1) + pctx_tid = gtp1_pdp_find(gtp, + nla_get_u32(info->attrs[GTPA_I_TEI])); + if (pctx_tid) + found = true; if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) @@ -951,6 +963,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; + if (pctx && pctx_tid) + return -EEXIST; + if (!pctx) + pctx = pctx_tid; + ipv4_pdp_fill(pctx, info); if (pctx->gtp_version == GTP_V0) @@ -1074,7 +1091,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } - err = ipv4_pdp_add(gtp, sk, info); + err = gtp_pdp_add(gtp, sk, info); out_unlock: rcu_read_unlock(); @@ -1232,43 +1249,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, struct netlink_callback *cb) { struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); - struct gtp_net *gn = net_generic(net, gtp_net_id); - unsigned long tid = cb->args[1]; - int i, k = cb->args[0], ret; struct pdp_ctx *pctx; + struct gtp_net *gn; + + gn = net_generic(net, gtp_net_id); if (cb->args[4]) return 0; + rcu_read_lock(); list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { if (last_gtp && last_gtp != gtp) continue; else last_gtp = NULL; - for (i = k; i < gtp->hash_size; i++) { - hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) { - if (tid && tid != pctx->u.tid) - continue; - else - tid = 0; - - ret = gtp_genl_fill_info(skb, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - cb->nlh->nlmsg_type, pctx); - if (ret < 0) { + for (i = bucket; i < gtp->hash_size; i++) { + j = 0; + hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], + hlist_tid) { + if (j >= skip && + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; - cb->args[1] = pctx->u.tid; + cb->args[1] = j; cb->args[2] = (unsigned long)gtp; goto out; } + j++; } + skip = 0; } + bucket = 0; } cb->args[4] = 1; out: + rcu_read_unlock(); return skb->len; } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 23281aeeb222..71d6629e65c9 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!sp) return; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c5bfa19ddb93..deef14215110 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty) { struct mkiss *ax; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); ax = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!ax) return; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index fb547f37af1e..e74f2d1def80 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -169,7 +169,6 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; - u16 rx_table[ITAB_NUM]; }; @@ -938,6 +937,8 @@ struct net_device_context { u32 tx_table[VRSS_SEND_TAB_SIZE]; + u16 rx_table[ITAB_NUM]; + /* Ethtool settings */ 
u8 duplex; u32 speed; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index eab83e71567a..6c0732fc8c25 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -99,7 +99,7 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; - net_device->tx_disable = false; + net_device->tx_disable = true; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 963509add611..ca16ae8c8332 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash( else if (flow.basic.n_proto == htons(ETH_P_IPV6)) hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); else - hash = 0; + return 0; - skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); + __skb_set_sw_hash(skb, hash, false); } return hash; @@ -795,8 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, skb->protocol == htons(ETH_P_IP)) netvsc_comp_ipcsum(skb); - /* Do L4 checksum offload if enabled and present. - */ + /* Do L4 checksum offload if enabled and present. */ if (csum_info && (net->features & NETIF_F_RXCSUM)) { if (csum_info->receive.tcp_checksum_succeeded || csum_info->receive.udp_checksum_succeeded) @@ -974,6 +973,7 @@ static int netvsc_attach(struct net_device *ndev, } /* In any case device is now ready */ + nvdev->tx_disable = false; netif_device_attach(ndev); /* Note: enable and attach happen when sub-channels setup */ @@ -1659,7 +1659,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - indir[i] = rndis_dev->rx_table[i]; + indir[i] = ndc->rx_table[i]; } if (key) @@ -1689,7 +1689,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, return -EINVAL; for (i = 0; i < ITAB_NUM; i++) - rndis_dev->rx_table[i] = indir[i]; + ndc->rx_table[i] = indir[i]; } if (!key) { @@ -2351,6 +2351,8 @@ static int netvsc_probe(struct hv_device *dev, else net->max_mtu = ETH_DATA_LEN; + nvdev->tx_disable = false; + ret = register_netdevice(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index abaf8156d19d..b9e44bb22289 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -767,6 +767,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev, const u8 *rss_key, u16 flag) { struct net_device *ndev = rdev->ndev; + struct net_device_context *ndc = netdev_priv(ndev); struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; @@ -806,7 +807,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = rdev->rx_table[i]; + itab[i] = ndc->rx_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset); @@ -1165,6 +1166,9 @@ int rndis_set_subchannel(struct net_device *ndev, wait_event(nvdev->subchan_open, atomic_read(&nvdev->open_chn) == nvdev->num_chn); + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + ndev_ctx->tx_table[i] = i % nvdev->num_chn; + /* ignore failures from setting rss parameters, still have channels */ if (dev_info) rndis_filter_set_rss_param(rdev, dev_info->rss_key); @@ -1174,9 +1178,6 @@ int 
rndis_set_subchannel(struct net_device *ndev, netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); - for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) - ndev_ctx->tx_table[i] = i % nvdev->num_chn; - return 0; } @@ -1305,6 +1306,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); + struct net_device_context *ndc = netdev_priv(net); struct netvsc_device *net_device; struct rndis_device *rndis_device; struct ndis_recv_scale_cap rsscap; @@ -1391,9 +1393,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, /* We will use the given number of channels if available. */ net_device->num_chn = min(net_device->max_chn, device_info->num_chn); - for (i = 0; i < ITAB_NUM; i++) - rndis_device->rx_table[i] = ethtool_rxfh_indir_default( + if (!netif_is_rxfh_configured(net)) { + for (i = 0; i < ITAB_NUM; i++) + ndc->rx_table[i] = ethtool_rxfh_indir_default( i, net_device->num_chn); + } atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1432,8 +1436,6 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(net_dev, rndis_dev); - net_dev->extension = NULL; - netvsc_device_remove(dev); } diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 242b9b0943f8..7fe306e76281 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { - skb->tc_redirected = 0; + skb->redirected = 0; skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); @@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp) rcu_read_unlock(); skb->skb_iif = txp->dev->ifindex; - if (!skb->tc_from_ingress) { + if (!skb->from_ingress) { dev_queue_xmit(skb); } else { skb_pull_rcsum(skb, skb->mac_len); @@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) txp->rx_bytes += skb->len; u64_stats_update_end(&txp->rsync); - if (!skb->tc_redirected || !skb->skb_iif) { + if (!skb->redirected || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 30cd0c4f0be0..8801d093135c 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work) } if (dev) dev_put(dev); + cond_resched(); } } @@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb) struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; - /* In this mode we dont care about multicast and broadcast traffic */ - if (is_multicast_ether_addr(ethh->h_dest)) { - pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n", - ntohs(skb->protocol)); - kfree_skb(skb); - goto out; - } - /* The ipvlan is a pseudo-L2 device, so the packets that we receive * will have L2; which need to discarded and processed further * in the net-ns of the main-device. 
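* Packets handed to us without an L2 header (for instance from an
* L3 tunnel device) have nothing to inspect here, so the multicast
* and broadcast check below must only run once the mac header is
* known to be set.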
*/ if (skb_mac_header_was_set(skb)) { + /* In this mode we dont care about + * multicast and broadcast traffic */ + if (is_multicast_ether_addr(ethh->h_dest)) { + pr_debug_ratelimited( + "Dropped {multi|broad}cast of type=[%x]\n", + ntohs(skb->protocol)); + kfree_skb(skb); + goto out; + } + skb_pull(skb, sizeof(*ethh)); skb->mac_header = (typeof(skb->mac_header))~0U; skb_reset_network_header(skb); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index ba3dfac1d904..b805abc9ec3b 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev) static int ipvlan_open(struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); - struct net_device *phy_dev = ipvlan->phy_dev; struct ipvl_addr *addr; if (ipvlan->port->mode == IPVLAN_MODE_L3 || @@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev) ipvlan_ht_addr_add(ipvlan, addr); rcu_read_unlock(); - return dev_uc_add(phy_dev, phy_dev->dev_addr); + return 0; } static int ipvlan_stop(struct net_device *dev) @@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_unsync(phy_dev, dev); dev_mc_unsync(phy_dev, dev); - dev_uc_del(phy_dev, phy_dev->dev_addr); - rcu_read_lock(); list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) ipvlan_ht_addr_del(addr); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index afd8b2a08245..32c627702ac5 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -2882,6 +2883,11 @@ static void macsec_dev_set_rx_mode(struct net_device *dev) dev_uc_sync(real_dev, dev); } +static sci_t dev_to_sci(struct net_device *dev, __be16 port) +{ + return make_sci(dev->dev_addr, port); +} + static int macsec_set_mac_address(struct net_device *dev, void *p) { struct macsec_dev *macsec = macsec_priv(dev); @@ -2903,6 +2909,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p) out: ether_addr_copy(dev->dev_addr, addr->sa_data); + macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES); return 0; } @@ -2977,6 +2984,7 @@ static const struct device_type macsec_type = { static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, + [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, @@ -3176,11 +3184,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci) return false; } -static sci_t dev_to_sci(struct net_device *dev, __be16 port) -{ - return make_sci(dev->dev_addr, port); -} - static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) { struct macsec_dev *macsec = macsec_priv(dev); @@ -3234,6 +3237,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; + if (real_dev->type != ARPHRD_ETHER) + return -EINVAL; dev->priv_flags |= IFF_MACSEC; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 34fc59bd1e20..26f6be4796c7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w) if (src) dev_put(src->dev); consume_skb(skb); + + cond_resched(); } } @@ -359,10 +361,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, } spin_unlock(&port->bc_queue.lock); + 
schedule_work(&port->bc_work); + if (err) goto free_nskb; - schedule_work(&port->bc_work); return; free_nskb: @@ -512,10 +515,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { - const struct ethhdr *eth = (void *)skb->data; + const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 44c2d857a7fa..91b302f0192f 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -73,7 +73,7 @@ static const struct file_operations nsim_dev_take_snapshot_fops = { static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) { - char dev_ddir_name[16]; + char dev_ddir_name[sizeof(DRV_NAME) + 10]; sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id); nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir); diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 3b29d381116f..975789d9349d 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = { .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, .read_status = aqr_read_status, + .suspend = aqr107_suspend, + .resume = aqr107_resume, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR106), diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 23f1958ba6ad..459fb2069c7e 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, + .name = "Broadcom BCM63XX (2)", /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 937d0059e8ac..5e956089bf52 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -26,18 +26,13 @@ MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. 
Rozycki"); MODULE_LICENSE("GPL"); +static int bcm54xx_config_clock_delay(struct phy_device *phydev); + static int bcm54210e_config_init(struct phy_device *phydev) { int val; - val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); - val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; - val |= MII_BCM54XX_AUXCTL_MISC_WREN; - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val); - - val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL); - val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN; - bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val); + bcm54xx_config_clock_delay(phydev); if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) { val = phy_read(phydev, MII_CTRL1000); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 37fceaf9fa10..31a559513362 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -25,7 +25,8 @@ #define DP83867_CFG3 0x1e /* Extended Registers */ -#define DP83867_CFG4 0x0031 +#define DP83867_FLD_THR_CFG 0x002e +#define DP83867_CFG4 0x0031 #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6)) #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5) #define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5) @@ -74,12 +75,14 @@ #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0) #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0 #define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2) +#define DP83867_STRAP_STS2_STRAP_FLD BIT(10) /* PHY CTRL bits */ #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 #define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03 #define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14) #define DP83867_PHYCR_RESERVED_MASK BIT(11) +#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10) /* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf @@ -95,9 +98,16 @@ #define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8) #define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8 +/* CFG3 bits */ +#define DP83867_CFG3_INT_OE BIT(7) +#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9) + /* CFG4 bits */ #define DP83867_CFG4_PORT_MIRROR_EN BIT(0) +/* FLD_THR_CFG */ +#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7 + enum { DP83867_PORT_MIRROING_KEEP, DP83867_PORT_MIRROING_EN, @@ -313,6 +323,20 @@ static int dp83867_config_init(struct phy_device *phydev) phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, BIT(7)); + bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2); + if (bs & DP83867_STRAP_STS2_STRAP_FLD) { + /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will + * be set to 0x2. This may causes the PHY link to be unstable - + * the default value 0x1 need to be restored. 
+ */ + ret = phy_modify_mmd(phydev, DP83867_DEVADDR, + DP83867_FLD_THR_CFG, + DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK, + 0x1); + if (ret) + return ret; + } + if (phy_interface_is_rgmii(phydev)) { val = phy_read(phydev, MII_DP83867_PHYCTRL); if (val < 0) @@ -410,12 +434,13 @@ static int dp83867_config_init(struct phy_device *phydev) phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val); } + val = phy_read(phydev, DP83867_CFG3); /* Enable Interrupt output INT_OE in CFG3 register */ - if (phy_interrupt_is_valid(phydev)) { - val = phy_read(phydev, DP83867_CFG3); - val |= BIT(7); - phy_write(phydev, DP83867_CFG3, val); - } + if (phy_interrupt_is_valid(phydev)) + val |= DP83867_CFG3_INT_OE; + + val |= DP83867_CFG3_ROBUST_AUTO_MDIX; + phy_write(phydev, DP83867_CFG3, val); if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP) dp83867_config_port_mirroring(phydev); @@ -449,7 +474,12 @@ static int dp83867_phy_reset(struct phy_device *phydev) usleep_range(10, 20); - return 0; + /* After reset FORCE_LINK_GOOD bit is set. Although the + * default value should be unset. Disable FORCE_LINK_GOOD + * for the phy to work properly. + */ + return phy_modify(phydev, MII_DP83867_PHYCTRL, + DP83867_PHYCR_FORCE_LINK_GOOD, 0); } static struct phy_driver dp83867_driver[] = { diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 7c5265fd2b94..4190f9ed5313 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -212,16 +212,13 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np) */ gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0, GPIOD_IN, "mdio"); - of_node_put(fixed_link_node); - if (IS_ERR(gpiod)) { - if (PTR_ERR(gpiod) == -EPROBE_DEFER) - return gpiod; - + if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) { if (PTR_ERR(gpiod) != -ENOENT) pr_err("error getting GPIO for fixed link %pOF, proceed without\n", fixed_link_node); gpiod = NULL; } + of_node_put(fixed_link_node); return gpiod; } diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index 7e9975d25066..f1ded03f0229 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c @@ -178,6 +178,23 @@ static int iproc_mdio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +int iproc_mdio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); + + /* restore the mii clock configuration */ + iproc_mdio_config_clk(priv->base); + + return 0; +} + +static const struct dev_pm_ops iproc_mdio_pm_ops = { + .resume = iproc_mdio_resume +}; +#endif /* CONFIG_PM_SLEEP */ + static const struct of_device_id iproc_mdio_of_match[] = { { .compatible = "brcm,iproc-mdio", }, { /* sentinel */ }, @@ -188,6 +205,9 @@ static struct platform_driver iproc_mdio_driver = { .driver = { .name = "iproc-mdio", .of_match_table = iproc_mdio_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &iproc_mdio_pm_ops, +#endif }, .probe = iproc_mdio_probe, .remove = iproc_mdio_remove, diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 4a28fb29adaa..fbd36891ee64 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - priv->clk = devm_clk_get(&pdev->dev, NULL); - if (PTR_ERR(priv->clk) == -EPROBE_DEFER) + priv->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) return 
PTR_ERR(priv->clk); - else - priv->clk = NULL; ret = clk_prepare_enable(priv->clk); if (ret) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 88d409e48c1f..aad6809ebe39 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev) static int mdio_mux_iproc_resume(struct device *dev) { struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); + int rc; - clk_prepare_enable(md->core_clk); + rc = clk_prepare_enable(md->core_clk); + if (rc) { + dev_err(md->dev, "failed to enable core clk\n"); + return rc; + } mdio_mux_iproc_config(md); return 0; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index dbacb0031877..229e480179ff 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -62,8 +62,8 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev) struct reset_control *reset = NULL; if (mdiodev->dev.of_node) - reset = devm_reset_control_get_exclusive(&mdiodev->dev, - "phy"); + reset = of_reset_control_get_exclusive(mdiodev->dev.of_node, + "phy"); if (IS_ERR(reset)) { if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP) reset = NULL; @@ -107,6 +107,8 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev) if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) return -EINVAL; + reset_control_put(mdiodev->reset_ctrl); + mdiodev->bus->mdio_map[mdiodev->addr] = NULL; return 0; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 63dedec0433d..51b64f087717 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -25,6 +25,7 @@ #include #include #include +#include /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -902,6 +903,12 @@ static int kszphy_resume(struct phy_device *phydev) genphy_resume(phydev); + /* After switching from power-down to normal mode, an internal global + * reset is automatically generated. Wait a minimum of 1 ms before + * read/write access to the PHY registers. 
+ */ + usleep_range(1000, 2000); + ret = kszphy_config_reset(phydev); if (ret) return ret; diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 7ada1fd9ca71..2339b9381d21 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -302,11 +302,11 @@ enum rgmii_rx_clock_delay { BIT(VSC8531_FORCE_LED_OFF) | \ BIT(VSC8531_FORCE_LED_ON)) -#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin" +#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin" #define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800 #define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48 -#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin" +#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin" #define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000 #define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 105d389b58e7..ea890d802ffe 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -761,7 +761,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) phy_trigger_machine(phydev); } - if (phy_clear_interrupt(phydev)) + /* did_interrupt() may have cleared the interrupt already */ + if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev)) goto phy_err; return IRQ_HANDLED; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index adb66a2fae18..0907c3d8d94a 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -246,7 +246,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) * MDIO bus driver and clock gated at this point. */ if (!netdev) - return !phydev->suspended; + goto out; if (netdev->wol_enabled) return false; @@ -266,7 +266,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) if (device_may_wakeup(&netdev->dev)) return false; - return true; +out: + return !phydev->suspended; } static int mdio_bus_phy_suspend(struct device *dev) @@ -284,6 +285,8 @@ static int mdio_bus_phy_suspend(struct device *dev) if (!mdio_bus_phy_may_suspend(phydev)) return 0; + phydev->suspended_by_mdio_bus = 1; + return phy_suspend(phydev); } @@ -292,9 +295,11 @@ static int mdio_bus_phy_resume(struct device *dev) struct phy_device *phydev = to_phy_device(dev); int ret; - if (!mdio_bus_phy_may_suspend(phydev)) + if (!phydev->suspended_by_mdio_bus) goto no_resume; + phydev->suspended_by_mdio_bus = 0; + ret = phy_resume(phydev); if (ret < 0) return ret; @@ -488,7 +493,7 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv) if (phydev->is_c45) { for (i = 1; i < num_ids; i++) { - if (!(phydev->c45_ids.devices_in_package & (1 << i))) + if (phydev->c45_ids.device_ids[i] == 0xffffffff) continue; if ((phydrv->phy_id & phydrv->phy_id_mask) == @@ -552,7 +557,7 @@ static const struct device_type mdio_bus_phy_type = { .pm = MDIO_BUS_PHY_PM_OPS, }; -static int phy_request_driver_module(struct phy_device *dev, int phy_id) +static int phy_request_driver_module(struct phy_device *dev, u32 phy_id) { int ret; @@ -564,15 +569,15 @@ static int phy_request_driver_module(struct phy_device *dev, int phy_id) * then modprobe isn't available. 
*/ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) { - phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n", - ret, phy_id); + phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n", + ret, (unsigned long)phy_id); return ret; } return 0; } -struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { @@ -596,8 +601,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, mdiodev->device_free = phy_mdio_device_free; mdiodev->device_remove = phy_mdio_device_remove; - dev->speed = 0; - dev->duplex = -1; + dev->speed = SPEED_UNKNOWN; + dev->duplex = DUPLEX_UNKNOWN; dev->pause = 0; dev->asym_pause = 0; dev->link = 0; @@ -632,7 +637,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, int i; for (i = 1; i < num_ids; i++) { - if (!(c45_ids->devices_in_package & (1 << i))) + if (c45_ids->device_ids[i] == 0xffffffff) continue; ret = phy_request_driver_module(dev, @@ -812,10 +817,13 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, */ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { - struct phy_c45_device_ids c45_ids = {0}; + struct phy_c45_device_ids c45_ids; u32 phy_id = 0; int r; + c45_ids.devices_in_package = 0; + memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); + r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids); if (r) return ERR_PTR(r); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 536236fdb232..bf5bbb565cf5 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -444,8 +444,7 @@ static void phylink_mac_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; pl->ops->mac_link_up(pl->config, pl->link_an_mode, - pl->phy_state.interface, - pl->phydev); + pl->cur_interface, pl->phydev); if (ndev) netif_carrier_on(ndev); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 677c45985338..879ca37c8508 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -171,7 +171,9 @@ static int rtl8211c_config_init(struct phy_device *phydev) static int rtl8211f_config_init(struct phy_device *phydev) { + struct device *dev = &phydev->mdio.dev; u16 val; + int ret; /* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin. @@ -189,7 +191,22 @@ static int rtl8211f_config_init(struct phy_device *phydev) return 0; } - return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val); + ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, + val); + if (ret < 0) { + dev_err(dev, "Failed to update the TX delay register\n"); + return ret; + } else if (ret) { + dev_dbg(dev, + "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n", + val ? "Enabling" : "Disabling"); + } else { + dev_dbg(dev, + "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n", + val ? 
"enabled" : "disabled"); + } + + return 0; } static int rtl8211e_config_init(struct phy_device *phydev) @@ -439,6 +456,15 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_MODEL(0x001cc880), + .name = "RTL8208 Fast Ethernet", + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc910), .name = "RTL8211 Gigabit Ethernet", diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index a7b9cf3269bf..29a0917a81e6 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -874,15 +874,15 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf, skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2); if (!skb) goto nomem; - ap->rpkt = skb; - } - if (skb->len == 0) { - /* Try to get the payload 4-byte aligned. - * This should match the - * PPP_ALLSTATIONS/PPP_UI/compressed tests in - * process_input_packet, but we do not have - * enough chars here to test buf[1] and buf[2]. - */ + ap->rpkt = skb; + } + if (skb->len == 0) { + /* Try to get the payload 4-byte aligned. + * This should match the + * PPP_ALLSTATIONS/PPP_UI/compressed tests in + * process_input_packet, but we do not have + * enough chars here to test buf[1] and buf[2]. + */ if (buf[0] != PPP_ALLSTATIONS) skb_reserve(skb, 2 + (buf[0] & 1)); } diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 58a69f830d29..f78ceba42e57 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, struct cstate *cs = lcs->next; unsigned long deltaS, deltaA; short changes = 0; - int hlen; + int nlen, hlen; unsigned char new_seq[16]; unsigned char *cp = new_seq; struct iphdr *ip; @@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, return isize; ip = (struct iphdr *) icp; + if (ip->version != 4 || ip->ihl < 5) + return isize; /* Bail if this packet isn't TCP, or is an IP fragment */ if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) { @@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, comp->sls_o_tcp++; return isize; } - /* Extract TCP header */ + nlen = ip->ihl * 4; + if (isize < nlen + sizeof(*th)) + return isize; - th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4); - hlen = ip->ihl*4 + th->doff*4; + th = (struct tcphdr *)(icp + nlen); + if (th->doff < sizeof(struct tcphdr) / 4) + return isize; + hlen = nlen + th->doff * 4; /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or * some other control bit is set). 
Also uncompressible if diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 4d479e3c817d..8e56a41dd758 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work) */ static void slip_write_wakeup(struct tty_struct *tty) { - struct slip *sl = tty->disc_data; + struct slip *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev) @@ -855,7 +862,11 @@ err_free_chan: sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); + sl_free_netdev(sl->dev); + /* do not call free_netdev before rtnl_unlock */ + rtnl_unlock(); free_netdev(sl->dev); + return err; err_exit: rtnl_unlock(); @@ -881,10 +892,11 @@ static void slip_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 8156b33ee3e7..4004f98e50d9 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2074,7 +2074,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev, cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; - list_for_each_entry(port, &team->port_list, list) { + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) { if (team_port_txable(port)) { if (port->state.speed != SPEED_UNKNOWN) speed += port->state.speed; @@ -2083,6 +2084,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev, cmd->base.duplex = port->state.duplex; } } + rcu_read_unlock(); + cmd->base.speed = speed ? 
: SPEED_UNKNOWN; return 0; @@ -2237,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY }, + [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 }, + [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 }, }; static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a8d3141582a5..16f5cb249ed5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -313,8 +313,8 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, tfile->napi_enabled = napi_en; tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { - netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll, + NAPI_POLL_WEIGHT); napi_enable(&tfile->napi); } } @@ -1715,8 +1715,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, alloc_frag->offset += buflen; } err = tun_xdp_act(tun, xdp_prog, &xdp, act); - if (err < 0) - goto err_xdp; + if (err < 0) { + if (act == XDP_REDIRECT || act == XDP_TX) + put_page(alloc_frag->page); + goto out; + } + if (err == XDP_REDIRECT) xdp_do_flush_map(); if (err != XDP_PASS) @@ -1730,8 +1734,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); -err_xdp: - put_page(alloc_frag->page); out: rcu_read_unlock(); local_bh_enable(); @@ -1936,6 +1938,10 @@ drop: if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); + if (frags) { + tfile->napi.skb = NULL; + mutex_unlock(&tfile->napi_mutex); + } return total_len; } } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f24a1b0b801f..0170a441208a 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -511,7 +512,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, } } else { netdev_warn(dev->net, - "Failed to read stat ret = 0x%x", ret); + "Failed to read stat ret = %d", ret); } kfree(stats); @@ -1808,6 +1809,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) dev->mdiobus->read = lan78xx_mdiobus_read; dev->mdiobus->write = lan78xx_mdiobus_write; dev->mdiobus->name = "lan78xx-mdiobus"; + dev->mdiobus->parent = &dev->udev->dev; snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); @@ -2723,11 +2725,6 @@ static int lan78xx_stop(struct net_device *net) return 0; } -static int lan78xx_linearize(struct sk_buff *skb) -{ - return skb_linearize(skb); -} - static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { @@ -2739,8 +2736,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, return NULL; } - if (lan78xx_linearize(skb) < 0) + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); return NULL; + } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; @@ -3670,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net) tasklet_schedule(&dev->bh); } +static netdev_features_t lan78xx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE) + features &= ~NETIF_F_GSO_MASK; + + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + return features; +} + 
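The lan78xx_features_check() added above is the usual shape of an .ndo_features_check handler: clear offloads the hardware cannot honor for this particular skb and let the stack fall back to software. A sketch of the same idea for a hypothetical device with a hard single-transfer limit (MY_DEV_MAX_FRAME and the my_ prefix are assumptions for illustration, not part of the patch):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/vxlan.h>

#define MY_DEV_MAX_FRAME	9000	/* hypothetical transfer limit */

static netdev_features_t my_features_check(struct sk_buff *skb,
					   struct net_device *netdev,
					   netdev_features_t features)
{
	/* A GSO superframe larger than one device transfer cannot be
	 * offloaded; clearing GSO makes the core segment in software.
	 */
	if (skb->len > MY_DEV_MAX_FRAME)
		features &= ~NETIF_F_GSO_MASK;

	/* Let the VLAN/VXLAN helpers strip offloads that do not apply
	 * to tagged or encapsulated frames.
	 */
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

Such a handler only takes effect once it is wired into the driver's net_device_ops, which is what the .ndo_features_check line in the next hunk does for lan78xx.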
static const struct net_device_ops lan78xx_netdev_ops = { .ndo_open = lan78xx_open, .ndo_stop = lan78xx_stop, @@ -3683,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, + .ndo_features_check = lan78xx_features_check, }; static void lan78xx_stat_monitor(struct timer_list *t) @@ -3752,6 +3765,7 @@ static int lan78xx_probe(struct usb_interface *intf, /* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4196c0e32740..6c738a271257 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -61,7 +61,6 @@ enum qmi_wwan_flags { enum qmi_wwan_quirks { QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ - QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */ }; struct qmimux_hdr { @@ -338,6 +337,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net) netdev_dbg(net, "mode: raw IP\n"); } else if (!net->header_ops) { /* don't bother if already set */ ether_setup(net); + /* Restoring min/max mtu values set originally by usbnet */ + net->min_mtu = 0; + net->max_mtu = ETH_MAX_MTU; clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: Ethernet\n"); } @@ -916,16 +918,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { .data = QMI_WWAN_QUIRK_DTR, }; -static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { - .description = "WWAN/QMI device", - .flags = FLAG_WWAN | FLAG_SEND_ZLP, - .bind = qmi_wwan_bind, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .rx_fixup = qmi_wwan_rx_fixup, - .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG, -}; - #define HUAWEI_VENDOR_ID 0x12D1 /* map QMI/wwan function by a fixed interface number */ @@ -946,14 +938,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { #define QMI_GOBI_DEVICE(vend, prod) \ QMI_FIXED_INTF(vend, prod, 0) -/* Quectel does not use fixed interface numbers on at least some of their - * devices. We need to check the number of endpoints to ensure that we bind to - * the correct interface. +/* Many devices have QMI and DIAG functions which are distinguishable + * from other vendor specific functions by class, subclass and + * protocol all being 0xff. The DIAG function has exactly 2 endpoints + * and is silently rejected when probed. + * + * This makes it possible to match dynamically numbered QMI functions + * as seen on e.g. many Quectel modems. */ -#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \ +#define QMI_MATCH_FF_FF_FF(vend, prod) \ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \ USB_SUBCLASS_VENDOR_SPEC, 0xff), \ - .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr static const struct usb_device_id products[] = { /* 1. 
CDC ECM like devices match on the control interface */ @@ -1059,9 +1055,10 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1213,6 +1210,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ + {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1362,6 +1360,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ @@ -1453,7 +1452,6 @@ static int qmi_wwan_probe(struct usb_interface *intf, { struct usb_device_id *id = (struct usb_device_id *)prod; struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; - const struct driver_info *info; /* Workaround to enable dynamic IDs. This disables usbnet * blacklisting functionality. Which, if required, can be @@ -1489,12 +1487,8 @@ static int qmi_wwan_probe(struct usb_interface *intf, * different. Ignore the current interface if the number of endpoints * equals the number for the diag interface (two). */ - info = (void *)id->driver_info; - - if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { - if (desc->bNumEndpoints == 2) - return -ENODEV; - } + if (desc->bNumEndpoints == 2) + return -ENODEV; return usbnet_probe(intf, id); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index b2507c59ba8b..44ea5dcc43fd 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -29,7 +29,7 @@ #define NETNEXT_VERSION "10" /* Information for net */ -#define NET_VERSION "10" +#define NET_VERSION "11" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers " @@ -63,6 +63,7 @@ #define PLA_LED_FEATURE 0xdd92 #define PLA_PHYAR 0xde00 #define PLA_BOOT_CTRL 0xe004 +#define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 #define PLA_EEEP_CR 0xe080 @@ -90,6 +91,7 @@ #define PLA_TALLYCNT 0xe890 #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 +#define PLA_CONFIG6 0xe90a /* CONFIG6 */ #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a @@ -102,6 +104,7 @@ #define PLA_BP_EN 0xfc38 #define USB_USB2PHY 0xb41e +#define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 @@ -286,6 +289,9 @@ #define LINK_ON_WAKE_EN 0x0010 #define LINK_OFF_WAKE_EN 0x0008 +/* PLA_CONFIG6 */ +#define LANWAKE_CLR_EN BIT(0) + /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 @@ -298,6 +304,7 @@ /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 +#define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 @@ -310,6 +317,7 @@ #define MAC_CLK_SPDWN_EN BIT(15) /* PLA_MAC_PWR_CTRL3 */ +#define PLA_MCU_SPDWN_EN BIT(14) #define PKT_AVAIL_SPDWN_EN 0x0100 #define SUSPEND_SPDWN_EN 0x0004 #define U1U2_SPDWN_EN 0x0002 @@ -340,6 +348,9 @@ /* PLA_BOOT_CTRL */ #define AUTOLOAD_DONE 0x0002 +/* PLA_LWAKE_CTRL_REG */ +#define LANWAKE_PIN BIT(7) + /* PLA_SUSPEND_FLAG */ #define LINK_CHG_EVENT BIT(0) @@ -353,6 +364,9 @@ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 +/* USB_SSPHYLINK1 */ +#define DELAY_PHY_PWR_CHG BIT(1) + /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) @@ -2992,6 +3006,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired) } msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; } return data; @@ -3175,7 +3191,6 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153_u2p3en(tp, true); r8153b_u1u2en(tp, true); } } @@ -3703,7 +3718,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) r8153_aldps_en(tp, true); r8152b_enable_fc(tp); - r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } @@ -4004,6 +4018,8 @@ static void rtl8152_down(struct r8152 *tp) static void rtl8153_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -4011,6 +4027,19 @@ static void rtl8153_up(struct r8152 *tp) r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1); + ocp_data &= ~DELAY_PHY_PWR_CHG; + ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); + r8153_aldps_en(tp, true); switch (tp->version) { @@ -4029,11 +4058,17 @@ static void rtl8153_up(struct r8152 *tp) static void rtl8153_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data &= ~LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); @@ 
-4044,6 +4079,8 @@ static void rtl8153_down(struct r8152 *tp) static void rtl8153b_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -4054,18 +4091,27 @@ static void rtl8153b_up(struct r8152 *tp) r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153_aldps_en(tp, true); - r8153_u2p3en(tp, true); r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); @@ -4375,7 +4421,10 @@ static void r8153_init(struct r8152 *tp) if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; } data = r8153_phy_status(tp, 0); @@ -4454,6 +4503,14 @@ static void r8153_init(struct r8152 *tp) r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); @@ -4493,7 +4550,10 @@ static void r8153b_init(struct r8152 *tp) if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; } data = r8153_phy_status(tp, 0); @@ -4526,6 +4586,19 @@ static void r8153b_init(struct r8152 *tp) ocp_data |= MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + if (tp->version == RTL_VER_09) { + /* Disable Test IO for 32QFN */ + if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= TEST_IO_OFF; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + } + } + set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ @@ -5587,6 +5660,9 @@ static int rtl8152_probe(struct usb_interface *intf, return -ENODEV; } + if (intf->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { @@ -5687,6 +5763,11 @@ static int rtl8152_probe(struct usb_interface *intf, intf->needs_remote_wakeup = 1; + if (!rtl_can_wakeup(tp)) + __rtl_set_wol(tp, 0); + else + tp->saved_wolopts = __rtl_get_wol(tp); + tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp); @@ -5700,10 +5781,6 @@ static int rtl8152_probe(struct usb_interface *intf, goto out1; } - if (!rtl_can_wakeup(tp)) - __rtl_set_wol(tp, 0); - - tp->saved_wolopts = __rtl_get_wol(tp); if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); else diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 
8869154fad88..93690f77ec9c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2276,7 +2276,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; - int err; if (!sock6) return ERR_PTR(-EIO); @@ -2299,10 +2298,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.fl6_dport = dport; fl6.fl6_sport = sport; - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - sock6->sock->sk, - &ndst, &fl6); - if (unlikely(err < 0)) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, + &fl6, NULL); + if (unlikely(IS_ERR(ndst))) { netdev_dbg(dev, "no route to %pI6\n", daddr); return ERR_PTR(-ENETUNREACH); } @@ -2544,7 +2542,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2584,7 +2582,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), @@ -2781,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + err = gro_cells_init(&vxlan->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + return 0; } @@ -3045,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev) vxlan->dev = dev; - gro_cells_init(&vxlan->gro_cells, dev); - for (h = 0; h < FDB_HASH_SIZE; ++h) { spin_lock_init(&vxlan->hash_lock[h]); INIT_HLIST_HEAD(&vxlan->fdb_head[h]); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index ca0f3be2b6bf..4ad0a0c33d85 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = { }, }; -static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; +static struct ucc_tdm_info utdm_info[UCC_MAX_NUM]; static int uhdlc_init(struct ucc_hdlc_private *priv) { @@ -245,6 +245,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) ret = -ENOMEM; goto free_riptr; } + if (riptr != (u16)riptr || tiptr != (u16)tiptr) { + dev_err(priv->dev, "MURAM allocation out of addressable range\n"); + ret = -ENOMEM; + goto free_tiptr; + } /* Set RIPTR, TIPTR */ iowrite16be(riptr, &priv->ucc_pram->riptr); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 5643675ff724..bf78073ee7fd 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -62,11 +62,12 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - skb_push(skb, 1); - if (skb_cow(skb, 1)) return NET_RX_DROP; + skb_push(skb, 1); + skb_reset_network_header(skb); + ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -79,6 +80,13 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) static void 
x25_data_transmit(struct net_device *dev, struct sk_buff *skb) { hdlc_device *hdlc = dev_to_hdlc(dev); + + skb_reset_network_header(skb); + skb->protocol = hdlc_type_trans(skb, dev); + + if (dev_nit_active(dev)) + dev_queue_xmit_nit(skb, dev); + hdlc->xmit(skb, dev); /* Ignore return value :-( */ } @@ -93,6 +101,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); + skb_reset_network_header(skb); if ((result = lapb_data_request(dev, skb)) != LAPB_OK) dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index ea6ee6a608ce..e7619cec978a 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -258,7 +258,7 @@ struct port { struct hss_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ - u32 desc_tab_phys; + dma_addr_t desc_tab_phys; unsigned int id; unsigned int clock_type, clock_rate, loopback; unsigned int initialized, carrier; @@ -858,7 +858,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_dropped++; return NETDEV_TX_OK; } - memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); + memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); dev_kfree_skb(skb); #endif diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index e2e679a01b65..77ccf3672ede 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb, spin_lock_irqsave(&sdla_lock, flags); SDLA_WINDOW(dev, addr); - pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK)); + pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK)); __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); SDLA_WINDOW(dev, addr); pbuf->opp_flag = 1; diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index b94759daeacc..da2d179430ca 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -255,7 +255,8 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, if (flags & AR5523_CMD_FLAG_MAGIC) hdr->magic = cpu_to_be32(1 << 24); - memcpy(hdr + 1, idata, ilen); + if (ilen) + memcpy(hdr + 1, idata, ilen); cmd->odata = odata; cmd->olen = olen; diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index b6d2932383cf..1cfe75a2d0c3 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1208,9 +1208,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA); dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len); - memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, - crash_data->ramdump_buf_len); - sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + if (crash_data->ramdump_buf_len) { + memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, + crash_data->ramdump_buf_len); + sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + } } mutex_unlock(&ar->dump_mutex); @@ -1257,6 +1259,9 @@ int ath10k_coredump_register(struct ath10k *ar) if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar); + if 
(!crash_data->ramdump_buf_len) + return 0; + crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len); if (!crash_data->ramdump_buf) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 53f1095de8ff..9f0e7b4943ec 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2726,7 +2726,7 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); continue; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a6d21856b7e7..36d24ea126a2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3708,7 +3708,7 @@ static int ath10k_mac_tx(struct ath10k *ar, struct ieee80211_vif *vif, enum ath10k_hw_txrx_mode txmode, enum ath10k_mac_tx_path txpath, - struct sk_buff *skb) + struct sk_buff *skb, bool noque_offchan) { struct ieee80211_hw *hw = ar->hw; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -3738,10 +3738,10 @@ static int ath10k_mac_tx(struct ath10k *ar, } } - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { if (!ath10k_mac_tx_frm_has_freq(ar)) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", - skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n", + skb, skb->len); skb_queue_tail(&ar->offchan_tx_queue, skb); ieee80211_queue_work(hw, &ar->offchan_tx_work); @@ -3803,8 +3803,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", - skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n", + skb, skb->len); hdr = (struct ieee80211_hdr *)skb->data; peer_addr = ieee80211_get_DA(hdr); @@ -3850,7 +3850,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); - ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true); if (ret) { ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", ret); @@ -3860,8 +3860,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) time_left = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); if (time_left == 0) - ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", - skb); + ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n", + skb, skb->len); if (!peer && tmp_peer_created) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); @@ -3903,8 +3903,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) ar->running_fw->fw_file.fw_features)) { paddr = dma_map_single(ar->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (!paddr) + if (dma_mapping_error(ar->dev, paddr)) { + ieee80211_free_txskb(ar->hw, skb); continue; + } ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); if (ret) { ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", @@ -4097,7 +4099,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false); if (unlikely(ret)) { ath10k_warn(ar, "failed to push frame: %d\n", ret); @@ -4378,7 +4380,7 @@ 
static void ath10k_mac_op_tx(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false); if (ret) { ath10k_warn(ar, "failed to transmit frame: %d\n", ret); if (is_htt) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index a0b4d265c6eb..0a727502d14c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); u32 i; + int ret; + + mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_ON) { + ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n"); + ret = -EIO; + goto done; + } for (i = 0; i < region->len; i += 4) *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i); - return region->len; + ret = region->len; +done: + mutex_unlock(&ar->conf_mutex); + return ret; } /* if an error happened returns < 0, otherwise the length */ @@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar, count = ath10k_pci_dump_memory_sram(ar, current_region, buf); break; case ATH10K_MEM_REGION_TYPE_IOREG: - count = ath10k_pci_dump_memory_reg(ar, current_region, buf); + ret = ath10k_pci_dump_memory_reg(ar, current_region, buf); + if (ret < 0) + break; + + count = ret; break; default: ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); @@ -3490,7 +3505,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, struct ath10k_pci *ar_pci; enum ath10k_hw_rev hw_rev; struct ath10k_bus_params bus_params = {}; - bool pci_ps; + bool pci_ps, is_qca988x = false; int (*pci_soft_reset)(struct ath10k *ar); int (*pci_hard_reset)(struct ath10k *ar); u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); @@ -3500,6 +3515,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; + is_qca988x = true; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca988x_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; @@ -3619,25 +3635,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_deinit_irq; } + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.link_can_suspend = true; + /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that + * fall off the bus during chip_reset. These chips have the same pci + * device id as the QCA9880 BR4A or 2R4E. So that's why the check. 
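The reordering above runs the chip-ID check before ath10k_pci_chip_reset() for QCA988x parts, because an unsupported QCA9880-AR1A v1 shares its PCI device ID with supported boards and falls off the bus once reset. The general probe-time guard looks roughly like this sketch (MY_CHIP_ID_REG and the my_ helpers are hypothetical; the driver itself reads SOC_CHIP_ID_ADDRESS through ath10k_pci_soc_read32()):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_CHIP_ID_REG	0x0040	/* hypothetical register offset */

static bool my_chip_is_supported(u32 chip_id)
{
	/* All-ones usually means the device has already fallen off
	 * the bus or the read hit a dead BAR.
	 */
	return chip_id != 0xffffffff;
}

static int my_preflight_check(void __iomem *mem)
{
	u32 chip_id = readl(mem + MY_CHIP_ID_REG);

	/* Reject known-bad silicon before issuing a reset that a
	 * broken revision would not come back from.
	 */
	if (!my_chip_is_supported(chip_id))
		return -ENODEV;

	return 0;
}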
+ */ + if (is_qca988x) { + bus_params.chip_id = + ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (bus_params.chip_id != 0xffffffff) { + if (!ath10k_pci_chip_is_supported(pdev->device, + bus_params.chip_id)) + goto err_unsupported; + } + } + ret = ath10k_pci_chip_reset(ar); if (ret) { ath10k_err(ar, "failed to reset chip: %d\n", ret); goto err_free_irq; } - bus_params.dev_type = ATH10K_DEV_TYPE_LL; - bus_params.link_can_suspend = true; bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); - if (bus_params.chip_id == 0xffffffff) { - ath10k_err(ar, "failed to get chip id\n"); - goto err_free_irq; - } + if (bus_params.chip_id == 0xffffffff) + goto err_unsupported; - if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { - ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", - pdev->device, bus_params.chip_id); + if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) goto err_free_irq; - } ret = ath10k_core_register(ar, &bus_params); if (ret) { @@ -3647,6 +3672,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev, return 0; +err_unsupported: + ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", + pdev->device, bus_params.chip_id); + err_free_irq: ath10k_pci_free_irq(ar); ath10k_pci_rx_retry_sync(ar); diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 3b63b6257c43..545ac1f06997 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -581,22 +581,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi) { struct wlfw_host_cap_resp_msg_v01 resp = {}; struct wlfw_host_cap_req_msg_v01 req = {}; + struct qmi_elem_info *req_ei; struct ath10k *ar = qmi->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); struct qmi_txn txn; int ret; req.daemon_support_valid = 1; req.daemon_support = 0; - ret = qmi_txn_init(&qmi->qmi_hdl, &txn, - wlfw_host_cap_resp_msg_v01_ei, &resp); + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei, + &resp); if (ret < 0) goto out; + if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags)) + req_ei = wlfw_host_cap_8bit_req_msg_v01_ei; + else + req_ei = wlfw_host_cap_req_msg_v01_ei; + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, QMI_WLFW_HOST_CAP_REQ_V01, WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN, - wlfw_host_cap_req_msg_v01_ei, &req); + req_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath10k_err(ar, "failed to send host capability request: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c index 1fe05c6218c3..86fcf4e1de5f 100644 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c @@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { {} }; +struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support), + }, + {} +}; + struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h index 
bca1186e1560..4d107e1364a8 100644 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h @@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 { #define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[]; +extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[]; struct wlfw_host_cap_resp_msg_v01 { struct qmi_response_type_v01 resp; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index b491361e6ed4..63607c3b8e81 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1261,6 +1261,15 @@ out: return ret; } +static void ath10k_snoc_quirks_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct device *dev = &ar_snoc->dev->dev; + + if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk")) + set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags); +} + int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); @@ -1678,6 +1687,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ar->ce_priv = &ar_snoc->ce; msa_size = drv_data->msa_size; + ath10k_snoc_quirks_init(ar); + ret = ath10k_snoc_resource_init(ar); if (ret) { ath10k_warn(ar, "failed to initialize resource: %d\n", ret); @@ -1718,13 +1729,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ret = ath10k_qmi_init(ar, msa_size); if (ret) { ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret); - goto err_core_destroy; + goto err_power_off; } ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); return 0; +err_power_off: + ath10k_hw_power_off(ar); + err_free_irq: ath10k_snoc_free_irq(ar); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index d62f53501fbb..9db823e46314 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -63,6 +63,7 @@ enum ath10k_snoc_flags { ATH10K_SNOC_FLAG_REGISTERED, ATH10K_SNOC_FLAG_UNREGISTERING, ATH10K_SNOC_FLAG_RECOVERY, + ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, }; struct ath10k_snoc { diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 4102df016931..39abf8b12903 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -95,6 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); + info->status.rates[0].idx = -1; + trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index e1420f67f776..1e0343081be9 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) struct ath10k_urb_context *urb_context = NULL; unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = list_first_entry(&pipe->urb_list_head, @@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, { unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); 
pipe->urb_cnt++; @@ -435,6 +443,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk transmit failed: %d\n", ret); usb_unanchor_urb(urb); + usb_free_urb(urb); ret = -EINVAL; goto err_free_urb_to_pipe; } diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 4d5d10c01064..eb0c963d9fd5 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3650,6 +3650,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar, struct wmi_tlv *tlv; struct sk_buff *skb; __le32 *channel_list; + u16 tlv_len; size_t len; void *ptr; u32 i; @@ -3707,10 +3708,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar, /* nlo_configured_parameters(nlo_list) */ cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS)); + tlv_len = __le32_to_cpu(cmd->no_of_ssids) * + sizeof(struct nlo_configured_parameters); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(len); + tlv->len = __cpu_to_le16(tlv_len); ptr += sizeof(*tlv); nlo_list = ptr; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 4f707c6394bb..90f1197a6ad8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -9422,7 +9422,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, msdu = pkt_addr->vaddr; dma_unmap_single(ar->dev, pkt_addr->paddr, - msdu->len, DMA_FROM_DEVICE); + msdu->len, DMA_TO_DEVICE); ieee80211_free_txskb(ar->hw, msdu); return 0; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 2b29bf4730f6..b4885a700296 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah) static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) { - u32 data, ko, kg; + u32 data = 0, ko, kg; if (!AR_SREV_9462_20_OR_LATER(ah)) return; diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c index 159490f5a111..60731e07f681 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -84,7 +84,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, val = swahb32(val); } - __raw_writel(val, mem + reg); + iowrite32(val, mem + reg); usleep_range(100, 120); } diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index fb649d85b8fc..dd0c32379375 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -1216,7 +1216,7 @@ err_fw: static int send_eject_command(struct usb_interface *interface) { struct usb_device *udev = interface_to_usbdev(interface); - struct usb_host_interface *iface_desc = &interface->altsetting[0]; + struct usb_host_interface *iface_desc = interface->cur_altsetting; struct usb_endpoint_descriptor *endpoint; unsigned char *cmd; u8 bulk_out_ep; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 4e8e80ac8341..9cec5c216e1f 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, struct 
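The ath10k wmi.c one-liner above is the classic DMA-direction fix: a buffer must be unmapped with the same direction it was mapped with, and a management TX skb is mapped DMA_TO_DEVICE, so unmapping it DMA_FROM_DEVICE tells the DMA API the device wrote the buffer, which can trigger a needless or even incorrect cache operation on some platforms. A minimal sketch of the matched pair; demo_tx() and the hand-off step are illustrative:

#include <linux/dma-mapping.h>

static int demo_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t paddr;

	/* CPU writes, device reads: map and unmap both DMA_TO_DEVICE */
	paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, paddr))
		return -ENOMEM;

	/* ... hand the buffer to the hardware, wait for completion ... */

	dma_unmap_single(dev, paddr, len, DMA_TO_DEVICE);
	return 0;
}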
ath_htc_rx_status *rxstatus; struct ath_rx_status rx_stats; bool decrypt_error = false; + __be16 rs_datalen; + bool is_phyerr; if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", @@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, rxstatus = (struct ath_htc_rx_status *)skb->data; - if (be16_to_cpu(rxstatus->rs_datalen) - - (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) { + rs_datalen = be16_to_cpu(rxstatus->rs_datalen); + if (unlikely(rs_datalen - + (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) { ath_err(common, "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n", - rxstatus->rs_datalen, skb->len); + rs_datalen, skb->len); + goto rx_next; + } + + is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY; + /* + * Discard zero-length packets and packets smaller than an ACK + * which are not PHY_ERROR (short radar pulses have a length of 3) + */ + if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { + ath_warn(common, + "Short RX data len, dropping (dlen: %d)\n", + rs_datalen); goto rx_next; } @@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, * Process PHY errors and return so that the packet * can be dropped. */ - if (rx_stats.rs_status & ATH9K_RXERR_PHY) { + if (unlikely(is_phyerr)) { /* TODO: Not using DFS processing now. */ if (ath_cmn_process_fft(&priv->spec_priv, hdr, &rx_stats, rx_status->mactime)) { diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 04d576deae72..6cb0d7bcfe76 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -880,6 +880,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, u8 data_offset; struct wil_rx_status_extended *s; u16 sring_idx = sring - wil->srings; + int invalid_buff_id_retry; BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb)); @@ -893,9 +894,9 @@ again: /* Extract the buffer ID from the status message */ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg)); + invalid_buff_id_retry = 0; while (!buff_id) { struct wil_rx_status_extended *s; - int invalid_buff_id_retry = 0; wil_dbg_txrx(wil, "buff_id is not updated yet by HW, (swhead 0x%x)\n", diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 153b84447e40..41389c1eb252 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -2505,7 +2505,8 @@ int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie) cmd->mgmt_frm_type = type; /* BUG: FW API define ieLen as u8. 
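The wil6210 hunk above fixes a retry bound that could never trip: invalid_buff_id_retry was declared and zeroed inside the while loop, so every iteration restarted the count. Hoisting the initialization out of the loop restores the bound. A self-contained sketch of the same bug shape, with demo_* names as illustrative stand-ins:

#include <linux/errno.h>

#define DEMO_MAX_RETRIES 20		/* illustrative bound */

int demo_read_buff_id(void);		/* hypothetical helper */

static int demo_wait_buff_id(void)
{
	int retry = 0;			/* was: re-initialized inside the loop */
	int buff_id;

	while (!(buff_id = demo_read_buff_id())) {
		if (++retry > DEMO_MAX_RETRIES)
			return -EIO;
	}

	return buff_id;
}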
Will fix FW */ cmd->ie_len = cpu_to_le16(ie_len); - memcpy(cmd->ie_info, ie, ie_len); + if (ie_len) + memcpy(cmd->ie_info, ie, ie_len); rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len); kfree(cmd); out: @@ -2541,7 +2542,8 @@ int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie) } cmd->ie_len = cpu_to_le16(ie_len); - memcpy(cmd->ie_info, ie, ie_len); + if (ie_len) + memcpy(cmd->ie_info, ie, ie_len); rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len); kfree(cmd); diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 4325e91736eb..8b6b657c4b85 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -1275,8 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev) } /* Interrupt handler bottom-half */ -static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) +static void b43legacy_interrupt_tasklet(unsigned long data) { + struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data; u32 reason; u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; u32 merged_dma_reason = 0; @@ -3741,7 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev, b43legacy_set_status(wldev, B43legacy_STAT_UNINIT); wldev->bad_frames_preempt = modparam_bad_frames_preempt; tasklet_init(&wldev->isr_tasklet, - (void (*)(unsigned long))b43legacy_interrupt_tasklet, + b43legacy_interrupt_tasklet, (unsigned long)wldev); if (modparam_pio) wldev->__using_pio = true; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 406b367c284c..85cf96461dde 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1350,6 +1350,11 @@ void brcmf_detach(struct device *dev) brcmf_fweh_detach(drvr); brcmf_proto_detach(drvr); + if (drvr->mon_if) { + brcmf_net_detach(drvr->mon_if->ndev, false); + drvr->mon_if = NULL; + } + /* make sure primary interface removed last */ for (i = BRCMF_MAX_IFS - 1; i > -1; i--) { if (drvr->iflist[i]) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 7ba9f6a68645..1f5deea5a288 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2092,7 +2092,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, /* firmware requires unique mac address for p2pdev interface */ if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) { bphy_err(drvr, "discovery vif must be different from primary interface\n"); - return ERR_PTR(-EINVAL); + err = -EINVAL; + goto fail; } brcmf_p2p_generate_bss_mac(p2p, addr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 6c463475e90b..3be60aef5465 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1427,6 +1427,8 @@ static int brcmf_pcie_reset(struct device *dev) struct brcmf_fw_request *fwreq; int err; + brcmf_pcie_intr_disable(devinfo); + brcmf_pcie_bus_console_read(devinfo, true); brcmf_detach(dev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 264ad63232f8..d43247a95ce5 100644 --- 
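The b43legacy change above (and the ipw2100/ipw2200/iwlegacy ones that follow) all remove the same hack: casting a handler that takes a struct pointer to void (*)(unsigned long) for tasklet_init(). Calling a function through a mismatched pointer type is undefined behaviour and trips control-flow-integrity checking; the fix gives the handler the canonical prototype and recovers the context pointer inside. A sketch with the classic (pre-tasklet_setup) API; struct demo_dev is an illustrative stand-in:

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct isr_tasklet;
	/* ... device state ... */
};

static void demo_tasklet(unsigned long data)
{
	struct demo_dev *dev = (struct demo_dev *)data;

	/* ... bottom-half work using dev ... */
}

static void demo_init(struct demo_dev *dev)
{
	/* no function-pointer cast needed any more */
	tasklet_init(&dev->isr_tasklet, demo_tasklet, (unsigned long)dev);
}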
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1934,7 +1934,10 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new, BRCMF_SDIO_FT_NORMAL)) { rd->len = 0; + brcmf_sdio_rxfail(bus, true, true); + sdio_release_host(bus->sdiodev->func1); brcmu_pkt_buf_free_skb(pkt); + continue; } bus->sdcnt.rx_readahead_cnt++; if (rd->len != roundup(rd_new.len, 16)) { @@ -4225,6 +4228,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, } if (err == 0) { + /* Assign bus interface call back */ + sdiod->bus_if->dev = sdiod->dev; + sdiod->bus_if->ops = &brcmf_sdio_bus_ops; + sdiod->bus_if->chip = bus->ci->chip; + sdiod->bus_if->chiprev = bus->ci->chiprev; + /* Allow full data communication using DPC from now on. */ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); @@ -4241,12 +4250,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, sdio_release_host(sdiod->func1); - /* Assign bus interface call back */ - sdiod->bus_if->dev = sdiod->dev; - sdiod->bus_if->ops = &brcmf_sdio_bus_ops; - sdiod->bus_if->chip = bus->ci->chip; - sdiod->bus_if->chiprev = bus->ci->chiprev; - err = brcmf_alloc(sdiod->dev, sdiod->settings); if (err) { brcmf_err("brcmf_alloc failed\n"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 06f3c01f10b3..575ed19e9195 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -430,6 +430,7 @@ fail: usb_free_urb(req->urb); list_del(q->next); } + kfree(reqs); return NULL; } @@ -1348,7 +1349,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) goto fail; } - desc = &intf->altsetting[0].desc; + desc = &intf->cur_altsetting->desc; if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || (desc->bInterfaceSubClass != 2) || (desc->bInterfaceProtocol != 0xff)) { @@ -1361,7 +1362,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) num_of_eps = desc->bNumEndpoints; for (ep = 0; ep < num_of_eps; ep++) { - endpoint = &intf->altsetting[0].endpoint[ep].desc; + endpoint = &intf->cur_altsetting->endpoint[ep].desc; endpoint_num = usb_endpoint_num(endpoint); if (!usb_endpoint_xfer_bulk(endpoint)) continue; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index f43c06569ea1..c4c8f1b62e1e 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { case AIROGVLIST: ridcode = RID_APLIST; break; case AIROGDRVNAM: ridcode = RID_DRVNAME; break; case AIROGEHTENC: ridcode = RID_ETHERENCAP; break; - case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; - case AIROGWEPKNV: ridcode = RID_WEP_PERM; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; + case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break; + case AIROGWEPKNV: ridcode = RID_WEP_PERM; break; case AIROGSTAT: ridcode = RID_STATUS; break; case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; case AIROGSTATSC32: ridcode = RID_STATS; break; @@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { return -EINVAL; } - if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) 
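Several hunks in this series (ath9k hif_usb earlier, brcmfmac usb above, orinoco_usb later) swap &interface->altsetting[0] for interface->cur_altsetting. altsetting[] is merely the array of all alternate settings in descriptor order; the setting actually in force is cur_altsetting, and an unusual or malicious device can make the two differ. A sketch of endpoint discovery against the active setting; demo_count_bulk_in() is illustrative:

#include <linux/usb.h>

static int demo_count_bulk_in(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i, n = 0;

	for (i = 0; i < alt->desc.bNumEndpoints; i++)
		if (usb_endpoint_is_bulk_in(&alt->endpoint[i].desc))
			n++;

	return n;
}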
== NULL) + if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) { + /* Only super-user can read WEP keys */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + } + + if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 8dfbaff2d1fe..a162146a43a7 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3206,8 +3206,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) } } -static void ipw2100_irq_tasklet(struct ipw2100_priv *priv) +static void ipw2100_irq_tasklet(unsigned long data) { + struct ipw2100_priv *priv = (struct ipw2100_priv *)data; struct net_device *dev = priv->net_dev; unsigned long flags; u32 inta, tmp; @@ -6007,7 +6008,7 @@ static void ipw2100_rf_kill(struct work_struct *work) spin_unlock_irqrestore(&priv->low_lock, flags); } -static void ipw2100_irq_tasklet(struct ipw2100_priv *priv); +static void ipw2100_irq_tasklet(unsigned long data); static const struct net_device_ops ipw2100_netdev_ops = { .ndo_open = ipw2100_open, @@ -6137,7 +6138,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event); - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + tasklet_init(&priv->irq_tasklet, ipw2100_irq_tasklet, (unsigned long)priv); /* NOTE: We do not start the deferred work for status checks yet */ diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ed0f06532d5e..ac5f797fb1ad 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -1945,8 +1945,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv) wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); } -static void ipw_irq_tasklet(struct ipw_priv *priv) +static void ipw_irq_tasklet(unsigned long data) { + struct ipw_priv *priv = (struct ipw_priv *)data; u32 inta, inta_mask, handled = 0; unsigned long flags; int rc = 0; @@ -10680,7 +10681,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); #endif /* CONFIG_IPW2200_QOS */ - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + tasklet_init(&priv->irq_tasklet, ipw_irq_tasklet, (unsigned long)priv); return ret; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 4fbcc7fba3cc..e2e9c3e8fff5 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -1376,8 +1376,9 @@ il3945_dump_nic_error_log(struct il_priv *il) } static void -il3945_irq_tasklet(struct il_priv *il) +il3945_irq_tasklet(unsigned long data) { + struct il_priv *il = (struct il_priv *)data; u32 inta, handled = 0; u32 inta_fh; unsigned long flags; @@ -3403,7 +3404,7 @@ il3945_setup_deferred_work(struct il_priv *il) timer_setup(&il->watchdog, il_bg_watchdog, 0); tasklet_init(&il->irq_tasklet, - (void (*)(unsigned long))il3945_irq_tasklet, + il3945_irq_tasklet, (unsigned long)il); } diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index ffb705b18fb1..5fe17039a337 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4344,8 
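The airo readrids() rework above does two things: it gates the WEP-key RIDs on CAP_NET_ADMIN after ridcode resolution, so a single check covers every path to the keys, and it switches the buffer that is later copied toward user space from kmalloc() to kzalloc(), so a short device read cannot leak stale heap bytes. A sketch of both, with an illustrative is_wep_rid flag and a stand-in RIDSIZE:

#include <linux/capability.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

#define RIDSIZE 2048			/* stand-in for the driver's size */

static void *demo_alloc_rid_buf(bool is_wep_rid)
{
	/* one privilege check on the resolved RID, not per ioctl case */
	if (is_wep_rid && !capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* zeroed: a partial fill can't expose old heap contents */
	return kzalloc(RIDSIZE, GFP_KERNEL);
}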
+4344,9 @@ il4965_synchronize_irq(struct il_priv *il) } static void -il4965_irq_tasklet(struct il_priv *il) +il4965_irq_tasklet(unsigned long data) { + struct il_priv *il = (struct il_priv *)data; u32 inta, handled = 0; u32 inta_fh; unsigned long flags; @@ -6238,7 +6239,7 @@ il4965_setup_deferred_work(struct il_priv *il) timer_setup(&il->watchdog, il_bg_watchdog, 0); tasklet_init(&il->irq_tasklet, - (void (*)(unsigned long))il4965_irq_tasklet, + il4965_irq_tasklet, (unsigned long)il); } diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 73f7bbf742bc..746749f37996 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -699,7 +699,7 @@ il_eeprom_init(struct il_priv *il) u32 gp = _il_rd(il, CSR_EEPROM_GP); int sz; int ret; - u16 addr; + int addr; /* allocate eeprom */ sz = il->cfg->eeprom_size; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index dd387aba3317..e8a4d604b910 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -171,6 +171,9 @@ void iwl_leds_init(struct iwl_priv *priv) priv->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(priv->hw->wiphy)); + if (!priv->led.name) + return; + priv->led.brightness_set = iwl_led_brightness_set; priv->led.blink_set = iwl_led_blink_set; priv->led.max_brightness = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 3029e3f6de63..621cd7206b7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_station_priv *sta_priv = NULL; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; __le16 fc; u8 hdr_len; @@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, if (unlikely(!dev_cmd)) goto drop_unlock_priv; - memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = REPLY_TX; tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 87421807e040..cb5465d9c068 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,7 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
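The iwlegacy common.c type change above (u16 addr becoming int) fixes a loop-termination hazard: with a 16-bit induction variable and an EEPROM size that can reach 64 KiB, addr wraps back to 0 before it can exceed sz, and the loop never ends. A sketch of the corrected shape; demo_read_word() is an illustrative stand-in:

void demo_read_word(int addr);		/* hypothetical helper */

static void demo_read_eeprom(int sz)
{
	int addr;			/* was: u16 addr, which wraps at 65536 */

	for (addr = 0; addr < sz; addr += 2)
		demo_read_word(addr);
}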
* * Redistribution and use in source and binary forms, with or without @@ -1373,11 +1373,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, goto out; } - /* - * region register have absolute value so apply rxf offset after - * reading the registers - */ - offs += rxf_data.offset; + offs = rxf_data.offset; /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); @@ -2315,10 +2311,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) goto out; } - if (iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true)) { - IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n"); - goto out; - } + iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) @@ -2484,19 +2477,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, return 0; } -int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params, - bool stop) +void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop) { int ret = 0; - /* if the FW crashed or not debug monitor cfg was given, there is - * no point in changing the recording state - */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || - (!fwrt->trans->dbg.dest_tlv && - fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) - return 0; + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) + return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) @@ -2513,7 +2501,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, iwl_fw_set_dbg_rec_on(fwrt); } #endif - - return ret; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index e3b5dd34643f..2ac61629f46f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -263,9 +263,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params, - bool stop); +void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop); #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index c8972f6e38ba..5d546dac7814 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -225,6 +225,34 @@ enum iwl_nvm_channel_flags { NVM_CHANNEL_DC_HIGH = BIT(12), }; +/** + * enum iwl_reg_capa_flags - global flags applied for the whole regulatory + * domain. + * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * 2.4Ghz band is allowed. + * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * 5Ghz band is allowed. + * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. 
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. + */ +enum iwl_reg_capa_flags { + REG_CAPA_BF_CCD_LOW_BAND = BIT(0), + REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), + REG_CAPA_160MHZ_ALLOWED = BIT(2), + REG_CAPA_80MHZ_ALLOWED = BIT(3), + REG_CAPA_MCS_8_ALLOWED = BIT(4), + REG_CAPA_MCS_9_ALLOWED = BIT(5), + REG_CAPA_40MHZ_FORBIDDEN = BIT(7), + REG_CAPA_DC_HIGH_ENABLED = BIT(9), +}; + static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { @@ -1031,6 +1059,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, + u16 cap_flags, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1069,13 +1098,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; + /* + * cap_flags is per regulatory domain so apply it for every channel + */ + if (ch_idx >= NUM_2GHZ_CHANNELS) { + if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN) + flags |= NL80211_RRF_NO_HT40; + + if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_80MHZ; + + if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_160MHZ; + } + return flags; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info) + u16 geo_info, u16 cap) { int ch_idx; u16 ch_flags; @@ -1133,7 +1176,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, - ch_flags, cfg); + ch_flags, cap, + cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index b7e1ddf8f177..4eeedb41e9ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -103,7 +103,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info); + u16 geo_info, u16 cap); /** * struct iwl_nvm_section - describes an NVM section in memory. 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 28bdc9a9617e..f91197e4ae40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -66,7 +66,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops) + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP @@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd), - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); + cmd_pool_size, cmd_pool_align, + SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index a31408188ed0..1e85d59b9161 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -193,6 +193,18 @@ struct iwl_device_cmd { }; } __packed; +/** + * struct iwl_device_tx_cmd - buffer for TX command + * @hdr: the header + * @payload: the payload placeholder + * + * The actual structure is sized dynamically according to need. + */ +struct iwl_device_tx_cmd { + struct iwl_cmd_header hdr; + u8 payload[]; +} __packed; + #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) /* @@ -544,7 +556,7 @@ struct iwl_trans_ops { int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue); + struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); @@ -921,22 +933,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) return trans->ops->dump_data(trans, dump_mask); } -static inline struct iwl_device_cmd * +static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { - return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); + return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, - struct iwl_device_cmd *dev_cmd) + struct iwl_device_tx_cmd *dev_cmd) { kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue) + struct iwl_device_tx_cmd *dev_cmd, int queue) { if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; @@ -1239,7 +1251,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops); + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 60aff2ecec12..58df25e2fb32 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -154,5 +154,6 @@ #define 
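The iwl_trans_alloc() change above parameterizes the per-transport command pool: each bus/generation passes its own object size and alignment into kmem_cache_create(), and (together with the iwl-trans.h hunk) allocation switches to kmem_cache_zalloc() so callers no longer need a follow-up memset(). A sketch of that pairing; the names are illustrative:

#include <linux/slab.h>

static struct kmem_cache *demo_pool_create(const char *name,
					   unsigned int size,
					   unsigned int align)
{
	/* size/align chosen by the caller per device generation */
	return kmem_cache_create(name, size, align, SLAB_HWCACHE_ALIGN, NULL);
}

static void *demo_pool_alloc(struct kmem_cache *pool)
{
	/* zeroed at the pool, so no per-caller memset() */
	return kmem_cache_zalloc(pool, GFP_ATOMIC);
}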
IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT false #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 +#define IWL_MVM_USE_NSSN_SYNC 0 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 9f4b117db9d7..d47f76890cf9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -8,6 +8,7 @@ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -389,6 +391,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) if (req != mvm->ftm_initiator.req) return; + iwl_mvm_ftm_reset(mvm); + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, LOCATION_GROUP, 0), 0, sizeof(cmd), &cmd)) @@ -502,7 +506,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) { - IWL_ERR(mvm, "Got FTM response but have no request?\n"); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index d9eb2b286438..c54fe6650018 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -514,6 +514,18 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) struct iwl_phy_cfg_cmd phy_cfg_cmd; enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; + if (iwl_mvm_has_unified_ucode(mvm) && + !mvm->trans->cfg->tx_with_siso_diversity) { + return 0; + } else if (mvm->trans->cfg->tx_with_siso_diversity) { + /* + * TODO: currently we don't set the antenna but letting the NIC + * to decide which antenna to use. This should come from BIOS. 
+ */ + phy_cfg_cmd.phy_cfg = + cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); + } + /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); @@ -1169,7 +1181,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { - return -ENOENT; + return 0; } #endif /* CONFIG_ACPI */ @@ -1344,12 +1356,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; - - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; } + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index d104da9170ca..72c4b2b8399d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -129,6 +129,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(mvm->hw->wiphy)); + if (!mvm->led.name) + return -ENOMEM; + mvm->led.brightness_set = iwl_led_brightness_set; mvm->led.max_brightness = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d31f96c3f925..6ca087ffd163 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. 
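The dvm and mvm led.c hunks in this series both add the same missing check: kasprintf() allocates and can return NULL, yet the result was used as a LED class device name unconditionally. A sketch of the guarded form; struct demo_led is an illustrative stand-in:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_led {
	char *name;
};

static int demo_led_init(struct demo_led *led, const char *wiphy_name)
{
	led->name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name);
	if (!led->name)
		return -ENOMEM;

	/* ... register the LED class device using led->name ... */
	return 0;
}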
* * Redistribution and use in source and binary forms, with or without @@ -256,7 +254,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), - __le16_to_cpu(resp->geo_info)); + __le16_to_cpu(resp->geo_info), + __le16_to_cpu(resp->cap)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); @@ -742,6 +741,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) return ret; } +static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + if (likely(sta)) { + if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) + return; + } else { + if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) + return; + } + + ieee80211_free_txskb(mvm->hw, skb); +} + static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -785,14 +798,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } } - if (sta) { - if (iwl_mvm_tx_skb(mvm, skb, sta)) - goto drop; - return; - } - - if (iwl_mvm_tx_skb_non_sta(mvm, skb)) - goto drop; + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); @@ -842,10 +848,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) break; } - if (!txq->sta) - iwl_mvm_tx_skb_non_sta(mvm, skb); - else - iwl_mvm_tx_skb(mvm, skb, txq->sta); + iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); @@ -2020,7 +2023,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); - if (IS_ERR(sta)) { + if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5ca50f39a023..5f1ecbb6fb71 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1508,8 +1508,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta); +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 945c1ea5cda8..ed367b0a185c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -309,7 +309,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) } /* PHY_SKU section is mandatory in B0 */ - if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { + if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT && + !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { IWL_ERR(mvm, "Can't parse phy_sku in B0, empty sections\n"); return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 8f50e2b121bd..24df3182ec9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, (vht_ena && (vht_cap->cap & 
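The new iwl_mvm_tx_skb() helper above encodes one ownership rule in one place: whichever transmit path is taken (station or non-station), a failed transmit frees the skb exactly once, so neither call site can leak it or free it twice. A minimal sketch of that contract; the demo_* functions are illustrative stand-ins:

#include <linux/skbuff.h>

struct demo_sta;				/* opaque, illustrative */
int demo_tx_sta(struct sk_buff *skb, struct demo_sta *sta);
int demo_tx_non_sta(struct sk_buff *skb);
void demo_free_txskb(struct sk_buff *skb);

static void demo_tx(struct sk_buff *skb, struct demo_sta *sta)
{
	int ret = sta ? demo_tx_sta(skb, sta) : demo_tx_non_sta(skb);

	/* on any failure the skb is consumed here, never by the caller */
	if (ret)
		demo_free_txskb(skb);
}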
IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - /* consider our LDPC support in case of HE */ + /* consider LDPC support in case of HE */ + if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + if (sband->iftype_data && sband->iftype_data->he_cap.has_he && !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) @@ -350,7 +354,13 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, u16 size = le32_to_cpu(notif->amsdu_size); int i; - if (WARN_ON(sta->max_amsdu_len < size)) + /* + * In debug sta->max_amsdu_len < size + * so also check with orig_amsdu_len which holds the original + * data before debugfs changed the value + */ + if (WARN_ON(sta->max_amsdu_len < size && + mvmsta->orig_amsdu_len < size)) goto out; mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 0ad8ed23a455..5ee33c8ae9d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -60,6 +60,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include #include #include #include "iwl-trans.h" @@ -357,7 +358,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); len = le16_to_cpu(rx_res->byte_count); - rx_pkt_status = le32_to_cpup((__le32 *) + rx_pkt_status = get_unaligned_le32((__le32 *) (pkt->data + sizeof(*rx_res) + len)); /* Dont use dev_alloc_skb(), we'll have enough headroom once diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 77b03b757193..a6e2a30eb310 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, - .metadata.sync = 0, - .nssn_sync.baid = baid, - .nssn_sync.nssn = nssn, - }; + if (IWL_MVM_USE_NSSN_SYNC) { + struct iwl_mvm_rss_sync_notif notif = { + .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, + .metadata.sync = 0, + .nssn_sync.baid = baid, + .nssn_sync.nssn = nssn, + }; - iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); + iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, + sizeof(notif)); + } } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index fcafa22ec6ce..8aa567d7912c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1220,7 +1220,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); - cmd_size += num_channels; + cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 
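The mvm rx.c one-liner above replaces le32_to_cpup() with get_unaligned_le32() because the status word sits at pkt->data + header + payload length, an offset with no 4-byte alignment guarantee; dereferencing a __le32 pointer there can fault on strict-alignment architectures. A sketch of the accessor; demo_read_status() is illustrative:

#include <asm/unaligned.h>
#include <linux/types.h>

static u32 demo_read_status(const u8 *data, size_t hdr_len, size_t payload_len)
{
	/* safe at any byte offset; a plain load where alignment is free */
	return get_unaligned_le32(data + hdr_len + payload_len);
}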
b3768d5d852a..8ad2d889179c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -3321,6 +3321,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, igtk_cmd.sta_id = cpu_to_le32(sta_id); if (remove_key) { + /* This is a valid situation for IGTK */ + if (sta_id == IWL_MVM_INVALID_STA) + return 0; + igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); } else { struct ieee80211_key_seq seq; @@ -3575,9 +3579,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); - if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index f0c539b37ea7..a630e4edd9b4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -731,7 +731,8 @@ static struct thermal_zone_device_ops tzone_ops = { static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) { int i; - char name[] = "iwlwifi"; + char name[16]; + static atomic_t counter = ATOMIC_INIT(0); if (!iwl_mvm_is_tt_in_fw(mvm)) { mvm->tz_device.tzone = NULL; @@ -741,6 +742,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); mvm->tz_device.tzone = thermal_zone_device_register(name, IWL_MAX_DTS_TRIPS, IWL_WRITABLE_TRIPS_MSK, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8a059da7a1fa..2b92980a49e6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -487,13 +487,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ -static struct iwl_device_cmd * +static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); @@ -501,11 +501,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (unlikely(!dev_cmd)) return NULL; - /* Make sure we zero enough of dev_cmd */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); - - memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { @@ -594,7 +589,7 @@ out: } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, - struct iwl_device_cmd *cmd) + struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); @@ -713,7 +708,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct 
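The tt.c hunk above fixes registration on multi-NIC systems: thermal zone names must be unique, and every iwlwifi instance used the literal "iwlwifi", so the second device failed to register. The fix suffixes a driver-global counter. A sketch of the same idea (shown with a bounded snprintf(); the 16-byte buffer mirrors the driver's THERMAL_NAME_LENGTH constraint):

#include <linux/atomic.h>
#include <linux/kernel.h>

static void demo_tz_name(char name[16])
{
	static atomic_t counter = ATOMIC_INIT(0);

	/* yields "iwlwifi_1", "iwlwifi_2", ...; wraps after 255 */
	snprintf(name, 16, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
}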
ieee80211_tx_info info; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; @@ -935,7 +930,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); - max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid); + /* + * Take the min of ieee80211 station and mvm station + */ + max_amsdu_len = + min_t(unsigned int, sta->max_amsdu_len, + iwl_mvm_max_amsdu_size(mvm, sta, tid)); /* * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not @@ -1070,7 +1070,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; @@ -1146,7 +1146,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); - return 0; + return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { @@ -1198,8 +1198,8 @@ drop: return -1; } -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta) +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 040cec17d3ad..b0b7eca1754e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* same thing for QuZ... 
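The iwl_mvm_tx_tso() change above stops trusting a single source for the A-MSDU limit: the effective size is the tighter of the mac80211 per-station limit and the driver's own per-TID calculation. A one-line sketch of the clamp:

#include <linux/kernel.h>

static unsigned int demo_max_amsdu(unsigned int sta_limit,
				   unsigned int drv_limit)
{
	/* honour whichever constraint is stricter */
	return min_t(unsigned int, sta_limit, drv_limit);
}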
*/ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_quz_hr; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_quz_hr; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 1047d48beaa5..9b5b96e34456 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -305,7 +305,7 @@ struct iwl_cmd_meta { #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { - struct iwl_device_cmd *cmd; + void *cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; @@ -690,7 +690,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx); @@ -1111,7 +1111,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 19dd075f2f63..64c74acadb99 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1429,6 +1429,7 @@ out_err: static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct napi_struct *napi; struct iwl_rxq *rxq; u32 r, i, count = 0; bool emergency = false; @@ -1534,8 +1535,16 @@ out: if (unlikely(emergency && count)) iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); - if (rxq->napi.poll) - napi_gro_flush(&rxq->napi, false); + napi = &rxq->napi; + if (napi->poll) { + napi_gro_flush(napi, false); + + if 
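The pcie/rx.c hunk above addresses a GRO subtlety: napi_gro_flush() pushes held flows out of GRO, but completed skbs may be parked on napi->rx_list for batched delivery at napi_complete_done(), and this driver flushes outside a normal NAPI poll cycle, so it has to drain that list itself. A sketch of the drain, assuming the same napi fields the patch touches:

#include <linux/netdevice.h>

static void demo_napi_flush(struct napi_struct *napi)
{
	if (!napi->poll)
		return;

	napi_gro_flush(napi, false);

	/* deliver what GRO batched for napi_complete_done() */
	if (napi->rx_count) {
		netif_receive_skb_list(&napi->rx_list);
		INIT_LIST_HEAD(&napi->rx_list);
		napi->rx_count = 0;
	}
}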
(napi->rx_count) { + netif_receive_skb_list(&napi->rx_list); + INIT_LIST_HEAD(&napi->rx_list); + napi->rx_count = 0; + } + } iwl_pcie_rxq_restock(trans, rxq); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index ca3bb4d65b00..df8455f14e4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -57,24 +57,6 @@ #include "internal.h" #include "fw/dbg.h" -static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) -{ - iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - udelay(20); - iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_PG_EN | - HPM_HIPM_GEN_CFG_CR_SLP_EN); - udelay(20); - iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - - iwl_trans_sw_reset(trans); - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - return 0; -} - /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) @@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && - trans->cfg->integrated) { - ret = iwl_pcie_gen2_force_power_gating(trans); - if (ret) - return ret; - } - ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 6961f00ff812..c76d26708e65 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -79,6 +79,7 @@ #include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" +#include "fw/api/tx.h" #include "internal.h" #include "iwl-fh.h" @@ -1783,6 +1784,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) return 0; } +static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) +{ + int ret; + + ret = iwl_finish_nic_init(trans, trans->trans_cfg); + if (ret < 0) + return ret; + + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + udelay(20); + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_PG_EN | + HPM_HIPM_GEN_CFG_CR_SLP_EN); + udelay(20); + iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + + iwl_trans_pcie_sw_reset(trans); + + return 0; +} + static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1802,6 +1826,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) iwl_trans_pcie_sw_reset(trans); + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + trans->cfg->integrated) { + err = iwl_pcie_gen2_force_power_gating(trans); + if (err) + return err; + } + err = iwl_pcie_apm_init(trans); if (err) return err; @@ -3432,19 +3463,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - int ret, addr_size; + int ret, addr_size, txcmd_size, txcmd_align; + const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; + + if (!cfg_trans->gen2) { + ops = &trans_ops_pcie; + txcmd_size = sizeof(struct iwl_tx_cmd); + txcmd_align = sizeof(void *); + } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + txcmd_size = sizeof(struct iwl_tx_cmd_gen2); + txcmd_align = 64; + } else { + 
txcmd_size = sizeof(struct iwl_tx_cmd_gen3); + txcmd_align = 128; + } + + txcmd_size += sizeof(struct iwl_cmd_header); + txcmd_size += 36; /* biggest possible 802.11 header */ + + /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ + if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) + return ERR_PTR(-EINVAL); ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); - if (cfg_trans->gen2) - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie_gen2); - else - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie); - + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + txcmd_size, txcmd_align); if (!trans) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index d80f71f82a6d..ff4c34d7b74f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -243,7 +243,8 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, struct iwl_device_cmd *dev_cmd) + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -371,7 +372,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -403,6 +404,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, @@ -456,7 +461,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -468,6 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, dma_addr_t tb_phys; int len, tb1_len, tb2_len; void *tb1_addr; + struct sk_buff *frag; tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); @@ -495,6 +501,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. 
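The size/alignment selection above carries an invariant worth spelling out: the new WARN_ON() insists that, on gen2 hardware, txcmd_size stays below txcmd_align. Because the slab pool places each command at a txcmd_align-multiple address and 64/128 both divide PAGE_SIZE, every command fits inside a single aligned slot and therefore inside a single page, which is why the "No need for _with_wa()" comments can promise the DMA engine never sees a command crossing a page boundary. A predicate sketching that check:

#include <asm/page.h>
#include <linux/types.h>

static bool demo_cmd_fits_one_page(unsigned int size, unsigned int align)
{
	/* an aligned object smaller than its slot cannot straddle a page */
	return size < align && (PAGE_SIZE % align) == 0;
}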
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -516,6 +526,19 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; + skb_walk_frags(skb, frag) { + tb_phys = dma_map_single(trans->dev, frag->data, + skb_headlen(frag), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag)); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, + frag->data, + skb_headlen(frag)); + if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta)) + goto out_err; + } + return tfd; out_err: @@ -526,7 +549,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { @@ -566,7 +589,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, } int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_meta *out_meta; @@ -591,7 +614,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 4806a04cec8c..d3b58334e13e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[txq->write_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, int read_ptr = txq->read_ptr; u8 sta_id = 0; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[read_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); @@ -1196,7 +1196,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct iwl_device_cmd *dev_cmd_ptr; + struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); @@ -2099,7 +2099,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void 
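
The skb_walk_frags() loop added above gives each skb chained on the frag_list its own DMA mapping. A kernel-style sketch of the same walk, with a hypothetical helper name and the TFD bookkeeping and unmap-on-error paths elided:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int map_frag_list(struct device *dev, struct sk_buff *skb)
{
	struct sk_buff *frag;

	skb_walk_frags(skb, frag) {
		/* each chained skb contributes its own linear area */
		dma_addr_t pa = dma_map_single(dev, frag->data,
					       skb_headlen(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, pa))
			return -ENOMEM;
		/* hand (pa, skb_headlen(frag)) to the hardware here */
	}
	return 0;
}
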
*)dev_cmd->payload; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; @@ -2281,7 +2282,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); @@ -2291,7 +2293,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; @@ -2348,7 +2350,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c index 0094b1d2b577..3ec46f48cfde 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ap.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c @@ -2508,7 +2508,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap, sta->supported_rates[0] = 2; if (sta->tx_supp_rates & WLAN_RATE_2M) sta->supported_rates[1] = 4; - if (sta->tx_supp_rates & WLAN_RATE_5M5) + if (sta->tx_supp_rates & WLAN_RATE_5M5) sta->supported_rates[2] = 11; if (sta->tx_supp_rates & WLAN_RATE_11M) sta->supported_rates[3] = 22; diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 40a8b941ad5c..e753f43e0162 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -1361,7 +1361,8 @@ static int ezusb_init(struct hermes *hw) int retval; BUG_ON(in_interrupt()); - BUG_ON(!upriv); + if (!upriv) + return -EINVAL; upriv->reply_count = 0; /* Write the MAGIC number on the simulated registers to keep @@ -1608,9 +1609,9 @@ static int ezusb_probe(struct usb_interface *interface, /* set up the endpoint information */ /* check out the endpoints */ - iface_desc = &interface->altsetting[0].desc; + iface_desc = &interface->cur_altsetting->desc; for (i = 0; i < iface_desc->bNumEndpoints; ++i) { - ep = &interface->altsetting[0].endpoint[i].desc; + ep = &interface->cur_altsetting->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep)) { /* we found a bulk in endpoint */ diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 57edfada0665..4e3de684928b 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) int hw, ap, ap_max = ie[1]; u8 hw_rate; + if (ap_max > MAX_RATES) { + lbs_deb_assoc("invalid rates\n"); + return tlv; + } /* Advance past IE header */ ie += 2; @@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, struct cmd_ds_802_11_ad_hoc_join cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; + int hw, i; + u8 rates_max; + u8 *rates; /* TODO: set preamble based on scan result */ ret = lbs_set_radio(priv, 
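
Several hunks in this series (orinoco_usb above; rtl8xxxu, rsi and zd1211rw below) switch from altsetting[0] to cur_altsetting: altsetting[0] is merely the first descriptor the device reported, not necessarily the setting in use, so indexing it can walk the wrong endpoint array. A sketch of the safe pattern, with a hypothetical helper name:

#include <linux/usb.h>

static int find_bulk_in(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep))
			return ep->bEndpointAddress;
	}
	return -ENODEV;
}
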
preamble, 1); @@ -1775,9 +1782,14 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, if (!rates_eid) { lbs_add_rates(cmd.bss.rates); } else { - int hw, i; - u8 rates_max = rates_eid[1]; - u8 *rates = cmd.bss.rates; + rates_max = rates_eid[1]; + if (rates_max > MAX_RATES) { + lbs_deb_join("invalid rates"); + rcu_read_unlock(); + ret = -EINVAL; + goto out; + } + rates = cmd.bss.rates; for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { u8 hw_rate = lbs_rates[hw].bitrate / 5; for (i = 0; i < rates_max; i++) { diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 242d8845da3f..30f1025ecb9b 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1179,6 +1179,10 @@ static int if_sdio_probe(struct sdio_func *func, spin_lock_init(&card->lock); card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0); + if (unlikely(!card->workqueue)) { + ret = -ENOMEM; + goto err_queue; + } INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); init_waitqueue_head(&card->pwron_waitq); @@ -1230,6 +1234,7 @@ err_activate_card: lbs_remove_card(priv); free: destroy_workqueue(card->workqueue); +err_queue: while (card->packets) { packet = card->packets; card->packets = card->packets->next; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index a9657ae6d782..d14e55e3c9da 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt); + adapter->is_up = true; goto done; err_add_intf: @@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) mwifiex_deauthenticate(priv, NULL); mwifiex_uninit_sw(adapter); + adapter->is_up = false; if (adapter->if_ops.down_dev) adapter->if_ops.down_dev(adapter); @@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) if (!adapter) return 0; - mwifiex_uninit_sw(adapter); + if (adapter->is_up) + mwifiex_uninit_sw(adapter); if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 095837fba300..fa5634af40f7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1017,6 +1017,7 @@ struct mwifiex_adapter { /* For synchronizing FW initialization with device lifecycle. */ struct completion *fw_done; + bool is_up; bool ext_scan; u8 fw_api_ver; @@ -1294,19 +1295,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len) return pos; } -/* This function return interface number with the same bss_type. - */ -static inline u8 -mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type) -{ - u8 i, num = 0; - - for (i = 0; i < adapter->priv_num; i++) - if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type) - num++; - return num; -} - /* * This function returns the correct private structure pointer based * upon the BSS type and BSS number. 
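
Both libertas fixes above clamp a peer-controlled rates count before it drives a copy loop. A compilable sketch of the check; the MAX_RATES value is illustrative:

#include <stdint.h>
#include <string.h>

#define MAX_RATES 14

static int copy_ie_rates(uint8_t dst[MAX_RATES], const uint8_t *ie)
{
	uint8_t n = ie[1];	/* IE length byte, set by the peer */

	if (n > MAX_RATES)
		return -1;	/* reject before it can overrun dst */
	memcpy(dst, ie + 2, n);	/* safe: n <= MAX_RATES */
	return n;
}
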
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index eff06d59e9df..fc1706d0647d 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -687,8 +687,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) skb_put(skb, MAX_EVENT_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, - PCI_DMA_FROMDEVICE)) + PCI_DMA_FROMDEVICE)) { + kfree_skb(skb); + kfree(card->evtbd_ring_vbase); return -1; + } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -1029,8 +1032,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) } skb_put(skb, MWIFIEX_UPLD_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, - PCI_DMA_FROMDEVICE)) + PCI_DMA_FROMDEVICE)) { + kfree_skb(skb); return -1; + } card->cmdrsp_buf = skb; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 593c594982cb..59f0651d148b 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2886,6 +2886,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, vs_param_set->header.len = cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) & 0x00FF) + 2); + if (le16_to_cpu(vs_param_set->header.len) > + MWIFIEX_MAX_VSIE_LEN) { + mwifiex_dbg(priv->adapter, ERROR, + "Invalid param length!\n"); + break; + } + memcpy(vs_param_set->ie, priv->vs_ie[id].ie, le16_to_cpu(vs_param_set->header.len)); *buffer += le16_to_cpu(vs_param_set->header.len) + diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 24c041dad9f6..fec38b6e86ff 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev) return 0; } + if (!adapter->is_up) + return -EBUSY; + mwifiex_enable_wake(adapter); /* Enable the Host Sleep */ @@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) struct sdio_func *func = card->func; int ret; + /* Prepare the adapter for the reset. */ mwifiex_shutdown_sw(adapter); - - /* power cycle the adapter */ - sdio_claim_host(func); - mmc_hw_reset(func->card->host); - sdio_release_host(func); - - /* Previous save_adapter won't be valid after this. We will cancel - * pending work requests. - */ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); - ret = mwifiex_reinit_sw(adapter); - if (ret) - dev_err(&func->dev, "reinit failed: %d\n", ret); + /* Run a HW reset of the SDIO interface. 
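
The new adapter->is_up flag gates teardown and suspend on whether software init actually completed. A compilable toy model of the lifecycle pattern; all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct adapter { bool is_up; };

static void uninit(struct adapter *a) { a->is_up = false; puts("uninit"); }

static void remove_card(struct adapter *a)
{
	if (a->is_up)	/* never uninit a device that never came up */
		uninit(a);
}

int main(void)
{
	struct adapter a = { .is_up = false };

	remove_card(&a);	/* no-op: init never ran */
	a.is_up = true;		/* set only after init fully succeeds */
	remove_card(&a);	/* prints "uninit" exactly once */
	return 0;
}
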
*/ + sdio_claim_host(func); + ret = mmc_hw_reset(func->card->host); + sdio_release_host(func); + + switch (ret) { + case 1: + dev_dbg(&func->dev, "SDIO HW reset asynchronous\n"); + complete_all(adapter->fw_done); + break; + case 0: + ret = mwifiex_reinit_sw(adapter); + if (ret) + dev_err(&func->dev, "reinit failed: %d\n", ret); + break; + default: + dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret); + break; + } } /* This function read/write firmware */ diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 74e50566db1f..fbfa0b15d0c8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -229,6 +229,15 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, "11D: skip setting domain info in FW\n"); return 0; } + + if (country_ie_len > + (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { + rcu_read_unlock(); + mwifiex_dbg(priv->adapter, ERROR, + "11D: country_ie_len overflow!, deauth AP\n"); + return -EINVAL; + } + memcpy(priv->adapter->country_code, &country_ie[2], 2); domain_info->country_code[0] = country_ie[2]; @@ -272,8 +281,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, priv->scan_block = false; if (bss) { - if (adapter->region_code == 0x00) - mwifiex_process_country_ie(priv, bss); + if (adapter->region_code == 0x00 && + mwifiex_process_country_ie(priv, bss)) + return -EINVAL; /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 09313047beed..f8f282ce39bd 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -894,7 +894,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, u8 *peer, *pos, *end; u8 i, action, basic; u16 cap = 0; - int ie_len = 0; + int ies_len = 0; if (len < (sizeof(struct ethhdr) + 3)) return; @@ -916,7 +916,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, pos = buf + sizeof(struct ethhdr) + 4; /* payload 1+ category 1 + action 1 + dialog 1 */ cap = get_unaligned_le16(pos); - ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN; + ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN; pos += 2; break; @@ -926,7 +926,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/ pos = buf + sizeof(struct ethhdr) + 6; cap = get_unaligned_le16(pos); - ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN; + ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN; pos += 2; break; @@ -934,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN)) return; pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN; - ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN; + ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN; break; default: mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n"); @@ -947,65 +947,104 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, sta_ptr->tdls_cap.capab = cpu_to_le16(cap); - for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) { - if (pos + 2 + pos[1] > end) + for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) { + u8 ie_len = pos[1]; + + if (pos + 2 + ie_len > end) break; 
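
The reworked reset handler above distinguishes three outcomes of mmc_hw_reset(). A kernel-style sketch of that dispatch, with the return-value meanings as used in the patch and a hypothetical helper name:

#include <linux/device.h>

static void handle_hw_reset(struct device *dev, int ret, int (*reinit)(void))
{
	switch (ret) {
	case 1:		/* reset runs asynchronously; card will re-probe */
		dev_dbg(dev, "HW reset asynchronous\n");
		break;
	case 0:		/* synchronous success: re-init software now */
		if (reinit())
			dev_err(dev, "reinit failed\n");
		break;
	default:	/* negative: hard failure */
		dev_err(dev, "HW reset failed: %d\n", ret);
		break;
	}
}
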
switch (*pos) { case WLAN_EID_SUPP_RATES: - sta_ptr->tdls_cap.rates_len = pos[1]; - for (i = 0; i < pos[1]; i++) + if (ie_len > sizeof(sta_ptr->tdls_cap.rates)) + return; + sta_ptr->tdls_cap.rates_len = ie_len; + for (i = 0; i < ie_len; i++) sta_ptr->tdls_cap.rates[i] = pos[i + 2]; break; case WLAN_EID_EXT_SUPP_RATES: + if (ie_len > sizeof(sta_ptr->tdls_cap.rates)) + return; basic = sta_ptr->tdls_cap.rates_len; - for (i = 0; i < pos[1]; i++) + if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic) + return; + for (i = 0; i < ie_len; i++) sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2]; - sta_ptr->tdls_cap.rates_len += pos[1]; + sta_ptr->tdls_cap.rates_len += ie_len; break; case WLAN_EID_HT_CAPABILITY: - memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos, + if (ie_len != sizeof(struct ieee80211_ht_cap)) + return; + /* copy the ie's value into ht_capb*/ + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2, sizeof(struct ieee80211_ht_cap)); sta_ptr->is_11n_enabled = 1; break; case WLAN_EID_HT_OPERATION: - memcpy(&sta_ptr->tdls_cap.ht_oper, pos, + if (ie_len != sizeof(struct ieee80211_ht_operation)) + return; + /* copy the ie's value into ht_oper*/ + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2, sizeof(struct ieee80211_ht_operation)); break; case WLAN_EID_BSS_COEX_2040: + if (ie_len != sizeof(pos[2])) + return; sta_ptr->tdls_cap.coex_2040 = pos[2]; break; case WLAN_EID_EXT_CAPABILITY: + if (ie_len < sizeof(struct ieee_types_header)) + return; + if (ie_len > 8) + return; memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos, sizeof(struct ieee_types_header) + - min_t(u8, pos[1], 8)); + min_t(u8, ie_len, 8)); break; case WLAN_EID_RSN: + if (ie_len < sizeof(struct ieee_types_header)) + return; + if (ie_len > IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header)) + return; memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos, sizeof(struct ieee_types_header) + - min_t(u8, pos[1], IEEE_MAX_IE_SIZE - + min_t(u8, ie_len, IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header))); break; case WLAN_EID_QOS_CAPA: + if (ie_len != sizeof(pos[2])) + return; sta_ptr->tdls_cap.qos_info = pos[2]; break; case WLAN_EID_VHT_OPERATION: - if (priv->adapter->is_hw_11ac_capable) - memcpy(&sta_ptr->tdls_cap.vhtoper, pos, + if (priv->adapter->is_hw_11ac_capable) { + if (ie_len != + sizeof(struct ieee80211_vht_operation)) + return; + /* copy the ie's value into vhtoper*/ + memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2, sizeof(struct ieee80211_vht_operation)); + } break; case WLAN_EID_VHT_CAPABILITY: if (priv->adapter->is_hw_11ac_capable) { - memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos, + if (ie_len != sizeof(struct ieee80211_vht_cap)) + return; + /* copy the ie's value into vhtcap*/ + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2, sizeof(struct ieee80211_vht_cap)); sta_ptr->is_11ac_enabled = 1; } break; case WLAN_EID_AID: - if (priv->adapter->is_hw_11ac_capable) + if (priv->adapter->is_hw_11ac_capable) { + if (ie_len != sizeof(u16)) + return; sta_ptr->tdls_cap.aid = get_unaligned_le16((pos + 2)); + } + break; default: break; } diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 41f0231376c0..132f9e8ed68c 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, "WMM Parameter Set Count: %d\n", wmm_param_ie->qos_info_bitmap & mask); + if (wmm_param_ie->vend_hdr.len + 2 > + sizeof(struct ieee_types_wmm_parameter)) + break; + memcpy((u8 *) 
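
The TDLS hunk above applies one rule to every fixed-layout IE: the length byte must match the destination struct exactly, and the copy starts at pos + 2, past the two-byte ID/length header (the old code copied from pos and dragged the header into the struct). A compilable sketch with a stand-in struct sized like ieee80211_ht_cap:

#include <stdint.h>
#include <string.h>

struct ht_cap { uint8_t data[26]; };	/* stand-in for ieee80211_ht_cap */

static int parse_ht_cap(struct ht_cap *out, const uint8_t *pos)
{
	uint8_t ie_len = pos[1];

	if (ie_len != sizeof(*out))
		return -1;			/* malformed IE: reject */
	memcpy(out, pos + 2, sizeof(*out));	/* value starts after header */
	return 0;
}
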
&priv->curr_bss_params.bss_descriptor. wmm_ie, wmm_param_ie, wmm_param_ie->vend_hdr.len + 2); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 8f69d00bd940..6249a46c1976 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -448,10 +448,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, struct page *page = virt_to_head_page(data); int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; + struct skb_shared_info *shinfo = skb_shinfo(skb); - offset += q->buf_offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, - q->buf_size); + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { + offset += q->buf_offset; + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, + q->buf_size); + } if (more) return; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 8aec7ccf2d79..502814c26b33 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -367,8 +367,8 @@ enum mt76u_in_ep { enum mt76u_out_ep { MT_EP_OUT_INBAND_CMD, - MT_EP_OUT_AC_BK, MT_EP_OUT_AC_BE, + MT_EP_OUT_AC_BK, MT_EP_OUT_AC_VI, MT_EP_OUT_AC_VO, MT_EP_OUT_HCCA, @@ -799,7 +799,8 @@ static inline int mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, int timeout) { - struct usb_device *udev = to_usb_device(dev->dev); + struct usb_interface *uintf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(uintf); struct mt76_usb *usb = &dev->usb; unsigned int pipe; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index c328192307c4..ff3f3d98b625 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1032,8 +1032,10 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta, if (idx && (cur_rate->idx != info->status.rates[i].idx || cur_rate->flags != info->status.rates[i].flags)) { i++; - if (i == ARRAY_SIZE(info->status.rates)) + if (i == ARRAY_SIZE(info->status.rates)) { + i--; break; + } info->status.rates[i] = *cur_rate; info->status.rates[i].count = 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index e07ce2c10013..111e38ff954a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -914,8 +914,10 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, if (idx && (cur_rate->idx != info->status.rates[i].idx || cur_rate->flags != info->status.rates[i].flags)) { i++; - if (i == ARRAY_SIZE(info->status.rates)) + if (i == ARRAY_SIZE(info->status.rates)) { + i--; break; + } info->status.rates[i] = *cur_rate; info->status.rates[i].count = 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 00a445d27599..65d404e61404 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -226,7 +226,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, u32 mac_rev; int ret; - mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops, + mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, &drv_ops); if (!mdev) return -ENOMEM; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 
b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index da5e0f9a8bae..8b26c6108186 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -39,7 +39,7 @@ static int mt76x2u_probe(struct usb_interface *intf, struct mt76_dev *mdev; int err; - mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops, + mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops, &drv_ops); if (!mdev) return -ENOMEM; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 20c6fe510e9d..05aa42bd9808 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -20,7 +20,8 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, void *buf, size_t len) { - struct usb_device *udev = to_usb_device(dev->dev); + struct usb_interface *uintf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(uintf); unsigned int pipe; int i, ret; @@ -235,7 +236,8 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base, static bool mt76u_check_sg(struct mt76_dev *dev) { - struct usb_device *udev = to_usb_device(dev->dev); + struct usb_interface *uintf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(uintf); return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && (udev->bus->no_sg_constraint || @@ -370,7 +372,8 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, struct urb *urb, usb_complete_t complete_fn, void *context) { - struct usb_device *udev = to_usb_device(dev->dev); + struct usb_interface *uintf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(uintf); unsigned int pipe; if (dir == USB_DIR_IN) @@ -952,6 +955,7 @@ int mt76u_init(struct mt76_dev *dev, .rd_rp = mt76u_rd_rp, .type = MT76_BUS_USB, }; + struct usb_device *udev = interface_to_usbdev(intf); struct mt76_usb *usb = &dev->usb; tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev); @@ -965,6 +969,8 @@ int mt76u_init(struct mt76_dev *dev, dev->bus = &mt76u_ops; dev->queue_ops = &usb_queue_ops; + dev_set_drvdata(&udev->dev, dev); + usb->sg_en = mt76u_check_sg(dev); return mt76u_set_endpoints(intf, usb); diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c index 06f5702ab4bd..d863ab4a66c9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/phy.c +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -213,7 +213,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev) do { val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION); - if (val && ~val) + if (val && val != 0xff) break; } while (--i); diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index dc0c7244b60e..c0c32805fb8d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -83,6 +83,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, struct qlink_cmd *cmd; struct qlink_resp *resp = NULL; struct sk_buff *resp_skb = NULL; + int resp_res = 0; u16 cmd_id; u8 mac_id; u8 vif_id; @@ -113,6 +114,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, } resp = (struct qlink_resp *)resp_skb->data; + resp_res = le16_to_cpu(resp->result); ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id, const_resp_size); if (ret) @@ -128,8 +130,8 @@ out: else consume_skb(resp_skb); - if (!ret && resp) - return 
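
The mt76_add_fragment() hunk a little further back guards skb_shinfo(skb)->frags, which holds at most MAX_SKB_FRAGS entries. A kernel-style sketch of the guard with a hypothetical wrapper name; silently dropping the overflowing fragment is the policy the patch chose:

#include <linux/skbuff.h>

static void add_rx_fragment(struct sk_buff *skb, struct page *page,
			    int offset, int len, int truesize)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags))
		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset,
				len, truesize);
}
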
qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result)); + if (!ret) + return qtnf_cmd_resp_result_decode(resp_res); pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n", mac_id, vif_id, cmd_id, ret); diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index b57c8c18a8d0..7846383c8828 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -171,8 +171,9 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, return -EPROTO; } - pr_debug("VIF%u.%u: BSSID:%pM status:%u\n", - vif->mac->macid, vif->vifid, join_info->bssid, status); + pr_debug("VIF%u.%u: BSSID:%pM chan:%u status:%u\n", + vif->mac->macid, vif->vifid, join_info->bssid, + le16_to_cpu(join_info->chan.chan.center_freq), status); if (status != WLAN_STATUS_SUCCESS) goto done; @@ -181,7 +182,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, if (!cfg80211_chandef_valid(&chandef)) { pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n", vif->mac->macid, vif->vifid, - chandef.chan->center_freq, + chandef.chan ? chandef.chan->center_freq : 0, chandef.center_freq1, chandef.center_freq2, chandef.width); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index 8ae318b5fe54..4824be0c6231 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -130,6 +130,8 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus) { + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + char card_id[64]; int ret; bus->fw_state = QTNF_FW_STATE_BOOT_DONE; @@ -137,7 +139,9 @@ int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus) if (ret) { pr_err("failed to attach core\n"); } else { - qtnf_debugfs_init(bus, DRV_NAME); + snprintf(card_id, sizeof(card_id), "%s:%s", + DRV_NAME, pci_name(priv->pdev)); + qtnf_debugfs_init(bus, card_id); qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index f1cdcd61c54a..c99f1912e266 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -5839,8 +5839,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21); rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40); } else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2x00_rt(rt2x00dev, RT6352)) { + rt2x00_rt(rt2x00dev, RT5392)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); @@ -5854,8 +5853,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); - rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002); - rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F); rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000); rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0); diff --git 
a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index ade057d868f7..5e9ce03067de 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1341,6 +1341,7 @@ struct rtl8xxxu_fileops { u8 has_s0s1:1; u8 has_tx_report:1; u8 gen2_thermal_meter:1; + u8 needs_full_init:1; u32 adda_1t_init; u32 adda_1t_path_on; u32 adda_2t_path_on_a; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index ceffe05bd65b..f3cd314d1a9c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1670,6 +1670,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = { .has_s0s1 = 1, .has_tx_report = 1, .gen2_thermal_meter = 1, + .needs_full_init = 1, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index c6c41fb962ff..3499b211dad5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3902,6 +3902,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) else macpower = true; + if (fops->needs_full_init) + macpower = false; + ret = fops->power_on(priv); if (ret < 0) { dev_warn(dev, "%s: Failed power on\n", __func__); @@ -5444,6 +5447,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); + usb_free_urb(urb); goto error; } @@ -5911,7 +5915,7 @@ static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv, u8 dir, xtype, num; int ret = 0; - host_interface = &interface->altsetting[0]; + host_interface = interface->cur_altsetting; interface_desc = &host_interface->desc; endpoints = interface_desc->bNumEndpoints; diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index f88d26535978..25335bd2873b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1061,13 +1061,15 @@ done: return ret; } -static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) +static void _rtl_pci_irq_tasklet(unsigned long data) { + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; _rtl_pci_tx_chk_waitq(hw); } -static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) +static void _rtl_pci_prepare_bcn_tasklet(unsigned long data) { + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1193,10 +1195,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, /*task */ tasklet_init(&rtlpriv->works.irq_tasklet, - (void (*)(unsigned long))_rtl_pci_irq_tasklet, + _rtl_pci_irq_tasklet, (unsigned long)hw); tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, - (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, + _rtl_pci_prepare_bcn_tasklet, (unsigned long)hw); INIT_WORK(&rtlpriv->works.lps_change_work, rtl_lps_change_work_callback); diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index c10432cd703e..8be31e0ad878 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -386,7 +386,7 @@ int rtl_regd_init(struct 
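
The rtlwifi change above gives the tasklet callbacks their real unsigned long signature instead of casting the function pointer at tasklet_init() time (casting the function pointer is undefined behaviour and trips control-flow-integrity checks). A minimal sketch of the correct pattern; the context struct is illustrative:

#include <linux/interrupt.h>

struct my_ctx { int pending; };

static void my_tasklet_fn(unsigned long data)
{
	struct my_ctx *ctx = (struct my_ctx *)data;	/* cast the data, */

	ctx->pending = 0;				/* not the function */
}

static void my_init(struct tasklet_struct *t, struct my_ctx *ctx)
{
	tasklet_init(t, my_tasklet_fn, (unsigned long)ctx);
}
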
ieee80211_hw *hw, struct wiphy *wiphy = hw->wiphy; struct country_code_to_enum_rd *country = NULL; - if (wiphy == NULL || &rtlpriv->regd == NULL) + if (!wiphy) return -EINVAL; /* init country_code from efuse channel plan */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index 917729807514..e17f70b4d199 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size) rxmcs == DESC92C_RATE11M) struct phy_status_rpt { + u8 padding[2]; u8 ch_corr[2]; u8 cck_sig_qual_ofdm_pwdb_all; u8 cck_agc_rpt_ofdm_cfosho_a; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 56cc3bc30860..f070f25bb735 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1540,6 +1540,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) * This is maybe necessary: * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb); */ + dev_kfree_skb(skb); + return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index c7f29a9be50d..146fe144f5f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1176,6 +1176,7 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; } void rtl92de_disable_interrupt(struct ieee80211_hw *hw) @@ -1185,7 +1186,7 @@ void rtl92de_disable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); - synchronize_irq(rtlpci->pdev->irq); + rtlpci->irq_enabled = false; } static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw) @@ -1351,7 +1352,7 @@ void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw) bcn_interval = mac->beacon_interval; atim_window = 2; - /*rtl92de_disable_interrupt(hw); */ + rtl92de_disable_interrupt(hw); rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); @@ -1371,9 +1372,9 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n", bcn_interval); - /* rtl92de_disable_interrupt(hw); */ + rtl92de_disable_interrupt(hw); rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); - /* rtl92de_enable_interrupt(hw); */ + rtl92de_enable_interrupt(hw); } void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 99e5cd9a5c86..1dbdddce0823 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -216,6 +216,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = { .led_control = rtl92de_led_control, .set_desc = rtl92de_set_desc, .get_desc = rtl92de_get_desc, + .is_tx_desc_closed = rtl92de_is_tx_desc_closed, .tx_polling = rtl92de_tx_polling, .enable_hw_sec = rtl92de_enable_hw_security_config, .set_key = rtl92de_set_key, diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 2494e1f118f8..92c9fb45f800 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -804,13 +804,15 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw, break; } } else { - struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(pdesc); + ret = GET_RX_DESC_OWN(p_desc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(pdesc); + ret = GET_RX_DESC_PKT_LEN(p_desc); + break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(p_desc); break; default: WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", @@ -821,6 +823,23 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw, return ret; } +bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN); + + /* a beacon packet will only use the first + * descriptor by defaut, and the own bit may not + * be cleared by the hardware + */ + if (own) + return false; + return true; +} + void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index 36820070fd76..635989e15282 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -715,6 +715,8 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val); u64 rtl92de_get_desc(struct ieee80211_hw *hw, u8 *p_desc, bool istx, u8 desc_name); +bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 4b59f3b46b28..348b0072cdd6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1021,8 +1021,10 @@ int rtl_usb_probe(struct usb_interface *intf, rtlpriv->hw = hw; rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32), GFP_KERNEL); - if (!rtlpriv->usb_data) + if (!rtlpriv->usb_data) { + ieee80211_free_hw(hw); return -ENOMEM; + } /* this spin lock must be initialized early */ spin_lock_init(&rtlpriv->locks.usb_lock); @@ -1083,6 +1085,7 @@ error_out2: _rtl_usb_io_handler_release(hw); usb_put_dev(udev); complete(&rtlpriv->firmware_loading_complete); + kfree(rtlpriv->usb_data); return -ENODEV; } EXPORT_SYMBOL(rtl_usb_probe); diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 793b40bdbf7c..3e95ad198912 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -1308,6 +1308,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; bool wl_hi_pri = false; u8 table_case, tdma_case; + u32 slot_type = 0; if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 || coex_stat->wl_hi_pri_task2) @@ -1318,14 +1319,16 @@ static void 
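
The rtl_usb_probe() fixes above complete the error-unwind ladder: the hw allocation is released when usb_data allocation fails, and usb_data is released on the later error exit. A compilable toy model of the reverse-order unwind; the two heap blocks stand in for the real resources:

#include <stdlib.h>

static int probe(int fail_late)
{
	void *hw = malloc(64);
	void *usb_data;

	if (!hw)
		return -1;
	usb_data = calloc(32, sizeof(unsigned int));
	if (!usb_data)
		goto err_free_hw;		/* leaked before the fix */
	if (fail_late)
		goto err_free_usb_data;		/* leaked before the fix */
	free(usb_data);
	free(hw);
	return 0;

err_free_usb_data:
	free(usb_data);
err_free_hw:
	free(hw);
	return -1;
}

int main(void) { return probe(0); }
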
rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) if (wl_hi_pri) { table_case = 15; if (coex_stat->bt_a2dp_exist && - !coex_stat->bt_pan_exist) + !coex_stat->bt_pan_exist) { + slot_type = TDMA_4SLOT; tdma_case = 11; - else if (coex_stat->wl_hi_pri_task1) + } else if (coex_stat->wl_hi_pri_task1) { tdma_case = 6; - else if (!coex_stat->bt_page) + } else if (!coex_stat->bt_page) { tdma_case = 8; - else + } else { tdma_case = 9; + } } else if (coex_stat->wl_connected) { table_case = 10; tdma_case = 10; @@ -1361,7 +1364,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); rtw_coex_table(rtwdev, table_case); - rtw_coex_tdma(rtwdev, false, tdma_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev) @@ -1475,13 +1478,13 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev) if (efuse->share_ant) { /* Shared-Ant */ + slot_type = TDMA_4SLOT; + if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) table_case = 10; else table_case = 9; - slot_type = TDMA_4SLOT; - if (coex_stat->wl_gl_busy) tdma_case = 13; else @@ -1585,13 +1588,14 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev) if (efuse->share_ant) { /* Shared-Ant */ + slot_type = TDMA_4SLOT; + if (coex_stat->bt_ble_exist) table_case = 26; else table_case = 9; if (coex_stat->wl_gl_busy) { - slot_type = TDMA_4SLOT; tdma_case = 13; } else { tdma_case = 14; @@ -1794,10 +1798,12 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + u32 slot_type = 0; if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->bt_a2dp_exist) { + slot_type = TDMA_4SLOT; table_case = 9; tdma_case = 11; } else { @@ -1818,7 +1824,7 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); rtw_coex_table(rtwdev, table_case); - rtw_coex_tdma(rtwdev, false, tdma_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index b082e2cc95f5..35dbdb3c4f1e 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -498,9 +498,6 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size, { struct sk_buff *skb = rsvd_pkt->skb; - if (rsvd_pkt->add_txdesc) - rtw_fill_rsvd_page_desc(rtwdev, skb); - if (page >= 1) memcpy(buf + page_margin + page_size * (page - 1), skb->data, skb->len); @@ -625,16 +622,37 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) { iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type); if (!iter) { - rtw_err(rtwdev, "fail to build rsvd packet\n"); + rtw_err(rtwdev, "failed to build rsvd packet\n"); goto release_skb; } + + /* Fill the tx_desc for the rsvd pkt that requires one. + * And iter->len will be added with size of tx_desc_sz. 
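
The coex hunks above stop dropping the 4-slot indication: slot_type is OR'ed into the tdma case argument instead of being computed and discarded. A compilable sketch of the flag packing; the flag bit and field widths are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TDMA_4SLOT 0x100u	/* illustrative high-bit flag */

static void coex_tdma(uint32_t arg)
{
	printf("case=%u 4slot=%s\n", (unsigned)(arg & 0xffu),
	       (arg & TDMA_4SLOT) ? "yes" : "no");
}

int main(void)
{
	uint32_t tdma_case = 11, slot_type = TDMA_4SLOT;

	coex_tdma(tdma_case | slot_type);	/* both survive the call */
	return 0;
}
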
+ */ + if (rsvd_pkt->add_txdesc) + rtw_fill_rsvd_page_desc(rtwdev, iter); + rsvd_pkt->skb = iter; rsvd_pkt->page = total_page; - if (rsvd_pkt->add_txdesc) + + /* Reserved page is downloaded via TX path, and TX path will + * generate a tx_desc at the header to describe length of + * the buffer. If we are not counting page numbers with the + * size of tx_desc added at the first rsvd_pkt (usually a + * beacon, firmware default refer to the first page as the + * content of beacon), we could generate a buffer which size + * is smaller than the actual size of the whole rsvd_page + */ + if (total_page == 0) { + if (rsvd_pkt->type != RSVD_BEACON) { + rtw_err(rtwdev, "first page should be a beacon\n"); + goto release_skb; + } total_page += rtw_len_to_page(iter->len + tx_desc_sz, page_size); - else + } else { total_page += rtw_len_to_page(iter->len, page_size); + } } if (total_page > rtwdev->fifo.rsvd_drv_pg_num) { @@ -647,13 +665,24 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, if (!buf) goto release_skb; + /* Copy the content of each rsvd_pkt to the buf, and they should + * be aligned to the pages. + * + * Note that the first rsvd_pkt is a beacon no matter what vif->type. + * And that rsvd_pkt does not require tx_desc because when it goes + * through TX path, the TX path will generate one for it. + */ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) { rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin, page, buf, rsvd_pkt); - page += rtw_len_to_page(rsvd_pkt->skb->len, page_size); - } - list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) + if (page == 0) + page += rtw_len_to_page(rsvd_pkt->skb->len + + tx_desc_sz, page_size); + else + page += rtw_len_to_page(rsvd_pkt->skb->len, page_size); + kfree_skb(rsvd_pkt->skb); + } return buf; @@ -706,6 +735,11 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) goto free; } + /* The last thing is to download the *ONLY* beacon again, because + * the previous tx_desc is to describe the total rsvd page. Download + * the beacon again to replace the TX desc header, and we will get + * a correct tx_desc for the beacon in the rsvd page. 
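
The comments above spell out the accounting rule: only the first reserved packet (the beacon) carries an extra tx_desc header, so only its length gets tx_desc_sz added before rounding up to pages; omitting that made the buffer smaller than the reserved page it describes. A compilable sketch of the computation with illustrative sizes:

#include <stdio.h>

static unsigned int len_to_page(unsigned int len, unsigned int page_size)
{
	return (len + page_size - 1) / page_size;	/* round up */
}

int main(void)
{
	const unsigned int page_size = 128, tx_desc_sz = 48;
	const unsigned int lens[] = { 260, 90, 200 };	/* beacon first */
	unsigned int i, total = 0;

	for (i = 0; i < 3; i++)
		total += len_to_page(lens[i] + (i == 0 ? tx_desc_sz : 0),
				     page_size);
	printf("reserved pages needed: %u\n", total);
	return 0;
}
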
+ */ ret = rtw_download_beacon(rtwdev, vif); if (ret) { rtw_err(rtwdev, "failed to download beacon\n"); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 6dd457741b15..88e2252bf8a2 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -556,8 +556,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) is_support_sgi = true; } else if (sta->ht_cap.ht_supported) { - ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) | - (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12); + ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | + (sta->ht_cap.mcs.rx_mask[0] << 12); if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = HT_STBC_EN; if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) @@ -567,6 +567,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) is_support_sgi = true; } + if (efuse->hw_cap.nss == 1) + ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; + if (hal->current_band_type == RTW_BAND_5G) { ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4; if (sta->vht_cap.vht_supported) { @@ -600,11 +603,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) wireless_set = 0; } - if (efuse->hw_cap.nss == 1) { - ra_mask &= RA_MASK_VHT_RATES_1SS; - ra_mask &= RA_MASK_HT_RATES_1SS; - } - switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; @@ -1020,7 +1018,8 @@ static int rtw_dump_hw_feature(struct rtw_dev *rtwdev) rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num); - if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE) + if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE || + efuse->hw_cap.nss > rtwdev->hal.rf_path_num) efuse->hw_cap.nss = rtwdev->hal.rf_path_num; rtw_dbg(rtwdev, RTW_DBG_EFUSE, @@ -1047,19 +1046,19 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) /* power on mac to read efuse */ ret = rtw_chip_efuse_enable(rtwdev); if (ret) - goto out; + goto out_unlock; ret = rtw_parse_efuse_map(rtwdev); if (ret) - goto out; + goto out_disable; ret = rtw_dump_hw_feature(rtwdev); if (ret) - goto out; + goto out_disable; ret = rtw_check_supported_rfe(rtwdev); if (ret) - goto out; + goto out_disable; if (efuse->crystal_cap == 0xff) efuse->crystal_cap = 0; @@ -1086,9 +1085,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 
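
The ra_mask hunk above indexes ht_cap.mcs.rx_mask[] by spatial-stream byte (index 0 holds the MCS 0-7 bitmap, index 1 the MCS 8-15 bitmap); the removed band-enum indices evaluated to the same 0 and 1 only by coincidence. A compilable sketch with illustrative mask values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rx_mask[10] = { 0xff, 0xff };	/* 1SS and 2SS MCS bitmaps */
	uint64_t ra_mask = 0;

	ra_mask |= (uint64_t)rx_mask[1] << 20;	/* MCS 8-15 */
	ra_mask |= (uint64_t)rx_mask[0] << 12;	/* MCS 0-7 */
	printf("ra_mask=0x%llx\n", (unsigned long long)ra_mask);
	return 0;
}
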
1 : 0; +out_disable: rtw_chip_efuse_disable(rtwdev); -out: +out_unlock: mutex_unlock(&rtwdev->mutex); return ret; } diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index d90928be663b..77a2bdee50fa 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -762,6 +762,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, while (count--) { skb = skb_dequeue(&ring->queue); + if (!skb) { + rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n", + count, hw_queue, bd_idx, ring->r.rp, cur_rp); + break; + } tx_data = rtw_pci_get_tx_data(skb); pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len, PCI_DMA_TODEVICE); diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index d3d3f40de75e..47d199d2e7dc 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -118,7 +118,7 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) { for (j = 0; j < RTW_RF_PATH_MAX; j++) - dm_info->cck_pd_lv[i][j] = 0; + dm_info->cck_pd_lv[i][j] = CCK_PD_LV0; } dm_info->cck_fa_avg = CCK_FA_AVG_RESET; @@ -461,7 +461,6 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) chip->ops->dpk_track(rtwdev); } -#define CCK_PD_LV_MAX 5 #define CCK_PD_FA_LV1_MIN 1000 #define CCK_PD_FA_LV0_MAX 500 @@ -471,10 +470,10 @@ static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev) u32 cck_fa_avg = dm_info->cck_fa_avg; if (cck_fa_avg > CCK_PD_FA_LV1_MIN) - return 1; + return CCK_PD_LV1; if (cck_fa_avg < CCK_PD_FA_LV0_MAX) - return 0; + return CCK_PD_LV0; return CCK_PD_LV_MAX; } @@ -494,15 +493,15 @@ static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev) u32 cck_fa_avg = dm_info->cck_fa_avg; if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL) - return 4; + return CCK_PD_LV4; if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL) - return 3; + return CCK_PD_LV3; if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL) - return 2; + return CCK_PD_LV2; if (cck_fa_avg > CCK_PD_FA_LV1_MIN) - return 1; + return CCK_PD_LV1; if (cck_fa_avg < CCK_PD_FA_LV0_MAX) - return 0; + return CCK_PD_LV0; return CCK_PD_LV_MAX; } diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index e79b084628e7..33a5eb9637c0 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -125,6 +125,15 @@ rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, u8 ch, u8 regd, struct rtw_power_params *pwr_param); +enum rtw_phy_cck_pd_lv { + CCK_PD_LV0, + CCK_PD_LV1, + CCK_PD_LV2, + CCK_PD_LV3, + CCK_PD_LV4, + CCK_PD_LV_MAX, +}; + #define MASKBYTE0 0xff #define MASKBYTE1 0xff00 #define MASKBYTE2 0xff0000 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index c2f6cd76a658..de0505a6a365 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -3168,8 +3168,8 @@ rtw8822c_phy_cck_pd_set_reg(struct rtw_dev *rtwdev, static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; - s8 pd_lvl[4] = {2, 4, 6, 8}; - s8 cs_lvl[4] = {2, 2, 2, 4}; + s8 pd_lvl[CCK_PD_LV_MAX] = {0, 2, 4, 6, 8}; + s8 cs_lvl[CCK_PD_LV_MAX] = {0, 2, 2, 2, 4}; u8 cur_lvl; u8 nrx, bw; diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c 
b/drivers/net/wireless/rsi/rsi_91x_hal.c index f84250bdb8cf..6f8d5f9a9f7e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -622,6 +622,7 @@ static int bl_cmd(struct rsi_hw *adapter, u8 cmd, u8 exp_resp, char *str) bl_start_cmd_timer(adapter, timeout); status = bl_write_cmd(adapter, cmd, exp_resp, ®out_val); if (status < 0) { + bl_stop_cmd_timer(adapter); rsi_dbg(ERR_ZONE, "%s: Command %s (%0x) writing failed..\n", __func__, str, cmd); @@ -737,10 +738,9 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size) } status = bl_cmd(adapter, cmd_req, cmd_resp, str); - if (status) { - bl_stop_cmd_timer(adapter); + if (status) return status; - } + return 0; } @@ -828,10 +828,9 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content, status = bl_cmd(adapter, EOF_REACHED, FW_LOADING_SUCCESSFUL, "EOF_REACHED"); - if (status) { - bl_stop_cmd_timer(adapter); + if (status) return status; - } + rsi_dbg(INFO_ZONE, "FW loading is done and FW is running..\n"); return 0; } @@ -849,6 +848,7 @@ static int rsi_hal_prepare_fwload(struct rsi_hw *adapter) ®out_val, RSI_COMMON_REG_SIZE); if (status < 0) { + bl_stop_cmd_timer(adapter); rsi_dbg(ERR_ZONE, "%s: REGOUT read failed\n", __func__); return status; diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 6c7f26ef6476..9cc8a335d519 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1756,6 +1756,7 @@ static int rsi_send_beacon(struct rsi_common *common) skb_pull(skb, (64 - dword_align_bytes)); if (rsi_prepare_beacon(common, skb)) { rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n"); + dev_kfree_skb(skb); return -EINVAL; } skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 760eaffeebd6..4b9e406b8461 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -16,6 +16,7 @@ */ #include +#include #include #include "rsi_usb.h" #include "rsi_hal.h" @@ -29,7 +30,7 @@ MODULE_PARM_DESC(dev_oper_mode, "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n" "6[AP + BT classic], 14[AP + BT classic + BT LE]"); -static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num); +static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags); /** * rsi_usb_card_write() - This function writes to the USB Card. @@ -117,7 +118,7 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface, __le16 buffer_size; int ii, bin_found = 0, bout_found = 0; - iface_desc = &(interface->altsetting[0]); + iface_desc = interface->cur_altsetting; for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) { endpoint = &(iface_desc->endpoint[ii].desc); @@ -285,20 +286,29 @@ static void rsi_rx_done_handler(struct urb *urb) status = 0; out: - if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num)) + if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC)) rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__); if (status) dev_kfree_skb(rx_cb->rx_skb); } +static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) +{ + struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; + struct urb *urb = rx_cb->rx_urb; + + usb_kill_urb(urb); +} + /** * rsi_rx_urb_submit() - This function submits the given URB to the USB stack. * @adapter: Pointer to the adapter structure. 
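
The rsi change above threads a gfp_t parameter down to usb_submit_urb(): the probe-time submission may sleep (GFP_KERNEL), but resubmission happens inside the URB's own completion handler, which runs in atomic context and must use GFP_ATOMIC. A minimal kernel-style sketch with hypothetical names:

#include <linux/usb.h>

static int submit_rx_urb(struct urb *urb, gfp_t flags)
{
	return usb_submit_urb(urb, flags);
}

static void rx_done(struct urb *urb)
{
	/* completion handlers run in atomic context: no sleeping flags */
	submit_rx_urb(urb, GFP_ATOMIC);
}
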
* * Return: 0 on success, a negative error code on failure. */ -static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) +static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) { struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; @@ -328,9 +338,11 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) rsi_rx_done_handler, rx_cb); - status = usb_submit_urb(urb, GFP_KERNEL); - if (status) + status = usb_submit_urb(urb, mem_flags); + if (status) { rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__); + dev_kfree_skb(skb); + } return status; } @@ -816,17 +828,20 @@ static int rsi_probe(struct usb_interface *pfunction, rsi_dbg(INIT_ZONE, "%s: Device Init Done\n", __func__); } - status = rsi_rx_urb_submit(adapter, WLAN_EP); + status = rsi_rx_urb_submit(adapter, WLAN_EP, GFP_KERNEL); if (status) goto err1; if (adapter->priv->coex_mode > 1) { - status = rsi_rx_urb_submit(adapter, BT_EP); + status = rsi_rx_urb_submit(adapter, BT_EP, GFP_KERNEL); if (status) - goto err1; + goto err_kill_wlan_urb; } return 0; + +err_kill_wlan_urb: + rsi_rx_urb_kill(adapter, WLAN_EP); err1: rsi_deinit_usb_interface(adapter); err: @@ -857,6 +872,10 @@ static void rsi_disconnect(struct usb_interface *pfunction) adapter->priv->bt_adapter = NULL; } + if (adapter->priv->coex_mode > 1) + rsi_rx_urb_kill(adapter, BT_EP); + rsi_rx_urb_kill(adapter, WLAN_EP); + rsi_reset_card(adapter); rsi_deinit_usb_interface(adapter); rsi_91x_deinit(adapter); diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c index 6574e78e05ea..2a03dc533b6a 100644 --- a/drivers/net/wireless/st/cw1200/fwio.c +++ b/drivers/net/wireless/st/cw1200/fwio.c @@ -320,12 +320,12 @@ int cw1200_load_firmware(struct cw1200_common *priv) goto out; } - priv->hw_type = cw1200_get_hw_type(val32, &major_revision); - if (priv->hw_type < 0) { + ret = cw1200_get_hw_type(val32, &major_revision); + if (ret < 0) { pr_err("Can't deduce hardware type.\n"); - ret = -ENOTSUPP; goto out; } + priv->hw_type = ret; /* Set DPLL Reg value, and read back to confirm writes work */ ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 7997cc6de334..01305ba2d3aa 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -450,7 +450,6 @@ static void virt_wifi_net_device_destructor(struct net_device *dev) */ kfree(dev->ieee80211_ptr); dev->ieee80211_ptr = NULL; - free_netdev(dev); } /* No lock interaction. 
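
The cw1200 fix further down checks the result in a plain int before storing it, so a negative error can no longer be laundered through priv->hw_type and tested after the fact. A compilable sketch of the pattern; the types and error value are illustrative:

#include <stdio.h>

static int get_hw_type(unsigned int reg) { return reg ? 1 : -22; }

int main(void)
{
	unsigned int hw_type;	/* storing -22 here would wrap positive */
	int ret = get_hw_type(0);

	if (ret < 0) {
		printf("can't deduce hardware type: %d\n", ret);
		return 1;
	}
	hw_type = ret;		/* assign only a known-good value */
	printf("hw_type=%u\n", hw_type);
	return 0;
}
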
*/ @@ -458,7 +457,7 @@ static void virt_wifi_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &virt_wifi_ops; - dev->priv_destructor = virt_wifi_net_device_destructor; + dev->needs_free_netdev = true; } /* Called in a RCU read critical section from netif_receive_skb */ @@ -544,6 +543,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, goto unregister_netdev; } + dev->priv_destructor = virt_wifi_net_device_destructor; priv->being_deleted = false; priv->is_connected = false; priv->is_up = false; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index 7b5c2fe5bd4d..8ff0374126e4 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -1263,7 +1263,7 @@ static void print_id(struct usb_device *udev) static int eject_installer(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); - struct usb_host_interface *iface_desc = &intf->altsetting[0]; + struct usb_host_interface *iface_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *endpoint; unsigned char *cmd; u8 bulk_out_ep; diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 0cc9ac856fe2..ed2123129e0e 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) const struct firmware *fw; struct sk_buff *skb; unsigned long len; - u8 max_size, payload_size; + int max_size, payload_size; int rc = 0; if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) || @@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) while (len) { - payload_size = min_t(unsigned long, (unsigned long) max_size, - len); + payload_size = min_t(unsigned long, max_size, len); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size), GFP_KERNEL); diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 4d1909aecd6c..9f60e4dc5a90 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client, r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios); if (r) - return r; + dev_dbg(dev, "Unable to add GPIO mapping table\n"); phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_en)) { diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index e897e4d768ef..d7a355d05368 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) cmd, sizeof(cmd), false); rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), - &transferred, 0); + &transferred, 5000); kfree(buffer); if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 720c89d6066e..4ac8cb262559 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -225,6 +225,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) out: gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); + usleep_range(10000, 15000); } static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode) diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index cda996f6954e..2b83156efe3f 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -693,7 +693,7 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, 
target->nfcid1_len != 10) return -EOPNOTSUPP; - return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, + return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, PN544_RF_READER_CMD_ACTIVATE_NEXT, target->nfcid1, target->nfcid1_len, NULL); } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK | diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 604dba4f18af..8e4d355dc3ae 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -565,7 +565,7 @@ static void port100_tx_update_payload_len(void *_frame, int len) { struct port100_frame *frame = _frame; - frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len); + le16_add_cpu(&frame->datalen, len); } static bool port100_rx_frame_is_valid(void *_frame) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 3e9f45aec8d1..5129543a0473 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1261,11 +1261,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, ret = btt_data_read(arena, page, off, postmap, cur_len); if (ret) { - int rc; - /* Media error - set the e_flag */ - rc = btt_map_write(arena, premap, postmap, 0, 1, - NVDIMM_IO_ATOMIC); + if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC)) + dev_warn_ratelimited(to_dev(arena), + "Error persistently tracking bad blocks at %#x\n", + premap); goto out_rtt; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index fa7ba09dca77..9615ba4f8b1c 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); * nvme_reset_wq - hosts nvme reset works * nvme_delete_wq - hosts nvme delete works * - * nvme_wq will host works such are scan, aen handling, fw activation, - * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq + * nvme_wq will host works such as scan, aen handling, fw activation, + * keep-alive, periodic reconnects etc. nvme_reset_wq * runs reset works which also flush works hosted on nvme_wq for * serialization purposes. nvme_delete_wq host controller deletion * works which flush reset works for serialization. @@ -313,7 +313,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) if (blk_mq_request_completed(req)) return true; - nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; blk_mq_complete_request(req); return true; } @@ -493,6 +493,7 @@ struct request *nvme_alloc_request(struct request_queue *q, req->cmd_flags |= REQ_FAILFAST_DRIVER; nvme_clear_nvme_request(req); nvme_req(req)->cmd = cmd; + nvme_req(req)->opcode = cmd->common.opcode; return req; } @@ -611,8 +612,14 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, struct nvme_dsm_range *range; struct bio *bio; - range = kmalloc_array(segments, sizeof(*range), - GFP_ATOMIC | __GFP_NOWARN); + /* + * Some devices do not consider the DSM 'Number of Ranges' field when + * determining how much data to DMA. Always allocate memory for maximum + * number of segments to prevent device reading beyond end of buffer. 
+ */ + static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; + + range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); if (!range) { /* * If we fail allocation our range, fallback to the controller @@ -652,7 +659,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, req->special_vec.bv_page = virt_to_page(range); req->special_vec.bv_offset = offset_in_page(range); - req->special_vec.bv_len = sizeof(*range) * segments; + req->special_vec.bv_len = alloc_size; req->rq_flags |= RQF_SPECIAL_PAYLOAD; return BLK_STS_OK; @@ -905,6 +912,11 @@ static int nvme_submit_user_cmd(struct request_queue *q, req->timeout = timeout ? timeout : ADMIN_TIMEOUT; nvme_req(req)->flags |= NVME_REQ_USERCMD; + if (cmd->common.opcode == nvme_cmd_read || + cmd->common.opcode == nvme_cmd_write) { + req->__sector = meta_seed; + req->__data_len = bufflen; + } if (ubuffer && bufflen) { ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, @@ -966,7 +978,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) startka = true; spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); } static int nvme_keep_alive(struct nvme_ctrl *ctrl) @@ -996,7 +1008,7 @@ static void nvme_keep_alive_work(struct work_struct *work) dev_dbg(ctrl->device, "reschedule traffic based keep-alive timer\n"); ctrl->comp_seen = false; - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); return; } @@ -1013,7 +1025,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) if (unlikely(ctrl->kato == 0)) return; - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); } void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) @@ -1155,8 +1167,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { + union nvme_result res = { 0 }; struct nvme_command c; - union nvme_result res; int ret; memset(&c, 0, sizeof(c)); @@ -1727,6 +1739,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, if (ret) dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", ret); + if (ret > 0) + ret = 0; } return ret; } @@ -2404,16 +2418,6 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x14a4, .fr = "22301111", .quirks = NVME_QUIRK_SIMPLE_SUSPEND, - }, - { - /* - * This Kingston E8FK11.T firmware version has no interrupt - * after resume with actions related to suspend to idle - * https://bugzilla.kernel.org/show_bug.cgi?id=204887 - */ - .vid = 0x2646, - .fr = "E8FK11.T", - .quirks = NVME_QUIRK_SIMPLE_SUSPEND, } }; @@ -3855,7 +3859,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) if (!log) return; - if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, + if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, sizeof(*log), 0)) dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); kfree(log); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 265f89e11d8b..59474bd0c728 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || 
!template->max_sgl_segments || - !template->max_dif_sgl_segments || !template->dma_boundary) { + !template->max_dif_sgl_segments || !template->dma_boundary || + !template->module) { ret = -EINVAL; goto out_reghost_failed; } @@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); + struct nvme_fc_lport *lport = ctrl->lport; unsigned long flags; if (ctrl->ctrl.tagset) { @@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref) if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); + module_put(lport->ops->module); } static void @@ -2907,10 +2910,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) static void __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) { - nvme_stop_keep_alive(&ctrl->ctrl); + /* + * if state is connecting - the error occurred as part of a + * reconnect attempt. The create_association error paths will + * clean up any outstanding io. + * + * if it's a different state - ensure all pending io is + * terminated. Given this can delay while waiting for the + * aborted io to return, we recheck adapter state below + * before changing state. + */ + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { + nvme_stop_keep_alive(&ctrl->ctrl); - /* will block will waiting for io to terminate */ - nvme_fc_delete_association(ctrl); + /* will block will waiting for io to terminate */ + nvme_fc_delete_association(ctrl); + } if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) @@ -3056,10 +3071,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto out_fail; } + if (!try_module_get(lport->ops->module)) { + ret = -EUNATCH; + goto out_free_ctrl; + } + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; - goto out_free_ctrl; + goto out_mod_put; } ctrl->ctrl.opts = opts; @@ -3212,6 +3232,8 @@ out_free_queues: out_free_ida: put_device(ctrl->dev); ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_mod_put: + module_put(lport->ops->module); out_free_ctrl: kfree(ctrl); out_fail: diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index e0f064dcbd02..aed6354cb271 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -95,6 +95,7 @@ void nvme_failover_req(struct request *req) } break; case NVME_SC_HOST_PATH_ERROR: + case NVME_SC_HOST_ABORTED_CMD: /* * Temporary transport disruption in talking to the controller. * Try to send on a new path. 
@@ -710,6 +711,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) } INIT_WORK(&ctrl->ana_work, nvme_ana_work); + kfree(ctrl->ana_log_buf); ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); if (!ctrl->ana_log_buf) { error = -ENOMEM; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 22e8401352c2..2b51bb294081 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -128,6 +128,7 @@ struct nvme_request { u8 flags; u16 status; struct nvme_ctrl *ctrl; + u8 opcode; }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 869f462e6b6e..737072a44a4a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -68,14 +68,14 @@ static int io_queue_depth = 1024; module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); -static int write_queues; -module_param(write_queues, int, 0644); +static unsigned int write_queues; +module_param(write_queues, uint, 0644); MODULE_PARM_DESC(write_queues, "Number of queues to use for writes. If not set, reads and writes " "will share a queue set."); -static int poll_queues; -module_param(poll_queues, int, 0644); +static unsigned int poll_queues; +module_param(poll_queues, uint, 0644); MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); struct nvme_dev; @@ -167,7 +167,6 @@ struct nvme_queue { /* only used for poll queues: */ spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; volatile struct nvme_completion *cqes; - struct blk_mq_tags **tags; dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; u32 __iomem *q_db; @@ -377,29 +376,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, WARN_ON(hctx_idx != 0); WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); - WARN_ON(nvmeq->tags); hctx->driver_data = nvmeq; - nvmeq->tags = &dev->admin_tagset.tags[0]; return 0; } -static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) -{ - struct nvme_queue *nvmeq = hctx->driver_data; - - nvmeq->tags = NULL; -} - static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = data; struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; - if (!nvmeq->tags) - nvmeq->tags = &dev->tagset.tags[hctx_idx]; - WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); hctx->driver_data = nvmeq; return 0; @@ -881,6 +868,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_command cmnd; + struct nvme_request *rq; blk_status_t ret; iod->aborted = 0; @@ -898,6 +886,9 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ret) return ret; + rq = blk_mq_rq_to_pdu(req); + rq->opcode = cmnd.common.opcode; + if (blk_rq_nr_phys_segments(req)) { ret = nvme_map_data(dev, req, &cmnd); if (ret) @@ -950,6 +941,71 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) writel(head, nvmeq->q_db + nvmeq->dev->db_stride); } +static inline void nvme_eh_io_timeout(struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_ctrl *ctrl = &iod->nvmeq->dev->ctrl; + struct nvme_request *rq = nvme_req(req); + + /* admin command error */ + if (req->q == ctrl->admin_q || req->q == ctrl->connect_q) { + dev_warn_ratelimited(ctrl->device, + "Admin command timeout, CMD: %d(+%d)", + rq->opcode, rq->retries); + return; + } + + /* io command timeout */ + if (rq->opcode == nvme_cmd_write 
|| rq->opcode == nvme_cmd_read) + dev_warn_ratelimited(ctrl->device, + "I/O timeout, QID: %d, CMD: %d(+%d), Sector: %llu+%u", + iod->nvmeq->qid, rq->opcode, rq->retries, + (unsigned long long)blk_rq_pos(req), + blk_rq_sectors(req)); + else + dev_warn_ratelimited(ctrl->device, + "I/O timeout, QID: %d, CMD: %d(+%d)", + iod->nvmeq->qid, rq->opcode, rq->retries); +} + +static inline void nvme_eh_io_error(struct request *req, __le16 status) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_ctrl *ctrl = &iod->nvmeq->dev->ctrl; + struct nvme_request *rq = nvme_req(req); + + if (status == NVME_SC_SUCCESS || rq->opcode == nvme_admin_abort_cmd) + return; + + /* admin command error */ + if (req->q == ctrl->admin_q || req->q == ctrl->connect_q) { + dev_warn_ratelimited(ctrl->device, + "Admin command error, CMD: %d(+%d), Status: 0x%x", + rq->opcode, rq->retries, status); + return; + } + + /* io command error */ + if (rq->opcode == nvme_cmd_write || rq->opcode == nvme_cmd_read) + dev_warn_ratelimited(ctrl->device, + "I/O error, QID: %d, CMD: %d(+%d), Sector: %llu+%u, Status: 0x%x", + iod->nvmeq->qid, rq->opcode, rq->retries, + (unsigned long long)blk_rq_pos(req), + blk_rq_sectors(req), status); + else + dev_warn_ratelimited(ctrl->device, + "I/O error, QID: %d, CMD: %d(+%d), Status: 0x%x", + iod->nvmeq->qid, rq->opcode, rq->retries, + status); +} + +static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) +{ + if (!nvmeq->qid) + return nvmeq->dev->admin_tagset.tags[0]; + return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; +} + static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) { volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; @@ -975,7 +1031,8 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) return; } - req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); + nvme_eh_io_error(req, le16_to_cpu(cqe->status) >> 1); trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); nvme_end_request(req, cqe->status, cqe->result); } @@ -1090,9 +1147,9 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx) spin_lock(&nvmeq->cq_poll_lock); found = nvme_process_cq(nvmeq, &start, &end, -1); + nvme_complete_cqes(nvmeq, start, end); spin_unlock(&nvmeq->cq_poll_lock); - nvme_complete_cqes(nvmeq, start, end); return found; } @@ -1247,6 +1304,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_command cmd; u32 csts = readl(dev->bar + NVME_REG_CSTS); + /* log error memset */ + nvme_eh_io_timeout(req); + /* If PCI error recovery process is happening, we cannot reset or * the recovery mechanism will surely fail. */ @@ -1413,6 +1473,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) nvme_poll_irqdisable(nvmeq, -1); } +/* + * Called only on a device that has been disabled and after all other threads + * that can check this device's completion queues have synced. This is the + * last chance for the driver to see a natural completion before + * nvme_cancel_request() terminates all incomplete requests. 
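The helper defined just below performs that final harvest. A hedged usage sketch of where it sits in the teardown order, mirroring the nvme_dev_disable() hunk later in this patch (the wrapper name is invented):

static void sketch_disable(struct nvme_dev *dev)
{
        nvme_suspend_io_queues(dev);     /* no further interrupts */
        nvme_pci_disable(dev);           /* device can post no new CQEs */
        nvme_reap_pending_cqes(dev);     /* harvest what already landed */
        blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request,
                                &dev->ctrl); /* then fail the remainder */
}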
+ */ +static void nvme_reap_pending_cqes(struct nvme_dev *dev) +{ + u16 start, end; + int i; + + for (i = dev->ctrl.queue_count - 1; i > 0; i--) { + nvme_process_cq(&dev->queues[i], &start, &end, -1); + nvme_complete_cqes(&dev->queues[i], start, end); + } +} + static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, int entry_size) { @@ -1578,7 +1655,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = { .queue_rq = nvme_queue_rq, .complete = nvme_pci_complete_rq, .init_hctx = nvme_admin_init_hctx, - .exit_hctx = nvme_admin_exit_hctx, .init_request = nvme_init_request, .timeout = nvme_timeout, }; @@ -2060,7 +2136,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) .priv = dev, }; unsigned int irq_queues, this_p_queues; - unsigned int nr_cpus = num_possible_cpus(); /* * Poll queues don't need interrupts, but we need at least one IO @@ -2071,10 +2146,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) this_p_queues = nr_io_queues - 1; irq_queues = 1; } else { - if (nr_cpus < nr_io_queues - this_p_queues) - irq_queues = nr_cpus + 1; - else - irq_queues = nr_io_queues - this_p_queues + 1; + irq_queues = nr_io_queues - this_p_queues + 1; } dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; @@ -2252,11 +2324,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) if (timeout == 0) return false; - /* handle any remaining CQEs */ - if (opcode == nvme_admin_delete_cq && - !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags)) - nvme_poll_irqdisable(nvmeq, -1); - sent--; if (nr_queues) goto retry; @@ -2445,6 +2512,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_suspend_io_queues(dev); nvme_suspend_queue(&dev->queues[0]); nvme_pci_disable(dev); + nvme_reap_pending_cqes(dev); blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); @@ -2751,6 +2819,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) return NVME_QUIRK_NO_APST; + } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || + pdev->device == 0xa808 || pdev->device == 0xa809)) || + (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { + /* + * Forcing to use host managed nvme power settings for + * lowest idle power with quick resume latency on + * Samsung and Toshiba SSDs based on suspend behavior + * on Coffee Lake board for LENOVO C640 + */ + if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && + dmi_match(DMI_BOARD_NAME, "LNVNB161216")) + return NVME_QUIRK_SIMPLE_SUSPEND; } return 0; @@ -2882,6 +2962,9 @@ static void nvme_remove(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); + /* output the hotplug nvme drive letter and BDF */ + dev_info(dev->ctrl.device, "remove pci function %s\n", dev_name(&pdev->dev)); + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); pci_set_drvdata(pdev, NULL); @@ -3112,7 +3195,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, - { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), + .driver_data = NVME_QUIRK_SINGLE_VECTOR }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), .driver_data = NVME_QUIRK_SINGLE_VECTOR | diff --git a/drivers/nvme/host/rdma.c 
b/drivers/nvme/host/rdma.c index cb4c3000a57e..73e8475ddc8a 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -850,9 +850,11 @@ out_free_tagset: if (new) blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); out_free_async_qe: - nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, - sizeof(struct nvme_command), DMA_TO_DEVICE); - ctrl->async_event_sqe.data = NULL; + if (ctrl->async_event_sqe.data) { + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; @@ -1088,7 +1090,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return; - queue_work(nvme_wq, &ctrl->err_work); + queue_work(nvme_reset_wq, &ctrl->err_work); } static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7544be84ab35..244984420b41 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) return; - queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work); + queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); } static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, @@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w) } else if (unlikely(result < 0)) { dev_err(queue->ctrl->ctrl.device, "failed to send request %d\n", result); - if (result != -EPIPE) + + /* + * Fail the request unless peer closed the connection, + * in which case error recovery flow will complete all. + */ + if ((result != -EPIPE) && (result != -ECONNRESET)) nvme_tcp_fail_request(queue->request); nvme_tcp_done_send_req(queue); return; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 3a67e244e568..57a4062cbb59 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -555,7 +555,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns) } else { struct nvmet_ns *old; - list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) { + list_for_each_entry_rcu(old, &subsys->namespaces, dev_link, + lockdep_is_held(&subsys->lock)) { BUG_ON(ns->nsid == old->nsid); if (ns->nsid < old->nsid) break; @@ -1174,7 +1175,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, ctrl->p2p_client = get_device(req->p2p_client); - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) + list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link, + lockdep_is_held(&ctrl->subsys->lock)) nvmet_p2pmem_ns_add_p2p(ctrl, ns); } diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index d16b55ffe79f..4e9004fe5c6f 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -105,6 +105,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) u16 qid = le16_to_cpu(c->qid); u16 sqsize = le16_to_cpu(c->sqsize); struct nvmet_ctrl *old; + u16 ret; old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { @@ -115,7 +116,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) if (!sqsize) { pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); - return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + goto err; } /* note: convert 
queue size from 0's-based value to 1's-based value */ @@ -128,16 +130,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) } if (ctrl->ops->install_queue) { - u16 ret = ctrl->ops->install_queue(req->sq); - + ret = ctrl->ops->install_queue(req->sq); if (ret) { pr_err("failed to install queue %d cntlid %d ret %x\n", - qid, ret, ctrl->cntlid); - return ret; + qid, ctrl->cntlid, ret); + goto err; } } return 0; + +err: + req->sq->ctrl = NULL; + return ret; } static void nvmet_execute_admin_connect(struct nvmet_req *req) diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index b50b53db3746..1c50af6219f3 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { + .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index d535080b781f..2fe34fd4c3f3 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) return 1; } -static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd) +static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; int ret; @@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd) while (cmd->cur_sg) { struct page *page = sg_page(cmd->cur_sg); u32 left = cmd->cur_sg->length - cmd->offset; + int flags = MSG_DONTWAIT; + + if ((!last_in_batch && cmd->queue->send_list_len) || + cmd->wbytes_done + left < cmd->req.transfer_len || + queue->data_digest || !queue->nvme_sq.sqhd_disabled) + flags |= MSG_MORE; ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, - left, MSG_DONTWAIT | MSG_MORE); + left, flags); if (ret <= 0) return ret; @@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, } if (cmd->state == NVMET_TCP_SEND_DATA) { - ret = nvmet_try_send_data(cmd); + ret = nvmet_try_send_data(cmd, last_in_batch); if (ret <= 0) goto done_send; } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 057d1ff87d5d..960542dea5ad 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -110,7 +110,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell) list_del(&cell->node); mutex_unlock(&nvmem_mutex); of_node_put(cell->np); - kfree(cell->name); + kfree_const(cell->name); kfree(cell); } @@ -137,7 +137,9 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, cell->nvmem = nvmem; cell->offset = info->offset; cell->bytes = info->bytes; - cell->name = info->name; + cell->name = kstrdup_const(info->name, GFP_KERNEL); + if (!cell->name) + return -ENOMEM; cell->bit_offset = info->bit_offset; cell->nbits = info->nbits; @@ -327,7 +329,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) dev_err(dev, "cell %s unaligned to nvmem stride %d\n", cell->name, nvmem->stride); /* Cells already added will be freed later. 
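This nvmem fix pairs kstrdup_const() with kfree_const(). A minimal sketch of that API contract (the wrapper names are hypothetical): the dup helper returns the source pointer unchanged when it lives in .rodata and copies otherwise, and the matching free is a no-op for such pointers.

#include <linux/slab.h>
#include <linux/string.h>

static const char *sketch_save_name(const char *name)
{
        return kstrdup_const(name, GFP_KERNEL); /* may alias 'name' */
}

static void sketch_drop_name(const char *name)
{
        kfree_const(name);      /* safe for heap copies and .rodata alike */
}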
*/ - kfree(cell->name); + kfree_const(cell->name); kfree(cell); return -EINVAL; } diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index dff2f3c357f5..fc40555ca4cd 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -521,6 +521,10 @@ static int imx_ocotp_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + clk_prepare_enable(priv->clk); + imx_ocotp_clr_err_if_set(priv->base); + clk_disable_unprepare(priv->clk); + priv->params = of_device_get_match_data(&pdev->dev); imx_ocotp_nvmem_config.size = 4 * priv->params->nregs; imx_ocotp_nvmem_config.dev = dev; diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c index 9e0c429cd08a..8759c4470012 100644 --- a/drivers/nvmem/nvmem-sysfs.c +++ b/drivers/nvmem/nvmem-sysfs.c @@ -56,6 +56,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, count = round_down(count, nvmem->word_size); + if (!nvmem->reg_read) + return -EPERM; + rc = nvmem->reg_read(nvmem->priv, pos, buf, count); if (rc) @@ -90,6 +93,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, count = round_down(count, nvmem->word_size); + if (!nvmem->reg_write) + return -EPERM; + rc = nvmem->reg_write(nvmem->priv, pos, buf, count); if (rc) diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 37c2ccbefecd..d91618641be6 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -103,4 +103,8 @@ config OF_OVERLAY config OF_NUMA bool +config OF_DMA_DEFAULT_COHERENT + # arches should select this if DMA is coherent by default for OF devices + bool + endif # OF diff --git a/drivers/of/address.c b/drivers/of/address.c index 978427a9d5e6..8f74c4626e0e 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -998,12 +998,16 @@ EXPORT_SYMBOL_GPL(of_dma_get_range); * @np: device node * * It returns true if "dma-coherent" property was found - * for this device in DT. + * for this device in the DT, or if DMA is coherent by + * default for OF devices on the current platform. 
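A short usage sketch for the helper whose body follows; the caller name is invented, and of_dma_is_coherent() is declared in <linux/of_address.h>:

#include <linux/of.h>
#include <linux/of_address.h>

static void sketch_report_coherency(struct device_node *np)
{
        bool coherent = of_dma_is_coherent(np);

        /* On arches selecting CONFIG_OF_DMA_DEFAULT_COHERENT this is
         * true even without an explicit "dma-coherent" property. */
        pr_info("%pOF: DMA is %scoherent\n", np, coherent ? "" : "non-");
}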
*/ bool of_dma_is_coherent(struct device_node *np) { struct device_node *node = of_node_get(np); + if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT)) + return true; + while (node) { if (of_property_read_bool(node, "dma-coherent")) { of_node_put(node); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index bd6129db6417..c34a6df712ad 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -268,6 +268,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) rc = of_mdiobus_register_phy(mdio, child, addr); if (rc && rc != -ENODEV) goto unregister; + break; } } } diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index c423e94baf0f..9617b7df7c4d 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -305,7 +305,6 @@ static int add_changeset_property(struct overlay_changeset *ovcs, { struct property *new_prop = NULL, *prop; int ret = 0; - bool check_for_non_overlay_node = false; if (target->in_livetree) if (!of_prop_cmp(overlay_prop->name, "name") || @@ -318,6 +317,25 @@ static int add_changeset_property(struct overlay_changeset *ovcs, else prop = NULL; + if (prop) { + if (!of_prop_cmp(prop->name, "#address-cells")) { + if (!of_prop_val_eq(prop, overlay_prop)) { + pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + return ret; + + } else if (!of_prop_cmp(prop->name, "#size-cells")) { + if (!of_prop_val_eq(prop, overlay_prop)) { + pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + return ret; + } + } + if (is_symbols_prop) { if (prop) return -EINVAL; @@ -330,33 +348,18 @@ static int add_changeset_property(struct overlay_changeset *ovcs, return -ENOMEM; if (!prop) { - check_for_non_overlay_node = true; if (!target->in_livetree) { new_prop->next = target->np->deadprops; target->np->deadprops = new_prop; } ret = of_changeset_add_property(&ovcs->cset, target->np, new_prop); - } else if (!of_prop_cmp(prop->name, "#address-cells")) { - if (!of_prop_val_eq(prop, new_prop)) { - pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", - target->np); - ret = -EINVAL; - } - } else if (!of_prop_cmp(prop->name, "#size-cells")) { - if (!of_prop_val_eq(prop, new_prop)) { - pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", - target->np); - ret = -EINVAL; - } } else { - check_for_non_overlay_node = true; ret = of_changeset_update_property(&ovcs->cset, target->np, new_prop); } - if (check_for_non_overlay_node && - !of_node_check_flag(target->np, OF_OVERLAY)) + if (!of_node_check_flag(target->np, OF_OVERLAY)) pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", target->np, new_prop->name); diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 92e895d86458..ca7823eef2b4 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -1146,8 +1146,10 @@ static void attach_node_and_children(struct device_node *np) full_name = kasprintf(GFP_KERNEL, "%pOF", np); if (!strcmp(full_name, "/__local_fixups__") || - !strcmp(full_name, "/__fixups__")) + !strcmp(full_name, "/__fixups__")) { + kfree(full_name); return; + } dup = of_find_node_by_path(full_name); kfree(full_name); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1cbb58240b80..1e5fcdee043c 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -678,15 +678,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); 
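The OPP hunk that continues below converts scattered early returns into a single cleanup label. The general shape, sketched with a hypothetical parser so the unwinding reads in one place:

static int sketch_parse_opps(struct opp_table *opp_table)
{
        int ret;
        int count = sketch_add_static_opps(opp_table);  /* hypothetical */

        if (count < 0) {
                ret = count;
                goto put_list_kref;
        }
        if (WARN_ON(!count)) {
                ret = -ENOENT;
                goto put_list_kref;
        }
        return 0;

put_list_kref:
        _put_opp_list_kref(opp_table);  /* drop refs taken while parsing */
        return ret;
}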
of_node_put(np); - return ret; + goto put_list_kref; } else if (opp) { count++; } } /* There should be one of more OPP defined */ - if (WARN_ON(!count)) - return -ENOENT; + if (WARN_ON(!count)) { + ret = -ENOENT; + goto put_list_kref; + } list_for_each_entry(opp, &opp_table->opp_list, node) pstate_count += !!opp->pstate; @@ -695,7 +697,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) if (pstate_count && pstate_count != count) { dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); - return -ENOENT; + ret = -ENOENT; + goto put_list_kref; } if (pstate_count) @@ -704,6 +707,11 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) opp_table->parsed_static_opps = true; return 0; + +put_list_kref: + _put_opp_list_kref(opp_table); + + return ret; } /* Initializes OPP tables based on old-deprecated bindings */ @@ -738,6 +746,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret); + _put_opp_list_kref(opp_table); return ret; } nr -= 2; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 7b4ee33c1935..15c81cffd2de 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv) return 0; } +/* + * Iterates through all the devices connected to the bus and return 1 + * if the device is a parallel port. + */ + +static int port_detect(struct device *dev, void *dev_drv) +{ + if (is_parport(dev)) + return 1; + return 0; +} + /** * parport_register_driver - register a parallel port device driver * @drv: structure describing the driver @@ -282,6 +294,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner, if (ret) return ret; + /* + * check if bus has any parallel port registered, if + * none is found then load the lowlevel driver. 
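bus_for_each_dev() stops as soon as the callback returns non-zero and propagates that value, which is what lets port_detect() in the call below double as a cheap existence test. A sketch (the wrapper name is made up):

#include <linux/device.h>

/* Returns non-zero if any parallel port device is already on the bus. */
static int sketch_have_parport(void)
{
        return bus_for_each_dev(&parport_bus_type, NULL, NULL, port_detect);
}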
+ */ + ret = bus_for_each_dev(&parport_bus_type, NULL, NULL, + port_detect); + if (!ret) + get_lowlevel_driver(); + mutex_lock(&registration_lock); if (drv->match_port) bus_for_each_dev(&parport_bus_type, NULL, drv, diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index af677254a072..c8c702c494a2 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -422,7 +422,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) lower_32_bits(start) | OB_ENABLEN); ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i), upper_32_bits(start)); - start += OB_WIN_SIZE; + start += OB_WIN_SIZE * SZ_1M; } val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); @@ -510,7 +510,7 @@ static void ks_pcie_stop_link(struct dw_pcie *pci) /* Disable Link training */ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); val &= ~LTSSM_EN_VAL; - ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); + ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); } static int ks_pcie_start_link(struct dw_pcie *pci) @@ -1354,7 +1354,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) ret = of_property_read_u32(np, "num-viewport", &num_viewport); if (ret < 0) { dev_err(dev, "unable to read *num-viewport* property\n"); - return ret; + goto err_get_sync; } /* diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index e35e9eaa50ee..b927a92e3463 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -250,15 +250,15 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp) if (IS_ERR(res->port_clk)) return PTR_ERR(res->port_clk); - res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0); + res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0); if (IS_ERR(res->mipi_gate)) return PTR_ERR(res->mipi_gate); - res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0); + res->general_clk = meson_pcie_probe_clock(dev, "general", 0); if (IS_ERR(res->general_clk)) return PTR_ERR(res->general_clk); - res->clk = meson_pcie_probe_clock(dev, "pcie", 0); + res->clk = meson_pcie_probe_clock(dev, "pclk", 0); if (IS_ERR(res->clk)) return PTR_ERR(res->clk); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 0f36a926059a..8615f1548882 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -78,7 +78,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = { irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) { int i, pos, irq; - u32 val, num_ctrls; + unsigned long val; + u32 status, num_ctrls; irqreturn_t ret = IRQ_NONE; num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; @@ -86,14 +87,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) for (i = 0; i < num_ctrls; i++) { dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + (i * MSI_REG_CTRL_BLOCK_SIZE), - 4, &val); - if (!val) + 4, &status); + if (!status) continue; ret = IRQ_HANDLED; + val = status; pos = 0; - while ((pos = find_next_bit((unsigned long *) &val, - MAX_MSI_IRQS_PER_CTRL, + while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos)) != MAX_MSI_IRQS_PER_CTRL) { irq = irq_find_mapping(pp->irq_domain, (i * MAX_MSI_IRQS_PER_CTRL) + diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index fc0fe4d4de49..97245e076548 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@
-180,6 +180,8 @@ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 #define LINK_WAIT_USLEEP_MAX 100000 +#define RETRAIN_WAIT_MAX_RETRIES 10 +#define RETRAIN_WAIT_USLEEP_US 2000 #define MSI_IRQ_NUM 32 @@ -239,6 +241,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie) return -ETIMEDOUT; } +static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie) +{ + size_t retries; + + for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) { + if (!advk_pcie_link_up(pcie)) + break; + udelay(RETRAIN_WAIT_USLEEP_US); + } +} + static void advk_pcie_setup_hw(struct advk_pcie *pcie) { u32 reg; @@ -415,7 +428,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, case PCI_EXP_RTCTL: { u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); - *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0; + *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE; return PCI_BRIDGE_EMUL_HANDLED; } @@ -426,11 +439,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, return PCI_BRIDGE_EMUL_HANDLED; } + case PCI_EXP_LNKCTL: { + /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */ + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) & + ~(PCI_EXP_LNKSTA_LT << 16); + if (!advk_pcie_link_up(pcie)) + val |= (PCI_EXP_LNKSTA_LT << 16); + *value = val; + return PCI_BRIDGE_EMUL_HANDLED; + } + case PCI_CAP_LIST_ID: case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: case PCI_EXP_LNKCAP: - case PCI_EXP_LNKCTL: *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); return PCI_BRIDGE_EMUL_HANDLED; default: @@ -447,15 +469,25 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_EXP_DEVCTL: - case PCI_EXP_LNKCTL: advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); break; - case PCI_EXP_RTCTL: - new = (new & PCI_EXP_RTCTL_PMEIE) << 3; - advk_writel(pcie, new, PCIE_ISR0_MASK_REG); + case PCI_EXP_LNKCTL: + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); + if (new & PCI_EXP_LNKCTL_RL) + advk_pcie_wait_for_retrain(pcie); break; + case PCI_EXP_RTCTL: { + /* Only mask/unmask PME interrupt */ + u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) & + ~PCIE_MSG_PM_PME_MASK; + if ((new & PCI_EXP_RTCTL_PMEIE) == 0) + val |= PCIE_MSG_PM_PME_MASK; + advk_writel(pcie, val, PCIE_ISR0_MASK_REG); + break; + } + case PCI_EXP_RTSTA: new = (new & PCI_EXP_RTSTA_PME) >> 9; advk_writel(pcie, new, PCIE_ISR0_REG); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 673a1725ef38..ac93f5a0398e 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = { .num_ports = 2, .ports = tegra20_pcie_ports, .msi_base_shift = 0, - .afi_pex2_ctrl = 0x128, .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, .pads_refclk_cfg0 = 0xfa5cfa5c, @@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = { .num_ports = 3, .ports = tegra30_pcie_ports, .msi_base_shift = 8, + .afi_pex2_ctrl = 0x128, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0xfa5cfa5c, @@ -2798,7 +2798,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); - if (err) { + if (err < 0) { dev_err(dev, "fail to enable pcie controller: %d\n", err); goto teardown_msi; } diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 
2d457bfdaf66..933a4346ae5d 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -1608,6 +1608,30 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_disable_msi_parsing); +static void quirk_paxc_bridge(struct pci_dev *pdev) +{ + /* + * The PCI config space is shared with the PAXC root port and the first + * Ethernet device. So, we need to workaround this by telling the PCI + * code that the bridge is not an Ethernet device. + */ + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + pdev->class = PCI_CLASS_BRIDGE_PCI << 8; + + /* + * MPSS is not being set properly (as it is currently 0). This is + * because that area of the PCI config space is hard coded to zero, and + * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS) + * so that the MPS can be set to the real max value. + */ + pdev->pcie_mpss = 2; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge); + MODULE_AUTHOR("Ray Jui "); MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index a45a6447b01d..32f37d08d5bc 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -235,7 +235,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val) return PCIBIOS_SUCCESSFUL; } -static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) +static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) { void *addr; u32 val; @@ -250,7 +250,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) return val; } -static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size) +static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, + size_t size) { void *addr; int ret; @@ -262,19 +263,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size) dev_err(&pcie->pdev->dev, "write CSR address failed\n"); } -static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off) +static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off) { - return csr_read(pcie, off, 0x4); + return mobiveil_csr_read(pcie, off, 0x4); } -static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off) +static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off) { - csr_write(pcie, val, off, 0x4); + mobiveil_csr_write(pcie, val, off, 0x4); } static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) { - return (csr_readl(pcie, LTSSM_STATUS) & + return (mobiveil_csr_readl(pcie, LTSSM_STATUS) & LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; } @@ -323,7 +324,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT; - csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); + mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); return pcie->config_axi_slave_base + where; } @@ -353,13 +354,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) chained_irq_enter(chip, desc); /* read INTx status */ - val = csr_readl(pcie, 
PAB_INTP_AMBA_MISC_STAT); - mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); + val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); + mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); intr_status = val & mask; /* Handle INTx */ if (intr_status & PAB_INTP_INTX_MASK) { - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); + shifted_status = mobiveil_csr_readl(pcie, + PAB_INTP_AMBA_MISC_STAT); shifted_status &= PAB_INTP_INTX_MASK; shifted_status >>= PAB_INTX_START; do { @@ -373,12 +375,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) bit); /* clear interrupt handled */ - csr_writel(pcie, 1 << (PAB_INTX_START + bit), - PAB_INTP_AMBA_MISC_STAT); + mobiveil_csr_writel(pcie, + 1 << (PAB_INTX_START + bit), + PAB_INTP_AMBA_MISC_STAT); } - shifted_status = csr_readl(pcie, - PAB_INTP_AMBA_MISC_STAT); + shifted_status = mobiveil_csr_readl(pcie, + PAB_INTP_AMBA_MISC_STAT); shifted_status &= PAB_INTP_INTX_MASK; shifted_status >>= PAB_INTX_START; } while (shifted_status != 0); @@ -413,7 +416,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) } /* Clear the interrupt status */ - csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT); + mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT); chained_irq_exit(chip, desc); } @@ -474,24 +477,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, return; } - value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); + value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK); value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT | (lower_32_bits(size64) & WIN_SIZE_MASK); - csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); + mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); - csr_writel(pcie, upper_32_bits(size64), - PAB_EXT_PEX_AMAP_SIZEN(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_PEX_AMAP_SIZEN(win_num)); - csr_writel(pcie, lower_32_bits(cpu_addr), - PAB_PEX_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, upper_32_bits(cpu_addr), - PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); + mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr), + PAB_PEX_AMAP_AXI_WIN(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, lower_32_bits(pci_addr), - PAB_PEX_AMAP_PEX_WIN_L(win_num)); - csr_writel(pcie, upper_32_bits(pci_addr), - PAB_PEX_AMAP_PEX_WIN_H(win_num)); + mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_L(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_H(win_num)); pcie->ib_wins_configured++; } @@ -515,27 +518,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit * to 4 KB in PAB_AXI_AMAP_CTRL register */ - value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); + value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK); value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | (lower_32_bits(size64) & WIN_SIZE_MASK); - csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); + mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); - csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_AXI_AMAP_SIZE(win_num)); /* * program AXI window base with appropriate value in * PAB_AXI_AMAP_AXI_WIN0 register */ - csr_writel(pcie, 
lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), - PAB_AXI_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, upper_32_bits(cpu_addr), - PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); + mobiveil_csr_writel(pcie, + lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), + PAB_AXI_AMAP_AXI_WIN(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, lower_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_L(win_num)); - csr_writel(pcie, upper_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_H(win_num)); + mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), + PAB_AXI_AMAP_PEX_WIN_L(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), + PAB_AXI_AMAP_PEX_WIN_H(win_num)); pcie->ob_wins_configured++; } @@ -579,42 +584,42 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) struct resource_entry *win; /* setup bus numbers */ - value = csr_readl(pcie, PCI_PRIMARY_BUS); + value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS); value &= 0xff000000; value |= 0x00ff0100; - csr_writel(pcie, value, PCI_PRIMARY_BUS); + mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS); /* * program Bus Master Enable Bit in Command Register in PAB Config * Space */ - value = csr_readl(pcie, PCI_COMMAND); + value = mobiveil_csr_readl(pcie, PCI_COMMAND); value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; - csr_writel(pcie, value, PCI_COMMAND); + mobiveil_csr_writel(pcie, value, PCI_COMMAND); /* * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL * register */ - pab_ctrl = csr_readl(pcie, PAB_CTRL); + pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL); pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT); - csr_writel(pcie, pab_ctrl, PAB_CTRL); + mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL); - csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), - PAB_INTP_AMBA_MISC_ENB); + mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), + PAB_INTP_AMBA_MISC_ENB); /* * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in * PAB_AXI_PIO_CTRL Register */ - value = csr_readl(pcie, PAB_AXI_PIO_CTRL); + value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL); value |= APIO_EN_MASK; - csr_writel(pcie, value, PAB_AXI_PIO_CTRL); + mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL); /* Enable PCIe PIO master */ - value = csr_readl(pcie, PAB_PEX_PIO_CTRL); + value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL); value |= 1 << PIO_ENABLE_SHIFT; - csr_writel(pcie, value, PAB_PEX_PIO_CTRL); + mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL); /* * we'll program one outbound window for config reads and @@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) } /* fixup for PCIe class register */ - value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); + value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); value &= 0xff; value |= (PCI_CLASS_BRIDGE_PCI << 16); - csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); + mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); /* setup MSI hardware registers */ mobiveil_pcie_enable_msi(pcie); @@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data) pcie = irq_desc_get_chip_data(desc); mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); + shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); shifted_val &= ~mask; - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); + mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); 
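The mobiveil changes around this point are a mechanical rename: driver-private CSR accessors gain a mobiveil_ prefix so they cannot collide with identically named symbols elsewhere in the kernel. The resulting shape, sketched generically (this omits the size and error handling the real helpers keep):

#include <linux/io.h>

static u32 sketch_csr_readl(void __iomem *csr_base, u32 off)
{
        return readl(csr_base + off);
}

static void sketch_csr_writel(void __iomem *csr_base, u32 val, u32 off)
{
        writel(val, csr_base + off);
}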
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); } @@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data) pcie = irq_desc_get_chip_data(desc); mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); + shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); shifted_val |= mask; - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); + mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); } diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index f6a669a9af41..1ad0b56f11b4 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -93,8 +93,11 @@ #define LINK_SPEED_2_5GTS (1 << 16) #define LINK_SPEED_5_0GTS (2 << 16) #define MACCTLR 0x011058 +#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */ #define SPEED_CHANGE BIT(24) #define SCRAMBLE_DISABLE BIT(27) +#define LTSMDIS BIT(31) +#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK) #define PMSR 0x01105c #define MACS2R 0x011078 #define MACCGSPSETR 0x011084 @@ -615,6 +618,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); + rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); + /* Finish initialization - establish a PCI Express link */ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); @@ -1237,6 +1242,7 @@ static int rcar_pcie_resume_noirq(struct device *dev) return 0; /* Re-establish the PCIe link */ + rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); return rcar_pcie_wait_for_dl(pcie); } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index e4c46637f32f..b3869951c0eb 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -449,8 +449,15 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge) /* Scan non-hotplug bridges that need to be reconfigured */ for_each_pci_bridge(dev, bus) { - if (!hotplug_is_native(dev)) - max = pci_scan_bridge(bus, dev, max, 1); + if (hotplug_is_native(dev)) + continue; + + max = pci_scan_bridge(bus, dev, max, 1); + if (dev->subordinate) { + pcibios_resource_survey_bus(dev->subordinate); + pci_bus_size_bridges(dev->subordinate); + pci_bus_assign_resources(dev->subordinate); + } } } @@ -480,7 +487,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (PCI_SLOT(dev->devfn) == slot->device) acpiphp_native_scan_bridge(dev); } - pci_assign_unassigned_bridge_resources(bus->self); } else { LIST_HEAD(add_list); int max, pass; diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 654c972b8ea0..882ce82c4699 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -72,6 +72,7 @@ extern int pciehp_poll_time; * @reset_lock: prevents access to the Data Link Layer Link Active bit in the * Link Status register and to the Presence Detect State bit in the Slot * Status register during a slot reset which may cause them to flap + * @ist_running: flag to keep user request waiting while IRQ thread is running * @request_result: result of last user request submitted to the IRQ thread * @requester: wait queue to wake up on completion of user request, * used for synchronous slot enable/disable request via sysfs @@ -101,6 +102,7 @@ struct controller { struct 
hotplug_slot hotplug_slot; /* hotplug core interface */ struct rw_semaphore reset_lock; + unsigned int ist_running; int request_result; wait_queue_head_t requester; }; diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index b3122c151b80..56daad828c9e 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -253,7 +253,7 @@ static bool pme_is_native(struct pcie_device *dev) return pcie_ports_native || host->native_pme; } -static int pciehp_suspend(struct pcie_device *dev) +static void pciehp_disable_interrupt(struct pcie_device *dev) { /* * Disable hotplug interrupt so that it does not trigger @@ -261,7 +261,19 @@ static int pciehp_suspend(struct pcie_device *dev) */ if (pme_is_native(dev)) pcie_disable_interrupt(get_service_data(dev)); +} +#ifdef CONFIG_PM_SLEEP +static int pciehp_suspend(struct pcie_device *dev) +{ + /* + * If the port is already runtime suspended we can keep it that + * way. + */ + if (dev_pm_smart_suspend_and_suspended(&dev->port->dev)) + return 0; + + pciehp_disable_interrupt(dev); return 0; } @@ -279,6 +291,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev) return 0; } +#endif static int pciehp_resume(struct pcie_device *dev) { @@ -292,6 +305,12 @@ static int pciehp_resume(struct pcie_device *dev) return 0; } +static int pciehp_runtime_suspend(struct pcie_device *dev) +{ + pciehp_disable_interrupt(dev); + return 0; +} + static int pciehp_runtime_resume(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); @@ -318,10 +337,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = { .remove = pciehp_remove, #ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .suspend = pciehp_suspend, .resume_noirq = pciehp_resume_noirq, .resume = pciehp_resume, - .runtime_suspend = pciehp_suspend, +#endif + .runtime_suspend = pciehp_runtime_suspend, .runtime_resume = pciehp_runtime_resume, #endif /* PM */ }; diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 21af7b16d7a4..dd8e4a5fb282 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -375,7 +375,8 @@ int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot) ctrl->request_result = -ENODEV; pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); wait_event(ctrl->requester, - !atomic_read(&ctrl->pending_events)); + !atomic_read(&ctrl->pending_events) && + !ctrl->ist_running); return ctrl->request_result; case POWERON_STATE: ctrl_info(ctrl, "Slot(%s): Already in powering on state\n", @@ -408,7 +409,8 @@ int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot) mutex_unlock(&ctrl->state_lock); pciehp_request(ctrl, DISABLE_SLOT); wait_event(ctrl->requester, - !atomic_read(&ctrl->pending_events)); + !atomic_read(&ctrl->pending_events) && + !ctrl->ist_running); return ctrl->request_result; case POWEROFF_STATE: ctrl_info(ctrl, "Slot(%s): Already in powering off state\n", diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 1a522c1c4177..86d97f3112f0 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -583,6 +583,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) irqreturn_t ret; u32 events; + ctrl->ist_running = true; pci_config_pm_runtime_get(pdev); /* rerun pciehp_isr() if the port was inaccessible on interrupt */ @@ -629,6 +630,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) up_read(&ctrl->reset_lock); pci_config_pm_runtime_put(pdev); + ctrl->ist_running = false; wake_up(&ctrl->requester); 
return IRQ_HANDLED; } diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 18627bb21e9e..32eab1776cfe 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot) return speed; } -static int get_children_props(struct device_node *dn, const int **drc_indexes, - const int **drc_names, const int **drc_types, - const int **drc_power_domains) +static int get_children_props(struct device_node *dn, const __be32 **drc_indexes, + const __be32 **drc_names, const __be32 **drc_types, + const __be32 **drc_power_domains) { - const int *indexes, *names, *types, *domains; + const __be32 *indexes, *names, *types, *domains; indexes = of_get_property(dn, "ibm,drc-indexes", NULL); names = of_get_property(dn, "ibm,drc-names", NULL); @@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, char *drc_type, unsigned int my_index) { char *name_tmp, *type_tmp; - const int *indexes, *names; - const int *types, *domains; + const __be32 *indexes, *names; + const __be32 *types, *domains; int i, rc; rc = get_children_props(dn->parent, &indexes, &names, &types, &domains); @@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, /* Iterate through parent properties, looking for my-drc-index */ for (i = 0; i < be32_to_cpu(indexes[0]); i++) { - if ((unsigned int) indexes[i + 1] == my_index) + if (be32_to_cpu(indexes[i + 1]) == my_index) break; name_tmp += (strlen(name_tmp) + 1); @@ -239,6 +239,8 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, value = of_prop_next_u32(info, NULL, &entries); if (!value) return -EINVAL; + else + value++; for (j = 0; j < entries; j++) { of_read_drc_info_cell(&info, &value, &drc); @@ -246,9 +248,10 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, /* Should now know end of current entry */ /* Found it */ - if (my_index <= drc.last_drc_index) { + if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) { + int index = my_index - drc.drc_index_start; sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, - my_index); + drc.drc_name_suffix_start + index); break; } } @@ -265,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, char *drc_type) { - const unsigned int *my_index; + const __be32 *my_index; my_index = of_get_property(dn, "ibm,my-drc-index", NULL); if (!my_index) { @@ -273,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, return -EINVAL; } - if (firmware_has_feature(FW_FEATURE_DRC_INFO)) + if (of_find_property(dn->parent, "ibm,drc-info", NULL)) return rpaphp_check_drc_props_v2(dn, drc_name, drc_type, - *my_index); + be32_to_cpu(*my_index)); else return rpaphp_check_drc_props_v1(dn, drc_name, drc_type, - *my_index); + be32_to_cpu(*my_index)); } EXPORT_SYMBOL_GPL(rpaphp_check_drc_props); @@ -309,10 +312,11 @@ static int is_php_type(char *drc_type) * for built-in pci slots (even when the built-in slots are * dlparable.) 
*/ -static int is_php_dn(struct device_node *dn, const int **indexes, - const int **names, const int **types, const int **power_domains) +static int is_php_dn(struct device_node *dn, const __be32 **indexes, + const __be32 **names, const __be32 **types, + const __be32 **power_domains) { - const int *drc_types; + const __be32 *drc_types; int rc; rc = get_children_props(dn, indexes, names, &drc_types, power_domains); @@ -347,7 +351,7 @@ int rpaphp_add_slot(struct device_node *dn) struct slot *slot; int retval = 0; int i; - const int *indexes, *names, *types, *power_domains; + const __be32 *indexes, *names, *types, *power_domains; char *name, *type; if (!dn->name || strcmp(dn->name, "pci")) diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index b3f972e8cfed..32d94ed3b855 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -187,10 +187,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) - goto failed2; + goto failed1; rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn"); if (rc) - goto failed3; + goto failed2; kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); @@ -198,11 +198,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) return 0; -failed3: - sysfs_remove_link(&dev->dev.kobj, buf); failed2: - pci_stop_and_remove_bus_device(virtfn); + sysfs_remove_link(&dev->dev.kobj, buf); failed1: + pci_stop_and_remove_bus_device(virtfn); pci_dev_put(dev); failed0: virtfn_remove_bus(dev->bus, bus); @@ -294,6 +293,8 @@ static ssize_t sriov_numvfs_store(struct device *dev, if (num_vfs == 0) { /* disable VFs */ + dev_info(&pdev->dev, "disable vfs. old vfs %d, new vfs %d\n", + pdev->sriov->num_VFs, num_vfs); ret = pdev->driver->sriov_configure(pdev, 0); goto exit; } @@ -306,6 +307,9 @@ static ssize_t sriov_numvfs_store(struct device *dev, goto exit; } + dev_info(&pdev->dev, "enable vfs. 
old vfs %d, new vfs %d\n", + pdev->sriov->num_VFs, num_vfs); + ret = pdev->driver->sriov_configure(pdev, num_vfs); if (ret < 0) goto exit; @@ -567,6 +571,8 @@ static void sriov_disable(struct pci_dev *dev) if (!iov->num_VFs) return; + pr_info("disable %d sriov vfs\n", iov->num_VFs); + sriov_del_vfs(dev); iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 0884bedcfc7a..771041784e64 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -213,12 +213,13 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) if (pci_msi_ignore_mask) return 0; + desc_addr = pci_msix_desc_addr(desc); if (!desc_addr) return 0; mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; - if (flag) + if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT) mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a8124e47bf6e..729eca6e01a2 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -26,6 +26,8 @@ struct pci_dynid { struct pci_device_id id; }; +static int network_strict_sort; + /** * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices * @drv: target pci driver @@ -364,6 +366,32 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, return error; } +#define STORAGE_PROBE_DELAY (5*HZ) +/* Throttle successive storage probes so devices are probed in a stable order */ +static void storage_probe_delay(struct device *dev) +{ + static long lastprobe; + long delta = lastprobe + STORAGE_PROBE_DELAY - jiffies; + if (lastprobe && delta > 0 && delta < STORAGE_PROBE_DELAY) { + dev_info(dev, "probe delay %ld ms\n", delta * 1000 / HZ); + while (delta > 0) + delta = schedule_timeout_interruptible(delta); + } + lastprobe = jiffies; +} + +#define NETWORK_PROBE_DELAY (5*HZ) +/* Same throttling for network devices, enabled by the network-strict-sort parameter */ +static void network_probe_delay(struct device *dev) +{ + static long lastprobe; + long delta = lastprobe + NETWORK_PROBE_DELAY - jiffies; + if (lastprobe && delta > 0 && delta < NETWORK_PROBE_DELAY) { + dev_info(dev, "probe delay %ld ms\n", delta * 1000 / HZ); + while (delta > 0) + delta = schedule_timeout_interruptible(delta); + } + lastprobe = jiffies; +} + /** * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device @@ -381,8 +409,13 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) error = -ENODEV; id = pci_match_device(drv, pci_dev); - if (id) + if (id) { + if ((pci_dev->class >> 16) == PCI_BASE_CLASS_STORAGE && pci_dev->vendor != 0x1af4) + storage_probe_delay(&pci_dev->dev); + if ((pci_dev->class >> 16) == PCI_BASE_CLASS_NETWORK && network_strict_sort) + network_probe_delay(&pci_dev->dev); error = pci_call_probe(drv, pci_dev, id); + } } return error; } @@ -941,12 +974,11 @@ static int pci_pm_resume_noirq(struct device *dev) pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); + pcie_pme_root_status_cleanup(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); - pcie_pme_root_status_cleanup(pci_dev); - if (drv && drv->pm && drv->pm->resume_noirq) error = drv->pm->resume_noirq(dev); @@ -1076,17 +1108,22 @@ static int pci_pm_thaw_noirq(struct device *dev) return error; } - if (pci_has_legacy_pm_support(pci_dev)) - return pci_legacy_resume_early(dev); - /* - * pci_restore_state() requires the device to be in D0 (because of MSI - * restoration among other things), so force it into D0 in case the - * driver's "freeze"
callbacks put it into a low-power state directly. + * Both the legacy ->resume_early() and the new pm->thaw_noirq() + * callbacks assume the device has been returned to D0 and its + * config state has been restored. + * + * In addition, pci_restore_state() restores MSI-X state in MMIO + * space, which requires the device to be in D0, so return it to D0 + * in case the driver's "freeze" callbacks put it into a low-power + * state. */ pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume_early(dev); + if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); @@ -1701,3 +1738,10 @@ static int __init pci_driver_init(void) return 0; } postcore_initcall(pci_driver_init); + +static int parse_network_strict_sort(char *arg) +{ + network_strict_sort = 1; + return 0; +} +early_param("network-strict-sort", parse_network_strict_sort); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 793412954529..e401f040f157 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -464,7 +464,8 @@ static ssize_t dev_rescan_store(struct device *dev, } return count; } -static DEVICE_ATTR_WO(dev_rescan); +static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL, + dev_rescan_store); static ssize_t remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -501,7 +502,8 @@ static ssize_t bus_rescan_store(struct device *dev, } return count; } -static DEVICE_ATTR_WO(bus_rescan); +static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, + bus_rescan_store); #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a97e2571a527..981ae16f935b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5854,10 +5854,29 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, return 0; } +#ifdef CONFIG_ACPI +bool pci_pr3_present(struct pci_dev *pdev) +{ + struct acpi_device *adev; + + if (acpi_disabled) + return false; + + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) + return false; + + return adev->power.flags.power_resources && + acpi_has_method(adev->handle, "_PR3"); +} +EXPORT_SYMBOL_GPL(pci_pr3_present); +#endif + /** * pci_add_dma_alias - Add a DMA devfn alias for a device * @dev: the PCI device for which alias is added - * @devfn: alias slot and function + * @devfn_from: alias slot and function + * @nr_devfns: number of subsequent devfns to alias * * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask * which is used to program permissible bus-devfn source addresses for DMA @@ -5873,18 +5892,29 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, * cannot be left as a userspace activity). DMA aliases should therefore * be configured via quirks, such as the PCI fixup header quirk. 
*/ -void pci_add_dma_alias(struct pci_dev *dev, u8 devfn) +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns) { + int devfn_to; + + nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from); + devfn_to = devfn_from + nr_devfns - 1; + if (!dev->dma_alias_mask) - dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL); + dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL); if (!dev->dma_alias_mask) { pci_warn(dev, "Unable to allocate DMA alias mask\n"); return; } - set_bit(devfn, dev->dma_alias_mask); - pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n", - PCI_SLOT(devfn), PCI_FUNC(devfn)); + bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns); + + if (nr_devfns == 1) + pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n", + PCI_SLOT(devfn_from), PCI_FUNC(devfn_from)); + else if (nr_devfns > 1) + pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n", + PCI_SLOT(devfn_from), PCI_FUNC(devfn_from), + PCI_SLOT(devfn_to), PCI_FUNC(devfn_to)); } bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 3f6947ee3324..273d60cb0762 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -4,6 +4,9 @@ #include +/* Number of possible devfns: 0.0 to 1f.7 inclusive */ +#define MAX_NR_DEVFNS 256 + #define PCI_FIND_CAP_TTL 48 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index b45bc47d04fe..271aecfbc3bf 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -1387,6 +1387,7 @@ static int aer_probe(struct pcie_device *dev) return -ENOMEM; rpc->rpd = port; + INIT_KFIFO(rpc->aer_fifo); set_service_data(dev, rpc); status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 98cfa30f3fae..9361f3aa26ab 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev) snprintf(clock_desc, sizeof(clock_desc), ">254ns"); break; default: - snprintf(clock_desc, sizeof(clock_desc), "%udns", + snprintf(clock_desc, sizeof(clock_desc), "%uns", dev->ptm_granularity); break; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 3d5271a7a849..5c68a0bffd20 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -485,6 +485,10 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) if (mem_base_hi <= mem_limit_hi) { base64 |= (u64) mem_base_hi << 32; limit64 |= (u64) mem_limit_hi << 32; + } else { + base64 |= (u64) mem_base_hi << 32; + limit64 |= (u64) mem_limit_hi << 32; + pr_info("jws[base>limit]: base:%llu, limit:%llu\n", base64, limit64); } } @@ -572,6 +576,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev) bridge->release_fn(bridge); pci_free_resource_list(&bridge->windows); + pci_free_resource_list(&bridge->dma_ranges); } static void pci_release_host_bridge_dev(struct device *dev) @@ -1089,14 +1094,15 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, * @sec: updated with secondary bus number from EA * @sub: updated with subordinate bus number from EA * - * If @dev is a bridge with EA capability, update @sec and @sub with - * fixed bus numbers from the capability and return true. Otherwise, - * return false. + * If @dev is a bridge with EA capability that specifies valid secondary + * and subordinate bus numbers, return true with the bus numbers in @sec + * and @sub. 
Otherwise return false. */ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub) { int ea, offset; u32 dw; + u8 ea_sec, ea_sub; if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) return false; @@ -1108,8 +1114,13 @@ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub) offset = ea + PCI_EA_FIRST_ENT; pci_read_config_dword(dev, offset, &dw); - *sec = dw & PCI_EA_SEC_BUS_MASK; - *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT; + ea_sec = dw & PCI_EA_SEC_BUS_MASK; + ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT; + if (ea_sec == 0 || ea_sub < ea_sec) + return false; + + *sec = ea_sec; + *sub = ea_sub; return true; } @@ -2260,6 +2271,21 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) return NULL; +#ifdef CONFIG_FIX_OPTION_ROM +{ + u16 class; + u32 bar_response; + + pci_bus_read_config_word(bus, devfn, PCI_CLASS_DEVICE, &class); + class = class >> 8; + if (class == PCI_BASE_CLASS_STORAGE) { + pci_bus_write_config_dword(bus, devfn, 0x30, 0xffffffff); + pci_bus_read_config_dword(bus, devfn, 0x30, &bar_response); + pci_bus_write_config_dword(bus, devfn, 0x30, 0x0); + } +} +#endif + dev = pci_alloc_dev(bus); if (!dev) return NULL; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 320255e5e8f8..2fdceaab7307 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1871,19 +1871,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); +static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay) +{ + if (dev->d3_delay >= delay) + return; + + dev->d3_delay = delay; + pci_info(dev, "extending delay after power-on from D3hot to %d msec\n", + dev->d3_delay); +} + static void quirk_radeon_pm(struct pci_dev *dev) { if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE && - dev->subsystem_device == 0x00e2) { - if (dev->d3_delay < 20) { - dev->d3_delay = 20; - pci_info(dev, "extending delay after power-on from D3 to %d msec\n", - dev->d3_delay); - } - } + dev->subsystem_device == 0x00e2) + quirk_d3hot_delay(dev, 20); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm); +/* + * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle. + * https://bugzilla.kernel.org/show_bug.cgi?id=205587 + * + * The kernel attempts to transition these devices to D3cold, but that seems + * to be ineffective on the platforms in question; the PCI device appears to + * remain on in D3hot state. The D3hot-to-D0 transition then requires an + * extended delay in order to succeed. + */ +static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev) +{ + quirk_d3hot_delay(dev, 20); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot); + #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) { @@ -2381,32 +2402,6 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5719, quirk_brcm_5719_limit_mrrs); -#ifdef CONFIG_PCIE_IPROC_PLATFORM -static void quirk_paxc_bridge(struct pci_dev *pdev) -{ - /* - * The PCI config space is shared with the PAXC root port and the first - * Ethernet device. So, we need to workaround this by telling the PCI - * code that the bridge is not an Ethernet device. 
- */ - if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - pdev->class = PCI_CLASS_BRIDGE_PCI << 8; - - /* - * MPSS is not being set properly (as it is currently 0). This is - * because that area of the PCI config space is hard coded to zero, and - * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS) - * so that the MPS can be set to the real max value. - */ - pdev->pcie_mpss = 2; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge); -#endif - /* * Originally in EDAC sources for i82875P: Intel tells BIOS developers to * hide device 6 which configures the overflow device access containing the @@ -3932,7 +3927,7 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) static void quirk_dma_func0_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 0) - pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1); } /* @@ -3946,7 +3941,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); static void quirk_dma_func1_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 1) - pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); + pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1); } /* @@ -4031,7 +4026,7 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev) id = pci_match_id(fixed_dma_alias_tbl, dev); if (id) - pci_add_dma_alias(dev, id->driver_data); + pci_add_dma_alias(dev, id->driver_data, 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); @@ -4073,13 +4068,42 @@ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); */ static void quirk_mic_x200_dma_alias(struct pci_dev *pdev) { - pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0)); - pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0)); - pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3)); + pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1); + pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1); + pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); +/* + * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices + * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx). + * + * Similarly to MIC x200, we need to add DMA aliases to allow buffer access + * when IOMMU is enabled. These aliases allow computational unit access to + * host memory. These aliases mark the whole VCA device as one IOMMU + * group. + * + * All possible slot numbers (0x20) are used, since we are unable to tell + * what slot is used on other side. This quirk is intended for both host + * and computational unit sides. The VCA devices have up to five functions + * (four for DMA channels and one additional). 
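+ * + * For example, for slot 0x10 this aliases devfns 10.0 through 10.4; the + * loop below repeats that for every slot from 0x00 to 0x1f.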
+ */ +static void quirk_pex_vca_alias(struct pci_dev *pdev) +{ + const unsigned int num_pci_slots = 0x20; + unsigned int slot; + + for (slot = 0; slot < num_pci_slots; slot++) + pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias); + /* * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are * associated not at the root bus, but at a bridge below. This quirk avoids @@ -4313,15 +4337,21 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) { + if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) + return false; + + switch (dev->device) { /* - * Effectively selects all downstream ports for whole ThunderX 1 - * family by 0xf800 mask (which represents 8 SoCs), while the lower - * bits of device ID are used to indicate which subdevice is used - * within the SoC. + * Effectively selects all downstream ports for whole ThunderX1 + * (which represents 8 SoCs). */ - return (pci_is_pcie(dev) && - (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) && - ((dev->device & 0xf800) == 0xa000)); + case 0xa000 ... 0xa7ff: /* ThunderX1 */ + case 0xaf84: /* ThunderX2 */ + case 0xb884: /* ThunderX3 */ + return true; + default: + return false; + } } static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) @@ -4706,7 +4736,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) #define INTEL_BSPR_REG_BPPD (1 << 9) /* Upstream Peer Decode Configuration Register */ -#define INTEL_UPDCR_REG 0x1114 +#define INTEL_UPDCR_REG 0x1014 /* 5:0 Peer Decode Enable bits */ #define INTEL_UPDCR_REG_MASK 0x3f @@ -5015,18 +5045,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS /* - * Some devices have a broken ATS implementation causing IOMMU stalls. - * Don't use ATS for those devices. + * Some devices require additional driver setup to enable ATS. Don't use + * ATS for those devices as ATS will be enabled before the driver has had a + * chance to load and configure the device. 
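+ * + * Of the devices below, only Navi14 (device 0x7340) limits the quirk to a + * specific revision: the function returns early and keeps ATS enabled on + * Navi14 revisions other than 0xc5.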
*/ -static void quirk_no_ats(struct pci_dev *pdev) +static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - pci_info(pdev, "disabling ATS (broken on this device)\n"); + if (pdev->device == 0x7340 && pdev->revision != 0xc5) + return; + + pci_info(pdev, "disabling ATS\n"); pdev->ats_cap = 0; } /* AMD Stoney platform GPU */ -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); +/* AMD Iceland dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi14 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ @@ -5273,7 +5310,7 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev) pci_dbg(pdev, "Aliasing Partition %d Proxy ID %02x.%d\n", pp, PCI_SLOT(devfn), PCI_FUNC(devfn)); - pci_add_dma_alias(pdev, devfn); + pci_add_dma_alias(pdev, devfn, 1); } } @@ -5315,6 +5352,21 @@ SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */ SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */ SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */ +/* + * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints. + * These IDs are used to forward responses to the originator on the other + * side of the NTB. Alias all possible IDs to the NTB to permit access when + * the IOMMU is turned on. + */ +static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev) +{ + pci_info(pdev, "Setting PLX NTB proxy ID aliases\n"); + /* PLX NTB may use all 256 devfns */ + pci_add_dma_alias(pdev, 0, 256); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias); + /* * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does * not always reset the secondary Nvidia GPU between reboots if the system diff --git a/drivers/pci/search.c b/drivers/pci/search.c index bade14002fd8..e4dbdef5aef0 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -41,9 +41,9 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, * DMA, iterate over that too. 
*/ if (unlikely(pdev->dma_alias_mask)) { - u8 devfn; + unsigned int devfn; - for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) { + for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) { ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn), data); if (ret) diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index e7dbe21705ba..5356630e0e48 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1785,12 +1785,18 @@ again: /* Restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; + int idx; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; - if (fail_res->dev->subordinate) - res->flags = 0; + + if (pci_is_bridge(fail_res->dev)) { + idx = res - &fail_res->dev->resource[0]; + if (idx >= PCI_BRIDGE_RESOURCES && + idx <= PCI_BRIDGE_RESOURCE_END) + res->flags = 0; + } } free_list(&fail_head); @@ -2037,12 +2043,18 @@ again: /* Restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; + int idx; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; - if (fail_res->dev->subordinate) - res->flags = 0; + + if (pci_is_bridge(fail_res->dev)) { + idx = res - &fail_res->dev->resource[0]; + if (idx >= PCI_BRIDGE_RESOURCES && + idx <= PCI_BRIDGE_RESOURCE_END) + res->flags = 0; + } } free_list(&fail_head); diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 8c94cd3fd1f2..cc43c855452f 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -675,7 +675,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev, return -ENOMEM; s->global = ioread32(&stdev->mmio_sw_event->global_summary); - s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap); + s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap); s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary); for (i = 0; i < stdev->partition_count; i++) { @@ -1276,7 +1276,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev) if (nvecs < 0) return nvecs; - event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number); + event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number); if (event_irq < 0 || event_irq >= nvecs) return -EFAULT; @@ -1349,7 +1349,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, if (rc) return rc; - rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) return rc; diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index acce8781c456..f5c7a845cd7b 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -24,8 +24,6 @@ static int arm_pmu_acpi_register_irq(int cpu) int gsi, trigger; gicc = acpi_cpu_get_madt_gicc(cpu); - if (WARN_ON(!gicc)) - return -EINVAL; gsi = gicc->performance_interrupt; @@ -64,11 +62,10 @@ static void arm_pmu_acpi_unregister_irq(int cpu) int gsi; gicc = acpi_cpu_get_madt_gicc(cpu); - if (!gicc) - return; gsi = gicc->performance_interrupt; - acpi_unregister_gsi(gsi); + if (gsi) + acpi_unregister_gsi(gsi); } #if IS_ENABLED(CONFIG_ARM_SPE_PMU) diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index abcf54f7d19c..2f8787276d9b 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -772,7 +772,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) smmu_pmu->reloc_base = smmu_pmu->reg_base; } - irq = 
platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) smmu_pmu->irq = irq; @@ -815,7 +815,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) if (err) { dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", err, &res_0->start); - goto out_cpuhp_err; + return err; } err = perf_pmu_register(&smmu_pmu->pmu, name, -1); @@ -834,8 +834,6 @@ static int smmu_pmu_probe(struct platform_device *pdev) out_unregister: cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); -out_cpuhp_err: - put_cpu(); return err; } diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index ce7345745b42..6eef47de8fcc 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -45,7 +45,8 @@ static DEFINE_IDA(ddr_ida); /* DDR Perf hardware feature */ -#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ +#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ +#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */ struct fsl_ddr_devtype_data { unsigned int quirks; /* quirks needed for different DDR Perf core */ @@ -178,6 +179,36 @@ static const struct attribute_group *attr_groups[] = { NULL, }; +static bool ddr_perf_is_filtered(struct perf_event *event) +{ + return event->attr.config == 0x41 || event->attr.config == 0x42; +} + +static u32 ddr_perf_filter_val(struct perf_event *event) +{ + return event->attr.config1; +} + +static bool ddr_perf_filters_compatible(struct perf_event *a, + struct perf_event *b) +{ + if (!ddr_perf_is_filtered(a)) + return true; + if (!ddr_perf_is_filtered(b)) + return true; + return ddr_perf_filter_val(a) == ddr_perf_filter_val(b); +} + +static bool ddr_perf_is_enhanced_filtered(struct perf_event *event) +{ + unsigned int filt; + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + + filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED; + return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) && + ddr_perf_is_filtered(event); +} + static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event) { int i; @@ -209,27 +240,17 @@ static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter) static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) { - return readl_relaxed(pmu->base + COUNTER_READ + counter * 4); -} + struct perf_event *event = pmu->events[counter]; + void __iomem *base = pmu->base; -static bool ddr_perf_is_filtered(struct perf_event *event) -{ - return event->attr.config == 0x41 || event->attr.config == 0x42; -} - -static u32 ddr_perf_filter_val(struct perf_event *event) -{ - return event->attr.config1; -} - -static bool ddr_perf_filters_compatible(struct perf_event *a, - struct perf_event *b) -{ - if (!ddr_perf_is_filtered(a)) - return true; - if (!ddr_perf_is_filtered(b)) - return true; - return ddr_perf_filter_val(a) == ddr_perf_filter_val(b); + /* + * Return bytes instead of bursts from the DDR transaction for the + * axid-read and axid-write events if the PMU core supports the + * enhanced filter. + */ + base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 : + COUNTER_READ; + return readl_relaxed(base + counter * 4); } static int ddr_perf_event_init(struct perf_event *event) @@ -306,9 +327,10 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, if (enable) { /* - * must disable first, then enable again - * otherwise, cycle counter will not work - * if previous state is enabled. + * The cycle counter is special: to clear it, write 0 and then 1 + * into the CLEAR bit. Other counters only need 0 written into the + * CLEAR bit, and hardware turns it back into 1. The enable flow + * below is harmless for all counters. */ writel(0, pmu->base + reg); val = CNTL_EN | CNTL_CLEAR; @@ -316,7 +338,8 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, writel(val, pmu->base + reg); } else { /* Disable counter */ - writel(0, pmu->base + reg); + val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK; + writel(val, pmu->base + reg); } } @@ -551,13 +574,17 @@ static int ddr_perf_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); - goto ddr_perf_err; + goto cpuhp_state_err; } pmu->cpuhp_state = ret; /* Register the pmu instance for cpu hotplug */ - cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } /* Request irq */ irq = of_irq_get(np, 0); @@ -591,9 +618,10 @@ static int ddr_perf_probe(struct platform_device *pdev) return 0; ddr_perf_err: - if (pmu->cpuhp_state) - cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); - + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: ida_simple_remove(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; @@ -604,6 +632,7 @@ static int ddr_perf_remove(struct platform_device *pdev) struct ddr_pmu *pmu = platform_get_drvdata(pdev); cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); irq_set_affinity_hint(pmu->irq, NULL); perf_pmu_unregister(&pmu->pmu); diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c index 544d64a84cc0..6e457967653e 100644 --- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c +++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c @@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy) goto err_disable_pdi_clk; /* Check if we are in "startup ready" status */ - if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0) + ret = ltq_vrx200_pcie_phy_wait_for_pll(phy); + if (ret) goto err_disable_phy_clk; ltq_vrx200_pcie_phy_apply_workarounds(phy); diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index ead06c6c2601..5baf64dfb24d 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -115,7 +115,7 @@ struct cpcap_usb_ints_state { enum cpcap_gpio_mode { CPCAP_DM_DP, CPCAP_MDM_RX_TX, - CPCAP_UNKNOWN, + CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */ CPCAP_OTG_DM_DP, }; @@ -207,6 +207,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata, static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata); static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata); +static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata, + enum musb_vbus_id_status status) +{ + int error; + + error = musb_mailbox(status); + if (!error) + return; + + dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n", + __func__, error); +} + static void cpcap_usb_detect(struct work_struct *work) { struct cpcap_phy_ddata *ddata; @@ -226,9 +239,7 @@ static void cpcap_usb_detect(struct work_struct *work) if (error) goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); - if (error) - goto out_err; +
cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_VBUSSTBY_EN | @@ -257,9 +268,7 @@ static void cpcap_usb_detect(struct work_struct *work) error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); - if (error) - goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); return; } @@ -269,22 +278,18 @@ static void cpcap_usb_detect(struct work_struct *work) error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_VALID); - if (error) - goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID); return; } + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); + /* Default to debug UART mode */ error = cpcap_usb_set_uart_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - goto out_err; - dev_dbg(ddata->dev, "set UART mode\n"); return; @@ -376,7 +381,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) goto out_err; @@ -403,6 +409,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable UART mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -415,7 +426,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) return error; @@ -455,6 +467,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable USB mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -649,9 +666,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev) if (error) dev_err(ddata->dev, "could not set UART mode\n"); - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - dev_err(ddata->dev, "could not set mailbox\n"); + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); usb_remove_phy(&ddata->phy); cancel_delayed_work_sync(&ddata->detect_work); diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index ee184d5607bd..94a34cf75eb3 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -20,6 +20,7 @@ #define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */ #define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */ +#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */ #define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */ #define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */ @@ -200,7 +201,7 @@ static void phy_mdm6600_status(struct work_struct *work) struct phy_mdm6600 *ddata; struct device *dev; DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES); - int error, i, val = 0; + int error; ddata = container_of(work, struct phy_mdm6600, status_work.work); dev = ddata->dev; @@ -212,16 +213,11 @@ static void phy_mdm6600_status(struct work_struct *work) if (error) return; - for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) { - val |= test_bit(i, values) << i; - dev_dbg(ddata->dev, 
"XXX %s: i: %i values[i]: %i val: %i\n", - __func__, i, test_bit(i, values), val); - } - ddata->status = values[0]; + ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1); dev_info(dev, "modem status: %i %s\n", ddata->status, - phy_mdm6600_status_name[ddata->status & 7]); + phy_mdm6600_status_name[ddata->status]); complete(&ddata->ack); } @@ -248,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data) { struct phy_mdm6600 *ddata = data; struct gpio_desc *mode_gpio1; + int error, wakeup; mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1]; - dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", - gpiod_get_value(mode_gpio1)); + wakeup = gpiod_get_value(mode_gpio1); + if (!wakeup) + return IRQ_NONE; + + dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup); + error = pm_runtime_get_sync(ddata->dev); + if (error < 0) { + pm_runtime_put_noidle(ddata->dev); + + return IRQ_NONE; + } + + /* Just wake-up and kick the autosuspend timer */ + pm_runtime_mark_last_busy(ddata->dev); + pm_runtime_put_autosuspend(ddata->dev); return IRQ_HANDLED; } @@ -501,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work) ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work); phy_mdm6600_wake_modem(ddata); + + /* + * The modem does not always stay awake 1.2 seconds after toggling + * the wake GPIO, and sometimes it idles after about some 600 ms + * making writes time out. + */ schedule_delayed_work(&ddata->modem_wake_work, - msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS)); + msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS)); } static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev) diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c index 42bc5150dd92..febe0aef68d4 100644 --- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c @@ -80,7 +80,7 @@ static int read_poll_timeout(void __iomem *addr, u32 mask) if (readl_relaxed(addr) & mask) return 0; - usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50); + usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50); } while (!time_after(jiffies, timeout)); return (readl_relaxed(addr) & mask) ? 
0 : -ETIMEDOUT; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 39e8deb8001e..27dd20a7fe13 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -66,7 +66,7 @@ /* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */ -#define PHY_INIT_COMPLETE_TIMEOUT 1000 +#define PHY_INIT_COMPLETE_TIMEOUT 10000 #define POWER_DOWN_DELAY_US_MIN 10 #define POWER_DOWN_DELAY_US_MAX 11 diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c index b163b3a1558d..61054272a7c8 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c @@ -158,8 +158,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy) /* setup initial state */ qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state, uphy->vbus_edev); - ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev, - EXTCON_USB, &uphy->vbus_notify); + ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB, + &uphy->vbus_notify); if (ret) goto err_ulpi; } @@ -180,6 +180,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy) { struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy); + if (uphy->vbus_edev) + extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB, + &uphy->vbus_notify); regulator_disable(uphy->v3p3); regulator_disable(uphy->v1p8); clk_disable_unprepare(uphy->sleep_clk); diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c index 2926e4937301..2e279ac0fa4d 100644 --- a/drivers/phy/renesas/phy-rcar-gen2.c +++ b/drivers/phy/renesas/phy-rcar-gen2.c @@ -71,6 +71,7 @@ struct rcar_gen2_phy_driver { struct rcar_gen2_phy_data { const struct phy_ops *gen2_phy_ops; const u32 (*select_value)[PHYS_PER_CHANNEL]; + const u32 num_channels; }; static int rcar_gen2_phy_init(struct phy *p) @@ -271,11 +272,13 @@ static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = { static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = { .gen2_phy_ops = &rcar_gen2_phy_ops, .select_value = pci_select_value, + .num_channels = ARRAY_SIZE(pci_select_value), }; static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = { .gen2_phy_ops = &rz_g1c_phy_ops, .select_value = usb20_select_value, + .num_channels = ARRAY_SIZE(usb20_select_value), }; static const struct of_device_id rcar_gen2_phy_match_table[] = { @@ -389,7 +392,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev) channel->selected_phy = -1; error = of_property_read_u32(np, "reg", &channel_num); - if (error || channel_num > 2) { + if (error || channel_num >= data->num_channels) { dev_err(dev, "Invalid \"reg\" property\n"); of_node_put(np); return error; diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index b7f6b1324395..bfb22f868857 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -320,9 +321,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) return -EIO; - if (!strncmp(buf, "host", strlen("host"))) + if (sysfs_streq(buf, "host")) new_mode = PHY_MODE_USB_HOST; - else if (!strncmp(buf, "peripheral", strlen("peripheral"))) + else if (sysfs_streq(buf, "peripheral")) new_mode = PHY_MODE_USB_DEVICE; else return -EINVAL; @@ -614,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device 
*pdev) return PTR_ERR(channel->base); /* call request_irq for OTG */ - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq >= 0) { INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 2b97fb1185a0..9ca20c947283 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate && !cfg->fracdiv) break; @@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate) break; diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index a52c5bb35033..1c536fc03c83 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -69,31 +69,31 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode) break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII; break; case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII; rgmii_id = 1; break; case PHY_INTERFACE_MODE_MII: - mode = AM33XX_GMII_SEL_MODE_MII; + case PHY_INTERFACE_MODE_GMII: + gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII; break; default: - dev_warn(dev, - "port%u: unsupported mode: \"%s\". 
Defaulting to MII.\n", - if_phy->id, phy_modes(rgmii_id)); + dev_warn(dev, "port%u: unsupported mode: \"%s\"\n", + if_phy->id, phy_modes(submode)); return -EINVAL; } if_phy->phy_if_mode = submode; dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n", - __func__, if_phy->id, mode, rgmii_id, + __func__, if_phy->id, submode, rgmii_id, if_phy->rmii_clock_external); regfield = if_phy->fields[PHY_GMII_SEL_PORT_MODE]; diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index c6800d220920..bb07024d22ed 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -1088,60 +1088,52 @@ SSSF_PIN_DECL(AF15, GPIOV7, LPCSMI, SIG_DESC_SET(SCU434, 15)); #define AB7 176 SIG_EXPR_LIST_DECL_SESG(AB7, LAD0, LPC, SIG_DESC_SET(SCU434, 16), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16)); PIN_DECL_2(AB7, GPIOW0, LAD0, ESPID0); #define AB8 177 SIG_EXPR_LIST_DECL_SESG(AB8, LAD1, LPC, SIG_DESC_SET(SCU434, 17), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17)); PIN_DECL_2(AB8, GPIOW1, LAD1, ESPID1); #define AC8 178 SIG_EXPR_LIST_DECL_SESG(AC8, LAD2, LPC, SIG_DESC_SET(SCU434, 18), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18)); PIN_DECL_2(AC8, GPIOW2, LAD2, ESPID2); #define AC7 179 SIG_EXPR_LIST_DECL_SESG(AC7, LAD3, LPC, SIG_DESC_SET(SCU434, 19), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19)); PIN_DECL_2(AC7, GPIOW3, LAD3, ESPID3); #define AE7 180 SIG_EXPR_LIST_DECL_SESG(AE7, LCLK, LPC, SIG_DESC_SET(SCU434, 20), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20)); PIN_DECL_2(AE7, GPIOW4, LCLK, ESPICK); #define AF7 181 SIG_EXPR_LIST_DECL_SESG(AF7, LFRAME, LPC, SIG_DESC_SET(SCU434, 21), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21)); PIN_DECL_2(AF7, GPIOW5, LFRAME, ESPICS); #define AD7 182 SIG_EXPR_LIST_DECL_SESG(AD7, LSIRQ, LSIRQ, SIG_DESC_SET(SCU434, 22), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22)); PIN_DECL_2(AD7, GPIOW6, LSIRQ, ESPIALT); FUNC_GROUP_DECL(LSIRQ, AD7); FUNC_GROUP_DECL(ESPIALT, AD7); #define AD8 183 SIG_EXPR_LIST_DECL_SESG(AD8, LPCRST, LPC, SIG_DESC_SET(SCU434, 23), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23)); PIN_DECL_2(AD8, GPIOW7, LPCRST, ESPIRST); FUNC_GROUP_DECL(LPC, AB7, AB8, AC8, AC7, AE7, AF7, AD8); diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig index f1806fd781a0..530426a74f75 100644 
--- a/drivers/pinctrl/cirrus/Kconfig +++ b/drivers/pinctrl/cirrus/Kconfig @@ -2,6 +2,7 @@ config PINCTRL_LOCHNAGAR tristate "Cirrus Logic Lochnagar pinctrl driver" depends on MFD_LOCHNAGAR + select GPIOLIB select PINMUX select PINCONF select GENERIC_PINCONF diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..6381745e3bb1 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2025,7 +2025,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) return PTR_ERR(pctldev->p); } - kref_get(&pctldev->p->users); pctldev->hog_default = pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); if (IS_ERR(pctldev->hog_default)) { diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 5d6d8b1e9062..dbaacde1b36a 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -29,6 +29,13 @@ struct pinctrl_dt_map { static void dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { + int i; + + for (i = 0; i < num_maps; ++i) { + kfree_const(map[i].dev_name); + map[i].dev_name = NULL; + } + if (pctldev) { const struct pinctrl_ops *ops = pctldev->desc->pctlops; if (ops->dt_free_map) @@ -63,7 +70,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, /* Initialize common mapping table entry fields */ for (i = 0; i < num_maps; i++) { - map[i].dev_name = dev_name(p->dev); + const char *devname; + + devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL); + if (!devname) + goto err_free_map; + + map[i].dev_name = devname; map[i].name = statename; if (pctldev) map[i].ctrl_dev_name = dev_name(pctldev->dev); @@ -71,10 +84,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, /* Remember the converted mapping table entries */ dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL); - if (!dt_map) { - dt_free_map(pctldev, map, num_maps); - return -ENOMEM; - } + if (!dt_map) + goto err_free_map; dt_map->pctldev = pctldev; dt_map->map = map; @@ -82,6 +93,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, list_add_tail(&dt_map->node, &p->dt_maps); return pinctrl_register_map(map, num_maps, false); + +err_free_map: + dt_free_map(pctldev, map, num_maps); + return -ENOMEM; } struct pinctrl_dev *of_pinctrl_get(struct device_node *np) diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c index 73bf1d9f9cc6..23cf04bdfc55 100644 --- a/drivers/pinctrl/freescale/pinctrl-scu.c +++ b/drivers/pinctrl/freescale/pinctrl-scu.c @@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set { struct imx_sc_rpc_msg hdr; u32 val; u16 pad; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_pad_get { struct imx_sc_rpc_msg hdr; u16 pad; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_resp_pad_get { struct imx_sc_rpc_msg hdr; diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 9ffb22211d2b..606fe216f902 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -110,7 +110,6 @@ struct byt_gpio { struct platform_device *pdev; struct pinctrl_dev *pctl_dev; struct pinctrl_desc pctl_desc; - raw_spinlock_t lock; const struct intel_pinctrl_soc_data *soc_data; struct intel_community *communities_copy; struct byt_gpio_pin_context *saved_context; @@ -549,6 +548,8 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = { NULL }; +static DEFINE_RAW_SPINLOCK(byt_lock); + static struct intel_community 
*byt_get_community(struct byt_gpio *vg, unsigned int pin) { @@ -658,7 +659,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -678,7 +679,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static void byt_set_group_mixed_mux(struct byt_gpio *vg, @@ -688,7 +689,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -708,7 +709,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, @@ -749,11 +750,17 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset) unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); - value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + + /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */ + if (value & BYT_DIRECT_IRQ_EN) + /* nothing to do */ ; + else + value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, @@ -765,7 +772,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, u32 value, gpio_mux; unsigned long flags; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); /* * In most cases, func pin mux 000 means GPIO function. 
@@ -787,7 +794,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, "pin %u forcibly re-configured as GPIO\n", offset); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); pm_runtime_get(&vg->pdev->dev); @@ -815,7 +822,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(val_reg); value &= ~BYT_DIR_MASK; @@ -832,7 +839,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, "Potential Error: Setting GPIO with direct_irq_en to output"); writel(value, val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -901,11 +908,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, u32 conf, pull, val, debounce; u16 arg = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); pull = conf & BYT_PULL_ASSIGN_MASK; val = readl(val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (param) { case PIN_CONFIG_BIAS_DISABLE: @@ -932,9 +939,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, if (!(conf & BYT_DEBOUNCE_EN)) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); debounce = readl(db_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { case BYT_DEBOUNCE_PULSE_375US: @@ -986,7 +993,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, u32 conf, val, debounce; int i, ret = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); val = readl(val_reg); @@ -1094,7 +1101,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, if (!ret) writel(conf, conf_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return ret; } @@ -1119,9 +1126,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset) unsigned long flags; u32 val; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return !!(val & BYT_LEVEL); } @@ -1136,13 +1143,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); old_val = readl(reg); if (value) writel(old_val | BYT_LEVEL, reg); else writel(old_val & ~BYT_LEVEL, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) @@ -1155,9 +1162,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) if (!reg) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); if (!(value & BYT_OUTPUT_EN)) return 0; @@ -1200,14 +1207,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) const char *label; unsigned int pin; - raw_spin_lock_irqsave(&vg->lock, 
flags); + raw_spin_lock_irqsave(&byt_lock, flags); pin = vg->soc_data->pins[i].number; reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG); if (!reg) { seq_printf(s, "Could not retrieve pin %i conf0 reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } conf0 = readl(reg); @@ -1216,11 +1223,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) if (!reg) { seq_printf(s, "Could not retrieve pin %i val reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); comm = byt_get_community(vg, pin); if (!comm) { @@ -1304,9 +1311,9 @@ static void byt_irq_ack(struct irq_data *d) if (!reg) return; - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); writel(BIT(offset % 32), reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); } static void byt_irq_mask(struct irq_data *d) @@ -1330,7 +1337,7 @@ static void byt_irq_unmask(struct irq_data *d) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); switch (irqd_get_trigger_type(d)) { @@ -1353,7 +1360,7 @@ static void byt_irq_unmask(struct irq_data *d) writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_irq_type(struct irq_data *d, unsigned int type) @@ -1367,7 +1374,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) if (!reg || offset >= vg->chip.ngpio) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); WARN(value & BYT_DIRECT_IRQ_EN, @@ -1389,7 +1396,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) else if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1425,9 +1432,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) continue; } - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); pending = readl(reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); for_each_set_bit(pin, &pending, 32) { virq = irq_find_mapping(vg->chip.irq.domain, base + pin); generic_handle_irq(virq); @@ -1638,8 +1645,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(vg->pctl_dev); } - raw_spin_lock_init(&vg->lock); - ret = byt_gpio_probe(vg); if (ret) return ret; @@ -1654,8 +1659,11 @@ static int byt_pinctrl_probe(struct platform_device *pdev) static int byt_gpio_suspend(struct device *dev) { struct byt_gpio *vg = dev_get_drvdata(dev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1676,14 +1684,18 @@ static int byt_gpio_suspend(struct device *dev) vg->saved_context[i].val = value; } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } static int byt_gpio_resume(struct device *dev) { struct byt_gpio *vg = dev_get_drvdata(dev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1721,6 +1733,7 @@ static int byt_gpio_resume(struct device *dev) } } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } #endif diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c 
b/drivers/pinctrl/intel/pinctrl-lewisburg.c index 2e06fb1464ab..7fdf4257df1e 100644 --- a/drivers/pinctrl/intel/pinctrl-lewisburg.c +++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c @@ -33,6 +33,7 @@ .npins = ((e) - (s) + 1), \ } +/* Lewisburg */ static const struct pinctrl_pin_desc lbg_pins[] = { /* GPP_A */ PINCTRL_PIN(0, "RCINB"), @@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = { PINCTRL_PIN(33, "SRCCLKREQB_4"), PINCTRL_PIN(34, "SRCCLKREQB_5"), PINCTRL_PIN(35, "GPP_B_11"), - PINCTRL_PIN(36, "GLB_RST_WARN_N"), + PINCTRL_PIN(36, "SLP_S0B"), PINCTRL_PIN(37, "PLTRSTB"), PINCTRL_PIN(38, "SPKR"), PINCTRL_PIN(39, "GPP_B_15"), @@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = { PINCTRL_PIN(141, "GBE_PCI_DIS"), PINCTRL_PIN(142, "GBE_LAN_DIS"), PINCTRL_PIN(143, "GPP_I_10"), - PINCTRL_PIN(144, "GPIO_RCOMP_3P3"), /* GPP_J */ - PINCTRL_PIN(145, "GBE_LED_0_0"), - PINCTRL_PIN(146, "GBE_LED_0_1"), - PINCTRL_PIN(147, "GBE_LED_1_0"), - PINCTRL_PIN(148, "GBE_LED_1_1"), - PINCTRL_PIN(149, "GBE_LED_2_0"), - PINCTRL_PIN(150, "GBE_LED_2_1"), - PINCTRL_PIN(151, "GBE_LED_3_0"), - PINCTRL_PIN(152, "GBE_LED_3_1"), - PINCTRL_PIN(153, "GBE_SCL_0"), - PINCTRL_PIN(154, "GBE_SDA_0"), - PINCTRL_PIN(155, "GBE_SCL_1"), - PINCTRL_PIN(156, "GBE_SDA_1"), - PINCTRL_PIN(157, "GBE_SCL_2"), - PINCTRL_PIN(158, "GBE_SDA_2"), - PINCTRL_PIN(159, "GBE_SCL_3"), - PINCTRL_PIN(160, "GBE_SDA_3"), - PINCTRL_PIN(161, "GBE_SDP_0_0"), - PINCTRL_PIN(162, "GBE_SDP_0_1"), - PINCTRL_PIN(163, "GBE_SDP_1_0"), - PINCTRL_PIN(164, "GBE_SDP_1_1"), - PINCTRL_PIN(165, "GBE_SDP_2_0"), - PINCTRL_PIN(166, "GBE_SDP_2_1"), - PINCTRL_PIN(167, "GBE_SDP_3_0"), - PINCTRL_PIN(168, "GBE_SDP_3_1"), + PINCTRL_PIN(144, "GBE_LED_0_0"), + PINCTRL_PIN(145, "GBE_LED_0_1"), + PINCTRL_PIN(146, "GBE_LED_1_0"), + PINCTRL_PIN(147, "GBE_LED_1_1"), + PINCTRL_PIN(148, "GBE_LED_2_0"), + PINCTRL_PIN(149, "GBE_LED_2_1"), + PINCTRL_PIN(150, "GBE_LED_3_0"), + PINCTRL_PIN(151, "GBE_LED_3_1"), + PINCTRL_PIN(152, "GBE_SCL_0"), + PINCTRL_PIN(153, "GBE_SDA_0"), + PINCTRL_PIN(154, "GBE_SCL_1"), + PINCTRL_PIN(155, "GBE_SDA_1"), + PINCTRL_PIN(156, "GBE_SCL_2"), + PINCTRL_PIN(157, "GBE_SDA_2"), + PINCTRL_PIN(158, "GBE_SCL_3"), + PINCTRL_PIN(159, "GBE_SDA_3"), + PINCTRL_PIN(160, "GBE_SDP_0_0"), + PINCTRL_PIN(161, "GBE_SDP_0_1"), + PINCTRL_PIN(162, "GBE_SDP_1_0"), + PINCTRL_PIN(163, "GBE_SDP_1_1"), + PINCTRL_PIN(164, "GBE_SDP_2_0"), + PINCTRL_PIN(165, "GBE_SDP_2_1"), + PINCTRL_PIN(166, "GBE_SDP_3_0"), + PINCTRL_PIN(167, "GBE_SDP_3_1"), /* GPP_K */ - PINCTRL_PIN(169, "GBE_RMIICLK"), - PINCTRL_PIN(170, "GBE_RMII_TXD_0"), - PINCTRL_PIN(171, "GBE_RMII_TXD_1"), + PINCTRL_PIN(168, "GBE_RMIICLK"), + PINCTRL_PIN(169, "GBE_RMII_RXD_0"), + PINCTRL_PIN(170, "GBE_RMII_RXD_1"), + PINCTRL_PIN(171, "GBE_RMII_CRS_DV"), PINCTRL_PIN(172, "GBE_RMII_TX_EN"), - PINCTRL_PIN(173, "GBE_RMII_CRS_DV"), - PINCTRL_PIN(174, "GBE_RMII_RXD_0"), - PINCTRL_PIN(175, "GBE_RMII_RXD_1"), - PINCTRL_PIN(176, "GBE_RMII_RX_ER"), - PINCTRL_PIN(177, "GBE_RMII_ARBIN"), - PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"), - PINCTRL_PIN(179, "PE_RST_N"), - PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"), + PINCTRL_PIN(173, "GBE_RMII_TXD_0"), + PINCTRL_PIN(174, "GBE_RMII_TXD_1"), + PINCTRL_PIN(175, "GBE_RMII_RX_ER"), + PINCTRL_PIN(176, "GBE_RMII_ARBIN"), + PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"), + PINCTRL_PIN(178, "PE_RST_N"), /* GPP_G */ - PINCTRL_PIN(181, "FAN_TACH_0"), - PINCTRL_PIN(182, "FAN_TACH_1"), - PINCTRL_PIN(183, "FAN_TACH_2"), - PINCTRL_PIN(184, "FAN_TACH_3"), - PINCTRL_PIN(185, "FAN_TACH_4"), - PINCTRL_PIN(186, 
"FAN_TACH_5"), - PINCTRL_PIN(187, "FAN_TACH_6"), - PINCTRL_PIN(188, "FAN_TACH_7"), - PINCTRL_PIN(189, "FAN_PWM_0"), - PINCTRL_PIN(190, "FAN_PWM_1"), - PINCTRL_PIN(191, "FAN_PWM_2"), - PINCTRL_PIN(192, "FAN_PWM_3"), - PINCTRL_PIN(193, "GSXDOUT"), - PINCTRL_PIN(194, "GSXSLOAD"), - PINCTRL_PIN(195, "GSXDIN"), - PINCTRL_PIN(196, "GSXSRESETB"), - PINCTRL_PIN(197, "GSXCLK"), - PINCTRL_PIN(198, "ADR_COMPLETE"), - PINCTRL_PIN(199, "NMIB"), - PINCTRL_PIN(200, "SMIB"), - PINCTRL_PIN(201, "SSATA_DEVSLP_0"), - PINCTRL_PIN(202, "SSATA_DEVSLP_1"), - PINCTRL_PIN(203, "SSATA_DEVSLP_2"), - PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"), + PINCTRL_PIN(179, "FAN_TACH_0"), + PINCTRL_PIN(180, "FAN_TACH_1"), + PINCTRL_PIN(181, "FAN_TACH_2"), + PINCTRL_PIN(182, "FAN_TACH_3"), + PINCTRL_PIN(183, "FAN_TACH_4"), + PINCTRL_PIN(184, "FAN_TACH_5"), + PINCTRL_PIN(185, "FAN_TACH_6"), + PINCTRL_PIN(186, "FAN_TACH_7"), + PINCTRL_PIN(187, "FAN_PWM_0"), + PINCTRL_PIN(188, "FAN_PWM_1"), + PINCTRL_PIN(189, "FAN_PWM_2"), + PINCTRL_PIN(190, "FAN_PWM_3"), + PINCTRL_PIN(191, "GSXDOUT"), + PINCTRL_PIN(192, "GSXSLOAD"), + PINCTRL_PIN(193, "GSXDIN"), + PINCTRL_PIN(194, "GSXSRESETB"), + PINCTRL_PIN(195, "GSXCLK"), + PINCTRL_PIN(196, "ADR_COMPLETE"), + PINCTRL_PIN(197, "NMIB"), + PINCTRL_PIN(198, "SMIB"), + PINCTRL_PIN(199, "SSATA_DEVSLP_0"), + PINCTRL_PIN(200, "SSATA_DEVSLP_1"), + PINCTRL_PIN(201, "SSATA_DEVSLP_2"), + PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"), /* GPP_H */ - PINCTRL_PIN(205, "SRCCLKREQB_6"), - PINCTRL_PIN(206, "SRCCLKREQB_7"), - PINCTRL_PIN(207, "SRCCLKREQB_8"), - PINCTRL_PIN(208, "SRCCLKREQB_9"), - PINCTRL_PIN(209, "SRCCLKREQB_10"), - PINCTRL_PIN(210, "SRCCLKREQB_11"), - PINCTRL_PIN(211, "SRCCLKREQB_12"), - PINCTRL_PIN(212, "SRCCLKREQB_13"), - PINCTRL_PIN(213, "SRCCLKREQB_14"), - PINCTRL_PIN(214, "SRCCLKREQB_15"), - PINCTRL_PIN(215, "SML2CLK"), - PINCTRL_PIN(216, "SML2DATA"), - PINCTRL_PIN(217, "SML2ALERTB"), - PINCTRL_PIN(218, "SML3CLK"), - PINCTRL_PIN(219, "SML3DATA"), - PINCTRL_PIN(220, "SML3ALERTB"), - PINCTRL_PIN(221, "SML4CLK"), - PINCTRL_PIN(222, "SML4DATA"), - PINCTRL_PIN(223, "SML4ALERTB"), - PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"), - PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"), - PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"), - PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"), - PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"), + PINCTRL_PIN(203, "SRCCLKREQB_6"), + PINCTRL_PIN(204, "SRCCLKREQB_7"), + PINCTRL_PIN(205, "SRCCLKREQB_8"), + PINCTRL_PIN(206, "SRCCLKREQB_9"), + PINCTRL_PIN(207, "SRCCLKREQB_10"), + PINCTRL_PIN(208, "SRCCLKREQB_11"), + PINCTRL_PIN(209, "SRCCLKREQB_12"), + PINCTRL_PIN(210, "SRCCLKREQB_13"), + PINCTRL_PIN(211, "SRCCLKREQB_14"), + PINCTRL_PIN(212, "SRCCLKREQB_15"), + PINCTRL_PIN(213, "SML2CLK"), + PINCTRL_PIN(214, "SML2DATA"), + PINCTRL_PIN(215, "SML2ALERTB"), + PINCTRL_PIN(216, "SML3CLK"), + PINCTRL_PIN(217, "SML3DATA"), + PINCTRL_PIN(218, "SML3ALERTB"), + PINCTRL_PIN(219, "SML4CLK"), + PINCTRL_PIN(220, "SML4DATA"), + PINCTRL_PIN(221, "SML4ALERTB"), + PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"), + PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"), + PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"), + PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"), + PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"), /* GPP_L */ + PINCTRL_PIN(227, "GPP_L_0"), + PINCTRL_PIN(228, "EC_CSME_INTR_OUT"), PINCTRL_PIN(229, "VISA2CH0_D0"), PINCTRL_PIN(230, "VISA2CH0_D1"), PINCTRL_PIN(231, "VISA2CH0_D2"), diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 44d7f50bbc82..d936e7aa74c4 100644 --- 
a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -49,6 +49,7 @@ .padown_offset = SPT_PAD_OWN, \ .padcfglock_offset = SPT_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ + .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 72c5373c8dc1..e8d1f3050487 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[] = { GPIOX_0 }; static const unsigned int sdio_d1_pins[] = { GPIOX_1 }; static const unsigned int sdio_d2_pins[] = { GPIOX_2 }; static const unsigned int sdio_d3_pins[] = { GPIOX_3 }; -static const unsigned int sdio_cmd_pins[] = { GPIOX_4 }; -static const unsigned int sdio_clk_pins[] = { GPIOX_5 }; +static const unsigned int sdio_clk_pins[] = { GPIOX_4 }; +static const unsigned int sdio_cmd_pins[] = { GPIOX_5 }; static const unsigned int sdio_irq_pins[] = { GPIOX_7 }; static const unsigned int nand_ce0_pins[] = { BOOT_8 }; diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 8bba9d053d9f..aba479a1150c 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, return ret; meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit); + bit = bit << 1; ret = regmap_read(pc->reg_ds, reg, &val); if (ret) diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index f2f5fcd9a237..83e585c5a613 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -595,10 +595,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) regmap_read(info->regmap, in_reg, &in_val); /* Set initial polarity based on current input level.
*/ - if (in_val & d->mask) - val |= d->mask; /* falling */ + if (in_val & BIT(d->hwirq % GPIO_PER_REG)) + val |= BIT(d->hwirq % GPIO_PER_REG); /* falling */ else - val &= ~d->mask; /* rising */ + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); /* rising */ break; } default: diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 2c61141519f8..eab078244a4c 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -540,7 +540,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) irqreturn_t ret = IRQ_NONE; unsigned int i, irqnr; unsigned long flags; - u32 *regs, regval; + u32 __iomem *regs; + u32 regval; u64 status, mask; /* Read the wake status */ diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c index a454f57c264e..62c02b969327 100644 --- a/drivers/pinctrl/pinctrl-falcon.c +++ b/drivers/pinctrl/pinctrl-falcon.c @@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev) falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); if (IS_ERR(falcon_info.clk[*bank])) { dev_err(&ppdev->dev, "failed to get clock\n"); - of_node_put(np) + of_node_put(np); return PTR_ERR(falcon_info.clk[*bank]); } falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev, diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c index 3be1d833bf25..eda88cdf870d 100644 --- a/drivers/pinctrl/pinctrl-rza2.c +++ b/drivers/pinctrl/pinctrl-rza2.c @@ -213,8 +213,8 @@ static const char * const rza2_gpio_names[] = { "PC_0", "PC_1", "PC_2", "PC_3", "PC_4", "PC_5", "PC_6", "PC_7", "PD_0", "PD_1", "PD_2", "PD_3", "PD_4", "PD_5", "PD_6", "PD_7", "PE_0", "PE_1", "PE_2", "PE_3", "PE_4", "PE_5", "PE_6", "PE_7", - "PF_0", "PF_1", "PF_2", "PF_3", "P0_4", "PF_5", "PF_6", "PF_7", - "PG_0", "PG_1", "PG_2", "P0_3", "PG_4", "PG_5", "PG_6", "PG_7", + "PF_0", "PF_1", "PF_2", "PF_3", "PF_4", "PF_5", "PF_6", "PF_7", + "PG_0", "PG_1", "PG_2", "PG_3", "PG_4", "PG_5", "PG_6", "PG_7", "PH_0", "PH_1", "PH_2", "PH_3", "PH_4", "PH_5", "PH_6", "PH_7", /* port I does not exist */ "PJ_0", "PJ_1", "PJ_2", "PJ_3", "PJ_4", "PJ_5", "PJ_6", "PJ_7", diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index e914f6efd39e..9503ddf2edc7 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -85,7 +85,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin) const struct pinmux_ops *ops = pctldev->desc->pmxops; /* Can't inspect pin, assume it can be used */ - if (!desc) + if (!desc || !ops) return true; if (ops->strict && desc->mux_usecount) diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c index 6399c8a2bc22..d6cfad7417b1 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7180.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c @@ -77,6 +77,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ + .tile = SOUTH, \ .mux_bit = -1, \ .pull_bit = pull, \ .drv_bit = drv, \ @@ -102,6 +103,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ + .tile = SOUTH, \ .mux_bit = -1, \ .pull_bit = 3, \ .drv_bit = 0, \ @@ -1087,14 +1089,14 @@ static const struct msm_pingroup sc7180_groups[] = { [116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _), [117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _), [118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _), - [119] = UFS_RESET(ufs_reset, 0x97f000), - [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x97a000, 15, 0), - [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x97a000, 13, 6), 
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x97a000, 11, 3), - [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x97a000, 9, 0), - [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x97b000, 14, 6), - [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x97b000, 11, 3), - [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x97b000, 9, 0), + [119] = UFS_RESET(ufs_reset, 0x7f000), + [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x7a000, 15, 0), + [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x7a000, 13, 6), + [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x7a000, 11, 3), + [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x7a000, 9, 0), + [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x7b000, 14, 6), + [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x7b000, 11, 3), + [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0), }; static const struct msm_pinctrl_soc_data sc7180_pinctrl = { diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index ebc27b06718c..0599f5127b01 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -486,8 +486,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) if (match) { irq_chip = kmemdup(match->data, sizeof(*irq_chip), GFP_KERNEL); - if (!irq_chip) + if (!irq_chip) { + of_node_put(np); return -ENOMEM; + } wkup_np = np; break; } @@ -504,6 +506,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) bank->nr_pins, &exynos_eint_irqd_ops, bank); if (!bank->irq_domain) { dev_err(dev, "wkup irq domain add failed\n"); + of_node_put(wkup_np); return -ENXIO; } @@ -518,8 +521,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) weint_data = devm_kcalloc(dev, bank->nr_pins, sizeof(*weint_data), GFP_KERNEL); - if (!weint_data) + if (!weint_data) { + of_node_put(wkup_np); return -ENOMEM; + } for (idx = 0; idx < bank->nr_pins; ++idx) { irq = irq_of_parse_and_map(bank->of_node, idx); @@ -536,10 +541,13 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) } } - if (!muxed_banks) + if (!muxed_banks) { + of_node_put(wkup_np); return 0; + } irq = irq_of_parse_and_map(wkup_np, 0); + of_node_put(wkup_np); if (!irq) { dev_err(dev, "irq number for muxed EINTs not found\n"); return 0; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 7e824e4d20f4..9bd0a3de101d 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -490,8 +490,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d) return -ENODEV; eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL); - if (!eint_data) + if (!eint_data) { + of_node_put(eint_np); return -ENOMEM; + } eint_data->drvdata = d; @@ -503,12 +505,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d) irq = irq_of_parse_and_map(eint_np, i); if (!irq) { dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i); + of_node_put(eint_np); return -ENXIO; } eint_data->parents[i] = irq; irq_set_chained_handler_and_data(irq, handlers[i], eint_data); } + of_node_put(eint_np); bank = d->pin_banks; for (i = 0; i < d->nr_banks; ++i, ++bank) { diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index c399f0932af5..f97f8179f2b1 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -704,8 +704,10 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) return -ENODEV; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + of_node_put(eint0_np); return -ENOMEM; + } data->drvdata 
= d; for (i = 0; i < NUM_EINT0_IRQ; ++i) { @@ -714,6 +716,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) irq = irq_of_parse_and_map(eint0_np, i); if (!irq) { dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i); + of_node_put(eint0_np); return -ENXIO; } @@ -721,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) s3c64xx_eint0_handlers[i], data); } + of_node_put(eint0_np); bank = d->pin_banks; for (i = 0; i < d->nr_banks; ++i, ++bank) { diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index de0477bb469d..f26574ef234a 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -272,6 +272,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev, &reserved_maps, num_maps); if (ret < 0) { samsung_dt_free_map(pctldev, *map, *num_maps); + of_node_put(np); return ret; } } @@ -785,8 +786,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( if (!of_get_child_count(cfg_np)) { ret = samsung_pinctrl_create_function(dev, drvdata, cfg_np, func); - if (ret < 0) + if (ret < 0) { + of_node_put(cfg_np); return ERR_PTR(ret); + } if (ret > 0) { ++func; ++func_cnt; @@ -797,8 +800,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( for_each_child_of_node(cfg_np, func_np) { ret = samsung_pinctrl_create_function(dev, drvdata, func_np, func); - if (ret < 0) + if (ret < 0) { + of_node_put(func_np); + of_node_put(cfg_np); return ERR_PTR(ret); + } if (ret > 0) { ++func; ++func_cnt; diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index b8640ad41bef..ce983247c9e2 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -29,12 +29,12 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc, struct platform_device *pdev) { - unsigned int num_windows, num_irqs; struct sh_pfc_window *windows; unsigned int *irqs = NULL; + unsigned int num_windows; struct resource *res; unsigned int i; - int irq; + int num_irqs; /* Count the MEM and IRQ resources. */ for (num_windows = 0;; num_windows++) { @@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc, if (!res) break; } - for (num_irqs = 0;; num_irqs++) { - irq = platform_get_irq(pdev, num_irqs); - if (irq == -EPROBE_DEFER) - return irq; - if (irq < 0) - break; - } - if (num_windows == 0) return -EINVAL; + num_irqs = platform_irq_count(pdev); + if (num_irqs < 0) + return num_irqs; + /* Allocate memory windows and IRQs arrays. 
*/ windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows), GFP_KERNEL); diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c index 24866a5958ae..a9875038ed9b 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c @@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_ATAG0_A, 0, FN_REMOCON_B, 0, /* IP0_11_8 [4] */ FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS, - FN_ATADIR0_A, 0, FN_SDSELF_B, 0, + FN_ATADIR0_A, 0, FN_SDSELF_A, 0, FN_PWM4_B, 0, 0, 0, 0, 0, 0, 0, /* IP0_7_5 [3] */ @@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_TS_SDAT0_A, 0, 0, 0, 0, 0, 0, 0, /* IP1_10_8 [3] */ - FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24, + FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24, FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A, /* IP1_7_5 [3] */ FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A, diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c index 697c77a4ea95..773d3bc38c8c 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c @@ -5984,7 +5984,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */ } }, { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) { - { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */ + { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */ { PIN_FSCLKST, 20, 2 }, /* FSCLKST */ { PIN_TMS, 4, 2 }, /* TMS */ } }, @@ -6240,8 +6240,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = { [31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */ } }, { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) { - [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */ - [ 1] = SH_PFC_PIN_NONE, + [ 0] = SH_PFC_PIN_NONE, + [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */ [ 2] = PIN_FSCLKST, /* FSCLKST */ [ 3] = PIN_EXTALR, /* EXTALR*/ [ 4] = PIN_TRST_N, /* TRST# */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c index 2dfb8d9cfda1..5200dadd6b3e 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c @@ -448,6 +448,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM #define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0)) /* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ +#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) +#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1) #define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1) #define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1) #define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1) @@ -468,7 +470,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM #define PINMUX_MOD_SELS \ \ -MOD_SEL0_30_29 \ + MOD_SEL1_31 \ +MOD_SEL0_30_29 MOD_SEL1_30 \ MOD_SEL1_29 \ MOD_SEL0_28 MOD_SEL1_28 \ MOD_SEL0_27_26 \ @@ -1058,7 +1061,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1), PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1), PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0), - PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B), + PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1), PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0), PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP), @@ -1067,7 +1070,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1), PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1), PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, 
SEL_TIMER_TMU_0), - PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B), + PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1), PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0), /* IPSR11 */ @@ -1085,13 +1088,13 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0), PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0), - PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A), + PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0), PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC), PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1), PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0), PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A), - PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A), + PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0), PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0), PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1), @@ -1196,7 +1199,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0), PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1), PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1), - PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A), + PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0), PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT), PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1), @@ -1264,7 +1267,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2), PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3), PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1), - PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B), + PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1), PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6), PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0), @@ -4957,11 +4960,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_1_0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, - GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, - 2, 2, 2, 1, 1, 2, 1, 4), + GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, + 1, 2, 2, 2, 1, 1, 2, 1, 4), GROUP( - /* RESERVED 31, 30 */ - 0, 0, 0, 0, + MOD_SEL1_31 + MOD_SEL1_30 MOD_SEL1_29 MOD_SEL1_28 /* RESERVED 27 */ diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c index 4a95867deb8a..5a026601d4f9 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c @@ -497,17 +497,15 @@ enum { SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK, CRX0_MARK, CRX1_MARK, CTX0_MARK, CTX1_MARK, + CRX0_CRX1_MARK, CTX0_CTX1_MARK, PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK, PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK, PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK, PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK, IERXD_MARK, IETXD_MARK, - CRX0_CRX1_MARK, WDTOVF_MARK, - CRX0X1_MARK, - /* DMAC */ TEND0_MARK, DACK0_MARK, DREQ0_MARK, TEND1_MARK, DACK1_MARK, DREQ1_MARK, @@ -995,12 +993,12 @@ static const u16 pinmux_data[] = { PINMUX_DATA(PJ3_DATA, PJ3MD_00), PINMUX_DATA(CRX1_MARK, PJ3MD_01), - PINMUX_DATA(CRX0X1_MARK, PJ3MD_10), + PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10), PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11), PINMUX_DATA(PJ2_DATA, PJ2MD_000), PINMUX_DATA(CTX1_MARK, PJ2MD_001), - PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010), + PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010), PINMUX_DATA(CS2_MARK, PJ2MD_011), PINMUX_DATA(SCK0_MARK, PJ2MD_100), PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101), @@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = { GPIO_FN(CTX1), GPIO_FN(CRX1), GPIO_FN(CTX0), + GPIO_FN(CTX0_CTX1), GPIO_FN(CRX0), GPIO_FN(CRX0_CRX1), diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c index 6cbb18ef77dc..d20974a55d93 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c +++ 
b/drivers/pinctrl/sh-pfc/pfc-sh7269.c @@ -737,13 +737,12 @@ enum { CRX0_MARK, CTX0_MARK, CRX1_MARK, CTX1_MARK, CRX2_MARK, CTX2_MARK, - CRX0_CRX1_MARK, - CRX0_CRX1_CRX2_MARK, - CTX0CTX1CTX2_MARK, + CRX0_CRX1_MARK, CTX0_CTX1_MARK, + CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK, CRX1_PJ22_MARK, CTX1_PJ23_MARK, CRX2_PJ20_MARK, CTX2_PJ21_MARK, - CRX0CRX1_PJ22_MARK, - CRX0CRX1CRX2_PJ20_MARK, + CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK, + CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK, /* VDC */ DV_CLK_MARK, @@ -821,6 +820,7 @@ static const u16 pinmux_data[] = { PINMUX_DATA(CS3_MARK, PC8MD_001), PINMUX_DATA(TXD7_MARK, PC8MD_010), PINMUX_DATA(CTX1_MARK, PC8MD_011), + PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100), PINMUX_DATA(PC7_DATA, PC7MD_000), PINMUX_DATA(CKE_MARK, PC7MD_001), @@ -833,11 +833,12 @@ static const u16 pinmux_data[] = { PINMUX_DATA(CAS_MARK, PC6MD_001), PINMUX_DATA(SCK7_MARK, PC6MD_010), PINMUX_DATA(CTX0_MARK, PC6MD_011), + PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100), PINMUX_DATA(PC5_DATA, PC5MD_000), PINMUX_DATA(RAS_MARK, PC5MD_001), PINMUX_DATA(CRX0_MARK, PC5MD_011), - PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100), + PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100), PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101), PINMUX_DATA(PC4_DATA, PC4MD_00), @@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = { PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011), PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100), - PINMUX_DATA(CTX1_MARK, PJ23MD_101), + PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101), + PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110), PINMUX_DATA(PJ22_DATA, PJ22MD_000), PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001), PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011), PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100), - PINMUX_DATA(CRX1_MARK, PJ22MD_101), - PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110), + PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101), + PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110), PINMUX_DATA(PJ21_DATA, PJ21MD_000), PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001), PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011), PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100), - PINMUX_DATA(CTX2_MARK, PJ21MD_101), + PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101), + PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110), PINMUX_DATA(PJ20_DATA, PJ20MD_000), PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001), PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010), PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011), PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100), - PINMUX_DATA(CRX2_MARK, PJ20MD_101), - PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110), + PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101), + PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110), PINMUX_DATA(PJ19_DATA, PJ19MD_000), PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001), @@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = { GPIO_FN(WDTOVF), /* CAN */ + GPIO_FN(CTX2), + GPIO_FN(CRX2), GPIO_FN(CTX1), GPIO_FN(CRX1), GPIO_FN(CTX0), GPIO_FN(CRX0), + GPIO_FN(CTX0_CTX1), GPIO_FN(CRX0_CRX1), + GPIO_FN(CTX0_CTX1_CTX2), GPIO_FN(CRX0_CRX1_CRX2), + GPIO_FN(CTX2_PJ21), + GPIO_FN(CRX2_PJ20), + GPIO_FN(CTX1_PJ23), + GPIO_FN(CRX1_PJ22), + GPIO_FN(CTX0_CTX1_PJ23), + GPIO_FN(CRX0_CRX1_PJ22), + GPIO_FN(CTX0_CTX1_CTX2_PJ21), + GPIO_FN(CRX0_CRX1_CRX2_PJ20), /* DMAC */ GPIO_FN(TEND0), diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c index 5dfd991ffdaa..dbc36079c381 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c @@ -1450,7 +1450,7 @@ static const struct pinmux_func 
pinmux_func_gpios[] = { GPIO_FN(ET0_ETXD2_A), GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B), GPIO_FN(ET0_ETXD3_A), - GPIO_FN(RD_WR), GPIO_FN(TCLK1_B), + GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4), GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B), GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2), GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A), @@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* IP3_20 [1] */ FN_EX_WAIT0, FN_TCLK1_B, /* IP3_19_18 [2] */ - FN_RD_WR, FN_TCLK1_B, 0, 0, + FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4, /* IP3_17_15 [3] */ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B, FN_ET0_ETXD3_A, 0, 0, 0, diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 835148fc0f28..cab7da130925 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -422,12 +422,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info; /* * Describe a pinmux configuration in which a pin is physically multiplexed * with other pins. - * - ipsr: IPSR field (unused, for documentation purposes only) + * - ipsr: IPSR field * - fn: Function name * - psel: Physical multiplexing selector */ #define PINMUX_IPSR_PHYS(ipsr, fn, psel) \ - PINMUX_DATA(fn##_MARK, FN_##psel) + PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr) /* * Describe a pinmux configuration for a single-function pin with GPIO diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index e5e7f1f22813..b522ca010332 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev, return -EINVAL; rows = pinctrl_count_index_with_args(np, name); - if (rows == -EINVAL) + if (rows < 0) return rows; *map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL); diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c index b9d03c33d8dc..1176d543191a 100644 --- a/drivers/platform/chrome/wilco_ec/telemetry.c +++ b/drivers/platform/chrome/wilco_ec/telemetry.c @@ -406,8 +406,8 @@ static int telem_device_remove(struct platform_device *pdev) struct telem_device_data *dev_data = platform_get_drvdata(pdev); cdev_device_del(&dev_data->cdev, &dev_data->dev); - put_device(&dev_data->dev); ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt)); + put_device(&dev_data->dev); return 0; } diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 9a5c9fd2dbc6..5739a9669b29 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info { * @work: work struct for deferred process * @timer: background timer * @vring: Tx/Rx ring - * @spin_lock: spin lock + * @spin_lock: Tx/Rx spin lock * @is_ready: ready flag */ struct mlxbf_tmfifo { @@ -164,7 +164,7 @@ struct mlxbf_tmfifo { struct work_struct work; struct timer_list timer; struct mlxbf_tmfifo_vring *vring[2]; - spinlock_t spin_lock; /* spin lock */ + spinlock_t spin_lock[2]; /* spin lock */ bool is_ready; }; @@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); /* Use spin-lock to protect the 'cons->tx_buf'. 
*/ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); while (size > 0) { addr = cons->tx_buf.buf + cons->tx_buf.tail; @@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) } } - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } /* Rx/Tx one word in the descriptor buffer. */ @@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, fifo->vring[is_rx] = NULL; /* Notify upper layer that packet is done. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[is_rx], flags); vring_interrupt(0, vring->vq); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags); } mlxbf_tmfifo_desc_done: @@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) * worker handler. */ if (vring->vdev_id == VIRTIO_ID_CONSOLE) { - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; mlxbf_tmfifo_console_output(tm_vdev, vring); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events)) { return true; @@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev) if (!fifo) return -ENOMEM; - spin_lock_init(&fifo->spin_lock); + spin_lock_init(&fifo->spin_lock[0]); + spin_lock_init(&fifo->spin_lock[1]); INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); mutex_init(&fifo->lock); diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c index a7f184bb47e0..3d29a11c1d6b 100644 --- a/drivers/platform/mips/cpu_hwmon.c +++ b/drivers/platform/mips/cpu_hwmon.c @@ -161,7 +161,7 @@ static int __init loongson_hwmon_init(void) cpu_hwmon_dev = hwmon_device_register(NULL); if (IS_ERR(cpu_hwmon_dev)) { - ret = -ENOMEM; + ret = PTR_ERR(cpu_hwmon_dev); pr_err("hwmon_device_register fail!\n"); goto fail_hwmon_device_register; } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ae21d08c65e8..1cab99320514 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -806,7 +806,6 @@ config PEAQ_WMI tristate "PEAQ 2-in-1 WMI hotkey driver" depends on ACPI_WMI depends on INPUT - select INPUT_POLLDEV help Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s. 
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 821b08e01635..982f0cc8270c 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -512,13 +512,7 @@ static void kbd_led_update(struct asus_wmi *asus) { int ctrl_param = 0; - /* - * bits 0-2: level - * bit 7: light on/off - */ - if (asus->kbd_led_wk > 0) - ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); - + ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); } diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index d27be2836bc2..74e988f839e8 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -33,6 +33,7 @@ struct quirk_entry { bool touchpad_led; + bool kbd_led_not_present; bool kbd_led_levels_off_1; bool kbd_missing_ac_tag; @@ -73,6 +74,10 @@ static struct quirk_entry quirk_dell_latitude_e6410 = { .kbd_led_levels_off_1 = true, }; +static struct quirk_entry quirk_dell_inspiron_1012 = { + .kbd_led_not_present = true, +}; + static struct platform_driver platform_driver = { .driver = { .name = "dell-laptop", @@ -310,6 +315,24 @@ static const struct dmi_system_id dell_quirks[] __initconst = { }, .driver_data = &quirk_dell_latitude_e6410, }, + { + .callback = dmi_matched, + .ident = "Dell Inspiron 1012", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + .driver_data = &quirk_dell_inspiron_1012, + }, + { + .callback = dmi_matched, + .ident = "Dell Inspiron 1018", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1018"), + }, + .driver_data = &quirk_dell_inspiron_1012, + }, { } }; @@ -1493,6 +1516,9 @@ static void kbd_init(void) { int ret; + if (quirks && quirks->kbd_led_not_present) + return; + ret = kbd_init_info(); kbd_init_tokens(); diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index be85ed966bf3..b471b86c28fe 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -16,17 +16,27 @@ #define MAX_SPEED 3 -static int temp_limits[3] = { 55000, 60000, 65000 }; +#define TEMP_LIMIT0_DEFAULT 55000 +#define TEMP_LIMIT1_DEFAULT 60000 +#define TEMP_LIMIT2_DEFAULT 65000 + +#define HYSTERESIS_DEFAULT 3000 + +#define SPEED_ON_AC_DEFAULT 2 + +static int temp_limits[3] = { + TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT, +}; module_param_array(temp_limits, int, NULL, 0444); MODULE_PARM_DESC(temp_limits, "Millicelsius values above which the fan speed increases"); -static int hysteresis = 3000; +static int hysteresis = HYSTERESIS_DEFAULT; module_param(hysteresis, int, 0444); MODULE_PARM_DESC(hysteresis, "Hysteresis in millicelsius before lowering the fan speed"); -static int speed_on_ac = 2; +static int speed_on_ac = SPEED_ON_AC_DEFAULT; module_param(speed_on_ac, int, 0444); MODULE_PARM_DESC(speed_on_ac, "minimum fan speed to allow when system is powered by AC"); @@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) int i; for (i = 0; i < ARRAY_SIZE(temp_limits); i++) { - if (temp_limits[i] < 40000 || temp_limits[i] > 70000) { + if (temp_limits[i] < 20000 || temp_limits[i] > 90000) { - dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n", temp_limits[i]); + dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n", temp_limits[i]); - return -EINVAL; + temp_limits[0] = TEMP_LIMIT0_DEFAULT; + temp_limits[1] = TEMP_LIMIT1_DEFAULT; + temp_limits[2] = TEMP_LIMIT2_DEFAULT; + break;
} } if (hysteresis < 1000 || hysteresis > 10000) { dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n", hysteresis); - return -EINVAL; + hysteresis = HYSTERESIS_DEFAULT; } if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) { dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n", speed_on_ac); - return -EINVAL; + speed_on_ac = SPEED_ON_AC_DEFAULT; } fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 6bcbbb375401..a881b709af25 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -65,7 +65,7 @@ struct bios_args { u32 command; u32 commandtype; u32 datasize; - u32 data; + u8 data[128]; }; enum hp_wmi_commandtype { @@ -216,7 +216,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, .command = command, .commandtype = query, .datasize = insize, - .data = 0, + .data = { 0 }, }; struct acpi_buffer input = { sizeof(struct bios_args), &args }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -228,7 +228,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, if (WARN_ON(insize > sizeof(args.data))) return -EINVAL; - memcpy(&args.data, buffer, insize); + memcpy(&args.data[0], buffer, insize); wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output); @@ -300,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void) static int __init hp_wmi_bios_2009_later(void) { - int state = 0; + u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, sizeof(state), sizeof(state)); if (!ret) @@ -380,7 +380,7 @@ static int hp_wmi_rfkill2_refresh(void) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + sizeof(state), sizeof(state)); if (err) return err; @@ -778,7 +778,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + sizeof(state), sizeof(state)); if (err) return err < 0 ? 
err : -EINVAL; diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index af233b7b77f2..29d12f2f59e0 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data) return IRQ_HANDLED; } +static bool int0002_check_wake(void *data) +{ + u32 gpe_sts_reg; + + gpe_sts_reg = inl(GPE0A_STS_PORT); + return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT); +} + static struct irq_chip int0002_byt_irqchip = { .name = DRV_NAME, .irq_ack = int0002_irq_ack, @@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev) gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL); + acpi_register_wakeup_handler(irq, int0002_check_wake, NULL); device_init_wakeup(dev, true); return 0; } @@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev) static int int0002_remove(struct platform_device *pdev) { device_init_wakeup(&pdev->dev, false); + acpi_unregister_wakeup_handler(int0002_check_wake, NULL); return 0; } diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 292bace83f1e..6f436836fe50 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - ddata = (struct mid_pb_ddata *)id->driver_data; + ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data, + sizeof(*ddata), GFP_KERNEL); if (!ddata) - return -ENODATA; + return -ENOMEM; ddata->dev = &pdev->dev; ddata->irq = irq; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 94a008efb09b..571b4754477c 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -158,8 +158,9 @@ static const struct pmc_reg_map spt_reg_map = { .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET, }; -/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */ +/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */ static const struct pmc_bit_map cnp_pfear_map[] = { + /* Reserved for Cannon Lake but valid for Comet Lake */ {"PMC", BIT(0)}, {"OPI-DMI", BIT(1)}, {"SPI/eSPI", BIT(2)}, @@ -185,7 +186,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"SDX", BIT(4)}, {"SPE", BIT(5)}, {"Fuse", BIT(6)}, - /* Reserved for Cannonlake but valid for Icelake */ + /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ {"SBR8", BIT(7)}, {"CSME_FSC", BIT(0)}, @@ -229,12 +230,12 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"HDA_PGD4", BIT(2)}, {"HDA_PGD5", BIT(3)}, {"HDA_PGD6", BIT(4)}, - /* Reserved for Cannonlake but valid for Icelake */ + /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ {"PSF6", BIT(5)}, {"PSF7", BIT(6)}, {"PSF8", BIT(7)}, - /* Icelake generation onwards only */ + /* Ice Lake generation onwards only */ {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, {"RES_67", BIT(2)}, @@ -324,7 +325,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { {"ISH", CNP_PMC_LTR_ISH}, {"UFSX2", CNP_PMC_LTR_UFSX2}, {"EMMC", CNP_PMC_LTR_EMMC}, - /* Reserved for Cannonlake but valid for Icelake */ + /* Reserved for Cannon Lake but valid for Ice Lake */ {"WIGIG", ICL_PMC_LTR_WIGIG}, /* Below two cannot be used for LTR_IGNORE */ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, @@ -813,6 +814,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(CANNONLAKE_L, 
cnp_reg_map), INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map), INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), + INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map), + INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map), {} }; @@ -871,8 +874,8 @@ static int pmc_core_probe(struct platform_device *pdev) pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data; /* - * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here - * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap + * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here + * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap * in this case. */ if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids)) diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index 6fe829f30997..e1266f5c6359 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device), INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index cdab916fbf92..e330ec73c465 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -67,26 +67,22 @@ struct intel_scu_ipc_pdata_t { u32 i2c_base; u32 i2c_len; - u8 irq_mode; }; static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { .i2c_base = 0xff12b000, .i2c_len = 0x10, - .irq_mode = 0, }; /* Penwell and Cloverview */ static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { .i2c_base = 0xff12b000, .i2c_len = 0x10, - .irq_mode = 1, }; static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { .i2c_base = 0xff00d000, .i2c_len = 0x10, - .irq_mode = 0, }; struct intel_scu_ipc_dev { @@ -99,6 +95,9 @@ struct intel_scu_ipc_dev { static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ +#define IPC_STATUS 0x04 +#define IPC_STATUS_IRQ BIT(2) + /* * IPC Read Buffer (Read Only): * 16 byte buffer for receiving data from SCU, if IPC command @@ -120,11 +119,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ */ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) { - if (scu->irq_mode) { - reinit_completion(&scu->cmd_complete); - writel(cmd | IPC_IOC, scu->ipc_base); - } - writel(cmd, scu->ipc_base); + reinit_completion(&scu->cmd_complete); + writel(cmd | IPC_IOC, scu->ipc_base); } /* @@ -610,9 +606,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); static irqreturn_t ioc(int irq, void *dev_id) { struct intel_scu_ipc_dev *scu = dev_id; + int status = ipc_read_status(scu); - if (scu->irq_mode) - complete(&scu->cmd_complete); + writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS); + complete(&scu->cmd_complete); return IRQ_HANDLED; } @@ -638,8 +635,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!pdata) return -ENODEV; - scu->irq_mode = pdata->irq_mode; - err = pcim_enable_device(pdev); if (err) return err; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index 48b112b4f0b0..c32daf087640 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -95,7 +95,7 @@ 
static struct gpiod_lookup_table gpios_led_table = { NULL, 1, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3, NULL, 2, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_REG_SIMSWAP, + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP, NULL, 3, GPIO_ACTIVE_LOW), } }; diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index fdeb3624c529..cf9c44c20a82 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -6,7 +6,7 @@ #include <linux/acpi.h> #include <linux/dmi.h> -#include <linux/input-polldev.h> +#include <linux/input.h> #include <linux/kernel.h> #include <linux/module.h> @@ -18,8 +18,7 @@ MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID); -static unsigned int peaq_ignore_events_counter; -static struct input_polled_dev *peaq_poll_dev; +static struct input_dev *peaq_poll_dev; /* * The Dolby button (yes really a Dolby button) causes an ACPI variable to get @@ -28,8 +27,10 @@ static struct input_polled_dev *peaq_poll_dev; * (if polling after the release) or twice (polling between press and release). * We ignore events for 0.5s after the first event to avoid reporting 2 presses. */ -static void peaq_wmi_poll(struct input_polled_dev *dev) +static void peaq_wmi_poll(struct input_dev *input_dev) { + static unsigned long last_event_time; + static bool had_events; union acpi_object obj; acpi_status status; u32 dummy = 0; @@ -44,22 +45,25 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) return; if (obj.type != ACPI_TYPE_INTEGER) { - dev_err(&peaq_poll_dev->input->dev, + dev_err(&input_dev->dev, "Error WMBC did not return an integer\n"); return; } - if (peaq_ignore_events_counter && peaq_ignore_events_counter--) + if (!obj.integer.value) return; - if (obj.integer.value) { - input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 1); - input_sync(peaq_poll_dev->input); - input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 0); - input_sync(peaq_poll_dev->input); - peaq_ignore_events_counter = max(1u, - PEAQ_POLL_IGNORE_MS / peaq_poll_dev->poll_interval); - } + if (had_events && time_before(jiffies, last_event_time + + msecs_to_jiffies(PEAQ_POLL_IGNORE_MS))) + return; + + input_event(input_dev, EV_KEY, KEY_SOUND, 1); + input_sync(input_dev); + input_event(input_dev, EV_KEY, KEY_SOUND, 0); + input_sync(input_dev); + + last_event_time = jiffies; + had_events = true; } /* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */ @@ -75,6 +79,8 @@ static const struct dmi_system_id peaq_dmi_table[] __initconst = { static int __init peaq_wmi_init(void) { + int err; + /* WMI GUID is not unique, also check for a DMI match */ if (!dmi_check_system(peaq_dmi_table)) return -ENODEV; @@ -82,24 +88,36 @@ static int __init peaq_wmi_init(void) if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return -ENODEV; - peaq_poll_dev = input_allocate_polled_device(); + peaq_poll_dev = input_allocate_device(); if (!peaq_poll_dev) return -ENOMEM; - peaq_poll_dev->poll = peaq_wmi_poll; - peaq_poll_dev->poll_interval = PEAQ_POLL_INTERVAL_MS; - peaq_poll_dev->poll_interval_max = PEAQ_POLL_MAX_MS; - peaq_poll_dev->input->name = "PEAQ WMI hotkeys"; - peaq_poll_dev->input->phys = "wmi/input0"; - peaq_poll_dev->input->id.bustype = BUS_HOST; - input_set_capability(peaq_poll_dev->input, EV_KEY, KEY_SOUND); + peaq_poll_dev->name = "PEAQ WMI hotkeys"; + peaq_poll_dev->phys = "wmi/input0"; + peaq_poll_dev->id.bustype = BUS_HOST; + input_set_capability(peaq_poll_dev, EV_KEY, KEY_SOUND); - return input_register_polled_device(peaq_poll_dev); + err = input_setup_polling(peaq_poll_dev, peaq_wmi_poll); + if
(err) + goto err_out; + + input_set_poll_interval(peaq_poll_dev, PEAQ_POLL_INTERVAL_MS); + input_set_max_poll_interval(peaq_poll_dev, PEAQ_POLL_MAX_MS); + + err = input_register_device(peaq_poll_dev); + if (err) + goto err_out; + + return 0; + +err_out: + input_free_device(peaq_poll_dev); + return err; } static void __exit peaq_wmi_exit(void) { - input_unregister_polled_device(peaq_poll_dev); + input_unregister_device(peaq_poll_dev); } module_init(peaq_wmi_init); diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 07d1b911e72f..2b1a3a6ee8db 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -383,6 +383,14 @@ static const struct dmi_system_id critclk_systems[] = { DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"), }, }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Lex 2I385SW", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"), + DMI_MATCH(DMI_PRODUCT_NAME, "2I385SW"), + }, + }, { /* pmc_plt_clk* - are used for ethernet controllers */ .ident = "Beckhoff CB3163", @@ -429,6 +437,14 @@ static const struct dmi_system_id critclk_systems[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"), }, }, + { + .ident = "CONNECT X300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"), + DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"), + }, + }, + { /*sentinel*/ } }; diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c index 0d34a932b6d5..f74b0556bb6b 100644 --- a/drivers/power/supply/axp20x_ac_power.c +++ b/drivers/power/supply/axp20x_ac_power.c @@ -23,6 +23,8 @@ #define AXP20X_PWR_STATUS_ACIN_PRESENT BIT(7) #define AXP20X_PWR_STATUS_ACIN_AVAIL BIT(6) +#define AXP813_ACIN_PATH_SEL BIT(7) + #define AXP813_VHOLD_MASK GENMASK(5, 3) #define AXP813_VHOLD_UV_TO_BIT(x) ((((x) / 100000) - 40) << 3) #define AXP813_VHOLD_REG_TO_UV(x) \ @@ -40,6 +42,7 @@ struct axp20x_ac_power { struct power_supply *supply; struct iio_channel *acin_v; struct iio_channel *acin_i; + bool has_acin_path_sel; }; static irqreturn_t axp20x_ac_power_irq(int irq, void *devid) @@ -86,6 +89,17 @@ static int axp20x_ac_power_get_property(struct power_supply *psy, return ret; val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_AVAIL); + + /* ACIN_PATH_SEL disables ACIN even if ACIN_AVAIL is set. 
*/ + if (val->intval && power->has_acin_path_sel) { + ret = regmap_read(power->regmap, AXP813_ACIN_PATH_CTRL, + &reg); + if (ret) + return ret; + + val->intval = !!(reg & AXP813_ACIN_PATH_SEL); + } + return 0; case POWER_SUPPLY_PROP_VOLTAGE_NOW: @@ -224,21 +238,25 @@ static const struct power_supply_desc axp813_ac_power_desc = { struct axp_data { const struct power_supply_desc *power_desc; bool acin_adc; + bool acin_path_sel; }; static const struct axp_data axp20x_data = { - .power_desc = &axp20x_ac_power_desc, - .acin_adc = true, + .power_desc = &axp20x_ac_power_desc, + .acin_adc = true, + .acin_path_sel = false, }; static const struct axp_data axp22x_data = { - .power_desc = &axp22x_ac_power_desc, - .acin_adc = false, + .power_desc = &axp22x_ac_power_desc, + .acin_adc = false, + .acin_path_sel = false, }; static const struct axp_data axp813_data = { - .power_desc = &axp813_ac_power_desc, - .acin_adc = false, + .power_desc = &axp813_ac_power_desc, + .acin_adc = false, + .acin_path_sel = true, }; static int axp20x_ac_power_probe(struct platform_device *pdev) @@ -282,6 +300,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev) } power->regmap = dev_get_regmap(pdev->dev.parent, NULL); + power->has_acin_path_sel = axp_data->acin_path_sel; platform_set_drvdata(pdev, power); diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 1bbba6bba673..cf4c67b2d235 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -21,6 +21,7 @@ #include #include #include +#include <linux/dmi.h> #define PS_STAT_VBUS_TRIGGER BIT(0) #define PS_STAT_BAT_CHRG_DIR BIT(2) @@ -545,6 +546,49 @@ out: return IRQ_HANDLED; } +/* + * The HP Pavilion x2 10 series comes in a number of variants: + * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D" + * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E" + * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4" + * + * The variants with the AXP288 PMIC are all kinds of special: + * + * 1. All variants use a Type-C connector which the AXP288 does not support, so + * when using a Type-C charger it is not recognized. Unlike most AXP288 devices, + * this model actually has mostly working ACPI AC / Battery code, the ACPI code + * "solves" this by simply setting the input_current_limit to 3A. + * There are still some issues with the ACPI code, so we use this native driver, + * and to solve the charging not working (500mA is not enough) issue we hardcode + * the 3A input_current_limit like the ACPI code does. + * + * 2. If no charger is connected the machine boots with the vbus-path disabled. + * Normally this is done when a 5V boost converter is active to avoid the PMIC + * trying to charge from the 5V boost converter's output. This is done when + * an OTG host cable is inserted and the ID pin on the micro-B receptacle is + * pulled low and the ID pin has an ACPI event handler associated with it + * which re-enables the vbus-path when the ID pin is pulled high when the + * OTG host cable is removed. The Type-C connector has no ID pin, there is + * no ID pin handler and there appears to be no 5V boost converter, so we + * end up not charging because the vbus-path is disabled, until we unplug + * the charger which automatically clears the vbus-path disable bit and then + * on the second plug-in of the adapter we start charging. To solve the not + * charging on first charger plugin we unconditionally enable the vbus-path at + * probe on this model, which is safe since there is no 5V boost converter.
+ */ +static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = { + { + /* + * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry + * Trail model has "HP", so we only match on product_name. + */ + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + }, + }, + {} /* Terminating entry */ +}; + static void axp288_charger_extcon_evt_worker(struct work_struct *work) { struct axp288_chrg_info *info = @@ -568,7 +612,11 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work) } /* Determine cable/charger type */ - if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) { + if (dmi_check_system(axp288_hp_x2_dmi_ids)) { + /* See comment above axp288_hp_x2_dmi_ids declaration */ + dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 3A\n"); + current_limit = 3000000; + } else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) { dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n"); current_limit = 500000; } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) { @@ -685,6 +733,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) return ret; } + if (dmi_check_system(axp288_hp_x2_dmi_ids)) { + /* See comment above axp288_hp_x2_dmi_ids declaration */ + ret = axp288_charger_vbus_path_select(info, true); + if (ret < 0) + return ret; + } + /* Read current charge voltage and current limit */ ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val); if (ret < 0) { diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c index 1bb32b7226d7..b8e1ec106627 100644 --- a/drivers/power/supply/bd70528-charger.c +++ b/drivers/power/supply/bd70528-charger.c @@ -741,3 +741,4 @@ module_platform_driver(bd70528_power); MODULE_AUTHOR("Matti Vaittinen "); MODULE_DESCRIPTION("BD70528 power-supply driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd70528-power"); diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 61d6447d1966..00a96e4a1cdc 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -562,12 +562,14 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data) switch (d->action) { case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW: if (latest->current_ua >= 0) - dev_warn(ddata->dev, "Battery low at 3.3V!\n"); + dev_warn(ddata->dev, "Battery low at %imV!\n", + latest->voltage / 1000); break; case CPCAP_BATTERY_IRQ_ACTION_POWEROFF: - if (latest->current_ua >= 0) { + if (latest->current_ua >= 0 && latest->voltage <= 3200000) { dev_emerg(ddata->dev, - "Battery empty at 3.1V, powering off\n"); + "Battery empty at %imV, powering off\n", + latest->voltage / 1000); orderly_poweroff(true); } break; diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c index 35816d4b3012..2748715c4c75 100644 --- a/drivers/power/supply/ingenic-battery.c +++ b/drivers/power/supply/ingenic-battery.c @@ -100,10 +100,17 @@ static int ingenic_battery_set_scale(struct ingenic_battery *bat) return -EINVAL; } - return iio_write_channel_attribute(bat->channel, - scale_raw[best_idx], - scale_raw[best_idx + 1], - IIO_CHAN_INFO_SCALE); + /* Only set scale if there is more than one (fractional) entry */ + if (scale_len > 2) { + ret = iio_write_channel_attribute(bat->channel, + scale_raw[best_idx], + scale_raw[best_idx + 1], + IIO_CHAN_INFO_SCALE); + if (ret) + return ret; + } + + return 0; } static enum power_supply_property ingenic_battery_properties[] = { diff --git a/drivers/power/supply/ltc2941-battery-gauge.c 
b/drivers/power/supply/ltc2941-battery-gauge.c index da49436176cd..30a9014b2f95 100644 --- a/drivers/power/supply/ltc2941-battery-gauge.c +++ b/drivers/power/supply/ltc2941-battery-gauge.c @@ -449,7 +449,7 @@ static int ltc294x_i2c_remove(struct i2c_client *client) { struct ltc294x_info *info = i2c_get_clientdata(client); - cancel_delayed_work(&info->work); + cancel_delayed_work_sync(&info->work); power_supply_unregister(info->supply); return 0; } diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 94ddd7d659c8..925b0004a0ed 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1293,6 +1293,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv) struct cpuinfo_x86 *c = &cpu_data(cpu); int ret; + if (!rapl_defaults) + return ERR_PTR(-ENODEV); + rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); if (!rp) return ERR_PTR(-ENOMEM); diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index e60eab7f8a61..b84f16bbd6f2 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -166,10 +166,11 @@ static struct posix_clock_operations ptp_clock_ops = { .read = ptp_read, }; -static void delete_ptp_clock(struct posix_clock *pc) +static void ptp_clock_release(struct device *dev) { - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev); + ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); @@ -213,7 +214,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } ptp->clock.ops = ptp_clock_ops; - ptp->clock.release = delete_ptp_clock; ptp->info = info; ptp->devid = MKDEV(major, index); ptp->index = index; @@ -236,15 +236,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, if (err) goto no_pin_groups; - /* Create a new device in our class. */ - ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, - ptp, ptp->pin_attr_groups, - "ptp%d", ptp->index); - if (IS_ERR(ptp->dev)) { - err = PTR_ERR(ptp->dev); - goto no_device; - } - /* Register a new PPS source. */ if (info->pps) { struct pps_source_info pps; @@ -260,8 +251,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } } - /* Create a posix clock. */ - err = posix_clock_register(&ptp->clock, ptp->devid); + /* Initialize a new device of our class in our clock structure. */ + device_initialize(&ptp->dev); + ptp->dev.devt = ptp->devid; + ptp->dev.class = ptp_class; + ptp->dev.parent = parent; + ptp->dev.groups = ptp->pin_attr_groups; + ptp->dev.release = ptp_clock_release; + dev_set_drvdata(&ptp->dev, ptp); + dev_set_name(&ptp->dev, "ptp%d", ptp->index); + + /* Create a posix clock and link it to the device. 
*/ + err = posix_clock_register(&ptp->clock, &ptp->dev); if (err) { pr_err("failed to create posix clock\n"); goto no_clock; @@ -273,8 +274,6 @@ no_clock: if (ptp->pps_source) pps_unregister_source(ptp->pps_source); no_pps: - device_destroy(ptp_class, ptp->devid); -no_device: ptp_cleanup_pin_groups(ptp); no_pin_groups: if (ptp->kworker) @@ -304,10 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp) if (ptp->pps_source) pps_unregister_source(ptp->pps_source); - device_destroy(ptp_class, ptp->devid); - ptp_cleanup_pin_groups(ptp); - posix_clock_unregister(&ptp->clock); + return 0; } EXPORT_SYMBOL(ptp_clock_unregister); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 9171d42468fd..6b97155148f1 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -28,7 +28,7 @@ struct timestamp_event_queue { struct ptp_clock { struct posix_clock clock; - struct device *dev; + struct device dev; struct ptp_clock_info *info; dev_t devid; int index; /* index into clocks.map */ diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index 00772fc53490..88a3c5690fea 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -256,7 +256,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) if (!timer_pdev) { dev_err(&pdev->dev, "Unable to find Timer pdev\n"); ret = -ENODEV; - goto put; + goto err_find_timer_pdev; } timer_pdata = dev_get_platdata(&timer_pdev->dev); @@ -264,7 +264,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "dmtimer pdata structure NULL, deferring probe\n"); ret = -EPROBE_DEFER; - goto put; + goto err_platdata; } pdata = timer_pdata->timer_ops; @@ -283,30 +283,25 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) !pdata->write_counter) { dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n"); ret = -EINVAL; - goto put; + goto err_platdata; } if (!of_get_property(timer, "ti,timer-pwm", NULL)) { dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n"); ret = -ENODEV; - goto put; + goto err_timer_property; } dm_timer = pdata->request_by_node(timer); if (!dm_timer) { ret = -EPROBE_DEFER; - goto put; + goto err_request_timer; } -put: - of_node_put(timer); - if (ret < 0) - return ret; - omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL); if (!omap) { - pdata->free(dm_timer); - return -ENOMEM; + ret = -ENOMEM; + goto err_alloc_omap; } omap->pdata = pdata; @@ -339,27 +334,56 @@ put: ret = pwmchip_add(&omap->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM\n"); - omap->pdata->free(omap->dm_timer); - return ret; + goto err_pwmchip_add; } + of_node_put(timer); + platform_set_drvdata(pdev, omap); return 0; + +err_pwmchip_add: + + /* + * *omap is allocated using devm_kzalloc, + * so no free necessary here + */ +err_alloc_omap: + + pdata->free(dm_timer); +err_request_timer: + +err_timer_property: +err_platdata: + + put_device(&timer_pdev->dev); +err_find_timer_pdev: + + of_node_put(timer); + + return ret; } static int pwm_omap_dmtimer_remove(struct platform_device *pdev) { struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&omap->chip); + if (ret) + return ret; if (pm_runtime_active(&omap->dm_timer_pdev->dev)) omap->pdata->stop(omap->dm_timer); omap->pdata->free(omap->dm_timer); + put_device(&omap->dm_timer_pdev->dev); + mutex_destroy(&omap->mutex); - return pwmchip_remove(&omap->chip); + return 0; } static const struct of_device_id pwm_omap_dmtimer_of_match[] = { 
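The pwm-omap-dmtimer probe rework above replaces a single shared "put:" label, which dropped the OF node on every exit but never released the timer platform-device reference, with one unwind label per acquired resource, so each failure path releases exactly what is held, in reverse order of acquisition. Below is a minimal, self-contained sketch of that goto-unwind idiom; it is not the driver's code, and every name in it (take, drop, register_chip, example_probe) is invented for illustration:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-ins for the resources the real probe acquires
	 * (OF node, timer platform device, dm_timer); none of these names
	 * come from the driver itself. */
	struct res { const char *name; };

	static struct res *take(const char *name, int fail)
	{
		struct res *r = fail ? NULL : malloc(sizeof(*r));

		if (r)
			r->name = name;
		return r;
	}

	static void drop(struct res *r)
	{
		printf("released %s\n", r->name);
		free(r);
	}

	static int register_chip(int fail)
	{
		return fail ? -EINVAL : 0;
	}

	static int example_probe(int fail_at)
	{
		struct res *node, *timer;
		int ret;

		node = take("node", fail_at == 1);
		if (!node)
			return -ENOMEM;		/* nothing else held yet */

		timer = take("timer", fail_at == 2);
		if (!timer) {
			ret = -ENODEV;
			goto err_find_timer;	/* undo only the node */
		}

		ret = register_chip(fail_at == 3);
		if (ret)
			goto err_register;	/* undo timer, then node */

		return 0;			/* success: resources stay held */

	err_register:
		drop(timer);
	err_find_timer:
		drop(node);
		return ret;
	}

	int main(void)
	{
		printf("probe(3) -> %d\n", example_probe(3));
		return 0;
	}

Calling example_probe(3) fails at the last stage and unwinds through err_register and then err_find_timer, mirroring how the patch falls from err_pwmchip_add through pdata->free(), put_device() and of_node_put().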
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index 168684b02ebc..b07bdca3d510 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -159,13 +159,9 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset, static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) { struct pca9685 *pca = gpiochip_get_data(gpio); - struct pwm_device *pwm; pca9685_pwm_gpio_set(gpio, offset, 0); pm_runtime_put(pca->chip.dev); - mutex_lock(&pca->lock); - pwm = &pca->chip.pwms[offset]; - mutex_unlock(&pca->lock); } static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 6f5840a1a82d..05273725a9ff 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -137,10 +137,10 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip, val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm)); - tmp = prescaler * NSEC_PER_SEC * PWM_REG_DTY(val); + tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val); state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); - tmp = prescaler * NSEC_PER_SEC * PWM_REG_PRD(val); + tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_PRD(val); state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); } diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index efb2f01a9101..f60e1b26c2d2 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c @@ -953,23 +953,6 @@ static struct ab8500_regulator_info .update_val_idle = 0x82, .update_val_normal = 0x02, }, - [AB8505_LDO_USB] = { - .desc = { - .name = "LDO-USB", - .ops = &ab8500_regulator_mode_ops, - .type = REGULATOR_VOLTAGE, - .id = AB8505_LDO_USB, - .owner = THIS_MODULE, - .n_voltages = 1, - .volt_table = fixed_3300000_voltage, - }, - .update_bank = 0x03, - .update_reg = 0x82, - .update_mask = 0x03, - .update_val = 0x01, - .update_val_idle = 0x03, - .update_val_normal = 0x01, - }, [AB8505_LDO_AUDIO] = { .desc = { .name = "LDO-AUDIO", diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 989506bd90b1..16f0c8570036 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) int i; for (i = 0; i < rate_count; i++) { - if (ramp <= slew_rates[i]) - cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); - else + if (ramp > slew_rates[i]) break; + + if (id == AXP20X_DCDC2) + cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i); + else + cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); } if (cfg == 0xff) { @@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100, AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, - AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100, AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c index 0248a61f1006..5bf8a2dc5fe7 100644 --- a/drivers/regulator/bd70528-regulator.c +++ b/drivers/regulator/bd70528-regulator.c @@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = 
regulator_set_voltage_time_sel, - .set_ramp_delay = bd70528_set_ramp_delay, }; static const struct regulator_ops bd70528_led_ops = { @@ -286,3 +285,4 @@ module_platform_driver(bd70528_regulator); MODULE_AUTHOR("Matti Vaittinen "); MODULE_DESCRIPTION("BD70528 voltage regulator driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd70528-pmic"); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index a46be221dbdc..0011bdc15afb 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1403,7 +1403,9 @@ static int set_machine_constraints(struct regulator_dev *rdev, rdev_err(rdev, "failed to enable\n"); return ret; } - rdev->use_count++; + + if (rdev->constraints->always_on) + rdev->use_count++; } print_constraints(rdev); @@ -1935,8 +1937,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id, regulator = create_regulator(rdev, dev, id); if (regulator == NULL) { regulator = ERR_PTR(-ENOMEM); - put_device(&rdev->dev); module_put(rdev->owner); + put_device(&rdev->dev); return regulator; } @@ -2057,13 +2059,13 @@ static void _regulator_put(struct regulator *regulator) rdev->open_count--; rdev->exclusive = 0; - put_device(&rdev->dev); regulator_unlock(rdev); kfree_const(regulator->supply_name); kfree(regulator); module_put(rdev->owner); + put_device(&rdev->dev); } /** @@ -3460,6 +3462,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, out: return ret; } +EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev); static int regulator_limit_voltage_step(struct regulator_dev *rdev, int *current_uV, int *min_uV) @@ -4024,6 +4027,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev) return ret; return ret - rdev->constraints->uV_offset; } +EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev); /** * regulator_get_voltage - get regulator output voltage @@ -4990,6 +4994,7 @@ regulator_register(const struct regulator_desc *regulator_desc, struct regulator_dev *rdev; bool dangling_cfg_gpiod = false; bool dangling_of_gpiod = false; + bool reg_device_fail = false; struct device *dev; int ret, i; @@ -5175,7 +5180,7 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); ret = device_register(&rdev->dev); if (ret != 0) { - put_device(&rdev->dev); + reg_device_fail = true; goto unset_supplies; } @@ -5198,6 +5203,7 @@ unset_supplies: regulator_remove_coupling(rdev); mutex_unlock(&regulator_list_mutex); wash: + kfree(rdev->coupling_desc.coupled_rdevs); kfree(rdev->constraints); mutex_lock(&regulator_list_mutex); regulator_ena_gpio_free(rdev); @@ -5205,7 +5211,10 @@ wash: clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - kfree(rdev); + if (reg_device_fail) + put_device(&rdev->dev); + else + kfree(rdev); kfree(config); rinse: if (dangling_cfg_gpiod) diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index ca3dc3f3bb29..bb16c465426e 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -13,6 +13,8 @@ #include #include +#include "internal.h" + /** * regulator_is_enabled_regmap - standard is_enabled() for regmap users * @@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, consumers[i].supply = supply_names[i]; } EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names); + +/** + * regulator_is_equal - test whether two regulators are the same + * + * @reg1: first regulator to operate on + * @reg2: second regulator to operate on + */ +bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2) +{ + return
reg1->rdev == reg2->rdev; +} +EXPORT_SYMBOL_GPL(regulator_is_equal); diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index 76152aaa330b..96dc0eea7659 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c @@ -296,7 +296,10 @@ static int max8907_regulator_probe(struct platform_device *pdev) memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc)); /* Backwards compatibility with MAX8907B; SD1 uses different voltages */ - regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val); + ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val); + if (ret) + return ret; + if ((val & MAX8907_II2RR_VERSION_MASK) == MAX8907_II2RR_VERSION_REV_B) { pmic->desc[MAX8907_SD1].min_uV = 637500; @@ -333,14 +336,20 @@ static int max8907_regulator_probe(struct platform_device *pdev) } if (pmic->desc[i].ops == &max8907_ldo_ops) { - regmap_read(config.regmap, pmic->desc[i].enable_reg, + ret = regmap_read(config.regmap, pmic->desc[i].enable_reg, &val); + if (ret) + return ret; + if ((val & MAX8907_MASK_LDO_SEQ) != MAX8907_MASK_LDO_SEQ) pmic->desc[i].ops = &max8907_ldo_hwctl_ops; } else if (pmic->desc[i].ops == &max8907_out5v_ops) { - regmap_read(config.regmap, pmic->desc[i].enable_reg, + ret = regmap_read(config.regmap, pmic->desc[i].enable_reg, &val); + if (ret) + return ret; + if ((val & (MAX8907_MASK_OUT5V_VINEN | MAX8907_MASK_OUT5V_ENSRC)) != MAX8907_MASK_OUT5V_ENSRC) diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 61bd5ef0806c..97c846c19c2f 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1297,7 +1297,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev, } if (!pdata->dvs_gpio[i]) { - dev_warn(dev, "there is no dvs%d gpio\n", i); + dev_info(dev, "there is no dvs%d gpio\n", i); continue; } diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c index eb807a059479..aa6e7c5341ce 100644 --- a/drivers/regulator/rn5t618-regulator.c +++ b/drivers/regulator/rn5t618-regulator.c @@ -148,6 +148,7 @@ static struct platform_driver rn5t618_regulator_driver = { module_platform_driver(rn5t618_regulator_driver); +MODULE_ALIAS("platform:rn5t618-regulator"); MODULE_AUTHOR("Beniamino Galvani "); MODULE_DESCRIPTION("RN5T618 regulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 8919a5130bec..25f24df9aa82 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev) } val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); - val = (val & ~STM32_ENVR) | STM32_HIZ; + val &= ~STM32_ENVR; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); pm_runtime_mark_last_busy(priv->dev); @@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = { .volt_table = stm32_vrefbuf_voltages, .n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages), .ops = &stm32_vrefbuf_volt_ops, + .off_on_delay = 1000, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }; diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c index 9a9ee8188109..cbadb1c99679 100644 --- a/drivers/regulator/vctrl-regulator.c +++ b/drivers/regulator/vctrl-regulator.c @@ -11,10 +11,13 @@ #include #include #include +#include #include #include #include +#include "internal.h" + struct vctrl_voltage_range { int min_uV; int max_uV; @@ -79,7 +82,7 @@ 
static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV) static int vctrl_get_voltage(struct regulator_dev *rdev) { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); - int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); return vctrl_calc_output_voltage(vctrl, ctrl_uV); } @@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); struct regulator *ctrl_reg = vctrl->ctrl_reg; - int orig_ctrl_uV = regulator_get_voltage(ctrl_reg); + int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev); int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV); int ret; if (req_min_uV >= uV || !vctrl->ovp_threshold) /* voltage rising or no OVP */ - return regulator_set_voltage( - ctrl_reg, + return regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl_calc_ctrl_voltage(vctrl, req_min_uV), - vctrl_calc_ctrl_voltage(vctrl, req_max_uV)); + vctrl_calc_ctrl_voltage(vctrl, req_max_uV), + PM_SUSPEND_ON); while (uV > req_min_uV) { int max_drop_uV = (uV * vctrl->ovp_threshold) / 100; @@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, next_uV = max_t(int, req_min_uV, uV - max_drop_uV); next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV); - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, next_ctrl_uV, - next_ctrl_uV); + next_ctrl_uV, + PM_SUSPEND_ON); if (ret) goto err; @@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, err: /* Try to go back to original voltage */ - regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV); + regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV, + PM_SUSPEND_ON); return ret; } @@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, if (selector >= vctrl->sel || !vctrl->ovp_threshold) { /* voltage rising or no OVP */ - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[selector].ctrl, - vctrl->vtable[selector].ctrl); + vctrl->vtable[selector].ctrl, + PM_SUSPEND_ON); if (!ret) vctrl->sel = selector; @@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, else next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel; - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[next_sel].ctrl, - vctrl->vtable[next_sel].ctrl); + vctrl->vtable[next_sel].ctrl, + PM_SUSPEND_ON); if (ret) { dev_err(&rdev->dev, "failed to set control voltage to %duV\n", @@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, err: if (vctrl->sel != orig_sel) { /* Try to go back to original voltage */ - if (!regulator_set_voltage(ctrl_reg, + if (!regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[orig_sel].ctrl, - vctrl->vtable[orig_sel].ctrl)) + vctrl->vtable[orig_sel].ctrl, + PM_SUSPEND_ON)) vctrl->sel = orig_sel; else dev_warn(&rdev->dev, @@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev) if (ret) return ret; - ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); if (ctrl_uV < 0) { dev_err(&pdev->dev, "failed to get control voltage\n"); return ctrl_uV; diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 3c5fbbbfb0f1..b542debbc6f0 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2224,7 +2224,7 @@ static 
int __init remoteproc_init(void) return 0; } -module_init(remoteproc_init); +subsys_initcall(remoteproc_init); static void __exit remoteproc_exit(void) { diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 3c9a64c1b7a8..76c0dc7f165d 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -787,7 +787,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev, return ERR_PTR(-ENOMEM); rstc = __reset_control_get(dev, id, index, shared, optional, acquired); - if (!IS_ERR(rstc)) { + if (!IS_ERR_OR_NULL(rstc)) { *ptr = rstc; devres_add(dev, ptr); } else { @@ -861,8 +861,7 @@ static int of_reset_control_get_count(struct device_node *node) * @acquired: only one reset control may be acquired for a given controller * and ID * - * Returns pointer to allocated reset_control_array on success or - * error on failure + * Returns pointer to allocated reset_control on success or error on failure */ struct reset_control * of_reset_control_array_get(struct device_node *np, bool shared, bool optional, @@ -915,8 +914,7 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get); * that just have to be asserted or deasserted, without any * requirements on the order. * - * Returns pointer to allocated reset_control_array on success or - * error on failure + * Returns pointer to allocated reset_control on success or error on failure */ struct reset_control * devm_reset_control_array_get(struct device *dev, bool shared, bool optional) @@ -930,7 +928,7 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional) return ERR_PTR(-ENOMEM); rstc = of_reset_control_array_get(dev->of_node, shared, optional, true); - if (IS_ERR(rstc)) { + if (IS_ERR_OR_NULL(rstc)) { devres_free(devres); return rstc; } diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c index a608f445dad6..f213264c8567 100644 --- a/drivers/reset/reset-brcmstb.c +++ b/drivers/reset/reset-brcmstb.c @@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) || - !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) { - dev_err(kdev, "incorrect register range\n"); - return -EINVAL; - } - priv->base = devm_ioremap_resource(kdev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index 74e589f5dd6a..279e535bf5d8 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c @@ -193,8 +193,8 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = { #define UNIPHIER_PERI_RESET_FI2C(id, ch) \ UNIPHIER_RESETX((id), 0x114, 24 + (ch)) -#define UNIPHIER_PERI_RESET_SCSSI(id) \ - UNIPHIER_RESETX((id), 0x110, 17) +#define UNIPHIER_PERI_RESET_SCSSI(id, ch) \ + UNIPHIER_RESETX((id), 0x110, 17 + (ch)) #define UNIPHIER_PERI_RESET_MCSSI(id) \ UNIPHIER_RESETX((id), 0x114, 14) @@ -209,7 +209,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = { UNIPHIER_PERI_RESET_I2C(6, 2), UNIPHIER_PERI_RESET_I2C(7, 3), UNIPHIER_PERI_RESET_I2C(8, 4), - UNIPHIER_PERI_RESET_SCSSI(11), + UNIPHIER_PERI_RESET_SCSSI(11, 0), UNIPHIER_RESET_END, }; @@ -225,8 +225,11 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = { UNIPHIER_PERI_RESET_FI2C(8, 4), UNIPHIER_PERI_RESET_FI2C(9, 5), UNIPHIER_PERI_RESET_FI2C(10, 6), - UNIPHIER_PERI_RESET_SCSSI(11), - UNIPHIER_PERI_RESET_MCSSI(12), + UNIPHIER_PERI_RESET_SCSSI(11, 0), + 
UNIPHIER_PERI_RESET_SCSSI(12, 1), + UNIPHIER_PERI_RESET_SCSSI(13, 2), + UNIPHIER_PERI_RESET_SCSSI(14, 3), + UNIPHIER_PERI_RESET_MCSSI(15), UNIPHIER_RESET_END, }; diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 621f1afd4d6b..1995f5b3ea67 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref) { struct glink_channel *channel = container_of(ref, struct glink_channel, refcount); + struct glink_core_rx_intent *intent; + struct glink_core_rx_intent *tmp; unsigned long flags; + int iid; + + /* cancel pending rx_done work */ + cancel_work_sync(&channel->intent_work); spin_lock_irqsave(&channel->intent_lock, flags); + /* Free all non-reuse intents pending rx_done work */ + list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { + if (!intent->reuse) { + kfree(intent->data); + kfree(intent); + } + } + + idr_for_each_entry(&channel->liids, tmp, iid) { + kfree(tmp->data); + kfree(tmp); + } idr_destroy(&channel->liids); + + idr_for_each_entry(&channel->riids, tmp, iid) + kfree(tmp); idr_destroy(&channel->riids); spin_unlock_irqrestore(&channel->intent_lock, flags); @@ -1094,13 +1115,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink, close_link: /* * Send a close request to "undo" our open-ack. The close-ack will - * release the last reference. + * release qcom_glink_send_open_req() reference and the last reference + * will be released after receiving remote_close or transport unregister + * by calling qcom_glink_native_remove(). */ qcom_glink_send_close_req(glink, channel); - /* Release qcom_glink_send_open_req() reference */ - kref_put(&channel->refcount, qcom_glink_channel_release); - return ret; } @@ -1415,15 +1435,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, ret = rpmsg_register_device(rpdev); if (ret) - goto free_rpdev; + goto rcid_remove; channel->rpdev = rpdev; } return 0; -free_rpdev: - kfree(rpdev); rcid_remove: spin_lock_irqsave(&glink->idr_lock, flags); idr_remove(&glink->rcids, channel->rcid); @@ -1544,6 +1562,18 @@ static void qcom_glink_work(struct work_struct *work) } } +static void qcom_glink_cancel_rx_work(struct qcom_glink *glink) +{ + struct glink_defer_cmd *dcmd; + struct glink_defer_cmd *tmp; + + /* cancel any pending deferred rx_work */ + cancel_work_sync(&glink->rx_work); + + list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node) + kfree(dcmd); +} + struct qcom_glink *qcom_glink_native_probe(struct device *dev, unsigned long features, struct qcom_glink_pipe *rx, @@ -1619,23 +1649,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink) struct glink_channel *channel; int cid; int ret; - unsigned long flags; disable_irq(glink->irq); - cancel_work_sync(&glink->rx_work); + qcom_glink_cancel_rx_work(glink); ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device); if (ret) dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret); - spin_lock_irqsave(&glink->idr_lock, flags); /* Release any defunct local channels, waiting for close-ack */ idr_for_each_entry(&glink->lcids, channel, cid) kref_put(&channel->refcount, qcom_glink_channel_release); + /* Release any defunct local channels, waiting for close-req */ + idr_for_each_entry(&glink->rcids, channel, cid) + kref_put(&channel->refcount, qcom_glink_channel_release); + idr_destroy(&glink->lcids); idr_destroy(&glink->rcids); - spin_unlock_irqrestore(&glink->idr_lock, flags);
mbox_free_channel(glink->mbox_chan); } EXPORT_SYMBOL_GPL(qcom_glink_native_remove); diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c index 4238383d8685..579bc4443f6d 100644 --- a/drivers/rpmsg/qcom_glink_smem.c +++ b/drivers/rpmsg/qcom_glink_smem.c @@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np, tail = le32_to_cpu(*pipe->tail); tail += count; - if (tail > pipe->native.length) + if (tail >= pipe->native.length) tail -= pipe->native.length; *pipe->tail = cpu_to_le32(tail); diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index eea5ebbb5119..c655074c07c2 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -227,8 +227,10 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb, if (!kbuf) return -ENOMEM; - if (!copy_from_iter_full(kbuf, len, from)) - return -EFAULT; + if (!copy_from_iter_full(kbuf, len, from)) { + ret = -EFAULT; + goto free_kbuf; + } if (mutex_lock_interruptible(&eptdev->ept_lock)) { ret = -ERESTARTSYS; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 1adf9f815652..c5b980414086 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -240,6 +240,7 @@ config RTC_DRV_AS3722 config RTC_DRV_DS1307 tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057" + select REGMAP_I2C help If you say yes here you get support for various compatible RTC chips (often with battery backup) connected with I2C. This driver @@ -326,6 +327,7 @@ config RTC_DRV_MAX6900 config RTC_DRV_MAX8907 tristate "Maxim MAX8907" depends on MFD_MAX8907 || COMPILE_TEST + select REGMAP_IRQ help If you say yes here you will get support for the RTC of Maxim MAX8907 PMIC. @@ -632,6 +634,7 @@ config RTC_DRV_RX8010 config RTC_DRV_RX8581 tristate "Epson RX-8571/RX-8581" + select REGMAP_I2C help If you say yes here you will get support for the Epson RX-8571/ RX-8581. @@ -659,6 +662,7 @@ config RTC_DRV_EM3027 config RTC_DRV_RV3028 tristate "Micro Crystal RV3028" + select REGMAP_I2C help If you say yes here you get support for the Micro Crystal RV3028. @@ -688,6 +692,7 @@ config RTC_DRV_S5M config RTC_DRV_SD3078 tristate "ZXW Shenzhen whwave SD3078" + select REGMAP_I2C help If you say yes here you get support for the ZXW Shenzhen whwave SD3078 RTC chips. @@ -859,14 +864,14 @@ config RTC_I2C_AND_SPI default m if I2C=m default y if I2C=y default y if SPI_MASTER=y - select REGMAP_I2C if I2C - select REGMAP_SPI if SPI_MASTER comment "SPI and I2C RTC drivers" config RTC_DRV_DS3232 tristate "Dallas/Maxim DS3232/DS3234" depends on RTC_I2C_AND_SPI + select REGMAP_I2C if I2C + select REGMAP_SPI if SPI_MASTER help If you say yes here you get support for Dallas Semiconductor DS3232 and DS3234 real-time clock chips. If an interrupt is associated @@ -886,6 +891,8 @@ config RTC_DRV_DS3232_HWMON config RTC_DRV_PCF2127 tristate "NXP PCF2127" depends on RTC_I2C_AND_SPI + select REGMAP_I2C if I2C + select REGMAP_SPI if SPI_MASTER select WATCHDOG_CORE if WATCHDOG help If you say yes here you get support for the NXP PCF2127/29 RTC @@ -902,6 +909,8 @@ config RTC_DRV_PCF2127 config RTC_DRV_RV3029C2 tristate "Micro Crystal RV3029/3049" depends on RTC_I2C_AND_SPI + select REGMAP_I2C if I2C + select REGMAP_SPI if SPI_MASTER help If you say yes here you get support for the Micro Crystal RV3029 and RV3049 RTC chips. 
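The one-character qcom_glink_smem change above (from '>' to '>=') fixes a classic circular-buffer off-by-one: after "tail += count", a tail that lands exactly on the FIFO length is one past the last valid index and must wrap to zero, but the old strict comparison let that value through unchanged. Here is a small self-contained demo of the corrected wrap arithmetic; FIFO_LEN and advance() are made-up names (the driver takes the length from its pipe descriptor), and it assumes count never exceeds the FIFO length, as the driver's bounded reads guarantee:

	#include <assert.h>
	#include <stdio.h>

	#define FIFO_LEN 16u

	/* Advance a FIFO tail index with wrap-around. With '>' instead of
	 * '>=', a tail that lands exactly on FIFO_LEN would escape the wrap
	 * and become an out-of-range index instead of wrapping to 0. */
	static unsigned int advance(unsigned int tail, unsigned int count)
	{
		tail += count;
		if (tail >= FIFO_LEN)	/* '>=', not '>': FIFO_LEN itself must wrap */
			tail -= FIFO_LEN;
		return tail;
	}

	int main(void)
	{
		assert(advance(12, 4) == 0);	/* exactly at the end: must wrap */
		assert(advance(12, 3) == 15);	/* still in range: unchanged */
		printf("tail wraps correctly\n");
		return 0;
	}

With the strict comparison the first assert would fail: advance(12, 4) would return 16, one past the last valid index, which is exactly the corruption the patch prevents.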
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index c93ef33b01d3..5c1378d2fab3 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -125,7 +125,7 @@ EXPORT_SYMBOL_GPL(rtc_read_time); int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) { - int err; + int err, uie; err = rtc_valid_tm(tm); if (err != 0) @@ -137,6 +137,17 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) rtc_subtract_offset(rtc, tm); +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active; +#else + uie = rtc->uie_rtctimer.enabled; +#endif + if (uie) { + err = rtc_update_irq_enable(rtc, 0); + if (err) + return err; + } + err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; @@ -153,6 +164,12 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) /* A timer might have just expired */ schedule_work(&rtc->irqwork); + if (uie) { + err = rtc_update_irq_enable(rtc, 1); + if (err) + return err; + } + trace_rtc_set_time(rtc_tm_to_time64(tm), err); return err; } diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c index 7744333b0f40..627037aa66a8 100644 --- a/drivers/rtc/rtc-bd70528.c +++ b/drivers/rtc/rtc-bd70528.c @@ -491,3 +491,4 @@ module_platform_driver(bd70528_rtc); MODULE_AUTHOR("Matti Vaittinen "); MODULE_DESCRIPTION("BD70528 RTC driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd70528-rtc"); diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c index 3e9800f9878a..82d2ab0b3e9c 100644 --- a/drivers/rtc/rtc-brcmstb-waketimer.c +++ b/drivers/rtc/rtc-brcmstb-waketimer.c @@ -277,6 +277,7 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev) struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev); unregister_reboot_notifier(&timer->reboot_notifier); + clk_disable_unprepare(timer->clk); return 0; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 033303708c8b..cb28bbdc9e17 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rtc_cmos_int_handler = cmos_interrupt; retval = request_irq(rtc_irq, rtc_cmos_int_handler, - IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev), + 0, dev_name(&cmos_rtc.rtc->dev), cmos_rtc.rtc); if (retval < 0) { dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq); diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c index 443f6d05ce29..fb6d7967ec00 100644 --- a/drivers/rtc/rtc-hym8563.c +++ b/drivers/rtc/rtc-hym8563.c @@ -97,7 +97,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm) if (!hym8563->valid) { dev_warn(&client->dev, "no valid clock/calendar values available\n"); - return -EPERM; + return -EINVAL; } ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf); diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index 1c2d3c4a4963..b1f2bedee77e 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c @@ -133,7 +133,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm) msm6242_read(priv, MSM6242_SECOND1); tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 + msm6242_read(priv, MSM6242_MINUTE1); - tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 + + tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) & + MSM6242_HOUR10_HR_MASK) * 10 + msm6242_read(priv, MSM6242_HOUR1); tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 + msm6242_read(priv, MSM6242_DAY1); diff --git a/drivers/rtc/rtc-mt6397.c 
b/drivers/rtc/rtc-mt6397.c index 704229eb0cac..b216bdcba0da 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -47,6 +47,14 @@ #define RTC_AL_SEC 0x0018 +#define RTC_AL_SEC_MASK 0x003f +#define RTC_AL_MIN_MASK 0x003f +#define RTC_AL_HOU_MASK 0x001f +#define RTC_AL_DOM_MASK 0x001f +#define RTC_AL_DOW_MASK 0x0007 +#define RTC_AL_MTH_MASK 0x000f +#define RTC_AL_YEA_MASK 0x007f + #define RTC_PDN2 0x002e #define RTC_PDN2_PWRON_ALARM BIT(4) @@ -103,7 +111,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data) irqen = irqsta & ~RTC_IRQ_EN_AL; mutex_lock(&rtc->lock); if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, - irqen) < 0) + irqen) == 0) mtk_rtc_write_trigger(rtc); mutex_unlock(&rtc->lock); @@ -225,12 +233,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM); mutex_unlock(&rtc->lock); - tm->tm_sec = data[RTC_OFFSET_SEC]; - tm->tm_min = data[RTC_OFFSET_MIN]; - tm->tm_hour = data[RTC_OFFSET_HOUR]; - tm->tm_mday = data[RTC_OFFSET_DOM]; - tm->tm_mon = data[RTC_OFFSET_MTH]; - tm->tm_year = data[RTC_OFFSET_YEAR]; + tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK; + tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK; + tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK; + tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK; + tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK; + tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK; tm->tm_year += RTC_MIN_YEAR_OFFSET; tm->tm_mon--; @@ -251,14 +259,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) tm->tm_year -= RTC_MIN_YEAR_OFFSET; tm->tm_mon++; - data[RTC_OFFSET_SEC] = tm->tm_sec; - data[RTC_OFFSET_MIN] = tm->tm_min; - data[RTC_OFFSET_HOUR] = tm->tm_hour; - data[RTC_OFFSET_DOM] = tm->tm_mday; - data[RTC_OFFSET_MTH] = tm->tm_mon; - data[RTC_OFFSET_YEAR] = tm->tm_year; - mutex_lock(&rtc->lock); + ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC, + data, RTC_OFFSET_COUNT); + if (ret < 0) + goto exit; + + data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) | + (tm->tm_sec & RTC_AL_SEC_MASK)); + data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) | + (tm->tm_min & RTC_AL_MIN_MASK)); + data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) | + (tm->tm_hour & RTC_AL_HOU_MASK)); + data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) | + (tm->tm_mday & RTC_AL_DOM_MASK)); + data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) | + (tm->tm_mon & RTC_AL_MTH_MASK)); + data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) | + (tm->tm_year & RTC_AL_YEA_MASK)); + if (alm->enabled) { ret = regmap_bulk_write(rtc->regmap, rtc->addr_base + RTC_AL_SEC, diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 5e2bd9f1d01e..fc32be687606 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -380,6 +380,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node) CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc", sun50i_h6_rtc_clk_init); +/* + * The R40 user manual is self-conflicting on whether the prescaler is + * fixed or configurable. The clock diagram shows it as fixed, but there + * is also a configurable divider in the RTC block. 
+ */ +static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = { + .rc_osc_rate = 16000000, + .fixed_prescaler = 512, +}; +static void __init sun8i_r40_rtc_clk_init(struct device_node *node) +{ + sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data); +} +CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc", + sun8i_r40_rtc_clk_init); + static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = { .rc_osc_rate = 32000, .has_out_clk = 1, diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 6cca72782af6..cf87eb27879f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void) (unsigned long) block); INIT_LIST_HEAD(&block->ccw_queue); spin_lock_init(&block->queue_lock); + INIT_LIST_HEAD(&block->format_list); + spin_lock_init(&block->format_lock); timer_setup(&block->timer, dasd_block_timeout, 0); spin_lock_init(&block->profile.lock); @@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, if (dasd_ese_needs_format(cqr->block, irb)) { if (rq_data_dir((struct request *)cqr->callback_data) == READ) { - device->discipline->ese_read(cqr); + device->discipline->ese_read(cqr, irb); cqr->status = DASD_CQR_SUCCESS; cqr->stopclk = now; dasd_device_clear_timer(device); dasd_schedule_device_bh(device); return; } - fcqr = device->discipline->ese_format(device, cqr); + fcqr = device->discipline->ese_format(device, cqr, irb); if (IS_ERR(fcqr)) { + if (PTR_ERR(fcqr) == -EINVAL) { + cqr->status = DASD_CQR_ERROR; + return; + } /* * If we can't format now, let the request go * one extra round. Maybe we can format later. */ cqr->status = DASD_CQR_QUEUED; + dasd_schedule_device_bh(device); + return; } else { fcqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED; @@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) { struct request *req; blk_status_t error = BLK_STS_OK; + unsigned int proc_bytes; int status; req = (struct request *) cqr->callback_data; dasd_profile_end(cqr->block, cqr, req); + proc_bytes = cqr->proc_bytes; status = cqr->block->base->discipline->free_cp(cqr, req); if (status < 0) error = errno_to_blk_status(status); @@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) blk_mq_end_request(req, error); blk_mq_run_hw_queues(req->q, true); } else { - blk_mq_complete_request(req); + /* + * Partial completed requests can happen with ESE devices. + * During read we might have gotten a NRF error and have to + * complete a request partially. 
+ */ + if (proc_bytes) { + blk_update_request(req, BLK_STS_OK, + blk_rq_bytes(req) - proc_bytes); + blk_mq_requeue_request(req, true); + } else { + blk_mq_complete_request(req); + } } } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index c94184d080f8..ad44d22e8859 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) geo->head |= head; } +/* + * calculate failing track from sense data depending if + * it is an EAV device or not + */ +static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device, + sector_t *track) +{ + struct dasd_eckd_private *private = device->private; + u8 *sense = NULL; + u32 cyl; + u8 head; + + sense = dasd_get_sense(irb); + if (!sense) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "ESE error no sense data\n"); + return -EINVAL; + } + if (!(sense[27] & DASD_SENSE_BIT_2)) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "ESE error no valid track data\n"); + return -EINVAL; + } + + if (sense[27] & DASD_SENSE_BIT_3) { + /* enhanced addressing */ + cyl = sense[30] << 20; + cyl |= (sense[31] & 0xF0) << 12; + cyl |= sense[28] << 8; + cyl |= sense[29]; + } else { + cyl = sense[29] << 8; + cyl |= sense[30]; + } + head = sense[31] & 0x0F; + *track = cyl * private->rdc_data.trk_per_cyl + head; + return 0; +} + static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data, struct dasd_device *device) { @@ -1128,7 +1167,8 @@ static u32 get_fcx_max_data(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; + unsigned int mdc; + int tpm; if (dasd_nofcx) return 0; @@ -1142,7 +1182,7 @@ static u32 get_fcx_max_data(struct dasd_device *device) return 0; mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); return 0; } else { @@ -1153,12 +1193,12 @@ static u32 get_fcx_max_data(struct dasd_device *device) static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; - int mdc; + unsigned int mdc; u32 fcx_max_data; if (private->fcx_max_data) { mdc = ccw_device_get_mdc(device->cdev, lpm); - if ((mdc < 0)) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum data size for zHPF " "requests failed (rc=%d) for a new path %x\n", @@ -2073,7 +2113,7 @@ out_err2: dasd_free_block(device->block); device->block = NULL; out_err1: - kfree(private->conf_data); + dasd_eckd_clear_conf_data(device); kfree(device->private); device->private = NULL; return rc; @@ -2082,7 +2122,6 @@ out_err1: static void dasd_eckd_uncheck_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - int i; if (!private) return; @@ -2092,21 +2131,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->sneq = NULL; private->vdsneq = NULL; private->gneq = NULL; - private->conf_len = 0; - for (i = 0; i < 8; i++) { - kfree(device->path[i].conf_data); - if ((__u8 *)device->path[i].conf_data == - private->conf_data) { - private->conf_data = NULL; - private->conf_len = 0; - } - device->path[i].conf_data = NULL; - device->path[i].cssid = 0; - device->path[i].ssid = 0; - device->path[i].chpid = 0; - } - kfree(private->conf_data); - private->conf_data = NULL; + dasd_eckd_clear_conf_data(device); } static struct dasd_ccw_req * @@ 
-3000,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base, 0, NULL); } +static bool test_and_set_format_track(struct dasd_format_entry *to_format, + struct dasd_block *block) +{ + struct dasd_format_entry *format; + unsigned long flags; + bool rc = false; + + spin_lock_irqsave(&block->format_lock, flags); + list_for_each_entry(format, &block->format_list, list) { + if (format->track == to_format->track) { + rc = true; + goto out; + } + } + list_add_tail(&to_format->list, &block->format_list); + +out: + spin_unlock_irqrestore(&block->format_lock, flags); + return rc; +} + +static void clear_format_track(struct dasd_format_entry *format, + struct dasd_block *block) +{ + unsigned long flags; + + spin_lock_irqsave(&block->format_lock, flags); + list_del_init(&format->list); + spin_unlock_irqrestore(&block->format_lock, flags); +} + /* * Callback function to free ESE format requests. */ @@ -3007,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) { struct dasd_device *device = cqr->startdev; struct dasd_eckd_private *private = device->private; + struct dasd_format_entry *format = data; + clear_format_track(format, cqr->basedev->block); private->count--; dasd_ffree_request(cqr, device); } static struct dasd_ccw_req * -dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) +dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, + struct irb *irb) { struct dasd_eckd_private *private; + struct dasd_format_entry *format; struct format_data_t fdata; unsigned int recs_per_trk; struct dasd_ccw_req *fcqr; @@ -3025,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) struct request *req; sector_t first_trk; sector_t last_trk; + sector_t curr_trk; int rc; req = cqr->callback_data; - base = cqr->block->base; + block = cqr->block; + base = block->base; private = base->private; - block = base->block; blksize = block->bp_block; recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + format = &startdev->format_entry; first_trk = blk_rq_pos(req) >> block->s2b_shift; sector_div(first_trk, recs_per_trk); last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; sector_div(last_trk, recs_per_trk); + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); + if (rc) + return ERR_PTR(rc); - fdata.start_unit = first_trk; - fdata.stop_unit = last_trk; + if (curr_trk < first_trk || curr_trk > last_trk) { + DBF_DEV_EVENT(DBF_WARNING, startdev, + "ESE error track %llu not within range %llu - %llu\n", + curr_trk, first_trk, last_trk); + return ERR_PTR(-EINVAL); + } + format->track = curr_trk; + /* test if the track is already being formatted by another thread */ + if (test_and_set_format_track(format, block)) + return ERR_PTR(-EEXIST); + + fdata.start_unit = curr_trk; + fdata.stop_unit = curr_trk; fdata.blksize = blksize; fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; @@ -3058,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) return fcqr; fcqr->callback = dasd_eckd_ese_format_cb; + fcqr->callback_data = (void *) format; return fcqr; } @@ -3065,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) /* * When data is read from an unformatted area of an ESE volume, this function * returns zeroed data and thereby mimics a read of zero data. + * + * The first unformatted track is the one that got the NRF error; the address is + * encoded in the sense data.
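+ * dasd_eckd_track_from_irb() above decodes that address into a track + * number.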
+ * + * All tracks before have returned valid data and should not be touched. + * All tracks after the unformatted track might be formatted or not. This is + * currently not known, so remember the amount of processed data and return the + * remainder of the request to the block layer in __dasd_cleanup_cqr(). */ -static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr) +static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) { + struct dasd_eckd_private *private; + sector_t first_trk, last_trk; + sector_t first_blk, last_blk; unsigned int blksize, off; + unsigned int recs_per_trk; struct dasd_device *base; struct req_iterator iter; + struct dasd_block *block; + unsigned int skip_block; + unsigned int blk_count; struct request *req; struct bio_vec bv; + sector_t curr_trk; + sector_t end_blk; char *dst; + int rc; req = (struct request *) cqr->callback_data; base = cqr->block->base; blksize = base->block->bp_block; + block = cqr->block; + private = base->private; + skip_block = 0; + blk_count = 0; + + recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; + sector_div(first_trk, recs_per_trk); + last_trk = last_blk = + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; + sector_div(last_trk, recs_per_trk); + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); + if (rc) + return rc; + + /* sanity check if the current track from sense data is valid */ + if (curr_trk < first_trk || curr_trk > last_trk) { + DBF_DEV_EVENT(DBF_WARNING, base, + "ESE error track %llu not within range %llu - %llu\n", + curr_trk, first_trk, last_trk); + return -EINVAL; + } + + /* + * if a track other than the first one got the NRF error, we have to + * skip over the valid blocks + */ + if (curr_trk != first_trk) + skip_block = curr_trk * recs_per_trk - first_blk; + + /* we have no information beyond the current track */ + end_blk = (curr_trk + 1) * recs_per_trk; rq_for_each_segment(bv, req, iter) { dst = page_address(bv.bv_page) + bv.bv_offset; for (off = 0; off < bv.bv_len; off += blksize) { - if (dst && rq_data_dir(req) == READ) { + if (first_blk + blk_count >= end_blk) { + cqr->proc_bytes = blk_count * blksize; + return 0; + } + if (dst && !skip_block) { + dst += off; memset(dst, 0, blksize); + } else { + skip_block--; } + blk_count++; } } + return 0; } /* diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 91c9f9586e0f..fa552f9f1666 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -187,6 +187,7 @@ struct dasd_ccw_req { void (*callback)(struct dasd_ccw_req *, void *data); void *callback_data; + unsigned int proc_bytes; /* bytes for partial completion */ }; /* @@ -387,8 +388,9 @@ struct dasd_discipline { int (*ext_pool_warn_thrshld)(struct dasd_device *); int (*ext_pool_oos)(struct dasd_device *); int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *); - struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *); - void (*ese_read)(struct dasd_ccw_req *); + struct dasd_ccw_req *(*ese_format)(struct dasd_device *, + struct dasd_ccw_req *, struct irb *); + int (*ese_read)(struct dasd_ccw_req *, struct irb *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -474,6 +476,11 @@ struct dasd_profile { spinlock_t lock; }; +struct dasd_format_entry { + struct list_head list; + sector_t track; +}; + struct dasd_device { /* Block device stuff.
*/ struct dasd_block *block; @@ -539,6 +546,7 @@ struct dasd_device { struct dentry *debugfs_dentry; struct dentry *hosts_dentry; struct dasd_profile profile; + struct dasd_format_entry format_entry; }; struct dasd_block { @@ -564,6 +572,9 @@ struct dasd_block { struct dentry *debugfs_dentry; struct dasd_profile profile; + + struct list_head format_list; + spinlock_t format_lock; }; struct dasd_attention_data { diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2a3f874a21d5..9cebff8e8d74 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -303,8 +303,10 @@ static void * cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct ccwdev_iter *iter; + loff_t p = *offset; - if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + (*offset)++; + if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; iter = it; if (iter->devno == __MAX_SUBCHANNEL) { @@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) return NULL; } else iter->devno++; - (*offset)++; return iter; } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 65841af15748..ccecf6b9504e 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); * @mask: mask of paths to use * * Return the number of 64K-bytes blocks all paths at least support - * for a transport command. Return values <= 0 indicate failures. + * for a transport command. Return value 0 indicates failure. */ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index cd164886132f..ee0b3c586211 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/io.h> #include <asm/qdio.h> #include "cio.h" @@ -207,7 +208,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, /* fill in sl */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - q->sl->element[j].sbal = (unsigned long)q->sbal[j]; + q->sl->element[j].sbal = virt_to_phys(q->sbal[j]); } static void setup_queues(struct qdio_irq *irq_ptr, diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1915061932e..5256e3ce84e5 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev) drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) return -ENODEV; - /* (re-)init queue's state machine */ - ap_queue_reinit_state(to_ap_queue(dev)); } /* Add queue/card to list of active queues/cards */ diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 433b7b64368d..4348fdff1c61 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -162,7 +162,7 @@ struct ap_card { unsigned int functions; /* AP device function bitfield. */ int queue_depth; /* AP queue depth.*/ int id; /* AP card number. */ - atomic_t total_request_count; /* # requests ever for this AP device.*/ + atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) @@ -179,7 +179,7 @@ struct ap_queue { enum ap_state state; /* State of the AP device. */ int pendingq_count; /* # requests on pendingq list. */ int requestq_count; /* # requests on requestq list.
*/ - int total_request_count; /* # requests ever for this AP device.*/ + u64 total_request_count; /* # requests ever for this AP device.*/ int request_timeout; /* Request timeout in jiffies. */ struct timer_list timeout; /* Timer for request timeouts. */ struct list_head pendingq; /* List of message sent to AP queue. */ @@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); -void ap_queue_reinit_state(struct ap_queue *aq); +void ap_queue_init_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index 63b4cc6cd7e5..e85bfca1ed16 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_card *ac = to_ap_card(dev); - unsigned int req_cnt; + u64 req_cnt; req_cnt = 0; spin_lock_bh(&ap_list_lock); - req_cnt = atomic_read(&ac->total_request_count); + req_cnt = atomic64_read(&ac->total_request_count); spin_unlock_bh(&ap_list_lock); - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); } static ssize_t request_count_store(struct device *dev, @@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev, for_each_ap_queue(aq, ac) aq->total_request_count = 0; spin_unlock_bh(&ap_list_lock); - atomic_set(&ac->total_request_count, 0); + atomic64_set(&ac->total_request_count, 0); return count; } diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index dad2be333d82..a317ab484932 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_queue *aq = to_ap_queue(dev); - unsigned int req_cnt; + u64 req_cnt; spin_lock_bh(&aq->lock); req_cnt = aq->total_request_count; spin_unlock_bh(&aq->lock); - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); } static ssize_t request_count_store(struct device *dev, @@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; aq->qid = qid; - aq->state = AP_STATE_RESET_START; + aq->state = AP_STATE_UNBOUND; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->list); @@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) list_add_tail(&ap_msg->list, &aq->requestq); aq->requestq_count++; aq->total_request_count++; - atomic_inc(&aq->card->total_request_count); + atomic64_inc(&aq->card->total_request_count); /* Send/receive as many request from the queue as possible. 
*/ ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); @@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_reinit_state(struct ap_queue *aq) +void ap_queue_init_state(struct ap_queue *aq) { spin_lock_bh(&aq->lock); aq->state = AP_STATE_RESET_START; ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); } +EXPORT_SYMBOL(ap_queue_init_state); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 9de3d46b3253..5c9898e934d9 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -740,8 +740,10 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) kapqns = kmalloc(nbytes, GFP_KERNEL); if (!kapqns) return ERR_PTR(-ENOMEM); - if (copy_from_user(kapqns, uapqns, nbytes)) + if (copy_from_user(kapqns, uapqns, nbytes)) { + kfree(kapqns); return ERR_PTR(-EFAULT); + } } return kapqns; @@ -792,7 +794,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = cca_sec2protkey(ksp.cardnr, ksp.domain, ksp.seckey.seckey, ksp.protkey.protkey, - NULL, &ksp.protkey.type); + &ksp.protkey.len, &ksp.protkey.type); DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc); if (rc) break; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 9157e728a362..7fa0262e91af 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -605,8 +605,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc, weight += atomic_read(&zc->load); pref_weight += atomic_read(&pref_zc->load); if (weight == pref_weight) - return atomic_read(&zc->card->total_request_count) > - atomic_read(&pref_zc->card->total_request_count); + return atomic64_read(&zc->card->total_request_count) > + atomic64_read(&pref_zc->card->total_request_count); return weight > pref_weight; } @@ -1216,11 +1216,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) spin_unlock(&zcrypt_list_lock); } -static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) +static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) { struct zcrypt_card *zc; struct zcrypt_queue *zq; int card; + u64 cnt; memset(reqcnt, 0, sizeof(int) * max_adapters); spin_lock(&zcrypt_list_lock); @@ -1232,8 +1233,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) || card >= max_adapters) continue; spin_lock(&zq->queue->lock); - reqcnt[card] = zq->queue->total_request_count; + cnt = zq->queue->total_request_count; spin_unlock(&zq->queue->lock); + reqcnt[card] = (cnt < UINT_MAX) ? 
(u32) cnt : UINT_MAX; } } local_bh_enable(); @@ -1411,9 +1413,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, return 0; } case ZCRYPT_PERDEV_REQCNT: { - int *reqcnt; + u32 *reqcnt; - reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL); + reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL); if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); @@ -1470,7 +1472,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, } case Z90STAT_PERDEV_REQCNT: { /* the old ioctl supports only 64 adapters */ - int reqcnt[MAX_ZDEV_CARDIDS]; + u32 reqcnt[MAX_ZDEV_CARDIDS]; zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index c1db64a2db21..110fe9d0cb91 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prepparm = (struct iprepparm *) prepcblk->rpl_parmb; /* do some plausibility checks on the key block */ - if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || - prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { + if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || + prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { DEBUG_ERR("%s reply with invalid or unknown key block\n", __func__); rc = -EIO; diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index c50f3e86cc74..7cbb384ec535 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2A_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 35c7c6672713..c78c0d119806 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_rapq(aq->qid); rc = zcrypt_cex2c_rng_supported(aq); if (rc < 0) { zcrypt_queue_free(zq); @@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) else zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2C_CLEANUP_TIME; aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 442e3d6162f7..6fabc906114c 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX4_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index f34ee41cbed8..4f4dd9d727c9 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -61,6 +61,7 @@ struct error_hdr { #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 #define REP82_ERROR_RESERVED_FIELD 0x88 #define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A +#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B #define REP82_ERROR_TRANSPORT_FAIL 0x90 #define 
REP82_ERROR_PACKET_TRUNCATED 0xA0 #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 @@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq, case REP82_ERROR_INVALID_DOMAIN_PRECHECK: case REP82_ERROR_INVALID_DOMAIN_PENDING: case REP82_ERROR_INVALID_SPECIAL_CMD: + case REP82_ERROR_FILTERED_BY_HYPERVISOR: // REP88_ERROR_INVALID_KEY // '82' CEX2A // REP88_ERROR_OPERAND // '84' CEX2A // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 65e31df37b1f..820f2c29376c 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -620,6 +620,7 @@ struct qeth_ipato { struct qeth_channel { struct ccw_device *ccwdev; + struct qeth_cmd_buffer *active_cmd; enum qeth_channel_states state; atomic_t irq_pending; }; @@ -1024,6 +1025,8 @@ int qeth_do_run_thread(struct qeth_card *, unsigned long); void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok); +int qeth_stop_channel(struct qeth_channel *channel); + void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 83794d7494d4..fe70e9875bde 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -515,7 +515,9 @@ static int __qeth_issue_next_read(struct qeth_card *card) QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); - if (rc) { + if (!rc) { + channel->active_cmd = iob; + } else { QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", rc, CARD_DEVID(card)); atomic_set(&channel->irq_pending, 0); @@ -653,17 +655,17 @@ static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); - if ((buffer[2] & 0xc0) == 0xc0) { + if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", buffer[4]); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, " idxterm"); - QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); - if (buffer[4] == 0xf6) { + QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); + if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || + buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { dev_err(&card->gdev->dev, - "The qeth device is not configured " - "for the OSI layer required by z/VM\n"); - return -EPERM; + "The device does not support the configured transport mode\n"); + return -EPROTONOSUPPORT; } return -EIO; } @@ -740,10 +742,10 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, case 0: break; case -EIO: - qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); /* fall through */ default: + qeth_clear_ipacmd_list(card); goto out; } @@ -986,8 +988,21 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, QETH_CARD_TEXT(card, 5, "data"); } - if (qeth_intparm_is_iob(intparm)) - iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + if (intparm == 0) { + QETH_CARD_TEXT(card, 5, "irqunsol"); + } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { + QETH_CARD_TEXT(card, 5, "irqunexp"); + + dev_err(&cdev->dev, + "Received IRQ with intparm %lx, expected %px\n", + intparm, channel->active_cmd); + if (channel->active_cmd) + 
qeth_cancel_cmd(channel->active_cmd, -EIO); + } else { + iob = (struct qeth_cmd_buffer *) (addr_t)intparm; + } + + channel->active_cmd = NULL; rc = qeth_check_irb_error(card, cdev, irb); if (rc) { @@ -1007,15 +1022,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) channel->state = CH_STATE_HALTED; - if (intparm == QETH_CLEAR_CHANNEL_PARM) { - QETH_CARD_TEXT(card, 6, "clrchpar"); - /* we don't have to handle this further */ - intparm = 0; - } - if (intparm == QETH_HALT_CHANNEL_PARM) { - QETH_CARD_TEXT(card, 6, "hltchpar"); - /* we don't have to handle this further */ - intparm = 0; + if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | + SCSW_FCTL_HALT_FUNC))) { + qeth_cancel_cmd(iob, -ECANCELED); + iob = NULL; } cstat = irb->scsw.cmd.cstat; @@ -1234,7 +1244,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) if (count == 1) dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); - card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; card->qdio.no_out_queues = count; return 0; } @@ -1408,7 +1417,7 @@ static int qeth_clear_channel(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "clearch"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); + rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) @@ -1430,7 +1439,7 @@ static int qeth_halt_channel(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "haltch"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); + rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) @@ -1444,6 +1453,25 @@ static int qeth_halt_channel(struct qeth_card *card, return 0; } +int qeth_stop_channel(struct qeth_channel *channel) +{ + struct ccw_device *cdev = channel->ccwdev; + int rc; + + rc = ccw_device_set_offline(cdev); + + spin_lock_irq(get_ccwdev_lock(cdev)); + if (channel->active_cmd) { + dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", + channel->active_cmd); + channel->active_cmd = NULL; + } + spin_unlock_irq(get_ccwdev_lock(cdev)); + + return rc; +} +EXPORT_SYMBOL_GPL(qeth_stop_channel); + static int qeth_halt_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; @@ -1747,6 +1775,8 @@ static int qeth_send_control_data(struct qeth_card *card, spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), (addr_t) iob, 0, 0, timeout); + if (!rc) + channel->active_cmd = iob; spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", @@ -2451,50 +2481,46 @@ static int qeth_mpc_initialize(struct qeth_card *card) rc = qeth_cm_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "2err%d", rc); - goto out_qdio; + return rc; } rc = qeth_cm_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "3err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "4err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_alloc_qdio_queues(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_qdio_establish(card); if (rc) { 
QETH_CARD_TEXT_(card, 2, "6err%d", rc); qeth_free_qdio_queues(card); - goto out_qdio; + return rc; } rc = qeth_qdio_activate(card); if (rc) { QETH_CARD_TEXT_(card, 2, "7err%d", rc); - goto out_qdio; + return rc; } rc = qeth_dm_act(card); if (rc) { QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out_qdio; + return rc; } return 0; -out_qdio: - qeth_qdio_clear_card(card, !IS_IQD(card)); - qdio_free(CARD_DDEV(card)); - return rc; } void qeth_print_status_message(struct qeth_card *card) @@ -2607,12 +2633,12 @@ static int qeth_init_input_buffer(struct qeth_card *card, buf->rx_skb = netdev_alloc_skb(card->dev, QETH_RX_PULL_LEN + ETH_HLEN); if (!buf->rx_skb) - return 1; + return -ENOMEM; } pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) - return 1; + return -ENOBUFS; /* * since the buffer is accessed only from the input_tasklet @@ -2644,10 +2670,15 @@ int qeth_init_qdio_queues(struct qeth_card *card) /* inbound queue */ qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); memset(&card->rx, 0, sizeof(struct qeth_rx)); + qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ - for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) - qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, @@ -3382,11 +3413,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - if (card->state != CARD_STATE_DOWN) { - rc = -1; - goto out; - } - qeth_free_qdio_queues(card); card->options.cq = cq; rc = 0; @@ -4634,12 +4660,12 @@ EXPORT_SYMBOL_GPL(qeth_vm_request_mac); static void qeth_determine_capabilities(struct qeth_card *card) { + struct qeth_channel *channel = &card->data; + struct ccw_device *ddev = channel->ccwdev; int rc; - struct ccw_device *ddev; int ddev_offline = 0; QETH_CARD_TEXT(card, 2, "detcapab"); - ddev = CARD_DDEV(card); if (!ddev->online) { ddev_offline = 1; rc = ccw_device_set_online(ddev); @@ -4678,7 +4704,7 @@ static void qeth_determine_capabilities(struct qeth_card *card) out_offline: if (ddev_offline == 1) - ccw_device_set_offline(ddev); + qeth_stop_channel(channel); out: return; } @@ -4694,10 +4720,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card, if (card->options.cq == QETH_CQ_ENABLED) { int offset = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - in_sbal_ptrs[offset + i] = (struct qdio_buffer *) - virt_to_phys(card->qdio.c_q->bufs[i].buffer); - } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[offset + i] = + card->qdio.c_q->bufs[i].buffer; queue_start_poll[card->qdio.no_in_queues - 1] = NULL; } @@ -4731,10 +4757,9 @@ static int qeth_qdio_establish(struct qeth_card *card) rc = -ENOMEM; goto out_free_qib_param; } - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - in_sbal_ptrs[i] = (struct qdio_buffer *) - virt_to_phys(card->qdio.in_q->bufs[i].buffer); - } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer; queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), GFP_KERNEL); @@ -4755,11 +4780,11 @@ static int qeth_qdio_establish(struct qeth_card *card) rc = -ENOMEM; goto out_free_queue_start_poll; } + for (i = 0, k = 0; i < 
card->qdio.no_out_queues; ++i) - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { - out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( - card->qdio.out_qs[i]->bufs[j]->buffer); - } + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++) + out_sbal_ptrs[k] = + card->qdio.out_qs[i]->bufs[j]->buffer; memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); @@ -4879,9 +4904,9 @@ retry: QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", CARD_DEVID(card)); rc = qeth_qdio_clear_card(card, !IS_IQD(card)); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - ccw_device_set_offline(CARD_RDEV(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); qdio_free(CARD_DDEV(card)); rc = ccw_device_set_online(CARD_RDEV(card)); if (rc) @@ -4972,10 +4997,8 @@ retriable: } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); - if (rc < 0) { + if (rc) QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out; - } } return 0; out: diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 6420b58cf42b..65038539b324 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -28,20 +28,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_TIMEOUT (10 * HZ) #define QETH_IPA_TIMEOUT (45 * HZ) -#define QETH_CLEAR_CHANNEL_PARM -10 -#define QETH_HALT_CHANNEL_PARM -11 - -static inline bool qeth_intparm_is_iob(unsigned long intparm) -{ - switch (intparm) { - case QETH_CLEAR_CHANNEL_PARM: - case QETH_HALT_CHANNEL_PARM: - case 0: - return false; - } - return true; -} - /*****************************************************************************/ /* IP Assist related definitions */ /*****************************************************************************/ @@ -912,6 +898,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define QETH_IDX_ACT_ERR_AUTH 0x1E #define QETH_IDX_ACT_ERR_AUTH_USER 0x20 +#define QETH_IDX_TERMINATE 0xc0 +#define QETH_IDX_TERMINATE_MASK 0xc0 +#define QETH_IDX_TERM_BAD_TRANSPORT 0x41 +#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6 + #define PDU_ENCAPSULATION(buffer) \ (buffer + *(buffer + (*(buffer + 0x0b)) + \ *(buffer + *(buffer + 0x0b) + 0x11) + 0x07)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 9f392497d570..4c3e222e5572 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -227,7 +227,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } else if (sysfs_streq(buf, "prio_queueing_vlan")) { if (IS_LAYER3(card)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out; } card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 4bccdce19b5a..92bace3b28fd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -287,14 +287,15 @@ static void qeth_l2_stop_card(struct qeth_card *card) card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); flush_workqueue(card->event_wq); card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + card->info.promisc_mode = 0; } static int qeth_l2_process_inbound_buffer(struct qeth_card *card, @@ -876,9 +877,9 @@ static 
int qeth_l2_set_online(struct ccwgroup_device *gdev) out_remove: qeth_l2_stop_card(card); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - ccw_device_set_offline(CARD_RDEV(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); qdio_free(CARD_DDEV(card)); mutex_unlock(&card->conf_mutex); @@ -909,9 +910,9 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, rtnl_unlock(); qeth_l2_stop_card(card); - rc = ccw_device_set_offline(CARD_DDEV(card)); - rc2 = ccw_device_set_offline(CARD_WDEV(card)); - rc3 = ccw_device_set_offline(CARD_RDEV(card)); + rc = qeth_stop_channel(&card->data); + rc2 = qeth_stop_channel(&card->write); + rc3 = qeth_stop_channel(&card->read); if (!rc) rc = (rc2) ? rc2 : rc3; if (rc) @@ -1845,15 +1846,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state) QETH_CARD_TEXT(card, 2, "vniccsch"); - /* do not change anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and enable/disable are supported */ if (!(card->options.vnicc.sup_chars & vnicc) || !(card->options.vnicc.set_char_sup & vnicc)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* set enable/disable command and store wanted characteristic */ if (state) { cmd = IPA_VNICC_ENABLE; @@ -1899,14 +1899,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state) QETH_CARD_TEXT(card, 2, "vniccgch"); - /* do not get anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic is supported */ if (!(card->options.vnicc.sup_chars & vnicc)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* if card is ready, query current VNICC state */ if (qeth_card_hw_is_reachable(card)) rc = qeth_l2_vnicc_query_chars(card); @@ -1924,15 +1923,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout) QETH_CARD_TEXT(card, 2, "vniccsto"); - /* do not change anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and set_timeout are supported */ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* do we need to do anything? */ if (card->options.vnicc.learning_timeout == timeout) return rc; @@ -1961,14 +1959,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) QETH_CARD_TEXT(card, 2, "vniccgto"); - /* do not get anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and get_timeout are supported */ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* if card is ready, get timeout. 
Otherwise, just return stored value */ *timeout = card->options.vnicc.learning_timeout; if (qeth_card_hw_is_reachable(card)) @@ -1982,8 +1980,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) /* check if VNICC is currently enabled */ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) { - /* if everything is turned off, VNICC is not active */ - if (!card->options.vnicc.cur_chars) + if (!card->options.vnicc.sup_chars) return false; /* default values are only OK if rx_bcast was not enabled by user * or the card is offline. @@ -2070,8 +2067,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card) /* enforce assumed default values and recover settings, if changed */ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, timeout); - chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; - chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; + /* Change chars, if necessary */ + chars_tmp = card->options.vnicc.wanted_chars ^ + card->options.vnicc.cur_chars; chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; for_each_set_bit(i, &chars_tmp, chars_len) { vnicc = BIT(i); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index e2bcb26105a3..fc7101ad84de 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -279,7 +279,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) return; mutex_lock(&card->sbp_lock); - if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { + if (!card->options.sbp.reflect_promisc && + card->options.sbp.role != QETH_SBP_ROLE_NONE) { /* Conditional to avoid spurious error messages */ qeth_bridgeport_setrole(card, card->options.sbp.role); /* Let the callback function refresh the stored role value. */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d7bfc7a0e4c0..a1c23e998f97 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1426,13 +1426,14 @@ static void qeth_l3_stop_card(struct qeth_card *card) card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); flush_workqueue(card->event_wq); + card->info.promisc_mode = 0; } static void qeth_l3_set_promisc_mode(struct qeth_card *card) @@ -2382,9 +2383,9 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev) return 0; out_remove: qeth_l3_stop_card(card); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - ccw_device_set_offline(CARD_RDEV(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); qdio_free(CARD_DDEV(card)); mutex_unlock(&card->conf_mutex); @@ -2420,9 +2421,10 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, call_netdevice_notifiers(NETDEV_REBOOT, card->dev); rtnl_unlock(); } - rc = ccw_device_set_offline(CARD_DDEV(card)); - rc2 = ccw_device_set_offline(CARD_WDEV(card)); - rc3 = ccw_device_set_offline(CARD_RDEV(card)); + + rc = qeth_stop_channel(&card->data); + rc2 = qeth_stop_channel(&card->write); + rc3 = qeth_stop_channel(&card->read); if (!rc) rc = (rc2) ? 
rc2 : rc3; if (rc) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 2f73b33c9347..333fd4619dc6 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -270,24 +270,36 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; char *tmp; - int rc; if (!card) return -EINVAL; if (!IS_IQD(card)) return -EPERM; - if (card->state != CARD_STATE_DOWN) - return -EPERM; - if (card->options.sniffer) - return -EPERM; - if (card->options.cq == QETH_CQ_NOTAVAILABLE) - return -EPERM; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + if (card->options.sniffer) { + rc = -EPERM; + goto out; + } + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -EPERM; + goto out; + } tmp = strsep((char **)&buf, "\n"); - if (strlen(tmp) > 8) - return -EINVAL; + if (strlen(tmp) > 8) { + rc = -EINVAL; + goto out; + } if (card->options.hsuid[0]) /* delete old ip address */ @@ -298,11 +310,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, card->options.hsuid[0] = '\0'; memcpy(card->dev->perm_addr, card->options.hsuid, 9); qeth_configure_cq(card, QETH_CQ_DISABLED); - return count; + goto out; } - if (qeth_configure_cq(card, QETH_CQ_ENABLED)) - return -EPERM; + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { + rc = -EPERM; + goto out; + } snprintf(card->options.hsuid, sizeof(card->options.hsuid), "%-8s", tmp); @@ -311,6 +325,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, rc = qeth_l3_modify_hsuid(card, true); +out: + mutex_unlock(&card->conf_mutex); return rc ? rc : count; } diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index dccdb41bed8c..1234294700c4 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); - if (q_head->fsf_command != FSF_QTCB_FCP_CMND) { - rec->pl_len = q_head->log_length; - zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, - rec->pl_len, "fsf_res", req->req_id); - } + rec->pl_len = q_head->log_length; + zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, + rec->pl_len, "fsf_res", req->req_id); debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 536426f25e86..d4401c768a0c 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -129,6 +129,9 @@ #define NCR5380_release_dma_irq(x) #endif +static unsigned int disconnect_mask = ~0; +module_param(disconnect_mask, int, 0444); + static int do_abort(struct Scsi_Host *); static void do_reset(struct Scsi_Host *); static void bus_reset_cleanup(struct Scsi_Host *); @@ -954,7 +957,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) int err; bool ret = true; bool can_disconnect = instance->irq != NO_IRQ && - cmd->cmnd[0] != REQUEST_SENSE; + cmd->cmnd[0] != REQUEST_SENSE && + (disconnect_mask & BIT(scmd_id(cmd))); NCR5380_dprint(NDEBUG_ARBITRATION, instance); dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index a9d40d3b90ef..4190a025381a 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c 
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, * At some speeds, we only support * ST transfers. */ - if ((syncrate->sxfr_u2 & ST_SXFR) != 0) + if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index e809493d0d06..a82b63a66635 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) atari_scsi_template.sg_tablesize = SG_ALL; } else { atari_scsi_template.can_queue = 1; - atari_scsi_template.sg_tablesize = SG_NONE; + atari_scsi_template.sg_tablesize = 1; } if (setup_can_queue > 0) @@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev) if (setup_cmd_per_lun > 0) atari_scsi_template.cmd_per_lun = setup_cmd_per_lun; - /* Leave sg_tablesize at 0 on a Falcon! */ - if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0) + /* Don't increase sg_tablesize on Falcon! */ + if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0) atari_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) { diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 29ab81df75c0..fbfce02e5b93 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost) rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), fcstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); - if (rc != BFA_STATUS_OK) + if (rc != BFA_STATUS_OK) { + kfree(fcstats); return NULL; + } wait_for_completion(&fcomp.comp); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index c5fa5f3b00e9..0b28d44d3573 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba) INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_active_list); INIT_LIST_HEAD(&hba->ep_destroy_list); - pci_dev_put(hba->pcidev); if (hba->regview) { pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } + pci_dev_put(hba->pcidev); bnx2i_free_mp_bdt(hba); bnx2i_release_free_cid_que(hba); iscsi_host_free(shost); diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 66e58f0a75dc..23cbe4cda760 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) struct fc_fdmi_port_name *port_name; uint8_t buf[64]; uint8_t *fc4_type; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", @@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) len = (uint32_t)(pld - (uint8_t *)cmd); /* Submit FDMI RPA request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* @@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) struct fc_fdmi_rpl *reg_pl; struct fs_fdmi_attrs *attrib_blk; uint8_t buf[64]; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR 
error:%x in processing fdmi dprt cmd\n", @@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) attrib_blk->numattrs = htonl(numattrs); /* Submit FDMI RHBA request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* @@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) void *cmd; struct fc_fdmi_port_name *port_name; uint32_t len; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", @@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) len += sizeof(*port_name); /* Submit FDMI request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /** diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 469d0bc9f5fe..00cf33573136 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev, return -EINVAL; /* Delete NPIV lnodes */ - csio_lnodes_exit(hw, 1); + csio_lnodes_exit(hw, 1); /* Block upper IOs */ csio_lnodes_block_request(hw); diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 3e17af8aedeb..2cd2761bd249 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); - cxgbi_ppm_release(cdev->cdev2ppm(cdev)); + if (cdev->cdev2ppm) + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) cxgbi_free_big_mem(cdev->pmap.port_csk); kfree(cdev); diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index 7bd376d95ed5..b02ac389e6c6 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a) if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR, sizeof(struct esas2r_sas_nvram))) { esas2r_hdebug("NVRAM read failed, using defaults"); + up(&a->nvram_semaphore); return false; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 80608b53897b..e3f5c91d5e4f 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 78af9cc2009b..522636e94628 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -688,26 
+688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) int vnic_dev_hang_notify(struct vnic_dev *vdev) { - u64 a0, a1; + u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); } int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { - u64 a0, a1; + u64 a[2] = {}; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; - err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait); if (err) return err; for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = ((u8 *)&a0)[i]; + mac_addr[i] = ((u8 *)&a)[i]; return 0; } @@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait); if (err) pr_err("Can't add addr [%pM], %d\n", addr, err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait); if (err) pr_err("Can't del addr [%pM], %d\n", addr, err); } diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 0847e682797b..849335d76cf6 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, dev = hisi_hba->dev; if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { - if (in_softirq()) + /* + * For I/Os from the upper layer, preemption may already be + * disabled in the I/O path. Calling schedule() with preemption + * disabled would trigger a scheduling bug, so check + * preemptible() before calling down().
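+ * (down() sleeps on hisi_hba->sem when the semaphore is contended, + * which ends up in schedule().)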
+ */ + if (!preemptible()) return -EINVAL; down(&hisi_hba->sem); @@ -2676,6 +2682,7 @@ int hisi_sas_probe(struct platform_device *pdev, err_out_register_ha: scsi_remove_host(shost); err_out_ha: + hisi_sas_debugfs_exit(hisi_hba); hisi_sas_free(hisi_hba); scsi_host_put(shost); return rc; @@ -3712,9 +3719,6 @@ static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba) int p, c, d; size_t sz; - hisi_hba->debugfs_dump_dentry = - debugfs_create_dir("dump", hisi_hba->debugfs_dir); - sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4; hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] = devm_kmalloc(dev, sz, GFP_KERNEL); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index cb8d087762db..723f51c822af 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -3022,11 +3022,6 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val); - mdelay(100); - reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); - hisi_sas_phy_write32(hisi_hba, phy_id, - SAS_PHY_BIST_CTRL, reg_val); - /* set the bist init value */ hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CODE, @@ -3035,6 +3030,11 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) SAS_PHY_BIST_CODE1, SAS_PHY_BIST_CODE1_INIT); + mdelay(100); + reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL, reg_val); + /* clear error bit */ mdelay(100); hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT); @@ -3259,6 +3259,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_out_register_ha: scsi_remove_host(shost); err_out_ha: + hisi_sas_debugfs_exit(hisi_hba); scsi_host_put(shost); err_out_regions: pci_release_regions(pdev); @@ -3422,6 +3423,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev) if (rc) { scsi_remove_host(shost); pci_disable_device(pdev); + return rc; } hisi_hba->hw->phys_init(hisi_hba); sas_resume_ha(sha); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 079c04bc448a..7a57b61f0340 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9947,6 +9947,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->max_devs_supported = ipr_max_devs; if (ioa_cfg->sis64) { + host->max_channel = IPR_MAX_SIS64_BUSES; host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_SIS64_DEVS) @@ -9955,6 +9956,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, + ((sizeof(struct ipr_config_table_entry64) * ioa_cfg->max_devs_supported))); } else { + host->max_channel = IPR_VSET_BUS; host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) @@ -9964,7 +9966,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, * ioa_cfg->max_devs_supported))); } - host->max_channel = IPR_VSET_BUS; host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; host->can_queue = ioa_cfg->max_cmds; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index a67baeb36d1f..b97aa9ac2ffe 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1300,6 +1300,7 @@ struct ipr_resource_entry { #define IPR_ARRAY_VIRTUAL_BUS 0x1 #define IPR_VSET_VIRTUAL_BUS 0x2 #define IPR_IOAFP_VIRTUAL_BUS 0x3 +#define IPR_MAX_SIS64_BUSES 0x4 #define IPR_GET_RES_PHYS_LOC(res) \ (((res)->bus << 24) | ((res)->target << 8) | (res)->lun) diff 
--git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 7bedbe877704..b5dd1caae5e9 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; unsigned int noreclaim_flag; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; int rc = 0; + if (!tcp_sw_conn->sock) { + iscsi_conn_printk(KERN_ERR, conn, + "Transport not bound to socket!\n"); + return -EINVAL; + } + noreclaim_flag = memalloc_noreclaim_save(); while (iscsi_sw_tcp_xmit_qlen(conn)) { @@ -879,6 +887,10 @@ free_host: static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_session *session = cls_session->dd_data; + + if (WARN_ON_ONCE(session->leadconn)) + return; iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_session_teardown(cls_session); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 9c5f7c9178c6..2b865c6423e2 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -628,6 +628,8 @@ redisc: } out: kref_put(&rdata->kref, fc_rport_destroy); + if (!IS_ERR(fp)) + fc_frame_free(fp); } /** diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index ebd47c0cf9e9..70b99c0e2e67 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); - spin_lock(&session->frwd_lock); + spin_lock_bh(&session->frwd_lock); task = (struct iscsi_task *)sc->SCp.ptr; if (!task) { /* @@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) done: if (task) task->last_timeout = jiffies; - spin_unlock(&session->frwd_lock); + spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "shutdown or nh"); return rc; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index f47b4b281b14..d7302c2052f9 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port) else dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; - } else { + } else if (port->oob_mode == SAS_OOB_MODE) { struct sas_identify_frame *id = (struct sas_identify_frame *) dev->frame_rcvd; dev->dev_type = id->dev_type; dev->iproto = id->initiator_bits; dev->tproto = id->target_bits; + } else { + /* If the oob mode is OOB_NOT_CONNECTED, the port is + * disconnected due to a race with PHY down.
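* Whatever is left in dev->frame_rcvd no longer describes an + * attached device in that case.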
We cannot + * continue to discover this port + */ + sas_put_device(dev); + pr_warn("Port %016llx is disconnected when discovering\n", + SAS_ADDR(port->attached_sas_addr)); + return -ENODEV; } sas_init_dev(dev); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 39a736b887b1..6c2b03415a2c 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -4489,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - rc = -ENOMEM; - goto job_error; - } - pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, @@ -4531,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); + + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { @@ -4579,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, return SLI_CONFIG_HANDLED; job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 25e86706e207..f81d1453eefb 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1477,33 +1477,35 @@ int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) { - char fwrev[FW_REV_STR_SIZE]; - int n; + char fwrev[FW_REV_STR_SIZE] = {0}; + char tmp[MAXHOSTNAMELEN] = {0}; + + memset(symbol, 0, size); + + scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + scnprintf(tmp, sizeof(tmp), " FV%s", fwrev); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " FV%s", fwrev); - if (size < n) - return n; - - n += scnprintf(symbol + n, size - n, " DV%s.", - lpfc_release_version); - if (size < n) - return n; - - n += scnprintf(symbol + n, size - n, " HN:%s.", - init_utsname()->nodename); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), " HN:%s", init_utsname()->nodename); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; /* Note :- OS name is "Linux" */ - n += scnprintf(symbol + n, size - n, " OS:%s", - init_utsname()->sysname); - return n; + scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname); + strlcat(symbol, tmp, size); + +buffer_done: + return strnlen(symbol, size); + } static uint32_t @@ -1868,6 +1870,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { case IOERR_SLI_ABORTED: + case IOERR_SLI_DOWN: + /* Driver aborted this IO. No retry as error + * is likely Offline->Online or some adapter + * error. Recovery will try again. 
+ */ + break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: case IOERR_ILLEGAL_FRAME: diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index d5303994bfd6..66f8867dd837 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_nodelist *ndlp; + char *mode; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } + /* If we don't send GFT_ID to Fabric, a PRLI error + * could be expected. + */ + if ((vport->fc_flag & FC_FABRIC) || + (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) + mode = KERN_ERR; + else + mode = KERN_INFO; + /* PRLI failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, mode, LOG_ELS, "2754 PRLI failure DID:%06X Status:x%x/x%x, " "data: x%x\n", ndlp->nlp_DID, irsp->ulpStatus, @@ -4430,7 +4440,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, mempool_free(mbox, phba->mbox_mem_pool); } out: - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); spin_unlock_irq(shost->host_lock); @@ -6455,7 +6465,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, uint32_t payload_len, length, nportid, *cmd; int rscn_cnt; int rscn_id = 0, hba_id = 0; - int i; + int i, tmo; pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; @@ -6561,6 +6571,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DEFERRED; + + /* Restart disctmo if its already running */ + if (vport->fc_flag & FC_DISC_TMO) { + tmo = ((phba->fc_ratov * 3) + 3); + mod_timer(&vport->fc_disctmo, + jiffies + msecs_to_jiffies(1000 * tmo)); + } if ((rscn_cnt < FC_MAX_HOLD_RSCN) && !(vport->fc_flag & FC_RSCN_DISCOVERY)) { vport->fc_flag |= FC_RSCN_MODE; @@ -7986,20 +8003,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) struct lpfc_sli_ring *pring; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; + unsigned long iflags = 0; lpfc_fabric_abort_vport(vport); + /* * For SLI3, only the hbalock is required. But SLI4 needs to coordinate * with the ring insert operation. Because lpfc_sli_issue_abort_iotag * ultimately grabs the ring_lock, the driver must splice the list into * a working list and release the locks before calling the abort. */ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); pring = lpfc_phba_elsring(phba); /* Bail out if we've no ELS wq, like in PCI error recovery case. */ if (unlikely(!pring)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); return; } @@ -8014,6 +8033,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) if (piocb->vport != vport) continue; + if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + /* On the ELS ring we can have ELS_REQUESTs or * GEN_REQUESTs waiting for a response. 
*/ @@ -8037,21 +8059,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); /* Abort each txcmpl iocb on aborted list and remove the dlist links. */ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); list_del_init(&piocb->dlist); lpfc_sli_issue_abort_iotag(phba, pring, piocb); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); } if (!list_empty(&abort_list)) lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "3387 abort list for txq not empty\n"); INIT_LIST_HEAD(&abort_list); - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); @@ -8091,7 +8113,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &abort_list, diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 749286acdc17..ee70d14e7a9d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba) if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) set_bit(LPFC_DATA_READY, &phba->data_flags); } else { - if (phba->link_state >= LPFC_LINK_UP || + /* Driver could have abort request completed in queue + * when link goes down. Allow for this transition. + */ + if (phba->link_state >= LPFC_LINK_DOWN || phba->link_flag & LS_MDS_LOOPBACK) { pring->flag &= ~LPFC_DEFERRED_RING_EVENT; lpfc_sli_handle_slow_ring_event(phba, pring, @@ -4840,6 +4843,44 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } } +/* + * Sets the mailbox completion handler to be used for the + * unreg_rpi command. The handler varies based on the state of + * the port and what will be happening to the rpi next. + */ +static void +lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) +{ + unsigned long iflags; + + if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + mbox->ctx_ndlp = ndlp; + mbox->mbox_cmpl = lpfc_nlp_logo_unreg; + + } else if (phba->sli_rev == LPFC_SLI_REV4 && + (!(vport->load_flag & FC_UNLOADING)) && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) && + (kref_read(&ndlp->kref) > 0)) { + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; + } else { + if (vport->load_flag & FC_UNLOADING) { + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_lock_irqsave(&vport->phba->ndlp_lock, + iflags); + ndlp->nlp_flag |= NLP_RELEASE_RPI; + spin_unlock_irqrestore(&vport->phba->ndlp_lock, + iflags); + } + lpfc_nlp_get(ndlp); + } + mbox->ctx_ndlp = ndlp; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } +} + /* * Free rpi associated with LPFC_NODELIST entry. 
* This routine is called from lpfc_freenode(), when we are removing @@ -4890,33 +4931,12 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_unreg_login(phba, vport->vpi, rpi, mbox); mbox->vport = vport; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { - mbox->ctx_ndlp = ndlp; - mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else { - if (phba->sli_rev == LPFC_SLI_REV4 && - (!(vport->load_flag & FC_UNLOADING)) && - (bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf) >= - LPFC_SLI_INTF_IF_TYPE_2) && - (kref_read(&ndlp->kref) > 0)) { - mbox->ctx_ndlp = lpfc_nlp_get(ndlp); - mbox->mbox_cmpl = - lpfc_sli4_unreg_rpi_cmpl_clr; - /* - * accept PLOGIs after unreg_rpi_cmpl - */ - acc_plogi = 0; - } else if (vport->load_flag & FC_UNLOADING) { - mbox->ctx_ndlp = NULL; - mbox->mbox_cmpl = - lpfc_sli_def_mbox_cmpl; - } else { - mbox->ctx_ndlp = ndlp; - mbox->mbox_cmpl = - lpfc_sli_def_mbox_cmpl; - } - } + lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); + if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) + /* + * accept PLOGIs after unreg_rpi_cmpl + */ + acc_plogi = 0; if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!(vport->fc_flag & FC_OFFLINE_MODE))) @@ -5057,6 +5077,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mb, *nextmb; struct lpfc_dmabuf *mp; + unsigned long iflags; /* Cleanup node for NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -5138,8 +5159,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_cleanup_vports_rrqs(vport, ndlp); if (phba->sli_rev == LPFC_SLI_REV4) ndlp->nlp_flag |= NLP_RELEASE_RPI; - lpfc_unreg_rpi(vport, ndlp); - + if (!lpfc_unreg_rpi(vport, ndlp)) { + /* Clean up unregistered and non freed rpis */ + if ((ndlp->nlp_flag & NLP_RELEASE_RPI) && + !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) { + lpfc_sli4_free_rpi(vport->phba, + ndlp->nlp_rpi); + spin_lock_irqsave(&vport->phba->ndlp_lock, + iflags); + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + spin_unlock_irqrestore(&vport->phba->ndlp_lock, + iflags); + } + } return 0; } @@ -5405,9 +5438,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) /* If we've already received a PLOGI from this NPort * we don't need to try to discover it again. */ - if (ndlp->nlp_flag & NLP_RCV_PLOGI) + if (ndlp->nlp_flag & NLP_RCV_PLOGI && + !(ndlp->nlp_type & + (NLP_FCP_TARGET | NLP_NVME_TARGET))) return NULL; + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index fc6e4546d738..696171382558 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -484,8 +484,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * single discovery thread, this will cause a huge delay in * discovery. Also this will cause multiple state machines * running in parallel for this node. + * This only applies to a fabric environment. 
*/ - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { + if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && + (vport->fc_flag & FC_FABRIC)) { /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index a227e36cbdc2..8e0f03ef346b 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1976,6 +1976,8 @@ out_unlock: /* Declare and initialization an instance of the FC NVME template. */ static struct nvme_fc_port_template lpfc_nvme_template = { + .module = THIS_MODULE, + /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 6822cd9ff8f1..40706cb842fd 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -526,7 +526,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, &qp->lpfc_abts_io_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { list_del_init(&psb->list); - psb->exch_busy = 0; + psb->flags &= ~LPFC_SBUF_XBUSY; psb->status = IOSTAT_SUCCESS; if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) { qp->abts_nvme_io_bufs--; @@ -566,7 +566,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, if (iocbq->sli4_xritag != xri) continue; psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); - psb->exch_busy = 0; + psb->flags &= ~LPFC_SBUF_XBUSY; spin_unlock_irqrestore(&phba->hbalock, iflag); if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); @@ -719,7 +719,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, iocb->ulpLe = 1; iocb->ulpClass = CLASS3; - if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { atomic_inc(&ndlp->cmd_pending); lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; } @@ -786,7 +786,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) psb->prot_seg_cnt = 0; qp = psb->hdwq; - if (psb->exch_busy) { + if (psb->flags & LPFC_SBUF_XBUSY) { spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); psb->pCmd = NULL; list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); @@ -3835,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); lpfc_cmd->status = pIocbOut->iocb.ulpStatus; /* pick up SLI4 exhange busy status from HBA */ - lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; + if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->prot_data_type) { @@ -4843,20 +4846,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0); } - /* no longer need the lock after this point */ - spin_unlock_irqrestore(&phba->hbalock, flags); if (ret_val == IOCB_ERROR) { /* Indicate the IO is not being aborted by the driver. 
*/ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; lpfc_cmd->waitq = NULL; spin_unlock(&lpfc_cmd->buf_lock); + spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; } + /* no longer need the lock after this point */ spin_unlock(&lpfc_cmd->buf_lock); + spin_unlock_irqrestore(&phba->hbalock, flags); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_handle_fast_ring_event(phba, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 614f78dddafe..e2cec1f6e659 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2526,6 +2526,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } else { __lpfc_sli_rpi_release(vport, ndlp); } + if (vport->load_flag & FC_UNLOADING) + lpfc_nlp_put(ndlp); pmb->ctx_ndlp = NULL; } } @@ -11050,9 +11052,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4]); spin_unlock_irq(&phba->hbalock); - if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && - irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) - lpfc_sli_release_iocbq(phba, abort_iocb); } release_iocb: lpfc_sli_release_iocbq(phba, cmdiocb); @@ -11736,7 +11735,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, cur_iocbq); - lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; + if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY)) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; } pdone_q = cmdiocbq->context_un.wait_queue; @@ -13158,13 +13160,19 @@ send_current_mbox: phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; /* Setting active mailbox pointer need to be in sync to flag clear */ phba->sli.mbox_active = NULL; + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Wake up worker thread to post the next pending mailbox command */ lpfc_worker_wake_up(phba); + return workposted; + out_no_mqe_complete: + spin_lock_irqsave(&phba->hbalock, iflags); if (bf_get(lpfc_trailer_consumed, mcqe)) lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); - return workposted; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return false; } /** @@ -18181,6 +18189,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) static void __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) { + /* + * if the rpi value indicates a prior unreg has already + * been done, skip the unreg. 
+ */ + if (rpi == LPFC_RPI_ALLOC_ERROR) + return; + if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { phba->sli4_hba.rpi_count--; phba->sli4_hba.max_cfg_param.rpi_used--; @@ -20093,6 +20108,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; + if (phba->cfg_xpsgl && !phba->nvmet_support && + !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + + if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); + if (phba->cfg_xri_rebalancing) { if (lpfc_ncmd->expedite) { /* Return to expedite pool */ @@ -20157,13 +20179,6 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } - - if (phba->cfg_xpsgl && !phba->nvmet_support && - !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) - lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); - - if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) - lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); } /** @@ -20399,8 +20414,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) struct sli4_hybrid_sgl *allocated_sgl = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->sgl_list; + unsigned long iflags; - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(buf_list))) { /* break off 1 chunk from the sgl_list */ @@ -20412,9 +20428,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) } } else { /* allocate more */ - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, - cpu_to_node(smp_processor_id())); + cpu_to_node(hdwq->io_wq->chann)); if (!tmp) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "8353 error kmalloc memory for HDWQ " @@ -20434,7 +20450,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) return NULL; } - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list); } @@ -20442,7 +20458,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) struct sli4_hybrid_sgl, list_node); - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return allocated_sgl; } @@ -20466,8 +20482,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) struct sli4_hybrid_sgl *tmp = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->sgl_list; + unsigned long iflags; - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) { list_for_each_entry_safe(list_entry, tmp, @@ -20480,7 +20497,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) rc = -EINVAL; } - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return rc; } @@ -20501,8 +20518,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, struct list_head *buf_list = &hdwq->sgl_list; struct sli4_hybrid_sgl *list_entry = NULL; struct sli4_hybrid_sgl *tmp = NULL; + unsigned long iflags; - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); /* Free sgl pool */ list_for_each_entry_safe(list_entry, tmp, @@ -20514,7 +20532,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, 
kfree(list_entry); } - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); } /** @@ -20538,8 +20556,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct fcp_cmd_rsp_buf *allocated_buf = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + unsigned long iflags; - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(buf_list))) { /* break off 1 chunk from the list */ @@ -20552,9 +20571,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, } } else { /* allocate more */ - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, - cpu_to_node(smp_processor_id())); + cpu_to_node(hdwq->io_wq->chann)); if (!tmp) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "8355 error kmalloc memory for HDWQ " @@ -20579,7 +20598,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + sizeof(struct fcp_cmnd)); - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list); } @@ -20587,7 +20606,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct fcp_cmd_rsp_buf, list_node); - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return allocated_buf; } @@ -20612,8 +20631,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct fcp_cmd_rsp_buf *tmp = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + unsigned long iflags; - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) { list_for_each_entry_safe(list_entry, tmp, @@ -20626,7 +20646,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, rc = -EINVAL; } - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return rc; } @@ -20647,8 +20667,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; struct fcp_cmd_rsp_buf *list_entry = NULL; struct fcp_cmd_rsp_buf *tmp = NULL; + unsigned long iflags; - spin_lock_irq(&hdwq->hdwq_lock); + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); /* Free cmd_rsp buf pool */ list_for_each_entry_safe(list_entry, tmp, @@ -20661,5 +20682,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, kfree(list_entry); } - spin_unlock_irq(&hdwq->hdwq_lock); + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 37fbcb46387e..7bcf922a8be2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -384,14 +384,13 @@ struct lpfc_io_buf { struct lpfc_nodelist *ndlp; uint32_t timeout; - uint16_t flags; /* TBD convert exch_busy to flags */ + uint16_t flags; #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ #define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ /* External DIF device IO conversions */ #define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */ #define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */ #define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */ - uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. 
*/ diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 9c5566217ef6..b5dde9d0d054 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev) mac_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) mac_scsi_template.cmd_per_lun = setup_cmd_per_lun; - if (setup_sg_tablesize >= 0) + if (setup_sg_tablesize > 0) mac_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 42cf38c1ea99..0cbe6740e0c9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4392,7 +4392,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) { if (instance->adapter_type == MFI_SERIES) return KILL_ADAPTER; else if (instance->unload || - test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) + test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, + &instance->reset_flags)) return IGNORE_TIMEOUT; else return INITIATE_OCR; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index e301458bcbae..89daa9815d4e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -594,7 +594,8 @@ retry_alloc: fusion->io_request_frames = dma_pool_alloc(fusion->io_request_frames_pool, - GFP_KERNEL, &fusion->io_request_frames_phys); + GFP_KERNEL | __GFP_NOWARN, + &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) { instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT; @@ -632,7 +633,7 @@ retry_alloc: fusion->io_request_frames = dma_pool_alloc(fusion->io_request_frames_pool, - GFP_KERNEL, + GFP_KERNEL | __GFP_NOWARN, &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { @@ -3105,7 +3106,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; /* If FW supports PD sequence number */ - if (instance->support_seqnum_jbod_fp) { + if (!reset_devices && instance->support_seqnum_jbod_fp) { if (instance->use_seqnum_jbod_fp && instance->pd_list[pd_index].driveType == TYPE_DISK) { @@ -4847,6 +4848,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); @@ -5046,7 +5048,7 @@ kill_hba: instance->skip_heartbeat_timer_del = 1; retval = FAILED; out: - clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); mutex_unlock(&instance->reset_mutex); return retval; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index c013c80fe4e6..dd2e37e40d6b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 +#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1 #define RAID_1_PEER_CMDS 2 #define JBOD_MAPS_COUNT 2 
#define MEGASAS_REDUCE_QD_COUNT 64 diff --git a/drivers/scsi/mpt3sas/GPL_license.txt b/drivers/scsi/mpt3sas/GPL_license.txt new file mode 100755 index 000000000000..3912109b5cd6 --- /dev/null +++ b/drivers/scsi/mpt3sas/GPL_license.txt @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. 
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
old mode 100644
new mode 100755
index a072187875df..86564232ec16
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -2,8 +2,10 @@
 # Kernel configuration file for the MPT3SAS
 #
 # This code is based on drivers/scsi/mpt3sas/Kconfig
-# Copyright (C) 2012-2014 LSI Corporation
-# (mailto:DL-MPTFusionLinux@lsi.com)
+# Copyright (C) 2012-2018 LSI Corporation
+# Copyright (C) 2013-2018 Avago Technologies
+# Copyright (C) 2013-2018 Broadcom Inc.
+# (mailto:MPT-FusionLinux.pdl@broadcom.com)
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
@@ -45,21 +47,20 @@ config SCSI_MPT3SAS
        depends on PCI && SCSI
        select SCSI_SAS_ATTRS
        select RAID_ATTRS
-       select IRQ_POLL
        ---help---
        This driver supports PCI-Express SAS 12Gb/s Host Adapters.

 config SCSI_MPT2SAS_MAX_SGE
-       int "LSI MPT Fusion SAS 2.0 Max number of SG Entries (16 - 256)"
-       depends on PCI && SCSI && SCSI_MPT3SAS
-       default "128"
-       range 16 256
-       ---help---
-       This option allows you to specify the maximum number of scatter-
-       gather entries per I/O. The driver default is 128, which matches
-       MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
-       can be 256. However, it may decreased down to 16. Decreasing this
-       parameter will reduce memory requirements on a per controller instance.
+       int "LSI MPT Fusion SAS 2.0 Max number of SG Entries (16 - 256)"
+       depends on PCI && SCSI && SCSI_MPT3SAS
+       default "128"
+       range 16 256
+       ---help---
+       This option allows you to specify the maximum number of scatter-
+       gather entries per I/O. The driver default is 128, which matches
+       MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
+       can be 256. However, it may be decreased down to 16. Decreasing this
+       parameter will reduce memory requirements on a per controller instance.

 config SCSI_MPT3SAS_MAX_SGE
        int "LSI MPT Fusion SAS 3.0 Max number of SG Entries (16 - 256)"
@@ -72,12 +73,3 @@ config SCSI_MPT3SAS_MAX_SGE
        MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
        can be 256. However, it may be decreased down to 16. Decreasing this
        parameter will reduce memory requirements on a per controller instance.
-
-config SCSI_MPT2SAS
-       tristate "Legacy MPT2SAS config option"
-       default n
-       select SCSI_MPT3SAS
-       depends on PCI && SCSI
-       ---help---
-       Dummy config option for backwards compatibility: configure the MPT3SAS
-       driver instead.
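The two MAX_SGE options above set a compile-time ceiling on scatter-gather entries per I/O (range 16 - 256, default 128). As a rough illustration of the clamp behaviour such a limit implies, here is a minimal standalone C sketch; the macro and function names are illustrative stand-ins, not the driver's actual symbols:

    #include <stdio.h>

    /* Stand-in for the Kconfig ceiling (range 16 - 256, default 128). */
    #define MAX_SGE_CONFIG 128

    /* Clamp a requested scatter-gather entry count into the legal range. */
    static int clamp_sg_entries(int requested)
    {
            if (requested < 16)
                    return 16;
            if (requested > MAX_SGE_CONFIG)
                    return MAX_SGE_CONFIG;
            return requested;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   clamp_sg_entries(8),    /* -> 16  */
                   clamp_sg_entries(100),  /* -> 100 */
                   clamp_sg_entries(512)); /* -> 128 */
            return 0;
    }

Lowering the ceiling trades the largest mappable I/O against per-controller memory, which is exactly the tradeoff the help text describes.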
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
old mode 100644
new mode 100755
index 84fb3fbdb0ca..4748143b14c5
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,10 +1,15 @@
-# SPDX-License-Identifier: GPL-2.0
 # mpt3sas makefile
+# uncomment when enabling firmware UART 'iop reset -1' option
+#EXTRA_CFLAGS += -DDISABLE_RESET_SUPPORT
+EXTRA_CFLAGS := -DCPQ_CIM
+
+# comment out the next line so the driver can compile on kernels without mpt3sas
 obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
 mpt3sas-y += mpt3sas_base.o \
        mpt3sas_config.o \
+       mpt3sas_warpdrive.o \
        mpt3sas_scsih.o \
        mpt3sas_transport.o \
        mpt3sas_ctl.o \
        mpt3sas_trigger_diag.o \
-       mpt3sas_warpdrive.o
+
diff --git a/drivers/scsi/mpt3sas/clean.sh b/drivers/scsi/mpt3sas/clean.sh
new file mode 100755
index 000000000000..3c8fb64bd0c7
--- /dev/null
+++ b/drivers/scsi/mpt3sas/clean.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# DISCLAIMER OF LIABILITY
+# THIS IS A SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#
+# clean.sh : a script for cleaning the driver source tree.
+#
+
+rm -f .*.o.*
+rm -f .*.ko.*
+rm -f *.ko
+rm -f *.o
+rm -f *~
+#rm -f csmi/*~
+rm -f *.mod.*
+rm -fr .tmp_versions
+rm -fr Modules.symvers
+rm -fr Module.symvers
+rm -f output.log
+rm -f tags
+dmesg -c > /dev/null
+
+exit 0
diff --git a/drivers/scsi/mpt3sas/compile.sh b/drivers/scsi/mpt3sas/compile.sh
new file mode 100755
index 000000000000..f0bcf6b28742
--- /dev/null
+++ b/drivers/scsi/mpt3sas/compile.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# DISCLAIMER OF LIABILITY
+# THIS IS A SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#
+# compile.sh: a script for compiling the driver source.
+#
+
+# enable scripting debug
+# shopt -o -s xtrace
+
+blacklist_file="/etc/modprobe.d/blacklist"
+blacklist_enclosure()
+{
+        if [ -f /etc/SuSE-release ] && [ `grep -c "VERSION = 11" /etc/SuSE-release` -gt 0 ]; then
+                if [ -f ${blacklist_file} ]; then
+                        if [ `grep -c "blacklist enclosure" ${blacklist_file}` -eq 0 ]; then
+                                echo "blacklist enclosure" >> ${blacklist_file};
+                        fi;
+                        if [ `grep -c "blacklist ses" ${blacklist_file}` -eq 0 ]; then
+                                echo "blacklist ses" >> ${blacklist_file};
+                        fi;
+                fi;
+        fi;
+}
+
+fw_mpi2="$PWD/../../fwapi/mpi2"
+if [ ! -d mpi ] && [ ! -L mpi ] && [ -d $fw_mpi2 ]; then
+        ln -s ../../fwapi/mpi2 mpi
+elif [ ! -d mpi ] && [ !
-d $fw_mpi2 ]; then + echo "MPI header files from $fw_mpi2 are missing" + exit +fi + +# Sparse - a semantic parser, provides a set of annotations designed to convey +# semantic information about types, such as what address space pointers point +# to, or what locks a function acquires or releases. You can obtain the latest +# sparse code by typing the following command: +# git clone git://git.kernel.org/pub/scm/devel/sparse/sparse.git + +# Set SPARSE to non-zero value to enable sparse checking +# kernel="2.6.18-8.el5" +# kernel="2.6.18-53.el5" +# kernel="2.6.27.15-2-default" + kernel=`uname -r` +SPARSE=0 + +blacklist_enclosure +rm -fr output.log +./clean.sh +ctags -R * + +if [ ${SPARSE} == 0 ] ; then + make -j4 CONFIG_DEBUG_INFO=1 -C /lib/modules/${kernel}/build \ + M=$PWD 2>&1 | tee output.log +else +# This turns on ENDIAN checking: + make C=2 CF="-D__CHECK_ENDIAN__" CONFIG_DEBUG_INFO=1 -C \ + /lib/modules/${kernel}/build M=$PWD 2>&1 | tee output.log +fi; + +exit 0 + diff --git a/drivers/scsi/mpt3sas/csmi/csmisas.c b/drivers/scsi/mpt3sas/csmi/csmisas.c new file mode 100755 index 000000000000..bb3f8b9a7358 --- /dev/null +++ b/drivers/scsi/mpt3sas/csmi/csmisas.c @@ -0,0 +1,3213 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * This code is based on drivers/scsi/mpt2sas/mpt2_base.c + * Copyright (c) 2007-2018 LSI Corporation + * Copyright (c) 2013-2018 Avago Technologies + * Copyright (c) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi status
+ * @log_info: log information
+ * @transfer_length: data length transferred when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+        u16 handle;
+        u8 is_raid;
+        enum dma_data_direction dir;
+        u32 data_length;
+        dma_addr_t data_dma;
+        u8 sense[SCSI_SENSE_BUFFERSIZE];
+        u32 lun;
+        u8 cdb_length;
+        u8 cdb[32];
+        u8 timeout;
+        u8 VF_ID;
+        u8 VP_ID;
+        u8 valid_reply;
+        /* the following bits are only valid when 'valid_reply = 1' */
+        u32 sense_length;
+        u16 ioc_status;
+        u8 scsi_state;
+        u8 scsi_status;
+        u32 log_info;
+        u32 transfer_length;
+};
+
+/**
+ * _csmisas_sas_device_find_by_id - sas device search
+ * @ioc: per adapter object
+ * @id: target id assigned by the OS
+ * @channel: channel id assigned by the OS (but needs to be zero)
+ * @sas_device: the sas_device object
+ *
+ * This searches for the sas_device based on id, then returns the
+ * sas_device object in parameters.
+ *
+ * Returns zero if successful
+ */
+static int
+_csmisas_sas_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel,
+        struct _sas_device *sas_device)
+{
+        unsigned long flags;
+        struct _sas_device *a;
+        int rc = -ENODEV;
+
+        if (channel != 0) /* only channel = 0 support */
+                return rc;
+
+        spin_lock_irqsave(&ioc->sas_device_lock, flags);
+        list_for_each_entry(a, &ioc->sas_device_list, list) {
+                if (a->id != id)
+                        continue;
+                memcpy(sas_device, a, sizeof(struct _sas_device));
+                rc = 0;
+                goto out;
+        }
+
+ out:
+        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+        return rc;
+}
+
+/**
+ * _csmisas_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * @sas_device: the sas_device object
+ *
+ * This searches for the sas_device based on handle, then returns the
+ * sas_device object in parameters.
+ *
+ * Returns zero if successful
+ */
+static int
+_csmisas_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+        struct _sas_device *sas_device)
+{
+        unsigned long flags;
+        struct _sas_device *a;
+        int rc = -ENODEV;
+
+        spin_lock_irqsave(&ioc->sas_device_lock, flags);
+        list_for_each_entry(a, &ioc->sas_device_list, list) {
+                if (a->handle != handle)
+                        continue;
+                memcpy(sas_device, a, sizeof(struct _sas_device));
+                rc = 0;
+                goto out;
+        }
+
+ out:
+        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+        return rc;
+}
+
+/**
+ * _csmisas_scsih_sas_device_find_by_sas_address - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @sas_device: the sas_device object
+ *
+ * This searches for the sas_device based on sas address, then returns the
+ * sas_device object in parameters.
+ *
+ * Returns zero if successful
+ */
+static int
+_csmisas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address, struct _sas_device *sas_device)
+{
+	unsigned long flags;
+	struct _sas_device *a;
+	int rc = -ENODEV;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	list_for_each_entry(a, &ioc->sas_device_list, list) {
+		if (a->sas_address != sas_address)
+			continue;
+		memcpy(sas_device, a, sizeof(struct _sas_device));
+		rc = 0;
+		goto out;
+	}
+
+ out:
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	return rc;
+}
+
+/**
+ * _csmisas_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: device handle (assigned by firmware)
+ * @raid_device: the raid_device object
+ *
+ * This searches for a raid_device based on handle, then returns a copy of
+ * the matching raid_device object through the @raid_device parameter.
+ *
+ * Returns zero if successful
+ */
+static int
+_csmisas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	struct _raid_device *raid_device)
+{
+	struct _raid_device *r;
+	unsigned long flags;
+	int rc = -ENODEV;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	list_for_each_entry(r, &ioc->raid_device_list, list) {
+		if (r->handle != handle)
+			continue;
+		memcpy(raid_device, r, sizeof(struct _raid_device));
+		rc = 0;
+		goto out;
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+	return rc;
+}
+
+/**
+ * _csmisas_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: target id assigned by the OS
+ * @channel: channel id assigned by the OS (must be one)
+ * @raid_device: the raid_device object
+ *
+ * This searches for a raid_device based on id, then returns a copy of the
+ * matching raid_device object through the @raid_device parameter.
+ *
+ * Returns zero if successful
+ */
+static int
+_csmisas_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id,
+	int channel, struct _raid_device *raid_device)
+{
+	struct _raid_device *r;
+	unsigned long flags;
+	int rc = -ENODEV;
+
+	if (channel != 1) /* only channel = 1 is supported */
+		return rc;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	list_for_each_entry(r, &ioc->raid_device_list, list) {
+		if (r->id != id)
+			continue;
+		memcpy(raid_device, r, sizeof(struct _raid_device));
+		rc = 0;
+		goto out;
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+	return rc;
+}
+
+/**
+ * _csmisas_scsi_lookup_find_by_tag - pending IO lookup
+ * @ioc: per adapter object
+ * @queue_tag: tag assigned by the OS
+ * @task_mid: the smid that the driver assigned to this IO
+ * @scmd: desired scmd
+ *
+ * This searches for a pending IO based on its tag, then returns the smid
+ * and scmd pointer through the @task_mid and @scmd parameters.
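+ *
+ * A sketch of the intended use from a task-management path (assuming the
+ * caller checks the non-zero "not found" return before using the outputs):
+ *
+ *	u16 smid;
+ *	struct scsi_cmnd *scmd;
+ *
+ *	if (!_csmisas_scsi_lookup_find_by_tag(ioc, queue_tag, &smid, &scmd))
+ *		... abort the IO tracked by smid ...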
+ *
+ * Returns zero if successful
+ */
+static int
+_csmisas_scsi_lookup_find_by_tag(struct MPT3SAS_ADAPTER *ioc, u32 queue_tag,
+	u16 *task_mid, struct scsi_cmnd **scmd)
+{
+	int rc = 1;
+	int smid;
+	struct scsi_cmnd *cmd;
+	struct scsiio_tracker *st;
+
+	*task_mid = 0;
+	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+		cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+		if (!cmd)
+			continue;
+		st = mpt3sas_base_scsi_cmd_priv(cmd);
+		if (!st || st->smid == 0)
+			continue;
+		if (cmd->tag == queue_tag) {
+			*task_mid = st->smid;
+			*scmd = st->scmd;
+			rc = 0;
+			goto out;
+		}
+	}
+ out:
+	return rc;
+}
+
+/**
+ * _map_sas_status_to_csmi - Conversion for Connection Status
+ * @mpi_sas_status: SAS status returned by the firmware
+ *
+ * Returns the converted connection status
+ */
+static u8
+_map_sas_status_to_csmi(u8 mpi_sas_status)
+{
+	u8 csmi_connect_status;
+
+	switch (mpi_sas_status) {
+
+	case MPI2_SASSTATUS_SUCCESS:
+		csmi_connect_status = CSMI_SAS_OPEN_ACCEPT;
+		break;
+
+	case MPI2_SASSTATUS_UTC_BAD_DEST:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_BAD_DESTINATION;
+		break;
+
+	case MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_RATE_NOT_SUPPORTED;
+		break;
+
+	case MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED:
+		csmi_connect_status =
+		    CSMI_SAS_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED;
+		break;
+
+	case MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_STP_RESOURCES_BUSY;
+		break;
+
+	case MPI2_SASSTATUS_UTC_WRONG_DESTINATION:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_WRONG_DESTINATION;
+		break;
+
+	case MPI2_SASSTATUS_SDSF_NAK_RECEIVED:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_RETRY;
+		break;
+
+	case MPI2_SASSTATUS_SDSF_CONNECTION_FAILED:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_PATHWAY_BLOCKED;
+		break;
+
+	case MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_NO_DESTINATION;
+		break;
+
+	case MPI2_SASSTATUS_UNKNOWN_ERROR:
+	case MPI2_SASSTATUS_INVALID_FRAME:
+	case MPI2_SASSTATUS_UTC_BREAK_RECEIVED:
+	case MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST:
+	case MPI2_SASSTATUS_SHORT_INFORMATION_UNIT:
+	case MPI2_SASSTATUS_LONG_INFORMATION_UNIT:
+	case MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA:
+	case MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR:
+	case MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED:
+	case MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH:
+	case MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA:
+	case MPI2_SASSTATUS_DATA_OFFSET_ERROR:
+	default:
+		csmi_connect_status = CSMI_SAS_OPEN_REJECT_RESERVE_STOP;
+		break;
+	}
+
+	return csmi_connect_status;
+}
+
+/**
+ * _csmi_valid_phy - Verify the phy provided is a valid phy and, if not,
+ * set the ioctl return code appropriately
+ * @ioc: per adapter object
+ * @IoctlHeader: pointer to the ioctl header to update if needed
+ * @bPhyIdentifier: phy id to check
+ *
+ * Return: 0 if successful
+ *	1 if not successful
+ *
+ */
+static u8
+_csmi_valid_phy(struct MPT3SAS_ADAPTER *ioc, IOCTL_HEADER *IoctlHeader,
+	u8 bPhyIdentifier)
+{
+	u8 rc = 0;
+
+	if (bPhyIdentifier >= ioc->sas_hba.num_phys) {
+		IoctlHeader->ReturnCode = CSMI_SAS_PHY_DOES_NOT_EXIST;
+		dcsmisasprintk(ioc, printk(KERN_WARNING
+		    ": bPhyIdentifier >= ioc->sas_hba.num_phys\n"));
+		rc = 1;
+	}
+	return rc;
+}
+
+/**
+ * _csmi_valid_port_for_smp_cmd - Verify that the port provided is a valid
+ * port and, if not, set the ioctl return code appropriately.
+ * Also verifies that the phy/port values are set appropriately for CSMI
+ * @ioc: per adapter object
+ * @IoctlHeader: pointer to the ioctl header to update if needed
+ * @bPhyIdentifier: phy id to check
+ * @bPortIdentifier: port id to check
+ *
+ * Return: 0 if successful
+ *	1 if not successful
+ *
+ */
+static u8
+_csmi_valid_port_for_smp_cmd(struct MPT3SAS_ADAPTER *ioc,
+	IOCTL_HEADER *IoctlHeader, u8 bPhyIdentifier,
+	u8 bPortIdentifier)
+{
+
+	/* Neither a phy nor a port has been selected.
+	 */
+	if ((bPhyIdentifier == CSMI_SAS_USE_PORT_IDENTIFIER) &&
+	    (bPortIdentifier == CSMI_SAS_IGNORE_PORT)) {
+		IoctlHeader->ReturnCode = CSMI_SAS_SELECT_PHY_OR_PORT;
+		dcsmisasprintk(ioc, printk(KERN_ERR
+		    "%s::%s() @%d - incorrect bPhyIdentifier "
+		    "and bPortIdentifier!\n",
+		    __FILE__, __func__, __LINE__));
+		return 1;
+	}
+
+	if ((bPhyIdentifier != CSMI_SAS_USE_PORT_IDENTIFIER) ||
+	    (bPortIdentifier == CSMI_SAS_IGNORE_PORT)) {
+		IoctlHeader->ReturnCode = CSMI_SAS_PHY_CANNOT_BE_SELECTED;
+		dcsmisasprintk(ioc, printk(KERN_ERR
+		    "%s::%s() @%d - phy selection for SMPs is not allowed!\n",
+		    __FILE__, __func__, __LINE__));
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * _csmi_valid_port_for_cmd - Verify that the port provided is a valid port
+ * and, if not, set the ioctl return code appropriately. Also verifies that
+ * the phy/port values are set appropriately for CSMI
+ * @ioc: per adapter object
+ * @IoctlHeader: pointer to the ioctl header to update if needed
+ * @bPhyIdentifier: phy id to check
+ * @bPortIdentifier: port id to check
+ *
+ * Return: 0 if successful
+ *	1 if not successful
+ *
+ */
+static u8
+_csmi_valid_port_for_cmd(struct MPT3SAS_ADAPTER *ioc, IOCTL_HEADER *IoctlHeader,
+	u8 bPhyIdentifier, u8 bPortIdentifier)
+{
+	/* Neither a phy nor a port has been selected.
+	 */
+	if ((bPhyIdentifier == CSMI_SAS_USE_PORT_IDENTIFIER) &&
+	    (bPortIdentifier == CSMI_SAS_IGNORE_PORT)) {
+		IoctlHeader->ReturnCode = CSMI_SAS_SELECT_PHY_OR_PORT;
+		dcsmisasprintk(ioc, printk(KERN_ERR
+		    "%s::%s() @%d - incorrect bPhyIdentifier "
+		    "and bPortIdentifier!\n",
+		    __FILE__, __func__, __LINE__));
+		return 1;
+	}
+
+	return _csmi_valid_phy(ioc, IoctlHeader, bPhyIdentifier);
+}
+
+/**
+ * Prototype Routine for the CSMI Sas Get Driver Info command.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+_csmisas_get_driver_info(struct MPT3SAS_ADAPTER *ioc,
+	CSMI_SAS_DRIVER_INFO_BUFFER *karg)
+{
+	int rc = 0;
+
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name,
+	    __func__));
+
+	/* Fill in the data and return the structure to the calling
+	 * program
+	 */
+	memcpy(karg->Information.szName, MPT2SAS_DEV_NAME,
+	    sizeof(MPT2SAS_DEV_NAME));
+	sprintf(karg->Information.szDescription, "%s %s", MPT2SAS_DESCRIPTION,
+	    MPT2SAS_DRIVER_VERSION);
+
+	karg->Information.usMajorRevision = MPT2SAS_MAJOR_VERSION;
+	karg->Information.usMinorRevision = MPT2SAS_MINOR_VERSION;
+	karg->Information.usBuildRevision = MPT2SAS_BUILD_VERSION;
+	karg->Information.usReleaseRevision = MPT2SAS_RELEASE_VERSION;
+
+	karg->Information.usCSMIMajorRevision = CSMI_MAJOR_REVISION;
+	karg->Information.usCSMIMinorRevision = CSMI_MINOR_REVISION;
+
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI_SAS_GET_CNTLR_CONFIG command.
+ *
+ * Outputs: None.
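+ *
+ * From user space this handler is reached through the CSMI ioctl path; a
+ * minimal sketch, assuming the conventional CSMI control-code name and
+ * omitting error handling:
+ *
+ *	CSMI_SAS_CNTLR_CONFIG_BUFFER buf;
+ *
+ *	memset(&buf, 0, sizeof(buf));
+ *	buf.IoctlHeader.Length = sizeof(buf) - sizeof(IOCTL_HEADER);
+ *	ioctl(fd, CC_CSMI_SAS_GET_CNTLR_CONFIG, &buf);
+ *	... buf.Configuration.usMajorRevision etc. are now valid ...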
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_cntlr_config(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_CNTLR_CONFIG_BUFFER *karg) +{ + + u32 version; + int rc = 0; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + /* Clear the struct before filling in data. */ + memset(&karg->Configuration, 0, sizeof(CSMI_SAS_CNTLR_CONFIG)); + + /* Fill in the data and return the structure to the calling program */ + karg->Configuration.uBaseIoAddress = ioc->pio_chip; + memcpy(&karg->Configuration.BaseMemoryAddress, + &ioc->chip_phys, 8); + + karg->Configuration.uBoardID = (ioc->pdev->subsystem_device << 16) | + (ioc->pdev->subsystem_vendor); + karg->Configuration.usSlotNumber = (ioc->ioc_pg1.PCISlotNum == 0xFF) ? + SLOT_NUMBER_UNKNOWN : ioc->ioc_pg1.PCISlotNum; + karg->Configuration.bControllerClass = CSMI_SAS_CNTLR_CLASS_HBA; + karg->Configuration.bIoBusType = CSMI_SAS_BUS_TYPE_PCI; + karg->Configuration.BusAddress.PciAddress.bBusNumber = + ioc->pdev->bus->number; + karg->Configuration.BusAddress.PciAddress.bDeviceNumber = + PCI_SLOT(ioc->pdev->devfn); + karg->Configuration.BusAddress.PciAddress.bFunctionNumber = + PCI_FUNC(ioc->pdev->devfn); + + /* Serial number */ + memcpy(&karg->Configuration.szSerialNumber, + ioc->manu_pg0.BoardTracerNumber, 16); + + /* Firmware version */ + karg->Configuration.usMajorRevision = + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24; + karg->Configuration.usMinorRevision = + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16; + karg->Configuration.usBuildRevision = + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8; + karg->Configuration.usReleaseRevision = + (ioc->facts.FWVersion.Word & 0x000000FF); + + /* Bios version */ + version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + karg->Configuration.usBIOSMajorRevision = (version & 0xFF000000) >> 24; + karg->Configuration.usBIOSMinorRevision = (version & 0x00FF0000) >> 16; + karg->Configuration.usBIOSBuildRevision = (version & 0x0000FF00) >> 8; + karg->Configuration.usBIOSReleaseRevision = (version & 0x000000FF); + + karg->Configuration.uControllerFlags = + CSMI_SAS_CNTLR_SAS_HBA | CSMI_SAS_CNTLR_FWD_ONLINE | + CSMI_SAS_CNTLR_SATA_HBA | CSMI_SAS_CNTLR_FWD_HRESET | + CSMI_SAS_CNTLR_FWD_SUPPORT; + + dcsmisasprintk(ioc, printk(KERN_DEBUG "Board ID = 0x%x\n", + karg->Configuration.uBoardID)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Serial Number = %s\n", + karg->Configuration.szSerialNumber)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Major Revision = %d\n", + karg->Configuration.usMajorRevision)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Minor Revision = %d\n", + karg->Configuration.usMinorRevision)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Build Revision = %d\n", + karg->Configuration.usBuildRevision)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Release Revision = %d\n", + karg->Configuration.usReleaseRevision)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "uControllerFlags = 0x%x\n", + karg->Configuration.uControllerFlags)); + + /* Success */ + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI Sas Get Controller Status command. + * + * Outputs: None. 
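+ *
+ * The handler below maps the MPI IOC state onto the CSMI status values,
+ * in sketch form:
+ *
+ *	MPI2_IOC_STATE_OPERATIONAL -> CSMI_SAS_CNTLR_STATUS_GOOD
+ *	MPI2_IOC_STATE_FAULT       -> CSMI_SAS_CNTLR_STATUS_FAILED
+ *	reset/ready/other          -> CSMI_SAS_CNTLR_STATUS_OFFLINE
+ *	                              (uOfflineReason = INITIALIZING)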
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_cntlr_status(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_CNTLR_STATUS_BUFFER *karg) +{ + + int rc = 0; + u32 ioc_state; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + /* Fill in the data and return the structure to the calling program */ + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + switch (ioc_state & MPI2_IOC_STATE_MASK) { + case MPI2_IOC_STATE_OPERATIONAL: + karg->Status.uStatus = CSMI_SAS_CNTLR_STATUS_GOOD; + karg->Status.uOfflineReason = 0; + break; + + case MPI2_IOC_STATE_FAULT: + karg->Status.uStatus = CSMI_SAS_CNTLR_STATUS_FAILED; + karg->Status.uOfflineReason = 0; + break; + + case MPI2_IOC_STATE_RESET: + case MPI2_IOC_STATE_READY: + default: + karg->Status.uStatus = CSMI_SAS_CNTLR_STATUS_OFFLINE; + karg->Status.uOfflineReason = + CSMI_SAS_OFFLINE_REASON_INITIALIZING; + break; + } + + dcsmisasprintk(ioc, printk(KERN_DEBUG "IOC state = 0x%x\n", ioc_state)); + + /* Success */ + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI Sas Get Phy Info command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_phy_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_PHY_INFO_BUFFER *karg) +{ + int rc = 0; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2SasPhyPage0_t sas_phy_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + u64 sas_address; + u32 device_info; + u16 ioc_status; + u16 sz; + int i; + + memset(&karg->Information, 0, sizeof(CSMI_SAS_PHY_INFO)); + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + /* Get number of phys on this HBA */ + mpt3sas_config_get_number_hba_phys(ioc, + &karg->Information.bNumberOfPhys); + if (!karg->Information.bNumberOfPhys) { + printk(MPT3SAS_ERR_FMT "%s():%d: failure\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto out; + } + + /* + * Get SAS IO Unit page 0 + */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + + (karg->Information.bNumberOfPhys * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + printk(MPT3SAS_ERR_FMT "%s():%d: failure\n", + ioc->name, __func__, __LINE__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + printk(MPT3SAS_ERR_FMT "%s():%d: failure\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto kfree; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "%s():%d: IOC status = 0x%x\n", + ioc->name, __func__, __LINE__, ioc_status); + rc = -EFAULT; + goto kfree; + } + + /* Loop over all PHYs */ + for (i = 0; i < karg->Information.bNumberOfPhys; i++) { + /* Dump SAS IO Unit page 0 data */ + dcsmisasprintk(ioc, printk(KERN_DEBUG + "---- SAS IO UNIT PAGE 0 ----\n")); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "PHY Num = 0x%X\n", i)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Attached Device Handle = 0x%X\n", + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle))); + dcsmisasprintk(ioc, 
printk(KERN_DEBUG + "Controller Device Handle = 0x%X\n", + le16_to_cpu(sas_iounit_pg0-> + PhyData[i].ControllerDevHandle))); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Port = 0x%X\n", sas_iounit_pg0->PhyData[i].Port)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Port Flags = 0x%X\n", + sas_iounit_pg0->PhyData[i].PortFlags)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "PHY Flags = 0x%X\n", sas_iounit_pg0->PhyData[i].PhyFlags)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Negotiated Link Rate = 0x%X\n", + sas_iounit_pg0->PhyData[i].NegotiatedLinkRate)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Controller PHY Device Info = 0x%X\n", + le32_to_cpu(sas_iounit_pg0-> + PhyData[i].ControllerPhyDeviceInfo))); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "DiscoveryStatus = 0x%X\n", + le32_to_cpu(sas_iounit_pg0->PhyData[i].DiscoveryStatus))); + + + /* Fill in data from SAS IO Unit page 0 */ + + /* Port identifier */ + karg->Information.Phy[i].bPortIdentifier = + sas_iounit_pg0->PhyData[i].Port; + + /* Negotiated link rate */ + switch (sas_iounit_pg0->PhyData[i].NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL) { + + case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED: + karg->Information.Phy[i].bNegotiatedLinkRate = + CSMI_SAS_PHY_DISABLED; + break; + + case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + karg->Information.Phy[i].bNegotiatedLinkRate = + CSMI_SAS_LINK_RATE_FAILED; + break; + + case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + break; + + case MPI2_SAS_NEG_LINK_RATE_1_5: + karg->Information.Phy[i].bNegotiatedLinkRate = + CSMI_SAS_LINK_RATE_1_5_GBPS; + break; + + case MPI2_SAS_NEG_LINK_RATE_3_0: + karg->Information.Phy[i].bNegotiatedLinkRate = + CSMI_SAS_LINK_RATE_3_0_GBPS; + break; + + case MPI2_SAS_NEG_LINK_RATE_6_0: + karg->Information.Phy[i].bNegotiatedLinkRate = + CSMI_SAS_LINK_RATE_6_0_GBPS; + break; + + default: + karg->Information.Phy[i].bNegotiatedLinkRate = + CSMI_SAS_LINK_RATE_UNKNOWN; + break; + } + + device_info = le32_to_cpu(sas_iounit_pg0->PhyData[i]. + ControllerPhyDeviceInfo); + + /* Parent initiator port protocol */ + karg->Information.Phy[i].Identify.bInitiatorPortProtocol = 0; + + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR) + karg->Information.Phy[i].Identify. + bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR) + karg->Information.Phy[i].Identify. + bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR) + karg->Information.Phy[i].Identify. + bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST) + karg->Information.Phy[i].Identify. 
+ bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_SATA; + + /* Parent target port protocol */ + karg->Information.Phy[i].Identify.bTargetPortProtocol = 0; + + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + karg->Information.Phy[i].Identify.bTargetPortProtocol |= + CSMI_SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + karg->Information.Phy[i].Identify.bTargetPortProtocol |= + CSMI_SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) + karg->Information.Phy[i].Identify.bTargetPortProtocol |= + CSMI_SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + karg->Information.Phy[i].Identify.bTargetPortProtocol |= + CSMI_SAS_PROTOCOL_SATA; + + /* Parent device type */ + switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + + case MPI2_SAS_DEVICE_INFO_NO_DEVICE: + karg->Information.Phy[i].Identify.bDeviceType = + CSMI_SAS_NO_DEVICE_ATTACHED; + break; + + case MPI2_SAS_DEVICE_INFO_END_DEVICE: + karg->Information.Phy[i].Identify.bDeviceType = + CSMI_SAS_END_DEVICE; + break; + + case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER: + karg->Information.Phy[i].Identify.bDeviceType = + CSMI_SAS_EDGE_EXPANDER_DEVICE; + break; + + case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER: + karg->Information.Phy[i].Identify.bDeviceType = + CSMI_SAS_FANOUT_EXPANDER_DEVICE; + break; + } + + + /* + * Get SAS PHY page 0 + */ + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &sas_phy_pg0, + i))) { + printk(MPT3SAS_ERR_FMT "%s():%d: failure\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto kfree; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "%s():%d: IOC status = 0x%x\n", + ioc->name, __func__, __LINE__, ioc_status); + rc = -EFAULT; + goto kfree; + } + + /* Dump SAS PHY page 0 data */ + dcsmisasprintk(ioc, printk(KERN_DEBUG + "---- SAS PHY PAGE 0 ----\n")); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "PHY Num = 0x%X\n", i)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Attached Device Handle = 0x%X\n", + le16_to_cpu(sas_phy_pg0.AttachedDevHandle))); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Attached PHY Identifier = 0x%X\n", + sas_phy_pg0.AttachedPhyIdentifier)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Programmed Link Rate = 0x%X\n", + sas_phy_pg0.ProgrammedLinkRate)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Hardware Link Rate = 0x%X\n", sas_phy_pg0.HwLinkRate)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Change Count = 0x%X\n", sas_phy_pg0.ChangeCount)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "PHY Info = 0x%X\n", le32_to_cpu(sas_phy_pg0.PhyInfo))); + + /* Get PhyChangeCount */ + karg->Information.Phy[i].bPhyChangeCount = + sas_phy_pg0.ChangeCount; + + /* Get AutoDiscover */ + if (sas_iounit_pg0->PhyData[i].PortFlags & + MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) + karg->Information.Phy[i].bAutoDiscover = + CSMI_SAS_DISCOVER_IN_PROGRESS; + else if (sas_iounit_pg0->PhyData[i].DiscoveryStatus) + karg->Information.Phy[i].bAutoDiscover = + CSMI_SAS_DISCOVER_ERROR; + else + karg->Information.Phy[i].bAutoDiscover = + CSMI_SAS_DISCOVER_COMPLETE; + + /* Minimum hardware link rate */ + switch (sas_phy_pg0.HwLinkRate & + MPI2_SAS_HWRATE_MIN_RATE_MASK) { + + case MPI2_SAS_HWRATE_MIN_RATE_1_5: + karg->Information.Phy[i].bMinimumLinkRate = + CSMI_SAS_LINK_RATE_1_5_GBPS; + break; + + case MPI2_SAS_HWRATE_MIN_RATE_3_0: + karg->Information.Phy[i].bMinimumLinkRate = + CSMI_SAS_LINK_RATE_3_0_GBPS; + break; + + case MPI2_SAS_HWRATE_MIN_RATE_6_0: + 
karg->Information.Phy[i].bMinimumLinkRate = + CSMI_SAS_LINK_RATE_6_0_GBPS; + break; + + default: + karg->Information.Phy[i].bMinimumLinkRate = + CSMI_SAS_LINK_RATE_UNKNOWN; + break; + } + + /* Maximum hardware link rate */ + switch (sas_phy_pg0.HwLinkRate & + MPI2_SAS_HWRATE_MAX_RATE_MASK) { + + case MPI2_SAS_HWRATE_MAX_RATE_1_5: + karg->Information.Phy[i].bMaximumLinkRate = + CSMI_SAS_LINK_RATE_1_5_GBPS; + break; + + case MPI2_SAS_HWRATE_MAX_RATE_3_0: + karg->Information.Phy[i].bMaximumLinkRate = + CSMI_SAS_LINK_RATE_3_0_GBPS; + break; + + case MPI2_SAS_HWRATE_MAX_RATE_6_0: + karg->Information.Phy[i].bMaximumLinkRate = + CSMI_SAS_LINK_RATE_6_0_GBPS; + break; + + default: + karg->Information.Phy[i].bMaximumLinkRate = + CSMI_SAS_LINK_RATE_UNKNOWN; + break; + } + + /* Minimum programmed link rate */ + switch (sas_phy_pg0.ProgrammedLinkRate & + MPI2_SAS_PRATE_MIN_RATE_MASK) { + + case MPI2_SAS_PRATE_MIN_RATE_1_5: + karg->Information.Phy[i].bMinimumLinkRate |= + (CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS << 4); + break; + + case MPI2_SAS_PRATE_MIN_RATE_3_0: + karg->Information.Phy[i].bMinimumLinkRate |= + (CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS << 4); + break; + + case MPI2_SAS_PRATE_MIN_RATE_6_0: + karg->Information.Phy[i].bMinimumLinkRate |= + (CSMI_SAS_PROGRAMMED_LINK_RATE_6_0_GBPS << 4); + break; + + default: + karg->Information.Phy[i].bMinimumLinkRate |= + (CSMI_SAS_LINK_RATE_UNKNOWN << 4); + break; + } + + /* Maximum programmed link rate */ + switch (sas_phy_pg0.ProgrammedLinkRate & + MPI2_SAS_PRATE_MAX_RATE_MASK) { + + case MPI2_SAS_PRATE_MAX_RATE_1_5: + karg->Information.Phy[i].bMaximumLinkRate |= + (CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS << 4); + break; + + case MPI2_SAS_PRATE_MAX_RATE_3_0: + karg->Information.Phy[i].bMaximumLinkRate |= + (CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS << 4); + break; + + case MPI2_SAS_PRATE_MAX_RATE_6_0: + karg->Information.Phy[i].bMaximumLinkRate |= + (CSMI_SAS_PROGRAMMED_LINK_RATE_6_0_GBPS << 4); + break; + + default: + karg->Information.Phy[i].bMaximumLinkRate |= + (CSMI_SAS_LINK_RATE_UNKNOWN << 4); + break; + } + + + + /* Get attached SAS Device page 0 */ + if (sas_phy_pg0.AttachedDevHandle) { + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(sas_phy_pg0.AttachedDevHandle)))) { + printk(MPT3SAS_ERR_FMT "%s():%d: failure\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto kfree; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "%s():%d: IOC status = " + "0x%x\n", ioc->name, __func__, __LINE__, + ioc_status); + rc = -EFAULT; + goto kfree; + } + + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* Dump attached SAS Device page 0 data */ + dcsmisasprintk(ioc, printk(KERN_DEBUG + "---- SAS DEVICE PAGE 0 ----\n")); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "PHY Num = 0x%X\n", i)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "SAS Address = 0x%llX\n", (unsigned long long) + sas_address)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Device Info = 0x%X\n", + le32_to_cpu(sas_device_pg0.DeviceInfo))); + + + /* Fill in data from SAS PHY page 0 and attached SAS + * Device page 0 */ + + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + /* Attached initiator port protocol */ + karg->Information.Phy[i].Attached. + bInitiatorPortProtocol = 0; + + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR) + karg->Information.Phy[i].Attached. 
+ bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR) + karg->Information.Phy[i].Attached. + bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR) + karg->Information.Phy[i].Attached. + bInitiatorPortProtocol |= CSMI_SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST) + karg->Information.Phy[i].Attached. + bInitiatorPortProtocol |= + CSMI_SAS_PROTOCOL_SATA; + + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Device Info Initiator Port = 0x%X\n", + karg->Information.Phy[i].Attached. + bInitiatorPortProtocol)); + + /* Attached target port protocol */ + karg->Information.Phy[i].Attached. + bTargetPortProtocol = 0; + + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + karg->Information.Phy[i].Attached. + bTargetPortProtocol |= CSMI_SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + karg->Information.Phy[i].Attached. + bTargetPortProtocol |= CSMI_SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) + karg->Information.Phy[i].Attached. + bTargetPortProtocol |= CSMI_SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + karg->Information.Phy[i].Attached. + bTargetPortProtocol |= CSMI_SAS_PROTOCOL_SATA; + + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Device Info Target Port = 0x%X\n", + karg->Information.Phy[i].Attached. + bTargetPortProtocol)); + + /* Attached device type */ + switch (device_info & + MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + + case MPI2_SAS_DEVICE_INFO_NO_DEVICE: + karg->Information.Phy[i].Attached.bDeviceType = + CSMI_SAS_NO_DEVICE_ATTACHED; + break; + + case MPI2_SAS_DEVICE_INFO_END_DEVICE: + karg->Information.Phy[i].Attached.bDeviceType = + CSMI_SAS_END_DEVICE; + break; + + case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER: + karg->Information.Phy[i].Attached.bDeviceType = + CSMI_SAS_EDGE_EXPANDER_DEVICE; + break; + + case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER: + karg->Information.Phy[i].Attached.bDeviceType = + CSMI_SAS_FANOUT_EXPANDER_DEVICE; + break; + } + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Device Info Device Type = 0x%X\n", + karg->Information.Phy[i].Attached.bDeviceType)); + + /* Setup the SAS address for the attached device */ + *(__be64 *)karg->Information.Phy[i]. 
+ Attached.bSASAddress = cpu_to_be64(sas_address); + karg->Information.Phy[i].Attached.bPhyIdentifier = + sas_phy_pg0.AttachedPhyIdentifier; + } + + + /* Get parent SAS Device page 0 */ + if (sas_iounit_pg0->PhyData[i].ControllerDevHandle) { + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(sas_iounit_pg0-> + PhyData[i].ControllerDevHandle)))) { + printk(MPT3SAS_ERR_FMT "%s():%d: failure\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto kfree; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "%s():%d: IOC status = " + "0x%x\n", ioc->name, __func__, __LINE__, + ioc_status); + rc = -EFAULT; + goto kfree; + } + + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* Dump parent SAS Device page 0 data */ + dcsmisasprintk(ioc, printk(KERN_DEBUG + "---- SAS DEVICE PAGE 0 (Parent) ----\n")); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "PHY Num = 0x%X\n", i)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "SAS Address = 0x%llX\n", (unsigned long long) + sas_address)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Device Info = 0x%X\n", + le32_to_cpu(sas_device_pg0.DeviceInfo))); + + + /* Fill in data from parent SAS Device page 0 */ + + /* Setup the SAS address for the parent device */ + *(__be64 *)karg->Information.Phy[i].Identify. + bSASAddress = cpu_to_be64(sas_address); + karg->Information.Phy[i].Identify.bPhyIdentifier = i; + +#if 0 + /* Dump all the data we care about */ + printk(KERN_DEBUG "\tPortId = %x\n", + karg->Information.Phy[i].bPortIdentifier); + printk(KERN_DEBUG "\tNegotiated Linkrate = %x\n", + karg->Information.Phy[i].bNegotiatedLinkRate); + + printk(KERN_DEBUG "\tIdentify Target Port Protocol = %x\n", + karg->Information.Phy[i].Identify. + bTargetPortProtocol); + printk(KERN_DEBUG "\tIdentify Device Type = %x\n", + karg->Information.Phy[i].Identify.bDeviceType); + printk(KERN_DEBUG "\tIdentify SAS Address = %llx\n", + be64_to_cpu(*(u64 *)karg->Information.Phy[i]. + Identify.bSASAddress)); + + printk(KERN_DEBUG "\tAttached Target Port Protocol = %x\n", + karg->Information.Phy[i].Attached. + bTargetPortProtocol); + printk(KERN_DEBUG "\tAttached Device Type = %x\n", + karg->Information.Phy[i].Attached.bDeviceType); + printk(KERN_DEBUG "\tAttached SAS Address = %llx\n", + be64_to_cpu(*(u64 *)karg->Information.Phy[i]. + Attached.bSASAddress)); +#endif + } + } + + /* Success */ + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + + kfree: + kfree(sas_iounit_pg0); + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS Set PHY Info command. + * + * Outputs: None. 
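+ *
+ * In outline, the handler reads SAS IO Unit page 1, patches the addressed
+ * phy's entry, validates the new rates against SAS PHY page 0 hardware
+ * limits and writes the page back. A sketch of the rate encoding it
+ * manipulates (minimum rate in the low nibble of MaxMinLinkRate, maximum
+ * rate in the high nibble):
+ *
+ *	rate = (rate & MPI2_SASIOUNIT1_MAX_RATE_MASK) |
+ *	    MPI2_SASIOUNIT1_MIN_RATE_3_0;	(programmed minimum 3.0 Gbps)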
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_set_phy_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SET_PHY_INFO_BUFFER *karg) +{ + CSMI_SAS_SET_PHY_INFO *info = &karg->Information; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2ConfigReply_t mpi_reply; + MPI2_SAS_IO_UNIT1_PHY_DATA *phy_ptr = NULL; + u16 ioc_status; + int rc = 0; + u16 sz; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + if (_csmi_valid_phy(ioc, &karg->IoctlHeader, info->bPhyIdentifier)) + goto out; + + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + if (mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + info->bPhyIdentifier)) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + phy_ptr = &sas_iounit_pg1->PhyData[info->bPhyIdentifier]; + + switch (info->bNegotiatedLinkRate) { + case CSMI_SAS_LINK_RATE_NEGOTIATE: + phy_ptr->PhyFlags &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + break; + case CSMI_SAS_LINK_RATE_PHY_DISABLED: + phy_ptr->PhyFlags |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + break; + default: + printk(MPT3SAS_ERR_FMT + "%s():%d: bad negotiated link rate: %X\n", + ioc->name, __func__, __LINE__, info->bNegotiatedLinkRate); + rc = -EFAULT; + goto out; + break; + } + + switch (info->bProgrammedMinimumLinkRate) { + case CSMI_SAS_PROGRAMMED_LINK_RATE_UNCHANGED: + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS: + phy_ptr->MaxMinLinkRate = + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MAX_RATE_MASK) | + MPI2_SASIOUNIT1_MIN_RATE_1_5; + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS: + phy_ptr->MaxMinLinkRate = + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MAX_RATE_MASK) | + MPI2_SASIOUNIT1_MIN_RATE_3_0; + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_6_0_GBPS: + phy_ptr->MaxMinLinkRate = + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MAX_RATE_MASK) | + MPI2_SASIOUNIT1_MIN_RATE_6_0; + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_12_0_GBPS: + default: + karg->IoctlHeader.ReturnCode = CSMI_SAS_LINK_RATE_OUT_OF_RANGE; + printk(MPT3SAS_ERR_FMT + "%s():%d: bad programmed minimum link rate: %X\n", + ioc->name, __func__, __LINE__, + info->bProgrammedMinimumLinkRate); + rc = -EFAULT; + goto out; + break; + } + + switch (info->bProgrammedMaximumLinkRate) { + case CSMI_SAS_PROGRAMMED_LINK_RATE_UNCHANGED: + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS: + phy_ptr->MaxMinLinkRate = 
+ (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MIN_RATE_MASK) | + MPI2_SASIOUNIT1_MAX_RATE_1_5; + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS: + phy_ptr->MaxMinLinkRate = + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MIN_RATE_MASK) | + MPI2_SASIOUNIT1_MAX_RATE_3_0; + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_6_0_GBPS: + phy_ptr->MaxMinLinkRate = + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MIN_RATE_MASK) | + MPI2_SASIOUNIT1_MAX_RATE_6_0; + break; + case CSMI_SAS_PROGRAMMED_LINK_RATE_12_0_GBPS: + default: + karg->IoctlHeader.ReturnCode = CSMI_SAS_LINK_RATE_OUT_OF_RANGE; + printk(MPT3SAS_ERR_FMT + "%s():%d: bad programmed maximum link rate: %X\n", + ioc->name, __func__, __LINE__, + info->bProgrammedMaximumLinkRate); + rc = -EFAULT; + goto out; + break; + } + + if (info->bSignalClass != CSMI_SAS_SIGNAL_CLASS_UNKNOWN) { + printk(MPT3SAS_ERR_FMT + "%s():%d: signal class is not supported: %X\n", + ioc->name, __func__, __LINE__, + info->bSignalClass); + rc = -EFAULT; + goto out; + } + + /* Validate the new settings before committing them + * min <= max, min >= hwmin, max <= hwmax + */ + if ((phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MIN_RATE_MASK) > + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MAX_RATE_MASK)>>4 || + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MIN_RATE_MASK) < + (phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK) || + (phy_ptr->MaxMinLinkRate & MPI2_SASIOUNIT1_MAX_RATE_MASK) > + (phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MAX_RATE_MASK)) { + printk(MPT3SAS_ERR_FMT + "%s():%d: Rates invalid or exceed hardware limits\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto out; + } + + + if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + + out: + kfree(sas_iounit_pg1); + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI Sas Get SCSI Address command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_scsi_address(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_GET_SCSI_ADDRESS_BUFFER *karg) +{ + int rc = 0; + struct _sas_device sas_device; + u64 sas_address; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + karg->bTargetId = 0; + karg->bPathId = 0; + karg->bLun = 0; + + /* Find the target SAS device */ + sas_address = be64_to_cpu(*(__be64 *)karg->bSASAddress); + rc = _csmisas_scsih_sas_device_find_by_sas_address(ioc, + sas_address, &sas_device); + if (rc == 0) { + karg->bHostIndex = ioc->shost->host_no; + karg->bTargetId = sas_device.id; + karg->bPathId = sas_device.channel; + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + } else + karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_SCSI_ADDRESS; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI Sas Get SATA Signature + * + * Outputs: None. 
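+ *
+ * The signature returned is the first 20 bytes of the initial Register -
+ * Device to Host FIS that the firmware captured for the attached SATA
+ * device (SAS Device page 1), copied into bSignatureFIS below.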
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_sata_signature(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SATA_SIGNATURE_BUFFER *karg) +{ + int rc = 0; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasPhyPage0_t phy_pg0_request; + Mpi2SasDevicePage1_t sas_device_pg1; + struct _sas_device sas_device; + u32 handle; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_NOT_AN_END_DEVICE; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + if (_csmi_valid_phy(ioc, &karg->IoctlHeader, + karg->Signature.bPhyIdentifier)) + goto out; + + if (mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0_request, + karg->Signature.bPhyIdentifier)) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + handle = le16_to_cpu(phy_pg0_request.AttachedDevHandle); + if (!handle) { + dcsmisasprintk(ioc, printk(KERN_WARNING + ": NOT AN END DEVICE\n")); + goto out; + } + rc = _csmisas_sas_device_find_by_handle(ioc, handle, &sas_device); + if (rc != 0) + goto out; + if (!(sas_device.device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) { + dcsmisasprintk(ioc, printk(KERN_WARNING + ": NOT A SATA DEVICE\n")); + karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_SATA_DEVICE; + goto out; + } + + if (mpt3sas_config_get_sas_device_pg1(ioc, &mpi_reply, &sas_device_pg1, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + memcpy(karg->Signature.bSignatureFIS, + sas_device_pg1.InitialRegDeviceFIS, 20); + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI Sas Get Device Address + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_device_address(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER *karg) +{ + int rc = 0; + struct _sas_device sas_device; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS; + memset(karg->bSASAddress, 0, sizeof(u64)); + memset(karg->bSASLun, 0, sizeof(karg->bSASLun)); + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + if (karg->bHostIndex != ioc->shost->host_no) + goto out; + + /* search for device based on bus/target */ + rc = _csmisas_sas_device_find_by_id(ioc, karg->bTargetId, + karg->bPathId, &sas_device); + if (!rc) { + *(__be64 *)karg->bSASAddress = + cpu_to_be64(sas_device.sas_address); + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + } + + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI Sas Get Link Errors command. + * + * Outputs: None. 
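+ *
+ * A monitoring application typically polls this ioctl and diffs the
+ * counters; passing a bResetCounts value other than
+ * CSMI_SAS_LINK_ERROR_DONT_RESET_COUNTS additionally clears the phy error
+ * log after the read, via MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG. A sketch:
+ *
+ *	karg.Information.bPhyIdentifier = phy;
+ *	karg.Information.bResetCounts = CSMI_SAS_LINK_ERROR_DONT_RESET_COUNTS;
+ *	... issue the ioctl, read karg.Information.uInvalidDwordCount ...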
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_link_errors(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_LINK_ERRORS_BUFFER *karg) +{ + int rc = 0; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasPhyPage1_t phy_pg1; + Mpi2SasIoUnitControlRequest_t sas_iounit_request; + Mpi2SasIoUnitControlReply_t sas_iounit_reply; + u32 phy_number; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + phy_number = karg->Information.bPhyIdentifier; + if (_csmi_valid_phy(ioc, &karg->IoctlHeader, phy_number)) + goto out; + + /* get hba phy error logs */ + if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy_number))) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + printk(MPT3SAS_INFO_FMT "phy(%d), ioc_status" + "(0x%04x), loginfo(0x%08x)\n", ioc->name, + phy_number, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + +/* EDM : dump PHY Page 1 data*/ + dcsmisasprintk(ioc, printk(KERN_DEBUG + "---- SAS PHY PAGE 1 ------------\n")); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Invalid Dword Count=0x%x\n", + phy_pg1.InvalidDwordCount)); + dcsmisasprintk(ioc, printk(KERN_DEBUG + "Running Disparity Error Count=0x%x\n", + phy_pg1.RunningDisparityErrorCount)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "Loss Dword Synch Count=0x%x\n", + phy_pg1.LossDwordSynchCount)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "PHY Reset Problem Count=0x%x\n", + phy_pg1.PhyResetProblemCount)); + dcsmisasprintk(ioc, printk(KERN_DEBUG "\n\n")); +/* EDM : debug data */ + + karg->Information.uInvalidDwordCount = + le32_to_cpu(phy_pg1.InvalidDwordCount); + karg->Information.uRunningDisparityErrorCount = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + karg->Information.uLossOfDwordSyncCount = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + karg->Information.uPhyResetProblemCount = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + + if (karg->Information.bResetCounts == + CSMI_SAS_LINK_ERROR_DONT_RESET_COUNTS) + goto out; + + memset(&sas_iounit_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + memset(&sas_iounit_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); + sas_iounit_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + sas_iounit_request.PhyNum = phy_number; + sas_iounit_request.Operation = MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG; + + if (mpt3sas_base_sas_iounit_control(ioc, &sas_iounit_reply, + &sas_iounit_request)) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + + if (sas_iounit_reply.IOCStatus || sas_iounit_reply.IOCLogInfo) + printk(MPT3SAS_INFO_FMT "phy(%d), ioc_status" + "(0x%04x), loginfo(0x%08x)\n", ioc->name, + phy_number, le16_to_cpu(sas_iounit_reply.IOCStatus), + le32_to_cpu(sas_iounit_reply.IOCLogInfo)); + + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS SMP Passthru command. + * + * Outputs: None. 
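+ *
+ * Callers must address a port (not an individual phy) and let the
+ * connection rate negotiate. A sketch of issuing an SMP REPORT GENERAL
+ * (SMP function 0x00) through this path, using only fields referenced
+ * below:
+ *
+ *	karg.Parameters.bPhyIdentifier = CSMI_SAS_USE_PORT_IDENTIFIER;
+ *	karg.Parameters.bPortIdentifier = port;
+ *	karg.Parameters.bConnectionRate = CSMI_SAS_LINK_RATE_NEGOTIATE;
+ *	memcpy(karg.Parameters.bDestinationSASAddress, &wwid_be, 8);
+ *	karg.Parameters.Request.bFunction = 0x00;
+ *	karg.Parameters.uRequestLength = (request frame size in bytes);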
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+_csmisas_smp_passthru(struct MPT3SAS_ADAPTER *ioc,
+	CSMI_SAS_SMP_PASSTHRU_BUFFER *karg)
+{
+	Mpi2SmpPassthroughRequest_t *mpi_request;
+	Mpi2SmpPassthroughReply_t *mpi_reply;
+	int rc = 0;
+	u16 smid;
+	unsigned long timeleft;
+	void *psge;
+	u32 sgl_flags;
+	void *data_addr = NULL;
+	dma_addr_t data_dma;
+	u32 data_out_sz = 0, data_in_sz = 0;
+	u16 response_sz;
+	u16 ioc_status;
+	u8 issue_host_reset = 0;
+	u64 sas_address;
+
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name,
+	    __func__));
+
+	if (_csmi_valid_port_for_smp_cmd(ioc, &karg->IoctlHeader,
+	    karg->Parameters.bPhyIdentifier, karg->Parameters.bPortIdentifier))
+		goto out;
+
+	/* MPI doesn't support SMP commands across specific phys at specific
+	 * rates, so a port must be specified
+	 */
+	if (karg->Parameters.bConnectionRate != CSMI_SAS_LINK_RATE_NEGOTIATE) {
+		dcsmisasprintk(ioc, printk(KERN_ERR
+		    "%s::%s() @%d - must use CSMI_SAS_LINK_RATE_NEGOTIATE\n",
+		    __FILE__, __func__, __LINE__));
+		goto out;
+	}
+
+	/*
+	 * The following is taken from _transport_smp_handler and
+	 * _transport_expander_report_manufacture in mpt3sas_transport.c.
+	 * More or less.
+	 */
+
+	if (ioc->shost_recovery) {
+		printk(MPT3SAS_INFO_FMT "%s():%d: host reset in progress!\n",
+		    ioc->name, __func__, __LINE__);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	mutex_lock(&ioc->transport_cmds.mutex);
+
+	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+		printk(MPT3SAS_ERR_FMT "%s():%d: status = 0x%x\n",
+		    ioc->name, __func__, __LINE__,
+		    ioc->transport_cmds.status);
+		rc = -EFAULT;
+		goto unlock;
+	}
+	ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+	rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10);
+	if (rc)
+		goto cmd_not_used;
+
+	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+	if (!smid) {
+		printk(MPT3SAS_ERR_FMT "%s():%d: smid = 0\n",
+		    ioc->name, __func__, __LINE__);
+		rc = -EFAULT;
+		goto cmd_not_used;
+	}
+
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->transport_cmds.smid = smid;
+
+	/* Request and response sizes */
+	data_out_sz = karg->Parameters.uRequestLength;
+	data_in_sz = sizeof(CSMI_SAS_SMP_RESPONSE);
+
+	/* Allocate DMA memory. Use a single buffer for both request and
+	 * response data.
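+	 * The single allocation is laid out as:
+	 *
+	 *	data_dma                   data_dma + data_out_sz
+	 *	|<----- SMP request ----->|<----- SMP response ----->|
+	 *	       data_out_sz                 data_in_sz
+	 *
+	 * so the request sge points at the start of the buffer and the
+	 * response sge at the tail half.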
+	 */
+	data_addr = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
+	    &data_dma);
+	if (!data_addr) {
+		printk(MPT3SAS_ERR_FMT "%s():%d: data addr = NULL\n",
+		    ioc->name, __func__, __LINE__);
+		rc = -ENOMEM;
+		mpt3sas_base_free_smid(ioc, smid);
+		goto cmd_not_used;
+	}
+
+	/* Copy the SMP request to the DMA buffer */
+	memcpy(data_addr, &karg->Parameters.Request, data_out_sz);
+
+	/* Setup the MPI request frame */
+	memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+	mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+	mpi_request->PhysicalPort = karg->Parameters.bPortIdentifier;
+	sas_address = be64_to_cpu(
+	    *(__be64 *)karg->Parameters.bDestinationSASAddress);
+	mpi_request->SASAddress = cpu_to_le64(sas_address);
+	mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
+	psge = &mpi_request->SGL;
+
+	/* Request (write) sgel first */
+	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+	ioc->base_add_sg_single(psge, sgl_flags | data_out_sz, data_dma);
+
+	/* incr sgel */
+	psge += ioc->sge_size;
+
+	/* Response (read) sgel last */
+	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+	    MPI2_SGE_FLAGS_END_OF_LIST);
+	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+	ioc->base_add_sg_single(psge, sgl_flags | data_in_sz, data_dma +
+	    data_out_sz);
+
+	/* Send the request */
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): sending request "
+	    "(target addr = 0x%llx, function = 0x%x, timeout = %d sec)\n",
+	    ioc->name, __func__, (unsigned long long)sas_address,
+	    karg->Parameters.Request.bFunction, karg->IoctlHeader.Timeout));
+	init_completion(&ioc->transport_cmds.done);
+	ioc->put_smid_default(ioc, smid);
+	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+	    karg->IoctlHeader.Timeout*HZ);
+
+	/* Check the command status */
+	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+		printk(MPT3SAS_ERR_FMT "%s(): failure (target addr = 0x%llx, "
+		    "function = 0x%x, status = 0x%x, timeleft = %lu)\n",
+		    ioc->name, __func__, (unsigned long long)
+		    sas_address, karg->Parameters.Request.bFunction,
+		    ioc->transport_cmds.status, timeleft);
+		_debug_dump_mf(mpi_request,
+		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
+		rc = -EFAULT;
+		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+			issue_host_reset = 1;
+		goto pci_free;
+	}
+
+	/* Check if the response is valid */
+	if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
+		printk(MPT3SAS_ERR_FMT "%s(): target addr = 0x%llx, "
+		    "function = 0x%x, status = 0x%x\n", ioc->name, __func__,
+		    (unsigned long long)sas_address,
+		    karg->Parameters.Request.bFunction,
+		    ioc->transport_cmds.status);
+		rc = -EFAULT;
+		goto pci_free;
+	}
+
+	/* Process the response */
+	mpi_reply = ioc->transport_cmds.reply;
+
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if ((ioc_status != MPI2_IOCSTATUS_SUCCESS) &&
+	    (ioc_status != MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN)) {
+		dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): "
+		    "target addr = 0x%llx, function = 0x%x, IOC Status = 0x%x, "
+		    "IOC LogInfo = 0x%x\n", ioc->name, __func__,
+		    (unsigned long long)sas_address,
+		    karg->Parameters.Request.bFunction,
+		    le16_to_cpu(mpi_reply->IOCStatus),
+		    le32_to_cpu(mpi_reply->IOCLogInfo)));
+		rc = -EFAULT;
+		goto pci_free;
+	}
+
+	karg->Parameters.bConnectionStatus =
+	    _map_sas_status_to_csmi(mpi_reply->SASStatus);
+
+	/* Copy the SMP response data from the DMA buffer */
+	response_sz = le16_to_cpu(mpi_reply->ResponseDataLength);
+	if (response_sz > sizeof(CSMI_SAS_SMP_RESPONSE)) {
+		printk(MPT3SAS_ERR_FMT "%s(): response size(0x%X) larger than "
+		    "available memory(0x%lX)\n", ioc->name, __func__,
+		    response_sz, sizeof(CSMI_SAS_SMP_RESPONSE));
+		rc = -ENOMEM;
+		goto pci_free;
+	} else if (response_sz) {
+		karg->Parameters.uResponseBytes = response_sz;
+		memcpy(&karg->Parameters.Response, data_addr + data_out_sz,
+		    response_sz);
+	}
+
+	/* Success */
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+
+pci_free:
+	pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz, data_addr,
+	    data_dma);
+cmd_not_used:
+	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+unlock:
+	mutex_unlock(&ioc->transport_cmds.mutex);
+out:
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	if (issue_host_reset)
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI SAS SSP Passthru command.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+_csmisas_ssp_passthru(struct MPT3SAS_ADAPTER *ioc,
+	CSMI_SAS_SSP_PASSTHRU_BUFFER *karg)
+{
+	Mpi2SCSIIORequest_t *mpi_request;
+	Mpi2SCSIIOReply_t *mpi_reply = NULL;
+	int rc = 0;
+	u16 smid;
+	unsigned long timeleft;
+	void *priv_sense;
+	u32 mpi_control;
+	u32 sgl_flags;
+	void *data_addr = NULL;
+	dma_addr_t data_dma = -1;
+	u32 data_sz = 0;
+	u64 sas_address;
+	struct _sas_device sas_device;
+	u16 ioc_status;
+	int ii;
+	u8 response_length;
+	u16 wait_state_count;
+	u32 ioc_state;
+	u8 issue_tm = 0;
+	u8 issue_host_reset = 0;
+	u32 in_size;
+
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name,
+	    __func__));
+
+	if (_csmi_valid_port_for_cmd(ioc, &karg->IoctlHeader,
+	    karg->Parameters.bPhyIdentifier, karg->Parameters.bPortIdentifier))
+		goto out;
+
+	/* MPI doesn't support SSP commands across specific phys at a specific
+	 * rate, so a port must be used
+	 */
+	if (karg->Parameters.bConnectionRate != CSMI_SAS_LINK_RATE_NEGOTIATE) {
+		dcsmisasprintk(ioc, printk(KERN_ERR
+		    "%s::%s() @%d - must use CSMI_SAS_LINK_RATE_NEGOTIATE\n",
+		    __FILE__, __func__, __LINE__));
+		goto out;
+	}
+
+	/* Find the target SAS device */
+	sas_address = be64_to_cpu(*(__be64 *)karg->Parameters.
+	    bDestinationSASAddress);
+	if (_csmisas_scsih_sas_device_find_by_sas_address(ioc, sas_address,
+	    &sas_device) != 0) {
+		printk(MPT3SAS_ERR_FMT "%s():%d: device with addr 0x%llx not "
+		    "found!\n", ioc->name, __func__, __LINE__,
+		    (unsigned long long)sas_address);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT
+	    "%s(): karg->IoctlHeader.Length = %d, "
+	    "karg->Parameters.uDataLength = %d\n",
+	    ioc->name, __func__, karg->IoctlHeader.Length,
+	    karg->Parameters.uDataLength));
+
+	/* data buffer bounds checking */
+	in_size = offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER, bDataBuffer) -
+	    sizeof(IOCTL_HEADER) + karg->Parameters.uDataLength;
+	if (in_size > karg->IoctlHeader.Length) {
+		printk(MPT3SAS_ERR_FMT "%s():%d: data buffer too small "
+		    "IoctlHeader.Length = %d, Parameters.uDataLength = %d!\n",
+		    ioc->name, __func__, __LINE__, karg->IoctlHeader.Length,
+		    karg->Parameters.uDataLength);
+		karg->IoctlHeader.ReturnCode =
+		    CSMI_SAS_STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+
+	/*
+	 * The following is taken from _scsi_send_scsi_io in mpt3sas_scsih.c.
+	 * More or less.
+	 */
+
+	if (ioc->shost_recovery) {
+		printk(MPT3SAS_INFO_FMT "%s():%d: host reset in progress!\n",
+		    ioc->name, __func__, __LINE__);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	mutex_lock(&ioc->scsih_cmds.mutex);
+
+	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+		printk(MPT3SAS_ERR_FMT "%s():%d: status = 0x%x\n", ioc->name,
+		    __func__, __LINE__, ioc->scsih_cmds.status);
+		rc = -EFAULT;
+		goto unlock;
+	}
+	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+	wait_state_count = 0;
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+		if (wait_state_count++ == 10) {
+			printk(MPT3SAS_ERR_FMT "%s: failed due to ioc not "
+			    "operational\n", ioc->name, __func__);
+			rc = -EFAULT;
+			goto set_cmd_status;
+		}
+		ssleep(1);
+		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+		printk(MPT3SAS_INFO_FMT "%s: waiting for operational "
+		    "state(count=%d)\n", ioc->name, __func__, wait_state_count);
+	}
+	if (wait_state_count)
+		printk(MPT3SAS_INFO_FMT "%s: ioc is operational\n", ioc->name,
+		    __func__);
+
+	/* Use first reserved smid for passthrough ioctls */
+	smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_IOCTL;
+
+	/* Allocate DMA memory and copy the SSP write payload from user memory
+	 * to the DMA buffer */
+	data_sz = karg->Parameters.uDataLength;
+	if (data_sz) {
+		data_addr = pci_alloc_consistent(ioc->pdev, data_sz, &data_dma);
+		if (!data_addr) {
+			printk(MPT3SAS_ERR_FMT "%s():%d: data addr = NULL\n",
+			    ioc->name, __func__, __LINE__);
+			rc = -ENOMEM;
+			goto set_cmd_status;
+		}
+		if (karg->IoctlHeader.Direction == CSMI_SAS_DATA_WRITE)
+			memcpy(data_addr, karg->bDataBuffer, data_sz);
+	}
+
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->scsih_cmds.smid = smid;
+
+	/* Setup the MPI request frame */
+	memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+	mpi_request->DevHandle = cpu_to_le16(sas_device.handle);
+
+	/* Set scatter gather flags */
+	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+	    MPI2_SGE_FLAGS_END_OF_LIST);
+	if (karg->IoctlHeader.Direction == CSMI_SAS_DATA_WRITE)
+		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
+	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+
+	/* Direction */
+	if (!data_sz) { /* handle the case when uFlags is incorrectly set */
+		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+	} else if (karg->IoctlHeader.Direction == CSMI_SAS_DATA_WRITE) {
+		ioc->base_add_sg_single(&mpi_request->SGL, sgl_flags | data_sz,
+		    data_dma);
+		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+
+	} else if (karg->IoctlHeader.Direction == CSMI_SAS_DATA_READ) {
+		ioc->base_add_sg_single(&mpi_request->SGL, sgl_flags | data_sz,
+		    data_dma);
+		mpi_control = MPI2_SCSIIO_CONTROL_READ;
+
+	} else {
+		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+	}
+
+	mpi_request->Control = cpu_to_le32(mpi_control |
+	    MPI2_SCSIIO_CONTROL_SIMPLEQ);
+	mpi_request->DataLength = cpu_to_le32(data_sz);
+	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+	mpi_request->SenseBufferLowAddress =
+	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+	priv_sense = mpt3sas_base_get_sense_buffer(ioc, smid);
+	mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+	mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
+	    MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
+
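+	/* The remaining fields come straight from the caller-supplied CSMI
+	 * parameters: IoFlags carries the CDB length, and the LUN and CDB
+	 * bytes are copied into the request frame as-is.
+	 */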
+	mpi_request->IoFlags = cpu_to_le16(karg->Parameters.bCDBLength);
+	memcpy(mpi_request->LUN, karg->Parameters.bLun, 8);
+	memcpy(mpi_request->CDB.CDB32, karg->Parameters.bCDB,
+	    karg->Parameters.bCDBLength);
+
+	/* Send the request */
+	if (ioc->logging_level & MPT_DEBUG_CSMISAS) {
+		printk(MPT3SAS_INFO_FMT "%s(): sending request "
+		    "(target addr = 0x%llx, SCSI cmd = {",
+		    ioc->name, __func__, (unsigned long long)sas_address);
+		for (ii = 0; ii < karg->Parameters.bCDBLength; ii++) {
+			printk("%02x", karg->Parameters.bCDB[ii]);
+			if (ii < karg->Parameters.bCDBLength - 1)
+				printk(" ");
+		}
+		printk("}, timeout = %d sec)\n", karg->IoctlHeader.Timeout);
+	}
+
+	init_completion(&ioc->scsih_cmds.done);
+	ioc->put_smid_scsi_io(ioc, smid, sas_device.handle);
+	timeleft = wait_for_completion_timeout(&ioc->scsih_cmds.done,
+	    karg->IoctlHeader.Timeout*HZ);
+
+	/* Check the command status */
+	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+		printk(MPT3SAS_ERR_FMT "%s(): failure (target addr = 0x%llx, "
+		    "SCSI cmd = 0x%x, status = 0x%x, timeleft = %lu)\n",
+		    ioc->name, __func__, (unsigned long long)sas_address,
+		    mpi_request->CDB.CDB32[0], ioc->scsih_cmds.status,
+		    timeleft);
+		_debug_dump_mf(mpi_request, sizeof(Mpi2SCSIIORequest_t)/4);
+		rc = -EFAULT;
+		if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+			issue_tm = 1;
+		goto issue_target_reset;
+	}
+
+	/* Initialize the CSMI reply */
+	memset(&karg->Status, 0, sizeof(CSMI_SAS_SSP_PASSTHRU_STATUS));
+	karg->Status.bConnectionStatus = CSMI_SAS_OPEN_ACCEPT;
+	karg->Status.bDataPresent = CSMI_SAS_SSP_NO_DATA_PRESENT;
+	karg->Status.bSSPStatus = CSMI_SAS_SSP_STATUS_COMPLETED;
+	karg->Status.bStatus = GOOD;
+	karg->Status.uDataBytes = data_sz;
+
+	/* Process the response */
+	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+		mpi_reply = ioc->scsih_cmds.reply;
+		karg->Status.bStatus = mpi_reply->SCSIStatus;
+		karg->Status.uDataBytes = le32_to_cpu(mpi_reply->TransferCount);
+
+		ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+
+		/* SCSIState is a bit mask, not a plain value */
+		if (mpi_reply->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+			response_length =
+			    le32_to_cpu(mpi_reply->SenseCount) & 0xff;
+			karg->Status.bDataPresent =
+			    CSMI_SAS_SSP_SENSE_DATA_PRESENT;
+			karg->Status.bResponseLength[0] = response_length;
+			memcpy(karg->Status.bResponse, priv_sense,
+			    response_length);
+			dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): "
+			    "[sense_key,asc,asq]: [0x%02x,0x%02x,0x%02x]\n",
+			    ioc->name, __func__, ((u8 *)priv_sense)[2] & 0xf,
+			    ((u8 *)priv_sense)[12], ((u8 *)priv_sense)[13]));
+		} else if (mpi_reply->SCSIState &
+		    MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+			response_length =
+			    sizeof(mpi_reply->ResponseInfo) & 0xff;
+			karg->Status.bDataPresent =
+			    CSMI_SAS_SSP_RESPONSE_DATA_PRESENT;
+			karg->Status.bResponseLength[0] = response_length;
+			for (ii = 0; ii < response_length; ii++) {
+				karg->Status.bResponse[ii] =
+				    ((u8 *)&mpi_reply->
+				    ResponseInfo)[response_length-1-ii];
+			}
+		} else if ((ioc_status != MPI2_IOCSTATUS_SUCCESS) &&
+		    (ioc_status != MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) &&
+		    (ioc_status != MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN)) {
+			printk(MPT3SAS_WARN_FMT "%s(): target addr = 0x%llx, "
+			    "SCSI cmd = 0x%x, IOC Status = 0x%x, IOC LogInfo = "
+			    "0x%x\n", ioc->name, __func__,
+			    (unsigned long long)sas_address,
+			    mpi_request->CDB.CDB32[0],
+			    le16_to_cpu(mpi_reply->IOCStatus),
+			    le32_to_cpu(mpi_reply->IOCLogInfo));
+		}
+	}
+
+#if 0
+	/* Dump the data buffer */
+	printk(KERN_DEBUG "arg = %p\n", arg);
+	printk(KERN_DEBUG "off = %lu\n",
+	    offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER, bDataBuffer));
+	printk(KERN_DEBUG "arg + off = %p\n", arg +
+	    offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER, bDataBuffer));
+	printk(KERN_DEBUG "sof = %lu\n",
+	    sizeof(CSMI_SAS_SSP_PASSTHRU_BUFFER));
+	printk(KERN_DEBUG "arg + sof = %p\n",
+	    arg + sizeof(CSMI_SAS_SSP_PASSTHRU_BUFFER));
+	_debug_dump_mf(data_addr, data_sz / 4);
+#endif
+
+	/* Copy the SSP read payload from the DMA buffer to user memory */
+	if (data_sz && (karg->IoctlHeader.Direction == CSMI_SAS_DATA_READ)) {
+		/* the reply frame may be absent even on completion */
+		if (mpi_reply)
+			karg->Status.uDataBytes =
+			    min(le32_to_cpu(mpi_reply->TransferCount),
+			    data_sz);
+		memcpy(karg->bDataBuffer, data_addr, data_sz);
+	}
+
+	/* Success */
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+
+ issue_target_reset:
+	if (issue_tm) {
+		printk(MPT3SAS_INFO_FMT "issue target reset: handle"
+		    "(0x%04x)\n", ioc->name, sas_device.handle);
+		mpt3sas_scsih_issue_locked_tm(ioc, sas_device.handle, 0, 0, 0,
+		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 0);
+		if (ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) {
+			printk(MPT3SAS_INFO_FMT "target reset completed: handle"
+			    "(0x%04x)\n", ioc->name, sas_device.handle);
+			rc = -EAGAIN;
+		} else {
+			printk(MPT3SAS_INFO_FMT "target reset didn't complete:"
+			    " handle(0x%04x)\n", ioc->name, sas_device.handle);
+			issue_host_reset = 1;
+		}
+		karg->Status.bSSPStatus = CSMI_SAS_SSP_STATUS_RETRY;
+	}
+	if (data_addr)
+		pci_free_consistent(ioc->pdev, data_sz, data_addr, data_dma);
+ set_cmd_status:
+	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ unlock:
+	mutex_unlock(&ioc->scsih_cmds.mutex);
+ out:
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	if (issue_host_reset)
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI SAS STP Passthru command.
+ *
+ * Outputs: None.
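+ * Note: despite sitting beside the SSP path, this routine builds an MPI
+ * SATA passthrough request, so the target must report itself as a
+ * SATA/STP end device.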
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_stp_passthru(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_STP_PASSTHRU_BUFFER *karg) +{ + int rc = 0; + u64 sas_address; + struct _sas_device sas_device; + Mpi2SataPassthroughRequest_t *mpi_request; + Mpi2SataPassthroughReply_t *mpi_reply; + u16 smid; + u16 wait_state_count; + u32 ioc_state; + u16 ioc_status; + u32 sgl_flags; + void *data_addr = NULL; + dma_addr_t data_dma = -1; + u32 data_sz = 0; + unsigned long timeleft; + u8 issue_tm = 0; + u8 issue_host_reset = 0; + u32 in_size; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + + if (ioc->shost_recovery) { + printk(MPT3SAS_INFO_FMT "%s():%d: host reset in progress!\n", + ioc->name, __func__, __LINE__); + rc = -EFAULT; + goto out; + } + + if (_csmi_valid_port_for_cmd(ioc, &karg->IoctlHeader, + karg->Parameters.bPhyIdentifier, karg->Parameters.bPortIdentifier)) + goto out; + + /* data buffer bounds checking */ + in_size = offsetof(CSMI_SAS_STP_PASSTHRU_BUFFER, bDataBuffer) - + sizeof(IOCTL_HEADER) + karg->Parameters.uDataLength; + if (in_size > karg->IoctlHeader.Length) { + printk(MPT3SAS_ERR_FMT "%s():%d: data buffer too small " + "IoctlHeader.Length = %d, Parameters.uDataLength = %d !\n", + ioc->name, __func__, __LINE__, karg->IoctlHeader.Length, + karg->Parameters.uDataLength); + karg->IoctlHeader.ReturnCode = + CSMI_SAS_STATUS_INVALID_PARAMETER; + goto out; + } + + data_sz = karg->Parameters.uDataLength; + + sas_address = + be64_to_cpu(*(__be64 *)karg->Parameters.bDestinationSASAddress); + rc = _csmisas_scsih_sas_device_find_by_sas_address(ioc, sas_address, + &sas_device); + + /* check that this is an STP or SATA target device + */ + if (rc != 0 || (!(sas_device.device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) { + dcsmisasprintk(ioc, printk(KERN_WARNING + ": NOT A SATA DEVICE\n")); + karg->IoctlHeader.ReturnCode = + CSMI_SAS_STATUS_INVALID_PARAMETER; + goto out; + } + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s():%d: status = 0x%x\n", ioc->name, + __func__, __LINE__, ioc->scsih_cmds.status); + rc = -EFAULT; + goto unlock; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + printk(MPT3SAS_ERR_FMT "%s: failed due to ioc not " + "operational\n", ioc->name, __func__); + rc = -EFAULT; + goto set_cmd_status; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + printk(MPT3SAS_INFO_FMT "%s: waiting for operational " + "state(count=%d)\n", ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + printk(MPT3SAS_INFO_FMT "%s: ioc is operational\n", ioc->name, + __func__); + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsih_cb_idx, NULL); + if (!smid) { + printk(MPT3SAS_ERR_FMT "%s():%d: smid = 0\n", ioc->name, + __func__, __LINE__); + rc = -EFAULT; + goto set_cmd_status; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + memset(mpi_request, 0, sizeof(Mpi2SataPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SATA_PASSTHROUGH; + mpi_request->PassthroughFlags = cpu_to_le16(karg->Parameters.uFlags); + mpi_request->DevHandle = cpu_to_le16(sas_device.handle); + 
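+	/* The MPI SATA passthrough frame carries a 20-byte (five-dword)
+	 * host-to-device FIS, copied below verbatim from the CSMI
+	 * request. */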
+	mpi_request->DataLength = cpu_to_le32(data_sz);
+	memcpy(mpi_request->CommandFIS, karg->Parameters.bCommandFIS, 20);
+
+	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+	    MPI2_SGE_FLAGS_END_OF_LIST);
+	if (karg->IoctlHeader.Direction == CSMI_SAS_DATA_WRITE)
+		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
+	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+
+	sgl_flags |= data_sz;
+	if (data_sz > 0) {
+		data_addr = pci_alloc_consistent(ioc->pdev, data_sz,
+		    &data_dma);
+		if (data_addr == NULL) {
+			dcsmisasprintk(ioc, printk(KERN_ERR
+			    ": pci_alloc_consistent: FAILED\n"));
+			karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+			mpt3sas_base_free_smid(ioc, smid);
+			goto set_cmd_status;
+		}
+
+		ioc->base_add_sg_single(&mpi_request->SGL, sgl_flags,
+		    data_dma);
+		if (karg->IoctlHeader.Direction == CSMI_SAS_DATA_WRITE)
+			memcpy(data_addr, karg->bDataBuffer, data_sz);
+	} else
+		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+
+	init_completion(&ioc->scsih_cmds.done);
+	ioc->scsih_cmds.smid = smid;
+	ioc->put_smid_scsi_io(ioc, smid, sas_device.handle);
+	timeleft = wait_for_completion_timeout(&ioc->scsih_cmds.done,
+	    karg->IoctlHeader.Timeout * HZ);
+
+	/* Check the command status */
+	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+		printk(MPT3SAS_ERR_FMT "%s(): failure (target addr = 0x%llx, "
+		    "timeleft = %lu)\n", ioc->name, __func__,
+		    (unsigned long long)sas_address, timeleft);
+		_debug_dump_mf(mpi_request,
+		    sizeof(Mpi2SataPassthroughRequest_t)/4);
+		rc = -EFAULT;
+		if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+			issue_tm = 1;
+		goto issue_target_reset;
+	}
+
+	memset(&karg->Status, 0, sizeof(CSMI_SAS_STP_PASSTHRU_STATUS));
+
+	if ((ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+		dcsmisasprintk(ioc, printk(KERN_DEBUG
+		    ": STP Passthru: no reply from the controller\n"));
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+		goto set_cmd_status;
+	}
+
+	mpi_reply = ioc->scsih_cmds.reply;
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS &&
+	    ioc_status != MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN) {
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+		dcsmisasprintk(ioc, printk(KERN_DEBUG ": STP Passthru: "));
+		dcsmisasprintk(ioc, printk("IOCStatus=0x%X IOCLogInfo=0x%X "
+		    "SASStatus=0x%X\n", le16_to_cpu(mpi_reply->IOCStatus),
+		    le32_to_cpu(mpi_reply->IOCLogInfo),
+		    mpi_reply->SASStatus));
+	}
+
+	karg->Status.bConnectionStatus =
+	    _map_sas_status_to_csmi(mpi_reply->SASStatus);
+	memcpy(karg->Status.bStatusFIS, mpi_reply->StatusFIS, 20);
+
+	memset(karg->Status.uSCR, 0, 64);
+	karg->Status.uSCR[0] = le32_to_cpu(mpi_reply->StatusControlRegisters);
+
+	if (data_sz && (karg->IoctlHeader.Direction == CSMI_SAS_DATA_READ)) {
+		karg->Status.uDataBytes =
+		    min(le32_to_cpu(mpi_reply->TransferCount), data_sz);
+		memcpy(karg->bDataBuffer, data_addr, data_sz);
+	}
+
+	/* Success */
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+
+ issue_target_reset:
+	if (issue_tm) {
+		printk(MPT3SAS_INFO_FMT "issue target reset: handle"
+		    "(0x%04x)\n", ioc->name, sas_device.handle);
+		mpt3sas_scsih_issue_locked_tm(ioc, sas_device.handle, 0, 0, 0,
+		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 0);
+		if (ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) {
+			printk(MPT3SAS_INFO_FMT "target reset completed: handle"
+			    "(0x%04x)\n", ioc->name, sas_device.handle);
+			rc = -EAGAIN;
+		} else {
+			printk(MPT3SAS_INFO_FMT "target reset didn't complete:"
+			    " handle(0x%04x)\n", ioc->name, sas_device.handle);
+			issue_host_reset = 1;
+		}
+	}
+
+ set_cmd_status:
+	/* freed here so that every exit path, including the timeout and
+	 * no-reply paths above, releases the DMA buffer exactly once */
+	if (data_addr)
+		pci_free_consistent(ioc->pdev, data_sz, (u8 *)data_addr,
+		    data_dma);
+	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ unlock:
+	mutex_unlock(&ioc->scsih_cmds.mutex);
+ out:
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	if (issue_host_reset)
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI SAS Firmware Download command.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ *         -EFAULT if data unavailable
+ *         -ENODEV if no such device/adapter
+ */
+static int
+_csmisas_firmware_download(struct MPT3SAS_ADAPTER *ioc,
+	CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER *karg)
+{
+	int rc = 0;
+	int retval;
+	pMpi2FWImageHeader_t pFwHeader = NULL;
+
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+	karg->Information.usStatus = CSMI_SAS_FWD_REJECT;
+	karg->Information.usSeverity = CSMI_SAS_FWD_ERROR;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name,
+	    __func__));
+
+	/* check the incoming frame */
+	if ((karg->Information.uBufferLength +
+	    sizeof(CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER)) >
+	    karg->IoctlHeader.Length) {
+		karg->IoctlHeader.ReturnCode =
+		    CSMI_SAS_STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+
+	/* reject requests for a user-specified soft reset or for fw
+	 * validation only; neither is supported */
+	if (karg->Information.uDownloadFlags &
+	    (CSMI_SAS_FWD_SOFT_RESET | CSMI_SAS_FWD_VALIDATE)) {
+		karg->IoctlHeader.ReturnCode =
+		    CSMI_SAS_STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+
+	/* get a pointer to our bDataBuffer */
+	pFwHeader = (pMpi2FWImageHeader_t)karg->bDataBuffer;
+
+	/* verify the firmware signature */
+	if (!((pFwHeader->Signature0 == MPI2_FW_HEADER_SIGNATURE0) &&
+	    (pFwHeader->Signature1 == MPI2_FW_HEADER_SIGNATURE1) &&
+	    (pFwHeader->Signature2 == MPI2_FW_HEADER_SIGNATURE2)))
+		goto out;
+
+	if (_ctl_do_fw_download(ioc, karg->bDataBuffer,
+	    karg->Information.uBufferLength) != 0) {
+		printk(KERN_ERR "_ctl_do_fw_download failed\n");
+		karg->Information.usSeverity = CSMI_SAS_FWD_FATAL;
+		karg->Information.usStatus = CSMI_SAS_FWD_FAILED;
+		goto out;
+	} else {
+		printk(KERN_INFO "_ctl_do_fw_download succeeded\n");
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+		karg->Information.usSeverity = CSMI_SAS_FWD_INFORMATION;
+		karg->Information.usStatus = CSMI_SAS_FWD_SUCCESS;
+	}
+
+	/* reset HBA to load new fw if desired */
+	if (karg->Information.uDownloadFlags & CSMI_SAS_FWD_HARD_RESET) {
+		scsi_block_requests(ioc->shost);
+		retval = mpt3sas_base_hard_reset_handler(ioc,
+		    FORCE_BIG_HAMMER);
+		scsi_unblock_requests(ioc->shost);
+		printk(MPT3SAS_INFO_FMT "%s: host reset %s.\n", ioc->name,
+		    __func__, (retval < 0) ? "failed" : "succeeded");
+		if (retval < 0) {
+			karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+			karg->Information.usSeverity = CSMI_SAS_FWD_FATAL;
+			karg->Information.usStatus = CSMI_SAS_FWD_FAILED;
+		}
+		goto out;
+	}
+
+ out:
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI SAS Get RAID Info command.
+ * Note: This function is required for the SAS agent (cmasasd)
+ * to correctly process physically removed drives from DotHill
+ * enclosures due to the behaviour of the DH FW. The function
+ * is required even if the controller or FW don't support RAID.
+ *
+ * Outputs: None.
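+ * The reply payload is simply zeroed, which reports zero RAID sets to
+ * the caller.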
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_raid_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_INFO_BUFFER *karg) +{ + int rc = 0; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + /* Don't need to return any data so just clear the struct */ + memset(&karg->Information, 0, sizeof(CSMI_SAS_RAID_INFO)); + + /* Success */ + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS Get RAID Config command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_raid_config(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_CONFIG_BUFFER *karg) +{ + int rc = 0; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS Get RAID Features command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_raid_features(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_FEATURES_BUFFER *karg) +{ + int rc = 0; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS Set RAID Control command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_set_raid_control(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_CONTROL_BUFFER *karg) +{ + int rc = 0; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS Get Raid Element. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_raid_element(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_ELEMENT_BUFFER *karg) +{ + int rc = 0; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** + * Prototype Routine for the CSMI SAS Set Raid Operation + * + * Outputs: None. 
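+ * Note: like the other RAID stubs above, this currently returns
+ * CSMI_SAS_STATUS_FAILED without acting on the request.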
+ * Return: 0 if successful
+ *         -EFAULT if data unavailable
+ *         -ENODEV if no such device/adapter
+ */
+static int
+_csmisas_set_raid_operation(struct MPT3SAS_ADAPTER *ioc,
+	CSMI_SAS_RAID_SET_OPERATION_BUFFER *karg)
+{
+	int rc = 0;
+
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name,
+	    __func__));
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI SAS Task Management Config command.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ *         -EFAULT if data unavailable
+ *         -ENODEV if no such device/adapter
+ */
+static int
+_csmisas_task_managment(struct MPT3SAS_ADAPTER *ioc,
+	CSMI_SAS_SSP_TASK_IU_BUFFER *karg)
+{
+	struct _sas_device sas_device;
+	int rc = 0;
+	u16 handle = 0;
+	u8 task_type;
+	u16 task_mid = 0;
+	struct scsi_cmnd *scmd = NULL;
+
+	memset(&karg->Status, 0, sizeof(CSMI_SAS_SSP_PASSTHRU_STATUS));
+	karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED;
+
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name,
+	    __func__));
+
+	switch (karg->Parameters.uInformation) {
+	case CSMI_SAS_SSP_TEST:
+		dcsmisasprintk(ioc, printk(KERN_DEBUG
+		    "TM request for test purposes\n"));
+		break;
+	case CSMI_SAS_SSP_EXCEEDED:
+		dcsmisasprintk(ioc, printk(KERN_DEBUG
+		    "TM request due to timeout\n"));
+		break;
+	case CSMI_SAS_SSP_DEMAND:
+		dcsmisasprintk(ioc, printk(KERN_DEBUG
+		    "TM request demanded by app\n"));
+		break;
+	case CSMI_SAS_SSP_TRIGGER:
+		dcsmisasprintk(ioc, printk(KERN_DEBUG
+		    "TM request sent to trigger event\n"));
+		break;
+	}
+
+	if (karg->Parameters.bHostIndex != ioc->shost->host_no) {
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS;
+		goto out;
+	}
+
+	/* search for device based on bus/target */
+	rc = _csmisas_sas_device_find_by_id(ioc, karg->Parameters.bTargetId,
+	    karg->Parameters.bPathId, &sas_device);
+	if (rc != 0) {
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS;
+		goto out;
+	}
+
+	handle = sas_device.handle;
+
+	/* reject contradictory requests: a task IU and a hard reset
+	 * sequence cannot both be asked for
+	 */
+	if ((karg->Parameters.uFlags & CSMI_SAS_TASK_IU) &&
+	    (karg->Parameters.uFlags & CSMI_SAS_HARD_RESET_SEQUENCE))
+		goto out;
+
+	if (karg->Parameters.uFlags & CSMI_SAS_TASK_IU) {
+		switch (karg->Parameters.bTaskManagementFunction) {
+
+		case CSMI_SAS_SSP_ABORT_TASK:
+			if (_csmisas_scsi_lookup_find_by_tag(ioc,
+			    karg->Parameters.uQueueTag, &task_mid,
+			    &scmd)) {
+				karg->IoctlHeader.ReturnCode =
+				    CSMI_SAS_STATUS_SUCCESS;
+				karg->Status.bSSPStatus =
+				    CSMI_SAS_SSP_STATUS_NO_TAG;
+				goto out;
+			}
+			task_type = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
+			break;
+		case CSMI_SAS_SSP_ABORT_TASK_SET:
+			task_type = MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET;
+			break;
+		case CSMI_SAS_SSP_CLEAR_TASK_SET:
+			task_type = MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET;
+			break;
+		case CSMI_SAS_SSP_LOGICAL_UNIT_RESET:
+			task_type =
+			    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET;
+			break;
+		case CSMI_SAS_SSP_CLEAR_ACA:
+			task_type =
+			    MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA;
+			break;
+		case CSMI_SAS_SSP_QUERY_TASK:
+			if (_csmisas_scsi_lookup_find_by_tag(ioc,
+			    karg->Parameters.uQueueTag, &task_mid,
+			    &scmd)) {
+				karg->IoctlHeader.ReturnCode =
+				    CSMI_SAS_STATUS_SUCCESS;
+				karg->Status.bSSPStatus =
+				    CSMI_SAS_SSP_STATUS_NO_TAG;
+				goto out;
+			}
+			task_type =
+			    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK;
+			break;
+		default:
+			goto out;
+		}
+	} else if (karg->Parameters.uFlags & CSMI_SAS_HARD_RESET_SEQUENCE)
+		task_type =
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + else + goto out; + + if (mpt3sas_scsih_issue_locked_tm(ioc, handle, karg->Parameters.bPathId, + karg->Parameters.bTargetId, karg->Parameters.bLun, task_type, + task_mid, karg->IoctlHeader.Timeout, + 0) + == SUCCESS) { + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + karg->Status.bSSPStatus = CSMI_SAS_SSP_STATUS_COMPLETED; + } else { + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + karg->Status.bSSPStatus = CSMI_SAS_SSP_STATUS_FATAL_ERROR; + } + + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/** Prototype Routine for the CSMI SAS Phy Control command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_phy_control(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_PHY_CONTROL_BUFFER *karg) +{ + int rc = 0; + int sz; + Mpi2SasIoUnitControlReply_t sas_iounit_ctr_reply; + Mpi2SasIoUnitControlRequest_t sas_iounit_ctr; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2ConfigReply_t sas_iounit_pg1_reply; + u32 ioc_status; + u8 phy_number; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + phy_number = karg->bPhyIdentifier; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + if (_csmi_valid_phy(ioc, &karg->IoctlHeader, phy_number)) + goto out; + + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &sas_iounit_pg1_reply, + sas_iounit_pg1, sz))) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + ioc_status = le16_to_cpu(sas_iounit_pg1_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + switch (karg->uFunction) { + case CSMI_SAS_PC_LINK_RESET: + case CSMI_SAS_PC_HARD_RESET: + if ((karg->uLinkFlags & CSMI_SAS_PHY_ACTIVATE_CONTROL) && + (karg->usLengthOfControl >= sizeof(CSMI_SAS_PHY_CONTROL)) && + (karg->bNumberOfControls > 0)) { + if (karg->Control[0].bRate == + CSMI_SAS_LINK_RATE_1_5_GBPS) { + sas_iounit_pg1->PhyData[phy_number]. + MaxMinLinkRate = + MPI2_SASIOUNIT1_MAX_RATE_1_5 | + MPI2_SASIOUNIT1_MIN_RATE_1_5; + } else if (karg->Control[0].bRate == + CSMI_SAS_LINK_RATE_3_0_GBPS) { + sas_iounit_pg1->PhyData[phy_number]. + MaxMinLinkRate = + MPI2_SASIOUNIT1_MAX_RATE_3_0 | + MPI2_SASIOUNIT1_MIN_RATE_3_0; + } else if (karg->Control[0].bRate == + CSMI_SAS_LINK_RATE_6_0_GBPS) { + sas_iounit_pg1->PhyData[phy_number]. + MaxMinLinkRate = + MPI2_SASIOUNIT1_MAX_RATE_6_0 | + MPI2_SASIOUNIT1_MIN_RATE_6_0; + } else { + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()! 
Unsupported "
+				    "Link Rate\n", ioc->name,
+				    __FILE__, __LINE__, __func__);
+				goto out;
+			}
+
+			sas_iounit_pg1->PhyData[phy_number].PhyFlags &=
+			    ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+			mpt3sas_config_set_sas_iounit_pg1(ioc,
+			    &sas_iounit_pg1_reply, sas_iounit_pg1, sz);
+
+			memset(&sas_iounit_ctr, 0,
+			    sizeof(Mpi2SasIoUnitControlRequest_t));
+			sas_iounit_ctr.Function =
+			    MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+			sas_iounit_ctr.Operation = karg->uFunction ==
+			    CSMI_SAS_PC_LINK_RESET ? MPI2_SAS_OP_PHY_LINK_RESET
+			    : MPI2_SAS_OP_PHY_HARD_RESET;
+			sas_iounit_ctr.PhyNum = phy_number;
+
+			if ((mpt3sas_base_sas_iounit_control(ioc,
+			    &sas_iounit_ctr_reply, &sas_iounit_ctr))) {
+				printk(MPT3SAS_ERR_FMT
+				    "failure at %s:%d/%s()!\n", ioc->name,
+				    __FILE__, __LINE__, __func__);
+				goto out;
+			}
+			karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+		}
+		break;
+	case CSMI_SAS_PC_PHY_DISABLE:
+		if (karg->usLengthOfControl || karg->bNumberOfControls) {
+			karg->IoctlHeader.ReturnCode =
+			    CSMI_SAS_STATUS_INVALID_PARAMETER;
+			break;
+		}
+		sas_iounit_pg1->PhyData[phy_number].PhyFlags |=
+		    MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+		mpt3sas_config_set_sas_iounit_pg1(ioc,
+		    &sas_iounit_pg1_reply, sas_iounit_pg1, sz);
+
+		memset(&sas_iounit_ctr, 0,
+		    sizeof(Mpi2SasIoUnitControlRequest_t));
+		sas_iounit_ctr.Function =
+		    MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+		sas_iounit_ctr.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
+		sas_iounit_ctr.PhyNum = phy_number;
+
+		if ((mpt3sas_base_sas_iounit_control(ioc,
+		    &sas_iounit_ctr_reply, &sas_iounit_ctr))) {
+			printk(MPT3SAS_ERR_FMT
+			    "failure at %s:%d/%s()!\n", ioc->name,
+			    __FILE__, __LINE__, __func__);
+			goto out;
+		}
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+		break;
+	case CSMI_SAS_PC_GET_PHY_SETTINGS:
+		if (karg->usLengthOfControl || karg->bNumberOfControls) {
+			karg->IoctlHeader.ReturnCode =
+			    CSMI_SAS_STATUS_INVALID_PARAMETER;
+			break;
+		}
+		/* the caller's buffer must have room for all six settings
+		 * returned below */
+		if (karg->IoctlHeader.Length
+		    < offsetof(CSMI_SAS_PHY_CONTROL_BUFFER, Control)
+		    + (6 * sizeof(CSMI_SAS_PHY_CONTROL))) {
+			karg->IoctlHeader.ReturnCode =
+			    CSMI_SAS_STATUS_INVALID_PARAMETER;
+			break;
+		}
+		karg->usLengthOfControl = sizeof(CSMI_SAS_PHY_CONTROL);
+		karg->bNumberOfControls = 6;
+		karg->Control[0].bType = CSMI_SAS_SAS;
+		karg->Control[0].bRate = CSMI_SAS_LINK_RATE_1_5_GBPS;
+		karg->Control[1].bType = CSMI_SAS_SAS;
+		karg->Control[1].bRate = CSMI_SAS_LINK_RATE_3_0_GBPS;
+		karg->Control[2].bType = CSMI_SAS_SAS;
+		karg->Control[2].bRate = CSMI_SAS_LINK_RATE_6_0_GBPS;
+		karg->Control[3].bType = CSMI_SAS_SATA;
+		karg->Control[3].bRate = CSMI_SAS_LINK_RATE_1_5_GBPS;
+		karg->Control[4].bType = CSMI_SAS_SATA;
+		karg->Control[4].bRate = CSMI_SAS_LINK_RATE_3_0_GBPS;
+		karg->Control[5].bType = CSMI_SAS_SATA;
+		karg->Control[5].bRate = CSMI_SAS_LINK_RATE_6_0_GBPS;
+		karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS;
+		break;
+	default:
+		break;
+	}
+
+ out:
+	kfree(sas_iounit_pg1);
+	dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name,
+	    __func__));
+	return rc;
+}
+
+/**
+ * Prototype Routine for the CSMI SAS Get Connector info command.
+ *
+ * Outputs: None.
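+ * Note: connector data comes from Manufacturing Page 7; entries for
+ * phys without page data are left as CSMI_SAS_CON_UNKNOWN.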
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_connector_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_CONNECTOR_INFO_BUFFER *karg) +{ + int rc = 0; + int i; + u16 sz; + Mpi2ConfigReply_t mpi_reply; + Mpi2ManufacturingPage7_t *manu_pg7; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + sz = offsetof(Mpi2ManufacturingPage7_t, ConnectorInfo) + + (ioc->sas_hba.num_phys * sizeof(MPI2_MANPAGE7_CONNECTOR_INFO)); + + manu_pg7 = kzalloc(sz, GFP_KERNEL); + if (!manu_pg7) { + printk(KERN_ERR "%s@%d::%s() - Unable to alloc @ %p\n", + __FILE__, __LINE__, __func__, manu_pg7); + goto out; + } + + /* initialize the array */ + for (i = 0; i < 32; i++) { + karg->Reference[i].uPinout = CSMI_SAS_CON_UNKNOWN; + strcpy(karg->Reference[i].bConnector, ""); + karg->Reference[i].bLocation = CSMI_SAS_CON_UNKNOWN; + } + + if (!mpt3sas_config_get_manufacturing_pg7(ioc, &mpi_reply, manu_pg7, + sz)) { + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + karg->Reference[i].uPinout = + le32_to_cpu(manu_pg7->ConnectorInfo[i].Pinout); + strncpy(karg->Reference[i].bConnector, + manu_pg7->ConnectorInfo[i].Connector, 16); + karg->Reference[i].bLocation = + manu_pg7->ConnectorInfo[i].Location; + } + } + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + kfree(manu_pg7); + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} + +/* supporting routines for CSMI SAS Get location command. */ +static void +_csmisas_fill_location_data(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device, u8 opcode, + CSMI_SAS_LOCATION_IDENTIFIER *location_ident) +{ + + location_ident->bLocationFlags |= CSMI_SAS_LOCATE_SAS_ADDRESS_VALID; + *(__be64 *)location_ident->bSASAddress = + cpu_to_be64(sas_device->sas_address); + + location_ident->bLocationFlags |= CSMI_SAS_LOCATE_SAS_LUN_VALID; + memset(location_ident->bSASLun, 0, sizeof(location_ident->bSASLun)); + + location_ident->bLocationFlags |= + CSMI_SAS_LOCATE_ENCLOSURE_IDENTIFIER_VALID; + if (sas_device->enclosure_logical_id) + *(__be64 *)location_ident->bEnclosureIdentifier = + cpu_to_be64(sas_device->enclosure_logical_id); + else + memcpy(location_ident->bEnclosureIdentifier, "Internal", 8); + + location_ident->bLocationFlags |= CSMI_SAS_LOCATE_ENCLOSURE_NAME_VALID; + strcpy(location_ident->bEnclosureName, "Not Supported"); + + location_ident->bLocationFlags |= CSMI_SAS_LOCATE_LOCATION_STATE_VALID; + location_ident->bLocationState = CSMI_SAS_LOCATE_UNKNOWN; + + location_ident->bLocationFlags |= CSMI_SAS_LOCATE_BAY_IDENTIFIER_VALID; + location_ident->bBayIdentifier = sas_device->slot; +} + +/* supporting routines for CSMI SAS Get location command. 
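+ * _csmisas_fill_location_data_raid() below emits one location entry per
+ * member drive of the volume matching the supplied RAID handle.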
*/ +static int +_csmisas_fill_location_data_raid(struct MPT3SAS_ADAPTER *ioc, u16 handle, + CSMI_SAS_GET_LOCATION_BUFFER *karg) +{ + struct _raid_device raid_device; + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + rc = _csmisas_raid_device_find_by_handle(ioc, handle, &raid_device); + if (rc) + return -ENODEV; + + karg->bNumberOfLocationIdentifiers = 0; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (sas_device->volume_wwid != raid_device.wwid) + continue; + _csmisas_fill_location_data(ioc, sas_device, karg->bIdentify, + &karg->Location[karg->bNumberOfLocationIdentifiers]); + karg->bNumberOfLocationIdentifiers++; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return 0; +} + +/** + * Prototype Routine for the CSMI SAS Get location command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +_csmisas_get_location(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_GET_LOCATION_BUFFER *karg) +{ + int rc = 0; + struct _sas_device sas_device; + struct _raid_device raid_device; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): enter\n", ioc->name, + __func__)); + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; + if (karg->bLengthOfLocationIdentifier != + sizeof(CSMI_SAS_LOCATION_IDENTIFIER)) + goto out; + + /* invalid for channel > 1 */ + if (karg->bPathId > 1) { + karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS; + goto out; + } + + if (karg->bHostIndex != ioc->shost->host_no) { + karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS; + goto out; + } + + /* volumes are on channel = 1 */ + if (karg->bPathId == 1) { + rc = _csmisas_raid_device_find_by_id(ioc, karg->bTargetId, + karg->bPathId, &raid_device); + if (rc != 0) { + karg->IoctlHeader.ReturnCode = + CSMI_SAS_NO_DEVICE_ADDRESS; + goto out; + } + if (_csmisas_fill_location_data_raid(ioc, raid_device.handle, + karg) == 0) + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + else + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; + goto out; + } + + /* bare drives are on channel = 0 */ + + rc = _csmisas_sas_device_find_by_id(ioc, karg->bTargetId, karg->bPathId, + &sas_device); + if (rc != 0) { + karg->IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS; + goto out; + } + + /* make sure there's enough room to populate the Location[] struct */ + if ((karg->IoctlHeader.Length - offsetof(CSMI_SAS_GET_LOCATION_BUFFER, + Location)) < sizeof(CSMI_SAS_LOCATION_IDENTIFIER)) + goto out; + + karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; + karg->bNumberOfLocationIdentifiers = 1; + karg->Location[0].bLocationFlags = 0; + _csmisas_fill_location_data(ioc, &sas_device, karg->bIdentify, + &karg->Location[0]); + + out: + dcsmisasprintk(ioc, printk(MPT3SAS_INFO_FMT "%s(): exit\n", ioc->name, + __func__)); + return rc; +} diff --git a/drivers/scsi/mpt3sas/csmi/csmisas.h b/drivers/scsi/mpt3sas/csmi/csmisas.h new file mode 100755 index 000000000000..364b06ff2631 --- /dev/null +++ b/drivers/scsi/mpt3sas/csmi/csmisas.h @@ -0,0 +1,1861 @@ +/************************************************************************** + +Module Name: + + CSMISAS.H + + +Abstract: + + This file contains constants and data structure definitions used by drivers + that support the Common Storage Management Interface specification for + SAS or SATA in either the Windows or Linux. 
+ + This should be considered as a reference implementation only. Changes may + be necessary to accommodate a specific build environment or target OS. + +Revision History: + + 001 SEF 8/12/03 Initial release. + 002 SEF 8/20/03 Cleanup to match documentation. + 003 SEF 9/12/03 Additional cleanup, created combined header + 004 SEF 9/23/03 Changed base types to match linux defaults + Added RAID signature + Added bControllerFlags to CSMI_SAS_CNTLR_CONFIG + Changed CSMI_SAS_BEGIN_PACK to 8 for common structures + Fixed other typos identified in first compilation test + 005 SEF 10/03/03 Additions to match first version of CSMI document + 006 SEF 10/14/03 Fixed typedef struct _CSMI_SAS_SMP_PASSTHRU_BUFFER + Added defines for bConnectionRate + 007 SEF 10/15/03 Added Firmware Download Control Code and support + Added CSMI revision support + 008 SEF 10/30/03 No functional change, just updated version to track + spec changes + 009 SEF 12/09/03 No functional change, just updated version to track + spec changes + 010 SEF 3/11/04 Fixed typedef struct CSMI_SAS_RAID_DRIVES to include the + bFirmware member that is defined in the spec, but + was missing in this file, + added CC_CSMI_SAS_TASK_MANAGEMENT + 011 SEF 4/02/04 No functional change, added comment line before + CC_CSMI_SAS_TASK_MANAGEMENT + 012 SEF 4/16/04 Added IOControllerNumber to linux header, + Modified linux control codes to have upper word of + 0xCC77.... to indicate CSMI version 77 + Added bSignalClass to CC_CSMI_SET_PHY_INFO + Added CC_CSMI_SAS_PHY_CONTROL support + 013 SEF 5/14/04 Added CC_CSMI_SAS_GET_CONNECTOR_INFO support + 014 SEF 5/24/04 No functional change, just updated version to track spec + changes + 015 SEF 6/16/04 changed bPinout to uPinout to reflect proper size, + changed width of bLocation defines to reflect size + 016 SEF 6/17/04 changed bLengthOfControls in CSMI_SAS_PHY_CONTROL + to be proper size + 017 SEF 9/17/04 added CSMI_SAS_SATA_PORT_SELECTOR, + CSMI_SAS_LINK_VIRTUAL, CSMI_SAS_CON_NOT_PRESENT, and + CSMI_SAS_CON_NOT_CONNECTED + 018 SEF 9/20/04 added CSMI_SAS_PHY_USER_PATTERN, + changed definition of CSMI_SAS_PHY_FIXED_PATTERN to not + conflict with activate definition + 019 SEF 12/06/04 added CSMI_SAS_GET_LOCATION + added bSSPStatus to CSMI_SAS_SSP_PASSTHRU_STATUS + structure + 020 SEF 5/25/05 added CSMI_SAS_PHY_VIRTUAL_SMP, and changes to + CSMI_SAS_GET_LOCATION + 021 SEF 11/03/05 added new RAID creation functionality + 022 SEF 2/01/06 corrected typo bNegotitiatedLInkRate + Added two more RAID_TYPES, 7 and 8 + 023 SEF 4/04/06 added CSMI_RAID_TYPE_1E + changed structures that contained surface scan + to priority approach rather than time, causes + 0.89 to incompatible with 0.87, so a version + check is necessary when interpreting the + raid structures + Added netware section + 024 DRG 5/22/06 Added uFailureCode to CSMI_SAS_RAID_CONFIG and + CSMI_SAS_RAID_FEATURES + Changed __u64 fields to high and low __u32 fields in + order to avoid backward compatibility issues with + packing and alignment. + Fixed alignment problem in CSMI_SAS_RAID_DRIVES. + Added CSMI_SAS_CNTLR_SMART_ARRAY to uControllerFlags + Reassigned the value of CSMI_SAS_CNTLR_RAID_CFG_SUPPORT + to avoid a conflict. + 025 PSB 09/29/10 Added 6Gig and 12Gig SAS. + +**************************************************************************/ + +#ifndef _CSMI_SAS_H_ +#define _CSMI_SAS_H_ + +// CSMI Specification Revision, the intent is that all versions of the +// specification will be backward compatible after the 1.00 release. 
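+// (This copy of the header reports CSMI revision 1.02; see the
+// CSMI_MAJOR_REVISION/CSMI_MINOR_REVISION defines below.)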
+// Major revision number, corresponds to xxxx. of CSMI specification
+// Minor revision number, corresponds to .xxxx of CSMI specification
+#define CSMI_MAJOR_REVISION 1
+#define CSMI_MINOR_REVISION 02
+
+/*************************************************************************/
+/* PATCHES FOR TYPOS                                                     */
+/*************************************************************************/
+
+#define bNegotitiatedLInkRate bNegotiatedLinkRate
+
+/*************************************************************************/
+/* TARGET OS LINUX SPECIFIC CODE                                         */
+/*************************************************************************/
+
+// EDM #ifdef _linux
+#ifdef __KERNEL__
+
+// Linux base types
+
+#include <linux/types.h>
+
+#define __i8 char
+
+// pack definition
+
+// EDM #define CSMI_SAS_BEGIN_PACK(x) pack(x)
+// EDM #define CSMI_SAS_END_PACK pack()
+
+// IOCTL Control Codes
+// (IoctlHeader.ControlCode)
+
+// Control Codes prior to 0.77
+
+// Control Codes requiring CSMI_ALL_SIGNATURE
+
+// #define CC_CSMI_SAS_GET_DRIVER_INFO 0x12345678
+// #define CC_CSMI_SAS_GET_CNTLR_CONFIG 0x23456781
+// #define CC_CSMI_SAS_GET_CNTLR_STATUS 0x34567812
+// #define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 0x92345678
+
+// Control Codes requiring CSMI_RAID_SIGNATURE
+
+// #define CC_CSMI_SAS_GET_RAID_INFO 0x45678123
+// #define CC_CSMI_SAS_GET_RAID_CONFIG 0x56781234
+
+// Control Codes requiring CSMI_SAS_SIGNATURE
+
+// #define CC_CSMI_SAS_GET_PHY_INFO 0x67812345
+// #define CC_CSMI_SAS_SET_PHY_INFO 0x78123456
+// #define CC_CSMI_SAS_GET_LINK_ERRORS 0x81234567
+// #define CC_CSMI_SAS_SMP_PASSTHRU 0xA1234567
+// #define CC_CSMI_SAS_SSP_PASSTHRU 0xB1234567
+// #define CC_CSMI_SAS_STP_PASSTHRU 0xC1234567
+// #define CC_CSMI_SAS_GET_SATA_SIGNATURE 0xD1234567
+// #define CC_CSMI_SAS_GET_SCSI_ADDRESS 0xE1234567
+// #define CC_CSMI_SAS_GET_DEVICE_ADDRESS 0xF1234567
+// #define CC_CSMI_SAS_TASK_MANAGEMENT 0xA2345678
+
+// Control Codes for 0.77 and later
+
+// Control Codes requiring CSMI_ALL_SIGNATURE
+
+#define CC_CSMI_SAS_GET_DRIVER_INFO 0xCC770001
+#define CC_CSMI_SAS_GET_CNTLR_CONFIG 0xCC770002
+#define CC_CSMI_SAS_GET_CNTLR_STATUS 0xCC770003
+#define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 0xCC770004
+
+// Control Codes requiring CSMI_RAID_SIGNATURE
+
+#define CC_CSMI_SAS_GET_RAID_INFO 0xCC77000A
+#define CC_CSMI_SAS_GET_RAID_CONFIG 0xCC77000B
+#define CC_CSMI_SAS_GET_RAID_FEATURES 0xCC77000C
+#define CC_CSMI_SAS_SET_RAID_CONTROL 0xCC77000D
+#define CC_CSMI_SAS_GET_RAID_ELEMENT 0xCC77000E
+#define CC_CSMI_SAS_SET_RAID_OPERATION 0xCC77000F
+
+// Control Codes requiring CSMI_SAS_SIGNATURE
+
+#define CC_CSMI_SAS_GET_PHY_INFO 0xCC770014
+#define CC_CSMI_SAS_SET_PHY_INFO 0xCC770015
+#define CC_CSMI_SAS_GET_LINK_ERRORS 0xCC770016
+#define CC_CSMI_SAS_SMP_PASSTHRU 0xCC770017
+#define CC_CSMI_SAS_SSP_PASSTHRU 0xCC770018
+#define CC_CSMI_SAS_STP_PASSTHRU 0xCC770019
+#define CC_CSMI_SAS_GET_SATA_SIGNATURE 0xCC770020
+#define CC_CSMI_SAS_GET_SCSI_ADDRESS 0xCC770021
+#define CC_CSMI_SAS_GET_DEVICE_ADDRESS 0xCC770022
+#define CC_CSMI_SAS_TASK_MANAGEMENT 0xCC770023
+#define CC_CSMI_SAS_GET_CONNECTOR_INFO 0xCC770024
+#define CC_CSMI_SAS_GET_LOCATION 0xCC770025
+
+// Control Codes requiring CSMI_PHY_SIGNATURE
+
+#define CC_CSMI_SAS_PHY_CONTROL 0xCC77003C
+
+// EDM #pragma CSMI_SAS_BEGIN_PACK(8)
+#pragma pack(8)
+
+// IOCTL_HEADER
+typedef struct _IOCTL_HEADER {
+	__u32 IOControllerNumber;
+	__u32 Length;
+	__u32 ReturnCode;
+	__u32 Timeout;
+	__u16 Direction;
+} IOCTL_HEADER,
+  *PIOCTL_HEADER;
+
+// EDM #pragma CSMI_SAS_END_PACK
+#pragma pack()
+
+#endif
+
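+// Illustrative only: every CSMI buffer leads with IOCTL_HEADER, and
+// IoctlHeader.Length counts the bytes that follow the header (the same
+// convention the passthru bounds checks in this driver rely on). A
+// minimal user-space fill for a driver-info query could look like:
+//
+//	CSMI_SAS_DRIVER_INFO_BUFFER buf;
+//
+//	memset(&buf, 0, sizeof(buf));
+//	buf.IoctlHeader.Length = sizeof(buf) - sizeof(IOCTL_HEADER);
+//	buf.IoctlHeader.Timeout = CSMI_ALL_TIMEOUT;
+//	buf.IoctlHeader.Direction = CSMI_SAS_DATA_READ;
+//	/* then issue CC_CSMI_SAS_GET_DRIVER_INFO through the driver's
+//	 * ioctl entry point; the device node and exact ioctl plumbing
+//	 * are driver-specific and not defined by this header. */
+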
+/*************************************************************************/
+/* TARGET OS WINDOWS SPECIFIC CODE                                       */
+/*************************************************************************/
+
+#ifdef _WIN32
+
+// windows IOCTL definitions
+
+#ifndef _NTDDSCSIH_
+#include <ntddscsi.h>
+#endif
+
+// pack definition
+
+#if defined _MSC_VER
+	#define CSMI_SAS_BEGIN_PACK(x) pack(push,x)
+	#define CSMI_SAS_END_PACK pack(pop)
+#elif defined __BORLANDC__
+	#define CSMI_SAS_BEGIN_PACK(x) option -a##x
+	#define CSMI_SAS_END_PACK option -a.
+#else
+	#error "CSMISAS.H - Must externally define a pack compiler designator."
+#endif
+
+// base types
+
+#define __u8 unsigned char
+#define __u16 unsigned short
+#define __u32 unsigned long
+#define __u64 unsigned __int64
+
+#define __i8 char
+
+// IOCTL Control Codes
+// (IoctlHeader.ControlCode)
+
+// Control Codes requiring CSMI_ALL_SIGNATURE
+
+#define CC_CSMI_SAS_GET_DRIVER_INFO 1
+#define CC_CSMI_SAS_GET_CNTLR_CONFIG 2
+#define CC_CSMI_SAS_GET_CNTLR_STATUS 3
+#define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 4
+
+// Control Codes requiring CSMI_RAID_SIGNATURE
+
+#define CC_CSMI_SAS_GET_RAID_INFO 10
+#define CC_CSMI_SAS_GET_RAID_CONFIG 11
+#define CC_CSMI_SAS_GET_RAID_FEATURES 12
+#define CC_CSMI_SAS_SET_RAID_CONTROL 13
+#define CC_CSMI_SAS_GET_RAID_ELEMENT 14
+#define CC_CSMI_SAS_SET_RAID_OPERATION 15
+
+// Control Codes requiring CSMI_SAS_SIGNATURE
+
+#define CC_CSMI_SAS_GET_PHY_INFO 20
+#define CC_CSMI_SAS_SET_PHY_INFO 21
+#define CC_CSMI_SAS_GET_LINK_ERRORS 22
+#define CC_CSMI_SAS_SMP_PASSTHRU 23
+#define CC_CSMI_SAS_SSP_PASSTHRU 24
+#define CC_CSMI_SAS_STP_PASSTHRU 25
+#define CC_CSMI_SAS_GET_SATA_SIGNATURE 26
+#define CC_CSMI_SAS_GET_SCSI_ADDRESS 27
+#define CC_CSMI_SAS_GET_DEVICE_ADDRESS 28
+#define CC_CSMI_SAS_TASK_MANAGEMENT 29
+#define CC_CSMI_SAS_GET_CONNECTOR_INFO 30
+#define CC_CSMI_SAS_GET_LOCATION 31
+
+// Control Codes requiring CSMI_PHY_SIGNATURE
+
+#define CC_CSMI_SAS_PHY_CONTROL 60
+
+#define IOCTL_HEADER SRB_IO_CONTROL
+#define PIOCTL_HEADER PSRB_IO_CONTROL
+
+#endif
+
+/*************************************************************************/
+/* TARGET OS NETWARE SPECIFIC CODE                                       */
+/*************************************************************************/
+
+#ifdef _NETWARE
+
+// NetWare IOCTL definitions
+
+#define CSMI_SAS_BEGIN_PACK(x) pack(x)
+#define CSMI_SAS_END_PACK pack()
+
+#ifndef LONG
+typedef unsigned long LONG;
+#endif
+
+#ifndef WORD
+typedef unsigned short WORD;
+#endif
+
+#ifndef BYTE
+typedef unsigned char BYTE;
+#endif
+
+/* Need to have these definitions for Netware */
+#define __u8 unsigned char
+#define __u16 unsigned short
+#define __u32 unsigned long
+#define __u64 unsigned __int64
+
+#define __i8 char
+
+
+// EDM #pragma CSMI_SAS_BEGIN_PACK(8)
+#pragma pack(8)
+
+// IOCTL_HEADER
+typedef struct _IOCTL_HEADER {
+	__u32 Length;
+	__u32 ReturnCode;
+} IOCTL_HEADER,
+  *PIOCTL_HEADER;
+
+// EDM #pragma CSMI_SAS_END_PACK
+#pragma pack()
+
+// IOCTL Control Codes
+// (IoctlHeader.ControlCode)
+
+// Control Codes requiring CSMI_ALL_SIGNATURE
+
+#define CC_CSMI_SAS_GET_DRIVER_INFO 0x01FF0001
+#define CC_CSMI_SAS_GET_CNTLR_CONFIG 0x01FF0002
+#define CC_CSMI_SAS_GET_CNTLR_STATUS 0x01FF0003
+#define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 0x01FF0004
+
+// Control Codes requiring CSMI_RAID_SIGNATURE
+
+#define CC_CSMI_SAS_GET_RAID_INFO 0x01FF000A
+#define CC_CSMI_SAS_GET_RAID_CONFIG 0x01FF000B
+#define CC_CSMI_SAS_GET_RAID_FEATURES 0x01FF000C
+#define CC_CSMI_SAS_SET_RAID_CONTROL 0x01FF000D
+#define CC_CSMI_SAS_GET_RAID_ELEMENT 0x01FF000E
+#define
CC_CSMI_SAS_SET_RAID_OPERATION 0x01FF000F + +// Control Codes requiring CSMI_SAS_SIGNATURE + +#define CC_CSMI_SAS_GET_PHY_INFO 0x01FF0014 +#define CC_CSMI_SAS_SET_PHY_INFO 0x01FF0015 +#define CC_CSMI_SAS_GET_LINK_ERRORS 0x01FF0016 +#define CC_CSMI_SAS_SMP_PASSTHRU 0x01FF0017 +#define CC_CSMI_SAS_SSP_PASSTHRU 0x01FF0018 +#define CC_CSMI_SAS_STP_PASSTHRU 0x01FF0019 +#define CC_CSMI_SAS_GET_SATA_SIGNATURE 0x01FF001A +#define CC_CSMI_SAS_GET_SCSI_ADDRESS 0x01FF001B +#define CC_CSMI_SAS_GET_DEVICE_ADDRESS 0x01FF001C +#define CC_CSMI_SAS_TASK_MANAGEMENT 0x01FF001D +#define CC_CSMI_SAS_GET_CONNECTOR_INFO 0x01FF001E +#define CC_CSMI_SAS_GET_LOCATION 0x01FF001F + +// Control Codes requiring CSMI_PHY_SIGNATURE + +#define CC_CSMI_SAS_PHY_CONTROL 60 + +#endif + +/*************************************************************************/ +/* TARGET OS NOT DEFINED ERROR */ +/*************************************************************************/ + +// EDM +//#if (!_WIN32 && !_linux && !_NETWARE) +// #error "Unknown target OS." +//#endif + +/*************************************************************************/ +/* OS INDEPENDENT CODE */ +/*************************************************************************/ + +/* * * * * * * * * * Class Independent IOCTL Constants * * * * * * * * * */ + +// Return codes for all IOCTL's regardless of class +// (IoctlHeader.ReturnCode) + +#define CSMI_SAS_STATUS_SUCCESS 0 +#define CSMI_SAS_STATUS_FAILED 1 +#define CSMI_SAS_STATUS_BAD_CNTL_CODE 2 +#define CSMI_SAS_STATUS_INVALID_PARAMETER 3 +#define CSMI_SAS_STATUS_WRITE_ATTEMPTED 4 + +// Signature value +// (IoctlHeader.Signature) + +#define CSMI_ALL_SIGNATURE "CSMIALL" + +// Timeout value default of 60 seconds +// (IoctlHeader.Timeout) + +#define CSMI_ALL_TIMEOUT 60 + +// Direction values for data flow on this IOCTL +// (IoctlHeader.Direction, Linux only) +#define CSMI_SAS_DATA_READ 0 +#define CSMI_SAS_DATA_WRITE 1 + +// I/O Bus Types +// ISA and EISA bus types are not supported +// (bIoBusType) + +#define CSMI_SAS_BUS_TYPE_PCI 3 +#define CSMI_SAS_BUS_TYPE_PCMCIA 4 + +// Controller Status +// (uStatus) + +#define CSMI_SAS_CNTLR_STATUS_GOOD 1 +#define CSMI_SAS_CNTLR_STATUS_FAILED 2 +#define CSMI_SAS_CNTLR_STATUS_OFFLINE 3 +#define CSMI_SAS_CNTLR_STATUS_POWEROFF 4 + +// Offline Status Reason +// (uOfflineReason) + +#define CSMI_SAS_OFFLINE_REASON_NO_REASON 0 +#define CSMI_SAS_OFFLINE_REASON_INITIALIZING 1 +#define CSMI_SAS_OFFLINE_REASON_BACKSIDE_BUS_DEGRADED 2 +#define CSMI_SAS_OFFLINE_REASON_BACKSIDE_BUS_FAILURE 3 + +// Controller Class +// (bControllerClass) + +#define CSMI_SAS_CNTLR_CLASS_HBA 5 + +// Controller Flag bits +// (uControllerFlags) + +#define CSMI_SAS_CNTLR_SAS_HBA 0x00000001 +#define CSMI_SAS_CNTLR_SAS_RAID 0x00000002 +#define CSMI_SAS_CNTLR_SATA_HBA 0x00000004 +#define CSMI_SAS_CNTLR_SATA_RAID 0x00000008 +#define CSMI_SAS_CNTLR_SMART_ARRAY 0x00000010 + +// for firmware download +#define CSMI_SAS_CNTLR_FWD_SUPPORT 0x00010000 +#define CSMI_SAS_CNTLR_FWD_ONLINE 0x00020000 +#define CSMI_SAS_CNTLR_FWD_SRESET 0x00040000 +#define CSMI_SAS_CNTLR_FWD_HRESET 0x00080000 +#define CSMI_SAS_CNTLR_FWD_RROM 0x00100000 + +// for RAID configuration supported +#define CSMI_SAS_CNTLR_RAID_CFG_SUPPORT 0x01000000 + +// Download Flag bits +// (uDownloadFlags) +#define CSMI_SAS_FWD_VALIDATE 0x00000001 +#define CSMI_SAS_FWD_SOFT_RESET 0x00000002 +#define CSMI_SAS_FWD_HARD_RESET 0x00000004 + +// Firmware Download Status +// (usStatus) +#define CSMI_SAS_FWD_SUCCESS 0 +#define CSMI_SAS_FWD_FAILED 1 +#define 
CSMI_SAS_FWD_USING_RROM 2
+#define CSMI_SAS_FWD_REJECT 3
+#define CSMI_SAS_FWD_DOWNREV 4
+
+// Firmware Download Severity
+// (usSeverity)
+#define CSMI_SAS_FWD_INFORMATION 0
+#define CSMI_SAS_FWD_WARNING 1
+#define CSMI_SAS_FWD_ERROR 2
+#define CSMI_SAS_FWD_FATAL 3
+
+/* * * * * * * * * * SAS RAID Class IOCTL Constants * * * * * * * * */
+
+// Return codes for the RAID IOCTL's regardless of class
+// (IoctlHeader.ReturnCode)
+
+#define CSMI_SAS_RAID_SET_OUT_OF_RANGE 1000
+#define CSMI_SAS_RAID_SET_BUFFER_TOO_SMALL 1001
+#define CSMI_SAS_RAID_SET_DATA_CHANGED 1002
+
+// Signature value
+// (IoctlHeader.Signature)
+
+#define CSMI_RAID_SIGNATURE "CSMIARY"
+
+// Timeout value default of 60 seconds
+// (IoctlHeader.Timeout)
+
+#define CSMI_RAID_TIMEOUT 60
+
+// RAID Types
+// (bRaidType)
+#define CSMI_SAS_RAID_TYPE_NONE 0
+#define CSMI_SAS_RAID_TYPE_0 1
+#define CSMI_SAS_RAID_TYPE_1 2
+#define CSMI_SAS_RAID_TYPE_10 3
+#define CSMI_SAS_RAID_TYPE_5 4
+#define CSMI_SAS_RAID_TYPE_15 5
+#define CSMI_SAS_RAID_TYPE_6 6
+#define CSMI_SAS_RAID_TYPE_50 7
+#define CSMI_SAS_RAID_TYPE_VOLUME 8
+#define CSMI_SAS_RAID_TYPE_1E 9
+#define CSMI_SAS_RAID_TYPE_OTHER 255
+// the last value 255 was already defined for other
+// so end is defined as 254
+#define CSMI_SAS_RAID_TYPE_END 254
+
+// RAID Status
+// (bStatus)
+#define CSMI_SAS_RAID_SET_STATUS_OK 0
+#define CSMI_SAS_RAID_SET_STATUS_DEGRADED 1
+#define CSMI_SAS_RAID_SET_STATUS_REBUILDING 2
+#define CSMI_SAS_RAID_SET_STATUS_FAILED 3
+#define CSMI_SAS_RAID_SET_STATUS_OFFLINE 4
+#define CSMI_SAS_RAID_SET_STATUS_TRANSFORMING 5
+#define CSMI_SAS_RAID_SET_STATUS_QUEUED_FOR_REBUILD 6
+#define CSMI_SAS_RAID_SET_STATUS_QUEUED_FOR_TRANSFORMATION 7
+
+// RAID Drive Count
+// (bDriveCount, 0xF1 to 0xFF are reserved)
+#define CSMI_SAS_RAID_DRIVE_COUNT_TOO_BIG 0xF1
+#define CSMI_SAS_RAID_DRIVE_COUNT_SUPRESSED 0xF2
+
+// RAID Data Type
+// (bDataType)
+#define CSMI_SAS_RAID_DATA_DRIVES 0
+#define CSMI_SAS_RAID_DATA_DEVICE_ID 1
+#define CSMI_SAS_RAID_DATA_ADDITIONAL_DATA 2
+
+// RAID Drive Status
+// (bDriveStatus)
+#define CSMI_SAS_DRIVE_STATUS_OK 0
+#define CSMI_SAS_DRIVE_STATUS_REBUILDING 1
+#define CSMI_SAS_DRIVE_STATUS_FAILED 2
+#define CSMI_SAS_DRIVE_STATUS_DEGRADED 3
+#define CSMI_SAS_DRIVE_STATUS_OFFLINE 4
+#define CSMI_SAS_DRIVE_STATUS_QUEUED_FOR_REBUILD 5
+
+// RAID Drive Usage
+// (bDriveUsage)
+#define CSMI_SAS_DRIVE_CONFIG_NOT_USED 0
+#define CSMI_SAS_DRIVE_CONFIG_MEMBER 1
+#define CSMI_SAS_DRIVE_CONFIG_SPARE 2
+#define CSMI_SAS_DRIVE_CONFIG_SPARE_ACTIVE 3
+
+// RAID Drive Type
+// (bDriveType)
+#define CSMI_SAS_DRIVE_TYPE_UNKNOWN 0
+#define CSMI_SAS_DRIVE_TYPE_SINGLE_PORT_SAS 1
+#define CSMI_SAS_DRIVE_TYPE_DUAL_PORT_SAS 2
+#define CSMI_SAS_DRIVE_TYPE_SATA 3
+#define CSMI_SAS_DRIVE_TYPE_SATA_PS 4
+#define CSMI_SAS_DRIVE_TYPE_OTHER 255
+
+// RAID Write Protect
+// (bWriteProtect)
+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_UNKNOWN 0
+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_UNCHANGED 0
+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_ENABLED 1
+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_DISABLED 2
+
+// RAID Cache Setting
+// (bCacheSetting)
+#define CSMI_SAS_RAID_SET_CACHE_UNKNOWN 0
+#define CSMI_SAS_RAID_SET_CACHE_UNCHANGED 0
+#define CSMI_SAS_RAID_SET_CACHE_ENABLED 1
+#define CSMI_SAS_RAID_SET_CACHE_DISABLED 2
+#define CSMI_SAS_RAID_SET_CACHE_CORRUPT 3
+
+// RAID Features
+// (uFeatures)
+#define CSMI_SAS_RAID_FEATURE_TRANSFORMATION 0x00000001
+#define CSMI_SAS_RAID_FEATURE_REBUILD 0x00000002
+#define CSMI_SAS_RAID_FEATURE_SPLIT_MIRROR 0x00000004
+#define
CSMI_SAS_RAID_FEATURE_MERGE_MIRROR 0x00000008 +#define CSMI_SAS_RAID_FEATURE_LUN_RENUMBER 0x00000010 +#define CSMI_SAS_RAID_FEATURE_SURFACE_SCAN 0x00000020 +#define CSMI_SAS_RAID_FEATURE_SPARES_SHARED 0x00000040 + +// RAID Priority +// (bDefaultTransformPriority, etc.) +#define CSMI_SAS_PRIORITY_UNKNOWN 0 +#define CSMI_SAS_PRIORITY_UNCHANGED 0 +#define CSMI_SAS_PRIORITY_AUTO 1 +#define CSMI_SAS_PRIORITY_OFF 2 +#define CSMI_SAS_PRIORITY_LOW 3 +#define CSMI_SAS_PRIORITY_MEDIUM 4 +#define CSMI_SAS_PRIORITY_HIGH 5 + +// RAID Transformation Rules +// (uRaidSetTransformationRules) +#define CSMI_SAS_RAID_RULE_AVAILABLE_MEMORY 0x00000001 +#define CSMI_SAS_RAID_RULE_OVERLAPPED_EXTENTS 0x00000002 + +// RAID Cache Ratios Supported +// (bCacheRatiosSupported) +// from 0 to 100 defines the write to read ratio, 0 is 100% write +#define CSMI_SAS_RAID_CACHE_RATIO_RANGE 101 +#define CSMI_SAS_RAID_CACHE_RATIO_FIXED 102 +#define CSMI_SAS_RAID_CACHE_RATIO_AUTO 103 +#define CSMI_SAS_RAID_CACHE_RATIO_END 255 + +// RAID Cache Ratio Flag +// (bCacheRatioFlag) +#define CSMI_SAS_RAID_CACHE_RATIO_DISABLE 0 +#define CSMI_SAS_RAID_CACHE_RATIO_ENABLE 1 + +// RAID Clear Configuration Signature +// (bClearConfiguration) +#define CSMI_SAS_RAID_CLEAR_CONFIGURATION_SIGNATURE "RAIDCLR" + +// RAID Failure Codes +// (uFailureCode) +#define CSMI_SAS_FAIL_CODE_OK 0 +#define CSMI_SAS_FAIL_CODE_PARAMETER_INVALID 1000 +#define CSMI_SAS_FAIL_CODE_TRANSFORM_PRIORITY_INVALID 1001 +#define CSMI_SAS_FAIL_CODE_REBUILD_PRIORITY_INVALID 1002 +#define CSMI_SAS_FAIL_CODE_CACHE_RATIO_INVALID 1003 +#define CSMI_SAS_FAIL_CODE_SURFACE_SCAN_INVALID 1004 +#define CSMI_SAS_FAIL_CODE_CLEAR_CONFIGURATION_INVALID 1005 +#define CSMI_SAS_FAIL_CODE_ELEMENT_INDEX_INVALID 1006 +#define CSMI_SAS_FAIL_CODE_SUBELEMENT_INDEX_INVALID 1007 +#define CSMI_SAS_FAIL_CODE_EXTENT_INVALID 1008 +#define CSMI_SAS_FAIL_CODE_BLOCK_COUNT_INVALID 1009 +#define CSMI_SAS_FAIL_CODE_DRIVE_INDEX_INVALID 1010 +#define CSMI_SAS_FAIL_CODE_EXISTING_LUN_INVALID 1011 +#define CSMI_SAS_FAIL_CODE_RAID_TYPE_INVALID 1012 +#define CSMI_SAS_FAIL_CODE_STRIPE_SIZE_INVALID 1013 +#define CSMI_SAS_FAIL_CODE_TRANSFORMATION_INVALID 1014 +#define CSMI_SAS_FAIL_CODE_CHANGE_COUNT_INVALID 1015 +#define CSMI_SAS_FAIL_CODE_ENUMERATION_TYPE_INVALID 1016 + +#define CSMI_SAS_FAIL_CODE_EXCEEDED_RAID_SET_COUNT 2000 +#define CSMI_SAS_FAIL_CODE_DUPLICATE_LUN 2001 + +#define CSMI_SAS_FAIL_CODE_WAIT_FOR_OPERATION 3000 + +// RAID Enumeration Types +// (uEnumerationType) +#define CSMI_SAS_RAID_ELEMENT_TYPE_DRIVE 0 +#define CSMI_SAS_RAID_ELEMENT_TYPE_MODULE 1 +#define CSMI_SAS_RAID_ELEMENT_TYPE_DRIVE_RAID_SET 2 +#define CSMI_SAS_RAID_ELEMENT_TYPE_EXTENT_DRIVE 3 + +// RAID Extent Types +// (bExtentType) +#define CSMI_SAS_RAID_EXTENT_RESERVED 0 +#define CSMI_SAS_RAID_EXTENT_METADATA 1 +#define CSMI_SAS_RAID_EXTENT_ALLOCATED 2 +#define CSMI_SAS_RAID_EXTENT_UNALLOCATED 3 + +// RAID Operation Types +// (uOperationType) +#define CSMI_SAS_RAID_SET_CREATE 0 +#define CSMI_SAS_RAID_SET_LABEL 1 +#define CSMI_SAS_RAID_SET_TRANSFORM 2 +#define CSMI_SAS_RAID_SET_DELETE 3 +#define CSMI_SAS_RAID_SET_WRITE_PROTECT 4 +#define CSMI_SAS_RAID_SET_CACHE 5 +#define CSMI_SAS_RAID_SET_ONLINE_STATE 6 +#define CSMI_SAS_RAID_SET_SPARE 7 + +// RAID Transform Types +// (bTransformType) +#define CSMI_SAS_RAID_SET_TRANSFORM_SPLIT_MIRROR 0 +#define CSMI_SAS_RAID_SET_TRANSFORM_MERGE_RAID_0 1 +#define CSMI_SAS_RAID_SET_TRANSFORM_LUN_RENUMBER 2 +#define CSMI_SAS_RAID_SET_TRANSFORM_RAID_SET 3 + +// RAID Online State +// (bOnlineState) +#define 
CSMI_SAS_RAID_SET_STATE_UNKNOWN 0 +#define CSMI_SAS_RAID_SET_STATE_ONLINE 1 +#define CSMI_SAS_RAID_SET_STATE_OFFLINE 2 + +/* * * * * * * * * * SAS HBA Class IOCTL Constants * * * * * * * * * */ + +// Return codes for SAS IOCTL's +// (IoctlHeader.ReturnCode) + +#define CSMI_SAS_PHY_INFO_CHANGED CSMI_SAS_STATUS_SUCCESS +#define CSMI_SAS_PHY_INFO_NOT_CHANGEABLE 2000 +#define CSMI_SAS_LINK_RATE_OUT_OF_RANGE 2001 + +#define CSMI_SAS_PHY_DOES_NOT_EXIST 2002 +#define CSMI_SAS_PHY_DOES_NOT_MATCH_PORT 2003 +#define CSMI_SAS_PHY_CANNOT_BE_SELECTED 2004 +#define CSMI_SAS_SELECT_PHY_OR_PORT 2005 +#define CSMI_SAS_PORT_DOES_NOT_EXIST 2006 +#define CSMI_SAS_PORT_CANNOT_BE_SELECTED 2007 +#define CSMI_SAS_CONNECTION_FAILED 2008 + +#define CSMI_SAS_NO_SATA_DEVICE 2009 +#define CSMI_SAS_NO_SATA_SIGNATURE 2010 +#define CSMI_SAS_SCSI_EMULATION 2011 +#define CSMI_SAS_NOT_AN_END_DEVICE 2012 +#define CSMI_SAS_NO_SCSI_ADDRESS 2013 +#define CSMI_SAS_NO_DEVICE_ADDRESS 2014 + +// Signature value +// (IoctlHeader.Signature) + +#define CSMI_SAS_SIGNATURE "CSMISAS" + +// Timeout value default of 60 seconds +// (IoctlHeader.Timeout) + +#define CSMI_SAS_TIMEOUT 60 + +// Device types +// (bDeviceType) + +#define CSMI_SAS_PHY_UNUSED 0x00 +#define CSMI_SAS_NO_DEVICE_ATTACHED 0x00 +#define CSMI_SAS_END_DEVICE 0x10 +#define CSMI_SAS_EDGE_EXPANDER_DEVICE 0x20 +#define CSMI_SAS_FANOUT_EXPANDER_DEVICE 0x30 + +// Protocol options +// (bInitiatorPortProtocol, bTargetPortProtocol) + +#define CSMI_SAS_PROTOCOL_SATA 0x01 +#define CSMI_SAS_PROTOCOL_SMP 0x02 +#define CSMI_SAS_PROTOCOL_STP 0x04 +#define CSMI_SAS_PROTOCOL_SSP 0x08 + +// Negotiated and hardware link rates +// (bNegotiatedLinkRate, bMinimumLinkRate, bMaximumLinkRate) + +#define CSMI_SAS_LINK_RATE_UNKNOWN 0x00 +#define CSMI_SAS_PHY_DISABLED 0x01 +#define CSMI_SAS_LINK_RATE_FAILED 0x02 +#define CSMI_SAS_SATA_SPINUP_HOLD 0x03 +#define CSMI_SAS_SATA_PORT_SELECTOR 0x04 +#define CSMI_SAS_LINK_RATE_1_5_GBPS 0x08 +#define CSMI_SAS_LINK_RATE_3_0_GBPS 0x09 +#define CSMI_SAS_LINK_RATE_6_0_GBPS 0x0A +#define CSMI_SAS_LINK_RATE_12_0_GBPS 0x0B +#define CSMI_SAS_LINK_VIRTUAL 0x10 + +// Discover state +// (bAutoDiscover) + +#define CSMI_SAS_DISCOVER_NOT_SUPPORTED 0x00 +#define CSMI_SAS_DISCOVER_NOT_STARTED 0x01 +#define CSMI_SAS_DISCOVER_IN_PROGRESS 0x02 +#define CSMI_SAS_DISCOVER_COMPLETE 0x03 +#define CSMI_SAS_DISCOVER_ERROR 0x04 + +// Phy features + +#define CSMI_SAS_PHY_VIRTUAL_SMP 0x01 + +// Programmed link rates +// (bMinimumLinkRate, bMaximumLinkRate) +// (bProgrammedMinimumLinkRate, bProgrammedMaximumLinkRate) + +#define CSMI_SAS_PROGRAMMED_LINK_RATE_UNCHANGED 0x00 +#define CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS 0x08 +#define CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS 0x09 +#define CSMI_SAS_PROGRAMMED_LINK_RATE_6_0_GBPS 0x0A +#define CSMI_SAS_PROGRAMMED_LINK_RATE_12_0_GBPS 0x0B + +// Link rate +// (bNegotiatedLinkRate in CSMI_SAS_SET_PHY_INFO) + +#define CSMI_SAS_LINK_RATE_NEGOTIATE 0x00 +#define CSMI_SAS_LINK_RATE_PHY_DISABLED 0x01 + +// Signal class +// (bSignalClass in CSMI_SAS_SET_PHY_INFO) + +#define CSMI_SAS_SIGNAL_CLASS_UNKNOWN 0x00 +#define CSMI_SAS_SIGNAL_CLASS_DIRECT 0x01 +#define CSMI_SAS_SIGNAL_CLASS_SERVER 0x02 +#define CSMI_SAS_SIGNAL_CLASS_ENCLOSURE 0x03 + +// Link error reset +// (bResetCounts) + +#define CSMI_SAS_LINK_ERROR_DONT_RESET_COUNTS 0x00 +#define CSMI_SAS_LINK_ERROR_RESET_COUNTS 0x01 + +// Phy identifier +// (bPhyIdentifier) + +#define CSMI_SAS_USE_PORT_IDENTIFIER 0xFF + +// Port identifier +// (bPortIdentifier) + +#define CSMI_SAS_IGNORE_PORT 0xFF + +// 
Programmed link rates +// (bConnectionRate) + +#define CSMI_SAS_LINK_RATE_NEGOTIATED 0x00 +#define CSMI_SAS_LINK_RATE_1_5_GBPS 0x08 +#define CSMI_SAS_LINK_RATE_3_0_GBPS 0x09 +#define CSMI_SAS_LINK_RATE_6_0_GBPS 0x0A +#define CSMI_SAS_LINK_RATE_12_0_GBPS 0x0B + +// Connection status +// (bConnectionStatus) + +#define CSMI_SAS_OPEN_ACCEPT 0 +#define CSMI_SAS_OPEN_REJECT_BAD_DESTINATION 1 +#define CSMI_SAS_OPEN_REJECT_RATE_NOT_SUPPORTED 2 +#define CSMI_SAS_OPEN_REJECT_NO_DESTINATION 3 +#define CSMI_SAS_OPEN_REJECT_PATHWAY_BLOCKED 4 +#define CSMI_SAS_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED 5 +#define CSMI_SAS_OPEN_REJECT_RESERVE_ABANDON 6 +#define CSMI_SAS_OPEN_REJECT_RESERVE_CONTINUE 7 +#define CSMI_SAS_OPEN_REJECT_RESERVE_INITIALIZE 8 +#define CSMI_SAS_OPEN_REJECT_RESERVE_STOP 9 +#define CSMI_SAS_OPEN_REJECT_RETRY 10 +#define CSMI_SAS_OPEN_REJECT_STP_RESOURCES_BUSY 11 +#define CSMI_SAS_OPEN_REJECT_WRONG_DESTINATION 12 + +// SSP Status +// (bSSPStatus) + +#define CSMI_SAS_SSP_STATUS_UNKNOWN 0x00 +#define CSMI_SAS_SSP_STATUS_WAITING 0x01 +#define CSMI_SAS_SSP_STATUS_COMPLETED 0x02 +#define CSMI_SAS_SSP_STATUS_FATAL_ERROR 0x03 +#define CSMI_SAS_SSP_STATUS_RETRY 0x04 +#define CSMI_SAS_SSP_STATUS_NO_TAG 0x05 + +// SSP Flags +// (uFlags) + +#define CSMI_SAS_SSP_READ 0x00000001 +#define CSMI_SAS_SSP_WRITE 0x00000002 +#define CSMI_SAS_SSP_UNSPECIFIED 0x00000004 + +#define CSMI_SAS_SSP_TASK_ATTRIBUTE_SIMPLE 0x00000000 +#define CSMI_SAS_SSP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 0x00000010 +#define CSMI_SAS_SSP_TASK_ATTRIBUTE_ORDERED 0x00000020 +#define CSMI_SAS_SSP_TASK_ATTRIBUTE_ACA 0x00000040 + +// SSP Data present +// (bDataPresent) + +#define CSMI_SAS_SSP_NO_DATA_PRESENT 0x00 +#define CSMI_SAS_SSP_RESPONSE_DATA_PRESENT 0x01 +#define CSMI_SAS_SSP_SENSE_DATA_PRESENT 0x02 + +// STP Flags +// (uFlags) + +#define CSMI_SAS_STP_READ 0x00000001 +#define CSMI_SAS_STP_WRITE 0x00000002 +#define CSMI_SAS_STP_UNSPECIFIED 0x00000004 +#define CSMI_SAS_STP_PIO 0x00000010 +#define CSMI_SAS_STP_DMA 0x00000020 +#define CSMI_SAS_STP_PACKET 0x00000040 +#define CSMI_SAS_STP_DMA_QUEUED 0x00000080 +#define CSMI_SAS_STP_EXECUTE_DIAG 0x00000100 +#define CSMI_SAS_STP_RESET_DEVICE 0x00000200 + +// Task Management Flags +// (uFlags) + +#define CSMI_SAS_TASK_IU 0x00000001 +#define CSMI_SAS_HARD_RESET_SEQUENCE 0x00000002 +#define CSMI_SAS_SUPPRESS_RESULT 0x00000004 + +// Task Management Functions +// (bTaskManagement) + +#define CSMI_SAS_SSP_ABORT_TASK 0x01 +#define CSMI_SAS_SSP_ABORT_TASK_SET 0x02 +#define CSMI_SAS_SSP_CLEAR_TASK_SET 0x04 +#define CSMI_SAS_SSP_LOGICAL_UNIT_RESET 0x08 +#define CSMI_SAS_SSP_CLEAR_ACA 0x40 +#define CSMI_SAS_SSP_QUERY_TASK 0x80 + +// Task Management Information +// (uInformation) + +#define CSMI_SAS_SSP_TEST 1 +#define CSMI_SAS_SSP_EXCEEDED 2 +#define CSMI_SAS_SSP_DEMAND 3 +#define CSMI_SAS_SSP_TRIGGER 4 + +// Connector Pinout Information +// (uPinout) + +#define CSMI_SAS_CON_UNKNOWN 0x00000001 +#define CSMI_SAS_CON_SFF_8482 0x00000002 +#define CSMI_SAS_CON_SFF_8470_LANE_1 0x00000100 +#define CSMI_SAS_CON_SFF_8470_LANE_2 0x00000200 +#define CSMI_SAS_CON_SFF_8470_LANE_3 0x00000400 +#define CSMI_SAS_CON_SFF_8470_LANE_4 0x00000800 +#define CSMI_SAS_CON_SFF_8484_LANE_1 0x00010000 +#define CSMI_SAS_CON_SFF_8484_LANE_2 0x00020000 +#define CSMI_SAS_CON_SFF_8484_LANE_3 0x00040000 +#define CSMI_SAS_CON_SFF_8484_LANE_4 0x00080000 + +// Connector Location Information +// (bLocation) + +// same as uPinout above... 
+// #define CSMI_SAS_CON_UNKNOWN 0x01 +#define CSMI_SAS_CON_INTERNAL 0x02 +#define CSMI_SAS_CON_EXTERNAL 0x04 +#define CSMI_SAS_CON_SWITCHABLE 0x08 +#define CSMI_SAS_CON_AUTO 0x10 +#define CSMI_SAS_CON_NOT_PRESENT 0x20 +#define CSMI_SAS_CON_NOT_CONNECTED 0x80 + +// Device location identification +// (bIdentify) + +#define CSMI_SAS_LOCATE_UNKNOWN 0x00 +#define CSMI_SAS_LOCATE_FORCE_OFF 0x01 +#define CSMI_SAS_LOCATE_FORCE_ON 0x02 + +// Location Valid flags +// (uLocationFlags) + +#define CSMI_SAS_LOCATE_SAS_ADDRESS_VALID 0x00000001 +#define CSMI_SAS_LOCATE_SAS_LUN_VALID 0x00000002 +#define CSMI_SAS_LOCATE_ENCLOSURE_IDENTIFIER_VALID 0x00000004 +#define CSMI_SAS_LOCATE_ENCLOSURE_NAME_VALID 0x00000008 +#define CSMI_SAS_LOCATE_BAY_PREFIX_VALID 0x00000010 +#define CSMI_SAS_LOCATE_BAY_IDENTIFIER_VALID 0x00000020 +#define CSMI_SAS_LOCATE_LOCATION_STATE_VALID 0x00000040 + +/* * * * * * * * SAS Phy Control Class IOCTL Constants * * * * * * * * */ + +// Return codes for SAS Phy Control IOCTLs +// (IoctlHeader.ReturnCode) + +// Signature value +// (IoctlHeader.Signature) + +#define CSMI_PHY_SIGNATURE "CSMIPHY" + +// Phy Control Functions +// (bFunction) + +// values 0x00 to 0xFF are consistent in definition with the SMP PHY CONTROL +// function defined in the SAS spec +#define CSMI_SAS_PC_NOP 0x00000000 +#define CSMI_SAS_PC_LINK_RESET 0x00000001 +#define CSMI_SAS_PC_HARD_RESET 0x00000002 +#define CSMI_SAS_PC_PHY_DISABLE 0x00000003 +// 0x04 to 0xFF reserved... +#define CSMI_SAS_PC_GET_PHY_SETTINGS 0x00000100 + +// Link Flags +#define CSMI_SAS_PHY_ACTIVATE_CONTROL 0x00000001 +#define CSMI_SAS_PHY_UPDATE_SPINUP_RATE 0x00000002 +#define CSMI_SAS_PHY_AUTO_COMWAKE 0x00000004 + +// Device Types for Phy Settings +// (bType) +#define CSMI_SAS_UNDEFINED 0x00 +#define CSMI_SAS_SATA 0x01 +#define CSMI_SAS_SAS 0x02 + +// Transmitter Flags +// (uTransmitterFlags) +#define CSMI_SAS_PHY_PREEMPHASIS_DISABLED 0x00000001 + +// Receiver Flags +// (uReceiverFlags) +#define CSMI_SAS_PHY_EQUALIZATION_DISABLED 0x00000001 + +// Pattern Flags +// (uPatternFlags) +// #define CSMI_SAS_PHY_ACTIVATE_CONTROL 0x00000001 +#define CSMI_SAS_PHY_DISABLE_SCRAMBLING 0x00000002 +#define CSMI_SAS_PHY_DISABLE_ALIGN 0x00000004 +#define CSMI_SAS_PHY_DISABLE_SSC 0x00000008 + +#define CSMI_SAS_PHY_FIXED_PATTERN 0x00000010 +#define CSMI_SAS_PHY_USER_PATTERN 0x00000020 + +// Fixed Patterns +// (bFixedPattern) +#define CSMI_SAS_PHY_CJPAT 0x00000001 +#define CSMI_SAS_PHY_ALIGN 0x00000002 + +// Type Flags +// (bTypeFlags) +#define CSMI_SAS_PHY_POSITIVE_DISPARITY 0x01 +#define CSMI_SAS_PHY_NEGATIVE_DISPARITY 0x02 +#define CSMI_SAS_PHY_CONTROL_CHARACTER 0x04 + +// Miscellaneous +#define SLOT_NUMBER_UNKNOWN 0xFFFF + +/*************************************************************************/ +/* DATA STRUCTURES */ +/*************************************************************************/ + +/* * * * * * * * * * Class Independent Structures * * * * * * * * * */ + +// EDM #pragma CSMI_SAS_BEGIN_PACK(8) +#pragma pack(8) + +// CC_CSMI_SAS_DRIVER_INFO + +typedef struct _CSMI_SAS_DRIVER_INFO { + __u8 szName[81]; + __u8 szDescription[81]; + __u16 usMajorRevision; + __u16 usMinorRevision; + __u16 usBuildRevision; + __u16 usReleaseRevision; + __u16 usCSMIMajorRevision; + __u16 usCSMIMinorRevision; +} CSMI_SAS_DRIVER_INFO, + *PCSMI_SAS_DRIVER_INFO; + +typedef struct _CSMI_SAS_DRIVER_INFO_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_DRIVER_INFO Information; +} CSMI_SAS_DRIVER_INFO_BUFFER, + *PCSMI_SAS_DRIVER_INFO_BUFFER; + +// CC_CSMI_SAS_CNTLR_CONFIGURATION
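Before the controller-configuration structures that follow, a usage sketch for the driver-info query defined just above. This is a hedged illustration, not driver documentation: the IOCTL_HEADER field names (Timeout, ReturnCode) follow the annotations in the constants section, CC_CSMI_SAS_GET_DRIVER_INFO is assumed to be the control code defined in the earlier part of this header, and fd stands for an already-open handle to whatever device node the driver exposes for CSMI.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int print_csmi_driver_info(int fd)
{
	CSMI_SAS_DRIVER_INFO_BUFFER buf;

	memset(&buf, 0, sizeof(buf));
	/* Other IOCTL_HEADER fields (length, direction, ...) follow the
	 * definitions in the earlier part of this header; omitted here. */
	buf.IoctlHeader.Timeout = CSMI_SAS_TIMEOUT; /* 60-second default from above */

	/* Control code assumed from the earlier part of this header. */
	if (ioctl(fd, CC_CSMI_SAS_GET_DRIVER_INFO, &buf) != 0 ||
	    buf.IoctlHeader.ReturnCode != CSMI_SAS_STATUS_SUCCESS)
		return -1;

	printf("%s %u.%u.%u.%u\n", (char *)buf.Information.szName,
	       buf.Information.usMajorRevision, buf.Information.usMinorRevision,
	       buf.Information.usBuildRevision, buf.Information.usReleaseRevision);
	return 0;
}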
+ +typedef struct _CSMI_SAS_PCI_BUS_ADDRESS { + __u8 bBusNumber; + __u8 bDeviceNumber; + __u8 bFunctionNumber; + __u8 bReserved; +} CSMI_SAS_PCI_BUS_ADDRESS, + *PCSMI_SAS_PCI_BUS_ADDRESS; + +typedef union _CSMI_SAS_IO_BUS_ADDRESS { + CSMI_SAS_PCI_BUS_ADDRESS PciAddress; + __u8 bReserved[32]; +} CSMI_SAS_IO_BUS_ADDRESS, + *PCSMI_SAS_IO_BUS_ADDRESS; + +typedef struct _CSMI_SAS_CNTLR_CONFIG { + __u32 uBaseIoAddress; + struct { + __u32 uLowPart; + __u32 uHighPart; + } BaseMemoryAddress; + __u32 uBoardID; + __u16 usSlotNumber; + __u8 bControllerClass; + __u8 bIoBusType; + CSMI_SAS_IO_BUS_ADDRESS BusAddress; + __u8 szSerialNumber[81]; + __u16 usMajorRevision; + __u16 usMinorRevision; + __u16 usBuildRevision; + __u16 usReleaseRevision; + __u16 usBIOSMajorRevision; + __u16 usBIOSMinorRevision; + __u16 usBIOSBuildRevision; + __u16 usBIOSReleaseRevision; + __u32 uControllerFlags; + __u16 usRromMajorRevision; + __u16 usRromMinorRevision; + __u16 usRromBuildRevision; + __u16 usRromReleaseRevision; + __u16 usRromBIOSMajorRevision; + __u16 usRromBIOSMinorRevision; + __u16 usRromBIOSBuildRevision; + __u16 usRromBIOSReleaseRevision; + __u8 bReserved[7]; +} CSMI_SAS_CNTLR_CONFIG, + *PCSMI_SAS_CNTLR_CONFIG; + +typedef struct _CSMI_SAS_CNTLR_CONFIG_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_CNTLR_CONFIG Configuration; +} CSMI_SAS_CNTLR_CONFIG_BUFFER, + *PCSMI_SAS_CNTLR_CONFIG_BUFFER; + +// CC_CSMI_SAS_CNTLR_STATUS + +typedef struct _CSMI_SAS_CNTLR_STATUS { + __u32 uStatus; + __u32 uOfflineReason; + __u8 bReserved[28]; +} CSMI_SAS_CNTLR_STATUS, + *PCSMI_SAS_CNTLR_STATUS; + +typedef struct _CSMI_SAS_CNTLR_STATUS_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_CNTLR_STATUS Status; +} CSMI_SAS_CNTLR_STATUS_BUFFER, + *PCSMI_SAS_CNTLR_STATUS_BUFFER; + +// CC_CSMI_SAS_FIRMWARE_DOWNLOAD + +typedef struct _CSMI_SAS_FIRMWARE_DOWNLOAD { + __u32 uBufferLength; + __u32 uDownloadFlags; + __u8 bReserved[32]; + __u16 usStatus; + __u16 usSeverity; +} CSMI_SAS_FIRMWARE_DOWNLOAD, + *PCSMI_SAS_FIRMWARE_DOWNLOAD; + +typedef struct _CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_FIRMWARE_DOWNLOAD Information; + __u8 bDataBuffer[1]; +} CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER, + *PCSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER; + +// CC_CSMI_SAS_RAID_INFO + +typedef struct _CSMI_SAS_RAID_INFO { + __u32 uNumRaidSets; + __u32 uMaxDrivesPerSet; + __u32 uMaxRaidSets; + __u8 bMaxRaidTypes; + __u8 bReservedByteFields[7]; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulMinRaidSetBlocks; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulMaxRaidSetBlocks; + __u32 uMaxPhysicalDrives; + __u32 uMaxExtents; + __u32 uMaxModules; + __u32 uMaxTransformationMemory; + __u32 uChangeCount; + __u8 bReserved[44]; +} CSMI_SAS_RAID_INFO, + *PCSMI_SAS_RAID_INFO; + +typedef struct _CSMI_SAS_RAID_INFO_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_RAID_INFO Information; +} CSMI_SAS_RAID_INFO_BUFFER, + *PCSMI_SAS_RAID_INFO_BUFFER; + +// CC_CSMI_SAS_GET_RAID_CONFIG + +typedef struct _CSMI_SAS_RAID_DRIVES { + __u8 bModel[40]; + __u8 bFirmware[8]; + __u8 bSerialNumber[40]; + __u8 bSASAddress[8]; + __u8 bSASLun[8]; + __u8 bDriveStatus; + __u8 bDriveUsage; + __u16 usBlockSize; + __u8 bDriveType; + __u8 bReserved[15]; + __u32 uDriveIndex; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulTotalUserBlocks; +} CSMI_SAS_RAID_DRIVES, + *PCSMI_SAS_RAID_DRIVES; + +typedef struct _CSMI_SAS_RAID_DEVICE_ID { + __u8 bDeviceIdentificationVPDPage[1]; +} CSMI_SAS_RAID_DEVICE_ID, + *PCSMI_SAS_RAID_DEVICE_ID; + +typedef struct 
_CSMI_SAS_RAID_SET_ADDITIONAL_DATA { + __u8 bLabel[16]; + __u8 bRaidSetLun[8]; + __u8 bWriteProtection; + __u8 bCacheSetting; + __u8 bCacheRatio; + __u16 usBlockSize; + __u8 bReservedBytes[11]; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulRaidSetExtentOffset; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulRaidSetBlocks; + __u32 uStripeSizeInBlocks; + __u32 uSectorsPerTrack; + __u8 bApplicationScratchPad[16]; + __u32 uNumberOfHeads; + __u32 uNumberOfTracks; + __u8 bReserved[24]; +} CSMI_SAS_RAID_SET_ADDITIONAL_DATA, + *PCSMI_SAS_RAID_SET_ADDITIONAL_DATA; + +typedef struct _CSMI_SAS_RAID_CONFIG { + __u32 uRaidSetIndex; + __u32 uCapacity; + __u32 uStripeSize; + __u8 bRaidType; + __u8 bStatus; + __u8 bInformation; + __u8 bDriveCount; + __u8 bDataType; + __u8 bReserved[11]; + __u32 uFailureCode; + __u32 uChangeCount; + union { + CSMI_SAS_RAID_DRIVES Drives[1]; + CSMI_SAS_RAID_DEVICE_ID DeviceId[1]; + CSMI_SAS_RAID_SET_ADDITIONAL_DATA Data[1]; + }; +} CSMI_SAS_RAID_CONFIG, + *PCSMI_SAS_RAID_CONFIG; + +typedef struct _CSMI_SAS_RAID_CONFIG_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_RAID_CONFIG Configuration; +} CSMI_SAS_RAID_CONFIG_BUFFER, + *PCSMI_SAS_RAID_CONFIG_BUFFER; + +// CC_CSMI_SAS_GET_RAID_FEATURES + +typedef struct _CSMI_SAS_RAID_TYPE_DESCRIPTION { + __u8 bRaidType; + __u8 bReservedBytes[7]; + __u32 uSupportedStripeSizeMap; + __u8 bReserved[24]; +} CSMI_SAS_RAID_TYPE_DESCRIPTION, + *PCSMI_SAS_RAID_TYPE_DESCRIPTION; + +typedef struct _CSMI_SAS_RAID_FEATURES { + __u32 uFeatures; + __u8 bReservedFeatures[32]; + __u8 bDefaultTransformPriority; + __u8 bTransformPriority; + __u8 bDefaultRebuildPriority; + __u8 bRebuildPriority; + __u8 bDefaultSurfaceScanPriority; + __u8 bSurfaceScanPriority; + __u16 usReserved; + __u32 uRaidSetTransformationRules; + __u32 uReserved[11]; + CSMI_SAS_RAID_TYPE_DESCRIPTION RaidType[24]; + __u8 bCacheRatiosSupported[104]; + __u32 uChangeCount; + __u32 uFailureCode; + __u8 bReserved[120]; +} CSMI_SAS_RAID_FEATURES, + *PCSMI_SAS_RAID_FEATURES; + +typedef struct _CSMI_SAS_RAID_FEATURES_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_RAID_FEATURES Information; +} CSMI_SAS_RAID_FEATURES_BUFFER, + *PCSMI_SAS_RAID_FEATURES_BUFFER; + +// CC_CSMI_SAS_SET_RAID_CONTROL + +typedef struct _CSMI_SAS_RAID_CONTROL { + __u8 bTransformPriority; + __u8 bRebuildPriority; + __u8 bCacheRatioFlag; + __u8 bCacheRatio; + __u8 bSurfaceScanPriority; + __u8 bReservedBytes[15]; + __u8 bClearConfiguration[8]; + __u32 uChangeCount; + __u8 bReserved[88]; + __u32 uFailureCode; + __u8 bFailureDescription[80]; +} CSMI_SAS_RAID_CONTROL, + *PCSMI_SAS_RAID_CONTROL; + +typedef struct _CSMI_SAS_RAID_CONTROL_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_RAID_CONTROL Information; +} CSMI_SAS_RAID_CONTROL_BUFFER, + *PCSMI_SAS_RAID_CONTROL_BUFFER; + +// CC_CSMI_SAS_GET_RAID_ELEMENT + +typedef struct _CSMI_SAS_DRIVE_EXTENT_INFO { + __u32 uDriveIndex; + __u8 bExtentType; + __u8 bReservedBytes[7]; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulExtentOffset; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulExtentBlocks; + __u32 uRaidSetIndex; + __u8 bReserved[96]; +} CSMI_SAS_DRIVE_EXTENT_INFO, + *PCSMI_SAS_DRIVE_EXTENT_INFO; + +typedef struct _CSMI_SAS_RAID_MODULE_INFO { + __u8 bReserved[128]; +} CSMI_SAS_RAID_MODULE_INFO, + *PCSMI_SAS_RAID_MODULE_INFO; + +typedef struct _CSMI_SAS_DRIVE_LOCATION { + __u8 bConnector[16]; + __u8 bBoxName[16]; + __u32 uBay; + __u8 bReservedBytes[4]; + __u8 bAttachedSASAddress[8]; + __u8 bAttachedPhyIdentifier; + __u8 bReserved[79]; +} 
CSMI_SAS_DRIVE_LOCATION, + *PCSMI_SAS_DRIVE_LOCATION; + +typedef struct _CSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA { + __u8 bNegotiatedLinkRate[2]; + __u8 bReserved[126]; +} CSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA, + *PCSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA; + +typedef struct _CSMI_SAS_DRIVE_INFO { + CSMI_SAS_RAID_DRIVES Device; + CSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA Data; + CSMI_SAS_DRIVE_LOCATION Location; + __u8 bReserved[16]; +} CSMI_SAS_DRIVE_INFO, + *PCSMI_SAS_DRIVE_INFO; + +typedef struct _CSMI_SAS_RAID_ELEMENT { + __u32 uEnumerationType; + __u32 uElementIndex; + __u32 uNumElements; + __u32 uChangeCount; + __u32 uSubElementIndex; + __u8 bReserved[32]; + __u32 uFailureCode; + __u8 bFailureDescription[80]; + union { + CSMI_SAS_DRIVE_INFO Drive; + CSMI_SAS_RAID_MODULE_INFO Module; + CSMI_SAS_DRIVE_EXTENT_INFO Extent; + } Element; +} CSMI_SAS_RAID_ELEMENT, + *PCSMI_SAS_RAID_ELEMENT; + +typedef struct _CSMI_SAS_RAID_ELEMENT_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_RAID_ELEMENT Information; +} CSMI_SAS_RAID_ELEMENT_BUFFER, + *PCSMI_SAS_RAID_ELEMENT_BUFFER; + +// CC_CSMI_SAS_SET_RAID_OPERATION + +typedef struct _CSMI_SAS_RAID_SET_LIST { + __u32 uRaidSetIndex; + __u8 bExistingLun[8]; + __u8 bNewLun[8]; + __u8 bReserved[12]; +} CSMI_SAS_RAID_SET_LIST, + *PCSMI_SAS_RAID_SET_LIST; + +typedef struct _CSMI_SAS_RAID_SET_DRIVE_LIST { + __u32 uDriveIndex; + __u8 bDriveUsage; + __u8 bReserved[27]; +} CSMI_SAS_RAID_SET_DRIVE_LIST, + *PCSMI_SAS_RAID_SET_DRIVE_LIST; + +typedef struct _CSMI_SAS_RAID_SET_SPARE_INFO { + __u32 uRaidSetIndex; + __u32 uDriveCount; + __u8 bApplicationScratchPad[16]; + __u8 bReserved[104]; +} CSMI_SAS_RAID_SET_SPARE_INFO, + *PCSMI_SAS_RAID_SET_SPARE_INFO; + +typedef struct _CSMI_SAS_RAID_SET_ONLINE_STATE_INFO { + __u32 uRaidSetIndex; + __u8 bOnlineState; + __u8 bReserved[123]; +} CSMI_SAS_RAID_SET_ONLINE_STATE_INFO, + *PCSMI_SAS_RAID_SET_ONLINE_STATE_INFO; + +typedef struct _CSMI_SAS_RAID_SET_CACHE_INFO { + __u32 uRaidSetIndex; + __u8 bCacheSetting; + __u8 bCacheRatioFlag; + __u8 bCacheRatio; + __u8 bReserved[121]; +} CSMI_SAS_RAID_SET_CACHE_INFO, + *PCSMI_SAS_RAID_SET_CACHE_INFO; + +typedef struct _CSMI_SAS_RAID_SET_WRITE_PROTECT_INFO { + __u32 uRaidSetIndex; + __u8 bWriteProtectSetting; + __u8 bReserved[123]; +} CSMI_SAS_RAID_SET_WRITE_PROTECT_INFO, + *PCSMI_SAS_RAID_SET_WRITE_PROTECT_INFO; + +typedef struct _CSMI_SAS_RAID_SET_DELETE_INFO { + __u32 uRaidSetIndex; + __u8 bReserved[124]; +} CSMI_SAS_RAID_SET_DELETE_INFO, + *PCSMI_SAS_RAID_SET_DELETE_INFO; + +typedef struct _CSMI_SAS_RAID_SET_MODIFY_INFO { + __u8 bRaidType; + __u8 bReservedBytes[7]; + __u32 uStripeSize; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulRaidSetBlocks; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulRaidSetExtentOffset; + __u32 uDriveCount; + __u8 bReserved[96]; +} CSMI_SAS_RAID_SET_MODIFY_INFO, + *PCSMI_SAS_RAID_SET_MODIFY_INFO; + +typedef struct _CSMI_SAS_RAID_SET_TRANSFORM_INFO { + __u8 bTransformType; + __u8 bReservedBytes[3]; + __u32 uRaidSetIndex; + __u8 bRaidType; + __u8 bReservedBytes2[11]; + __u32 uAdditionalRaidSetIndex; + __u32 uRaidSetCount; + __u8 bApplicationScratchPad[16]; + CSMI_SAS_RAID_SET_MODIFY_INFO Modify; + __u8 bReserved[80]; +} CSMI_SAS_RAID_SET_TRANSFORM_INFO, + *PCSMI_SAS_RAID_SET_TRANSFORM_INFO; + +typedef struct _CSMI_SAS_RAID_SET_LABEL_INFO { + __u32 uRaidSetIndex; + __u8 bLabel[16]; + __u8 bReserved[108]; +} CSMI_SAS_RAID_SET_LABEL_INFO, + *PCSMI_SAS_RAID_SET_LABEL_INFO; + +typedef struct _CSMI_SAS_RAID_SET_CREATE_INFO { + __u8 bRaidType; + __u8 
bReservedBytes[7]; + __u32 uStripeSize; + __u32 uTrackSectorCount; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulRaidSetBlocks; + struct + { + __u32 uLowPart; + __u32 uHighPart; + } ulRaidSetExtentOffset; + __u32 uDriveCount; + __u8 bLabel[16]; + __u32 uRaidSetIndex; + __u8 bApplicationScratchPad[16]; + __u32 uNumberOfHeads; + __u32 uNumberOfTracks; + __u8 bReserved[48]; +} CSMI_SAS_RAID_SET_CREATE_INFO, + *PCSMI_SAS_RAID_SET_CREATE_INFO; + +typedef struct _CSMI_SAS_RAID_SET_OPERATION { + __u32 uOperationType; + __u32 uChangeCount; + __u32 uFailureCode; + __u8 bFailureDescription[80]; + __u8 bReserved[28]; + union { + CSMI_SAS_RAID_SET_CREATE_INFO Create; + CSMI_SAS_RAID_SET_LABEL_INFO Label; + CSMI_SAS_RAID_SET_TRANSFORM_INFO Transform; + CSMI_SAS_RAID_SET_DELETE_INFO Delete; + CSMI_SAS_RAID_SET_WRITE_PROTECT_INFO Protect; + CSMI_SAS_RAID_SET_CACHE_INFO Cache; + CSMI_SAS_RAID_SET_ONLINE_STATE_INFO State; + CSMI_SAS_RAID_SET_SPARE_INFO Spare; + } Operation; + union { + CSMI_SAS_RAID_SET_DRIVE_LIST DriveList[1]; + CSMI_SAS_RAID_SET_LIST RaidSetList[1]; + } Parameters; +} CSMI_SAS_RAID_SET_OPERATION, + *PCSMI_SAS_RAID_SET_OPERATION; + +typedef struct _CSMI_SAS_RAID_SET_OPERATION_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_RAID_SET_OPERATION Information; +} CSMI_SAS_RAID_SET_OPERATION_BUFFER, + *PCSMI_SAS_RAID_SET_OPERATION_BUFFER; + +/* * * * * * * * * * SAS HBA Class Structures * * * * * * * * * */ + +// CC_CSMI_SAS_GET_PHY_INFO + +typedef struct _CSMI_SAS_IDENTIFY { + __u8 bDeviceType; + __u8 bRestricted; + __u8 bInitiatorPortProtocol; + __u8 bTargetPortProtocol; + __u8 bRestricted2[8]; + __u8 bSASAddress[8]; + __u8 bPhyIdentifier; + __u8 bSignalClass; + __u8 bReserved[6]; +} CSMI_SAS_IDENTIFY, + *PCSMI_SAS_IDENTIFY; + +typedef struct _CSMI_SAS_PHY_ENTITY { + CSMI_SAS_IDENTIFY Identify; + __u8 bPortIdentifier; + __u8 bNegotiatedLinkRate; + __u8 bMinimumLinkRate; + __u8 bMaximumLinkRate; + __u8 bPhyChangeCount; + __u8 bAutoDiscover; + __u8 bPhyFeatures; + __u8 bReserved; + CSMI_SAS_IDENTIFY Attached; +} CSMI_SAS_PHY_ENTITY, + *PCSMI_SAS_PHY_ENTITY; + +typedef struct _CSMI_SAS_PHY_INFO { + __u8 bNumberOfPhys; + __u8 bReserved[3]; + CSMI_SAS_PHY_ENTITY Phy[32]; +} CSMI_SAS_PHY_INFO, + *PCSMI_SAS_PHY_INFO; + +typedef struct _CSMI_SAS_PHY_INFO_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_PHY_INFO Information; +} CSMI_SAS_PHY_INFO_BUFFER, + *PCSMI_SAS_PHY_INFO_BUFFER; + +// CC_CSMI_SAS_SET_PHY_INFO + +typedef struct _CSMI_SAS_SET_PHY_INFO { + __u8 bPhyIdentifier; + __u8 bNegotiatedLinkRate; + __u8 bProgrammedMinimumLinkRate; + __u8 bProgrammedMaximumLinkRate; + __u8 bSignalClass; + __u8 bReserved[3]; +} CSMI_SAS_SET_PHY_INFO, + *PCSMI_SAS_SET_PHY_INFO; + +typedef struct _CSMI_SAS_SET_PHY_INFO_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_SET_PHY_INFO Information; +} CSMI_SAS_SET_PHY_INFO_BUFFER, + *PCSMI_SAS_SET_PHY_INFO_BUFFER; + +// CC_CSMI_SAS_GET_LINK_ERRORS + +typedef struct _CSMI_SAS_LINK_ERRORS { + __u8 bPhyIdentifier; + __u8 bResetCounts; + __u8 bReserved[2]; + __u32 uInvalidDwordCount; + __u32 uRunningDisparityErrorCount; + __u32 uLossOfDwordSyncCount; + __u32 uPhyResetProblemCount; +} CSMI_SAS_LINK_ERRORS, + *PCSMI_SAS_LINK_ERRORS; + +typedef struct _CSMI_SAS_LINK_ERRORS_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_LINK_ERRORS Information; +} CSMI_SAS_LINK_ERRORS_BUFFER, + *PCSMI_SAS_LINK_ERRORS_BUFFER; + +// CC_CSMI_SAS_SMP_PASSTHRU + +typedef struct _CSMI_SAS_SMP_REQUEST { + __u8 bFrameType; + __u8 bFunction; + __u8 bReserved[2]; + __u8 
bAdditionalRequestBytes[1016]; +} CSMI_SAS_SMP_REQUEST, + *PCSMI_SAS_SMP_REQUEST; + +typedef struct _CSMI_SAS_SMP_RESPONSE { + __u8 bFrameType; + __u8 bFunction; + __u8 bFunctionResult; + __u8 bReserved; + __u8 bAdditionalResponseBytes[1016]; +} CSMI_SAS_SMP_RESPONSE, + *PCSMI_SAS_SMP_RESPONSE; + +typedef struct _CSMI_SAS_SMP_PASSTHRU { + __u8 bPhyIdentifier; + __u8 bPortIdentifier; + __u8 bConnectionRate; + __u8 bReserved; + __u8 bDestinationSASAddress[8]; + __u32 uRequestLength; + CSMI_SAS_SMP_REQUEST Request; + __u8 bConnectionStatus; + __u8 bReserved2[3]; + __u32 uResponseBytes; + CSMI_SAS_SMP_RESPONSE Response; +} CSMI_SAS_SMP_PASSTHRU, + *PCSMI_SAS_SMP_PASSTHRU; + +typedef struct _CSMI_SAS_SMP_PASSTHRU_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_SMP_PASSTHRU Parameters; +} CSMI_SAS_SMP_PASSTHRU_BUFFER, + *PCSMI_SAS_SMP_PASSTHRU_BUFFER; + +// CC_CSMI_SAS_SSP_PASSTHRU + +typedef struct _CSMI_SAS_SSP_PASSTHRU { + __u8 bPhyIdentifier; + __u8 bPortIdentifier; + __u8 bConnectionRate; + __u8 bReserved; + __u8 bDestinationSASAddress[8]; + __u8 bLun[8]; + __u8 bCDBLength; + __u8 bAdditionalCDBLength; + __u8 bReserved2[2]; + __u8 bCDB[16]; + __u32 uFlags; + __u8 bAdditionalCDB[24]; + __u32 uDataLength; +} CSMI_SAS_SSP_PASSTHRU, + *PCSMI_SAS_SSP_PASSTHRU; + +typedef struct _CSMI_SAS_SSP_PASSTHRU_STATUS { + __u8 bConnectionStatus; + __u8 bSSPStatus; + __u8 bReserved[2]; + __u8 bDataPresent; + __u8 bStatus; + __u8 bResponseLength[2]; + __u8 bResponse[256]; + __u32 uDataBytes; +} CSMI_SAS_SSP_PASSTHRU_STATUS, + *PCSMI_SAS_SSP_PASSTHRU_STATUS; + +typedef struct _CSMI_SAS_SSP_PASSTHRU_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_SSP_PASSTHRU Parameters; + CSMI_SAS_SSP_PASSTHRU_STATUS Status; + __u8 bDataBuffer[1]; +} CSMI_SAS_SSP_PASSTHRU_BUFFER, + *PCSMI_SAS_SSP_PASSTHRU_BUFFER; + +// CC_CSMI_SAS_STP_PASSTHRU + +typedef struct _CSMI_SAS_STP_PASSTHRU { + __u8 bPhyIdentifier; + __u8 bPortIdentifier; + __u8 bConnectionRate; + __u8 bReserved; + __u8 bDestinationSASAddress[8]; + __u8 bReserved2[4]; + __u8 bCommandFIS[20]; + __u32 uFlags; + __u32 uDataLength; +} CSMI_SAS_STP_PASSTHRU, + *PCSMI_SAS_STP_PASSTHRU; + +typedef struct _CSMI_SAS_STP_PASSTHRU_STATUS { + __u8 bConnectionStatus; + __u8 bReserved[3]; + __u8 bStatusFIS[20]; + __u32 uSCR[16]; + __u32 uDataBytes; +} CSMI_SAS_STP_PASSTHRU_STATUS, + *PCSMI_SAS_STP_PASSTHRU_STATUS; + +typedef struct _CSMI_SAS_STP_PASSTHRU_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_STP_PASSTHRU Parameters; + CSMI_SAS_STP_PASSTHRU_STATUS Status; + __u8 bDataBuffer[1]; +} CSMI_SAS_STP_PASSTHRU_BUFFER, + *PCSMI_SAS_STP_PASSTHRU_BUFFER; + +// CC_CSMI_SAS_GET_SATA_SIGNATURE + +typedef struct _CSMI_SAS_SATA_SIGNATURE { + __u8 bPhyIdentifier; + __u8 bReserved[3]; + __u8 bSignatureFIS[20]; +} CSMI_SAS_SATA_SIGNATURE, + *PCSMI_SAS_SATA_SIGNATURE; + +typedef struct _CSMI_SAS_SATA_SIGNATURE_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_SATA_SIGNATURE Signature; +} CSMI_SAS_SATA_SIGNATURE_BUFFER, + *PCSMI_SAS_SATA_SIGNATURE_BUFFER; + +// CC_CSMI_SAS_GET_SCSI_ADDRESS + +typedef struct _CSMI_SAS_GET_SCSI_ADDRESS_BUFFER { + IOCTL_HEADER IoctlHeader; + __u8 bSASAddress[8]; + __u8 bSASLun[8]; + __u8 bHostIndex; + __u8 bPathId; + __u8 bTargetId; + __u8 bLun; +} CSMI_SAS_GET_SCSI_ADDRESS_BUFFER, + *PCSMI_SAS_GET_SCSI_ADDRESS_BUFFER; + +// CC_CSMI_SAS_GET_DEVICE_ADDRESS + +typedef struct _CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER { + IOCTL_HEADER IoctlHeader; + __u8 bHostIndex; + __u8 bPathId; + __u8 bTargetId; + __u8 bLun; + __u8 bSASAddress[8]; + __u8 bSASLun[8]; +} 
CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER, + *PCSMI_SAS_GET_DEVICE_ADDRESS_BUFFER; + +// CC_CSMI_SAS_TASK_MANAGEMENT + +typedef struct _CSMI_SAS_SSP_TASK_IU { + __u8 bHostIndex; + __u8 bPathId; + __u8 bTargetId; + __u8 bLun; + __u32 uFlags; + __u32 uQueueTag; + __u32 uReserved; + __u8 bTaskManagementFunction; + __u8 bReserved[7]; + __u32 uInformation; +} CSMI_SAS_SSP_TASK_IU, + *PCSMI_SAS_SSP_TASK_IU; + +typedef struct _CSMI_SAS_SSP_TASK_IU_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_SSP_TASK_IU Parameters; + CSMI_SAS_SSP_PASSTHRU_STATUS Status; +} CSMI_SAS_SSP_TASK_IU_BUFFER, + *PCSMI_SAS_SSP_TASK_IU_BUFFER; + +// CC_CSMI_SAS_GET_CONNECTOR_INFO + +typedef struct _CSMI_SAS_GET_CONNECTOR_INFO { + __u32 uPinout; + __u8 bConnector[16]; + __u8 bLocation; + __u8 bReserved[15]; +} CSMI_SAS_CONNECTOR_INFO, + *PCSMI_SAS_CONNECTOR_INFO; + +typedef struct _CSMI_SAS_CONNECTOR_INFO_BUFFER { + IOCTL_HEADER IoctlHeader; + CSMI_SAS_CONNECTOR_INFO Reference[32]; +} CSMI_SAS_CONNECTOR_INFO_BUFFER, + *PCSMI_SAS_CONNECTOR_INFO_BUFFER; + +// CC_CSMI_SAS_GET_LOCATION + +typedef struct _CSMI_SAS_LOCATION_IDENTIFIER { + __u32 bLocationFlags; + __u8 bSASAddress[8]; + __u8 bSASLun[8]; + __u8 bEnclosureIdentifier[8]; + __u8 bEnclosureName[32]; + __u8 bBayPrefix[32]; + __u8 bBayIdentifier; + __u8 bLocationState; + __u8 bReserved[2]; +} CSMI_SAS_LOCATION_IDENTIFIER, + *PCSMI_SAS_LOCATION_IDENTIFIER; + +typedef struct _CSMI_SAS_GET_LOCATION_BUFFER { + IOCTL_HEADER IoctlHeader; + __u8 bHostIndex; + __u8 bPathId; + __u8 bTargetId; + __u8 bLun; + __u8 bIdentify; + __u8 bNumberOfLocationIdentifiers; + __u8 bLengthOfLocationIdentifier; + CSMI_SAS_LOCATION_IDENTIFIER Location[1]; +} CSMI_SAS_GET_LOCATION_BUFFER, + *PCSMI_SAS_GET_LOCATION_BUFFER; + +// CC_CSMI_SAS_PHY_CONTROL + +typedef struct _CSMI_SAS_CHARACTER { + __u8 bTypeFlags; + __u8 bValue; +} CSMI_SAS_CHARACTER, + *PCSMI_SAS_CHARACTER; + +typedef struct _CSMI_SAS_PHY_CONTROL { + __u8 bType; + __u8 bRate; + __u8 bReserved[6]; + __u32 uVendorUnique[8]; + __u32 uTransmitterFlags; + __i8 bTransmitAmplitude; + __i8 bTransmitterPreemphasis; + __i8 bTransmitterSlewRate; + __i8 bTransmitterReserved[13]; + __u8 bTransmitterVendorUnique[64]; + __u32 uReceiverFlags; + __i8 bReceiverThreshold; + __i8 bReceiverEqualizationGain; + __i8 bReceiverReserved[14]; + __u8 bReceiverVendorUnique[64]; + __u32 uPatternFlags; + __u8 bFixedPattern; + __u8 bUserPatternLength; + __u8 bPatternReserved[6]; + CSMI_SAS_CHARACTER UserPatternBuffer[16]; +} CSMI_SAS_PHY_CONTROL, + *PCSMI_SAS_PHY_CONTROL; + +typedef struct _CSMI_SAS_PHY_CONTROL_BUFFER { + IOCTL_HEADER IoctlHeader; + __u32 uFunction; + __u8 bPhyIdentifier; + __u16 usLengthOfControl; + __u8 bNumberOfControls; + __u8 bReserved[4]; + __u32 uLinkFlags; + __u8 bSpinupRate; + __u8 bLinkReserved[7]; + __u32 uVendorUnique[8]; + CSMI_SAS_PHY_CONTROL Control[1]; +} CSMI_SAS_PHY_CONTROL_BUFFER, + *PCSMI_SAS_PHY_CONTROL_BUFFER; + +//EDM #pragma CSMI_SAS_END_PACK +#pragma pack() + +#endif // _CSMI_SAS_H_ diff --git a/drivers/scsi/mpt3sas/load.sh b/drivers/scsi/mpt3sas/load.sh new file mode 100755 index 000000000000..7087cf62a9c1 --- /dev/null +++ b/drivers/scsi/mpt3sas/load.sh @@ -0,0 +1,70 @@ +# DISCLAIMER OF LIABILITY +# THIS IS A SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +#!/bin/bash +# +# load.sh : a helper script for loading the drivers +# + +# toggling bits for enabling scsi mid-layer logging + +# enable sense data +#sysctl -w dev.scsi.logging_level=0x1000 + +# enable scanning debugging +sysctl -w dev.scsi.logging_level=0x1C0 + +# enable sense data and scanning +#sysctl -w dev.scsi.logging_level=0x11C0 + +# loading the scsi mid-layer and transports +modprobe sg +modprobe sd_mod +modprobe scsi_transport_sas +modprobe raid_class + +# loading the driver +insmod mpt3sas.ko +#insmod mpt3sas.ko logging_level=0x310 +#insmod mpt3sas.ko max_queue_depth=128 max_sgl_entries=32 logging_level=0x620 +#insmod mpt3sas.ko max_queue_depth=500 max_sgl_entries=32 logging_level=0x620 + +# fw events + tm + reset + reply +#insmod mpt3sas.ko logging_level=0x2308 + +# work task + reply +#insmod mpt3sas.ko logging_level=0x210 +#insmod mpt3sas.ko logging_level=0x200 + +# fw events + reply +#insmod mpt3sas.ko logging_level=0x208 + +# fw events + work task + reply +#insmod mpt3sas.ko logging_level=0x218 + +# handshake + init +#insmod mpt3sas.ko logging_level=0x420 + +# everything +#insmod mpt3sas.ko logging_level=0xFFFFFFF + +# reset +#insmod mpt3sas.ko logging_level=0x2000 + +#ioctls +#insmod mpt3sas.ko logging_level=0x8000 + +#init +#insmod mpt3sas.ko logging_level=0x20 + +#config +#insmod mpt3sas.ko logging_level=0x800 + +exit 0 diff --git a/drivers/scsi/mpt3sas/load_diag_trace_on.sh b/drivers/scsi/mpt3sas/load_diag_trace_on.sh new file mode 100755 index 000000000000..7b24099a4718 --- /dev/null +++ b/drivers/scsi/mpt3sas/load_diag_trace_on.sh @@ -0,0 +1,61 @@ +# DISCLAIMER OF LIABILITY +# THIS IS A SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +#!/bin/bash +# +# load_diag_trace_on.sh : loads the driver with the trace buffer turned on +# + +#shopt -o -s xtrace + +# global parameters +declare -r CURRENT_PATH=$PWD +declare -r SCSI_HOST="/sys/class/scsi_host" +declare -r FILE_NAME="trace_buffer" + +# enable scanning debugging +sysctl -w dev.scsi.logging_level=0x1C0 + +# turn on KERN_DEBUG messages +echo 7 > /proc/sysrq-trigger + +# loading the scsi mid-layer and transports +modprobe sg +modprobe sd_mod +modprobe scsi_transport_sas +modprobe raid_class + +# loading the driver +insmod mpt3sas.ko logging_level=0x210 diag_buffer_enable=1 + +if [ $?
-ne 0 ] ; then + printf "ERROR: insmod failed !!!\n" + exit 1 +fi; + +DRIVER_LOAD_STATUS=$(lsmod | grep mpt3sas) +if [ -z "$DRIVER_LOAD_STATUS" ]; then + printf "ERROR: driver didn't load !!!\n" + exit 1 +fi + +# run multiple instances of the trace buffer script per IOC +for HOST in $(ls ${SCSI_HOST}); do + cd ${SCSI_HOST}/${HOST}; + if [ `cat proc_name` != "mpt3sas" ]; then + continue; + fi; + host_number=${HOST//host/} + cd ${CURRENT_PATH} + trace_buffer/start_tb.sh ${host_number} & +done; + +cd ${CURRENT_PATH} +exit 0 diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h old mode 100644 new mode 100755 index 18b1e31b5eb8..81eaed7c1cfb --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -1,136 +1,141 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. * * - * Name: mpi2.h - * Title: MPI Message independent structures and definitions - * including System Interface Register Set and - * scatter/gather formats. - * Creation Date: June 21, 2006 + * Name: mpi2.h + * Title: MPI Message independent structures and definitions + * including System Interface Register Set and + * scatter/gather formats. + * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.54 + * mpi2.h Version: 02.00.55 * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved ReplyPostHostIndex register to offset 0x6C of the - * MPI2_SYSTEM_INTERFACE_REGS and modified the define for - * MPI2_REPLY_POST_HOST_INDEX_OFFSET. - * Added union of request descriptors. - * Added union of reply descriptors. - * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_VERSION_02_00. - * Fixed the size of the FunctionDependent5 field in the - * MPI2_DEFAULT_REPLY structure. - * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. - * Removed the MPI-defined Fault Codes and extended the - * product specific codes up to 0xEFFF. - * Added a sixth key value for the WriteSequence register - * and changed the flush value to 0x0. - * Added message function codes for Diagnostic Buffer Post - * and Diagnsotic Release. - * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED - * Moved MPI2_VERSION_UNION from mpi2_ioc.h. - * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. - * Added #defines for marking a reply descriptor as unused. - * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved LUN field defines from mpi2_init.h. - * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. - * In all request and reply descriptors, replaced VF_ID - * field with MSIxIndex field. - * Removed DevHandle field from - * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those - * bytes reserved. - * Added RAID Accelerator functionality. - * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MSI-x index mask and shift for Reply Post Host - * Index register. - * Added function code for Host Based Discovery Action. - * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. - * Added defines for product-specific range of message - * function codes, 0xF0 to 0xFF. - * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. - * Added alternative defines for the SGE Direction bit. - * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. - * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. - * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. - * Incorporating additions for MPI v2.5. - * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. - * Added Hard Reset delay timings. - * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. - * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. - * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. - * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT - * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-18-14 02.00.36 Updated copyright information. - * Bumped MPI2_HEADER_VERSION_UNIT. - * 03-16-15 02.00.37 Bumped MPI2_HEADER_VERSION_UNIT. - * Added Scratchpad registers to - * MPI2_SYSTEM_INTERFACE_REGS. - * Added MPI2_DIAG_SBR_RELOAD. - * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT - * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT - * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines - * to be unique within first 32 characters. - * Removed AHCI support. - * Removed SOP support. - * Bumped MPI2_HEADER_VERSION_UNIT. - * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. - * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. - * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. - * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-22-18 02.00.51 Added SECURE_BOOT define. - * Bumped MPI2_HEADER_VERSION_UNIT - * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. 
- * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_IOCSTATUS_FAILURE - * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. + * Removed the MPI-defined Fault Codes and extended the + * product specific codes up to 0xEFFF. + * Added a sixth key value for the WriteSequence register + * and changed the flush value to 0x0. + * Added message function codes for Diagnostic Buffer Post + * and Diagnsotic Release. + * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED + * Moved MPI2_VERSION_UNION from mpi2_ioc.h. + * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. + * Added #defines for marking a reply descriptor as unused. + * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved LUN field defines from mpi2_init.h. + * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. + * In all request and reply descriptors, replaced VF_ID + * field with MSIxIndex field. + * Removed DevHandle field from + * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those + * bytes reserved. + * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MSI-x index mask and shift for Reply Post Host + * Index register. + * Added function code for Host Based Discovery Action. + * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. + * Added defines for product-specific range of message + * function codes, 0xF0 to 0xFF. + * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. + * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. + * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. + * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. 
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-14 02.00.36 Updated copyright information. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 03-16-15 02.00.37 Updated for MPI v2.6. + * Bumped MPI2_HEADER_VERSION_UNIT. + * Added Scratchpad registers and + * AtomicRequestDescriptorPost register to + * MPI2_SYSTEM_INTERFACE_REGS. + * Added MPI2_DIAG_SBR_RELOAD. + * Added MPI2_IOCSTATUS_INSUFFICIENT_POWER. + * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. + * Added V7 HostDiagnostic register defines + * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT + * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT + * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines + * to be unique within first 32 characters. + * Removed AHCI support. + * Removed SOP support. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. + * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-22-18 02.00.51 Added SECURE_BOOT define. + * Bumped MPI2_HEADER_VERSION_UNIT + * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IOCSTATUS_FAILURE + * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT + * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT * -------------------------------------------------------------------------- */ #ifndef MPI2_H #define MPI2_H + /***************************************************************************** * -* MPI Version Definitions +* MPI Version Definitions * *****************************************************************************/ @@ -139,41 +144,43 @@ #define MPI2_VERSION_MINOR_MASK (0x00FF) #define MPI2_VERSION_MINOR_SHIFT (0) -/*major version for all MPI v2.x */ +/* major version for all MPI v2.x */ #define MPI2_VERSION_MAJOR (0x02) -/*minor version for MPI v2.0 compatible products */ +/* minor version for MPI v2.0 compatible products */ #define MPI2_VERSION_MINOR (0x00) -#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ - MPI2_VERSION_MINOR) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) #define MPI2_VERSION_02_00 (0x0200) -/*minor version for MPI v2.5 compatible products */ + +/* minor version for MPI v2.5 compatible products */ #define MPI25_VERSION_MINOR (0x05) -#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ - MPI25_VERSION_MINOR) +#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI25_VERSION_MINOR) #define MPI2_VERSION_02_05 (0x0205) -/*minor version for MPI v2.6 compatible products */ -#define MPI26_VERSION_MINOR (0x06) -#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ - MPI26_VERSION_MINOR) -#define MPI2_VERSION_02_06 (0x0206) + +/* minor version for MPI v2.6 compatible products */ +#define MPI26_VERSION_MINOR (0x06) +#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI26_VERSION_MINOR) +#define MPI2_VERSION_02_06 (0x0206) /* Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x36) +#define MPI2_HEADER_VERSION_UNIT (0x37) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) #define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) #define MPI2_HEADER_VERSION_DEV_SHIFT (0) -#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ - MPI2_HEADER_VERSION_DEV) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV) +
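A quick editorial cross-check of the version arithmetic above, as a sketch rather than part of the header; it assumes MPI2_VERSION_MAJOR_SHIFT, defined just above this hunk, is 8, which the MPI2_VERSION_02_00 shorthand corroborates:

_Static_assert(MPI2_VERSION == MPI2_VERSION_02_00, "(0x02 << 8) | 0x00 == 0x0200");
_Static_assert(MPI25_VERSION == MPI2_VERSION_02_05, "(0x02 << 8) | 0x05 == 0x0205");
_Static_assert(MPI26_VERSION == MPI2_VERSION_02_06, "(0x02 << 8) | 0x06 == 0x0206");
_Static_assert(MPI2_HEADER_VERSION == 0x3700, "unit 0x37 after this bump, dev 0x00");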
/***************************************************************************** * -* IOC State Definitions +* IOC State Definitions * *****************************************************************************/ @@ -185,68 +192,69 @@ #define MPI2_IOC_STATE_MASK (0xF0000000) #define MPI2_IOC_STATE_SHIFT (28) -/*Fault state range for prodcut specific codes */ +/* Fault state range for product specific codes */ #define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) #define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) + /***************************************************************************** * -* System Interface Register Definitions +* System Interface Register Definitions * *****************************************************************************/ -typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { - U32 Doorbell; /*0x00 */ - U32 WriteSequence; /*0x04 */ - U32 HostDiagnostic; /*0x08 */ - U32 Reserved1; /*0x0C */ - U32 DiagRWData; /*0x10 */ - U32 DiagRWAddressLow; /*0x14 */ - U32 DiagRWAddressHigh; /*0x18 */ - U32 Reserved2[5]; /*0x1C */ - U32 HostInterruptStatus; /*0x30 */ - U32 HostInterruptMask; /*0x34 */ - U32 DCRData; /*0x38 */ - U32 DCRAddress; /*0x3C */ - U32 Reserved3[2]; /*0x40 */ - U32 ReplyFreeHostIndex; /*0x48 */ - U32 Reserved4[8]; /*0x4C */ - U32 ReplyPostHostIndex; /*0x6C */ - U32 Reserved5; /*0x70 */ - U32 HCBSize; /*0x74 */ - U32 HCBAddressLow; /*0x78 */ - U32 HCBAddressHigh; /*0x7C */ - U32 Reserved6[12]; /*0x80 */ - U32 Scratchpad[4]; /*0xB0 */ - U32 RequestDescriptorPostLow; /*0xC0 */ - U32 RequestDescriptorPostHigh; /*0xC4 */ - U32 AtomicRequestDescriptorPost;/*0xC8 */ - U32 Reserved7[13]; /*0xCC */ -} MPI2_SYSTEM_INTERFACE_REGS, - *PTR_MPI2_SYSTEM_INTERFACE_REGS, - Mpi2SystemInterfaceRegs_t, - *pMpi2SystemInterfaceRegs_t; +typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS +{ + U32 Doorbell; /* 0x00 */ + U32 WriteSequence; /* 0x04 */ + U32 HostDiagnostic; /* 0x08 */ + U32 Reserved1; /* 0x0C */ + U32 DiagRWData; /* 0x10 */ + U32 DiagRWAddressLow; /* 0x14 */ + U32 DiagRWAddressHigh; /* 0x18 */ + U32 Reserved2[5]; /* 0x1C */ + U32 HostInterruptStatus; /* 0x30 */ + U32 HostInterruptMask; /* 0x34 */ + U32 DCRData; /* 0x38 */ + U32 DCRAddress; /* 0x3C */ + U32 Reserved3[2]; /* 0x40 */ + U32 ReplyFreeHostIndex; /* 0x48 */ + U32 Reserved4[8]; /* 0x4C */ + U32 ReplyPostHostIndex; /* 0x6C */ + U32 Reserved5; /* 0x70 */ + U32 HCBSize; /* 0x74 */ + U32 HCBAddressLow; /* 0x78 */ + U32 HCBAddressHigh; /* 0x7C */ + U32 Reserved6[12]; /* 0x80 */ + U32 Scratchpad[4]; /* 0xB0 */ + U32 RequestDescriptorPostLow; /* 0xC0 */ + U32 RequestDescriptorPostHigh; /* 0xC4 */ + U32 AtomicRequestDescriptorPost;/* 0xC8 */ /* MPI v2.6 and later; reserved in earlier versions */ + U32 Reserved7[13]; /* 0xCC */ +} MPI2_SYSTEM_INTERFACE_REGS, MPI2_POINTER PTR_MPI2_SYSTEM_INTERFACE_REGS, + Mpi2SystemInterfaceRegs_t, MPI2_POINTER pMpi2SystemInterfaceRegs_t; /* - *Defines for working with the Doorbell register.
+ * Defines for working with the Doorbell register. */ #define MPI2_DOORBELL_OFFSET (0x00000000) -/*IOC --> System values */ +/* IOC --> System values */ #define MPI2_DOORBELL_USED (0x08000000) #define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) #define MPI2_DOORBELL_WHO_INIT_SHIFT (24) #define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) #define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) -/*System --> IOC values */ +/* System --> IOC values */ #define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) #define MPI2_DOORBELL_FUNCTION_SHIFT (24) #define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) #define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) + /* - *Defines for the WriteSequence register + * Defines for the WriteSequence register */ #define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) #define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) @@ -259,7 +267,7 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) /* - *Defines for the HostDiagnostic register + * Defines for the HostDiagnostic register */ #define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) @@ -288,25 +296,25 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) /* - *Offsets for DiagRWData and address + * Offsets for DiagRWData and address */ #define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) #define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) #define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) /* - *Defines for the HostInterruptStatus register + * Defines for the HostInterruptStatus register */ #define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) #define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) -#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS +#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS #define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) #define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) #define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) -#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS +#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS /* - *Defines for the HostInterruptMask register + * Defines for the HostInterruptMask register */ #define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) #define MPI2_HIM_RESET_IRQ_MASK (0x40000000) @@ -316,28 +324,28 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK /* - *Offsets for DCRData and address + * Offsets for DCRData and address */ #define MPI2_DCR_DATA_OFFSET (0x00000038) #define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) /* - *Offset for the Reply Free Queue + * Offset for the Reply Free Queue */ #define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) /* - *Defines for the Reply Descriptor Post Queue + * Defines for the Reply Descriptor Post Queue */ #define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) #define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) #define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) #define MPI2_RPHI_MSIX_INDEX_SHIFT (24) -#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /*MPI v2.5 only*/ +#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */ /* - *Defines for the HCBSize and address + * Defines for the HCBSize and address */ #define MPI2_HCB_SIZE_OFFSET (0x00000074) #define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) @@ -347,7 +355,7 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) /* - *Offsets for the Scratchpad registers + * Offsets for the Scratchpad registers */ #define MPI26_SCRATCHPAD0_OFFSET (0x000000B0) #define MPI26_SCRATCHPAD1_OFFSET (0x000000B4) @@ -355,40 +363,41
@@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI26_SCRATCHPAD3_OFFSET (0x000000BC) /* - *Offsets for the Request Descriptor Post Queue + * Offsets for the Request Descriptor Post Queue */ #define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) #define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) #define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8) -/*Hard Reset delay timings */ + +/* Hard Reset delay timings */ #define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) #define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) #define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) /***************************************************************************** * -* Message Descriptors +* Message Descriptors * *****************************************************************************/ -/*Request Descriptors */ +/* Request Descriptors */ -/*Default Request Descriptor */ -typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { - U8 RequestFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U16 LMID; /*0x04 */ - U16 DescriptorTypeDependent; /*0x06 */ +/* Default Request Descriptor */ +typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 DescriptorTypeDependent; /* 0x06 */ } MPI2_DEFAULT_REQUEST_DESCRIPTOR, - *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, - Mpi2DefaultRequestDescriptor_t, - *pMpi2DefaultRequestDescriptor_t; + MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, + Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t; -/*defines for the RequestFlags field */ +/* defines for the RequestFlags field */ #define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E) -#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) /* use carefully; values below are pre-shifted left */ #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) @@ -397,86 +406,96 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { #define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) #define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10) -#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) -/*High Priority Request Descriptor */ -typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { - U8 RequestFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U16 LMID; /*0x04 */ - U16 Reserved1; /*0x06 */ + +/* High Priority Request Descriptor */ +typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 Reserved1; /* 0x06 */ } MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, - *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, - Mpi2HighPriorityRequestDescriptor_t, - *pMpi2HighPriorityRequestDescriptor_t; + MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + Mpi2HighPriorityRequestDescriptor_t, + MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t; -/*SCSI IO Request Descriptor */ -typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { - U8 RequestFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U16 LMID; /*0x04 */ - U16 DevHandle; /*0x06 */ + +/* SCSI IO Request Descriptor */ +typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 
*/ + U16 DevHandle; /* 0x06 */ } MPI2_SCSI_IO_REQUEST_DESCRIPTOR, - *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, - Mpi2SCSIIORequestDescriptor_t, - *pMpi2SCSIIORequestDescriptor_t; + MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t; -/*SCSI Target Request Descriptor */ -typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { - U8 RequestFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U16 LMID; /*0x04 */ - U16 IoIndex; /*0x06 */ + +/* SCSI Target Request Descriptor */ +typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 IoIndex; /* 0x06 */ } MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, - *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, - Mpi2SCSITargetRequestDescriptor_t, - *pMpi2SCSITargetRequestDescriptor_t; + MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + Mpi2SCSITargetRequestDescriptor_t, + MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t; -/*RAID Accelerator Request Descriptor */ -typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { - U8 RequestFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U16 LMID; /*0x04 */ - U16 Reserved; /*0x06 */ + +/* RAID Accelerator Request Descriptor */ +typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 Reserved; /* 0x06 */ } MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, - *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, - Mpi2RAIDAcceleratorRequestDescriptor_t, - *pMpi2RAIDAcceleratorRequestDescriptor_t; + MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + Mpi2RAIDAcceleratorRequestDescriptor_t, + MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t; -/*Fast Path SCSI IO Request Descriptor */ + +/* Fast Path SCSI IO Request Descriptor */ typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR - MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, - *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, - Mpi25FastPathSCSIIORequestDescriptor_t, - *pMpi25FastPathSCSIIORequestDescriptor_t; + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi25FastPathSCSIIORequestDescriptor_t, + MPI2_POINTER pMpi25FastPathSCSIIORequestDescriptor_t; -/*PCIe Encapsulated Request Descriptor */ + +/* PCIe Encapsulated Request Descriptor */ typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR - MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, - *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, - Mpi26PCIeEncapsulatedRequestDescriptor_t, - *pMpi26PCIeEncapsulatedRequestDescriptor_t; + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + Mpi26PCIeEncapsulatedRequestDescriptor_t, + MPI2_POINTER pMpi26PCIeEncapsulatedRequestDescriptor_t; -/*union of Request Descriptors */ -typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { - MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; - MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; - MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; - MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; - MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; - MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; - MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated; - U64 Words; -} MPI2_REQUEST_DESCRIPTOR_UNION, - *PTR_MPI2_REQUEST_DESCRIPTOR_UNION, - Mpi2RequestDescriptorUnion_t, - *pMpi2RequestDescriptorUnion_t; -/*Atomic Request Descriptors */ +/* union of Request Descriptors */ +typedef union _MPI2_REQUEST_DESCRIPTOR_UNION +{ + 
MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated; + U64 Words; +} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION, + Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t; + + +/* Atomic Request Descriptors */ /* * All Atomic Request Descriptors have the same format, so the following @@ -490,34 +509,32 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { * Atomic PCIe Encapsulated Request Descriptor */ -/*Atomic Request Descriptor */ -typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR { - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ +/* Atomic Request Descriptor */ +typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ } MPI26_ATOMIC_REQUEST_DESCRIPTOR, - *PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, - Mpi26AtomicRequestDescriptor_t, - *pMpi26AtomicRequestDescriptor_t; + MPI2_POINTER PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, + Mpi26AtomicRequestDescriptor_t, MPI2_POINTER pMpi26AtomicRequestDescriptor_t; -/*for the RequestFlags field, use the same - *defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR - */ +/* for the RequestFlags field, use the same defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR */ -/*Reply Descriptors */ -/*Default Reply Descriptor */ -typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { - U8 ReplyFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 DescriptorTypeDependent1; /*0x02 */ - U32 DescriptorTypeDependent2; /*0x04 */ -} MPI2_DEFAULT_REPLY_DESCRIPTOR, - *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, - Mpi2DefaultReplyDescriptor_t, - *pMpi2DefaultReplyDescriptor_t; +/* Reply Descriptors */ -/*defines for the ReplyFlags field */ +/* Default Reply Descriptor */ +typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 DescriptorTypeDependent1; /* 0x02 */ + U32 DescriptorTypeDependent2; /* 0x04 */ +} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, + Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t; + +/* defines for the ReplyFlags field */ #define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) #define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) #define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) @@ -528,161 +545,174 @@ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { #define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08) #define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) -/*values for marking a reply descriptor as unused */ +/* values for marking a reply descriptor as unused */ #define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) #define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) -/*Address Reply Descriptor */ -typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { - U8 ReplyFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U32 ReplyFrameAddress; /*0x04 */ -} MPI2_ADDRESS_REPLY_DESCRIPTOR, - *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, - Mpi2AddressReplyDescriptor_t, - *pMpi2AddressReplyDescriptor_t; +/* Address Reply Descriptor */ +typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U32 ReplyFrameAddress; /* 0x04 */ +} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER 
PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, + Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t; #define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) -/*SCSI IO Success Reply Descriptor */ -typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { - U8 ReplyFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U16 TaskTag; /*0x04 */ - U16 Reserved1; /*0x06 */ + +/* SCSI IO Success Reply Descriptor */ +typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 TaskTag; /* 0x04 */ + U16 Reserved1; /* 0x06 */ } MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - Mpi2SCSIIOSuccessReplyDescriptor_t, - *pMpi2SCSIIOSuccessReplyDescriptor_t; + MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi2SCSIIOSuccessReplyDescriptor_t, + MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t; -/*TargetAssist Success Reply Descriptor */ -typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { - U8 ReplyFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U8 SequenceNumber; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 IoIndex; /*0x06 */ + +/* TargetAssist Success Reply Descriptor */ +typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U8 SequenceNumber; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 IoIndex; /* 0x06 */ } MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, - *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, - Mpi2TargetAssistSuccessReplyDescriptor_t, - *pMpi2TargetAssistSuccessReplyDescriptor_t; + MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + Mpi2TargetAssistSuccessReplyDescriptor_t, + MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t; -/*Target Command Buffer Reply Descriptor */ -typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { - U8 ReplyFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U8 VP_ID; /*0x02 */ - U8 Flags; /*0x03 */ - U16 InitiatorDevHandle; /*0x04 */ - U16 IoIndex; /*0x06 */ + +/* Target Command Buffer Reply Descriptor */ +typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U8 VP_ID; /* 0x02 */ + U8 Flags; /* 0x03 */ + U16 InitiatorDevHandle; /* 0x04 */ + U16 IoIndex; /* 0x06 */ } MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, - *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, - Mpi2TargetCommandBufferReplyDescriptor_t, - *pMpi2TargetCommandBufferReplyDescriptor_t; + MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + Mpi2TargetCommandBufferReplyDescriptor_t, + MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t; -/*defines for Flags field */ +/* defines for Flags field */ #define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) -/*RAID Accelerator Success Reply Descriptor */ -typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { - U8 ReplyFlags; /*0x00 */ - U8 MSIxIndex; /*0x01 */ - U16 SMID; /*0x02 */ - U32 Reserved; /*0x04 */ + +/* RAID Accelerator Success Reply Descriptor */ +typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U32 Reserved; /* 0x04 */ } MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, - *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, - Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, - *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; + MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + 
Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, + MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; -/*Fast Path SCSI IO Success Reply Descriptor */ + +/* Fast Path SCSI IO Success Reply Descriptor */ typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR - MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, - *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, + MPI2_POINTER pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; -/*PCIe Encapsulated Success Reply Descriptor */ + +/* PCIe Encapsulated Success Reply Descriptor */ typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR - MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, - *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, - Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t, - *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t; + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t, + MPI2_POINTER pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t; + + +/* union of Reply Descriptors */ +typedef union _MPI2_REPLY_DESCRIPTORS_UNION +{ + MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR PCIeEncapsulatedSuccess; + U64 Words; +} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, + Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; + -/*union of Reply Descriptors */ -typedef union _MPI2_REPLY_DESCRIPTORS_UNION { - MPI2_DEFAULT_REPLY_DESCRIPTOR Default; - MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; - MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; - MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; - MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; - MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; - MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; - MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR - PCIeEncapsulatedSuccess; - U64 Words; -} MPI2_REPLY_DESCRIPTORS_UNION, - *PTR_MPI2_REPLY_DESCRIPTORS_UNION, - Mpi2ReplyDescriptorsUnion_t, - *pMpi2ReplyDescriptorsUnion_t; /***************************************************************************** * -* Message Functions +* Message Functions * *****************************************************************************/ -#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) -#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) -#define MPI2_FUNCTION_IOC_INIT (0x02) -#define MPI2_FUNCTION_IOC_FACTS (0x03) -#define MPI2_FUNCTION_CONFIG (0x04) -#define MPI2_FUNCTION_PORT_FACTS (0x05) -#define MPI2_FUNCTION_PORT_ENABLE (0x06) -#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) -#define MPI2_FUNCTION_EVENT_ACK (0x08) -#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) -#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) -#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) -#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) -#define MPI2_FUNCTION_FW_UPLOAD (0x12) -#define 
MPI2_FUNCTION_RAID_ACTION (0x15) -#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) -#define MPI2_FUNCTION_TOOLBOX (0x17) -#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) -#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) -#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) -#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) -#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) -#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) -#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) -#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) -#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) -#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) -#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) -#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) -#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) -#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) -#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) -#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) +#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */ +#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ +#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */ +#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */ +#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */ +#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */ +#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */ +#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */ +#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */ +#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */ +#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */ +#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */ +#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */ +#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */ +#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */ +#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */ +#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */ +#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */ +#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */ /* for MPI v2.5 and earlier */ +#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) /* IO Unit Control */ /* for MPI v2.6 and later */ +#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */ +#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic Buffer Post */ +#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */ +#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ +#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ +#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator */ +#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) /* Host Based Discovery Action */ +#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) /* Power Management Control */ +#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) /* Send Host Message */ +#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) /* NVMe Encapsulated (MPI v2.6) */ +#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) /* beginning of product-specific range */ +#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* end of product-specific range */ -/*Doorbell functions */ + + +/* Doorbell functions */ #define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) #define MPI2_FUNCTION_HANDSHAKE (0x42) + 
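(Editor's note, not part of the patch.) Every request message carries one of the opcodes above in the Function byte of its header, and 0xF0-0xFF is reserved for product-specific functions. A minimal sketch of how a caller might use these defines, assuming the MPI2_FUNCTION_* values above, the MPI2_REQUEST_HEADER typedef from later in this header, and the U8 type from these headers; the example_* function names are hypothetical:

	/* Sketch only -- not part of the patch. Prepare an IOC FACTS
	 * request header and classify a function opcode.
	 */
	static void example_init_ioc_facts_header(MPI2_REQUEST_HEADER *hdr)
	{
		*hdr = (MPI2_REQUEST_HEADER){0};         /* zero all fields, incl. Reserved1 */
		hdr->Function = MPI2_FUNCTION_IOC_FACTS; /* 0x03 */
	}

	static int example_is_product_specific(U8 function)
	{
		/* the upper bound (MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC, 0xFF)
		 * is implicit for a U8 value
		 */
		return function >= MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC;
	}
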
/***************************************************************************** * -* IOC Status Values +* IOC Status Values * *****************************************************************************/ -/*mask for IOCStatus status value */ +/* mask for IOCStatus status value */ #define MPI2_IOCSTATUS_MASK (0x7FFF) /**************************************************************************** -* Common IOCStatus values for all replies +* Common IOCStatus values for all replies ****************************************************************************/ #define MPI2_IOCSTATUS_SUCCESS (0x0000) @@ -695,12 +725,11 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) #define MPI2_IOCSTATUS_INVALID_STATE (0x0008) #define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) -/*MPI v2.6 and later */ -#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) +#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) /* MPI v2.6 and later */ #define MPI2_IOCSTATUS_FAILURE (0x000F) /**************************************************************************** -* Config IOCStatus values +* Config IOCStatus values ****************************************************************************/ #define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) @@ -711,7 +740,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) /**************************************************************************** -* SCSI IO Reply +* SCSI IO Reply ****************************************************************************/ #define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) @@ -728,7 +757,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) /**************************************************************************** -* For use by SCSI Initiator and SCSI Target end-to-end data protection +* For use by SCSI Initiator and SCSI Target end-to-end data protection ****************************************************************************/ #define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) @@ -736,7 +765,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) /**************************************************************************** -* SCSI Target values +* SCSI Target values ****************************************************************************/ #define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) @@ -751,32 +780,32 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) /**************************************************************************** -* Serial Attached SCSI values +* Serial Attached SCSI values ****************************************************************************/ #define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) #define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) /**************************************************************************** -* Diagnostic Buffer Post / Diagnostic Release values +* Diagnostic Buffer Post / Diagnostic Release values ****************************************************************************/ #define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) /**************************************************************************** -* RAID Accelerator values +* RAID Accelerator values ****************************************************************************/ #define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) 
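(Editor's note, not part of the patch.) A reply's IOCStatus word packs a status code in bits 0-14 and the log-info flag in bit 15, so consumers must mask before comparing against the values above. A minimal sketch, assuming the MPI2_IOCSTATUS_* defines above, the MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE flag defined just below, and the U16 type from these headers; the example_* name is hypothetical:

	/* Sketch only -- not part of the patch. Split a raw IOCStatus word
	 * into its status code and log-info flag.
	 */
	static int example_reply_failed(U16 ioc_status_raw)
	{
		U16 status = ioc_status_raw & MPI2_IOCSTATUS_MASK;

		if (ioc_status_raw & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			/* the reply's IOCLogInfo field carries extra detail;
			 * its type can be decoded with MPI2_IOCLOGINFO_TYPE_MASK
			 */
		}
		return status != MPI2_IOCSTATUS_SUCCESS;
	}
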
/**************************************************************************** -* IOCStatus flag to indicate that log info is available +* IOCStatus flag to indicate that log info is available ****************************************************************************/ #define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) /**************************************************************************** -* IOCLogInfo Types +* IOCLogInfo Types ****************************************************************************/ #define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) @@ -788,64 +817,72 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) #define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF) + /***************************************************************************** * -* Standard Message Structures +* Standard Message Structures * *****************************************************************************/ /**************************************************************************** -*Request Message Header for all request messages +* Request Message Header for all request messages ****************************************************************************/ -typedef struct _MPI2_REQUEST_HEADER { - U16 FunctionDependent1; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 FunctionDependent2; /*0x04 */ - U8 FunctionDependent3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ -} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER, - MPI2RequestHeader_t, *pMPI2RequestHeader_t; +typedef struct _MPI2_REQUEST_HEADER +{ + U16 FunctionDependent1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 FunctionDependent2; /* 0x04 */ + U8 FunctionDependent3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ +} MPI2_REQUEST_HEADER, MPI2_POINTER PTR_MPI2_REQUEST_HEADER, + MPI2RequestHeader_t, MPI2_POINTER pMPI2RequestHeader_t; + /**************************************************************************** -* Default Reply +* Default Reply ****************************************************************************/ -typedef struct _MPI2_DEFAULT_REPLY { - U16 FunctionDependent1; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 FunctionDependent2; /*0x04 */ - U8 FunctionDependent3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ - U16 FunctionDependent5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY, - MPI2DefaultReply_t, *pMPI2DefaultReply_t; +typedef struct _MPI2_DEFAULT_REPLY +{ + U16 FunctionDependent1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 FunctionDependent2; /* 0x04 */ + U8 FunctionDependent3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U16 FunctionDependent5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_DEFAULT_REPLY, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY, + MPI2DefaultReply_t, MPI2_POINTER pMPI2DefaultReply_t; -/*common version structure/union used in messages and configuration pages */ -typedef struct _MPI2_VERSION_STRUCT { - U8 Dev; /*0x00 */ - U8 Unit; /*0x01 */ - U8 Minor; /*0x02 */ - U8 Major; /*0x03 */ +/* common version structure/union used in messages and configuration pages */ + +typedef struct _MPI2_VERSION_STRUCT +{ + U8 Dev; /* 0x00 */ + 
U8 Unit; /* 0x01 */ + U8 Minor; /* 0x02 */ + U8 Major; /* 0x03 */ } MPI2_VERSION_STRUCT; -typedef union _MPI2_VERSION_UNION { - MPI2_VERSION_STRUCT Struct; - U32 Word; +typedef union _MPI2_VERSION_UNION +{ + MPI2_VERSION_STRUCT Struct; + U32 Word; } MPI2_VERSION_UNION; -/*LUN field defines, common to many structures */ + +/* LUN field defines, common to many structures */ #define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) #define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) #define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) @@ -853,181 +890,196 @@ typedef union _MPI2_VERSION_UNION { #define MPI2_LUN_LEVEL_1_WORD (0xFF00) #define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) + /***************************************************************************** * -* Fusion-MPT MPI Scatter Gather Elements +* Fusion-MPT MPI Scatter Gather Elements * *****************************************************************************/ /**************************************************************************** -* MPI Simple Element structures +* MPI Simple Element structures ****************************************************************************/ -typedef struct _MPI2_SGE_SIMPLE32 { - U32 FlagsLength; - U32 Address; -} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32, - Mpi2SGESimple32_t, *pMpi2SGESimple32_t; +typedef struct _MPI2_SGE_SIMPLE32 +{ + U32 FlagsLength; + U32 Address; +} MPI2_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_SGE_SIMPLE32, + Mpi2SGESimple32_t, MPI2_POINTER pMpi2SGESimple32_t; -typedef struct _MPI2_SGE_SIMPLE64 { - U32 FlagsLength; - U64 Address; -} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64, - Mpi2SGESimple64_t, *pMpi2SGESimple64_t; +typedef struct _MPI2_SGE_SIMPLE64 +{ + U32 FlagsLength; + U64 Address; +} MPI2_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_SGE_SIMPLE64, + Mpi2SGESimple64_t, MPI2_POINTER pMpi2SGESimple64_t; + +typedef struct _MPI2_SGE_SIMPLE_UNION +{ + U32 FlagsLength; + union + { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION, + Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t; -typedef struct _MPI2_SGE_SIMPLE_UNION { - U32 FlagsLength; - union { - U32 Address32; - U64 Address64; - } u; -} MPI2_SGE_SIMPLE_UNION, - *PTR_MPI2_SGE_SIMPLE_UNION, - Mpi2SGESimpleUnion_t, - *pMpi2SGESimpleUnion_t; /**************************************************************************** -* MPI Chain Element structures - for MPI v2.0 products only +* MPI Chain Element structures - for MPI v2.0 products only ****************************************************************************/ -typedef struct _MPI2_SGE_CHAIN32 { - U16 Length; - U8 NextChainOffset; - U8 Flags; - U32 Address; -} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32, - Mpi2SGEChain32_t, *pMpi2SGEChain32_t; +typedef struct _MPI2_SGE_CHAIN32 +{ + U16 Length; + U8 NextChainOffset; + U8 Flags; + U32 Address; +} MPI2_SGE_CHAIN32, MPI2_POINTER PTR_MPI2_SGE_CHAIN32, + Mpi2SGEChain32_t, MPI2_POINTER pMpi2SGEChain32_t; -typedef struct _MPI2_SGE_CHAIN64 { - U16 Length; - U8 NextChainOffset; - U8 Flags; - U64 Address; -} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64, - Mpi2SGEChain64_t, *pMpi2SGEChain64_t; +typedef struct _MPI2_SGE_CHAIN64 +{ + U16 Length; + U8 NextChainOffset; + U8 Flags; + U64 Address; +} MPI2_SGE_CHAIN64, MPI2_POINTER PTR_MPI2_SGE_CHAIN64, + Mpi2SGEChain64_t, MPI2_POINTER pMpi2SGEChain64_t; + +typedef struct _MPI2_SGE_CHAIN_UNION +{ + U16 Length; + U8 NextChainOffset; + U8 Flags; + union + { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_CHAIN_UNION, MPI2_POINTER 
PTR_MPI2_SGE_CHAIN_UNION, + Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t; -typedef struct _MPI2_SGE_CHAIN_UNION { - U16 Length; - U8 NextChainOffset; - U8 Flags; - union { - U32 Address32; - U64 Address64; - } u; -} MPI2_SGE_CHAIN_UNION, - *PTR_MPI2_SGE_CHAIN_UNION, - Mpi2SGEChainUnion_t, - *pMpi2SGEChainUnion_t; /**************************************************************************** -* MPI Transaction Context Element structures - for MPI v2.0 products only +* MPI Transaction Context Element structures - for MPI v2.0 products only ****************************************************************************/ -typedef struct _MPI2_SGE_TRANSACTION32 { - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[1]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION32, - *PTR_MPI2_SGE_TRANSACTION32, - Mpi2SGETransaction32_t, - *pMpi2SGETransaction32_t; +typedef struct _MPI2_SGE_TRANSACTION32 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[1]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION32, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION32, + Mpi2SGETransaction32_t, MPI2_POINTER pMpi2SGETransaction32_t; -typedef struct _MPI2_SGE_TRANSACTION64 { - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[2]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION64, - *PTR_MPI2_SGE_TRANSACTION64, - Mpi2SGETransaction64_t, - *pMpi2SGETransaction64_t; +typedef struct _MPI2_SGE_TRANSACTION64 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[2]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION64, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION64, + Mpi2SGETransaction64_t, MPI2_POINTER pMpi2SGETransaction64_t; -typedef struct _MPI2_SGE_TRANSACTION96 { - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[3]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96, - Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t; +typedef struct _MPI2_SGE_TRANSACTION96 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[3]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION96, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION96, + Mpi2SGETransaction96_t, MPI2_POINTER pMpi2SGETransaction96_t; -typedef struct _MPI2_SGE_TRANSACTION128 { - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[4]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128, - Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128; +typedef struct _MPI2_SGE_TRANSACTION128 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[4]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION128, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION128, + Mpi2SGETransaction_t128, MPI2_POINTER pMpi2SGETransaction_t128; + +typedef struct _MPI2_SGE_TRANSACTION_UNION +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + union + { + U32 TransactionContext32[1]; + U32 TransactionContext64[2]; + U32 TransactionContext96[3]; + U32 TransactionContext128[4]; + } u; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION_UNION, + Mpi2SGETransactionUnion_t, MPI2_POINTER pMpi2SGETransactionUnion_t; -typedef struct _MPI2_SGE_TRANSACTION_UNION { - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - union { - U32 TransactionContext32[1]; - U32 TransactionContext64[2]; - U32 
TransactionContext96[3]; - U32 TransactionContext128[4]; - } u; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION_UNION, - *PTR_MPI2_SGE_TRANSACTION_UNION, - Mpi2SGETransactionUnion_t, - *pMpi2SGETransactionUnion_t; /**************************************************************************** -* MPI SGE union for IO SGL's - for MPI v2.0 products only +* MPI SGE union for IO SGL's - for MPI v2.0 products only ****************************************************************************/ -typedef struct _MPI2_MPI_SGE_IO_UNION { - union { - MPI2_SGE_SIMPLE_UNION Simple; - MPI2_SGE_CHAIN_UNION Chain; - } u; -} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION, - Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t; +typedef struct _MPI2_MPI_SGE_IO_UNION +{ + union + { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + } u; +} MPI2_MPI_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_IO_UNION, + Mpi2MpiSGEIOUnion_t, MPI2_POINTER pMpi2MpiSGEIOUnion_t; + /**************************************************************************** -* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only +* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only ****************************************************************************/ -typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION { - union { - MPI2_SGE_SIMPLE_UNION Simple; - MPI2_SGE_TRANSACTION_UNION Transaction; - } u; -} MPI2_SGE_TRANS_SIMPLE_UNION, - *PTR_MPI2_SGE_TRANS_SIMPLE_UNION, - Mpi2SGETransSimpleUnion_t, - *pMpi2SGETransSimpleUnion_t; +typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION +{ + union + { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_SGE_TRANS_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANS_SIMPLE_UNION, + Mpi2SGETransSimpleUnion_t, MPI2_POINTER pMpi2SGETransSimpleUnion_t; + /**************************************************************************** -* All MPI SGE types union +* All MPI SGE types union ****************************************************************************/ -typedef struct _MPI2_MPI_SGE_UNION { - union { - MPI2_SGE_SIMPLE_UNION Simple; - MPI2_SGE_CHAIN_UNION Chain; - MPI2_SGE_TRANSACTION_UNION Transaction; - } u; -} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION, - Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t; +typedef struct _MPI2_MPI_SGE_UNION +{ + union + { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_MPI_SGE_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_UNION, + Mpi2MpiSgeUnion_t, MPI2_POINTER pMpi2MpiSgeUnion_t; + /**************************************************************************** -* MPI SGE field definition and masks +* MPI SGE field definition and masks ****************************************************************************/ -/*Flags field bit definitions */ +/* Flags field bit definitions */ #define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) #define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) @@ -1042,31 +1094,31 @@ typedef struct _MPI2_MPI_SGE_UNION { #define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) #define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) -/*Element Type */ +/* Element Type */ -#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) +#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) /* for MPI v2.0 products only */ #define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) -#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) +#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) /* for MPI v2.0 products only */ #define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) -/*Address location */ +/* 
Address location */ #define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) -/*Direction */ +/* Direction */ #define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) #define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) -#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) -#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) +#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) +#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) -/*Address Size */ +/* Address Size */ #define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) -/*Context Size */ +/* Context Size */ #define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) #define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) @@ -1077,125 +1129,126 @@ typedef struct _MPI2_MPI_SGE_UNION { #define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) /**************************************************************************** -* MPI SGE operation Macros +* MPI SGE operation Macros ****************************************************************************/ -/*SIMPLE FlagsLength manipulations... */ -#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) -#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \ - MPI2_SGE_FLAGS_SHIFT) -#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) -#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) +/* SIMPLE FlagsLength manipulations... */ +#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) +#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) -#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \ - MPI2_SGE_LENGTH(l)) +#define MPI2_SGE_SET_FLAGS_LENGTH(f,l) (MPI2_SGE_SET_FLAGS(f) | MPI2_SGE_LENGTH(l)) -#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) -#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) -#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ - MPI2_SGE_SET_FLAGS_LENGTH(f, l)) +#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(f,l) -/*CAUTION - The following are READ-MODIFY-WRITE! */ -#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ - MPI2_SGE_SET_FLAGS(f)) -#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ - MPI2_SGE_LENGTH(l)) +/* CAUTION - The following are READ-MODIFY-WRITE! 
*/ +#define MPI2_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_SGE_SET_FLAGS(f) +#define MPI2_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_SGE_LENGTH(l) + +#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> MPI2_SGE_CHAIN_OFFSET_SHIFT) -#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \ - MPI2_SGE_CHAIN_OFFSET_SHIFT) /***************************************************************************** * -* Fusion-MPT IEEE Scatter Gather Elements +* Fusion-MPT IEEE Scatter Gather Elements * *****************************************************************************/ /**************************************************************************** -* IEEE Simple Element structures +* IEEE Simple Element structures ****************************************************************************/ -/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ -typedef struct _MPI2_IEEE_SGE_SIMPLE32 { - U32 Address; - U32 FlagsLength; -} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32, - Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t; +/* MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_SIMPLE32 +{ + U32 Address; + U32 FlagsLength; +} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32, + Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t; -typedef struct _MPI2_IEEE_SGE_SIMPLE64 { - U64 Address; - U32 Length; - U16 Reserved1; - U8 Reserved2; - U8 Flags; -} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64, - Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t; +typedef struct _MPI2_IEEE_SGE_SIMPLE64 +{ + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64, + Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t; + +typedef union _MPI2_IEEE_SGE_SIMPLE_UNION +{ + MPI2_IEEE_SGE_SIMPLE32 Simple32; + MPI2_IEEE_SGE_SIMPLE64 Simple64; +} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION, + Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t; -typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { - MPI2_IEEE_SGE_SIMPLE32 Simple32; - MPI2_IEEE_SGE_SIMPLE64 Simple64; -} MPI2_IEEE_SGE_SIMPLE_UNION, - *PTR_MPI2_IEEE_SGE_SIMPLE_UNION, - Mpi2IeeeSgeSimpleUnion_t, - *pMpi2IeeeSgeSimpleUnion_t; /**************************************************************************** -* IEEE Chain Element structures +* IEEE Chain Element structures ****************************************************************************/ -/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ -typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; +/* MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; -/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ -typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; +/* MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; -typedef union _MPI2_IEEE_SGE_CHAIN_UNION { - MPI2_IEEE_SGE_CHAIN32 Chain32; - MPI2_IEEE_SGE_CHAIN64 Chain64; -} MPI2_IEEE_SGE_CHAIN_UNION, - *PTR_MPI2_IEEE_SGE_CHAIN_UNION, - Mpi2IeeeSgeChainUnion_t, - *pMpi2IeeeSgeChainUnion_t; +typedef union _MPI2_IEEE_SGE_CHAIN_UNION +{ + MPI2_IEEE_SGE_CHAIN32 Chain32; + MPI2_IEEE_SGE_CHAIN64 Chain64; +} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION, + Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t; + +/* MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ +typedef struct 
_MPI25_IEEE_SGE_CHAIN64 +{ + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64, + Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t; -/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ -typedef struct _MPI25_IEEE_SGE_CHAIN64 { - U64 Address; - U32 Length; - U16 Reserved1; - U8 NextChainOffset; - U8 Flags; -} MPI25_IEEE_SGE_CHAIN64, - *PTR_MPI25_IEEE_SGE_CHAIN64, - Mpi25IeeeSgeChain64_t, - *pMpi25IeeeSgeChain64_t; /**************************************************************************** -* All IEEE SGE types union +* All IEEE SGE types union ****************************************************************************/ -/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ -typedef struct _MPI2_IEEE_SGE_UNION { - union { - MPI2_IEEE_SGE_SIMPLE_UNION Simple; - MPI2_IEEE_SGE_CHAIN_UNION Chain; - } u; -} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION, - Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t; +/* MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_UNION +{ + union + { + MPI2_IEEE_SGE_SIMPLE_UNION Simple; + MPI2_IEEE_SGE_CHAIN_UNION Chain; + } u; +} MPI2_IEEE_SGE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_UNION, + Mpi2IeeeSgeUnion_t, MPI2_POINTER pMpi2IeeeSgeUnion_t; + /**************************************************************************** -* IEEE SGE union for IO SGL's +* IEEE SGE union for IO SGL's ****************************************************************************/ -typedef union _MPI25_SGE_IO_UNION { - MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; - MPI25_IEEE_SGE_CHAIN64 IeeeChain; -} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION, - Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t; +typedef union _MPI25_SGE_IO_UNION +{ + MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; + MPI25_IEEE_SGE_CHAIN64 IeeeChain; +} MPI25_SGE_IO_UNION, MPI2_POINTER PTR_MPI25_SGE_IO_UNION, + Mpi25SGEIOUnion_t, MPI2_POINTER pMpi25SGEIOUnion_t; + /**************************************************************************** -* IEEE SGE field definitions and masks +* IEEE SGE field definitions and masks ****************************************************************************/ -/*Flags field bit definitions */ +/* Flags field bit definitions */ #define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) #define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) @@ -1204,93 +1257,94 @@ typedef union _MPI25_SGE_IO_UNION { #define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) -/*Element Type */ +/* Element Type */ #define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) #define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) -/*Next Segment Format */ +/* Next Segment Format */ #define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) #define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) -/*Data Location Address Space */ +/* Data Location Address Space */ #define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) -#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) -#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) +#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5 and later, use in IEEE Simple or Chain element */ +#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) /* use in IEEE Simple Element only */ #define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) -#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) -#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) -#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \ - 
(MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) -#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5, use in IEEE Simple or Chain element */ +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) /* use in MPI v2.0 IEEE Chain Element only */ +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */ + +#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) /* for MPI v2.6 only */ /**************************************************************************** -* IEEE SGE operation Macros +* IEEE SGE operation Macros ****************************************************************************/ -/*SIMPLE FlagsLength manipulations... */ -#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) -#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \ - >> MPI2_IEEE32_SGE_FLAGS_SHIFT) -#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) +/* SIMPLE FlagsLength manipulations... */ +#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) >> MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) -#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\ - MPI2_IEEE32_SGE_LENGTH(l)) +#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) | MPI2_IEEE32_SGE_LENGTH(l)) + +#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f,l) + +/* CAUTION - The following are READ-MODIFY-WRITE! */ +#define MPI2_IEEE32_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_IEEE32_SGE_SET_FLAGS(f) +#define MPI2_IEEE32_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_IEEE32_SGE_LENGTH(l) -#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \ - MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) -#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \ - MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) -#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ - MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l)) -/*CAUTION - The following are READ-MODIFY-WRITE! 
*/ -#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ - MPI2_IEEE32_SGE_SET_FLAGS(f)) -#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ - MPI2_IEEE32_SGE_LENGTH(l)) /***************************************************************************** * -* Fusion-MPT MPI/IEEE Scatter Gather Unions +* Fusion-MPT MPI/IEEE Scatter Gather Unions * *****************************************************************************/ -typedef union _MPI2_SIMPLE_SGE_UNION { - MPI2_SGE_SIMPLE_UNION MpiSimple; - MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; -} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION, - Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t; +typedef union _MPI2_SIMPLE_SGE_UNION +{ + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; +} MPI2_SIMPLE_SGE_UNION, MPI2_POINTER PTR_MPI2_SIMPLE_SGE_UNION, + Mpi2SimpleSgeUntion_t, MPI2_POINTER pMpi2SimpleSgeUntion_t; + + +typedef union _MPI2_SGE_IO_UNION +{ + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_SGE_CHAIN_UNION MpiChain; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION, + Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t; -typedef union _MPI2_SGE_IO_UNION { - MPI2_SGE_SIMPLE_UNION MpiSimple; - MPI2_SGE_CHAIN_UNION MpiChain; - MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; - MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; -} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION, - Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t; /**************************************************************************** * -* Values for SGLFlags field, used in many request messages with an SGL +* Values for SGLFlags field, used in many request messages with an SGL * ****************************************************************************/ -/*values for MPI SGL Data Location Address Space subfield */ +/* values for MPI SGL Data Location Address Space subfield */ #define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) #define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) #define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) -#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) -#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) -#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) -/*values for SGL Type subfield */ +#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.5 and earlier */ +#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.6 */ +#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) /* only for MPI v2.5 and earlier */ +/* values for SGL Type subfield */ #define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) #define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) -#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) /* MPI v2.0 products only */ #define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) + #endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h old mode 100644 new mode 100755 index 3a6871aecada..71e905cb1c89 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -1,307 +1,312 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. 
* * - * Name: mpi2_cnfg.h - * Title: MPI Configuration messages and pages - * Creation Date: November 10, 2006 + * Name: mpi2_cnfg.h + * Title: MPI Configuration messages and pages + * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.47 + * mpi2_cnfg.h Version: 02.00.48 * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. - * Added Manufacturing Page 11. - * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE - * define. - * 06-26-07 02.00.02 Adding generic structure for product-specific - * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. - * Rework of BIOS Page 2 configuration page. - * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the - * forms. - * Added configuration pages IOC Page 8 and Driver - * Persistent Mapping Page 0. - * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated - * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, - * RAID Physical Disk Pages 0 and 1, RAID Configuration - * Page 0). - * Added new value for AccessStatus field of SAS Device - * Page 0 (_SATA_NEEDS_INITIALIZATION). - * 10-31-07 02.00.04 Added missing SEPDevHandle field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for - * NVDATA. - * Modified IOC Page 7 to use masks and added field for - * SASBroadcastPrimitiveMasks. - * Added MPI2_CONFIG_PAGE_BIOS_4. - * Added MPI2_CONFIG_PAGE_LOG_0. - * 02-29-08 02.00.06 Modified various names to make them 32-character unique. - * Added SAS Device IDs. - * Updated Integrated RAID configuration pages including - * Manufacturing Page 4, IOC Page 6, and RAID Configuration - * Page 0. - * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. - * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. - * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. - * Added missing MaxNumRoutedSasAddresses field to - * MPI2_CONFIG_PAGE_EXPANDER_0. - * Added SAS Port Page 0. - * Modified structure layout for - * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. - * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use - * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. - * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF - * to 0x000000FF. - * Added two new values for the Physical Disk Coercion Size - * bits in the Flags field of Manufacturing Page 4. - * Added product-specific Manufacturing pages 16 to 31. - * Modified Flags bits for controlling write cache on SATA - * drives in IO Unit Page 1. - * Added new bit to AdditionalControlFlags of SAS IO Unit - * Page 1 to control Invalid Topology Correction. - * Added additional defines for RAID Volume Page 0 - * VolumeStatusFlags field. 
- * Modified meaning of RAID Volume Page 0 VolumeSettings - * define for auto-configure of hot-swap drives. - * Added SupportedPhysDisks field to RAID Volume Page 1 and - * added related defines. - * Added PhysDiskAttributes field (and related defines) to - * RAID Physical Disk Page 0. - * Added MPI2_SAS_PHYINFO_PHY_VACANT define. - * Added three new DiscoveryStatus bits for SAS IO Unit - * Page 0 and SAS Expander Page 0. - * Removed multiplexing information from SAS IO Unit pages. - * Added BootDeviceWaitTime field to SAS IO Unit Page 4. - * Removed Zone Address Resolved bit from PhyInfo and from - * Expander Page 0 Flags field. - * Added two new AccessStatus values to SAS Device Page 0 - * for indicating routing problems. Added 3 reserved words - * to this page. - * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. - * Inserted missing reserved field into structure for IOC - * Page 6. - * Added more pending task bits to RAID Volume Page 0 - * VolumeStatusFlags defines. - * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. - * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 - * and SAS Expander Page 0 to flag a downstream initiator - * when in simplified routing mode. - * Removed SATA Init Failure defines for DiscoveryStatus - * fields of SAS IO Unit Page 0 and SAS Expander Page 0. - * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. - * Added PortGroups, DmaGroup, and ControlGroup fields to - * SAS Device Page 0. - * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO - * Unit Page 6. - * Added expander reduced functionality data to SAS - * Expander Page 0. - * Added SAS PHY Page 2 and SAS PHY Page 3. - * 07-30-09 02.00.12 Added IO Unit Page 7. - * Added new device ids. - * Added SAS IO Unit Page 5. - * Added partial and slumber power management capable flags - * to SAS Device Page 0 Flags field. - * Added PhyInfo defines for power condition. - * Added Ethernet configuration pages. - * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. - * Added SAS PHY Page 4 structure and defines. - * 02-10-10 02.00.14 Modified the comments for the configuration page - * structures that contain an array of data. The host - * should use the "count" field in the page data (e.g. the - * NumPhys field) to determine the number of valid elements - * in the array. - * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. - * Added PowerManagementCapabilities to IO Unit Page 7. - * Added PortWidthModGroup field to - * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. - * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT - * define. - * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. - * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. - * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) - * defines. - * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to - * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for - * the Pinout field. - * Added BoardTemperature and BoardTemperatureUnits fields - * to MPI2_CONFIG_PAGE_IO_UNIT_7. - * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define - * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. - * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. - * Added IO Unit Page 8, IO Unit Page 9, - * and IO Unit Page 10. 
- * Added SASNotifyPrimitiveMasks field to - * MPI2_CONFIG_PAGE_IOC_7. - * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). - * 05-25-11 02.00.20 Cleaned up a few comments. - * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities - * for PCIe link as obsolete. - * Added SpinupFlags field containing a Disable Spin-up bit - * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO - * Unit Page 4. - * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. - * Added UEFIVersion field to BIOS Page 1 and defined new - * BiosOptions bits. - * Incorporating additions for MPI v2.5. - * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. - * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. - * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as - * obsolete for MPI v2.5 and later. - * Added some defines for 12G SAS speeds. - * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. - * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to - * match the specification. - * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for - * future use. - * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for - * MPI2_CONFIG_PAGE_MAN_7. - * Added EnclosureLevel and ConnectorName fields to - * MPI2_CONFIG_PAGE_SAS_DEV_0. - * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for - * MPI2_CONFIG_PAGE_SAS_DEV_0. - * Added EnclosureLevel field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 01-08-14 02.00.28 Added more defines for the BiosOptions field of - * MPI2_CONFIG_PAGE_BIOS_1. - * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and - * more defines for the BiosOptions field. - * 11-18-14 02.00.30 Updated copyright information. - * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. - * Added AdapterOrderAux fields to BIOS Page 3. - * 03-16-15 02.00.31 Updated for MPI v2.6. - * Added Flags field to IO Unit Page 7. - * Added new SAS Phy Event codes - * 05-25-15 02.00.33 Added more defines for the BiosOptions field of - * MPI2_CONFIG_PAGE_BIOS_1. - * 08-25-15 02.00.34 Bumped Header Version. - * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. - * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines. - * Added Link field to PCIe Link Pages - * Added EnclosureLevel and ConnectorName to PCIe - * Device Page 0. - * Added define for PCIE IoUnit page 1 max rate shift. - * Added comment for reserved ExtPageTypes. - * Added SAS 4 22.5 gbs speed support. - * Added PCIe 4 16.0 GT/sec speec support. - * Removed AHCI support. - * Removed SOP support. - * Added NegotiatedLinkRate and NegotiatedPortWidth to - * PCIe device page 0. - * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines - * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types. - * Changed declaration of ConnectorName in PCIe DevicePage0 - * to match SAS DevicePage 0. - * Added SATADeviceWaitTime to IO Unit Page 11. - * Added MPI26_MFGPAGE_DEVID_SAS4008 - * Added x16 PCIe width to IO Unit Page 7 - * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1 - * phy data. - * Added InitStatus to PCIe IO Unit Page 1 header. - * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines. - * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and - * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats. - * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN. 
- * Added ChassisSlot field to SAS Enclosure Page 0. - * Added ChassisSlot Valid bit (bit 5) to the Flags field - * in SAS Enclosure Page 0. - * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and - * MPI26_MFGPAGE_DEVID_SAS3916 defines. - * Removed MPI26_MFGPAGE_DEVID_SAS4008 define. - * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define. - * Renamed PI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to - * PI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN. - * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to - * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK. - * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2. - * Added NOIOB field to PCIe Device Page 2. - * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to - * the Capabilities field of PCIe Device Page 2. - * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816. - * Added WRiteCache defines to IO Unit Page 1. - * Added MaxEnclosureLevel to BIOS Page 1. - * Added OEMRD to SAS Enclosure Page 1. - * Added DMDReportPCIe to PCIe IO Unit Page 1. - * Added Flags field and flags for Retimers to - * PCIe Switch Page 1. - * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7. - * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1 - * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 - * Added DMDReport Delay Time defines to - * PCIeIOUnitPage1 - * -------------------------------------------------------------------------- - * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7. - * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1 - * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 - * Added DMDReport Delay Time defines to PCIeIOUnitPage1 - * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7. + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). + * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. + * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. 
+ * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. + * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. + * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. 
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. + * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities + * for PCIe link as obsolete. + * Added SpinupFlags field containing a Disable Spin-up bit + * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO + * Unit Page 4. + * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. + * Added UEFIVersion field to BIOS Page 1 and defined new + * BiosOptions bits. + * Incorporating additions for MPI v2.5. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. + * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. + * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for + * future use. + * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for + * MPI2_CONFIG_PAGE_MAN_7. + * Added EnclosureLevel and ConnectorName fields to + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added EnclosureLevel field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 01-08-14 02.00.28 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and + * more defines for the BiosOptions field. + * 11-18-14 02.00.30 Updated copyright information. + * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. + * Added AdapterOrderAux fields to BIOS Page 3. + * 03-16-15 02.00.31 Updated for MPI v2.6. + * Added BoardPowerRequirement, PCISlotPowerAllocation, and + * Flags field to IO Unit Page 7. + * Added IO Unit Page 11. + * Added new SAS Phy Event codes + * Added PCIe configuration pages. + * 03-19-15 02.00.32 Fixed PCIe Link Config page structure names to be + * unique in first 32 characters. + * 05-25-15 02.00.33 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 08-25-15 02.00.34 Added PCIe Device Page 2 SGL format capability. + * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. + * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines. + * Added Link field to PCIe Link Pages + * Added EnclosureLevel and ConnectorName to PCIe + * Device Page 0. 
+ *                   Added define for PCIE IoUnit page 1 max rate shift.
+ *                   Added comment for reserved ExtPageTypes.
+ *                   Added SAS 4 22.5 Gb/s speed support.
+ *                   Added PCIe 4 16.0 GT/sec speed support.
+ *                   Removed AHCI support.
+ *                   Removed SOP support.
+ *                   Added NegotiatedLinkRate and NegotiatedPortWidth to
+ *                   PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ *                   Changed declaration of ConnectorName in PCIe DevicePage0
+ *                   to match SAS DevicePage 0.
+ *                   Added SATADeviceWaitTime to IO Unit Page 11.
+ *                   Added MPI26_MFGPAGE_DEVID_SAS4008
+ *                   Added x16 PCIe width to IO Unit Page 7
+ *                   Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ *                   phy data.
+ *                   Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ *                   Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ *                   MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ *                   Added ChassisSlot field to SAS Enclosure Page 0.
+ *                   Added ChassisSlot Valid bit (bit 5) to the Flags field
+ *                   in SAS Enclosure Page 0.
+ * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
+ *                   MPI26_MFGPAGE_DEVID_SAS3916 defines.
+ *                   Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
+ *                   Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
+ *                   Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
+ *                   MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
+ *                   Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
+ *                   MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
+ * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
+ *                   Added NOIOB field to PCIe Device Page 2.
+ *                   Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
+ *                   the Capabilities field of PCIe Device Page 2.
+ * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816.
+ *                   Added WriteCache defines to IO Unit Page 1.
+ *                   Added MaxEnclosureLevel to BIOS Page 1.
+ *                   Added OEMRD to SAS Enclosure Page 1.
+ *                   Added DMDReportPCIe to PCIe IO Unit Page 1.
+ *                   Added Flags field and flags for Retimers to
+ *                   PCIe Switch Page 1.
+ * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
+ * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1
+ * 08-28-18 02.00.46 Added NVMe Write Cache flag to IOUnitPage1
+ *                   Added DMDReport Delay Time defines to PCIeIOUnitPage1
+ * 12-17-18 02.00.47 Swapped locations of Slotx2 and Slotx4 in ManPage 7.
+ * 06-24-19 02.00.48 Added DEVID for PCI Switch and MPI endpoints.
+ *                   Added Man7 flag for Connector Lane.
+ *                   Modified NVMe WriteCache values.
+ *                   Added ConfigSource to PCIeIOUnit0.
+ *                   Added various PCI id info to PCIeDevice2.
+ * --------------------------------------------------------------------------
  */

 #ifndef MPI2_CNFG_H
 #define MPI2_CNFG_H

 /*****************************************************************************
-*  Configuration Page Header and defines
+*   Configuration Page Header and defines
 *****************************************************************************/

-/*Config Page Header */
-typedef struct _MPI2_CONFIG_PAGE_HEADER {
-	U8                 PageVersion;        /*0x00 */
-	U8                 PageLength;         /*0x01 */
-	U8                 PageNumber;         /*0x02 */
-	U8                 PageType;           /*0x03 */
-} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER,
-	Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t;
+/* Config Page Header */
+typedef struct _MPI2_CONFIG_PAGE_HEADER
+{
+    U8                 PageVersion;        /* 0x00 */
+    U8                 PageLength;         /* 0x01 */
+    U8                 PageNumber;         /* 0x02 */
+    U8                 PageType;           /* 0x03 */
+} MPI2_CONFIG_PAGE_HEADER, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER,
+  Mpi2ConfigPageHeader_t, MPI2_POINTER pMpi2ConfigPageHeader_t;

-typedef union _MPI2_CONFIG_PAGE_HEADER_UNION {
-	MPI2_CONFIG_PAGE_HEADER Struct;
-	U8                      Bytes[4];
-	U16                     Word16[2];
-	U32                     Word32;
-} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
-	Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion;
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION
+{
+    MPI2_CONFIG_PAGE_HEADER Struct;
+    U8                      Bytes[4];
+    U16                     Word16[2];
+    U32                     Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+  Mpi2ConfigPageHeaderUnion, MPI2_POINTER pMpi2ConfigPageHeaderUnion;

-/*Extended Config Page Header */
-typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER {
-	U8                  PageVersion;       /*0x00 */
-	U8                  Reserved1;         /*0x01 */
-	U8                  PageNumber;        /*0x02 */
-	U8                  PageType;          /*0x03 */
-	U16                 ExtPageLength;     /*0x04 */
-	U8                  ExtPageType;       /*0x06 */
-	U8                  Reserved2;         /*0x07 */
+/* Extended Config Page Header */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER
+{
+    U8                  PageVersion;       /* 0x00 */
+    U8                  Reserved1;         /* 0x01 */
+    U8                  PageNumber;        /* 0x02 */
+    U8                  PageType;          /* 0x03 */
+    U16                 ExtPageLength;     /* 0x04 */
+    U8                  ExtPageType;       /* 0x06 */
+    U8                  Reserved2;         /* 0x07 */
 } MPI2_CONFIG_EXTENDED_PAGE_HEADER,
-	*PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
-	Mpi2ConfigExtendedPageHeader_t,
-	*pMpi2ConfigExtendedPageHeader_t;
+  MPI2_POINTER PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+  Mpi2ConfigExtendedPageHeader_t, MPI2_POINTER pMpi2ConfigExtendedPageHeader_t;

-typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
-	MPI2_CONFIG_PAGE_HEADER          Struct;
-	MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
-	U8                               Bytes[8];
-	U16                              Word16[4];
-	U32                              Word32[2];
-} MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
-	*PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
-	Mpi2ConfigPageExtendedHeaderUnion,
-	*pMpi2ConfigPageExtendedHeaderUnion;
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
+{
+    MPI2_CONFIG_PAGE_HEADER          Struct;
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+    U8                               Bytes[8];
+    U16                              Word16[4];
+    U32                              Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+  Mpi2ConfigPageExtendedHeaderUnion, MPI2_POINTER pMpi2ConfigPageExtendedHeaderUnion;

-/*PageType field values */
+/* PageType field values */
 #define MPI2_CONFIG_PAGEATTR_READ_ONLY              (0x00)
 #define MPI2_CONFIG_PAGEATTR_CHANGEABLE             (0x10)
 #define MPI2_CONFIG_PAGEATTR_PERSISTENT             (0x20)

@@ -319,7 +324,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_CONFIG_TYPENUM_MASK                    (0x0FFF)

-/*ExtPageType field values */
+/* ExtPageType field values */
 #define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT         (0x10)
 #define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER        (0x11)
 #define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE          (0x12)

@@ -331,17 +336,19 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT            (0x18)
 #define MPI2_CONFIG_EXTPAGETYPE_ETHERNET            (0x19)
 #define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING   (0x1A)
-#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT        (0x1B)
-#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH         (0x1C)
-#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE         (0x1D)
-#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK           (0x1E)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT        (0x1B) /* MPI v2.6 and later */
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH         (0x1C) /* MPI v2.6 and later */
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE         (0x1D) /* MPI v2.6 and later */
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK           (0x1E) /* MPI v2.6 and later */
+/* Product specific reserved values 0xE0 - 0xEF */
+/* Vendor specific reserved values 0xF0 - 0xFF */


 /*****************************************************************************
-*  PageAddress defines
+*   PageAddress defines
 *****************************************************************************/

-/*RAID Volume PageAddress format */
+/* RAID Volume PageAddress format */
 #define MPI2_RAID_VOLUME_PGAD_FORM_MASK             (0xF0000000)
 #define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE  (0x00000000)
 #define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE           (0x10000000)

@@ -349,7 +356,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK           (0x0000FFFF)

-/*RAID Physical Disk PageAddress format */
+/* RAID Physical Disk PageAddress format */
 #define MPI2_PHYSDISK_PGAD_FORM_MASK                    (0xF0000000)
 #define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM    (0x00000000)
 #define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM             (0x10000000)

@@ -359,7 +366,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK           (0x0000FFFF)

-/*SAS Expander PageAddress format */
+/* SAS Expander PageAddress format */
 #define MPI2_SAS_EXPAND_PGAD_FORM_MASK              (0xF0000000)
 #define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL     (0x00000000)
 #define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM      (0x10000000)

@@ -370,7 +377,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT           (16)

-/*SAS Device PageAddress format */
+/* SAS Device PageAddress format */
 #define MPI2_SAS_DEVICE_PGAD_FORM_MASK              (0xF0000000)
 #define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE   (0x00000000)
 #define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE            (0x20000000)

@@ -378,7 +385,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK            (0x0000FFFF)

-/*SAS PHY PageAddress format */
+/* SAS PHY PageAddress format */
 #define MPI2_SAS_PHY_PGAD_FORM_MASK                 (0xF0000000)
 #define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER           (0x00000000)
 #define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX        (0x10000000)

@@ -387,7 +394,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK        (0x0000FFFF)

-/*SAS Port PageAddress format */
+/* SAS Port PageAddress format */
 #define MPI2_SASPORT_PGAD_FORM_MASK                 (0xF0000000)
 #define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT        (0x00000000)
 #define MPI2_SASPORT_PGAD_FORM_PORT_NUM             (0x10000000)

@@ -395,21 +402,21 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_SASPORT_PGAD_PORTNUMBER_MASK           (0x00000FFF)

-/*SAS Enclosure PageAddress format */
+/* SAS Enclosure PageAddress format */
 #define MPI2_SAS_ENCLOS_PGAD_FORM_MASK              (0xF0000000)
 #define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE   (0x00000000)
 #define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE            (0x10000000)
 #define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK            (0x0000FFFF)

-/*Enclosure PageAddress format */
+/* Enclosure PageAddress format */
 #define MPI26_ENCLOS_PGAD_FORM_MASK                 (0xF0000000)
 #define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE      (0x00000000)
 #define MPI26_ENCLOS_PGAD_FORM_HANDLE               (0x10000000)
 #define MPI26_ENCLOS_PGAD_HANDLE_MASK               (0x0000FFFF)

-/*RAID Configuration PageAddress format */
+/* RAID Configuration PageAddress format */
 #define MPI2_RAID_PGAD_FORM_MASK                    (0xF0000000)
 #define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM      (0x00000000)
 #define MPI2_RAID_PGAD_FORM_CONFIGNUM               (0x10000000)

@@ -418,7 +425,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_RAID_PGAD_CONFIGNUM_MASK               (0x000000FF)

-/*Driver Persistent Mapping PageAddress format */
+/* Driver Persistent Mapping PageAddress format */
 #define MPI2_DPM_PGAD_FORM_MASK                     (0xF0000000)
 #define MPI2_DPM_PGAD_FORM_ENTRY_RANGE              (0x00000000)

@@ -427,14 +434,14 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI2_DPM_PGAD_START_ENTRY_MASK              (0x0000FFFF)

-/*Ethernet PageAddress format */
+/* Ethernet PageAddress format */
 #define MPI2_ETHERNET_PGAD_FORM_MASK                (0xF0000000)
 #define MPI2_ETHERNET_PGAD_FORM_IF_NUM              (0x00000000)
 #define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK           (0x000000FF)

-/*PCIe Switch PageAddress format */
+/* PCIe Switch PageAddress format */
 #define MPI26_PCIE_SWITCH_PGAD_FORM_MASK            (0xF0000000)
 #define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL   (0x00000000)
 #define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM    (0x10000000)

@@ -445,14 +452,14 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
 #define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT        (16)

-/*PCIe Device PageAddress format */
+/* PCIe Device PageAddress format */
 #define MPI26_PCIE_DEVICE_PGAD_FORM_MASK            (0xF0000000)
 #define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
 #define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE          (0x20000000)
 #define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK          (0x0000FFFF)

-/*PCIe Link PageAddress format */
+/* PCIe Link PageAddress format */
 #define MPI26_PCIE_LINK_PGAD_FORM_MASK              (0xF0000000)
 #define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK     (0x00000000)
 #define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM          (0x10000000)

@@ -462,32 +469,33 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {

 /****************************************************************************
-*  Configuration messages
+*   Configuration messages
 ****************************************************************************/

-/*Configuration Request Message */
-typedef struct _MPI2_CONFIG_REQUEST {
-	U8                      Action;          /*0x00 */
-	U8                      SGLFlags;        /*0x01 */
-	U8                      ChainOffset;     /*0x02 */
-	U8                      Function;        /*0x03 */
-	U16                     ExtPageLength;   /*0x04 */
-	U8                      ExtPageType;     /*0x06 */
-	U8                      MsgFlags;        /*0x07 */
-	U8                      VP_ID;           /*0x08 */
-	U8                      VF_ID;           /*0x09 */
-	U16                     Reserved1;       /*0x0A */
-	U8                      Reserved2;       /*0x0C */
-	U8                      ProxyVF_ID;      /*0x0D */
-	U16                     Reserved4;       /*0x0E */
-	U32                     Reserved3;       /*0x10 */
-	MPI2_CONFIG_PAGE_HEADER Header;          /*0x14 */
-	U32                     PageAddress;     /*0x18 */
-	MPI2_SGE_IO_UNION       PageBufferSGE;   /*0x1C */
-} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST,
-	Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t;
+/* Configuration Request Message */
+typedef struct _MPI2_CONFIG_REQUEST
+{
+    U8                      Action;          /* 0x00 */
+    U8                      SGLFlags;        /* 0x01 */
+    U8                      ChainOffset;     /* 0x02 */
+    U8                      Function;        /* 0x03 */
+    U16                     ExtPageLength;   /* 0x04 */
+    U8                      ExtPageType;     /* 0x06 */
+    U8                      MsgFlags;        /* 0x07 */
+    U8                      VP_ID;           /* 0x08 */
+    U8                      VF_ID;           /* 0x09 */
+    U16                     Reserved1;       /* 0x0A */
+    U8                      Reserved2;       /* 0x0C */
+    U8                      ProxyVF_ID;      /* 0x0D */
+    U16                     Reserved4;       /* 0x0E */
+    U32                     Reserved3;       /* 0x10 */
+    MPI2_CONFIG_PAGE_HEADER Header;          /* 0x14 */
+    U32                     PageAddress;     /* 0x18 */
+    MPI2_SGE_IO_UNION       PageBufferSGE;   /* 0x1C */
+} MPI2_CONFIG_REQUEST, MPI2_POINTER PTR_MPI2_CONFIG_REQUEST,
+  Mpi2ConfigRequest_t, MPI2_POINTER pMpi2ConfigRequest_t;
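A minimal sketch of how host code might fill this request to read SAS Device Page 0 for a known device handle; this is an editorial illustration, not part of the patch. It assumes MPI2_FUNCTION_CONFIG from mpi2.h and MPI2_CONFIG_PAGETYPE_EXTENDED from elsewhere in this header, uses the Action and PageAddress defines shown nearby, and omits endian conversion and SGE setup:

static void build_sas_dev0_read(MPI2_CONFIG_REQUEST *req, U16 dev_handle)
{
    req->Function          = MPI2_FUNCTION_CONFIG;  /* from mpi2.h */
    req->Action            = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
    req->Header.PageType   = MPI2_CONFIG_PAGETYPE_EXTENDED;
    req->ExtPageType       = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
    req->Header.PageNumber = 0;
    /* PageAddress selects the target: the form bits plus the handle bits */
    req->PageAddress = MPI2_SAS_DEVICE_PGAD_FORM_HANDLE |
                       (dev_handle & MPI2_SAS_DEVICE_PGAD_HANDLE_MASK);
    /* Header.PageVersion and ExtPageLength would normally be copied from
     * the reply to a prior MPI2_CONFIG_ACTION_PAGE_HEADER request.
     */
}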
-/*values for the Action field */
+/* values for the Action field */
 #define MPI2_CONFIG_ACTION_PAGE_HEADER              (0x00)
 #define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT        (0x01)
 #define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT       (0x02)

@@ -497,43 +505,44 @@ typedef struct _MPI2_CONFIG_REQUEST {
 #define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM          (0x06)
 #define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE      (0x07)

-/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */

-/*Config Reply Message */
-typedef struct _MPI2_CONFIG_REPLY {
-	U8                      Action;          /*0x00 */
-	U8                      SGLFlags;        /*0x01 */
-	U8                      MsgLength;       /*0x02 */
-	U8                      Function;        /*0x03 */
-	U16                     ExtPageLength;   /*0x04 */
-	U8                      ExtPageType;     /*0x06 */
-	U8                      MsgFlags;        /*0x07 */
-	U8                      VP_ID;           /*0x08 */
-	U8                      VF_ID;           /*0x09 */
-	U16                     Reserved1;       /*0x0A */
-	U16                     Reserved2;       /*0x0C */
-	U16                     IOCStatus;       /*0x0E */
-	U32                     IOCLogInfo;      /*0x10 */
-	MPI2_CONFIG_PAGE_HEADER Header;          /*0x14 */
-} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY,
-	Mpi2ConfigReply_t, *pMpi2ConfigReply_t;
+/* Config Reply Message */
+typedef struct _MPI2_CONFIG_REPLY
+{
+    U8                      Action;          /* 0x00 */
+    U8                      SGLFlags;        /* 0x01 */
+    U8                      MsgLength;       /* 0x02 */
+    U8                      Function;        /* 0x03 */
+    U16                     ExtPageLength;   /* 0x04 */
+    U8                      ExtPageType;     /* 0x06 */
+    U8                      MsgFlags;        /* 0x07 */
+    U8                      VP_ID;           /* 0x08 */
+    U8                      VF_ID;           /* 0x09 */
+    U16                     Reserved1;       /* 0x0A */
+    U16                     Reserved2;       /* 0x0C */
+    U16                     IOCStatus;       /* 0x0E */
+    U32                     IOCLogInfo;      /* 0x10 */
+    MPI2_CONFIG_PAGE_HEADER Header;          /* 0x14 */
+} MPI2_CONFIG_REPLY, MPI2_POINTER PTR_MPI2_CONFIG_REPLY,
+  Mpi2ConfigReply_t, MPI2_POINTER pMpi2ConfigReply_t;
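A matching sketch for the reply side, again illustrative only: before trusting page data, host code typically checks IOCStatus and sizes the transfer from the returned header. MPI2_IOCSTATUS_MASK and MPI2_IOCSTATUS_SUCCESS are assumed from mpi2.h, and MPI2_CONFIG_PAGETYPE_MASK/_EXTENDED from elsewhere in this header; endian conversion is elided:

static int config_page_bytes(const MPI2_CONFIG_REPLY *reply)
{
    if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
        return -1;
    /* Extended pages report their length in ExtPageLength; plain pages
     * use Header.PageLength.  Both counts are in 4-byte words.
     */
    if ((reply->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) ==
        MPI2_CONFIG_PAGETYPE_EXTENDED)
        return reply->ExtPageLength * 4;
    return reply->Header.PageLength * 4;
}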
 /*****************************************************************************
 *
-*             C o n f i g u r a t i o n    P a g e s
+*              C o n f i g u r a t i o n    P a g e s
 *
 *****************************************************************************/

 /****************************************************************************
-*  Manufacturing Config pages
+*   Manufacturing Config pages
 ****************************************************************************/

 #define MPI2_MFGPAGE_VENDORID_LSI                   (0x1000)

-/*MPI v2.0 SAS products */
+/* MPI v2.0 SAS products */
 #define MPI2_MFGPAGE_DEVID_SAS2004                  (0x0070)
 #define MPI2_MFGPAGE_DEVID_SAS2008                  (0x0072)
 #define MPI2_MFGPAGE_DEVID_SAS2108_1                (0x0074)

@@ -553,10 +562,8 @@ typedef struct _MPI2_CONFIG_REPLY {
 #define MPI2_MFGPAGE_DEVID_SAS2308_1                (0x0086)
 #define MPI2_MFGPAGE_DEVID_SAS2308_2                (0x0087)
 #define MPI2_MFGPAGE_DEVID_SAS2308_3                (0x006E)
-#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP            (0x02B0)
-#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1          (0x02B1)

-/*MPI v2.5 SAS products */
+/* MPI v2.5 SAS products */
 #define MPI25_MFGPAGE_DEVID_SAS3004                 (0x0096)
 #define MPI25_MFGPAGE_DEVID_SAS3008                 (0x0097)
 #define MPI25_MFGPAGE_DEVID_SAS3108_1               (0x0090)

@@ -582,6 +589,9 @@ typedef struct _MPI2_CONFIG_REPLY {
 #define MPI26_MFGPAGE_DEVID_SAS3508                 (0x00AD)
 #define MPI26_MFGPAGE_DEVID_SAS3508_1               (0x00AE)
 #define MPI26_MFGPAGE_DEVID_SAS3408                 (0x00AF)
+
+#define MPI26_MFGPAGE_DEVID_PEX88000                (0x00B2)
+
 #define MPI26_MFGPAGE_DEVID_SAS3716                 (0x00D0)
 #define MPI26_MFGPAGE_DEVID_SAS3616                 (0x00D1)
 #define MPI26_MFGPAGE_DEVID_SAS3708                 (0x00D2)

@@ -598,139 +608,141 @@ typedef struct _MPI2_CONFIG_REPLY {
 #define MPI26_MFGPAGE_DEVID_HARD_SEC_3816           (0x00E6)
 #define MPI26_MFGPAGE_DEVID_INVALID1_3816           (0x00E7)
+#define MPI26_MFGPAGE_DEVID_SWCH_MPI_EP             (0x02B0)
+#define MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1           (0x02B1)

-/*Manufacturing Page 0 */
-typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
-	MPI2_CONFIG_PAGE_HEADER Header;              /*0x00 */
-	U8                      ChipName[16];        /*0x04 */
-	U8                      ChipRevision[8];     /*0x14 */
-	U8                      BoardName[16];       /*0x1C */
-	U8                      BoardAssembly[16];   /*0x2C */
-	U8                      BoardTracerNumber[16]; /*0x3C */
+/* Manufacturing Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_0
+{
+    MPI2_CONFIG_PAGE_HEADER Header;              /* 0x00 */
+    U8                      ChipName[16];        /* 0x04 */
+    U8                      ChipRevision[8];     /* 0x14 */
+    U8                      BoardName[16];       /* 0x1C */
+    U8                      BoardAssembly[16];   /* 0x2C */
+    U8                      BoardTracerNumber[16]; /* 0x3C */
 } MPI2_CONFIG_PAGE_MAN_0,
-	*PTR_MPI2_CONFIG_PAGE_MAN_0,
-	Mpi2ManufacturingPage0_t,
-	*pMpi2ManufacturingPage0_t;
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_0,
+  Mpi2ManufacturingPage0_t, MPI2_POINTER pMpi2ManufacturingPage0_t;

 #define MPI2_MANUFACTURING0_PAGEVERSION             (0x00)

-/*Manufacturing Page 1 */
+/* Manufacturing Page 1 */

-typedef struct _MPI2_CONFIG_PAGE_MAN_1 {
-	MPI2_CONFIG_PAGE_HEADER Header;              /*0x00 */
-	U8                      VPD[256];            /*0x04 */
+typedef struct _MPI2_CONFIG_PAGE_MAN_1
+{
+    MPI2_CONFIG_PAGE_HEADER Header;              /* 0x00 */
+    U8                      VPD[256];            /* 0x04 */
 } MPI2_CONFIG_PAGE_MAN_1,
-	*PTR_MPI2_CONFIG_PAGE_MAN_1,
-	Mpi2ManufacturingPage1_t,
-	*pMpi2ManufacturingPage1_t;
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_1,
+  Mpi2ManufacturingPage1_t, MPI2_POINTER pMpi2ManufacturingPage1_t;

 #define MPI2_MANUFACTURING1_PAGEVERSION             (0x00)

-typedef struct _MPI2_CHIP_REVISION_ID {
-	U16 DeviceID;       /*0x00 */
-	U8  PCIRevisionID;  /*0x02 */
-	U8  Reserved;       /*0x03 */
-} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID,
-	Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t;
+typedef struct _MPI2_CHIP_REVISION_ID
+{
+    U16 DeviceID;       /* 0x00 */
+    U8  PCIRevisionID;  /* 0x02 */
+    U8  Reserved;       /* 0x03 */
+} MPI2_CHIP_REVISION_ID, MPI2_POINTER PTR_MPI2_CHIP_REVISION_ID,
+  Mpi2ChipRevisionId_t, MPI2_POINTER pMpi2ChipRevisionId_t;

-/*Manufacturing Page 2 */
+/* Manufacturing Page 2 */

 /*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check Header.PageLength at runtime.
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
  */
 #ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
 #define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS   (1)
 #endif

-typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
-	MPI2_CONFIG_PAGE_HEADER Header;              /*0x00 */
-	MPI2_CHIP_REVISION_ID   ChipId;              /*0x04 */
-	U32
-		HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */
+typedef struct _MPI2_CONFIG_PAGE_MAN_2
+{
+    MPI2_CONFIG_PAGE_HEADER Header;              /* 0x00 */
+    MPI2_CHIP_REVISION_ID   ChipId;              /* 0x04 */
+    U32                     HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 0x08 */
 } MPI2_CONFIG_PAGE_MAN_2,
-	*PTR_MPI2_CONFIG_PAGE_MAN_2,
-	Mpi2ManufacturingPage2_t,
-	*pMpi2ManufacturingPage2_t;
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_2,
+  Mpi2ManufacturingPage2_t, MPI2_POINTER pMpi2ManufacturingPage2_t;

 #define MPI2_MANUFACTURING2_PAGEVERSION             (0x00)

-/*Manufacturing Page 3 */
+/* Manufacturing Page 3 */

 /*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check Header.PageLength at runtime.
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
*/ #ifndef MPI2_MAN_PAGE_3_INFO_WORDS #define MPI2_MAN_PAGE_3_INFO_WORDS (1) #endif -typedef struct _MPI2_CONFIG_PAGE_MAN_3 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ - U32 - Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */ +typedef struct _MPI2_CONFIG_PAGE_MAN_3 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */ + U32 Info[MPI2_MAN_PAGE_3_INFO_WORDS];/* 0x08 */ } MPI2_CONFIG_PAGE_MAN_3, - *PTR_MPI2_CONFIG_PAGE_MAN_3, - Mpi2ManufacturingPage3_t, - *pMpi2ManufacturingPage3_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_3, + Mpi2ManufacturingPage3_t, MPI2_POINTER pMpi2ManufacturingPage3_t; #define MPI2_MANUFACTURING3_PAGEVERSION (0x00) -/*Manufacturing Page 4 */ +/* Manufacturing Page 4 */ -typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS { - U8 PowerSaveFlags; /*0x00 */ - U8 InternalOperationsSleepTime; /*0x01 */ - U8 InternalOperationsRunTime; /*0x02 */ - U8 HostIdleTime; /*0x03 */ +typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS +{ + U8 PowerSaveFlags; /* 0x00 */ + U8 InternalOperationsSleepTime; /* 0x01 */ + U8 InternalOperationsRunTime; /* 0x02 */ + U8 HostIdleTime; /* 0x03 */ } MPI2_MANPAGE4_PWR_SAVE_SETTINGS, - *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, - Mpi2ManPage4PwrSaveSettings_t, - *pMpi2ManPage4PwrSaveSettings_t; + MPI2_POINTER PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + Mpi2ManPage4PwrSaveSettings_t, MPI2_POINTER pMpi2ManPage4PwrSaveSettings_t; -/*defines for the PowerSaveFlags field */ +/* defines for the PowerSaveFlags field */ #define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) #define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) #define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) #define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) -typedef struct _MPI2_CONFIG_PAGE_MAN_4 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - U32 Flags; /*0x08 */ - U8 InquirySize; /*0x0C */ - U8 Reserved2; /*0x0D */ - U16 Reserved3; /*0x0E */ - U8 InquiryData[56]; /*0x10 */ - U32 RAID0VolumeSettings; /*0x48 */ - U32 RAID1EVolumeSettings; /*0x4C */ - U32 RAID1VolumeSettings; /*0x50 */ - U32 RAID10VolumeSettings; /*0x54 */ - U32 Reserved4; /*0x58 */ - U32 Reserved5; /*0x5C */ - MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */ - U8 MaxOCEDisks; /*0x64 */ - U8 ResyncRate; /*0x65 */ - U16 DataScrubDuration; /*0x66 */ - U8 MaxHotSpares; /*0x68 */ - U8 MaxPhysDisksPerVol; /*0x69 */ - U8 MaxPhysDisks; /*0x6A */ - U8 MaxVolumes; /*0x6B */ +typedef struct _MPI2_CONFIG_PAGE_MAN_4 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Flags; /* 0x08 */ + U8 InquirySize; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + U8 InquiryData[56]; /* 0x10 */ + U32 RAID0VolumeSettings; /* 0x48 */ + U32 RAID1EVolumeSettings; /* 0x4C */ + U32 RAID1VolumeSettings; /* 0x50 */ + U32 RAID10VolumeSettings; /* 0x54 */ + U32 Reserved4; /* 0x58 */ + U32 Reserved5; /* 0x5C */ + MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /* 0x60 */ + U8 MaxOCEDisks; /* 0x64 */ + U8 ResyncRate; /* 0x65 */ + U16 DataScrubDuration; /* 0x66 */ + U8 MaxHotSpares; /* 0x68 */ + U8 MaxPhysDisksPerVol; /* 0x69 */ + U8 MaxPhysDisks; /* 0x6A */ + U8 MaxVolumes; /* 0x6B */ } MPI2_CONFIG_PAGE_MAN_4, - *PTR_MPI2_CONFIG_PAGE_MAN_4, - Mpi2ManufacturingPage4_t, - *pMpi2ManufacturingPage4_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_4, + Mpi2ManufacturingPage4_t, MPI2_POINTER pMpi2ManufacturingPage4_t; #define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) -/*Manufacturing Page 4 Flags field */ +/* Manufacturing Page 4 Flags 
field */ #define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) #define MPI2_MANPAGE4_METADATA_512MB (0x00000000) @@ -759,70 +771,67 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_4 { #define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001) -/*Manufacturing Page 5 */ +/* Manufacturing Page 5 */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. */ #ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES #define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) #endif -typedef struct _MPI2_MANUFACTURING5_ENTRY { - U64 WWID; /*0x00 */ - U64 DeviceName; /*0x08 */ -} MPI2_MANUFACTURING5_ENTRY, - *PTR_MPI2_MANUFACTURING5_ENTRY, - Mpi2Manufacturing5Entry_t, - *pMpi2Manufacturing5Entry_t; +typedef struct _MPI2_MANUFACTURING5_ENTRY +{ + U64 WWID; /* 0x00 */ + U64 DeviceName; /* 0x08 */ +} MPI2_MANUFACTURING5_ENTRY, MPI2_POINTER PTR_MPI2_MANUFACTURING5_ENTRY, + Mpi2Manufacturing5Entry_t, MPI2_POINTER pMpi2Manufacturing5Entry_t; -typedef struct _MPI2_CONFIG_PAGE_MAN_5 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 NumPhys; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - U32 Reserved3; /*0x08 */ - U32 Reserved4; /*0x0C */ - MPI2_MANUFACTURING5_ENTRY - Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x08 */ +typedef struct _MPI2_CONFIG_PAGE_MAN_5 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhys; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ + MPI2_MANUFACTURING5_ENTRY Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/* 0x08 */ } MPI2_CONFIG_PAGE_MAN_5, - *PTR_MPI2_CONFIG_PAGE_MAN_5, - Mpi2ManufacturingPage5_t, - *pMpi2ManufacturingPage5_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_5, + Mpi2ManufacturingPage5_t, MPI2_POINTER pMpi2ManufacturingPage5_t; #define MPI2_MANUFACTURING5_PAGEVERSION (0x03) -/*Manufacturing Page 6 */ +/* Manufacturing Page 6 */ -typedef struct _MPI2_CONFIG_PAGE_MAN_6 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 ProductSpecificInfo;/*0x04 */ +typedef struct _MPI2_CONFIG_PAGE_MAN_6 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo;/* 0x04 */ } MPI2_CONFIG_PAGE_MAN_6, - *PTR_MPI2_CONFIG_PAGE_MAN_6, - Mpi2ManufacturingPage6_t, - *pMpi2ManufacturingPage6_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_6, + Mpi2ManufacturingPage6_t, MPI2_POINTER pMpi2ManufacturingPage6_t; #define MPI2_MANUFACTURING6_PAGEVERSION (0x00) -/*Manufacturing Page 7 */ +/* Manufacturing Page 7 */ -typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO { - U32 Pinout; /*0x00 */ - U8 Connector[16]; /*0x04 */ - U8 Location; /*0x14 */ - U8 ReceptacleID; /*0x15 */ - U16 Slot; /*0x16 */ - U16 Slotx2; /*0x18 */ - U16 Slotx4; /*0x1A */ -} MPI2_MANPAGE7_CONNECTOR_INFO, - *PTR_MPI2_MANPAGE7_CONNECTOR_INFO, - Mpi2ManPage7ConnectorInfo_t, - *pMpi2ManPage7ConnectorInfo_t; +typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO +{ + U32 Pinout; /* 0x00 */ + U8 Connector[16]; /* 0x04 */ + U8 Location; /* 0x14 */ + U8 ReceptacleID; /* 0x15 */ + U16 Slot; /* 0x16 */ + U16 Slotx2; /* 0x18 */ + U16 Slotx4; /* 0x1A */ +} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO, + Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t; -/*defines for the Pinout field */ +/* defines for the Pinout field */ #define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) #define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) @@ -848,7 +857,7 @@ typedef struct 
_MPI2_MANPAGE7_CONNECTOR_INFO { #define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12) #define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13) -/*defines for the Location field */ +/* defines for the Location field */ #define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) #define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02) #define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04) @@ -857,53 +866,53 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO { #define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) #define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) -/*defines for the Slot field */ +/* defines for the Slot field */ #define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. */ #ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX #define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_MAN_7 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - U32 Reserved2; /*0x08 */ - U32 Flags; /*0x0C */ - U8 EnclosureName[16]; /*0x10 */ - U8 NumPhys; /*0x20 */ - U8 Reserved3; /*0x21 */ - U16 Reserved4; /*0x22 */ - MPI2_MANPAGE7_CONNECTOR_INFO - ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */ +typedef struct _MPI2_CONFIG_PAGE_MAN_7 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U32 Flags; /* 0x0C */ + U8 EnclosureName[16]; /* 0x10 */ + U8 NumPhys; /* 0x20 */ + U8 Reserved3; /* 0x21 */ + U16 Reserved4; /* 0x22 */ + MPI2_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /* 0x24 */ } MPI2_CONFIG_PAGE_MAN_7, - *PTR_MPI2_CONFIG_PAGE_MAN_7, - Mpi2ManufacturingPage7_t, - *pMpi2ManufacturingPage7_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7, + Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t; #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) -/*defines for the Flags field */ +/* defines for the Flags field */ #define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) +#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020) /* - *Generic structure to use for product-specific manufacturing pages - *(currently Manufacturing Page 8 through Manufacturing Page 31). + * Generic structure to use for product-specific manufacturing pages + * (currently Manufacturing Page 8 through Manufacturing Page 31). 
*/ -typedef struct _MPI2_CONFIG_PAGE_MAN_PS { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 ProductSpecificInfo;/*0x04 */ +typedef struct _MPI2_CONFIG_PAGE_MAN_PS +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo;/* 0x04 */ } MPI2_CONFIG_PAGE_MAN_PS, - *PTR_MPI2_CONFIG_PAGE_MAN_PS, - Mpi2ManufacturingPagePS_t, - *pMpi2ManufacturingPagePS_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_PS, + Mpi2ManufacturingPagePS_t, MPI2_POINTER pMpi2ManufacturingPagePS_t; #define MPI2_MANUFACTURING8_PAGEVERSION (0x00) #define MPI2_MANUFACTURING9_PAGEVERSION (0x00) @@ -932,39 +941,39 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_PS { /**************************************************************************** -* IO Unit Config Pages +* IO Unit Config Pages ****************************************************************************/ -/*IO Unit Page 0 */ +/* IO Unit Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U64 UniqueValue; /*0x04 */ - MPI2_VERSION_UNION NvdataVersionDefault; /*0x08 */ - MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0A */ -} MPI2_CONFIG_PAGE_IO_UNIT_0, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, - Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U64 UniqueValue; /* 0x04 */ + MPI2_VERSION_UNION NvdataVersionDefault; /* 0x08 */ + MPI2_VERSION_UNION NvdataVersionPersistent; /* 0x0A */ +} MPI2_CONFIG_PAGE_IO_UNIT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, + Mpi2IOUnitPage0_t, MPI2_POINTER pMpi2IOUnitPage0_t; #define MPI2_IOUNITPAGE0_PAGEVERSION (0x02) -/*IO Unit Page 1 */ +/* IO Unit Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Flags; /*0x04 */ -} MPI2_CONFIG_PAGE_IO_UNIT_1, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, - Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Flags; /* 0x04 */ +} MPI2_CONFIG_PAGE_IO_UNIT_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, + Mpi2IOUnitPage1_t, MPI2_POINTER pMpi2IOUnitPage1_t; #define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) /* IO Unit Page 1 Flags defines */ #define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000) #define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) @@ -980,70 +989,65 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { #define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) -/*IO Unit Page 3 */ +/* IO Unit Page 3 */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for GPIOCount at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for GPIOCount at runtime. 
*/ #ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX #define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 GPIOCount; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - U16 - GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */ -} MPI2_CONFIG_PAGE_IO_UNIT_3, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, - Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 GPIOCount; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U16 GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/* 0x08 */ +} MPI2_CONFIG_PAGE_IO_UNIT_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, + Mpi2IOUnitPage3_t, MPI2_POINTER pMpi2IOUnitPage3_t; #define MPI2_IOUNITPAGE3_PAGEVERSION (0x01) -/*defines for IO Unit Page 3 GPIOVal field */ +/* defines for IO Unit Page 3 GPIOVal field */ #define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC) #define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) #define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000) #define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) -/*IO Unit Page 5 */ +/* IO Unit Page 5 */ /* - *Upper layer code (drivers, utilities, etc.) should leave this define set to - *one and check the value returned for NumDmaEngines at runtime. + * Upper layer code (drivers, utilities, etc.) should leave this define set to + * one and check the value returned for NumDmaEngines at runtime. */ #ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES #define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) #endif -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U64 - RaidAcceleratorBufferBaseAddress; /*0x04 */ - U64 - RaidAcceleratorBufferSize; /*0x0C */ - U64 - RaidAcceleratorControlBaseAddress; /*0x14 */ - U8 RAControlSize; /*0x1C */ - U8 NumDmaEngines; /*0x1D */ - U8 RAMinControlSize; /*0x1E */ - U8 RAMaxControlSize; /*0x1F */ - U32 Reserved1; /*0x20 */ - U32 Reserved2; /*0x24 */ - U32 Reserved3; /*0x28 */ - U32 - DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */ -} MPI2_CONFIG_PAGE_IO_UNIT_5, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, - Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U64 RaidAcceleratorBufferBaseAddress; /* 0x04 */ + U64 RaidAcceleratorBufferSize; /* 0x0C */ + U64 RaidAcceleratorControlBaseAddress; /* 0x14 */ + U8 RAControlSize; /* 0x1C */ + U8 NumDmaEngines; /* 0x1D */ + U8 RAMinControlSize; /* 0x1E */ + U8 RAMaxControlSize; /* 0x1F */ + U32 Reserved1; /* 0x20 */ + U32 Reserved2; /* 0x24 */ + U32 Reserved3; /* 0x28 */ + U32 DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /* 0x2C */ +} MPI2_CONFIG_PAGE_IO_UNIT_5, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, + Mpi2IOUnitPage5_t, MPI2_POINTER pMpi2IOUnitPage5_t; #define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) -/*defines for IO Unit Page 5 DmaEngineCapabilities field */ +/* defines for IO Unit Page 5 DmaEngineCapabilities field */ #define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) #define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) @@ -1053,61 +1057,56 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { #define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) -/*IO Unit Page 6 */ +/* IO Unit Page 6 */ -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U16 Flags; /*0x04 */ - U8 RAHostControlSize; /*0x06 */ - U8 Reserved0; /*0x07 */ - U64 - RaidAcceleratorHostControlBaseAddress; /*0x08 */ - 
U32 Reserved1; /*0x10 */ - U32 Reserved2; /*0x14 */ - U32 Reserved3; /*0x18 */ -} MPI2_CONFIG_PAGE_IO_UNIT_6, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, - Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 Flags; /* 0x04 */ + U8 RAHostControlSize; /* 0x06 */ + U8 Reserved0; /* 0x07 */ + U64 RaidAcceleratorHostControlBaseAddress; /* 0x08 */ + U32 Reserved1; /* 0x10 */ + U32 Reserved2; /* 0x14 */ + U32 Reserved3; /* 0x18 */ +} MPI2_CONFIG_PAGE_IO_UNIT_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, + Mpi2IOUnitPage6_t, MPI2_POINTER pMpi2IOUnitPage6_t; #define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) -/*defines for IO Unit Page 6 Flags field */ +/* defines for IO Unit Page 6 Flags field */ #define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) -/*IO Unit Page 7 */ +/* IO Unit Page 7 */ -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 CurrentPowerMode; /*0x04 */ - U8 PreviousPowerMode; /*0x05 */ - U8 PCIeWidth; /*0x06 */ - U8 PCIeSpeed; /*0x07 */ - U32 ProcessorState; /*0x08 */ - U32 - PowerManagementCapabilities; /*0x0C */ - U16 IOCTemperature; /*0x10 */ - U8 - IOCTemperatureUnits; /*0x12 */ - U8 IOCSpeed; /*0x13 */ - U16 BoardTemperature; /*0x14 */ - U8 - BoardTemperatureUnits; /*0x16 */ - U8 Reserved3; /*0x17 */ - U32 BoardPowerRequirement; /*0x18 */ - U32 PCISlotPowerAllocation; /*0x1C */ -/* reserved prior to MPI v2.6 */ - U8 Flags; /* 0x20 */ - U8 Reserved6; /* 0x21 */ - U16 Reserved7; /* 0x22 */ - U32 Reserved8; /* 0x24 */ -} MPI2_CONFIG_PAGE_IO_UNIT_7, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, - Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 CurrentPowerMode; /* 0x04 */ /* reserved in MPI 2.0 */ + U8 PreviousPowerMode; /* 0x05 */ /* reserved in MPI 2.0 */ + U8 PCIeWidth; /* 0x06 */ + U8 PCIeSpeed; /* 0x07 */ + U32 ProcessorState; /* 0x08 */ + U32 PowerManagementCapabilities; /* 0x0C */ + U16 IOCTemperature; /* 0x10 */ + U8 IOCTemperatureUnits; /* 0x12 */ + U8 IOCSpeed; /* 0x13 */ + U16 BoardTemperature; /* 0x14 */ + U8 BoardTemperatureUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 BoardPowerRequirement; /* 0x18 */ /* reserved prior to MPI v2.6 */ + U32 PCISlotPowerAllocation; /* 0x1C */ /* reserved prior to MPI v2.6 */ + U8 Flags; /* 0x20 */ /* reserved prior to MPI v2.6 */ + U8 Reserved6; /* 0x21 */ + U16 Reserved7; /* 0x22 */ + U32 Reserved8; /* 0x24 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; -#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) -/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ +/* defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) #define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) #define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) @@ -1122,20 +1121,20 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) -/*defines for IO Unit Page 7 PCIeWidth field */ +/* defines for IO Unit Page 7 PCIeWidth field */ #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10) -/*defines for IO Unit Page 7 PCIeSpeed field */ +/* defines for 
IO Unit Page 7 PCIeSpeed field */ #define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) #define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) #define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) #define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03) -/*defines for IO Unit Page 7 ProcessorState field */ +/* defines for IO Unit Page 7 ProcessorState field */ #define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) #define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) @@ -1143,7 +1142,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) #define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) -/*defines for IO Unit Page 7 PowerManagementCapabilities field */ +/* defines for IO Unit Page 7 PowerManagementCapabilities field */ #define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) #define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) #define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) @@ -1159,31 +1158,31 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) #define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) #define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) -#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) -#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) -#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) -#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) /* obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) /* obsolete */ +#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) /* obsolete */ +#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) /* obsolete */ -/*obsolete names for the PowerManagementCapabilities bits (above) */ +/* obsolete names for the PowerManagementCapabilities bits (above) */ #define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) #define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) #define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) -#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */ -#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */ -/*defines for IO Unit Page 7 IOCTemperatureUnits field */ +/* defines for IO Unit Page 7 IOCTemperatureUnits field */ #define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) #define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) #define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) -/*defines for IO Unit Page 7 IOCSpeed field */ +/* defines for IO Unit Page 7 IOCSpeed field */ #define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) #define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) #define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) #define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) -/*defines for IO Unit Page 7 BoardTemperatureUnits field */ +/* defines for IO Unit Page 7 BoardTemperatureUnits field */ #define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) @@ -1191,137 +1190,134 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { /* defines for IO Unit Page 7 Flags field */ #define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) -/*IO Unit Page 8 */ + +/* IO Unit Page 8 */ #define MPI2_IOUNIT8_NUM_THRESHOLDS (4) 
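Before the sensor pages, a brief hedged sketch of consuming the IO Unit Page 7 temperature fields defined just above; this is illustrative only (a real driver would report through its own logging rather than printf), and it assumes a page buffer already read from the IOC:

#include <stdio.h>

static void report_ioc_temp(const MPI2_CONFIG_PAGE_IO_UNIT_7 *p7)
{
    switch (p7->IOCTemperatureUnits) {
    case MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS:
        printf("IOC temperature: %u C\n", (unsigned)p7->IOCTemperature);
        break;
    case MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT:
        printf("IOC temperature: %u F\n", (unsigned)p7->IOCTemperature);
        break;
    default:        /* MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT */
        printf("IOC temperature not available\n");
        break;
    }
}

The BoardTemperature and BoardTemperatureUnits fields would be handled the same way with the corresponding BOARD_TEMP defines.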
-typedef struct _MPI2_IOUNIT8_SENSOR { - U16 Flags; /*0x00 */ - U16 Reserved1; /*0x02 */ - U16 - Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */ - U32 Reserved2; /*0x0C */ - U32 Reserved3; /*0x10 */ - U32 Reserved4; /*0x14 */ -} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR, - Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t; +typedef struct _MPI2_IOUNIT8_SENSOR +{ + U16 Flags; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U16 Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */ + U32 Reserved2; /* 0x0C */ + U32 Reserved3; /* 0x10 */ + U32 Reserved4; /* 0x14 */ +} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR, + Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t; -/*defines for IO Unit Page 8 Sensor Flags field */ +/* defines for IO Unit Page 8 Sensor Flags field */ #define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) #define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) #define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) #define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumSensors at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumSensors at runtime. */ #ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES #define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) #endif -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - U32 Reserved2; /*0x08 */ - U8 NumSensors; /*0x0C */ - U8 PollingInterval; /*0x0D */ - U16 Reserved3; /*0x0E */ - MPI2_IOUNIT8_SENSOR - Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */ -} MPI2_CONFIG_PAGE_IO_UNIT_8, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, - Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U8 NumSensors; /* 0x0C */ + U8 PollingInterval; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_IOUNIT8_SENSOR Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, + Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t; #define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) -/*IO Unit Page 9 */ +/* IO Unit Page 9 */ -typedef struct _MPI2_IOUNIT9_SENSOR { - U16 CurrentTemperature; /*0x00 */ - U16 Reserved1; /*0x02 */ - U8 Flags; /*0x04 */ - U8 Reserved2; /*0x05 */ - U16 Reserved3; /*0x06 */ - U32 Reserved4; /*0x08 */ - U32 Reserved5; /*0x0C */ -} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR, - Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t; +typedef struct _MPI2_IOUNIT9_SENSOR +{ + U16 CurrentTemperature; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U8 Flags; /* 0x04 */ + U8 Reserved2; /* 0x05 */ + U16 Reserved3; /* 0x06 */ + U32 Reserved4; /* 0x08 */ + U32 Reserved5; /* 0x0C */ +} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR, + Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t; -/*defines for IO Unit Page 9 Sensor Flags field */ +/* defines for IO Unit Page 9 Sensor Flags field */ #define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumSensors at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumSensors at runtime. 
*/ #ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES #define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) #endif -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - U32 Reserved2; /*0x08 */ - U8 NumSensors; /*0x0C */ - U8 Reserved4; /*0x0D */ - U16 Reserved3; /*0x0E */ - MPI2_IOUNIT9_SENSOR - Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */ -} MPI2_CONFIG_PAGE_IO_UNIT_9, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, - Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U8 NumSensors; /* 0x0C */ + U8 Reserved4; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_IOUNIT9_SENSOR Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, + Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t; #define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) -/*IO Unit Page 10 */ +/* IO Unit Page 10 */ -typedef struct _MPI2_IOUNIT10_FUNCTION { - U8 CreditPercent; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ -} MPI2_IOUNIT10_FUNCTION, - *PTR_MPI2_IOUNIT10_FUNCTION, - Mpi2IOUnit10Function_t, - *pMpi2IOUnit10Function_t; +typedef struct _MPI2_IOUNIT10_FUNCTION +{ + U8 CreditPercent; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ +} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION, + Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t; /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumFunctions at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumFunctions at runtime. 
*/ #ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES #define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) #endif -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 NumFunctions; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - U32 Reserved3; /*0x08 */ - U32 Reserved4; /*0x0C */ - MPI2_IOUNIT10_FUNCTION - Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */ -} MPI2_CONFIG_PAGE_IO_UNIT_10, - *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, - Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t; +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumFunctions; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ + MPI2_IOUNIT10_FUNCTION Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES]; /* 0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, + Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t; #define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) /* IO Unit Page 11 (for MPI v2.6 and later) */ -typedef struct _MPI26_IOUNIT11_SPINUP_GROUP { - U8 MaxTargetSpinup; /* 0x00 */ - U8 SpinupDelay; /* 0x01 */ - U8 SpinupFlags; /* 0x02 */ - U8 Reserved1; /* 0x03 */ -} MPI26_IOUNIT11_SPINUP_GROUP, - *PTR_MPI26_IOUNIT11_SPINUP_GROUP, - Mpi26IOUnit11SpinupGroup_t, - *pMpi26IOUnit11SpinupGroup_t; +typedef struct _MPI26_IOUNIT11_SPINUP_GROUP +{ + U8 MaxTargetSpinup; /* 0x00 */ + U8 SpinupDelay; /* 0x01 */ + U8 SpinupFlags; /* 0x02 */ + U8 Reserved1; /* 0x03 */ +} MPI26_IOUNIT11_SPINUP_GROUP, MPI2_POINTER PTR_MPI26_IOUNIT11_SPINUP_GROUP, + Mpi26IOUnit11SpinupGroup_t, MPI2_POINTER pMpi26IOUnit11SpinupGroup_t; /* defines for IO Unit Page 11 SpinupFlags */ #define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01) @@ -1335,25 +1331,26 @@ typedef struct _MPI26_IOUNIT11_SPINUP_GROUP { #define MPI26_IOUNITPAGE11_PHY_MAX (4) #endif -typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */ - U32 Reserved2; /*0x18 */ - U32 Reserved3; /*0x1C */ - U32 Reserved4; /*0x20 */ - U8 BootDeviceWaitTime; /*0x24 */ - U8 Reserved5; /*0x25 */ - U16 Reserved6; /*0x26 */ - U8 NumPhys; /*0x28 */ - U8 PEInitialSpinupDelay; /*0x29 */ - U8 PEReplyDelay; /*0x2A */ - U8 Flags; /*0x2B */ - U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/*0x2C */ +typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */ + U32 Reserved2; /* 0x18 */ + U32 Reserved3; /* 0x1C */ + U32 Reserved4; /* 0x20 */ + U8 BootDeviceWaitTime; /* 0x24 */ + U8 SATADeviceWaitTime; /* 0x25 */ + U8 PCIeWaitTime; /* 0x26 */ + U8 Reserved6; /* 0x27 */ + U8 NumPhys; /* 0x28 */ + U8 PEInitialSpinupDelay; /* 0x29 */ + U8 PEReplyDelay; /* 0x2A */ + U8 Flags; /* 0x2B */ + U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/* 0x2C */ } MPI26_CONFIG_PAGE_IO_UNIT_11, - *PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, - Mpi26IOUnitPage11_t, - *pMpi26IOUnitPage11_t; + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, + Mpi26IOUnitPage11_t, MPI2_POINTER pMpi26IOUnitPage11_t; #define MPI26_IOUNITPAGE11_PAGEVERSION (0x00) @@ -1365,109 +1362,94 @@ typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 { - - - /**************************************************************************** -* IOC Config Pages +* IOC Config Pages ****************************************************************************/ -/*IOC Page 0 */ +/* IOC Page 0 */ 
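/*
 * Editor's illustration before the IOC pages, not part of the patch:
 * the recurring "leave this define set to one" comments on IO Unit
 * Pages 8, 9 and 10 above describe the variable-length page pattern
 * used throughout this file. The compile-time bound (e.g.
 * MPI2_IOUNITPAGE8_SENSOR_ENTRIES) only sizes the declared type; the
 * IOC reports the real element count in the page itself, so host code
 * reads the page into a buffer sized from the PageLength in the
 * returned header and then iterates using the Num* field. A minimal
 * sketch for IO Unit Page 8, assuming pg8 points at such a full-length
 * buffer; example_* is a hypothetical helper, not driver API.
 */
static inline U8
example_count_t0_enabled_sensors(Mpi2IOUnitPage8_t *pg8)
{
	U8 i, enabled = 0;

	/* Bound the walk by NumSensors from the IOC, never by the
	 * compile-time MPI2_IOUNITPAGE8_SENSOR_ENTRIES default of one. */
	for (i = 0; i < pg8->NumSensors; i++)
		if (pg8->Sensor[i].Flags & MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE)
			enabled++;
	return enabled;
}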
-typedef struct _MPI2_CONFIG_PAGE_IOC_0 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - U32 Reserved2; /*0x08 */ - U16 VendorID; /*0x0C */ - U16 DeviceID; /*0x0E */ - U8 RevisionID; /*0x10 */ - U8 Reserved3; /*0x11 */ - U16 Reserved4; /*0x12 */ - U32 ClassCode; /*0x14 */ - U16 SubsystemVendorID; /*0x18 */ - U16 SubsystemID; /*0x1A */ -} MPI2_CONFIG_PAGE_IOC_0, - *PTR_MPI2_CONFIG_PAGE_IOC_0, - Mpi2IOCPage0_t, *pMpi2IOCPage0_t; +typedef struct _MPI2_CONFIG_PAGE_IOC_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U16 VendorID; /* 0x0C */ + U16 DeviceID; /* 0x0E */ + U8 RevisionID; /* 0x10 */ + U8 Reserved3; /* 0x11 */ + U16 Reserved4; /* 0x12 */ + U32 ClassCode; /* 0x14 */ + U16 SubsystemVendorID; /* 0x18 */ + U16 SubsystemID; /* 0x1A */ +} MPI2_CONFIG_PAGE_IOC_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_0, + Mpi2IOCPage0_t, MPI2_POINTER pMpi2IOCPage0_t; #define MPI2_IOCPAGE0_PAGEVERSION (0x02) -/*IOC Page 1 */ +/* IOC Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_IOC_1 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Flags; /*0x04 */ - U32 CoalescingTimeout; /*0x08 */ - U8 CoalescingDepth; /*0x0C */ - U8 PCISlotNum; /*0x0D */ - U8 PCIBusNum; /*0x0E */ - U8 PCIDomainSegment; /*0x0F */ - U32 Reserved1; /*0x10 */ - U32 ProductSpecific; /* 0x14 */ -} MPI2_CONFIG_PAGE_IOC_1, - *PTR_MPI2_CONFIG_PAGE_IOC_1, - Mpi2IOCPage1_t, *pMpi2IOCPage1_t; +typedef struct _MPI2_CONFIG_PAGE_IOC_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Flags; /* 0x04 */ + U32 CoalescingTimeout; /* 0x08 */ + U8 CoalescingDepth; /* 0x0C */ + U8 PCISlotNum; /* 0x0D */ + U8 PCIBusNum; /* 0x0E */ + U8 PCIDomainSegment; /* 0x0F */ + U32 Reserved1; /* 0x10 */ + U32 ProductSpecific; /* 0x14 */ +} MPI2_CONFIG_PAGE_IOC_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_1, + Mpi2IOCPage1_t, MPI2_POINTER pMpi2IOCPage1_t; #define MPI2_IOCPAGE1_PAGEVERSION (0x05) -/*defines for IOC Page 1 Flags field */ +/* defines for IOC Page 1 Flags field */ #define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) #define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) #define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) #define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) -/*IOC Page 6 */ +/* IOC Page 6 */ -typedef struct _MPI2_CONFIG_PAGE_IOC_6 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 - CapabilitiesFlags; /*0x04 */ - U8 MaxDrivesRAID0; /*0x08 */ - U8 MaxDrivesRAID1; /*0x09 */ - U8 - MaxDrivesRAID1E; /*0x0A */ - U8 - MaxDrivesRAID10; /*0x0B */ - U8 MinDrivesRAID0; /*0x0C */ - U8 MinDrivesRAID1; /*0x0D */ - U8 - MinDrivesRAID1E; /*0x0E */ - U8 - MinDrivesRAID10; /*0x0F */ - U32 Reserved1; /*0x10 */ - U8 - MaxGlobalHotSpares; /*0x14 */ - U8 MaxPhysDisks; /*0x15 */ - U8 MaxVolumes; /*0x16 */ - U8 MaxConfigs; /*0x17 */ - U8 MaxOCEDisks; /*0x18 */ - U8 Reserved2; /*0x19 */ - U16 Reserved3; /*0x1A */ - U32 - SupportedStripeSizeMapRAID0; /*0x1C */ - U32 - SupportedStripeSizeMapRAID1E; /*0x20 */ - U32 - SupportedStripeSizeMapRAID10; /*0x24 */ - U32 Reserved4; /*0x28 */ - U32 Reserved5; /*0x2C */ - U16 - DefaultMetadataSize; /*0x30 */ - U16 Reserved6; /*0x32 */ - U16 - MaxBadBlockTableEntries; /*0x34 */ - U16 Reserved7; /*0x36 */ - U32 - IRNvsramVersion; /*0x38 */ -} MPI2_CONFIG_PAGE_IOC_6, - *PTR_MPI2_CONFIG_PAGE_IOC_6, - Mpi2IOCPage6_t, *pMpi2IOCPage6_t; +typedef struct _MPI2_CONFIG_PAGE_IOC_6 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 CapabilitiesFlags; /* 0x04 */ + U8 MaxDrivesRAID0; /* 0x08 */ + U8 MaxDrivesRAID1; /* 0x09 */ + U8 MaxDrivesRAID1E; /* 0x0A */ + U8 
MaxDrivesRAID10; /* 0x0B */ + U8 MinDrivesRAID0; /* 0x0C */ + U8 MinDrivesRAID1; /* 0x0D */ + U8 MinDrivesRAID1E; /* 0x0E */ + U8 MinDrivesRAID10; /* 0x0F */ + U32 Reserved1; /* 0x10 */ + U8 MaxGlobalHotSpares; /* 0x14 */ + U8 MaxPhysDisks; /* 0x15 */ + U8 MaxVolumes; /* 0x16 */ + U8 MaxConfigs; /* 0x17 */ + U8 MaxOCEDisks; /* 0x18 */ + U8 Reserved2; /* 0x19 */ + U16 Reserved3; /* 0x1A */ + U32 SupportedStripeSizeMapRAID0; /* 0x1C */ + U32 SupportedStripeSizeMapRAID1E; /* 0x20 */ + U32 SupportedStripeSizeMapRAID10; /* 0x24 */ + U32 Reserved4; /* 0x28 */ + U32 Reserved5; /* 0x2C */ + U16 DefaultMetadataSize; /* 0x30 */ + U16 Reserved6; /* 0x32 */ + U16 MaxBadBlockTableEntries; /* 0x34 */ + U16 Reserved7; /* 0x36 */ + U32 IRNvsramVersion; /* 0x38 */ +} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6, + Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t; #define MPI2_IOCPAGE6_PAGEVERSION (0x05) -/*defines for IOC Page 6 CapabilitiesFlags */ +/* defines for IOC Page 6 CapabilitiesFlags */ #define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) #define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) #define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) @@ -1476,46 +1458,45 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_6 { #define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) -/*IOC Page 7 */ +/* IOC Page 7 */ #define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) -typedef struct _MPI2_CONFIG_PAGE_IOC_7 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x04 */ - U32 - EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */ - U16 SASBroadcastPrimitiveMasks; /*0x18 */ - U16 SASNotifyPrimitiveMasks; /*0x1A */ - U32 Reserved3; /*0x1C */ -} MPI2_CONFIG_PAGE_IOC_7, - *PTR_MPI2_CONFIG_PAGE_IOC_7, - Mpi2IOCPage7_t, *pMpi2IOCPage7_t; +typedef struct _MPI2_CONFIG_PAGE_IOC_7 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */ + U16 SASBroadcastPrimitiveMasks; /* 0x18 */ + U16 SASNotifyPrimitiveMasks; /* 0x1A */ + U32 Reserved3; /* 0x1C */ +} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7, + Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t; #define MPI2_IOCPAGE7_PAGEVERSION (0x02) -/*IOC Page 8 */ +/* IOC Page 8 */ -typedef struct _MPI2_CONFIG_PAGE_IOC_8 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 NumDevsPerEnclosure; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - U16 MaxPersistentEntries; /*0x08 */ - U16 MaxNumPhysicalMappedIDs; /*0x0A */ - U16 Flags; /*0x0C */ - U16 Reserved3; /*0x0E */ - U16 IRVolumeMappingFlags; /*0x10 */ - U16 Reserved4; /*0x12 */ - U32 Reserved5; /*0x14 */ -} MPI2_CONFIG_PAGE_IOC_8, - *PTR_MPI2_CONFIG_PAGE_IOC_8, - Mpi2IOCPage8_t, *pMpi2IOCPage8_t; +typedef struct _MPI2_CONFIG_PAGE_IOC_8 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumDevsPerEnclosure; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U16 MaxPersistentEntries; /* 0x08 */ + U16 MaxNumPhysicalMappedIDs; /* 0x0A */ + U16 Flags; /* 0x0C */ + U16 Reserved3; /* 0x0E */ + U16 IRVolumeMappingFlags; /* 0x10 */ + U16 Reserved4; /* 0x12 */ + U32 Reserved5; /* 0x14 */ +} MPI2_CONFIG_PAGE_IOC_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_8, + Mpi2IOCPage8_t, MPI2_POINTER pMpi2IOCPage8_t; #define MPI2_IOCPAGE8_PAGEVERSION (0x00) -/*defines for IOC Page 8 Flags field */ +/* defines for IOC Page 8 Flags field */ #define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) #define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) @@ -1526,39 +1507,39 @@ typedef struct 
_MPI2_CONFIG_PAGE_IOC_8 { #define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) #define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) -/*defines for IOC Page 8 IRVolumeMappingFlags */ +/* defines for IOC Page 8 IRVolumeMappingFlags */ #define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) #define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) #define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) /**************************************************************************** -* BIOS Config Pages +* BIOS Config Pages ****************************************************************************/ -/*BIOS Page 1 */ +/* BIOS Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 BiosOptions; /*0x04 */ - U32 IOCSettings; /*0x08 */ - U8 SSUTimeout; /*0x0C */ - U8 MaxEnclosureLevel; /*0x0D */ - U16 Reserved2; /*0x0E */ - U32 DeviceSettings; /*0x10 */ - U16 NumberOfDevices; /*0x14 */ - U16 UEFIVersion; /*0x16 */ - U16 IOTimeoutBlockDevicesNonRM; /*0x18 */ - U16 IOTimeoutSequential; /*0x1A */ - U16 IOTimeoutOther; /*0x1C */ - U16 IOTimeoutBlockDevicesRM; /*0x1E */ -} MPI2_CONFIG_PAGE_BIOS_1, - *PTR_MPI2_CONFIG_PAGE_BIOS_1, - Mpi2BiosPage1_t, *pMpi2BiosPage1_t; +typedef struct _MPI2_CONFIG_PAGE_BIOS_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 BiosOptions; /* 0x04 */ + U32 IOCSettings; /* 0x08 */ + U8 SSUTimeout; /* 0x0C */ + U8 MaxEnclosureLevel; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U32 DeviceSettings; /* 0x10 */ + U16 NumberOfDevices; /* 0x14 */ + U16 UEFIVersion; /* 0x16 */ + U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */ + U16 IOTimeoutSequential; /* 0x1A */ + U16 IOTimeoutOther; /* 0x1C */ + U16 IOTimeoutBlockDevicesRM; /* 0x1E */ +} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, + Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; #define MPI2_BIOSPAGE1_PAGEVERSION (0x07) -/*values for BIOS Page 1 BiosOptions field */ +/* values for BIOS Page 1 BiosOptions field */ #define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000) #define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) @@ -1569,25 +1550,25 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { #define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) #define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) -#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) +#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) -#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) -#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) -#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) -#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) -#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) +#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) +#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) +#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) -#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) -#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) -#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) -#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) 
-#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) -#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) +#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) +#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) +#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) -#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) -/*values for BIOS Page 1 IOCSettings field */ +/* values for BIOS Page 1 IOCSettings field */ #define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) #define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) #define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) @@ -1605,14 +1586,14 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { #define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) -/*values for BIOS Page 1 DeviceSettings field */ +/* values for BIOS Page 1 DeviceSettings field */ #define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) #define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) #define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) #define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) #define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) -/*defines for BIOS Page 1 UEFIVersion field */ +/* defines for BIOS Page 1 UEFIVersion field */ #define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) #define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) #define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) @@ -1620,88 +1601,86 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { -/*BIOS Page 2 */ +/* BIOS Page 2 */ -typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER { - U32 Reserved1; /*0x00 */ - U32 Reserved2; /*0x04 */ - U32 Reserved3; /*0x08 */ - U32 Reserved4; /*0x0C */ - U32 Reserved5; /*0x10 */ - U32 Reserved6; /*0x14 */ +typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER +{ + U32 Reserved1; /* 0x00 */ + U32 Reserved2; /* 0x04 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ + U32 Reserved5; /* 0x10 */ + U32 Reserved6; /* 0x14 */ } MPI2_BOOT_DEVICE_ADAPTER_ORDER, - *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, - Mpi2BootDeviceAdapterOrder_t, - *pMpi2BootDeviceAdapterOrder_t; + MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, + Mpi2BootDeviceAdapterOrder_t, MPI2_POINTER pMpi2BootDeviceAdapterOrder_t; -typedef struct _MPI2_BOOT_DEVICE_SAS_WWID { - U64 SASAddress; /*0x00 */ - U8 LUN[8]; /*0x08 */ - U32 Reserved1; /*0x10 */ - U32 Reserved2; /*0x14 */ -} MPI2_BOOT_DEVICE_SAS_WWID, - *PTR_MPI2_BOOT_DEVICE_SAS_WWID, - Mpi2BootDeviceSasWwid_t, - *pMpi2BootDeviceSasWwid_t; +typedef struct _MPI2_BOOT_DEVICE_SAS_WWID +{ + U64 SASAddress; /* 0x00 */ + U8 LUN[8]; /* 0x08 */ + U32 Reserved1; /* 0x10 */ + U32 Reserved2; /* 0x14 */ +} MPI2_BOOT_DEVICE_SAS_WWID, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_SAS_WWID, + Mpi2BootDeviceSasWwid_t, MPI2_POINTER pMpi2BootDeviceSasWwid_t; -typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT { - U64 EnclosureLogicalID; /*0x00 */ - U32 Reserved1; /*0x08 */ - U32 Reserved2; /*0x0C */ - U16 SlotNumber; /*0x10 */ - U16 Reserved3; /*0x12 */ - U32 Reserved4; /*0x14 */ +typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT +{ + U64 EnclosureLogicalID; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U16 SlotNumber; /* 0x10 */ + U16 Reserved3; /* 0x12 */ + U32 Reserved4; /* 0x14 */ } MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, - *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, - Mpi2BootDeviceEnclosureSlot_t, - *pMpi2BootDeviceEnclosureSlot_t; + MPI2_POINTER 
PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t, MPI2_POINTER pMpi2BootDeviceEnclosureSlot_t;
-typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME {
- U64 DeviceName; /*0x00 */
- U8 LUN[8]; /*0x08 */
- U32 Reserved1; /*0x10 */
- U32 Reserved2; /*0x14 */
-} MPI2_BOOT_DEVICE_DEVICE_NAME,
- *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
- Mpi2BootDeviceDeviceName_t,
- *pMpi2BootDeviceDeviceName_t;
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME
+{
+ U64 DeviceName; /* 0x00 */
+ U8 LUN[8]; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t, MPI2_POINTER pMpi2BootDeviceDeviceName_t;
-typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE {
- MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
- MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
- MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
- MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
-} MPI2_BIOSPAGE2_BOOT_DEVICE,
- *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
- Mpi2BiosPage2BootDevice_t,
- *pMpi2BiosPage2BootDevice_t;
+typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE
+{
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE, MPI2_POINTER PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t, MPI2_POINTER pMpi2BiosPage2BootDevice_t;
-typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
- MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U32 Reserved1; /*0x04 */
- U32 Reserved2; /*0x08 */
- U32 Reserved3; /*0x0C */
- U32 Reserved4; /*0x10 */
- U32 Reserved5; /*0x14 */
- U32 Reserved6; /*0x18 */
- U8 ReqBootDeviceForm; /*0x1C */
- U8 Reserved7; /*0x1D */
- U16 Reserved8; /*0x1E */
- MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */
- U8 ReqAltBootDeviceForm; /*0x38 */
- U8 Reserved9; /*0x39 */
- U16 Reserved10; /*0x3A */
- MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */
- U8 CurrentBootDeviceForm; /*0x58 */
- U8 Reserved11; /*0x59 */
- U16 Reserved12; /*0x5A */
- MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */
-} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2,
- Mpi2BiosPage2_t, *pMpi2BiosPage2_t;
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+ U32 Reserved5; /* 0x14 */
+ U32 Reserved6; /* 0x18 */
+ U8 ReqBootDeviceForm; /* 0x1C */
+ U8 Reserved7; /* 0x1D */
+ U16 Reserved8; /* 0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /* 0x20 */
+ U8 ReqAltBootDeviceForm; /* 0x38 */
+ U8 Reserved9; /* 0x39 */
+ U16 Reserved10; /* 0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /* 0x3C */
+ U8 CurrentBootDeviceForm; /* 0x54 */
+ U8 Reserved11; /* 0x55 */
+ U16 Reserved12; /* 0x56 */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /* 0x58 */
+} MPI2_CONFIG_PAGE_BIOS_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, MPI2_POINTER pMpi2BiosPage2_t;
#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
-/*values for BIOS Page 2 BootDeviceForm fields */
+/* values for BIOS Page 2 BootDeviceForm fields */
#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
@@ -1709,42 +1688,43 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
-/*BIOS Page 3 */
+/* BIOS Page 3 */
#define MPI2_BIOSPAGE3_NUM_ADAPTER (4)
-typedef 
struct _MPI2_ADAPTER_INFO { - U8 PciBusNumber; /*0x00 */ - U8 PciDeviceAndFunctionNumber; /*0x01 */ - U16 AdapterFlags; /*0x02 */ -} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO, - Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t; +typedef struct _MPI2_ADAPTER_INFO +{ + U8 PciBusNumber; /* 0x00 */ + U8 PciDeviceAndFunctionNumber; /* 0x01 */ + U16 AdapterFlags; /* 0x02 */ +} MPI2_ADAPTER_INFO, MPI2_POINTER PTR_MPI2_ADAPTER_INFO, + Mpi2AdapterInfo_t, MPI2_POINTER pMpi2AdapterInfo_t; #define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) #define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) -typedef struct _MPI2_ADAPTER_ORDER_AUX { - U64 WWID; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ -} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX, - Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t; +typedef struct _MPI2_ADAPTER_ORDER_AUX +{ + U64 WWID; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_ADAPTER_ORDER_AUX, MPI2_POINTER PTR_MPI2_ADAPTER_ORDER_AUX, + Mpi2AdapterOrderAux_t, MPI2_POINTER pMpi2AdapterOrderAux_t; - -typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U32 GlobalFlags; /*0x04 */ - U32 BiosVersion; /*0x08 */ - MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER]; - U32 Reserved1; /*0x1C */ - MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER]; -} MPI2_CONFIG_PAGE_BIOS_3, - *PTR_MPI2_CONFIG_PAGE_BIOS_3, - Mpi2BiosPage3_t, *pMpi2BiosPage3_t; +typedef struct _MPI2_CONFIG_PAGE_BIOS_3 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 GlobalFlags; /* 0x04 */ + U32 BiosVersion; /* 0x08 */ + MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER]; /* 0x0C */ + U32 Reserved1; /* 0x1C */ + MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER]; /* 0x20 */ /* MPI v2.5 and newer */ +} MPI2_CONFIG_PAGE_BIOS_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_3, + Mpi2BiosPage3_t, MPI2_POINTER pMpi2BiosPage3_t; #define MPI2_BIOSPAGE3_PAGEVERSION (0x01) -/*values for BIOS Page 3 GlobalFlags */ +/* values for BIOS Page 3 GlobalFlags */ #define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002) #define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004) #define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010) @@ -1755,62 +1735,64 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { #define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040) -/*BIOS Page 4 */ +/* BIOS Page 4 */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
*/ #ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES #define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1) #endif -typedef struct _MPI2_BIOS4_ENTRY { - U64 ReassignmentWWID; /*0x00 */ - U64 ReassignmentDeviceName; /*0x08 */ -} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY, - Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t; +typedef struct _MPI2_BIOS4_ENTRY +{ + U64 ReassignmentWWID; /* 0x00 */ + U64 ReassignmentDeviceName; /* 0x08 */ +} MPI2_BIOS4_ENTRY, MPI2_POINTER PTR_MPI2_BIOS4_ENTRY, + Mpi2MBios4Entry_t, MPI2_POINTER pMpi2Bios4Entry_t; -typedef struct _MPI2_CONFIG_PAGE_BIOS_4 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 NumPhys; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - MPI2_BIOS4_ENTRY - Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */ -} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4, - Mpi2BiosPage4_t, *pMpi2BiosPage4_t; +typedef struct _MPI2_CONFIG_PAGE_BIOS_4 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhys; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + MPI2_BIOS4_ENTRY Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /* 0x08 */ +} MPI2_CONFIG_PAGE_BIOS_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_4, + Mpi2BiosPage4_t, MPI2_POINTER pMpi2BiosPage4_t; #define MPI2_BIOSPAGE4_PAGEVERSION (0x01) /**************************************************************************** -* RAID Volume Config Pages +* RAID Volume Config Pages ****************************************************************************/ -/*RAID Volume Page 0 */ +/* RAID Volume Page 0 */ -typedef struct _MPI2_RAIDVOL0_PHYS_DISK { - U8 RAIDSetNum; /*0x00 */ - U8 PhysDiskMap; /*0x01 */ - U8 PhysDiskNum; /*0x02 */ - U8 Reserved; /*0x03 */ -} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK, - Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t; +typedef struct _MPI2_RAIDVOL0_PHYS_DISK +{ + U8 RAIDSetNum; /* 0x00 */ + U8 PhysDiskMap; /* 0x01 */ + U8 PhysDiskNum; /* 0x02 */ + U8 Reserved; /* 0x03 */ +} MPI2_RAIDVOL0_PHYS_DISK, MPI2_POINTER PTR_MPI2_RAIDVOL0_PHYS_DISK, + Mpi2RaidVol0PhysDisk_t, MPI2_POINTER pMpi2RaidVol0PhysDisk_t; -/*defines for the PhysDiskMap field */ +/* defines for the PhysDiskMap field */ #define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01) #define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02) -typedef struct _MPI2_RAIDVOL0_SETTINGS { - U16 Settings; /*0x00 */ - U8 HotSparePool; /*0x01 */ - U8 Reserved; /*0x02 */ -} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS, - Mpi2RaidVol0Settings_t, - *pMpi2RaidVol0Settings_t; +typedef struct _MPI2_RAIDVOL0_SETTINGS +{ + U16 Settings; /* 0x00 */ + U8 HotSparePool; /* 0x01 */ + U8 Reserved; /* 0x02 */ +} MPI2_RAIDVOL0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDVOL0_SETTINGS, + Mpi2RaidVol0Settings_t, MPI2_POINTER pMpi2RaidVol0Settings_t; -/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ +/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ #define MPI2_RAID_HOT_SPARE_POOL_0 (0x01) #define MPI2_RAID_HOT_SPARE_POOL_1 (0x02) #define MPI2_RAID_HOT_SPARE_POOL_2 (0x04) @@ -1820,7 +1802,7 @@ typedef struct _MPI2_RAIDVOL0_SETTINGS { #define MPI2_RAID_HOT_SPARE_POOL_6 (0x40) #define MPI2_RAID_HOT_SPARE_POOL_7 (0x80) -/*RAID Volume Page 0 VolumeSettings defines */ +/* RAID Volume Page 0 VolumeSettings defines */ #define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008) #define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004) @@ -1830,40 +1812,39 @@ typedef struct _MPI2_RAIDVOL0_SETTINGS { #define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002) /* - *Host code (drivers, BIOS, utilities, etc.) 
should leave this define set to - *one and check the value returned for NumPhysDisks at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhysDisks at runtime. */ #ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX #define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U16 DevHandle; /*0x04 */ - U8 VolumeState; /*0x06 */ - U8 VolumeType; /*0x07 */ - U32 VolumeStatusFlags; /*0x08 */ - MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */ - U64 MaxLBA; /*0x10 */ - U32 StripeSize; /*0x18 */ - U16 BlockSize; /*0x1C */ - U16 Reserved1; /*0x1E */ - U8 SupportedPhysDisks;/*0x20 */ - U8 ResyncRate; /*0x21 */ - U16 DataScrubDuration; /*0x22 */ - U8 NumPhysDisks; /*0x24 */ - U8 Reserved2; /*0x25 */ - U8 Reserved3; /*0x26 */ - U8 InactiveStatus; /*0x27 */ - MPI2_RAIDVOL0_PHYS_DISK - PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */ -} MPI2_CONFIG_PAGE_RAID_VOL_0, - *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, - Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t; +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x04 */ + U8 VolumeState; /* 0x06 */ + U8 VolumeType; /* 0x07 */ + U32 VolumeStatusFlags; /* 0x08 */ + MPI2_RAIDVOL0_SETTINGS VolumeSettings; /* 0x0C */ + U64 MaxLBA; /* 0x10 */ + U32 StripeSize; /* 0x18 */ + U16 BlockSize; /* 0x1C */ + U16 Reserved1; /* 0x1E */ + U8 SupportedPhysDisks; /* 0x20 */ + U8 ResyncRate; /* 0x21 */ + U16 DataScrubDuration; /* 0x22 */ + U8 NumPhysDisks; /* 0x24 */ + U8 Reserved2; /* 0x25 */ + U8 Reserved3; /* 0x26 */ + U8 InactiveStatus; /* 0x27 */ + MPI2_RAIDVOL0_PHYS_DISK PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /* 0x28 */ +} MPI2_CONFIG_PAGE_RAID_VOL_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, + Mpi2RaidVolPage0_t, MPI2_POINTER pMpi2RaidVolPage0_t; #define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) -/*values for RAID VolumeState */ +/* values for RAID VolumeState */ #define MPI2_RAID_VOL_STATE_MISSING (0x00) #define MPI2_RAID_VOL_STATE_FAILED (0x01) #define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) @@ -1871,14 +1852,14 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { #define MPI2_RAID_VOL_STATE_DEGRADED (0x04) #define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) -/*values for RAID VolumeType */ +/* values for RAID VolumeType */ #define MPI2_RAID_VOL_TYPE_RAID0 (0x00) #define MPI2_RAID_VOL_TYPE_RAID1E (0x01) #define MPI2_RAID_VOL_TYPE_RAID1 (0x02) #define MPI2_RAID_VOL_TYPE_RAID10 (0x05) #define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) -/*values for RAID Volume Page 0 VolumeStatusFlags field */ +/* values for RAID Volume Page 0 VolumeStatusFlags field */ #define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) #define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) #define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) @@ -1899,13 +1880,13 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { #define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) #define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) -/*values for RAID Volume Page 0 SupportedPhysDisks field */ +/* values for RAID Volume Page 0 SupportedPhysDisks field */ #define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) #define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) #define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) #define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) -/*values for RAID Volume Page 0 InactiveStatus field */ +/* values for RAID Volume Page 0 InactiveStatus field */ #define 
MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) #define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) #define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) @@ -1915,79 +1896,78 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { #define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) -/*RAID Volume Page 1 */ +/* RAID Volume Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U16 DevHandle; /*0x04 */ - U16 Reserved0; /*0x06 */ - U8 GUID[24]; /*0x08 */ - U8 Name[16]; /*0x20 */ - U64 WWID; /*0x30 */ - U32 Reserved1; /*0x38 */ - U32 Reserved2; /*0x3C */ -} MPI2_CONFIG_PAGE_RAID_VOL_1, - *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, - Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t; +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x04 */ + U16 Reserved0; /* 0x06 */ + U8 GUID[24]; /* 0x08 */ + U8 Name[16]; /* 0x20 */ + U64 WWID; /* 0x30 */ + U32 Reserved1; /* 0x38 */ + U32 Reserved2; /* 0x3C */ +} MPI2_CONFIG_PAGE_RAID_VOL_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, + Mpi2RaidVolPage1_t, MPI2_POINTER pMpi2RaidVolPage1_t; #define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) /**************************************************************************** -* RAID Physical Disk Config Pages +* RAID Physical Disk Config Pages ****************************************************************************/ -/*RAID Physical Disk Page 0 */ +/* RAID Physical Disk Page 0 */ -typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS { - U16 Reserved1; /*0x00 */ - U8 HotSparePool; /*0x02 */ - U8 Reserved2; /*0x03 */ -} MPI2_RAIDPHYSDISK0_SETTINGS, - *PTR_MPI2_RAIDPHYSDISK0_SETTINGS, - Mpi2RaidPhysDisk0Settings_t, - *pMpi2RaidPhysDisk0Settings_t; +typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS +{ + U16 Reserved1; /* 0x00 */ + U8 HotSparePool; /* 0x02 */ + U8 Reserved2; /* 0x03 */ +} MPI2_RAIDPHYSDISK0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_SETTINGS, + Mpi2RaidPhysDisk0Settings_t, MPI2_POINTER pMpi2RaidPhysDisk0Settings_t; -/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ +/* use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ -typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA { - U8 VendorID[8]; /*0x00 */ - U8 ProductID[16]; /*0x08 */ - U8 ProductRevLevel[4]; /*0x18 */ - U8 SerialNum[32]; /*0x1C */ +typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA +{ + U8 VendorID[8]; /* 0x00 */ + U8 ProductID[16]; /* 0x08 */ + U8 ProductRevLevel[4]; /* 0x18 */ + U8 SerialNum[32]; /* 0x1C */ } MPI2_RAIDPHYSDISK0_INQUIRY_DATA, - *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, - Mpi2RaidPhysDisk0InquiryData_t, - *pMpi2RaidPhysDisk0InquiryData_t; + MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + Mpi2RaidPhysDisk0InquiryData_t, MPI2_POINTER pMpi2RaidPhysDisk0InquiryData_t; -typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U16 DevHandle; /*0x04 */ - U8 Reserved1; /*0x06 */ - U8 PhysDiskNum; /*0x07 */ - MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */ - U32 Reserved2; /*0x0C */ - MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */ - U32 Reserved3; /*0x4C */ - U8 PhysDiskState; /*0x50 */ - U8 OfflineReason; /*0x51 */ - U8 IncompatibleReason; /*0x52 */ - U8 PhysDiskAttributes; /*0x53 */ - U32 PhysDiskStatusFlags;/*0x54 */ - U64 DeviceMaxLBA; /*0x58 */ - U64 HostMaxLBA; /*0x60 */ - U64 CoercedMaxLBA; /*0x68 */ - U16 BlockSize; /*0x70 */ - U16 Reserved5; /*0x72 */ - U32 Reserved6; /*0x74 */ +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + 
U16 DevHandle; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 PhysDiskNum; /* 0x07 */ + MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /* 0x10 */ + U32 Reserved3; /* 0x4C */ + U8 PhysDiskState; /* 0x50 */ + U8 OfflineReason; /* 0x51 */ + U8 IncompatibleReason; /* 0x52 */ + U8 PhysDiskAttributes; /* 0x53 */ + U32 PhysDiskStatusFlags; /* 0x54 */ + U64 DeviceMaxLBA; /* 0x58 */ + U64 HostMaxLBA; /* 0x60 */ + U64 CoercedMaxLBA; /* 0x68 */ + U16 BlockSize; /* 0x70 */ + U16 Reserved5; /* 0x72 */ + U32 Reserved6; /* 0x74 */ } MPI2_CONFIG_PAGE_RD_PDISK_0, - *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, - Mpi2RaidPhysDiskPage0_t, - *pMpi2RaidPhysDiskPage0_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, + Mpi2RaidPhysDiskPage0_t, MPI2_POINTER pMpi2RaidPhysDiskPage0_t; #define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) -/*PhysDiskState defines */ +/* PhysDiskState defines */ #define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) #define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) #define MPI2_RAID_PD_STATE_OFFLINE (0x02) @@ -1997,7 +1977,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { #define MPI2_RAID_PD_STATE_REBUILDING (0x06) #define MPI2_RAID_PD_STATE_OPTIMAL (0x07) -/*OfflineReason defines */ +/* OfflineReason defines */ #define MPI2_PHYSDISK0_ONLINE (0x00) #define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) #define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) @@ -2006,7 +1986,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { #define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) #define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) -/*IncompatibleReason defines */ +/* IncompatibleReason defines */ #define MPI2_PHYSDISK0_COMPATIBLE (0x00) #define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) #define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) @@ -2016,7 +1996,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { #define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) #define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) -/*PhysDiskAttributes defines */ +/* PhysDiskAttributes defines */ #define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) #define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) #define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) @@ -2025,7 +2005,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { #define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) #define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) -/*PhysDiskStatusFlags defines */ +/* PhysDiskStatusFlags defines */ #define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) #define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) #define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) @@ -2036,58 +2016,57 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { #define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) -/*RAID Physical Disk Page 1 */ +/* RAID Physical Disk Page 1 */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhysDiskPaths at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhysDiskPaths at runtime. 
*/ #ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX #define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) #endif -typedef struct _MPI2_RAIDPHYSDISK1_PATH { - U16 DevHandle; /*0x00 */ - U16 Reserved1; /*0x02 */ - U64 WWID; /*0x04 */ - U64 OwnerWWID; /*0x0C */ - U8 OwnerIdentifier; /*0x14 */ - U8 Reserved2; /*0x15 */ - U16 Flags; /*0x16 */ -} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH, - Mpi2RaidPhysDisk1Path_t, - *pMpi2RaidPhysDisk1Path_t; +typedef struct _MPI2_RAIDPHYSDISK1_PATH +{ + U16 DevHandle; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U64 WWID; /* 0x04 */ + U64 OwnerWWID; /* 0x0C */ + U8 OwnerIdentifier; /* 0x14 */ + U8 Reserved2; /* 0x15 */ + U16 Flags; /* 0x16 */ +} MPI2_RAIDPHYSDISK1_PATH, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK1_PATH, + Mpi2RaidPhysDisk1Path_t, MPI2_POINTER pMpi2RaidPhysDisk1Path_t; -/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ +/* RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ #define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) #define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) #define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) -typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { - MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 NumPhysDiskPaths; /*0x04 */ - U8 PhysDiskNum; /*0x05 */ - U16 Reserved1; /*0x06 */ - U32 Reserved2; /*0x08 */ - MPI2_RAIDPHYSDISK1_PATH - PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */ +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhysDiskPaths; /* 0x04 */ + U8 PhysDiskNum; /* 0x05 */ + U16 Reserved1; /* 0x06 */ + U32 Reserved2; /* 0x08 */ + MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/* 0x0C */ } MPI2_CONFIG_PAGE_RD_PDISK_1, - *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, - Mpi2RaidPhysDiskPage1_t, - *pMpi2RaidPhysDiskPage1_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, + Mpi2RaidPhysDiskPage1_t, MPI2_POINTER pMpi2RaidPhysDiskPage1_t; #define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) /**************************************************************************** -* values for fields used by several types of SAS Config Pages +* values for fields used by several types of SAS Config Pages ****************************************************************************/ -/*values for NegotiatedLinkRates fields */ +/* values for NegotiatedLinkRates fields */ #define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) #define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) #define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) -/*link rates used for Negotiated Physical and Logical Link Rate */ +/* link rates used for Negotiated Physical and Logical Link Rate */ #define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) #define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) #define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) @@ -2102,7 +2081,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { #define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C) -/*values for AttachedPhyInfo fields */ +/* values for AttachedPhyInfo fields */ #define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) #define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020) #define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) @@ -2119,7 +2098,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { #define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) -/*values for PhyInfo fields */ +/* values for PhyInfo fields */ #define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) #define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) @@ -2160,7 +2139,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { #define 
MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) -/*values for SAS ProgrammedLinkRate fields */ +/* values for SAS ProgrammedLinkRate fields */ #define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) #define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) #define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) @@ -2177,7 +2156,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { #define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C) -/*values for SAS HwLinkRate fields */ +/* values for SAS HwLinkRate fields */ #define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) #define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) #define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) @@ -2194,64 +2173,64 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { /**************************************************************************** -* SAS IO Unit Config Pages +* SAS IO Unit Config Pages ****************************************************************************/ -/*SAS IO Unit Page 0 */ +/* SAS IO Unit Page 0 */ -typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA { - U8 Port; /*0x00 */ - U8 PortFlags; /*0x01 */ - U8 PhyFlags; /*0x02 */ - U8 NegotiatedLinkRate; /*0x03 */ - U32 ControllerPhyDeviceInfo;/*0x04 */ - U16 AttachedDevHandle; /*0x08 */ - U16 ControllerDevHandle; /*0x0A */ - U32 DiscoveryStatus; /*0x0C */ - U32 Reserved; /*0x10 */ -} MPI2_SAS_IO_UNIT0_PHY_DATA, - *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, - Mpi2SasIOUnit0PhyData_t, - *pMpi2SasIOUnit0PhyData_t; +typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA +{ + U8 Port; /* 0x00 */ + U8 PortFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 NegotiatedLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo;/* 0x04 */ + U16 AttachedDevHandle; /* 0x08 */ + U16 ControllerDevHandle; /* 0x0A */ + U32 DiscoveryStatus; /* 0x0C */ + U32 Reserved; /* 0x10 */ +} MPI2_SAS_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, + Mpi2SasIOUnit0PhyData_t, MPI2_POINTER pMpi2SasIOUnit0PhyData_t; + +#define MPI26_SASIOUNIT0_PHY_PORT_NO_PORT (0xFF) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
*/ #ifndef MPI2_SAS_IOUNIT0_PHY_MAX #define MPI2_SAS_IOUNIT0_PHY_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1;/*0x08 */ - U8 NumPhys; /*0x0C */ - U8 Reserved2;/*0x0D */ - U16 Reserved3;/*0x0E */ - MPI2_SAS_IO_UNIT0_PHY_DATA - PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhys; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /* 0x10 */ } MPI2_CONFIG_PAGE_SASIOUNIT_0, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, - Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, + Mpi2SasIOUnitPage0_t, MPI2_POINTER pMpi2SasIOUnitPage0_t; #define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) -/*values for SAS IO Unit Page 0 PortFlags */ +/* values for SAS IO Unit Page 0 PortFlags */ #define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) #define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) -/*values for SAS IO Unit Page 0 PhyFlags */ +/* values for SAS IO Unit Page 0 PhyFlags */ #define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) #define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) #define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) #define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) -/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ +/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ -/*see mpi2_sas.h for values for - *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ +/* see mpi2_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ -/*values for SAS IO Unit Page 0 DiscoveryStatus */ +/* values for SAS IO Unit Page 0 DiscoveryStatus */ #define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) #define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) #define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) @@ -2274,59 +2253,50 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { #define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) -/*SAS IO Unit Page 1 */ +/* SAS IO Unit Page 1 */ -typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA { - U8 Port; /*0x00 */ - U8 PortFlags; /*0x01 */ - U8 PhyFlags; /*0x02 */ - U8 MaxMinLinkRate; /*0x03 */ - U32 ControllerPhyDeviceInfo; /*0x04 */ - U16 MaxTargetPortConnectTime; /*0x08 */ - U16 Reserved1; /*0x0A */ -} MPI2_SAS_IO_UNIT1_PHY_DATA, - *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, - Mpi2SasIOUnit1PhyData_t, - *pMpi2SasIOUnit1PhyData_t; +typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA +{ + U8 Port; /* 0x00 */ + U8 PortFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 MaxMinLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo; /* 0x04 */ + U16 MaxTargetPortConnectTime; /* 0x08 */ + U16 Reserved1; /* 0x0A */ +} MPI2_SAS_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, + Mpi2SasIOUnit1PhyData_t, MPI2_POINTER pMpi2SasIOUnit1PhyData_t; /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
*/ #ifndef MPI2_SAS_IOUNIT1_PHY_MAX #define MPI2_SAS_IOUNIT1_PHY_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U16 - ControlFlags; /*0x08 */ - U16 - SASNarrowMaxQueueDepth; /*0x0A */ - U16 - AdditionalControlFlags; /*0x0C */ - U16 - SASWideMaxQueueDepth; /*0x0E */ - U8 - NumPhys; /*0x10 */ - U8 - SATAMaxQDepth; /*0x11 */ - U8 - ReportDeviceMissingDelay; /*0x12 */ - U8 - IODeviceMissingDelay; /*0x13 */ - MPI2_SAS_IO_UNIT1_PHY_DATA - PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 ControlFlags; /* 0x08 */ + U16 SASNarrowMaxQueueDepth; /* 0x0A */ + U16 AdditionalControlFlags; /* 0x0C */ + U16 SASWideMaxQueueDepth; /* 0x0E */ + U8 NumPhys; /* 0x10 */ + U8 SATAMaxQDepth; /* 0x11 */ + U8 ReportDeviceMissingDelay; /* 0x12 */ + U8 IODeviceMissingDelay; /* 0x13 */ + MPI2_SAS_IO_UNIT1_PHY_DATA PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /* 0x14 */ } MPI2_CONFIG_PAGE_SASIOUNIT_1, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, - Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, + Mpi2SasIOUnitPage1_t, MPI2_POINTER pMpi2SasIOUnitPage1_t; #define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) -/*values for SAS IO Unit Page 1 ControlFlags */ +/* values for SAS IO Unit Page 1 ControlFlags */ #define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) #define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) -#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. */ #define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) #define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) @@ -2342,9 +2312,9 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { #define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) #define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) #define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) -#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) +#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. 
*/ -/*values for SAS IO Unit Page 1 AdditionalControlFlags */ +/* values for SAS IO Unit Page 1 AdditionalControlFlags */ #define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) #define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) #define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) @@ -2355,20 +2325,20 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { #define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) #define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) -/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ +/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ #define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) #define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) -/*values for SAS IO Unit Page 1 PortFlags */ +/* values for SAS IO Unit Page 1 PortFlags */ #define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) -/*values for SAS IO Unit Page 1 PhyFlags */ +/* values for SAS IO Unit Page 1 PhyFlags */ #define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) #define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) #define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) #define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) -/*values for SAS IO Unit Page 1 MaxMinLinkRate */ +/* values for SAS IO Unit Page 1 MaxMinLinkRate */ #define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) #define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) #define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) @@ -2382,101 +2352,89 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { #define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) #define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C) -/*see mpi2_sas.h for values for - *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ +/* see mpi2_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ -/*SAS IO Unit Page 4 (for MPI v2.5 and earlier) */ +/* SAS IO Unit Page 4 (for MPI v2.5 and earlier) */ -typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP { - U8 MaxTargetSpinup; /*0x00 */ - U8 SpinupDelay; /*0x01 */ - U8 SpinupFlags; /*0x02 */ - U8 Reserved1; /*0x03 */ -} MPI2_SAS_IOUNIT4_SPINUP_GROUP, - *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, - Mpi2SasIOUnit4SpinupGroup_t, - *pMpi2SasIOUnit4SpinupGroup_t; -/*defines for SAS IO Unit Page 4 SpinupFlags */ +typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP +{ + U8 MaxTargetSpinup; /* 0x00 */ + U8 SpinupDelay; /* 0x01 */ + U8 SpinupFlags; /* 0x02 */ + U8 Reserved1; /* 0x03 */ +} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, + Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t; + +/* defines for SAS IO Unit Page 4 SpinupFlags */ #define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
*/ #ifndef MPI2_SAS_IOUNIT4_PHY_MAX #define MPI2_SAS_IOUNIT4_PHY_MAX (4) #endif -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */ - MPI2_SAS_IOUNIT4_SPINUP_GROUP - SpinupGroupParameters[4]; /*0x08 */ - U32 - Reserved1; /*0x18 */ - U32 - Reserved2; /*0x1C */ - U32 - Reserved3; /*0x20 */ - U8 - BootDeviceWaitTime; /*0x24 */ - U8 - SATADeviceWaitTime; /*0x25 */ - U16 - Reserved5; /*0x26 */ - U8 - NumPhys; /*0x28 */ - U8 - PEInitialSpinupDelay; /*0x29 */ - U8 - PEReplyDelay; /*0x2A */ - U8 - Flags; /*0x2B */ - U8 - PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */ + U32 Reserved1; /* 0x18 */ + U32 Reserved2; /* 0x1C */ + U32 Reserved3; /* 0x20 */ + U8 BootDeviceWaitTime; /* 0x24 */ + U8 SATADeviceWaitTime; /* 0x25 */ + U16 Reserved5; /* 0x26 */ + U8 NumPhys; /* 0x28 */ + U8 PEInitialSpinupDelay; /* 0x29 */ + U8 PEReplyDelay; /* 0x2A */ + U8 Flags; /* 0x2B */ + U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /* 0x2C */ } MPI2_CONFIG_PAGE_SASIOUNIT_4, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, - Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, + Mpi2SasIOUnitPage4_t, MPI2_POINTER pMpi2SasIOUnitPage4_t; #define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02) -/*defines for Flags field */ +/* defines for Flags field */ #define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01) -/*defines for PHY field */ +/* defines for PHY field */ #define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) -/*SAS IO Unit Page 5 */ +/* SAS IO Unit Page 5 */ -typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { - U8 ControlFlags; /*0x00 */ - U8 PortWidthModGroup; /*0x01 */ - U16 InactivityTimerExponent; /*0x02 */ - U8 SATAPartialTimeout; /*0x04 */ - U8 Reserved2; /*0x05 */ - U8 SATASlumberTimeout; /*0x06 */ - U8 Reserved3; /*0x07 */ - U8 SASPartialTimeout; /*0x08 */ - U8 Reserved4; /*0x09 */ - U8 SASSlumberTimeout; /*0x0A */ - U8 Reserved5; /*0x0B */ +typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS +{ + U8 ControlFlags; /* 0x00 */ + U8 PortWidthModGroup; /* 0x01 */ + U16 InactivityTimerExponent; /* 0x02 */ + U8 SATAPartialTimeout; /* 0x04 */ + U8 Reserved2; /* 0x05 */ + U8 SATASlumberTimeout; /* 0x06 */ + U8 Reserved3; /* 0x07 */ + U8 SASPartialTimeout; /* 0x08 */ + U8 Reserved4; /* 0x09 */ + U8 SASSlumberTimeout; /* 0x0A */ + U8 Reserved5; /* 0x0B */ } MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, - *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, - Mpi2SasIOUnit5PhyPmSettings_t, - *pMpi2SasIOUnit5PhyPmSettings_t; + MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t; -/*defines for ControlFlags field */ +/* defines for ControlFlags field */ #define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) #define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) #define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) #define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) -/*defines for PortWidthModeGroup field */ +/* defines for PortWidthModeGroup field */ #define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF) -/*defines for InactivityTimerExponent field */ +/* defines for InactivityTimerExponent field */ #define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) #define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) #define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) @@ -2496,42 +2454,43 @@ typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { #define 
MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. */ #ifndef MPI2_SAS_IOUNIT5_PHY_MAX #define MPI2_SAS_IOUNIT5_PHY_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 NumPhys; /*0x08 */ - U8 Reserved1;/*0x09 */ - U16 Reserved2;/*0x0A */ - U32 Reserved3;/*0x0C */ - MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS - SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhys; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */ } MPI2_CONFIG_PAGE_SASIOUNIT_5, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, - Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t; #define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) -/*SAS IO Unit Page 6 */ +/* SAS IO Unit Page 6 */ -typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS { - U8 CurrentStatus; /*0x00 */ - U8 CurrentModulation; /*0x01 */ - U8 CurrentUtilization; /*0x02 */ - U8 Reserved1; /*0x03 */ - U32 Reserved2; /*0x04 */ +typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS +{ + U8 CurrentStatus; /* 0x00 */ + U8 CurrentModulation; /* 0x01 */ + U8 CurrentUtilization; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ } MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, - *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, - Mpi2SasIOUnit6PortWidthModGroupStatus_t, - *pMpi2SasIOUnit6PortWidthModGroupStatus_t; + MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + Mpi2SasIOUnit6PortWidthModGroupStatus_t, + MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t; -/*defines for CurrentStatus field */ +/* defines for CurrentStatus field */ #define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) #define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) #define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) @@ -2541,104 +2500,102 @@ typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS { #define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) #define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) -/*defines for CurrentModulation field */ +/* defines for CurrentModulation field */ #define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) #define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) #define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) #define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumGroups at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumGroups at runtime. 
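The InactivityTimerExponent word above packs a 3-bit exponent per power condition, extracted with the ITE mask/shift pairs; per MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND, code 0 means 1 microsecond, and each higher code is assumed here to scale the unit by ten. A sketch of the SAS slumber decode under that assumption (helper name illustrative):

static U32 sas_slumber_inactivity_us(U16 inactivity_timer_exponent)
{
	U16 code = (inactivity_timer_exponent & MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER) >>
		   MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER;
	U32 us = 1;

	while (code--)	/* each step multiplies the base unit by ten */
		us *= 10;
	return us;
}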
*/ #ifndef MPI2_SAS_IOUNIT6_GROUP_MAX #define MPI2_SAS_IOUNIT6_GROUP_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x08 */ - U32 Reserved2; /*0x0C */ - U8 NumGroups; /*0x10 */ - U8 Reserved3; /*0x11 */ - U16 Reserved4; /*0x12 */ - MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS - PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U8 NumGroups; /* 0x10 */ + U8 Reserved3; /* 0x11 */ + U16 Reserved4; /* 0x12 */ + MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS + PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */ } MPI2_CONFIG_PAGE_SASIOUNIT_6, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, - Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, + Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t; #define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) -/*SAS IO Unit Page 7 */ +/* SAS IO Unit Page 7 */ -typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS { - U8 Flags; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U8 Threshold75Pct; /*0x04 */ - U8 Threshold50Pct; /*0x05 */ - U8 Threshold25Pct; /*0x06 */ - U8 Reserved3; /*0x07 */ +typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS +{ + U8 Flags; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U8 Threshold75Pct; /* 0x04 */ + U8 Threshold50Pct; /* 0x05 */ + U8 Threshold25Pct; /* 0x06 */ + U8 Reserved3; /* 0x07 */ } MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, - *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, - Mpi2SasIOUnit7PortWidthModGroupSettings_t, - *pMpi2SasIOUnit7PortWidthModGroupSettings_t; + MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + Mpi2SasIOUnit7PortWidthModGroupSettings_t, + MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t; -/*defines for Flags field */ +/* defines for Flags field */ #define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumGroups at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumGroups at runtime. 
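These GROUP_MAX/PHY_MAX guards exist because each page struct declares a one-element trailing array; the IOC reports the real count at runtime, so host code must size its buffer from that value rather than from sizeof() alone. A sizing sketch for SAS IO Unit Page 6 above (the same pattern fits Page 7 below), assuming this header is in scope:

#include <stddef.h>

static size_t sas_iounit6_page_size(U8 num_groups)
{
	size_t extra = num_groups > 1 ? num_groups - 1 : 0;

	/* sizeof() already accounts for the one built-in group entry */
	return sizeof(MPI2_CONFIG_PAGE_SASIOUNIT_6) +
	       extra * sizeof(MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS);
}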
*/ #ifndef MPI2_SAS_IOUNIT7_GROUP_MAX #define MPI2_SAS_IOUNIT7_GROUP_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 SamplingInterval; /*0x08 */ - U8 WindowLength; /*0x09 */ - U16 Reserved1; /*0x0A */ - U32 Reserved2; /*0x0C */ - U32 Reserved3; /*0x10 */ - U8 NumGroups; /*0x14 */ - U8 Reserved4; /*0x15 */ - U16 Reserved5; /*0x16 */ - MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS - PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 SamplingInterval; /* 0x08 */ + U8 WindowLength; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Reserved2; /* 0x0C */ + U32 Reserved3; /* 0x10 */ + U8 NumGroups; /* 0x14 */ + U8 Reserved4; /* 0x15 */ + U16 Reserved5; /* 0x16 */ + MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS + PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */ } MPI2_CONFIG_PAGE_SASIOUNIT_7, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7, - Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7, + Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t; #define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00) -/*SAS IO Unit Page 8 */ +/* SAS IO Unit Page 8 */ -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 - Reserved1; /*0x08 */ - U32 - PowerManagementCapabilities; /*0x0C */ - U8 - TxRxSleepStatus; /*0x10 */ - U8 - Reserved2; /*0x11 */ - U16 - Reserved3; /*0x12 */ +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 PowerManagementCapabilities; /* 0x0C */ + U8 TxRxSleepStatus; /* 0x10 */ /* reserved in MPI 2.0 */ + U8 Reserved2; /* 0x11 */ + U16 Reserved3; /* 0x12 */ } MPI2_CONFIG_PAGE_SASIOUNIT_8, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8, - Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8, + Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t; #define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) -/*defines for PowerManagementCapabilities field */ +/* defines for PowerManagementCapabilities field */ #define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000) #define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800) #define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400) @@ -2650,7 +2607,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { #define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) #define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) -/*defines for TxRxSleepStatus field */ +/* defines for TxRxSleepStatus field */ #define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00) #define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01) #define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02) @@ -2658,102 +2615,67 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { -/*SAS IO Unit Page 16 */ +/* SAS IO Unit Page 16 */ -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U64 - TimeStamp; /*0x08 */ - U32 - Reserved1; /*0x10 */ - U32 - Reserved2; /*0x14 */ - U32 - FastPathPendedRequests; /*0x18 */ - U32 - FastPathUnPendedRequests; /*0x1C */ - U32 - FastPathHostRequestStarts; /*0x20 */ - U32 - FastPathFirmwareRequestStarts; /*0x24 */ - U32 - FastPathHostCompletions; /*0x28 */ - U32 - FastPathFirmwareCompletions; /*0x2C */ - U32 - NonFastPathRequestStarts; /*0x30 */ - U32 - NonFastPathHostCompletions; /*0x30 */ +typedef 
struct _MPI2_CONFIG_PAGE_SASIOUNIT16 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U64 TimeStamp; /* 0x08 */ + U32 Reserved1; /* 0x10 */ + U32 Reserved2; /* 0x14 */ + U32 FastPathPendedRequests; /* 0x18 */ + U32 FastPathUnPendedRequests; /* 0x1C */ + U32 FastPathHostRequestStarts; /* 0x20 */ + U32 FastPathFirmwareRequestStarts; /* 0x24 */ + U32 FastPathHostCompletions; /* 0x28 */ + U32 FastPathFirmwareCompletions; /* 0x2C */ + U32 NonFastPathRequestStarts; /* 0x30 */ + U32 NonFastPathHostCompletions; /* 0x34 */ } MPI2_CONFIG_PAGE_SASIOUNIT16, - *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16, - Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16, + Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t; #define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00) /**************************************************************************** -* SAS Expander Config Pages +* SAS Expander Config Pages ****************************************************************************/ -/*SAS Expander Page 0 */ +/* SAS Expander Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U8 - PhysicalPort; /*0x08 */ - U8 - ReportGenLength; /*0x09 */ - U16 - EnclosureHandle; /*0x0A */ - U64 - SASAddress; /*0x0C */ - U32 - DiscoveryStatus; /*0x14 */ - U16 - DevHandle; /*0x18 */ - U16 - ParentDevHandle; /*0x1A */ - U16 - ExpanderChangeCount; /*0x1C */ - U16 - ExpanderRouteIndexes; /*0x1E */ - U8 - NumPhys; /*0x20 */ - U8 - SASLevel; /*0x21 */ - U16 - Flags; /*0x22 */ - U16 - STPBusInactivityTimeLimit; /*0x24 */ - U16 - STPMaxConnectTimeLimit; /*0x26 */ - U16 - STP_SMP_NexusLossTime; /*0x28 */ - U16 - MaxNumRoutedSasAddresses; /*0x2A */ - U64 - ActiveZoneManagerSASAddress;/*0x2C */ - U16 - ZoneLockInactivityLimit; /*0x34 */ - U16 - Reserved1; /*0x36 */ - U8 - TimeToReducedFunc; /*0x38 */ - U8 - InitialTimeToReducedFunc; /*0x39 */ - U8 - MaxReducedFuncTime; /*0x3A */ - U8 - Reserved2; /*0x3B */ -} MPI2_CONFIG_PAGE_EXPANDER_0, - *PTR_MPI2_CONFIG_PAGE_EXPANDER_0, - Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t; +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 ReportGenLength; /* 0x09 */ + U16 EnclosureHandle; /* 0x0A */ + U64 SASAddress; /* 0x0C */ + U32 DiscoveryStatus; /* 0x14 */ + U16 DevHandle; /* 0x18 */ + U16 ParentDevHandle; /* 0x1A */ + U16 ExpanderChangeCount; /* 0x1C */ + U16 ExpanderRouteIndexes; /* 0x1E */ + U8 NumPhys; /* 0x20 */ + U8 SASLevel; /* 0x21 */ + U16 Flags; /* 0x22 */ + U16 STPBusInactivityTimeLimit; /* 0x24 */ + U16 STPMaxConnectTimeLimit; /* 0x26 */ + U16 STP_SMP_NexusLossTime; /* 0x28 */ + U16 MaxNumRoutedSasAddresses; /* 0x2A */ + U64 ActiveZoneManagerSASAddress;/* 0x2C */ + U16 ZoneLockInactivityLimit; /* 0x34 */ + U16 Reserved1; /* 0x36 */ + U8 TimeToReducedFunc; /* 0x38 */ + U8 InitialTimeToReducedFunc; /* 0x39 */ + U8 MaxReducedFuncTime; /* 0x3A */ + U8 Reserved2; /* 0x3B */ +} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0, + Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t; #define MPI2_SASEXPANDER0_PAGEVERSION (0x06) -/*values for SAS Expander Page 0 DiscoveryStatus field */ +/* values for SAS Expander Page 0 DiscoveryStatus field */ #define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) #define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000) #define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000) @@ -2775,7 +2697,7 @@ typedef struct 
_MPI2_CONFIG_PAGE_EXPANDER_0 { #define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002) #define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) -/*values for SAS Expander Page 0 Flags field */ +/* values for SAS Expander Page 0 Flags field */ #define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) #define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) #define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) @@ -2789,137 +2711,91 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 { #define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) -/*SAS Expander Page 1 */ +/* SAS Expander Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U8 - PhysicalPort; /*0x08 */ - U8 - Reserved1; /*0x09 */ - U16 - Reserved2; /*0x0A */ - U8 - NumPhys; /*0x0C */ - U8 - Phy; /*0x0D */ - U16 - NumTableEntriesProgrammed; /*0x0E */ - U8 - ProgrammedLinkRate; /*0x10 */ - U8 - HwLinkRate; /*0x11 */ - U16 - AttachedDevHandle; /*0x12 */ - U32 - PhyInfo; /*0x14 */ - U32 - AttachedDeviceInfo; /*0x18 */ - U16 - ExpanderDevHandle; /*0x1C */ - U8 - ChangeCount; /*0x1E */ - U8 - NegotiatedLinkRate; /*0x1F */ - U8 - PhyIdentifier; /*0x20 */ - U8 - AttachedPhyIdentifier; /*0x21 */ - U8 - Reserved3; /*0x22 */ - U8 - DiscoveryInfo; /*0x23 */ - U32 - AttachedPhyInfo; /*0x24 */ - U8 - ZoneGroup; /*0x28 */ - U8 - SelfConfigStatus; /*0x29 */ - U16 - Reserved4; /*0x2A */ -} MPI2_CONFIG_PAGE_EXPANDER_1, - *PTR_MPI2_CONFIG_PAGE_EXPANDER_1, - Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t; +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumPhys; /* 0x0C */ + U8 Phy; /* 0x0D */ + U16 NumTableEntriesProgrammed; /* 0x0E */ + U8 ProgrammedLinkRate; /* 0x10 */ + U8 HwLinkRate; /* 0x11 */ + U16 AttachedDevHandle; /* 0x12 */ + U32 PhyInfo; /* 0x14 */ + U32 AttachedDeviceInfo; /* 0x18 */ + U16 ExpanderDevHandle; /* 0x1C */ + U8 ChangeCount; /* 0x1E */ + U8 NegotiatedLinkRate; /* 0x1F */ + U8 PhyIdentifier; /* 0x20 */ + U8 AttachedPhyIdentifier; /* 0x21 */ + U8 Reserved3; /* 0x22 */ + U8 DiscoveryInfo; /* 0x23 */ + U32 AttachedPhyInfo; /* 0x24 */ + U8 ZoneGroup; /* 0x28 */ + U8 SelfConfigStatus; /* 0x29 */ + U16 Reserved4; /* 0x2A */ +} MPI2_CONFIG_PAGE_EXPANDER_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_1, + Mpi2ExpanderPage1_t, MPI2_POINTER pMpi2ExpanderPage1_t; #define MPI2_SASEXPANDER1_PAGEVERSION (0x02) -/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ +/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ -/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ +/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ -/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ +/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */ -/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines - *used for the AttachedDeviceInfo field */ +/* see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines used for the AttachedDeviceInfo field */ -/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ +/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ -/*values for SAS Expander Page 1 DiscoveryInfo field */ +/* values for SAS Expander Page 1 DiscoveryInfo field */ #define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) #define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) #define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) -/*use MPI2_SAS_APHYINFO_ 
defines for AttachedPhyInfo field */ +/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ /**************************************************************************** -* SAS Device Config Pages +* SAS Device Config Pages ****************************************************************************/ -/*SAS Device Page 0 */ +/* SAS Device Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U16 - Slot; /*0x08 */ - U16 - EnclosureHandle; /*0x0A */ - U64 - SASAddress; /*0x0C */ - U16 - ParentDevHandle; /*0x14 */ - U8 - PhyNum; /*0x16 */ - U8 - AccessStatus; /*0x17 */ - U16 - DevHandle; /*0x18 */ - U8 - AttachedPhyIdentifier; /*0x1A */ - U8 - ZoneGroup; /*0x1B */ - U32 - DeviceInfo; /*0x1C */ - U16 - Flags; /*0x20 */ - U8 - PhysicalPort; /*0x22 */ - U8 - MaxPortConnections; /*0x23 */ - U64 - DeviceName; /*0x24 */ - U8 - PortGroups; /*0x2C */ - U8 - DmaGroup; /*0x2D */ - U8 - ControlGroup; /*0x2E */ - U8 - EnclosureLevel; /*0x2F */ - U32 - ConnectorName[4]; /*0x30 */ - U32 - Reserved3; /*0x34 */ -} MPI2_CONFIG_PAGE_SAS_DEV_0, - *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, - Mpi2SasDevicePage0_t, - *pMpi2SasDevicePage0_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 Slot; /* 0x08 */ + U16 EnclosureHandle; /* 0x0A */ + U64 SASAddress; /* 0x0C */ + U16 ParentDevHandle; /* 0x14 */ + U8 PhyNum; /* 0x16 */ + U8 AccessStatus; /* 0x17 */ + U16 DevHandle; /* 0x18 */ + U8 AttachedPhyIdentifier; /* 0x1A */ + U8 ZoneGroup; /* 0x1B */ + U32 DeviceInfo; /* 0x1C */ + U16 Flags; /* 0x20 */ + U8 PhysicalPort; /* 0x22 */ + U8 MaxPortConnections; /* 0x23 */ + U64 DeviceName; /* 0x24 */ + U8 PortGroups; /* 0x2C */ + U8 DmaGroup; /* 0x2D */ + U8 ControlGroup; /* 0x2E */ + U8 EnclosureLevel; /* 0x2F */ + U8 ConnectorName[4]; /* 0x30 */ + U32 Reserved3; /* 0x34 */ +} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, + Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; #define MPI2_SASDEVICE0_PAGEVERSION (0x09) -/*values for SAS Device Page 0 AccessStatus field */ +/* values for SAS Device Page 0 AccessStatus field */ #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) #define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) #define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) @@ -2928,7 +2804,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { #define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) #define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) #define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) -/*specific values for SATA Init failures */ +/* specific values for SATA Init failures */ #define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) #define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) #define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) @@ -2941,9 +2817,9 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { #define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) #define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) -/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ +/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ -/*values for SAS Device Page 0 Flags field */ +/* values for SAS Device Page 0 Flags field */ #define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) #define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) #define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) @@ -2961,172 +2837,135 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { #define 
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) +/* SAS Device Page 1 */ -/*SAS Device Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 - Reserved1; /*0x08 */ - U64 - SASAddress; /*0x0C */ - U32 - Reserved2; /*0x14 */ - U16 - DevHandle; /*0x18 */ - U16 - Reserved3; /*0x1A */ - U8 - InitialRegDeviceFIS[20];/*0x1C */ -} MPI2_CONFIG_PAGE_SAS_DEV_1, - *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, - Mpi2SasDevicePage1_t, - *pMpi2SasDevicePage1_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U64 SASAddress; /* 0x0C */ + U32 Reserved2; /* 0x14 */ + U16 DevHandle; /* 0x18 */ + U16 Reserved3; /* 0x1A */ + U8 InitialRegDeviceFIS[20];/* 0x1C */ +} MPI2_CONFIG_PAGE_SAS_DEV_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, + Mpi2SasDevicePage1_t, MPI2_POINTER pMpi2SasDevicePage1_t; #define MPI2_SASDEVICE1_PAGEVERSION (0x01) /**************************************************************************** -* SAS PHY Config Pages +* SAS PHY Config Pages ****************************************************************************/ -/*SAS PHY Page 0 */ +/* SAS PHY Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U16 - OwnerDevHandle; /*0x08 */ - U16 - Reserved1; /*0x0A */ - U16 - AttachedDevHandle; /*0x0C */ - U8 - AttachedPhyIdentifier; /*0x0E */ - U8 - Reserved2; /*0x0F */ - U32 - AttachedPhyInfo; /*0x10 */ - U8 - ProgrammedLinkRate; /*0x14 */ - U8 - HwLinkRate; /*0x15 */ - U8 - ChangeCount; /*0x16 */ - U8 - Flags; /*0x17 */ - U32 - PhyInfo; /*0x18 */ - U8 - NegotiatedLinkRate; /*0x1C */ - U8 - Reserved3; /*0x1D */ - U16 - Reserved4; /*0x1E */ -} MPI2_CONFIG_PAGE_SAS_PHY_0, - *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, - Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 OwnerDevHandle; /* 0x08 */ + U16 Reserved1; /* 0x0A */ + U16 AttachedDevHandle; /* 0x0C */ + U8 AttachedPhyIdentifier; /* 0x0E */ + U8 Reserved2; /* 0x0F */ + U32 AttachedPhyInfo; /* 0x10 */ + U8 ProgrammedLinkRate; /* 0x14 */ + U8 HwLinkRate; /* 0x15 */ + U8 ChangeCount; /* 0x16 */ + U8 Flags; /* 0x17 */ + U32 PhyInfo; /* 0x18 */ + U8 NegotiatedLinkRate; /* 0x1C */ + U8 Reserved3; /* 0x1D */ + U16 Reserved4; /* 0x1E */ +} MPI2_CONFIG_PAGE_SAS_PHY_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, + Mpi2SasPhyPage0_t, MPI2_POINTER pMpi2SasPhyPage0_t; #define MPI2_SASPHY0_PAGEVERSION (0x03) -/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ +/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ -/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ +/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ -/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ +/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ -/*values for SAS PHY Page 0 Flags field */ +/* values for SAS PHY Page 0 Flags field */ #define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) -/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ +/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */ -/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ +/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ -/*SAS PHY Page 1 */ +/* SAS PHY Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 
- Reserved1; /*0x08 */ - U32 - InvalidDwordCount; /*0x0C */ - U32 - RunningDisparityErrorCount; /*0x10 */ - U32 - LossDwordSynchCount; /*0x14 */ - U32 - PhyResetProblemCount; /*0x18 */ -} MPI2_CONFIG_PAGE_SAS_PHY_1, - *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, - Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 InvalidDwordCount; /* 0x0C */ + U32 RunningDisparityErrorCount; /* 0x10 */ + U32 LossDwordSynchCount; /* 0x14 */ + U32 PhyResetProblemCount; /* 0x18 */ +} MPI2_CONFIG_PAGE_SAS_PHY_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, + Mpi2SasPhyPage1_t, MPI2_POINTER pMpi2SasPhyPage1_t; #define MPI2_SASPHY1_PAGEVERSION (0x01) -/*SAS PHY Page 2 */ +/* SAS PHY Page 2 */ -typedef struct _MPI2_SASPHY2_PHY_EVENT { - U8 PhyEventCode; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 PhyEventInfo; /*0x04 */ -} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT, - Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t; +typedef struct _MPI2_SASPHY2_PHY_EVENT +{ + U8 PhyEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 PhyEventInfo; /* 0x04 */ +} MPI2_SASPHY2_PHY_EVENT, MPI2_POINTER PTR_MPI2_SASPHY2_PHY_EVENT, + Mpi2SasPhy2PhyEvent_t, MPI2_POINTER pMpi2SasPhy2PhyEvent_t; -/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ +/* use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhyEvents at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhyEvents at runtime. */ #ifndef MPI2_SASPHY2_PHY_EVENT_MAX #define MPI2_SASPHY2_PHY_EVENT_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 - Reserved1; /*0x08 */ - U8 - NumPhyEvents; /*0x0C */ - U8 - Reserved2; /*0x0D */ - U16 - Reserved3; /*0x0E */ - MPI2_SASPHY2_PHY_EVENT - PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */ -} MPI2_CONFIG_PAGE_SAS_PHY_2, - *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, - Mpi2SasPhyPage2_t, - *pMpi2SasPhyPage2_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhyEvents; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_SASPHY2_PHY_EVENT PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, + Mpi2SasPhyPage2_t, MPI2_POINTER pMpi2SasPhyPage2_t; #define MPI2_SASPHY2_PAGEVERSION (0x00) -/*SAS PHY Page 3 */ +/* SAS PHY Page 3 */ -typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { - U8 PhyEventCode; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U8 CounterType; /*0x04 */ - U8 ThresholdWindow; /*0x05 */ - U8 TimeUnits; /*0x06 */ - U8 Reserved3; /*0x07 */ - U32 EventThreshold; /*0x08 */ - U16 ThresholdFlags; /*0x0C */ - U16 Reserved4; /*0x0E */ -} MPI2_SASPHY3_PHY_EVENT_CONFIG, - *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, - Mpi2SasPhy3PhyEventConfig_t, - *pMpi2SasPhy3PhyEventConfig_t; +typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG +{ + U8 PhyEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U8 CounterType; /* 0x04 */ + U8 ThresholdWindow; /* 0x05 */ + U8 TimeUnits; /* 0x06 */ + U8 Reserved3; /* 0x07 */ + U32 EventThreshold; /* 0x08 */ + U16 ThresholdFlags; /* 0x0C */ + U16 Reserved4; /* 
0x0E */ +} MPI2_SASPHY3_PHY_EVENT_CONFIG, MPI2_POINTER PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, + Mpi2SasPhy3PhyEventConfig_t, MPI2_POINTER pMpi2SasPhy3PhyEventConfig_t; -/*values for PhyEventCode field */ +/* values for PhyEventCode field */ #define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) #define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) #define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) @@ -3164,83 +3003,70 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { #define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) #define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) #define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) - -/*Following codes are product specific and in MPI v2.6 and later */ -#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) +/* Following codes are product specific and in MPI v2.6 and later */ +#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) #define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4) -#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) -#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) -#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) -#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) -#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) -#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) -#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) -#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) +#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) +#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) +#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) +#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) +#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) +#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) +#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) - -/*values for the CounterType field */ +/* values for the CounterType field */ #define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) #define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) #define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) -/*values for the TimeUnits field */ +/* values for the TimeUnits field */ #define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) #define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) #define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) #define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) -/*values for the ThresholdFlags field */ +/* values for the ThresholdFlags field */ #define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) #define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhyEvents at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhyEvents at runtime. 
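Tying the Page 3 fields together: host code fills one MPI2_SASPHY3_PHY_EVENT_CONFIG entry per counter it wants the IOC to maintain. A hedged sketch requesting a saturating invalid-dword counter with event notification, assuming this header and string.h are in scope; the window and threshold values are arbitrary examples, and the little-endian conversions a real driver needs are omitted:

#include <string.h>

static void request_invalid_dword_counter(MPI2_SASPHY3_PHY_EVENT_CONFIG *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->PhyEventCode    = MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD;
	cfg->CounterType     = MPI2_SASPHY3_COUNTER_TYPE_SATURATING;
	cfg->TimeUnits       = MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND;
	cfg->ThresholdWindow = 100;	/* 100 x 1 ms observation window */
	cfg->EventThreshold  = 32;	/* notify once 32 errors accumulate */
	cfg->ThresholdFlags  = MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY;
}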
*/ #ifndef MPI2_SASPHY3_PHY_EVENT_MAX #define MPI2_SASPHY3_PHY_EVENT_MAX (1) #endif -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 - Reserved1; /*0x08 */ - U8 - NumPhyEvents; /*0x0C */ - U8 - Reserved2; /*0x0D */ - U16 - Reserved3; /*0x0E */ - MPI2_SASPHY3_PHY_EVENT_CONFIG - PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */ -} MPI2_CONFIG_PAGE_SAS_PHY_3, - *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, - Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhyEvents; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_SASPHY3_PHY_EVENT_CONFIG PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, + Mpi2SasPhyPage3_t, MPI2_POINTER pMpi2SasPhyPage3_t; #define MPI2_SASPHY3_PAGEVERSION (0x00) -/*SAS PHY Page 4 */ +/* SAS PHY Page 4 */ -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U16 - Reserved1; /*0x08 */ - U8 - Reserved2; /*0x0A */ - U8 - Flags; /*0x0B */ - U8 - InitialFrame[28]; /*0x0C */ -} MPI2_CONFIG_PAGE_SAS_PHY_4, - *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, - Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 Reserved1; /* 0x08 */ + U8 Reserved2; /* 0x0A */ + U8 Flags; /* 0x0B */ + U8 InitialFrame[28]; /* 0x0C */ +} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, + Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t; #define MPI2_SASPHY4_PAGEVERSION (0x00) -/*values for the Flags field */ +/* values for the Flags field */ #define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) #define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) @@ -3248,76 +3074,65 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { /**************************************************************************** -* SAS Port Config Pages +* SAS Port Config Pages ****************************************************************************/ -/*SAS Port Page 0 */ +/* SAS Port Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U8 - PortNumber; /*0x08 */ - U8 - PhysicalPort; /*0x09 */ - U8 - PortWidth; /*0x0A */ - U8 - PhysicalPortWidth; /*0x0B */ - U8 - ZoneGroup; /*0x0C */ - U8 - Reserved1; /*0x0D */ - U16 - Reserved2; /*0x0E */ - U64 - SASAddress; /*0x10 */ - U32 - DeviceInfo; /*0x18 */ - U32 - Reserved3; /*0x1C */ - U32 - Reserved4; /*0x20 */ -} MPI2_CONFIG_PAGE_SAS_PORT_0, - *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, - Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t; +typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PortNumber; /* 0x08 */ + U8 PhysicalPort; /* 0x09 */ + U8 PortWidth; /* 0x0A */ + U8 PhysicalPortWidth; /* 0x0B */ + U8 ZoneGroup; /* 0x0C */ + U8 Reserved1; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U64 SASAddress; /* 0x10 */ + U32 DeviceInfo; /* 0x18 */ + U32 Reserved3; /* 0x1C */ + U32 Reserved4; /* 0x20 */ +} MPI2_CONFIG_PAGE_SAS_PORT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, + Mpi2SasPortPage0_t, MPI2_POINTER pMpi2SasPortPage0_t; #define MPI2_SASPORT0_PAGEVERSION (0x00) -/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ +/* see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ /**************************************************************************** -* 
SAS Enclosure Config Pages +* SAS Enclosure Config Pages ****************************************************************************/ -/*SAS Enclosure Page 0 */ +/* SAS Enclosure Page 0, Enclosure Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x08 */ - U64 EnclosureLogicalID; /*0x0C */ - U16 Flags; /*0x14 */ - U16 EnclosureHandle; /*0x16 */ - U16 NumSlots; /*0x18 */ - U16 StartSlot; /*0x1A */ - U8 ChassisSlot; /*0x1C */ - U8 EnclosureLevel; /*0x1D */ - U16 SEPDevHandle; /*0x1E */ - U8 OEMRD; /*0x20 */ - U8 Reserved1a; /*0x21 */ - U16 Reserved2; /*0x22 */ - U32 Reserved3; /*0x24 */ +typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U64 EnclosureLogicalID; /* 0x0C */ + U16 Flags; /* 0x14 */ + U16 EnclosureHandle; /* 0x16 */ + U16 NumSlots; /* 0x18 */ + U16 StartSlot; /* 0x1A */ + U8 ChassisSlot; /* 0x1C */ + U8 EnclosureLevel; /* 0x1D */ + U16 SEPDevHandle; /* 0x1E */ + U8 OEMRD; /* 0x20 */ + U8 Reserved1a; /* 0x21 */ + U16 Reserved2; /* 0x22 */ + U32 Reserved3; /* 0x24 */ } MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, - *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, - Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t, - MPI26_CONFIG_PAGE_ENCLOSURE_0, - *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0, - Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t, + MPI26_CONFIG_PAGE_ENCLOSURE_0, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0, + Mpi26EnclosurePage0_t, MPI2_POINTER pMpi26EnclosurePage0_t; #define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) -/*values for SAS Enclosure Page 0 Flags field */ +/* values for SAS Enclosure Page 0 Flags field */ #define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080) #define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) #define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) @@ -3332,7 +3147,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { #define MPI26_ENCLOSURE0_PAGEVERSION (0x04) -/*Values for Enclosure Page 0 Flags field */ +/* Values for Enclosure Page 0 Flags field */ #define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080) #define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) #define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) @@ -3344,16 +3159,17 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { #define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) #define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) #define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) +#define MPI26_ENCLS0_FLAGS_MNG_PCIE_SW_SES_ENCL (0x0006) /**************************************************************************** -* Log Config Page +* Log Config Page ****************************************************************************/ -/*Log Page 0 */ +/* Log Page 0 */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumLogEntries at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumLogEntries at runtime. 
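The low bits of the Enclosure Page 0 Flags word select the enclosure management type, with MNG_PCIE_SW_SES_ENCL newly added by this change. A decode sketch, assuming this header is in scope; the 0x000F literal stands in for the management-type mask, which the full header defines but this hunk does not show:

static int enclosure_uses_ses(const Mpi26EnclosurePage0_t *encl)
{
	U16 mng_type = encl->Flags & 0x000F;	/* management-type field */

	return mng_type == MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE ||
	       mng_type == MPI26_ENCLS0_FLAGS_MNG_PCIE_SW_SES_ENCL;
}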
*/ #ifndef MPI2_LOG_0_NUM_LOG_ENTRIES #define MPI2_LOG_0_NUM_LOG_ENTRIES (1) @@ -3361,66 +3177,66 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { #define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C) -typedef struct _MPI2_LOG_0_ENTRY { - U64 TimeStamp; /*0x00 */ - U32 Reserved1; /*0x08 */ - U16 LogSequence; /*0x0C */ - U16 LogEntryQualifier; /*0x0E */ - U8 VP_ID; /*0x10 */ - U8 VF_ID; /*0x11 */ - U16 Reserved2; /*0x12 */ - U8 - LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */ -} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY, - Mpi2Log0Entry_t, *pMpi2Log0Entry_t; +typedef struct _MPI2_LOG_0_ENTRY +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U16 LogSequence; /* 0x0C */ + U16 LogEntryQualifier; /* 0x0E */ + U8 VP_ID; /* 0x10 */ + U8 VF_ID; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/* 0x14 */ +} MPI2_LOG_0_ENTRY, MPI2_POINTER PTR_MPI2_LOG_0_ENTRY, + Mpi2Log0Entry_t, MPI2_POINTER pMpi2Log0Entry_t; -/*values for Log Page 0 LogEntry LogEntryQualifier field */ +/* values for Log Page 0 LogEntry LogEntryQualifier field */ #define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000) #define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001) #define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002) #define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000) #define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF) -typedef struct _MPI2_CONFIG_PAGE_LOG_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x08 */ - U32 Reserved2; /*0x0C */ - U16 NumLogEntries;/*0x10 */ - U16 Reserved3; /*0x12 */ - MPI2_LOG_0_ENTRY - LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */ -} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0, - Mpi2LogPage0_t, *pMpi2LogPage0_t; +typedef struct _MPI2_CONFIG_PAGE_LOG_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U16 NumLogEntries; /* 0x10 */ + U16 Reserved3; /* 0x12 */ + MPI2_LOG_0_ENTRY LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /* 0x14 */ +} MPI2_CONFIG_PAGE_LOG_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_LOG_0, + Mpi2LogPage0_t, MPI2_POINTER pMpi2LogPage0_t; #define MPI2_LOG_0_PAGEVERSION (0x02) /**************************************************************************** -* RAID Config Page +* RAID Config Page ****************************************************************************/ -/*RAID Page 0 */ +/* RAID Page 0 */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumElements at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumElements at runtime. 
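Log Page 0 above follows the same runtime-sizing convention as the other variable-length pages. A sketch walking the in-use entries, assuming the page was read into a buffer large enough for all NumLogEntries entries (helper name illustrative):

static void scan_log_entries(const Mpi2LogPage0_t *page)
{
	U16 i;

	for (i = 0; i < page->NumLogEntries; i++) {
		const Mpi2Log0Entry_t *entry = &page->LogEntry[i];

		if (entry->LogEntryQualifier == MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED)
			continue;
		/* consume entry->LogData here */
	}
}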
*/ #ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS #define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) #endif -typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT { - U16 ElementFlags; /*0x00 */ - U16 VolDevHandle; /*0x02 */ - U8 HotSparePool; /*0x04 */ - U8 PhysDiskNum; /*0x05 */ - U16 PhysDiskDevHandle; /*0x06 */ +typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT +{ + U16 ElementFlags; /* 0x00 */ + U16 VolDevHandle; /* 0x02 */ + U8 HotSparePool; /* 0x04 */ + U8 PhysDiskNum; /* 0x05 */ + U16 PhysDiskDevHandle; /* 0x06 */ } MPI2_RAIDCONFIG0_CONFIG_ELEMENT, - *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, - Mpi2RaidConfig0ConfigElement_t, - *pMpi2RaidConfig0ConfigElement_t; + MPI2_POINTER PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + Mpi2RaidConfig0ConfigElement_t, MPI2_POINTER pMpi2RaidConfig0ConfigElement_t; -/*values for the ElementFlags field */ +/* values for the ElementFlags field */ #define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) #define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000) #define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) @@ -3428,104 +3244,105 @@ typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT { #define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) -typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 NumHotSpares; /*0x08 */ - U8 NumPhysDisks; /*0x09 */ - U8 NumVolumes; /*0x0A */ - U8 ConfigNum; /*0x0B */ - U32 Flags; /*0x0C */ - U8 ConfigGUID[24]; /*0x10 */ - U32 Reserved1; /*0x28 */ - U8 NumElements; /*0x2C */ - U8 Reserved2; /*0x2D */ - U16 Reserved3; /*0x2E */ - MPI2_RAIDCONFIG0_CONFIG_ELEMENT - ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */ +typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumHotSpares; /* 0x08 */ + U8 NumPhysDisks; /* 0x09 */ + U8 NumVolumes; /* 0x0A */ + U8 ConfigNum; /* 0x0B */ + U32 Flags; /* 0x0C */ + U8 ConfigGUID[24]; /* 0x10 */ + U32 Reserved1; /* 0x28 */ + U8 NumElements; /* 0x2C */ + U8 Reserved2; /* 0x2D */ + U16 Reserved3; /* 0x2E */ + MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /* 0x30 */ } MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, - *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, - Mpi2RaidConfigurationPage0_t, - *pMpi2RaidConfigurationPage0_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + Mpi2RaidConfigurationPage0_t, MPI2_POINTER pMpi2RaidConfigurationPage0_t; #define MPI2_RAIDCONFIG0_PAGEVERSION (0x00) -/*values for RAID Configuration Page 0 Flags field */ +/* values for RAID Configuration Page 0 Flags field */ #define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001) /**************************************************************************** -* Driver Persistent Mapping Config Pages +* Driver Persistent Mapping Config Pages ****************************************************************************/ -/*Driver Persistent Mapping Page 0 */ +/* Driver Persistent Mapping Page 0 */ -typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY { - U64 PhysicalIdentifier; /*0x00 */ - U16 MappingInformation; /*0x08 */ - U16 DeviceIndex; /*0x0A */ - U32 PhysicalBitsMapping; /*0x0C */ - U32 Reserved1; /*0x10 */ +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY +{ + U64 PhysicalIdentifier; /* 0x00 */ + U16 MappingInformation; /* 0x08 */ + U16 DeviceIndex; /* 0x0A */ + U32 PhysicalBitsMapping; /* 0x0C */ + U32 Reserved1; /* 0x10 */ } MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, - *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, - Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t; + MPI2_POINTER 
PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + Mpi2DriverMap0Entry_t, MPI2_POINTER pMpi2DriverMap0Entry_t; -typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */ +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /* 0x08 */ } MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, - *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, - Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t; + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + Mpi2DriverMappingPage0_t, MPI2_POINTER pMpi2DriverMappingPage0_t; #define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00) -/*values for Driver Persistent Mapping Page 0 MappingInformation field */ +/* values for Driver Persistent Mapping Page 0 MappingInformation field */ #define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0) #define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4) #define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) /**************************************************************************** -* Ethernet Config Pages +* Ethernet Config Pages ****************************************************************************/ -/*Ethernet Page 0 */ +/* Ethernet Page 0 */ -/*IP address (union of IPv4 and IPv6) */ -typedef union _MPI2_ETHERNET_IP_ADDR { - U32 IPv4Addr; - U32 IPv6Addr[4]; -} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR, - Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t; +/* IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR +{ + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t; #define MPI2_ETHERNET_HOST_NAME_LENGTH (32) -typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 NumInterfaces; /*0x08 */ - U8 Reserved0; /*0x09 */ - U16 Reserved1; /*0x0A */ - U32 Status; /*0x0C */ - U8 MediaState; /*0x10 */ - U8 Reserved2; /*0x11 */ - U16 Reserved3; /*0x12 */ - U8 MacAddress[6]; /*0x14 */ - U8 Reserved4; /*0x1A */ - U8 Reserved5; /*0x1B */ - MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */ - MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */ - MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */ - MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */ - MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */ - MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */ - U8 - HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ -} MPI2_CONFIG_PAGE_ETHERNET_0, - *PTR_MPI2_CONFIG_PAGE_ETHERNET_0, - Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t; +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumInterfaces; /* 0x08 */ + U8 Reserved0; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Status; /* 0x0C */ + U8 MediaState; /* 0x10 */ + U8 Reserved2; /* 0x11 */ + U16 Reserved3; /* 0x12 */ + U8 MacAddress[6]; /* 0x14 */ + U8 Reserved4; /* 0x1A */ + U8 Reserved5; /* 0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */ + U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t; #define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) -/*values for Ethernet 
Page 0 Status field */ +/* values for Ethernet Page 0 Status field */ #define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) #define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) #define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) @@ -3539,7 +3356,7 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { #define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) #define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) -/*values for Ethernet Page 0 MediaState field */ +/* values for Ethernet Page 0 MediaState field */ #define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) #define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) #define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) @@ -3551,54 +3368,35 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { #define MPI2_ETHPG0_MS_1GBIT (0x03) -/*Ethernet Page 1 */ +/* Ethernet Page 1 */ -typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 - Reserved0; /*0x08 */ - U32 - Flags; /*0x0C */ - U8 - MediaState; /*0x10 */ - U8 - Reserved1; /*0x11 */ - U16 - Reserved2; /*0x12 */ - U8 - MacAddress[6]; /*0x14 */ - U8 - Reserved3; /*0x1A */ - U8 - Reserved4; /*0x1B */ - MPI2_ETHERNET_IP_ADDR - StaticIpAddress; /*0x1C */ - MPI2_ETHERNET_IP_ADDR - StaticSubnetMask; /*0x2C */ - MPI2_ETHERNET_IP_ADDR - StaticGatewayIpAddress; /*0x3C */ - MPI2_ETHERNET_IP_ADDR - StaticDNS1IpAddress; /*0x4C */ - MPI2_ETHERNET_IP_ADDR - StaticDNS2IpAddress; /*0x5C */ - U32 - Reserved5; /*0x6C */ - U32 - Reserved6; /*0x70 */ - U32 - Reserved7; /*0x74 */ - U32 - Reserved8; /*0x78 */ - U8 - HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ -} MPI2_CONFIG_PAGE_ETHERNET_1, - *PTR_MPI2_CONFIG_PAGE_ETHERNET_1, - Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t; +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved0; /* 0x08 */ + U32 Flags; /* 0x0C */ + U8 MediaState; /* 0x10 */ + U8 Reserved1; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 MacAddress[6]; /* 0x14 */ + U8 Reserved3; /* 0x1A */ + U8 Reserved4; /* 0x1B */ + MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */ + MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */ + MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */ + MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */ + MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */ + U32 Reserved5; /* 0x6C */ + U32 Reserved6; /* 0x70 */ + U32 Reserved7; /* 0x74 */ + U32 Reserved8; /* 0x78 */ + U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t; #define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) -/*values for Ethernet Page 1 Flags field */ +/* values for Ethernet Page 1 Flags field */ #define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) #define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) #define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) @@ -3609,7 +3407,7 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { #define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) #define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) -/*values for Ethernet Page 1 MediaState field */ +/* values for Ethernet Page 1 MediaState field */ #define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) #define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) #define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) @@ -3622,36 +3420,33 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { /**************************************************************************** -* Extended Manufacturing Config Pages +* Extended Manufacturing Config Pages 
****************************************************************************/ /* - *Generic structure to use for product-specific extended manufacturing pages - *(currently Extended Manufacturing Page 40 through Extended Manufacturing - *Page 60). + * Generic structure to use for product-specific extended manufacturing pages + * (currently Extended Manufacturing Page 40 through Extended Manufacturing + * Page 60). */ -typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { - MPI2_CONFIG_EXTENDED_PAGE_HEADER - Header; /*0x00 */ - U32 - ProductSpecificInfo; /*0x08 */ +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo; /* 0x08 */ } MPI2_CONFIG_PAGE_EXT_MAN_PS, - *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, - Mpi2ExtManufacturingPagePS_t, - *pMpi2ExtManufacturingPagePS_t; - -/*PageVersion should be provided by product-specific code */ + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, MPI2_POINTER pMpi2ExtManufacturingPagePS_t; +/* PageVersion should be provided by product-specific code */ /**************************************************************************** -* values for fields used by several types of PCIe Config Pages +* values for fields used by several types of PCIe Config Pages ****************************************************************************/ -/*values for NegotiatedLinkRates fields */ +/* values for NegotiatedLinkRates fields */ #define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) -/*link rates used for Negotiated Physical Link Rate */ +/* link rates used for Negotiated Physical Link Rate */ #define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00) #define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01) #define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02) @@ -3661,112 +3456,128 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { /**************************************************************************** -* PCIe IO Unit Config Pages (MPI v2.6 and later) +* PCIe IO Unit Config Pages (MPI v2.6 and later) ****************************************************************************/ -/*PCIe IO Unit Page 0 */ +/* PCIe IO Unit Page 0 */ -typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA { - U8 Link; /*0x00 */ - U8 LinkFlags; /*0x01 */ - U8 PhyFlags; /*0x02 */ - U8 NegotiatedLinkRate; /*0x03 */ - U32 ControllerPhyDeviceInfo;/*0x04 */ - U16 AttachedDevHandle; /*0x08 */ - U16 ControllerDevHandle; /*0x0A */ - U32 EnumerationStatus; /*0x0C */ - U32 Reserved1; /*0x10 */ -} MPI26_PCIE_IO_UNIT0_PHY_DATA, - *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA, - Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t; +typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA +{ + U8 Link; /* 0x00 */ + U8 LinkFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 NegotiatedLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo;/* 0x04 */ + U16 AttachedDevHandle; /* 0x08 */ + U16 ControllerDevHandle; /* 0x0A */ + U32 EnumerationStatus; /* 0x0C */ + U32 Reserved1; /* 0x10 */ +} MPI26_PCIE_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA, + Mpi26PCIeIOUnit0PhyData_t, MPI2_POINTER pMpi26PCIeIOUnit0PhyData_t; /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
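For PCIe phys, NegotiatedLinkRate carries the physical rate code in its low nibble, masked by MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL. A sketch turning that code into a usable-link check, assuming this header is in scope (helper name illustrative):

static int pcie_phy_link_usable(const MPI26_PCIE_IO_UNIT0_PHY_DATA *phy)
{
	U8 rate = phy->NegotiatedLinkRate & MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL;

	return rate != MPI26_PCIE_NEG_LINK_RATE_UNKNOWN &&
	       rate != MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED;
}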
*/ #ifndef MPI26_PCIE_IOUNIT0_PHY_MAX #define MPI26_PCIE_IOUNIT0_PHY_MAX (1) #endif -typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U32 Reserved1; /*0x08 */ - U8 NumPhys; /*0x0C */ - U8 InitStatus; /*0x0D */ - U16 Reserved3; /*0x0E */ - MPI26_PCIE_IO_UNIT0_PHY_DATA - PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */ +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhys; /* 0x0C */ + U8 InitStatus; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI26_PCIE_IO_UNIT0_PHY_DATA PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /* 0x10 */ } MPI26_CONFIG_PAGE_PIOUNIT_0, - *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0, - Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t; + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PIOUNIT_0, + Mpi26PCIeIOUnitPage0_t, MPI2_POINTER pMpi26PCIeIOUnitPage0_t; #define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00) -/*values for PCIe IO Unit Page 0 LinkFlags */ -#define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08) +/* values for PCIe IO Unit Page 0 LinkFlags */ +#define MPI26_PCIEIOUNIT0_LF_ENUMERATION_IN_PROGRESS (0x08) +#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_MASK (0x01) +#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_PAGE1 (0x00) +#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_BACKPLANE (0x01) -/*values for PCIe IO Unit Page 0 PhyFlags */ +/* values for PCIe IO Unit Page 0 PhyFlags */ #define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) -/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ +/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ -/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo - *values - */ +/* see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo values */ -/*values for PCIe IO Unit Page 0 EnumerationStatus */ +/* values for PCIe IO Unit Page 0 EnumerationStatus */ #define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000) #define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000) -/*PCIe IO Unit Page 1 */ +/* PCIe IO Unit Page 1 */ -typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA { - U8 Link; /*0x00 */ - U8 LinkFlags; /*0x01 */ - U8 PhyFlags; /*0x02 */ - U8 MaxMinLinkRate; /*0x03 */ - U32 ControllerPhyDeviceInfo; /*0x04 */ - U32 Reserved1; /*0x08 */ -} MPI26_PCIE_IO_UNIT1_PHY_DATA, - *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA, - Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t; +typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA +{ + U8 Link; /* 0x00 */ + U8 LinkFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 MaxMinLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo; /* 0x04 */ + U32 Reserved1; /* 0x08 */ +} MPI26_PCIE_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA, + Mpi26PCIeIOUnit1PhyData_t, MPI2_POINTER pMpi26PCIeIOUnit1PhyData_t; -/*values for LinkFlags */ +/* values for LinkFlags */ #define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00) #define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01) #define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumPhys at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
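The LF_CFG_SRC bits added above let host code see whether the IOC took its PCIe link configuration from IO Unit Page 1 or from the backplane. A sketch of that check, assuming this header is in scope:

static int pcie_cfg_from_backplane(const MPI26_PCIE_IO_UNIT0_PHY_DATA *phy)
{
	return (phy->LinkFlags & MPI26_PCIEIOUNIT0_LF_CFG_SRC_MASK) ==
	       MPI26_PCIEIOUNIT0_LF_CFG_SRC_BACKPLANE;
}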
*/ #ifndef MPI26_PCIE_IOUNIT1_PHY_MAX #define MPI26_PCIE_IOUNIT1_PHY_MAX (1) #endif -typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U16 ControlFlags; /*0x08 */ - U16 Reserved; /*0x0A */ - U16 AdditionalControlFlags; /*0x0C */ - U16 NVMeMaxQueueDepth; /*0x0E */ - U8 NumPhys; /*0x10 */ - U8 DMDReportPCIe; /*0x11 */ - U16 Reserved2; /*0x12 */ - MPI26_PCIE_IO_UNIT1_PHY_DATA - PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */ +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 ControlFlags; /* 0x08 */ + U16 Reserved; /* 0x0A */ + U16 AdditionalControlFlags; /* 0x0C */ + U16 NVMeMaxQueueDepth; /* 0x0E */ + U8 NumPhys; /* 0x10 */ + U8 DMDReportPCIe; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + MPI26_PCIE_IO_UNIT1_PHY_DATA PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/* 0x14 */ } MPI26_CONFIG_PAGE_PIOUNIT_1, - *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1, - Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t; + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PIOUNIT_1, + Mpi26PCIeIOUnitPage1_t, MPI2_POINTER pMpi26PCIeIOUnitPage1_t; #define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00) -/*values for PCIe IO Unit Page 1 PhyFlags */ +/* values for PCIe IO Unit Page 1 ControlFlags */ +#define MPI26_PCIEIOUNIT1_CF_BP_CONFIG_DISABLE (0x0080) +#define MPI26_PCIEIOUNIT1_CF_BP_AUTO_CLOCK_ENABLE (0x0060) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_MASK (0x0030) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_ALL_DIS (0x0000) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_SRIS_EN (0x0010) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_SRNS_EN (0x0020) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_MASK (0x000F) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_DISABLE (0x0000) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_2_5_MAX (0x0002) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_5_0_MAX (0x0003) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_8_0_MAX (0x0004) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_16_0_MAX (0x0005) + + +/* values for PCIe IO Unit Page 1 PhyFlags */ #define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) #define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01) -/*values for PCIe IO Unit Page 1 MaxMinLinkRate */ +/* values for PCIe IO Unit Page 1 MaxMinLinkRate */ #define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0) #define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4) #define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20) @@ -3774,103 +3585,108 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 { #define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) #define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) -/*values for PCIe IO Unit Page 1 DMDReportPCIe */ +/* values for PCIe IO Unit Page 1 DMDReportPCIe */ #define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80) #define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00) #define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80) #define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F) -/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo - *values - */ + +/* see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo values */ /**************************************************************************** -* PCIe Switch Config Pages (MPI v2.6 and later) +* PCIe Switch Config Pages (MPI v2.6 and later) ****************************************************************************/ -/*PCIe Switch Page 0 */ +/* PCIe Switch Page 0 */ -typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 PhysicalPort; /*0x08 */ - U8 Reserved1; /*0x09 */ - U16 Reserved2; /*0x0A */ - U16 DevHandle; /*0x0C */ - U16 ParentDevHandle; /*0x0E */ - U8 NumPorts; /*0x10 */ - U8 
PCIeLevel; /*0x11 */ - U16 Reserved3; /*0x12 */ - U32 Reserved4; /*0x14 */ - U32 Reserved5; /*0x18 */ - U32 Reserved6; /*0x1C */ -} MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0, - Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t; +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 DevHandle; /* 0x0C */ + U16 ParentDevHandle; /* 0x0E */ + U8 NumPorts; /* 0x10 */ + U8 PCIeLevel; /* 0x11 */ + U16 Reserved3; /* 0x12 */ + U32 Reserved4; /* 0x14 */ + U32 Reserved5; /* 0x18 */ + U32 Reserved6; /* 0x1C */ +} MPI26_CONFIG_PAGE_PSWITCH_0, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PSWITCH_0, + Mpi26PCIeSwitchPage0_t, MPI2_POINTER pMpi26PCIeSwitchPage0_t; #define MPI26_PCIESWITCH0_PAGEVERSION (0x00) -/*PCIe Switch Page 1 */ +/* PCIe Switch Page 1 */ -typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 PhysicalPort; /*0x08 */ - U8 Reserved1; /*0x09 */ - U16 Reserved2; /*0x0A */ - U8 NumPorts; /*0x0C */ - U8 PortNum; /*0x0D */ - U16 AttachedDevHandle; /*0x0E */ - U16 SwitchDevHandle; /*0x10 */ - U8 NegotiatedPortWidth; /*0x12 */ - U8 NegotiatedLinkRate; /*0x13 */ - U32 Reserved4; /*0x14 */ - U32 Reserved5; /*0x18 */ -} MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1, - Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t; +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumPorts; /* 0x0C */ + U8 PortNum; /* 0x0D */ + U16 AttachedDevHandle; /* 0x0E */ + U16 SwitchDevHandle; /* 0x10 */ + U8 NegotiatedPortWidth; /* 0x12 */ + U8 NegotiatedLinkRate; /* 0x13 */ + U16 Flags; /* 0x14 */ + U16 Reserved4; /* 0x16 */ + U32 Reserved5; /* 0x18 */ +} MPI26_CONFIG_PAGE_PSWITCH_1, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PSWITCH_1, + Mpi26PCIeSwitchPage1_t, MPI2_POINTER pMpi26PCIeSwitchPage1_t; -#define MPI26_PCIESWITCH1_PAGEVERSION (0x00) +#define MPI26_PCIESWITCH1_PAGEVERSION (0x00) -/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ +/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ /* defines for the Flags field */ #define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002) #define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001) + + /**************************************************************************** -* PCIe Device Config Pages (MPI v2.6 and later) +* PCIe Device Config Pages (MPI v2.6 and later) ****************************************************************************/ -/*PCIe Device Page 0 */ +/* PCIe Device Page 0 */ -typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U16 Slot; /*0x08 */ - U16 EnclosureHandle; /*0x0A */ - U64 WWID; /*0x0C */ - U16 ParentDevHandle; /*0x14 */ - U8 PortNum; /*0x16 */ - U8 AccessStatus; /*0x17 */ - U16 DevHandle; /*0x18 */ - U8 PhysicalPort; /*0x1A */ - U8 Reserved1; /*0x1B */ - U32 DeviceInfo; /*0x1C */ - U32 Flags; /*0x20 */ - U8 SupportedLinkRates; /*0x24 */ - U8 MaxPortWidth; /*0x25 */ - U8 NegotiatedPortWidth; /*0x26 */ - U8 NegotiatedLinkRate; /*0x27 */ - U8 EnclosureLevel; /*0x28 */ - U8 Reserved2; /*0x29 */ - U16 Reserved3; /*0x2A */ - U8 ConnectorName[4]; /*0x2C */ - U32 Reserved4; /*0x30 */ - U32 Reserved5; /*0x34 */ -} MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0, - Mpi26PCIeDevicePage0_t, 
*pMpi26PCIeDevicePage0_t; +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 Slot; /* 0x08 */ + U16 EnclosureHandle; /* 0x0A */ + U64 WWID; /* 0x0C */ + U16 ParentDevHandle; /* 0x14 */ + U8 PortNum; /* 0x16 */ + U8 AccessStatus; /* 0x17 */ + U16 DevHandle; /* 0x18 */ + U8 PhysicalPort; /* 0x1A */ + U8 Reserved1; /* 0x1B */ + U32 DeviceInfo; /* 0x1C */ + U32 Flags; /* 0x20 */ + U8 SupportedLinkRates; /* 0x24 */ + U8 MaxPortWidth; /* 0x25 */ + U8 NegotiatedPortWidth; /* 0x26 */ + U8 NegotiatedLinkRate; /* 0x27 */ + U8 EnclosureLevel; /* 0x28 */ + U8 Reserved2; /* 0x29 */ + U16 Reserved3; /* 0x2A */ + U8 ConnectorName[4]; /* 0x2C */ + U32 Reserved4; /* 0x30 */ + U32 Reserved5; /* 0x34 */ +} MPI26_CONFIG_PAGE_PCIEDEV_0, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIEDEV_0, + Mpi26PCIeDevicePage0_t, MPI2_POINTER pMpi26PCIeDevicePage0_t; #define MPI26_PCIEDEVICE0_PAGEVERSION (0x01) -/*values for PCIe Device Page 0 AccessStatus field */ +/* values for PCIe Device Page 0 AccessStatus field */ #define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00) #define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04) #define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02) @@ -3892,11 +3708,9 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 { #define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F) -/*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo - *field - */ +/* see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo field */ -/*values for PCIe Device Page 0 Flags field*/ +/* values for PCIe Device Page 0 Flags field */ #define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000) #define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000) #define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000) @@ -3918,108 +3732,120 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 { #define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02) #define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01) -/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ +/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ -/*PCIe Device Page 2 */ +/* PCIe Device Page 2 */ -typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U16 DevHandle; /*0x08 */ - U8 ControllerResetTO; /* 0x0A */ - U8 Reserved1; /* 0x0B */ - U32 MaximumDataTransferSize; /*0x0C */ - U32 Capabilities; /*0x10 */ - U16 NOIOB; /* 0x14 */ - U16 Reserved2; /* 0x16 */ -} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, - Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t; +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x08 */ + U8 ControllerResetTO; /* 0x0A */ + U8 Reserved1; /* 0x0B */ + U32 MaximumDataTransferSize;/* 0x0C */ + U32 Capabilities; /* 0x10 */ + U16 NOIOB; /* 0x14 */ + U16 ShutdownLatency; /* 0x16 */ + U16 VendorID; /* 0x18 */ + U16 DeviceID; /* 0x1A */ + U16 SubsystemVendorID; /* 0x1C */ + U16 SubsystemID; /* 0x1E */ + U8 RevisionID; /* 0x20 */ + U8 Reserved21[3]; /* 0x21 */ +} MPI26_CONFIG_PAGE_PCIEDEV_2, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, + Mpi26PCIeDevicePage2_t, MPI2_POINTER pMpi26PCIeDevicePage2_t; #define MPI26_PCIEDEVICE2_PAGEVERSION (0x01) -/*defines for PCIe Device Page 2 Capabilities field */ -#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008) -#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) -#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) -#define 
MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) +/* defines for PCIe Device Page 2 Capabilities field */ +#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008) +#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) +#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) +#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) /* Defines for the NOIOB field */ #define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000) + /**************************************************************************** -* PCIe Link Config Pages (MPI v2.6 and later) +* PCIe Link Config Pages (MPI v2.6 and later) ****************************************************************************/ -/*PCIe Link Page 1 */ +/* PCIe Link Page 1 */ -typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 Link; /*0x08 */ - U8 Reserved1; /*0x09 */ - U16 Reserved2; /*0x0A */ - U32 CorrectableErrorCount; /*0x0C */ - U16 NonFatalErrorCount; /*0x10 */ - U16 Reserved3; /*0x12 */ - U16 FatalErrorCount; /*0x14 */ - U16 Reserved4; /*0x16 */ -} MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1, - Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t; +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 Link; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 CorrectableErrorCount; /* 0x0C */ + U16 NonFatalErrorCount; /* 0x10 */ + U16 Reserved3; /* 0x12 */ + U16 FatalErrorCount; /* 0x14 */ + U16 Reserved4; /* 0x16 */ +} MPI26_CONFIG_PAGE_PCIELINK_1, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_1, + Mpi26PcieLinkPage1_t, MPI2_POINTER pMpi26PcieLinkPage1_t; #define MPI26_PCIELINK1_PAGEVERSION (0x00) -/*PCIe Link Page 2 */ +/* PCIe Link Page 2 */ -typedef struct _MPI26_PCIELINK2_LINK_EVENT { - U8 LinkEventCode; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 LinkEventInfo; /*0x04 */ -} MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT, - Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t; +typedef struct _MPI26_PCIELINK2_LINK_EVENT +{ + U8 LinkEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 LinkEventInfo; /* 0x04 */ +} MPI26_PCIELINK2_LINK_EVENT, MPI2_POINTER PTR_MPI26_PCIELINK2_LINK_EVENT, + Mpi26PcieLink2LinkEvent_t, MPI2_POINTER pMpi26PcieLink2LinkEvent_t; -/*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */ +/* use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumLinkEvents at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumLinkEvents at runtime. 
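+ *
+ * The same two-step sizing shown for the PCIe IO Unit pages applies
+ * here, with NumLinkEvents in place of NumPhys. Once the full page is
+ * in memory, a consumer might walk the events as follows (a sketch;
+ * handle_link_event() is a hypothetical callback, not part of this
+ * header):
+ *
+ *   for (i = 0; i < page->NumLinkEvents; i++)
+ *       handle_link_event(page->LinkEvent[i].LinkEventCode,
+ *                         page->LinkEvent[i].LinkEventInfo);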
*/ #ifndef MPI26_PCIELINK2_LINK_EVENT_MAX #define MPI26_PCIELINK2_LINK_EVENT_MAX (1) #endif -typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 Link; /*0x08 */ - U8 Reserved1; /*0x09 */ - U16 Reserved2; /*0x0A */ - U8 NumLinkEvents; /*0x0C */ - U8 Reserved3; /*0x0D */ - U16 Reserved4; /*0x0E */ - MPI26_PCIELINK2_LINK_EVENT - LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */ -} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2, - Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t; +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 Link; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumLinkEvents; /* 0x0C */ + U8 Reserved3; /* 0x0D */ + U16 Reserved4; /* 0x0E */ + MPI26_PCIELINK2_LINK_EVENT LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /* 0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_2, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_2, + Mpi26PcieLinkPage2_t, MPI2_POINTER pMpi26PcieLinkPage2_t; #define MPI26_PCIELINK2_PAGEVERSION (0x00) -/*PCIe Link Page 3 */ -typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG { - U8 LinkEventCode; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U8 CounterType; /*0x04 */ - U8 ThresholdWindow; /*0x05 */ - U8 TimeUnits; /*0x06 */ - U8 Reserved3; /*0x07 */ - U32 EventThreshold; /*0x08 */ - U16 ThresholdFlags; /*0x0C */ - U16 Reserved4; /*0x0E */ -} MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG, - Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t; +/* PCIe Link Page 3 */ -/*values for LinkEventCode field */ +typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG +{ + U8 LinkEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U8 CounterType; /* 0x04 */ + U8 ThresholdWindow; /* 0x05 */ + U8 TimeUnits; /* 0x06 */ + U8 Reserved3; /* 0x07 */ + U32 EventThreshold; /* 0x08 */ + U16 ThresholdFlags; /* 0x0C */ + U16 Reserved4; /* 0x0E */ +} MPI26_PCIELINK3_LINK_EVENT_CONFIG, MPI2_POINTER PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG, + Mpi26PcieLink3LinkEventConfig_t, MPI2_POINTER pMpi26PcieLink3LinkEventConfig_t; + +/* values for LinkEventCode field */ #define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00) #define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01) #define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02) @@ -4040,42 +3866,43 @@ typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG { #define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11) #define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12) -/*values for the CounterType field */ +/* values for the CounterType field */ #define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00) #define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01) #define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02) -/*values for the TimeUnits field */ +/* values for the TimeUnits field */ #define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00) #define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01) #define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02) #define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03) -/*values for the ThresholdFlags field */ +/* values for the ThresholdFlags field */ #define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001) /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check the value returned for NumLinkEvents at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumLinkEvents at runtime. 
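+ *
+ * As an illustration, one LinkEventConfig entry could be populated from
+ * the defines above (the window and threshold values are arbitrary
+ * examples, not firmware-mandated settings; byte-order conversion is
+ * again omitted):
+ *
+ *   Mpi26PcieLink3LinkEventConfig_t cfg = { 0 };
+ *   cfg.LinkEventCode   = MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED;
+ *   cfg.CounterType     = MPI26_PCIELINK3_COUNTER_TYPE_SATURATING;
+ *   cfg.ThresholdWindow = 100;
+ *   cfg.TimeUnits       = MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND;
+ *   cfg.EventThreshold  = 10;
+ *   cfg.ThresholdFlags  = MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY;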
*/ #ifndef MPI26_PCIELINK3_LINK_EVENT_MAX #define MPI26_PCIELINK3_LINK_EVENT_MAX (1) #endif -typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 { - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ - U8 Link; /*0x08 */ - U8 Reserved1; /*0x09 */ - U16 Reserved2; /*0x0A */ - U8 NumLinkEvents; /*0x0C */ - U8 Reserved3; /*0x0D */ - U16 Reserved4; /*0x0E */ - MPI26_PCIELINK3_LINK_EVENT_CONFIG - LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */ -} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3, - Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t; +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 Link; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumLinkEvents; /* 0x0C */ + U8 Reserved3; /* 0x0D */ + U16 Reserved4; /* 0x0E */ + MPI26_PCIELINK3_LINK_EVENT_CONFIG LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /* 0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_3, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_3, + Mpi26PcieLinkPage3_t, MPI2_POINTER pMpi26PcieLinkPage3_t; #define MPI26_PCIELINK3_PAGEVERSION (0x00) #endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h b/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h new file mode 100755 index 000000000000..8d4af26144fb --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h @@ -0,0 +1,122 @@ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_hbd.h + * Title: MPI Host Based Discovery messages and structures + * Creation Date: October 21, 2009 + * + * mpi2_hbd.h Version: 02.00.04 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 10-28-09 02.00.00 Initial version. + * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from + * HBD Action request, replaced by AdditionalInfo field. + * 11-18-11 02.00.02 Incorporating additions for MPI v2.5. + * 11-18-14 02.00.03 Updated copyright information. + * 02-17-16 02.00.04 Added SAS 4 22.5 gbs speed support. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_HBD_H +#define MPI2_HBD_H + +/**************************************************************************** +* Host Based Discovery Action messages +****************************************************************************/ + +/* Host Based Discovery Action Request Message */ +typedef struct _MPI2_HBD_ACTION_REQUEST +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 Reserved4; /* 0x0C */ + U64 SASAddress; /* 0x10 */ + U32 Reserved5; /* 0x18 */ + U32 HbdDeviceInfo; /* 0x1C */ + U16 ParentDevHandle; /* 0x20 */ + U16 MaxQDepth; /* 0x22 */ + U8 FirstPhyIdentifier; /* 0x24 */ + U8 Port; /* 0x25 */ + U8 MaxConnections; /* 0x26 */ + U8 MaxRate; /* 0x27 */ + U32 AdditionalInfo; /* 0x28 */ + U16 InitialAWT; /* 0x2C */ + U16 Reserved7; /* 0x2E */ + U32 Reserved8; /* 0x30 */ +} MPI2_HBD_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_HBD_ACTION_REQUEST, + Mpi2HbdActionRequest_t, MPI2_POINTER pMpi2HbdActionRequest_t; + +/* values for the Operation field */ +#define MPI2_HBD_OP_ADD_DEVICE (0x01) +#define MPI2_HBD_OP_REMOVE_DEVICE (0x02) +#define MPI2_HBD_OP_UPDATE_DEVICE (0x03) + +/* values for the HbdDeviceInfo field */ +#define MPI2_HBD_DEVICE_INFO_VIRTUAL_DEVICE (0x00004000) +#define MPI2_HBD_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_HBD_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_HBD_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_HBD_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_HBD_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_HBD_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_HBD_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_HBD_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_HBD_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_HBD_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_HBD_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_HBD_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_HBD_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_HBD_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_HBD_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +/* values for the MaxRate field */ +#define MPI2_HBD_MAX_RATE_MASK (0x0F) +#define MPI2_HBD_MAX_RATE_1_5 (0x08) +#define MPI2_HBD_MAX_RATE_3_0 (0x09) +#define MPI2_HBD_MAX_RATE_6_0 (0x0A) +#define MPI25_HBD_MAX_RATE_12_0 (0x0B) +#define MPI26_HBD_MAX_RATE_22_5 (0x0C) + + +/* Host Based Discovery Action Reply Message */ +typedef struct _MPI2_HBD_ACTION_REPLY +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_HBD_ACTION_REPLY, MPI2_POINTER PTR_MPI2_HBD_ACTION_REPLY, + Mpi2HbdActionReply_t, MPI2_POINTER pMpi2HbdActionReply_t; + + +#endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_history.txt b/drivers/scsi/mpt3sas/mpi/mpi2_history.txt new file mode 100755 index 000000000000..3609b5afa91c --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_history.txt @@ -0,0 +1,796 @@ + ============================== + Fusion-MPT MPI 2.0 / 2.5 Header File Change History + ============================== + + 
Copyright 2000-2015 Avago Technologies. All rights reserved.
+
+ ---------------------------------------
+ Header Set Release Version: 02.00.50
+ Header Set Release Date: 09-29-17
+ ---------------------------------------
+
+ Filename Current version Prior version
+ ---------- --------------- -------------
+ mpi2.h 02.00.50 02.00.49
+ mpi2_cnfg.h 02.00.42 02.00.41
+ mpi2_init.h 02.00.21 02.00.21
+ mpi2_ioc.h 02.00.34 02.00.33
+ mpi2_raid.h 02.00.11 02.00.11
+ mpi2_sas.h 02.00.10 02.00.10
+ mpi2_targ.h 02.00.09 02.00.09
+ mpi2_tool.h 02.00.14 02.00.14
+ mpi2_type.h 02.00.01 02.00.01
+ mpi2_ra.h 02.00.01 02.00.01
+ mpi2_hbd.h 02.00.04 02.00.04
+ mpi2_pci.h 02.00.02 02.00.02
+ mpi2_history.txt 02.00.46 02.00.45
+
+
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+
+mpi2.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-14 02.00.36 Updated copyright information. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 03-16-15 02.00.37 Updated for MPI v2.6. + * Bumped MPI2_HEADER_VERSION_UNIT. + * Added Scratchpad registers and + * AtomicRequestDescriptorPost register to + * MPI2_SYSTEM_INTERFACE_REGS. + * Added MPI2_DIAG_SBR_RELOAD. + * Added MPI2_IOCSTATUS_INSUFFICIENT_POWER. + * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. + * Added V7 HostDiagnostic register defines + * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT + * 01-04-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT + * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines + * to be unique within first 32 characters. + * Removed AHCI support. + * Removed SOP support. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. + * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. + * -------------------------------------------------------------------------- + +mpi2_cnfg.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). + * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. 
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. + * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. + * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. + * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. 
the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. + * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities + * for PCIe link as obsolete. + * Added SpinupFlags field containing a Disable Spin-up bit + * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO + * Unit Page 4. + * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. + * Added UEFIVersion field to BIOS Page 1 and defined new + * BiosOptions bits. + * Incorporating additions for MPI v2.5. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. + * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. + * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for + * future use. + * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for + * MPI2_CONFIG_PAGE_MAN_7. + * Added EnclosureLevel and ConnectorName fields to + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added EnclosureLevel field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 01-08-14 02.00.28 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and + * more defines for the BiosOptions field.. + * 11-18-14 02.00.30 Updated copyright information. + * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. + * Added AdapterOrderAux fields to BIOS Page 3. + * 03-16-15 02.00.31 Updated for MPI v2.6. + * Added BoardPowerRequirement, PCISlotPowerAllocation, and + * Flags field to IO Unit Page 7. + * Added IO Unit Page 11. + * Added new SAS Phy Event codes + * Added PCIe configuration pages. + * 03-19-15 02.00.32 Fixed PCIe Link Config page structure names to be + * unique in first 32 characters. 
+ * 05-25-15 02.00.33 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 08-25-15 02.00.34 Added PCIe Device Page 2 SGL format capability.
+ * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added Link field to PCIe Link Pages
+ * Added EnclosureLevel and ConnectorName to PCIe
+ * Device Page 0.
+ * Added define for PCIE IoUnit page 1 max rate shift.
+ * Added comment for reserved ExtPageTypes.
+ * Added SAS 4 22.5 gbs speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Added NegotiatedLinkRate and NegotiatedPortWidth to
+ * PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ * Changed declaration of ConnectorName in PCIe DevicePage0
+ * to match SAS DevicePage 0.
+ * Added SATADeviceWaitTime to IO Unit Page 11.
+ * Added MPI26_MFGPAGE_DEVID_SAS4008
+ * Added x16 PCIe width to IO Unit Page 7
+ * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ * phy data.
+ * Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ * Added ChassisSlot field to SAS Enclosure Page 0.
+ * Added ChassisSlot Valid bit (bit 5) to the Flags field
+ * in SAS Enclosure Page 0.
+ * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
+ * MPI26_MFGPAGE_DEVID_SAS3916 defines.
+ * Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
+ * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
+ * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
+ * Added NOIOB field to PCIe Device Page 2.
+ * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
+ * the Capabilities field of PCIe Device Page 2.
+ * --------------------------------------------------------------------------
+
+mpi2_init.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
+ * 11-18-14 02.00.16 Updated copyright information.
+ * 03-16-15 02.00.17 Updated for MPI v2.6.
+ * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
+ * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
+ * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
+ * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
+ * 12-18-15 02.00.19 Added EEDPObservedValue to SCSI IO Reply message.
+ * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
+ * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
+ * be unique within first 32 characters.
+ * --------------------------------------------------------------------------
+
+mpi2_ioc.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocolFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * 11-18-14 02.00.25 Updated copyright information.
+ * 03-16-15 02.00.26 Updated for MPI v2.6.
+ * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and
+ * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT.
+ * Added MPI2_EVENT_PCIE_LINK_COUNTER and
+ * MPI26_EVENT_DATA_PCIE_LINK_COUNTER.
+ * Added MPI26_CTRL_OP_SHUTDOWN.
+ * Added MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG
+ * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
+ * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to
+ * support NVMe SGL format control.
+ * Added PCIe SRIOV support.
+ * 02-17-16 02.00.28 Added SAS 4 22.5 gbs speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.29 Added Archclass for 4008 product.
+ * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED.
+ * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
+ * Request Message.
+ * Added new defines for the ImageType field of FWUpload
+ * Request Message.
+ * Added new values for the RegionType field in the Layout
+ * Data sections of the FLASH Layout Extended Image Data.
+ * Added new defines for the ReasonCode field of
+ * Active Cable Exception Event.
+ * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
+ * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
+ * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
+ * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
+ * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
+ * defines for the ReasonCode field.
+ * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
+ * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
+ * to the ReasonCode field in PCIe Device Status Change
+ * Event Data.
+ * --------------------------------------------------------------------------
+
+mpi2_raid.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
+ * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * --------------------------------------------------------------------------
+
+mpi2_sas.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. + * -------------------------------------------------------------------------- + +mpi2_targ.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to + * BufferPostFlags field of CommandBufferPostBase Request. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 10-02-08 02.00.03 Removed NextCmdBufferOffset from + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * Target Status Send Request only takes a single SGE for + * response data. + * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure. + * 11-18-11 02.00.05 Incorporating additions for MPI v2.5. + * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT + * request message structure. + * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and + * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS. + * 06-13-14 02.00.07 Added MinMSIxIndex and MaxMSIxIndex fields to + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * 11-18-14 02.00.08 Updated copyright information. + * 03-16-15 02.00.09 Updated for MPI v2.6. + * Added MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH. + * -------------------------------------------------------------------------- + +mpi2_tool.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. + * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean + * Tool Request Message. + * -------------------------------------------------------------------------- + +mpi2_type.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 11-18-14 02.00.01 Updated copyright information. 
+ * --------------------------------------------------------------------------
+
+mpi2_ra.h
+ * 05-06-09 02.00.00 Initial version.
+ * 11-18-14 02.00.01 Updated copyright information.
+ * --------------------------------------------------------------------------
+
+mpi2_hbd.h
+ * 10-28-09 02.00.00 Initial version.
+ * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from
+ *                   HBD Action request, replaced by AdditionalInfo field.
+ * 11-18-11 02.00.02 Incorporating additions for MPI v2.5.
+ * 11-18-14 02.00.03 Updated copyright information.
+ * 02-17-16 02.00.04 Added SAS 4 22.5 Gb/s speed support.
+ * --------------------------------------------------------------------------
+
+mpi2_pci.h
+ * 03-16-15 02.00.00 Initial version.
+ * 02-17-16 02.00.01 Removed AHCI support.
+ *                   Removed SOP support.
+ * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
+ *                   NVME Encapsulated Request.
+ * --------------------------------------------------------------------------
+
+mpi2_history.txt Parts list history
+
+Filename    02.00.50 02.00.49 02.00.48
+----------  -------- -------- --------
+mpi2.h      02.00.50 02.00.49 02.00.48
+mpi2_cnfg.h 02.00.42 02.00.41 02.00.40
+mpi2_init.h 02.00.21 02.00.21 02.00.21
+mpi2_ioc.h  02.00.34 02.00.33 02.00.32
+mpi2_raid.h 02.00.11 02.00.11 02.00.11
+mpi2_sas.h  02.00.10 02.00.10 02.00.10
+mpi2_targ.h 02.00.09 02.00.09 02.00.09
+mpi2_tool.h 02.00.14 02.00.14 02.00.14
+mpi2_type.h 02.00.01 02.00.01 02.00.01
+mpi2_ra.h   02.00.01 02.00.01 02.00.01
+mpi2_hbd.h  02.00.04 02.00.04 02.00.04
+mpi2_pci.h  02.00.02 02.00.02 02.00.02
+
+Filename    02.00.47 02.00.46 02.00.45 02.00.44 02.00.43 02.00.42
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.47 02.00.46 02.00.45 02.00.44 02.00.43 02.00.42
+mpi2_cnfg.h 02.00.39 02.00.39 02.00.38 02.00.37 02.00.36 02.00.35
+mpi2_init.h 02.00.21 02.00.21 02.00.21 02.00.21 02.00.21 02.00.20
+mpi2_ioc.h  02.00.31 02.00.30 02.00.29 02.00.28 02.00.28 02.00.27
+mpi2_raid.h 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11
+mpi2_sas.h  02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10
+mpi2_targ.h 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09
+mpi2_tool.h 02.00.14 02.00.14 02.00.13 02.00.13 02.00.13 02.00.13
+mpi2_type.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01
+mpi2_ra.h   02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01
+mpi2_hbd.h  02.00.04 02.00.04 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_pci.h  02.00.02 02.00.02 02.00.02 02.00.01 02.00.01 02.00.00
+
+Filename    02.00.41 02.00.40 02.00.39 02.00.38 02.00.37 02.00.36
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.41 02.00.40 02.00.39 02.00.38 02.00.37 02.00.36
+mpi2_cnfg.h 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30
+mpi2_init.h 02.00.19 02.00.18 02.00.17 02.00.17 02.00.17 02.00.16
+mpi2_ioc.h  02.00.27 02.00.27 02.00.26 02.00.26 02.00.26 02.00.25
+mpi2_raid.h 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11
+mpi2_sas.h  02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.09
+mpi2_targ.h 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 02.00.08
+mpi2_tool.h 02.00.13 02.00.13 02.00.13 02.00.13 02.00.13 02.00.13
+mpi2_type.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01
+mpi2_ra.h   02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01
+mpi2_hbd.h  02.00.03 02.00.03 02.00.03 02.00.03 02.00.03 02.00.03
+mpi2_pci.h  02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+
+Filename    02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30
+mpi2_cnfg.h 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.25
+mpi2_init.h 02.00.15 02.00.15 02.00.15 02.00.15 02.00.15 02.00.15
+mpi2_ioc.h  02.00.24 02.00.24 02.00.24 02.00.23 02.00.22 02.00.22
+mpi2_raid.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.09
+mpi2_sas.h  02.00.08 02.00.08 02.00.08 02.00.08 02.00.07 02.00.07
+mpi2_targ.h 02.00.07 02.00.06 02.00.06 02.00.06 02.00.06 02.00.06
+mpi2_tool.h 02.00.12 02.00.12 02.00.11 02.00.11 02.00.10 02.00.10
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h   02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h  02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
+
+Filename    02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24
+mpi2_cnfg.h 02.00.24 02.00.23 02.00.22 02.00.22 02.00.22 02.00.22
+mpi2_init.h 02.00.14 02.00.14 02.00.14 02.00.14 02.00.13 02.00.13
+mpi2_ioc.h  02.00.22 02.00.22 02.00.22 02.00.21 02.00.21 02.00.20
+mpi2_raid.h 02.00.09 02.00.09 02.00.09 02.00.08 02.00.08 02.00.08
+mpi2_sas.h  02.00.07 02.00.07 02.00.07 02.00.07 02.00.06 02.00.06
+mpi2_targ.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05
+mpi2_tool.h 02.00.10 02.00.10 02.00.10 02.00.09 02.00.08 02.00.08
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h   02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h  02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
+
+Filename    02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18
+mpi2_cnfg.h 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 02.00.17
+mpi2_init.h 02.00.12 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11
+mpi2_ioc.h  02.00.20 02.00.19 02.00.18 02.00.17 02.00.17 02.00.16
+mpi2_raid.h 02.00.07 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05
+mpi2_sas.h  02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 02.00.05
+mpi2_targ.h 02.00.05 02.00.04 02.00.04 02.00.04 02.00.04 02.00.04
+mpi2_tool.h 02.00.08 02.00.07 02.00.07 02.00.06 02.00.06 02.00.06
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h   02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h  02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01
+
+Filename    02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12
+mpi2_cnfg.h 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11
+mpi2_init.h 02.00.10 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07
+mpi2_ioc.h  02.00.15 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11
+mpi2_raid.h 02.00.05 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_sas.h  02.00.05 02.00.04 02.00.03 02.00.03 02.00.02 02.00.02
+mpi2_targ.h 02.00.04 02.00.04 02.00.04 02.00.03 02.00.03 02.00.03
+mpi2_tool.h 02.00.06 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h   02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h  02.00.01 02.00.00 02.00.00 02.00.00
+
+Filename    02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
+mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
+mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
+mpi2_ioc.h  02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
+mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
+mpi2_sas.h  02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
+mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
+mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+
+Filename    02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+----------  -------- -------- -------- -------- -------- --------
+mpi2.h      02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ioc.h  02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
+mpi2_sas.h  02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
+mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
+mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
old mode 100644
new mode 100755
index a3f677853098..4631c92693d6
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -1,113 +1,110 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2016-2020 Broadcom Limited. All rights reserved.
+ * Copyright 2016-2020 Broadcom Limited. All rights reserved.
 *
- * Name: mpi2_image.h
- * Description: Contains definitions for firmware and other component images
- * Creation Date: 04/02/2018
- * Version: 02.06.04
+ * Name: mpi2_image.h
+ * Description: Contains definitions for firmware and other component images.
+ * Creation Date: 04/02/2018
+ * Version: 02.06.05
 *
 *
- * Version History
- * ---------------
+ * Version History
+ * ---------------
 *
- * Date      Version  Description
- * --------  -------- ------------------------------------------------------
- * 08-01-18  02.06.00 Initial version for MPI 2.6.5.
- * 08-14-18  02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
- * 08-28-18  02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
- * 09-07-18  02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
- * 12-17-18  02.06.04 Addd MPI2_EXT_IMAGE_TYPE_PBLP
- *                    Shorten some defines to be compatible with DOS
+ * Date      Version  Description
+ * --------  -------- ------------------------------------------------------
+ * 08-01-18  02.06.00 Initial version for MPI 2.6.5.
+ * 08-14-18  02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
+ * 08-28-18  02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
+ * 09-07-18  02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 12-17-18  02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP
+ *                    Shorten some defines to be compatible with DOS
+ * 06-24-19  02.06.05 Whitespace adjustments to help with identifier checking tool.
 */
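The structures defined below chain component images together through their NextImageHeaderOffset fields. A minimal sketch of how a host flash tool might walk that chain, assuming the whole image has already been read into memory at `base` and that the offsets are little-endian byte offsets from the start of the image (the helper itself is an illustration, not part of this header):

#include <endian.h>   /* le32toh(); assumed available on the build host */
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: follow NextImageHeaderOffset from the FW image
 * header through the extended image headers; offset 0 ends the chain. */
static void walk_component_images(const uint8_t *base, size_t len)
{
	const Mpi2FWImageHeader_t *fw = (const Mpi2FWImageHeader_t *)base;
	uint32_t off = le32toh(fw->NextImageHeaderOffset);

	while (off != 0 && off + MPI2_EXT_IMAGE_HEADER_SIZE <= len) {
		const Mpi2ExtImageHeader_t *ext =
			(const Mpi2ExtImageHeader_t *)(base + off);
		/* ext->ImageType selects NVDATA, FLASH_LAYOUT, RDE, ... */
		off = le32toh(ext->NextImageHeaderOffset);
	}
}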
 #ifndef MPI2_IMAGE_H
 #define MPI2_IMAGE_H

-/*FW Image Header */
-typedef struct _MPI2_FW_IMAGE_HEADER {
- U32 Signature; /*0x00 */
- U32 Signature0; /*0x04 */
- U32 Signature1; /*0x08 */
- U32 Signature2; /*0x0C */
- MPI2_VERSION_UNION MPIVersion; /*0x10 */
- MPI2_VERSION_UNION FWVersion; /*0x14 */
- MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
- MPI2_VERSION_UNION PackageVersion; /*0x1C */
- U16 VendorID; /*0x20 */
- U16 ProductID; /*0x22 */
- U16 ProtocolFlags; /*0x24 */
- U16 Reserved26; /*0x26 */
- U32 IOCCapabilities; /*0x28 */
- U32 ImageSize; /*0x2C */
- U32 NextImageHeaderOffset; /*0x30 */
- U32 Checksum; /*0x34 */
- U32 Reserved38; /*0x38 */
- U32 Reserved3C; /*0x3C */
- U32 Reserved40; /*0x40 */
- U32 Reserved44; /*0x44 */
- U32 Reserved48; /*0x48 */
- U32 Reserved4C; /*0x4C */
- U32 Reserved50; /*0x50 */
- U32 Reserved54; /*0x54 */
- U32 Reserved58; /*0x58 */
- U32 Reserved5C; /*0x5C */
- U32 BootFlags; /*0x60 */
- U32 FirmwareVersionNameWhat; /*0x64 */
- U8 FirmwareVersionName[32]; /*0x68 */
- U32 VendorNameWhat; /*0x88 */
- U8 VendorName[32]; /*0x8C */
- U32 PackageNameWhat; /*0x88 */
- U8 PackageName[32]; /*0x8C */
- U32 ReservedD0; /*0xD0 */
- U32 ReservedD4; /*0xD4 */
- U32 ReservedD8; /*0xD8 */
- U32 ReservedDC; /*0xDC */
- U32 ReservedE0; /*0xE0 */
- U32 ReservedE4; /*0xE4 */
- U32 ReservedE8; /*0xE8 */
- U32 ReservedEC; /*0xEC */
- U32 ReservedF0; /*0xF0 */
- U32 ReservedF4; /*0xF4 */
- U32 ReservedF8; /*0xF8 */
- U32 ReservedFC; /*0xFC */
-} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
- Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+/* FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER
+{
+ U32 Signature; /* 0x00 */
+ U32 Signature0; /* 0x04 */
+ U32 Signature1; /* 0x08 */
+ U32 Signature2; /* 0x0C */
+ MPI2_VERSION_UNION MPIVersion; /* 0x10 */
+ MPI2_VERSION_UNION FWVersion; /* 0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /* 0x18 */
+ MPI2_VERSION_UNION PackageVersion; /* 0x1C */
+ U16 VendorID; /* 0x20 */
+ U16 ProductID; /* 0x22 */
+ U16 ProtocolFlags; /* 0x24 */
+ U16 Reserved26; /* 0x26 */
+ U32 IOCCapabilities; /* 0x28 */
+ U32 ImageSize; /* 0x2C */
+ U32 NextImageHeaderOffset; /* 0x30 */
+ U32 Checksum; /* 0x34 */
+ U32 Reserved38; /* 0x38 */
+ U32 Reserved3C; /* 0x3C */
+ U32 Reserved40; /* 0x40 */
+ U32 Reserved44; /* 0x44 */
+ U32 Reserved48; /* 0x48 */
+ U32 Reserved4C; /* 0x4C */
+ U32 Reserved50; /* 0x50 */
+ U32 Reserved54; /* 0x54 */
+ U32 Reserved58; /* 0x58 */
+ U32 Reserved5C; /* 0x5C */
+ U32 BootFlags; /* 0x60 */ /* reserved in MPI v2.5 and earlier */
+ U32 FirmwareVersionNameWhat; /* 0x64 */
+ U8 FirmwareVersionName[32]; /* 0x68 */
+ U32 VendorNameWhat; /* 0x88 */
+ U8 VendorName[32]; /* 0x8C */
+ U32 PackageNameWhat; /* 0xAC */
+ U8 PackageName[32]; /* 0xB0 */
+ U32 ReservedD0; /* 0xD0 */
+ U32 ReservedD4; /* 0xD4 */
+ U32 ReservedD8; /* 0xD8 */
+ U32 ReservedDC; /* 0xDC */
+ U32 ReservedE0; /* 0xE0 */
+ U32 ReservedE4; /* 0xE4 */
+ U32 ReservedE8; /* 0xE8 */
+ U32 ReservedEC; /* 0xEC */
+ U32 ReservedF0; /* 0xF0 */
+ U32 ReservedF4; /* 0xF4 */
+ U32 ReservedF8; /* 0xF8 */
+ U32 ReservedFC; /* 0xFC */
+} MPI2_FW_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, MPI2_POINTER pMpi2FWImageHeader_t;

-/*Signature field */
+/* Signature field */
 #define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
 #define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
 #define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
 #define MPI26_FW_HEADER_SIGNATURE (0xEB000000)

-/*Signature0 field */
+/* Signature0 field */
 #define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
 #define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
-/*Last byte is defined by architecture */
-#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
+#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) /* Last byte is defined by architecture */
 #define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
 #define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
 #define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
-/*legacy (0x5AEAA55A) */
 #define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
-#define MPI26_FW_HEADER_SIGNATURE0 \
- (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
-#define MPI26_FW_HEADER_SIGNATURE0_3516 \
- (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
-#define MPI26_FW_HEADER_SIGNATURE0_4008 \
- (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
+#define MPI26_FW_HEADER_SIGNATURE0 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_0) /* legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0_3516 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+#define MPI26_FW_HEADER_SIGNATURE0_4008 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_3)

-/*Signature1 field */
+/* Signature1 field */
 #define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
 #define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
 #define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)

-/*Signature2 field */
+/* Signature2 field */
 #define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
 #define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
 #define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
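Taken together, the four signature words let host code reject a blob that is not an MPI v2.6 image before trusting any other header field. A sketch of such a check, assuming a little-endian header already read into memory (le32toh and the 0xFFFFFF00 mask for the architecture byte are assumptions of the example, not defines from this file):

static int mpi26_image_signatures_ok(const Mpi2FWImageHeader_t *hdr)
{
	uint32_t sig0 = le32toh(hdr->Signature0);

	if ((le32toh(hdr->Signature) & MPI2_FW_HEADER_SIGNATURE_MASK) !=
	    MPI26_FW_HEADER_SIGNATURE)
		return 0;
	/* Signature0 is the fixed base plus one architecture byte */
	if ((sig0 & 0xFFFFFF00) != MPI26_FW_HEADER_SIGNATURE0_BASE)
		return 0;
	return le32toh(hdr->Signature1) == MPI26_FW_HEADER_SIGNATURE1 &&
	       le32toh(hdr->Signature2) == MPI26_FW_HEADER_SIGNATURE2;
}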
-/*defines for using the ProductID field */
+
+/* defines for using the ProductID field */
 #define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
 #define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
@@ -116,17 +113,18 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
 #define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
 #define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)

+
 #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
-/*SAS ProductID Family bits */
+/* SAS ProductID Family bits */
 #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
 #define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
 #define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
 #define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
 #define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)

-/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */

-/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+/* use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */

 #define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
 #define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
@@ -134,9 +132,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
 #define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
 #define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001)
 #define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002)
-/*This image has a auto-discovery version of SPI */
-#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004)
-
+#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004) /* This image has an auto-discovery version of SPI */
 #define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
@@ -149,70 +145,67 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
 * Component Image Format and related defines
 *
 ****************************************************************************/

-/*Maximum number of Hash Exclusion entries in a Component Image Header */
+/* Maximum number of Hash Exclusion entries in a Component Image Header */
 #define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4)

-/*Hash Exclusion Format */
-typedef
struct _MPI26_HASH_EXCLUSION_FORMAT { - U32 Offset; /*0x00 */ - U32 Size; /*0x04 */ -} MPI26_HASH_EXCLUSION_FORMAT, - *PTR_MPI26_HASH_EXCLUSION_FORMAT, - Mpi26HashSxclusionFormat_t, - *pMpi26HashExclusionFormat_t; +/* Hash Exclusion Format */ +typedef struct _MPI26_HASH_EXCLUSION_FORMAT +{ + U32 Offset; /* 0x00 */ + U32 Size; /* 0x04 */ +} MPI26_HASH_EXCLUSION_FORMAT, MPI2_POINTER PTR_MPI26_HASH_EXCLUSION_FORMAT, +Mpi26HashSxclusionFormat_t, MPI2_POINTER pMpi26HashExclusionFormat_t; -/*FW Image Header */ -typedef struct _MPI26_COMPONENT_IMAGE_HEADER { - U32 Signature0; /*0x00 */ - U32 LoadAddress; /*0x04 */ - U32 DataSize; /*0x08 */ - U32 StartAddress; /*0x0C */ - U32 Signature1; /*0x10 */ - U32 FlashOffset; /*0x14 */ - U32 FlashSize; /*0x18 */ - U32 VersionStringOffset; /*0x1C */ - U32 BuildDateStringOffset; /*0x20 */ - U32 BuildTimeStringOffset; /*0x24 */ - U32 EnvironmentVariableOffset; /*0x28 */ - U32 ApplicationSpecific; /*0x2C */ - U32 Signature2; /*0x30 */ - U32 HeaderSize; /*0x34 */ - U32 Crc; /*0x38 */ - U8 NotFlashImage; /*0x3C */ - U8 Compressed; /*0x3D */ - U16 Reserved3E; /*0x3E */ - U32 SecondaryFlashOffset; /*0x40 */ - U32 Reserved44; /*0x44 */ - U32 Reserved48; /*0x48 */ - MPI2_VERSION_UNION RMCInterfaceVersion; /*0x4C */ - MPI2_VERSION_UNION Reserved50; /*0x50 */ - MPI2_VERSION_UNION FWVersion; /*0x54 */ - MPI2_VERSION_UNION NvdataVersion; /*0x58 */ - MPI26_HASH_EXCLUSION_FORMAT - HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL];/*0x5C */ - U32 NextImageHeaderOffset; /*0x7C */ - U32 Reserved80[32]; /*0x80 -- 0xFC */ -} MPI26_COMPONENT_IMAGE_HEADER, - *PTR_MPI26_COMPONENT_IMAGE_HEADER, - Mpi26ComponentImageHeader_t, - *pMpi26ComponentImageHeader_t; +/* FW Image Header */ +typedef struct _MPI26_COMPONENT_IMAGE_HEADER +{ + U32 Signature0; /* 0x00 */ + U32 LoadAddress; /* 0x04 */ + U32 DataSize; /* 0x08 */ + U32 StartAddress; /* 0x0C */ + U32 Signature1; /* 0x10 */ + U32 FlashOffset; /* 0x14 */ + U32 FlashSize; /* 0x18 */ + U32 VersionStringOffset; /* 0x1C */ + U32 BuildDateStringOffset; /* 0x20 */ + U32 BuildTimeStringOffset; /* 0x24 */ + U32 EnvironmentVariableOffset; /* 0x28 */ + U32 ApplicationSpecific; /* 0x2C */ + U32 Signature2; /* 0x30 */ + U32 HeaderSize; /* 0x34 */ + U32 Crc; /* 0x38 */ + U8 NotFlashImage; /* 0x3C */ + U8 Compressed; /* 0x3D */ + U16 Reserved3E; /* 0x3E */ + U32 SecondaryFlashOffset; /* 0x40 */ + U32 Reserved44; /* 0x44 */ + U32 Reserved48; /* 0x48 */ + MPI2_VERSION_UNION RMCInterfaceVersion; /* 0x4C */ + MPI2_VERSION_UNION Reserved50; /* 0x50 */ + MPI2_VERSION_UNION FWVersion; /* 0x54 */ + MPI2_VERSION_UNION NvdataVersion; /* 0x58 */ + MPI26_HASH_EXCLUSION_FORMAT HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL]; /* 0x5C */ + U32 NextImageHeaderOffset; /* 0x7C */ + U32 Reserved80[32]; /* 0x80 -- 0xFC */ +} MPI26_COMPONENT_IMAGE_HEADER, MPI2_POINTER PTR_MPI26_COMPONENT_IMAGE_HEADER, + Mpi26ComponentImageHeader_t, MPI2_POINTER pMpi26ComponentImageHeader_t; /**** Definitions for Signature0 field ****/ #define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) /**** Definitions for Signature1 field ****/ -#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) -#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) -#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) -#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) -#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) -#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) -#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) -#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) -#define MPI26_IMAGE_HEADER_SIG1_NVDATA 
(0x5444564E) -#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) -#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) +#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) +#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) +#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) +#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) +#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) +#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) +#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) +#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) +#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) +#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) +#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) /**** Definitions for Signature2 field ****/ #define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) @@ -245,23 +238,25 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER { #define MPI26_IMAGE_HEADER_SIZE (0x100) -/*Extended Image Header */ -typedef struct _MPI2_EXT_IMAGE_HEADER { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 Checksum; /*0x04 */ - U32 ImageSize; /*0x08 */ - U32 NextImageHeaderOffset; /*0x0C */ - U32 PackageVersion; /*0x10 */ - U32 Reserved3; /*0x14 */ - U32 Reserved4; /*0x18 */ - U32 Reserved5; /*0x1C */ - U8 IdentifyString[32]; /*0x20 */ -} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER, - Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t; +/* Extended Image Header */ +typedef struct _MPI2_EXT_IMAGE_HEADER -/*useful offsets */ +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Checksum; /* 0x04 */ + U32 ImageSize; /* 0x08 */ + U32 NextImageHeaderOffset; /* 0x0C */ + U32 PackageVersion; /* 0x10 */ + U32 Reserved3; /* 0x14 */ + U32 Reserved4; /* 0x18 */ + U32 Reserved5; /* 0x1C */ + U8 IdentifyString[32]; /* 0x20 */ +} MPI2_EXT_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_EXT_IMAGE_HEADER, + Mpi2ExtImageHeader_t, MPI2_POINTER pMpi2ExtImageHeader_t; + +/* useful offsets */ #define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) #define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) #define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) @@ -269,7 +264,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER { #define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) -/*defines for the ImageType field */ +/* defines for the ImageType field */ #define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) #define MPI2_EXT_IMAGE_TYPE_FW (0x01) #define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) @@ -278,66 +273,70 @@ typedef struct _MPI2_EXT_IMAGE_HEADER { #define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) #define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) #define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) -#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) /* MPI v2.5 and newer */ #define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) #define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B) #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) -#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) +#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */ -/*FLASH Layout Extended Image Data */ + +/* FLASH Layout Extended Image Data */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check RegionsPerLayout at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check RegionsPerLayout at runtime. 
 */
 #ifndef MPI2_FLASH_NUMBER_OF_REGIONS
 #define MPI2_FLASH_NUMBER_OF_REGIONS (1)
 #endif

 /*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfLayouts at runtime.
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumberOfLayouts at runtime.
 */
 #ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
 #define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
 #endif

-typedef struct _MPI2_FLASH_REGION {
- U8 RegionType; /*0x00 */
- U8 Reserved1; /*0x01 */
- U16 Reserved2; /*0x02 */
- U32 RegionOffset; /*0x04 */
- U32 RegionSize; /*0x08 */
- U32 Reserved3; /*0x0C */
-} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
- Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+typedef struct _MPI2_FLASH_REGION
+{
+ U8 RegionType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 RegionOffset; /* 0x04 */
+ U32 RegionSize; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+} MPI2_FLASH_REGION, MPI2_POINTER PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, MPI2_POINTER pMpi2FlashRegion_t;

-typedef struct _MPI2_FLASH_LAYOUT {
- U32 FlashSize; /*0x00 */
- U32 Reserved1; /*0x04 */
- U32 Reserved2; /*0x08 */
- U32 Reserved3; /*0x0C */
- MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
-} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
- Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+typedef struct _MPI2_FLASH_LAYOUT
+{
+ U32 FlashSize; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];/* 0x10 */
+} MPI2_FLASH_LAYOUT, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, MPI2_POINTER pMpi2FlashLayout_t;

-typedef struct _MPI2_FLASH_LAYOUT_DATA {
- U8 ImageRevision; /*0x00 */
- U8 Reserved1; /*0x01 */
- U8 SizeOfRegion; /*0x02 */
- U8 Reserved2; /*0x03 */
- U16 NumberOfLayouts; /*0x04 */
- U16 RegionsPerLayout; /*0x06 */
- U16 MinimumSectorAlignment; /*0x08 */
- U16 Reserved3; /*0x0A */
- U32 Reserved4; /*0x0C */
- MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
-} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
- Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+typedef struct _MPI2_FLASH_LAYOUT_DATA
+{
+ U8 ImageRevision; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 SizeOfRegion; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+ U16 NumberOfLayouts; /* 0x04 */
+ U16 RegionsPerLayout; /* 0x06 */
+ U16 MinimumSectorAlignment; /* 0x08 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];/* 0x10 */
+} MPI2_FLASH_LAYOUT_DATA, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, MPI2_POINTER pMpi2FlashLayoutData_t;

-/*defines for the RegionType field */
+/* defines for the RegionType field */
 #define MPI2_FLASH_REGION_UNUSED (0x00)
 #define MPI2_FLASH_REGION_FIRMWARE (0x01)
 #define MPI2_FLASH_REGION_BIOS (0x02)
@@ -348,7 +347,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
 #define MPI2_FLASH_REGION_CONFIG_2 (0x08)
 #define MPI2_FLASH_REGION_MEGARAID (0x09)
 #define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
-#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
+#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) /* older name */
 #define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
 #define MPI2_FLASH_REGION_SBR (0x0E)
 #define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
@@ -360,76 +359,81 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
 #define MPI2_FLASH_REGION_CPLD (0x15)
 #define MPI2_FLASH_REGION_PSOC (0x16)

-/*ImageRevision */
+/* ImageRevision */
 #define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
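Because RegionsPerLayout and NumberOfLayouts override the compile-time array bounds above, host code has to step through the layout data with runtime sizes. A sketch under that assumption (bounds checking against the image length is elided, and le16toh is assumed from the host's endian helpers):

static void walk_flash_layouts(const Mpi2FlashLayoutData_t *fld)
{
	uint16_t nlayouts = le16toh(fld->NumberOfLayouts);
	uint16_t nregions = le16toh(fld->RegionsPerLayout);
	/* each layout carries nregions regions, not the compile-time count */
	size_t layout_size = sizeof(Mpi2FlashLayout_t) +
	    ((size_t)nregions - MPI2_FLASH_NUMBER_OF_REGIONS) *
	    sizeof(Mpi2FlashRegion_t);
	const uint8_t *p = (const uint8_t *)fld->Layout;

	for (uint16_t l = 0; l < nlayouts; l++, p += layout_size) {
		const Mpi2FlashLayout_t *layout = (const Mpi2FlashLayout_t *)p;
		for (uint16_t r = 0; r < nregions; r++) {
			/* layout->Region[r].RegionType selects FIRMWARE,
			 * BIOS, SBR, ... per the defines above */
			(void)layout->Region[r];
		}
	}
}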

-/*Supported Devices Extended Image Data */
+
+/* Supported Devices Extended Image Data */
 /*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfDevices at runtime.
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumberOfDevices at runtime.
 */
 #ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
 #define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
 #endif

-typedef struct _MPI2_SUPPORTED_DEVICE {
- U16 DeviceID; /*0x00 */
- U16 VendorID; /*0x02 */
- U16 DeviceIDMask; /*0x04 */
- U16 Reserved1; /*0x06 */
- U8 LowPCIRev; /*0x08 */
- U8 HighPCIRev; /*0x09 */
- U16 Reserved2; /*0x0A */
- U32 Reserved3; /*0x0C */
-} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
- Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+typedef struct _MPI2_SUPPORTED_DEVICE
+{
+ U16 DeviceID; /* 0x00 */
+ U16 VendorID; /* 0x02 */
+ U16 DeviceIDMask; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+ U8 LowPCIRev; /* 0x08 */
+ U8 HighPCIRev; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+} MPI2_SUPPORTED_DEVICE, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, MPI2_POINTER pMpi2SupportedDevice_t;

-typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
- U8 ImageRevision; /*0x00 */
- U8 Reserved1; /*0x01 */
- U8 NumberOfDevices; /*0x02 */
- U8 Reserved2; /*0x03 */
- U32 Reserved3; /*0x04 */
- MPI2_SUPPORTED_DEVICE
- SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
-} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
- Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA
+{
+ U8 ImageRevision; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 NumberOfDevices; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+ U32 Reserved3; /* 0x04 */
+ MPI2_SUPPORTED_DEVICE SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES]; /* 0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, MPI2_POINTER pMpi2SupportedDevicesData_t;

-/*ImageRevision */
+/* ImageRevision */
 #define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)

-/*Init Extended Image Data */
-typedef struct _MPI2_INIT_IMAGE_FOOTER {
- U32 BootFlags; /*0x00 */
- U32 ImageSize; /*0x04 */
- U32 Signature0; /*0x08 */
- U32 Signature1; /*0x0C */
- U32 Signature2; /*0x10 */
- U32 ResetVector; /*0x14 */
-} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
- Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+/* Init Extended Image Data */

-/*defines for the BootFlags field */
+typedef struct _MPI2_INIT_IMAGE_FOOTER
+
+{
+ U32 BootFlags; /* 0x00 */
+ U32 ImageSize; /* 0x04 */
+ U32 Signature0; /* 0x08 */
+ U32 Signature1; /* 0x0C */
+ U32 Signature2; /* 0x10 */
+ U32 ResetVector; /* 0x14 */
+} MPI2_INIT_IMAGE_FOOTER, MPI2_POINTER PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, MPI2_POINTER pMpi2InitImageFooter_t;
+
+/* defines for the BootFlags field */
 #define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)

-/*defines for the ImageSize field */
+/* defines for the ImageSize field */
 #define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)

-/*defines for the Signature0 field */
+/* defines for the Signature0 field */
 #define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
 #define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)

-/*defines for the Signature1 field */
+/* defines for the Signature1 field */
 #define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
 #define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)

-/*defines for the Signature2 field
*/ +/* defines for the Signature2 field */ #define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) #define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) -/*Signature fields as individual bytes */ +/* Signature fields as individual bytes */ #define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) #define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) #define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) @@ -445,21 +449,22 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER { #define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) #define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) -/*defines for the ResetVector field */ +/* defines for the ResetVector field */ #define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) /* Encrypted Hash Extended Image Data */ -typedef struct _MPI25_ENCRYPTED_HASH_ENTRY { - U8 HashImageType; /*0x00 */ - U8 HashAlgorithm; /*0x01 */ - U8 EncryptionAlgorithm; /*0x02 */ - U8 Reserved1; /*0x03 */ - U32 Reserved2; /*0x04 */ - U32 EncryptedHash[1]; /*0x08 */ /* variable length */ -} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY, -Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY +{ + U8 HashImageType; /* 0x00 */ + U8 HashAlgorithm; /* 0x01 */ + U8 EncryptionAlgorithm; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 EncryptedHash[1]; /* 0x08 */ /* variable length */ +} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY, + Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t; /* values for HashImageType */ #define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) @@ -475,12 +480,12 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; #define MPI25_HASH_ALGORITHM_UNUSED (0x00) #define MPI25_HASH_ALGORITHM_SHA256 (0x01) -#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0) -#define MPI26_HASH_ALGORITHM_VER_NONE (0x00) -#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20) -#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40) -#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60) -#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) +#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0) +#define MPI26_HASH_ALGORITHM_VER_NONE (0x00) +#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20) +#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40) +#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60) +#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) #define MPI26_HASH_ALGORITHM_SIZE_256 (0x01) #define MPI26_HASH_ALGORITHM_SIZE_512 (0x02) @@ -496,14 +501,16 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; #define MPI26_ENCRYPTION_ALG_RSA2048 (0x04) #define MPI26_ENCRYPTION_ALG_RSA4096 (0x05) -typedef struct _MPI25_ENCRYPTED_HASH_DATA { - U8 ImageVersion; /*0x00 */ - U8 NumHash; /*0x01 */ - U16 Reserved1; /*0x02 */ - U32 Reserved2; /*0x04 */ - MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /*0x08 */ -} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA, -Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t; +typedef struct _MPI25_ENCRYPTED_HASH_DATA +{ + U8 ImageVersion; /* 0x00 */ + U8 NumHash; /* 0x01 */ + U16 Reserved1; /* 0x02 */ + U32 Reserved2; /* 0x04 */ + MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ /* variable number of entries */ +} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA, + Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t; #endif /* MPI2_IMAGE_H */ + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h old mode 100644 new mode 100755 index 8f1b903fe0a9..63a0f4968f0c --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -1,63 +1,62 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. * * - * Name: mpi2_init.h - * Title: MPI SCSI initiator mode messages and structures - * Creation Date: June 23, 2006 + * Name: mpi2_init.h + * Title: MPI SCSI initiator mode messages and structures + * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.21 + * mpi2_init.h Version: 02.00.21 * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. - * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. - * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. - * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. - * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. - * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO - * Control field Task Attribute flags. - * Moved LUN field defines to mpi2.h becasue they are - * common to many structures. - * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to - * Query Asynchronous Event. - * Defined two new bits in the SlotStatus field of the SCSI - * Enclosure Processor Request and Reply. - * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for - * both SCSI IO Error Reply and SCSI Task Management Reply. - * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. - * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. - * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. - * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. - * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. - * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. - * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command - * Priority to match SAM-4. - * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. - * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. - * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, - * replacing the Reserved4 field. - * 11-18-14 02.00.16 Updated copyright information. - * 03-16-15 02.00.17 Updated for MPI v2.6. - * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. - * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and - * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. - * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. - * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. - * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. - * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to - * be unique within first 32 characters. 
- * --------------------------------------------------------------------------
+ * Date      Version  Description
+ * --------  -------- ------------------------------------------------------
+ * 04-30-07  02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07  02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07  02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08  02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08  02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08  02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08  02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ *                    Control field Task Attribute flags.
+ *                    Moved LUN field defines to mpi2.h because they are
+ *                    common to many structures.
+ * 05-06-09  02.00.07 Changed task management type of Query Unit Attention to
+ *                    Query Asynchronous Event.
+ *                    Defined two new bits in the SlotStatus field of the SCSI
+ *                    Enclosure Processor Request and Reply.
+ * 10-28-09  02.00.08 Added defines for decoding the ResponseInfo bytes for
+ *                    both SCSI IO Error Reply and SCSI Task Management Reply.
+ *                    Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ *                    Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10  02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10  02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10  02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11  02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12  02.00.13 Added alternate defines for Task Priority / Command
+ *                    Priority to match SAM-4.
+ *                    Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12  02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13  02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ *                    replacing the Reserved4 field.
+ * 11-18-14  02.00.16 Updated copyright information.
+ * 03-16-15  02.00.17 Updated for MPI v2.6.
+ *                    Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
+ *                    Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
+ *                    MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
+ * 08-26-15  02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
+ * 12-18-15  02.00.19 Added EEDPObservedValue to SCSI IO Reply message.
+ * 01-04-16  02.00.20 Modified EEDP reported values in SCSI IO Reply message.
+ * 01-21-16  02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
+ * -------------------------------------------------------------------------- */ #ifndef MPI2_INIT_H @@ -65,109 +64,112 @@ /***************************************************************************** * -* SCSI Initiator Messages +* SCSI Initiator Messages * *****************************************************************************/ /**************************************************************************** -* SCSI IO messages and associated structures +* SCSI IO messages and associated structures ****************************************************************************/ -typedef struct _MPI2_SCSI_IO_CDB_EEDP32 { - U8 CDB[20]; /*0x00 */ - __be32 PrimaryReferenceTag; /*0x14 */ - U16 PrimaryApplicationTag; /*0x18 */ - U16 PrimaryApplicationTagMask; /*0x1A */ - U32 TransferLength; /*0x1C */ -} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32, - Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t; +typedef struct _MPI2_SCSI_IO_CDB_EEDP32 +{ + U8 CDB[20]; /* 0x00 */ + __be32 PrimaryReferenceTag; /* 0x14 */ + U16 PrimaryApplicationTag; /* 0x18 */ + U16 PrimaryApplicationTagMask; /* 0x1A */ + U32 TransferLength; /* 0x1C */ +} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32, + Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t; -/*MPI v2.0 CDB field */ -typedef union _MPI2_SCSI_IO_CDB_UNION { - U8 CDB32[32]; - MPI2_SCSI_IO_CDB_EEDP32 EEDP32; - MPI2_SGE_SIMPLE_UNION SGE; -} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION, - Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t; +/* MPI v2.0 CDB field */ +typedef union _MPI2_SCSI_IO_CDB_UNION +{ + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_SGE_SIMPLE_UNION SGE; +} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION, + Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t; -/*MPI v2.0 SCSI IO Request Message */ -typedef struct _MPI2_SCSI_IO_REQUEST { - U16 DevHandle; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved1; /*0x04 */ - U8 Reserved2; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U32 SenseBufferLowAddress; /*0x0C */ - U16 SGLFlags; /*0x10 */ - U8 SenseBufferLength; /*0x12 */ - U8 Reserved4; /*0x13 */ - U8 SGLOffset0; /*0x14 */ - U8 SGLOffset1; /*0x15 */ - U8 SGLOffset2; /*0x16 */ - U8 SGLOffset3; /*0x17 */ - U32 SkipCount; /*0x18 */ - U32 DataLength; /*0x1C */ - U32 BidirectionalDataLength; /*0x20 */ - U16 IoFlags; /*0x24 */ - U16 EEDPFlags; /*0x26 */ - U32 EEDPBlockSize; /*0x28 */ - U32 SecondaryReferenceTag; /*0x2C */ - U16 SecondaryApplicationTag; /*0x30 */ - U16 ApplicationTagTranslationMask; /*0x32 */ - U8 LUN[8]; /*0x34 */ - U32 Control; /*0x3C */ - MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */ +/* MPI v2.0 SCSI IO Request Message */ +typedef struct _MPI2_SCSI_IO_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 SenseBufferLowAddress; /* 0x0C */ + U16 SGLFlags; /* 0x10 */ + U8 SenseBufferLength; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U32 EEDPBlockSize; /* 0x28 */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 
ApplicationTagTranslationMask; /* 0x32 */ + U8 LUN[8]; /* 0x34 */ + U32 Control; /* 0x3C */ + MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ -#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ - MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ + MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; #endif - MPI2_SGE_IO_UNION SGL; /*0x60 */ + MPI2_SGE_IO_UNION SGL; /* 0x60 */ -} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST, - Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t; +} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST, + Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t; -/*SCSI IO MsgFlags bits */ +/* SCSI IO MsgFlags bits */ -/*MsgFlags for SenseBufferAddressSpace */ +/* MsgFlags for SenseBufferAddressSpace */ #define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) #define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) #define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) -#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) -#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) -#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) /* for MPI v2.5 and earlier only */ +#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) /* for MPI v2.5 and earlier only */ +#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) /* for MPI v2.6 only */ -/*SCSI IO SGLFlags bits */ +/* SCSI IO SGLFlags bits */ -/*base values for Data Location Address Space */ +/* base values for Data Location Address Space */ #define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) #define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) #define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) #define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) #define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) -/*base values for Type */ +/* base values for Type */ #define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) #define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) #define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) #define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) -/*shift values for each sub-field */ +/* shift values for each sub-field */ #define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) #define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) #define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) #define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) -/*number of SGLOffset fields */ +/* number of SGLOffset fields */ #define MPI2_SCSIIO_NUM_SGLOFFSETS (4) -/*SCSI IO IoFlags bits */ +/* SCSI IO IoFlags bits */ -/*Large CDB Address Space */ +/* Large CDB Address Space */ #define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) #define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) #define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) @@ -180,7 +182,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST { #define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) #define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) -/*SCSI IO EEDPFlags bits */ +/* SCSI IO EEDPFlags bits */ #define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) #define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) @@ -202,9 +204,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST { #define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) -/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ +/* SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ -/*SCSI IO Control bits */ +/* SCSI IO Control bits */ #define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) #define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) @@ -217,7 +219,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST { #define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) #define 
MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) -/*alternate name for the previous field; called Command Priority in SAM-4 */ +/* alternate name for the previous field; called Command Priority in SAM-4 */ #define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) #define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) @@ -232,65 +234,68 @@ typedef struct _MPI2_SCSI_IO_REQUEST { #define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) #define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) -/*MPI v2.5 CDB field */ -typedef union _MPI25_SCSI_IO_CDB_UNION { - U8 CDB32[32]; - MPI2_SCSI_IO_CDB_EEDP32 EEDP32; - MPI2_IEEE_SGE_SIMPLE64 SGE; -} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION, - Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t; -/*MPI v2.5/2.6 SCSI IO Request Message */ -typedef struct _MPI25_SCSI_IO_REQUEST { - U16 DevHandle; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved1; /*0x04 */ - U8 Reserved2; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U32 SenseBufferLowAddress; /*0x0C */ - U8 DMAFlags; /*0x10 */ - U8 Reserved5; /*0x11 */ - U8 SenseBufferLength; /*0x12 */ - U8 Reserved4; /*0x13 */ - U8 SGLOffset0; /*0x14 */ - U8 SGLOffset1; /*0x15 */ - U8 SGLOffset2; /*0x16 */ - U8 SGLOffset3; /*0x17 */ - U32 SkipCount; /*0x18 */ - U32 DataLength; /*0x1C */ - U32 BidirectionalDataLength; /*0x20 */ - U16 IoFlags; /*0x24 */ - U16 EEDPFlags; /*0x26 */ - U16 EEDPBlockSize; /*0x28 */ - U16 Reserved6; /*0x2A */ - U32 SecondaryReferenceTag; /*0x2C */ - U16 SecondaryApplicationTag; /*0x30 */ - U16 ApplicationTagTranslationMask; /*0x32 */ - U8 LUN[8]; /*0x34 */ - U32 Control; /*0x3C */ - MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */ +/* MPI v2.5 CDB field */ +typedef union _MPI25_SCSI_IO_CDB_UNION +{ + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_IEEE_SGE_SIMPLE64 SGE; +} MPI25_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI25_SCSI_IO_CDB_UNION, + Mpi25ScsiIoCdb_t, MPI2_POINTER pMpi25ScsiIoCdb_t; -#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ - MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; +/* MPI v2.5/2.6 SCSI IO Request Message */ +typedef struct _MPI25_SCSI_IO_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 SenseBufferLowAddress; /* 0x0C */ + U8 DMAFlags; /* 0x10 */ + U8 Reserved5; /* 0x11 */ + U8 SenseBufferLength; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U16 EEDPBlockSize; /* 0x28 */ + U16 Reserved6; /* 0x2A */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 ApplicationTagTranslationMask; /* 0x32 */ + U8 LUN[8]; /* 0x34 */ + U32 Control; /* 0x3C */ + MPI25_SCSI_IO_CDB_UNION CDB; /* 0x40 */ + +#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ + MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; #endif - MPI25_SGE_IO_UNION SGL; /*0x60 */ + MPI25_SGE_IO_UNION SGL; /* 0x60 */ -} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST, - Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t; +} MPI25_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI25_SCSI_IO_REQUEST, + Mpi25SCSIIORequest_t, MPI2_POINTER pMpi25SCSIIORequest_t; 
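With the v2.5 request frame defined, a sketch of how an initiator might fill in the fixed portion of the frame for a simple read. MPI2_FUNCTION_SCSI_IO_REQUEST comes from mpi2.h and the Control data-direction and task-attribute values from this header's Control defines; the zeroed frame, the htole16/htole32 helpers, and the 18-byte sense length are assumptions of the example:

#include <stddef.h>
#include <string.h>

static void mpi25_fill_scsi_read(Mpi25SCSIIORequest_t *req,
				 uint16_t dev_handle,
				 const uint8_t *cdb, uint16_t cdb_len,
				 uint32_t data_len)
{
	/* assumes req points at a zeroed request frame */
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;	/* from mpi2.h */
	req->DevHandle = htole16(dev_handle);
	req->DataLength = htole32(data_len);
	req->SenseBufferLength = 18;	/* fixed-format sense, illustrative */
	/* SGLOffset0 counts 32-bit words from the start of the frame */
	req->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
	/* the low bits of IoFlags carry the CDB length */
	req->IoFlags = htole16(cdb_len & MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK);
	req->Control = htole32(MPI2_SCSIIO_CONTROL_READ |
			       MPI2_SCSIIO_CONTROL_SIMPLEQ);
	memcpy(req->CDB.CDB32, cdb, cdb_len);
	/* caller still builds the IEEE SGL at req->SGL and posts the frame */
}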
-/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ +/* use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ -/*Defines for the DMAFlags field - * Each setting affects 4 SGLS, from SGL0 to SGL3. - * D = Data - * C = Cache DIF - * I = Interleaved - * H = Host DIF +/* Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. + * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF */ #define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F) #define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00) @@ -310,22 +315,22 @@ typedef struct _MPI25_SCSI_IO_REQUEST { #define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E) #define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F) -/*number of SGLOffset fields */ +/* number of SGLOffset fields */ #define MPI25_SCSIIO_NUM_SGLOFFSETS (4) -/*defines for the IoFlags field */ -#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) -#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) -#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) +/* defines for the IoFlags field */ +#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) +#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) +#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) -#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) +#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) /* MPI v2.6 and later */ #define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) #define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) -#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) +#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) /* MPI v2.6 and later; IOC use only */ #define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) -/*MPI v2.5 defines for the EEDPFlags bits */ -/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ +/* MPI v2.5 defines for the EEDPFlags bits */ +/* use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ #define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) #define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000) #define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) @@ -336,52 +341,51 @@ typedef struct _MPI25_SCSI_IO_REQUEST { #define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) #define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) -/*use MPI2_LUN_ defines from mpi2.h for the LUN field */ +/* use MPI2_LUN_ defines from mpi2.h for the LUN field */ -/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */ +/* use MPI2_SCSIIO_CONTROL_ defines for the Control field */ -/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so - * MPI2_SCSI_IO_REPLY is used for both. + +/* NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so + * MPI2_SCSI_IO_REPLY is used for both. 
*/ -/*SCSI IO Error Reply Message */ -typedef struct _MPI2_SCSI_IO_REPLY { - U16 DevHandle; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved1; /*0x04 */ - U8 Reserved2; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U8 SCSIStatus; /*0x0C */ - U8 SCSIState; /*0x0D */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U32 TransferCount; /*0x14 */ - U32 SenseCount; /*0x18 */ - U32 ResponseInfo; /*0x1C */ - U16 TaskTag; /*0x20 */ - U16 SCSIStatusQualifier; /* 0x22 */ - U32 BidirectionalTransferCount; /*0x24 */ - /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U32 EEDPErrorOffset; /* 0x28 */ - /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U16 EEDPObservedAppTag; /* 0x2C */ - /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U16 EEDPObservedGuard; /* 0x2E */ - /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U32 EEDPObservedRefTag; /* 0x30 */ -} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, - Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; +/* SCSI IO Error Reply Message */ +typedef struct _MPI2_SCSI_IO_REPLY +{ + U16 DevHandle; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U8 SCSIStatus; /* 0x0C */ + U8 SCSIState; /* 0x0D */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 TransferCount; /* 0x14 */ + U32 SenseCount; /* 0x18 */ + U32 ResponseInfo; /* 0x1C */ + U16 TaskTag; /* 0x20 */ + U16 SCSIStatusQualifier; /* 0x22 */ + U32 BidirectionalTransferCount; /* 0x24 */ + U32 EEDPErrorOffset; /* 0x28 */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedAppTag; /* 0x2C */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedGuard; /* 0x2E */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPObservedRefTag; /* 0x30 */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ +} MPI2_SCSI_IO_REPLY, MPI2_POINTER PTR_MPI2_SCSI_IO_REPLY, + Mpi2SCSIIOReply_t, MPI2_POINTER pMpi2SCSIIOReply_t; -/*SCSI IO Reply MsgFlags bits */ +/* SCSI IO Reply MsgFlags bits */ #define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01) #define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02) #define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04) -/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ + +/* SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ #define MPI2_SCSI_STATUS_GOOD (0x00) #define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02) @@ -390,12 +394,12 @@ typedef struct _MPI2_SCSI_IO_REPLY { #define MPI2_SCSI_STATUS_INTERMEDIATE (0x10) #define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) #define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18) -#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */ +#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /* obsolete */ #define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28) #define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30) #define MPI2_SCSI_STATUS_TASK_ABORTED (0x40) -/*SCSI IO Reply SCSIState flags */ +/* SCSI IO Reply SCSIState flags */ #define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10) #define MPI2_SCSI_STATE_TERMINATED (0x08) @@ -403,39 +407,41 @@ typedef struct _MPI2_SCSI_IO_REPLY { #define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) #define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) -/*masks and shifts for the ResponseInfo field */ +/* masks and shifts for the ResponseInfo field */ #define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) #define MPI2_SCSI_RI_SHIFT_REASONCODE (0) #define MPI2_SCSI_TASKTAG_UNKNOWN 
(0xFFFF) + /**************************************************************************** -* SCSI Task Management messages +* SCSI Task Management messages ****************************************************************************/ -/*SCSI Task Management Request Message */ -typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { - U16 DevHandle; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U8 Reserved1; /*0x04 */ - U8 TaskType; /*0x05 */ - U8 Reserved2; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U8 LUN[8]; /*0x0C */ - U32 Reserved4[7]; /*0x14 */ - U16 TaskMID; /*0x30 */ - U16 Reserved5; /*0x32 */ +/* SCSI Task Management Request Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U8 Reserved1; /* 0x04 */ + U8 TaskType; /* 0x05 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U8 LUN[8]; /* 0x0C */ + U32 Reserved4[7]; /* 0x14 */ + U16 TaskMID; /* 0x30 */ + U16 Reserved5; /* 0x32 */ } MPI2_SCSI_TASK_MANAGE_REQUEST, - *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, - Mpi2SCSITaskManagementRequest_t, - *pMpi2SCSITaskManagementRequest_t; + MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, + Mpi2SCSITaskManagementRequest_t, + MPI2_POINTER pMpi2SCSITaskManagementRequest_t; -/*TaskType values */ +/* TaskType values */ #define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) #define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) @@ -447,43 +453,42 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { #define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) #define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) -/*obsolete TaskType name */ -#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \ - (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) +/* obsolete TaskType name */ +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) -/*MsgFlags bits */ - -#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) +/* MsgFlags bits */ +#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) #define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00) -#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) -#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) -#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) - -#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) +#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) +#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) +#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) #define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18) -/*SCSI Task Management Reply Message */ -typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { - U16 DevHandle; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U8 ResponseCode; /*0x04 */ - U8 TaskType; /*0x05 */ - U8 Reserved1; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved2; /*0x0A */ - U16 Reserved3; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U32 TerminationCount; /*0x14 */ - U32 ResponseInfo; /*0x18 */ -} MPI2_SCSI_TASK_MANAGE_REPLY, - *PTR_MPI2_SCSI_TASK_MANAGE_REPLY, - Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t; -/*ResponseCode values */ +/* SCSI Task Management Reply Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY +{ + U16 DevHandle; /* 0x00 
*/
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 ResponseCode; /* 0x04 */
+ U8 TaskType; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TerminationCount; /* 0x14 */
+ U32 ResponseInfo; /* 0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
+
+/* ResponseCode values */
#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
@@ -494,7 +499,7 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
-/*masks and shifts for the ResponseInfo field */
+/* masks and shifts for the ResponseInfo field */
#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
@@ -505,41 +510,43 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
/****************************************************************************
-* SCSI Enclosure Processor messages
+* SCSI Enclosure Processor messages
****************************************************************************/
-/*SCSI Enclosure Processor Request Message */
-typedef struct _MPI2_SEP_REQUEST {
- U16 DevHandle; /*0x00 */
- U8 ChainOffset; /*0x02 */
- U8 Function; /*0x03 */
- U8 Action; /*0x04 */
- U8 Flags; /*0x05 */
- U8 Reserved1; /*0x06 */
- U8 MsgFlags; /*0x07 */
- U8 VP_ID; /*0x08 */
- U8 VF_ID; /*0x09 */
- U16 Reserved2; /*0x0A */
- U32 SlotStatus; /*0x0C */
- U32 Reserved3; /*0x10 */
- U32 Reserved4; /*0x14 */
- U32 Reserved5; /*0x18 */
- U16 Slot; /*0x1C */
- U16 EnclosureHandle; /*0x1E */
-} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST,
- Mpi2SepRequest_t, *pMpi2SepRequest_t;
+/* SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Action; /* 0x04 */
+ U8 Flags; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 SlotStatus; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+ U32 Reserved5; /* 0x18 */
+ U16 Slot; /* 0x1C */
+ U16 EnclosureHandle; /* 0x1E */
+} MPI2_SEP_REQUEST, MPI2_POINTER PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, MPI2_POINTER pMpi2SepRequest_t;
-/*Action defines */
+/* Action defines */
#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
-/*Flags defines */
+/* Flags defines */
#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
-/*SlotStatus defines */
-#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000)
+/* SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000) /* MPI v2.6 and newer */
#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
@@ -552,30 +559,32 @@ typedef struct _MPI2_SEP_REQUEST {
#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
-/*SCSI Enclosure Processor Reply Message */
-typedef struct _MPI2_SEP_REPLY {
- U16 DevHandle; /*0x00 */
- U8 MsgLength; /*0x02 */
- U8 Function; /*0x03 */
- U8 Action; /*0x04 */
- U8 Flags; /*0x05 */
- U8 Reserved1; /*0x06 */
- U8 MsgFlags; /*0x07 */
- U8 VP_ID; /*0x08 */
- U8 VF_ID; /*0x09 */
- U16 Reserved2; /*0x0A */
- U16 Reserved3; /*0x0C */
- U16 IOCStatus; /*0x0E */
- U32 IOCLogInfo; /*0x10 */
- U32 SlotStatus; /*0x14 */
- U32 Reserved4; /*0x18 */
- U16 Slot; /*0x1C */
- U16 EnclosureHandle; /*0x1E */
-} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY,
- Mpi2SepReply_t, *pMpi2SepReply_t;
-/*SlotStatus defines */
-#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000)
+/* SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Action; /* 0x04 */
+ U8 Flags; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 SlotStatus; /* 0x14 */
+ U32 Reserved4; /* 0x18 */
+ U16 Slot; /* 0x1C */
+ U16 EnclosureHandle; /* 0x1E */
+} MPI2_SEP_REPLY, MPI2_POINTER PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, MPI2_POINTER pMpi2SepReply_t;
+
+/* SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000) /* MPI v2.6 and newer */
#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
@@ -588,4 +597,7 @@ typedef struct _MPI2_SEP_REPLY {
#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
#endif
+
+
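The SEP request above is how a host drives enclosure slot LEDs: SlotStatus is a plain bit mask, and a WRITE_STATUS action simply ORs together the MPI2_SEP_REQ_SLOTSTATUS_* bits for the slot being addressed. Below is a minimal sketch of filling such a request to light a slot's locate LED, assuming the U8/U16/U32 fixed-width types from mpi2_type.h and MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR from mpi2.h; the byte-order conversion a real driver would apply is omitted.

#include <string.h>

/* Hypothetical helper, not part of the header: build a SEP WRITE_STATUS
 * request that turns on the identify (locate) LED for one slot, addressing
 * the slot by enclosure handle and slot number. */
static void sep_request_identify(Mpi2SepRequest_t *req,
                                 U16 enclosure_handle, U16 slot)
{
    memset(req, 0, sizeof(*req));
    req->Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; /* from mpi2.h */
    req->Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
    req->Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
    req->EnclosureHandle = enclosure_handle;
    req->Slot = slot;
    req->SlotStatus = MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
}

Reading a slot back works the same way with MPI2_SEP_REQ_ACTION_READ_STATUS; the IOC then returns the current bits in the reply's SlotStatus field.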
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
old mode 100644
new mode 100755
index 68ea408cd5c5..ad2b1292320b
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,181 +1,183 @@
-/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2020 Broadcom Inc. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
- * Name: mpi2_ioc.h
- * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
- * Creation Date: October 11, 2006
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.37
+ * mpi2_ioc.h Version: 02.00.37
*
- * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
- * prefix are for use only on MPI v2.5 products, and must not be used
- * with MPI v2.0 products. Unless otherwise noted, names beginning with
- * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
*
- * Version History
- * ---------------
+ * Version History
+ * ---------------
*
- * Date Version Description
- * -------- -------- ------------------------------------------------------
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
- * MaxTargets.
- * Added TotalImageSize field to FWDownload Request.
- * Added reserved words to FWUpload Request.
- * 06-26-07 02.00.02 Added IR Configuration Change List Event.
- * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
- * request and replaced it with
- * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
- * Replaced the MinReplyQueueDepth field of the IOCFacts
- * reply with MaxReplyDescriptorPostQueueDepth.
- * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
- * depth for the Reply Descriptor Post Queue.
- * Added SASAddress field to Initiator Device Table
- * Overflow Event data.
- * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
- * for SAS Initiator Device Status Change Event data.
- * Modified Reason Code defines for SAS Topology Change
- * List Event data, including adding a bit for PHY Vacant
- * status, and adding a mask for the Reason Code.
- * Added define for
- * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
- * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
- * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
- * the IOCFacts Reply.
- * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Moved MPI2_VERSION_UNION to mpi2.h.
- * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
- * instead of enables, and added SASBroadcastPrimitiveMasks
- * field.
- * Added Log Entry Added Event and related structure.
- * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
- * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
- * Added MaxVolumes and MaxPersistentEntries fields to
- * IOCFacts reply.
- * Added ProtocalFlags and IOCCapabilities fields to
- * MPI2_FW_IMAGE_HEADER.
- * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
- * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
- * a U16 (from a U32).
- * Removed extra 's' from EventMasks name.
- * 06-27-08 02.00.08 Fixed an offset in a comment.
- * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
- * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
- * renamed MinReplyFrameSize to ReplyFrameSize.
- * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
- * Added two new RAIDOperation values for Integrated RAID
- * Operations Status Event data.
- * Added four new IR Configuration Change List Event data
- * ReasonCode values.
- * Added two new ReasonCode defines for SAS Device Status
- * Change Event data.
- * Added three new DiscoveryStatus bits for the SAS
- * Discovery event data.
- * Added Multiplexing Status Change bit to the PhyStatus
- * field of the SAS Topology Change List event data.
- * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
- * BootFlags are now product-specific.
- * Added defines for the indivdual signature bytes
- * for MPI2_INIT_IMAGE_FOOTER.
- * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
- * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
- * define.
- * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
- * define.
- * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
- * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
- * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
- * Added two new reason codes for SAS Device Status Change
- * Event.
- * Added new event: SAS PHY Counter.
- * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
- * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Added new product id family for 2208.
- * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
- * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
- * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
- * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
- * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
- * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
- * Added Host Based Discovery Phy Event data.
- * Added defines for ProductID Product field
- * (MPI2_FW_HEADER_PID_).
- * Modified values for SAS ProductID Family
- * (MPI2_FW_HEADER_PID_FAMILY_).
- * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
- * Added PowerManagementControl Request structures and
- * defines.
- * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
- * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
- * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
- * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
- * SASNotifyPrimitiveMasks field to
- * MPI2_EVENT_NOTIFICATION_REQUEST.
- * Added Temperature Threshold Event.
- * Added Host Message Event.
- * Added Send Host Message request and reply.
- * 05-25-11 02.00.18 For Extended Image Header, added
- * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
- * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
- * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
- * 08-24-11 02.00.19 Added PhysicalPort field to
- * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
- * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
- * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
- * 03-29-12 02.00.21 Added a product specific range to event values.
- * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
- * Added ElapsedSeconds field to
- * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
- * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
- * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
- * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
- * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
- * Added Encrypted Hash Extended Image.
- * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
- * 11-18-14 02.00.25 Updated copyright information.
- * 03-16-15 02.00.26 Updated for MPI v2.6.
- * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and
- * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT.
- * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
- * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
- * Added MPI26_CTRL_OP_SHUTDOWN.
- * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
- * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
- * Added ConigurationFlags field to IOCInit message to
- * support NVMe SGL format control.
- * Added PCIe SRIOV support.
- * 02-17-16 02.00.28 Added SAS 4 22.5 gbs speed support.
- * Added PCIe 4 16.0 GT/sec speec support.
- * Removed AHCI support.
- * Removed SOP support.
- * 07-01-16 02.00.29 Added Archclass for 4008 product.
- * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED
- * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
- * Request Message.
- * Added new defines for the ImageType field of FWUpload
- * Request Message.
- * Added new values for the RegionType field in the Layout
- * Data sections of the FLASH Layout Extended Image Data.
- * Added new defines for the ReasonCode field of
- * Active Cable Exception Event.
- * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
- * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
- * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
- * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
- * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
- * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
- * defines for the ReasonCode field.
- * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
- * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
- * to the ReasonCode field in PCIe Device Status Change
- * Event Data.
- * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC.
- * Moved FW image definitions ionto new mpi2_image,h
- * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
- * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
- * --------------------------------------------------------------------------
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocolFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * 11-18-14 02.00.25 Updated copyright information.
+ * 03-16-15 02.00.26 Updated for MPI v2.6.
+ * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and
+ * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT.
+ * Added MPI2_EVENT_PCIE_LINK_COUNTER and
+ * MPI26_EVENT_DATA_PCIE_LINK_COUNTER.
+ * Added MPI26_CTRL_OP_SHUTDOWN.
+ * Added MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG.
+ * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
+ * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to
+ * support NVMe SGL format control.
+ * Added PCIe SRIOV support.
+ * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.29 Added Archclass for 4008 product.
+ * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED.
+ * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
+ * Request Message.
+ * Added new defines for the ImageType field of FWUpload
+ * Request Message.
+ * Added new values for the RegionType field in the Layout
+ * Data sections of the FLASH Layout Extended Image Data.
+ * Added new defines for the ReasonCode field of
+ * Active Cable Exception Event.
+ * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
+ * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
+ * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
+ * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
+ * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
+ * defines for the ReasonCode field.
+ * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
+ * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
+ * to the ReasonCode field in PCIe Device Status Change
+ * Event Data.
+ * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC.
+ * Moved FW image definitions into new mpi2_image.h.
+ * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16).
+ * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES.
+ * --------------------------------------------------------------------------
*/
#ifndef MPI2_IOC_H
@@ -183,46 +185,47 @@
/*****************************************************************************
*
-* IOC Messages
+* IOC Messages
*
*****************************************************************************/
/****************************************************************************
-* IOCInit message
+* IOCInit message
****************************************************************************/
-/*IOCInit Request message */
-typedef struct _MPI2_IOC_INIT_REQUEST {
- U8 WhoInit; /*0x00 */
- U8 Reserved1; /*0x01 */
- U8 ChainOffset; /*0x02 */
- U8 Function; /*0x03 */
- U16 Reserved2; /*0x04 */
- U8 Reserved3; /*0x06 */
- U8 MsgFlags; /*0x07 */
- U8 VP_ID; /*0x08 */
- U8 VF_ID; /*0x09 */
- U16 Reserved4; /*0x0A */
- U16 MsgVersion; /*0x0C */
- U16 HeaderVersion; /*0x0E */
- U32 Reserved5; /*0x10 */
- U16 ConfigurationFlags; /* 0x14 */
- U8 HostPageSize; /*0x16 */
- U8 HostMSIxVectors; /*0x17 */
- U16 Reserved8; /*0x18 */
- U16 SystemRequestFrameSize; /*0x1A */
- U16 ReplyDescriptorPostQueueDepth; /*0x1C */
- U16 ReplyFreeQueueDepth; /*0x1E */
- U32 SenseBufferAddressHigh; /*0x20 */
- U32 SystemReplyAddressHigh; /*0x24 */
- U64 SystemRequestFrameBaseAddress; /*0x28 */
- U64 ReplyDescriptorPostQueueAddress; /*0x30 */
- U64 ReplyFreeQueueAddress; /*0x38 */
- U64 TimeStamp; /*0x40 */
-} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST,
- Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t;
+/* IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST
+{
+ U8 WhoInit; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 MsgVersion; /* 0x0C */
+ U16 HeaderVersion; /* 0x0E */
+ U32 Reserved5; /* 0x10 */
+ U16 ConfigurationFlags; /* 0x14 */
+ U8 HostPageSize; /* 0x16 */
+ U8 HostMSIxVectors; /* 0x17 */
+ U16 Reserved8; /* 0x18 */
+
U16 SystemRequestFrameSize; /* 0x1A */ + U16 ReplyDescriptorPostQueueDepth; /* 0x1C */ + U16 ReplyFreeQueueDepth; /* 0x1E */ + U32 SenseBufferAddressHigh; /* 0x20 */ + U32 SystemReplyAddressHigh; /* 0x24 */ + U64 SystemRequestFrameBaseAddress; /* 0x28 */ + U64 ReplyDescriptorPostQueueAddress;/* 0x30 */ + U64 ReplyFreeQueueAddress; /* 0x38 */ + U64 TimeStamp; /* 0x40 */ +} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST, + Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t; -/*WhoInit values */ +/* WhoInit values */ #define MPI2_WHOINIT_NOT_INITIALIZED (0x00) #define MPI2_WHOINIT_SYSTEM_BIOS (0x01) #define MPI2_WHOINIT_ROM_BIOS (0x02) @@ -233,129 +236,133 @@ typedef struct _MPI2_IOC_INIT_REQUEST { /* MsgFlags */ #define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) - -/*MsgVersion */ +/* MsgVersion */ #define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) #define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) #define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) #define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) -/*HeaderVersion */ +/* HeaderVersion */ #define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) #define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) #define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) #define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) -/*ConfigurationFlags */ +/* ConfigurationFlags */ #define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) -/*minimum depth for a Reply Descriptor Post Queue */ +/* minimum depth for a Reply Descriptor Post Queue */ #define MPI2_RDPQ_DEPTH_MIN (16) /* Reply Descriptor Post Queue Array Entry */ -typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { - U64 RDPQBaseAddress; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY +{ + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ } MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, -*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, -Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry; + MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, + Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry; +/* IOCInit Reply message */ +typedef struct _MPI2_IOC_INIT_REPLY +{ + U8 WhoInit; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_IOC_INIT_REPLY, MPI2_POINTER PTR_MPI2_IOC_INIT_REPLY, + Mpi2IOCInitReply_t, MPI2_POINTER pMpi2IOCInitReply_t; -/*IOCInit Reply message */ -typedef struct _MPI2_IOC_INIT_REPLY { - U8 WhoInit; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY, - Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t; /**************************************************************************** -* IOCFacts message +* IOCFacts message ****************************************************************************/ -/*IOCFacts Request message */ -typedef struct _MPI2_IOC_FACTS_REQUEST { - U16 Reserved1; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; 
/*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ -} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST, - Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t; +/* IOCFacts Request message */ +typedef struct _MPI2_IOC_FACTS_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ +} MPI2_IOC_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_IOC_FACTS_REQUEST, + Mpi2IOCFactsRequest_t, MPI2_POINTER pMpi2IOCFactsRequest_t; -/*IOCFacts Reply message */ -typedef struct _MPI2_IOC_FACTS_REPLY { - U16 MsgVersion; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 HeaderVersion; /*0x04 */ - U8 IOCNumber; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ - U16 IOCExceptions; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U8 MaxChainDepth; /*0x14 */ - U8 WhoInit; /*0x15 */ - U8 NumberOfPorts; /*0x16 */ - U8 MaxMSIxVectors; /*0x17 */ - U16 RequestCredit; /*0x18 */ - U16 ProductID; /*0x1A */ - U32 IOCCapabilities; /*0x1C */ - MPI2_VERSION_UNION FWVersion; /*0x20 */ - U16 IOCRequestFrameSize; /*0x24 */ - U16 IOCMaxChainSegmentSize; /*0x26 */ - U16 MaxInitiators; /*0x28 */ - U16 MaxTargets; /*0x2A */ - U16 MaxSasExpanders; /*0x2C */ - U16 MaxEnclosures; /*0x2E */ - U16 ProtocolFlags; /*0x30 */ - U16 HighPriorityCredit; /*0x32 */ - U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */ - U8 ReplyFrameSize; /*0x36 */ - U8 MaxVolumes; /*0x37 */ - U16 MaxDevHandle; /*0x38 */ - U16 MaxPersistentEntries; /*0x3A */ - U16 MinDevHandle; /*0x3C */ - U8 CurrentHostPageSize; /* 0x3E */ - U8 Reserved4; /* 0x3F */ - U8 SGEModifierMask; /*0x40 */ - U8 SGEModifierValue; /*0x41 */ - U8 SGEModifierShift; /*0x42 */ - U8 Reserved5; /*0x43 */ -} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, - Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; -/*MsgVersion */ +/* IOCFacts Reply message */ +typedef struct _MPI2_IOC_FACTS_REPLY +{ + U16 MsgVersion; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 HeaderVersion; /* 0x04 */ + U8 IOCNumber; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U16 IOCExceptions; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 MaxChainDepth; /* 0x14 */ + U8 WhoInit; /* 0x15 */ + U8 NumberOfPorts; /* 0x16 */ + U8 MaxMSIxVectors; /* 0x17 */ + U16 RequestCredit; /* 0x18 */ + U16 ProductID; /* 0x1A */ + U32 IOCCapabilities; /* 0x1C */ + MPI2_VERSION_UNION FWVersion; /* 0x20 */ + U16 IOCRequestFrameSize; /* 0x24 */ + U16 IOCMaxChainSegmentSize; /* 0x26 */ /* MPI 2.5 only; Reserved in MPI 2.0 */ + U16 MaxInitiators; /* 0x28 */ + U16 MaxTargets; /* 0x2A */ + U16 MaxSasExpanders; /* 0x2C */ + U16 MaxEnclosures; /* 0x2E */ + U16 ProtocolFlags; /* 0x30 */ + U16 HighPriorityCredit; /* 0x32 */ + U16 MaxReplyDescriptorPostQueueDepth; /* 0x34 */ + U8 ReplyFrameSize; /* 0x36 */ + U8 MaxVolumes; /* 0x37 */ + U16 MaxDevHandle; /* 0x38 */ + U16 MaxPersistentEntries; /* 0x3A */ + U16 MinDevHandle; /* 0x3C */ + U8 CurrentHostPageSize; /* 0x3E */ + U8 Reserved4; /* 0x3F */ + U8 SGEModifierMask; /* 0x40 */ + U8 SGEModifierValue; /* 0x41 */ + U8 SGEModifierShift; /* 0x42 */ + U8 Reserved5; /* 0x43 */ +} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY, + Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t; + +/* MsgVersion */ #define 
MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) #define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) #define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) #define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) -/*HeaderVersion */ +/* HeaderVersion */ #define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) #define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) #define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) #define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) -/*IOCExceptions */ +/* IOCExceptions */ #define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400) #define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) #define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) @@ -372,11 +379,11 @@ typedef struct _MPI2_IOC_FACTS_REPLY { #define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) #define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) -/*defines for WhoInit field are after the IOCInit Request */ +/* defines for WhoInit field are after the IOCInit Request */ -/*ProductID field uses MPI2_FW_HEADER_PID_ */ +/* ProductID field uses MPI2_FW_HEADER_PID_ */ -/*IOCCapabilities */ +/* IOCCapabilities */ #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) #define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) #define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) @@ -395,157 +402,165 @@ typedef struct _MPI2_IOC_FACTS_REPLY { #define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) #define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) -/*ProtocolFlags */ -#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008) +/* ProtocolFlags */ +#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008) /* MPI v2.6 and later */ #define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) #define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) + /**************************************************************************** -* PortFacts message +* PortFacts message ****************************************************************************/ -/*PortFacts Request message */ -typedef struct _MPI2_PORT_FACTS_REQUEST { - U16 Reserved1; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 PortNumber; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ -} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST, - Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t; +/* PortFacts Request message */ +typedef struct _MPI2_PORT_FACTS_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 PortNumber; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ +} MPI2_PORT_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_PORT_FACTS_REQUEST, + Mpi2PortFactsRequest_t, MPI2_POINTER pMpi2PortFactsRequest_t; -/*PortFacts Reply message */ -typedef struct _MPI2_PORT_FACTS_REPLY { - U16 Reserved1; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 PortNumber; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U16 Reserved4; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U8 Reserved5; /*0x14 */ - U8 PortType; /*0x15 */ - U16 Reserved6; /*0x16 */ - U16 MaxPostedCmdBuffers; /*0x18 */ - U16 Reserved7; /*0x1A */ -} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY, - Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t; +/* PortFacts Reply message */ +typedef struct _MPI2_PORT_FACTS_REPLY +{ + U16 Reserved1; /* 0x00 */ + U8 
MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 PortNumber; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 Reserved5; /* 0x14 */ + U8 PortType; /* 0x15 */ + U16 Reserved6; /* 0x16 */ + U16 MaxPostedCmdBuffers; /* 0x18 */ + U16 Reserved7; /* 0x1A */ +} MPI2_PORT_FACTS_REPLY, MPI2_POINTER PTR_MPI2_PORT_FACTS_REPLY, + Mpi2PortFactsReply_t, MPI2_POINTER pMpi2PortFactsReply_t; -/*PortType values */ +/* PortType values */ #define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) #define MPI2_PORTFACTS_PORTTYPE_FC (0x10) #define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) #define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) #define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) -#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40) +#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40) /* MPI v2.6 and later */ /**************************************************************************** -* PortEnable message +* PortEnable message ****************************************************************************/ -/*PortEnable Request message */ -typedef struct _MPI2_PORT_ENABLE_REQUEST { - U16 Reserved1; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U8 Reserved2; /*0x04 */ - U8 PortFlags; /*0x05 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ -} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST, - Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t; +/* PortEnable Request message */ +typedef struct _MPI2_PORT_ENABLE_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U8 Reserved2; /* 0x04 */ + U8 PortFlags; /* 0x05 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ +} MPI2_PORT_ENABLE_REQUEST, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REQUEST, + Mpi2PortEnableRequest_t, MPI2_POINTER pMpi2PortEnableRequest_t; + + +/* PortEnable Reply message */ +typedef struct _MPI2_PORT_ENABLE_REPLY +{ + U16 Reserved1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U8 Reserved2; /* 0x04 */ + U8 PortFlags; /* 0x05 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_PORT_ENABLE_REPLY, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REPLY, + Mpi2PortEnableReply_t, MPI2_POINTER pMpi2PortEnableReply_t; -/*PortEnable Reply message */ -typedef struct _MPI2_PORT_ENABLE_REPLY { - U16 Reserved1; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U8 Reserved2; /*0x04 */ - U8 PortFlags; /*0x05 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY, - Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t; /**************************************************************************** -* EventNotification message +* EventNotification message ****************************************************************************/ -/*EventNotification Request message */ +/* EventNotification Request message */ #define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) -typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST { 
- U16 Reserved1; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 Reserved5; /*0x0C */ - U32 Reserved6; /*0x10 */ - U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */ - U16 SASBroadcastPrimitiveMasks; /*0x24 */ - U16 SASNotifyPrimitiveMasks; /*0x26 */ - U32 Reserved8; /*0x28 */ +typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */ + U16 SASBroadcastPrimitiveMasks; /* 0x24 */ + U16 SASNotifyPrimitiveMasks; /* 0x26 */ + U32 Reserved8; /* 0x28 */ } MPI2_EVENT_NOTIFICATION_REQUEST, - *PTR_MPI2_EVENT_NOTIFICATION_REQUEST, - Mpi2EventNotificationRequest_t, - *pMpi2EventNotificationRequest_t; + MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST, + Mpi2EventNotificationRequest_t, MPI2_POINTER pMpi2EventNotificationRequest_t; -/*EventNotification Reply message */ -typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { - U16 EventDataLength; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved1; /*0x04 */ - U8 AckRequired; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved2; /*0x0A */ - U16 Reserved3; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U16 Event; /*0x14 */ - U16 Reserved4; /*0x16 */ - U32 EventContext; /*0x18 */ - U32 EventData[1]; /*0x1C */ -} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY, - Mpi2EventNotificationReply_t, - *pMpi2EventNotificationReply_t; -/*AckRequired */ +/* EventNotification Reply message */ +typedef struct _MPI2_EVENT_NOTIFICATION_REPLY +{ + U16 EventDataLength; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 AckRequired; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 Reserved3; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 Event; /* 0x14 */ + U16 Reserved4; /* 0x16 */ + U32 EventContext; /* 0x18 */ + U32 EventData[1]; /* 0x1C */ +} MPI2_EVENT_NOTIFICATION_REPLY, MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REPLY, + Mpi2EventNotificationReply_t, MPI2_POINTER pMpi2EventNotificationReply_t; + +/* AckRequired */ #define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) #define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) -/*Event */ +/* Event */ #define MPI2_EVENT_LOG_DATA (0x0001) #define MPI2_EVENT_STATE_CHANGE (0x0002) #define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) #define MPI2_EVENT_EVENT_CHANGE (0x000A) -#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */ +#define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */ #define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) #define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) #define MPI2_EVENT_SAS_DISCOVERY (0x0016) @@ -554,7 +569,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { #define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) #define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) #define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) -#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D) /* MPI v2.6 and 
later */ #define MPI2_EVENT_IR_VOLUME (0x001E) #define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) #define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) @@ -567,88 +582,95 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { #define MPI2_EVENT_TEMP_THRESHOLD (0x0027) #define MPI2_EVENT_HOST_MESSAGE (0x0028) #define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) -#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030) -#define MPI2_EVENT_PCIE_ENUMERATION (0x0031) -#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032) -#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033) -#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) -#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) +#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030) /* MPI v2.6 and later */ +#define MPI2_EVENT_PCIE_ENUMERATION (0x0031) /* MPI v2.6 and later */ +#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032) /* MPI v2.6 and later */ +#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033) /* MPI v2.6 and later */ +#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) /* MPI v2.6 and later */ +#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) /* MPI v2.5 and later */ #define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) #define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) -/*Log Entry Added Event data */ -/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ +/* Log Entry Added Event data */ + +/* the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ #define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) -typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED { - U64 TimeStamp; /*0x00 */ - U32 Reserved1; /*0x08 */ - U16 LogSequence; /*0x0C */ - U16 LogEntryQualifier; /*0x0E */ - U8 VP_ID; /*0x10 */ - U8 VF_ID; /*0x11 */ - U16 Reserved2; /*0x12 */ - U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */ +typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U16 LogSequence; /* 0x0C */ + U16 LogEntryQualifier; /* 0x0E */ + U8 VP_ID; /* 0x10 */ + U8 VF_ID; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH];/* 0x14 */ } MPI2_EVENT_DATA_LOG_ENTRY_ADDED, - *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, - Mpi2EventDataLogEntryAdded_t, - *pMpi2EventDataLogEntryAdded_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; -/*GPIO Interrupt Event data */ -typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { - U8 GPIONum; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ +/* GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT +{ + U8 GPIONum; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ } MPI2_EVENT_DATA_GPIO_INTERRUPT, - *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, - Mpi2EventDataGpioInterrupt_t, - *pMpi2EventDataGpioInterrupt_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; -/*Temperature Threshold Event data */ -typedef struct _MPI2_EVENT_DATA_TEMPERATURE { - U16 Status; /*0x00 */ - U8 SensorNum; /*0x02 */ - U8 Reserved1; /*0x03 */ - U16 CurrentTemperature; /*0x04 */ - U16 Reserved2; /*0x06 */ - U32 Reserved3; /*0x08 */ - U32 Reserved4; /*0x0C */ +/* Temperature Threshold Event data */ + +typedef struct _MPI2_EVENT_DATA_TEMPERATURE +{ + U16 Status; /* 0x00 */ + U8 SensorNum; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U16 CurrentTemperature; /* 0x04 */ + U16 Reserved2; /* 0x06 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ } MPI2_EVENT_DATA_TEMPERATURE, - 
*PTR_MPI2_EVENT_DATA_TEMPERATURE, - Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE, + Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t; -/*Temperature Threshold Event data Status bits */ +/* Temperature Threshold Event data Status bits */ #define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) #define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) #define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) #define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) -/*Host Message Event data */ -typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { - U8 SourceVF_ID; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 Reserved3; /*0x04 */ - U32 HostData[1]; /*0x08 */ -} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, - Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; +/* Host Message Event data */ -/*Power Performance Change Event data */ +typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE +{ + U8 SourceVF_ID; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Reserved3; /* 0x04 */ + U32 HostData[1]; /* 0x08 */ +} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE, + Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t; -typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { - U8 CurrentPowerMode; /*0x00 */ - U8 PreviousPowerMode; /*0x01 */ - U16 Reserved1; /*0x02 */ + +/* Power Performance Change Event data */ + +typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE +{ + U8 CurrentPowerMode; /* 0x00 */ + U8 PreviousPowerMode; /* 0x01 */ + U16 Reserved1; /* 0x02 */ } MPI2_EVENT_DATA_POWER_PERF_CHANGE, - *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, - Mpi2EventDataPowerPerfChange_t, - *pMpi2EventDataPowerPerfChange_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, + Mpi2EventDataPowerPerfChange_t, MPI2_POINTER pMpi2EventDataPowerPerfChange_t; -/*defines for CurrentPowerMode and PreviousPowerMode fields */ +/* defines for CurrentPowerMode and PreviousPowerMode fields */ #define MPI2_EVENT_PM_INIT_MASK (0xC0) #define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) #define MPI2_EVENT_PM_INIT_HOST (0x40) @@ -662,70 +684,77 @@ typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { #define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) #define MPI2_EVENT_PM_MODE_STANDBY (0x06) + /* Active Cable Exception Event data */ -typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { - U32 ActiveCablePowerRequirement; /* 0x00 */ - U8 ReasonCode; /* 0x04 */ - U8 ReceptacleID; /* 0x05 */ - U16 Reserved1; /* 0x06 */ +typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT +{ + U32 ActiveCablePowerRequirement; /* 0x00 */ + U8 ReasonCode; /* 0x04 */ + U8 ReceptacleID; /* 0x05 */ + U16 Reserved1; /* 0x06 */ } MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - Mpi25EventDataActiveCableExcept_t, - *pMpi25EventDataActiveCableExcept_t, - MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - Mpi26EventDataActiveCableExcept_t, - *pMpi26EventDataActiveCableExcept_t; + MPI2_POINTER PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi25EventDataActiveCableExcept_t, + MPI2_POINTER pMpi25EventDataActiveCableExcept_t, + MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + MPI2_POINTER PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi26EventDataActiveCableExcept_t, + MPI2_POINTER pMpi26EventDataActiveCableExcept_t; -/*MPI2.5 defines for the ReasonCode field */ +/* MPI2.5 defines for the ReasonCode field */ #define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) #define 
MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01) #define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02) -/* defines for ReasonCode field */ +/* MPI2.6 defines for the ReasonCode field */ #define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) #define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) #define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02) -/*Hard Reset Received Event data */ +/* Hard Reset Received Event data */ -typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { - U8 Reserved1; /*0x00 */ - U8 Port; /*0x01 */ - U16 Reserved2; /*0x02 */ +typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED +{ + U8 Reserved1; /* 0x00 */ + U8 Port; /* 0x01 */ + U16 Reserved2; /* 0x02 */ } MPI2_EVENT_DATA_HARD_RESET_RECEIVED, - *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, - Mpi2EventDataHardResetReceived_t, - *pMpi2EventDataHardResetReceived_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + Mpi2EventDataHardResetReceived_t, + MPI2_POINTER pMpi2EventDataHardResetReceived_t; -/*Task Set Full Event data */ -/* this event is obsolete */ -typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL { - U16 DevHandle; /*0x00 */ - U16 CurrentDepth; /*0x02 */ -} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL, - Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t; +/* Task Set Full Event data */ +/* this event is obsolete */ -/*SAS Device Status Change Event data */ +typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL +{ + U16 DevHandle; /* 0x00 */ + U16 CurrentDepth; /* 0x02 */ +} MPI2_EVENT_DATA_TASK_SET_FULL, MPI2_POINTER PTR_MPI2_EVENT_DATA_TASK_SET_FULL, + Mpi2EventDataTaskSetFull_t, MPI2_POINTER pMpi2EventDataTaskSetFull_t; -typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE { - U16 TaskTag; /*0x00 */ - U8 ReasonCode; /*0x02 */ - U8 PhysicalPort; /*0x03 */ - U8 ASC; /*0x04 */ - U8 ASCQ; /*0x05 */ - U16 DevHandle; /*0x06 */ - U32 Reserved2; /*0x08 */ - U64 SASAddress; /*0x0C */ - U8 LUN[8]; /*0x14 */ + +/* SAS Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE +{ + U16 TaskTag; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U8 ASC; /* 0x04 */ + U8 ASCQ; /* 0x05 */ + U16 DevHandle; /* 0x06 */ + U32 Reserved2; /* 0x08 */ + U64 SASAddress; /* 0x0C */ + U8 LUN[8]; /* 0x14 */ } MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, - *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, - Mpi2EventDataSasDeviceStatusChange_t, - *pMpi2EventDataSasDeviceStatusChange_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + Mpi2EventDataSasDeviceStatusChange_t, + MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t; -/*SAS Device Status Change Event data ReasonCode values */ +/* SAS Device Status Change Event data ReasonCode values */ #define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) #define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) #define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) @@ -740,91 +769,98 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE { #define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) #define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) -/*Integrated RAID Operation Status Event data */ -typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS { - U16 VolDevHandle; /*0x00 */ - U16 Reserved1; /*0x02 */ - U8 RAIDOperation; /*0x04 */ - U8 PercentComplete; /*0x05 */ - U16 Reserved2; /*0x06 */ - U32 ElapsedSeconds; /*0x08 */ +/* Integrated RAID Operation Status Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS +{ + U16 VolDevHandle; /* 0x00 */ + U16 Reserved1; /* 
0x02 */ + U8 RAIDOperation; /* 0x04 */ + U8 PercentComplete; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U32 ElapsedSeconds; /* 0x08 */ } MPI2_EVENT_DATA_IR_OPERATION_STATUS, - *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, - Mpi2EventDataIrOperationStatus_t, - *pMpi2EventDataIrOperationStatus_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, + Mpi2EventDataIrOperationStatus_t, + MPI2_POINTER pMpi2EventDataIrOperationStatus_t; -/*Integrated RAID Operation Status Event data RAIDOperation values */ +/* Integrated RAID Operation Status Event data RAIDOperation values */ #define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) #define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) #define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) #define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) #define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) -/*Integrated RAID Volume Event data */ -typedef struct _MPI2_EVENT_DATA_IR_VOLUME { - U16 VolDevHandle; /*0x00 */ - U8 ReasonCode; /*0x02 */ - U8 Reserved1; /*0x03 */ - U32 NewValue; /*0x04 */ - U32 PreviousValue; /*0x08 */ -} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME, - Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t; +/* Integrated RAID Volume Event data */ -/*Integrated RAID Volume Event data ReasonCode values */ +typedef struct _MPI2_EVENT_DATA_IR_VOLUME +{ + U16 VolDevHandle; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 NewValue; /* 0x04 */ + U32 PreviousValue; /* 0x08 */ +} MPI2_EVENT_DATA_IR_VOLUME, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_VOLUME, + Mpi2EventDataIrVolume_t, MPI2_POINTER pMpi2EventDataIrVolume_t; + +/* Integrated RAID Volume Event data ReasonCode values */ #define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) #define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) #define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) -/*Integrated RAID Physical Disk Event data */ -typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK { - U16 Reserved1; /*0x00 */ - U8 ReasonCode; /*0x02 */ - U8 PhysDiskNum; /*0x03 */ - U16 PhysDiskDevHandle; /*0x04 */ - U16 Reserved2; /*0x06 */ - U16 Slot; /*0x08 */ - U16 EnclosureHandle; /*0x0A */ - U32 NewValue; /*0x0C */ - U32 PreviousValue; /*0x10 */ +/* Integrated RAID Physical Disk Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK +{ + U16 Reserved1; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysDiskNum; /* 0x03 */ + U16 PhysDiskDevHandle; /* 0x04 */ + U16 Reserved2; /* 0x06 */ + U16 Slot; /* 0x08 */ + U16 EnclosureHandle; /* 0x0A */ + U32 NewValue; /* 0x0C */ + U32 PreviousValue; /* 0x10 */ } MPI2_EVENT_DATA_IR_PHYSICAL_DISK, - *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, - Mpi2EventDataIrPhysicalDisk_t, - *pMpi2EventDataIrPhysicalDisk_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + Mpi2EventDataIrPhysicalDisk_t, MPI2_POINTER pMpi2EventDataIrPhysicalDisk_t; -/*Integrated RAID Physical Disk Event data ReasonCode values */ +/* Integrated RAID Physical Disk Event data ReasonCode values */ #define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) #define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) #define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) -/*Integrated RAID Configuration Change List Event data */ + +/* Integrated RAID Configuration Change List Event data */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check NumElements at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumElements at runtime. 
*/ #ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT #define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) #endif -typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT { - U16 ElementFlags; /*0x00 */ - U16 VolDevHandle; /*0x02 */ - U8 ReasonCode; /*0x04 */ - U8 PhysDiskNum; /*0x05 */ - U16 PhysDiskDevHandle; /*0x06 */ -} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, - Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t; +typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT +{ + U16 ElementFlags; /* 0x00 */ + U16 VolDevHandle; /* 0x02 */ + U8 ReasonCode; /* 0x04 */ + U8 PhysDiskNum; /* 0x05 */ + U16 PhysDiskDevHandle; /* 0x06 */ +} MPI2_EVENT_IR_CONFIG_ELEMENT, MPI2_POINTER PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, + Mpi2EventIrConfigElement_t, MPI2_POINTER pMpi2EventIrConfigElement_t; -/*IR Configuration Change List Event data ElementFlags values */ +/* IR Configuration Change List Event data ElementFlags values */ #define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) #define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) #define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) #define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) -/*IR Configuration Change List Event data ReasonCode values */ +/* IR Configuration Change List Event data ReasonCode values */ #define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) #define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) #define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) @@ -835,43 +871,45 @@ typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT { #define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) #define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) -typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST { - U8 NumElements; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 Reserved2; /*0x02 */ - U8 ConfigNum; /*0x03 */ - U32 Flags; /*0x04 */ - MPI2_EVENT_IR_CONFIG_ELEMENT - ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */ +typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST +{ + U8 NumElements; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 Reserved2; /* 0x02 */ + U8 ConfigNum; /* 0x03 */ + U32 Flags; /* 0x04 */ + MPI2_EVENT_IR_CONFIG_ELEMENT ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT]; /* 0x08 */ } MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, - *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, - Mpi2EventDataIrConfigChangeList_t, - *pMpi2EventDataIrConfigChangeList_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + Mpi2EventDataIrConfigChangeList_t, + MPI2_POINTER pMpi2EventDataIrConfigChangeList_t; -/*IR Configuration Change List Event data Flags values */ +/* IR Configuration Change List Event data Flags values */ #define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) -/*SAS Discovery Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY { - U8 Flags; /*0x00 */ - U8 ReasonCode; /*0x01 */ - U8 PhysicalPort; /*0x02 */ - U8 Reserved1; /*0x03 */ - U32 DiscoveryStatus; /*0x04 */ +/* SAS Discovery Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY +{ + U8 Flags; /* 0x00 */ + U8 ReasonCode; /* 0x01 */ + U8 PhysicalPort; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 DiscoveryStatus; /* 0x04 */ } MPI2_EVENT_DATA_SAS_DISCOVERY, - *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, - Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, + Mpi2EventDataSasDiscovery_t, MPI2_POINTER pMpi2EventDataSasDiscovery_t; -/*SAS Discovery Event data Flags values */ +/* SAS Discovery Event data Flags values */ #define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) #define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) -/*SAS 
Discovery Event data ReasonCode values */ +/* SAS Discovery Event data ReasonCode values */ #define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) #define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) -/*SAS Discovery Event data DiscoveryStatus values */ +/* SAS Discovery Event data DiscoveryStatus values */ #define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) #define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) #define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) @@ -893,19 +931,21 @@ typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY { #define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) #define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) -/*SAS Broadcast Primitive Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE { - U8 PhyNum; /*0x00 */ - U8 Port; /*0x01 */ - U8 PortWidth; /*0x02 */ - U8 Primitive; /*0x03 */ +/* SAS Broadcast Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE +{ + U8 PhyNum; /* 0x00 */ + U8 Port; /* 0x01 */ + U8 PortWidth; /* 0x02 */ + U8 Primitive; /* 0x03 */ } MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, - *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, - Mpi2EventDataSasBroadcastPrimitive_t, - *pMpi2EventDataSasBroadcastPrimitive_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + Mpi2EventDataSasBroadcastPrimitive_t, + MPI2_POINTER pMpi2EventDataSasBroadcastPrimitive_t; -/*defines for the Primitive field */ +/* defines for the Primitive field */ #define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) #define MPI2_EVENT_PRIMITIVE_SES (0x02) #define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) @@ -915,93 +955,101 @@ typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE { #define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) #define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) -/*SAS Notify Primitive Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE { - U8 PhyNum; /*0x00 */ - U8 Port; /*0x01 */ - U8 Reserved1; /*0x02 */ - U8 Primitive; /*0x03 */ +/* SAS Notify Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE +{ + U8 PhyNum; /* 0x00 */ + U8 Port; /* 0x01 */ + U8 Reserved1; /* 0x02 */ + U8 Primitive; /* 0x03 */ } MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, - *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, - Mpi2EventDataSasNotifyPrimitive_t, - *pMpi2EventDataSasNotifyPrimitive_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + Mpi2EventDataSasNotifyPrimitive_t, + MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t; -/*defines for the Primitive field */ +/* defines for the Primitive field */ #define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) #define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) #define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) #define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) -/*SAS Initiator Device Status Change Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE { - U8 ReasonCode; /*0x00 */ - U8 PhysicalPort; /*0x01 */ - U16 DevHandle; /*0x02 */ - U64 SASAddress; /*0x04 */ +/* SAS Initiator Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE +{ + U8 ReasonCode; /* 0x00 */ + U8 PhysicalPort; /* 0x01 */ + U16 DevHandle; /* 0x02 */ + U64 SASAddress; /* 0x04 */ } MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, - *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, - Mpi2EventDataSasInitDevStatusChange_t, - *pMpi2EventDataSasInitDevStatusChange_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + Mpi2EventDataSasInitDevStatusChange_t, + MPI2_POINTER 
pMpi2EventDataSasInitDevStatusChange_t; -/*SAS Initiator Device Status Change event ReasonCode values */ +/* SAS Initiator Device Status Change event ReasonCode values */ #define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) #define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) -/*SAS Initiator Device Table Overflow Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW { - U16 MaxInit; /*0x00 */ - U16 CurrentInit; /*0x02 */ - U64 SASAddress; /*0x04 */ +/* SAS Initiator Device Table Overflow Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW +{ + U16 MaxInit; /* 0x00 */ + U16 CurrentInit; /* 0x02 */ + U64 SASAddress; /* 0x04 */ } MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, - *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, - Mpi2EventDataSasInitTableOverflow_t, - *pMpi2EventDataSasInitTableOverflow_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + Mpi2EventDataSasInitTableOverflow_t, + MPI2_POINTER pMpi2EventDataSasInitTableOverflow_t; -/*SAS Topology Change List Event data */ + +/* SAS Topology Change List Event data */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check NumEntries at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumEntries at runtime. */ #ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT #define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) #endif -typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY { - U16 AttachedDevHandle; /*0x00 */ - U8 LinkRate; /*0x02 */ - U8 PhyStatus; /*0x03 */ -} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, - Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t; +typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY +{ + U16 AttachedDevHandle; /* 0x00 */ + U8 LinkRate; /* 0x02 */ + U8 PhyStatus; /* 0x03 */ +} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, MPI2_POINTER PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, + Mpi2EventSasTopoPhyEntry_t, MPI2_POINTER pMpi2EventSasTopoPhyEntry_t; -typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { - U16 EnclosureHandle; /*0x00 */ - U16 ExpanderDevHandle; /*0x02 */ - U8 NumPhys; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - U8 NumEntries; /*0x08 */ - U8 StartPhyNum; /*0x09 */ - U8 ExpStatus; /*0x0A */ - U8 PhysicalPort; /*0x0B */ - MPI2_EVENT_SAS_TOPO_PHY_ENTRY - PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */ +typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST +{ + U16 EnclosureHandle; /* 0x00 */ + U16 ExpanderDevHandle; /* 0x02 */ + U8 NumPhys; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U8 NumEntries; /* 0x08 */ + U8 StartPhyNum; /* 0x09 */ + U8 ExpStatus; /* 0x0A */ + U8 PhysicalPort; /* 0x0B */ + MPI2_EVENT_SAS_TOPO_PHY_ENTRY PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /* 0x0C*/ } MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, - *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, - Mpi2EventDataSasTopologyChangeList_t, - *pMpi2EventDataSasTopologyChangeList_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + Mpi2EventDataSasTopologyChangeList_t, + MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t; -/*values for the ExpStatus field */ +/* values for the ExpStatus field */ #define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) #define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) #define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) #define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) #define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) -/*defines for the LinkRate field */ +/* defines for the LinkRate field */ #define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) #define 
MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) #define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) @@ -1020,10 +1068,10 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { #define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) #define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C) -/*values for the PhyStatus field */ +/* values for the PhyStatus field */ #define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) #define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) -/*values for the PhyStatus ReasonCode sub-field */ +/* values for the PhyStatus ReasonCode sub-field */ #define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) #define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) #define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) @@ -1031,154 +1079,158 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { #define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) #define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) -/*SAS Enclosure Device Status Change Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE { - U16 EnclosureHandle; /*0x00 */ - U8 ReasonCode; /*0x02 */ - U8 PhysicalPort; /*0x03 */ - U64 EnclosureLogicalID; /*0x04 */ - U16 NumSlots; /*0x0C */ - U16 StartSlot; /*0x0E */ - U32 PhyBits; /*0x10 */ +/* SAS Enclosure Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE +{ + U16 EnclosureHandle; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U64 EnclosureLogicalID; /* 0x04 */ + U16 NumSlots; /* 0x0C */ + U16 StartSlot; /* 0x0E */ + U32 PhyBits; /* 0x10 */ } MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, - *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, - Mpi2EventDataSasEnclDevStatusChange_t, - *pMpi2EventDataSasEnclDevStatusChange_t, - MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, - *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, - Mpi26EventDataEnclDevStatusChange_t, - *pMpi26EventDataEnclDevStatusChange_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + Mpi2EventDataSasEnclDevStatusChange_t, + MPI2_POINTER pMpi2EventDataSasEnclDevStatusChange_t, + MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + MPI2_POINTER PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + Mpi26EventDataEnclDevStatusChange_t, + MPI2_POINTER pMpi26EventDataEnclDevStatusChange_t; -/*SAS Enclosure Device Status Change event ReasonCode values */ +/* SAS Enclosure Device Status Change event ReasonCode values */ #define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) #define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) -/*Enclosure Device Status Change event ReasonCode values */ +/* Enclosure Device Status Change event ReasonCode values */ #define MPI26_EVENT_ENCL_RC_ADDED (0x01) #define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02) +/* SAS PHY Counter Event data */ -typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR { - U16 DevHandle; /*0x00 */ - U8 ReasonCode; /*0x02 */ - U8 PhysicalPort; /*0x03 */ - U32 Reserved1[2]; /*0x04 */ - U64 SASAddress; /*0x0C */ - U32 Reserved2[2]; /*0x14 */ -} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, - *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, - Mpi25EventDataSasDeviceDiscoveryError_t, - *pMpi25EventDataSasDeviceDiscoveryError_t; - -/*SAS Device Discovery Error Event data ReasonCode values */ -#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) -#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) - -/*SAS PHY Counter Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { - U64 TimeStamp; /*0x00 */ - U32 Reserved1; /*0x08 */ - U8 PhyEventCode; /*0x0C */ - U8 PhyNum; /*0x0D */ - U16 Reserved2; /*0x0E */ - U32 
PhyEventInfo; /*0x10 */ - U8 CounterType; /*0x14 */ - U8 ThresholdWindow; /*0x15 */ - U8 TimeUnits; /*0x16 */ - U8 Reserved3; /*0x17 */ - U32 EventThreshold; /*0x18 */ - U16 ThresholdFlags; /*0x1C */ - U16 Reserved4; /*0x1E */ +typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 PhyEventCode; /* 0x0C */ + U8 PhyNum; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U32 PhyEventInfo; /* 0x10 */ + U8 CounterType; /* 0x14 */ + U8 ThresholdWindow; /* 0x15 */ + U8 TimeUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 EventThreshold; /* 0x18 */ + U16 ThresholdFlags; /* 0x1C */ + U16 Reserved4; /* 0x1E */ } MPI2_EVENT_DATA_SAS_PHY_COUNTER, - *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, - Mpi2EventDataSasPhyCounter_t, - *pMpi2EventDataSasPhyCounter_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, + Mpi2EventDataSasPhyCounter_t, MPI2_POINTER pMpi2EventDataSasPhyCounter_t; -/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h - *for the PhyEventCode field */ +/* use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h for the PhyEventCode field */ -/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h - *for the CounterType field */ +/* use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */ -/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h - *for the TimeUnits field */ +/* use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits field */ -/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h - *for the ThresholdFlags field */ +/* use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */ -/*SAS Quiesce Event data */ -typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE { - U8 ReasonCode; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 Reserved3; /*0x04 */ +/* SAS Quiesce Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE +{ + U8 ReasonCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Reserved3; /* 0x04 */ } MPI2_EVENT_DATA_SAS_QUIESCE, - *PTR_MPI2_EVENT_DATA_SAS_QUIESCE, - Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t; + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE, + Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t; -/*SAS Quiesce Event data ReasonCode values */ +/* SAS Quiesce Event data ReasonCode values */ #define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) #define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) -/*Host Based Discovery Phy Event data */ -typedef struct _MPI2_EVENT_HBD_PHY_SAS { - U8 Flags; /*0x00 */ - U8 NegotiatedLinkRate; /*0x01 */ - U8 PhyNum; /*0x02 */ - U8 PhysicalPort; /*0x03 */ - U32 Reserved1; /*0x04 */ - U8 InitialFrame[28]; /*0x08 */ -} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS, - Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t; +typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR +{ + U16 DevHandle; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U32 Reserved1[2]; /* 0x04 */ + U64 SASAddress; /* 0x0C */ + U32 Reserved2[2]; /* 0x14 */ +} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + MPI2_POINTER PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + Mpi25EventDataSasDeviceDiscoveryError_t, + MPI2_POINTER pMpi25EventDataSasDeviceDiscoveryError_t; -/*values for the Flags field */ +/* SAS Device Discovery Error Event data ReasonCode values */ +#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + + +/* Host Based Discovery Phy Event data */ + +typedef struct _MPI2_EVENT_HBD_PHY_SAS +{ + U8 Flags; /* 
0x00 */ + U8 NegotiatedLinkRate; /* 0x01 */ + U8 PhyNum; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U32 Reserved1; /* 0x04 */ + U8 InitialFrame[28]; /* 0x08 */ +} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS, + Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t; + +/* values for the Flags field */ #define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) #define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) -/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h - *for the NegotiatedLinkRate field */ +/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for the NegotiatedLinkRate field */ -typedef union _MPI2_EVENT_HBD_DESCRIPTOR { - MPI2_EVENT_HBD_PHY_SAS Sas; -} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR, - Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t; +typedef union _MPI2_EVENT_HBD_DESCRIPTOR +{ + MPI2_EVENT_HBD_PHY_SAS Sas; +} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR, + Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t; -typedef struct _MPI2_EVENT_DATA_HBD_PHY { - U8 DescriptorType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 Reserved3; /*0x04 */ - MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */ -} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY, - Mpi2EventDataHbdPhy_t, - *pMpi2EventDataMpi2EventDataHbdPhy_t; +typedef struct _MPI2_EVENT_DATA_HBD_PHY +{ + U8 DescriptorType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Reserved3; /* 0x04 */ + MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */ +} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY, + Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t; -/*values for the DescriptorType field */ +/* values for the DescriptorType field */ #define MPI2_EVENT_HBD_DT_SAS (0x01) -/*PCIe Device Status Change Event data (MPI v2.6 and later) */ +/* PCIe Device Status Change Event data (MPI v2.6 and later) */ -typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE { - U16 TaskTag; /*0x00 */ - U8 ReasonCode; /*0x02 */ - U8 PhysicalPort; /*0x03 */ - U8 ASC; /*0x04 */ - U8 ASCQ; /*0x05 */ - U16 DevHandle; /*0x06 */ - U32 Reserved2; /*0x08 */ - U64 WWID; /*0x0C */ - U8 LUN[8]; /*0x14 */ +typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE +{ + U16 TaskTag; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U8 ASC; /* 0x04 */ + U8 ASCQ; /* 0x05 */ + U16 DevHandle; /* 0x06 */ + U32 Reserved2; /* 0x08 */ + U64 WWID; /* 0x0C */ + U8 LUN[8]; /* 0x14 */ } MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, - *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, - Mpi26EventDataPCIeDeviceStatusChange_t, - *pMpi26EventDataPCIeDeviceStatusChange_t; + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + Mpi26EventDataPCIeDeviceStatusChange_t, + MPI2_POINTER pMpi26EventDataPCIeDeviceStatusChange_t; -/*PCIe Device Status Change Event data ReasonCode values */ +/* PCIe Device Status Change Event data ReasonCode values */ #define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05) #define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07) #define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) @@ -1193,66 +1245,66 @@ typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE { #define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11) -/*PCIe Enumeration Event data (MPI v2.6 and later) */ +/* PCIe Enumeration Event data (MPI v2.6 and later) */ -typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION { - U8 Flags; /*0x00 */ - U8 ReasonCode; /*0x01 */ - U8 PhysicalPort; 
/*0x02 */ - U8 Reserved1; /*0x03 */ - U32 EnumerationStatus; /*0x04 */ +typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION +{ + U8 Flags; /* 0x00 */ + U8 ReasonCode; /* 0x01 */ + U8 PhysicalPort; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 EnumerationStatus; /* 0x04 */ } MPI26_EVENT_DATA_PCIE_ENUMERATION, - *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION, - Mpi26EventDataPCIeEnumeration_t, - *pMpi26EventDataPCIeEnumeration_t; + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION, + Mpi26EventDataPCIeEnumeration_t, + MPI2_POINTER pMpi26EventDataPCIeEnumeration_t; -/*PCIe Enumeration Event data Flags values */ +/* PCIe Enumeration Event data Flags values */ #define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02) #define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01) -/*PCIe Enumeration Event data ReasonCode values */ +/* PCIe Enumeration Event data ReasonCode values */ #define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01) #define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02) -/*PCIe Enumeration Event data EnumerationStatus values */ +/* PCIe Enumeration Event data EnumerationStatus values */ #define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) #define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) #define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) -/*PCIe Topology Change List Event data (MPI v2.6 and later) */ +/* PCIe Topology Change List Event data (MPI v2.6 and later) */ /* - *Host code (drivers, BIOS, utilities, etc.) should leave this define set to - *one and check NumEntries at runtime. + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumEntries at runtime. */ #ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT #define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1) #endif -typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY { - U16 AttachedDevHandle; /*0x00 */ - U8 PortStatus; /*0x02 */ - U8 Reserved1; /*0x03 */ - U8 CurrentPortInfo; /*0x04 */ - U8 Reserved2; /*0x05 */ - U8 PreviousPortInfo; /*0x06 */ - U8 Reserved3; /*0x07 */ +typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY +{ + U16 AttachedDevHandle; /* 0x00 */ + U8 PortStatus; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U8 CurrentPortInfo; /* 0x04 */ + U8 Reserved2; /* 0x05 */ + U8 PreviousPortInfo; /* 0x06 */ + U8 Reserved3; /* 0x07 */ } MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, - *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, - Mpi26EventPCIeTopoPortEntry_t, - *pMpi26EventPCIeTopoPortEntry_t; + MPI2_POINTER PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + Mpi26EventPCIeTopoPortEntry_t, + MPI2_POINTER pMpi26EventPCIeTopoPortEntry_t; -/*PCIe Topology Change List Event data PortStatus values */ +/* PCIe Topology Change List Event data PortStatus values */ #define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01) #define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02) #define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03) #define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04) #define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) -/*PCIe Topology Change List Event data defines for CurrentPortInfo and - *PreviousPortInfo - */ +/* PCIe Topology Change List Event data defines for CurrentPortInfo and PreviousPortInfo */ #define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0) #define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) #define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10) @@ -1269,172 +1321,173 @@ typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY { #define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04) #define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05) -typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST { - U16 EnclosureHandle; /*0x00 */ - U16 
SwitchDevHandle; /*0x02 */ - U8 NumPorts; /*0x04 */ - U8 Reserved1; /*0x05 */ - U16 Reserved2; /*0x06 */ - U8 NumEntries; /*0x08 */ - U8 StartPortNum; /*0x09 */ - U8 SwitchStatus; /*0x0A */ - U8 PhysicalPort; /*0x0B */ - MPI26_EVENT_PCIE_TOPO_PORT_ENTRY - PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */ +typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST +{ + U16 EnclosureHandle; /* 0x00 */ + U16 SwitchDevHandle; /* 0x02 */ + U8 NumPorts; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U8 NumEntries; /* 0x08 */ + U8 StartPortNum; /* 0x09 */ + U8 SwitchStatus; /* 0x0A */ + U8 PhysicalPort; /* 0x0B */ + MPI26_EVENT_PCIE_TOPO_PORT_ENTRY PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /* 0x0C */ } MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, - *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, - Mpi26EventDataPCIeTopologyChangeList_t, - *pMpi26EventDataPCIeTopologyChangeList_t; + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + Mpi26EventDataPCIeTopologyChangeList_t, + MPI2_POINTER pMpi26EventDataPCIeTopologyChangeList_t; -/*PCIe Topology Change List Event data SwitchStatus values */ +/* PCIe Topology Change List Event data SwitchStatus values */ #define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) #define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01) #define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) #define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) #define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) -/*PCIe Link Counter Event data (MPI v2.6 and later) */ +/* PCIe Link Counter Event data (MPI v2.6 and later) */ -typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER { - U64 TimeStamp; /*0x00 */ - U32 Reserved1; /*0x08 */ - U8 LinkEventCode; /*0x0C */ - U8 LinkNum; /*0x0D */ - U16 Reserved2; /*0x0E */ - U32 LinkEventInfo; /*0x10 */ - U8 CounterType; /*0x14 */ - U8 ThresholdWindow; /*0x15 */ - U8 TimeUnits; /*0x16 */ - U8 Reserved3; /*0x17 */ - U32 EventThreshold; /*0x18 */ - U16 ThresholdFlags; /*0x1C */ - U16 Reserved4; /*0x1E */ +typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 LinkEventCode; /* 0x0C */ + U8 LinkNum; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U32 LinkEventInfo; /* 0x10 */ + U8 CounterType; /* 0x14 */ + U8 ThresholdWindow; /* 0x15 */ + U8 TimeUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 EventThreshold; /* 0x18 */ + U16 ThresholdFlags; /* 0x1C */ + U16 Reserved4; /* 0x1E */ } MPI26_EVENT_DATA_PCIE_LINK_COUNTER, - *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER, - Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t; + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + Mpi26EventDataPcieLinkCounter_t, MPI2_POINTER pMpi26EventDataPcieLinkCounter_t; -/*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode - *field - */ +/* use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode field */ -/*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType - *field - */ +/* use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */ -/*use MPI26_PCIELINK3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits - *field - */ +/* use MPI26_PCIELINK3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits field */ -/*use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags - *field - */ +/* use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */ /**************************************************************************** -* EventAck message 
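
Aside (not part of the patch): the header's own comment says host code should leave MPI26_EVENT_PCIE_TOPO_PORT_COUNT at 1 and consult NumEntries at runtime, because the IOC delivers a variable-length PortEntry array in an event buffer larger than the one-element declaration. Below is a minimal sketch of that pattern, assuming the MPI headers in this patch are included; the function name is invented, and a real driver would also byte-swap little-endian fields such as AttachedDevHandle (le16_to_cpu). The same pattern applies to the SAS topology change list and its NumEntries field earlier in this hunk.

/* Count ports reported as newly added in one PCIe topology change event. */
static unsigned int example_count_added_ports(
    const MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST *event_data)
{
    unsigned int added = 0;
    U8 i;

    /* NumEntries, not the compile-time PORT_COUNT, bounds the PortEntry
     * array the IOC actually placed in the (over-allocated) event buffer. */
    for (i = 0; i < event_data->NumEntries; i++) {
        const MPI26_EVENT_PCIE_TOPO_PORT_ENTRY *entry =
            &event_data->PortEntry[i];

        if (entry->PortStatus == MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED)
            added++;    /* port StartPortNum + i gained a device */
    }
    return added;
}
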
+* EventAck message ****************************************************************************/ -/*EventAck Request message */ -typedef struct _MPI2_EVENT_ACK_REQUEST { - U16 Reserved1; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Event; /*0x0C */ - U16 Reserved5; /*0x0E */ - U32 EventContext; /*0x10 */ -} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST, - Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t; +/* EventAck Request message */ +typedef struct _MPI2_EVENT_ACK_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Event; /* 0x0C */ + U16 Reserved5; /* 0x0E */ + U32 EventContext; /* 0x10 */ +} MPI2_EVENT_ACK_REQUEST, MPI2_POINTER PTR_MPI2_EVENT_ACK_REQUEST, + Mpi2EventAckRequest_t, MPI2_POINTER pMpi2EventAckRequest_t; + + +/* EventAck Reply message */ +typedef struct _MPI2_EVENT_ACK_REPLY +{ + U16 Reserved1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_EVENT_ACK_REPLY, MPI2_POINTER PTR_MPI2_EVENT_ACK_REPLY, + Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t; -/*EventAck Reply message */ -typedef struct _MPI2_EVENT_ACK_REPLY { - U16 Reserved1; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY, - Mpi2EventAckReply_t, *pMpi2EventAckReply_t; /**************************************************************************** -* SendHostMessage message +* SendHostMessage message ****************************************************************************/ -/*SendHostMessage Request message */ -typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST { - U16 HostDataLength; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved1; /*0x04 */ - U8 Reserved2; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U8 Reserved4; /*0x0C */ - U8 DestVF_ID; /*0x0D */ - U16 Reserved5; /*0x0E */ - U32 Reserved6; /*0x10 */ - U32 Reserved7; /*0x14 */ - U32 Reserved8; /*0x18 */ - U32 Reserved9; /*0x1C */ - U32 Reserved10; /*0x20 */ - U32 HostData[1]; /*0x24 */ +/* SendHostMessage Request message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST +{ + U16 HostDataLength; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U8 Reserved4; /* 0x0C */ + U8 DestVF_ID; /* 0x0D */ + U16 Reserved5; /* 0x0E */ + U32 Reserved6; /* 0x10 */ + U32 Reserved7; /* 0x14 */ + U32 Reserved8; /* 0x18 */ + U32 Reserved9; /* 0x1C */ + U32 Reserved10; /* 0x20 */ + U32 HostData[1]; /* 0x24 */ } MPI2_SEND_HOST_MESSAGE_REQUEST, - *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, - 
Mpi2SendHostMessageRequest_t, - *pMpi2SendHostMessageRequest_t; + MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, + Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t; + + +/* SendHostMessage Reply message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY +{ + U16 HostDataLength; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY, + Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t; -/*SendHostMessage Reply message */ -typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY { - U16 HostDataLength; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved1; /*0x04 */ - U8 Reserved2; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U16 Reserved4; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY, - Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t; /**************************************************************************** -* FWDownload message +* FWDownload message ****************************************************************************/ -/*MPI v2.0 FWDownload Request message */ -typedef struct _MPI2_FW_DOWNLOAD_REQUEST { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 TotalImageSize; /*0x0C */ - U32 Reserved5; /*0x10 */ - MPI2_MPI_SGE_UNION SGL; /*0x14 */ -} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST, - Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest; +/* MPI v2.0 FWDownload Request message */ +typedef struct _MPI2_FW_DOWNLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 TotalImageSize; /* 0x0C */ + U32 Reserved5; /* 0x10 */ + MPI2_MPI_SGE_UNION SGL; /* 0x14 */ +} MPI2_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REQUEST, + Mpi2FWDownloadRequest, MPI2_POINTER pMpi2FWDownloadRequest; #define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) @@ -1446,7 +1499,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST { #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) -#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) /* MPI v2.5 and newer */ #define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D) #define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E) #define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F) @@ -1455,84 +1508,91 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST { #define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12) #define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13) #define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14) -/*MPI v2.6 and newer */ -#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) -#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) +#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) /* MPI v2.6 and newer */ +#define 
MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) /* MPI v2.6 and newer */ #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) -#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF) +#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF) /* MPI v2.6 and newer */ -/*MPI v2.0 FWDownload TransactionContext Element */ -typedef struct _MPI2_FW_DOWNLOAD_TCSGE { - U8 Reserved1; /*0x00 */ - U8 ContextSize; /*0x01 */ - U8 DetailsLength; /*0x02 */ - U8 Flags; /*0x03 */ - U32 Reserved2; /*0x04 */ - U32 ImageOffset; /*0x08 */ - U32 ImageSize; /*0x0C */ -} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE, - Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t; -/*MPI v2.5 FWDownload Request message */ -typedef struct _MPI25_FW_DOWNLOAD_REQUEST { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 TotalImageSize; /*0x0C */ - U32 Reserved5; /*0x10 */ - U32 Reserved6; /*0x14 */ - U32 ImageOffset; /*0x18 */ - U32 ImageSize; /*0x1C */ - MPI25_SGE_IO_UNION SGL; /*0x20 */ -} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST, - Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest; +/* MPI v2.0 FWDownload TransactionContext Element */ +typedef struct _MPI2_FW_DOWNLOAD_TCSGE +{ + U8 Reserved1; /* 0x00 */ + U8 ContextSize; /* 0x01 */ + U8 DetailsLength; /* 0x02 */ + U8 Flags; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 ImageOffset; /* 0x08 */ + U32 ImageSize; /* 0x0C */ +} MPI2_FW_DOWNLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_TCSGE, + Mpi2FWDownloadTCSGE_t, MPI2_POINTER pMpi2FWDownloadTCSGE_t; + + +/* MPI v2.5 FWDownload Request message */ +typedef struct _MPI25_FW_DOWNLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 TotalImageSize; /* 0x0C */ + U32 Reserved5; /* 0x10 */ + U32 Reserved6; /* 0x14 */ + U32 ImageOffset; /* 0x18 */ + U32 ImageSize; /* 0x1C */ + MPI25_SGE_IO_UNION SGL; /* 0x20 */ +} MPI25_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_DOWNLOAD_REQUEST, + Mpi25FWDownloadRequest, MPI2_POINTER pMpi25FWDownloadRequest; + + +/* FWDownload Reply message */ +typedef struct _MPI2_FW_DOWNLOAD_REPLY +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_FW_DOWNLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REPLY, + Mpi2FWDownloadReply_t, MPI2_POINTER pMpi2FWDownloadReply_t; -/*FWDownload Reply message */ -typedef struct _MPI2_FW_DOWNLOAD_REPLY { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY, - Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t; /**************************************************************************** -* FWUpload message +* FWUpload message 
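
Aside (not part of the patch): a hedged sketch of staging one chunk of an image with the MPI v2.5 FWDownload request defined above. MPI2_FUNCTION_FW_DOWNLOAD and MPI2_FW_DOWNLOAD_ITYPE_FW live in mpi2.h and the un-diffed part of this header, so treat them as assumptions here; SGL construction and endian conversion of multi-byte fields are driver plumbing and are elided.

#include <string.h>    /* memset(); a kernel driver uses its own helpers */

static void example_stage_fw_chunk(MPI25_FW_DOWNLOAD_REQUEST *req,
                                   U32 total_size, U32 offset,
                                   U32 chunk_size, int last_chunk)
{
    memset(req, 0, sizeof(*req));
    req->Function = MPI2_FUNCTION_FW_DOWNLOAD;   /* from mpi2.h */
    req->ImageType = MPI2_FW_DOWNLOAD_ITYPE_FW;  /* plain firmware image */
    req->TotalImageSize = total_size;            /* size of the whole image */
    req->ImageOffset = offset;                   /* where this chunk belongs */
    req->ImageSize = chunk_size;                 /* bytes carried this time */
    if (last_chunk)
        req->MsgFlags |= MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
    /* req->SGL must describe the host buffer holding the chunk. */
}

Note how the v2.5 layout carries ImageOffset and ImageSize in the request itself, whereas MPI v2.0 passes them in the separate MPI2_FW_DOWNLOAD_TCSGE transaction-context element shown above.
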
****************************************************************************/ -/*MPI v2.0 FWUpload Request message */ -typedef struct _MPI2_FW_UPLOAD_REQUEST { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 Reserved5; /*0x0C */ - U32 Reserved6; /*0x10 */ - MPI2_MPI_SGE_UNION SGL; /*0x14 */ -} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST, - Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t; +/* MPI v2.0 FWUpload Request message */ +typedef struct _MPI2_FW_UPLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + MPI2_MPI_SGE_UNION SGL; /* 0x14 */ +} MPI2_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REQUEST, + Mpi2FWUploadRequest_t, MPI2_POINTER pMpi2FWUploadRequest_t; #define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) #define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) @@ -1553,197 +1613,204 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST { #define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13) #define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14) +/* MPI v2.0 FWUpload TransactionContext Element */ +typedef struct _MPI2_FW_UPLOAD_TCSGE +{ + U8 Reserved1; /* 0x00 */ + U8 ContextSize; /* 0x01 */ + U8 DetailsLength; /* 0x02 */ + U8 Flags; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 ImageOffset; /* 0x08 */ + U32 ImageSize; /* 0x0C */ +} MPI2_FW_UPLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_UPLOAD_TCSGE, + Mpi2FWUploadTCSGE_t, MPI2_POINTER pMpi2FWUploadTCSGE_t; -/*MPI v2.0 FWUpload TransactionContext Element */ -typedef struct _MPI2_FW_UPLOAD_TCSGE { - U8 Reserved1; /*0x00 */ - U8 ContextSize; /*0x01 */ - U8 DetailsLength; /*0x02 */ - U8 Flags; /*0x03 */ - U32 Reserved2; /*0x04 */ - U32 ImageOffset; /*0x08 */ - U32 ImageSize; /*0x0C */ -} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE, - Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t; -/*MPI v2.5 FWUpload Request message */ -typedef struct _MPI25_FW_UPLOAD_REQUEST { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 Reserved5; /*0x0C */ - U32 Reserved6; /*0x10 */ - U32 Reserved7; /*0x14 */ - U32 ImageOffset; /*0x18 */ - U32 ImageSize; /*0x1C */ - MPI25_SGE_IO_UNION SGL; /*0x20 */ -} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST, - Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t; +/* MPI v2.5 FWUpload Request message */ +typedef struct _MPI25_FW_UPLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + U32 Reserved7; /* 0x14 */ + U32 ImageOffset; /* 0x18 */ + U32 ImageSize; /* 0x1C */ + MPI25_SGE_IO_UNION SGL; /* 0x20 */ +} MPI25_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_UPLOAD_REQUEST, + Mpi25FWUploadRequest_t, MPI2_POINTER pMpi25FWUploadRequest_t; -/*FWUpload Reply message */ -typedef struct 
_MPI2_FW_UPLOAD_REPLY { - U8 ImageType; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U32 ActualImageSize; /*0x14 */ -} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY, - Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t; + +/* FWUpload Reply message */ +typedef struct _MPI2_FW_UPLOAD_REPLY +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ActualImageSize; /* 0x14 */ +} MPI2_FW_UPLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REPLY, + Mpi2FWUploadReply_t, MPI2_POINTER pMPi2FWUploadReply_t; /**************************************************************************** -* PowerManagementControl message +* PowerManagementControl message ****************************************************************************/ -/*PowerManagementControl Request message */ -typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST { - U8 Feature; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U8 Parameter1; /*0x0C */ - U8 Parameter2; /*0x0D */ - U8 Parameter3; /*0x0E */ - U8 Parameter4; /*0x0F */ - U32 Reserved5; /*0x10 */ - U32 Reserved6; /*0x14 */ -} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, - Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t; +/* PowerManagementControl Request message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST +{ + U8 Feature; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Parameter1; /* 0x0C */ + U8 Parameter2; /* 0x0D */ + U8 Parameter3; /* 0x0E */ + U8 Parameter4; /* 0x0F */ + U32 Reserved5; /* 0x10 */ + U32 Reserved6; /* 0x14 */ +} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, + Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t; -/*defines for the Feature field */ +/* defines for the Feature field */ #define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) #define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) -#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */ +#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */ #define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) -#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) +#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) /* reserved in MPI 2.0 */ #define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) #define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) -/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ -/*Parameter1 contains a PHY number */ -/*Parameter2 indicates power condition action using these defines */ +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ +/* Parameter1 contains a PHY number 
*/ +/* Parameter2 indicates power condition action using these defines */ #define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01) #define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) #define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) -/*Parameter3 and Parameter4 are reserved */ +/* Parameter3 and Parameter4 are reserved */ -/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION - * Feature */ -/*Parameter1 contains SAS port width modulation group number */ -/*Parameter2 indicates IOC action using these defines */ +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION Feature */ +/* Parameter1 contains SAS port width modulation group number */ +/* Parameter2 indicates IOC action using these defines */ #define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) #define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) #define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) -/*Parameter3 indicates desired modulation level using these defines */ +/* Parameter3 indicates desired modulation level using these defines */ #define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) #define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) #define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) #define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) -/*Parameter4 is reserved */ +/* Parameter4 is reserved */ -/*this next set (_PCIE_LINK) is obsolete */ -/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ -/*Parameter1 indicates desired PCIe link speed using these defines */ -#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */ -#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */ -#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */ -/*Parameter2 indicates desired PCIe link width using these defines */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */ -/*Parameter3 and Parameter4 are reserved */ +/* this next set (_PCIE_LINK) is obsolete */ +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ +/* Parameter1 indicates desired PCIe link speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */ +/* Parameter2 indicates desired PCIe link width using these defines */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */ +/* Parameter3 and Parameter4 are reserved */ -/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ -/*Parameter1 indicates desired IOC hardware clock speed using these defines */ +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ +/* Parameter1 indicates desired IOC hardware clock speed using these defines */ #define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) #define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) #define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) #define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) -/*Parameter2, Parameter3, and Parameter4 are reserved */ +/* Parameter2, Parameter3, and Parameter4 are reserved */ -/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/ 
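
Aside (not part of the patch): a minimal sketch of driving the IOC_SPEED feature through the PowerManagementControl request defined above, assuming these MPI headers are included. MPI2_FUNCTION_PWR_MGMT_CONTROL comes from mpi2.h, and actually posting the message to the IOC is driver-specific and elided.

static void example_halve_ioc_speed(Mpi2PwrMgmtControlRequest_t *req)
{
    memset(req, 0, sizeof(*req));   /* <string.h> or kernel equivalent */
    req->Function = MPI2_FUNCTION_PWR_MGMT_CONTROL;   /* from mpi2.h */
    req->Feature = MPI2_PM_CONTROL_FEATURE_IOC_SPEED;
    /* Per the usage notes above, Parameter1 selects the clock rate for
     * this feature and Parameter2..Parameter4 stay reserved (zero). */
    req->Parameter1 = MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED;
}
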
-/*Parameter1 indicates host action regarding global power management mode */ +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature */ +/* Parameter1 indicates host action regarding global power management mode */ #define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) #define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) #define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) -/*Parameter2 indicates the requested global power management mode */ +/* Parameter2 indicates the requested global power management mode */ #define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) #define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) #define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) -/*Parameter3 and Parameter4 are reserved */ +/* Parameter3 and Parameter4 are reserved */ + + +/* PowerManagementControl Reply message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY +{ + U8 Feature; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY, + Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t; -/*PowerManagementControl Reply message */ -typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY { - U8 Feature; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY, - Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t; /**************************************************************************** * IO Unit Control messages (MPI v2.6 and later only.) 
****************************************************************************/ /* IO Unit Control Request Message */ -typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 IOCParameter; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U8 PhyNum; /* 0x0E */ - U8 PrimFlags; /* 0x0F */ - U32 Primitive; /* 0x10 */ - U8 LookupMethod; /* 0x14 */ - U8 Reserved5; /* 0x15 */ - U16 SlotNumber; /* 0x16 */ - U64 LookupAddress; /* 0x18 */ - U32 IOCParameterValue; /* 0x20 */ - U32 Reserved7; /* 0x24 */ - U32 Reserved8; /* 0x28 */ +typedef struct _MPI26_IOUNIT_CONTROL_REQUEST +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ } MPI26_IOUNIT_CONTROL_REQUEST, - *PTR_MPI26_IOUNIT_CONTROL_REQUEST, - Mpi26IoUnitControlRequest_t, - *pMpi26IoUnitControlRequest_t; + MPI2_POINTER PTR_MPI26_IOUNIT_CONTROL_REQUEST, + Mpi26IoUnitControlRequest_t, MPI2_POINTER pMpi26IoUnitControlRequest_t; /* values for the Operation field */ #define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02) @@ -1782,24 +1849,24 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { /* IO Unit Control Reply Message */ -typedef struct _MPI26_IOUNIT_CONTROL_REPLY { - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 IOCParameter; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI26_IOUNIT_CONTROL_REPLY, - *PTR_MPI26_IOUNIT_CONTROL_REPLY, - Mpi26IoUnitControlReply_t, - *pMpi26IoUnitControlReply_t; +typedef struct _MPI26_IOUNIT_CONTROL_REPLY +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI26_IOUNIT_CONTROL_REPLY, MPI2_POINTER PTR_MPI26_IOUNIT_CONTROL_REPLY, + Mpi26IoUnitControlReply_t, MPI2_POINTER pMpi26IoUnitControlReply_t; #endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h old mode 100644 new mode 100755 index bb7b79cfa558..612dbcc940a9 --- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h @@ -1,32 +1,32 @@ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. * * - * Name: mpi2_pci.h - * Title: MPI PCIe Attached Devices structures and definitions. - * Creation Date: October 9, 2012 + * Name: mpi2_pci.h + * Title: MPI PCIe Attached Devices structures and definitions. 
+ * Creation Date: October 9, 2012 * - * mpi2_pci.h Version: 02.00.04 + * mpi2_pci.h Version: 02.00.04 * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 03-16-15 02.00.00 Initial version. - * 02-17-16 02.00.01 Removed AHCI support. - * Removed SOP support. - * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to - * NVME Encapsulated Request. - * 07-22-18 02.00.03 Updted flags field for NVME Encapsulated req - * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI - * Shortten some defines to be compatible with DOS - * -------------------------------------------------------------------------- + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 03-16-15 02.00.00 Initial version. + * 02-17-16 02.00.01 Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to + * NVME Encapsulated Request. + * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req + * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI + * Shortten some defines to be compatible with DOS + * -------------------------------------------------------------------------- */ #ifndef MPI2_PCI_H @@ -34,8 +34,8 @@ /* - *Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event - *data and PCIe Configuration pages. + * Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event + * data and PCIe Configuration pages. 
*/ #define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010) @@ -45,38 +45,40 @@ #define MPI26_PCIE_DEVINFO_NVME (0x00000003) #define MPI26_PCIE_DEVINFO_SCSI (0x00000004) + /**************************************************************************** * NVMe Encapsulated message ****************************************************************************/ -/*NVME Encapsulated Request Message */ -typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST { - U16 DevHandle; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 EncapsulatedCommandLength; /*0x04 */ - U8 Reserved1; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved2; /*0x0A */ - U32 Reserved3; /*0x0C */ - U64 ErrorResponseBaseAddress; /*0x10 */ - U16 ErrorResponseAllocationLength; /*0x18 */ - U16 Flags; /*0x1A */ - U32 DataLength; /*0x1C */ - U8 NVMe_Command[4]; /*0x20 */ +/* NVME Encapsulated Request Message */ +typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 EncapsulatedCommandLength; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U64 ErrorResponseBaseAddress; /* 0x10 */ + U16 ErrorResponseAllocationLength; /* 0x18 */ + U16 Flags; /* 0x1A */ + U32 DataLength; /* 0x1C */ + U8 NVMe_Command[4]; /* 0x20 */ /* variable length */ -} MPI26_NVME_ENCAPSULATED_REQUEST, *PTR_MPI26_NVME_ENCAPSULATED_REQUEST, - Mpi26NVMeEncapsulatedRequest_t, *pMpi26NVMeEncapsulatedRequest_t; +} MPI26_NVME_ENCAPSULATED_REQUEST, MPI2_POINTER PTR_MPI26_NVME_ENCAPSULATED_REQUEST, + Mpi26NVMeEncapsulatedRequest_t, MPI2_POINTER pMpi26NVMeEncapsulatedRequest_t; -/*defines for the Flags field */ +/* defines for the Flags field */ #define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020) -/*Submission Queue Type*/ +/* Submission Queue Type*/ #define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010) #define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) #define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) -/*Error Response Address Space */ +/* Error Response Address Space */ #define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C) #define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000) #define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008) @@ -88,26 +90,29 @@ typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST { #define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003) -/*NVMe Encapuslated Reply Message */ -typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY { - U16 DevHandle; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 EncapsulatedCommandLength; /*0x04 */ - U8 Reserved1; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved2; /*0x0A */ - U16 Reserved3; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U16 ErrorResponseCount; /*0x14 */ - U16 Reserved4; /*0x16 */ +/* NVMe Encapuslated Reply Message */ +typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY +{ + U16 DevHandle; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 EncapsulatedCommandLength; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 Reserved3; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 ErrorResponseCount; /* 0x14 */ + U16 Reserved4; /* 0x16 */ } MPI26_NVME_ENCAPSULATED_ERROR_REPLY, - *PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY, - Mpi26NVMeEncapsulatedErrorReply_t, - 
*pMpi26NVMeEncapsulatedErrorReply_t; + MPI2_POINTER PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY, + Mpi26NVMeEncapsulatedErrorReply_t, + MPI2_POINTER pMpi26NVMeEncapsulatedErrorReply_t; #endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ra.h b/drivers/scsi/mpt3sas/mpi/mpi2_ra.h new file mode 100755 index 000000000000..b7f10e96ba54 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ra.h @@ -0,0 +1,86 @@ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_ra.h + * Title: MPI RAID Accelerator messages and structures + * Creation Date: April 13, 2009 + * + * mpi2_ra.h Version: 02.00.01 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 05-06-09 02.00.00 Initial version. + * 11-18-14 02.00.01 Updated copyright information. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RA_H +#define MPI2_RA_H + +/* generic structure for RAID Accelerator Control Block */ +typedef struct _MPI2_RAID_ACCELERATOR_CONTROL_BLOCK +{ + U32 Reserved[8]; /* 0x00 */ + U32 RaidAcceleratorCDB[1]; /* 0x20 */ +} MPI2_RAID_ACCELERATOR_CONTROL_BLOCK, + MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_CONTROL_BLOCK, + Mpi2RAIDAcceleratorControlBlock_t, + MPI2_POINTER pMpi2RAIDAcceleratorControlBlock_t; + + +/****************************************************************************** +* +* RAID Accelerator Messages +* +*******************************************************************************/ + +/* RAID Accelerator Request Message */ +typedef struct _MPI2_RAID_ACCELERATOR_REQUEST +{ + U16 Reserved0; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U64 RaidAcceleratorControlBlockAddress; /* 0x0C */ + U8 DmaEngineNumber; /* 0x14 */ + U8 Reserved4; /* 0x15 */ + U16 Reserved5; /* 0x16 */ + U32 Reserved6; /* 0x18 */ + U32 Reserved7; /* 0x1C */ + U32 Reserved8; /* 0x20 */ +} MPI2_RAID_ACCELERATOR_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REQUEST, + Mpi2RAIDAcceleratorRequest_t, MPI2_POINTER pMpi2RAIDAcceleratorRequest_t; + + +/* RAID Accelerator Error Reply Message */ +typedef struct _MPI2_RAID_ACCELERATOR_REPLY +{ + U16 Reserved0; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ProductSpecificData[3]; /* 0x14 */ +} MPI2_RAID_ACCELERATOR_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REPLY, + Mpi2RAIDAcceleratorReply_t, MPI2_POINTER pMpi2RAIDAcceleratorReply_t; + + +#endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h old mode 100644 new mode 100755 index b770eb516c14..af56ac4065ff --- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -1,39 +1,38 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. 
* * - * Name: mpi2_raid.h - * Title: MPI Integrated RAID messages and structures - * Creation Date: April 26, 2007 + * Name: mpi2_raid.h + * Title: MPI Integrated RAID messages and structures + * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.11 + * mpi2_raid.h Version: 02.00.11 * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Modifications to RAID Action request and reply, - * including the Actions and ActionData. - * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. - * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that - * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT - * can be sized by the build environment. - * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of - * VolumeCreationFlags and marked the old one as obsolete. - * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. - * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with - * related structures and defines. - * Added product-specific range to RAID Action values. - * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. - * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. - * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. - * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. - * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. - * 11-18-14 02.00.11 Updated copyright information. - * -------------------------------------------------------------------------- + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. + * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. + * 11-18-14 02.00.11 Updated copyright information. 
+ * -------------------------------------------------------------------------- */ #ifndef MPI2_RAID_H @@ -41,130 +40,134 @@ /***************************************************************************** * -* Integrated RAID Messages +* Integrated RAID Messages * *****************************************************************************/ /**************************************************************************** -* RAID Action messages +* RAID Action messages ****************************************************************************/ /* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ #define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) -/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ +/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ #define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) #define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) -/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for - *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ +/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ -/*ActionDataWord defines for use with - *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ +/* ActionDataWord defines for use with MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ #define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) -/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ -typedef struct _MPI2_RAID_ACTION_RATE_DATA { - U8 RateToChange; /*0x00 */ - U8 RateOrMode; /*0x01 */ - U16 DataScrubDuration; /*0x02 */ -} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA, - Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t; +/* ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ +typedef struct _MPI2_RAID_ACTION_RATE_DATA +{ + U8 RateToChange; /* 0x00 */ + U8 RateOrMode; /* 0x01 */ + U16 DataScrubDuration; /* 0x02 */ +} MPI2_RAID_ACTION_RATE_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_RATE_DATA, + Mpi2RaidActionRateData_t, MPI2_POINTER pMpi2RaidActionRateData_t; #define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) #define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) #define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) -/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ -typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION { - U8 RAIDFunction; /*0x00 */ - U8 Flags; /*0x01 */ - U16 Reserved1; /*0x02 */ +/* ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION +{ + U8 RAIDFunction; /* 0x00 */ + U8 Flags; /* 0x01 */ + U16 Reserved1; /* 0x02 */ } MPI2_RAID_ACTION_START_RAID_FUNCTION, - *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, - Mpi2RaidActionStartRaidFunction_t, - *pMpi2RaidActionStartRaidFunction_t; + MPI2_POINTER PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, + Mpi2RaidActionStartRaidFunction_t, + MPI2_POINTER pMpi2RaidActionStartRaidFunction_t; -/*defines for the RAIDFunction field */ +/* defines for the RAIDFunction field */ #define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) #define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) #define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) -/*defines for the Flags field */ +/* defines for the Flags field */ #define MPI2_RAID_ACTION_START_NEW (0x00) #define MPI2_RAID_ACTION_START_RESUME (0x01) -/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ -typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION { - U8 RAIDFunction; /*0x00 */ - U8 Flags; /*0x01 */ - 
U16 Reserved1; /*0x02 */ +/* ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION +{ + U8 RAIDFunction; /* 0x00 */ + U8 Flags; /* 0x01 */ + U16 Reserved1; /* 0x02 */ } MPI2_RAID_ACTION_STOP_RAID_FUNCTION, - *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, - Mpi2RaidActionStopRaidFunction_t, - *pMpi2RaidActionStopRaidFunction_t; + MPI2_POINTER PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + Mpi2RaidActionStopRaidFunction_t, + MPI2_POINTER pMpi2RaidActionStopRaidFunction_t; -/*defines for the RAIDFunction field */ +/* defines for the RAIDFunction field */ #define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) #define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) #define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) -/*defines for the Flags field */ +/* defines for the Flags field */ #define MPI2_RAID_ACTION_STOP_ABORT (0x00) #define MPI2_RAID_ACTION_STOP_PAUSE (0x01) -/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ -typedef struct _MPI2_RAID_ACTION_HOT_SPARE { - U8 HotSparePool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 DevHandle; /*0x02 */ -} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE, - Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t; +/* ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ +typedef struct _MPI2_RAID_ACTION_HOT_SPARE +{ + U8 HotSparePool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 DevHandle; /* 0x02 */ +} MPI2_RAID_ACTION_HOT_SPARE, MPI2_POINTER PTR_MPI2_RAID_ACTION_HOT_SPARE, + Mpi2RaidActionHotSpare_t, MPI2_POINTER pMpi2RaidActionHotSpare_t; -/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ -typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE { - U8 Flags; /*0x00 */ - U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */ - U16 Reserved1; /*0x02 */ +/* ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ +typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE +{ + U8 Flags; /* 0x00 */ + U8 DeviceFirmwareUpdateModeTimeout; /* 0x01 */ + U16 Reserved1; /* 0x02 */ } MPI2_RAID_ACTION_FW_UPDATE_MODE, - *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, - Mpi2RaidActionFwUpdateMode_t, - *pMpi2RaidActionFwUpdateMode_t; + MPI2_POINTER PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, + Mpi2RaidActionFwUpdateMode_t, MPI2_POINTER pMpi2RaidActionFwUpdateMode_t; -/*ActionDataWord defines for use with - *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ +/* ActionDataWord defines for use with MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ #define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) #define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) -typedef union _MPI2_RAID_ACTION_DATA { - U32 Word; - MPI2_RAID_ACTION_RATE_DATA Rates; - MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; - MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; - MPI2_RAID_ACTION_HOT_SPARE HotSpare; - MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; -} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA, - Mpi2RaidActionData_t, *pMpi2RaidActionData_t; +typedef union _MPI2_RAID_ACTION_DATA +{ + U32 Word; + MPI2_RAID_ACTION_RATE_DATA Rates; + MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + MPI2_RAID_ACTION_HOT_SPARE HotSpare; + MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +} MPI2_RAID_ACTION_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_DATA, + Mpi2RaidActionData_t, MPI2_POINTER pMpi2RaidActionData_t; -/*RAID Action Request Message */ -typedef struct _MPI2_RAID_ACTION_REQUEST { - U8 Action; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 
*/ - U8 Function; /*0x03 */ - U16 VolDevHandle; /*0x04 */ - U8 PhysDiskNum; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved2; /*0x0A */ - U32 Reserved3; /*0x0C */ - MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */ - MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */ -} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST, - Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t; -/*RAID Action request Action values */ +/* RAID Action Request Message */ +typedef struct _MPI2_RAID_ACTION_REQUEST +{ + U8 Action; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 VolDevHandle; /* 0x04 */ + U8 PhysDiskNum; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + MPI2_RAID_ACTION_DATA ActionDataWord; /* 0x10 */ + MPI2_SGE_SIMPLE_UNION ActionDataSGE; /* 0x14 */ +} MPI2_RAID_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACTION_REQUEST, + Mpi2RaidActionRequest_t, MPI2_POINTER pMpi2RaidActionRequest_t; + +/* RAID Action request Action values */ #define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) #define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) @@ -190,99 +193,107 @@ typedef struct _MPI2_RAID_ACTION_REQUEST { #define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) #define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) -/*RAID Volume Creation Structure */ + +/* RAID Volume Creation Structure */ /* - *The following define can be customized for the targeted product. + * The following define can be customized for the targeted product. */ #ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS #define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) #endif -typedef struct _MPI2_RAID_VOLUME_PHYSDISK { - U8 RAIDSetNum; /*0x00 */ - U8 PhysDiskMap; /*0x01 */ - U16 PhysDiskDevHandle; /*0x02 */ -} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK, - Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t; +typedef struct _MPI2_RAID_VOLUME_PHYSDISK +{ + U8 RAIDSetNum; /* 0x00 */ + U8 PhysDiskMap; /* 0x01 */ + U16 PhysDiskDevHandle; /* 0x02 */ +} MPI2_RAID_VOLUME_PHYSDISK, MPI2_POINTER PTR_MPI2_RAID_VOLUME_PHYSDISK, + Mpi2RaidVolumePhysDisk_t, MPI2_POINTER pMpi2RaidVolumePhysDisk_t; -/*defines for the PhysDiskMap field */ +/* defines for the PhysDiskMap field */ #define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) #define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) -typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT { - U8 NumPhysDisks; /*0x00 */ - U8 VolumeType; /*0x01 */ - U16 Reserved1; /*0x02 */ - U32 VolumeCreationFlags; /*0x04 */ - U32 VolumeSettings; /*0x08 */ - U8 Reserved2; /*0x0C */ - U8 ResyncRate; /*0x0D */ - U16 DataScrubDuration; /*0x0E */ - U64 VolumeMaxLBA; /*0x10 */ - U32 StripeSize; /*0x18 */ - U8 Name[16]; /*0x1C */ - MPI2_RAID_VOLUME_PHYSDISK - PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */ +typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT +{ + U8 NumPhysDisks; /* 0x00 */ + U8 VolumeType; /* 0x01 */ + U16 Reserved1; /* 0x02 */ + U32 VolumeCreationFlags; /* 0x04 */ + U32 VolumeSettings; /* 0x08 */ + U8 Reserved2; /* 0x0C */ + U8 ResyncRate; /* 0x0D */ + U16 DataScrubDuration; /* 0x0E */ + U64 VolumeMaxLBA; /* 0x10 */ + U32 StripeSize; /* 0x18 */ + U8 Name[16]; /* 0x1C */ + MPI2_RAID_VOLUME_PHYSDISK PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS];/* 0x2C */ } MPI2_RAID_VOLUME_CREATION_STRUCT, - *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, - Mpi2RaidVolumeCreationStruct_t, - *pMpi2RaidVolumeCreationStruct_t; + MPI2_POINTER PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, 
+ Mpi2RaidVolumeCreationStruct_t, MPI2_POINTER pMpi2RaidVolumeCreationStruct_t; -/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ +/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ -/*defines for the VolumeCreationFlags field */ +/* defines for the VolumeCreationFlags field */ #define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) -#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) /* MPI 2.0 only */ #define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) #define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) -/*The following is an obsolete define. - *It must be shifted left 24 bits in order to set the proper bit. +/* The following is an obsolete define. + * It must be shifted left 24 bits in order to set the proper bit. */ #define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) -/*RAID Online Capacity Expansion Structure */ -typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION { - U32 Flags; /*0x00 */ - U16 DevHandle0; /*0x04 */ - U16 Reserved1; /*0x06 */ - U16 DevHandle1; /*0x08 */ - U16 Reserved2; /*0x0A */ +/* RAID Online Capacity Expansion Structure */ + +typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION +{ + U32 Flags; /* 0x00 */ + U16 DevHandle0; /* 0x04 */ + U16 Reserved1; /* 0x06 */ + U16 DevHandle1; /* 0x08 */ + U16 Reserved2; /* 0x0A */ } MPI2_RAID_ONLINE_CAPACITY_EXPANSION, - *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, - Mpi2RaidOnlineCapacityExpansion_t, - *pMpi2RaidOnlineCapacityExpansion_t; + MPI2_POINTER PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + Mpi2RaidOnlineCapacityExpansion_t, + MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t; -/*RAID Compatibility Input Structure */ -typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT { - U16 SourceDevHandle; /*0x00 */ - U16 CandidateDevHandle; /*0x02 */ - U32 Flags; /*0x04 */ - U32 Reserved1; /*0x08 */ - U32 Reserved2; /*0x0C */ +/* RAID Compatibility Input Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT +{ + U16 SourceDevHandle; /* 0x00 */ + U16 CandidateDevHandle; /* 0x02 */ + U32 Flags; /* 0x04 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ } MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, - *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, - Mpi2RaidCompatibilityInputStruct_t, - *pMpi2RaidCompatibilityInputStruct_t; + MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + Mpi2RaidCompatibilityInputStruct_t, + MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t; -/*defines for RAID Compatibility Structure Flags field */ +/* defines for RAID Compatibility Structure Flags field */ #define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) #define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) -/*RAID Volume Indicator Structure */ -typedef struct _MPI2_RAID_VOL_INDICATOR { - U64 TotalBlocks; /*0x00 */ - U64 BlocksRemaining; /*0x08 */ - U32 Flags; /*0x10 */ - U32 ElapsedSeconds; /* 0x14 */ -} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR, - Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t; +/* RAID Volume Indicator Structure */ -/*defines for RAID Volume Indicator Flags field */ +typedef struct _MPI2_RAID_VOL_INDICATOR +{ + U64 TotalBlocks; /* 0x00 */ + U64 BlocksRemaining; /* 0x08 */ + U32 Flags; /* 0x10 */ + U32 ElapsedSeconds; /* 0x14 */ +} MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR, + Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t; + +/* defines for RAID Volume Indicator Flags field */ #define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID 
(0x80000000) + #define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) #define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) @@ -290,26 +301,28 @@ typedef struct _MPI2_RAID_VOL_INDICATOR { #define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) #define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) -/*RAID Compatibility Result Structure */ -typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { - U8 State; /*0x00 */ - U8 Reserved1; /*0x01 */ - U16 Reserved2; /*0x02 */ - U32 GenericAttributes; /*0x04 */ - U32 OEMSpecificAttributes; /*0x08 */ - U32 Reserved3; /*0x0C */ - U32 Reserved4; /*0x10 */ +/* RAID Compatibility Result Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT +{ + U8 State; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 GenericAttributes; /* 0x04 */ + U32 OEMSpecificAttributes; /* 0x08 */ + U32 Reserved3; /* 0x0C */ + U32 Reserved4; /* 0x10 */ } MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, - *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, - Mpi2RaidCompatibilityResultStruct_t, - *pMpi2RaidCompatibilityResultStruct_t; + MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + Mpi2RaidCompatibilityResultStruct_t, + MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t; -/*defines for RAID Compatibility Result Structure State field */ +/* defines for RAID Compatibility Result Structure State field */ #define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) #define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) -/*defines for RAID Compatibility Result Structure GenericAttributes field */ +/* defines for RAID Compatibility Result Structure GenericAttributes field */ #define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) #define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) @@ -320,37 +333,42 @@ typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { #define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) #define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) -/*RAID Action Reply ActionData union */ -typedef union _MPI2_RAID_ACTION_REPLY_DATA { - U32 Word[6]; - MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; - U16 VolDevHandle; - U8 VolumeState; - U8 PhysDiskNum; - MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; -} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA, - Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t; -/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for - *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ +/* RAID Action Reply ActionData union */ +typedef union _MPI2_RAID_ACTION_REPLY_DATA +{ + U32 Word[6]; + MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA, + Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t; + +/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + + +/* RAID Action Reply Message */ +typedef struct _MPI2_RAID_ACTION_REPLY +{ + U8 Action; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 VolDevHandle; /* 0x04 */ + U8 PhysDiskNum; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 Reserved3; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + MPI2_RAID_ACTION_REPLY_DATA ActionData; /* 0x14 */ +} MPI2_RAID_ACTION_REPLY, MPI2_POINTER 
PTR_MPI2_RAID_ACTION_REPLY, + Mpi2RaidActionReply_t, MPI2_POINTER pMpi2RaidActionReply_t; -/*RAID Action Reply Message */ -typedef struct _MPI2_RAID_ACTION_REPLY { - U8 Action; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 VolDevHandle; /*0x04 */ - U8 PhysDiskNum; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved2; /*0x0A */ - U16 Reserved3; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */ -} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY, - Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t; #endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h old mode 100644 new mode 100755 index 16c922a8a02b..3b515efef2c8 --- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -1,49 +1,48 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. * * - * Name: mpi2_sas.h - * Title: MPI Serial Attached SCSI structures and definitions - * Creation Date: February 9, 2007 + * Name: mpi2_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: February 9, 2007 * - * mpi2_sas.h Version: 02.00.10 + * mpi2_sas.h Version: 02.00.10 * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit - * Control Request. - * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control - * Request. - * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST - * to MPI2_SGE_IO_UNION since it supports chained SGLs. - * 05-12-10 02.00.04 Modified some comments. - * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. - * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. - * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA - * Passthrough Request message. - * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete - * for anything newer than MPI v2.0. - * 11-18-14 02.00.09 Updated copyright information. - * 03-16-15 02.00.10 Updated for MPI v2.6. - * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. - * -------------------------------------------------------------------------- + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. 
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. + * -------------------------------------------------------------------------- */ #ifndef MPI2_SAS_H #define MPI2_SAS_H /* - *Values for SASStatus. + * Values for SASStatus. */ #define MPI2_SASSTATUS_SUCCESS (0x00) #define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) @@ -67,9 +66,10 @@ #define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) #define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + /* - *Values for the SAS DeviceInfo field used in SAS Device Status Change Event - *data and SAS Configuration pages. + * Values for the SAS DeviceInfo field used in SAS Device Status Change Event + * data and SAS Configuration pages. */ #define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) #define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) @@ -90,170 +90,180 @@ #define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) #define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + /***************************************************************************** * -* SAS Messages +* SAS Messages * *****************************************************************************/ /**************************************************************************** -* SMP Passthrough messages +* SMP Passthrough messages ****************************************************************************/ -/*SMP Passthrough Request Message */ -typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST { - U8 PassthroughFlags; /*0x00 */ - U8 PhysicalPort; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 RequestDataLength; /*0x04 */ - U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ - U32 Reserved2; /*0x0C */ - U64 SASAddress; /*0x10 */ - U32 Reserved3; /*0x18 */ - U32 Reserved4; /*0x1C */ - MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */ -} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST, - Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t; +/* SMP Passthrough Request Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST +{ + U8 PassthroughFlags; /* 0x00 */ + U8 PhysicalPort; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 RequestDataLength; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. 
*/ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Reserved2; /* 0x0C */ + U64 SASAddress; /* 0x10 */ + U32 Reserved3; /* 0x18 */ + U32 Reserved4; /* 0x1C */ + MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */ /* MPI v2.5: IEEE Simple 64 elements only */ +} MPI2_SMP_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REQUEST, + Mpi2SmpPassthroughRequest_t, MPI2_POINTER pMpi2SmpPassthroughRequest_t; -/*values for PassthroughFlags field */ +/* values for PassthroughFlags field */ #define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) -/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ +/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ -/*SMP Passthrough Reply Message */ -typedef struct _MPI2_SMP_PASSTHROUGH_REPLY { - U8 PassthroughFlags; /*0x00 */ - U8 PhysicalPort; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 ResponseDataLength; /*0x04 */ - U8 SGLFlags; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ - U8 Reserved2; /*0x0C */ - U8 SASStatus; /*0x0D */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U32 Reserved3; /*0x14 */ - U8 ResponseData[4]; /*0x18 */ -} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY, - Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t; -/*values for PassthroughFlags field */ +/* SMP Passthrough Reply Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REPLY +{ + U8 PassthroughFlags; /* 0x00 */ + U8 PhysicalPort; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 ResponseDataLength; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U8 Reserved2; /* 0x0C */ + U8 SASStatus; /* 0x0D */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 Reserved3; /* 0x14 */ + U8 ResponseData[4]; /* 0x18 */ +} MPI2_SMP_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REPLY, + Mpi2SmpPassthroughReply_t, MPI2_POINTER pMpi2SmpPassthroughReply_t; + +/* values for PassthroughFlags field */ #define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) -/*values for SASStatus field are at the top of this file */ +/* values for SASStatus field are at the top of this file */ + /**************************************************************************** -* SATA Passthrough messages +* SATA Passthrough messages ****************************************************************************/ -typedef union _MPI2_SATA_PT_SGE_UNION { - MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */ - MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */ - MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; - MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */ - MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */ -} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION, - Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t; +typedef union _MPI2_SATA_PT_SGE_UNION +{ + MPI2_SGE_SIMPLE_UNION MpiSimple; /* MPI v2.0 only */ + MPI2_SGE_CHAIN_UNION MpiChain; /* MPI v2.0 only */ + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /* MPI v2.0 only */ + MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /* MPI v2.5 only */ +} MPI2_SATA_PT_SGE_UNION, MPI2_POINTER PTR_MPI2_SATA_PT_SGE_UNION, + Mpi2SataPTSGEUnion_t, MPI2_POINTER pMpi2SataPTSGEUnion_t; -/*SATA Passthrough Request Message */ -typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST { - U16 DevHandle; /*0x00 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - 
U16 PassthroughFlags; /*0x04 */ - U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ - U32 Reserved2; /*0x0C */ - U32 Reserved3; /*0x10 */ - U32 Reserved4; /*0x14 */ - U32 DataLength; /*0x18 */ - U8 CommandFIS[20]; /*0x1C */ - MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/ -} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST, - Mpi2SataPassthroughRequest_t, - *pMpi2SataPassthroughRequest_t; -/*values for PassthroughFlags field */ +/* SATA Passthrough Request Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 PassthroughFlags; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Reserved2; /* 0x0C */ + U32 Reserved3; /* 0x10 */ + U32 Reserved4; /* 0x14 */ + U32 DataLength; /* 0x18 */ + U8 CommandFIS[20]; /* 0x1C */ + MPI2_SATA_PT_SGE_UNION SGL; /* 0x30 */ /* MPI v2.5: IEEE 64 elements only */ +} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, + Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; + +/* values for PassthroughFlags field */ #define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) -#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) +#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) /* MPI v2.6 and newer */ #define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) #define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) #define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) #define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) #define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) -/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ +/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ -/*SATA Passthrough Reply Message */ -typedef struct _MPI2_SATA_PASSTHROUGH_REPLY { - U16 DevHandle; /*0x00 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 PassthroughFlags; /*0x04 */ - U8 SGLFlags; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved1; /*0x0A */ - U8 Reserved2; /*0x0C */ - U8 SASStatus; /*0x0D */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U8 StatusFIS[20]; /*0x14 */ - U32 StatusControlRegisters; /*0x28 */ - U32 TransferCount; /*0x2C */ -} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY, - Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t; -/*values for SASStatus field are at the top of this file */ +/* SATA Passthrough Reply Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REPLY +{ + U16 DevHandle; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 PassthroughFlags; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U8 Reserved2; /* 0x0C */ + U8 SASStatus; /* 0x0D */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 StatusFIS[20]; /* 0x14 */ + U32 StatusControlRegisters; /* 0x28 */ + U32 TransferCount; /* 0x2C */ +} MPI2_SATA_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REPLY, + Mpi2SataPassthroughReply_t, MPI2_POINTER pMpi2SataPassthroughReply_t; + +/* values for SASStatus field are at the top of this file */ + /**************************************************************************** -* SAS IO Unit Control messages -* 
(MPI v2.5 and earlier only. -* Replaced by IO Unit Control messages in MPI v2.6 and later.) +* SAS IO Unit Control messages +* (MPI v2.5 and earlier only. +* Replaced by IO Unit Control messages in MPI v2.6 and later.) ****************************************************************************/ -/*SAS IO Unit Control Request Message */ -typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { - U8 Operation; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 DevHandle; /*0x04 */ - U8 IOCParameter; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U16 Reserved4; /*0x0C */ - U8 PhyNum; /*0x0E */ - U8 PrimFlags; /*0x0F */ - U32 Primitive; /*0x10 */ - U8 LookupMethod; /*0x14 */ - U8 Reserved5; /*0x15 */ - U16 SlotNumber; /*0x16 */ - U64 LookupAddress; /*0x18 */ - U32 IOCParameterValue; /*0x20 */ - U32 Reserved7; /*0x24 */ - U32 Reserved8; /*0x28 */ +/* SAS IO Unit Control Request Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ } MPI2_SAS_IOUNIT_CONTROL_REQUEST, - *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, - Mpi2SasIoUnitControlRequest_t, - *pMpi2SasIoUnitControlRequest_t; + MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, + Mpi2SasIoUnitControlRequest_t, MPI2_POINTER pMpi2SasIoUnitControlRequest_t; -/*values for the Operation field */ +/* values for the Operation field */ #define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) #define MPI2_SAS_OP_PHY_LINK_RESET (0x06) #define MPI2_SAS_OP_PHY_HARD_RESET (0x07) @@ -272,33 +282,38 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { #define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) #define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) -/*values for the PrimFlags field */ +/* values for the PrimFlags field */ #define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) #define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) #define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) -/*values for the LookupMethod field */ +/* values for the LookupMethod field */ #define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) #define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) #define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) -/*SAS IO Unit Control Reply Message */ -typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY { - U8 Operation; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 DevHandle; /*0x04 */ - U8 IOCParameter; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved3; /*0x0A */ - U16 Reserved4; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ + +/* SAS IO Unit Control Reply Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E 
*/ + U32 IOCLogInfo; /* 0x10 */ } MPI2_SAS_IOUNIT_CONTROL_REPLY, - *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, - Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t; + MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, + Mpi2SasIoUnitControlReply_t, MPI2_POINTER pMpi2SasIoUnitControlReply_t; + #endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_targ.h b/drivers/scsi/mpt3sas/mpi/mpi2_targ.h new file mode 100755 index 000000000000..2e88a535c9a9 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_targ.h @@ -0,0 +1,575 @@ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_targ.h + * Title: MPI Target mode messages and structures + * Creation Date: September 8, 2006 + * + * mpi2_targ.h Version: 02.00.09 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to + * BufferPostFlags field of CommandBufferPostBase Request. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 10-02-08 02.00.03 Removed NextCmdBufferOffset from + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * Target Status Send Request only takes a single SGE for + * response data. + * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure. + * 11-18-11 02.00.05 Incorporating additions for MPI v2.5. + * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT + * request message structure. + * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and + * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS. + * 06-13-14 02.00.07 Added MinMSIxIndex and MaxMSIxIndex fields to + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * 11-18-14 02.00.08 Updated copyright information. + * 03-16-15 02.00.09 Updated for MPI v2.6. + * Added MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TARG_H +#define MPI2_TARG_H + + +/****************************************************************************** +* +* SCSI Target Messages +* +*******************************************************************************/ + +/**************************************************************************** +* Target Command Buffer Post Base Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST +{ + U8 BufferPostFlags; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 TotalCmdBuffers; /* 0x04 */ + U8 Reserved; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 CmdBufferLength; /* 0x10 */ + U8 MinMSIxIndex; /* 0x12 */ /* MPI 2.5 and newer only; Reserved in MPI 2.0 */ + U8 MaxMSIxIndex; /* 0x13 */ /* MPI 2.5 and newer only; Reserved in MPI 2.0 */ + U32 BaseAddressLow; /* 0x14 */ + U32 BaseAddressHigh; /* 0x18 */ +} MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST, + MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST, + Mpi2TargetCmdBufferPostBaseRequest_t, + MPI2_POINTER pMpi2TargetCmdBufferPostBaseRequest_t; + +/* values for the BufferPostFlags field */ +#define MPI2_CMD_BUF_POST_BASE_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_CMD_BUF_POST_BASE_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_CMD_BUF_POST_BASE_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_CMD_BUF_POST_BASE_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.5 and earlier */ +#define MPI26_CMD_BUF_POST_BASE_IOCCTL_ADDRESS_SPACE (0x08) /* for MPI v2.6 only */ +#define MPI2_CMD_BUF_POST_BASE_IOCPLBNTA_ADDRESS_SPACE (0x0C) /* only for MPI v2.5 and earlier */ + +#define MPI2_CMD_BUF_POST_BASE_FLAGS_AUTO_POST_ALL (0x01) + + +/**************************************************************************** +* Target Command Buffer Post List Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST +{ + U16 Reserved; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 CmdBufferCount; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 IoIndex[2]; /* 0x10 */ +} MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST, + MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST, + Mpi2TargetCmdBufferPostListRequest_t, + MPI2_POINTER pMpi2TargetCmdBufferPostListRequest_t; + +/**************************************************************************** +* Target Command Buffer Post Base List Reply +****************************************************************************/ + +typedef struct _MPI2_TARGET_BUF_POST_BASE_LIST_REPLY +{ + U8 Flags; /* 0x00 */ + U8 Reserved; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 IoIndex; /* 0x14 */ + U16 Reserved5; /* 0x16 */ + U32 Reserved6; /* 0x18 */ +} MPI2_TARGET_BUF_POST_BASE_LIST_REPLY, + MPI2_POINTER PTR_MPI2_TARGET_BUF_POST_BASE_LIST_REPLY, + Mpi2TargetCmdBufferPostBaseListReply_t, + MPI2_POINTER pMpi2TargetCmdBufferPostBaseListReply_t;
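/*
 * Editorial sketch, not part of the MPI headers or of this patch: one
 * plausible way a target-mode driver could fill the Command Buffer Post
 * Base request defined above. The helper name and its arguments are
 * hypothetical; the function code is assumed to be
 * MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST from mpi2.h.
 */
#if 0	/* illustrative example only, never compiled */
#include <string.h>

static void example_post_cmd_buffers(MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST *req,
	U16 num_buffers, U16 buf_len, U64 base_dma_addr)
{
	memset(req, 0, sizeof(*req));
	req->Function        = MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST;
	/* command buffers live in host (system) memory in this sketch */
	req->BufferPostFlags = MPI2_CMD_BUF_POST_BASE_SYSTEM_ADDRESS_SPACE;
	req->TotalCmdBuffers = num_buffers;
	req->CmdBufferLength = buf_len;
	/* the 64-bit base address is carried as two 32-bit words */
	req->BaseAddressLow  = (U32)(base_dma_addr & 0xFFFFFFFF);
	req->BaseAddressHigh = (U32)(base_dma_addr >> 32);
}
#endif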
+ +/* Flags defines */ +#define MPI2_CMD_BUF_POST_REPLY_IOINDEX_VALID (0x01) + + +/**************************************************************************** +* Command Buffer Formats (with 16 byte CDB) +****************************************************************************/ + +typedef struct _MPI2_TARGET_SSP_CMD_BUFFER +{ + U8 FrameType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 InitiatorConnectionTag; /* 0x02 */ + U32 HashedSourceSASAddress; /* 0x04 */ + U16 Reserved2; /* 0x08 */ + U16 Flags; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 Tag; /* 0x10 */ + U16 TargetPortTransferTag; /* 0x12 */ + U32 DataOffset; /* 0x14 */ + /* COMMAND information unit starts here */ + U8 LogicalUnitNumber[8]; /* 0x18 */ + U8 Reserved4; /* 0x20 */ + U8 TaskAttribute; /* lower 3 bits */ /* 0x21 */ + U8 Reserved5; /* 0x22 */ + U8 AdditionalCDBLength; /* upper 5 bits */ /* 0x23 */ + U8 CDB[16]; /* 0x24 */ + /* Additional CDB bytes extend past the CDB field */ +} MPI2_TARGET_SSP_CMD_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_CMD_BUFFER, + Mpi2TargetSspCmdBuffer, MPI2_POINTER pMpi2TargetSspCmdBuffer; + +typedef struct _MPI2_TARGET_SSP_TASK_BUFFER +{ + U8 FrameType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 InitiatorConnectionTag; /* 0x02 */ + U32 HashedSourceSASAddress; /* 0x04 */ + U16 Reserved2; /* 0x08 */ + U16 Flags; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 Tag; /* 0x10 */ + U16 TargetPortTransferTag; /* 0x12 */ + U32 DataOffset; /* 0x14 */ + /* TASK information unit starts here */ + U8 LogicalUnitNumber[8]; /* 0x18 */ + U16 Reserved4; /* 0x20 */ + U8 TaskManagementFunction; /* 0x22 */ + U8 Reserved5; /* 0x23 */ + U16 ManagedTaskTag; /* 0x24 */ + U16 Reserved6; /* 0x26 */ + U32 Reserved7; /* 0x28 */ + U32 Reserved8; /* 0x2C */ + U32 Reserved9; /* 0x30 */ +} MPI2_TARGET_SSP_TASK_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_TASK_BUFFER, + Mpi2TargetSspTaskBuffer, MPI2_POINTER pMpi2TargetSspTaskBuffer; + +/* mask and shift for HashedSourceSASAddress field */ +#define MPI2_TARGET_HASHED_SAS_ADDRESS_MASK (0xFFFFFF00) +#define MPI2_TARGET_HASHED_SAS_ADDRESS_SHIFT (8) + + +/**************************************************************************** +* MPI v2.0 Target Assist Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_ASSIST_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 TargetAssistFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 QueueTag; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 IoIndex; /* 0x0C */ + U16 InitiatorConnectionTag; /* 0x0E */ + U16 SGLFlags; /* 0x10 */ + U8 SequenceNumber; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U32 EEDPBlockSize; /* 0x28 */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 ApplicationTagTranslationMask; /* 0x32 */ + U32 PrimaryReferenceTag; /* 0x34 */ + U16 PrimaryApplicationTag; /* 0x38 */ + U16 PrimaryApplicationTagMask; /* 0x3A */ + U32 RelativeOffset; /* 0x3C */ + U32 Reserved5; /* 0x40 */ + U32 Reserved6; /* 0x44 */ + U32 Reserved7; /* 0x48 */ + U32 Reserved8; /* 0x4C */ + MPI2_SGE_IO_UNION SGL[1]; /* 0x50 */ +} MPI2_TARGET_ASSIST_REQUEST, MPI2_POINTER
PTR_MPI2_TARGET_ASSIST_REQUEST, + Mpi2TargetAssistRequest_t, MPI2_POINTER pMpi2TargetAssistRequest_t; + +/* Target Assist TargetAssistFlags bits */ + +#define MPI2_TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80) +#define MPI2_TARGET_ASSIST_FLAGS_TLR (0x10) +#define MPI2_TARGET_ASSIST_FLAGS_RETRANSMIT (0x04) +#define MPI2_TARGET_ASSIST_FLAGS_AUTO_STATUS (0x02) +#define MPI2_TARGET_ASSIST_FLAGS_DATA_DIRECTION (0x01) + +/* Target Assist SGLFlags bits */ + +/* base values for Data Location Address Space */ +#define MPI2_TARGET_ASSIST_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_TARGET_ASSIST_SGLFLAGS_PLBNTA_ADDR (0x0C) + +/* base values for Type */ +#define MPI2_TARGET_ASSIST_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_TARGET_ASSIST_SGLFLAGS_MPI_TYPE (0x00) +#define MPI2_TARGET_ASSIST_SGLFLAGS_32IEEE_TYPE (0x01) +#define MPI2_TARGET_ASSIST_SGLFLAGS_64IEEE_TYPE (0x02) + +/* shift values for each sub-field */ +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL0_SHIFT (0) + +/* Target Assist IoFlags bits */ + +#define MPI2_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_TARGET_ASSIST_IOFLAGS_MULTICAST (0x0400) +#define MPI2_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200) + +/* Target Assist EEDPFlags bits */ + +#define MPI2_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_TA_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_TA_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_TA_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI2_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_TA_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_TA_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_TA_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_TA_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_TA_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_TA_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + + +/**************************************************************************** +* MPI v2.5 Target Assist Request +****************************************************************************/ + +typedef struct _MPI25_TARGET_ASSIST_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 TargetAssistFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 QueueTag; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 IoIndex; /* 0x0C */ + U16 InitiatorConnectionTag; /* 0x0E */ + U8 DMAFlags; /* 0x10 */ + U8 Reserved9; /* 0x11 */ + U8 SequenceNumber; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U16 EEDPBlockSize; /* 0x28 */ + U16 Reserved10; /* 0x2A */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 ApplicationTagTranslationMask; /* 0x32 */ + U32 PrimaryReferenceTag; /* 0x34 */ + U16 PrimaryApplicationTag; /* 0x38 */ + U16 
PrimaryApplicationTagMask; /* 0x3A */ + U32 RelativeOffset; /* 0x3C */ + U32 Reserved5; /* 0x40 */ + U32 Reserved6; /* 0x44 */ + U32 Reserved7; /* 0x48 */ + U32 Reserved8; /* 0x4C */ + MPI25_SGE_IO_UNION SGL; /* 0x50 */ +} MPI25_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI25_TARGET_ASSIST_REQUEST, + Mpi25TargetAssistRequest_t, MPI2_POINTER pMpi25TargetAssistRequest_t; + +/* use MPI2_TARGET_ASSIST_FLAGS_ defines for the Flags field */ + +/* Defines for the DMAFlags field + * Each setting affects 4 SGLs, from SGL0 to SGL3. + * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF + */ +#define MPI25_TA_DMAFLAGS_OP_MASK (0x0F) +#define MPI25_TA_DMAFLAGS_OP_D_D_D_D (0x00) +#define MPI25_TA_DMAFLAGS_OP_D_D_D_C (0x01) +#define MPI25_TA_DMAFLAGS_OP_D_D_D_I (0x02) +#define MPI25_TA_DMAFLAGS_OP_D_D_C_C (0x03) +#define MPI25_TA_DMAFLAGS_OP_D_D_C_I (0x04) +#define MPI25_TA_DMAFLAGS_OP_D_D_I_I (0x05) +#define MPI25_TA_DMAFLAGS_OP_D_C_C_C (0x06) +#define MPI25_TA_DMAFLAGS_OP_D_C_C_I (0x07) +#define MPI25_TA_DMAFLAGS_OP_D_C_I_I (0x08) +#define MPI25_TA_DMAFLAGS_OP_D_I_I_I (0x09) +#define MPI25_TA_DMAFLAGS_OP_D_H_D_D (0x0A) +#define MPI25_TA_DMAFLAGS_OP_D_H_D_C (0x0B) +#define MPI25_TA_DMAFLAGS_OP_D_H_D_I (0x0C) +#define MPI25_TA_DMAFLAGS_OP_D_H_C_C (0x0D) +#define MPI25_TA_DMAFLAGS_OP_D_H_C_I (0x0E) +#define MPI25_TA_DMAFLAGS_OP_D_H_I_I (0x0F) + +/* defines for the IoFlags field */ +#define MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) /* MPI v2.6 and later */ +#define MPI25_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI25_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200) + +/* defines for the EEDPFlags field */ +#define MPI25_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI25_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI25_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI25_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI25_TA_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI25_TA_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI25_TA_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI25_TA_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) +#define MPI25_TA_EEDPFLAGS_COMPATIBLE_MODE (0x0000) +#define MPI25_TA_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI25_TA_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) +#define MPI25_TA_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) + +#define MPI25_TA_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) +#define MPI25_TA_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) +#define MPI25_TA_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) + +#define MPI25_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI25_TA_EEDPFLAGS_MASK_OP (0x0007) +#define MPI25_TA_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI25_TA_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI25_TA_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI25_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI25_TA_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI25_TA_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI25_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + + +/**************************************************************************** +* Target Status Send Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_STATUS_SEND_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 StatusFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 QueueTag; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 IoIndex; /* 0x0C */ + U16 InitiatorConnectionTag; /* 0x0E */ + U16 SGLFlags; /* 0x10 */ /* MPI v2.0 only.
Reserved on MPI v2.5. */ + U16 Reserved4; /* 0x12 */ + U8 SGLOffset0; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 Reserved6; /* 0x16 */ + U32 Reserved7; /* 0x18 */ + U32 Reserved8; /* 0x1C */ + MPI2_SIMPLE_SGE_UNION StatusDataSGE; /* 0x20 */ /* MPI v2.5: This must be an IEEE Simple Element 64. */ +} MPI2_TARGET_STATUS_SEND_REQUEST, + MPI2_POINTER PTR_MPI2_TARGET_STATUS_SEND_REQUEST, + Mpi2TargetStatusSendRequest_t, MPI2_POINTER pMpi2TargetStatusSendRequest_t; + +/* Target Status Send StatusFlags bits */ + +#define MPI2_TSS_FLAGS_REPOST_CMD_BUFFER (0x80) +#define MPI2_TSS_FLAGS_RETRANSMIT (0x04) +#define MPI2_TSS_FLAGS_AUTO_GOOD_STATUS (0x01) + +/* Target Status Send SGLFlags bits - MPI v2.0 only */ +/* Data Location Address Space */ +#define MPI2_TSS_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_TSS_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_TSS_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_TSS_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_TSS_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) +/* Type */ +#define MPI2_TSS_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_TSS_SGLFLAGS_MPI_TYPE (0x00) +#define MPI2_TSS_SGLFLAGS_IEEE32_TYPE (0x01) +#define MPI2_TSS_SGLFLAGS_IEEE64_TYPE (0x02) + + + +/* + * NOTE: The SSP status IU is big-endian. When used on a little-endian system, + * this structure properly orders the bytes. + */ +typedef struct _MPI2_TARGET_SSP_RSP_IU +{ + U32 Reserved0[6]; /* reserved for SSP header */ /* 0x00 */ + + /* start of RESPONSE information unit */ + U32 Reserved1; /* 0x18 */ + U32 Reserved2; /* 0x1C */ + U16 Reserved3; /* 0x20 */ + U8 DataPres; /* lower 2 bits */ /* 0x22 */ + U8 Status; /* 0x23 */ + U32 Reserved4; /* 0x24 */ + U32 SenseDataLength; /* 0x28 */ + U32 ResponseDataLength; /* 0x2C */ + + /* start of Response or Sense Data (size may vary dynamically) */ + U8 ResponseSenseData[4]; /* 0x30 */ +} MPI2_TARGET_SSP_RSP_IU, MPI2_POINTER PTR_MPI2_TARGET_SSP_RSP_IU, + Mpi2TargetSspRspIu_t, MPI2_POINTER pMpi2TargetSspRspIu_t; + + +/**************************************************************************** +* Target Standard Reply - used with Target Assist or Target Status Send +****************************************************************************/ + +typedef struct _MPI2_TARGET_STANDARD_REPLY +{ + U16 Reserved; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 IoIndex; /* 0x14 */ + U16 Reserved5; /* 0x16 */ + U32 TransferCount; /* 0x18 */ + U32 BidirectionalTransferCount; /* 0x1C */ +} MPI2_TARGET_STANDARD_REPLY, MPI2_POINTER PTR_MPI2_TARGET_STANDARD_REPLY, + Mpi2TargetErrorReply_t, MPI2_POINTER pMpi2TargetErrorReply_t; + + +/**************************************************************************** +* Target Mode Abort Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_MODE_ABORT_REQUEST +{ + U8 AbortType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 IoIndexToAbort; /* 0x0C */ + U16 InitiatorDevHandle; /* 0x0E */ + U32 MidToAbort; /* 0x10 */ +} MPI2_TARGET_MODE_ABORT, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT, + Mpi2TargetModeAbort_t, MPI2_POINTER pMpi2TargetModeAbort_t; 
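/*
 * Editorial sketch, not part of the original header or of this patch:
 * building a Target Mode Abort request that cancels every command owned
 * by a single initiator. The helper name is hypothetical;
 * MPI2_FUNCTION_TARGET_MODE_ABORT is assumed to come from mpi2.h, and the
 * AbortType values are the ones defined immediately below.
 */
#if 0	/* illustrative example only, never compiled */
#include <string.h>

static void example_abort_initiator(MPI2_TARGET_MODE_ABORT *req,
	U16 init_dev_handle)
{
	memset(req, 0, sizeof(*req));
	req->Function           = MPI2_FUNCTION_TARGET_MODE_ABORT;
	/* abort all commands associated with this initiator's DevHandle */
	req->AbortType          = MPI2_TARGET_MODE_ABORT_DEVHANDLE;
	req->InitiatorDevHandle = init_dev_handle;
}
#endif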
+ +/* Target Mode Abort AbortType values */ + +#define MPI2_TARGET_MODE_ABORT_ALL_CMD_BUFFERS (0x00) +#define MPI2_TARGET_MODE_ABORT_ALL_IO (0x01) +#define MPI2_TARGET_MODE_ABORT_EXACT_IO (0x02) +#define MPI2_TARGET_MODE_ABORT_EXACT_IO_REQUEST (0x03) +#define MPI2_TARGET_MODE_ABORT_IO_REQUEST_AND_IO (0x04) +#define MPI2_TARGET_MODE_ABORT_DEVHANDLE (0x05) +#define MPI2_TARGET_MODE_ABORT_ALL_COMMANDS (0x06) + + +/**************************************************************************** +* Target Mode Abort Reply +****************************************************************************/ + +typedef struct _MPI2_TARGET_MODE_ABORT_REPLY +{ + U16 Reserved; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 AbortCount; /* 0x14 */ +} MPI2_TARGET_MODE_ABORT_REPLY, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT_REPLY, + Mpi2TargetModeAbortReply_t, MPI2_POINTER pMpi2TargetModeAbortReply_t; + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h old mode 100644 new mode 100755 index 17ef7f63b938..1ac0c59f78da --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -1,47 +1,46 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * Copyright 2000-2020 Broadcom Inc. All rights reserved. * * - * Name: mpi2_tool.h - * Title: MPI diagnostic tool structures and definitions - * Creation Date: March 26, 2007 + * Name: mpi2_tool.h + * Title: MPI diagnostic tool structures and definitions + * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.16 + * mpi2_tool.h Version: 02.00.16 * - * Version History - * --------------- + * Version History + * --------------- * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release - * structures and defines. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. - * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request - * and reply messages. - * Added MPI2_DIAG_BUF_TYPE_EXTENDED. - * Incremented MPI2_DIAG_BUF_TYPE_COUNT. - * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. - * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer - * Post Request. - * 05-25-11 02.00.07 Added Flags field and related defines to - * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. - * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. - * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request - * message. - * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that - * it uses MPI Chain SGE as well as MPI Simple SGE. - * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. - * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. - * 11-18-14 02.00.13 Updated copyright information. - * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean - * Tool Request Message. - * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. - * Added option for DeviceInfo field in ISTWI tool. 
- * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS. - * -------------------------------------------------------------------------- + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. + * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean + * Tool Request Message. + * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. + * Added option for DeviceInfo field in ISTWI tool. + * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS + * -------------------------------------------------------------------------- */ #ifndef MPI2_TOOL_H @@ -49,11 +48,11 @@ /***************************************************************************** * -* Toolbox Messages +* Toolbox Messages * *****************************************************************************/ -/*defines for the Tools */ +/* defines for the Tools */ #define MPI2_TOOLBOX_CLEAN_TOOL (0x00) #define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) #define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) @@ -63,47 +62,51 @@ #define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) #define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08) -/**************************************************************************** -* Toolbox reply -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_REPLY { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ -} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY, - Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t; /**************************************************************************** -* Toolbox Clean Tool request +* Toolbox reply ****************************************************************************/ -typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 
Reserved4; /*0x0A */ - U32 Flags; /*0x0C */ -} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST, - Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t; +typedef struct _MPI2_TOOLBOX_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_TOOLBOX_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_REPLY, + Mpi2ToolboxReply_t, MPI2_POINTER pMpi2ToolboxReply_t; -/*values for the Flags field */ + +/**************************************************************************** +* Toolbox Clean Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Flags; /* 0x0C */ + } MPI2_TOOLBOX_CLEAN_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_CLEAN_REQUEST, + Mpi2ToolboxCleanRequest_t, MPI2_POINTER pMpi2ToolboxCleanRequest_t; + +/* values for the Flags field */ #define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) #define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) @@ -126,97 +129,103 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST { #define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) #define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) -/**************************************************************************** -* Toolbox Memory Move request -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */ -} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, - Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t; /**************************************************************************** -* Toolbox Diagnostic Data Upload request +* Toolbox Memory Move request ****************************************************************************/ -typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U8 SGLFlags; /*0x0C */ - U8 Reserved5; /*0x0D */ - U16 Reserved6; /*0x0E */ - U32 Flags; /*0x10 */ - U32 DataLength; /*0x14 */ - MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */ +typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + MPI2_SGE_SIMPLE_UNION SGL; /* 0x0C */ +} MPI2_TOOLBOX_MEM_MOVE_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, + 
Mpi2ToolboxMemMoveRequest_t, MPI2_POINTER pMpi2ToolboxMemMoveRequest_t; + + +/**************************************************************************** +* Toolbox Diagnostic Data Upload request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 SGLFlags; /* 0x0C */ + U8 Reserved5; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U32 Flags; /* 0x10 */ + U32 DataLength; /* 0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */ } MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, - *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, - Mpi2ToolboxDiagDataUploadRequest_t, - *pMpi2ToolboxDiagDataUploadRequest_t; + MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + Mpi2ToolboxDiagDataUploadRequest_t, + MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t; -/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER +{ + U32 DiagDataLength; /* 00h */ + U8 FormatCode; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 06h */ +} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, + Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t; -typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER { - U32 DiagDataLength; /*00h */ - U8 FormatCode; /*04h */ - U8 Reserved1; /*05h */ - U16 Reserved2; /*06h */ -} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, - Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t; /**************************************************************************** -* Toolbox ISTWI Read Write Tool +* Toolbox ISTWI Read Write Tool ****************************************************************************/ -/*Toolbox ISTWI Read Write Tool request message */ -typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 Reserved5; /*0x0C */ - U32 Reserved6; /*0x10 */ - U8 DevIndex; /*0x14 */ - U8 Action; /*0x15 */ - U8 SGLFlags; /*0x16 */ - U8 Flags; /*0x17 */ - U16 TxDataLength; /*0x18 */ - U16 RxDataLength; /*0x1A */ - U32 Reserved8; /*0x1C */ - U32 Reserved9; /*0x20 */ - U32 Reserved10; /*0x24 */ - U32 Reserved11; /*0x28 */ - U32 Reserved12; /*0x2C */ - MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */ +/* Toolbox ISTWI Read Write Tool request message */ +typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + U8 DevIndex; /* 0x14 */ + U8 Action; /* 0x15 */ + U8 SGLFlags; /* 0x16 */ + U8 Flags; /* 0x17 */ + U16 TxDataLength; /* 0x18 */ + U16 RxDataLength; /* 0x1A */ + U32 DeviceInfo[3]; /* 0x1C */ + U32 Reserved11; /* 0x28 */ + U32 Reserved12; /* 0x2C */ + MPI2_SGE_SIMPLE_UNION SGL; /* 0x30 */ } MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, - 
*PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, - Mpi2ToolboxIstwiReadWriteRequest_t, - *pMpi2ToolboxIstwiReadWriteRequest_t; + MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + Mpi2ToolboxIstwiReadWriteRequest_t, + MPI2_POINTER pMpi2ToolboxIstwiReadWriteRequest_t; -/*values for the Action field */ +/* values for the Action field */ #define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) #define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) #define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) @@ -224,141 +233,149 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { #define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) #define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) -/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ -/*values for the Flags field */ +/* values for the Flags field */ #define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) #define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) -/*MPI26 TOOLBOX Request MsgFlags defines */ +/* MPI26 TOOLBOX Request MsgFlags defines */ #define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01) -/*Request uses Man Page 43 device index addressing */ -#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) -/*Request uses Man Page 43 device info struct addressing */ -#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) /* Request uses Man Page 43 device index addressing */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) /* Request uses Man Page 43 device info struct addressing */ + + +/* Toolbox ISTWI Read Write Tool reply message */ +typedef struct _MPI2_TOOLBOX_ISTWI_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 DevIndex; /* 0x14 */ + U8 Action; /* 0x15 */ + U8 IstwiStatus; /* 0x16 */ + U8 Reserved6; /* 0x17 */ + U16 TxDataCount; /* 0x18 */ + U16 RxDataCount; /* 0x1A */ +} MPI2_TOOLBOX_ISTWI_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_REPLY, + Mpi2ToolboxIstwiReply_t, MPI2_POINTER pMpi2ToolboxIstwiReply_t; -/*Toolbox ISTWI Read Write Tool reply message */ -typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U8 DevIndex; /*0x14 */ - U8 Action; /*0x15 */ - U8 IstwiStatus; /*0x16 */ - U8 Reserved6; /*0x17 */ - U16 TxDataCount; /*0x18 */ - U16 RxDataCount; /*0x1A */ -} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY, - Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t; /**************************************************************************** -* Toolbox Beacon Tool request +* Toolbox Beacon Tool request ****************************************************************************/ -typedef struct _MPI2_TOOLBOX_BEACON_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U8 Reserved5; /*0x0C */ - U8 PhysicalPort; /*0x0D */ 
- U8 Reserved6; /*0x0E */ - U8 Flags; /*0x0F */ -} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST, - Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t; +typedef struct _MPI2_TOOLBOX_BEACON_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Reserved5; /* 0x0C */ + U8 PhysicalPort; /* 0x0D */ + U8 Reserved6; /* 0x0E */ + U8 Flags; /* 0x0F */ +} MPI2_TOOLBOX_BEACON_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_BEACON_REQUEST, + Mpi2ToolboxBeaconRequest_t, MPI2_POINTER pMpi2ToolboxBeaconRequest_t; -/*values for the Flags field */ +/* values for the Flags field */ #define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) #define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) + /**************************************************************************** -* Toolbox Diagnostic CLI Tool +* Toolbox Diagnostic CLI Tool ****************************************************************************/ #define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) -/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */ -typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U8 SGLFlags; /*0x0C */ - U8 Reserved5; /*0x0D */ - U16 Reserved6; /*0x0E */ - U32 DataLength; /*0x10 */ - U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ - MPI2_MPI_SGE_IO_UNION SGL; /*0x70 */ +/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 SGLFlags; /* 0x0C */ + U8 Reserved5; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U32 DataLength; /* 0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ + MPI2_MPI_SGE_IO_UNION SGL; /* 0x70 */ } MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - Mpi2ToolboxDiagnosticCliRequest_t, - *pMpi2ToolboxDiagnosticCliRequest_t; + MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi2ToolboxDiagnosticCliRequest_t, + MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t; -/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ -/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */ -typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U32 Reserved5; /*0x0C */ - U32 DataLength; /*0x10 */ - U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ - MPI25_SGE_IO_UNION SGL; /* 0x70 */ + +/* MPI v2.5 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 
0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 DataLength; /* 0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ + MPI25_SGE_IO_UNION SGL; /* 0x70 */ } MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - Mpi25ToolboxDiagnosticCliRequest_t, - *pMpi25ToolboxDiagnosticCliRequest_t; + MPI2_POINTER PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi25ToolboxDiagnosticCliRequest_t, + MPI2_POINTER pMpi25ToolboxDiagnosticCliRequest_t; -/*Toolbox Diagnostic CLI Tool reply message */ -typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U32 ReturnedDataLength; /*0x14 */ + +/* Toolbox Diagnostic CLI Tool reply message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ReturnedDataLength; /* 0x14 */ } MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, - *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, - Mpi2ToolboxDiagnosticCliReply_t, - *pMpi2ToolboxDiagnosticCliReply_t; + MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, + Mpi2ToolboxDiagnosticCliReply_t, + MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t; /**************************************************************************** @@ -366,25 +383,26 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { ****************************************************************************/ /* Toolbox Console Text Display Tool request message */ -typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST { - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 Console; /* 0x0C */ - U8 Flags; /* 0x0D */ - U16 Reserved6; /* 0x0E */ - U8 TextToDisplay[4]; /* 0x10 */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 TextToDisplay[4]; /* 0x10 */ /* actual length determined at runtime based on frame size */ } MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, -*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, -Mpi2ToolboxTextDisplayRequest_t, -*pMpi2ToolboxTextDisplayRequest_t; + MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, + Mpi2ToolboxTextDisplayRequest_t, + MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t; /* defines for the Console field */ #define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) @@ -398,34 +416,32 @@ Mpi2ToolboxTextDisplayRequest_t, #define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) 
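Because the Text Display request is allocated at the IOC's full frame size, the nominal four-byte TextToDisplay array is really the start of a frame-sized buffer whose usable length is only known at runtime. A minimal sketch of building such a request, assuming MPI2_FUNCTION_TOOLBOX from mpi2.h and a zeroed frame of frame_sz bytes (strscpy would be strlcpy on very old kernels):

    /* Hypothetical helper, not part of the driver: queue one line of text
     * for the IOC console. MPI2_FUNCTION_TOOLBOX is assumed from mpi2.h;
     * Console is left 0 here (the full header defines the console types).
     */
    static void build_text_display(Mpi2ToolboxTextDisplayRequest_t *mf,
                                   const char *text, size_t frame_sz)
    {
            /* usable text space = whole frame minus the fixed header */
            size_t room = frame_sz -
                    offsetof(Mpi2ToolboxTextDisplayRequest_t, TextToDisplay);

            mf->Tool = MPI2_TOOLBOX_TEXT_DISPLAY_TOOL;
            mf->Function = MPI2_FUNCTION_TOOLBOX;
            mf->Flags = MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP; /* timestamp the line */
            strscpy((char *)mf->TextToDisplay, text, room);
    }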
-/*************************************************************************** - * Toolbox Backend Lane Margining Tool - *************************************************************************** - */ +/**************************************************************************** +* Toolbox Backend Lane Margining Tool +****************************************************************************/ -/*Toolbox Backend Lane Margining Tool request message */ -typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U8 Command; /*0x0C */ - U8 SwitchPort; /*0x0D */ - U16 DevHandle; /*0x0E */ - U8 RegisterOffset; /*0x10 */ - U8 Reserved5; /*0x11 */ - U16 DataLength; /*0x12 */ - MPI25_SGE_IO_UNION SGL; /*0x14 */ -} MPI26_TOOLBOX_LANE_MARGINING_REQUEST, - *PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST, - Mpi26ToolboxLaneMarginingRequest_t, - *pMpi2ToolboxLaneMarginingRequest_t; +/* Toolbox Backend Lane Margining Tool request message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Command; /* 0x0C */ + U8 SwitchPort; /* 0x0D */ + U16 DevHandle; /* 0x0E */ + U8 RegisterOffset; /* 0x10 */ + U8 Reserved5; /* 0x11 */ + U16 DataLength; /* 0x12 */ + MPI25_SGE_IO_UNION SGL; /* 0x14 */ +} MPI26_TOOLBOX_LANE_MARGINING_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST, + Mpi26ToolboxLaneMarginingRequest_t, MPI2_POINTER pMpi2ToolboxLaneMarginingRequest_t; /* defines for the Command field */ #define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01) @@ -434,132 +450,143 @@ typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST { #define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04) -/*Toolbox Backend Lane Margining Tool reply message */ -typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY { - U8 Tool; /*0x00 */ - U8 Reserved1; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U16 ReturnedDataLength; /*0x14 */ - U16 Reserved6; /*0x16 */ +/* Toolbox Backend Lane Margining Tool reply message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 ReturnedDataLength; /* 0x14 */ + U16 Reserved6; /* 0x16 */ } MPI26_TOOLBOX_LANE_MARGINING_REPLY, - *PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY, - Mpi26ToolboxLaneMarginingReply_t, - *pMpi26ToolboxLaneMarginingReply_t; + MPI2_POINTER PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY, + Mpi26ToolboxLaneMarginingReply_t, + MPI2_POINTER pMpi26ToolboxLaneMarginingReply_t; /***************************************************************************** * -* Diagnostic Buffer Messages +* Diagnostic Buffer 
Messages * *****************************************************************************/ + /**************************************************************************** -* Diagnostic Buffer Post request +* Diagnostic Buffer Post request ****************************************************************************/ -typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST { - U8 ExtendedType; /*0x00 */ - U8 BufferType; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U64 BufferAddress; /*0x0C */ - U32 BufferLength; /*0x14 */ - U32 Reserved5; /*0x18 */ - U32 Reserved6; /*0x1C */ - U32 Flags; /*0x20 */ - U32 ProductSpecific[23]; /*0x24 */ -} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST, - Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t; +typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST +{ + U8 ExtendedType; /* 0x00 */ + U8 BufferType; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U64 BufferAddress; /* 0x0C */ + U32 BufferLength; /* 0x14 */ + U32 Reserved5; /* 0x18 */ + U32 Reserved6; /* 0x1C */ + U32 Flags; /* 0x20 */ + U32 ProductSpecific[23]; /* 0x24 */ +} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, + Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; -/*values for the ExtendedType field */ +/* values for the ExtendedType field */ #define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) -/*values for the BufferType field */ +/* values for the BufferType field */ #define MPI2_DIAG_BUF_TYPE_TRACE (0x00) #define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) #define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) -/*count of the number of buffer types */ +/* count of the number of buffer types */ #define MPI2_DIAG_BUF_TYPE_COUNT (0x03) -/*values for the Flags field */ -#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +/* values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) /* for MPI v2.0 products only */ #define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) -/**************************************************************************** -* Diagnostic Buffer Post reply -****************************************************************************/ - -typedef struct _MPI2_DIAG_BUFFER_POST_REPLY { - U8 ExtendedType; /*0x00 */ - U8 BufferType; /*0x01 */ - U8 MsgLength; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 Reserved4; /*0x0A */ - U16 Reserved5; /*0x0C */ - U16 IOCStatus; /*0x0E */ - U32 IOCLogInfo; /*0x10 */ - U32 TransferLength; /*0x14 */ -} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY, - Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t; /**************************************************************************** -* Diagnostic Release request +* Diagnostic Buffer Post reply ****************************************************************************/ -typedef struct _MPI2_DIAG_RELEASE_REQUEST { - U8 Reserved1; /*0x00 */ - U8 BufferType; /*0x01 */ - U8 ChainOffset; /*0x02 */ - U8 Function; /*0x03 */ - U16 Reserved2; /*0x04 */ - U8 Reserved3; /*0x06 */ - U8 MsgFlags; /*0x07 */ - U8 VP_ID; /*0x08 */ - U8 VF_ID; /*0x09 */ - U16 
Reserved4; /*0x0A */
-} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST,
- Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t;
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY
+{
+ U8 ExtendedType; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TransferLength; /* 0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, MPI2_POINTER pMpi2DiagBufferPostReply_t;
+
/****************************************************************************
-* Diagnostic Buffer Post reply
+* Diagnostic Release request
****************************************************************************/
-typedef struct _MPI2_DIAG_RELEASE_REPLY {
- U8 Reserved1; /*0x00 */
- U8 BufferType; /*0x01 */
- U8 MsgLength; /*0x02 */
- U8 Function; /*0x03 */
- U16 Reserved2; /*0x04 */
- U8 Reserved3; /*0x06 */
- U8 MsgFlags; /*0x07 */
- U8 VP_ID; /*0x08 */
- U8 VF_ID; /*0x09 */
- U16 Reserved4; /*0x0A */
- U16 Reserved5; /*0x0C */
- U16 IOCStatus; /*0x0E */
- U32 IOCLogInfo; /*0x10 */
-} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY,
- Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t;
+typedef struct _MPI2_DIAG_RELEASE_REQUEST
+{
+ U8 Reserved1; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, MPI2_POINTER pMpi2DiagReleaseRequest_t;
+
+
+/****************************************************************************
+* Diagnostic Release reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY
+{
+ U8 Reserved1; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_DIAG_RELEASE_REPLY, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, MPI2_POINTER pMpi2DiagReleaseReply_t;
+
#endif
+
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
old mode 100644
new mode 100755
index 36494439a419..e5f465721f4b
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,6 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2014 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_type.h
@@ -22,25 +21,82 @@
#ifndef MPI2_TYPE_H
#define MPI2_TYPE_H
+
/*******************************************************************************
- * Define * if it hasn't already been defined. By default
- * * is defined to be a near pointer. MPI2_POINTER can be defined as
- * a far pointer by defining * as "far *" before this header file is
+ * Define MPI2_POINTER if it hasn't already been defined. By default
+ * MPI2_POINTER is defined to be a near pointer.
MPI2_POINTER can be defined as + * a far pointer by defining MPI2_POINTER as "far *" before this header file is * included. */ +#ifndef MPI2_POINTER +#define MPI2_POINTER * +#endif + +#ifdef MPT3SAS_BASE_H_INCLUDED + +/***************************************************************************** + * + * Basic Types + * + *****************************************************************************/ + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __attribute__((aligned(4))); + +/***************************************************************************** + * + * Pointer Types + * + *****************************************************************************/ + +typedef U8 *PU8; +typedef U16 *PU16; +typedef U32 *PU32; +typedef U64 *PU64; + +#endif /* the basic types may have already been included by mpi_type.h */ -#ifndef MPI_TYPE_H +#if !defined(MPI_TYPE_H) && !defined(MPT3SAS_BASE_H_INCLUDED) /***************************************************************************** * * Basic Types * *****************************************************************************/ -typedef u8 U8; -typedef __le16 U16; -typedef __le32 U32; -typedef __le64 U64 __attribute__ ((aligned(4))); +typedef signed char S8; +typedef unsigned char U8; +typedef signed short S16; +typedef unsigned short U16; + + +#if defined(unix) || defined(__arm) || defined(ALPHA) || defined(__PPC__) || defined(__ppc) + + typedef signed int S32; + typedef unsigned int U32; + +#else + + typedef signed long S32; + typedef unsigned long U32; + +#endif + + +typedef struct _S64 +{ + U32 Low; + S32 High; +} S64; + +typedef struct _U64 +{ + U32 Low; + U32 High; +} U64; + /***************************************************************************** * @@ -48,11 +104,16 @@ typedef __le64 U64 __attribute__ ((aligned(4))); * *****************************************************************************/ -typedef U8 *PU8; -typedef U16 *PU16; -typedef U32 *PU32; -typedef U64 *PU64; +typedef S8 *PS8; +typedef U8 *PU8; +typedef S16 *PS16; +typedef U16 *PU16; +typedef S32 *PS32; +typedef U32 *PU32; +typedef S64 *PS64; +typedef U64 *PU64; #endif #endif + diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c old mode 100644 new mode 100755 index fea3cb6a090b..700944d33a6d --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3,9 +3,10 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -43,6 +44,7 @@ * USA. 
*/
+#include <linux/version.h>
#include
#include
#include
@@ -60,15 +62,22 @@
#include
#include
#include <asm/page.h> /* To get host page size per arch */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
#include <linux/aer.h>
-
+#endif
#include "mpt3sas_base.h"
static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+#if defined(TARGET_MODE)
+static struct STM_CALLBACK stm_callbacks;
+EXPORT_SYMBOL(mpt3sas_ioc_list);
+#endif
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+#define HBA_HOTUNPLUG_POLLING_INTERVAL 1000 /* in milliseconds */
+#define SATA_SMART_POLLING_INTERVAL 300 /* in seconds */
/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH 30000
@@ -85,36 +94,54 @@
static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
-MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
+#endif
static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
-MODULE_PARM_DESC(max_msix_vectors,
- " max msix vectors");
+MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");
static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
- "irq poll weight (default= one fourth of HBA queue depth)");
+ "irq poll weight (default= one fourth of HBA queue depth)");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Either bit can be set, or both
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0444);
+MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
+ "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+
+int disable_discovery = -1;
+module_param(disable_discovery, int, 0444);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+#endif
static int mpt3sas_fwfault_debug;
-MODULE_PARM_DESC(mpt3sas_fwfault_debug,
- " enable detection of firmware fault and halt firmware - (default=0)");
+MODULE_PARM_DESC(mpt3sas_fwfault_debug, " enable detection of firmware fault "
+ "and halt firmware - (default=0)");
static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
- "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
- "0 - balanced: high iops mode is enabled &\n\t\t"
- "interrupt coalescing is enabled only on high iops queues,\n\t\t"
- "1 - iops: high iops mode is disabled &\n\t\t"
- "interrupt coalescing is enabled on all queues,\n\t\t"
- "2 - latency: high iops mode is disabled &\n\t\t"
- "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
- "\t\tdefault - default perf_mode is 'balanced'"
- );
+ "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
+ "0 - balanced: high iops mode is enabled &"
+ " interrupt coalescing is enabled only on high iops queues,\n\t\t"
+ "1 - iops: high iops mode is disabled &"
+ " interrupt coalescing is enabled on all queues,\n\t\t"
+ "2 - latency: high iops mode is disabled &"
+ " interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
+ "\t\tdefault - default perf_mode is 'balanced'.");
enum mpt3sas_perf_mode {
MPT_PERF_MODE_DEFAULT = -1,
@@ -123,46 +150,19 @@
MPT_PERF_MODE_LATENCY = 2,
};
-static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
-
-/**
- * mpt3sas_base_check_cmd_timeout - Function
- * to check timeout and command termination due
- * to Host reset.
- *
- * @ioc: per adapter object.
- * @status: Status of issued command.
- * @mpi_request:mf request pointer.
- * @sz: size of buffer.
- *
- * @Returns - 1/0 Reset to be done or Not
- */
-u8
-mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
- u8 status, void *mpi_request, int sz)
-{
- u8 issue_reset = 0;
-
- if (!(status & MPT3_CMD_RESET))
- issue_reset = 1;
-
- ioc_err(ioc, "Command %s\n",
- issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
- _debug_dump_mf(mpi_request, sz);
-
- return issue_reset;
-}
+static void
+_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc);
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
- * @val: ?
- * @kp: ?
*
- * Return: ?
*/
static int
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
+#else
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+#endif
{
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
@@ -170,8 +170,8 @@ _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
if (ret)
return ret;
+ printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
/* global ioc spinlock to protect controller list on list operations */
- pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
ioc->fwfault_debug = mpt3sas_fwfault_debug;
@@ -182,11 +182,11 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt3sas_fwfault_debug, 0644);
/**
- * _base_readl_aero - retry readl for max three times.
+ * _base_readl_aero - retry the readl for max three times
* @addr - MPT Fusion system interface register address
*
- * Retry the readl() for max three times if it gets zero value
- * while reading the system interface register.
+ * Retry the readl() for max three times if it gets zero value
+ * while reading the system interface registers.
*/
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
@@ -207,36 +207,63 @@ _base_readl(const volatile void __iomem *addr)
return readl(addr);
}
+
+/**
+ * mpt3sas_base_check_cmd_timeout - Function
+ * to check timeout and command termination due
+ * to Host reset.
+ *
+ * @ioc: per adapter object.
+ * @status: Status of issued command.
+ * @mpi_request:mf request pointer.
+ * @sz: size of buffer.
+ *
+ * @Returns - 1/0 Reset to be done or Not
+ */
+u8
+mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ U8 status, void *mpi_request, int sz)
+{
+ u8 issue_reset = 0;
+
+ if (!(status & MPT3_CMD_RESET))
+ issue_reset = 1;
+
+ printk(MPT3SAS_ERR_FMT "Command %s\n", ioc->name,
+ ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ _debug_dump_mf(mpi_request, sz);
+
+ return issue_reset;
+}
+
/**
* _base_clone_reply_to_sys_mem - copies reply to reply free iomem
* in BAR0 space.
*
* @ioc: per adapter object
* @reply: reply message frame(lower 32bit addr)
- * @index: System request message index.
+ * @index: System request message index.
+ *
+ * @Returns - Nothing
*/
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
u32 index)
{
- /*
- * 256 is offset within sys register.
- * 256 offset MPI frame starts. Max MPI frame supported is 32.
- * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
- */
+ /* 256 is offset within sys register.
+ 256 offset MPI frame starts. Max MPI frame supported is 32.
+ 32 * 128 = 4K. From here, Clone of reply free for mcpu starts */
u16 cmd_credit = ioc->facts.RequestCredit + 1;
- void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
- MPI_FRAME_START_OFFSET +
- (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
-
- writel(reply, reply_free_iomem);
+ void *reply_free_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
+ writel(reply, reply_free_iomem);
}
/**
- * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
- * to system/BAR0 region.
+ * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
+ * to system/BAR0 region.
*
- * @dst_iomem: Pointer to the destination location in BAR0 space.
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
* @src: Pointer to the Source data.
* @size: Size of data to be copied.
*/
@@ -247,26 +274,24 @@
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
u32 *src_virt_mem = (u32 *)src;
for (i = 0; i < size/4; i++)
- writel((u32)src_virt_mem[i],
- (void __iomem *)dst_iomem + (i * 4));
+ writel((u32)src_virt_mem[i], dst_iomem + (i * 4));
}
/**
* _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
- *
- * @dst_iomem: Pointer to the destination location in BAR0 space.
+ *
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
* @src: Pointer to the Source data.
* @size: Size of data to be copied.
*/
static void
-_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
+_base_clone_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
int i;
u32 *src_virt_mem = (u32 *)(src);
- for (i = 0; i < size/4; i++)
- writel((u32)src_virt_mem[i],
- (void __iomem *)dst_iomem + (i * 4));
+ for (i = 0; i < size/4; i++)
+ writel((u32)src_virt_mem[i], dst_iomem + (i * 4));
}
/**
@@ -277,33 +302,31 @@ _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * Return: the chain address.
+ * @Return: chain address.
*/
-static inline void __iomem*
+static inline void *
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u8 sge_chain_count)
{
- void __iomem *base_chain, *chain_virt;
+ void *base_chain, *chain_virt;
u16 cmd_credit = ioc->facts.RequestCredit + 1;
-
- base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
- (cmd_credit * ioc->request_sz) +
- REPLY_FREE_POOL_SIZE;
+ base_chain = (void *)ioc->chip + MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) +
+ (cmd_credit * 4 * sizeof(U32));
chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
ioc->request_sz) + (sge_chain_count * ioc->request_sz);
return chain_virt;
}
/**
- * _base_get_chain_phys - Calculates and Returns physical address
- * in BAR0 for scatter gather chains, for
- * the provided smid.
+ * _base_get_chain_phys - Calculates and Returns physical address in BAR0 for
+ * scatter gather chains, for the provided smid.
*
* @ioc: per adapter object
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * Return: Physical chain address.
+ * @Return - Physical chain address.
*/ static inline phys_addr_t _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid, @@ -313,44 +336,44 @@ _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 cmd_credit = ioc->facts.RequestCredit + 1; base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET + - (cmd_credit * ioc->request_sz) + - REPLY_FREE_POOL_SIZE; + (cmd_credit * ioc->request_sz) + + REPLY_FREE_POOL_SIZE; chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth * ioc->request_sz) + (sge_chain_count * ioc->request_sz); return chain_phys; } /** - * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host - * buffer address for the provided smid. - * (Each smid can have 64K starts from 17024) - * + * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host buffer address + * for the provided smid. + * (Each smid can have 64K starts from 17024) + * * @ioc: per adapter object * @smid: system request message index * - * Return: Pointer to buffer location in BAR0. + * @Returns - Pointer to buffer location in BAR0. */ -static void __iomem * +static void * _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) { u16 cmd_credit = ioc->facts.RequestCredit + 1; // Added extra 1 to reach end of chain. - void __iomem *chain_end = _base_get_chain(ioc, - cmd_credit + 1, - ioc->facts.MaxChainDepth); + void *chain_end = _base_get_chain(ioc, + cmd_credit + 1, + ioc->facts.MaxChainDepth); return chain_end + (smid * 64 * 1024); } /** - * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped - * Host buffer Physical address for the provided smid. - * (Each smid can have 64K starts from 17024) - * + * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped Host buffer + * Physical address for the provided smid. + * (Each smid can have 64K starts from 17024) + * * @ioc: per adapter object * @smid: system request message index * - * Return: Pointer to buffer location in BAR0. + * @Returns - Pointer to buffer location in BAR0. */ static phys_addr_t _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -363,15 +386,14 @@ _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) } /** - * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain - * lookup list and Provides chain_buffer - * address for the matching dma address. - * (Each smid can have 64K starts from 17024) - * + * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain lookup list and Provides + * chain_buffer address for the matching dma address. + * (Each smid can have 64K starts from 17024) + * * @ioc: per adapter object - * @chain_buffer_dma: Chain buffer dma address. + * @chain_buffer_dma: Chain buffer dma address. * - * Return: Pointer to chain buffer. Or Null on Failure. + * @Returns - Pointer to chain buffer. Or Null on Failure. 
*/
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
@@ -379,15 +401,18 @@
{
u16 index, j;
struct chain_tracker *ct;
-
+
for (index = 0; index < ioc->scsiio_depth; index++) {
for (j = 0; j < ioc->chains_needed_per_io; j++) {
ct = &ioc->chain_lookup[index].chains_per_smid[j];
if (ct && ct->chain_buffer_dma == chain_buffer_dma)
return ct->chain_buffer;
+ else
+ continue;
}
}
- ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
+ printk(MPT3SAS_ERR_FMT "Provided chain_buffer_dma"
+ " address is not in the lookup list\n", ioc->name);
return NULL;
}
@@ -400,6 +425,8 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object.
* @mpi_request: mf request pointer.
* @smid: system request message index.
+ *
+ * @Returns: Nothing.
*/
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc, void *mpi_request,
u16 smid)
@@ -439,11 +466,12 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
* address associated with sgel->Address.
*/
- if (is_scsiio_req) {
+ if (is_scsiio_req)
+ {
/* Get scsi_cmd using smid */
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
- if (scmd == NULL) {
- ioc_err(ioc, "scmd is NULL\n");
+ if (scmd == NULL) {
+ printk(MPT3SAS_ERR_FMT "scmd is NULL\n", ioc->name);
return;
}
@@ -454,12 +482,10 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
/*
* 0 - 255 System register
* 256 - 4352 MPI Frame. (This is based on maxCredit 32)
- * 4352 - 4864 Reply_free pool (512 byte is reserved
- * considering maxCredit 32. Reply need extra
- * room, for mCPU case kept four times of
- * maxCredit).
- * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
- * 128 byte size = 12288)
+ * 4352 - 4864 Reply_free pool (512 byte is reserved considering
+ * maxCredit 32. Reply need extra room, for mCPU case kept
+ * four times of maxCredit).
+ * 4864 - 17152 SGE chain element. (32cmd * 3 chain of 128 byte size = 12288)
* 17152 - x Host buffer mapped with smid.
* (Each smid can have 64K Max IO.)
* BAR0+Last 1K MSIX Addr and Data
@@ -572,36 +598,102 @@ eob_clone_chain:
* mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
*
- * Return:
- * 0 if controller is removed from pci subsystem.
- * -1 for other case.
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
*/
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
struct pci_dev *pdev;
- if (!ioc)
+ if (ioc == NULL)
return -1;
pdev = ioc->pdev;
- if (!pdev)
+ if (pdev == NULL)
return -1;
- pci_stop_and_remove_bus_device_locked(pdev);
+
+#if defined(DISABLE_RESET_SUPPORT)
+ ssleep(2);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,3))
+ pci_stop_and_remove_bus_device(pdev);
+#else
+ pci_remove_bus_device(pdev);
+#endif
return 0;
}
+/**
+ * mpt3sas_base_pci_device_is_unplugged - Check whether HBA device is
+ * hot unplugged or not
+ * @ioc: per adapter object
+ *
+ * Return 1 if the HBA device is hot unplugged else return 0.
+ */ +u8 +mpt3sas_base_pci_device_is_unplugged(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + struct pci_bus *bus = pdev->bus; + int devfn = pdev->devfn; + u32 vendor_id; + + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor_id)) + return 1; + + /* some broken boards return 0 or ~0 if a slot is empty: */ + if (vendor_id == 0xffffffff || vendor_id == 0x00000000 || + vendor_id == 0x0000ffff || vendor_id == 0xffff0000) + return 1; + + /* + * Configuration Request Retry Status. Some root ports return the + * actual device ID instead of the synthetic ID (0xFFFF) required + * by the PCIe spec. Ignore the device ID and only check for + * (vendor id == 1). + */ + + if ((vendor_id & 0xffff) == 0x0001) + return 1; + + return 0; +} + +/** + * mpt3sas_base_pci_device_is_available - check whether pci device is + * available for any transactions with FW + * + * @ioc: per adapter object + * + * Return 1 if pci device state is up and running else return 0. + */ +u8 +mpt3sas_base_pci_device_is_available(struct MPT3SAS_ADAPTER *ioc) +{ + if (ioc->pci_error_recovery || mpt3sas_base_pci_device_is_unplugged(ioc)) + return 0; + return 1; +} + /** * _base_fault_reset_work - workq handling ioc fault conditions * @work: input argument, used to derive ioc - * * Context: sleep. + * + * Return nothing. */ static void +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) _base_fault_reset_work(struct work_struct *work) { struct MPT3SAS_ADAPTER *ioc = container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work); +#else +_base_fault_reset_work(void *arg) +{ + struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; +#endif unsigned long flags; u32 doorbell; int rc; @@ -609,13 +701,14 @@ _base_fault_reset_work(struct work_struct *work) spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->shost_recovery || ioc->pci_error_recovery) + if (ioc->shost_recovery || ioc->pci_error_recovery || ioc->remove_host) goto rearm_timer; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) { - ioc_err(ioc, "SAS host is non-operational !!!!\n"); + printk(MPT3SAS_ERR_FMT "SAS host is non-operational !!!!\n", + ioc->name); /* It may be possible that EEH recovery can resolve some of * pci bus failure issues rather removing the dead ioc function @@ -631,6 +724,12 @@ _base_fault_reset_work(struct work_struct *work) goto rearm_timer; } + /* + * Set remove_host flag early since kernel thread will + * take some time to execute. + */ + ioc->remove_host = 1; + /* * Call _scsih_flush_pending_cmds callback so that we flush all * pending commands back to OS. This call is required to aovid @@ -639,20 +738,16 @@ _base_fault_reset_work(struct work_struct *work) * command back from HW. */ ioc->schedule_dead_ioc_flush_running_cmds(ioc); - /* - * Set remove_host flag early since kernel thread will - * take some time to execute. 
-	 */
-	ioc->remove_host = 1;
+	/* Remove the dead host */
 	p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
-	    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
+		"%s_dead_ioc_%d", ioc->driver_name, ioc->id);
 	if (IS_ERR(p))
-		ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
-			__func__);
+		printk(MPT3SAS_ERR_FMT "%s: Running mpt3sas_dead_ioc "
+		    "thread failed !!!!\n", ioc->name, __func__);
 	else
-		ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
-			__func__);
+		printk(MPT3SAS_ERR_FMT "%s: Running mpt3sas_dead_ioc "
+		    "thread success !!!!\n", ioc->name, __func__);
 	return; /* don't rearm timer */
 }
 
@@ -660,8 +755,8 @@ _base_fault_reset_work(struct work_struct *work)
 	if ((doorbell & MPI2_IOC_STATE_MASK) !=
	    MPI2_IOC_STATE_OPERATIONAL) {
 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-		ioc_warn(ioc, "%s: hard reset: %s\n",
-			 __func__, rc == 0 ? "success" : "failed");
+		printk(MPT3SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
+		    __func__, (rc == 0) ? "success" : "failed");
 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 			mpt3sas_base_fault_info(ioc, doorbell &
@@ -669,6 +764,12 @@ _base_fault_reset_work(struct work_struct *work)
 		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
 			return; /* don't rearm timer */
+#if defined(TARGET_MODE)
+	} else {
+		if (stm_callbacks.watchdog)
+			/* target mode drivers watchdog */
+			stm_callbacks.watchdog(ioc);
+#endif
 	}
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -680,11 +781,88 @@ _base_fault_reset_work(struct work_struct *work)
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 }
 
+static void
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19))
+_base_hba_hot_unplug_work(struct work_struct *work)
+{
+	struct MPT3SAS_ADAPTER *ioc =
+	    container_of(work, struct MPT3SAS_ADAPTER, hba_hot_unplug_work.work);
+#else
+_base_hba_hot_unplug_work(void *arg)
+{
+	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+#endif
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags);
+	if (ioc->shost_recovery || ioc->pci_error_recovery)
+		goto rearm_timer;
+
+	if (mpt3sas_base_pci_device_is_unplugged(ioc)) {
+		if (ioc->remove_host) {
+			printk(MPT3SAS_ERR_FMT
+			    "The IOC seems hot unplugged and the driver is "
+			    "waiting for pciehp module to remove the PCIe "
+			    "device instance associated with IOC!!!\n",
+			    ioc->name);
+			goto rearm_timer;
+		}
+
+		/* Set remove_host flag here, since kernel will invoke driver's
+		 * .remove() callback function one after the other for all hot
+		 * un-plugged devices, so it may take some time to call
+		 * .remove() function for subsequent hot un-plugged
+		 * PCI devices.
+		 */
+		ioc->remove_host = 1;
+		_base_clear_outstanding_mpt_commands(ioc);
+		mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
+		mpt3sas_ctl_clear_outstanding_ioctls(ioc);
+	}
+
+rearm_timer:
+	if (ioc->hba_hot_unplug_work_q)
+		queue_delayed_work(ioc->hba_hot_unplug_work_q,
+		    &ioc->hba_hot_unplug_work,
+		    msecs_to_jiffies(HBA_HOTUNPLUG_POLLING_INTERVAL));
+	spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags);
+}
+
+/**
+ * _base_sata_smart_poll_work - worker thread that polls SATA drives for
+ *				SMART errors every 5 minutes
+ * @work: input argument, used to derive ioc
+ * Context: sleep.
+ *
+ * Returns nothing.
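+ *
+ * Editor's note, illustration only: the handler rearms itself from its
+ * own tail, so the poll keeps running until the owner clears
+ * smart_poll_work_q:
+ *
+ *	queue_delayed_work(ioc->smart_poll_work_q, &ioc->smart_poll_work,
+ *	    SATA_SMART_POLLING_INTERVAL * HZ);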
+ */ +static void +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) +_base_sata_smart_poll_work(struct work_struct *work) +{ + struct MPT3SAS_ADAPTER *ioc = + container_of(work, struct MPT3SAS_ADAPTER, smart_poll_work.work); +#else +_base_sata_smart_poll_work(void *arg) +{ + struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; +#endif + if (ioc->shost_recovery || !mpt3sas_base_pci_device_is_available(ioc)) + goto rearm_timer; + mpt3sas_scsih_sata_smart_polling(ioc); +rearm_timer: + if (ioc->smart_poll_work_q) + queue_delayed_work(ioc->smart_poll_work_q, + &ioc->smart_poll_work, + SATA_SMART_POLLING_INTERVAL * HZ); +} + /** * mpt3sas_base_start_watchdog - start the fault_reset_work_q * @ioc: per adapter object - * * Context: sleep. + * + * Return nothing. */ void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) @@ -695,16 +873,20 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) return; /* initialize fault polling */ - +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); +#else + INIT_WORK(&ioc->fault_reset_work, _base_fault_reset_work, (void *)ioc); +#endif snprintf(ioc->fault_reset_work_q_name, - sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", - ioc->driver_name, ioc->id); + sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", + ioc->driver_name, ioc->id); ioc->fault_reset_work_q = create_singlethread_workqueue(ioc->fault_reset_work_q_name); if (!ioc->fault_reset_work_q) { - ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__); - return; + printk(MPT3SAS_ERR_FMT "%s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; } spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); if (ioc->fault_reset_work_q) @@ -717,8 +899,9 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q * @ioc: per adapter object - * * Context: sleep. + * + * Return nothing. 
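+ *
+ * Editor's note, illustration only: the queue pointer is cleared under
+ * ioc_reset_in_progress_lock before the work is cancelled, so a handler
+ * that is already running cannot rearm itself:
+ *
+ *	wq = ioc->fault_reset_work_q;
+ *	ioc->fault_reset_work_q = NULL;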
*/ void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) @@ -731,7 +914,124 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) ioc->fault_reset_work_q = NULL; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); if (wq) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) +#else + if (!cancel_delayed_work(&ioc->fault_reset_work)) +#endif + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +void +mpt3sas_base_start_hba_unplug_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->hba_hot_unplug_work_q) + return; + + /* initialize fault polling */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) + INIT_DELAYED_WORK(&ioc->hba_hot_unplug_work, + _base_hba_hot_unplug_work); +#else + INIT_WORK(&ioc->hba_hot_unplug_work, + _base_hba_hot_unplug_work, (void *)ioc); +#endif + snprintf(ioc->hba_hot_unplug_work_q_name, + sizeof(ioc->hba_hot_unplug_work_q_name), "poll_%s%d_hba_unplug", + ioc->driver_name, ioc->id); + ioc->hba_hot_unplug_work_q = + create_singlethread_workqueue(ioc->hba_hot_unplug_work_q_name); + if (!ioc->hba_hot_unplug_work_q) { + printk(MPT3SAS_ERR_FMT "%s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +mpt3sas_base_stop_hba_unplug_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + wq = ioc->hba_hot_unplug_work_q; + ioc->hba_hot_unplug_work_q = NULL; + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); + + if (wq) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) + if (!cancel_delayed_work_sync(&ioc->hba_hot_unplug_work)) +#else + if (!cancel_delayed_work(&ioc->hba_hot_unplug_work)) +#endif + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * mpt3sas_base_start_smart_polling - Create and start the smart polling + * thread for SMART SATA drive + * @ioc: per adapter object + * Context: sleep. + * + * Return nothing. + */ +void +mpt3sas_base_start_smart_polling(struct MPT3SAS_ADAPTER *ioc) +{ + if (ioc->smart_poll_work_q) + return; + + /* initialize SMART SATA drive polling */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) + INIT_DELAYED_WORK(&ioc->smart_poll_work, _base_sata_smart_poll_work); +#else + INIT_WORK(&ioc->smart_poll_work, _base_sata_smart_poll_work, (void *)ioc); +#endif + snprintf(ioc->smart_poll_work_q_name, + sizeof(ioc->smart_poll_work_q_name), "smart_poll_%d", ioc->id); + ioc->smart_poll_work_q = + create_singlethread_workqueue(ioc->smart_poll_work_q_name); + if (!ioc->smart_poll_work_q) { + printk(MPT3SAS_ERR_FMT "%s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + if (ioc->smart_poll_work_q) + queue_delayed_work(ioc->smart_poll_work_q, + &ioc->smart_poll_work, + SATA_SMART_POLLING_INTERVAL * HZ); +} + +/** + * mpt3sas_base_stop_smart_polling - stop the smart polling thread + * @ioc: per adapter object + * Context: sleep. + * + * Return nothing. 
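+ *
+ * Editor's sketch of a hypothetical caller pairing, not part of this
+ * patch:
+ *
+ *	mpt3sas_base_start_smart_polling(ioc);	(probe path)
+ *	mpt3sas_base_stop_smart_polling(ioc);	(remove path)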
+ */ +void +mpt3sas_base_stop_smart_polling(struct MPT3SAS_ADAPTER *ioc) +{ + struct workqueue_struct *wq; + + wq = ioc->smart_poll_work_q; + ioc->smart_poll_work_q = NULL; + if (wq) { + if (!cancel_delayed_work(&ioc->smart_poll_work)) flush_workqueue(wq); destroy_workqueue(wq); } @@ -741,16 +1041,20 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) * mpt3sas_base_fault_info - verbose translation of firmware FAULT code * @ioc: per adapter object * @fault_code: fault code + * + * Return nothing. */ void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) { - ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code); + printk(MPT3SAS_ERR_FMT "fault_state(0x%04x)!\n", + ioc->name, fault_code); } /** * mpt3sas_halt_firmware - halt's mpt controller firmware * @ioc: per adapter object + * @set_fault: set fw fault * * For debugging timeout related issues. Writing 0xCOFFEE00 * to the doorbell register will halt controller firmware. With @@ -758,35 +1062,84 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) * obtain a ring buffer from controller UART. */ void -mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc, u8 set_fault) { u32 doorbell; - if (!ioc->fwfault_debug) + if ((!ioc->fwfault_debug) && (!set_fault)) return; - dump_stack(); + if (!set_fault) + dump_stack(); doorbell = ioc->base_readl(&ioc->chip->Doorbell); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) mpt3sas_base_fault_info(ioc , doorbell); else { writel(0xC0FFEE00, &ioc->chip->Doorbell); - ioc_err(ioc, "Firmware is halted due to command timeout\n"); + if (!set_fault) + printk(MPT3SAS_ERR_FMT "Firmware is halted due to" + " command timeout\n", ioc->name); } + + if (set_fault) + return; if (ioc->fwfault_debug == 2) - for (;;) - ; + for (;;); else panic("panic in %s\n", __func__); } + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) && \ + (defined(RHEL_MAJOR) && (RHEL_MAJOR != 6)))) +/** + * _base_group_cpus_on_irq - when there are more cpus than available + * msix vectors, then group cpus + * together on same irq + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_group_cpus_on_irq(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q; + unsigned int i, cpu, group, nr_cpus, nr_msix, index = 0; + cpu = cpumask_first(cpu_online_mask); + nr_msix = ioc->reply_queue_count - ioc->high_iops_queues; + nr_cpus = num_online_cpus(); + group = nr_cpus / nr_msix; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + + if (reply_q->msix_index < ioc->high_iops_queues) + continue; + + if (cpu >= nr_cpus) + break; + + if (index < nr_cpus % nr_msix) + group++; + + for (i = 0 ; i < group ; i++) { + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + cpu = cpumask_next(cpu, cpu_online_mask); + } + index++; + } +} +#endif + /** * _base_sas_ioc_info - verbose translation of the ioc status * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @request_hdr: request mf + * + * Return nothing. 
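+ *
+ * Editor's illustration, not part of this patch: the status word is
+ * masked before being decoded, i.e.
+ *
+ *	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;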
*/ static void _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, @@ -807,6 +1160,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) return; + + switch (ioc_status) { /**************************************************************************** @@ -854,9 +1209,6 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE: desc = "config invalid type"; break; - case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE: - desc = "config invalid page"; - break; case MPI2_IOCSTATUS_CONFIG_INVALID_DATA: desc = "config invalid data"; break; @@ -890,13 +1242,16 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, ****************************************************************************/ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: - desc = "eedp guard error"; + if (!ioc->disable_eedp_support) + desc = "eedp guard error"; break; case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: - desc = "eedp ref tag error"; + if (!ioc->disable_eedp_support) + desc = "eedp ref tag error"; break; case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: - desc = "eedp app tag error"; + if (!ioc->disable_eedp_support) + desc = "eedp app tag error"; break; /**************************************************************************** @@ -999,8 +1354,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, break; } - ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n", - desc, ioc_status, request_hdr, func_str); + printk(MPT3SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p)," + " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str); _debug_dump_mf(request_hdr, frame_sz/4); } @@ -1009,6 +1364,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, * _base_display_event_data - verbose translation of firmware asyn events * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware + * + * Return nothing. */ static void _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, @@ -1022,6 +1379,17 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, event = le16_to_cpu(mpi_reply->Event); + if (ioc->warpdrive_msg) { + switch (event) { + case MPI2_EVENT_IR_OPERATION_STATUS: + case MPI2_EVENT_IR_VOLUME: + case MPI2_EVENT_IR_PHYSICAL_DISK: + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + case MPI2_EVENT_LOG_ENTRY_ADDED: + return; + } + } + switch (event) { case MPI2_EVENT_LOG_DATA: desc = "Log Data"; @@ -1039,20 +1407,19 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, desc = "Device Status Change"; break; case MPI2_EVENT_IR_OPERATION_STATUS: - if (!ioc->hide_ir_msg) - desc = "IR Operation Status"; + desc = "IR Operation Status"; break; case MPI2_EVENT_SAS_DISCOVERY: { Mpi2EventDataSasDiscovery_t *event_data = (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; - ioc_info(ioc, "Discovery: (%s)", - event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? - "start" : "stop"); + printk(MPT3SAS_INFO_FMT "SAS Discovery: (%s)", ioc->name, + (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? 
+ "start" : "stop"); if (event_data->DiscoveryStatus) - pr_cont(" discovery_status(0x%08x)", + printk("discovery_status(0x%08x)", le32_to_cpu(event_data->DiscoveryStatus)); - pr_cont("\n"); + printk("\n"); return; } case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: @@ -1071,20 +1438,16 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, desc = "SAS Enclosure Device Status Change"; break; case MPI2_EVENT_IR_VOLUME: - if (!ioc->hide_ir_msg) - desc = "IR Volume"; + desc = "IR Volume"; break; case MPI2_EVENT_IR_PHYSICAL_DISK: - if (!ioc->hide_ir_msg) - desc = "IR Physical Disk"; + desc = "IR Physical Disk"; break; case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: - if (!ioc->hide_ir_msg) - desc = "IR Configuration Change List"; + desc = "IR Configuration Change List"; break; case MPI2_EVENT_LOG_ENTRY_ADDED: - if (!ioc->hide_ir_msg) - desc = "Log Entry Added"; + desc = "Log Entry Added"; break; case MPI2_EVENT_TEMP_THRESHOLD: desc = "Temperature Threshold"; @@ -1102,13 +1465,13 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, { Mpi26EventDataPCIeEnumeration_t *event_data = (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData; - ioc_info(ioc, "PCIE Enumeration: (%s)", - event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ? - "start" : "stop"); + printk(MPT3SAS_INFO_FMT "PCIE Enumeration: (%s)", ioc->name, + (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? + "start" : "stop"); if (event_data->EnumerationStatus) - pr_cont("enumeration_status(0x%08x)", - le32_to_cpu(event_data->EnumerationStatus)); - pr_cont("\n"); + printk("enumeration_status(0x%08x)", + le32_to_cpu(event_data->EnumerationStatus)); + printk("\n"); return; } case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: @@ -1119,13 +1482,15 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, if (!desc) return; - ioc_info(ioc, "%s\n", desc); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, desc); } /** * _base_sas_log_info - verbose translation of firmware log info * @ioc: per adapter object * @log_info: log info + * + * Return nothing. */ static void _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) @@ -1163,16 +1528,17 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) originator_str = "PL"; break; case 2: - if (!ioc->hide_ir_msg) - originator_str = "IR"; - else + if (ioc->warpdrive_msg) originator_str = "WarpDrive"; + else + originator_str = "IR"; break; } - ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", - log_info, - originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode); + printk(MPT3SAS_WARN_FMT "log_info(0x%08x): originator(%s), " + "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info, + originator_str, sas_loginfo.dw.code, + sas_loginfo.dw.subcode); } /** @@ -1181,6 +1547,8 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) + * + * Return nothing. 
*/ static void _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -1192,18 +1560,16 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (unlikely(!mpi_reply)) { - ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply->IOCStatus); - if ((ioc_status & MPI2_IOCSTATUS_MASK) && (ioc->logging_level & MPT_DEBUG_REPLY)) { _base_sas_ioc_info(ioc , mpi_reply, mpt3sas_base_get_msg_frame(ioc, smid)); } - if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); _base_sas_log_info(ioc, loginfo); @@ -1222,9 +1588,8 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * - * Return: - * 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -1256,9 +1621,8 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * - * Return: - * 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ static u8 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) @@ -1273,28 +1637,24 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) return 1; if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) return 1; - _base_display_event_data(ioc, mpi_reply); - if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); if (!smid) { - delayed_event_ack = kzalloc(sizeof(*delayed_event_ack), - GFP_ATOMIC); + delayed_event_ack = kzalloc(sizeof(*delayed_event_ack), GFP_ATOMIC); if (!delayed_event_ack) goto out; INIT_LIST_HEAD(&delayed_event_ack->list); delayed_event_ack->Event = mpi_reply->Event; delayed_event_ack->EventContext = mpi_reply->EventContext; list_add_tail(&delayed_event_ack->list, - &ioc->delayed_event_ack_list); - dewtprintk(ioc, - ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n", - le16_to_cpu(mpi_reply->Event))); + &ioc->delayed_event_ack_list); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "DELAYED: EVENT ACK: event (0x%04x)\n", + ioc->name, le16_to_cpu(mpi_reply->Event))); goto out; } - ack_request = mpt3sas_base_get_msg_frame(ioc, smid); memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); ack_request->Function = MPI2_FUNCTION_EVENT_ACK; @@ -1315,8 +1675,18 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) return 1; } -static struct scsiio_tracker * -_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) +inline struct scsiio_tracker * +mpt3sas_base_scsi_cmd_priv(struct scsi_cmnd *scmd) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) + return (struct scsiio_tracker *)scmd->host_scribble; +#else + return scsi_cmd_priv(scmd); +#endif +} + +struct scsiio_tracker * 
+mpt3sas_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) { struct scsi_cmnd *cmd; @@ -1326,7 +1696,7 @@ _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); if (cmd) - return scsi_cmd_priv(cmd); + return mpt3sas_base_scsi_cmd_priv(cmd); return NULL; } @@ -1336,24 +1706,28 @@ _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) * @ioc: per adapter object * @smid: system request message index * - * Return: callback index. + * Return callback index. */ static u8 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) { int i; - u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + u16 ctl_smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_IOCTL; + u16 discovery_smid = + ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY; u8 cb_idx = 0xFF; if (smid < ioc->hi_priority_smid) { struct scsiio_tracker *st; if (smid < ctl_smid) { - st = _get_st_from_smid(ioc, smid); + st = mpt3sas_get_st_from_smid(ioc, smid); if (st) cb_idx = st->cb_idx; - } else if (smid == ctl_smid) + } else if (smid < discovery_smid) cb_idx = ioc->ctl_cb_idx; + else + cb_idx = ioc->scsih_cb_idx; } else if (smid < ioc->internal_smid) { i = smid - ioc->hi_priority_smid; cb_idx = ioc->hpr_lookup[i].cb_idx; @@ -1365,13 +1739,15 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) } /** - * _base_mask_interrupts - disable interrupts + * mpt3sas_base_mask_interrupts - disable interrupts * @ioc: per adapter object * * Disabling ResetIRQ, Reply and Doorbell Interrupts + * + * Return nothing. */ -static void -_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) { u32 him_register; @@ -1387,6 +1763,8 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) * @ioc: per adapter object * * Enabling only Reply Interrupts + * + * Return nothing. */ static void _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) @@ -1407,25 +1785,13 @@ union reply_descriptor { } u; }; -static u32 base_mod64(u64 dividend, u32 divisor) -{ - u32 remainder; - - if (!divisor) - pr_err("mpt3sas: DIVISOR is zero, in div fn\n"); - remainder = do_div(dividend, divisor); - return remainder; -} - /** - * _base_process_reply_queue - Process reply descriptors from reply - * descriptor post queue. - * @reply_q: per IRQ's reply queue object. + * _base_process_reply_queue - process the reply descriptors from reply queue + * @reply_q : per IRQ's reply queue object * - * Return: number of reply descriptors processed from reply - * descriptor queue. + * returns number of reply descriptors processed from a reply queue. 
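+ *
+ * Editor's illustration, not part of this patch: each 64-bit reply
+ * descriptor is treated as unused when all bits are set, e.g.
+ *
+ *	rd.word = le64_to_cpu(rpf->Words);
+ *	if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
+ *		goto out;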
*/ -static int +int _base_process_reply_queue(struct adapter_reply_queue *reply_q) { union reply_descriptor rd; @@ -1493,6 +1859,11 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) smid); } } else { +#if defined(TARGET_MODE) + if (stm_callbacks.smid_handler) + stm_callbacks.smid_handler(ioc, + msix_index, reply); +#endif _base_async_event(ioc, msix_index, reply); } @@ -1505,13 +1876,29 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) ioc->reply_free[ioc->reply_free_host_index] = cpu_to_le32(reply); if (ioc->is_mcpu_endpoint) - _base_clone_reply_to_sys_mem(ioc, - reply, - ioc->reply_free_host_index); + _base_clone_reply_to_sys_mem(ioc, reply, + ioc->reply_free_host_index); + wmb(); writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); + } +#if defined(TARGET_MODE) + } else if (request_descript_type == + MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER) { + if (stm_callbacks.target_command) + stm_callbacks.target_command(ioc, + &rpf->TargetCommandBuffer, msix_index); + } else if (request_descript_type == + MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS) { + if (stm_callbacks.target_assist) + stm_callbacks.target_assist(ioc, + &rpf->TargetAssistSuccess); + mpt3sas_base_free_smid(ioc, smid); } +#else + } +#endif rpf->Words = cpu_to_le64(ULLONG_MAX); reply_q->reply_post_host_index = @@ -1522,6 +1909,7 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) reply_q->reply_post_free[reply_q->reply_post_host_index]. Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; completed_cmds++; + /* Update the reply post host index after continuously * processing the threshold number of Reply Descriptors. * So that FW can find enough entries to post the Reply @@ -1530,21 +1918,23 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) if (!base_mod64(completed_cmds, ioc->thresh_hold)) { if (ioc->combined_reply_queue) { writel(reply_q->reply_post_host_index | - ((msix_index & 7) << - MPI2_RPHI_MSIX_INDEX_SHIFT), - ioc->replyPostRegisterIndex[msix_index/8]); + ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); } else { writel(reply_q->reply_post_host_index | - (msix_index << - MPI2_RPHI_MSIX_INDEX_SHIFT), - &ioc->chip->ReplyPostHostIndex); + (msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); } + +#if defined(MPT3SAS_ENABLE_IRQ_POLL) if (!reply_q->irq_poll_scheduled) { reply_q->irq_poll_scheduled = true; irq_poll_sched(&reply_q->irqpoll); } atomic_dec(&reply_q->busy); return completed_cmds; +#endif } if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) goto out; @@ -1561,36 +1951,21 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) return completed_cmds; } - if (ioc->is_warpdrive) { - writel(reply_q->reply_post_host_index, - ioc->reply_post_host_index[msix_index]); + wmb(); + if (ioc->is_warpdrive) { + writel(reply_q->reply_post_host_index, + ioc->reply_post_host_index[msix_index]); atomic_dec(&reply_q->busy); return completed_cmds; } - /* Update Reply Post Host Index. - * For those HBA's which support combined reply queue feature - * 1. Get the correct Supplemental Reply Post Host Index Register. - * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host - * Index Register address bank i.e replyPostRegisterIndex[], - * 2. Then update this register with new reply host index value - * in ReplyPostIndex field and the MSIxIndex field with - * msix_index value reduced to a value between 0 and 7, - * using a modulo 8 operation. 
Since each Supplemental Reply Post
-	 * Host Index Register supports 8 MSI-X vectors.
-	 *
-	 * For other HBA's just update the Reply Post Host Index register with
-	 * new reply host index value in ReplyPostIndex Field and msix_index
-	 * value in MSIxIndex field.
-	 */
-	if (ioc->combined_reply_queue)
+	if (ioc->combined_reply_queue) {
 		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
-			MPI2_RPHI_MSIX_INDEX_SHIFT),
-			ioc->replyPostRegisterIndex[msix_index/8]);
-	else
+		    MPI2_RPHI_MSIX_INDEX_SHIFT), ioc->replyPostRegisterIndex[msix_index/8]);
+	} else {
 		writel(reply_q->reply_post_host_index | (msix_index <<
-			MPI2_RPHI_MSIX_INDEX_SHIFT),
-			&ioc->chip->ReplyPostHostIndex);
+		    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+	}
 	atomic_dec(&reply_q->busy);
 	return completed_cmds;
 }
 
@@ -1599,42 +1974,52 @@
  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
  * @irq: irq number (not used)
  * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ * @r: pt_regs pointer (not used)
  *
- * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ * Return IRQ_HANDLED if processed, else IRQ_NONE.
  */
 static irqreturn_t
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
 _base_interrupt(int irq, void *bus_id)
+#else
+_base_interrupt(int irq, void *bus_id, struct pt_regs *r)
+#endif
 {
 	struct adapter_reply_queue *reply_q = bus_id;
 	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
 
 	if (ioc->mask_interrupts)
 		return IRQ_NONE;
+
+#if defined(MPT3SAS_ENABLE_IRQ_POLL)
 	if (reply_q->irq_poll_scheduled)
 		return IRQ_HANDLED;
+#endif
+
 	return ((_base_process_reply_queue(reply_q) > 0) ?
-	    IRQ_HANDLED : IRQ_NONE);
+		IRQ_HANDLED : IRQ_NONE);
 }
 
+#if defined(MPT3SAS_ENABLE_IRQ_POLL)
 /**
  * _base_irqpoll - IRQ poll callback handler
  * @irqpoll - irq_poll object
  * @budget - irq poll weight
 *
 * returns number of reply descriptors processed
  */
-static int
+int
 _base_irqpoll(struct irq_poll *irqpoll, int budget)
 {
 	struct adapter_reply_queue *reply_q;
 	int num_entries = 0;
 
 	reply_q = container_of(irqpoll, struct adapter_reply_queue,
-	    irqpoll);
+		irqpoll);
 	if (reply_q->irq_line_enable) {
 		disable_irq(reply_q->os_irq);
 		reply_q->irq_line_enable = false;
 	}
+
 	num_entries = _base_process_reply_queue(reply_q);
 	if (num_entries < budget) {
 		irq_poll_complete(irqpoll);
@@ -1651,30 +2036,38 @@
 * @ioc: per adapter object
 *
 * returns nothing
  */
-static void
+void
 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
 {
 	struct adapter_reply_queue *reply_q, *next;
+
+
 	if (list_empty(&ioc->reply_queue_list))
 		return;
 
 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
-		irq_poll_init(&reply_q->irqpoll,
-			ioc->hba_queue_depth/4, _base_irqpoll);
+		irq_poll_init(&reply_q->irqpoll, ioc->thresh_hold, _base_irqpoll);
 		reply_q->irq_poll_scheduled = false;
 		reply_q->irq_line_enable = true;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+		if (ioc->msix_enable)
+			reply_q->os_irq = reply_q->vector;
+		else
+			reply_q->os_irq = ioc->pdev->irq;
+#else
 		reply_q->os_irq = pci_irq_vector(ioc->pdev,
		    reply_q->msix_index);
+#endif
 	}
 }
+#endif
 
 /**
  * _base_is_controller_msix_enabled - is controller support muli-reply queues
  * @ioc: per adapter object
 *
- * Return: Whether or not MSI/X is enabled.
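+ * Returns 1 when the IOC advertises MSI-X capability and MSI-X has been
+ * enabled, 0 otherwise. Editor's illustration (not part of this patch) of
+ * the test performed below:
+ *
+ *	return (ioc->facts.IOCCapabilities &
+ *	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;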
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
@@ -1683,13 +2076,26 @@
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
 }
+
 /**
- * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
- * @ioc: per adapter object
- * Context: non ISR conext
- *
- * Called when a Task Management request has completed.
- */
+ ** This routine was added because mpt3sas_base_flush_reply_queues()
+ ** skips over reply queues that are currently busy (i.e. being handled
+ ** by interrupt processing in another core). If a reply queue was busy,
+ ** then we need to call synchronize_irq() to make sure the other core
+ ** has finished flushing the queue and completed any calls to the
+ ** mid-layer scsi_done() routine.
+ ** It might be possible to just add the synchronize_irq() call to
+ ** mpt3sas_base_flush_reply_queues(), but that means it would be called
+ ** from an IRQ context, which may lead to deadlocks or other issues.
+ **
+ ** mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
+ ** @ioc: per adapter object
+ ** Context: non-ISR context
+ **
+ ** Called when a Task Management request has completed.
+ **
+ ** Return nothing.
+ **/
 void
 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
 {
@@ -1700,14 +2106,15 @@
	 */
 	if (!_base_is_controller_msix_enabled(ioc))
 		return;
-
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
 		if (ioc->shost_recovery || ioc->remove_host ||
-		    ioc->pci_error_recovery)
+			ioc->pci_error_recovery)
 			return;
 		/* TMs are on msix_index == 0 */
 		if (reply_q->msix_index == 0)
 			continue;
+
+#if defined(MPT3SAS_ENABLE_IRQ_POLL)
 		if (reply_q->irq_poll_scheduled) {
 			/* Calling irq_poll_disable will wait for any pending
			 * callbacks to have completed.
@@ -1719,25 +2126,37 @@
			 */
 			enable_irq(reply_q->os_irq);
 			continue;
 		}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+		synchronize_irq(reply_q->vector);
+#else
 		synchronize_irq(pci_irq_vector(ioc->pdev,
		    reply_q->msix_index));
+#endif
 	}
 }
+
 /**
  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
  * @cb_idx: callback index
+ *
+ * Return nothing.
  */
 void
 mpt3sas_base_release_callback_handler(u8 cb_idx)
 {
	mpt_callbacks[cb_idx] = NULL;
 }
+#if defined(TARGET_MODE)
+EXPORT_SYMBOL(mpt3sas_base_release_callback_handler);
+#endif
 
 /**
- * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * mpt3sas_base_register_callback_handler - obtain index for the ISR handler
  * @cb_func: callback function
  *
- * Return: Index of @cb_func.
+ * Returns the callback index assigned to @cb_func.
  */
 u8
 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
@@ -1751,9 +2170,14 @@
	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
 }
+#if defined(TARGET_MODE)
+EXPORT_SYMBOL(mpt3sas_base_register_callback_handler);
+#endif
 
 /**
- * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ * mpt3sas_base_initialize_callback_handler - initialize the ISR handler
+ *
+ * Return nothing.
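+ *
+ * Editor's sketch of hypothetical driver-load usage, not part of this
+ * patch (io_done_cb is an invented name): each protocol module registers
+ * its completion routine once and keeps the returned index for use in
+ * request descriptors:
+ *
+ *	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(io_done_cb);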
*/ void mpt3sas_base_initialize_callback_handler(void) @@ -1764,6 +2188,51 @@ mpt3sas_base_initialize_callback_handler(void) mpt3sas_base_release_callback_handler(cb_idx); } +#if defined(TARGET_MODE) +/** + * mpt3sas_base_stm_release_callback_handler - clear STM callback handler + * + * Return nothing. + */ +void +mpt3sas_base_stm_release_callback_handler(void) +{ + stm_callbacks.watchdog = NULL; + stm_callbacks.target_command = NULL; + stm_callbacks.target_assist = NULL; + stm_callbacks.smid_handler = NULL; + stm_callbacks.reset_handler = NULL; +} +EXPORT_SYMBOL(mpt3sas_base_stm_release_callback_handler); + +/** + * mpt3sas_base_stm_register_callback_handler - Set the STM callbacks + * @stm_funcs: Structure containing the function pointers + * + */ +void +mpt3sas_base_stm_register_callback_handler(struct STM_CALLBACK stm_funcs) +{ + + stm_callbacks.watchdog = stm_funcs.watchdog; + stm_callbacks.target_command = stm_funcs.target_command; + stm_callbacks.target_assist = stm_funcs.target_assist; + stm_callbacks.smid_handler = stm_funcs.smid_handler; + stm_callbacks.reset_handler = stm_funcs.reset_handler; +} +EXPORT_SYMBOL(mpt3sas_base_stm_register_callback_handler); + +/** + * mpt3sas_base_stm_initialize_callback_handler - initialize the stm handler + * + * Return nothing. + */ +void +mpt3sas_base_stm_initialize_callback_handler(void) +{ + mpt3sas_base_stm_release_callback_handler(); +} +#endif /** * _base_build_zero_len_sge - build zero length sg entry @@ -1773,6 +2242,8 @@ mpt3sas_base_initialize_callback_handler(void) * Create a zero length scatter gather entry to insure the IOCs hardware has * something to use if the target device goes brain dead and tries * to send data even when none is asked for. + * + * Return nothing. */ static void _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) @@ -1789,6 +2260,8 @@ _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) * @paddr: virtual address for SGE * @flags_length: SGE flags and data transfer length * @dma_addr: Physical address + * + * Return nothing. */ static void _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) @@ -1807,6 +2280,8 @@ _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) * @paddr: virtual address for SGE * @flags_length: SGE flags and data transfer length * @dma_addr: Physical address + * + * Return nothing. */ static void _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) @@ -1822,17 +2297,17 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) /** * _base_get_chain_buffer_tracker - obtain chain tracker * @ioc: per adapter object - * @scmd: SCSI commands of the IO request + * @smid: smid associated to an IO request * - * Return: chain tracker from chain_lookup table using key as + * Returns chain tracker from chain_lookup table using key as * smid and smid's chain_offset. */ static struct chain_tracker * _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, - struct scsi_cmnd *scmd) + struct scsi_cmnd *scmd) { struct chain_tracker *chain_req; - struct scsiio_tracker *st = scsi_cmd_priv(scmd); + struct scsiio_tracker *st = mpt3sas_base_scsi_cmd_priv(scmd); u16 smid = st->smid; u8 chain_offset = atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); @@ -1854,6 +2329,8 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, * @data_out_sz: data xfer size for WRITES * @data_in_dma: physical address for READS * @data_in_sz: data xfer size for READS + * + * Return nothing. 
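+ *
+ * Editor's illustration, not part of this patch: a simple MPI2 SGE packs
+ * its flag bits above the 24-bit length, so a final write-buffer element
+ * is emitted as
+ *
+ *	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ *	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ *	    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT;
+ *	ioc->base_add_sg_single(psge, sgl_flags | data_out_sz, data_out_dma);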
*/ static void _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, @@ -1905,68 +2382,70 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, /* IEEE format sgls */ /** - * _base_build_nvme_prp - This function is called for NVMe end devices to build - * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP - * entry of the NVMe message (PRP1). If the data buffer is small enough to be - * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is - * used to describe a larger data buffer. If the data buffer is too large to - * describe using the two PRP entriess inside the NVMe message, then PRP1 - * describes the first data memory segment, and PRP2 contains a pointer to a PRP - * list located elsewhere in memory to describe the remaining data memory - * segments. The PRP list will be contiguous. - * - * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP - * consists of a list of PRP entries to describe a number of noncontigous - * physical memory segments as a single memory buffer, just as a SGL does. Note - * however, that this function is only used by the IOCTL call, so the memory - * given will be guaranteed to be contiguous. There is no need to translate - * non-contiguous SGL into a PRP in this case. All PRPs will describe - * contiguous space that is one page size each. - * - * Each NVMe message contains two PRP entries. The first (PRP1) either contains - * a PRP list pointer or a PRP element, depending upon the command. PRP2 - * contains the second PRP element if the memory being described fits within 2 - * PRP entries, or a PRP list pointer if the PRP spans more than two entries. - * - * A PRP list pointer contains the address of a PRP list, structured as a linear - * array of PRP entries. Each PRP entry in this list describes a segment of - * physical memory. - * - * Each 64-bit PRP entry comprises an address and an offset field. The address - * always points at the beginning of a 4KB physical memory page, and the offset - * describes where within that 4KB page the memory segment begins. Only the - * first element in a PRP list may contain a non-zero offest, implying that all - * memory segments following the first begin at the start of a 4KB page. - * - * Each PRP element normally describes 4KB of physical memory, with exceptions - * for the first and last elements in the list. If the memory being described - * by the list begins at a non-zero offset within the first 4KB page, then the - * first PRP element will contain a non-zero offset indicating where the region - * begins within the 4KB page. The last memory segment may end before the end - * of the 4KB segment, depending upon the overall size of the memory being - * described by the PRP list. - * - * Since PRP entries lack any indication of size, the overall data buffer length - * is used to determine where the end of the data memory buffer is located, and - * how many PRP entries are required to describe it. - * - * @ioc: per adapter object - * @smid: system request message index for getting asscociated SGL - * @nvme_encap_request: the NVMe request msg frame pointer - * @data_out_dma: physical address for WRITES - * @data_out_sz: data xfer size for WRITES - * @data_in_dma: physical address for READS - * @data_in_sz: data xfer size for READS - */ +* _base_build_nvme_prp - This function is called for NVMe end devices to build +* a native SGL (NVMe PRP). The native SGL is built starting in the first PRP +* entry of the NVMe message (PRP1). 
If the data buffer is small enough to be
+* described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
+* used to describe a larger data buffer. If the data buffer is too large to
+* describe using the two PRP entries inside the NVMe message, then PRP1
+* describes the first data memory segment, and PRP2 contains a pointer to a PRP
+* list located elsewhere in memory to describe the remaining data memory
+* segments. The PRP list will be contiguous.
+*
+* The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
+* consists of a list of PRP entries to describe a number of noncontiguous
+* physical memory segments as a single memory buffer, just as a SGL does. Note
+* however, that this function is only used by the IOCTL call, so the memory
+* given will be guaranteed to be contiguous. There is no need to translate
+* non-contiguous SGL into a PRP in this case. All PRPs will describe
+* contiguous space that is one page size each.
+*
+* Each NVMe message contains two PRP entries. The first (PRP1) either contains
+* a PRP list pointer or a PRP element, depending upon the command. PRP2
+* contains the second PRP element if the memory being described fits within 2
+* PRP entries, or a PRP list pointer if the PRP spans more than two entries.
+*
+* A PRP list pointer contains the address of a PRP list, structured as a linear
+* array of PRP entries. Each PRP entry in this list describes a segment of
+* physical memory.
+*
+* Each 64-bit PRP entry comprises an address and an offset field. The address
+* always points at the beginning of a 4KB physical memory page, and the offset
+* describes where within that 4KB page the memory segment begins. Only the
+* first element in a PRP list may contain a non-zero offset, implying that all
+* memory segments following the first begin at the start of a 4KB page.
+*
+* Each PRP element normally describes 4KB of physical memory, with exceptions
+* for the first and last elements in the list. If the memory being described
+* by the list begins at a non-zero offset within the first 4KB page, then the
+* first PRP element will contain a non-zero offset indicating where the region
+* begins within the 4KB page. The last memory segment may end before the end
+* of the 4KB segment, depending upon the overall size of the memory being
+* described by the PRP list.
+*
+* Since PRP entries lack any indication of size, the overall data buffer length
+* is used to determine where the end of the data memory buffer is located, and
+* how many PRP entries are required to describe it.
+*
+* @ioc: per adapter object
+* @smid: system request message index for getting associated SGL
+* @nvme_encap_request: the NVMe request msg frame pointer
+* @data_out_dma: physical address for WRITES
+* @data_out_sz: data xfer size for WRITES
+* @data_in_dma: physical address for READS
+* @data_in_sz: data xfer size for READS
+*
+* Returns nothing.
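+*
+* Editor's illustration, not part of this patch: each loop iteration below
+* consumes at most one NVMe page, computed as
+*
+*	offset = dma_addr & page_mask;
+*	entry_len = ioc->page_size - offset;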
+*/ static void _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, - Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, - dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, - size_t data_in_sz) + Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) { int prp_size = NVME_PRP_SIZE; - __le64 *prp_entry, *prp1_entry, *prp2_entry; - __le64 *prp_page; + u64 *prp_entry, *prp1_entry, *prp2_entry; + u64 *prp_page; dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; u32 offset, entry_len; u32 page_mask_result, page_mask; @@ -1980,15 +2459,30 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, */ if (!data_in_sz && !data_out_sz) return; + + /* + * Set pointers to PRP1 and PRP2, which are in the NVMe command. + * PRP1 is located at a 24 byte offset from the start of the NVMe + * command. Then set the current PRP entry pointer to PRP1. + */ prp1_entry = &nvme_cmd->prp1; prp2_entry = &nvme_cmd->prp2; prp_entry = prp1_entry; + /* * For the PRP entries, use the specially allocated buffer of * contiguous memory. + * We don't need any PRP list, if data lengh is <= page_size, + * Also we are reusing this buffer while framing native NVMe + * DSM command for SCSI UNMAP. */ - prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid); - prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + if (data_in_sz > ioc->page_size || data_out_sz > ioc->page_size) { + prp_page = (u64 *)mpt3sas_base_get_pcie_sgl(ioc, smid); + prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + } else { + prp_page = NULL; + prp_page_dma = 0; + } /* * Check if we are within 1 entry of a page boundary we don't @@ -1996,9 +2490,10 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, */ page_mask = ioc->page_size - 1; page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; - if (!page_mask_result) { + if (!page_mask_result) + { /* Bump up to next page boundary. */ - prp_page = (__le64 *)((u8 *)prp_page + prp_size); + prp_page = (u64 *)((u8 *)prp_page + prp_size); prp_page_dma = prp_page_dma + prp_size; } @@ -2009,22 +2504,27 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, prp_entry_dma = prp_page_dma; /* Get physical address and length of the data buffer. */ - if (data_in_sz) { + if (data_in_sz) + { dma_addr = data_in_dma; length = data_in_sz; - } else { + } + else + { dma_addr = data_out_dma; length = data_out_sz; } /* Loop while the length is not zero. */ - while (length) { + while (length) + { /* * Check if we need to put a list pointer here if we are at * page boundary - prp_size (8 bytes). */ page_mask_result = (prp_entry_dma + prp_size) & page_mask; - if (!page_mask_result) { + if (!page_mask_result) + { /* * This is the last entry in a PRP List, so we need to * put a PRP list pointer here. What this does is: @@ -2046,7 +2546,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, offset = dma_addr & page_mask; entry_len = ioc->page_size - offset; - if (prp_entry == prp1_entry) { + if (prp_entry == prp1_entry) + { /* * Must fill in the first PRP pointer (PRP1) before * moving on. @@ -2058,13 +2559,16 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, * command (PRP2). */ prp_entry = prp2_entry; - } else if (prp_entry == prp2_entry) { + } + else if (prp_entry == prp2_entry) + { /* * Should the PRP2 entry be a PRP List pointer or just * a regular PRP pointer? If there is more than one * more page of data, must use a PRP List pointer. 
			 */
-			if (length > ioc->page_size) {
+			if (length > ioc->page_size)
+			{
 				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
 				*prp2_entry = cpu_to_le64(prp_entry_dma);
@@ -2078,14 +2582,18 @@
				 * first PRP List.
				 */
 				prp_entry = prp_page;
-			} else {
+			}
+			else
+			{
 				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
 				*prp2_entry = cpu_to_le64(dma_addr);
 			}
-		} else {
+		}
+		else
+		{
 			/*
			 * Put entry in list and bump the addresses.
			 *
@@ -2118,27 +2626,27 @@
 *
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
+ * @sg_scmd: SG list pointer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 *
- * Return: true: PRPs are built
- * false: IEEE SGLs needs to be built
+ * Returns nothing.
 */
-static void
+void
 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
-	struct scsi_cmnd *scmd,
+	struct scsi_cmnd *scmd,
+	struct scatterlist *sg_scmd,
	Mpi25SCSIIORequest_t *mpi_request,
	u16 smid, int sge_count)
 {
	int sge_len, num_prp_in_chain = 0;
	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
-	__le64 *curr_buff;
+	u64 *curr_buff;
	dma_addr_t msg_dma, sge_addr, offset;
	u32 page_mask, page_mask_result;
-	struct scatterlist *sg_scmd;
	u32 first_prp_len;
-	int data_len = scsi_bufflen(scmd);
+	int data_len;
	u32 nvme_pg_size;
 
	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
@@ -2191,7 +2699,11 @@
	/* Build first prp, sge need not to be page aligned*/
	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
-	sg_scmd = scsi_sglist(scmd);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+	data_len = scmd->request_bufflen;
+#else
+	data_len = scsi_bufflen(scmd);
+#endif
	sge_addr = sg_dma_address(sg_scmd);
	sge_len = sg_dma_len(sg_scmd);
@@ -2207,7 +2719,11 @@
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (data_len && (sge_len == first_prp_len)) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+		sg_scmd++;
+#else
		sg_scmd = sg_next(sg_scmd);
+#endif
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}
@@ -2242,26 +2758,62 @@
		if (sge_len > 0)
			continue;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+		sg_scmd++;
+#else
		sg_scmd = sg_next(sg_scmd);
+#endif
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}
 
	main_chain_element->Length =
		cpu_to_le32(num_prp_in_chain * sizeof(u64));
 }
 
+u32 base_mod64(u64 dividend, u32 divisor)
+{
+	u32 remainder;
+
+	if (!divisor)
+		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
+	remainder = do_div(dividend, divisor);
+	return remainder;
+}
+
+
+/**
+ * base_is_prp_possible - This function is called for PCIe end devices to
+ *			  check if we need to build a native NVMe PRP.
+ * @ioc: per adapter object
+ * @pcie_device: points to the PCIe device's info
+ * @scmd: scsi command
+ * @sg_scmd: SG list pointer
+ * @sge_count: scatter gather element count.
+ *
+ * Returns 1 if a native NVMe PRP is to be built, else 0 to build a native SGL.
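+ *
+ * Editor's illustration, not part of this patch: PRP eligibility requires
+ * NVMe-page alignment for every middle SGE, tested with base_mod64():
+ *
+ *	if (base_mod64(sg_dma_len(sg_scmd), nvme_pg_size) ||
+ *	    base_mod64(sg_dma_address(sg_scmd), nvme_pg_size))
+ *		build_prp = false;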
+ */ static bool base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, - struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) + struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, + struct scatterlist *sg_scmd, int sge_count) { u32 data_length = 0; bool build_prp = true; + u32 i, nvme_pg_size; + + nvme_pg_size = max_t(u32, ioc->page_size, + NVME_PRP_PAGE_SIZE); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + data_length = cpu_to_le32(scmd->request_bufflen); +#else + data_length = cpu_to_le32(scsi_bufflen(scmd)); +#endif - data_length = scsi_bufflen(scmd); if (pcie_device && - (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { + (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { build_prp = false; return build_prp; } @@ -2269,9 +2821,64 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, /* If Datalenth is <= 16K and number of SGE’s entries are <= 2 * we built IEEE SGL */ - if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2)) + if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2)) { build_prp = false; + return build_prp; + } + /* + ** Below code detects gaps/holes in IO data buffers. + ** What does holes/gaps mean? + ** Any SGE except first one in a SGL starts at non NVME page size + ** aligned address OR Any SGE except last one in a SGL ends at + ** non NVME page size boundary. + ** + ** Driver has already informed block layer by setting boundary rules + ** for bio merging done at NVME page size boundary calling kernel API + ** blk_queue_virt_boundary inside slave_config. + ** Still there is possibility of IO coming with holes to driver because + ** of IO merging done by IO scheduler. + ** + ** With SCSI BLK MQ enabled, there will be no IO with holes as there + ** is no IO scheduling so no IO merging. + ** + ** With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and + ** then sending IOs with holes. + ** + ** Though driver can request block layer to disable IO merging by + ** calling queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, + ** sdev->request_queue) but user may tune sysfs parameter- nomerges + ** again to 0 or 1. + ** + ** If in future IO scheduling is enabled with SCSI BLK MQ, + ** this algorithm to detect holes will be required in driver + ** for SCSI BLK MQ enabled case as well. + ** + **/ + scsi_for_each_sg(scmd, sg_scmd, sge_count, i) { + if ((i != 0) && (i != (sge_count - 1))) { + if (base_mod64(sg_dma_len(sg_scmd), nvme_pg_size) || + base_mod64(sg_dma_address(sg_scmd), + nvme_pg_size)) { + build_prp = false; + break; + } + } + if ((sge_count > 1) && (i == 0)) { + if ((base_mod64((sg_dma_address(sg_scmd) + + sg_dma_len(sg_scmd)), nvme_pg_size))) { + build_prp = false; + break; + } + } + + if ((sge_count > 1) && (i == (sge_count - 1))) { + if (base_mod64(sg_dma_address(sg_scmd), nvme_pg_size)) { + build_prp = false; + break; + } + } + } return build_prp; } @@ -2288,27 +2895,61 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, * @scmd: scsi command * @pcie_device: points to the PCIe device's info * - * Return: 0 if native SGL was built, 1 if no SGL was built + * Returns 0 if native SGL was built, 1 if no SGL was built */ static int _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, struct _pcie_device *pcie_device) { + struct scatterlist *sg_scmd; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + u32 sges_left; +#else int sges_left; +#endif /* Get the SG list pointer and info. 
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + if (!scmd->use_sg) { + /* single buffer sge */ + scmd->SCp.dma_handle = pci_map_single(ioc->pdev, + scmd->request_buffer, scmd->request_bufflen, + scmd->sc_data_direction); + if (pci_dma_mapping_error(scmd->SCp.dma_handle)) { + sdev_printk(KERN_ERR, scmd->device, "pci_map_single" + " failed: request for %d bytes!\n", + scmd->request_bufflen); + return 1; + } + sg_scmd = (struct scatterlist *) scmd->request_buffer; + sges_left = 1; + } else { + /* sg list provided */ + sg_scmd = (struct scatterlist *) scmd->request_buffer; + sges_left = pci_map_sg(ioc->pdev, sg_scmd, scmd->use_sg, + scmd->sc_data_direction); + + if (!sges_left) { + sdev_printk(KERN_ERR, scmd->device, "pci_map_sg" + " failed: request for %d bytes!\n", + scmd->request_bufflen); + return 1; + } + } +#else + sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); if (sges_left < 0) { - sdev_printk(KERN_ERR, scmd->device, - "scsi_dma_map failed: request for %d bytes!\n", - scsi_bufflen(scmd)); + sdev_printk(KERN_ERR, scmd->device, "scsi_dma_map" + " failed: request for %d bytes!\n", scsi_bufflen(scmd)); return 1; } +#endif /* Check if we need to build a native SG list. */ if (base_is_prp_possible(ioc, pcie_device, - scmd, sges_left) == 0) { + scmd, sg_scmd, sges_left) == 0) { /* We built a native SG list, just return. */ goto out; } @@ -2316,80 +2957,56 @@ _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, /* * Build native NVMe PRP. */ - base_make_prp_nvme(ioc, scmd, mpi_request, + base_make_prp_nvme(ioc, scmd, sg_scmd, mpi_request, smid, sges_left); return 0; out: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + if (scmd->use_sg) + pci_unmap_sg(ioc->pdev, (struct scatterlist *) + scmd->request_buffer, scmd->use_sg, + scmd->sc_data_direction); + else if (scmd->request_bufflen) + pci_unmap_single(ioc->pdev, scmd->SCp.dma_handle, + scmd->request_bufflen, scmd->sc_data_direction); +#else scsi_dma_unmap(scmd); +#endif return 1; } /** - * _base_add_sg_single_ieee - add sg element for IEEE format - * @paddr: virtual address for SGE - * @flags: SGE flags - * @chain_offset: number of 128 byte elements from start of segment - * @length: data transfer length - * @dma_addr: Physical address - */ -static void -_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, - dma_addr_t dma_addr) -{ - Mpi25IeeeSgeChain64_t *sgel = paddr; - - sgel->Flags = flags; - sgel->NextChainOffset = chain_offset; - sgel->Length = cpu_to_le32(length); - sgel->Address = cpu_to_le64(dma_addr); -} - -/** - * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format - * @ioc: per adapter object - * @paddr: virtual address for SGE - * - * Create a zero length scatter gather entry to insure the IOCs hardware has - * something to use if the target device goes brain dead and tries - * to send data even when none is asked for. - */ -static void -_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) -{ - u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | - MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | - MPI25_IEEE_SGE_FLAGS_END_OF_LIST); - - _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); -} - -/** - * _base_build_sg_scmd - main sg creation routine + * _base_build_sg_scmd - mpt2sas main sg creation routine * pcie_device is unused here! * @ioc: per adapter object * @scmd: scsi command - * @smid: system request message index + * @smid: system request message index * @unused: unused pcie_device pointer + * * Context: none. 
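+ *
+ * Editor's illustration, not part of this patch: when the SGL spills past
+ * the main message, a chain element is emitted whose length spans the
+ * SGEs that follow in the chain buffer, e.g.
+ *
+ *	chain_length = sges_in_segment * ioc->sge_size;
+ *	ioc->base_add_sg_single(sg_local, chain_flags | chain_length,
+ *	    chain_dma);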
* * The main routine that builds scatter gather table from a given * scsi request sent via the .queuecommand main handler. * - * Return: 0 success, anything else error + * Returns 0 success, anything else error */ static int _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) { - Mpi2SCSIIORequest_t *mpi_request; + Mpi25SCSIIORequest_t *mpi_request; dma_addr_t chain_dma; struct scatterlist *sg_scmd; void *sg_local, *chain; u32 chain_offset; u32 chain_length; u32 chain_flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + u32 sges_left; +#else int sges_left; +#endif u32 sges_in_segment; u32 sgl_flags; u32 sgl_flags_last_element; @@ -2409,15 +3026,64 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, << MPI2_SGE_FLAGS_SHIFT; sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + /* single buffer sge */ + if (!scmd->use_sg) { + scmd->SCp.dma_handle = pci_map_single(ioc->pdev, + scmd->request_buffer, scmd->request_bufflen, + scmd->sc_data_direction); + if (pci_dma_mapping_error(scmd->SCp.dma_handle)) { + sdev_printk(KERN_ERR, scmd->device, "pci_map_single" + " failed: request for %d bytes!\n", + scmd->request_bufflen); + return -ENOMEM; + } + ioc->base_add_sg_single(&mpi_request->SGL, + sgl_flags_end_buffer | scmd->request_bufflen, + scmd->SCp.dma_handle); + return 0; + } + + /* sg list provided */ + sg_scmd = (struct scatterlist *) scmd->request_buffer; + sges_left = pci_map_sg(ioc->pdev, sg_scmd, scmd->use_sg, + scmd->sc_data_direction); + +#if defined(CRACK_MONKEY_EEDP) + if (!ioc->disable_eedp_support) { + if (scmd->cmnd[0] == INQUIRY) { + scmd->host_scribble = + page_address(((struct scatterlist *) + scmd->request_buffer)[0].page)+ + ((struct scatterlist *) + scmd->request_buffer)[0].offset; + } + } +#endif /* CRACK_MONKEY_EEDP */ + if (!sges_left) { + sdev_printk(KERN_ERR, scmd->device, "pci_map_sg" + " failed: request for %d bytes!\n", scmd->request_bufflen); + return -ENOMEM; + } +#else sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); if (sges_left < 0) { - sdev_printk(KERN_ERR, scmd->device, - "scsi_dma_map failed: request for %d bytes!\n", - scsi_bufflen(scmd)); + sdev_printk(KERN_ERR, scmd->device, "scsi_dma_map" + " failed: request for %d bytes!\n", scsi_bufflen(scmd)); return -ENOMEM; } +#if defined(CRACK_MONKEY_EEDP) + if (!ioc->disable_eedp_support) { + if (scmd->cmnd[0] == INQUIRY) + scmd->host_scribble = page_address(sg_page(sg_scmd)) + + sg_scmd[0].offset; + } +#endif /* CRACK_MONKEY_EEDP */ +#endif + + sg_local = &mpi_request->SGL; sges_in_segment = ioc->max_sges_in_main_message; if (sges_left <= sges_in_segment) @@ -2435,7 +3101,11 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, else ioc->base_add_sg_single(sg_local, sgl_flags | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + sg_scmd++; +#else sg_scmd = sg_next(sg_scmd); +#endif sg_local += ioc->sge_size; sges_left--; sges_in_segment--; @@ -2477,7 +3147,11 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, ioc->base_add_sg_single(sg_local, sgl_flags | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + sg_scmd++; +#else sg_scmd = sg_next(sg_scmd); +#endif sg_local += ioc->sge_size; sges_left--; sges_in_segment--; @@ -2501,7 +3175,11 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, else ioc->base_add_sg_single(sg_local, sgl_flags | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); +#if 
(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + sg_scmd++; +#else sg_scmd = sg_next(sg_scmd); +#endif sg_local += ioc->sge_size; sges_left--; } @@ -2509,19 +3187,65 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, return 0; } +/* IEEE format sgls */ + +/** + * _base_add_sg_single_ieee - add sg element for IEEE format + * @paddr: virtual address for SGE + * @flags: SGE flags + * @chain_offset: number of 128 byte elements from start of segment + * @length: data transfer length + * @dma_addr: Physical address + * + * Return nothing. + */ +static void +_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, + dma_addr_t dma_addr) +{ + Mpi25IeeeSgeChain64_t *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + * + * Return nothing. + */ +static void +_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST); + + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + /** * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format * @ioc: per adapter object * @scmd: scsi command - * @smid: system request message index + * @smid: system request message index * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be * constructed on need. + * * Context: none. * * The main routine that builds scatter gather table from a given * scsi request sent via the .queuecommand main handler. * - * Return: 0 success, anything else error + * Returns 0 success, anything else error */ static int _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, @@ -2529,17 +3253,26 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, { Mpi25SCSIIORequest_t *mpi_request; dma_addr_t chain_dma; - struct scatterlist *sg_scmd; - void *sg_local, *chain; + struct scatterlist *sg_scmd; /* s/g data entry */ + void *sg_local, *chain, *sgl_zero_addr; u32 chain_offset; u32 chain_length; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + u32 sges_left; +#else int sges_left; - u32 sges_in_segment; +#endif + u32 sges_in_segment, sges_in_request_frame = 0; u8 simple_sgl_flags; u8 simple_sgl_flags_last; u8 chain_sgl_flags; struct chain_tracker *chain_req; - +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + struct scatterlist *sg_prot_scmd = NULL; /* s/g prot entry */ + void *sg_prot_local; + int prot_sges_left; + u32 prot_sges_in_segment; +#endif mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); /* init scatter gather flags */ @@ -2552,23 +3285,80 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, /* Check if we need to build a native SG list. */ if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, - smid, scmd, pcie_device) == 0)) { + smid, scmd, pcie_device) == 0)) { /* We built a native SG list, just return. 
*/ return 0; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + /* single buffer sge */ + if (!scmd->use_sg) { + scmd->SCp.dma_handle = pci_map_single(ioc->pdev, + scmd->request_buffer, scmd->request_bufflen, + scmd->sc_data_direction); + if (pci_dma_mapping_error(scmd->SCp.dma_handle)) { + sdev_printk(KERN_ERR, scmd->device, "pci_map_single" + " failed: request for %d bytes!\n", + scmd->request_bufflen); + return -ENOMEM; + } + + _base_add_sg_single_ieee(&mpi_request->SGL, + simple_sgl_flags_last, 0, scmd->request_bufflen, + scmd->SCp.dma_handle); + return 0; + } + + /* sg list provided */ + sg_scmd = (struct scatterlist *) scmd->request_buffer; + sges_left = pci_map_sg(ioc->pdev, sg_scmd, scmd->use_sg, + scmd->sc_data_direction); + +#if defined(CRACK_MONKEY_EEDP) + if (!ioc->disable_eedp_support) { + if (scmd->cmnd[0] == INQUIRY) { + scmd->host_scribble = + page_address(((struct scatterlist *) + scmd->request_buffer)[0].page)+ + ((struct scatterlist *) + scmd->request_buffer)[0].offset; + } + } +#endif /* CRACK_MONKEY */ + if (!sges_left) { + sdev_printk(KERN_ERR, scmd->device, "pci_map_sg" + " failed: request for %d bytes!\n", scmd->request_bufflen); + return -ENOMEM; + } +#else sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); if (sges_left < 0) { - sdev_printk(KERN_ERR, scmd->device, - "scsi_dma_map failed: request for %d bytes!\n", - scsi_bufflen(scmd)); + sdev_printk(KERN_ERR, scmd->device, "scsi_dma_map" + " failed: request for %d bytes!\n", scsi_bufflen(scmd)); return -ENOMEM; } - sg_local = &mpi_request->SGL; - sges_in_segment = (ioc->request_sz - +#if defined(CRACK_MONKEY_EEDP) + if (!ioc->disable_eedp_support) { + if (scmd->cmnd[0] == INQUIRY) + scmd->host_scribble = page_address(sg_page(sg_scmd)) + + sg_scmd[0].offset; + } +#endif /* CRACK_MONKEY */ +#endif + sgl_zero_addr = sg_local = &mpi_request->SGL; + + if (mpi_request->DMAFlags == MPI25_TA_DMAFLAGS_OP_D_H_D_D) { + /* reserve last SGE for SGL1 */ + sges_in_request_frame = sges_in_segment = + ((ioc->request_sz - ioc->sge_size_ieee) - + offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; + } else { + sges_in_segment = (ioc->request_sz - offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; + } + if (sges_left <= sges_in_segment) goto fill_in_last_segment; @@ -2579,7 +3369,11 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, while (sges_in_segment > 1) { _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + sg_scmd++; +#else sg_scmd = sg_next(sg_scmd); +#endif sg_local += ioc->sge_size_ieee; sges_left--; sges_in_segment--; @@ -2611,7 +3405,11 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, while (sges_in_segment) { _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + sg_scmd++; +#else sg_scmd = sg_next(sg_scmd); +#endif sg_local += ioc->sge_size_ieee; sges_left--; sges_in_segment--; @@ -2636,14 +3434,99 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, else _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + sg_scmd++; +#else sg_scmd = sg_next(sg_scmd); +#endif sg_local += ioc->sge_size_ieee; sges_left--; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (mpi_request->DMAFlags == MPI25_TA_DMAFLAGS_OP_D_H_D_D) + { + mpi_request->SGLOffset1 = ((ioc->request_sz - 
ioc->sge_size_ieee)/4); + sg_prot_scmd = scsi_prot_sglist(scmd); + prot_sges_left = dma_map_sg(&ioc->pdev->dev, scsi_prot_sglist(scmd), + scsi_prot_sg_count(scmd), scmd->sc_data_direction); + if (!prot_sges_left) { + sdev_printk(KERN_ERR, scmd->device, "dma_map_sg" + " failed: request for %d bytes!\n", scsi_bufflen(scmd)); + scsi_dma_unmap(scmd); + return -ENOMEM; + } + + sg_prot_local = sgl_zero_addr + + (sges_in_request_frame * ioc->sge_size_ieee); + prot_sges_in_segment = 1; + + if (prot_sges_left <= prot_sges_in_segment) + goto fill_in_last_prot_segment; + + + /* initializing the chain flags and pointers */ + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + prot_sges_in_segment = (prot_sges_left <= + ioc->max_sges_in_chain_message) ? prot_sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (prot_sges_left == prot_sges_in_segment) ? + 0 : prot_sges_in_segment; + chain_length = prot_sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + _base_add_sg_single_ieee(sg_prot_local, chain_sgl_flags, + chain_offset, chain_length, chain_dma); + + sg_prot_local = chain; + if (!chain_offset) + goto fill_in_last_prot_segment; + + /* fill in chain segments */ + while (prot_sges_in_segment) { + _base_add_sg_single_ieee(sg_prot_local, simple_sgl_flags, 0, + sg_dma_len(sg_prot_scmd), sg_dma_address(sg_prot_scmd)); + sg_prot_scmd = sg_next(sg_prot_scmd); + sg_prot_local += ioc->sge_size_ieee; + prot_sges_left--; + prot_sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + fill_in_last_prot_segment: + + /* fill the last segment */ + while (prot_sges_left) { + if (prot_sges_left == 1) { + _base_add_sg_single_ieee(sg_prot_local, + simple_sgl_flags_last, 0, sg_dma_len(sg_prot_scmd), + sg_dma_address(sg_prot_scmd)); + } + else { + _base_add_sg_single_ieee(sg_prot_local, simple_sgl_flags, 0, + sg_dma_len(sg_prot_scmd), sg_dma_address(sg_prot_scmd)); + } + sg_prot_scmd = sg_next(sg_prot_scmd); + sg_prot_local += ioc->sge_size_ieee; + prot_sges_left--; + } + } +#endif return 0; } + /** * _base_build_sg_ieee - build generic sg for IEEE format * @ioc: per adapter object @@ -2652,6 +3535,8 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, * @data_out_sz: data xfer size for WRITES * @data_in_dma: physical address for READS * @data_in_sz: data xfer size for READS + * + * Return nothing. */ static void _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, @@ -2696,122 +3581,281 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) /** * _base_config_dma_addressing - set dma addressing * @ioc: per adapter object * @pdev: PCI device struct * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { - u64 required_mask, coherent_mask; struct sysinfo s; + char *desc = "64"; + u64 consistant_dma_mask = DMA_BIT_MASK(64); + + if (ioc->is_mcpu_endpoint) + goto try_32bit; + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
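/* Editor's aside, not part of the patch: on current kernels the
 * version-dependent logic in this function reduces to "try a 63- or 64-bit
 * mask, fall back to 32-bit".  A hypothetical sketch against the modern DMA
 * API (linux/dma-mapping.h); the 63-bit mask for SAS3-and-later parts
 * mirrors the driver's comment above. */
static int example_set_dma_mask(struct pci_dev *pdev, bool sas3_or_newer)
{
	u64 mask = sas3_or_newer ? DMA_BIT_MASK(63) : DMA_BIT_MASK(64);

	/* sets the streaming and coherent masks together */
	if (sizeof(dma_addr_t) > 4 &&
	    !dma_set_mask_and_coherent(&pdev->dev, mask))
		return 0;
	/* 32-bit fallback */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}

- int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 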
63 : 64; + if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { + consistant_dma_mask = DMA_BIT_MASK(63); + desc = "63"; + } + + if (sizeof(dma_addr_t) > 4) { + uint64_t required_mask; + + /* have to first set mask to 64 to find max mask required */ + if (pci_set_dma_mask(pdev, consistant_dma_mask) != 0) + goto try_32bit; + + required_mask = dma_get_required_mask(&pdev->dev); + + if (required_mask > DMA_32BIT_MASK && + !pci_set_consistent_dma_mask(pdev, consistant_dma_mask)) { + ioc->base_add_sg_single = &_base_add_sg_single_64; + ioc->sge_size = sizeof(Mpi2SGESimple64_t); + goto out; + } + } + + try_32bit: + + if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) && + !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + ioc->base_add_sg_single = &_base_add_sg_single_32; + ioc->sge_size = sizeof(Mpi2SGESimple32_t); + desc = "32"; + } else + return -ENODEV; + + out: + si_meminfo(&s); + printk(MPT3SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, " + "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram)); + + return 0; +} +#else +/** + * _base_config_dma_addressing - set dma addressing + * @ioc: per adapter object + * @pdev: PCI device struct + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) +{ + struct sysinfo s; + char *desc = "64"; + u64 consistant_dma_mask = DMA_BIT_MASK(64); if (ioc->is_mcpu_endpoint) goto try_32bit; - required_mask = dma_get_required_mask(&pdev->dev); - if (sizeof(dma_addr_t) == 4 || required_mask == 32) - goto try_32bit; + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ + if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { + consistant_dma_mask = DMA_BIT_MASK(63); + desc = "63"; + } - if (ioc->dma_mask) - coherent_mask = DMA_BIT_MASK(dma_mask); - else - coherent_mask = DMA_BIT_MASK(32); + if (sizeof(dma_addr_t) > 4) { + const uint64_t required_mask = + dma_get_required_mask(&pdev->dev); + if ((required_mask > DMA_BIT_MASK(32)) && + !pci_set_dma_mask(pdev, consistant_dma_mask) && + !pci_set_consistent_dma_mask(pdev, consistant_dma_mask)) { + ioc->base_add_sg_single = &_base_add_sg_single_64; + ioc->sge_size = sizeof(Mpi2SGESimple64_t); + goto out; + } + } - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || - dma_set_coherent_mask(&pdev->dev, coherent_mask)) - goto try_32bit; - - ioc->base_add_sg_single = &_base_add_sg_single_64; - ioc->sge_size = sizeof(Mpi2SGESimple64_t); - ioc->dma_mask = dma_mask; - goto out; - - try_32bit: - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) +try_32bit: + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + ioc->base_add_sg_single = &_base_add_sg_single_32; + ioc->sge_size = sizeof(Mpi2SGESimple32_t); + desc = "32"; + } else return -ENODEV; - ioc->base_add_sg_single = &_base_add_sg_single_32; - ioc->sge_size = sizeof(Mpi2SGESimple32_t); - ioc->dma_mask = 32; out: si_meminfo(&s); - ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", - ioc->dma_mask, convert_to_kb(s.totalram)); + printk(MPT3SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, " + "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram)); return 0; } - -static int -_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, - struct pci_dev *pdev) -{ - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) - return -ENODEV; - } - return 0; -} +#endif /** - * 
_base_check_enable_msix - checks MSIX capable. + * _base_check_and_get_msix_vectors - checks MSIX capable. * @ioc: per adapter object * - * Check to see if card is capable of MSIX, and set number - * of available msix vectors + * Check to see if card is capable of MSIX, and return number + * of available msix vectors */ -static int -_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) +int +_base_check_and_get_msix_vectors(struct pci_dev *pdev) { int base; - u16 message_control; - + u16 message_control, msix_vector_count; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + u8 revision; +#endif + /* Check whether the controller is a SAS2008 B0 controller, - * if it is SAS2008 B0 controller use IO-APIC instead of MSIX - */ - if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && - ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { + if it is a SAS2008 B0 controller, use IO-APIC instead of MSIX */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); + if (pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + revision == SAS2_PCI_DEVICE_B0_REVISION) { +#else + if (pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { +#endif return -EINVAL; } - base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); - if (!base) { - dfailprintk(ioc, ioc_info(ioc, "msix not supported\n")); + base = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (!base) return -EINVAL; + + /* NUMA_IO not supported for older controllers */ + switch (pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2004: + case MPI2_MFGPAGE_DEVID_SAS2008: + case MPI2_MFGPAGE_DEVID_SAS2108_1: + case MPI2_MFGPAGE_DEVID_SAS2108_2: + case MPI2_MFGPAGE_DEVID_SAS2108_3: + case MPI2_MFGPAGE_DEVID_SAS2116_1: + case MPI2_MFGPAGE_DEVID_SAS2116_2: + return 1; } /* get msix vector count */ - /* NUMA_IO not supported for older controllers */ - if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) - ioc->msix_vector_count = 1; - else { - pci_read_config_word(ioc->pdev, base + 2, &message_control); - ioc->msix_vector_count = (message_control & 0x3FF) + 1; + pci_read_config_word(pdev, base + 2, &message_control); + msix_vector_count = (message_control & 0x3FF) + 1; + + return msix_vector_count; +} + +enum mpt3sas_pci_bus_speed { + MPT_PCIE_SPEED_2_5GT = 0x14, + MPT_PCIE_SPEED_5_0GT = 0x15, + MPT_PCIE_SPEED_8_0GT = 0x16, + MPT_PCIE_SPEED_16_0GT = 0x17, + MPT_PCI_SPEED_UNKNOWN = 0xff, +}; + +const unsigned char mpt3sas_pcie_link_speed[] = { + MPT_PCI_SPEED_UNKNOWN, /* 0 */ + MPT_PCIE_SPEED_2_5GT, /* 1 */ + MPT_PCIE_SPEED_5_0GT, /* 2 */ + MPT_PCIE_SPEED_8_0GT, /* 3 */ + MPT_PCIE_SPEED_16_0GT, /* 4 */ + MPT_PCI_SPEED_UNKNOWN, /* 5 */ + MPT_PCI_SPEED_UNKNOWN, /* 6 */ + MPT_PCI_SPEED_UNKNOWN, /* 7 */ + MPT_PCI_SPEED_UNKNOWN, /* 8 */ + MPT_PCI_SPEED_UNKNOWN, /* 9 */ + MPT_PCI_SPEED_UNKNOWN, /* A */ + MPT_PCI_SPEED_UNKNOWN, /* B */ + MPT_PCI_SPEED_UNKNOWN, /* C */ + MPT_PCI_SPEED_UNKNOWN, /* D */ + MPT_PCI_SPEED_UNKNOWN, /* E */ + MPT_PCI_SPEED_UNKNOWN /* F */ +}; + +
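/* Editor's aside, not part of the patch: the vector count returned above
 * comes from the Table Size field of the MSI-X Message Control word at
 * capability offset + 2; the field encodes "vectors - 1", hence the "+ 1".
 * A hypothetical sketch with generic PCI helpers -- the kernel's own
 * pci_msix_vec_count() performs the same lookup. */
static int example_msix_vector_count(struct pci_dev *pdev)
{
	u16 mc;
	int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

	if (!pos)
		return -EINVAL;			/* no MSI-X capability */
	pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &mc);
	return (mc & PCI_MSIX_FLAGS_QSIZE) + 1;	/* table size field */
}

+/** + * _base_check_and_enable_high_iops_queues - enable high iops mode + * @ ioc - per adapter object + * @ hba_msix_vector_count - msix vectors supported by HBA + * + * Enable high iops queues 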
only if + * - HBA is a SEA/AERO controller and + * - MSI-Xs vector supported by the HBA is 128 and + * - total CPU count in the system >=16 and + * - loaded driver with default max_msix_vectors module parameter and + * - system booted in non kdump mode + * + * returns nothing. + */ +static void +_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, + int hba_msix_vector_count) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + u16 lnksta; + enum mpt3sas_pci_bus_speed speed; + + if (perf_mode == MPT_PERF_MODE_IOPS || + perf_mode == MPT_PERF_MODE_LATENCY) { + ioc->high_iops_queues = 0; + return; } - dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n", - ioc->msix_vector_count)); - return 0; + + if (perf_mode == MPT_PERF_MODE_DEFAULT) { + + pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); + speed = mpt3sas_pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + + dev_info(&ioc->pdev->dev, "PCIe device speed is %s\n", + speed == MPT_PCIE_SPEED_2_5GT ? "2.5GHz" : + speed == MPT_PCIE_SPEED_5_0GT ? "5.0GHz" : + speed == MPT_PCIE_SPEED_8_0GT ? "8.0GHz" : + speed == MPT_PCIE_SPEED_16_0GT ? "16.0GHz" : + "Unknown"); + + if (speed < MPT_PCIE_SPEED_16_0GT) { + ioc->high_iops_queues = 0; + return; + } + } + + if (!reset_devices && + hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES && + num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES && + max_msix_vectors == -1) + ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES; + else +#endif + ioc->high_iops_queues = 0; +} + + +/** + * mpt3sas_base_disable_msix - disables msix + * @ioc: per adapter object + * + */ +void +mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + if (!ioc->msix_enable) + return; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + pci_free_irq_vectors(ioc->pdev); +#else + pci_disable_msix(ioc->pdev); +#endif + ioc->msix_enable = 0; } /** - * _base_free_irq - free irq + * mpt3sas_base_free_irq - free irq * @ioc: per adapter object * * Freeing respective reply_queue from the list. */ -static void -_base_free_irq(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) { struct adapter_reply_queue *reply_q, *next; @@ -2819,12 +3863,25 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) return; list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { +#if defined(MPT3SAS_ENABLE_IRQ_POLL) + irq_poll_disable(&reply_q->irqpoll); +#endif list_del(&reply_q->list); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) if (ioc->smp_affinity_enable) irq_set_affinity_hint(pci_irq_vector(ioc->pdev, reply_q->msix_index), NULL); free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), - reply_q); + reply_q); +#elif ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) + if (ioc->smp_affinity_enable) { + irq_set_affinity_hint(reply_q->vector, NULL); + free_cpumask_var(reply_q->affinity_hint); + } + free_irq(reply_q->vector, reply_q); +#else + free_irq(reply_q->vector, reply_q); +#endif kfree(reply_q); } } @@ -2833,25 +3890,39 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) * _base_request_irq - request irq * @ioc: per adapter object * @index: msix index into vector table + * @vector: irq vector * * Inserting respective reply_queue into the list. 
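 */

/* Editor's aside, not part of the patch: the per-vector hookup used by this
 * helper, reduced to its core.  Names are hypothetical; on kernels with
 * pci_alloc_irq_vectors(), pci_irq_vector() maps an MSI-X index to its
 * Linux IRQ number. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* dev_id is the per-queue context handed to request_irq() */
	return IRQ_HANDLED;
}

static int example_hook_vector(struct pci_dev *pdev, int index, void *ctx)
{
	return request_irq(pci_irq_vector(pdev, index), example_isr,
	    IRQF_SHARED, "example-msix", ctx);
}

/*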
*/ static int +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) -{ - struct pci_dev *pdev = ioc->pdev; +#else +_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) +#endif +{ struct adapter_reply_queue *reply_q; int r; reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); if (!reply_q) { - ioc_err(ioc, "unable to allocate memory %zu!\n", - sizeof(struct adapter_reply_queue)); + printk(MPT3SAS_ERR_FMT "unable to allocate memory %d!\n", + ioc->name, (int)sizeof(struct adapter_reply_queue)); return -ENOMEM; } reply_q->ioc = ioc; reply_q->msix_index = index; - +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0))) + if (ioc->smp_affinity_enable) { + if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) { + kfree(reply_q); + return -ENOMEM; + } + cpumask_clear(reply_q->affinity_hint); + } +#endif atomic_set(&reply_q->busy, 0); if (ioc->msix_enable) snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", @@ -2859,47 +3930,244 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) else snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", ioc->driver_name, ioc->id); - r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, - IRQF_SHARED, reply_q->name, reply_q); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + r = request_irq(pci_irq_vector(ioc->pdev, index), _base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); if (r) { - pr_err("%s: unable to allocate interrupt %d!\n", - reply_q->name, pci_irq_vector(pdev, index)); + printk(MPT3SAS_ERR_FMT "unable to allocate interrupt %d!\n", + reply_q->name, pci_irq_vector(ioc->pdev, index)); kfree(reply_q); return -EBUSY; } +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)) + reply_q->vector = vector; + r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name, + reply_q); + if (r) { + printk(MPT3SAS_ERR_FMT "unable to allocate interrupt %d!\n", + reply_q->name, vector); + kfree(reply_q); + return -EBUSY; + } +#else + reply_q->vector = vector; + r = request_irq(vector, _base_interrupt, SA_SHIRQ, reply_q->name, + reply_q); + if (r) { + printk(MPT3SAS_ERR_FMT "unable to allocate interrupt %d!\n", + reply_q->name, vector); + kfree(reply_q); + return -EBUSY; + } +#endif INIT_LIST_HEAD(&reply_q->list); list_add_tail(&reply_q->list, &ioc->reply_queue_list); return 0; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) /** - * _base_assign_reply_queues - assigning msix index for each cpu + * _base_alloc_irq_vectors - allocate msix vectors * @ioc: per adapter object * - * The enduser would need to set the affinity via /proc/irq/#/smp_affinity + */ +static int +_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) +{ + int i, irq_flags = PCI_IRQ_MSIX; + struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; + struct irq_affinity *descp = &desc; + + if (ioc->smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY; + else + descp = NULL; + + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "high_iops_queues: %d, reply_queue_count: %d\n", + ioc->name, ioc->high_iops_queues, + ioc->reply_queue_count)); + + i = pci_alloc_irq_vectors_affinity(ioc->pdev, + ioc->high_iops_queues, + ioc->reply_queue_count, irq_flags, descp); + + return i; +} +#endif + +/** + * _base_enable_msix - enables msix, failback to io_apic + * @ioc: per adapter object * - * It would nice if we could call irq_set_affinity, however it is not - * an exported symbol + */ +static int 
+_base_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) + struct msix_entry *entries, *a; +#endif + int r, i, msix_vector_count, local_max_msix_vectors; + u8 try_msix = 0; + + ioc->msix_load_balance = false; + + if (msix_disable == -1 || msix_disable == 0) + try_msix = 1; + + if (!try_msix) + goto try_ioapic; + + msix_vector_count = _base_check_and_get_msix_vectors(ioc->pdev); + if (msix_vector_count <= 0) { + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "msix not " + "supported\n", ioc->name)); + goto try_ioapic; + } + + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "MSI-X vectors supported: %d, no of cores: %d\n", + ioc->name, msix_vector_count, ioc->cpu_count)); + + if (ioc->is_aero_ioc) + _base_check_and_enable_high_iops_queues(ioc, msix_vector_count); + + ioc->reply_queue_count = min_t(int, + ioc->cpu_count + ioc->high_iops_queues, + msix_vector_count); + + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) + { + if(reset_devices) + local_max_msix_vectors = 1; + else + local_max_msix_vectors = 8; + }else + local_max_msix_vectors = max_msix_vectors; + + + if (local_max_msix_vectors > 0) { + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + } else if (local_max_msix_vectors == 0) + goto try_ioapic; + + /* + * Enable msix_load_balance only if combined reply queue mode is + * disabled on SAS3 & above generation HBA devices. + */ + if (!ioc->combined_reply_queue && + ioc->hba_mpi_version_belonged != MPI2_VERSION) { + ioc->msix_load_balance = true; + } + + /* + * smp affinity setting is not need when msix load balance + * is enabled. + */ + if (ioc->msix_load_balance) + ioc->smp_affinity_enable = 0; + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + r = _base_alloc_irq_vectors(ioc); + if (r < 0) { + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT + "pci_alloc_irq_vectors failed (r=%d) !!!\n", + ioc->name, r)); + goto try_ioapic; + } + + ioc->msix_enable = 1; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = _base_request_irq(ioc, i); + if (r) { + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + goto try_ioapic; + } + } +#else + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), + GFP_KERNEL); + if (!entries) { + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "kcalloc " + "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__, + __LINE__, __func__)); + goto try_ioapic; + } + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) + a->entry = i; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) + r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count); +#else + r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); +#endif + + if (r) { + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) + "pci_enable_msix_exact " +#else + "pci_enable_msix " +#endif + "failed (r=%d) !!!\n", ioc->name, r)); + kfree(entries); + goto try_ioapic; + } + + ioc->msix_enable = 1; + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { + r = _base_request_irq(ioc, i, a->vector); + if (r) { + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + kfree(entries); + goto try_ioapic; + } + } + + kfree(entries); +#endif + + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "High IOPs queues : %s \n", + ioc->name, ioc->high_iops_queues ? 
"enabled" : "disabled")); + + return 0; + +/* failback to io_apic interrupt routing */ + try_ioapic: + ioc->high_iops_queues = 0; + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "High IOPs queues : disabled \n", ioc->name)); + ioc->reply_queue_count = 1; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + r = _base_request_irq(ioc, 0); +#else + r = _base_request_irq(ioc, 0, ioc->pdev->irq); +#endif + return r; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) +/** + * _base_import_managed_irqs_affinity - import msix affinity of managed IRQs + * into local cpu mapping table. + * @ioc - per adapter object + * + * Return nothing. */ static void -_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) +_base_import_managed_irqs_affinity(struct MPT3SAS_ADAPTER *ioc) { - unsigned int cpu, nr_cpus, nr_msix, index = 0; struct adapter_reply_queue *reply_q; + unsigned int cpu, nr_msix; int local_numa_node; + unsigned int index = 0; - if (!_base_is_controller_msix_enabled(ioc)) - return; - - if (ioc->msix_load_balance) - return; - - memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); - - nr_cpus = num_online_cpus(); - nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, - ioc->facts.MaxMSIxVectors); + nr_msix = ioc->reply_queue_count; if (!nr_msix) return; @@ -2927,8 +4195,9 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) mask = pci_irq_get_affinity(ioc->pdev, reply_q->msix_index); if (!mask) { - ioc_warn(ioc, "no affinity for msi %x\n", - reply_q->msix_index); + dinitprintk(ioc, printk(MPT3SAS_WARN_FMT + "no affinity for msi %x\n", ioc->name, + reply_q->msix_index)); goto fall_back; } @@ -2942,256 +4211,653 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) } fall_back: - cpu = cpumask_first(cpu_online_mask); - nr_msix -= ioc->high_iops_queues; - index = 0; + _base_group_cpus_on_irq(ioc); +} +#endif - list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { - unsigned int i, group = nr_cpus / nr_msix; +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)))) - if (reply_q->msix_index < ioc->high_iops_queues) +/** + * _base_distribute_msix_vectors_explicity - distribute allocated msix vectors + * among the numa node and then distribute the per node allocated vectors + * among the CPUs exist in that node. + * @ioc - per adapter object + * + * Return nothing. + */ +static void +_base_distribute_msix_vectors_explicity(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q; + unsigned int cpu, nr_msix; + unsigned long nr_nodes; + int node_index=0; + + /* First distribute the msix vectors among the numa nodes, + * then distribute the per node allocated vectors among the + * CPUs exist in that node. So that same MSIX vector is not + * assigned to the CPUs across the numa nodes. + * + * Only general reply queues are used for determine the affinity hint, + * high iops queues are excluded, so these high iops msix vectors are + * affinity to local numa node. 
+ */ + nr_nodes = num_online_nodes(); + nr_msix = ioc->reply_queue_count - ioc->high_iops_queues; + reply_q = list_entry(ioc->reply_queue_list.next, + struct adapter_reply_queue, list); + while (reply_q && reply_q->msix_index < ioc->high_iops_queues) + reply_q = list_entry(reply_q->list.next, + struct adapter_reply_queue, list); + + for (node_index = 0; node_index < nr_nodes; node_index++) { + unsigned int group, nr_msix_group; + int cpu_count = 0, cpu_group_count = 0; + int nr_cpus_per_node = cpumask_weight(cpumask_of_node(node_index)); + nr_msix_group = nr_msix / nr_nodes; + + if (node_index < nr_msix % nr_nodes) + nr_msix_group++; + + if (!nr_msix_group) continue; - if (cpu >= nr_cpus) + for_each_cpu(cpu, cpumask_of_node(node_index)) { + + if (!reply_q) + return; + + group = nr_cpus_per_node / nr_msix_group; + + if (cpu_count < ((nr_cpus_per_node % nr_msix_group) * (group + 1))) + group++; + + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + if (ioc->smp_affinity_enable) + cpumask_or(reply_q->affinity_hint, + reply_q->affinity_hint, get_cpu_mask(cpu)); + + cpu_count++; + cpu_group_count++; + if (cpu_group_count < group) + continue; + + if (ioc->smp_affinity_enable) { + if (irq_set_affinity_hint(reply_q->vector, + reply_q->affinity_hint)) + dinitprintk(ioc, printk(MPT3SAS_FMT + "error setting affinity hint for irq " + " vector %d\n", ioc->name, reply_q->vector)); + } + reply_q = list_entry(reply_q->list.next, + struct adapter_reply_queue, list); + + while (reply_q && + reply_q->msix_index < ioc->high_iops_queues) + reply_q = list_entry(reply_q->list.next, + struct adapter_reply_queue, list); + + cpu_group_count = 0; + } + } +} +#endif + +/** + * _base_assign_reply_queues - assigning msix index for each cpu + * @ioc: per adapter object + * + * The enduser would need to set the affinity via /proc/irq/#/smp_affinity + * + * It would be nice if we could call irq_set_affinity, however it is not + * an exported symbol + */ +static void +_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q; + int reply_queue; + + if (!_base_is_controller_msix_enabled(ioc)) + return; + + if (ioc->msix_load_balance) + return; + + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + + /* NUMA Hardware bug workaround - drop to fewer reply queues */ + if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) { + ioc->reply_queue_count = ioc->facts.MaxMSIxVectors; + reply_queue = 0; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + reply_q->msix_index = reply_queue; + if (++reply_queue == ioc->reply_queue_count) + reply_queue = 0; + } + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + + _base_import_managed_irqs_affinity(ioc); + +#elif ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)) || ((LINUX_VERSION_CODE >= \ + KERNEL_VERSION(2,6,36)))) + + _base_distribute_msix_vectors_explicity(ioc); + +#else + /* when there are more cpus than available msix vectors, + * then group cpus together on the same irq + */ + + _base_group_cpus_on_irq(ioc); +#endif +} + +
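/* Editor's aside, not part of the patch: the doorbell wait helpers that
 * follow all share one shape -- poll a register roughly once per
 * millisecond until a bit appears or the budget runs out.  A hypothetical
 * sketch of that pattern: */
static int example_poll_bit(void __iomem *reg, u32 bit, int timeout_sec)
{
	u32 cntdn = 1000 * timeout_sec;		/* ~1 ms per iteration */

	do {
		if (readl(reg) & bit)
			return 0;		/* condition met */
		msleep(1);
	} while (--cntdn);
	return -ETIMEDOUT;			/* budget exhausted */
}

+/** + * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by + * a write to the doorbell) + * @ioc: per adapter object + * @timeout: timeout in second + * + * Returns 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 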
+ */ +static int +_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = 1000*timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "successful count(%d), timeout(%d)\n", ioc->name, + __func__, count, timeout)); + return 0; + } + msleep(1); + count++; + } while (--cntdn); + + printk(MPT3SAS_ERR_FMT "%s: failed due to timeout count(%d), " + "int_status(%x)!\n", ioc->name, __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_spin_on_doorbell_int - waiting for controller interrupt (generated by + * a write to the doorbell) + * @ioc: per adapter object + * @timeout: timeout in second + * + * Returns 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. + */ +static int +_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = 2000 * timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } + udelay(500); + count++; + } while (--cntdn); + + printk(MPT3SAS_ERR_FMT "%s: failed due to timeout count(%d), " + "int_status(%x)!\n", ioc->name, __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. + * @ioc: per adapter object + * @timeout: timeout in second + * + * Returns 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to + * doorbell. + */ +static int +_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + u32 doorbell; + + count = 0; + cntdn = 1000*timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "successful count(%d), timeout(%d)\n", ioc->name, + __func__, count, timeout)); + return 0; + } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + doorbell = ioc->base_readl(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + + msleep(1); + count++; + } while (--cntdn); + + out: + printk(MPT3SAS_ERR_FMT "%s: failed due to timeout count(%d), " + "int_status(%x)!\n", ioc->name, __func__, count, int_status); + return -EFAULT; +} + +
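/* Editor's aside, not part of the patch: the handshake routine two functions
 * below opens by packing the MPI function code and the request size in
 * dwords into a single doorbell write.  The shift macros are the ones used
 * by the driver (function in bits 31:24, added dword count in bits 23:16
 * per the MPI2 doorbell layout); treat the sketch as illustrative. */
static u32 example_handshake_doorbell(u32 function, int request_bytes)
{
	return (function << MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes / 4) << MPI2_DOORBELL_ADD_DWORDS_SHIFT);
}

+/** + * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use + * @ioc: per adapter object + * @timeout: timeout in second + * + * Returns 0 for success, non-zero for failure. 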
+ * + */ +static int +_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = 1000*timeout; + do { + doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell); + if (!(doorbell_reg & MPI2_DOORBELL_USED)) { + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "successful count(%d), timeout(%d)\n", ioc->name, + __func__, count, timeout)); + return 0; + } + msleep(1); + count++; + } while (--cntdn); + + printk(MPT3SAS_ERR_FMT "%s: failed due to timeout count(%d), " + "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg); + return -EFAULT; +} + +/** + * _base_handshake_req_reply_wait - send request thru doorbell interface + * @ioc: per adapter object + * @request_bytes: request length + * @request: pointer having request payload + * @reply_bytes: reply length + * @reply: pointer to reply payload + * @timeout: timeout in second + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, + u32 *request, int reply_bytes, u16 *reply, int timeout) +{ + MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; + int i; + u8 failed; + __le32 *mfp; + + /* make sure doorbell is not in use */ + if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { + printk(MPT3SAS_ERR_FMT "doorbell is in use " + " (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + + /* clear pending doorbell interrupts from previous state changes */ + if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & + MPI2_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + + /* send message to ioc */ + writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | + ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), + &ioc->chip->Doorbell); + + if ((_base_spin_on_doorbell_int(ioc, 5))) { + printk(MPT3SAS_ERR_FMT "doorbell handshake " + "int failed (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + + if ((_base_wait_for_doorbell_ack(ioc, 5))) { + printk(MPT3SAS_ERR_FMT "doorbell handshake " + "ack failed (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + + /* send message 32-bits at a time */ + for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { + writel((u32)(request[i]), &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 5))) + failed = 1; + } + + if (failed) { + printk(MPT3SAS_ERR_FMT "doorbell handshake " + "sending request failed (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + + /* now wait for the reply */ + if ((_base_wait_for_doorbell_int(ioc, timeout))) { + printk(MPT3SAS_ERR_FMT "doorbell handshake " + "int failed (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + + /* read the first two 16-bits, which give the total length of the reply */ + reply[0] = (u16)(ioc->base_readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((_base_wait_for_doorbell_int(ioc, 5))) { + printk(MPT3SAS_ERR_FMT "doorbell handshake " + "int failed (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + reply[1] = (u16)(ioc->base_readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((_base_wait_for_doorbell_int(ioc, 5))) { + printk(MPT3SAS_ERR_FMT "doorbell " + "handshake int failed (line=%d)\n", ioc->name, + __LINE__); + return -EFAULT; + } + if (i >= reply_bytes/2) /* overflow case */ + ioc->base_readl(&ioc->chip->Doorbell); + else + reply[i] = 
(u16)(ioc->base_readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + } + + if (_base_wait_for_doorbell_int(ioc, 5)) { + printk(MPT3SAS_ERR_FMT "doorbell handshake " + "int failed (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "doorbell is in use " + " (line=%d)\n", ioc->name, __LINE__)); + } + writel(0, &ioc->chip->HostInterruptStatus); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + mfp = (__le32 *)reply; + printk(KERN_INFO "\toffset:data\n"); + for (i = 0; i < reply_bytes/4; i++) + printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + return 0; +} + +/** + * _base_wait_on_iocstate - waiting on a particular ioc state + * @ioc_state: controller state { READY, OPERATIONAL, or RESET } + * @timeout: timeout in second + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) +{ + u32 count, cntdn; + u32 current_state; + + count = 0; + cntdn = 1000*timeout; + do { + current_state = mpt3sas_base_get_iocstate(ioc, 1); + if (current_state == ioc_state) + return 0; + if (count && current_state == MPI2_IOC_STATE_FAULT) + break; + msleep(1); + count++; + } while (--cntdn); + + return current_state; +} + +/** + * _base_diag_reset - the "big hammer" start of day reset + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u32 host_diagnostic; + u32 ioc_state; + u32 count; + u32 hcb_size; + + printk(MPT3SAS_INFO_FMT "sending diag reset !!\n", ioc->name); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "Locking pci cfg space access\n", + ioc->name)); + pci_cfg_access_lock(ioc->pdev); +#endif + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "clear interrupts\n", + ioc->name)); + + count = 0; + do { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "write magic " + "sequence\n", ioc->name)); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); + + /* wait 100 msec */ + msleep(100); + + if (count++ > 20) + goto out; + + host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "wrote magic " + "sequence: count(%d), host_diagnostic(0x%08x)\n", + ioc->name, count, host_diagnostic)); + + } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); + + hcb_size = ioc->base_readl(&ioc->chip->HCBSize); + + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "diag reset: issued\n", + ioc->name)); + writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, + &ioc->chip->HostDiagnostic); + +#if defined(DISABLE_RESET_SUPPORT) + count = 0; + do { + /* wait 50 msec per loop */ + msleep(50); + + host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); + if (host_diagnostic == 0xFFFFFFFF) + goto out; + else if (count++ >= 300) /* wait for upto 15 seconds */ + goto out; + if (!(count % 20)) + printk(KERN_INFO "waiting on diag 
reset bit to clear, " + "count = %d\n", (count / 20)); + } while (host_diagnostic & MPI2_DIAG_RESET_ADAPTER); +#else + /* This delay allows the chip PCIe hardware time to finish reset tasks */ + msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + + /* Approximately 300 second max wait */ + for (count = 0; count < (300000000 / + MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { + + host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); + + if (host_diagnostic == 0xFFFFFFFF) + goto out; + if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; - if (index < nr_cpus % nr_msix) - group++; - - for (i = 0 ; i < group ; i++) { - ioc->cpu_msix_table[cpu] = reply_q->msix_index; - cpu = cpumask_next(cpu, cpu_online_mask); - } - index++; + /* Wait to pass the second read delay window */ + msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC/1000); } -} +#endif -/** - * _base_check_and_enable_high_iops_queues - enable high iops mode - * @ ioc - per adapter object - * @ hba_msix_vector_count - msix vectors supported by HBA - * - * Enable high iops queues only if - * - HBA is a SEA/AERO controller and - * - MSI-Xs vector supported by the HBA is 128 and - * - total CPU count in the system >=16 and - * - loaded driver with default max_msix_vectors module parameter and - * - system booted in non kdump mode - * - * returns nothing. - */ -static void -_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, - int hba_msix_vector_count) -{ - u16 lnksta, speed; + if (host_diagnostic & MPI2_DIAG_HCB_MODE) { - if (perf_mode == MPT_PERF_MODE_IOPS || - perf_mode == MPT_PERF_MODE_LATENCY) { - ioc->high_iops_queues = 0; - return; + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "restart the adapter " + "assuming the HCB Address points to good F/W\n", + ioc->name)); + host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; + host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; + writel(host_diagnostic, &ioc->chip->HostDiagnostic); + + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "re-enable the HCDW\n", ioc->name)); + writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, + &ioc->chip->HCBSize); } - if (perf_mode == MPT_PERF_MODE_DEFAULT) { + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "restart the adapter\n", + ioc->name)); + writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, + &ioc->chip->HostDiagnostic); - pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); - speed = lnksta & PCI_EXP_LNKSTA_CLS; + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "disable writes to the " + "diagnostic register\n", ioc->name)); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); - if (speed < 0x4) { - ioc->high_iops_queues = 0; - return; - } + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "Wait for FW to go to the " + "READY state\n", ioc->name)); + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20); + if (ioc_state) { + printk(MPT3SAS_ERR_FMT "%s: failed going to ready state " + " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); + goto out; } - if (!reset_devices && ioc->is_aero_ioc && - hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES && - num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES && - max_msix_vectors == -1) - ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES; - else - ioc->high_iops_queues = 0; -} - -/** - * _base_disable_msix - disables msix - * @ioc: per adapter object - * - */ -static void -_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) -{ - if (!ioc->msix_enable) - return; - pci_free_irq_vectors(ioc->pdev); - ioc->msix_enable = 0; -} - -/** - * 
_base_alloc_irq_vectors - allocate msix vectors - * @ioc: per adapter object - * - */ -static int -_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) -{ - int i, irq_flags = PCI_IRQ_MSIX; - struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; - struct irq_affinity *descp = &desc; - - if (ioc->smp_affinity_enable) - irq_flags |= PCI_IRQ_AFFINITY; - else - descp = NULL; - - ioc_info(ioc, " %d %d\n", ioc->high_iops_queues, - ioc->msix_vector_count); - - i = pci_alloc_irq_vectors_affinity(ioc->pdev, - ioc->high_iops_queues, - ioc->msix_vector_count, irq_flags, descp); - - return i; -} - -/** - * _base_enable_msix - enables msix, failback to io_apic - * @ioc: per adapter object - * - */ -static int -_base_enable_msix(struct MPT3SAS_ADAPTER *ioc) -{ - int r; - int i, local_max_msix_vectors; - u8 try_msix = 0; - - ioc->msix_load_balance = false; - - if (msix_disable == -1 || msix_disable == 0) - try_msix = 1; - - if (!try_msix) - goto try_ioapic; - - if (_base_check_enable_msix(ioc) != 0) - goto try_ioapic; - - ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); - pr_info("\t no of cores: %d, max_msix_vectors: %d\n", - ioc->cpu_count, max_msix_vectors); - if (ioc->is_aero_ioc) - _base_check_and_enable_high_iops_queues(ioc, - ioc->msix_vector_count); - ioc->reply_queue_count = - min_t(int, ioc->cpu_count + ioc->high_iops_queues, - ioc->msix_vector_count); - - if (!ioc->rdpq_array_enable && max_msix_vectors == -1) - local_max_msix_vectors = (reset_devices) ? 1 : 8; - else - local_max_msix_vectors = max_msix_vectors; - - if (local_max_msix_vectors > 0) - ioc->reply_queue_count = min_t(int, local_max_msix_vectors, - ioc->reply_queue_count); - else if (local_max_msix_vectors == 0) - goto try_ioapic; - - /* - * Enable msix_load_balance only if combined reply queue mode is - * disabled on SAS3 & above generation HBA devices. - */ - if (!ioc->combined_reply_queue && - ioc->hba_mpi_version_belonged != MPI2_VERSION) { - ioc->msix_load_balance = true; - } - - /* - * smp affinity setting is not need when msix load balance - * is enabled. - */ - if (ioc->msix_load_balance) - ioc->smp_affinity_enable = 0; - - r = _base_alloc_irq_vectors(ioc); - if (r < 0) { - dfailprintk(ioc, - ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", - r)); - goto try_ioapic; - } - - ioc->msix_enable = 1; - ioc->reply_queue_count = r; - for (i = 0; i < ioc->reply_queue_count; i++) { - r = _base_request_irq(ioc, i); - if (r) { - _base_free_irq(ioc); - _base_disable_msix(ioc); - goto try_ioapic; - } - } - - ioc_info(ioc, "High IOPs queues : %s\n", - ioc->high_iops_queues ? 
"enabled" : "disabled"); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "Unlocking pci cfg space access\n", ioc->name)); + pci_cfg_access_unlock(ioc->pdev); +#endif + printk(MPT3SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name); return 0; -/* failback to io_apic interrupt routing */ - try_ioapic: - ioc->high_iops_queues = 0; - ioc_info(ioc, "High IOPs queues : disabled\n"); - ioc->reply_queue_count = 1; - r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); - if (r < 0) { - dfailprintk(ioc, - ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", - r)); - } else - r = _base_request_irq(ioc, 0); + out: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "Unlocking pci cfg space access\n", ioc->name)); + pci_cfg_access_unlock(ioc->pdev); +#endif - return r; + printk(MPT3SAS_ERR_FMT "diag reset: FAILED\n", ioc->name); + return -EFAULT; } /** - * mpt3sas_base_unmap_resources - free controller resources + * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL * @ioc: per adapter object + * @timeout: + * + * Returns 0 for success, non-zero for failure. */ -static void -mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) -{ - struct pci_dev *pdev = ioc->pdev; - - dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - - _base_free_irq(ioc); - _base_disable_msix(ioc); - - kfree(ioc->replyPostRegisterIndex); - ioc->replyPostRegisterIndex = NULL; - - - if (ioc->chip_phys) { - iounmap(ioc->chip); - ioc->chip_phys = 0; - } - - if (pci_is_enabled(pdev)) { - pci_release_selected_regions(ioc->pdev, ioc->bars); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - } -} - static int -_base_diag_reset(struct MPT3SAS_ADAPTER *ioc); +_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 ioc_state; + int rc; + + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + if (!mpt3sas_base_pci_device_is_available(ioc)) + return 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + + if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || + (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "unexpected doorbell " + "active!\n", ioc->name)); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, + timeout); + if (ioc_state) { + printk(MPT3SAS_ERR_FMT "%s: failed going to ready state " + " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); + return -EFAULT; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc); + return rc; +} /** * _base_check_for_fault_and_issue_reset - check if IOC is in fault state * and if it is in fault state then issue diag reset. * @ioc: per adapter object * - * Returns: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ static int _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) @@ -3199,11 +4865,15 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) u32 ioc_state; int rc = -EFAULT; - dinitprintk(ioc, pr_info("%s\n", __func__)); - if (ioc->pci_error_recovery) - return 0; + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + if (!mpt3sas_base_pci_device_is_available(ioc)) + return rc; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); - dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state)); + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mpt3sas_base_fault_info(ioc, ioc_state & @@ -3214,11 +4884,158 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) return rc; } +/** + * _base_get_ioc_facts - obtain ioc facts reply and save in ioc + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2IOCFactsRequest_t mpi_request; + Mpi2IOCFactsReply_t mpi_reply; + struct mpt3sas_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + r = _base_wait_for_iocstate(ioc, 10); + if (r) { + printk(MPT3SAS_ERR_FMT "%s: failed getting to correct state\n", + ioc->name, __func__); + return r; + } + mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); + mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); + + if (r != 0) { + printk(MPT3SAS_ERR_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + + facts = &ioc->facts; + memset(facts, 0, sizeof(struct mpt3sas_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->IOCNumber = mpi_reply.IOCNumber; + printk(MPT3SAS_INFO_FMT + "IOC Number : %d\n", ioc->name, facts->IOCNumber); + ioc->IOCNumber = facts->IOCNumber; + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + if (ioc->msix_enable && (facts->MaxMSIxVectors <= + MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) + ioc->combined_reply_queue = 0; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) + && (!reset_devices)) + ioc->rdpq_array_capable = 1; + else + ioc->rdpq_array_capable = 0; + if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) + && ioc->is_aero_ioc) + ioc->atomic_desc_capable = 1; + else + ioc->atomic_desc_capable = 0; + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = + 
le16_to_cpu(mpi_reply.IOCRequestFrameSize); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + facts->IOCMaxChainSegmentSize = + le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); + } + facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = + le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; + + /* + * Get the Page Size from IOC Facts. If it's 0, default to 4k. + */ + ioc->page_size = 1 << facts->CurrentHostPageSize; + if (ioc->page_size == 1) { + printk(MPT3SAS_INFO_FMT "CurrentHostPageSize is 0: Setting " + "default host page size to 4k\n", ioc->name); + ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; + } + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "CurrentHostPageSize(%d)\n", + ioc->name, facts->CurrentHostPageSize)); + + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "hba queue depth(%d), " + "max chains per io(%d)\n", ioc->name, facts->RequestCredit, + facts->MaxChainDepth)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "request frame size(%d), " + "reply frame size(%d)\n", ioc->name, + facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); + return 0; +} + +/** + * _base_unmap_resources - free controller resources + * @ioc: per adapter object + */ +static void +_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + printk(MPT3SAS_INFO_FMT "%s\n", + ioc->name, __func__); + + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + + kfree(ioc->replyPostRegisterIndex); + + /* synchronizing freeing resource with pci_access_mutex lock */ + mutex_lock(&ioc->pci_access_mutex); + if (ioc->chip_phys) { + iounmap(ioc->chip); + ioc->chip_phys = 0; + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) + pci_release_regions(pdev); +#else + pci_release_selected_regions(ioc->pdev, ioc->bars); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + pci_disable_pcie_error_reporting(pdev); +#endif + pci_disable_device(pdev); + mutex_unlock(&ioc->pci_access_mutex); + return; +} + /** * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
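 *
 * Mapping follows the usual PCI bring-up order; roughly (sketch only,
 * error handling and kernel-version branches omitted):
 *
 *   pci_enable_device_mem(pdev);
 *   pci_request_selected_regions(pdev, ioc->bars, ioc->driver_name);
 *   pci_set_master(pdev);
 *   ioc->chip = ioremap(ioc->chip_phys, memap_sz);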
*/ int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) @@ -3227,46 +5044,74 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) u32 memap_sz; u32 pio_sz; int i, r = 0, rc; +#ifndef CPQ_CIM u64 pio_chip = 0; +#endif phys_addr_t chip_phys = 0; struct adapter_reply_queue *reply_q; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", + ioc->name, __func__)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) + if (pci_enable_device(pdev)) { + printk(MPT3SAS_WARN_FMT "pci_enable_device: failed\n", + ioc->name); + return -ENODEV; + } + + if (pci_request_regions(pdev, ioc->driver_name)) { + printk(MPT3SAS_WARN_FMT "pci_request_regions: failed\n", + ioc->name); + r = -ENODEV; + goto out_fail; + } +#else ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); if (pci_enable_device_mem(pdev)) { - ioc_warn(ioc, "pci_enable_device_mem: failed\n"); - ioc->bars = 0; + printk(MPT3SAS_WARN_FMT "pci_enable_device_mem: " + "failed\n", ioc->name); return -ENODEV; } if (pci_request_selected_regions(pdev, ioc->bars, ioc->driver_name)) { - ioc_warn(ioc, "pci_request_selected_regions: failed\n"); - ioc->bars = 0; + printk(MPT3SAS_WARN_FMT "pci_request_selected_regions: " + "failed\n", ioc->name); r = -ENODEV; goto out_fail; } +#endif /* AER (Advanced Error Reporting) hooks */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) pci_enable_pcie_error_reporting(pdev); +#endif pci_set_master(pdev); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) + /* for SLES10 ~ PCI EEH support */ + pci_save_state(pdev); +#endif if (_base_config_dma_addressing(ioc, pdev) != 0) { - ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev)); + printk(MPT3SAS_WARN_FMT "no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); r = -ENODEV; goto out_fail; } - for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) && - (!memap_sz || !pio_sz); i++) { + for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) { if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { if (pio_sz) continue; +#if defined(CPQ_CIM) + ioc->pio_chip = (u64)pci_resource_start(pdev, i); +#else pio_chip = (u64)pci_resource_start(pdev, i); +#endif pio_sz = pci_resource_len(pdev, i); } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { if (memap_sz) @@ -3275,21 +5120,21 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) chip_phys = ioc->chip_phys; memap_sz = pci_resource_len(pdev, i); ioc->chip = ioremap(ioc->chip_phys, memap_sz); + if (ioc->chip == NULL) { + printk(MPT3SAS_ERR_FMT "unable to map adapter " + "memory!\n", ioc->name); + r = -EINVAL; + goto out_fail; + } } } - if (ioc->chip == NULL) { - ioc_err(ioc, "unable to map adapter memory! or resource not found\n"); - r = -EINVAL; - goto out_fail; - } - - _base_mask_interrupts(ioc); + mpt3sas_base_mask_interrupts(ioc); r = _base_get_ioc_facts(ioc); if (r) { rc = _base_check_for_fault_and_issue_reset(ioc); - if (rc || (_base_get_ioc_facts(ioc))) + if (rc || (r = _base_get_ioc_facts(ioc))) goto out_fail; } @@ -3302,64 +5147,71 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_fail; +#if defined(MPT3SAS_ENABLE_IRQ_POLL) if (!ioc->is_driver_loading) _base_init_irqpolls(ioc); - /* Use the Combined reply queue feature only for SAS3 C0 & higher - * revision HBAs and also only when reply queue count is greater than 8 - */ +#endif + if (ioc->combined_reply_queue) { - /* Determine the Supplemental Reply Post Host Index Registers - * Addresse. 
Supplemental Reply Post Host Index Registers - * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and - * each register is at offset bytes of - * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. - */ - ioc->replyPostRegisterIndex = kcalloc( - ioc->combined_reply_index_count, - sizeof(resource_size_t *), GFP_KERNEL); + /* If this is an 96 vector supported device, set up ReplyPostIndex addresses */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) + ioc->replyPostRegisterIndex = kcalloc(ioc->nc_reply_index_count, + sizeof(resource_size_t *), GFP_KERNEL); +#else + ioc->replyPostRegisterIndex = kcalloc(ioc->nc_reply_index_count, + sizeof(u64 *), GFP_KERNEL); +#endif if (!ioc->replyPostRegisterIndex) { - dfailprintk(ioc, - ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n")); + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "allocation " + "for reply Post Register Index failed!!!\n", ioc->name)); r = -ENOMEM; goto out_fail; } - for (i = 0; i < ioc->combined_reply_index_count; i++) { - ioc->replyPostRegisterIndex[i] = (resource_size_t *) - ((u8 __force *)&ioc->chip->Doorbell + - MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + - (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) + for ( i = 0; i < ioc->nc_reply_index_count; i++ ) { + ioc->replyPostRegisterIndex[i] =(resource_size_t *) + ((u8 *)&ioc->chip->Doorbell + + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + (i * 0x10)); } +#else + for ( i = 0; i < ioc->nc_reply_index_count; i++ ) { + ioc->replyPostRegisterIndex[i] = (u64 *) + ((u8 *)&ioc->chip->Doorbell + + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + (i * 0x10)); + } +#endif } - - if (ioc->is_warpdrive) { - ioc->reply_post_host_index[0] = (resource_size_t __iomem *) - &ioc->chip->ReplyPostHostIndex; - - for (i = 1; i < ioc->cpu_msix_table_sz; i++) - ioc->reply_post_host_index[i] = - (resource_size_t __iomem *) - ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) - * 4))); - } - list_for_each_entry(reply_q, &ioc->reply_queue_list, list) - pr_info("%s: %s enabled: IRQ %d\n", - reply_q->name, - ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC", - pci_irq_vector(ioc->pdev, reply_q->msix_index)); - - ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n", - &chip_phys, ioc->chip, memap_sz); - ioc_info(ioc, "ioport(0x%016llx), size(%d)\n", - (unsigned long long)pio_chip, pio_sz); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) + printk(MPT3SAS_INFO_FMT "%s: IRQ %d\n", + reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : + "IO-APIC enabled"),reply_q->vector); +#else + printk(MPT3SAS_INFO_FMT "%s: IRQ %d\n", + reply_q->name, ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : + "IO-APIC enabled"), + pci_irq_vector(ioc->pdev, reply_q->msix_index)); +#endif + printk(MPT3SAS_INFO_FMT "iomem(%pap), mapped(0x%p), size(%d)\n", + ioc->name, &chip_phys, ioc->chip, memap_sz); +#if defined(CPQ_CIM) + printk(MPT3SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", + ioc->name, (unsigned long long)ioc->pio_chip, pio_sz); +#else + printk(MPT3SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", + ioc->name, (unsigned long long)pio_chip, pio_sz); +#endif +/* This is causing SLES10 to fail when loading */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) /* Save PCI configuration state for recovery from PCI AER/EEH errors */ pci_save_state(pdev); +#endif return 0; out_fail: - mpt3sas_base_unmap_resources(ioc); + _base_unmap_resources(ioc); return r; } @@ -3368,20 +5220,23 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) * @ioc: per adapter object * @smid: system request message index(smid zero is invalid) * - * Return: virt pointer to message frame. + * Returns virt pointer to message frame. */ void * mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) { return (void *)(ioc->request + (smid * ioc->request_sz)); } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_get_msg_frame); +#endif /** * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr * @ioc: per adapter object * @smid: system request message index * - * Return: virt pointer to sense buffer. + * Returns virt pointer to sense buffer. */ void * mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -3394,7 +5249,7 @@ mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) * @ioc: per adapter object * @smid: system request message index * - * Return: phys pointer to the low 32bit address of the sense buffer. + * Returns phys pointer to the low 32bit address of the sense buffer. */ __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -3403,12 +5258,26 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) SCSI_SENSE_BUFFERSIZE)); } +/** + * mpt3sas_base_get_sense_buffer_dma_64 - obtain a sense buffer dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Returns phys pointer to the 64bit address of the sense buffer. + */ +__le64 +mpt3sas_base_get_sense_buffer_dma_64(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le64(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + /** * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr * @ioc: per adapter object * @smid: system request message index * - * Return: virt pointer to a PCIe SGL. + * Returns virt pointer to a PCIe SGL. */ void * mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -3421,7 +5290,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) * @ioc: per adapter object * @smid: system request message index * - * Return: phys pointer to the address of the PCIe buffer. + * Returns phys pointer to the address of the PCIe buffer. 
*/ dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -3443,6 +5312,9 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) return NULL; return ioc->reply + (phys_addr - (u32)ioc->reply_dma); } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_get_reply_virt_addr); +#endif /** * _base_get_msix_index - get the msix index @@ -3455,38 +5327,42 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) */ static inline u8 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, - struct scsi_cmnd *scmd) + struct scsi_cmnd *scmd) { /* Enables reply_queue load balancing */ if (ioc->msix_load_balance) - return ioc->reply_queue_count ? - base_mod64(atomic64_add_return(1, + return ioc->reply_queue_count ? base_mod64(atomic64_add_return(1, &ioc->total_io_cnt), ioc->reply_queue_count) : 0; return ioc->cpu_msix_table[raw_smp_processor_id()]; } /** - * _base_get_high_iops_msix_index - get the msix index of - * high iops queues + * _base_get_high_iops_msix_index - get the msix index of high iops queues * @ioc: per adapter object * @scmd: scsi_cmnd object * - * Returns: msix index of high iops reply queues. + * returns msix index of high iops reply queues, * i.e. high iops reply queue on which IO request's * reply should be posted by the HBA firmware. */ static inline u8 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, - struct scsi_cmnd *scmd) + struct scsi_cmnd *scmd) { /** * Round robin the IO interrupts among the high iops - * reply queues in terms of batch count 16 when outstanding + * reply queues in terms of batch count 4 when outstanding * IOs on the target device is >=8. */ +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7 && RHEL_MINOR >= 2)) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) if (atomic_read(&scmd->device->device_busy) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) +#else + if (scmd->device->device_busy > + MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) +#endif return base_mod64(( atomic64_add_return(1, &ioc->high_iops_outstanding) / MPT3SAS_HIGH_IOPS_BATCH_COUNT), @@ -3500,7 +5376,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @cb_idx: callback index * - * Return: smid (zero is invalid) + * Returns smid (zero is invalid) */ u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) @@ -3512,7 +5388,8 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); if (list_empty(&ioc->internal_free_list)) { spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - ioc_err(ioc, "%s: smid not available\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: smid not available\n", + ioc->name, __func__); return 0; } @@ -3524,6 +5401,9 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); return smid; } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_get_smid); +#endif /** * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue @@ -3531,30 +5411,36 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) * @cb_idx: callback index * @scmd: pointer to scsi command object * - * Return: smid (zero is invalid) + * Returns smid (zero is invalid) */ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, struct scsi_cmnd *scmd) { - struct scsiio_tracker *request = scsi_cmd_priv(scmd); + struct scsiio_tracker *request; unsigned int tag = scmd->request->tag; u16 smid; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)) + 
scmd->host_scribble = (unsigned char *)(&ioc->scsi_lookup[tag]); +#endif + request = mpt3sas_base_scsi_cmd_priv(scmd); smid = tag + 1; request->cb_idx = cb_idx; request->smid = smid; request->scmd = scmd; - INIT_LIST_HEAD(&request->chain_list); return smid; } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_get_smid_scsiio); +#endif /** * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue * @ioc: per adapter object * @cb_idx: callback index * - * Return: smid (zero is invalid) + * Returns smid (zero is invalid) */ u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) @@ -3582,52 +5468,55 @@ static void _base_recovery_check(struct MPT3SAS_ADAPTER *ioc) { /* - * See _wait_for_commands_to_complete() call with regards to this code. + * See mpt3sas_wait_for_commands_to_complete() call with + * regards to this code. */ if (ioc->shost_recovery && ioc->pending_io_count) { - ioc->pending_io_count = scsi_host_busy(ioc->shost); - if (ioc->pending_io_count == 0) + if (ioc->pending_io_count == 1) wake_up(&ioc->reset_wq); + ioc->pending_io_count--; } } void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, - struct scsiio_tracker *st) + struct scsiio_tracker *st) { + if (!st) + return; if (WARN_ON(st->smid == 0)) return; st->cb_idx = 0xFF; st->direct_io = 0; st->scmd = NULL; atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); - st->smid = 0; } /** * mpt3sas_base_free_smid - put smid back on free_list * @ioc: per adapter object * @smid: system request message index + * + * Return nothing. */ void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) { unsigned long flags; int i; + struct scsiio_tracker *st; + void *request; if (smid < ioc->hi_priority_smid) { - struct scsiio_tracker *st; - void *request; + /* scsiio queue */ + st = mpt3sas_get_st_from_smid(ioc, smid); - st = _get_st_from_smid(ioc, smid); if (!st) { _base_recovery_check(ioc); return; } - /* Clear MPI request frame */ request = mpt3sas_base_get_msg_frame(ioc, smid); memset(request, 0, ioc->request_sz); - mpt3sas_base_clear_st(ioc, st); _base_recovery_check(ioc); return; @@ -3648,6 +5537,9 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) } spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_free_smid); +#endif /** * _base_mpi_ep_writeq - 32 bit write to MMIO @@ -3655,24 +5547,28 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) * @addr: address in MMIO space * @writeq_lock: spin lock * - * This special handling for MPI EP to take care of 32 bit + * This special handling for MPI EP to take care of 32 bit * environment where its not quarenteed to send the entire word * in one transfer. 
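 *
 * That is, the 64-bit descriptor is posted as two 32-bit writes under
 * one lock, so the halves of concurrent posts cannot interleave:
 *
 *   spin_lock_irqsave(writeq_lock, flags);
 *   writel((u32)b, addr);
 *   writel((u32)(b >> 32), addr + 4);
 *   spin_unlock_irqrestore(writeq_lock, flags);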
*/ static inline void -_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, - spinlock_t *writeq_lock) +_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { unsigned long flags; + __u64 data_out = b; spin_lock_irqsave(writeq_lock, flags); - __raw_writel((u32)(b), addr); - __raw_writel((u32)(b >> 32), (addr + 4)); + writel((u32)(data_out), addr); + writel((u32)(data_out >> 32), (addr + 4)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) + mmiowb(); +#endif spin_unlock_irqrestore(writeq_lock, flags); } /** * _base_writeq - 64 bit write to MMIO + * @ioc: per adapter object * @b: data payload * @addr: address in MMIO space * @writeq_lock: spin lock @@ -3685,15 +5581,19 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, static inline void _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { - wmb(); - __raw_writeq(b, addr); - barrier(); + writeq(b, addr); } #else static inline void _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { - _base_mpi_ep_writeq(b, addr, writeq_lock); + unsigned long flags; + __u64 data_out = b; + + spin_lock_irqsave(writeq_lock, flags); + writel((u32)(data_out), addr); + writel((u32)(data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(writeq_lock, flags); } #endif @@ -3708,10 +5608,9 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) static u8 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid) { - struct scsiio_tracker *st = NULL; + struct scsiio_tracker *st; - if (smid < ioc->hi_priority_smid) - st = _get_st_from_smid(ioc, smid); + st = (smid < ioc->hi_priority_smid) ? (mpt3sas_get_st_from_smid(ioc, smid)) : (NULL); if (st == NULL) return _base_get_msix_index(ioc, NULL); @@ -3725,35 +5624,36 @@ _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid) * @ioc: per adapter object * @smid: system request message index * @handle: device handle + * + * Return nothing. */ static void -_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, - u16 smid, u16 handle) +_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) { Mpi2RequestDescriptorUnion_t descriptor; u64 *request = (u64 *)&descriptor; void *mpi_req_iomem; - __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); - _clone_sg_entries(ioc, (void *) mfp, smid); - mpi_req_iomem = (void __force *)ioc->chip + - MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); - _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, - ioc->request_sz); + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + _clone_sg_entries(ioc, (void*) mfp, smid); + mpi_req_iomem = (void*)ioc->chip + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem( mpi_req_iomem, (void*)mfp, ioc->request_sz); + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; - descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); descriptor.SCSIIO.SMID = cpu_to_le16(smid); descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); descriptor.SCSIIO.LMID = 0; _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, &ioc->scsi_lookup_lock); } - /** * _base_put_smid_scsi_io - send SCSI_IO request to firmware * @ioc: per adapter object * @smid: system request message index * @handle: device handle + * + * Return nothing. 
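 *
 * In outline, the post is a descriptor build followed by a single
 * 64-bit MMIO write:
 *
 *   descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 *   descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 *   descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 *   _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 *       &ioc->scsi_lookup_lock);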
 */
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3761,12 +5661,12 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
 	Mpi2RequestDescriptorUnion_t descriptor;
 	u64 *request = (u64 *)&descriptor;

-	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
+
 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 	    &ioc->scsi_lookup_lock);
 }
@@ -3776,17 +5676,19 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0.
+ *
+ * Return nothing.
 */
static void
-_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
-	u16 handle)
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
 	Mpi2RequestDescriptorUnion_t descriptor;
 	u64 *request = (u64 *)&descriptor;

 	descriptor.SCSIIO.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3795,28 +5697,26 @@
 }

 /**
- * _base_put_smid_hi_priority - send Task Management request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else
+ * 0. Return nothing.
 */
static void
-_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
-	u16 msix_task)
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 msix_task)
{
 	Mpi2RequestDescriptorUnion_t descriptor;
 	void *mpi_req_iomem;
 	u64 *request;

 	if (ioc->is_mcpu_endpoint) {
+		MPI2RequestHeader_t *request_hdr;
 		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
-
-		/* TBD 256 is offset within sys register. */
-		mpi_req_iomem = (void __force *)ioc->chip
-			+ MPI_FRAME_START_OFFSET
-			+ (smid * ioc->request_sz);
-		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
-			ioc->request_sz);
+		request_hdr = (MPI2RequestHeader_t*) mfp;
+		/* TBD: 256 is the offset within the sys register.
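+		 * The per-smid frame address below is computed the same way
+		 * as in _base_put_smid_mpi_ep_scsi_io():
+		 *
+		 *   ioc->chip + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz)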
*/ + mpi_req_iomem = (void*)ioc->chip + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem( mpi_req_iomem, (void*)mfp, ioc->request_sz); } request = (u64 *)&descriptor; @@ -3828,22 +5728,23 @@ _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, descriptor.HighPriority.LMID = 0; descriptor.HighPriority.Reserved1 = 0; if (ioc->is_mcpu_endpoint) - _base_mpi_ep_writeq(*request, - &ioc->chip->RequestDescriptorPostLow, - &ioc->scsi_lookup_lock); + _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); else _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, &ioc->scsi_lookup_lock); } /** - * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to + * _base_put_smid_nvme_encap - send NVMe encapsulated request to * firmware * @ioc: per adapter object * @smid: system request message index + * + * Return nothing. */ -void -mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) +static void +_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) { Mpi2RequestDescriptorUnion_t descriptor; u64 *request = (u64 *)&descriptor; @@ -3862,6 +5763,8 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) * _base_put_smid_default - Default, primarily used for config pages * @ioc: per adapter object * @smid: system request message index + * + * Return nothing. */ static void _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -3869,41 +5772,68 @@ _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) Mpi2RequestDescriptorUnion_t descriptor; void *mpi_req_iomem; u64 *request; + MPI2RequestHeader_t *request_hdr; if (ioc->is_mcpu_endpoint) { __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); - - _clone_sg_entries(ioc, (void *) mfp, smid); + request_hdr = (MPI2RequestHeader_t*) mfp; + + _clone_sg_entries(ioc, (void*) mfp, smid); + /* TBD 256 is offset within sys register */ - mpi_req_iomem = (void __force *)ioc->chip + - MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); - _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, - ioc->request_sz); + mpi_req_iomem = (void*)ioc->chip + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem( mpi_req_iomem, (void*)mfp, ioc->request_sz); } request = (u64 *)&descriptor; descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; - descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); descriptor.Default.SMID = cpu_to_le16(smid); descriptor.Default.LMID = 0; descriptor.Default.DescriptorTypeDependent = 0; if (ioc->is_mcpu_endpoint) - _base_mpi_ep_writeq(*request, - &ioc->chip->RequestDescriptorPostLow, - &ioc->scsi_lookup_lock); + _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); else _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, - &ioc->scsi_lookup_lock); + &ioc->scsi_lookup_lock); } +#if defined(TARGET_MODE) /** - * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using - * Atomic Request Descriptor + * _base_put_smid_target_assist - send Target Assist/Status to firmware * @ioc: per adapter object * @smid: system request message index - * @handle: device handle, unused in this function, for function type match + * @io_index: value used to track the IO * * Return nothing. 
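 *
 * The descriptor mirrors the SCSI IO path, with IoIndex carrying the
 * tracking value; in outline:
 *
 *   descriptor.SCSITarget.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
 *   descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
 *   _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 *       &ioc->scsi_lookup_lock);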
 */
+void
+_base_put_smid_target_assist(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 io_index)
+{
+	Mpi2RequestDescriptorUnion_t descriptor;
+	u64 *request = (u64 *)&descriptor;
+
+	descriptor.SCSITarget.RequestFlags =
+	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
+	descriptor.SCSITarget.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
+	descriptor.SCSITarget.LMID = 0;
+	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
+	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+	    &ioc->scsi_lookup_lock);
+}
+#endif
+
+/**
+* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+* Atomic Request Descriptor
+* @ioc: per adapter object
+* @smid: system request message index
+* @handle: device handle, unused in this function, for function type match
+*
+* Return nothing.
+*/
 static void
 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 handle)
@@ -3919,13 +5849,13 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }

 /**
- * _base_put_smid_fast_path_atomic - send fast path request to firmware
- * using Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- * @handle: device handle, unused in this function, for function type match
- * Return nothing
- */
+* _base_put_smid_fast_path_atomic - send fast path request to firmware
+* using Atomic Request Descriptor
+* @ioc: per adapter object
+* @smid: system request message index
+* @handle: device handle, unused in this function, for function type match
+* Return nothing.
+*/
 static void
 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 handle)
@@ -3941,14 +5871,15 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }

 /**
- * _base_put_smid_hi_priority_atomic - send Task Management request to
- * firmware using Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
- *
- * Return nothing.
- */
+* _base_put_smid_hi_priority_atomic - send Task Management request to
+* firmware using Atomic Request Descriptor
+* @ioc: per adapter object
+* @smid: system request message index
+*
+* @msix_task: msix_task will be same as msix of IO in case of task abort else 0
+*
+* Return nothing.
+*/
 static void
 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 msix_task)
@@ -3964,13 +5895,34 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }

 /**
- * _base_put_smid_default - Default, primarily used for config pages
- * use Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Return nothing.
- */
+* _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
+* firmware using Atomic Request Descriptor
+* @ioc: per adapter object
+* @smid: system request message index
+*
+* Return nothing.
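+*
+* Atomic descriptors fit in a single 32-bit word, so no writeq lock is
+* needed and the post reduces to:
+*
+*   descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
+*   writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);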
+*/ +static void +_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** +* _base_put_smid_default - Default, primarily used for config pages +* use Atomic Request Descriptor +* @ioc: per adapter object +* @smid: system request message index +* +* Return nothing. +*/ static void _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid) { @@ -3984,103 +5936,136 @@ _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid) writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); } + +#if defined(TARGET_MODE) +/** + * _base_put_smid_target_assist_atomic - send Target Assist/Status to + * firmware using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @io_index: value used to track the IO, unused, for function type match + * + * Return nothing. + */ +static void +_base_put_smid_target_assist_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 io_index) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} +#endif + + /** * _base_display_OEMs_branding - Display branding string * @ioc: per adapter object + * + * Return nothing. */ static void _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) { - if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) - return; - - switch (ioc->pdev->subsystem_vendor) { + switch(ioc->pdev->subsystem_vendor) { case PCI_VENDOR_ID_INTEL: switch (ioc->pdev->device) { case MPI2_MFGPAGE_DEVID_SAS2008: switch (ioc->pdev->subsystem_device) { case MPT2SAS_INTEL_RMS2LL080_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS2LL080_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL080_BRANDING); break; case MPT2SAS_INTEL_RMS2LL040_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS2LL040_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL040_BRANDING); break; case MPT2SAS_INTEL_SSD910_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_SSD910_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_SSD910_BRANDING); break; default: - ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Intel(R) Controller:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; case MPI2_MFGPAGE_DEVID_SAS2308_2: switch (ioc->pdev->subsystem_device) { case MPT2SAS_INTEL_RS25GB008_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RS25GB008_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RS25GB008_BRANDING); break; case MPT2SAS_INTEL_RMS25JB080_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS25JB080_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25JB080_BRANDING); break; case MPT2SAS_INTEL_RMS25JB040_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS25JB040_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + 
MPT2SAS_INTEL_RMS25JB040_BRANDING); break; case MPT2SAS_INTEL_RMS25KB080_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS25KB080_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25KB080_BRANDING); break; case MPT2SAS_INTEL_RMS25KB040_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS25KB040_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25KB040_BRANDING); break; case MPT2SAS_INTEL_RMS25LB040_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS25LB040_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25LB040_BRANDING); break; case MPT2SAS_INTEL_RMS25LB080_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_INTEL_RMS25LB080_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25LB080_BRANDING); break; default: - ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Intel(R) Controller:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; case MPI25_MFGPAGE_DEVID_SAS3008: switch (ioc->pdev->subsystem_device) { case MPT3SAS_INTEL_RMS3JC080_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_INTEL_RMS3JC080_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RMS3JC080_BRANDING); break; - case MPT3SAS_INTEL_RS3GC008_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_INTEL_RS3GC008_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3GC008_BRANDING); break; case MPT3SAS_INTEL_RS3FC044_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_INTEL_RS3FC044_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3FC044_BRANDING); break; case MPT3SAS_INTEL_RS3UC080_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_INTEL_RS3UC080_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3UC080_BRANDING); + break; + case MPT3SAS_INTEL_RS3PC_SSDID: + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3PC_BRANDING); break; default: - ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Intel(R) Controller:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; default: - ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Intel(R) Controller:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; @@ -4089,187 +6074,205 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) case MPI2_MFGPAGE_DEVID_SAS2008: switch (ioc->pdev->subsystem_device) { case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); break; case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); break; case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); break; case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); break; case 
MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); break; case MPT2SAS_DELL_PERC_H200_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_PERC_H200_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_BRANDING); break; - case MPT2SAS_DELL_6GBPS_SAS_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_DELL_6GBPS_SAS_BRANDING); + case MPT2SAS_DELL_6GBPS_SAS_SSDID: + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_DELL_6GBPS_SAS_BRANDING); break; default: - ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Dell 6Gbps SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; case MPI25_MFGPAGE_DEVID_SAS3008: switch (ioc->pdev->subsystem_device) { case MPT3SAS_DELL_12G_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_DELL_12G_HBA_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_DELL_12G_HBA_BRANDING); break; - default: - ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + case MPT3SAS_DELL_HBA330_ADP_SSDID: + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_DELL_HBA330_ADP_BRANDING); break; - } - break; + case MPT3SAS_DELL_HBA330_MINI_SSDID: + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_DELL_HBA330_MINI_BRANDING); + break; + default: + printk(MPT3SAS_INFO_FMT "Dell 12Gbps SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); + break; + } + break; default: - ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); - break; + printk(MPT3SAS_INFO_FMT "Dell SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); + break; } break; +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5 && RHEL_MINOR >= 7)) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) case PCI_VENDOR_ID_CISCO: switch (ioc->pdev->device) { case MPI25_MFGPAGE_DEVID_SAS3008: switch (ioc->pdev->subsystem_device) { case MPT3SAS_CISCO_12G_8E_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_CISCO_12G_8E_HBA_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_8E_HBA_BRANDING); break; case MPT3SAS_CISCO_12G_8I_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_CISCO_12G_8I_HBA_BRANDING); - break; - case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_8I_HBA_BRANDING); break; default: - ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Cisco 12Gbps SAS HBA:" + " Device ID:0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; case MPI25_MFGPAGE_DEVID_SAS3108_1: switch (ioc->pdev->subsystem_device) { case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); break; case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID: - ioc_info(ioc, "%s\n", - MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING); break; default: - ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", - 
ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Cisco 12Gbps SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; default: - ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "Cisco SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; +#endif case MPT2SAS_HP_3PAR_SSVID: switch (ioc->pdev->device) { case MPI2_MFGPAGE_DEVID_SAS2004: switch (ioc->pdev->subsystem_device) { case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); break; default: - ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "HP 6Gbps SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; case MPI2_MFGPAGE_DEVID_SAS2308_2: switch (ioc->pdev->subsystem_device) { case MPT2SAS_HP_2_4_INTERNAL_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_HP_2_4_INTERNAL_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_2_4_INTERNAL_BRANDING); break; case MPT2SAS_HP_2_4_EXTERNAL_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_HP_2_4_EXTERNAL_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_2_4_EXTERNAL_BRANDING); break; case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); break; case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: - ioc_info(ioc, "%s\n", - MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); + printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); break; default: - ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "HP 12Gbps SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } break; default: - ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n", - ioc->pdev->subsystem_device); + printk(MPT3SAS_INFO_FMT "HP 12Gbps SAS HBA:" + " Device ID: 0x%X Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->device, ioc->pdev->subsystem_device); break; } - default: - break; } } /** - * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg - * version from FW Image Header. + * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg + * version from FW Image Header. * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
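 *
 * In outline, the version is read back with a small FW_UPLOAD handshake
 * against the flash image header:
 *
 *   mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
 *   mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
 *   ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, data_length);
 *   ioc->put_smid_default(ioc, smid);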
*/ - static int +static int _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) { - Mpi2FWImageHeader_t *FWImgHdr; + Mpi2FWImageHeader_t *fw_img_hdr; + Mpi26ComponentImageHeader_t *cmp_img_hdr; Mpi25FWUploadRequest_t *mpi_request; Mpi2FWUploadReply_t mpi_reply; int r = 0; + u32 package_version = 0; void *fwpkg_data = NULL; dma_addr_t fwpkg_data_dma; u16 smid, ioc_status; size_t data_length; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); if (ioc->base_cmds.status & MPT3_CMD_PENDING) { - ioc_err(ioc, "%s: internal command already in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: internal command already in use\n", + ioc->name, __func__); return -EAGAIN; } data_length = sizeof(Mpi2FWImageHeader_t); - fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, - &fwpkg_data_dma, GFP_KERNEL); + fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length, + &fwpkg_data_dma); if (!fwpkg_data) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -ENOMEM; } smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); r = -EAGAIN; goto out; } @@ -4280,158 +6283,173 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t)); mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD; mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH; - mpi_request->ImageSize = cpu_to_le32(data_length); + mpi_request->ImageSize = data_length; ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, - data_length); + data_length); init_completion(&ioc->base_cmds.done); ioc->put_smid_default(ioc, smid); /* Wait for 15 seconds */ - wait_for_completion_timeout(&ioc->base_cmds.done, + wait_for_completion_timeout(&ioc->base_cmds.done, FW_IMG_HDR_READ_TIMEOUT*HZ); - ioc_info(ioc, "%s: complete\n", __func__); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: complete\n", + ioc->name, __func__)); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); _debug_dump_mf(mpi_request, - sizeof(Mpi25FWUploadRequest_t)/4); + sizeof(Mpi25FWUploadRequest_t)/4); r = -ETIME; - } else { + } + else { memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t)); if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) { memcpy(&mpi_reply, ioc->base_cmds.reply, - sizeof(Mpi2FWUploadReply_t)); - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & - MPI2_IOCSTATUS_MASK; + sizeof(Mpi2FWUploadReply_t)); + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { - FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data; - if (FWImgHdr->PackageVersion.Word) { - ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n", - FWImgHdr->PackageVersion.Struct.Major, - FWImgHdr->PackageVersion.Struct.Minor, - FWImgHdr->PackageVersion.Struct.Unit, - FWImgHdr->PackageVersion.Struct.Dev); + fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data; + if (le32_to_cpu(fw_img_hdr->Signature) == + MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) { + cmp_img_hdr = (Mpi26ComponentImageHeader_t *)(fwpkg_data); + package_version = + le32_to_cpu(cmp_img_hdr->ApplicationSpecific); } - } else { + else + package_version = + 
le32_to_cpu(fw_img_hdr->PackageVersion.Word); + if (package_version) + printk(MPT3SAS_INFO_FMT "FW Package Version(%02d.%02d.%02d.%02d)\n", + ioc->name, ((package_version) & 0xFF000000) >> 24, + ((package_version) & 0x00FF0000) >> 16, + ((package_version) & 0x0000FF00) >> 8, + (package_version) & 0x000000FF); + } + else { _debug_dump_mf(&mpi_reply, - sizeof(Mpi2FWUploadReply_t)/4); + sizeof(Mpi2FWUploadReply_t)/4); } } } - ioc->base_cmds.status = MPT3_CMD_NOT_USED; -out: + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + out: if (fwpkg_data) - dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, - fwpkg_data_dma); + pci_free_consistent(ioc->pdev, data_length, fwpkg_data, + fwpkg_data_dma); return r; } /** * _base_display_ioc_capabilities - Disply IOC's capabilities. * @ioc: per adapter object + * + * Return nothing. */ static void _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) { int i = 0; char desc[16]; + u8 revision; u32 iounit_pg1_flags; u32 bios_version; - bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); strncpy(desc, ioc->manu_pg0.ChipName, 16); - ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", - desc, - (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, - (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, - (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, - ioc->facts.FWVersion.Word & 0x000000FF, - ioc->pdev->revision, - (bios_version & 0xFF000000) >> 24, - (bios_version & 0x00FF0000) >> 16, - (bios_version & 0x0000FF00) >> 8, - bios_version & 0x000000FF); - + bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + printk(MPT3SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " + "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", + ioc->name, desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, + revision, + (bios_version & 0xFF000000) >> 24, + (bios_version & 0x00FF0000) >> 16, + (bios_version & 0x0000FF00) >> 8, + bios_version & 0x000000FF); _base_display_OEMs_branding(ioc); - if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { - pr_info("%sNVMe", i ? "," : ""); - i++; - } - - ioc_info(ioc, "Protocol=("); + printk(MPT3SAS_INFO_FMT "Protocol=(", ioc->name); if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { - pr_cont("Initiator"); + printk("Initiator"); i++; } if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { - pr_cont("%sTarget", i ? "," : ""); + printk("%sTarget", i ? "," : ""); + i++; + } + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { + printk("%sNVMe", i ? "," : ""); i++; } i = 0; - pr_cont("), Capabilities=("); + printk("), "); + printk("Capabilities=("); - if (!ioc->hide_ir_msg) { - if (ioc->facts.IOCCapabilities & - MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { - pr_cont("Raid"); + if ((!ioc->warpdrive_msg) && (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) { + printk("Raid"); i++; - } } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { - pr_cont("%sTLR", i ? "," : ""); + printk("%sTLR", i ? "," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { - pr_cont("%sMulticast", i ? "," : ""); + printk("%sMulticast", i ? 
"," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { - pr_cont("%sBIDI Target", i ? "," : ""); + printk("%sBIDI Target", i ? "," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { - pr_cont("%sEEDP", i ? "," : ""); + printk("%sEEDP", i ? "," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { - pr_cont("%sSnapshot Buffer", i ? "," : ""); + printk("%sSnapshot Buffer", i ? "," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { - pr_cont("%sDiag Trace Buffer", i ? "," : ""); + printk("%sDiag Trace Buffer", i ? "," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { - pr_cont("%sDiag Extended Buffer", i ? "," : ""); + printk("%sDiag Extended Buffer", i ? "," : ""); i++; } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { - pr_cont("%sTask Set Full", i ? "," : ""); + printk("%sTask Set Full", i ? "," : ""); i++; } iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { - pr_cont("%sNCQ", i ? "," : ""); + printk("%sNCQ", i ? "," : ""); i++; } - pr_cont(")\n"); + printk(")\n"); } /** @@ -4440,6 +6458,8 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) * @device_missing_delay: amount of time till device is reported missing * @io_missing_delay: interval IO is returned when there is a missing device * + * Return nothing. + * * Passed on the command line, this function will modify the device missing * delay, as well as the io missing delay. This should be called at driver * load time. @@ -4464,21 +6484,21 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, sizeof(Mpi2SasIOUnit1PhyData_t)); sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg1) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } @@ -4510,16 +6530,16 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, else dmd_new = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; - ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n", - dmd_orignal, dmd_new); - ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n", - io_missing_delay_original, - io_missing_delay); + printk(MPT3SAS_INFO_FMT "device_missing_delay: old(%d), " + "new(%d)\n", ioc->name, dmd_orignal, dmd_new); + printk(MPT3SAS_INFO_FMT "ioc_missing_delay: old(%d), " + "new(%d)\n", ioc->name, io_missing_delay_original, + io_missing_delay); ioc->device_missing_delay = dmd_new; ioc->io_missing_delay = io_missing_delay; } -out: + out: kfree(sas_iounit_pg1); } @@ -4543,11 +6563,11 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) case MPT_PERF_MODE_DEFAULT: case MPT_PERF_MODE_BALANCED: if (ioc->high_iops_queues) { - ioc_info(ioc, - 
"Enable interrupt coalescing only for first\t" - "%d reply queues\n", - MPT3SAS_HIGH_IOPS_REPLY_QUEUES); - /* + printk(MPT3SAS_INFO_FMT + "Enable interrupt coalescing only for first %d" + " reply queues\n", ioc->name, + MPT3SAS_HIGH_IOPS_REPLY_QUEUES); + /* * If 31st bit is zero then interrupt coalescing is * enabled for all reply descriptor post queues. * If 31st bit is set to one then user can @@ -4559,10 +6579,10 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 | ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1)); mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); - ioc_info(ioc, "performance mode: balanced\n"); + printk(MPT3SAS_INFO_FMT + "performance mode: balanced\n", ioc->name); return; } - /* Fall through */ case MPT_PERF_MODE_LATENCY: /* * Enable interrupt coalescing on all reply queues @@ -4572,15 +6592,16 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING); ioc_pg1.ProductSpecific = 0; mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); - ioc_info(ioc, "performance mode: latency\n"); + printk(MPT3SAS_INFO_FMT + "performance mode: latency\n", ioc->name); break; case MPT_PERF_MODE_IOPS: /* * Enable interrupt coalescing on all reply queues. */ - ioc_info(ioc, + printk(MPT3SAS_INFO_FMT "performance mode: iops with coalescing timeout: 0x%x\n", - le32_to_cpu(ioc_pg1.CoalescingTimeout)); + ioc->name, le32_to_cpu(ioc_pg1.CoalescingTimeout)); ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING); ioc_pg1.ProductSpecific = 0; mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); @@ -4591,42 +6612,51 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) /** * _base_static_config_pages - static start of day config pages * @ioc: per adapter object + * + * Return nothing. */ static void _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) { Mpi2ConfigReply_t mpi_reply; u32 iounit_pg1_flags; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + u32 cap; + int ret; +#endif ioc->nvme_abort_timeout = 30; mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); - if (ioc->ir_firmware) + if (ioc->ir_firmware || ioc->is_warpdrive) mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, &ioc->manu_pg10); - /* - * Ensure correct T10 PI operation if vendor left EEDPTagMode - * flag unset in NVDATA. - */ mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); - if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { - pr_err("%s: overriding NVDATA EEDPTagMode setting\n", - ioc->name); - ioc->manu_pg11.EEDPTagMode &= ~0x3; - ioc->manu_pg11.EEDPTagMode |= 0x1; - mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, - &ioc->manu_pg11); + if ((!ioc->is_gen35_ioc) && (!ioc->disable_eedp_support)) { + /* + * Ensure correct T10 PI operation if vendor left EEDPTagMode + * flag unset in NVDATA. 
+
 	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
 		ioc->tm_custom_handling = 1;
-	else {
+	else {
 		ioc->tm_custom_handling = 0;
 		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
 			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
-		else if (ioc->manu_pg11.NVMeAbortTO >
-		    NVME_TASK_ABORT_MAX_TIMEOUT)
+		else if (ioc->manu_pg11.NVMeAbortTO > NVME_TASK_ABORT_MAX_TIMEOUT)
 			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
-		else
+		else
 			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
 	}
@@ -4638,13 +6668,16 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
 	mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
 	_base_display_ioc_capabilities(ioc);
 
+#if defined(CPQ_CIM)
+	mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1);
+#endif
 	/*
 	 * Enable task_set_full handling in iounit_pg1 when the
 	 * facts capabilities indicate that its supported.
 	 */
 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
 	if ((ioc->facts.IOCCapabilities &
-	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
+	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
 		iounit_pg1_flags &=
 		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
 	else
@@ -4652,18 +6685,40 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
 		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
 	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
 	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+
-	if (ioc->iounit_pg8.NumSensors)
+	if(ioc->iounit_pg8.NumSensors)
 		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	/* Enable the PCIe extended tag feature if the HBA supports it.
+	 * On Aero/SEA cards this feature sometimes gets disabled
+	 * on kernels less than 4.11. */
+	pcie_capability_read_dword(ioc->pdev, PCI_EXP_DEVCAP, &cap);
+	if (cap & PCI_EXP_DEVCAP_EXT_TAG) {
+		ret = pcie_capability_set_word(ioc->pdev, PCI_EXP_DEVCTL,
+		    PCI_EXP_DEVCTL_EXT_TAG);
+		if (!ret)
+			dev_info(&ioc->pdev->dev,
+			    "Enabled Extended Tags as Controller Supports\n");
+		else
+			dev_info(&ioc->pdev->dev,
+			    "Unable to Enable Extended Tags feature\n");
+	}
+#endif
 	if (ioc->is_aero_ioc)
 		_base_update_ioc_page1_inlinewith_perf_mode(ioc);
 }
+
 /**
  * mpt3sas_free_enclosure_list - release memory
  * @ioc: per adapter object
  *
 * Free memory allocated during encloure add.
+ *
+ * Return nothing.
 */
 void
 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -4683,99 +6738,142 @@ mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
  * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
+ *
+ * Return nothing.
 */
 static void
 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 {
-	int i = 0;
-	int j = 0;
+	int i, j;
 	struct chain_tracker *ct;
-	struct reply_post_struct *rps;
+	u16 set_of_rdpqs = (ioc->hba_mpi_version_belonged == MPI26_VERSION) ? 16 : 8;
 
-	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
+	dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name,
+	    __func__));
 
 	if (ioc->request) {
-		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
+		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
 		    ioc->request, ioc->request_dma);
-		dexitprintk(ioc,
-		    ioc_info(ioc, "request_pool(0x%p): free\n",
-		    ioc->request));
+		dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "request_pool(0x%p)"
+		    ": free\n", ioc->name, ioc->request));
 		ioc->request = NULL;
 	}
 
 	if (ioc->sense) {
-		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
-		dma_pool_destroy(ioc->sense_dma_pool);
-		dexitprintk(ioc,
-		    ioc_info(ioc, "sense_pool(0x%p): free\n",
-		    ioc->sense));
+		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+		if (ioc->sense_dma_pool)
+			pci_pool_destroy(ioc->sense_dma_pool);
+		dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "sense_pool(0x%p)"
+		    ": free\n", ioc->name, ioc->sense));
 		ioc->sense = NULL;
 	}
 
 	if (ioc->reply) {
-		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
-		dma_pool_destroy(ioc->reply_dma_pool);
-		dexitprintk(ioc,
-		    ioc_info(ioc, "reply_pool(0x%p): free\n",
-		    ioc->reply));
+		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+		if (ioc->reply_dma_pool)
+			pci_pool_destroy(ioc->reply_dma_pool);
+		dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_pool(0x%p)"
+		    ": free\n", ioc->name, ioc->reply));
 		ioc->reply = NULL;
 	}
 
 	if (ioc->reply_free) {
-		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
 		    ioc->reply_free_dma);
-		dma_pool_destroy(ioc->reply_free_dma_pool);
-		dexitprintk(ioc,
-		    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
-		    ioc->reply_free));
+		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+		    ioc->reply_free_dma);
+		if (ioc->reply_free_dma_pool)
+			pci_pool_destroy(ioc->reply_free_dma_pool);
+		dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free_pool"
+		    "(0x%p): free\n", ioc->name, ioc->reply_free));
 		ioc->reply_free = NULL;
 	}
 
 	if (ioc->reply_post) {
-		do {
-			rps = &ioc->reply_post[i];
-			if (rps->reply_post_free) {
-				dma_pool_free(
-				    ioc->reply_post_free_dma_pool,
-				    rps->reply_post_free,
-				    rps->reply_post_free_dma);
-				dexitprintk(ioc,
-				    ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
-				    rps->reply_post_free));
-				rps->reply_post_free = NULL;
+		if (ioc->rdpq_array_enable) {
+			for (i = 0; i < ioc->reply_queue_count; i++) {
+				if ((i % set_of_rdpqs == 0) && ((i + (set_of_rdpqs - 1)) < ioc->reply_queue_count))
+				{
+					if (ioc->reply_post[i].reply_post_free) {
+						pci_pool_free(
+						    ioc->reply_post[i].dma_pool,
+						    ioc->reply_post[i].reply_post_free,
+						    ioc->reply_post[i].reply_post_free_dma);
+						dexitprintk(ioc, printk(MPT3SAS_INFO_FMT
+						    "reply_post_free_pool(0x%p): free\n",
+						    ioc->name,
+						    ioc->reply_post[i].reply_post_free));
+						ioc->reply_post[i].reply_post_free = NULL;
+					}
+				}
+				else if (i % set_of_rdpqs == 0) {
+					if (ioc->reply_post[i].reply_post_free) {
+						pci_pool_free(
+						    ioc->reply_post[i].dma_pool,
+						    ioc->reply_post[i].reply_post_free,
+						    ioc->reply_post[i].reply_post_free_dma);
+						dexitprintk(ioc, printk(MPT3SAS_INFO_FMT
+						    "reply_post_free_pool(0x%p): free\n",
+						    ioc->name,
+						    ioc->reply_post[i].reply_post_free));
+						ioc->reply_post[i].reply_post_free = NULL;
+					}
+				}
+			}
+
+			if (ioc->reply_post_free_array) {
+				pci_pool_free(ioc->reply_post_free_array_dma_pool,
+				    ioc->reply_post_free_array, ioc->reply_post_free_array_dma);
+				ioc->reply_post_free_array = NULL;
+			}
+			if (ioc->reply_post_free_array_dma_pool)
+				pci_pool_destroy(ioc->reply_post_free_array_dma_pool);
+		} else {
+			if (ioc->reply_post[0].reply_post_free) {
+				pci_pool_free(ioc->reply_post_free_dma_pool,
+				    ioc->reply_post[0].reply_post_free,
+				    ioc->reply_post[0].reply_post_free_dma);
+				dexitprintk(ioc, printk(MPT3SAS_INFO_FMT
+				    "reply_post_free_pool(0x%p): free\n", ioc->name,
+				    ioc->reply_post[0].reply_post_free));
+				ioc->reply_post[0].reply_post_free = NULL;
 			}
-		} while (ioc->rdpq_array_enable &&
-		    (++i < ioc->reply_queue_count));
-		if (ioc->reply_post_free_array &&
-		    ioc->rdpq_array_enable) {
-			dma_pool_free(ioc->reply_post_free_array_dma_pool,
-			    ioc->reply_post_free_array,
-			    ioc->reply_post_free_array_dma);
-			ioc->reply_post_free_array = NULL;
 		}
-		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
-		dma_pool_destroy(ioc->reply_post_free_dma_pool);
+		if (ioc->reply_post_free_dma_pool)
+			pci_pool_destroy(ioc->reply_post_free_dma_pool);
+		if (ioc->reply_post_free_dma_pool_align)
+			pci_pool_destroy(ioc->reply_post_free_dma_pool_align);
+		if (ioc->reply_post_free_dma_pool_mod_rdpq_set)
+			pci_pool_destroy(ioc->reply_post_free_dma_pool_mod_rdpq_set);
+		if (ioc->reply_post_free_dma_pool_mod_rdpq_set_align)
+			pci_pool_destroy(ioc->reply_post_free_dma_pool_mod_rdpq_set_align);
 		kfree(ioc->reply_post);
 	}
-
-	if (ioc->pcie_sgl_dma_pool) {
+
+	if(ioc->pcie_sgl_dma_pool) {
 		for (i = 0; i < ioc->scsiio_depth; i++) {
-			dma_pool_free(ioc->pcie_sgl_dma_pool,
-			    ioc->pcie_sg_lookup[i].pcie_sgl,
-			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+			pci_pool_free(ioc->pcie_sgl_dma_pool,
+			    ioc->pcie_sg_lookup[i].pcie_sgl,
+			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
 		}
 		if (ioc->pcie_sgl_dma_pool)
-			dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+			pci_pool_destroy(ioc->pcie_sgl_dma_pool);
 	}
+	if (ioc->pcie_sg_lookup)
+		kfree(ioc->pcie_sg_lookup);
+
 	if (ioc->config_page) {
-		dexitprintk(ioc,
-		    ioc_info(ioc, "config_page(0x%p): free\n",
-		    ioc->config_page));
-		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
+		dexitprintk(ioc, printk(MPT3SAS_INFO_FMT
+		    "config_page(0x%p): free\n", ioc->name,
+		    ioc->config_page));
+		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
 		    ioc->config_page, ioc->config_page_dma);
 	}
-
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0))
+	if (ioc->scsi_lookup) {
+		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+		ioc->scsi_lookup = NULL;
+	}
+#endif
 	kfree(ioc->hpr_lookup);
 	kfree(ioc->internal_lookup);
 	if (ioc->chain_lookup) {
@@ -4784,13 +6882,14 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		    j < ioc->chains_needed_per_io; j++) {
 			ct = &ioc->chain_lookup[i].chains_per_smid[j];
 			if (ct && ct->chain_buffer)
-				dma_pool_free(ioc->chain_dma_pool,
-				    ct->chain_buffer,
-				    ct->chain_buffer_dma);
+				pci_pool_free(ioc->chain_dma_pool,
+				    ct->chain_buffer,
+				    ct->chain_buffer_dma);
 		}
 		kfree(ioc->chain_lookup[i].chains_per_smid);
 	}
-	dma_pool_destroy(ioc->chain_dma_pool);
+	if (ioc->chain_dma_pool)
+		pci_pool_destroy(ioc->chain_dma_pool);
 	kfree(ioc->chain_lookup);
 	ioc->chain_lookup = NULL;
 }
@@ -4798,23 +6897,24 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 /**
  * is_MSB_are_same - checks whether all reply queues in a set are
- *	having same upper 32bits in their base memory address.
+ *	having same upper 32bits in their base memory address.
  * @reply_pool_start_address: Base address of a reply queue set
  * @pool_sz: Size of single Reply Descriptor Post Queues pool size
 *
- * Return: 1 if reply queues in a set have a same upper 32bits in their base
- * memory address, else 0.
+ * Returns 1 if reply queues in a set have a same upper 32bits in their base memory address,
+ * else 0
 */
 
 static int
 is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
 {
 	long reply_pool_end_address;
+	unsigned long bit_divisor_16 = 0x10000;
 
 	reply_pool_end_address = reply_pool_start_address + pool_sz;
 
-	if (upper_32_bits(reply_pool_start_address) ==
-	    upper_32_bits(reply_pool_end_address))
+	if (((reply_pool_start_address / bit_divisor_16) / (bit_divisor_16)) ==
+	    ((reply_pool_end_address / bit_divisor_16) / bit_divisor_16))
 		return 1;
 	else
 		return 0;
 }
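/*
 * Worked example for is_MSB_are_same() (illustrative addresses): dividing
 * by 0x10000 twice is effectively a 32-bit right shift, so the check
 * compares the upper 32 bits of the first and last byte of the pool.
 *     start = 0x00000001FFFFF000, pool_sz = 0x2000
 *     end   = 0x0000000200001000
 *     start >> 32 = 0x1, end >> 32 = 0x2  ->  returns 0 (crosses 4GB)
 * whereas the same pool based at 0x00000001FFFF0000 ends at
 * 0x00000001FFFF2000, both halves are 0x1, and the function returns 1.
 */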
@@ -4824,7 +6924,7 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
 /**
  * _base_allocate_memory_pools - allocate start of day memory pools
  * @ioc: per adapter object
 *
- * Return: 0 success, anything else error.
+ * Returns 0 success, anything else error
 */
 static int
 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4832,22 +6932,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	struct mpt3sas_facts *facts;
 	u16 max_sge_elements;
 	u16 chains_needed_per_io;
-	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
-	u32 retry_sz;
+	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz, mod_sz = 0, rc = 0;
+	u32 retry_sz, reduce_sz;
 	u16 max_request_credit, nvme_blocks_needed;
 	unsigned short sg_tablesize;
 	u16 sge_size;
+	u16 set_of_rdpqs = (ioc->hba_mpi_version_belonged == MPI26_VERSION) ? 16 : 8;
+#if defined(TARGET_MODE)
+	int num_cmd_buffers;
+#endif
 	int i, j;
 	struct chain_tracker *ct;
 
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name,
+	    __func__));
-	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
-
+#if defined(TARGET_MODE)
+	num_cmd_buffers = min_t(int, NUM_CMD_BUFFERS,
+	    ioc->pfacts[0].MaxPostedCmdBuffers);
+#endif
 	retry_sz = 0;
 	facts = &ioc->facts;
 
-	/* command line tunables for max sgl entries */
-	if (max_sgl_entries != -1)
+	if (max_sgl_entries != -1)
 		sg_tablesize = max_sgl_entries;
 	else {
 		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
@@ -4855,52 +6962,67 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		else
 			sg_tablesize = MPT3SAS_SG_DEPTH;
 	}
-	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
 	if (reset_devices)
 		sg_tablesize = min_t(unsigned short, sg_tablesize,
-		    MPT_KDUMP_MIN_PHYS_SEGMENTS);
-
-	if (ioc->is_mcpu_endpoint)
-		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
-	else {
-		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
-			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
-		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
-			sg_tablesize = min_t(unsigned short, sg_tablesize,
-			    SG_MAX_SEGMENTS);
-			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
-			    sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
-		}
-		ioc->shost->sg_tablesize = sg_tablesize;
+	    MPT_KDUMP_MIN_PHYS_SEGMENTS);
+	if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
+		sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
+	else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
+		sg_tablesize = min_t(unsigned short, sg_tablesize,
+		    MPT_MAX_SG_SEGMENTS);
+		printk(MPT3SAS_WARN_FMT
+		    "sg_tablesize(%u) is bigger than kernel"
+		    " defined %s(%u)\n", ioc->name,
+		    sg_tablesize, MPT_MAX_PHYS_SEGMENTS_STRING, MPT_MAX_PHYS_SEGMENTS);
+#else
+		sg_tablesize = MPT_MAX_PHYS_SEGMENTS;
+#endif
 	}
+	if(ioc->is_mcpu_endpoint)
+		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
+	else
+		ioc->shost->sg_tablesize = sg_tablesize;
+
+#if defined(TARGET_MODE)
+	/* allocating 5 extra mf's */
+	ioc->internal_depth = min_t(int,
+	    (max_t(int, facts->HighPriorityCredit, num_cmd_buffers) + (5)),
+	    (facts->RequestCredit / 4));
+#else
 	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
 	    (facts->RequestCredit / 4));
+#endif
 	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
 		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
-		    INTERNAL_SCSIIO_CMDS_COUNT)) {
-			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
-			    facts->RequestCredit);
+		    INTERNAL_SCSIIO_CMDS_COUNT)) {
+			printk(MPT3SAS_ERR_FMT "IOC doesn't have enough"
+			    " RequestCredits, it has just %d number of credits\n",
+			    ioc->name, facts->RequestCredit);
 			return -ENOMEM;
-		}
+		}
 		ioc->internal_depth = 10;
 	}
 
 	ioc->hi_priority_depth = ioc->internal_depth - (5);
-	/* command line tunables for max controller queue depth */
+
+	/* command line tunables for max controller queue depth */
 	if (max_queue_depth != -1 && max_queue_depth != 0) {
 		max_request_credit = min_t(u16, max_queue_depth +
-		    ioc->internal_depth, facts->RequestCredit);
+		    ioc->internal_depth, facts->RequestCredit);
 		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
 			max_request_credit = MAX_HBA_QUEUE_DEPTH;
-	} else if (reset_devices)
+	}
+	else if (reset_devices)
 		max_request_credit = min_t(u16, facts->RequestCredit,
-		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
+		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
 	else
 		max_request_credit = min_t(u16, facts->RequestCredit,
 		    MAX_HBA_QUEUE_DEPTH);
 
+retry:
 	/* Firmware maintains additional facts->HighPriorityCredit number of
 	 * credits for HiPriprity Request messages, so hba queue depth will be
 	 * sum of max_request_credit and high priority queue depth.
@@ -4912,19 +7034,16 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	/* reply frame size */
 	ioc->reply_sz = facts->ReplyFrameSize * 4;
 
-	/* chain segment size */
 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
 		if (facts->IOCMaxChainSegmentSize)
-			ioc->chain_segment_sz =
-			    facts->IOCMaxChainSegmentSize *
-			    MAX_CHAIN_ELEMT_SZ;
+			ioc->chain_segment_sz = facts->IOCMaxChainSegmentSize * MAX_CHAIN_ELEMT_SZ;
 		else
-			/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
-			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
-			    MAX_CHAIN_ELEMT_SZ;
-	} else
-		ioc->chain_segment_sz = ioc->request_sz;
+			/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
+			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * MAX_CHAIN_ELEMT_SZ;
+	}
+	else
+		ioc->chain_segment_sz = ioc->request_sz;
 
 	/* calculate the max scatter element size */
 	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
@@ -4932,8 +7051,18 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 retry_allocation:
 	total_sz = 0;
 	/* calculate number of sg elements left over in the 1st frame */
-	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
-	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
+	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
+		max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+		    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
+	}
+	else {
+		/* reserve 2 SGE's, one for chain SGE and
+		 * another for SGL1 (i.e. for meta data)
+		 */
+		max_sge_elements = ioc->request_sz -
+		    ((sizeof(Mpi25SCSIIORequest_t) -
+		    sizeof(Mpi25SGEIOUnion_t)) + 2 * sge_size);
+	}
 	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
 
 	/* now do the same for a chain buffer */
@@ -4952,91 +7081,42 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		    ioc->max_sges_in_main_message +
 		    (ioc->max_sges_in_chain_message * chains_needed_per_io),
 		    ioc->shost->sg_tablesize);
 	}
-	ioc->chains_needed_per_io = chains_needed_per_io;
+
+	/* Double the chains if DIX support is enabled for Meta data SGLs */
+	if ((prot_mask & 0x78) && ioc->hba_mpi_version_belonged != MPI2_VERSION)
+		ioc->chains_needed_per_io = chains_needed_per_io * 2;
+	else
+		ioc->chains_needed_per_io = chains_needed_per_io;
 
 	/* reply free queue sizing - taking into account for 64 FW events */
 	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 
 	/* mCPU manage single counters for simplicity */
-	if (ioc->is_mcpu_endpoint)
+	if(ioc->is_mcpu_endpoint)
 		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
 	else {
 		/* calculate reply descriptor post queue depth */
 		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
-		    ioc->reply_free_queue_depth + 1;
+		    ioc->reply_free_queue_depth + 1 ;
 		/* align the reply post queue on the next 16 count boundary */
 		if (ioc->reply_post_queue_depth % 16)
-			ioc->reply_post_queue_depth += 16 -
-			    (ioc->reply_post_queue_depth % 16);
+			ioc->reply_post_queue_depth += 16 - (ioc->reply_post_queue_depth % 16);
 	}
+
 	if (ioc->reply_post_queue_depth >
 	    facts->MaxReplyDescriptorPostQueueDepth) {
-		ioc->reply_post_queue_depth =
-		    facts->MaxReplyDescriptorPostQueueDepth -
+		ioc->reply_post_queue_depth = facts->MaxReplyDescriptorPostQueueDepth -
 		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
-		ioc->hba_queue_depth =
-		    ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+		ioc->hba_queue_depth = ((ioc->reply_post_queue_depth - 64) / 2) - 1;
 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 	}
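/*
 * Worked example of the reply post queue sizing above (illustrative
 * numbers): with hba_queue_depth = 1000,
 *     reply_free_queue_depth = 1000 + 64       = 1064
 *     reply_post_queue_depth = 1000 + 1064 + 1 = 2065
 * 2065 % 16 == 1, so the depth is rounded up by 15 to 2080, keeping the
 * reply descriptor post queue on the 16-entry boundary the hardware
 * expects.
 */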
-	dinitprintk(ioc,
-	    ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
-	    ioc->max_sges_in_main_message,
-	    ioc->max_sges_in_chain_message,
-	    ioc->shost->sg_tablesize,
-	    ioc->chains_needed_per_io));
-
-	/* reply post queue, 16 byte align */
-	reply_post_free_sz = ioc->reply_post_queue_depth *
-	    sizeof(Mpi2DefaultReplyDescriptor_t);
-
-	sz = reply_post_free_sz;
-	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
-		sz *= ioc->reply_queue_count;
-
-	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
-	    (ioc->reply_queue_count):1,
-	    sizeof(struct reply_post_struct), GFP_KERNEL);
-
-	if (!ioc->reply_post) {
-		ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
-		goto out;
-	}
-	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
-	    &ioc->pdev->dev, sz, 16, 0);
-	if (!ioc->reply_post_free_dma_pool) {
-		ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
-		goto out;
-	}
-	i = 0;
-	do {
-		ioc->reply_post[i].reply_post_free =
-		    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
-		    GFP_KERNEL,
-		    &ioc->reply_post[i].reply_post_free_dma);
-		if (!ioc->reply_post[i].reply_post_free) {
-			ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
-			goto out;
-		}
-		dinitprintk(ioc,
-		    ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-		    ioc->reply_post[i].reply_post_free,
-		    ioc->reply_post_queue_depth,
-		    8, sz / 1024));
-		dinitprintk(ioc,
-		    ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
-		    (u64)ioc->reply_post[i].reply_post_free_dma));
-		total_sz += sz;
-	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
-
-	if (ioc->dma_mask > 32) {
-		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
-			ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
-			    pci_name(ioc->pdev));
-			goto out;
-		}
-	}
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "scatter gather: "
+	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
+	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
+	    ioc->chains_needed_per_io));
 
 	ioc->scsiio_depth = ioc->hba_queue_depth -
 	    ioc->hi_priority_depth - ioc->internal_depth;
@@ -5044,16 +7124,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	/* set the scsi host can_queue depth
 	 * with some internal commands that could be outstanding
 	 */
+#if defined(TARGET_MODE)
+	/* allocating 2 extra mf's */
+	ioc->shost->can_queue = ioc->scsiio_depth -
+	    (num_cmd_buffers + INTERNAL_SCSIIO_CMDS_COUNT);
+#else
 	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
-	dinitprintk(ioc,
-	    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
-	    ioc->shost->can_queue));
-
-
+#endif
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "scsi host: "
+	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
+
 	/* contiguous pool for request and chains, 16 byte align, one extra "
 	 * "frame for smid=0
 	 */
-	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
 	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
 
 	/* hi-priority queue */
@@ -5063,25 +7146,34 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	sz += (ioc->internal_depth * ioc->request_sz);
 
 	ioc->request_dma_sz = sz;
-	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
-	    &ioc->request_dma, GFP_KERNEL);
+	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
 	if (!ioc->request) {
-		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
-		    ioc->hba_queue_depth, ioc->chains_needed_per_io,
-		    ioc->request_sz, sz / 1024);
-		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
+		printk(MPT3SAS_ERR_FMT "request pool: pci_alloc_consistent "
+		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
+		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) {
+			rc = -ENOMEM;
 			goto out;
+		}
 		retry_sz = 64;
-		ioc->hba_queue_depth -= retry_sz;
-		_base_release_memory_pools(ioc);
-		goto retry_allocation;
+		if((ioc->hba_queue_depth - retry_sz) >
+		    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+			ioc->hba_queue_depth -= retry_sz;
+			goto retry_allocation;
+		}
+		else {
+			rc = -ENOMEM;
+			goto out;
+		}
 	}
 	memset(ioc->request, 0, sz);
 
 	if (retry_sz)
-		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
-		    ioc->hba_queue_depth, ioc->chains_needed_per_io,
-		    ioc->request_sz, sz / 1024);
+		printk(MPT3SAS_ERR_FMT "request pool: pci_alloc_consistent "
+		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
+		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
 
 	/* hi-priority queue */
 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -5095,113 +7187,166 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
 	    ioc->request_sz);
 
-	dinitprintk(ioc,
-	    ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-	    ioc->request, ioc->hba_queue_depth,
-	    ioc->request_sz,
-	    (ioc->hba_queue_depth * ioc->request_sz) / 1024));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "request pool(0x%p): "
+	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
+	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
+	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
 
-	dinitprintk(ioc,
-	    ioc_info(ioc, "request pool: dma(0x%llx)\n",
-	    (unsigned long long)ioc->request_dma));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "request pool: dma(0x%llx)\n",
+	    ioc->name, (unsigned long long) ioc->request_dma));
 	total_sz += sz;
 
-	dinitprintk(ioc,
-	    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
-	    ioc->request, ioc->scsiio_depth));
-
-	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
-	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
-	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
-	if (!ioc->chain_lookup) {
-		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
-		goto out;
-	}
-
-	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
-	for (i = 0; i < ioc->scsiio_depth; i++) {
-		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
-		if (!ioc->chain_lookup[i].chains_per_smid) {
-			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0))
+	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
+	ioc->scsi_lookup_pages = get_order(sz);
+	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
+	    GFP_KERNEL, ioc->scsi_lookup_pages);
+	if (!ioc->scsi_lookup) {
+		// Retry allocating memory by reducing the queue depth
+		if ((max_request_credit - 64) >
+		    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+			max_request_credit -= 64;
+			if (ioc->request) {
+				pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+				    ioc->request, ioc->request_dma);
+				ioc->request = NULL;
+			}
+			goto retry;
+		}
+		else {
+			printk(MPT3SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
+			    "sz(%d)\n", ioc->name, (int)sz);
+			rc = -ENOMEM;
 			goto out;
 		}
 	}
+#endif
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "scsiio(0x%p): "
+	    "depth(%d)\n", ioc->name, ioc->request,
+	    ioc->scsiio_depth));
 
 	/* initialize hi-priority queue smid's */
 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->hpr_lookup) {
-		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
+		printk(MPT3SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
+		    ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
-	dinitprintk(ioc,
-	    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
-	    ioc->hi_priority,
-	    ioc->hi_priority_depth, ioc->hi_priority_smid));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "hi_priority(0x%p): "
+	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
+	    ioc->hi_priority_depth, ioc->hi_priority_smid));
 
 	/* initialize internal queue smid's */
 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->internal_lookup) {
-		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
+		printk(MPT3SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
+		    ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
-	dinitprintk(ioc,
-	    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
-	    ioc->internal,
-	    ioc->internal_depth, ioc->internal_smid));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "internal(0x%p): "
+	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
+	    ioc->internal_depth, ioc->internal_smid));
+
+	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
+
+	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
+	if (!ioc->chain_lookup) {
+		// Retry allocating memory by reducing the queue depth
+		if ((max_request_credit - 64) >
+		    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+			max_request_credit -= 64;
+			_base_release_memory_pools(ioc);
+			goto retry;
+		}
+		else {
+			printk(MPT3SAS_ERR_FMT "chain_lookup: __get_free_pages "
+			    "failed\n", ioc->name);
+			rc = -ENOMEM;
+			goto out;
+		}
+	}
+
+	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
+	for (i = 0; i < ioc->scsiio_depth; i++) {
+		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
+		if (!ioc->chain_lookup[i].chains_per_smid) {
+			// Retry allocating memory by reducing the queue depth
+			if ((max_request_credit - 64) >
+			    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+				max_request_credit -= 64;
+				_base_release_memory_pools(ioc);
+				goto retry;
+			}
+			else {
+				printk(MPT3SAS_ERR_FMT "chain_lookup: "
				    " kzalloc failed\n", ioc->name);
+				rc = -ENOMEM;
+				goto out;
+			}
+		}
	}
+
 	/*
-	 * The number of NVMe page sized blocks needed is:
-	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
-	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
-	 * that is placed in the main message frame. 8 is the size of each PRP
-	 * entry or PRP list pointer entry. 8 is subtracted from page_size
-	 * because of the PRP list pointer entry at the end of a page, so this
-	 * is not counted as a PRP entry. The 1 added page is a round up.
-	 *
-	 * To avoid allocation failures due to the amount of memory that could
-	 * be required for NVMe PRP's, only each set of NVMe blocks will be
-	 * contiguous, so a new set is allocated for each possible I/O.
-	 */
+	 * The number of NVMe page sized blocks needed is:
+	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
+	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
+	 * that is placed in the main message frame. 8 is the size of each PRP
+	 * entry or PRP list pointer entry. 8 is subtracted from page_size
+	 * because of the PRP list pointer entry at the end of a page, so this
+	 * is not counted as a PRP entry. The 1 added page is a round up.
+	 *
+	 * To avoid allocation failures due to the amount of memory that could
+	 * be required for NVMe PRP's, only each set of NVMe blocks will be
+	 * contiguous, so a new set is allocated for each possible I/O.
+	 */
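/*
 * Worked example of the block count above (illustrative values: a 4 KiB
 * controller page size and sg_tablesize == 512):
 *     nvme_blocks_needed = (512 * 8) - 1 = 4095
 *     4095 / (4096 - 8)  = 4095 / 4088  = 1, then +1  ->  2 blocks
 * so each possible I/O gets a contiguous 2-page (8 KiB) PRP set, and
 * chains_per_prp_buffer below is how many chain segments fit in the slack
 * of that same allocation.
 */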
 	ioc->chains_per_prp_buffer = 0;
-	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
-		nvme_blocks_needed =
-		    (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
+	if(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
+		nvme_blocks_needed = (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
 		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
 		nvme_blocks_needed++;
 
 		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
 		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->pcie_sg_lookup) {
-			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
+			printk(MPT3SAS_ERR_FMT "PCIe SGL lookup: kzalloc "
+			    "failed\n", ioc->name);
+			rc = -ENOMEM;
 			goto out;
 		}
+
 		sz = nvme_blocks_needed * ioc->page_size;
-		ioc->pcie_sgl_dma_pool =
-		    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
+		ioc->pcie_sgl_dma_pool = pci_pool_create("PCIe SGL pool", ioc->pdev, sz,
+		    ioc->page_size, 0);
 		if (!ioc->pcie_sgl_dma_pool) {
-			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+			printk(MPT3SAS_ERR_FMT "PCIe SGL pool: pci_pool_create failed\n",
+			    ioc->name);
+			rc = -ENOMEM;
 			goto out;
 		}
 
 		ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
-		ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
-		    ioc->chains_needed_per_io);
+		ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
 
 		for (i = 0; i < ioc->scsiio_depth; i++) {
-			ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+			ioc->pcie_sg_lookup[i].pcie_sgl = pci_pool_alloc(
 			    ioc->pcie_sgl_dma_pool, GFP_KERNEL,
 			    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
 			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+				printk(MPT3SAS_ERR_FMT "PCIe SGL pool: pci_pool_alloc "
+				    "failed\n", ioc->name);
+				rc = -ENOMEM;
 				goto out;
 			}
 			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
 				ct = &ioc->chain_lookup[i].chains_per_smid[j];
 				ct->chain_buffer =
-				    ioc->pcie_sg_lookup[i].pcie_sgl +
+				    ioc->pcie_sg_lookup[i].pcie_sgl +
 				    (j * ioc->chain_segment_sz);
 				ct->chain_buffer_dma =
 				    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
@@ -5209,188 +7354,435 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			}
 		}
 
-		dinitprintk(ioc,
-		    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
-		    ioc->scsiio_depth, sz,
-		    (sz * ioc->scsiio_depth) / 1024));
-		dinitprintk(ioc,
-		    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
-		    ioc->chains_per_prp_buffer));
+		dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "PCIe sgl pool depth(%d), "
+		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
+		    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+		dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "Number of chains can "
+		    "fit in a PRP page(%d)\n", ioc->name,
+		    ioc->chains_per_prp_buffer));
 		total_sz += sz * ioc->scsiio_depth;
 	}
 
-	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
-	    ioc->chain_segment_sz, 16, 0);
+	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+	    ioc->chain_segment_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
-		ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
+		printk(MPT3SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
+		    "failed\n", ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
 	for (i = 0; i < ioc->scsiio_depth; i++) {
-		for (j = ioc->chains_per_prp_buffer;
-		    j < ioc->chains_needed_per_io; j++) {
+		for (j = ioc->chains_per_prp_buffer; j < ioc->chains_needed_per_io; j++) {
 			ct = &ioc->chain_lookup[i].chains_per_smid[j];
-			ct->chain_buffer = dma_pool_alloc(
-			    ioc->chain_dma_pool, GFP_KERNEL,
-			    &ct->chain_buffer_dma);
+			ct->chain_buffer = pci_pool_alloc(ioc->chain_dma_pool,
+			    GFP_KERNEL, &ct->chain_buffer_dma);
 			if (!ct->chain_buffer) {
-				ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-				_base_release_memory_pools(ioc);
-				goto out;
+				if ((max_request_credit - 64) >
+				    (ioc->internal_depth +
+				    INTERNAL_SCSIIO_CMDS_COUNT)) {
+					max_request_credit -= 64;
+					_base_release_memory_pools(ioc);
+					goto retry_allocation;
+				} else {
+					printk(MPT3SAS_ERR_FMT "chain_lookup: "
+					    " pci_pool_alloc failed\n", ioc->name);
+					rc = -ENOMEM;
+					goto out;
+				}
 			}
+			total_sz += ioc->chain_segment_sz;
 		}
-		total_sz += ioc->chain_segment_sz;
 	}
 
-	dinitprintk(ioc,
-	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-	    ioc->chain_depth, ioc->chain_segment_sz,
-	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "chain_lookup depth"
+	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
+	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
+	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
+	    ioc->chain_segment_sz))/1024));
 
 	/* sense buffers, 4 byte align */
 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
-	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
-	    4, 0);
+	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
+	    0);
 	if (!ioc->sense_dma_pool) {
-		ioc_err(ioc, "sense pool: dma_pool_create failed\n");
+		printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
+		    ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
-	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
+	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
 	    &ioc->sense_dma);
 	if (!ioc->sense) {
-		ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
+		printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
+		    ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
 	/* sense buffer requires to be in same 4 gb region.
 	 * Below function will check the same.
-	 * In case of failure, new pci pool will be created with updated
-	 * alignment. Older allocation and pool will be destroyed.
-	 * Alignment will be used such a way that next allocation if
-	 * success, will always meet same 4gb region requirement.
+	 * In case of failure, new pci pool will be created with updated alignment.
+	 * Older allocation and pool will be destroyed.
+	 * Alignment will be used such a way that next allocation if success, will
+	 * always meet same 4gb region requirement.
 	 * Actual requirement is not alignment, but we need start and end of
 	 * DMA address must have same upper 32 bit address.
 	 */
 	if (!is_MSB_are_same((long)ioc->sense, sz)) {
-		//Release Sense pool & Reallocate
-		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
-		dma_pool_destroy(ioc->sense_dma_pool);
+		//Release Sense pool & Allocations
+		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+		pci_pool_destroy(ioc->sense_dma_pool);
 		ioc->sense = NULL;
 
 		ioc->sense_dma_pool =
-		    dma_pool_create("sense pool", &ioc->pdev->dev, sz,
-		    roundup_pow_of_two(sz), 0);
+		    pci_pool_create("sense pool", ioc->pdev, sz,
+		    roundup_pow_of_two(sz), 0);
 		if (!ioc->sense_dma_pool) {
-			ioc_err(ioc, "sense pool: pci_pool_create failed\n");
+			printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
+			    ioc->name);
+			rc = -ENOMEM;
 			goto out;
 		}
-		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
+		ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
 		    &ioc->sense_dma);
 		if (!ioc->sense) {
-			ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
+			printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
+			    ioc->name);
+			rc = -ENOMEM;
 			goto out;
 		}
 	}
-	dinitprintk(ioc,
-	    ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-	    ioc->sense, ioc->scsiio_depth,
-	    SCSI_SENSE_BUFFERSIZE, sz / 1024));
-	dinitprintk(ioc,
-	    ioc_info(ioc, "sense_dma(0x%llx)\n",
-	    (unsigned long long)ioc->sense_dma));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT
+	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
+	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
+	    SCSI_SENSE_BUFFERSIZE, sz/1024));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "sense_dma(0x%llx)\n",
+	    ioc->name, (unsigned long long)ioc->sense_dma));
 	total_sz += sz;
 
 	/* reply pool, 4 byte align */
 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
-	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
-	    4, 0);
+	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
+	    0);
 	if (!ioc->reply_dma_pool) {
-		ioc_err(ioc, "reply pool: dma_pool_create failed\n");
+		printk(MPT3SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
+		    ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
-	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
 	    &ioc->reply_dma);
 	if (!ioc->reply) {
-		ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
+		printk(MPT3SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
+		    ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-	dinitprintk(ioc,
-	    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-	    ioc->reply, ioc->reply_free_queue_depth,
-	    ioc->reply_sz, sz / 1024));
-	dinitprintk(ioc,
-	    ioc_info(ioc, "reply_dma(0x%llx)\n",
-	    (unsigned long long)ioc->reply_dma));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply pool(0x%p): depth"
+	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
+	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_dma(0x%llx)\n",
+	    ioc->name, (unsigned long long)ioc->reply_dma));
 	total_sz += sz;
 
 	/* reply free queue, 16 byte align */
 	sz = ioc->reply_free_queue_depth * 4;
-	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
-	    &ioc->pdev->dev, sz, 16, 0);
+	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
+	    ioc->pdev, sz, 16, 0);
 	if (!ioc->reply_free_dma_pool) {
-		ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
+		printk(MPT3SAS_ERR_FMT "reply_free pool: pci_pool_create "
"failed\n", ioc->name); + rc = -ENOMEM; goto out; } - ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL, + ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL, &ioc->reply_free_dma); if (!ioc->reply_free) { - ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n"); + printk(MPT3SAS_ERR_FMT "reply_free pool: pci_pool_alloc " + "failed\n", ioc->name); + rc = -ENOMEM; goto out; } - dinitprintk(ioc, - ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->reply_free, ioc->reply_free_queue_depth, - 4, sz / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "reply_free_dma (0x%llx)\n", - (unsigned long long)ioc->reply_free_dma)); + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free pool(0x%p): " + "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free_dma" + "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma)); total_sz += sz; if (ioc->rdpq_array_enable) { +/* + * According to MPI25 Spec each set of 8 reply queues(0-7, 8-15, ..) whereas for + * MPI26 Spec each set of 16 reply queues(0-15, 16-31, ..) should be within 4GB + * boundary and also reply queues in a set must have same upper 32-bits in their + * memory address. so here driver is allocating the DMA'able memory for reply + * queues based on MPI Spec specification. + */ + ioc->reply_post = kcalloc(ioc->reply_queue_count, + sizeof(struct reply_post_struct), GFP_KERNEL); + + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + sz = reply_post_free_sz * set_of_rdpqs; + ioc->reply_post_free_dma_pool = + pci_pool_create("reply_post_free pool", ioc->pdev, sz, + 16, 0); + ioc->reply_post_free_dma_pool_align = + pci_pool_create("reply_post_free pool alligned", ioc->pdev, sz, + roundup_pow_of_two(sz), 0); + if ((!ioc->reply_post_free_dma_pool) || + (!ioc->reply_post_free_dma_pool_align)) { + printk(MPT3SAS_ERR_FMT "reply_post_free pool: " + "pci_pool_create failed\n", ioc->name); + rc = -ENOMEM; + goto out; + } + if (ioc->reply_queue_count % set_of_rdpqs) + { + mod_sz = reply_post_free_sz * (ioc->reply_queue_count % set_of_rdpqs); + ioc->reply_post_free_dma_pool_mod_rdpq_set = + pci_pool_create("reply_post_free_mod_rdpq_set pool", ioc->pdev, mod_sz, + 16, 0); + ioc->reply_post_free_dma_pool_mod_rdpq_set_align = + pci_pool_create("reply_post_free_mod_rdpq_set pool", ioc->pdev, mod_sz, + roundup_pow_of_two(mod_sz), 0); + if (!ioc->reply_post_free_dma_pool_mod_rdpq_set || + !ioc->reply_post_free_dma_pool_mod_rdpq_set_align) { + printk(MPT3SAS_ERR_FMT "reply_post_free_mod_rdpq_set pool: " + "pci_pool_create failed\n", ioc->name); + rc = -ENOMEM; + goto out; + } + } + for (i = 0; i < ioc->reply_queue_count; i++) { + if ((i % set_of_rdpqs == 0) && ((i + (set_of_rdpqs - 1)) < ioc->reply_queue_count)) { + ioc->reply_post[i].reply_post_free = + pci_pool_alloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + printk(MPT3SAS_ERR_FMT "reply_post_free pool: " + "pci_pool_alloc failed\n", ioc->name); + reduce_sz = 64; + if((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + _base_release_memory_pools(ioc); + ioc->hba_queue_depth -= reduce_sz; + goto retry_allocation; + } + else { + rc = -ENOMEM; + goto out; + } + } 
+
+				if (!is_MSB_are_same((long)ioc->reply_post[i].reply_post_free, sz))
+				{
+					dinitprintk(ioc, printk(MPT3SAS_INFO_FMT
+					    "bad reply post free pool (0x%p), reply_post_free_dma = (0x%llx)\n", ioc->name,
+					    ioc->reply_post[i].reply_post_free,
+					    (unsigned long long)ioc->reply_post[i].reply_post_free_dma));
+
+					pci_pool_free(ioc->reply_post_free_dma_pool,
+					    ioc->reply_post[i].reply_post_free,
+					    ioc->reply_post[i].reply_post_free_dma);
+
+					ioc->reply_post[i].reply_post_free = NULL;
+					ioc->reply_post[i].reply_post_free_dma = 0;
+
+					//Retry here with modified alignment
+					ioc->reply_post[i].reply_post_free =
+					    pci_pool_alloc(ioc->reply_post_free_dma_pool_align,
+					    GFP_KERNEL,
+					    &ioc->reply_post[i].reply_post_free_dma);
+					if (!ioc->reply_post[i].reply_post_free) {
+						printk(MPT3SAS_ERR_FMT "reply_post_free pool: "
+						    "pci_pool_alloc failed\n", ioc->name);
+						rc = -ENOMEM;
+						goto out;
+					}
+					ioc->reply_post[i].dma_pool = ioc->reply_post_free_dma_pool_align;
+				} else
+					ioc->reply_post[i].dma_pool = ioc->reply_post_free_dma_pool;
+
+				memset(ioc->reply_post[i].reply_post_free, 0, sz);
+				total_sz += sz;
+			}
+			else if (i % set_of_rdpqs == 0) {
+				ioc->reply_post[i].reply_post_free =
+				    pci_pool_alloc(ioc->reply_post_free_dma_pool_mod_rdpq_set,
+				    GFP_KERNEL,
+				    &ioc->reply_post[i].reply_post_free_dma);
+				if (!ioc->reply_post[i].reply_post_free) {
+					printk(MPT3SAS_ERR_FMT "reply_post_free pool: "
+					    "pci_pool_alloc failed\n", ioc->name);
+					reduce_sz = 64;
+					if((ioc->hba_queue_depth - reduce_sz) >
+					    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+						_base_release_memory_pools(ioc);
+						ioc->hba_queue_depth -= reduce_sz;
+						goto retry_allocation;
+					}
+					else {
+						rc = -ENOMEM;
+						goto out;
+					}
+				}
+				if (!is_MSB_are_same((long)ioc->reply_post[i].reply_post_free, mod_sz))
+				{
+					dinitprintk(ioc, printk(MPT3SAS_INFO_FMT
+					    "bad reply post free pool (0x%p), reply_post_free_dma = (0x%llx)\n", ioc->name,
+					    ioc->reply_post[i].reply_post_free,
+					    (unsigned long long)ioc->reply_post[i].reply_post_free_dma));
+
+					pci_pool_free(ioc->reply_post_free_dma_pool_mod_rdpq_set,
+					    ioc->reply_post[i].reply_post_free,
+					    ioc->reply_post[i].reply_post_free_dma);
+
+					ioc->reply_post[i].reply_post_free = NULL;
+					ioc->reply_post[i].reply_post_free_dma = 0;
+
+					//Try allocating from the modified alignment pool
+					ioc->reply_post[i].reply_post_free =
+					    pci_pool_alloc(ioc->reply_post_free_dma_pool_mod_rdpq_set_align,
+					    GFP_KERNEL,
+					    &ioc->reply_post[i].reply_post_free_dma);
+					if (!ioc->reply_post[i].reply_post_free) {
+						printk(MPT3SAS_ERR_FMT "reply_post_free pool: "
+						    "pci_pool_alloc failed\n", ioc->name);
+						rc = -ENOMEM;
+						goto out;
+					}
+					ioc->reply_post[i].dma_pool =
+					    ioc->reply_post_free_dma_pool_mod_rdpq_set_align;
+				} else
+					ioc->reply_post[i].dma_pool =
+					    ioc->reply_post_free_dma_pool_mod_rdpq_set;
+
+				memset(ioc->reply_post[i].reply_post_free, 0, mod_sz);
+				total_sz += mod_sz;
+			}
+			else {
+				ioc->reply_post[i].reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+				    ((long)ioc->reply_post[i-1].reply_post_free + reply_post_free_sz);
+				ioc->reply_post[i].reply_post_free_dma = (dma_addr_t)
+				    (ioc->reply_post[i-1].reply_post_free_dma + reply_post_free_sz);
+			}
+			dinitprintk(ioc, printk(MPT3SAS_INFO_FMT
+			    "reply post free pool (0x%p): depth(%d), "
+			    "element_size(%d), pool_size(%d kB)\n", ioc->name,
+			    ioc->reply_post[i].reply_post_free,
+			    ioc->reply_post_queue_depth, set_of_rdpqs, reply_post_free_sz/1024));
+			dinitprintk(ioc, printk(MPT3SAS_INFO_FMT
+			    "reply_post_free_dma = (0x%llx)\n", ioc->name,
+			    (unsigned long long)ioc->reply_post[i].reply_post_free_dma));
+		}
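/*
 * Layout sketch for the loop above (illustrative: an MPI2.6 controller,
 * so set_of_rdpqs == 16, with reply_queue_count == 20):
 *   i == 0      : pci_pool_alloc() of one 16-queue chunk (sz bytes)
 *   i == 1..15  : no allocation; each entry points reply_post_free_sz
 *                 bytes past the previous queue inside that chunk
 *   i == 16     : pci_pool_alloc() from the "mod" pool sized for the
 *                 remaining 20 % 16 == 4 queues (mod_sz bytes)
 *   i == 17..19 : again offsets into the i == 16 chunk
 * Each chunk is checked with is_MSB_are_same() and re-allocated from the
 * power-of-two-aligned pool if it straddles a 4GB boundary.
 */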
+
 		reply_post_free_array_sz = ioc->reply_queue_count *
 		    sizeof(Mpi2IOCInitRDPQArrayEntry);
 		ioc->reply_post_free_array_dma_pool =
-		    dma_pool_create("reply_post_free_array pool",
-		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+		    pci_pool_create("reply_post_free_array pool",
+		    ioc->pdev, reply_post_free_array_sz, 16, 0);
 		if (!ioc->reply_post_free_array_dma_pool) {
-			dinitprintk(ioc,
-			    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
+			printk(MPT3SAS_ERR_FMT "reply_post_free_array pool: "
+			    "pci_pool_create failed\n", ioc->name);
+			rc = -ENOMEM;
 			goto out;
 		}
 		ioc->reply_post_free_array =
-		    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+		    pci_pool_alloc(ioc->reply_post_free_array_dma_pool,
 		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
 		if (!ioc->reply_post_free_array) {
-			dinitprintk(ioc,
-			    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
+			printk(MPT3SAS_ERR_FMT "reply_post_free_array pool: "
+			    "pci_pool_alloc failed\n", ioc->name);
+			rc = -ENOMEM;
 			goto out;
 		}
+	} else {
+		ioc->reply_post = kzalloc(sizeof(struct reply_post_struct),
+		    GFP_KERNEL);
+		/* reply post queue, 16 byte align */
+		reply_post_free_sz = ioc->reply_post_queue_depth *
+		    sizeof(Mpi2DefaultReplyDescriptor_t);
+		if (_base_is_controller_msix_enabled(ioc))
+			sz = reply_post_free_sz * ioc->reply_queue_count;
+		else
+			sz = reply_post_free_sz;
+		ioc->reply_post_free_dma_pool =
+		    pci_pool_create("reply_post_free pool",
+		    ioc->pdev, sz, 16, 0);
+		if (!ioc->reply_post_free_dma_pool) {
+			printk(MPT3SAS_ERR_FMT "reply_post_free pool: "
+			    "pci_pool_create failed\n", ioc->name);
+			rc = -ENOMEM;
+			goto out;
+		}
+		ioc->reply_post[0].reply_post_free =
+		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
+		    GFP_KERNEL, &ioc->reply_post[0].reply_post_free_dma);
+		if (!ioc->reply_post[0].reply_post_free) {
+			printk(MPT3SAS_ERR_FMT "reply_post_free pool: "
+			    "pci_pool_alloc failed\n", ioc->name);
+			rc = -ENOMEM;
+			goto out;
+		}
+		memset(ioc->reply_post[0].reply_post_free, 0, sz);
+		dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply post free pool"
+		    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+		    ioc->name, ioc->reply_post[0].reply_post_free,
+		    ioc->reply_post_queue_depth, set_of_rdpqs, sz/1024));
+		dinitprintk(ioc, printk(MPT3SAS_INFO_FMT
+		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
+		    (unsigned long long)ioc->reply_post[0].reply_post_free_dma));
+		total_sz += sz;
 	}
+
 	ioc->config_page_sz = 512;
-	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
-	    ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
+	ioc->config_page = pci_alloc_consistent(ioc->pdev,
+	    ioc->config_page_sz, &ioc->config_page_dma);
 	if (!ioc->config_page) {
-		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
+		printk(MPT3SAS_ERR_FMT "config page: pci_pool_alloc "
+		    "failed\n", ioc->name);
+		rc = -ENOMEM;
 		goto out;
 	}
-	dinitprintk(ioc,
-	    ioc_info(ioc, "config page(0x%p): size(%d)\n",
-	    ioc->config_page, ioc->config_page_sz));
-	dinitprintk(ioc,
-	    ioc_info(ioc, "config_page_dma(0x%llx)\n",
-	    (unsigned long long)ioc->config_page_dma));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "config page(0x%p): size"
+	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
+	dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "config_page_dma"
+	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
 	total_sz += ioc->config_page_sz;
 
-	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
-	    total_sz / 1024);
-	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
-	    ioc->shost->can_queue, facts->RequestCredit);
-	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
-	    ioc->shost->sg_tablesize);
-	return 0;
+	printk(MPT3SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
+	    ioc->name, total_sz/1024);
+	printk(MPT3SAS_INFO_FMT "Current Controller Queue Depth(%d), "
+	    "Max Controller Queue Depth(%d)\n",
+	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
+	printk(MPT3SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
+	    ioc->name, ioc->shost->sg_tablesize);
 
  out:
-	return -ENOMEM;
+	return rc;
+}
+
+/**
+ *_base_flush_ios_and_panic - Flush the IOs and panic
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Return nothing.
+ */
+void
+_base_flush_ios_and_panic(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
+{
+	ioc->adapter_over_temp = 1;
+	mpt3sas_base_stop_smart_polling(ioc);
+	mpt3sas_base_stop_watchdog(ioc);
+	mpt3sas_base_stop_hba_unplug_watchdog(ioc);
+	mpt3sas_scsih_flush_running_cmds(ioc);
+	mpt3sas_base_fault_info(ioc, fault_code);
+	panic("TEMPERATURE FAULT: STOPPING; panic in %s\n", __func__);
 }
 
 /**
@@ -5398,7 +7790,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
  * @ioc: Pointer to MPT_ADAPTER structure
 * @cooked: Request raw or cooked IOC state
 *
- * Return: all IOC Doorbell register bits if cooked==0, else just the
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI_IOC_STATE_MASK.
 */
 u32
@@ -5408,180 +7800,18 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
 	s = ioc->base_readl(&ioc->chip->Doorbell);
 	sc = s & MPI2_IOC_STATE_MASK;
+	if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) &&
+	    (sc != MPI2_IOC_STATE_MASK)) {
+		if ((sc == MPI2_IOC_STATE_FAULT) &&
+		    ((s & MPI2_DOORBELL_DATA_MASK) ==
+		    IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED))
+			_base_flush_ios_and_panic(ioc, s & MPI2_DOORBELL_DATA_MASK);
+	}
 	return cooked ? sc : s;
 }
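/*
 * Doorbell decode sketch for the over-temp path added above (the fault
 * code value is illustrative, not taken from the patch): a doorbell read
 * of s = 0x40002810 gives
 *     sc = s & MPI2_IOC_STATE_MASK   ->  MPI2_IOC_STATE_FAULT
 *     s & MPI2_DOORBELL_DATA_MASK    ->  0x2810, the fault code
 * and when that code equals IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED the
 * driver flushes outstanding I/O and panics rather than keep driving an
 * overheating controller.
 */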
-
-/**
- * _base_wait_on_iocstate - waiting on a particular ioc state
- * @ioc: ?
- * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
- * @timeout: timeout in second
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int
-_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
-{
-	u32 count, cntdn;
-	u32 current_state;
-
-	count = 0;
-	cntdn = 1000 * timeout;
-	do {
-		current_state = mpt3sas_base_get_iocstate(ioc, 1);
-		if (current_state == ioc_state)
-			return 0;
-		if (count && current_state == MPI2_IOC_STATE_FAULT)
-			break;
-
-		usleep_range(1000, 1500);
-		count++;
-	} while (--cntdn);
-
-	return current_state;
-}
-
-/**
- * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
- * a write to the doorbell)
- * @ioc: per adapter object
- *
- * Return: 0 for success, non-zero for failure.
- *
- * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
- */
-
-static int
-_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
-{
-	u32 cntdn, count;
-	u32 int_status;
-
-	count = 0;
-	cntdn = 1000 * timeout;
-	do {
-		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
-		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc,
-			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
-			    __func__, count, timeout));
-			return 0;
-		}
-
-		usleep_range(1000, 1500);
-		count++;
-	} while (--cntdn);
-
-	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	    __func__, count, int_status);
-	return -EFAULT;
-}
-
-static int
-_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
-{
-	u32 cntdn, count;
-	u32 int_status;
-
-	count = 0;
-	cntdn = 2000 * timeout;
-	do {
-		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
-		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc,
-			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
-			    __func__, count, timeout));
-			return 0;
-		}
-
-		udelay(500);
-		count++;
-	} while (--cntdn);
-
-	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	    __func__, count, int_status);
-	return -EFAULT;
-
-}
-
-/**
- * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
- * @ioc: per adapter object
- * @timeout: timeout in second
- *
- * Return: 0 for success, non-zero for failure.
- *
- * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
- * doorbell.
- */
-static int
-_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
-{
-	u32 cntdn, count;
-	u32 int_status;
-	u32 doorbell;
-
-	count = 0;
-	cntdn = 1000 * timeout;
-	do {
-		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
-		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
-			dhsprintk(ioc,
-			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
-			    __func__, count, timeout));
-			return 0;
-		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
-			if ((doorbell & MPI2_IOC_STATE_MASK) ==
-			    MPI2_IOC_STATE_FAULT) {
-				mpt3sas_base_fault_info(ioc , doorbell);
-				return -EFAULT;
-			}
-		} else if (int_status == 0xFFFFFFFF)
-			goto out;
-
-		usleep_range(1000, 1500);
-		count++;
-	} while (--cntdn);
-
- out:
-	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	    __func__, count, int_status);
-	return -EFAULT;
-}
-
-/**
- * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
- * @ioc: per adapter object
- * @timeout: timeout in second
- *
- * Return: 0 for success, non-zero for failure.
- */ -static int -_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) -{ - u32 cntdn, count; - u32 doorbell_reg; - - count = 0; - cntdn = 1000 * timeout; - do { - doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell); - if (!(doorbell_reg & MPI2_DOORBELL_USED)) { - dhsprintk(ioc, - ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", - __func__, count, timeout)); - return 0; - } - - usleep_range(1000, 1500); - count++; - } while (--cntdn); - - ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", - __func__, count, doorbell_reg); - return -EFAULT; -} +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_get_iocstate); +#endif /** * _base_send_ioc_reset - send doorbell reset @@ -5589,7 +7819,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET * @timeout: timeout in second * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) @@ -5598,7 +7828,8 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) int r = 0; if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { - ioc_err(ioc, "%s: unknown reset_type\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: unknown reset_type\n", + ioc->name, __func__); return -EFAULT; } @@ -5606,7 +7837,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) return -EFAULT; - ioc_info(ioc, "sending message unit reset !!\n"); + printk(MPT3SAS_INFO_FMT "sending message unit reset !!\n", ioc->name); writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, &ioc->chip->Doorbell); @@ -5614,165 +7845,53 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) r = -EFAULT; goto out; } - ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, + timeout); if (ioc_state) { - ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", - __func__, ioc_state); + printk(MPT3SAS_ERR_FMT "%s: failed going to ready state " + " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); r = -EFAULT; goto out; } out: - ioc_info(ioc, "message unit reset: %s\n", - r == 0 ? "SUCCESS" : "FAILED"); + printk(MPT3SAS_INFO_FMT "message unit reset: %s\n", + ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); return r; } -/** - * mpt3sas_wait_for_ioc - IOC's operational state is checked here. - * @ioc: per adapter object - * @wait_count: timeout in seconds - * - * Return: Waits up to timeout seconds for the IOC to - * become operational. Returns 0 if IOC is present - * and operational; otherwise returns -EFAULT. 
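_base_send_ioc_reset() triggers the message unit reset by shifting the MPI function code into the upper bits of the Doorbell register before waiting for the READY state. A small stand-alone illustration of that encoding; the shift and function value mirror the MPI2_* definitions and should be treated as assumed here:

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_FUNCTION_SHIFT 24     /* mirrors MPI2_DOORBELL_FUNCTION_SHIFT */
#define FUNC_MSG_UNIT_RESET     0x40u  /* assumed reset function code */

int main(void)
{
	uint32_t doorbell = FUNC_MSG_UNIT_RESET << DOORBELL_FUNCTION_SHIFT;

	/* the driver then polls for a doorbell interrupt and READY state */
	printf("doorbell write for message unit reset: 0x%08x\n", doorbell);
	return 0;
}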
- */ - int -mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout) +mpt3sas_wait_for_ioc_to_operational(struct MPT3SAS_ADAPTER *ioc, + int wait_count) { int wait_state_count = 0; u32 ioc_state; - do { - ioc_state = mpt3sas_base_get_iocstate(ioc, 1); - if (ioc_state == MPI2_IOC_STATE_OPERATIONAL) - break; - ssleep(1); - ioc_info(ioc, "%s: waiting for operational state(count=%d)\n", - __func__, ++wait_state_count); - } while (--timeout); - if (!timeout) { - ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__); + if (mpt3sas_base_pci_device_is_unplugged(ioc)) return -EFAULT; - } - if (wait_state_count) - ioc_info(ioc, "ioc is operational\n"); - return 0; -} -/** - * _base_handshake_req_reply_wait - send request thru doorbell interface - * @ioc: per adapter object - * @request_bytes: request length - * @request: pointer having request payload - * @reply_bytes: reply length - * @reply: pointer to reply payload - * @timeout: timeout in second - * - * Return: 0 for success, non-zero for failure. - */ -static int -_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, - u32 *request, int reply_bytes, u16 *reply, int timeout) -{ - MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; - int i; - u8 failed; - __le32 *mfp; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { - /* make sure doorbell is not in use */ - if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { - ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); - return -EFAULT; - } + if (mpt3sas_base_pci_device_is_unplugged(ioc)) + return -EFAULT; - /* clear pending doorbell interrupts from previous state changes */ - if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & - MPI2_HIS_IOC2SYS_DB_STATUS) - writel(0, &ioc->chip->HostInterruptStatus); - - /* send message to ioc */ - writel(((MPI2_FUNCTION_HANDSHAKE<chip->Doorbell); - - if ((_base_spin_on_doorbell_int(ioc, 5))) { - ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", - __LINE__); - return -EFAULT; - } - writel(0, &ioc->chip->HostInterruptStatus); - - if ((_base_wait_for_doorbell_ack(ioc, 5))) { - ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n", - __LINE__); - return -EFAULT; - } - - /* send message 32-bits at a time */ - for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { - writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); - if ((_base_wait_for_doorbell_ack(ioc, 5))) - failed = 1; - } - - if (failed) { - ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n", - __LINE__); - return -EFAULT; - } - - /* now wait for the reply */ - if ((_base_wait_for_doorbell_int(ioc, timeout))) { - ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", - __LINE__); - return -EFAULT; - } - - /* read the first two 16-bits, it gives the total length of the reply */ - reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell) - & MPI2_DOORBELL_DATA_MASK); - writel(0, &ioc->chip->HostInterruptStatus); - if ((_base_wait_for_doorbell_int(ioc, 5))) { - ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", - __LINE__); - return -EFAULT; - } - reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell) - & MPI2_DOORBELL_DATA_MASK); - writel(0, &ioc->chip->HostInterruptStatus); - - for (i = 2; i < default_reply->MsgLength * 2; i++) { - if ((_base_wait_for_doorbell_int(ioc, 5))) { - ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", - __LINE__); + if (wait_state_count++ == wait_count) { + printk(MPT3SAS_ERR_FMT + "%s: failed 
due to ioc not operational\n", + ioc->name, __func__); return -EFAULT; } - if (i >= reply_bytes/2) /* overflow case */ - ioc->base_readl(&ioc->chip->Doorbell); - else - reply[i] = le16_to_cpu( - ioc->base_readl(&ioc->chip->Doorbell) - & MPI2_DOORBELL_DATA_MASK); - writel(0, &ioc->chip->HostInterruptStatus); + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + printk(MPT3SAS_INFO_FMT "%s: waiting for " + "operational state(count=%d)\n", ioc->name, + __func__, wait_state_count); } + if (wait_state_count) + printk(MPT3SAS_INFO_FMT "%s: ioc is operational\n", + ioc->name, __func__); - _base_wait_for_doorbell_int(ioc, 5); - if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { - dhsprintk(ioc, - ioc_info(ioc, "doorbell is in use (line=%d)\n", - __LINE__)); - } - writel(0, &ioc->chip->HostInterruptStatus); - - if (ioc->logging_level & MPT_DEBUG_INIT) { - mfp = (__le32 *)reply; - pr_info("\toffset:data\n"); - for (i = 0; i < reply_bytes/4; i++) - pr_info("\t[0x%02x]:%08x\n", i*4, - le32_to_cpu(mfp[i])); - } - return 0; + return 0; } /** @@ -5787,7 +7906,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, * identifying information about the device, in addition allows the host to * remove IOC resources associated with the device. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, @@ -5795,27 +7914,30 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, Mpi2SasIoUnitControlRequest_t *mpi_request) { u16 smid; - u8 issue_reset = 0; + u8 issue_reset; int rc; void *request; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); mutex_lock(&ioc->base_cmds.mutex); if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: base_cmd in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: base_cmd in use\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } - rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); if (rc) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } @@ -5837,7 +7959,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, ioc->ioc_link_reset_in_progress) ioc->ioc_link_reset_in_progress = 0; if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, ioc->base_cmds.status, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4); @@ -5860,6 +7982,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, mutex_unlock(&ioc->base_cmds.mutex); return rc; } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_sas_iounit_control); +#endif /** * mpt3sas_base_scsi_enclosure_processor - sending request to sep device @@ -5870,34 +7995,37 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, * The SCSI Enclosure Processor request message causes the IOC to * communicate with SES devices to control LED status signals. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
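The removed _base_handshake_req_reply_wait() is the slow-path transport used before interrupts are up: the request frame goes out through the Doorbell one 32-bit word per write (each acknowledged), while the reply comes back only 16 data bits per read. A sketch of that width asymmetry with canned register values standing in for real doorbell I/O:

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_DATA_MASK 0xFFFFu

int main(void)
{
	/* a two-dword request frame, roughly the shape of an IOC_FACTS
	 * request; values here are illustrative only */
	uint32_t request[2] = { 0x03000000u, 0x0u };
	/* canned register reads: each carries 16 payload bits */
	uint32_t raw_reads[4] = { 0x12340005u, 0xabcd0003u, 0x0u, 0x0u };
	uint16_t reply[4];
	size_t i;

	for (i = 0; i < 2; i++)          /* request: 32 bits per write */
		printf("doorbell <- 0x%08x\n", request[i]);
	for (i = 0; i < 4; i++) {        /* reply: 16 data bits per read */
		reply[i] = (uint16_t)(raw_reads[i] & DOORBELL_DATA_MASK);
		printf("doorbell -> 0x%04x\n", reply[i]);
	}
	return 0;
}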
*/ int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) { u16 smid; - u8 issue_reset = 0; + u8 issue_reset; int rc; void *request; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); mutex_lock(&ioc->base_cmds.mutex); if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: base_cmd in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: base_cmd in use\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } - rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); if (rc) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } @@ -5905,15 +8033,15 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, rc = 0; ioc->base_cmds.status = MPT3_CMD_PENDING; request = mpt3sas_base_get_msg_frame(ioc, smid); - ioc->base_cmds.smid = smid; memset(request, 0, ioc->request_sz); - memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t)); init_completion(&ioc->base_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->base_cmds.done, msecs_to_jiffies(10000)); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, ioc->base_cmds.status, mpi_request, sizeof(Mpi2SepRequest_t)/4); @@ -5940,9 +8068,8 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, /** * _base_get_port_facts - obtain port facts reply and save in ioc * @ioc: per adapter object - * @port: ? * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) @@ -5952,7 +8079,8 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) struct mpt3sas_port_facts *pfacts; int mpi_reply_sz, mpi_request_sz, r; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); @@ -5963,7 +8091,8 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); if (r != 0) { - ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + printk(MPT3SAS_ERR_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); return r; } @@ -5978,169 +8107,11 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) return 0; } -/** - * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL - * @ioc: per adapter object - * @timeout: - * - * Return: 0 for success, non-zero for failure. 
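Buried in the SEP hunk below is a real bug fix: the request frame was being copied with sizeof(Mpi2SepReply_t), the reply type, instead of sizeof(Mpi2SepRequest_t), silently truncating the request. The sketch that follows (struct names and sizes are stand-ins) shows the safe habit the fix restores: size the copy from the object actually being copied, never from a lookalike type.

#include <stdio.h>
#include <string.h>

struct sep_request { unsigned char bytes[28]; };  /* hypothetical sizes */
struct sep_reply   { unsigned char bytes[16]; };

int main(void)
{
	struct sep_request req;
	unsigned char frame[64] = { 0 };

	memset(&req, 0xAA, sizeof(req));
	/* sizeof(*&req), not sizeof(struct sep_reply): using the reply
	 * type here would copy only 16 of the 28 request bytes */
	memcpy(frame, &req, sizeof(req));
	printf("copied %zu bytes\n", sizeof(req));
	return 0;
}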
- */ -static int -_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) -{ - u32 ioc_state; - int rc; - - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - - if (ioc->pci_error_recovery) { - dfailprintk(ioc, - ioc_info(ioc, "%s: host in pci error recovery\n", - __func__)); - return -EFAULT; - } - - ioc_state = mpt3sas_base_get_iocstate(ioc, 0); - dhsprintk(ioc, - ioc_info(ioc, "%s: ioc_state(0x%08x)\n", - __func__, ioc_state)); - - if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || - (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) - return 0; - - if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); - goto issue_diag_reset; - } - - if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & - MPI2_DOORBELL_DATA_MASK); - goto issue_diag_reset; - } - - ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); - if (ioc_state) { - dfailprintk(ioc, - ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", - __func__, ioc_state)); - return -EFAULT; - } - - issue_diag_reset: - rc = _base_diag_reset(ioc); - return rc; -} - -/** - * _base_get_ioc_facts - obtain ioc facts reply and save in ioc - * @ioc: per adapter object - * - * Return: 0 for success, non-zero for failure. - */ -static int -_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) -{ - Mpi2IOCFactsRequest_t mpi_request; - Mpi2IOCFactsReply_t mpi_reply; - struct mpt3sas_facts *facts; - int mpi_reply_sz, mpi_request_sz, r; - - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - - r = _base_wait_for_iocstate(ioc, 10); - if (r) { - dfailprintk(ioc, - ioc_info(ioc, "%s: failed getting to correct state\n", - __func__)); - return r; - } - mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); - mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); - memset(&mpi_request, 0, mpi_request_sz); - mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; - r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, - (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); - - if (r != 0) { - ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); - return r; - } - - facts = &ioc->facts; - memset(facts, 0, sizeof(struct mpt3sas_facts)); - facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); - facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); - facts->VP_ID = mpi_reply.VP_ID; - facts->VF_ID = mpi_reply.VF_ID; - facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); - facts->MaxChainDepth = mpi_reply.MaxChainDepth; - facts->WhoInit = mpi_reply.WhoInit; - facts->NumberOfPorts = mpi_reply.NumberOfPorts; - facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; - if (ioc->msix_enable && (facts->MaxMSIxVectors <= - MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) - ioc->combined_reply_queue = 0; - facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); - facts->MaxReplyDescriptorPostQueueDepth = - le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); - facts->ProductID = le16_to_cpu(mpi_reply.ProductID); - facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); - if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) - ioc->ir_firmware = 1; - if ((facts->IOCCapabilities & - MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) - ioc->rdpq_array_capable = 1; - if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) - && ioc->is_aero_ioc) - ioc->atomic_desc_capable = 1; - facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); - 
facts->IOCRequestFrameSize = - le16_to_cpu(mpi_reply.IOCRequestFrameSize); - if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { - facts->IOCMaxChainSegmentSize = - le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); - } - facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); - facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); - ioc->shost->max_id = -1; - facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); - facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); - facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); - facts->HighPriorityCredit = - le16_to_cpu(mpi_reply.HighPriorityCredit); - facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; - facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); - facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; - - /* - * Get the Page Size from IOC Facts. If it's 0, default to 4k. - */ - ioc->page_size = 1 << facts->CurrentHostPageSize; - if (ioc->page_size == 1) { - ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n"); - ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; - } - dinitprintk(ioc, - ioc_info(ioc, "CurrentHostPageSize(%d)\n", - facts->CurrentHostPageSize)); - - dinitprintk(ioc, - ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n", - facts->RequestCredit, facts->MaxChainDepth)); - dinitprintk(ioc, - ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n", - facts->IOCRequestFrameSize * 4, - facts->ReplyFrameSize * 4)); - return 0; -} - /** * _base_send_ioc_init - send ioc_init to firmware * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) @@ -6148,18 +8119,23 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) Mpi2IOCInitRequest_t mpi_request; Mpi2IOCInitReply_t mpi_reply; int i, r = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) ktime_t current_time; +#else + struct timeval current_time; +#endif u16 ioc_status; - u32 reply_post_free_array_sz = 0; + u32 reply_post_free_ary_sz; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); mpi_request.Function = MPI2_FUNCTION_IOC_INIT; mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; mpi_request.VF_ID = 0; /* TODO */ mpi_request.VP_ID = 0; - mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); + mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; @@ -6181,13 +8157,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) cpu_to_le64((u64)ioc->reply_free_dma); if (ioc->rdpq_array_enable) { - reply_post_free_array_sz = ioc->reply_queue_count * + reply_post_free_ary_sz = ioc->reply_queue_count * sizeof(Mpi2IOCInitRDPQArrayEntry); - memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz); + memset(ioc->reply_post_free_array, 0, reply_post_free_ary_sz); for (i = 0; i < ioc->reply_queue_count; i++) ioc->reply_post_free_array[i].RDPQBaseAddress = - cpu_to_le64( - (u64)ioc->reply_post[i].reply_post_free_dma); + cpu_to_le64((u64)ioc->reply_post[i].reply_post_free_dma); mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; mpi_request.ReplyDescriptorPostQueueAddress = cpu_to_le64((u64)ioc->reply_post_free_array_dma); @@ -6199,17 +8174,23 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) /* 
This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) current_time = ktime_get_real(); mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); +#else + do_gettimeofday(¤t_time); + mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + + (current_time.tv_usec / 1000)); +#endif if (ioc->logging_level & MPT_DEBUG_INIT) { __le32 *mfp; int i; mfp = (__le32 *)&mpi_request; - pr_info("\toffset:data\n"); + printk(KERN_INFO "\toffset:data\n"); for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) - pr_info("\t[0x%02x]:%08x\n", i*4, + printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, le32_to_cpu(mfp[i])); } @@ -6218,14 +8199,15 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); if (r != 0) { - ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + printk(MPT3SAS_ERR_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); return r; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { - ioc_err(ioc, "%s: failed\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); r = -EIO; } @@ -6239,8 +8221,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -6271,10 +8253,12 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { mpt3sas_port_enable_complete(ioc); return 1; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) } else { ioc->start_scan_failed = ioc_status; ioc->start_scan = 0; return 1; +#endif } } complete(&ioc->port_enable_cmds.done); @@ -6285,7 +8269,7 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * _base_send_port_enable - send port_enable(discovery stuff) to firmware * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
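The IOCInit hunk above selects a time source by kernel version: ktime_get_real() on 4.6 and later, do_gettimeofday() before that, both reduced to milliseconds since the epoch for the TimeStamp field. A userspace analogue of the same computation using clock_gettime():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	uint64_t ms;

	/* wall clock, the userspace counterpart of ktime_get_real() */
	clock_gettime(CLOCK_REALTIME, &ts);
	ms = (uint64_t)ts.tv_sec * 1000u + (uint64_t)ts.tv_nsec / 1000000u;
	printf("ms since epoch for a TimeStamp-style field: %llu\n",
	       (unsigned long long)ms);
	return 0;
}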
*/ static int _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) @@ -6296,16 +8280,18 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) u16 smid; u16 ioc_status; - ioc_info(ioc, "sending port enable !!\n"); + printk(MPT3SAS_INFO_FMT "sending port enable !!\n", ioc->name); if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { - ioc_err(ioc, "%s: internal command already in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: internal command already in use\n", + ioc->name, __func__); return -EAGAIN; } smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); return -EAGAIN; } @@ -6317,11 +8303,13 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) init_completion(&ioc->port_enable_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); + wait_for_completion_timeout(&ioc->port_enable_cmds.done, + 300*HZ); if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); _debug_dump_mf(mpi_request, - sizeof(Mpi2PortEnableRequest_t)/4); + sizeof(Mpi2PortEnableRequest_t)/4); if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) r = -EFAULT; else @@ -6332,23 +8320,25 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) mpi_reply = ioc->port_enable_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n", - __func__, ioc_status); + printk(MPT3SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n", + ioc->name, __func__, ioc_status); r = -EFAULT; goto out; } out: ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; - ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED"); + printk(MPT3SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ? + "SUCCESS" : "FAILED")); return r; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) /** * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) @@ -6356,16 +8346,18 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) Mpi2PortEnableRequest_t *mpi_request; u16 smid; - ioc_info(ioc, "sending port enable !!\n"); + printk(MPT3SAS_INFO_FMT "sending port enable !!\n", ioc->name); if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { - ioc_err(ioc, "%s: internal command already in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: internal command already in use\n", + ioc->name, __func__); return -EAGAIN; } smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); return -EAGAIN; } @@ -6378,6 +8370,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) ioc->put_smid_default(ioc, smid); return 0; } +#endif /** * _base_determine_wait_on_discovery - desposition @@ -6386,7 +8379,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) * Decide whether to wait on discovery to complete. Used to either * locate boot device, or report volumes ahead of physical devices. * - * Return: 1 for wait, 0 for don't wait. 
+ * Returns 1 for wait, 0 for don't wait */ static int _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) @@ -6458,7 +8451,7 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) * _base_event_notification - send event notification * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _base_event_notification(struct MPT3SAS_ADAPTER *ioc) @@ -6468,16 +8461,19 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc) int r = 0; int i; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); if (ioc->base_cmds.status & MPT3_CMD_PENDING) { - ioc_err(ioc, "%s: internal command already in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: internal command already in use\n", + ioc->name, __func__); return -EAGAIN; } smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); return -EAGAIN; } ioc->base_cmds.status = MPT3_CMD_PENDING; @@ -6494,15 +8490,17 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc) ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); _debug_dump_mf(mpi_request, - sizeof(Mpi2EventNotificationRequest_t)/4); + sizeof(Mpi2EventNotificationRequest_t)/4); if (ioc->base_cmds.status & MPT3_CMD_RESET) r = -EFAULT; else r = -ETIME; } else - dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: complete\n", + ioc->name, __func__)); ioc->base_cmds.status = MPT3_CMD_NOT_USED; return r; } @@ -6510,7 +8508,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_base_validate_event_type - validating event types * @ioc: per adapter object - * @event_type: firmware event + * @event: firmware event * * This will turn on firmware event notification when application * ask for that event. We don't mask events that are already enabled. @@ -6544,134 +8542,30 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) mutex_unlock(&ioc->base_cmds.mutex); } -/** - * _base_diag_reset - the "big hammer" start of day reset - * @ioc: per adapter object - * - * Return: 0 for success, non-zero for failure. 
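Event delivery is governed by an array of 32-bit mask words sent in the EventNotification request; _base_unmask_events() clears one bit per wanted event. A stand-alone rendition of that bookkeeping, assuming the driver's polarity of set-bit-means-masked:

#include <stdio.h>
#include <stdint.h>

#define MAX_EVENTS 128  /* four 32-bit mask words, as in the driver */

static uint32_t event_masks[MAX_EVENTS / 32];

static void unmask_event(unsigned int event)
{
	if (event >= MAX_EVENTS)
		return;
	/* clear the bit so the firmware will deliver this event */
	event_masks[event / 32] &= ~(1u << (event % 32));
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < MAX_EVENTS / 32; i++)
		event_masks[i] = 0xFFFFFFFFu;  /* start fully masked */
	unmask_event(0x1C);                    /* hypothetical event code */
	printf("mask word 0: 0x%08x\n", event_masks[0]);
	return 0;
}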
- */ -static int -_base_diag_reset(struct MPT3SAS_ADAPTER *ioc) -{ - u32 host_diagnostic; - u32 ioc_state; - u32 count; - u32 hcb_size; - - ioc_info(ioc, "sending diag reset !!\n"); - - drsprintk(ioc, ioc_info(ioc, "clear interrupts\n")); - - count = 0; - do { - /* Write magic sequence to WriteSequence register - * Loop until in diagnostic mode - */ - drsprintk(ioc, ioc_info(ioc, "write magic sequence\n")); - writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); - writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); - writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); - writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); - writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); - writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); - writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); - - /* wait 100 msec */ - msleep(100); - - if (count++ > 20) - goto out; - - host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); - drsprintk(ioc, - ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", - count, host_diagnostic)); - - } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); - - hcb_size = ioc->base_readl(&ioc->chip->HCBSize); - - drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); - writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, - &ioc->chip->HostDiagnostic); - - /*This delay allows the chip PCIe hardware time to finish reset tasks*/ - msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); - - /* Approximately 300 second max wait */ - for (count = 0; count < (300000000 / - MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { - - host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); - - if (host_diagnostic == 0xFFFFFFFF) - goto out; - if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) - break; - - msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000); - } - - if (host_diagnostic & MPI2_DIAG_HCB_MODE) { - - drsprintk(ioc, - ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n")); - host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; - host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; - writel(host_diagnostic, &ioc->chip->HostDiagnostic); - - drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n")); - writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, - &ioc->chip->HCBSize); - } - - drsprintk(ioc, ioc_info(ioc, "restart the adapter\n")); - writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, - &ioc->chip->HostDiagnostic); - - drsprintk(ioc, - ioc_info(ioc, "disable writes to the diagnostic register\n")); - writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); - - drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n")); - ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20); - if (ioc_state) { - ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", - __func__, ioc_state); - goto out; - } - - ioc_info(ioc, "diag reset: SUCCESS\n"); - return 0; - - out: - ioc_err(ioc, "diag reset: FAILED\n"); - return -EFAULT; -} /** - * _base_make_ioc_ready - put controller in READY state + * mpt3sas_base_make_ioc_ready - put controller in READY state * @ioc: per adapter object * @type: FORCE_BIG_HAMMER or SOFT_RESET * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
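The _base_diag_reset() body deleted above implements the "big hammer": a fixed sequence of key writes to WriteSequence unlocks the HostDiagnostic register, retried up to 20 times until the write-enable bit appears, after which the adapter is reset and polled back to READY. The unlock loop's shape, with register I/O stubbed out:

#include <stdint.h>
#include <stdio.h>

#define DIAG_WRITE_ENABLE 0x80u  /* stand-in for MPI2_DIAG_DIAG_WRITE_ENABLE */

static int attempts;

/* Stub: pretend the unlock takes effect on the third key sequence. */
static uint32_t read_host_diagnostic(void)
{
	return (++attempts >= 3) ? DIAG_WRITE_ENABLE : 0u;
}

/* In the driver this is seven writel()s of the MPI2_WRSEQ_*_KEY_VALUEs. */
static void write_magic_sequence(void) { }

int main(void)
{
	uint32_t host_diag;
	int count = 0;

	do {
		write_magic_sequence();
		if (count++ > 20) {   /* same retry cap the driver used */
			puts("diag unlock: FAILED");
			return 1;
		}
		host_diag = read_host_diagnostic();
	} while (!(host_diag & DIAG_WRITE_ENABLE));

	puts("diag unlock: write enable set");
	return 0;
}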
*/ -static int -_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) +int +mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) { u32 ioc_state; int rc; int count; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); - if (ioc->pci_error_recovery) + if (!mpt3sas_base_pci_device_is_available(ioc)) return 0; ioc_state = mpt3sas_base_get_iocstate(ioc, 0); - dhsprintk(ioc, - ioc_info(ioc, "%s: ioc_state(0x%08x)\n", - __func__, ioc_state)); + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); /* if in RESET state, it should move to READY state shortly */ count = 0; @@ -6679,8 +8573,9 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) while ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_READY) { if (count++ == 10) { - ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", - __func__, ioc_state); + printk(MPT3SAS_ERR_FMT "%s: failed going to " + " ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); return -EFAULT; } ssleep(1); @@ -6692,7 +8587,8 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) return 0; if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "unexpected doorbell " + "active!\n", ioc->name)); goto issue_diag_reset; } @@ -6720,23 +8616,24 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) * _base_make_ioc_operational - put controller in OPERATIONAL state * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ static int _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) { - int r, i, index, rc; - unsigned long flags; + int r, rc, i, index; + unsigned long flags; u32 reply_address; u16 smid; struct _tr_list *delayed_tr, *delayed_tr_next; struct _sc_list *delayed_sc, *delayed_sc_next; struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; - u8 hide_flag; struct adapter_reply_queue *reply_q; Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; + u8 hide_flag; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); /* clean the delayed target reset list */ list_for_each_entry_safe(delayed_tr, delayed_tr_next, @@ -6745,13 +8642,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) kfree(delayed_tr); } - list_for_each_entry_safe(delayed_tr, delayed_tr_next, &ioc->delayed_tr_volume_list, list) { list_del(&delayed_tr->list); kfree(delayed_tr); } + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_internal_tm_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_sc, delayed_sc_next, &ioc->delayed_sc_list, list) { list_del(&delayed_sc->list); @@ -6764,8 +8666,17 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) kfree(delayed_event_ack); } + /* initialize the scsi lookup free list */ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)) + smid = 1; + for (i = 0; i < ioc->scsiio_depth; i++, smid++) { + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].smid = smid; + ioc->scsi_lookup[i].scmd = NULL; + ioc->scsi_lookup[i].direct_io = 0; + } +#endif /* hi-priority queue */ INIT_LIST_HEAD(&ioc->hpr_free_list); smid = ioc->hi_priority_smid; @@ -6785,7 +8696,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) list_add_tail(&ioc->internal_lookup[i].tracker_list, &ioc->internal_free_list); } - spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); /* initialize Reply Free Queue */ @@ -6794,8 +8704,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) ioc->reply_sz) { ioc->reply_free[i] = cpu_to_le32(reply_address); if (ioc->is_mcpu_endpoint) - _base_clone_reply_to_sys_mem(ioc, - reply_address, i); + _base_clone_reply_to_sys_mem(ioc, reply_address, i); } /* initialize reply queues */ @@ -6818,10 +8727,11 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) reply_post_free_contig += ioc->reply_post_queue_depth; } - reply_q->reply_post_host_index = 0; + reply_q->reply_post_host_index = 0; + for (i = 0; i < ioc->reply_post_queue_depth; i++) reply_q->reply_post_free[i].Words = - cpu_to_le64(ULLONG_MAX); + cpu_to_le64(ULLONG_MAX); if (!_base_is_controller_msix_enabled(ioc)) goto skip_init_reply_post_free_queue; } @@ -6831,14 +8741,14 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) if (r) { /* * No need to check IOC state for fault state & issue - * diag reset during host reset. This check is need + * diag reset during host reset. This check is need * only during driver load time. 
*/ if (!ioc->is_driver_loading) return r; rc = _base_check_for_fault_and_issue_reset(ioc); - if (rc || (_base_send_ioc_init(ioc))) + if (rc || (r = _base_send_ioc_init(ioc))) return r; } @@ -6848,23 +8758,22 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) /* initialize reply post host index */ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { - if (ioc->combined_reply_queue) - writel((reply_q->msix_index & 7)<< - MPI2_RPHI_MSIX_INDEX_SHIFT, - ioc->replyPostRegisterIndex[reply_q->msix_index/8]); - else - writel(reply_q->msix_index << - MPI2_RPHI_MSIX_INDEX_SHIFT, - &ioc->chip->ReplyPostHostIndex); + if (ioc->combined_reply_queue) { + for ( i = 0; i < ioc->nc_reply_index_count; i++ ) + writel((reply_q->msix_index & 7)<< MPI2_RPHI_MSIX_INDEX_SHIFT, + ioc->replyPostRegisterIndex[i]); + } else { + writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + } if (!_base_is_controller_msix_enabled(ioc)) goto skip_init_reply_post_host_index; } skip_init_reply_post_host_index: - + mpt3sas_base_start_hba_unplug_watchdog(ioc); _base_unmask_interrupts(ioc); - if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { r = _base_display_fwpkg_version(ioc); if (r) @@ -6872,25 +8781,38 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) } _base_static_config_pages(ioc); + + /* event_unmask and port_enable must be issued without any/very less window */ r = _base_event_notification(ioc); if (r) return r; if (ioc->is_driver_loading) { - - if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier - == 0x80) { + if (ioc->is_warpdrive && + ioc->manu_pg10.OEMIdentifier == 0x80) { hide_flag = (u8) ( - le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) & - MFG_PAGE10_HIDE_SSDS_MASK); + le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) & + MFG_PAGE10_HIDE_SSDS_MASK); if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) ioc->mfg_pg10_hide_flag = hide_flag; - } + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + if (ioc->is_warpdrive) + mpt3sas_enable_diag_buffer(ioc, 1); + else if (diag_buffer_enable != -1 && diag_buffer_enable != 0) + mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) + mpt3sas_enable_diag_buffer(ioc, 1); + if (disable_discovery > 0) + return r; +#endif ioc->wait_for_discovery_to_complete = _base_determine_wait_on_discovery(ioc); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) return r; /* scan_start and scan_finished support */ +#endif } r = _base_send_port_enable(ioc); @@ -6903,23 +8825,23 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_base_free_resources - free resources controller resources * @ioc: per adapter object + * + * Return nothing. 
*/ void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) { - dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); - /* synchronizing freeing resource with pci_access_mutex lock */ - mutex_lock(&ioc->pci_access_mutex); - if (ioc->chip_phys && ioc->chip) { - _base_mask_interrupts(ioc); - ioc->shost_recovery = 1; - _base_make_ioc_ready(ioc, SOFT_RESET); - ioc->shost_recovery = 0; - } + if (!ioc->chip_phys) + return; - mpt3sas_base_unmap_resources(ioc); - mutex_unlock(&ioc->pci_access_mutex); + mpt3sas_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + _base_unmap_resources(ioc); return; } @@ -6927,15 +8849,16 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) * mpt3sas_base_attach - attach controller instance * @ioc: per adapter object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) { - int r, i, rc; + int r, rc, i; int cpu_id, last_cpu_id = 0; - dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); /* setup cpu_msix_table */ ioc->cpu_count = num_online_cpus(); @@ -6945,44 +8868,69 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); ioc->reply_queue_count = 1; if (!ioc->cpu_msix_table) { - dfailprintk(ioc, - ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n")); + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "allocation for " + "cpu_msix_table failed!!!\n", ioc->name)); r = -ENOMEM; goto out_free_resources; } if (ioc->is_warpdrive) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, sizeof(resource_size_t *), GFP_KERNEL); +#else + ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, + sizeof(u64 *), GFP_KERNEL); +#endif if (!ioc->reply_post_host_index) { - dfailprintk(ioc, - ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n")); + dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "allocation " + "for reply_post_host_index failed!!!\n", + ioc->name)); r = -ENOMEM; goto out_free_resources; } } - - ioc->smp_affinity_enable = smp_affinity_enable; - ioc->rdpq_array_enable_assigned = 0; - ioc->dma_mask = 0; + if (ioc->is_aero_ioc) ioc->base_readl = &_base_readl_aero; else ioc->base_readl = &_base_readl; + + ioc->smp_affinity_enable = smp_affinity_enable; + r = mpt3sas_base_map_resources(ioc); if (r) goto out_free_resources; + if (ioc->is_warpdrive) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) + ioc->reply_post_host_index[0] = + (resource_size_t *)&ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = (resource_size_t *) + ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); +#else + ioc->reply_post_host_index[0] = + (u64 *)&ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = (u64 *) + ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); +#endif + } + pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_get_ioc_facts(ioc); if (r) { rc = _base_check_for_fault_and_issue_reset(ioc); - if (rc || (_base_get_ioc_facts(ioc))) + if (rc || (r =_base_get_ioc_facts(ioc))) goto out_free_resources; } - - switch (ioc->hba_mpi_version_belonged) { + switch(ioc->hba_mpi_version_belonged) { 
case MPI2_VERSION: ioc->build_sg_scmd = &_base_build_sg_scmd; ioc->build_sg = &_base_build_sg; @@ -6992,50 +8940,59 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) case MPI25_VERSION: case MPI26_VERSION: /* - * In SAS3.0, + * SAS3.0 support + * * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and * Target Status - all require the IEEE formated scatter gather * elements. + * */ ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; ioc->build_sg = &_base_build_sg_ieee; ioc->build_nvme_prp = &_base_build_nvme_prp; ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); + if (ioc->high_iops_queues) - ioc->get_msix_index_for_smlio = - &_base_get_high_iops_msix_index; - else + ioc->get_msix_index_for_smlio = &_base_get_high_iops_msix_index; + else ioc->get_msix_index_for_smlio = &_base_get_msix_index; + break; } + if (ioc->atomic_desc_capable) { ioc->put_smid_default = &_base_put_smid_default_atomic; ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic; - ioc->put_smid_fast_path = - &_base_put_smid_fast_path_atomic; - ioc->put_smid_hi_priority = - &_base_put_smid_hi_priority_atomic; + ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic; + ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic; + ioc->put_smid_nvme_encap= &_base_put_smid_nvme_encap_atomic; +#if defined(TARGET_MODE) + ioc->put_smid_target_assist = &_base_put_smid_target_assist_atomic; +#endif } else { ioc->put_smid_default = &_base_put_smid_default; - ioc->put_smid_fast_path = &_base_put_smid_fast_path; - ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; if (ioc->is_mcpu_endpoint) - ioc->put_smid_scsi_io = - &_base_put_smid_mpi_ep_scsi_io; + ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io; else ioc->put_smid_scsi_io = &_base_put_smid_scsi_io; + ioc->put_smid_fast_path = &_base_put_smid_fast_path; + ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; + ioc->put_smid_nvme_encap= &_base_put_smid_nvme_encap; +#if defined(TARGET_MODE) + ioc->put_smid_target_assist = &_base_put_smid_target_assist; +#endif } /* * These function pointers for other requests that don't - * the require IEEE scatter gather elements. + * require IEEE scatter gather elements. * * For example Configuration Pages and SAS IOUNIT Control don't. 
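mpt3sas_base_attach() binds the SG-construction and SMID-posting helpers through function pointers chosen once per MPI version and descriptor capability, so the I/O hot path never re-tests those flags. A compact sketch of that dispatch style, with hypothetical names in place of the driver's:

#include <stdio.h>

struct adapter {
	int atomic_capable;
	void (*put_smid_default)(struct adapter *ioc, unsigned short smid);
};

static void put_smid_default(struct adapter *ioc, unsigned short smid)
{
	(void)ioc;
	printf("full 64-bit descriptor post, smid %u\n", smid);
}

static void put_smid_default_atomic(struct adapter *ioc, unsigned short smid)
{
	(void)ioc;
	printf("single 32-bit atomic descriptor post, smid %u\n", smid);
}

static void bind_ops(struct adapter *ioc)
{
	/* decided once at attach time, like the switch above */
	ioc->put_smid_default = ioc->atomic_capable ?
		put_smid_default_atomic : put_smid_default;
}

int main(void)
{
	struct adapter ioc = { .atomic_capable = 1 };

	bind_ops(&ioc);
	ioc.put_smid_default(&ioc, 42);  /* dispatches without branching */
	return 0;
}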
*/ ioc->build_sg_mpi = &_base_build_sg; ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; - r = _base_make_ioc_ready(ioc, SOFT_RESET); + r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); if (r) goto out_free_resources; @@ -7050,7 +9007,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) r = _base_get_port_facts(ioc, i); if (r) { rc = _base_check_for_fault_and_issue_reset(ioc); - if (rc || (_base_get_port_facts(ioc, i))) + if (rc || (r = _base_get_port_facts(ioc, i))) goto out_free_resources; } } @@ -7059,12 +9016,14 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_free_resources; - if (irqpoll_weight > 0) + if (irqpoll_weight > 0) ioc->thresh_hold = irqpoll_weight; else ioc->thresh_hold = ioc->hba_queue_depth/4; +#if defined(MPT3SAS_ENABLE_IRQ_POLL) _base_init_irqpolls(ioc); +#endif init_waitqueue_head(&ioc->reset_wq); /* allocate memory pd handle bitmask list */ @@ -7094,11 +9053,16 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) goto out_free_resources; ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; - ioc->device_remove_in_progress = - kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); + ioc->device_remove_in_progress = kzalloc(ioc->device_remove_in_progress_sz, + GFP_KERNEL); if (!ioc->device_remove_in_progress) goto out_free_resources; + ioc->tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8); + ioc->tm_tr_retry = kzalloc(ioc->tm_tr_retry_sz, GFP_KERNEL); + if (!ioc->tm_tr_retry) + goto out_free_resources; + ioc->fwfault_debug = mpt3sas_fwfault_debug; /* base internal command bits */ @@ -7136,10 +9100,16 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; mutex_init(&ioc->ctl_cmds.mutex); + /* ctl module diag_buffer internal command bits */ + ioc->ctl_diag_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_diag_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->ctl_diag_cmds.mutex); + if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || !ioc->config_cmds.reply || - !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { + !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense || + !ioc->ctl_diag_cmds.reply) { r = -ENOMEM; goto out_free_resources; } @@ -7163,13 +9133,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR); if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { if (ioc->is_gen35_ioc) { - _base_unmask_events(ioc, - MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION); - _base_unmask_events(ioc, - MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); } } + +#if defined(TARGET_MODE) + _base_unmask_events(ioc, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); + _base_unmask_events(ioc, MPI2_EVENT_HARD_RESET_RECEIVED); +#endif + r = _base_make_ioc_operational(ioc); if (r) goto out_free_resources; @@ -7183,6 +9158,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->non_operational_loop = 0; ioc->got_task_abort_from_ioctl = 0; + ioc->got_task_abort_from_sysfs = 0; return 0; out_free_resources: @@ -7197,6 +9173,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); kfree(ioc->device_remove_in_progress); 
kfree(ioc->pend_os_device_add); kfree(ioc->tm_cmds.reply); @@ -7207,6 +9184,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) kfree(ioc->port_enable_cmds.reply); kfree(ioc->ctl_cmds.reply); kfree(ioc->ctl_cmds.sense); + kfree(ioc->ctl_diag_cmds.reply); kfree(ioc->pfacts); ioc->ctl_cmds.reply = NULL; ioc->base_cmds.reply = NULL; @@ -7222,13 +9200,17 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_base_detach - remove controller instance * @ioc: per adapter object + * + * Return nothing. */ void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) { - dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_stop_hba_unplug_watchdog(ioc); mpt3sas_base_free_resources(ioc); _base_release_memory_pools(ioc); mpt3sas_free_enclosure_list(ioc); @@ -7238,9 +9220,11 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); kfree(ioc->device_remove_in_progress); kfree(ioc->pend_os_device_add); kfree(ioc->pfacts); + kfree(ioc->ctl_diag_cmds.reply); kfree(ioc->ctl_cmds.reply); kfree(ioc->ctl_cmds.sense); kfree(ioc->base_cmds.reply); @@ -7251,26 +9235,12 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) kfree(ioc->config_cmds.reply); } -/** - * _base_pre_reset_handler - pre reset handler - * @ioc: per adapter object - */ -static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) +static void +_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc) { - mpt3sas_scsih_pre_reset_handler(ioc); - mpt3sas_ctl_pre_reset_handler(ioc); - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); -} + struct _internal_qcmd *scsih_qcmd, *scsih_qcmd_next; + unsigned long flags; -/** - * _base_after_reset_handler - after reset handler - * @ioc: per adapter object - */ -static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) -{ - mpt3sas_scsih_after_reset_handler(ioc); - mpt3sas_ctl_after_reset_handler(ioc); - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { ioc->transport_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); @@ -7285,55 +9255,115 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) ioc->port_enable_failed = 1; ioc->port_enable_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) if (ioc->is_driver_loading) { ioc->start_scan_failed = - MPI2_IOCSTATUS_INTERNAL_ERROR; + MPI2_IOCSTATUS_INTERNAL_ERROR; ioc->start_scan = 0; ioc->port_enable_cmds.status = - MPT3_CMD_NOT_USED; - } else { + MPT3_CMD_NOT_USED; + } else complete(&ioc->port_enable_cmds.done); - } +#else + complete(&ioc->port_enable_cmds.done); +#endif } if (ioc->config_cmds.status & MPT3_CMD_PENDING) { ioc->config_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); - ioc->config_cmds.smid = USHRT_MAX; + ioc->config_cmds.smid = USHORT_MAX; complete(&ioc->config_cmds.done); } + + spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags); + list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next, + &ioc->scsih_q_intenal_cmds, list) { + if ((scsih_qcmd->status) & MPT3_CMD_PENDING) { + scsih_qcmd->status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, scsih_qcmd->smid); + } + } + spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags); } 
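The hunks that follow collapse the separate pre/after/done reset handlers back into a single _base_reset_handler() taking a phase argument, with outstanding-command cleanup hung off the AFTER_RESET phase. A minimal rendition of that phase-dispatch pattern:

#include <stdio.h>

enum reset_phase { IOC_PRE_RESET, IOC_AFTER_RESET, IOC_DONE_RESET };

static void reset_handler(enum reset_phase phase)
{
	switch (phase) {
	case IOC_PRE_RESET:
		puts("pre-reset: quiesce new work");
		break;
	case IOC_AFTER_RESET:
		puts("after-reset: fail back outstanding internal commands");
		break;
	case IOC_DONE_RESET:
		puts("done-reset: resume normal operation");
		break;
	}
}

int main(void)
{
	reset_handler(IOC_PRE_RESET);
	reset_handler(IOC_AFTER_RESET);
	reset_handler(IOC_DONE_RESET);
	return 0;
}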
 /**
- * _base_reset_done_handler - reset done handler
+ * _base_reset_handler - reset callback handler (for base)
  * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ *
+ * Return nothing.
  */
-static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+static void
+_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
 {
-	mpt3sas_scsih_reset_done_handler(ioc);
-	mpt3sas_ctl_reset_done_handler(ioc);
-	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
+	mpt3sas_scsih_reset_handler(ioc, reset_phase);
+	mpt3sas_ctl_reset_handler(ioc, reset_phase);
+#if defined(TARGET_MODE)
+	if (stm_callbacks.reset_handler)
+		stm_callbacks.reset_handler(ioc, reset_phase);
+#endif
+
+	switch (reset_phase) {
+	case MPT3_IOC_PRE_RESET:
+		dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: "
+		    "MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+		break;
+	case MPT3_IOC_AFTER_RESET:
+		dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: "
+		    "MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+		_base_clear_outstanding_mpt_commands(ioc);
+		break;
+	case MPT3_IOC_DONE_RESET:
+		dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: "
+		    "MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+		break;
+	}
 }

 /**
  * mpt3sas_wait_for_commands_to_complete - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
  *
- * This function is waiting 10s for all pending commands to complete
+ * This function waits (3s) for all pending commands to complete
  * prior to putting controller in reset.
  */
 void
 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
 {
 	u32 ioc_state;
+	unsigned long flags;
+	u16 i;
+	struct scsiio_tracker *st;

 	ioc->pending_io_count = 0;
+
+	if (!mpt3sas_base_pci_device_is_available(ioc)) {
+		printk(MPT3SAS_ERR_FMT
+		    "%s: pci error recovery reset or"
+		    " pci device unplug occurred\n",
+		    ioc->name, __func__);
+		return;
+	}

 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
 	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
 		return;

 	/* pending command count */
-	ioc->pending_io_count = scsi_host_busy(ioc->shost);
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	for (i = 1; i <= ioc->scsiio_depth; i++) {
+		st = mpt3sas_get_st_from_smid(ioc, i);
+		if (st && st->smid != 0) {
+			if (st->cb_idx != 0xFF)
+				ioc->pending_io_count++;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

 	if (!ioc->pending_io_count)
 		return;

@@ -7348,16 +9378,19 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
  * IOC variables accordingly.
* * @ioc: Pointer to MPT_ADAPTER structure + * @old_facts: IOCFacts object before firmware upgrade */ static int _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) { - u16 pd_handles_sz; + u16 pd_handles_sz, tm_tr_retry_sz; void *pd_handles = NULL, *blocking_handles = NULL; void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + u8 *tm_tr_retry = NULL; struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; - if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) + { pd_handles_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) pd_handles_sz++; @@ -7365,22 +9398,21 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, GFP_KERNEL); if (!pd_handles) { - ioc_info(ioc, + printk(MPT3SAS_ERR_FMT "Unable to allocate the memory for pd_handles of sz: %d\n", - pd_handles_sz); + ioc->name, pd_handles_sz ); return -ENOMEM; } memset(pd_handles + ioc->pd_handles_sz, 0, (pd_handles_sz - ioc->pd_handles_sz)); ioc->pd_handles = pd_handles; - blocking_handles = krealloc(ioc->blocking_handles, - pd_handles_sz, GFP_KERNEL); + blocking_handles = krealloc(ioc->blocking_handles, pd_handles_sz, + GFP_KERNEL); if (!blocking_handles) { - ioc_info(ioc, - "Unable to allocate the memory for " - "blocking_handles of sz: %d\n", - pd_handles_sz); + printk(MPT3SAS_ERR_FMT + "Unable to allocate the memory for blocking_handles of sz: %d\n", + ioc->name, pd_handles_sz ); return -ENOMEM; } memset(blocking_handles + ioc->pd_handles_sz, 0, @@ -7388,12 +9420,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) ioc->blocking_handles = blocking_handles; ioc->pd_handles_sz = pd_handles_sz; - pend_os_device_add = krealloc(ioc->pend_os_device_add, - pd_handles_sz, GFP_KERNEL); + pend_os_device_add = krealloc(ioc->pend_os_device_add, pd_handles_sz, + GFP_KERNEL); if (!pend_os_device_add) { - ioc_info(ioc, + printk(MPT3SAS_ERR_FMT "Unable to allocate the memory for pend_os_device_add of sz: %d\n", - pd_handles_sz); + ioc->name, pd_handles_sz); return -ENOMEM; } memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, @@ -7401,13 +9433,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add = pend_os_device_add; ioc->pend_os_device_add_sz = pd_handles_sz; - device_remove_in_progress = krealloc( - ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); + device_remove_in_progress = krealloc(ioc->device_remove_in_progress, + pd_handles_sz, GFP_KERNEL); if (!device_remove_in_progress) { - ioc_info(ioc, - "Unable to allocate the memory for " - "device_remove_in_progress of sz: %d\n " - , pd_handles_sz); + printk(MPT3SAS_ERR_FMT + "Unable to allocate the memory for device_remove_in_progress of sz: %d\n", + ioc->name, pd_handles_sz); return -ENOMEM; } memset(device_remove_in_progress + @@ -7415,8 +9446,24 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) (pd_handles_sz - ioc->device_remove_in_progress_sz)); ioc->device_remove_in_progress = device_remove_in_progress; ioc->device_remove_in_progress_sz = pd_handles_sz; + + tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8); + tm_tr_retry = krealloc(ioc->tm_tr_retry, tm_tr_retry_sz, + GFP_KERNEL); + if (!tm_tr_retry) { + printk(MPT3SAS_ERR_FMT + "Unable to allocate the memory for tm_tr_retry of sz: %d\n", + ioc->name, tm_tr_retry_sz); + return -ENOMEM; + } + memset(tm_tr_retry + ioc->tm_tr_retry_sz, 0, + (tm_tr_retry_sz - ioc->tm_tr_retry_sz)); + ioc->tm_tr_retry = 
tm_tr_retry; + ioc->tm_tr_retry_sz = tm_tr_retry_sz; } + // TODO - Check for other IOCFacts attributes also. + memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); return 0; } @@ -7426,51 +9473,62 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) * @ioc: Pointer to MPT_ADAPTER structure * @type: FORCE_BIG_HAMMER or SOFT_RESET * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int -mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, - enum reset_type type) +mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) { int r; unsigned long flags; u32 ioc_state; u8 is_fault = 0, is_trigger = 0; - dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, + __func__)); - if (ioc->pci_error_recovery) { - ioc_err(ioc, "%s: pci error recovery reset\n", __func__); + /* wait for an active reset in progress to complete */ + if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { + do { + ssleep(1); + } while (ioc->shost_recovery == 1); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); + return ioc->ioc_reset_status; + } + + if (!mpt3sas_base_pci_device_is_available(ioc)) { + printk(MPT3SAS_ERR_FMT + "%s: pci error recovery reset or" + " pci device unplug occurred\n", + ioc->name, __func__); + if (mpt3sas_base_pci_device_is_unplugged(ioc)) + ioc->schedule_dead_ioc_flush_running_cmds(ioc); r = 0; goto out_unlocked; } - - if (mpt3sas_fwfault_debug) - mpt3sas_halt_firmware(ioc); - - /* wait for an active reset in progress to complete */ - mutex_lock(&ioc->reset_in_progress_mutex); + + mpt3sas_halt_firmware(ioc, 0); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); ioc->shost_recovery = 1; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_REGISTERED) && (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_RELEASED))) { is_trigger = 1; - ioc_state = mpt3sas_base_get_iocstate(ioc, 0); if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) is_fault = 1; } - _base_pre_reset_handler(ioc); + _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); mpt3sas_wait_for_commands_to_complete(ioc); - _base_mask_interrupts(ioc); - r = _base_make_ioc_ready(ioc, type); + mpt3sas_base_mask_interrupts(ioc); + r = mpt3sas_base_make_ioc_ready(ioc, type); if (r) goto out; - _base_after_reset_handler(ioc); + _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET); /* If this hard reset is called while port enable is active, then * there is no reason to call make_ioc_operational @@ -7480,45 +9538,75 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, r = -EFAULT; goto out; } + r = _base_get_ioc_facts(ioc); if (r) goto out; r = _base_check_ioc_facts_changes(ioc); if (r) { - ioc_info(ioc, + printk(MPT3SAS_ERR_FMT "Some of the parameters got changed in this new firmware" - " image and it requires system reboot\n"); + " image and it requires system reboot\n", ioc->name); goto out; } + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) - panic("%s: Issue occurred with flashing controller firmware." - "Please reboot the system and ensure that the correct" - " firmware version is running\n", ioc->name); + panic("%s: Issue occurred with flashing controller firmware. 
" + "Please reboot the system and ensure that the correct" + " firmware version is running\n", ioc->name); r = _base_make_ioc_operational(ioc); if (!r) - _base_reset_done_handler(ioc); + _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); out: - dtmprintk(ioc, - ioc_info(ioc, "%s: %s\n", - __func__, r == 0 ? "SUCCESS" : "FAILED")); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: %s\n", + ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_status = r; ioc->shost_recovery = 0; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); ioc->ioc_reset_count++; mutex_unlock(&ioc->reset_in_progress_mutex); +#if defined(DISABLE_RESET_SUPPORT) + if (r != 0) { + struct task_struct *p; + + ioc->remove_host = 1; + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, + "mpt3sas_dead_ioc_%d", ioc->id); + if (IS_ERR(p)) + printk(MPT3SAS_ERR_FMT "%s: Running" + " mpt3sas_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + printk(MPT3SAS_ERR_FMT "%s: Running" + " mpt3sas_dead_ioc thread success !!!!\n", + ioc->name, __func__); + } +#else + /* Flush all the outstanding IOs even when diag reset fails*/ + if (r != 0) + ioc->schedule_dead_ioc_flush_running_cmds(ioc); +#endif + out_unlocked: - if ((r == 0) && is_trigger) { + if ((r == 0) && is_trigger) { if (is_fault) mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); else mpt3sas_trigger_master(ioc, MASTER_TRIGGER_ADAPTER_RESET); } - dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); return r; } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_base_hard_reset_handler); +#endif + diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h old mode 100644 new mode 100755 index faca0a5e71f8..d040e467171d --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -3,9 +3,10 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -51,11 +52,12 @@ #include "mpi/mpi2_ioc.h" #include "mpi/mpi2_cnfg.h" #include "mpi/mpi2_init.h" +#include "mpi/mpi2_image.h" #include "mpi/mpi2_raid.h" +#include "mpi/mpi2_targ.h" #include "mpi/mpi2_tool.h" #include "mpi/mpi2_sas.h" #include "mpi/mpi2_pci.h" -#include "mpi/mpi2_image.h" #include #include @@ -67,36 +69,53 @@ #include #include #include -#include +#include "mpt3sas_compatibility.h" #include "mpt3sas_debug.h" #include "mpt3sas_trigger_diag.h" -/* driver versioning info */ +/* mpt3sas driver versioning info */ #define MPT3SAS_DRIVER_NAME "mpt3sas" -#define MPT3SAS_AUTHOR "Avago Technologies " -#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "31.100.00.00" -#define MPT3SAS_MAJOR_VERSION 31 -#define MPT3SAS_MINOR_VERSION 100 +#define MPT3SAS_AUTHOR "Broadcom Inc. 
" +#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 & SAS 3.5 Device Driver" +#define MPT3SAS_DRIVER_VERSION "32.00.00.00" +#define MPT3SAS_MAJOR_VERSION 32 +#define MPT3SAS_MINOR_VERSION 00 #define MPT3SAS_BUILD_VERSION 0 -#define MPT3SAS_RELEASE_VERSION 00 +#define MPT3SAS_RELEASE_VERSION 0 +/* mpt2sas driver versioning info */ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "20.102.00.00" +#define MPT2SAS_DRIVER_VERSION "20.00.03.00" #define MPT2SAS_MAJOR_VERSION 20 -#define MPT2SAS_MINOR_VERSION 102 -#define MPT2SAS_BUILD_VERSION 0 -#define MPT2SAS_RELEASE_VERSION 00 +#define MPT2SAS_MINOR_VERSION 0 +#define MPT2SAS_BUILD_VERSION 3 +#define MPT2SAS_RELEASE_VERSION 0 /* * Set MPT3SAS_SG_DEPTH value based on user input. */ -#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) +#define MPT_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS +#elif ((defined(CONFIG_SUSE_KERNEL) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(4,4,59))) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0))) +#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE +#else +#define MPT_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS +#endif #define MPT_MIN_PHYS_SEGMENTS 16 #define MPT_KDUMP_MIN_PHYS_SEGMENTS 32 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0) || (defined(CONFIG_SUSE_KERNEL) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(4,4,59)))) +#define MPT_MAX_SG_SEGMENTS SG_MAX_SEGMENTS +#define MPT_MAX_PHYS_SEGMENTS_STRING "SG_CHUNK_SIZE" +#else +#define MPT_MAX_SG_SEGMENTS SCSI_MAX_SG_CHAIN_SEGMENTS +#define MPT_MAX_PHYS_SEGMENTS_STRING "SCSI_MAX_SG_SEGMENTS" +#endif + #define MCPU_MAX_CHAINS_PER_IO 3 #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE @@ -111,6 +130,22 @@ #define MPT2SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS #endif +#if 0 +#if defined(TARGET_MODE) +#include "../target/mpt3sas_stm_common.h" +#endif +#endif + +#if (((defined(CONFIG_SUSE_KERNEL) || defined(RHEL_RELEASE_CODE)) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) +#define RAMP_UP_SUPPORT +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif /* !U32_MAX */ + /* * Generic Defines */ @@ -118,20 +153,29 @@ #define MPT3SAS_SAS_QUEUE_DEPTH 254 #define MPT3SAS_RAID_QUEUE_DEPTH 128 #define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200 - -#define MPT3SAS_RAID_MAX_SECTORS 8192 #define MPT3SAS_HOST_PAGE_SIZE_4K 12 #define MPT3SAS_NVME_QUEUE_DEPTH 128 + +#define MPT3SAS_RAID_MAX_SECTORS 8192 + #define MPT_NAME_LENGTH 32 /* generic length of strings */ +#define MPT_DRIVER_NAME_LENGTH 24 #define MPT_STRING_LENGTH 64 #define MPI_FRAME_START_OFFSET 256 -#define REPLY_FREE_POOL_SIZE 512 /*(32 maxcredix *4)*(4 times)*/ +#define REPLY_FREE_POOL_SIZE 512 /*(32 maxcredit *4)*(4 times)*/ #define MPT_MAX_CALLBACKS 32 +#if defined(TARGET_MODE) +#undef MPT_MAX_CALLBACKS +#define MPT_MAX_CALLBACKS 32 +#endif + #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ /* reserved for issuing internally framed scsi io cmds */ #define INTERNAL_SCSIIO_CMDS_COUNT 3 +#define INTERNAL_SCSIIO_FOR_IOCTL 1 +#define INTERNAL_SCSIIO_FOR_DISCOVERY 2 #define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/ @@ -140,47 +184,48 @@ #define MAX_CHAIN_ELEMT_SZ 16 #define DEFAULT_NUM_FWCHAIN_ELEMTS 8 -#define FW_IMG_HDR_READ_TIMEOUT 15 - -#define IOC_OPERATIONAL_WAIT_COUNT 10 - /* * NVMe defines */ #define NVME_PRP_SIZE 8 /* PRP size */ #define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */ +#define NVME_PRP_PAGE_SIZE 4096 /* Page size */ + #define 
NVME_TASK_ABORT_MIN_TIMEOUT 6 #define NVME_TASK_ABORT_MAX_TIMEOUT 60 #define NVME_TASK_MNGT_CUSTOM_MASK (0x0010) -#define NVME_PRP_PAGE_SIZE 4096 /* Page size */ struct mpt3sas_nvme_cmd { u8 rsvd[24]; - __le64 prp1; - __le64 prp2; + u64 prp1; + u64 prp2; }; +/* + * reset phases + */ +#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */ +#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */ +#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */ + /* * logging format */ -#define ioc_err(ioc, fmt, ...) \ - pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__) -#define ioc_notice(ioc, fmt, ...) \ - pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__) -#define ioc_warn(ioc, fmt, ...) \ - pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__) -#define ioc_info(ioc, fmt, ...) \ - pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define MPT3SAS_FMT "%s: " +#define MPT3SAS_INFO_FMT KERN_INFO MPT3SAS_FMT +#define MPT3SAS_NOTE_FMT KERN_NOTICE MPT3SAS_FMT +#define MPT3SAS_WARN_FMT KERN_WARNING MPT3SAS_FMT +#define MPT3SAS_ERR_FMT KERN_ERR MPT3SAS_FMT /* * WarpDrive Specific Log codes */ #define MPT2_WARPDRIVE_LOGENTRY (0x8002) -#define MPT2_WARPDRIVE_LC_SSDT (0x41) +#define MPT2_WARPDRIVE_LC_SSDT (0x41) #define MPT2_WARPDRIVE_LC_SSDLW (0x43) #define MPT2_WARPDRIVE_LC_SSDLF (0x44) -#define MPT2_WARPDRIVE_LC_BRMF (0x4D) +#define MPT2_WARPDRIVE_LC_BRMF (0x4D) /* * per target private data @@ -191,22 +236,33 @@ struct mpt3sas_nvme_cmd { #define MPT_TARGET_FASTPATH_IO 0x08 #define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10 -#define SAS2_PCI_DEVICE_B0_REVISION (0x01) -#define SAS3_PCI_DEVICE_C0_REVISION (0x02) +#define SAS2_PCI_DEVICE_B0_REVISION (0x01) +#define SAS3_PCI_DEVICE_C0_REVISION (0x02) -/* Atlas PCIe Switch Management Port */ -#define MPI26_ATLAS_PCIe_SWITCH_DEVID (0x00B2) +/* MSI-x ReplyPostRegister defines */ +#define NUM_REPLY_POST_INDEX_REGISTERS_16 16 +#define NUM_REPLY_POST_INDEX_REGISTERS_12 12 +#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 
16 : 8) + +/* Enable DDIO counters */ +#define MPT2SAS_WD_DDIOCOUNT +#if 0 /* remove to turn on WD debug messages */ +#define MPT2SAS_WD_LOGGING +#endif + +#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6 +#define FW_IMG_HDR_READ_TIMEOUT 15 /* - * Intel HBA branding + * Intel SAS2 HBA branding */ -#define MPT2SAS_INTEL_RMS25JB080_BRANDING \ +#define MPT2SAS_INTEL_RMS25JB080_BRANDING \ "Intel(R) Integrated RAID Module RMS25JB080" -#define MPT2SAS_INTEL_RMS25JB040_BRANDING \ +#define MPT2SAS_INTEL_RMS25JB040_BRANDING \ "Intel(R) Integrated RAID Module RMS25JB040" -#define MPT2SAS_INTEL_RMS25KB080_BRANDING \ +#define MPT2SAS_INTEL_RMS25KB080_BRANDING \ "Intel(R) Integrated RAID Module RMS25KB080" -#define MPT2SAS_INTEL_RMS25KB040_BRANDING \ +#define MPT2SAS_INTEL_RMS25KB040_BRANDING \ "Intel(R) Integrated RAID Module RMS25KB040" #define MPT2SAS_INTEL_RMS25LB040_BRANDING \ "Intel(R) Integrated RAID Module RMS25LB040" @@ -216,44 +272,51 @@ struct mpt3sas_nvme_cmd { "Intel Integrated RAID Module RMS2LL080" #define MPT2SAS_INTEL_RMS2LL040_BRANDING \ "Intel Integrated RAID Module RMS2LL040" -#define MPT2SAS_INTEL_RS25GB008_BRANDING \ +#define MPT2SAS_INTEL_RS25GB008_BRANDING \ "Intel(R) RAID Controller RS25GB008" -#define MPT2SAS_INTEL_SSD910_BRANDING \ +#define MPT2SAS_INTEL_SSD910_BRANDING \ "Intel(R) SSD 910 Series" +/* + * Intel SAS2 HBA SSDIDs + */ +#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516 +#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 +#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 +#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 +#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A +#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B +#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E +#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F +#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 +#define MPT2SAS_INTEL_SSD910_SSDID 0x3700 + +/* + * Intel SAS3 HBA branding + */ #define MPT3SAS_INTEL_RMS3JC080_BRANDING \ "Intel(R) Integrated RAID Module RMS3JC080" #define MPT3SAS_INTEL_RS3GC008_BRANDING \ - "Intel(R) RAID Controller RS3GC008" + "Intel(R) RAID Controller RS3GC008" #define MPT3SAS_INTEL_RS3FC044_BRANDING \ - "Intel(R) RAID Controller RS3FC044" + "Intel(R) RAID Controller RS3FC044" #define MPT3SAS_INTEL_RS3UC080_BRANDING \ - "Intel(R) RAID Controller RS3UC080" + "Intel(R) RAID Controller RS3UC080" +#define MPT3SAS_INTEL_RS3PC_BRANDING \ + "Intel(R) RAID Integrated RAID RS3PC" /* - * Intel HBA SSDIDs + * Intel SAS3 HBA SSDIDs */ -#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516 -#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 -#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 -#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 -#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A -#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B -#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E -#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F -#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 -#define MPT2SAS_INTEL_SSD910_SSDID 0x3700 - -#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521 -#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522 -#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523 -#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 +#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521 +#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522 +#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523 +#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 +#define MPT3SAS_INTEL_RS3PC_SSDID 0x3527 /* - * Dell HBA branding + * Dell SAS2 HBA branding */ -#define MPT2SAS_DELL_BRANDING_SIZE 32 - #define MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING "Dell 6Gbps SAS HBA" #define MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING "Dell PERC H200 Adapter" #define 
MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING "Dell PERC H200 Integrated" @@ -262,35 +325,46 @@ struct mpt3sas_nvme_cmd { #define MPT2SAS_DELL_PERC_H200_BRANDING "Dell PERC H200" #define MPT2SAS_DELL_6GBPS_SAS_BRANDING "Dell 6Gbps SAS" +/* + * Dell SAS2 HBA SSDIDs + */ +#define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C +#define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E +#define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F +#define MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID 0x1F20 +#define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21 +#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22 +/* + * Dell SAS3 HBA branding + */ +#define MPT3SAS_DELL_HBA330_ADP_BRANDING \ + "Dell HBA330 Adp" #define MPT3SAS_DELL_12G_HBA_BRANDING \ - "Dell 12Gbps HBA" + "Dell 12Gbps SAS HBA" +#define MPT3SAS_DELL_HBA330_MINI_BRANDING \ + "Dell HBA330 Mini" /* - * Dell HBA SSDIDs + * Dell SAS3 HBA SSDIDs */ -#define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C -#define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D -#define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E -#define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F -#define MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID 0x1F20 -#define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21 -#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22 - -#define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46 +#define MPT3SAS_DELL_HBA330_ADP_SSDID 0x1F45 +#define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46 +#define MPT3SAS_DELL_HBA330_MINI_SSDID 0x1F53 /* - * Cisco HBA branding + * Cisco SAS3 HBA branding */ -#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \ - "Cisco 9300-8E 12G SAS HBA" -#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \ - "Cisco 9300-8i 12G SAS HBA" -#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \ - "Cisco 12G Modular SAS Pass through Controller" -#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \ - "UCS C3X60 12G SAS Pass through Controller" +#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \ + "Cisco 9300-8E 12G SAS HBA" +#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \ + "Cisco 9300-8i 12G SAS HBA" +#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \ + "Cisco 12G Modular SAS Pass through Controller" +#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \ + "UCS C3X60 12G SAS Pass through Controller" /* - * Cisco HBA SSSDIDs + * Cisco SAS3 HBA SSSDIDs */ #define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C #define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154 @@ -298,17 +372,9 @@ struct mpt3sas_nvme_cmd { #define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156 /* - * status bits for ioc->diag_buffer_status + * HP SAS2 HBA branding */ -#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) -#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) -#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) - -/* - * HP HBA branding - */ -#define MPT2SAS_HP_3PAR_SSVID 0x1590 - +#define MPT2SAS_HP_3PAR_SSVID 0x1590 #define MPT2SAS_HP_2_4_INTERNAL_BRANDING \ "HP H220 Host Bus Adapter" #define MPT2SAS_HP_2_4_EXTERNAL_BRANDING \ @@ -321,7 +387,7 @@ struct mpt3sas_nvme_cmd { "HP H210i Host Bus Adapter" /* - * HO HBA SSDIDs + * HP SAS2 HBA SSDIDs */ #define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 #define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 @@ -329,15 +395,38 @@ struct mpt3sas_nvme_cmd { #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 -/* - * Combined Reply Queue constants, - * There are twelve Supplemental Reply Post Host Index Registers - * and each register is at offset 0x10 bytes from the previous one. 
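The comment removed above is the one place this header described the combined reply queue layout: a bank of Supplemental Reply Post Host Index registers spaced 0x10 bytes apart, each shared by a group of MSI-X vectors. A minimal sketch of the index arithmetic that layout implies follows; the group size of eight and the bit-24 placement of the vector-within-group follow the usual MPI reply-post encoding and are assumptions here, not values taken from this hunk:

#include <stdio.h>

#define SUP_REG_STRIDE 0x10 /* bytes between consecutive supplemental registers */
#define VECTORS_PER_SUP_REG 8 /* MSI-X vectors sharing one register (assumed) */

/* Byte offset of the supplemental register serving a given MSI-X vector. */
static unsigned int sup_register_offset(unsigned int msix_index)
{
	return (msix_index / VECTORS_PER_SUP_REG) * SUP_REG_STRIDE;
}

/* Value written to it: host index, with vector-within-group in bits 31:24. */
static unsigned int sup_register_value(unsigned int msix_index,
				       unsigned int host_index)
{
	return host_index | ((msix_index % VECTORS_PER_SUP_REG) << 24);
}

int main(void)
{
	/* MSI-X vector 13 with host index 42: register 1 (offset 0x10),
	 * encoded value 0x0500002a. */
	printf("offset=0x%x value=0x%08x\n",
	       sup_register_offset(13), sup_register_value(13, 42));
	return 0;
}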
+ /* + * status bits for ioc->diag_buffer_status */ -#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8) -#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 -#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 -#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) +#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) +#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) +#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) +#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08) +#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10) + +/* + * End to End Data Protection Support + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + +#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP +#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP +#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP + +/* the read capacity 16 byte parameter block - defined in SBC-3 */ +struct read_cap_parameter { + u64 logical_block_addr; + u32 logical_block_length; + u8 prot_en:1; + u8 p_type:3; + u8 reserved0:4; + u8 logical_blocks_per_phyical_block:4; + u8 reserved1:4; + u16 lowest_aligned_log_block_address:14; + u16 reserved2:2; + u8 reserved3[16]; +}; +#endif /* OEM Identifiers */ #define MFG10_OEM_ID_INVALID (0x00000000) @@ -353,7 +442,36 @@ struct mpt3sas_nvme_cmd { #define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) #define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) -#define VIRTUAL_IO_FAILED_RETRY (0x32010081) +/* SCSI ADDITIONAL SENSE Codes */ +#define FIXED_SENSE_DATA 0x70 +#define SCSI_ASC_NO_SENSE 0x00 +#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03 +#define SCSI_ASC_LUN_NOT_READY 0x04 +#define SCSI_ASC_WARNING 0x0B +#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10 +#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10 +#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10 +#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11 +#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D +#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20 +#define SCSI_ASC_ILLEGAL_COMMAND 0x20 +#define SCSI_ASC_ILLEGAL_BLOCK 0x21 +#define SCSI_ASC_INVALID_CDB 0x24 +#define SCSI_ASC_INVALID_LUN 0x25 +#define SCSI_ASC_INVALID_PARAMETER 0x26 +#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31 +#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44 + +/* SCSI ADDITIONAL SENSE Code Qualifiers */ + +#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00 +#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01 +#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01 +#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02 +#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03 +#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04 +#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08 +#define SCSI_ASCQ_INVALID_LUN_ID 0x09 /* High IOPs definitions */ #define MPT3SAS_DEVICE_HIGH_IOPS_DEPTH 8 @@ -361,6 +479,152 @@ struct mpt3sas_nvme_cmd { #define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16 #define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128 +/* Get status code type(bits 27-25) with status code(bits 24-17) + * value from status field. 
+ */ +#define get_nvme_sct_with_sc(nvme_status) \ + ((nvme_status & 0xFFE) >> 1) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) +#define UNMAP 0x42 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) + +/* DataSetManagment commands attributes */ +enum { + NVME_DSMGMT_IDR = 1 << 0, + NVME_DSMGMT_IDW = 1 << 1, + NVME_DSMGMT_AD = 1 << 2, +}; + +/* I/O commands */ +enum nvme_opcode { + nvme_cmd_flush = 0x00, + nvme_cmd_write = 0x01, + nvme_cmd_read = 0x02, + nvme_cmd_write_uncor = 0x04, + nvme_cmd_compare = 0x05, + nvme_cmd_write_zeroes = 0x08, + nvme_cmd_dsm = 0x09, + nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, +}; + +/* NVMe commmands status */ +enum { + NVME_SC_SUCCESS = 0x0, + NVME_SC_INVALID_OPCODE = 0x1, + NVME_SC_INVALID_FIELD = 0x2, + NVME_SC_CMDID_CONFLICT = 0x3, + NVME_SC_DATA_XFER_ERROR = 0x4, + NVME_SC_POWER_LOSS = 0x5, + NVME_SC_INTERNAL = 0x6, + NVME_SC_ABORT_REQ = 0x7, + NVME_SC_ABORT_QUEUE = 0x8, + NVME_SC_FUSED_FAIL = 0x9, + NVME_SC_FUSED_MISSING = 0xa, + NVME_SC_INVALID_NS = 0xb, + NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, + NVME_SC_LBA_RANGE = 0x80, + NVME_SC_CAP_EXCEEDED = 0x81, + NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, + NVME_SC_CQ_INVALID = 0x100, + NVME_SC_QID_INVALID = 0x101, + NVME_SC_QUEUE_SIZE = 0x102, + NVME_SC_ABORT_LIMIT = 0x103, + NVME_SC_ABORT_MISSING = 0x104, + NVME_SC_ASYNC_LIMIT = 0x105, + NVME_SC_FIRMWARE_SLOT = 0x106, + NVME_SC_FIRMWARE_IMAGE = 0x107, + NVME_SC_INVALID_VECTOR = 0x108, + NVME_SC_INVALID_LOG_PAGE = 0x109, + NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, + NVME_SC_INVALID_QUEUE = 0x10c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, + NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_INVALID_PI = 0x181, + NVME_SC_READ_ONLY = 0x182, + NVME_SC_WRITE_FAULT = 0x280, + NVME_SC_READ_ERROR = 0x281, + NVME_SC_GUARD_CHECK = 0x282, + NVME_SC_APPTAG_CHECK = 0x283, + NVME_SC_REFTAG_CHECK = 0x284, + NVME_SC_COMPARE_FAILED = 0x285, + NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_DNR = 0x4000, +}; + +/* DSM range Definition */ +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +/* DataSetManagement command */ +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +/* Generic NVMe command */ +struct nvme_command { + union { + struct nvme_dsm_cmd dsm; + }; +}; + +/* NVMe completion queue entry */ +struct nvme_completion { + __le32 result; /* Used by admin commands to return data */ + __u32 rsvd; + __le16 sq_head; /* how much of this queue may be reclaimed */ + __le16 sq_id; /* submission queue that generated this entry */ + __u16 command_id; /* of the command which completed */ + __le16 status; /* did the command fail, and if so, why? 
*/ +}; +#endif + +/* SCSI UNMAP block descriptor structure */ +struct scsi_unmap_blk_desc { + __be64 slba; + __be32 nlb; + u32 resv; +}; + +/* SCSI UNMAP command's data */ +struct scsi_unmap_parm_list { + __be16 unmap_data_len; + __be16 unmap_blk_desc_data_len; + u32 resv; + struct scsi_unmap_blk_desc desc[0]; +}; + +#ifndef SCSI_MPT2SAS +#define IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED (0x2810) /* Over temp threshold has exceeded. */ +#endif + /* OEM Specific Flags will come from OEM specific header files */ struct Mpi2ManufacturingPage10_t { MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ @@ -376,7 +640,6 @@ struct Mpi2ManufacturingPage10_t { U32 Reserved5[18]; /* 24h - 60h*/ }; - /* Miscellaneous options */ struct Mpi2ManufacturingPage11_t { MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ @@ -391,23 +654,25 @@ struct Mpi2ManufacturingPage11_t { u8 Reserved6; /* 2Fh */ __le32 Reserved7[7]; /* 30h - 4Bh */ u8 NVMeAbortTO; /* 4Ch */ - u8 Reserved8; /* 4Dh */ - u16 Reserved9; /* 4Eh */ - __le32 Reserved10[4]; /* 50h - 60h */ + u8 NumPerDevEvents; /* 4Dh */ + u8 HostTraceBufferDecrementSizeKB; /* 4Eh */ + u8 HostTraceBufferFlags; /* 4Fh */ + u16 HostTraceBufferMaxSizeKB; /* 50h */ + u16 HostTraceBufferMinSizeKB; /* 52h */ + __le32 Reserved10[2]; /* 54h - 5Bh */ }; /** * struct MPT3SAS_TARGET - starget private hostdata * @starget: starget object * @sas_address: target sas address - * @raid_device: raid_device pointer to access volume data * @handle: device handle * @num_luns: number luns * @flags: MPT_TARGET_FLAGS_XXX flags * @deleted: target flaged for deletion * @tm_busy: target is busy with TM request. - * @sas_dev: The sas_device associated with this target - * @pcie_dev: The pcie device associated with this target + * @port: hba port entry + * @sdev: The sas_device associated with this target */ struct MPT3SAS_TARGET { struct scsi_target *starget; @@ -418,20 +683,16 @@ struct MPT3SAS_TARGET { u32 flags; u8 deleted; u8 tm_busy; - struct _sas_device *sas_dev; - struct _pcie_device *pcie_dev; + struct hba_port *port; + struct _sas_device *sas_dev; + struct _pcie_device *pcie_dev; }; - /* * per device private data */ #define MPT_DEVICE_FLAGS_INIT 0x01 - -#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) -#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) -#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01) -#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02) +#define MPT_DEVICE_TLR_ON 0x02 /** * struct MPT3SAS_DEVICE - sdev private hostdata @@ -452,24 +713,33 @@ struct MPT3SAS_DEVICE { u32 flags; u8 configured_lun; u8 block; + u8 deleted; u8 tlr_snoop_check; u8 ignore_delay_remove; - /* Iopriority Command Handling */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + u8 eedp_enable; + u8 eedp_type; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + /* Iopriority Command Handling */ u8 ncq_prio_enable; - /* - * Bug workaround for SATL handling: the mpt2/3sas firmware - * doesn't return BUSY or TASK_SET_FULL for subsequent - * commands while a SATL pass through is in operation as the - * spec requires, it simply does nothing with them until the - * pass through completes, causing them possibly to timeout if - * the passthrough is a long executing command (like format or - * secure erase). This variable allows us to do the right - * thing while a SATL command is pending. 
- */ +#endif +/* +* Bug workaround for SATL handling: the mpt2/3sas firmware +* doesn't return BUSY or TASK_SET_FULL for subsequent +* commands while a SATL pass through is in operation as the +* spec requires, it simply does nothing with them until the +* pass through completes, causing them possibly to timeout if +* the passthrough is a long executing command (like format or +* secure erase). This variable allows us to do the right +* thing while a SATL command is pending. +*/ unsigned long ata_command_pending; - }; +/* Bit indicates ATA command pending or not */ +#define CMND_PENDING_BIT 0 + #define MPT3_CMD_NOT_USED 0x8000 /* free */ #define MPT3_CMD_COMPLETE 0x0001 /* completed */ #define MPT3_CMD_PENDING 0x0002 /* pending */ @@ -493,8 +763,35 @@ struct _internal_cmd { u16 status; u16 smid; }; +/** + * struct _internal_qcmd - internal q commands struct + * @list: list of internal q cmds + * @request: request message pointer + * @reply: reply message pointer + * @sense: sense data + * @status: MPT2_CMD_XXX status + * @smid: system message id + * @transfer_packet: SCSI IO transfer packet pointer + */ +struct _internal_qcmd { + struct list_head list; + void *request; + void *reply; + void *sense; + u16 status; + u16 smid; + struct _scsi_io_transfer *transfer_packet; +}; +#if (defined(CONFIG_SUSE_KERNEL) && defined(scsi_is_sas_phy_local)) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) +#define MPT_WIDE_PORT_API 1 +#define MPT_WIDE_PORT_API_PLUS 1 +#endif - +#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) +#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) +#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01) +#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02) /** * struct _sas_device - attached device information @@ -517,9 +814,15 @@ struct _internal_cmd { * @fast_path: fast path feature enable bit * @pfa_led_on: flag for PFA LED status * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add() - * addition routine. + * addition routine + * @enclosure_level: used for enclosure services * @chassis_slot: chassis slot * @is_chassis_slot_valid: chassis slot valid or not + * @connector_name: ASCII value from pg0.ConnectorName + * @refcount: reference count for deletion + * @port: hba port entry + * @rphy: device's sas_rphy address used to identify this device structure in + * target_alloc callback function */ struct _sas_device { struct list_head list; @@ -540,52 +843,83 @@ struct _sas_device { u8 responding; u8 fast_path; u8 pfa_led_on; - u8 pend_sas_rphy_add; + struct kref refcount; + u8 *serial_number; + u8 pend_sas_rphy_add; u8 enclosure_level; u8 chassis_slot; u8 is_chassis_slot_valid; u8 connector_name[5]; - struct kref refcount; + u8 ssd_device; +#ifndef SCSI_MPT2SAS + u8 supports_sata_smart; +#endif + struct hba_port *port; + struct sas_rphy *rphy; }; +/** + * sas_device_get - Increment the sas device reference count + * @s : sas_device object + * + * Whenever this function is called it will increment the + * reference count of the sas device for which it is called. + * + */ static inline void sas_device_get(struct _sas_device *s) { kref_get(&s->refcount); } +/** + * sas_device_free - Release the sas device object + * @r - kref object + * + * Frees the sas device object. It will be called + * when the reference count reaches zero. 
+ */ static inline void sas_device_free(struct kref *r) { kfree(container_of(r, struct _sas_device, refcount)); } +/** + * sas_device_put - Decrement the sas device reference count + * @s : sas_device object + * + * Whenever this function is called it will decrement the + * reference count of the sas device for which it is called. + * + * When the reference count reaches zero, this will call sas_device_free + * to release the sas_device object. + */ static inline void sas_device_put(struct _sas_device *s) { kref_put(&s->refcount, sas_device_free); } -/* - * struct _pcie_device - attached PCIe device information - * @list: pcie device list - * @starget: starget object - * @wwid: device WWID - * @handle: device handle - * @device_info: bitfield provides detailed info about the device - * @id: target id - * @channel: target channel - * @slot: slot number - * @port_num: port number - * @responding: used in _scsih_pcie_device_mark_responding - * @fast_path: fast path feature enable bit - * @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for - * NVMe device only - * @enclosure_handle: enclosure handle - * @enclosure_logical_id: enclosure logical identifier - * @enclosure_level: The level of device's enclosure from the controller - * @connector_name: ASCII value of the Connector's name - * @serial_number: pointer of serial number string allocated runtime - * @access_status: Device's Access Status - * @refcount: reference count for deletion - */ +/** +* struct _pcie_device - attached PCIe device information +* @list: pcie device list +* @starget: starget object +* @wwid: device WWID +* @handle: device handle +* @device_info: bitfield provides detailed info about the device +* @id: target id +* @channel: target channel +* @slot: slot number +* @port_num: port number +* @responding: used in _scsih_pcie_device_mark_responding +* @fast_path: fast path feature enable bit +* @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for NVMe device only +* @enclosure_handle: enclosure handle +* @enclosure_logical_id: enclosure logical identifier +* @enclosure_level: The level of device's enclosure from the controller +* @connector_name: ASCII value of the Connector's name +* @serial_number: pointer of serial number string allocated runtime +* @access_status: Device's Access Status +* @refcount: reference count for deletion +*/ struct _pcie_device { struct list_head list; struct scsi_target *starget; @@ -602,12 +936,13 @@ struct _pcie_device { u16 enclosure_handle; u64 enclosure_logical_id; u8 enclosure_level; - u8 connector_name[4]; + u8 connector_name[5]; u8 *serial_number; u8 reset_timeout; u8 access_status; struct kref refcount; }; + /** * pcie_device_get - Increment the pcie device reference count * @@ -649,6 +984,9 @@ static inline void pcie_device_put(struct _pcie_device *p) { kref_put(&p->refcount, pcie_device_free); } + + + /** * struct _raid_device - raid volume link list * @list: sas device list @@ -656,7 +994,6 @@ static inline void pcie_device_put(struct _pcie_device *p) * @sdev: scsi device struct (volumes are single lun) * @wwid: unique identifier for the volume * @handle: device handle - * @block_size: Block size of the volume * @id: target id * @channel: target channel * @volume_type: the raid level @@ -711,6 +1048,7 @@ struct _boot_device { * struct _sas_port - wide/narrow sas port information * @port_list: list of ports belonging to expander * @num_phys: number of phys belonging to this port + * @hba_port: hba port entry * @remote_identify: attached device identification * 
@rphy: sas transport rphy object * @port: sas transport wide/narrow port object @@ -719,9 +1057,12 @@ struct _boot_device { struct _sas_port { struct list_head port_list; u8 num_phys; + struct hba_port *hba_port; struct sas_identify remote_identify; struct sas_rphy *rphy; +#if defined(MPT_WIDE_PORT_API) struct sas_port *port; +#endif struct list_head phy_list; }; @@ -735,6 +1076,7 @@ struct _sas_port { * @handle: device handle for this phy * @attached_handle: device handle for attached device * @phy_belongs_to_port: port has been created for this phy + * @port: hba port entry */ struct _sas_phy { struct list_head port_siblings; @@ -745,6 +1087,7 @@ struct _sas_phy { u16 handle; u16 attached_handle; u8 phy_belongs_to_port; + struct hba_port *port; }; /** @@ -758,6 +1101,7 @@ struct _sas_phy { * @enclosure_handle: handle for this a member of an enclosure * @device_info: bitwise defining capabilities of this sas_host/expander * @responding: used in _scsih_expander_device_mark_responding + * @port: hba port entry * @phy: a list of phys that make up this sas_host/expander * @sas_port_list: list of ports attached to this sas_host/expander */ @@ -771,6 +1115,7 @@ struct _sas_node { u16 enclosure_handle; u64 enclosure_logical_id; u8 responding; + struct hba_port *port; struct _sas_phy *phy; struct list_head sas_port_list; }; @@ -796,7 +1141,7 @@ enum reset_type { SOFT_RESET, }; -/** +/* * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O) * @pcie_sgl: PCIe native SGL for NVMe devices * @pcie_sgl_dma: physical address @@ -825,9 +1170,12 @@ struct chain_lookup { /** * struct scsiio_tracker - scsi mf request tracker * @smid: system message id + * @scmd: scsi request pointer * @cb_idx: callback index * @direct_io: To indicate whether I/O is direct (WARPDRIVE) - * @chain_list: list of associated firmware chain tracker + * @pcie_sg_list: PCIe native SGL in contiguous memory + * @chain_list: list of chains for this I/O + * @tracker_list: list of free request (ioc->free_list) * @msix_io: IO's msix */ struct scsiio_tracker { @@ -835,7 +1183,6 @@ struct scsiio_tracker { struct scsi_cmnd *scmd; u8 cb_idx; u8 direct_io; - struct pcie_sg_list pcie_sg_list; struct list_head chain_list; u16 msix_io; }; @@ -873,7 +1220,7 @@ struct _sc_list { }; /** - * struct _event_ack_list - delayed event acknowledgment list + * struct _event_ack_list - delayed event acknowledgement list * @Event: Event ID * @EventContext: used to track the event uniquely */ @@ -900,27 +1247,43 @@ struct _event_ack_list { struct adapter_reply_queue { struct MPT3SAS_ADAPTER *ioc; u8 msix_index; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) + unsigned int vector; +#endif u32 reply_post_host_index; Mpi2ReplyDescriptorsUnion_t *reply_post_free; char name[MPT_NAME_LENGTH]; atomic_t busy; +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)) || ((LINUX_VERSION_CODE >= \ + KERNEL_VERSION(2,6,36)))) + cpumask_var_t affinity_hint; +#endif +#if defined(MPT3SAS_ENABLE_IRQ_POLL) u32 os_irq; struct irq_poll irqpoll; - bool irq_poll_scheduled; + bool irq_poll_scheduled; bool irq_line_enable; +#endif struct list_head list; }; +/* IOC Facts and Port Facts converted from little endian to cpu */ +union mpi3_version_union { + MPI2_VERSION_STRUCT Struct; + u32 Word; +}; + + typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); /* SAS3.0 support */ typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device); typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER 
*ioc, void *psge, - dma_addr_t data_out_dma, size_t data_out_sz, - dma_addr_t data_in_dma, size_t data_in_sz); + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz); typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc, - void *paddr); + void *paddr); /* SAS3.5 support */ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid, @@ -929,22 +1292,17 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid, size_t data_in_sz); /* To support atomic and non atomic descriptors*/ -typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid, +typedef void (*PUT_SMID_IO_FP_HIP_TA) (struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 funcdep); -typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid); +typedef void (*PUT_SMID_DEF_NVME) (struct MPT3SAS_ADAPTER *ioc, u16 smid); + typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr); -/* + + /* * To get high iops reply queue's msix index when high iops mode is enabled * else get the msix index of general reply queues. */ -typedef u8 (*GET_MSIX_INDEX) (struct MPT3SAS_ADAPTER *ioc, - struct scsi_cmnd *scmd); - -/* IOC Facts and Port Facts converted from little endian to cpu */ -union mpi3_version_union { - MPI2_VERSION_STRUCT Struct; - u32 Word; -}; +typedef u8 (*GET_MSIX_INDEX) (struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd); struct mpt3sas_facts { u16 MsgVersion; @@ -964,7 +1322,11 @@ struct mpt3sas_facts { u32 IOCCapabilities; union mpi3_version_union FWVersion; u16 IOCRequestFrameSize; +#ifndef SCSI_MPT2SAS u16 IOCMaxChainSegmentSize; +#else + u16 Reserved3; +#endif u16 MaxInitiators; u16 MaxTargets; u16 MaxSasExpanders; @@ -989,10 +1351,33 @@ struct mpt3sas_port_facts { }; struct reply_post_struct { - Mpi2ReplyDescriptorsUnion_t *reply_post_free; + Mpi2ReplyDescriptorsUnion_t *reply_post_free; dma_addr_t reply_post_free_dma; + struct dma_pool *dma_pool; + }; + /** + * struct hba_port - Saves each HBA's Wide/Narrow port info + * @port_id: port number + * @sas_address: sas address of this wide/narrow port's attached device + * @phy_mask: HBA PHY's belonging to this port + * @flags: hba port flags + */ +struct hba_port { + struct list_head list; + u8 port_id; + u64 sas_address; + u32 phy_mask; + u8 flags; +}; + +/* hba port flags */ +#define HBA_PORT_FLAG_DIRTY_PORT 0x01 +#define HBA_PORT_FLAG_NEW_PORT 0x02 + +#define MULTIPATH_DISABLED_PORT_ID 0xFF + typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); /** * struct MPT3SAS_ADAPTER - per adapter struct @@ -1011,9 +1396,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @ir_firmware: IR firmware present * @bars: bitmask of BAR's that must be configured * @mask_interrupts: ignore interrupt - * @dma_mask: used to set the consistent dma mask - * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and - * pci resource handling + * @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and + * pci resource handling * @fault_reset_work_q_name: fw fault work queue * @fault_reset_work_q: "" * @fault_reset_work: "" @@ -1026,7 +1410,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @shost_recovery: host reset in progress * @ioc_reset_in_progress_lock: * @ioc_link_reset_in_progress: phy/hard reset in progress - * @ignore_loginfos: ignore loginfos during task management + * @ignore_loginfos: ignore loginfos during task managment * @remove_host: flag for when driver unloads, to avoid sending dev resets * 
@pci_error_recovery: flag to prevent ioc access until slot reset completes * @wait_for_discovery_to_complete: flag set at driver load time when @@ -1035,41 +1419,43 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @port_enable_failed: flag set when port enable has failed * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work * @start_scan_failed: means port enable failed, return's the ioc_status + * @adapter_over_temp: adapter reached temperature threshold 3 * @msix_enable: flag indicating msix is enabled - * @msix_vector_count: number msix vectors + * @msix_table: virt address to the msix table * @cpu_msix_table: table for mapping cpus to msix index * @cpu_msix_table_sz: table size + * @multipath_on_hba: flag to determine multipath on hba is enabled or not * @total_io_cnt: Gives total IO count, used to load balance the interrupts - * @high_iops_outstanding: used to load balance the interrupts - * within high iops reply queues - * @msix_load_balance: Enables load balancing of interrupts across - * the multiple MSIXs - * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands - * @thresh_hold: Max number of reply descriptors processed - * before updating Host Index + * @high_iops_outstanding: used to load balance the interrupts within high iops reply queues + * @high_iops_queues: high iops reply queues count * @drv_support_bitmap: driver's supported feature bit map + * @msix_load_balance: Enables load balancing of interrupts across the multiple MSIXs + * @thresh_hold: Max number of reply descriptors processed before updating Host Index + * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands * @scsi_io_cb_idx: shost generated commands * @tm_cb_idx: task management commands * @scsih_cb_idx: scsih internal commands - * @transport_cb_idx: transport internal commands - * @ctl_cb_idx: clt internal commands + * @ctl_cb_idx: ctl internal commands + * @ctl_tm_cb_idx: ctl task management commands * @base_cb_idx: base internal commands * @config_cb_idx: base internal commands * @tm_tr_cb_idx : device removal target reset handshake * @tm_tr_volume_cb_idx : volume removal target reset + * @tm_tr_internal_cb_idx : internal task managemnet commands queue * @base_cmds: * @transport_cmds: * @scsih_cmds: * @tm_cmds: * @ctl_cmds: * @config_cmds: + * @scsih_q_intenal_cmds: base internal queue commands list + * @scsih_q_internal_lock: internal queue commands lock * @base_add_sg_single: handler for either 32/64 bit sgl's * @event_type: bits indicating which events to log * @event_context: unique id for each logged event * @event_log: event log pointer * @event_masks: events that are masked * @facts: static facts data - * @prev_fw_facts: previous fw facts data * @pfacts: static port facts data * @manu_pg0: static manufacturing page 0 * @manu_pg10: static manufacturing page 10 @@ -1093,7 +1479,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @io_missing_delay: time for IO completed by fw when PDR enabled * @device_missing_delay: time for device missing by fw when PDR enabled * @sas_id : used for setting volume target IDs - * @pcie_target_id: used for setting pcie target IDs * @blocking_handles: bitmask used to identify which devices need blocking * @pd_handles : bitmask for PD handles * @pd_handles_sz : size of pd_handle bitmask @@ -1112,14 +1497,19 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @free_list: free list of request * @pending_io_count: * @reset_wq: 
+ * @pending_tm_count: pending task management request + * @terminated_tm_count: terminated request + * @pending_tm_wq: wait queue + * @out_of_frames + * @no_frames_tm_wq * @chain: pool of chains * @chain_dma: * @max_sges_in_main_message: number sg elements in main message * @max_sges_in_chain_message: number sg elements per chain * @chains_needed_per_io: max chains per io - * @chain_depth: total chains allocated - * @chain_segment_sz: gives the max number of - * SGEs accommodate on single chain buffer + * @chain_segment_sz: gives the max number of SGEs accommodated on a single + * chain buffer + * @chains_per_prp_buffer: number of chain segments that can fit in a PRP buffer * @hi_priority_smid: * @hi_priority: * @hi_priority_dma: @@ -1144,6 +1534,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @reply_free: pool for reply free queue (32 bit addr) * @reply_free_dma: * @reply_free_dma_pool: + * @reply_post_free_dma_pool_mod_rdpq_set: + * @reply_post_free_dma_pool_mod_rdpq_set_align: * @reply_free_host_index: tail index in pool to insert free replys * @reply_post_queue_depth: reply post queue depth * @reply_post_struct: struct for reply_post_free physical & virt address @@ -1153,44 +1545,72 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * is assigned only ones * @reply_queue_count: number of reply queue's * @reply_queue_list: link list contaning the reply queue info - * @msix96_vector: 96 MSI-X vector support + * @reply_post_host_index: head index in the pool where FW completes IO + * @combined_reply_queue: non combined reply queue support + * @smp_affinity_enable: sets the smp affinity for enabled IRQs * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue * @delayed_tr_list: target reset link list * @delayed_tr_volume_list: volume target reset link list - * @delayed_sc_list: - * @delayed_event_ack_list: + * @delayed_internal_tm_list: internal tmf link list * @temp_sensors_count: flag to carry the number of temperature sensors - * @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and - * pci resource handling. PCI resource freeing will lead to free - * vital hardware/memory resource, which might be in use by cli/sysfs - * path functions resulting in Null pointer reference followed by kernel - * crash. To avoid the above race condition we use mutex syncrhonization - * which ensures the syncrhonization between cli/sysfs_show path. - * @atomic_desc_capable: Atomic Request Descriptor support. - * @GET_MSIX_INDEX: Get the msix index of high iops queues. 
+ * @port_table_list: list containing HBA's wide/narrow port's info */ struct MPT3SAS_ADAPTER { struct list_head list; struct Scsi_Host *shost; u8 id; + u8 IOCNumber; int cpu_count; char name[MPT_NAME_LENGTH]; - char driver_name[MPT_NAME_LENGTH - 8]; + char driver_name[MPT_DRIVER_NAME_LENGTH]; char tmp_string[MPT_STRING_LENGTH]; struct pci_dev *pdev; Mpi2SystemInterfaceRegs_t __iomem *chip; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) +#if defined(CPQ_CIM) + resource_size_t pio_chip; +#endif phys_addr_t chip_phys; +#else +#if defined(CPQ_CIM) + u64 pio_chip; +#endif + phys_addr_t chip_phys; +#endif int logging_level; int fwfault_debug; u8 ir_firmware; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)) int bars; +#endif u8 mask_interrupts; - int dma_mask; + + struct mutex pci_access_mutex; /* fw fault handler */ char fault_reset_work_q_name[20]; + char hba_hot_unplug_work_q_name[20]; struct workqueue_struct *fault_reset_work_q; + struct workqueue_struct *hba_hot_unplug_work_q; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) struct delayed_work fault_reset_work; + struct delayed_work hba_hot_unplug_work; +#else + struct work_struct fault_reset_work; + struct work_struct hba_hot_unplug_work; +#endif + +#ifndef SCSI_MPT2SAS + /* SATA SMART pooling handler */ + char smart_poll_work_q_name[20]; + struct workqueue_struct *smart_poll_work_q; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) + struct delayed_work smart_poll_work; +#else + struct work_struct smart_poll_work; +#endif + u8 adapter_over_temp; +#endif /*End of not defined SCSI_MPT2SAS */ /* fw event handler */ char firmware_event_name[20]; @@ -1203,11 +1623,15 @@ struct MPT3SAS_ADAPTER { u8 broadcast_aen_busy; u16 broadcast_aen_pending; u8 shost_recovery; + u16 hba_mpi_version_belonged; u8 got_task_abort_from_ioctl; + u8 got_task_abort_from_sysfs; struct mutex reset_in_progress_mutex; spinlock_t ioc_reset_in_progress_lock; + spinlock_t hba_hot_unplug_lock; u8 ioc_link_reset_in_progress; + int ioc_reset_status; u8 ignore_loginfos; u8 remove_host; @@ -1215,24 +1639,28 @@ struct MPT3SAS_ADAPTER { u8 wait_for_discovery_to_complete; u8 is_driver_loading; u8 port_enable_failed; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) u8 start_scan; u16 start_scan_failed; - +#endif u8 msix_enable; - u16 msix_vector_count; u8 *cpu_msix_table; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) + resource_size_t **reply_post_host_index; +#else + u64 **reply_post_host_index; +#endif u16 cpu_msix_table_sz; - resource_size_t __iomem **reply_post_host_index; u32 ioc_reset_count; MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; u32 non_operational_loop; - atomic64_t total_io_cnt; + u8 multipath_on_hba; + atomic64_t total_io_cnt; atomic64_t high_iops_outstanding; - bool msix_load_balance; + bool msix_load_balance; u16 thresh_hold; u8 high_iops_queues; u32 drv_support_bitmap; - bool enable_sdev_max_qd; /* internal commands, callback index */ u8 scsi_io_cb_idx; @@ -1240,11 +1668,14 @@ struct MPT3SAS_ADAPTER { u8 transport_cb_idx; u8 scsih_cb_idx; u8 ctl_cb_idx; + u8 ctl_tm_cb_idx; + u8 ctl_diag_cb_idx; u8 base_cb_idx; u8 port_enable_cb_idx; u8 config_cb_idx; u8 tm_tr_cb_idx; u8 tm_tr_volume_cb_idx; + u8 tm_tr_internal_cb_idx; u8 tm_sas_control_cb_idx; struct _internal_cmd base_cmds; struct _internal_cmd port_enable_cmds; @@ -1252,8 +1683,12 @@ struct MPT3SAS_ADAPTER { struct _internal_cmd scsih_cmds; struct _internal_cmd tm_cmds; struct _internal_cmd ctl_cmds; + struct _internal_cmd ctl_diag_cmds; struct _internal_cmd config_cmds; 
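	/*
	 * A sketch, not part of the patch, of how one _internal_cmd slot such
	 * as base_cmds above is typically driven, assuming the MPT3_CMD_*
	 * status flags defined earlier plus the mutex and completion the
	 * driver keeps in each slot:
	 *
	 *   mutex_lock(&ioc->base_cmds.mutex);
	 *   ioc->base_cmds.status = MPT3_CMD_PENDING;
	 *   init_completion(&ioc->base_cmds.done);
	 *   // build the request frame for smid, then post it
	 *   ioc->put_smid_default(ioc, smid);
	 *   wait_for_completion_timeout(&ioc->base_cmds.done, timeout * HZ);
	 *   if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE))
	 *           // timed out: typically escalate to a host reset
	 *   ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	 *   mutex_unlock(&ioc->base_cmds.mutex);
	 */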
+ struct list_head scsih_q_intenal_cmds; + spinlock_t scsih_q_internal_lock; + MPT_ADD_SGE base_add_sg_single; /* function ptr for either IEEE or MPI sg elements */ @@ -1261,7 +1696,6 @@ struct MPT3SAS_ADAPTER { MPT_BUILD_SG build_sg; MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge; u16 sge_size_ieee; - u16 hba_mpi_version_belonged; /* function ptr for MPI sg elements only */ MPT_BUILD_SG build_sg_mpi; @@ -1276,10 +1710,10 @@ struct MPT3SAS_ADAPTER { void *event_log; u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + u8 disable_eedp_support; u8 tm_custom_handling; u8 nvme_abort_timeout; - /* static config pages */ struct mpt3sas_facts facts; struct mpt3sas_facts prev_fw_facts; @@ -1293,7 +1727,10 @@ struct MPT3SAS_ADAPTER { Mpi2IOUnitPage0_t iounit_pg0; Mpi2IOUnitPage1_t iounit_pg1; Mpi2IOUnitPage8_t iounit_pg8; - Mpi2IOCPage1_t ioc_pg1_copy; +#if defined(CPQ_CIM) + Mpi2IOCPage1_t ioc_pg1; +#endif + Mpi2IOCPage1_t ioc_pg1_copy; struct _boot_device req_boot_device; struct _boot_device req_alt_boot_device; @@ -1329,7 +1766,7 @@ struct MPT3SAS_ADAPTER { u16 config_page_sz; void *config_page; dma_addr_t config_page_dma; - void *config_vaddr; + void *config_vaddr; /* scsiio request */ u16 hba_queue_depth; @@ -1339,13 +1776,23 @@ struct MPT3SAS_ADAPTER { u8 *request; dma_addr_t request_dma; u32 request_dma_sz; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)) + struct scsiio_tracker *scsi_lookup; + ulong scsi_lookup_pages; +#endif struct pcie_sg_list *pcie_sg_lookup; spinlock_t scsi_lookup_lock; int pending_io_count; wait_queue_head_t reset_wq; + int pending_tm_count; + u32 terminated_tm_count; + wait_queue_head_t pending_tm_wq; + u8 out_of_frames; + wait_queue_head_t no_frames_tm_wq; /* PCIe SGL */ struct dma_pool *pcie_sgl_dma_pool; + /* Host Page Size */ u32 page_size; @@ -1353,11 +1800,9 @@ struct MPT3SAS_ADAPTER { struct chain_lookup *chain_lookup; struct list_head free_chain_list; struct dma_pool *chain_dma_pool; - ulong chain_pages; u16 max_sges_in_main_message; u16 max_sges_in_chain_message; u16 chains_needed_per_io; - u32 chain_depth; u16 chain_segment_sz; u16 chains_per_prp_buffer; @@ -1400,28 +1845,34 @@ struct MPT3SAS_ADAPTER { /* reply post queue */ u16 reply_post_queue_depth; struct reply_post_struct *reply_post; - u8 rdpq_array_capable; - u8 rdpq_array_enable; - u8 rdpq_array_enable_assigned; struct dma_pool *reply_post_free_dma_pool; + struct dma_pool *reply_post_free_dma_pool_align; + struct dma_pool *reply_post_free_dma_pool_mod_rdpq_set; + struct dma_pool *reply_post_free_dma_pool_mod_rdpq_set_align; struct dma_pool *reply_post_free_array_dma_pool; Mpi2IOCInitRDPQArrayEntry *reply_post_free_array; dma_addr_t reply_post_free_array_dma; u8 reply_queue_count; struct list_head reply_queue_list; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; - u8 combined_reply_queue; - u8 combined_reply_index_count; + u8 combined_reply_queue; + u8 nc_reply_index_count; u8 smp_affinity_enable; /* reply post register index */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)) resource_size_t **replyPostRegisterIndex; +#else + u64 **replyPostRegisterIndex; +#endif struct list_head delayed_tr_list; struct list_head delayed_tr_volume_list; + struct list_head delayed_internal_tm_list; struct list_head delayed_sc_list; struct list_head delayed_event_ack_list; - u8 temp_sensors_count; - struct mutex pci_access_mutex; /* diag buffer support */ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; @@ -1433,28 +1884,52 @@ struct MPT3SAS_ADAPTER { u32 
diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; u32 ring_buffer_offset; u32 ring_buffer_sz; +#if defined(TARGET_MODE) + char stm_name[MPT_NAME_LENGTH]; + u8 stm_io_cb_idx; /* normal io */ + u8 stm_tm_cb_idx; /* task managment */ + u8 stm_tm_imm_cb_idx; /* immediate TM request */ + u8 stm_post_cb_idx; /* post all buffers */ + struct _internal_cmd stm_tm_cmds; /* TM requests */ + struct _internal_cmd stm_post_cmds; /* post cmd buffer request */ + void *priv; +#endif u8 is_warpdrive; u8 is_mcpu_endpoint; u8 hide_ir_msg; + u8 warpdrive_msg; u8 mfg_pg10_hide_flag; u8 hide_drives; + + u8 atomic_desc_capable; + BASE_READ_REG base_readl; + PUT_SMID_IO_FP_HIP_TA put_smid_scsi_io; + PUT_SMID_IO_FP_HIP_TA put_smid_fast_path; + PUT_SMID_IO_FP_HIP_TA put_smid_hi_priority; +#if defined(TARGET_MODE) + PUT_SMID_IO_FP_HIP_TA put_smid_target_assist; +#endif + PUT_SMID_DEF_NVME put_smid_default; + PUT_SMID_DEF_NVME put_smid_nvme_encap; + GET_MSIX_INDEX get_msix_index_for_smlio; +#ifdef MPT2SAS_WD_DDIOCOUNT + u64 ddio_count; + u64 ddio_err_count; +#endif spinlock_t diag_trigger_lock; u8 diag_trigger_active; - u8 atomic_desc_capable; - BASE_READ_REG base_readl; struct SL_WH_MASTER_TRIGGER_T diag_trigger_master; struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; void *device_remove_in_progress; u16 device_remove_in_progress_sz; + u8 *tm_tr_retry; + u32 tm_tr_retry_sz; + u8 temp_sensors_count; u8 is_gen35_ioc; u8 is_aero_ioc; - PUT_SMID_IO_FP_HIP put_smid_scsi_io; - PUT_SMID_IO_FP_HIP put_smid_fast_path; - PUT_SMID_IO_FP_HIP put_smid_hi_priority; - PUT_SMID_DEFAULT put_smid_default; - GET_MSIX_INDEX get_msix_index_for_smlio; + struct list_head port_table_list; }; #define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001 @@ -1462,53 +1937,58 @@ struct MPT3SAS_ADAPTER { typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); +#if defined(TARGET_MODE) +typedef void (*STM_CALLBACK_WITH_IOC)(struct MPT3SAS_ADAPTER *ioc); +typedef void (*STM_CALLBACK_FOR_TGT_CMD)(struct MPT3SAS_ADAPTER *ioc, + Mpi2TargetCommandBufferReplyDescriptor_t *rpf, u8 msix_index); +typedef void (*STM_CALLBACK_FOR_TGT_ASSIST)(struct MPT3SAS_ADAPTER *ioc, + Mpi2TargetAssistSuccessReplyDescriptor_t *rpf); +typedef u8 (*STM_CALLBACK_FOR_SMID)(struct MPT3SAS_ADAPTER *ioc, + u8 msix_index, u32 reply); +typedef void (*STM_CALLBACK_FOR_RESET)(struct MPT3SAS_ADAPTER *ioc, + int reset_phase); + +struct STM_CALLBACK { + STM_CALLBACK_WITH_IOC watchdog; + STM_CALLBACK_FOR_TGT_CMD target_command; + STM_CALLBACK_FOR_TGT_ASSIST target_assist; + STM_CALLBACK_FOR_SMID smid_handler; + STM_CALLBACK_FOR_RESET reset_handler; +}; +#endif /* base shared API */ extern struct list_head mpt3sas_ioc_list; -extern char driver_name[MPT_NAME_LENGTH]; -/* spinlock on list operations over IOCs - * Case: when multiple warpdrive cards(IOCs) are in use - * Each IOC will added to the ioc list structure on initialization. - * Watchdog threads run at regular intervals to check IOC for any - * fault conditions which will trigger the dead_ioc thread to - * deallocate pci resource, resulting deleting the IOC netry from list, - * this deletion need to protected by spinlock to enusre that - * ioc removal is syncrhonized, if not synchronized it might lead to - * list_del corruption as the ioc list is traversed in cli path. 
- */ extern spinlock_t gioc_lock; - void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc); +#ifndef SCSI_MPT2SAS +void mpt3sas_base_start_smart_polling(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_stop_smart_polling(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_sata_smart_polling(struct MPT3SAS_ADAPTER *ioc); +#endif int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc); int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc); -int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, - enum reset_type type); +int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid); +__le64 mpt3sas_base_get_sense_buffer_dma_64(struct MPT3SAS_ADAPTER *ioc, + u16 smid); void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid); dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid); void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, - u16 handle); -void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, - u16 msix_task); -void mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid); -void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid); /* hi-priority queue */ u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, - struct scsi_cmnd *scmd); -void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, - struct scsiio_tracker *st); + struct scsi_cmnd *scmd); u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); @@ -1524,6 +2004,7 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr); u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); +int _base_check_and_get_msix_vectors(struct pci_dev *pdev); void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code); int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, @@ -1535,60 +2016,110 @@ int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type); -void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc, u8 set_fault); void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, u16 device_missing_delay, u8 io_missing_delay); +struct scsiio_tracker * mpt3sas_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, + struct scsiio_tracker *st); +struct scsiio_tracker * mpt3sas_base_scsi_cmd_priv(struct scsi_cmnd *scmd); + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); - -void -mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); - +#endif +u8 mpt3sas_base_pci_device_is_unplugged(struct MPT3SAS_ADAPTER *ioc); +u8 
mpt3sas_base_pci_device_is_available(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, u8 status, void *mpi_request, int sz); -int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); + + +int mpt3sas_wait_for_ioc_to_operational(struct MPT3SAS_ADAPTER *ioc, + int wait_count); +void mpt3sas_base_start_hba_unplug_watchdog(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_stop_hba_unplug_watchdog(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); +void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc); /* scsih shared API */ +extern char driver_name[MPT_NAME_LENGTH]; +extern int prot_mask; struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid); u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply); -void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); - -int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, - u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method); -int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, - u64 lun, u8 type, u16 smid_task, u16 msix_task, +void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); +int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, u16 smid_task, + u8 timeout, u8 tr_method); +int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, u16 smid_task, u8 timeout, u8 tr_method); - void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); -void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port); void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address); + u64 sas_address, struct hba_port *port); u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, - u16 smid); + u16 smid); +struct hba_port * +mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port); struct _sas_node *mpt3sas_scsih_expander_find_by_handle( struct MPT3SAS_ADAPTER *ioc, u16 handle); struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address( - struct MPT3SAS_ADAPTER *ioc, u64 sas_address); + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *__mpt3sas_get_sdev_by_addr_and_rphy( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, struct sas_rphy *rphy); struct _sas_device *mpt3sas_get_sdev_by_addr( - struct MPT3SAS_ADAPTER *ioc, u64 sas_address); -struct _sas_device *__mpt3sas_get_sdev_by_addr( - struct MPT3SAS_ADAPTER *ioc, u64 sas_address); + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc); void 
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); struct _raid_device * -mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); -void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth); +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 + handle); +void +_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device); +void +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc); +u32 base_mod64(u64 dividend, u32 divisor); + +/** + * mpt3sas_scsih_is_pcie_scsi_device - determines if device is a PCIe SCSI device + * @device_info: bitfield providing information about the device. + * Context: none + * + * Returns 1 if it is a SCSI device. + */ +static inline int +mpt3sas_scsih_is_pcie_scsi_device(u32 device_info) +{ + if ((device_info & + MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI) + return 1; + else + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) +/* NCQ Prio Handling Check */ +u8 mpt3sas_scsih_ncq_prio_supp(struct scsi_device *sdev); +#endif /* config shared API */ u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -1603,14 +2134,12 @@ int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, struct Mpi2ManufacturingPage10_t *config_page); - int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, struct Mpi2ManufacturingPage11_t *config_page); int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, struct Mpi2ManufacturingPage11_t *config_page); - int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page); int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1634,8 +2163,6 @@ int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, u16 sz); int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page); -int mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page); int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1680,44 +2207,86 @@ int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, u16 *volume_handle); int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, u64 *wwid); +#if defined(CPQ_CIM) +int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page); +#endif +int +mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); /* ctl shared API */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) extern struct device_attribute *mpt3sas_host_attrs[]; +#else +extern struct class_device_attribute *mpt3sas_host_attrs[]; +#endif extern struct device_attribute *mpt3sas_dev_attrs[]; -void mpt3sas_ctl_init(ushort hbas_to_enumerate); -void mpt3sas_ctl_exit(ushort hbas_to_enumerate); +void mpt3sas_ctl_init(int enumerate_hba); +void mpt3sas_ctl_exit(int enumerate_hba); u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply);
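As a usage sketch (not part of the patch itself; the caller and its name are hypothetical), the new mpt3sas_scsih_is_pcie_scsi_device() helper above is meant to classify a device from the DeviceInfo word of its PCIe Device Page 0 before choosing an attach path:

/*
 * Usage sketch, assuming a caller that has already read PCIe Device
 * Page 0 for the device. DeviceInfo is a little-endian u32 in
 * Mpi26PCIeDevicePage0_t, hence the le32_to_cpu() conversion.
 */
static void example_classify_pcie_device(Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	u32 device_info = le32_to_cpu(pcie_device_pg0->DeviceInfo);

	if (mpt3sas_scsih_is_pcie_scsi_device(device_info)) {
		/* SCSI-over-PCIe device: expose through the SCSI midlayer */
	} else {
		/* otherwise treat it as an NVMe device */
	}
}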
-void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); +u8 mpt3sas_ctl_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 mpt3sas_ctl_diag_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply); void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, Mpi2EventNotificationReply_t *mpi_reply); void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, - u8 bits_to_register); + u8 bits_to_register); int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset); +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc); + +int ctl_release(struct inode *inode, struct file *filep); +void ctl_init(void); +void ctl_exit(void); + +#if defined(CPQ_CIM) +long +_ctl_ioctl_csmi(struct MPT3SAS_ADAPTER *ioc, unsigned int cmd, void __user *arg); +#endif /* transport shared API */ -extern struct scsi_transport_template *mpt3sas_transport_template; u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, - u16 handle, u64 sas_address); + u16 handle, u64 sas_address, struct hba_port *port); void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, - u64 sas_address_parent); + u64 sas_address_parent, struct hba_port *port); int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate, + struct hba_port *port); extern struct sas_function_template mpt3sas_transport_functions; extern struct scsi_transport_template *mpt3sas_transport_template; +void +mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy); +#if defined(MPT_WIDE_PORT_API) +void +mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy, + u64 sas_address, struct hba_port *port); +#endif + +#if defined(TARGET_MODE) +void mpt3sas_base_stm_initialize_callback_handler(void); +void mpt3sas_base_stm_release_callback_handler(void); +#if defined(STM_RING_BUFFER) +extern void sysfs_dump_kernel_thread_state(struct MPT_STM_PRIV *priv); +extern void sysfs_dump_ring_buffer(struct MPT_STM_PRIV *priv); +#endif /* STM_RING_BUFFER */ +#endif + /* trigger data externs */ void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); @@ -1735,28 +2304,31 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, /* warpdrive APIs */ u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, - struct _raid_device *raid_device); + struct _raid_device *raid_device); void mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct
scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request); -/* NCQ Prio Handling Check */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev); +void _scsih_hide_unhide_sas_devices(struct MPT3SAS_ADAPTER *ioc); -/** - * _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device - * @device_info: bitfield providing information about the device. - * Context: none - * - * Returns 1 if scsi device. - */ -static inline int -mpt3sas_scsih_is_pcie_scsi_device(u32 device_info) -{ - if ((device_info & - MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI) - return 1; - else - return 0; -} +#ifdef MPT2SAS_WD_DDIOCOUNT +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_ioc_ddio_count_show(struct device *cdev, struct device_attribute *attr, + char *buf); +ssize_t +_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, + char *buf); +ssize_t +_ctl_ioc_ddio_err_count_show(struct device *cdev, struct device_attribute *attr, + char *buf); +#else +ssize_t +_ctl_ioc_ddio_count_show(struct class_device *cdev, char *buf); +ssize_t +_ctl_ioc_ddio_err_count_show(struct class_device *cdev, char *buf); +ssize_t +_ctl_BRM_status_show(struct class_device *cdev, char *buf); +#endif +#endif #endif /* MPT3SAS_BASE_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_compatibility.h b/drivers/scsi/mpt3sas/mpt3sas_compatibility.h new file mode 100755 index 000000000000..d33ea1c73e82 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_compatibility.h @@ -0,0 +1,207 @@ +/* + * Compatibility Header for compilation working across multiple kernels + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations.
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#ifndef FUSION_LINUX_COMPAT_H +#define FUSION_LINUX_COMPAT_H + +#include <linux/version.h> + +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR >= 3)) || \ + (defined(CONFIG_SUSE_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))) +#include <linux/irq_poll.h> +#define MPT3SAS_ENABLE_IRQ_POLL +#endif + +#if ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,59) && LINUX_VERSION_CODE < KERNEL_VERSION(4,12,14))) +extern int scsi_internal_device_block(struct scsi_device *sdev, bool wait); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +extern int scsi_internal_device_block(struct scsi_device *sdev); +#endif + +#if ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) \ +&& LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))) +extern int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)) +extern int scsi_internal_device_unblock(struct scsi_device *sdev); +#endif + +#ifndef DID_TRANSPORT_DISRUPTED +#define DID_TRANSPORT_DISRUPTED DID_BUS_BUSY +#endif + +#ifndef ULLONG_MAX +#define ULLONG_MAX (~0ULL) +#endif + +#ifndef USHORT_MAX +#define USHORT_MAX ((u16)(~0U)) +#endif + +#ifndef UINT_MAX +#define UINT_MAX (~0U) +#endif + +/* + * TODO Need to change 'shost_private' back to 'shost_priv' when supplying patches + * upstream. Since Red Hat decided to backport this to rhel5.2 (2.6.18-92.el5) + * from the 2.6.23 kernel, it will make it difficult for us to add the proper + * glue in our driver.
+ */ +static inline void *shost_private(struct Scsi_Host *shost) +{ + return (void *)shost->hostdata; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) +static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) +{ + return scmd->request->sector; +} +#endif + +/** + * mpt_scsi_build_sense_buffer - build sense data in a buffer + * @desc: Sense format (non zero == descriptor format, + * 0 == fixed format) + * @buf: Where to build sense data + * @key: Sense key + * @asc: Additional sense code + * @ascq: Additional sense code qualifier + * + * Note: scsi_build_sense_buffer was added in the 2.6.26 kernel + * It was backported in RHEL5.3 2.6.18-128.el5 + **/ +static inline void mpt_scsi_build_sense_buffer(int desc, u8 *buf, u8 key, + u8 asc, u8 ascq) +{ + if (desc) { + buf[0] = 0x72; /* descriptor, current */ + buf[1] = key; + buf[2] = asc; + buf[3] = ascq; + buf[7] = 0; + } else { + buf[0] = 0x70; /* fixed, current */ + buf[2] = key; + buf[7] = 0xa; + buf[12] = asc; + buf[13] = ascq; + } +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) +/** + * mpt_scsilun_to_int: convert a scsi_lun to an int + * @scsilun: struct scsi_lun to be converted. + * + * Description: + * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered + * integer, and return the result. The caller must check for + * truncation before using this function. + * + * Notes: + * The struct scsi_lun is assumed to be four levels, with each level + * effectively containing a SCSI byte-ordered (big endian) short; the + * addressing bits of each level are ignored (the highest two bits). + * For a description of the LUN format, post SCSI-3 see the SCSI + * Architecture Model, for SCSI-3 see the SCSI Controller Commands. + * + * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function + * returns the integer: 0x0b030a04 + **/ +static inline int mpt_scsilun_to_int(struct scsi_lun *scsilun) +{ + int i; + unsigned int lun; + + lun = 0; + for (i = 0; i < sizeof(lun); i += 2) + lun = lun | (((scsilun->scsi_lun[i] << 8) | + scsilun->scsi_lun[i + 1]) << (i * 8)); + return lun; +} +#else +static inline int mpt_scsilun_to_int(struct scsi_lun *scsilun) +{ + return scsilun_to_int(scsilun); +} +#endif + +/** + * mpt3sas_set_requeue_or_reset - + * @scmd: pointer to scsi command object + * + * Marks scmd to be retried by setting DID_RESET / DID_REQUEUE in its result + */ +static inline void mpt3sas_set_requeue_or_reset(struct scsi_cmnd *scmd) +{ +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR == 3)) || \ +((LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) && \ +(LINUX_VERSION_CODE < KERNEL_VERSION(4,4,14)))) + scmd->result = DID_REQUEUE << 16; +#else + scmd->result = DID_RESET << 16; +#endif +} + +/** + * mpt3sas_determine_failed_or_fast_io_fail_status - + * + * Return FAST_IO_FAIL status if kernel supports fast io fail feature + * otherwise return FAILED status.
+ */ +static inline int +mpt3sas_determine_failed_or_fast_io_fail_status(void) +{ +#ifdef FAST_IO_FAIL + return FAST_IO_FAIL; +#else + return FAILED; +#endif +} + +#endif /* FUSION_LINUX_COMPAT_H */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c old mode 100644 new mode 100755 index 14a1a2793dd5..57752c27d24e --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -2,9 +2,10 @@ * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -42,6 +43,7 @@ * USA. */ +#include #include #include #include @@ -91,7 +93,7 @@ struct config_request { * @mpi_reply: reply message frame * Context: none. * - * Function for displaying debug info helpful when debugging issues + * Function for displaying debug info helpful when debugging issues * in this module. */ static void @@ -175,18 +177,19 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, if (!desc) return; - ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", - calling_function_name, desc, - mpi_request->Header.PageNumber, mpi_request->Action, - le32_to_cpu(mpi_request->PageAddress), smid); + printk(MPT3SAS_INFO_FMT "%s: %s(%d), action(%d), form(0x%08x), " + "smid(%d)\n", ioc->name, calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + le32_to_cpu(mpi_request->PageAddress), smid); if (!mpi_reply) return; if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) - ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n", - le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo)); + printk(MPT3SAS_INFO_FMT + "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); } /** @@ -196,7 +199,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, * * A wrapper for obtaining dma-able memory for config page request. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, @@ -208,8 +211,9 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, &mem->page_dma, GFP_KERNEL); if (!mem->page) { - ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n", - __func__, mem->sz); + printk(MPT3SAS_ERR_FMT "%s: dma_alloc_coherent" + " failed asking for (%d) bytes!!\n", + ioc->name, __func__, mem->sz); r = -ENOMEM; } } else { /* use tmp buffer if less than 512 bytes */ @@ -227,7 +231,7 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, * * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure.
*/ static void _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, @@ -248,8 +252,8 @@ _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, * * The callback handler when using _config_request. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -270,7 +274,7 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, } ioc->config_cmds.status &= ~MPT3_CMD_PENDING; _config_display_some_debug(ioc, smid, "config_done", mpi_reply); - ioc->config_cmds.smid = USHRT_MAX; + ioc->config_cmds.smid = USHORT_MAX; complete(&ioc->config_cmds.done); return 1; } @@ -292,7 +296,7 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * * The callback index is set inside `ioc->config_cb_idx. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ static int _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t @@ -308,7 +312,8 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: config_cmd in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: config_cmd in use\n", + ioc->name, __func__); mutex_unlock(&ioc->config_cmds.mutex); return -EAGAIN; } @@ -356,17 +361,19 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t r = -EFAULT; goto free_mem; } - ioc_info(ioc, "%s: attempting retry (%d)\n", - __func__, retry_count); + printk(MPT3SAS_INFO_FMT "%s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); } - r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT); + r = mpt3sas_wait_for_ioc_to_operational(ioc, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT); if (r) goto free_mem; smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); ioc->config_cmds.status = MPT3_CMD_NOT_USED; r = -EAGAIN; goto free_mem; @@ -381,7 +388,8 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t _config_display_some_debug(ioc, smid, "config_request", NULL); init_completion(&ioc->config_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); + wait_for_completion_timeout(&ioc->config_cmds.done, + timeout*HZ); if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { mpt3sas_base_check_cmd_timeout(ioc, ioc->config_cmds.status, mpi_request, @@ -389,8 +397,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t retry_count++; if (ioc->config_cmds.smid == smid) mpt3sas_base_free_smid(ioc, smid); - if ((ioc->shost_recovery) || (ioc->config_cmds.status & - MPT3_CMD_RESET) || ioc->pci_error_recovery) + if (ioc->shost_recovery || + (ioc->config_cmds.status & MPT3_CMD_RESET) || + ioc->pci_error_recovery) goto retry_config; issue_host_reset = 1; r = -EFAULT; @@ -406,10 +415,12 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t (mpi_reply->Header.PageType & 0xF)) { _debug_dump_mf(mpi_request, ioc->request_sz/4); _debug_dump_reply(mpi_reply, ioc->request_sz/4); - panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", - ioc->name, __func__, - 
mpi_request->Header.PageType & 0xF, - mpi_reply->Header.PageType & 0xF); + panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" + " mpi_reply mismatch: Requested PageType(0x%02x)" + " Reply PageType(0x%02x)\n", + ioc->name, __func__, + (mpi_request->Header.PageType & 0xF), + (mpi_reply->Header.PageType & 0xF)); } if (((mpi_request->Header.PageType & 0xF) == @@ -417,18 +428,19 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t mpi_request->ExtPageType != mpi_reply->ExtPageType) { _debug_dump_mf(mpi_request, ioc->request_sz/4); _debug_dump_reply(mpi_reply, ioc->request_sz/4); - panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", - ioc->name, __func__, - mpi_request->ExtPageType, - mpi_reply->ExtPageType); + panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" + " mpi_reply mismatch: Requested ExtPageType(0x%02x)" + " Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, mpi_request->ExtPageType, + mpi_reply->ExtPageType); } ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; } if (retry_count) - ioc_info(ioc, "%s: retry (%d) completed!!\n", - __func__, retry_count); + printk(MPT3SAS_INFO_FMT "%s: retry (%d) completed!!\n", + ioc->name, __func__, retry_count); if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && config_page && mpi_request->Action == @@ -443,10 +455,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t _debug_dump_reply(mpi_reply, ioc->request_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); - panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", - ioc->name, __func__, - mpi_request->Header.PageType & 0xF, - p[3] & 0xF); + panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" + " config page mismatch:" + " Requested PageType(0x%02x)" + " Reply PageType(0x%02x)\n", + ioc->name, __func__, + (mpi_request->Header.PageType & 0xF), + (p[3] & 0xF)); } if (((mpi_request->Header.PageType & 0xF) == @@ -456,9 +471,12 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t _debug_dump_reply(mpi_reply, ioc->request_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); - panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", - ioc->name, __func__, - mpi_request->ExtPageType, p[6]); + panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" + " config page mismatch:" + " Requested ExtPageType(0x%02x)" + " Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->ExtPageType, p[6]); } } memcpy(config_page, mem.page, min_t(u16, mem.sz, @@ -484,7 +502,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, @@ -521,7 +539,7 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, * @sz: size of buffer passed in config_page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, @@ -558,7 +576,7 @@ mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, @@ -595,7 +613,7 @@ mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, @@ -632,7 +650,7 @@ mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, @@ -669,7 +687,7 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, @@ -705,7 +723,7 @@ mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -741,7 +759,7 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, @@ -777,7 +795,7 @@ mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -813,7 +831,7 @@ mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -850,7 +868,7 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, * @sz: size of buffer passed in config_page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, @@ -885,7 +903,7 @@ mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, @@ -914,41 +932,6 @@ mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, return r; } -/** - * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 - * @ioc: per adapter object - * @mpi_reply: reply mf payload returned from firmware - * @config_page: contents of the config page - * Context: sleep. - * - * Return: 0 for success, non-zero for failure. 
- */ -int -mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) -{ - Mpi2ConfigRequest_t mpi_request; - int r; - - memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - mpi_request.Function = MPI2_FUNCTION_CONFIG; - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; - mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; - mpi_request.Header.PageNumber = 8; - mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; - ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); - if (r) - goto out; - - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sizeof(*config_page)); - out: - return r; -} /** * mpt3sas_config_get_ioc_pg1 - obtain ioc page 1 * @ioc: per adapter object @@ -956,7 +939,7 @@ mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -992,7 +975,7 @@ mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, * @config_page: contents of the config page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -1021,6 +1004,42 @@ mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, return r; } +/** + * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + /** * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0 * @ioc: per adapter object @@ -1030,7 +1049,7 @@ mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, * @handle: device handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, @@ -1071,7 +1090,7 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, * @handle: device handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -1103,7 +1122,7 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, return r; } -/** +/* * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware @@ -1112,36 +1131,36 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, * @handle: device handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, - u32 form, u32 handle) + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, + u32 form, u32 handle) { - Mpi2ConfigRequest_t mpi_request; - int r; + Mpi2ConfigRequest_t mpi_request; + int r; - memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - mpi_request.Function = MPI2_FUNCTION_CONFIG; - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; - mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; - mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; - mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION; - mpi_request.Header.PageNumber = 0; - ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); - if (r) - goto out; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; + mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; - mpi_request.PageAddress = cpu_to_le32(form | handle); - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sizeof(*config_page)); -out: - return r; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; } /** @@ -1153,36 +1172,36 @@ out: * @handle: device handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, - u32 form, u32 handle) + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, + u32 form, u32 handle) { - Mpi2ConfigRequest_t mpi_request; - int r; + Mpi2ConfigRequest_t mpi_request; + int r; - memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - mpi_request.Function = MPI2_FUNCTION_CONFIG; - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; - mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; - mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; - mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION; - mpi_request.Header.PageNumber = 2; - ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); - if (r) - goto out; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; + mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION; + mpi_request.Header.PageNumber = 2; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; - mpi_request.PageAddress = cpu_to_le32(form | handle); - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sizeof(*config_page)); -out: - return r; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; } /** @@ -1191,7 +1210,7 @@ out: * @num_phys: pointer returned with the number of phys * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) @@ -1241,7 +1260,7 @@ mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, @@ -1282,7 +1301,7 @@ mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -1311,6 +1330,9 @@ mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, out: return r; } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_config_get_sas_iounit_pg1); +#endif /** * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1 @@ -1323,7 +1345,7 @@ mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -1355,6 +1377,9 @@ mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, out: return r; } +#if defined(TARGET_MODE) +EXPORT_SYMBOL(mpt3sas_config_set_sas_iounit_pg1); +#endif /** * mpt3sas_config_get_expander_pg0 - obtain expander page 0 @@ -1365,7 +1390,7 @@ mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, * @handle: expander handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1405,7 +1430,7 @@ mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @handle: expander handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1448,7 +1473,7 @@ mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @handle: expander handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1487,7 +1512,7 @@ mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @phy_number: phy number * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1527,7 +1552,7 @@ mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @phy_number: phy number * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1568,7 +1593,7 @@ mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @handle: volume handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, @@ -1606,7 +1631,7 @@ mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, * @num_pds: returns pds count * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, @@ -1658,7 +1683,7 @@ mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, * @sz: size of buffer passed in config_page * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
*/ int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, @@ -1697,7 +1722,7 @@ mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, * @form_specific: specific to the form * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -1736,7 +1761,7 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t * @volume_handle: volume handle * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, @@ -1826,7 +1851,7 @@ mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, * @wwid: volume wwid * Context: sleep. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, diff --git a/drivers/scsi/mpt3sas/mpt3sas_csmi_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_csmi_ctl.c new file mode 100755 index 000000000000..8f3106dbcfaa --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_csmi_ctl.c @@ -0,0 +1,412 @@ +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2012-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto: MPT-FusionLinux.pdl@broadcom.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include "csmi/csmisas.h" +static int _csmisas_get_driver_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_DRIVER_INFO_BUFFER *karg); +static int _csmisas_get_cntlr_status(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_CNTLR_STATUS_BUFFER *karg); +static int _csmisas_get_cntlr_config(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_CNTLR_CONFIG_BUFFER *karg); +static int _csmisas_get_phy_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_PHY_INFO_BUFFER *karg); +static int _csmisas_get_scsi_address(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_GET_SCSI_ADDRESS_BUFFER *karg); +static int _csmisas_get_link_errors(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_LINK_ERRORS_BUFFER *karg); +static int _csmisas_smp_passthru(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SMP_PASSTHRU_BUFFER *karg); +static int _csmisas_firmware_download(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER *karg); +static int _csmisas_get_raid_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_INFO_BUFFER *karg); +static int _csmisas_get_raid_config(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_CONFIG_BUFFER *karg); +static int _csmisas_get_raid_features(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_FEATURES_BUFFER *karg); +static int _csmisas_set_raid_control(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_CONTROL_BUFFER *karg); +static int _csmisas_get_raid_element(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_ELEMENT_BUFFER *karg); +static int _csmisas_set_raid_operation(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_RAID_SET_OPERATION_BUFFER *karg); +static int _csmisas_set_phy_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SET_PHY_INFO_BUFFER *karg); +static int _csmisas_ssp_passthru(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SSP_PASSTHRU_BUFFER *karg); +static int _csmisas_stp_passthru(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_STP_PASSTHRU_BUFFER *karg); +static int _csmisas_get_sata_signature(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SATA_SIGNATURE_BUFFER *karg); +static int _csmisas_get_device_address(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER *karg); +static int _csmisas_task_managment(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_SSP_TASK_IU_BUFFER *karg); +static int _csmisas_phy_control(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_PHY_CONTROL_BUFFER *karg); +static int _csmisas_get_connector_info(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_CONNECTOR_INFO_BUFFER *karg); +static int _csmisas_get_location(struct MPT3SAS_ADAPTER *ioc, + CSMI_SAS_GET_LOCATION_BUFFER *karg); + +#define MPT2SAS_HP_3PAR_SSVID 0x1590 +#define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 +#define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 + +/** + * _ctl_check_for_hp_branded_controllers - customer branding check + * @ioc: per adapter object + * + * HP controllers are only allowed to do CSMI IOCTLs + * + * Returns: "1" if HP controller, else "0" + */ +static int +_ctl_check_for_hp_branded_controllers(struct MPT3SAS_ADAPTER *ioc) +{ + int rc = 0; + + if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID) + goto out; + + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2004: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: + rc = 1; + break; + default: + break; + } + break; + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_2_4_INTERNAL_SSDID: + case MPT2SAS_HP_2_4_EXTERNAL_SSDID: + case
MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: + case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: + rc = 1; + break; + default: + break; + } + break; + default: + break; + } + + out: + return rc; +} + +long +_ctl_ioctl_csmi(struct MPT3SAS_ADAPTER *ioc, unsigned int cmd, void __user *arg) +{ + IOCTL_HEADER karg; + void *payload; + u32 payload_sz; + int payload_pages; + long ret = -EINVAL; + + if (copy_from_user(&karg, arg, sizeof(IOCTL_HEADER))) { + printk(KERN_ERR "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (_ctl_check_for_hp_branded_controllers(ioc) != 1) + return -EPERM; + + payload_sz = karg.Length + sizeof(IOCTL_HEADER); + payload_pages = get_order(payload_sz); + payload = (void *)__get_free_pages(GFP_KERNEL, payload_pages); + if (!payload) + goto out; + + if (copy_from_user(payload, arg, payload_sz)) { + printk(KERN_ERR "%s():%d: failure\n", __func__, __LINE__); + ret = -EFAULT; + goto out_free_pages; + } + + switch (cmd) { + case CC_CSMI_SAS_GET_DRIVER_INFO: + ret = _csmisas_get_driver_info(ioc, payload); + break; + case CC_CSMI_SAS_GET_CNTLR_STATUS: + ret = _csmisas_get_cntlr_status(ioc, payload); + break; + case CC_CSMI_SAS_GET_SCSI_ADDRESS: + ret = _csmisas_get_scsi_address(ioc, payload); + break; + case CC_CSMI_SAS_GET_DEVICE_ADDRESS: + ret = _csmisas_get_device_address(ioc, payload); + break; + case CC_CSMI_SAS_GET_CNTLR_CONFIG: + ret = _csmisas_get_cntlr_config(ioc, payload); + break; + case CC_CSMI_SAS_GET_PHY_INFO: + ret = _csmisas_get_phy_info(ioc, payload); + break; + case CC_CSMI_SAS_GET_SATA_SIGNATURE: + ret = _csmisas_get_sata_signature(ioc, payload); + break; + case CC_CSMI_SAS_GET_LINK_ERRORS: + ret = _csmisas_get_link_errors(ioc, payload); + break; + case CC_CSMI_SAS_SMP_PASSTHRU: + ret = _csmisas_smp_passthru(ioc, payload); + break; + case CC_CSMI_SAS_SSP_PASSTHRU: + ret = _csmisas_ssp_passthru(ioc, payload); + break; + case CC_CSMI_SAS_FIRMWARE_DOWNLOAD: + ret = _csmisas_firmware_download(ioc, payload); + break; + case CC_CSMI_SAS_GET_RAID_INFO: + ret = _csmisas_get_raid_info(ioc, payload); + break; + case CC_CSMI_SAS_GET_RAID_CONFIG: + ret = _csmisas_get_raid_config(ioc, payload); + break; + case CC_CSMI_SAS_GET_RAID_FEATURES: + ret = _csmisas_get_raid_features(ioc, payload); + break; + case CC_CSMI_SAS_SET_RAID_CONTROL: + ret = _csmisas_set_raid_control(ioc, payload); + break; + case CC_CSMI_SAS_GET_RAID_ELEMENT: + ret = _csmisas_get_raid_element(ioc, payload); + break; + case CC_CSMI_SAS_SET_RAID_OPERATION: + ret = _csmisas_set_raid_operation(ioc, payload); + break; + case CC_CSMI_SAS_SET_PHY_INFO: + ret = _csmisas_set_phy_info(ioc, payload); + break; + case CC_CSMI_SAS_STP_PASSTHRU: + ret = _csmisas_stp_passthru(ioc, payload); + break; + case CC_CSMI_SAS_TASK_MANAGEMENT: + ret = _csmisas_task_managment(ioc, payload); + break; + case CC_CSMI_SAS_PHY_CONTROL: + ret = _csmisas_phy_control(ioc, payload); + break; + case CC_CSMI_SAS_GET_CONNECTOR_INFO: + ret = _csmisas_get_connector_info(ioc, payload); + break; + case CC_CSMI_SAS_GET_LOCATION: + ret = _csmisas_get_location(ioc, payload); + break; + } + + if (copy_to_user(arg, payload, payload_sz)) { + printk(KERN_ERR "%s():%d: failure\n", + __func__, __LINE__); + ret = -EFAULT; + } + + out_free_pages: + free_pages((unsigned long)payload, payload_pages); + out: + return ret; +} + +/** + * _ctl_do_fw_download - Download fw to HBA + * @ioc - pointer to ioc structure + * @fwbuf - the fw buffer to flash + * @fwlen - the size of the firmware + * + * This is the fw download engine for 
ioctls + */ +static int +_ctl_do_fw_download(struct MPT3SAS_ADAPTER *ioc, char *fwbuf, size_t fwlen) +{ + MPI2RequestHeader_t *mpi_request = NULL; + MPI2DefaultReply_t *mpi_reply; + Mpi2FWDownloadRequest *dlmsg; + Mpi2FWDownloadTCSGE_t *tcsge; + u16 ioc_status; + u16 smid; + unsigned long timeout, timeleft; + u8 issue_reset; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + u32 sgl_flags; + long ret; + u32 chunk_sz = 0; + u32 cur_offset = 0; + u32 remaining_bytes = (u32)fwlen; + + issue_reset = 0; + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + + ret = mpt3sas_wait_for_ioc_to_operational(ioc, 10); + if (ret) + goto out; + + again: + if (remaining_bytes > FW_DL_CHUNK_SIZE) + chunk_sz = FW_DL_CHUNK_SIZE; + else + chunk_sz = remaining_bytes; + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(*mpi_request)); + /* + * Construct f/w download request + */ + dlmsg = (Mpi2FWDownloadRequest *)mpi_request; + dlmsg->ImageType = MPI2_FW_DOWNLOAD_ITYPE_FW; + dlmsg->Function = MPI2_FUNCTION_FW_DOWNLOAD; + dlmsg->TotalImageSize = cpu_to_le32(fwlen); + + if (remaining_bytes == chunk_sz) + dlmsg->MsgFlags = MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; + + /* Construct TrasactionContext Element */ + tcsge = (Mpi2FWDownloadTCSGE_t *)&dlmsg->SGL; + memset(tcsge, 0, sizeof(Mpi2FWDownloadTCSGE_t)); + + /* spec defines 12 or size of element. which is better */ + tcsge->DetailsLength = offsetof(Mpi2FWDownloadTCSGE_t, ImageSize); + tcsge->Flags = MPI2_SGE_FLAGS_TRANSACTION_ELEMENT; + tcsge->ImageSize = cpu_to_le32(chunk_sz); + tcsge->ImageOffset = cpu_to_le32(cur_offset); + + ret = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + ioc->ctl_cmds.smid = smid; + data_out_sz = chunk_sz; + + /* obtain dma-able memory for data transfer */ + if (!data_out) { + data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, + &data_out_dma); + } + if (!data_out) { + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + memcpy(data_out, fwbuf+cur_offset, data_out_sz); + + /* add scatter gather elements */ + psge = (void *)mpi_request + sizeof(Mpi2FWDownloadRequest) - + sizeof(MPI2_MPI_SGE_UNION) + sizeof(Mpi2FWDownloadTCSGE_t); + + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + + /* send command to firmware */ + _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); + + init_completion(&ioc->ctl_cmds.done); + ioc->put_smid_default(ioc, smid); + + timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + timeout*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + printk(MPT3SAS_ERR_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, 0); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = 
le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + cur_offset += chunk_sz; + remaining_bytes -= chunk_sz; + if (remaining_bytes > 0) + goto again; + + goto out; + issue_host_reset: + /* Reset is only here for error conditions */ + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + out: + + /* free memory associated with sg buffers */ + if (data_out) { + pci_free_consistent(ioc->pdev, data_out_sz, data_out, + data_out_dma); + data_out = NULL; + } + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return ret; +} +#include "csmi/csmisas.c" diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c old mode 100644 new mode 100755 index 7d696952b376..0475b2571b66 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3,10 +3,11 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) - * + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 @@ -43,6 +44,7 @@ * USA. */ +#include #include #include #include @@ -55,7 +57,7 @@ #include #include -#include +#include #include "mpt3sas_base.h" #include "mpt3sas_ctl.h" @@ -63,8 +65,44 @@ static struct fasync_struct *async_queue; static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); +#ifdef MPT2SAS_WD_DDIOCOUNT +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_ioc_ddio_count_show(struct device *cdev, struct device_attribute *attr, + char *buf); +static DEVICE_ATTR(ddio_count, S_IRUGO, + _ctl_ioc_ddio_count_show, NULL); +#else +ssize_t +_ctl_ioc_ddio_count_show(struct class_device *cdev, char *buf); +static CLASS_DEVICE_ATTR(ddio_count, S_IRUGO, + _ctl_ioc_ddio_count_show, NULL); +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_ioc_ddio_err_count_show(struct device *cdev, struct device_attribute *attr, + char *buf); +static DEVICE_ATTR(ddio_err_count, S_IRUGO, + _ctl_ioc_ddio_err_count_show, NULL); +#else +ssize_t +_ctl_ioc_ddio_err_count_show(struct class_device *cdev, char *buf); +static CLASS_DEVICE_ATTR(ddio_err_count, S_IRUGO, + _ctl_ioc_ddio_err_count_show, NULL); +#endif +#endif /* End of MPT2SAS_WD_DDIOCOUNT */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, + char *buf); +static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); +#else +ssize_t +_ctl_BRM_status_show(struct class_device *cdev, char *buf); +static CLASS_DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); +#endif /** * enum block_state - blocking state * @NON_BLOCKING: non blocking @@ -185,15 +223,17 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, if (!desc) return; - ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid); + printk(MPT3SAS_INFO_FMT "%s: %s, smid(%d)\n", + ioc->name, calling_function_name, desc, smid); if (!mpi_reply) return; if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) - ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n", - le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo)); + printk(MPT3SAS_INFO_FMT + "\tiocstatus(0x%04x), 
loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == @@ -206,32 +246,38 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, sas_device = mpt3sas_get_sdev_by_handle(ioc, le16_to_cpu(scsi_reply->DevHandle)); if (sas_device) { - ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n", - (u64)sas_device->sas_address, - sas_device->phy); - ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n", - (u64)sas_device->enclosure_logical_id, - sas_device->slot); + printk(MPT3SAS_WARN_FMT "\tsas_address(0x%016llx), " + "phy(%d)\n", ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + if(sas_device->enclosure_handle != 0) + printk(MPT3SAS_WARN_FMT + "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, sas_device->slot); sas_device_put(sas_device); } + if (!sas_device) { pcie_device = mpt3sas_get_pdev_by_handle(ioc, le16_to_cpu(scsi_reply->DevHandle)); if (pcie_device) { - ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n", - (unsigned long long)pcie_device->wwid, - pcie_device->port_num); - if (pcie_device->enclosure_handle != 0) - ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n", - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot); + printk(MPT3SAS_WARN_FMT "\tWWID(0x%016llx), " + "port(%d)\n", ioc->name, (unsigned long long) + pcie_device->wwid, pcie_device->port_num); + if(pcie_device->enclosure_handle != 0) + printk(MPT3SAS_WARN_FMT + "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + pcie_device->enclosure_logical_id,pcie_device->slot); pcie_device_put(pcie_device); } } if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) - ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n", - scsi_reply->SCSIState, - scsi_reply->SCSIStatus); + printk(MPT3SAS_INFO_FMT + "\tscsi_state(0x%02x), scsi_status" + "(0x%02x)\n", ioc->name, + scsi_reply->SCSIState, + scsi_reply->SCSIStatus); } } @@ -245,8 +291,8 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, * * The callback handler when using ioc->ctl_cb_idx. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -309,7 +355,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * The bitmask in ioc->event_type[] indicates which events should be * be saved in the driver event_log. This bitmask is set by application. * - * Return: 1 when event should be captured, or zero means no match. + * Returns 1 when event should be captured, or zero means no match. */ static int _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) @@ -331,6 +377,8 @@ _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) * mpt3sas_ctl_add_to_event_log - add event * @ioc: per adapter object * @mpi_reply: reply message frame + * + * Return nothing. */ void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, @@ -385,8 +433,8 @@ mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. 
* - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, @@ -402,20 +450,19 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, /** * _ctl_verify_adapter - validates ioc_number passed from application - * @ioc_number: ? + * @ioc: per adapter object * @iocpp: The ioc pointer is returned in this. * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. * - * Return: (-1) means error, else ioc_number. + * Return (-1) means error, else ioc_number. */ static int _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, - int mpi_version) + int mpi_version) { struct MPT3SAS_ADAPTER *ioc; int version = 0; - /* global ioc lock to protect controller on list operations */ spin_lock(&gioc_lock); list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { if (ioc->id != ioc_number) @@ -424,19 +471,15 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, * ioctl device or not, if not continue the search. */ version = ioc->hba_mpi_version_belonged; - /* MPI25_VERSION and MPI26_VERSION uses same ioctl - * device. - */ - if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) { - if ((version == MPI25_VERSION) || - (version == MPI26_VERSION)) + if (mpi_version == (MPI25_VERSION | MPI26_VERSION)){ + if ((version == MPI25_VERSION) || (version == MPI26_VERSION)) goto out; else continue; - } else { + } + else if (version != mpi_version) continue; - } out: spin_unlock(&gioc_lock); *iocpp = ioc; @@ -447,74 +490,82 @@ out: return -1; } -/** - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) - * @ioc: per adapter object - * - * The handler for doing any required cleanup or initialization. - */ -void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc) { - int i; - u8 issue_reset; - - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); - for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { - if (!(ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_REGISTERED)) - continue; - if ((ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_RELEASED)) - continue; - mpt3sas_send_diag_release(ioc, i, &issue_reset); - } -} - -/** - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) - * @ioc: per adapter object - * - * The handler for doing any required cleanup or initialization. - */ -void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) -{ - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { ioc->ctl_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); complete(&ioc->ctl_cmds.done); } + if (ioc->ctl_diag_cmds.status & MPT3_CMD_PENDING) { + ioc->ctl_diag_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->ctl_diag_cmds.smid); + complete(&ioc->ctl_diag_cmds.done); + } } /** * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) * @ioc: per adapter object + * @reset_phase: phase * * The handler for doing any required cleanup or initialization. 
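mpt3sas_ctl_clear_outstanding_ioctls() above applies the same idiom to each command tracker: mark the tracker with MPT3_CMD_RESET, return its smid to the pool, and complete() the waiter so a blocked ioctl thread wakes immediately instead of timing out. A sketch of that idiom factored into one helper; the function name is hypothetical, and struct _internal_cmd is assumed to be the driver's tracker type with status/smid/done fields:

static void _ctl_abort_pending_cmd(struct MPT3SAS_ADAPTER *ioc,
	struct _internal_cmd *cmd)
{
	if (!(cmd->status & MPT3_CMD_PENDING))
		return;
	cmd->status |= MPT3_CMD_RESET;	/* tells the waiter why it woke up */
	mpt3sas_base_free_smid(ioc, cmd->smid);
	complete(&cmd->done);	/* unblocks wait_for_completion_timeout() */
}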
+ * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET */ -void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) { int i; + u8 issue_reset; - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); + switch (reset_phase) { + case MPT3_IOC_PRE_RESET: + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + /* add a log message to indicate the release */ + printk(MPT3SAS_INFO_FMT "%s: Releasing the trace buffer " + "due to adapter reset.", ioc->name, __func__); + mpt3sas_send_diag_release(ioc, i, &issue_reset); + } + break; + case MPT3_IOC_AFTER_RESET: + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); + mpt3sas_ctl_clear_outstanding_ioctls(ioc); + break; + case MPT3_IOC_DONE_RESET: + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); - for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { - if (!(ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_REGISTERED)) - continue; - if ((ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_RELEASED)) - continue; - ioc->diag_buffer_status[i] |= - MPT3_DIAG_BUFFER_IS_DIAG_RESET; + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + ioc->diag_buffer_status[i] |= + MPT3_DIAG_BUFFER_IS_DIAG_RESET; + } + break; + } } /** * _ctl_fasync - - @fd: ? - @filep: ? - @mode: ? + @fd - + @filep - + @mode - * * Called when an application requests the fasync callback handler. */ @@ -524,25 +575,37 @@ _ctl_fasync(int fd, struct file *filep, int mode) { return fasync_helper(fd, filep, mode, &async_queue); } +/** + * ctl_release - + * @inode - + * @filep - + * + * Called when an application releases the fasync callback handler. + */ +int +ctl_release(struct inode *inode, struct file *filep) +{ + return fasync_helper(-1, filep, 0, &async_queue); +} + /** * _ctl_poll - - @filep: ? - @wait: ? + @file - + @wait - * */ -static __poll_t +static unsigned int _ctl_poll(struct file *filep, poll_table *wait) { struct MPT3SAS_ADAPTER *ioc; poll_wait(filep, &ctl_poll_wait, wait); - /* global ioc lock to protect controller on list operations */ spin_lock(&gioc_lock); list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { if (ioc->aen_event_read_flag) { spin_unlock(&gioc_lock); - return EPOLLIN | EPOLLRDNORM; + return POLLIN | POLLRDNORM; } } spin_unlock(&gioc_lock); @@ -552,10 +615,10 @@ _ctl_poll(struct file *filep, poll_table *wait) /** * _ctl_set_task_mid - assign an active smid to tm request * @ioc: per adapter object - * @karg: (struct mpt3_ioctl_command) - * @tm_request: pointer to mf from user space + * @karg - (struct mpt3_ioctl_command) + * @tm_request - pointer to mf from user space * - * Return: 0 when an smid if found, else fail. + * Returns 0 when a smid is found, else fail. * during failure, the reply frame is filled.
*/ static int @@ -571,6 +634,7 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, u32 sz; u32 lun; char *desc = NULL; + struct scsiio_tracker *st = NULL; if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) desc = "abort_task"; @@ -579,14 +643,14 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, else return 0; - lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); + lun = mpt_scsilun_to_int((struct scsi_lun *)tm_request->LUN); handle = le16_to_cpu(tm_request->DevHandle); - for (smid = ioc->scsiio_depth; smid && !found; smid--) { - struct scsiio_tracker *st; - + for (smid = ioc->shost->can_queue; smid && !found; smid--) { + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); - if (!scmd) + if (scmd == NULL || scmd->device == NULL || + scmd->device->hostdata == NULL) continue; if (lun != scmd->device->lun) continue; @@ -595,7 +659,9 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, continue; if (priv_data->sas_target->handle != handle) continue; - st = scsi_cmd_priv(scmd); + st = mpt3sas_base_scsi_cmd_priv(scmd); + if ((!st) || (st->smid == 0)) + continue; /* * If the given TaskMID from the user space is zero, then the @@ -609,10 +675,9 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, } if (!found) { - dctlprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n", - desc, le16_to_cpu(tm_request->DevHandle), - lun)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name, + desc, le16_to_cpu(tm_request->DevHandle), lun)); tm_reply = ioc->ctl_cmds.reply; tm_reply->DevHandle = tm_request->DevHandle; tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; @@ -623,23 +688,23 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz)) - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return 1; } - dctlprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", - desc, le16_to_cpu(tm_request->DevHandle), lun, - le16_to_cpu(tm_request->TaskMID))); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, + desc, le16_to_cpu(tm_request->DevHandle), lun, + le16_to_cpu(tm_request->TaskMID))); return 0; } /** * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode * @ioc: per adapter object - * @karg: (struct mpt3_ioctl_command) - * @mf: pointer to mf in user space + * @karg - (struct mpt3_ioctl_command) + * @mf - pointer to mf in user space */ static long _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, @@ -650,7 +715,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; struct _pcie_device *pcie_device = NULL; u16 smid; - u8 timeout; + unsigned long timeout; u8 issue_reset; u32 sz, sz_arg; void *psge; @@ -661,24 +726,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, dma_addr_t data_in_dma = 0; size_t data_in_sz = 0; long ret; - u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; + u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; issue_reset = 0; if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + 
printk(MPT3SAS_ERR_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); ret = -EAGAIN; goto out; } - ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + ret = mpt3sas_wait_for_ioc_to_operational(ioc, 10); if (ret) goto out; mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); if (!mpi_request) { - ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a memory for " + "mpi_request\n", ioc->name, __func__); ret = -ENOMEM; goto out; } @@ -692,7 +758,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, /* copy in request message frame from user */ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -EFAULT; goto out; @@ -701,13 +767,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); ret = -EAGAIN; goto out; } } else { /* Use first reserved smid for passthrough ioctls */ - smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_IOCTL; } ret = 0; @@ -722,13 +789,12 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || - mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || + mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH || mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { - + device_handle = le16_to_cpu(mpi_request->FunctionDependent1); - if (!device_handle || (device_handle > - ioc->facts.MaxDevHandle)) { + if (!device_handle || (device_handle > ioc->facts.MaxDevHandle)) { ret = -EINVAL; mpt3sas_base_free_smid(ioc, smid); goto out; @@ -737,10 +803,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, /* obtain dma-able memory for data transfer */ if (data_out_sz) /* WRITE */ { - data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, - &data_out_dma, GFP_KERNEL); + data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, + &data_out_dma); if (!data_out) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENOMEM; mpt3sas_base_free_smid(ioc, smid); @@ -748,7 +814,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } if (copy_from_user(data_out, karg.data_out_buf_ptr, data_out_sz)) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -EFAULT; mpt3sas_base_free_smid(ioc, smid); @@ -757,10 +823,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } if (data_in_sz) /* READ */ { - data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, - &data_in_dma, GFP_KERNEL); + data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, + &data_in_dma); if (!data_in) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENOMEM; 
mpt3sas_base_free_smid(ioc, smid); @@ -778,6 +844,21 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, case MPI2_FUNCTION_NVME_ENCAPSULATED: { nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; + + if (!ioc->pcie_sg_lookup) { + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT + "HBA doesn't supports NVMe." + " Hence rejecting NVMe Encapsulated request.\n", + ioc->name)); + + if (ioc->logging_level & MPT_DEBUG_TM) + _debug_dump_mf(nvme_encap_request, ioc->request_sz/4); + + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + /* * Get the Physical Address of the sense buffer. * Use Error Response buffer address field to hold the sense @@ -790,22 +871,22 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, nvme_encap_request->ErrorResponseBaseAddress = cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL); nvme_encap_request->ErrorResponseBaseAddress |= - cpu_to_le64(le32_to_cpu( - mpt3sas_base_get_sense_buffer_dma(ioc, smid))); + cpu_to_le64(le32_to_cpu( + mpt3sas_base_get_sense_buffer_dma(ioc, smid))); nvme_encap_request->ErrorResponseAllocationLength = cpu_to_le16(NVME_ERROR_RESPONSE_SIZE); memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE); ioc->build_nvme_prp(ioc, smid, nvme_encap_request, data_out_dma, data_out_sz, data_in_dma, data_in_sz); if (test_bit(device_handle, ioc->device_remove_in_progress)) { - dtmprintk(ioc, - ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n", - device_handle)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "handle(0x%04x) :" + "ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); mpt3sas_base_free_smid(ioc, smid); ret = -EINVAL; goto out; } - mpt3sas_base_put_smid_nvme_encap(ioc, smid); + ioc->put_smid_nvme_encap(ioc, smid); break; } case MPI2_FUNCTION_SCSI_IO_REQUEST: @@ -817,16 +898,17 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, scsiio_request->SenseBufferLowAddress = mpt3sas_base_get_sense_buffer_dma(ioc, smid); memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (test_bit(device_handle, ioc->device_remove_in_progress)) { - dtmprintk(ioc, - ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", - device_handle)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "handle(0x%04x) :" + "ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); mpt3sas_base_free_smid(ioc, smid); ret = -EINVAL; goto out; } - ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, - data_in_dma, data_in_sz); if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) ioc->put_smid_scsi_io(ioc, smid, device_handle); else @@ -838,10 +920,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, Mpi2SCSITaskManagementRequest_t *tm_request = (Mpi2SCSITaskManagementRequest_t *)request; - dtmprintk(ioc, - ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", - le16_to_cpu(tm_request->DevHandle), - tm_request->TaskType)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "TASK_MGMT: " + "handle(0x%04x), task_type(0x%02x)\n", ioc->name, + le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); + ioc->got_task_abort_from_ioctl = 1; if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || @@ -856,13 +938,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->got_task_abort_from_ioctl = 0; if 
(test_bit(device_handle, ioc->device_remove_in_progress)) { - dtmprintk(ioc, - ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", - device_handle)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "handle(0x%04x) :" + "ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); mpt3sas_base_free_smid(ioc, smid); ret = -EINVAL; goto out; } + mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( tm_request->DevHandle)); ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, @@ -883,7 +966,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, data = (u8 *)&smp_request->SGL; else { if (unlikely(data_out == NULL)) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); mpt3sas_base_free_smid(ioc, smid); ret = -EINVAL; @@ -903,16 +986,16 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } case MPI2_FUNCTION_SATA_PASSTHROUGH: { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); if (test_bit(device_handle, ioc->device_remove_in_progress)) { - dtmprintk(ioc, - ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", - device_handle)); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "handle(0x%04x) :" + "ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); mpt3sas_base_free_smid(ioc, smid); ret = -EINVAL; goto out; } - ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, - data_in_sz); ioc->put_smid_default(ioc, smid); break; } @@ -927,39 +1010,43 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, case MPI2_FUNCTION_TOOLBOX: { Mpi2ToolboxCleanRequest_t *toolbox_request = - (Mpi2ToolboxCleanRequest_t *)mpi_request; + (Mpi2ToolboxCleanRequest_t*)mpi_request; - if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) - || (toolbox_request->Tool == - MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN)) - ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, - data_in_dma, data_in_sz); - else if (toolbox_request->Tool == - MPI2_TOOLBOX_MEMORY_MOVE_TOOL) { + if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL + || toolbox_request->Tool == + MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN) + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + } else if (toolbox_request->Tool == MPI2_TOOLBOX_MEMORY_MOVE_TOOL) { + Mpi2ToolboxMemMoveRequest_t *mem_move_request = - (Mpi2ToolboxMemMoveRequest_t *)request; + (Mpi2ToolboxMemMoveRequest_t*)request; Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL; - ioc->build_sg_mpi(ioc, psge, data_out_dma, - data_out_sz, data_in_dma, data_in_sz); + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (data_out_sz && !data_in_sz) { - dst = - (Mpi2SGESimple64_t *)&mem_move_request->SGL; + dst = (Mpi2SGESimple64_t *)&mem_move_request->SGL; src = (void *)dst + ioc->sge_size; memcpy(&tmp, src, ioc->sge_size); memcpy(src, dst, ioc->sge_size); memcpy(dst, &tmp, ioc->sge_size); } + if (ioc->logging_level & MPT_DEBUG_TM) { - ioc_info(ioc, - "Mpi2ToolboxMemMoveRequest_t request msg\n"); - _debug_dump_mf(mem_move_request, - ioc->request_sz/4); + printk(MPT3SAS_INFO_FMT + "Mpi2ToolboxMemMoveRequest_t request msg\n", + ioc->name); + _debug_dump_mf(mem_move_request, ioc->request_sz/4); } - } else + + } else { ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, - data_in_dma, data_in_sz); + data_in_dma, data_in_sz); + } ioc->put_smid_default(ioc, 
smid); break; } @@ -976,7 +1063,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } /* drop to default case for posting the request */ } - /* fall through */ default: ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); @@ -984,10 +1070,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, break; } - if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) + timeout = karg.timeout; + if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; - else - timeout = karg.timeout; + wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ); if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { Mpi2SCSITaskManagementRequest_t *tm_request = @@ -1002,7 +1088,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->ignore_loginfos = 0; } if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, ioc->ctl_cmds.status, mpi_request, karg.data_sge_offset); @@ -1016,17 +1102,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, Mpi2SCSITaskManagementReply_t *tm_reply = (Mpi2SCSITaskManagementReply_t *)mpi_reply; - ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n", - le16_to_cpu(tm_reply->IOCStatus), - le32_to_cpu(tm_reply->IOCLogInfo), - le32_to_cpu(tm_reply->TerminationCount)); + printk(MPT3SAS_INFO_FMT "TASK_MGMT: " + "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " + "TerminationCount(0x%08x)\n", ioc->name, + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); } /* copy out xdata to user */ if (data_in_sz) { if (copy_to_user(karg.data_in_buf_ptr, data_in, data_in_sz)) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENODATA; goto out; @@ -1038,7 +1126,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz)) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENODATA; goto out; @@ -1051,17 +1139,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED)) { if (karg.sense_data_ptr == NULL) { - ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n"); + printk(MPT3SAS_ERR_FMT "Response buffer provided" + " by application is NULL; Response data will" + " not be returned.\n", ioc->name); goto out; } - sz_arg = (mpi_request->Function == - MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE : - SCSI_SENSE_BUFFERSIZE; + sz_arg = (mpi_request->Function == + MPI2_FUNCTION_NVME_ENCAPSULATED) ? 
NVME_ERROR_RESPONSE_SIZE : + SCSI_SENSE_BUFFERSIZE; sz = min_t(u32, karg.max_sense_bytes, sz_arg); if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, - __LINE__, __func__); + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); ret = -ENODATA; goto out; } @@ -1074,24 +1164,24 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { - ioc_info(ioc, "issue target reset: handle = (0x%04x)\n", - le16_to_cpu(mpi_request->FunctionDependent1)); - mpt3sas_halt_firmware(ioc); + printk(MPT3SAS_INFO_FMT "issue target reset: handle " + "= (0x%04x)\n", ioc->name, + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_halt_firmware(ioc, 0); pcie_device = mpt3sas_get_pdev_by_handle(ioc, - le16_to_cpu(mpi_request->FunctionDependent1)); + le16_to_cpu(mpi_request->FunctionDependent1)); if (pcie_device && (!ioc->tm_custom_handling) && - (!(mpt3sas_scsih_is_pcie_scsi_device( - pcie_device->device_info)))) + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) mpt3sas_scsih_issue_locked_tm(ioc, - le16_to_cpu(mpi_request->FunctionDependent1), - 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 0, pcie_device->reset_timeout, - MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); - else + le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, + 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + pcie_device->reset_timeout, + MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); + else mpt3sas_scsih_issue_locked_tm(ioc, - le16_to_cpu(mpi_request->FunctionDependent1), - 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, + 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, + MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); } else mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); } @@ -1102,11 +1192,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, /* free memory associated with sg buffers */ if (data_in) - dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + pci_free_consistent(ioc->pdev, data_in_sz, data_in, data_in_dma); if (data_out) - dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + pci_free_consistent(ioc->pdev, data_out_sz, data_out, data_out_dma); kfree(mpi_request); @@ -1117,20 +1207,22 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, /** * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content */ static long _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) { struct mpt3_ioctl_iocinfo karg; + u8 revision; - dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, + __func__)); memset(&karg, 0 , sizeof(karg)); if (ioc->pfacts) karg.port_number = ioc->pfacts[0].PortNumber; - karg.hw_rev = ioc->pdev->revision; + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); + karg.hw_rev = revision; karg.pci_id = ioc->pdev->device; karg.subsystem_device = ioc->pdev->subsystem_device; karg.subsystem_vendor = ioc->pdev->subsystem_vendor; @@ -1150,18 +1242,19 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); 
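Each copy-out in this path is clamped with min_t() so a user-supplied length can never read past what the driver actually holds (reply frame, SCSI sense, or NVMe error response). The sense/error-response case above, condensed into one fragment (variable names as in the surrounding code):

	sz_arg = (mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) ?
	    NVME_ERROR_RESPONSE_SIZE : SCSI_SENSE_BUFFERSIZE;
	sz = min_t(u32, karg.max_sense_bytes, sz_arg);	/* clamp to driver-held size */
	if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) {
		ret = -ENODATA;
		goto out;
	}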
break; case MPI25_VERSION: + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); + break; case MPI26_VERSION: + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); if (ioc->is_gen35_ioc) karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35; - else - karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; - strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); break; } karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); - if (copy_to_user(arg, &karg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } @@ -1171,7 +1264,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content */ static long _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -1179,20 +1272,20 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) struct mpt3_ioctl_eventquery karg; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, + __func__)); karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; memcpy(karg.event_types, ioc->event_type, MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); if (copy_to_user(arg, &karg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } @@ -1202,7 +1295,7 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content */ static long _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -1210,13 +1303,13 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) struct mpt3_ioctl_eventenable karg; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, + __func__)); memcpy(ioc->event_type, karg.event_types, MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); @@ -1230,7 +1323,7 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); if (!ioc->event_log) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -ENOMEM; } @@ -1240,7 +1333,7 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content */ static long _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -1250,13 +1343,13 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) struct mpt3_ioctl_eventreport __user 
*uarg = arg; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, + __func__)); number_bytes = karg.hdr.max_data_size - sizeof(struct mpt3_ioctl_header); @@ -1271,7 +1364,7 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } @@ -1284,7 +1377,7 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * _ctl_do_reset - main handler for MPT3HARDRESET opcode * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content */ static long _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -1293,20 +1386,24 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) int retval; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - if (ioc->shost_recovery || ioc->pci_error_recovery || - ioc->is_driver_loading) + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) return -EAGAIN; - dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, + __func__)); + scsi_block_requests(ioc->shost); retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); - ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + printk(MPT3SAS_INFO_FMT "host reset: %s\n", + ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); return 0; } @@ -1329,13 +1426,13 @@ _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->sas_device_lock, flags); list_for_each_entry(sas_device, &ioc->sas_device_list, list) { if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && - btdh->handle == sas_device->handle) { + btdh->handle == sas_device->handle) { btdh->bus = sas_device->channel; btdh->id = sas_device->id; rc = 1; goto out; } else if (btdh->bus == sas_device->channel && btdh->id == - sas_device->id && btdh->handle == 0xFFFF) { + sas_device->id && btdh->handle == 0xFFFF) { btdh->handle = sas_device->handle; rc = 1; goto out; @@ -1382,6 +1479,7 @@ _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc, return rc; } + /** * _ctl_btdh_search_raid_device - searching for raid device * @ioc: per adapter object @@ -1401,13 +1499,13 @@ _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->raid_device_lock, flags); list_for_each_entry(raid_device, &ioc->raid_device_list, list) { if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && - btdh->handle == raid_device->handle) { + btdh->handle == raid_device->handle) { btdh->bus = raid_device->channel; btdh->id = raid_device->id; rc = 1; goto out; } else if (btdh->bus == raid_device->channel && btdh->id == - raid_device->id && btdh->handle == 0xFFFF) { + raid_device->id && btdh->handle == 0xFFFF) { btdh->handle = raid_device->handle; rc = 1; goto out; @@ -1421,7 +1519,7 @@ _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, /** * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content */ static long _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -1430,13 +1528,13 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) int rc; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); rc = _ctl_btdh_search_sas_device(ioc, &karg); if (!rc) @@ -1445,7 +1543,7 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) _ctl_btdh_search_raid_device(ioc, &karg); if (copy_to_user(arg, &karg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } @@ -1484,6 +1582,62 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) return rc; } +/** + * _ctl_diag_get_bufftype - return diag buffer type + * either TRACE, SNAPSHOT, or EXTENDED + * @ioc: per adapter object + * @unique_id: specifies the unique_id for the buffer + * + * returns MPT3_DIAG_UID_NOT_FOUND if the id not found + */ +static u8 +_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id) +{ + u8 index; + + for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) + { + if (ioc->unique_id[index] == unique_id) + return index; + } + + return MPT3_DIAG_UID_NOT_FOUND; +} + +/** + * mpt3sas_ctl_diag_done - ctl diag_buffer completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: none. + * + * The callback handler when using ioc->ctl_diag_cb_idx. 
+ * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_ctl_diag_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->ctl_diag_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->ctl_diag_cmds.smid != smid) + return 1; + ioc->ctl_diag_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->ctl_diag_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->ctl_diag_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->ctl_diag_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->ctl_diag_cmds.done); + return 1; +} /** * _ctl_diag_register_2 - wrapper for registering diag buffer support @@ -1507,60 +1661,131 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state; u8 issue_reset = 0; - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { - ioc_err(ioc, "%s: failed due to ioc not operational\n", - __func__); + printk(MPT3SAS_ERR_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } - if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + if (ioc->ctl_diag_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } buffer_type = diag_register->buffer_type; if (!_ctl_diag_capability(ioc, buffer_type)) { - ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have capability for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } + if( diag_register->unique_id == 0 ) { + printk(MPT3SAS_ERR_FMT "%s: Invalid UID(0x%08x), " + "buffer_type(0x%02x)\n", ioc->name, __func__, + diag_register->unique_id, buffer_type); + return -EINVAL; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED) && + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) is already " + "registered by application with UID(0x%08x)\n", ioc->name, + __func__, buffer_type, ioc->unique_id[buffer_type]); + return -EINVAL; + } + if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_REGISTERED) { - ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n", - __func__, buffer_type); - return -EINVAL; + /* + * If driver posts buffer initially, then an application wants to Register + * that buffer (own it) without Releasing first, the application Register + * command MUST have the same buffer type and size in the Register + * command (obtained from the Query command). + * Otherwise that Register command will be failed. + * If the application has released the buffer but wants to re-register it, + * it should be allowed as long as the Unique-Id/Size match. + */ + + if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID && + ioc->diag_buffer_sz[buffer_type] == diag_register->requested_buffer_size) { + + if (!(ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)) { + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: diag_buffer (%d) ownership changed. 
" + "old-ID(0x%08x), new-ID(0x%08x)\n", ioc->name, __func__, buffer_type, + ioc->unique_id[buffer_type],diag_register->unique_id)); + + /* application wants to own the buffer with the same size */ + ioc->unique_id[buffer_type] = diag_register->unique_id; + rc = 0; /* success */ + goto out; + } + } + else if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID) { + if (ioc->unique_id[buffer_type] != diag_register->unique_id || + ioc->diag_buffer_sz[buffer_type] != diag_register->requested_buffer_size || + !(ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)) { + printk(MPT3SAS_ERR_FMT "%s: already has a registered " + "buffer for buffer_type(0x%02x)\n", ioc->name, __func__, + buffer_type); + return -EINVAL; + } + } + else { + printk(MPT3SAS_ERR_FMT "%s: already has a registered " + "buffer for buffer_type(0x%02x)\n", ioc->name, __func__, + buffer_type); + return -EINVAL; + } + } else if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + + if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size) { + + printk(MPT3SAS_ERR_FMT "%s: already a buffer is " + "allocated for buffer_type(0x%02x) of " + "size %d bytes, so please try registering again" + "with same size\n", ioc->name, __func__, + buffer_type, ioc->diag_buffer_sz[buffer_type]); + return -EINVAL; + } } if (diag_register->requested_buffer_size % 4) { - ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: the requested_buffer_size " + "is not 4 byte aligned\n", ioc->name, __func__); return -EINVAL; } - smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_diag_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } rc = 0; - ioc->ctl_cmds.status = MPT3_CMD_PENDING; - memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + ioc->ctl_diag_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_diag_cmds.reply, 0, ioc->reply_sz); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); - ioc->ctl_cmds.smid = smid; + ioc->ctl_diag_cmds.smid = smid; request_data = ioc->diag_buffer[buffer_type]; request_data_sz = diag_register->requested_buffer_size; ioc->unique_id[buffer_type] = diag_register->unique_id; - ioc->diag_buffer_status[buffer_type] = 0; + ioc->diag_buffer_status[buffer_type] &= MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; memcpy(ioc->product_specific[buffer_type], diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; @@ -1568,9 +1793,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, if (request_data) { request_data_dma = ioc->diag_buffer_dma[buffer_type]; if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) { - dma_free_coherent(&ioc->pdev->dev, - ioc->diag_buffer_sz[buffer_type], - request_data, request_data_dma); + pci_free_consistent(ioc->pdev, + ioc->diag_buffer_sz[buffer_type], + request_data, request_data_dma); request_data = NULL; } } @@ -1578,13 +1803,15 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, if (request_data == NULL) { ioc->diag_buffer_sz[buffer_type] = 0; ioc->diag_buffer_dma[buffer_type] = 0; - request_data = dma_alloc_coherent(&ioc->pdev->dev, - request_data_sz, &request_data_dma, GFP_KERNEL); + request_data = pci_alloc_consistent( + ioc->pdev, request_data_sz, &request_data_dma); if 
(request_data == NULL) { - ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n", - __func__, request_data_sz); + printk(MPT3SAS_ERR_FMT "%s: failed allocating memory" + " for diag buffers, requested size(%d)\n", + ioc->name, __func__, request_data_sz); mpt3sas_base_free_smid(ioc, smid); - return -ENOMEM; + rc = -ENOMEM; + goto out; } ioc->diag_buffer[buffer_type] = request_data; ioc->diag_buffer_sz[buffer_type] = request_data_sz; @@ -1599,47 +1826,48 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - dctlprintk(ioc, - ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", - __func__, request_data, - (unsigned long long)request_data_dma, - le32_to_cpu(mpi_request->BufferLength))); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: diag_buffer(0x%p), " + "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data, + (unsigned long long)request_data_dma, + le32_to_cpu(mpi_request->BufferLength))); for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) mpi_request->ProductSpecific[i] = cpu_to_le32(ioc->product_specific[buffer_type][i]); - init_completion(&ioc->ctl_cmds.done); + init_completion(&ioc->ctl_diag_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->ctl_cmds.done, + wait_for_completion_timeout(&ioc->ctl_diag_cmds.done, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); - if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = + if (!(ioc->ctl_diag_cmds.status & MPT3_CMD_COMPLETE)) { + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, + ioc->ctl_diag_cmds.status, mpi_request, sizeof(Mpi2DiagBufferPostRequest_t)/4); goto issue_host_reset; } /* process the completed Reply Message Frame */ - if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { - ioc_err(ioc, "%s: no reply message\n", __func__); + if ((ioc->ctl_diag_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + printk(MPT3SAS_ERR_FMT "%s: no reply message\n", + ioc->name, __func__); rc = -EFAULT; goto out; } - mpi_reply = ioc->ctl_cmds.reply; + mpi_reply = ioc->ctl_diag_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT3_DIAG_BUFFER_IS_REGISTERED; - dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: success\n", + ioc->name, __func__)); } else { - ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", - __func__, - ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "%s: ioc_status(0x%04x) " + "log_info(0x%08x)\n", ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } @@ -1650,10 +1878,14 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, out: if (rc && request_data) - dma_free_coherent(&ioc->pdev->dev, request_data_sz, + { + pci_free_consistent(ioc->pdev, request_data_sz, request_data, request_data_dma); + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } - ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + ioc->ctl_diag_cmds.status = MPT3_CMD_NOT_USED; return rc; } @@ -1669,22 +1901,91 @@ void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) { struct mpt3_diag_register diag_register; + u32 ret_val; + u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10; + u32 min_trace_buff_size = 0; + u32 decr_trace_buff_size = 0; memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); if 
(bits_to_register & 1) { - ioc_info(ioc, "registering trace buffer support\n"); + + printk(MPT3SAS_INFO_FMT "registering trace buffer support\n", + ioc->name); ioc->diag_trigger_master.MasterData = - (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; - /* register for 2MB buffers */ - diag_register.requested_buffer_size = 2 * (1024 * 1024); - diag_register.unique_id = 0x7075900; - _ctl_diag_register_2(ioc, &diag_register); + diag_register.unique_id = + (ioc->hba_mpi_version_belonged == MPI2_VERSION)? + MPT2DIAGBUFFUNIQUEID:MPT3DIAGBUFFUNIQUEID; + + if (trace_buff_size != 0) + { + diag_register.requested_buffer_size = trace_buff_size; + min_trace_buff_size = ioc->manu_pg11.HostTraceBufferMinSizeKB<<10; + decr_trace_buff_size = ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10; + + if (min_trace_buff_size > trace_buff_size) + { + /* The buff size is not set correctly */ + printk(MPT3SAS_ERR_FMT + "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n", + ioc->name, min_trace_buff_size>>10, trace_buff_size>>10); + printk(MPT3SAS_ERR_FMT + "Using zero Min Trace Buff Size\n", ioc->name); + min_trace_buff_size = 0; + } + + if (decr_trace_buff_size == 0) + { + /* retry the min size if decrement is not available */ + decr_trace_buff_size = trace_buff_size - min_trace_buff_size; + } + } + else + { + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + } + + do + { + ret_val = _ctl_diag_register_2(ioc, &diag_register); + + if (ret_val == -ENOMEM && min_trace_buff_size && + (trace_buff_size - decr_trace_buff_size) >= min_trace_buff_size) + { + /* adjust the buffer size */ + trace_buff_size -= decr_trace_buff_size; + diag_register.requested_buffer_size = trace_buff_size; + } + else + { + break; + } + } while(true); + + if (ret_val == -ENOMEM) + { + printk(MPT3SAS_ERR_FMT + "Cannot allocate trace buffer memory. 
Last memory tried = %d KB\n", + ioc->name, diag_register.requested_buffer_size>>10); + } + else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] + & MPT3_DIAG_BUFFER_IS_REGISTERED) + { + printk(MPT3SAS_INFO_FMT "Trace buffer memory %d KB allocated\n", + ioc->name, diag_register.requested_buffer_size>>10); + + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } } if (bits_to_register & 2) { - ioc_info(ioc, "registering snapshot buffer support\n"); + printk(MPT3SAS_INFO_FMT "registering snapshot buffer support\n", + ioc->name); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; /* register for 2MB buffers */ diag_register.requested_buffer_size = 2 * (1024 * 1024); @@ -1693,7 +1994,8 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) } if (bits_to_register & 4) { - ioc_info(ioc, "registering extended buffer support\n"); + printk(MPT3SAS_INFO_FMT "registering extended buffer support\n", + ioc->name); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; /* register for 2MB buffers */ diag_register.requested_buffer_size = 2 * (1024 * 1024); @@ -1705,7 +2007,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) /** * _ctl_diag_register - application register with driver * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content * * This will allow the driver to setup any required buffers that will be * needed by firmware to communicate with the driver. @@ -1717,19 +2019,25 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) long rc; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } rc = _ctl_diag_register_2(ioc, &karg); + + if (!rc && (ioc->diag_buffer_status[karg.buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + ioc->diag_buffer_status[karg.buffer_type] |= + MPT3_DIAG_BUFFER_IS_APP_OWNED; + return rc; } /** * _ctl_diag_unregister - application unregister with driver * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content * * This will allow the driver to cleanup any memory allocated for diag * messages and to free up any resources. 
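
The hunk above replaces the old fixed 2 MB trace-buffer registration with a sizing loop driven by Manufacturing Page 11: start at HostTraceBufferMaxSizeKB and, on -ENOMEM, shrink the request by HostTraceBufferDecrementSizeKB until it would fall below HostTraceBufferMinSizeKB. A standalone sketch of that sizing logic, with illustrative KB values and malloc() merely standing in for the DMA-coherent allocation done by _ctl_diag_register_2():

/* Sketch of the shrink-and-retry trace buffer sizing added above.
 * Limits are illustrative; malloc() stands in for pci_alloc_consistent(). */
#include <stdio.h>
#include <stdlib.h>

static void *try_alloc(size_t sz)
{
	/* fail anything above 1 MB so the retry path is exercised */
	return (sz > (1024 * 1024)) ? NULL : malloc(sz);
}

int main(void)
{
	size_t max_sz = 4096 << 10;	/* HostTraceBufferMaxSizeKB */
	size_t min_sz = 256 << 10;	/* HostTraceBufferMinSizeKB */
	size_t decr = 512 << 10;	/* HostTraceBufferDecrementSizeKB */
	size_t req = max_sz;
	void *buf;

	if (min_sz > max_sz)	/* bogus page data: use a zero minimum */
		min_sz = 0;
	if (decr == 0)		/* no decrement: retry once at the minimum */
		decr = max_sz - min_sz;

	while (!(buf = try_alloc(req)) && min_sz && req >= min_sz + decr)
		req -= decr;	/* trim the request and try again */

	if (buf)
		printf("trace buffer: %zu KB allocated\n", req >> 10);
	else
		printf("failed, last size tried: %zu KB\n", req >> 10);
	free(buf);
	return 0;
}
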
@@ -1744,60 +2052,79 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) u8 buffer_type; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + printk(MPT3SAS_ERR_FMT "%s: buffer with unique_id(0x%08x)" + "not found\n", ioc->name, __func__, karg.unique_id); + return -EINVAL; + } - buffer_type = karg.unique_id & 0x000000ff; if (!_ctl_diag_capability(ioc, buffer_type)) { - ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have capability for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) is not " + "registered\n", ioc->name, __func__, buffer_type); return -EINVAL; } if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { - ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) has not been " + "released\n", ioc->name, __func__, buffer_type); return -EINVAL; } if (karg.unique_id != ioc->unique_id[buffer_type]) { - ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", - __func__, karg.unique_id); + printk(MPT3SAS_ERR_FMT "%s: unique_id(0x%08x) is not " + "registered\n", ioc->name, __func__, karg.unique_id); return -EINVAL; } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { - ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have memory allocated for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } - request_data_sz = ioc->diag_buffer_sz[buffer_type]; - request_data_dma = ioc->diag_buffer_dma[buffer_type]; - dma_free_coherent(&ioc->pdev->dev, request_data_sz, - request_data, request_data_dma); - ioc->diag_buffer[buffer_type] = NULL; - ioc->diag_buffer_status[buffer_type] = 0; + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) + { + ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_APP_OWNED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_REGISTERED; + } + else + { + request_data_sz = ioc->diag_buffer_sz[buffer_type]; + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + pci_free_consistent(ioc->pdev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] = 0; + } + return 0; } /** * _ctl_diag_query - query relevant info associated with diag buffers * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content * * The application will send only buffer_type and unique_id. Driver will * inspect unique_id first, if valid, fill in all the info. 
If unique_id is @@ -1812,52 +2139,62 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) u8 buffer_type; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); karg.application_flags = 0; buffer_type = karg.buffer_type; if (!_ctl_diag_capability(ioc, buffer_type)) { - ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have capability for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } - if ((ioc->diag_buffer_status[buffer_type] & - MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", - __func__, buffer_type); - return -EINVAL; + if (!(ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) { + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) is not " + "registered\n", ioc->name, __func__, buffer_type); + return -EINVAL; + } } - if (karg.unique_id & 0xffffff00) { + if (karg.unique_id) { if (karg.unique_id != ioc->unique_id[buffer_type]) { - ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", - __func__, karg.unique_id); + printk(MPT3SAS_ERR_FMT "%s: unique_id(0x%08x) is not " + "registered\n", ioc->name, __func__, + karg.unique_id); return -EINVAL; } } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { - ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have buffer for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } - if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) - karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | - MPT3_APP_FLAGS_BUFFER_VALID); - else - karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | - MPT3_APP_FLAGS_BUFFER_VALID | - MPT3_APP_FLAGS_FW_BUFFER_ACCESS); + karg.application_flags = 0; + + if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_REGISTERED)) + karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID; + + if (!((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) || + (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_DIAG_RESET))) + karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS; + + if (!(ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) + karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC; + + if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_APP_OWNED)) + karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED; for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) karg.product_specific[i] = @@ -1869,8 +2206,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { - ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n", - __func__, arg); + printk(MPT3SAS_ERR_FMT "%s: unable to write mpt3_diag_query " + "data @ %p\n", ioc->name, __func__, arg); return -EFAULT; } return 0; @@ -1879,8 +2216,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * mpt3sas_send_diag_release - Diag Release Message * @ioc: per adapter object 
- * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED - * @issue_reset: specifies whether host reset is required. + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @issue_reset - specifies whether host reset is required. * */ int @@ -1894,8 +2231,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, u32 ioc_state; int rc; - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); rc = 0; *issue_reset = 0; @@ -1906,83 +2243,88 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, MPT3_DIAG_BUFFER_IS_REGISTERED) ioc->diag_buffer_status[buffer_type] |= MPT3_DIAG_BUFFER_IS_RELEASED; - dctlprintk(ioc, - ioc_info(ioc, "%s: skipping due to FAULT state\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "skipping due to FAULT state\n", ioc->name, + __func__)); rc = -EAGAIN; goto out; } - if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + if (ioc->ctl_diag_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } - smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_diag_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } - ioc->ctl_cmds.status = MPT3_CMD_PENDING; - memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + ioc->ctl_diag_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_diag_cmds.reply, 0, ioc->reply_sz); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); - ioc->ctl_cmds.smid = smid; + ioc->ctl_diag_cmds.smid = smid; mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; mpi_request->BufferType = buffer_type; mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - init_completion(&ioc->ctl_cmds.done); + init_completion(&ioc->ctl_diag_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->ctl_cmds.done, + wait_for_completion_timeout(&ioc->ctl_diag_cmds.done, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); - if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + if (!(ioc->ctl_diag_cmds.status & MPT3_CMD_COMPLETE)) { *issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, + ioc->ctl_diag_cmds.status, mpi_request, sizeof(Mpi2DiagReleaseRequest_t)/4); + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; rc = -EFAULT; goto out; } /* process the completed Reply Message Frame */ - if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { - ioc_err(ioc, "%s: no reply message\n", __func__); + if ((ioc->ctl_diag_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + printk(MPT3SAS_ERR_FMT "%s: no reply message\n", + ioc->name, __func__); rc = -EFAULT; goto out; } - mpi_reply = ioc->ctl_cmds.reply; + mpi_reply = ioc->ctl_diag_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT3_DIAG_BUFFER_IS_RELEASED; - dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: success\n", + ioc->name, __func__)); } else { - ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", - __func__, - ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "%s: ioc_status(0x%04x) " + "log_info(0x%08x)\n", ioc->name, __func__, + 
ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } out: - ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + ioc->ctl_diag_cmds.status = MPT3_CMD_NOT_USED; return rc; } /** * _ctl_diag_release - request to send Diag Release Message to firmware - * @ioc: ? - * @arg: user space buffer containing ioctl content + * @arg - user space buffer containing ioctl content * * This allows ownership of the specified buffer to returned to the driver, * allowing an application to read the buffer without fear that firmware is - * overwriting information in the buffer. + * overwritting information in the buffer. */ static long _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -1994,46 +2336,53 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) u8 issue_reset = 0; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + printk(MPT3SAS_ERR_FMT "%s: buffer with unique_id(0x%08x)" + "not found\n", ioc->name, __func__, karg.unique_id); + return -EINVAL; + } - buffer_type = karg.unique_id & 0x000000ff; if (!_ctl_diag_capability(ioc, buffer_type)) { - ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have capability for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) is not " + "registered\n", ioc->name, __func__, buffer_type); return -EINVAL; } if (karg.unique_id != ioc->unique_id[buffer_type]) { - ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", - __func__, karg.unique_id); + printk(MPT3SAS_ERR_FMT "%s: unique_id(0x%08x) is not " + "registered\n", ioc->name, __func__, karg.unique_id); return -EINVAL; } if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) { - ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n", - __func__, buffer_type); - return 0; + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) " + "is already released\n", ioc->name, __func__, + buffer_type); + return -EINVAL; } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { - ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have memory allocated for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } @@ -2044,8 +2393,9 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) MPT3_DIAG_BUFFER_IS_RELEASED; ioc->diag_buffer_status[buffer_type] &= ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; - ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) " + "was released due to host reset\n", ioc->name, __func__, + buffer_type); return 0; } @@ -2060,7 +2410,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * _ctl_diag_read_buffer - request for copy of the diag buffer * @ioc: per adapter object - * @arg: user space buffer containing ioctl content + 
* @arg - user space buffer containing ioctl content */ static long _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) @@ -2078,39 +2428,53 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) u8 issue_reset = 0; if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } - dctlprintk(ioc, ioc_info(ioc, "%s\n", - __func__)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + printk(MPT3SAS_ERR_FMT "%s: buffer with unique_id(0x%08x)" + "not found\n", ioc->name, __func__, karg.unique_id); + return -EINVAL; + } - buffer_type = karg.unique_id & 0x000000ff; if (!_ctl_diag_capability(ioc, buffer_type)) { - ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have capability for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + printk(MPT3SAS_ERR_FMT "%s: buffer_type(0x%02x) is not " + "registered\n", ioc->name, __func__, buffer_type); + return -EINVAL; + } + if (karg.unique_id != ioc->unique_id[buffer_type]) { - ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", - __func__, karg.unique_id); + printk(MPT3SAS_ERR_FMT "%s: unique_id(0x%08x) is not " + "registered\n", ioc->name, __func__, karg.unique_id); return -EINVAL; } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { - ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n", - __func__, buffer_type); + printk(MPT3SAS_ERR_FMT "%s: doesn't have buffer for " + "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } request_size = ioc->diag_buffer_sz[buffer_type]; if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { - ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: either the starting_offset " + "or bytes_to_read are not 4 byte aligned\n", ioc->name, + __func__); return -EINVAL; } @@ -2118,10 +2482,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) return -EINVAL; diag_data = (void *)(request_data + karg.starting_offset); - dctlprintk(ioc, - ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n", - __func__, diag_data, karg.starting_offset, - karg.bytes_to_read)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: diag_buffer(%p), " + "offset(%d), sz(%d)\n", ioc->name, __func__, + diag_data, karg.starting_offset, karg.bytes_to_read)); /* Truncate data on requests that are too large */ if ((diag_data + karg.bytes_to_read < diag_data) || @@ -2132,45 +2495,47 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) if (copy_to_user((void __user *)uarg->diagnostic_data, diag_data, copy_size)) { - ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", - __func__, diag_data); + printk(MPT3SAS_ERR_FMT "%s: Unable to write " + "mpt_diag_read_buffer_t data @ %p\n", ioc->name, + __func__, diag_data); return -EFAULT; } if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) return 0; - dctlprintk(ioc, - ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n", - __func__, buffer_type)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: Reregister " + "buffer_type(0x%02x)\n", ioc->name, 
__func__, buffer_type)); if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { - dctlprintk(ioc, - ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n", - __func__, buffer_type)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "buffer_type(0x%02x) is still registered\n", ioc->name, + __func__, buffer_type)); return 0; } /* Get a free request frame and save the message context. */ - if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + if (ioc->ctl_diag_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } - smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_diag_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } rc = 0; - ioc->ctl_cmds.status = MPT3_CMD_PENDING; - memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + ioc->ctl_diag_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_diag_cmds.reply, 0, ioc->reply_sz); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); - ioc->ctl_cmds.smid = smid; + ioc->ctl_diag_cmds.smid = smid; mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; mpi_request->BufferType = buffer_type; @@ -2184,37 +2549,41 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - init_completion(&ioc->ctl_cmds.done); + init_completion(&ioc->ctl_diag_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->ctl_cmds.done, + wait_for_completion_timeout(&ioc->ctl_diag_cmds.done, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); - if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = + if (!(ioc->ctl_diag_cmds.status & MPT3_CMD_COMPLETE)) { + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, + ioc->ctl_diag_cmds.status, mpi_request, sizeof(Mpi2DiagBufferPostRequest_t)/4); goto issue_host_reset; } /* process the completed Reply Message Frame */ - if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { - ioc_err(ioc, "%s: no reply message\n", __func__); + if ((ioc->ctl_diag_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + printk(MPT3SAS_ERR_FMT "%s: no reply message\n", + ioc->name, __func__); rc = -EFAULT; goto out; } - mpi_reply = ioc->ctl_cmds.reply; + mpi_reply = ioc->ctl_diag_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT3_DIAG_BUFFER_IS_REGISTERED; - dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: success\n", + ioc->name, __func__)); } else { - ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", - __func__, ioc_status, - le32_to_cpu(mpi_reply->IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "%s: ioc_status(0x%04x) " + "log_info(0x%08x)\n", ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } @@ -2224,7 +2593,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) out: - ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + ioc->ctl_diag_cmds.status = MPT3_CMD_NOT_USED; return rc; } @@ -2234,8 +2603,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) /** * _ctl_compat_mpt_command - 
convert 32bit pointers to 64bit. * @ioc: per adapter object - * @cmd: ioctl opcode - * @arg: (struct mpt3_ioctl_command32) + * @cmd - ioctl opcode + * @arg - (struct mpt3_ioctl_command32) * * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. */ @@ -2253,7 +2622,7 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, uarg = (struct mpt3_ioctl_command32 __user *) arg; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } @@ -2278,12 +2647,12 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, /** * _ctl_ioctl_main - main ioctl entry point - * @file: (struct file) - * @cmd: ioctl opcode - * @arg: user space data buffer - * @compat: handles 32 bit applications in 64bit os + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - user space data buffer + * @compat - handles 32 bit applications in 64bit os * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & - * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. + * MPI25_VERSION for mpt3ctl ioctl device. */ static long _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, @@ -2297,35 +2666,40 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, /* get IOCTL header */ if (copy_from_user(&ioctl_header, (char __user *)arg, sizeof(struct mpt3_ioctl_header))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(ioctl_header.ioc_number, - &ioc, mpi_version) == -1 || !ioc) + &ioc, mpi_version) == -1 || !ioc) return -ENODEV; - /* pci_access_mutex lock acquired by ioctl path */ mutex_lock(&ioc->pci_access_mutex); - - if (ioc->shost_recovery || ioc->pci_error_recovery || - ioc->is_driver_loading || ioc->remove_host) { + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) { ret = -EAGAIN; - goto out_unlock_pciaccess; + goto unlock_pci_access; } state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; if (state == NON_BLOCKING) { if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { ret = -EAGAIN; - goto out_unlock_pciaccess; + goto unlock_pci_access; } } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { ret = -ERESTARTSYS; - goto out_unlock_pciaccess; + goto unlock_pci_access; } +#if defined(CPQ_CIM) + if ((cmd > 0xCC770000) && (cmd < 0xCC77003D)) { + ret = _ctl_ioctl_csmi(ioc, cmd, arg); + goto out; + } +#endif switch (cmd) { case MPT3IOCINFO: @@ -2347,7 +2721,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, } #endif if (copy_from_user(&karg, arg, sizeof(karg))) { - pr_err("failure at %s:%d/%s()!\n", + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -EFAULT; break; @@ -2357,6 +2731,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, ret = -EINVAL; break; } + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { uarg = arg; ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); @@ -2403,61 +2778,62 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, ret = _ctl_diag_read_buffer(ioc, arg); break; default: - dctlprintk(ioc, - ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n", - cmd)); + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT + "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); break; } - +#if defined(CPQ_CIM) +out: +#endif mutex_unlock(&ioc->ctl_cmds.mutex); -out_unlock_pciaccess: +unlock_pci_access: mutex_unlock(&ioc->pci_access_mutex); return ret; } /** - * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) - * @file: (struct file) - * @cmd: ioctl opcode - * @arg: ? + * _ctl_ioctl - main ioctl entry point (unlocked) + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - */ static long _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; - /* pass MPI25_VERSION | MPI26_VERSION value, - * to indicate that this ioctl cmd + /* pass MPI25_VERSION value, to indicate that this ioctl cmd * came from mpt3ctl ioctl device. */ - ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI25_VERSION | MPI26_VERSION); return ret; } /** * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) - * @file: (struct file) - * @cmd: ioctl opcode - * @arg: ? + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - */ -static long -_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +long +_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - long ret; + long ret; - /* pass MPI2_VERSION value, to indicate that this ioctl cmd - * came from mpt2ctl ioctl device. - */ - ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); - return ret; + /* pass MPI2_VERSION value, to indicate that this ioctl cmd + * came from mpt2ctl ioctl device. + */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); + return ret; } + #ifdef CONFIG_COMPAT /** - *_ ctl_ioctl_compat - main ioctl entry point (compat) - * @file: ? - * @cmd: ? - * @arg: ? + * _ctl_ioctl_compat - main ioctl entry point (compat) + * @file - + * @cmd - + * @arg - * * This routine handles 32 bit applications in 64bit os. */ @@ -2473,15 +2849,15 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) /** *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) - * @file: ? - * @cmd: ? - * @arg: ? + * @file - + * @cmd - + * @arg - * * This routine handles 32 bit applications in 64bit os. 
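
_ctl_ioctl_main() above picks its locking strategy from O_NONBLOCK: non-blocking callers trylock the ioctl mutex and bail out with -EAGAIN, while blocking callers sleep interruptibly. A userspace analogue with POSIX threads, where pthread_mutex_lock() stands in for mutex_lock_interruptible() and -EINTR for the kernel's -ERESTARTSYS:

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctl_cmds_mutex = PTHREAD_MUTEX_INITIALIZER;

/* returns 0 with the mutex held, or a negative errno */
static int ctl_enter(int f_flags)
{
	if (f_flags & O_NONBLOCK) {
		if (pthread_mutex_trylock(&ctl_cmds_mutex))
			return -EAGAIN;	/* busy: never sleep */
	} else if (pthread_mutex_lock(&ctl_cmds_mutex)) {
		return -EINTR;	/* the driver returns -ERESTARTSYS here */
	}
	return 0;
}

int main(void)
{
	if (ctl_enter(O_NONBLOCK) == 0) {
		printf("non-blocking caller got the mutex\n");
		pthread_mutex_unlock(&ctl_cmds_mutex);
	}
	if (ctl_enter(0) == 0) {	/* blocking caller would sleep if busy */
		printf("blocking caller got the mutex\n");
		pthread_mutex_unlock(&ctl_cmds_mutex);
	}
	return 0;
}
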
*/ -static long -_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) -{ +long +_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long +arg) { long ret; ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); @@ -2491,19 +2867,23 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) /* scsi host attributes */ /** - * version_fw_show - firmware version - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_version_fw_show - firmware version + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -version_fw_show(struct device *cdev, struct device_attribute *attr, +_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_version_fw_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, @@ -2511,22 +2891,30 @@ version_fw_show(struct device *cdev, struct device_attribute *attr, (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, ioc->facts.FWVersion.Word & 0x000000FF); } -static DEVICE_ATTR_RO(version_fw); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); +#else +static CLASS_DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); +#endif /** - * version_bios_show - bios version - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_version_bios_show - bios version + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -version_bios_show(struct device *cdev, struct device_attribute *attr, +_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_version_bios_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); @@ -2536,341 +2924,1110 @@ version_bios_show(struct device *cdev, struct device_attribute *attr, (version & 0x0000FF00) >> 8, version & 0x000000FF); } -static DEVICE_ATTR_RO(version_bios); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); +#else +static CLASS_DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); +#endif /** - * version_mpi_show - MPI (message passing interface) version - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_version_mpi_show - MPI (message passing interface) version + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. 
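
The version_fw attribute above unpacks facts.FWVersion.Word, a single 32-bit value carrying four byte-wide fields, into a dotted quad. The decode in isolation (the sample word is made up):

#include <stdint.h>
#include <stdio.h>

static void show_fw_version(uint32_t word, char *buf, size_t len)
{
	snprintf(buf, len, "%02d.%02d.%02d.%02d",
	    (int)((word & 0xFF000000) >> 24),	/* major */
	    (int)((word & 0x00FF0000) >> 16),	/* minor */
	    (int)((word & 0x0000FF00) >> 8),	/* unit */
	    (int)(word & 0x000000FF));		/* dev */
}

int main(void)
{
	char buf[32];

	show_fw_version(0x10000A00, buf, sizeof(buf));
	puts(buf);	/* prints 16.00.10.00 */
	return 0;
}
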
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -version_mpi_show(struct device *cdev, struct device_attribute *attr, +_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_version_mpi_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); } -static DEVICE_ATTR_RO(version_mpi); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); +#else +static CLASS_DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); +#endif /** - * version_product_show - product name - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_version_product_show - product name + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -version_product_show(struct device *cdev, struct device_attribute *attr, +_ctl_version_product_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_version_product_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); } -static DEVICE_ATTR_RO(version_product); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); +#else +static CLASS_DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, + NULL); +#endif /** - * version_nvdata_persistent_show - ndvata persistent version - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_version_nvdata_persistent_show - ndvata persistent version + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -version_nvdata_persistent_show(struct device *cdev, +_ctl_version_nvdata_persistent_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_version_nvdata_persistent_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%08xh\n", le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); } -static DEVICE_ATTR_RO(version_nvdata_persistent); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, + _ctl_version_nvdata_persistent_show, NULL); +#else +static CLASS_DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, + _ctl_version_nvdata_persistent_show, NULL); +#endif /** - * version_nvdata_default_show - nvdata default version - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_version_nvdata_default_show - nvdata default version + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. 
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -version_nvdata_default_show(struct device *cdev, struct device_attribute +_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_version_nvdata_default_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%08xh\n", le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); } -static DEVICE_ATTR_RO(version_nvdata_default); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(version_nvdata_default, S_IRUGO, + _ctl_version_nvdata_default_show, NULL); +#else +static CLASS_DEVICE_ATTR(version_nvdata_default, S_IRUGO, + _ctl_version_nvdata_default_show, NULL); +#endif /** - * board_name_show - board name - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_board_name_show - board name + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -board_name_show(struct device *cdev, struct device_attribute *attr, +_ctl_board_name_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_board_name_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); } -static DEVICE_ATTR_RO(board_name); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); +#else +static CLASS_DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); +#endif /** - * board_assembly_show - board assembly name - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_board_assembly_show - board assembly name + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -board_assembly_show(struct device *cdev, struct device_attribute *attr, +_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_board_assembly_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); } -static DEVICE_ATTR_RO(board_assembly); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); +#else +static CLASS_DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, + NULL); +#endif /** - * board_tracer_show - board tracer number - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_board_tracer_show - board tracer number + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. 
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -board_tracer_show(struct device *cdev, struct device_attribute *attr, +_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_board_tracer_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); } -static DEVICE_ATTR_RO(board_tracer); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); +#else +static CLASS_DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); +#endif /** - * io_delay_show - io missing delay - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_io_delay_show - io missing delay + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is for firmware implemention for deboucing device * removal events. * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -io_delay_show(struct device *cdev, struct device_attribute *attr, +_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_io_delay_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); } -static DEVICE_ATTR_RO(io_delay); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); +#else +static CLASS_DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); +#endif /** - * device_delay_show - device missing delay - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_device_delay_show - device missing delay + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is for firmware implemention for deboucing device * removal events. * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -device_delay_show(struct device *cdev, struct device_attribute *attr, +_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_device_delay_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); } -static DEVICE_ATTR_RO(device_delay); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); +#else +static CLASS_DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); +#endif /** - * fw_queue_depth_show - global credits - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_fw_queue_depth_show - global credits + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is firmware queue depth limit * * A sysfs 'read-only' shost attribute. 
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, +_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_fw_queue_depth_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); } -static DEVICE_ATTR_RO(fw_queue_depth); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL); +#else +static CLASS_DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, + NULL); +#endif /** - * sas_address_show - sas address - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_sas_address_show - sas address + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is the controller sas address * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -host_sas_address_show(struct device *cdev, struct device_attribute *attr, +_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr, char *buf) - +#else +static ssize_t +_ctl_host_sas_address_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "0x%016llx\n", (unsigned long long)ioc->sas_hba.sas_address); } -static DEVICE_ATTR_RO(host_sas_address); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(host_sas_address, S_IRUGO, + _ctl_host_sas_address_show, NULL); +#else +static CLASS_DEVICE_ATTR(host_sas_address, S_IRUGO, + _ctl_host_sas_address_show, NULL); +#endif /** - * logging_level_show - logging level - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_logging_level_show - logging level + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. 
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -logging_level_show(struct device *cdev, struct device_attribute *attr, +_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_logging_level_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); } +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -logging_level_store(struct device *cdev, struct device_attribute *attr, +_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) +#else +static ssize_t +_ctl_logging_level_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); int val = 0; if (sscanf(buf, "%x", &val) != 1) return -EINVAL; ioc->logging_level = val; - ioc_info(ioc, "logging_level=%08xh\n", - ioc->logging_level); + printk(MPT3SAS_INFO_FMT "logging_level=%08xh\n", ioc->name, + ioc->logging_level); return strlen(buf); } -static DEVICE_ATTR_RW(logging_level); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show, + _ctl_logging_level_store); +#else +static CLASS_DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, + _ctl_logging_level_show, _ctl_logging_level_store); +#endif /** - * fwfault_debug_show - show/store fwfault_debug - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_fwfault_debug_show - show/store fwfault_debug + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * mpt3sas_fwfault_debug is command line option * A sysfs 'read/write' shost attribute. 
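
logging_level_store() above is the writable-attribute pattern in this block: parse the sysfs write as hex with sscanf(), reject input that does not scan, log the new value, and report the whole buffer consumed. The same shape as a standalone function (the driver uses a signed int for val; an unsigned is used here to match the "%x" conversion):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static unsigned int logging_level;

static ssize_t logging_level_store(const char *buf, size_t count)
{
	unsigned int val = 0;

	(void)count;	/* the driver reports strlen(buf), not count */
	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;
	logging_level = val;
	printf("logging_level=%08xh\n", logging_level);
	return (ssize_t)strlen(buf);
}

int main(void)
{
	logging_level_store("3f8\n", 4);	/* sets logging_level to 0x3f8 */
	return 0;
}
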
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -fwfault_debug_show(struct device *cdev, struct device_attribute *attr, +_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_fwfault_debug_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); } +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -fwfault_debug_store(struct device *cdev, struct device_attribute *attr, +_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) +#else +static ssize_t +_ctl_fwfault_debug_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); int val = 0; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; ioc->fwfault_debug = val; - ioc_info(ioc, "fwfault_debug=%d\n", - ioc->fwfault_debug); + printk(MPT3SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name, + ioc->fwfault_debug); return strlen(buf); } -static DEVICE_ATTR_RW(fwfault_debug); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, + _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); +#else +static CLASS_DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, + _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); +#endif /** - * ioc_reset_count_show - ioc reset count - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_raid_device_find_by_handle - raid device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on handle, then return raid_device + * object. + */ +static struct _raid_device * +_ctl_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * mpt3sas_ctl_tm_done - ctl task management request callback + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when using ioc->ctl_tm_cb_idx + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
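
_ctl_raid_device_find_by_handle() above is a plain linear search over the adapter's RAID volume list, keyed on the firmware-assigned handle, with the caller expected to hold raid_device_lock. The same shape in standalone C, with an ad-hoc singly linked list standing in for the kernel's list_head:

#include <stdint.h>
#include <stdio.h>

struct raid_device {
	uint16_t handle;	/* assigned by firmware */
	struct raid_device *next;
};

static struct raid_device *
raid_device_find_by_handle(struct raid_device *head, uint16_t handle)
{
	struct raid_device *r;

	/* caller holds the list lock, as in the driver */
	for (r = head; r; r = r->next)
		if (r->handle == handle)
			return r;
	return NULL;
}

int main(void)
{
	struct raid_device vol1 = { 0x0102, NULL };
	struct raid_device vol0 = { 0x0101, &vol1 };

	printf("handle 0x0102 %s\n",
	    raid_device_find_by_handle(&vol0, 0x0102) ? "found" : "missing");
	return 0;
}
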
+ */
+u8
+mpt3sas_ctl_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+	u32 reply)
+{
+	u8 rc;
+	unsigned long flags;
+	struct _sas_device *sas_device;
+	struct _pcie_device *pcie_device;
+	struct _raid_device *raid_device;
+	u16 smid_task_abort;
+	u16 handle;
+	Mpi2SCSITaskManagementRequest_t *mpi_request;
+	Mpi2SCSITaskManagementReply_t *mpi_reply =
+	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+	rc = 1;
+	if (unlikely(!mpi_reply)) {
+		printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return rc;
+	}
+	handle = le16_to_cpu(mpi_reply->DevHandle);
+
+	/* search for sas device */
+	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+	if (sas_device) {
+		smid_task_abort = 0;
+		if (mpi_reply->TaskType ==
+		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+			mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+			smid_task_abort = le16_to_cpu(mpi_request->TaskMID);
+		}
+		printk(KERN_INFO "\tcomplete: sas_addr(0x%016llx), "
+		    "handle(0x%04x), smid(%d), term(%d)\n",
+		    (unsigned long long)sas_device->sas_address, handle,
+		    (smid_task_abort ? smid_task_abort : smid),
+		    le32_to_cpu(mpi_reply->TerminationCount));
+		sas_device_put(sas_device);
+	}
+
+	if (!sas_device) {
+		/* search for pcie device */
+		pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+		if (pcie_device) {
+			smid_task_abort = 0;
+			if (mpi_reply->TaskType ==
+			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+				mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+				smid_task_abort = le16_to_cpu(mpi_request->TaskMID);
+			}
+			printk(KERN_INFO "\tcomplete: wwid(0x%016llx), "
+			    "handle(0x%04x), smid(%d), term(%d)\n",
+			    (unsigned long long)pcie_device->wwid, handle,
+			    (smid_task_abort ? smid_task_abort : smid),
+			    le32_to_cpu(mpi_reply->TerminationCount));
+			pcie_device_put(pcie_device);
+		}
+	}
+
+	/* search for IR volume */
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _ctl_raid_device_find_by_handle(ioc, handle);
+	if (raid_device)
+		printk(KERN_INFO "\tcomplete: wwid(0x%016llx), "
+		    "handle(0x%04x), smid(%d), term(%d)\n",
+		    (unsigned long long)raid_device->wwid, handle,
+		    smid, le32_to_cpu(mpi_reply->TerminationCount));
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	/* handle pending TM request */
+	ioc->terminated_tm_count += le32_to_cpu(mpi_reply->TerminationCount);
+	if (ioc->out_of_frames) {
+		rc = 0;
+		mpt3sas_base_free_smid(ioc, smid);
+		ioc->out_of_frames = 0;
+		wake_up(&ioc->no_frames_tm_wq);
+	}
+	ioc->pending_tm_count--;
+	if (!ioc->pending_tm_count)
+		wake_up(&ioc->pending_tm_wq);
+
+	return rc;
+}
+
+/**
+ * mpt3sas_ctl_tm_sysfs - issue task management request
+ * @ioc: per adapter object
+ * @task_type - task management type ~ MPI2_SCSITASKMGMT_TASKTYPE_XXX
+ *
+ * This code was added to help debug firmware task management issues.
+ * It will send overlapping task management requests using the hi-priority
+ * request queue to every device/lun/task (depending on the task type).
+ *
+ * This will freeze all scsi host IO queues while sending the TM's.
+ *
+ * This will wait for all pending requests to complete before returning.
+ * If you run out of free hi-priority message frames, we will wait for pending
+ * requests to complete, then issue the next. You should never see the
+ * "out of hi-priority requests" message.
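
mpt3sas_ctl_tm_done() and mpt3sas_ctl_tm_sysfs() above cooperate through shared counters: the sender bumps pending_tm_count per issued TM and then sleeps until the completion path has decremented it back to zero (with a similar dance on no_frames_tm_wq when hi-priority frames run out). A minimal userspace model of that rendezvous, with a pthread condition variable standing in for the kernel wait queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pending_tm_wq = PTHREAD_COND_INITIALIZER;
static int pending_tm_count;

static void tm_sent(void)
{
	pthread_mutex_lock(&tm_lock);
	pending_tm_count++;
	pthread_mutex_unlock(&tm_lock);
}

static void tm_done(void)	/* completion callback */
{
	pthread_mutex_lock(&tm_lock);
	if (--pending_tm_count == 0)
		pthread_cond_signal(&pending_tm_wq);	/* wake_up() */
	pthread_mutex_unlock(&tm_lock);
}

static void *firmware_replies(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 3; i++)
		tm_done();
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	for (i = 0; i < 3; i++)
		tm_sent();	/* issue three overlapping TMs */
	pthread_create(&t, NULL, firmware_replies, NULL);

	pthread_mutex_lock(&tm_lock);
	while (pending_tm_count)	/* wait until all TMs complete */
		pthread_cond_wait(&pending_tm_wq, &tm_lock);
	pthread_mutex_unlock(&tm_lock);
	pthread_join(t, NULL);
	printf("all pending TMs completed\n");
	return 0;
}
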
+ */ +static void +mpt3sas_ctl_tm_sysfs(struct MPT3SAS_ADAPTER *ioc, u8 task_type) +{ + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _raid_device *raid_device; + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid, handle, hpr_smid; + struct MPT3SAS_DEVICE *device_priv_data; + struct MPT3SAS_TARGET *target_priv_data; + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + unsigned long flags; + int tm_count; + int lun; + u32 doorbell; + struct scsiio_tracker *st; + u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE; + + if (list_empty(&ioc->sas_device_list) && list_empty(&ioc->pcie_device_list)) + return; + + /* turn off incoming commands to shost during task management */ + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery || ioc->remove_host) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + printk(MPT3SAS_ERR_FMT "%s: busy : host reset in progress, try" + " later\n", ioc->name, __func__); + return; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + scsi_block_requests(ioc->shost); + + init_waitqueue_head(&ioc->pending_tm_wq); + ioc->ignore_loginfos = 1; + ioc->pending_tm_count = 0; + ioc->terminated_tm_count = 0; + ioc->out_of_frames = 0; + tm_count = 0; + + switch (task_type) { + case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + + for (smid = 1; + smid <= ioc->shost->can_queue; + smid++) { + /* wait for free hpr message frames */ + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = mpt3sas_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + continue; + lun = scmd->device->lun; + device_priv_data = scmd->device->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + /* not supported by IR volumes & physical components */ + if (target_priv_data->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT || + target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = mpt3sas_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + printk(MPT3SAS_ERR_FMT "%s: out of hi-priority" + " requests!!\n", ioc->name, __func__); + goto out_of_frames; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + mpi_request->TaskMID = + cpu_to_le16(st->smid); + int_to_scsilun(lun, + (struct scsi_lun *)mpi_request->LUN); + starget_printk(KERN_INFO, + device_priv_data->sas_target->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), " + "smid(%d)\n", (unsigned long long) + device_priv_data->sas_target->sas_address, handle, + st->smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + goto fault_in_progress; + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + + /* sas/sata devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + 
list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + /* wait for free hpr message frames */ + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + } + if (!sas_device->starget) + continue; + /* skip IR physical components */ + if (test_bit(sas_device->handle, ioc->pd_handles)) + continue; + hpr_smid = mpt3sas_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + printk(MPT3SAS_ERR_FMT "%s: out of hi-priority" + " requests!!\n", ioc->name, __func__); + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto out_of_frames; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(sas_device->handle); + mpi_request->TaskType = + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, sas_device->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x)," + " smid(%d)\n", + (unsigned long long)sas_device->sas_address, + sas_device->handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + /* pcie devices */ + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + /* wait for free hpr message frames */ + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->pcie_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + } + if (!pcie_device->starget) + continue; + + hpr_smid = mpt3sas_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + printk(MPT3SAS_ERR_FMT "%s: out of hi-priority" + " requests!!\n", ioc->name, __func__); + spin_unlock_irqrestore(&ioc->pcie_device_lock, + flags); + goto out_of_frames; + } + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(pcie_device->handle); + mpi_request->TaskType = + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + starget_printk(KERN_INFO, pcie_device->starget, + "sending tm: wwid(0x%016llx), handle(0x%04x)," + " smid(%d)\n", + (unsigned long long)pcie_device->wwid, + pcie_device->handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + spin_unlock_irqrestore(&ioc->pcie_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + /* IR 
volumes */ + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + /* wait for free hpr message frames */ + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->raid_device_lock, + flags); + } + if (!raid_device->starget) + continue; + hpr_smid = mpt3sas_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + printk(MPT3SAS_ERR_FMT "%s: out of hi-priority" + " requests!!\n", ioc->name, __func__); + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto out_of_frames; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(raid_device->handle); + mpi_request->TaskType = + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, raid_device->starget, + "sending tm: wwid(0x%016llx), handle(0x%04x)," + " smid(%d)\n", + (unsigned long long)raid_device->wwid, + raid_device->handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + break; + + case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + + shost_for_each_device(sdev, ioc->shost) { + /* wait for free hpr message frames */ + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + device_priv_data = sdev->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + /* skip IR physical components */ + if (target_priv_data->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + /* ABRT_TASK_SET not supported by IR volumes */ + if ((target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) + && (task_type == + MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) + continue; + handle = device_priv_data->sas_target->handle; + + if (target_priv_data->flags & + MPT_TARGET_FLAGS_PCIE_DEVICE) { + pcie_device = + mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } + hpr_smid = mpt3sas_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + printk(MPT3SAS_ERR_FMT "%s: out of hi-priority" + " requests!!\n", ioc->name, __func__); + scsi_device_put(sdev); + goto out_of_frames; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = task_type; + mpi_request->MsgFlags = tr_method; + int_to_scsilun(sdev->lun, (struct scsi_lun *) + mpi_request->LUN); + sdev_printk(KERN_INFO, sdev, "sending tm: " + 
"sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)target_priv_data->sas_address, + handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + scsi_device_put(sdev); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + } + + out_of_frames: + + /* waiting up to 30 seconds for all the task management request to + * complete before returning + */ + if (ioc->pending_tm_count) + wait_event_timeout(ioc->pending_tm_wq, + !ioc->pending_tm_count, 30*HZ); + + printk(MPT3SAS_INFO_FMT "task management requests issued(%d)\n", + ioc->name, tm_count); + printk(MPT3SAS_INFO_FMT "number IO terminated(%d)\n", + ioc->name, ioc->terminated_tm_count); + + fault_in_progress: + + scsi_unblock_requests(ioc->shost); + + ioc->ignore_loginfos = 0; +} + +/** + * _ctl_task_management_store - issue task management request + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'write' shost attribute. + */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static ssize_t +_ctl_task_management_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +#else +static ssize_t +_ctl_task_management_store(struct class_device *cdev, const char *buf, + size_t count) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + int opcode = 0; + + if (sscanf(buf, "%d", &opcode) != 1) + return -EINVAL; + + switch (opcode) { + + case 1: + scsi_block_requests(ioc->shost); + printk(MPT3SAS_INFO_FMT "diag reset: %s\n", ioc->name, + ((!mpt3sas_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER)) ? "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + + break; + + case 2: + scsi_block_requests(ioc->shost); + printk(MPT3SAS_INFO_FMT "message unit reset: %s\n", ioc->name, + ((!mpt3sas_base_hard_reset_handler(ioc, + SOFT_RESET)) ? 
"SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + + break; + + case 3: + printk(MPT3SAS_INFO_FMT "TASKTYPE_ABORT_TASK:\n", ioc->name); + ioc->got_task_abort_from_sysfs = 1; + mpt3sas_ctl_tm_sysfs(ioc, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + ioc->got_task_abort_from_sysfs = 0; + break; + + case 4: + printk(MPT3SAS_INFO_FMT "TASKTYPE_TARGET_RESET:\n", ioc->name); + mpt3sas_ctl_tm_sysfs(ioc, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); + break; + + case 5: + printk(MPT3SAS_INFO_FMT "TASKTYPE_LOGICAL_UNIT_RESET:\n", + ioc->name); + mpt3sas_ctl_tm_sysfs(ioc, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); + break; + + case 6: + printk(MPT3SAS_INFO_FMT "TASKTYPE_ABRT_TASK_SET\n", ioc->name); + mpt3sas_ctl_tm_sysfs(ioc, + MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET); + break; + + default: + printk(MPT3SAS_INFO_FMT "unsupported opcode(%d)\n", ioc->name, + opcode); + break; + }; + + return strlen(buf); +} +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(task_management, S_IWUSR, NULL, _ctl_task_management_store); +#else +static CLASS_DEVICE_ATTR(task_management, S_IWUSR, NULL, + _ctl_task_management_store); +#endif + +#if defined(TARGET_MODE) && defined(STM_RING_BUFFER) +/* ring buffer support - for debugging target mode issues */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static ssize_t +_ctl_stm_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +#else +static ssize_t +_ctl_stm_store(struct class_device *cdev, const char *buf, size_t count) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + struct MPT_STM_PRIV *priv = ioc->priv; + int opcode = 0; + + if (sscanf(buf, "%d", &opcode) != 1) + return -EINVAL; + + switch (opcode) { + case 1: + sysfs_dump_kernel_thread_state(priv); + break; + case 2: + sysfs_dump_ring_buffer(priv); + break; + default: + break; + } + + return strlen(buf); +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(stm, S_IWUSR, NULL, _ctl_stm_store); +#else +static CLASS_DEVICE_ATTR(stm, S_IWUSR, NULL, _ctl_stm_store); +#endif +#endif /* STM_RING_BUFFER */ + +/** + * _ctl_ioc_reset_count_show - ioc reset count + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is firmware queue depth limit * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, +_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_ioc_reset_count_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); } -static DEVICE_ATTR_RO(ioc_reset_count); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); +#else +static CLASS_DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, + NULL); +#endif /** - * reply_queue_count_show - number of reply queues - * @cdev: pointer to embedded class device - * @attr: ? 
- * @buf: the buffer returned + * _ctl_ioc_reply_queue_count_show - number of reply queues + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is number of reply queues * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -reply_queue_count_show(struct device *cdev, +_ctl_ioc_reply_queue_count_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_ioc_reply_queue_count_show(struct class_device *cdev, char *buf) +#endif { u8 reply_queue_count; struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); if ((ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) @@ -2880,82 +4037,13 @@ reply_queue_count_show(struct device *cdev, return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); } -static DEVICE_ATTR_RO(reply_queue_count); - -/** - * BRM_status_show - Backup Rail Monitor Status - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned - * - * This is number of reply queues - * - * A sysfs 'read-only' shost attribute. - */ -static ssize_t -BRM_status_show(struct device *cdev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); - Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; - Mpi2ConfigReply_t mpi_reply; - u16 backup_rail_monitor_status = 0; - u16 ioc_status; - int sz; - ssize_t rc = 0; - - if (!ioc->is_warpdrive) { - ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n", - __func__); - goto out; - } - /* pci_access_mutex lock acquired by sysfs show path */ - mutex_lock(&ioc->pci_access_mutex); - if (ioc->pci_error_recovery || ioc->remove_host) { - mutex_unlock(&ioc->pci_access_mutex); - return 0; - } - - /* allocate upto GPIOVal 36 entries */ - sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); - io_unit_pg3 = kzalloc(sz, GFP_KERNEL); - if (!io_unit_pg3) { - ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n", - __func__, sz); - goto out; - } - - if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != - 0) { - ioc_err(ioc, "%s: failed reading iounit_pg3\n", - __func__); - goto out; - } - - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n", - __func__, ioc_status); - goto out; - } - - if (io_unit_pg3->GPIOCount < 25) { - ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n", - __func__, io_unit_pg3->GPIOCount); - goto out; - } - - /* BRM status is in bit zero of GPIOVal[24] */ - backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); - rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1)); - - out: - kfree(io_unit_pg3); - mutex_unlock(&ioc->pci_access_mutex); - return rc; -} -static DEVICE_ATTR_RO(BRM_status); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, + NULL); +#else +static CLASS_DEVICE_ATTR(reply_queue_count, S_IRUGO, + _ctl_ioc_reply_queue_count_show, NULL); +#endif struct DIAG_BUFFER_START { __le32 Size; @@ -2968,32 +4056,36 @@ struct DIAG_BUFFER_START { }; /** - * host_trace_buffer_size_show - host buffer size (trace only) - * @cdev: pointer to embedded class device - * @attr: ? 
- * @buf: the buffer returned + * _ctl_host_trace_buffer_size_show - host buffer size (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -host_trace_buffer_size_show(struct device *cdev, +_ctl_host_trace_buffer_size_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_host_trace_buffer_size_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); u32 size = 0; struct DIAG_BUFFER_START *request_data; if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { - ioc_err(ioc, "%s: host_trace_buffer is not registered\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: host_trace_buffer is not " + "registered\n", ioc->name, __func__); return 0; } if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - ioc_err(ioc, "%s: host_trace_buffer is not registered\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: host_trace_buffer is not " + "registered\n", ioc->name, __func__); return 0; } @@ -3008,13 +4100,18 @@ host_trace_buffer_size_show(struct device *cdev, ioc->ring_buffer_sz = size; return snprintf(buf, PAGE_SIZE, "%d\n", size); } -static DEVICE_ATTR_RO(host_trace_buffer_size); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, + _ctl_host_trace_buffer_size_show, NULL); +#else +static CLASS_DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, + _ctl_host_trace_buffer_size_show, NULL); +#endif /** - * host_trace_buffer_show - firmware ring buffer (trace only) - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. * @@ -3022,25 +4119,30 @@ static DEVICE_ATTR_RO(host_trace_buffer_size); * In order to read beyond 4k bytes, you will have to write out the * offset to the same attribute, it will move the pointer. 
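 *
 * Example drain sequence (host number varies per system):
 *
 *	cat /sys/class/scsi_host/host0/host_trace_buffer        (first 4k)
 *	echo 4096 > /sys/class/scsi_host/host0/host_trace_buffer
 *	cat /sys/class/scsi_host/host0/host_trace_buffer        (next 4k)
 *
 * Repeat, advancing the offset, until the size reported by
 * host_trace_buffer_size has been consumed.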
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, +_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_host_trace_buffer_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); void *request_data; u32 size; if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { - ioc_err(ioc, "%s: host_trace_buffer is not registered\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: host_trace_buffer is not " + "registered\n", ioc->name, __func__); return 0; } if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - ioc_err(ioc, "%s: host_trace_buffer is not registered\n", - __func__); + printk(MPT3SAS_ERR_FMT "%s: host_trace_buffer is not " + "registered\n", ioc->name, __func__); return 0; } @@ -3054,12 +4156,18 @@ host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, return size; } +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, +_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) +#else +static ssize_t +_ctl_host_trace_buffer_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); int val = 0; if (sscanf(buf, "%d", &val) != 1) @@ -3068,27 +4176,36 @@ host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, ioc->ring_buffer_offset = val; return strlen(buf); } -static DEVICE_ATTR_RW(host_trace_buffer); - +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, + _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); +#else +static CLASS_DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, + _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); +#endif /*****************************************/ /** - * host_trace_buffer_enable_show - firmware ring buffer (trace only) - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. 
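 *
 * Example (host number varies per system):
 *
 *	echo post > /sys/class/scsi_host/host0/host_trace_buffer_enable
 *	echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable
 *
 * "post" registers a trace buffer with the firmware; "release" hands
 * ownership of a posted buffer back to the host so it can be read.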
 *
 * This is a mechanism to post/release host_trace_buffers
 */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25))
 static ssize_t
-host_trace_buffer_enable_show(struct device *cdev,
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
+#else
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct class_device *cdev, char *buf)
+#endif
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT3SAS_ADAPTER *ioc = shost_private(shost);
 
 	if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
 	    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
@@ -3101,19 +4218,25 @@ host_trace_buffer_enable_show(struct device *cdev,
 	return snprintf(buf, PAGE_SIZE, "post\n");
 }
 
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25))
 static ssize_t
-host_trace_buffer_enable_store(struct device *cdev,
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
+#else
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct class_device *cdev, const char *buf,
+	size_t count)
+#endif
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT3SAS_ADAPTER *ioc = shost_private(shost);
 	char str[10] = "";
 	struct mpt3_diag_register diag_register;
 	u8 issue_reset = 0;
 
 	/* don't allow post/release to occur while recovery is active */
-	if (ioc->shost_recovery || ioc->remove_host ||
-	    ioc->pci_error_recovery || ioc->is_driver_loading)
+	if (ioc->shost_recovery || ioc->remove_host
+	    || ioc->pci_error_recovery || ioc->is_driver_loading)
 		return -EBUSY;
 
 	if (sscanf(buf, "%9s", str) != 1)
@@ -3128,12 +4251,49 @@ host_trace_buffer_enable_store(struct device *cdev,
 		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
 			goto out;
 		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
-		ioc_info(ioc, "posting host trace buffers\n");
+		printk(MPT3SAS_INFO_FMT "posting host trace buffers\n",
+		    ioc->name);
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
-		diag_register.requested_buffer_size = (1024 * 1024);
-		diag_register.unique_id = 0x7075900;
+		if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+		    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+			/* post the same buffer allocated previously */
+			diag_register.requested_buffer_size =
+			    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+		} else {
+			/*
+			 * Free the diag buffer memory which was previously
+			 * allocated by an application.
+			 */
+			if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) &&
+			    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+			    MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+				pci_free_consistent(ioc->pdev,
+				    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
+				    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+				    ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
+				ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] = NULL;
+			}
+
+			diag_register.requested_buffer_size = (1024 * 1024);
+		}
+		diag_register.unique_id =
+		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ MPT2DIAGBUFFUNIQUEID:MPT3DIAGBUFFUNIQUEID; ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; _ctl_diag_register_2(ioc, &diag_register); + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] + & MPT3_DIAG_BUFFER_IS_REGISTERED) + { + printk(MPT3SAS_INFO_FMT "Trace buffer %d KB allocated through sysfs\n", + ioc->name, diag_register.requested_buffer_size>>10); + + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } } else if (!strcmp(str, "release")) { /* exit out if host buffers are already released */ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) @@ -3144,7 +4304,8 @@ host_trace_buffer_enable_store(struct device *cdev, if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_RELEASED)) goto out; - ioc_info(ioc, "releasing host trace buffer\n"); + printk(MPT3SAS_INFO_FMT "releasing host trace buffer\n", + ioc->name); mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset); } @@ -3152,25 +4313,36 @@ host_trace_buffer_enable_store(struct device *cdev, out: return strlen(buf); } -static DEVICE_ATTR_RW(host_trace_buffer_enable); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, + _ctl_host_trace_buffer_enable_show, + _ctl_host_trace_buffer_enable_store); +#else +static CLASS_DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, + _ctl_host_trace_buffer_enable_show, + _ctl_host_trace_buffer_enable_store); +#endif /*********** diagnostic trigger suppport *********************************/ /** - * diag_trigger_master_show - show the diag_trigger_master attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_master_show(struct device *cdev, +_ctl_diag_trigger_master_show(struct device *cdev, struct device_attribute *attr, char *buf) - +#else +static ssize_t +_ctl_diag_trigger_master_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t rc; @@ -3182,21 +4354,24 @@ diag_trigger_master_show(struct device *cdev, } /** - * diag_trigger_master_store - store the diag_trigger_master attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned - * @count: ? + * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. 
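 *
 * Unlike the text-based attributes above, this store consumes a raw
 * binary trigger-structure image from @buf (presumably a
 * struct SL_WH_MASTER_TRIGGER_T; the copy itself is outside this
 * hunk), so it is written by diag-trigger tooling rather than echo.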
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_master_store(struct device *cdev, +_ctl_diag_trigger_master_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) - +#else +static ssize_t +_ctl_diag_trigger_master_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t rc; @@ -3210,23 +4385,32 @@ diag_trigger_master_store(struct device *cdev, spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); return rc; } -static DEVICE_ATTR_RW(diag_trigger_master); - +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); +#else +static CLASS_DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); +#endif /** - * diag_trigger_event_show - show the diag_trigger_event attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_event_show(struct device *cdev, +_ctl_diag_trigger_event_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_diag_trigger_event_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t rc; @@ -3238,21 +4422,24 @@ diag_trigger_event_show(struct device *cdev, } /** - * diag_trigger_event_store - store the diag_trigger_event attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned - * @count: ? + * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_event_store(struct device *cdev, +_ctl_diag_trigger_event_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) - +#else +static ssize_t +_ctl_diag_trigger_event_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t sz; @@ -3266,23 +4453,32 @@ diag_trigger_event_store(struct device *cdev, spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); return sz; } -static DEVICE_ATTR_RW(diag_trigger_event); - +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store); +#else +static CLASS_DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store); +#endif /** - * diag_trigger_scsi_show - show the diag_trigger_scsi attribute - * @cdev: pointer to embedded class device - * @attr: ? 
- * @buf: the buffer returned + * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_scsi_show(struct device *cdev, +_ctl_diag_trigger_scsi_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_diag_trigger_scsi_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t rc; @@ -3294,20 +4490,24 @@ diag_trigger_scsi_show(struct device *cdev, } /** - * diag_trigger_scsi_store - store the diag_trigger_scsi attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned - * @count: ? + * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_scsi_store(struct device *cdev, +_ctl_diag_trigger_scsi_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) +#else +static ssize_t +_ctl_diag_trigger_scsi_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t sz; @@ -3320,23 +4520,33 @@ diag_trigger_scsi_store(struct device *cdev, spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); return sz; } -static DEVICE_ATTR_RW(diag_trigger_scsi); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store); +#else +static CLASS_DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store); +#endif /** - * diag_trigger_scsi_show - show the diag_trigger_mpi attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_mpi_show(struct device *cdev, +_ctl_diag_trigger_mpi_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_diag_trigger_mpi_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t rc; @@ -3348,159 +4558,77 @@ diag_trigger_mpi_show(struct device *cdev, } /** - * diag_trigger_mpi_store - store the diag_trigger_mpi attribute - * @cdev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned - * @count: ? + * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. 
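 *
 * Note: the store below bounds its copy by
 * sizeof(struct SL_WH_MPI_TRIGGERS_T) but clears only
 * sizeof(struct SL_WH_EVENT_TRIGGERS_T) bytes beforehand; if the two
 * structures differ in size, bytes beyond the smaller one are not
 * zeroed before the copy.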
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -diag_trigger_mpi_store(struct device *cdev, +_ctl_diag_trigger_mpi_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) +#else +static ssize_t +_ctl_diag_trigger_mpi_store(struct class_device *cdev, const char *buf, + size_t count) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); unsigned long flags; ssize_t sz; spin_lock_irqsave(&ioc->diag_trigger_lock, flags); sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); memset(&ioc->diag_trigger_mpi, 0, - sizeof(ioc->diag_trigger_mpi)); + sizeof(struct SL_WH_EVENT_TRIGGERS_T)); memcpy(&ioc->diag_trigger_mpi, buf, sz); if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); return sz; } - -static DEVICE_ATTR_RW(diag_trigger_mpi); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); +#else +static CLASS_DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); +#endif /*********** diagnostic trigger suppport *** END ****************************/ -/*****************************************/ - /** - * drv_support_bitmap_show - driver supported feature bitmap + * _ctl_drv_support_bitmap_show - driver supported feature bitmap * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) static ssize_t -drv_support_bitmap_show(struct device *cdev, +_ctl_drv_support_bitmap_show(struct device *cdev, struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_drv_support_bitmap_show(struct class_device *cdev, char *buf) +#endif { struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); - + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap); } -static DEVICE_ATTR_RO(drv_support_bitmap); - -/** - * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled - * @cdev - pointer to embedded class device - * @buf - the buffer returned - * - * A sysfs read/write shost attribute. This attribute is used to set the - * targets queue depth to HBA IO queue depth if this attribute is enabled. - */ -static ssize_t -enable_sdev_max_qd_show(struct device *cdev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); - - return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); -} - -/** - * enable_sdev_max_qd_store - Enable/disable sdev max qd - * @cdev - pointer to embedded class device - * @buf - the buffer returned - * - * A sysfs read/write shost attribute. This attribute is used to set the - * targets queue depth to HBA IO queue depth if this attribute is enabled. - * If this attribute is disabled then targets will have corresponding default - * queue depth. 
- */ -static ssize_t -enable_sdev_max_qd_store(struct device *cdev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(cdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); - struct MPT3SAS_DEVICE *sas_device_priv_data; - struct MPT3SAS_TARGET *sas_target_priv_data; - int val = 0; - struct scsi_device *sdev; - struct _raid_device *raid_device; - int qdepth; - - if (kstrtoint(buf, 0, &val) != 0) - return -EINVAL; - - switch (val) { - case 0: - ioc->enable_sdev_max_qd = 0; - shost_for_each_device(sdev, ioc->shost) { - sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data) - continue; - sas_target_priv_data = sas_device_priv_data->sas_target; - if (!sas_target_priv_data) - continue; - - if (sas_target_priv_data->flags & - MPT_TARGET_FLAGS_VOLUME) { - raid_device = - mpt3sas_raid_device_find_by_handle(ioc, - sas_target_priv_data->handle); - - switch (raid_device->volume_type) { - case MPI2_RAID_VOL_TYPE_RAID0: - if (raid_device->device_info & - MPI2_SAS_DEVICE_INFO_SSP_TARGET) - qdepth = - MPT3SAS_SAS_QUEUE_DEPTH; - else - qdepth = - MPT3SAS_SATA_QUEUE_DEPTH; - break; - case MPI2_RAID_VOL_TYPE_RAID1E: - case MPI2_RAID_VOL_TYPE_RAID1: - case MPI2_RAID_VOL_TYPE_RAID10: - case MPI2_RAID_VOL_TYPE_UNKNOWN: - default: - qdepth = MPT3SAS_RAID_QUEUE_DEPTH; - } - } else if (sas_target_priv_data->flags & - MPT_TARGET_FLAGS_PCIE_DEVICE) - qdepth = MPT3SAS_NVME_QUEUE_DEPTH; - else - qdepth = MPT3SAS_SAS_QUEUE_DEPTH; - - mpt3sas_scsih_change_queue_depth(sdev, qdepth); - } - break; - case 1: - ioc->enable_sdev_max_qd = 1; - shost_for_each_device(sdev, ioc->shost) - mpt3sas_scsih_change_queue_depth(sdev, - shost->can_queue); - break; - default: - return -EINVAL; - } - - return strlen(buf); -} -static DEVICE_ATTR_RW(enable_sdev_max_qd); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(drv_support_bitmap, S_IRUGO, + _ctl_drv_support_bitmap_show, NULL); +#else +static CLASS_DEVICE_ATTR(drv_support_bitmap, S_IRUGO, + _ctl_drv_support_bitmap_show, NULL); +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) struct device_attribute *mpt3sas_host_attrs[] = { &dev_attr_version_fw, &dev_attr_version_bios, @@ -3517,35 +4645,80 @@ struct device_attribute *mpt3sas_host_attrs[] = { &dev_attr_fwfault_debug, &dev_attr_fw_queue_depth, &dev_attr_host_sas_address, + &dev_attr_task_management, &dev_attr_ioc_reset_count, &dev_attr_host_trace_buffer_size, &dev_attr_host_trace_buffer, &dev_attr_host_trace_buffer_enable, &dev_attr_reply_queue_count, +#if defined(TARGET_MODE) && defined(STM_RING_BUFFER) + &dev_attr_stm, +#endif &dev_attr_diag_trigger_master, &dev_attr_diag_trigger_event, &dev_attr_diag_trigger_scsi, &dev_attr_diag_trigger_mpi, - &dev_attr_drv_support_bitmap, &dev_attr_BRM_status, - &dev_attr_enable_sdev_max_qd, +#ifdef MPT2SAS_WD_DDIOCOUNT + &dev_attr_ddio_count, + &dev_attr_ddio_err_count, +#endif + &dev_attr_drv_support_bitmap, NULL, }; +#else +struct class_device_attribute *mpt3sas_host_attrs[] = { + &class_device_attr_version_fw, + &class_device_attr_version_bios, + &class_device_attr_version_mpi, + &class_device_attr_version_product, + &class_device_attr_version_nvdata_persistent, + &class_device_attr_version_nvdata_default, + &class_device_attr_board_name, + &class_device_attr_board_assembly, + &class_device_attr_board_tracer, + &class_device_attr_io_delay, + &class_device_attr_device_delay, + &class_device_attr_logging_level, + &class_device_attr_fwfault_debug, + &class_device_attr_fw_queue_depth, + 
&class_device_attr_host_sas_address, + &class_device_attr_task_management, + &class_device_attr_ioc_reset_count, + &class_device_attr_host_trace_buffer_size, + &class_device_attr_host_trace_buffer, + &class_device_attr_host_trace_buffer_enable, + &class_device_attr_reply_queue_count, +#if defined(TARGET_MODE) && defined(STM_RING_BUFFER) + &class_device_attr_stm, +#endif + &class_device_attr_diag_trigger_master, + &class_device_attr_diag_trigger_event, + &class_device_attr_diag_trigger_scsi, + &class_device_attr_diag_trigger_mpi, + &class_device_attr_BRM_status, +#ifdef MPT2SAS_WD_DDIOCOUNT + &class_device_attr_ddio_count, + &class_device_attr_ddio_err_count, +#endif + &class_device_attr_drv_support_bitmap, + NULL, +}; +#endif /* device attributes */ /** - * sas_address_show - sas address - * @dev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_device_sas_address_show - sas address + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is the sas address for the target * * A sysfs 'read-only' shost attribute. */ static ssize_t -sas_address_show(struct device *dev, struct device_attribute *attr, +_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); @@ -3554,20 +4727,19 @@ sas_address_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "0x%016llx\n", (unsigned long long)sas_device_priv_data->sas_target->sas_address); } -static DEVICE_ATTR_RO(sas_address); +static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); /** - * sas_device_handle_show - device handle - * @dev: pointer to embedded class device - * @attr: ? - * @buf: the buffer returned + * _ctl_device_handle_show - device handle + * @cdev - pointer to embedded class device + * @buf - the buffer returned * * This is the firmware assigned device handle * * A sysfs 'read-only' shost attribute. */ static ssize_t -sas_device_handle_show(struct device *dev, struct device_attribute *attr, +_ctl_device_handle_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); @@ -3576,19 +4748,19 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "0x%04x\n", sas_device_priv_data->sas_target->handle); } -static DEVICE_ATTR_RO(sas_device_handle); +static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) /** - * sas_ncq_io_prio_show - send prioritized io commands to device - * @dev: pointer to embedded device - * @attr: ? 
- * @buf: the buffer returned + * _ctl_device_ncq_io_prio_show - send prioritized io commands to device + * @dev - pointer to embedded device + * @buf - the buffer returned * * A sysfs 'read/write' sdev attribute, only works with SATA */ static ssize_t -sas_ncq_prio_enable_show(struct device *dev, - struct device_attribute *attr, char *buf) +_ctl_device_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; @@ -3598,34 +4770,39 @@ sas_ncq_prio_enable_show(struct device *dev, } static ssize_t -sas_ncq_prio_enable_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +_ctl_device_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; - bool ncq_prio_enable = 0; + int ncq_prio_enable = 0; - if (kstrtobool(buf, &ncq_prio_enable)) + if (sscanf(buf, "%d", &ncq_prio_enable) != 1) return -EINVAL; - if (!scsih_ncq_prio_supp(sdev)) + if (!mpt3sas_scsih_ncq_prio_supp(sdev)) return -EINVAL; sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; return strlen(buf); } -static DEVICE_ATTR_RW(sas_ncq_prio_enable); +static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR, + _ctl_device_ncq_prio_enable_show, + _ctl_device_ncq_prio_enable_store); +#endif struct device_attribute *mpt3sas_dev_attrs[] = { &dev_attr_sas_address, &dev_attr_sas_device_handle, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) &dev_attr_sas_ncq_prio_enable, +#endif NULL, }; -/* file operations table for mpt3ctl device */ -static const struct file_operations ctl_fops = { +/* file operations table for mpt3ctl device */ static const struct +file_operations ctl_fops = { .owner = THIS_MODULE, .unlocked_ioctl = _ctl_ioctl, .poll = _ctl_poll, @@ -3635,15 +4812,15 @@ static const struct file_operations ctl_fops = { #endif }; -/* file operations table for mpt2ctl device */ -static const struct file_operations ctl_gen2_fops = { +/* file operations table for mpt2ctl device */ static const struct +file_operations ctl_gen2_fops = { .owner = THIS_MODULE, .unlocked_ioctl = _ctl_mpt2_ioctl, .poll = _ctl_poll, .fasync = _ctl_fasync, #ifdef CONFIG_COMPAT - .compat_ioctl = _ctl_mpt2_ioctl_compat, -#endif + .compat_ioctl = _ctl_mpt2_ioctl_compat, +#endif }; static struct miscdevice ctl_dev = { @@ -3652,6 +4829,7 @@ static struct miscdevice ctl_dev = { .fops = &ctl_fops, }; + static struct miscdevice gen2_ctl_dev = { .minor = MPT2SAS_MINOR, .name = MPT2SAS_DEV_NAME, @@ -3660,38 +4838,38 @@ static struct miscdevice gen2_ctl_dev = { /** * mpt3sas_ctl_init - main entry point for ctl. - * @hbas_to_enumerate: ? + * */ void -mpt3sas_ctl_init(ushort hbas_to_enumerate) +mpt3sas_ctl_init(int enumerate_hba) { async_queue = NULL; + + /* Don't register mpt3ctl ioctl device if + * hbas_to_enumarate is one. + */ + if (enumerate_hba != 1) + if (misc_register(&ctl_dev) < 0) + pr_err("%s can't register misc device [minor=%d]\n", + MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); - /* Don't register mpt3ctl ioctl device if - * hbas_to_enumarate is one. - */ - if (hbas_to_enumerate != 1) - if (misc_register(&ctl_dev) < 0) - pr_err("%s can't register misc device [minor=%d]\n", - MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); - - /* Don't register mpt3ctl ioctl device if - * hbas_to_enumarate is two. 
- */ - if (hbas_to_enumerate != 2) - if (misc_register(&gen2_ctl_dev) < 0) - pr_err("%s can't register misc device [minor=%d]\n", - MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); + /* Don't register mpt2ctl ioctl device if + * hbas_to_enumarate is two. + */ + if (enumerate_hba != 2) + if (misc_register(&gen2_ctl_dev) < 0) + pr_err("%s can't register misc device [minor=%d]\n", + MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); init_waitqueue_head(&ctl_poll_wait); } /** * mpt3sas_ctl_exit - exit point for ctl - * @hbas_to_enumerate: ? + * */ void -mpt3sas_ctl_exit(ushort hbas_to_enumerate) +mpt3sas_ctl_exit(int enumerate_hba) { struct MPT3SAS_ADAPTER *ioc; int i; @@ -3702,24 +4880,21 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate) for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { if (!ioc->diag_buffer[i]) continue; - if (!(ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_REGISTERED)) - continue; - if ((ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_RELEASED)) - continue; - dma_free_coherent(&ioc->pdev->dev, - ioc->diag_buffer_sz[i], - ioc->diag_buffer[i], - ioc->diag_buffer_dma[i]); + pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], + ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); ioc->diag_buffer[i] = NULL; ioc->diag_buffer_status[i] = 0; } kfree(ioc->event_log); } - if (hbas_to_enumerate != 1) + if (enumerate_hba != 1) misc_deregister(&ctl_dev); - if (hbas_to_enumerate != 2) + if (enumerate_hba != 2) misc_deregister(&gen2_ctl_dev); + } + +#ifdef CPQ_CIM +#include "mpt3sas_csmi_ctl.c" +#endif diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h old mode 100644 new mode 100755 index 18b46faef6f1..603738dc7af8 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -3,9 +3,10 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. 
+ * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -50,6 +51,15 @@ #include #endif +/** + * NOTE + * FWDOWNLOAD - PR is let me know if we need to implement this + * DIAGBUFFER - PR said hold off + */ + +/** + * HACK - changeme (MPT_MINOR = 220 ) + */ #ifndef MPT2SAS_MINOR #define MPT2SAS_MINOR (MPT_MINOR + 1) #endif @@ -95,6 +105,14 @@ #define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \ struct mpt3_diag_read_buffer) +/* Trace Buffer default UniqueId */ +#define MPT2DIAGBUFFUNIQUEID (0x07075900) +#define MPT3DIAGBUFFUNIQUEID (0x4252434D) + +/* UID not found */ +#define MPT3_DIAG_UID_NOT_FOUND (0xFF) + + /** * struct mpt3_ioctl_header - main header structure * @ioc_number - IOC unit number @@ -136,15 +154,15 @@ struct mpt3_ioctl_pci_info { }; -#define MPT2_IOCTL_INTERFACE_SCSI (0x00) -#define MPT2_IOCTL_INTERFACE_FC (0x01) -#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) -#define MPT2_IOCTL_INTERFACE_SAS (0x03) -#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) +#define MPT2_IOCTL_INTERFACE_SCSI (0x00) +#define MPT2_IOCTL_INTERFACE_FC (0x01) +#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) +#define MPT2_IOCTL_INTERFACE_SAS (0x03) +#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) #define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05) -#define MPT3_IOCTL_INTERFACE_SAS3 (0x06) -#define MPT3_IOCTL_INTERFACE_SAS35 (0x07) -#define MPT2_IOCTL_VERSION_LENGTH (32) +#define MPT3_IOCTL_INTERFACE_SAS3 (0x06) +#define MPT3_IOCTL_INTERFACE_SAS35 (0x07) +#define MPT2_IOCTL_VERSION_LENGTH (32) /** * struct mpt3_ioctl_iocinfo - generic controller info @@ -310,6 +328,7 @@ struct mpt3_ioctl_btdh_mapping { #define MPT3_APP_FLAGS_APP_OWNED (0x0001) #define MPT3_APP_FLAGS_BUFFER_VALID (0x0002) #define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004) +#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008) /* flags for mpt3_diag_read_buffer */ #define MPT3_FLAGS_REREGISTER (0x0001) @@ -390,7 +409,7 @@ struct mpt3_diag_query { * * This allows ownership of the specified buffer to returned to the driver, * allowing an application to read the buffer without fear that firmware is - * overwriting information in the buffer. + * overwritting information in the buffer. */ struct mpt3_diag_release { struct mpt3_ioctl_header hdr; @@ -421,4 +440,6 @@ struct mpt3_diag_read_buffer { uint32_t diagnostic_data[1]; }; +/* Chunk size to use when doing a FW Download */ +#define FW_DL_CHUNK_SIZE 0x4000 #endif /* MPT3SAS_CTL_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h old mode 100644 new mode 100755 index cceeb2c16e64..967c76ff9c82 --- a/drivers/scsi/mpt3sas/mpt3sas_debug.h +++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h @@ -2,9 +2,10 @@ * Logging Support for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. 
+ * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -54,20 +55,20 @@ #define MPT_DEBUG_EXIT 0x00000040 #define MPT_DEBUG_FAIL 0x00000080 #define MPT_DEBUG_TM 0x00000100 -#define MPT_DEBUG_REPLY 0x00000200 +#define MPT_DEBUG_REPLY 0x00000200 #define MPT_DEBUG_HANDSHAKE 0x00000400 #define MPT_DEBUG_CONFIG 0x00000800 #define MPT_DEBUG_DL 0x00001000 -#define MPT_DEBUG_RESET 0x00002000 +#define MPT_DEBUG_RESET 0x00002000 #define MPT_DEBUG_SCSI 0x00004000 -#define MPT_DEBUG_IOCTL 0x00008000 +#define MPT_DEBUG_IOCTL 0x00008000 +#define MPT_DEBUG_CSMISAS 0x00010000 #define MPT_DEBUG_SAS 0x00020000 #define MPT_DEBUG_TRANSPORT 0x00040000 -#define MPT_DEBUG_TASK_SET_FULL 0x00080000 +#define MPT_DEBUG_TASK_SET_FULL 0x00080000 #define MPT_DEBUG_TRIGGER_DIAG 0x00200000 - #define MPT_CHECK_LOGGING(IOC, CMD, BITS) \ { \ if (IOC->logging_level & BITS) \ @@ -123,6 +124,9 @@ #define dctlprintk(IOC, CMD) \ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL) +#define dcsmisasprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CSMISAS) + #define dsasprintk(IOC, CMD) \ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS) @@ -138,13 +142,15 @@ #define dtransportprintk(IOC, CMD) \ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT) +#define dTMprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TARGET_MODE) + #define dTriggerDiagPrintk(IOC, CMD) \ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG) - + /* inline functions for dumping debug data*/ - /** * _debug_dump_mf - print message frame contents * @mpi_request: pointer to message frame @@ -156,13 +162,13 @@ _debug_dump_mf(void *mpi_request, int sz) int i; __le32 *mfp = (__le32 *)mpi_request; - pr_info("mf:\n\t"); + printk(KERN_INFO "mf:\n\t"); for (i = 0; i < sz; i++) { if (i && ((i % 8) == 0)) - pr_info("\n\t"); - pr_info("%08x ", le32_to_cpu(mfp[i])); + printk("\n\t"); + printk("%08x ", le32_to_cpu(mfp[i])); } - pr_info("\n"); + printk("\n"); } /** * _debug_dump_reply - print message frame contents @@ -175,13 +181,13 @@ _debug_dump_reply(void *mpi_request, int sz) int i; __le32 *mfp = (__le32 *)mpi_request; - pr_info("reply:\n\t"); + printk(KERN_INFO "reply:\n\t"); for (i = 0; i < sz; i++) { if (i && ((i % 8) == 0)) - pr_info("\n\t"); - pr_info("%08x ", le32_to_cpu(mfp[i])); + printk("\n\t"); + printk("%08x ", le32_to_cpu(mfp[i])); } - pr_info("\n"); + printk("\n"); } /** * _debug_dump_config - print config page contents @@ -194,13 +200,13 @@ _debug_dump_config(void *mpi_request, int sz) int i; __le32 *mfp = (__le32 *)mpi_request; - pr_info("config:\n\t"); + printk(KERN_INFO "config:\n\t"); for (i = 0; i < sz; i++) { if (i && ((i % 8) == 0)) - pr_info("\n\t"); - pr_info("%08x ", le32_to_cpu(mfp[i])); + printk("\n\t"); + printk("%08x ", le32_to_cpu(mfp[i])); } - pr_info("\n"); + printk("\n"); } #endif /* MPT3SAS_DEBUG_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c old mode 100644 new mode 100755 index c8e512ba6d39..00b3096571a6 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2,9 +2,10 @@ * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies 
+ * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -42,6 +43,7 @@ * USA. */ +#include #include #include #include @@ -52,9 +54,14 @@ #include #include #include -#include -#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) +#include +#endif #include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) +#include +#endif +#include #include "mpt3sas_base.h" @@ -65,18 +72,51 @@ /* forward proto's */ static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_expander); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) static void _firmware_event_work(struct work_struct *work); +static void _firmware_event_work_delayed(struct work_struct *work); +#else +static void _firmware_event_work(void *arg); +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) +static enum device_responsive_state +_scsih_read_capacity_16(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, + void *data, u32 data_length); +#endif + +static enum device_responsive_state +_scsih_inquiry_vpd_sn(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 **serial_number); +static enum device_responsive_state +_scsih_inquiry_vpd_supported_pages(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u32 lun, void *data, u32 data_length); +static enum device_responsive_state +_scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 *is_ssd_device, + u8 tr_timeout, u8 tr_method); +static enum device_responsive_state +_scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method); +static enum device_responsive_state +_scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method); static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device); static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count, u8 is_pd); -static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); +static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count); static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, struct _pcie_device *pcie_device); -static void -_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); + static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); +static void +_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle); + +void _scsih_log_entry_add_event(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataLogEntryAdded_t *log_entry); +static u16 +_scsih_determine_hba_mpi_version(struct pci_dev *pdev); /* global parameters */ LIST_HEAD(mpt3sas_ioc_list); @@ -93,6 +133,8 @@ MODULE_ALIAS("mpt2sas"); static u8 scsi_io_cb_idx = -1; static u8 tm_cb_idx = -1; static u8 ctl_cb_idx = -1; +static u8 ctl_tm_cb_idx = -1; +static u8 ctl_diag_cb_idx = -1; static u8 base_cb_idx = -1; static u8 port_enable_cb_idx = -1; static u8 transport_cb_idx = -1; @@ -103,36 +145,72 @@ static int mpt3_ids; static u8 tm_tr_cb_idx = -1 ; static u8 tm_tr_volume_cb_idx = -1 ; +static u8 tm_tr_internal_cb_idx =-1; static u8 tm_sas_control_cb_idx = -1; /* command line options */ static u32 logging_level; -MODULE_PARM_DESC(logging_level, - " bits for enabling additional logging info (default=0)"); +MODULE_PARM_DESC(logging_level, " bits for enabling 
additional logging info " + "(default=0)"); +static u32 sdev_queue_depth = MPT3SAS_SAS_QUEUE_DEPTH; +MODULE_PARM_DESC(sdev_queue_depth, " globally setting SAS device queue depth "); static ushort max_sectors = 0xFFFF; module_param(max_sectors, ushort, 0444); MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); +static int command_retry_count = 144; +module_param(command_retry_count, int, 0444); +MODULE_PARM_DESC(command_retry_count, " Device discovery TUR command retry " + "count: (default=144)"); static int missing_delay[2] = {-1, -1}; module_param_array(missing_delay, int, NULL, 0444); MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); +static int host_lock_mode = 0; +module_param(host_lock_mode, int, 0444); +MODULE_PARM_DESC(host_lock_mode, "Enable SCSI host lock if set to 1" + "(default=0)"); + /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ #define MPT3SAS_MAX_LUN (16895) -static u64 max_lun = MPT3SAS_MAX_LUN; -module_param(max_lun, ullong, 0444); +static int max_lun = MPT3SAS_MAX_LUN; +module_param(max_lun, int, 0444); MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); -static ushort hbas_to_enumerate; -module_param(hbas_to_enumerate, ushort, 0444); +/* hbas_to_enumerate is set to -1 by default. + * This allows to enumerate all sas2, sas3 & above generation HBAs + * only on kernel version > 4.4. + * On Lower kernel versions < 4.4. this enumerates only sas3 driver. + * Module Param 0 will enumerate all sas2, sas3 & above generation HBAs + * on all kernels. + */ +static int hbas_to_enumerate = -1; +module_param(hbas_to_enumerate, int, 0444); MODULE_PARM_DESC(hbas_to_enumerate, - " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \ - 1 - enumerates only SAS 2.0 generation HBAs\n \ - 2 - enumerates only SAS 3.0 generation HBAs (default=0)"); + " 0 - enumerates all SAS 2.0, PCIe HBA, SAS3.0 & above generation HBAs\n \ + 1 - enumerates only PCIe HBA & SAS 2.0 generation HBAs\n \ + 2 - enumerates PCIe HBA, SAS 3.0 & above generation HBAs (default=-1," + " Enumerates all SAS 2.0, PCIe HBA, SAS 3.0 & above generation HBAs else" + " PCIe HBA, SAS 3.0 & above generation HBAs only)"); +static int multipath_on_hba = -1; +module_param(multipath_on_hba, int, 0444); +MODULE_PARM_DESC(multipath_on_hba, + " Multipath support to add same target device\n \ + as many times as it is visible to HBA from various paths\n \ + (by default: \n \ + SAS 2.0,SAS 3.0 HBA & SAS3.5 HBA-" + "This will be disabled)"); + +/* Enable or disable EEDP support */ +static int disable_eedp = 0; +module_param(disable_eedp, uint, 0444); +MODULE_PARM_DESC(disable_eedp, " disable EEDP support: (default=0)"); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) /* diag_buffer_enable is bitwise * bit 0 set = TRACE * bit 1 set = SNAPSHOT @@ -142,27 +220,81 @@ MODULE_PARM_DESC(hbas_to_enumerate, */ static int diag_buffer_enable = -1; module_param(diag_buffer_enable, int, 0444); -MODULE_PARM_DESC(diag_buffer_enable, - " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); +MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " + "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); static int disable_discovery = -1; module_param(disable_discovery, int, 0444); MODULE_PARM_DESC(disable_discovery, " disable discovery "); +#else +extern int disable_discovery; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +static unsigned int allow_drive_spindown = 1; +module_param(allow_drive_spindown, uint, 0444); 
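/*
 * A minimal sketch of the module-parameter declarations this hunk adds
 * (sdev_queue_depth, command_retry_count, missing_delay and friends),
 * assuming only the standard moduleparam API: an int readable via sysfs
 * (perm 0444) and a two-element array.  Illustrative module, not the
 * driver's code.
 */
#include <linux/module.h>
#include <linux/init.h>

static int queue_depth = 32;
module_param(queue_depth, int, 0444);
MODULE_PARM_DESC(queue_depth, " per-device queue depth (default=32)");

static int delays[2] = { -1, -1 };
module_param_array(delays, int, NULL, 0444);
MODULE_PARM_DESC(delays, " device missing delay, io missing delay");

static int __init demo_init(void)
{
	printk(KERN_INFO "queue_depth=%d delays=%d,%d\n",
	    queue_depth, delays[0], delays[1]);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
/* Loaded as, e.g.: insmod demo.ko queue_depth=64 delays=10,20 */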
+MODULE_PARM_DESC(allow_drive_spindown, " allow host driver to " + "issue START STOP UNIT(STOP) command to spindown the drive before shut down or driver unload, default=1, \n" + " \t\tDont spindown any SATA drives =0 / Spindown SSD but not HDD = 1/ Spindown HDD but not SSD =2/ Spindown all SATA drives =3"); +#endif -/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ -static int prot_mask = -1; +/** +permit overriding the host protection capabilities mask (EEDP/T10 PI). +Ref tag errors are observed on kernels < 3.18 on issuing IOs on 4K +drive. This is a kernel Bug. +Disabled DIX support by default. +*/ +int prot_mask = 0x07; module_param(prot_mask, int, 0444); -MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); +MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x07"); -static bool enable_sdev_max_qd; -module_param(enable_sdev_max_qd, bool, 0444); -MODULE_PARM_DESC(enable_sdev_max_qd, - "Enable sdev max qd as can_queue, def=disabled(0)"); +#ifndef SCSI_MPT2SAS +/* permit overriding the host protection algorithm mask (T10 CRC/ IP Checksum) */ +static int protection_guard_mask = 3; +module_param(protection_guard_mask, int, 0444); +MODULE_PARM_DESC(protection_guard_mask, " host protection algorithm mask, def=3 "); +#endif + +/* permit overriding the SCSI command issuing capability of + the driver to bring the drive to READY state*/ +static int issue_scsi_cmd_to_bringup_drive = 1; +module_param(issue_scsi_cmd_to_bringup_drive, int, 0444); +MODULE_PARM_DESC(issue_scsi_cmd_to_bringup_drive, " allow host driver to " + "issue SCSI commands to bring the drive to READY state, default=1 "); +static int sata_smart_polling = 0; +module_param(sata_smart_polling, uint, 0444); +MODULE_PARM_DESC(sata_smart_polling, " poll for smart errors on SATA drives: (default=0)"); +#if (defined(CONFIG_SUSE_KERNEL) && defined(scsi_is_sas_phy_local)) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) +#define MPT_WIDE_PORT_API 1 +#define MPT_WIDE_PORT_API_PLUS 1 +#endif /* raid transport support */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) static struct raid_template *mpt3sas_raid_template; static struct raid_template *mpt2sas_raid_template; +#endif +/** + * enum device_responsive_state - responsive state + * @DEVICE_READY: device is ready to be added + * @DEVICE_RETRY: device can be retried later + * @DEVICE_RETRY_UA: retry unit attentions + * @DEVICE_START_UNIT: requires start unit + * @DEVICE_STOP_UNIT: requires stop unit + * @DEVICE_ERROR: device reported some fatal error + * + * Look at _scsih_wait_for_target_to_become_ready() + * + */ +enum device_responsive_state { + DEVICE_READY, + DEVICE_RETRY, + DEVICE_RETRY_UA, + DEVICE_START_UNIT, + DEVICE_STOP_UNIT, + DEVICE_ERROR, +}; /** * struct sense_info - common structure for obtaining sense keys @@ -185,21 +317,27 @@ struct sense_info { * struct fw_event_work - firmware event struct * @list: link list framework * @work: work object (ioc->fault_reset_work_q) + * @cancel_pending_work: flag set during reset handling * @ioc: per adapter object * @device_handle: device handle * @VF_ID: virtual function id * @VP_ID: virtual port id * @ignore: flag meaning this event has been marked to ignore - * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h - * @refcount: kref for this event + * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h + * @refcount: reference count for fw_event_work * @event_data: reply event data payload follows + * @retries: number of times this event has been 
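/*
 * A runnable sketch of how a discovery path can consume the
 * enum device_responsive_state declared above: transient states are
 * retried up to a bound (the driver uses command_retry_count for this)
 * and DEVICE_ERROR aborts.  wait_for_device_ready() is a hypothetical
 * stub standing in for the driver's TUR/start-unit probing.
 */
#include <stdio.h>

enum device_responsive_state {
	DEVICE_READY,
	DEVICE_RETRY,
	DEVICE_RETRY_UA,
	DEVICE_START_UNIT,
	DEVICE_STOP_UNIT,
	DEVICE_ERROR,
};

/* Stub: pretend the device needs two retries before becoming ready. */
static enum device_responsive_state wait_for_device_ready(int attempt)
{
	return attempt < 2 ? DEVICE_RETRY : DEVICE_READY;
}

static int bring_up_device(int max_retries)
{
	int attempt;

	for (attempt = 0; attempt < max_retries; attempt++) {
		switch (wait_for_device_ready(attempt)) {
		case DEVICE_READY:
			return 0;	/* safe to expose to the midlayer */
		case DEVICE_ERROR:
			return -1;	/* fatal, stop retrying */
		default:
			continue;	/* transient, try again */
		}
	}
	return -1;	/* retries exhausted */
}

int main(void)
{
	printf("bring_up_device: %d\n", bring_up_device(5));
	return 0;
}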
retried(for each device) * * This object stored on ioc->fw_event_list. */ struct fw_event_work { struct list_head list; struct work_struct work; - + u8 cancel_pending_work; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) + struct delayed_work delayed_work; + u8 delayed_work_active; +#endif struct MPT3SAS_ADAPTER *ioc; u16 device_handle; u8 VF_ID; @@ -207,24 +345,57 @@ struct fw_event_work { u8 ignore; u16 event; struct kref refcount; - char event_data[0] __aligned(4); + void *event_data; + u8 *retries; }; +/** + * fw_event_work_free - Free the firmware event work structure + * @r : kref object + * + * Free the firmware event work structure. This will be called + * corresponding firmware event refernce count reaches to zero. + */ static void fw_event_work_free(struct kref *r) { - kfree(container_of(r, struct fw_event_work, refcount)); + struct fw_event_work *fw_work; + fw_work = container_of(r, struct fw_event_work, refcount); + kfree(fw_work->event_data); + kfree(fw_work->retries); + kfree(fw_work); } +/** fw_event_work_get - Increment the firmware event work's + * reference count. + * @fw_work: firmware event work's object + * + * Increments the firmware event work's reference count. + */ static void fw_event_work_get(struct fw_event_work *fw_work) { kref_get(&fw_work->refcount); } +/** fw_event_work_put - Decrement the firmware event work's + * reference count. + * @fw_work: firmware event work's object + * + * Decrements the firmware event work's reference count. + * When reference count reaches to zero it will indirectly + * call fw_event_work_free() to free the firmware event work's + * object. + */ static void fw_event_work_put(struct fw_event_work *fw_work) { kref_put(&fw_work->refcount, fw_event_work_free); } +/** alloc_fw_event_work - allocate's memory for firmware event work + * @len: extra memory length + * + * allocate's memory for firmware event works and also initlialize + * firmware event work's reference count. + */ static struct fw_event_work *alloc_fw_event_work(int len) { struct fw_event_work *fw_event; @@ -237,6 +408,19 @@ static struct fw_event_work *alloc_fw_event_work(int len) return fw_event; } +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0))) +static inline unsigned int mpt3sas_scsi_prot_interval(struct scsi_cmnd *scmd) +{ + return scmd->device->sector_size; +} + +static inline u32 mpt3sas_scsi_prot_ref_tag(struct scsi_cmnd *scmd) +{ + return blk_rq_pos(scmd->request) >> + (ilog2(mpt3sas_scsi_prot_interval(scmd)) - 9) & 0xffffffff; +} +#endif + /** * struct _scsi_io_transfer - scsi io transfer * @handle: sas device handle (assigned by firmware) @@ -287,13 +471,15 @@ struct _scsi_io_transfer { /** * _scsih_set_debug_level - global setting of ioc->logging_level. - * @val: ? - * @kp: ? * * Note: The logging levels are defined in mpt3sas_debug.h. 
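/*
 * A standalone sketch of the kref discipline behind fw_event_work_free(),
 * fw_event_work_get() and fw_event_work_put() above, assuming only
 * <linux/kref.h>: the object embeds a kref, the allocator leaves it at
 * one, and the final put runs the release callback, which frees the
 * payload buffers before the object itself.  Names are illustrative.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct fw_evt {
	struct kref refcount;
	void *event_data;
};

static void fw_evt_release(struct kref *r)
{
	struct fw_evt *e = container_of(r, struct fw_evt, refcount);

	kfree(e->event_data);	/* payload first */
	kfree(e);		/* then the object */
}

static struct fw_evt *fw_evt_alloc(int len)
{
	struct fw_evt *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	e->event_data = kzalloc(len, GFP_KERNEL);
	if (!e->event_data) {
		kfree(e);
		return NULL;
	}
	kref_init(&e->refcount);	/* count starts at 1 */
	return e;
}

static inline void fw_evt_get(struct fw_evt *e)
{
	kref_get(&e->refcount);
}

static inline void fw_evt_put(struct fw_evt *e)
{
	kref_put(&e->refcount, fw_evt_release);	/* frees on last put */
}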
*/ static int +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) _scsih_set_debug_level(const char *val, const struct kernel_param *kp) +#else +_scsih_set_debug_level(const char *val, struct kernel_param *kp) +#endif { int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc; @@ -301,7 +487,7 @@ _scsih_set_debug_level(const char *val, const struct kernel_param *kp) if (ret) return ret; - pr_info("setting logging_level(0x%08x)\n", logging_level); + printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level); spin_lock(&gioc_lock); list_for_each_entry(ioc, &mpt3sas_ioc_list, list) ioc->logging_level = logging_level; @@ -316,7 +502,7 @@ module_param_call(logging_level, _scsih_set_debug_level, param_get_int, * @sas_address: sas address * @boot_device: boot device object from bios page 2 * - * Return: 1 when there's a match, 0 means no match. + * Returns 1 when there's a match, 0 means no match. */ static inline int _scsih_srch_boot_sas_address(u64 sas_address, @@ -330,7 +516,7 @@ _scsih_srch_boot_sas_address(u64 sas_address, * @device_name: device name specified in INDENTIFY fram * @boot_device: boot device object from bios page 2 * - * Return: 1 when there's a match, 0 means no match. + * Returns 1 when there's a match, 0 means no match. */ static inline int _scsih_srch_boot_device_name(u64 device_name, @@ -345,7 +531,7 @@ _scsih_srch_boot_device_name(u64 device_name, * @slot_number: slot number * @boot_device: boot device object from bios page 2 * - * Return: 1 when there's a match, 0 means no match. + * Returns 1 when there's a match, 0 means no match. */ static inline int _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, @@ -356,16 +542,118 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, SlotNumber)) ? 
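/*
 * A sketch of the module_param_call() hook shown above for logging_level,
 * assuming the post-4.15 const kernel_param signature (the driver keeps
 * both variants behind LINUX_VERSION_CODE): param_set_int() stores the
 * value, then every registered adapter is updated under a lock.  The
 * adapter list and names are illustrative.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct adapter {
	struct list_head list;
	u32 logging_level;
};

static LIST_HEAD(adapter_list);
static DEFINE_SPINLOCK(adapter_lock);
static u32 logging_level;

static int set_debug_level(const char *val, const struct kernel_param *kp)
{
	struct adapter *a;
	int ret = param_set_int(val, kp);	/* updates logging_level */

	if (ret)
		return ret;
	spin_lock(&adapter_lock);
	list_for_each_entry(a, &adapter_list, list)
		a->logging_level = logging_level;	/* propagate */
	spin_unlock(&adapter_lock);
	return 0;
}
module_param_call(logging_level, set_debug_level, param_get_int,
    &logging_level, 0644);
MODULE_LICENSE("GPL");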
1 : 0; } +static void +_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device, struct scsi_device *sdev, + struct scsi_target *starget) +{ + if(sdev) { + if(sas_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, "enclosure logical id" + "(0x%016llx), slot(%d) \n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if(sas_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, "enclosure level" + "(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } + else if(starget) { + if(sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, "enclosure" + "logical id(0x%016llx), slot(%d) \n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if(sas_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, "enclosure level" + "(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + starget_printk(KERN_INFO, starget, "chassis slot" + "(0x%04x)\n", + sas_device->chassis_slot); + } + else { + + if(sas_device->enclosure_handle != 0) + printk(MPT3SAS_INFO_FMT "enclosure logical id" + "(0x%016llx), slot(%d) \n",ioc->name, + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if(sas_device->connector_name[0] != '\0') + printk(MPT3SAS_INFO_FMT "enclosure level(0x%04x)," + "connector name( %s)\n",ioc->name, + sas_device->enclosure_level, + sas_device->connector_name); + if(sas_device->is_chassis_slot_valid) + printk(MPT3SAS_INFO_FMT "chassis slot(0x%04x)\n", + ioc->name, sas_device->chassis_slot); + } + +} + +/** + * mpt3sas_get_port_by_id - get port entry corresponding to provided + * port number from port list + * @ioc: per adapter object + * @port: port number + * + * Search for port entry corresponding to provided port number, + * if available return port address otherwise return NULL. + */ +struct hba_port * +mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port_id) +{ + + struct hba_port *port, *port_next; + + if (!ioc->multipath_on_hba) + port_id = MULTIPATH_DISABLED_PORT_ID; + + list_for_each_entry_safe (port, port_next, + &ioc->port_table_list, list) { + if (port->port_id == port_id && + !(port->flags & HBA_PORT_FLAG_DIRTY_PORT)) + return port; + } + + if (unlikely(!ioc->multipath_on_hba)) { + port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!port) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + port->port_id = MULTIPATH_DISABLED_PORT_ID; + printk(MPT3SAS_INFO_FMT + "hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + + return port; + } + + return NULL; +} + /** * _scsih_is_boot_device - search for matching boot device. - * @sas_address: sas address + * @sas_address: sas address or WWID if PCIe device * @device_name: device name specified in INDENTIFY fram * @enclosure_logical_id: enclosure logical id - * @slot: slot number + * @slot_number: slot number * @form: specifies boot device form * @boot_device: boot device object from bios page 2 * - * Return: 1 when there's a match, 0 means no match. + * Returns 1 when there's a match, 0 means no match. 
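/*
 * A generic sketch of the search-or-allocate pattern in
 * mpt3sas_get_port_by_id() above: scan the list for a matching, non-dirty
 * port and, where the lookup is allowed to fall through (the driver does
 * this only when multipath is disabled), allocate a placeholder entry and
 * queue it.  Types and names are illustrative.
 */
#include <linux/list.h>
#include <linux/slab.h>

#define PORT_FLAG_DIRTY 0x01

struct port_entry {
	struct list_head list;
	u8 port_id;
	u32 flags;
};

static struct port_entry *get_port_by_id(struct list_head *ports, u8 id)
{
	struct port_entry *p;

	list_for_each_entry(p, ports, list)
		if (p->port_id == id && !(p->flags & PORT_FLAG_DIRTY))
			return p;

	/* Not found: add a placeholder so callers always get an entry. */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->port_id = id;
	list_add_tail(&p->list, ports);
	return p;
}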
*/ static int _scsih_is_boot_device(u64 sas_address, u64 device_name, @@ -403,11 +691,10 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name, /** * _scsih_get_sas_address - set the sas_address for given device handle - * @ioc: ? * @handle: device handle * @sas_address: sas address * - * Return: 0 success, non-zero when failure + * Returns 0 success, non-zero when failure */ static int _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, @@ -421,32 +708,33 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); return -ENXIO; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { - /* For HBA, vSES doesn't return HBA SAS address. Instead return + /* For HBA vSES don't return hba sas address instead return * vSES's sas address. */ if ((handle <= ioc->sas_hba.num_phys) && - (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & - MPI2_SAS_DEVICE_INFO_SEP))) + (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP))) *sas_address = ioc->sas_hba.sas_address; else *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); return 0; } - /* we hit this because the given parent handle doesn't exist */ + /* we hit this becuase the given parent handle doesn't exist */ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) return -ENXIO; /* else error case */ - ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", - handle, ioc_status, __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), " + "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); return -EIO; } @@ -454,7 +742,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, * _scsih_determine_boot_device - determine boot device. * @ioc: per adapter object * @device: sas_device or pcie_device object - * @channel: SAS or PCIe channel + * @channel: SAS(0), raid(1) or PCIe(2) channel * * Determines whether this device should be first reported device to * to scsi-ml or sas transport, this purpose is for persistent boot device. 
@@ -465,7 +753,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, */ static void _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, - u32 channel) + u32 channel) { struct _sas_device *sas_device; struct _pcie_device *pcie_device; @@ -509,9 +797,10 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, (ioc->bios_pg2.ReqBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK), &ioc->bios_pg2.RequestedBootDevice)) { - dinitprintk(ioc, - ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n", - __func__, (u64)sas_address)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s: req_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); ioc->req_boot_device.device = device; ioc->req_boot_device.channel = channel; } @@ -523,9 +812,10 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, (ioc->bios_pg2.ReqAltBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK), &ioc->bios_pg2.RequestedAltBootDevice)) { - dinitprintk(ioc, - ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n", - __func__, (u64)sas_address)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s: req_alt_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); ioc->req_alt_boot_device.device = device; ioc->req_alt_boot_device.channel = channel; } @@ -537,9 +827,10 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, (ioc->bios_pg2.CurrentBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK), &ioc->bios_pg2.CurrentBootDevice)) { - dinitprintk(ioc, - ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n", - __func__, (u64)sas_address)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s: current_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); ioc->current_boot_device.device = device; ioc->current_boot_device.channel = channel; } @@ -548,7 +839,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, static struct _sas_device * __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, - struct MPT3SAS_TARGET *tgt_priv) + struct MPT3SAS_TARGET *tgt_priv) { struct _sas_device *ret; @@ -561,9 +852,20 @@ __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, return ret; } -static struct _sas_device * +/** + * mpt3sas_get_sdev_from_target - sas device search + * @ioc: per adapter object + * @tgt_priv: starget private object + * + * Context: This function will acquire ioc->sas_device_lock and will release + * before returning the sas_device object. + * + * This searches for sas_device from target, then return sas_device + * object. + */ +struct _sas_device * mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, - struct MPT3SAS_TARGET *tgt_priv) + struct MPT3SAS_TARGET *tgt_priv) { struct _sas_device *ret; unsigned long flags; @@ -600,7 +902,7 @@ __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, * * This searches for pcie_device from target, then return pcie_device object. */ -static struct _pcie_device * +struct _pcie_device * mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, struct MPT3SAS_TARGET *tgt_priv) { @@ -614,20 +916,74 @@ mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, return ret; } +/** + * __mpt3sas_get_sdev_by_addr - sas device search + * @ioc: per adapter object + * @sas_address: sas address + * @port: hba port entry + * + * Context: This function will acquire ioc->sas_device_lock and will release + * before returning the sas_device object. 
+ * + * This searches for sas_device from sas adress and port number + * then return sas_device object. + */ struct _sas_device * __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address) + u64 sas_address, struct hba_port *port) +{ + struct _sas_device *sas_device; + + if (!port) + return NULL; + + assert_spin_locked(&ioc->sas_device_lock); + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + + return NULL; + +found_device: + sas_device_get(sas_device); + return sas_device; +} + +/** + * __mpt3sas_get_sdev_by_addr_on_rphy - sas device search + * @ioc: per adapter object + * @sas_address: sas address + * @rphy: sas_rphy pointer + * + * Context: This function will acquire ioc->sas_device_lock and will release + * before returning the sas_device object. + * + * This searches for sas_device from sas adress and rphy pointer + * then return sas_device object. + */ +struct _sas_device * +__mpt3sas_get_sdev_by_addr_and_rphy(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct sas_rphy *rphy) { struct _sas_device *sas_device; assert_spin_locked(&ioc->sas_device_lock); list_for_each_entry(sas_device, &ioc->sas_device_list, list) - if (sas_device->sas_address == sas_address) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) goto found_device; list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) - if (sas_device->sas_address == sas_address) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) goto found_device; return NULL; @@ -641,21 +997,27 @@ found_device: * mpt3sas_get_sdev_by_addr - sas device search * @ioc: per adapter object * @sas_address: sas address - * Context: Calling function should acquire ioc->sas_device_lock + * @port: hba port entry * - * This searches for sas_device based on sas_address, then return sas_device - * object. + * Context: This function will acquire ioc->sas_device_lock and will release + * before returning the sas_device object. + * + * This searches for sas_device based on sas_address & port number, + * then return sas_device object. */ struct _sas_device * mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address) + u64 sas_address, struct hba_port *port) { - struct _sas_device *sas_device; + struct _sas_device *sas_device = NULL; unsigned long flags; + if(!port) + return sas_device; + spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_by_addr(ioc, - sas_address); + sas_address, port); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return sas_device; @@ -687,7 +1049,9 @@ found_device: * mpt3sas_get_sdev_by_handle - sas device search * @ioc: per adapter object * @handle: sas device handle (assigned by firmware) - * Context: Calling function should acquire ioc->sas_device_lock + * + * Context: This function will acquire ioc->sas_device_lock and will release + * before returning the sas_device object. * * This searches for sas_device based on sas_address, then return sas_device * object. 
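/*
 * A sketch of the locked-lookup convention used by
 * __mpt3sas_get_sdev_by_addr() and its wrapper above: the __ variant must
 * be called with the list lock held and pins the object with a reference
 * before it escapes the lock; the public wrapper takes and releases the
 * lock itself.  Illustrative types; the reference is a plain kref here.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>

struct sdev_entry {
	struct list_head list;
	u64 sas_address;
	struct kref ref;
};

static struct sdev_entry *__sdev_lookup(struct list_head *head,
    spinlock_t *lock, u64 addr)
{
	struct sdev_entry *d;

	assert_spin_locked(lock);
	list_for_each_entry(d, head, list)
		if (d->sas_address == addr) {
			kref_get(&d->ref);	/* pin before the lock drops */
			return d;
		}
	return NULL;
}

static struct sdev_entry *sdev_lookup(struct list_head *head,
    spinlock_t *lock, u64 addr)
{
	struct sdev_entry *d;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	d = __sdev_lookup(head, lock, addr);
	spin_unlock_irqrestore(lock, flags);
	return d;	/* caller owns one reference and must put it */
}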
@@ -705,64 +1069,6 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) return sas_device; } -/** - * _scsih_display_enclosure_chassis_info - display device location info - * @ioc: per adapter object - * @sas_device: per sas device object - * @sdev: scsi device struct - * @starget: scsi target struct - */ -static void -_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, - struct _sas_device *sas_device, struct scsi_device *sdev, - struct scsi_target *starget) -{ - if (sdev) { - if (sas_device->enclosure_handle != 0) - sdev_printk(KERN_INFO, sdev, - "enclosure logical id (0x%016llx), slot(%d) \n", - (unsigned long long) - sas_device->enclosure_logical_id, - sas_device->slot); - if (sas_device->connector_name[0] != '\0') - sdev_printk(KERN_INFO, sdev, - "enclosure level(0x%04x), connector name( %s)\n", - sas_device->enclosure_level, - sas_device->connector_name); - if (sas_device->is_chassis_slot_valid) - sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", - sas_device->chassis_slot); - } else if (starget) { - if (sas_device->enclosure_handle != 0) - starget_printk(KERN_INFO, starget, - "enclosure logical id(0x%016llx), slot(%d) \n", - (unsigned long long) - sas_device->enclosure_logical_id, - sas_device->slot); - if (sas_device->connector_name[0] != '\0') - starget_printk(KERN_INFO, starget, - "enclosure level(0x%04x), connector name( %s)\n", - sas_device->enclosure_level, - sas_device->connector_name); - if (sas_device->is_chassis_slot_valid) - starget_printk(KERN_INFO, starget, - "chassis slot(0x%04x)\n", - sas_device->chassis_slot); - } else { - if (sas_device->enclosure_handle != 0) - ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n", - (u64)sas_device->enclosure_logical_id, - sas_device->slot); - if (sas_device->connector_name[0] != '\0') - ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n", - sas_device->enclosure_level, - sas_device->connector_name); - if (sas_device->is_chassis_slot_valid) - ioc_info(ioc, "chassis slot(0x%04x)\n", - sas_device->chassis_slot); - } -} - /** * _scsih_sas_device_remove - remove sas_device from list. * @ioc: per adapter object @@ -771,16 +1077,18 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, * * If sas_device is on the list, remove it and decrement its reference count. */ -static void +void _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device) { unsigned long flags; + int was_on_sas_device_list = 0; if (!sas_device) return; - ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", - sas_device->handle, (u64)sas_device->sas_address); + printk(MPT3SAS_INFO_FMT "%s: removing handle(0x%04x), sas_addr" + "(0x%016llx)\n", ioc->name, __func__, sas_device->handle, + (unsigned long long) sas_device->sas_address); _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); @@ -788,24 +1096,32 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, * The lock serializes access to the list, but we still need to verify * that nobody removed the entry while we were waiting on the lock. 
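/*
 * A sketch of the removal discipline in _scsih_sas_device_remove() above:
 * detach under the spinlock only if the entry is still queued, remember
 * whether we actually removed it, and drop the list's reference outside
 * the lock so the final free never runs with the lock held.  Illustrative
 * types.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct dev_entry {
	struct list_head list;
	struct kref ref;
};

static void dev_entry_free(struct kref *r)
{
	kfree(container_of(r, struct dev_entry, ref));
}

static void dev_entry_remove(struct dev_entry *d, spinlock_t *lock)
{
	unsigned long flags;
	int was_on_list = 0;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(&d->list)) {	/* a racing remover may have won */
		list_del_init(&d->list);
		was_on_list = 1;
	}
	spin_unlock_irqrestore(lock, flags);

	if (was_on_list)
		kref_put(&d->ref, dev_entry_free);	/* may free: keep outside the lock */
}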
*/ + spin_lock_irqsave(&ioc->sas_device_lock, flags); if (!list_empty(&sas_device->list)) { list_del_init(&sas_device->list); - sas_device_put(sas_device); + was_on_sas_device_list = 1; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + kfree(sas_device->serial_number); + sas_device_put(sas_device); + } } /** * _scsih_device_remove_by_handle - removing device object by handle * @ioc: per adapter object * @handle: device handle + * + * Return nothing. */ static void _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _sas_device *sas_device; unsigned long flags; + int was_on_sas_device_list = 0; if (ioc->shost_recovery) return; @@ -813,39 +1129,51 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); if (sas_device) { - list_del_init(&sas_device->list); - sas_device_put(sas_device); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + sas_device_put(sas_device); + } } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (sas_device) { + if (was_on_sas_device_list) { _scsih_remove_device(ioc, sas_device); sas_device_put(sas_device); } } /** - * mpt3sas_device_remove_by_sas_address - removing device object by sas address + * mpt3sas_device_remove_by_sas_address - removing device object by + * sas address & port number * @ioc: per adapter object * @sas_address: device sas_address + * @port: hba port entry + * + * Return nothing. */ void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address) + u64 sas_address, struct hba_port *port) { struct _sas_device *sas_device; unsigned long flags; + int was_on_sas_device_list = 0; if (ioc->shost_recovery) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); - sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address, port); if (sas_device) { - list_del_init(&sas_device->list); - sas_device_put(sas_device); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + sas_device_put(sas_device); + } } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (sas_device) { + if (was_on_sas_device_list) { _scsih_remove_device(ioc, sas_device); sas_device_put(sas_device); } @@ -865,12 +1193,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, { unsigned long flags; - dewtprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", - __func__, sas_device->handle, - (u64)sas_device->sas_address)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle" + "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, + sas_device->handle, (unsigned long long)sas_device->sas_address)); - dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL)); spin_lock_irqsave(&ioc->sas_device_lock, flags); @@ -884,20 +1211,25 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, } if (!mpt3sas_transport_port_add(ioc, sas_device->handle, - sas_device->sas_address_parent)) { + sas_device->sas_address_parent, sas_device->port)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { - /* +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + /* CQ 206770: * When asyn scanning is enabled, its not possible to remove * devices while scanning is turned on due to 
an oops in * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() */ if (!ioc->is_driver_loading) { +#endif mpt3sas_transport_port_remove(ioc, sas_device->sas_address, - sas_device->sas_address_parent); + sas_device->sas_address_parent, + sas_device->port); _scsih_sas_device_remove(ioc, sas_device); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) } +#endif } else clear_bit(sas_device->handle, ioc->pend_os_device_add); } @@ -916,12 +1248,11 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc, { unsigned long flags; - dewtprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", - __func__, sas_device->handle, - (u64)sas_device->sas_address)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle" + "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, + sas_device->handle, (unsigned long long)sas_device->sas_address)); - dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL)); spin_lock_irqsave(&ioc->sas_device_lock, flags); @@ -932,7 +1263,8 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc, } -static struct _pcie_device * + +struct _pcie_device * __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) { struct _pcie_device *pcie_device; @@ -965,7 +1297,7 @@ found_device: * * This searches for pcie_device based on wwid, then return pcie_device object. */ -static struct _pcie_device * +struct _pcie_device * mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) { struct _pcie_device *pcie_device; @@ -979,7 +1311,7 @@ mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) } -static struct _pcie_device * +struct _pcie_device * __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, int channel) { @@ -1002,7 +1334,34 @@ found_device: return pcie_device; } -static struct _pcie_device * + +/** + * mpt3sas_get_pdev_by_idchannel - pcie device search + * @ioc: per adapter object + * @id: Target ID + * @channel: Channel ID + * + * Context: This function will acquire ioc->pcie_device_lock and will release + * before returning the pcie_device object. + * + * This searches for pcie_device based on id and channel, then return + * pcie_device object. 
+ */ +struct _pcie_device * +mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, int channel) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, id, channel); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + + +struct _pcie_device * __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _pcie_device *pcie_device; @@ -1059,41 +1418,45 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) */ static void _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, - struct _pcie_device *pcie_device) + struct _pcie_device *pcie_device) { unsigned long flags; int was_on_pcie_device_list = 0; if (!pcie_device) return; - ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", - pcie_device->handle, (u64)pcie_device->wwid); - if (pcie_device->enclosure_handle != 0) - ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n", - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot); - if (pcie_device->connector_name[0] != '\0') - ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n", - pcie_device->enclosure_level, - pcie_device->connector_name); + printk(MPT3SAS_INFO_FMT "removing handle(0x%04x), wwid" + "(0x%016llx)\n", ioc->name, pcie_device->handle, + (unsigned long long) pcie_device->wwid); + if(pcie_device->enclosure_handle != 0) + printk(MPT3SAS_INFO_FMT "removing " + "enclosure logical id(0x%016llx), slot(%d) \n", + ioc->name, (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot); + if(pcie_device->connector_name[0] != '\0') + printk(MPT3SAS_INFO_FMT "removing " + "enclosure level(0x%04x), connector name( %s)\n", + ioc->name, pcie_device->enclosure_level, + pcie_device->connector_name); spin_lock_irqsave(&ioc->pcie_device_lock, flags); if (!list_empty(&pcie_device->list)) { list_del_init(&pcie_device->list); was_on_pcie_device_list = 1; } - spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (was_on_pcie_device_list) { kfree(pcie_device->serial_number); pcie_device_put(pcie_device); } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); } - /** * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle * @ioc: per adapter object * @handle: device handle + * + * Return nothing. */ static void _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -1121,6 +1484,40 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) } } +/** + * _scsih_pcie_device_remove_by_wwid - removing device object by + * wwid + * @ioc: per adapter object + * @wwid: pcie device wwid + * + * Return nothing. 
+ */ +static void +_scsih_pcie_device_remove_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + int was_on_pcie_device_list = 0; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); + if (pcie_device) { + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + was_on_pcie_device_list = 1; + pcie_device_put(pcie_device); + } + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (was_on_pcie_device_list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + pcie_device_put(pcie_device); + } +} + /** * _scsih_pcie_device_add - add pcie_device object * @ioc: per adapter object @@ -1134,81 +1531,80 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc, { unsigned long flags; - dewtprintk(ioc, - ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", - __func__, - pcie_device->handle, (u64)pcie_device->wwid)); - if (pcie_device->enclosure_handle != 0) - dewtprintk(ioc, - ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", - __func__, - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot)); - if (pcie_device->connector_name[0] != '\0') - dewtprintk(ioc, - ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", - __func__, pcie_device->enclosure_level, - pcie_device->connector_name)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle" + "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, + pcie_device->handle, (unsigned long long)pcie_device->wwid)); + if(pcie_device->enclosure_handle != 0) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s: enclosure logical id(0x%016llx), slot( %d)\n", ioc->name, + __func__, (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if(pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enclosure level(0x%04x), " + "connector name( %s)\n", ioc->name, __func__, + pcie_device->enclosure_level, pcie_device->connector_name)); spin_lock_irqsave(&ioc->pcie_device_lock, flags); pcie_device_get(pcie_device); list_add_tail(&pcie_device->list, &ioc->pcie_device_list); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); - + if (pcie_device->access_status == - MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { clear_bit(pcie_device->handle, ioc->pend_os_device_add); return; } if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) { _scsih_pcie_device_remove(ioc, pcie_device); } else if (!pcie_device->starget) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) if (!ioc->is_driver_loading) { +#endif /*TODO-- Need to find out whether this condition will occur or not*/ clear_bit(pcie_device->handle, ioc->pend_os_device_add); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) } +#endif } else clear_bit(pcie_device->handle, ioc->pend_os_device_add); } -/* - * _scsih_pcie_device_init_add - insert pcie_device to the init list. - * @ioc: per adapter object - * @pcie_device: the pcie_device object - * Context: This function will acquire ioc->pcie_device_lock. - * - * Adding new object at driver load time to the ioc->pcie_device_init_list. - */ +/** +* _scsih_pcie_device_init_add - insert pcie_device to the init list. +* @ioc: per adapter object +* @pcie_device: the pcie_device object +* Context: This function will acquire ioc->pcie_device_lock. +* +* Adding new object at driver load time to the ioc->pcie_device_init_list. 
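/*
 * A sketch of the add-with-unwind flow in _scsih_pcie_device_add() above:
 * the entry goes on the adapter list first, then the midlayer is asked to
 * probe the target via scsi_add_device() (a real midlayer call), and a
 * failure unwinds the list insertion.  The entry type and channel constant
 * are illustrative.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

#define DEMO_PCIE_CHANNEL 2

struct pdev_entry {
	struct list_head list;
	u32 id;
};

static void pdev_add(struct Scsi_Host *shost, struct pdev_entry *p,
    struct list_head *head, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(&p->list, head);
	spin_unlock_irqrestore(lock, flags);

	/* Ask the midlayer to probe LUN 0 of this target. */
	if (scsi_add_device(shost, DEMO_PCIE_CHANNEL, p->id, 0)) {
		spin_lock_irqsave(lock, flags);
		list_del_init(&p->list);	/* unwind on failure */
		spin_unlock_irqrestore(lock, flags);
	}
}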
+*/ static void _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc, - struct _pcie_device *pcie_device) + struct _pcie_device *pcie_device) { unsigned long flags; - dewtprintk(ioc, - ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", - __func__, - pcie_device->handle, (u64)pcie_device->wwid)); - if (pcie_device->enclosure_handle != 0) - dewtprintk(ioc, - ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", - __func__, - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot)); - if (pcie_device->connector_name[0] != '\0') - dewtprintk(ioc, - ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", - __func__, pcie_device->enclosure_level, - pcie_device->connector_name)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle" + "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, + pcie_device->handle, (unsigned long long)pcie_device->wwid)); + if(pcie_device->enclosure_handle != 0) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s: enclosure logical id(0x%016llx), slot( %d)\n", ioc->name, + __func__, (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if(pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enclosure level(0x%04x), " + "connector name( %s)\n", ioc->name, __func__, + pcie_device->enclosure_level, pcie_device->connector_name)); spin_lock_irqsave(&ioc->pcie_device_lock, flags); pcie_device_get(pcie_device); list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list); if (pcie_device->access_status != - MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); } + /** * _scsih_raid_device_find_by_id - raid device search * @ioc: per adapter object @@ -1246,7 +1642,8 @@ _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) * object. */ struct _raid_device * -mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 +handle) { struct _raid_device *raid_device, *r; @@ -1265,7 +1662,7 @@ mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) /** * _scsih_raid_device_find_by_wwid - raid device search * @ioc: per adapter object - * @wwid: ? 
+ * @handle: sas device handle (assigned by firmware) * Context: Calling function should acquire ioc->raid_device_lock * * This searches for raid_device based on wwid, then return raid_device @@ -1300,14 +1697,30 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, struct _raid_device *raid_device) { unsigned long flags; - - dewtprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", - __func__, - raid_device->handle, (u64)raid_device->wwid)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + u8 protection_mask; +#endif + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle" + "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, + raid_device->handle, (unsigned long long)raid_device->wwid)); spin_lock_irqsave(&ioc->raid_device_lock, flags); list_add_tail(&raid_device->list, &ioc->raid_device_list); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + if (!ioc->disable_eedp_support) { + /* Disable DIX0 protection capability */ + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, protection_mask & 0x77); + printk(MPT3SAS_INFO_FMT ": Disabling DIX0 prot capability because HBA does" + "not support DIX0 operation on volumes\n",ioc->name); + } + } + } +#endif + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } @@ -1363,12 +1776,12 @@ mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) * This searches for enclosure device based on handle, then returns the * enclosure object. */ -static struct _enclosure_node * +struct _enclosure_node * mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _enclosure_node *enclosure_dev, *r; - r = NULL; + list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) continue; @@ -1378,24 +1791,47 @@ mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) out: return r; } + +/** + * TODO: search for pcie switch + * mpt3sas_scsih_switch_find_by_handle - pcie switch search + * @ioc: per adapter object + * @handle: switch handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_node_lock + * + * This searches for switch device based on handle, then returns the + * sas_node object. + */ +struct _sas_node * +mpt3sas_scsih_switch_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + printk(MPT3SAS_ERR_FMT "%s is not yet implemented", + ioc->name, __func__); + return NULL; +} + /** * mpt3sas_scsih_expander_find_by_sas_address - expander device search * @ioc: per adapter object * @sas_address: sas address + * @port: hba port entry * Context: Calling function should acquire ioc->sas_node_lock. * - * This searches for expander device based on sas_address, then returns the - * sas_node object. + * This searches for expander device based on sas_address & port number, + * then returns the sas_node object. 
*/ struct _sas_node * mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address) + u64 sas_address, struct hba_port *port) { struct _sas_node *sas_expander, *r; - r = NULL; + if (!port) + return r; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { - if (sas_expander->sas_address != sas_address) + if (sas_expander->sas_address != sas_address || + sas_expander->port != port) continue; r = sas_expander; goto out; @@ -1411,6 +1847,8 @@ mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, * Context: This function will acquire ioc->sas_node_lock. * * Adding new object to the ioc->sas_expander_list. + * + * Return nothing. */ static void _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, @@ -1424,14 +1862,14 @@ _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, } /** - * _scsih_is_end_device - determines if device is an end device + * _scsih_is_sas_end_device - determines if device is an end device * @device_info: bitfield providing information about the device. * Context: none * - * Return: 1 if end device. + * Returns 1 if the device is SAS/SATA/STP end device. */ static int -_scsih_is_end_device(u32 device_info) +_scsih_is_sas_end_device(u32 device_info) { if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | @@ -1443,8 +1881,7 @@ _scsih_is_end_device(u32 device_info) } /** - * _scsih_is_nvme_pciescsi_device - determines if - * device is an pcie nvme/scsi device + * _scsih_is_nvme_pciescsi_device - determines if device is an pcie nvme/scsi device * @device_info: bitfield providing information about the device. * Context: none * @@ -1454,20 +1891,80 @@ static int _scsih_is_nvme_pciescsi_device(u32 device_info) { if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) - == MPI26_PCIE_DEVINFO_NVME) || - ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) - == MPI26_PCIE_DEVINFO_SCSI)) + == MPI26_PCIE_DEVINFO_NVME) || + ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_SCSI)) return 1; else return 0; } +/** + * _scsih_scsi_lookup_find_by_target - search for matching channel:id + * @ioc: per adapter object + * @id: target id + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, + int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; + smid <= ioc->shost->can_queue; smid++) { + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel) + return 1; + } + return 0; +} + +/** + * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun + * @ioc: per adapter object + * @id: target id + * @lun: lun number + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id:lun in the scsi_lookup array, + * returning 1 if found. 
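/*
 * A sketch of the outstanding-I/O scan added above in
 * _scsih_scsi_lookup_find_by_lun(): every possible tag up to can_queue is
 * probed with scsi_host_find_tag() (the real midlayer helper) and compared
 * against channel:id:lun.  The function name is illustrative.
 */
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

static u8 lun_has_outstanding_io(struct Scsi_Host *shost, int id,
    unsigned int lun, int channel)
{
	struct scsi_cmnd *scmd;
	int tag;

	for (tag = 0; tag < shost->can_queue; tag++) {
		scmd = scsi_host_find_tag(shost, tag);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel &&
		    scmd->device->lun == lun)
			return 1;	/* at least one command in flight */
	}
	return 0;
}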
+ */ +static u8 +_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel && + scmd->device->lun == lun) + return 1; + } + return 0; +} + /** * mpt3sas_scsih_scsi_lookup_get - returns scmd entry * @ioc: per adapter object * @smid: system request message index - * - * Return: the smid stored scmd pointer. + + * Returns the smid stored scmd pointer. * Then will dereference the stored scmd pointer. */ struct scsi_cmnd * @@ -1478,11 +1975,8 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) Mpi25SCSIIORequest_t *mpi_request; if (smid > 0 && - smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { - u32 unique_tag = smid - 1; - + smid <= ioc->shost->can_queue) { mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); - /* * If SCSI IO request is outstanding at driver level then * DevHandle filed must be non-zero. If DevHandle is zero @@ -1491,30 +1985,34 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) */ if (!mpi_request->DevHandle) return scmd; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + scmd = scsi_host_find_tag(ioc->shost, smid - 1); +#else - scmd = scsi_host_find_tag(ioc->shost, unique_tag); + scmd = ioc->scsi_lookup[smid -1].scmd; +#endif if (scmd) { - st = scsi_cmd_priv(scmd); - if (st->cb_idx == 0xFF || st->smid == 0) + st = mpt3sas_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) scmd = NULL; } } return scmd; } -/** - * scsih_change_queue_depth - setting device queue depth - * @sdev: scsi device struct - * @qdepth: requested queue depth - * - * Return: queue depth. - */ -static int -scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) +static void +_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) { +#else +static int +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ +#endif + struct Scsi_Host *shost = sdev->host; int max_depth; - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _sas_device *sas_device; @@ -1522,13 +2020,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) max_depth = shost->can_queue; - /* - * limit max device queue for SATA to 32 if enable_sdev_max_qd - * is disabled. - */ - if (ioc->enable_sdev_max_qd) - goto not_sata; - + /* limit max device queue for SATA to 32 */ sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data) goto not_sata; @@ -1550,44 +2042,137 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) not_sata: - if (!sdev->tagged_supported) + if (!sdev->tagged_supported || sdev->type == TYPE_ENCLOSURE) max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); +#elif (!defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + scsi_adjust_queue_depth(sdev, ((qdepth == 1) ? 
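/*
 * A sketch of the modern-kernel branch of the queue-depth handler above:
 * clamp the request to the host's can_queue, force untagged and enclosure
 * devices to a depth of one, then let scsi_change_queue_depth() (the real
 * midlayer call on >= 3.19 kernels) apply it.  The SATA cap (32 in the
 * driver) is passed in here for illustration.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

static int demo_change_queue_depth(struct scsi_device *sdev, int qdepth,
    int sata_cap)
{
	int max_depth = sdev->host->can_queue;

	if (sata_cap && sata_cap < max_depth)
		max_depth = sata_cap;	/* e.g. 32 for SATA targets */
	if (!sdev->tagged_supported || sdev->type == TYPE_ENCLOSURE)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}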
0 : MSG_SIMPLE_TAG), + qdepth); + if (sdev->inquiry_len > 7) + sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " + "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, + ((sdev->type == TYPE_ENCLOSURE) ? 0 : sdev->tagged_supported), + sdev->simple_tags, sdev->ordered_tags, sdev->scsi_level, + (sdev->inquiry[7] & 2) >> 1); + + return sdev->queue_depth; +#else return scsi_change_queue_depth(sdev, qdepth); +#endif } +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) /** - * mpt3sas_scsih_change_queue_depth - setting device queue depth + * _scsih_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth + * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP + * (see include/scsi/scsi_host.h for definition) * - * Returns nothing. + * Returns queue depth. */ -void -mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +static int +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { - struct Scsi_Host *shost = sdev->host; - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) + _scsih_adjust_queue_depth(sdev, qdepth); + else if (reason == SCSI_QDEPTH_QFULL) + scsi_track_queue_full(sdev, qdepth); + else + return -EOPNOTSUPP; - if (ioc->enable_sdev_max_qd) - qdepth = shost->can_queue; + if (sdev->inquiry_len > 7) + sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " + "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, + ((sdev->type == TYPE_ENCLOSURE) ? 0 : sdev->tagged_supported), + sdev->simple_tags, sdev->ordered_tags, sdev->scsi_level, + (sdev->inquiry[7] & 2) >> 1); - scsih_change_queue_depth(sdev, qdepth); + return sdev->queue_depth; } +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) +/** + * _scsih_change_queue_type - changing device queue tag type + * @sdev: scsi device struct + * @tag_type: requested tag type + * + * Returns queue tag type. 
+ */ +static int +_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag_type); + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + + return tag_type; +} +#endif /** - * scsih_target_alloc - target add routine + * _scsih_set_sdev_queue_depth - global setting of device queue depth + * + * Note: only for SAS devices + */ +static int +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) +_scsih_set_sdev_queue_depth(const char *val, const struct kernel_param *kp) +#else +_scsih_set_sdev_queue_depth(const char *val, struct kernel_param *kp) +#endif +{ + int ret = param_set_int(val, kp); + struct MPT3SAS_ADAPTER *ioc; + struct sas_rphy *rphy; + struct scsi_device *sdev; + + if (ret) + return ret; + + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + shost_for_each_device(sdev, ioc->shost) { + rphy = target_to_rphy(sdev->sdev_target); + if (rphy->identify.target_port_protocols & + SAS_PROTOCOL_SSP) +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + _scsih_change_queue_depth(sdev, + sdev_queue_depth, SCSI_QDEPTH_DEFAULT); +#else + _scsih_change_queue_depth(sdev, + sdev_queue_depth); +#endif + } + } + + return 0; +} +module_param_call(sdev_queue_depth, _scsih_set_sdev_queue_depth, param_get_int, + &sdev_queue_depth, 0600); + +/** + * _scsih_target_alloc - target add routine * @starget: scsi target struct * - * Return: 0 if ok. Any other return is assumed to be an error and + * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_target_alloc(struct scsi_target *starget) +_scsih_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); struct MPT3SAS_TARGET *sas_target_priv_data; struct _sas_device *sas_device; struct _raid_device *raid_device; @@ -1595,8 +2180,7 @@ scsih_target_alloc(struct scsi_target *starget) unsigned long flags; struct sas_rphy *rphy; - sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), - GFP_KERNEL); + sas_target_priv_data = kzalloc(sizeof(struct MPT3SAS_TARGET), GFP_KERNEL); if (!sas_target_priv_data) return -ENOMEM; @@ -1629,6 +2213,7 @@ scsih_target_alloc(struct scsi_target *starget) if (pcie_device) { sas_target_priv_data->handle = pcie_device->handle; sas_target_priv_data->sas_address = pcie_device->wwid; + sas_target_priv_data->port = NULL; sas_target_priv_data->pcie_dev = pcie_device; pcie_device->starget = starget; pcie_device->id = starget->id; @@ -1646,12 +2231,13 @@ scsih_target_alloc(struct scsi_target *starget) /* sas/sata devices */ spin_lock_irqsave(&ioc->sas_device_lock, flags); rphy = dev_to_rphy(starget->dev.parent); - sas_device = __mpt3sas_get_sdev_by_addr(ioc, - rphy->identify.sas_address); + sas_device = __mpt3sas_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); if (sas_device) { sas_target_priv_data->handle = sas_device->handle; sas_target_priv_data->sas_address = sas_device->sas_address; + sas_target_priv_data->port = sas_device->port; sas_target_priv_data->sas_dev = sas_device; sas_device->starget = starget; sas_device->id = starget->id; @@ -1659,9 +2245,10 @@ scsih_target_alloc(struct scsi_target *starget) if (test_bit(sas_device->handle, ioc->pd_handles)) sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT; - if 
(sas_device->fast_path) - sas_target_priv_data->flags |= - MPT_TARGET_FASTPATH_IO; + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + if (sas_device->fast_path) + sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO; + } } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); @@ -1669,14 +2256,16 @@ scsih_target_alloc(struct scsi_target *starget) } /** - * scsih_target_destroy - target destroy routine + * _scsih_target_destroy - target destroy routine * @starget: scsi target struct + * + * Returns nothing. */ static void -scsih_target_destroy(struct scsi_target *starget) +_scsih_target_destroy(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); struct MPT3SAS_TARGET *sas_target_priv_data; struct _sas_device *sas_device; struct _raid_device *raid_device; @@ -1710,8 +2299,8 @@ scsih_target_destroy(struct scsi_target *starget) if (pcie_device) { /* - * Corresponding get() is in _scsih_target_alloc() - */ + * Corresponding get() is in _scsih_target_alloc() + */ sas_target_priv_data->pcie_dev = NULL; pcie_device_put(pcie_device); pcie_device_put(pcie_device); @@ -1733,7 +2322,6 @@ scsih_target_destroy(struct scsi_target *starget) */ sas_target_priv_data->sas_dev = NULL; sas_device_put(sas_device); - sas_device_put(sas_device); } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); @@ -1744,14 +2332,14 @@ scsih_target_destroy(struct scsi_target *starget) } /** - * scsih_slave_alloc - device add routine + * _scsih_slave_alloc - device add routine * @sdev: scsi device struct * - * Return: 0 if ok. Any other return is assumed to be an error and + * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. 
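The put() calls in the destroy path above follow from the driver's reference discipline: every successful __mpt3sas_get_* lookup returns a counted reference, and stashing the pointer in target private data during target_alloc takes another, which is why the PCIe branch puts twice. A schematic of the same discipline using the generic kref API; all names here are illustrative:

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct example_dev {
	struct kref ref;
};

static void example_dev_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_dev, ref));
}

static void example_dev_get(struct example_dev *d)
{
	kref_get(&d->ref);
}

static void example_dev_put(struct example_dev *d)
{
	kref_put(&d->ref, example_dev_release);
}

/*
 * alloc path: a lookup hands back one reference, and caching the
 * pointer in private data takes a second.  The destroy path must
 * therefore put twice: once for the cached pointer, once for the
 * lookup it performed itself.
 */
```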
*/ static int -scsih_slave_alloc(struct scsi_device *sdev) +_scsih_slave_alloc(struct scsi_device *sdev) { struct Scsi_Host *shost; struct MPT3SAS_ADAPTER *ioc; @@ -1763,8 +2351,7 @@ scsih_slave_alloc(struct scsi_device *sdev) struct _pcie_device *pcie_device; unsigned long flags; - sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), - GFP_KERNEL); + sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL); if (!sas_device_priv_data) return -ENOMEM; @@ -1780,7 +2367,7 @@ scsih_slave_alloc(struct scsi_device *sdev) sdev->no_uld_attach = 1; shost = dev_to_shost(&starget->dev); - ioc = shost_priv(shost); + ioc = shost_private(shost); if (starget->channel == RAID_CHANNEL) { spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_id(ioc, @@ -1792,11 +2379,11 @@ scsih_slave_alloc(struct scsi_device *sdev) if (starget->channel == PCIE_CHANNEL) { spin_lock_irqsave(&ioc->pcie_device_lock, flags); pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, - sas_target_priv_data->sas_address); + sas_target_priv_data->sas_address); if (pcie_device && (pcie_device->starget == NULL)) { sdev_printk(KERN_INFO, sdev, - "%s : pcie_device->starget set to starget @ %d\n", - __func__, __LINE__); + "%s : pcie_device->starget set to starget @ %d\n", __func__, + __LINE__); pcie_device->starget = starget; } @@ -1804,14 +2391,15 @@ scsih_slave_alloc(struct scsi_device *sdev) pcie_device_put(pcie_device); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); - } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_by_addr(ioc, - sas_target_priv_data->sas_address); + sas_target_priv_data->sas_address, + sas_target_priv_data->port); if (sas_device && (sas_device->starget == NULL)) { sdev_printk(KERN_INFO, sdev, - "%s : sas_device->starget set to starget @ %d\n", - __func__, __LINE__); + "%s : sas_device->starget set to starget @ %d\n", __func__, + __LINE__); sas_device->starget = starget; } @@ -1821,15 +2409,21 @@ scsih_slave_alloc(struct scsi_device *sdev) spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) + sdev->tagged_supported = 1; + scsi_activate_tcq(sdev, sdev->queue_depth); +#endif return 0; } /** - * scsih_slave_destroy - device destroy routine + * _scsih_slave_destroy - device destroy routine * @sdev: scsi device struct + * + * Returns nothing. 
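The pre-3.19 tail of slave_alloc above exists because those kernels left tagged command queueing entirely to the LLD. A sketch of that legacy enablement, assuming the old <scsi/scsi_tcq.h> helpers that 3.19 removed:

```c
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

static void example_enable_tcq(struct scsi_device *sdev, int depth)
{
	sdev->tagged_supported = 1;		/* claim TCQ support */
	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);	/* simple tags */
	scsi_activate_tcq(sdev, depth);		/* sizes the tag map */
}
#endif
```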
*/ static void -scsih_slave_destroy(struct scsi_device *sdev) +_scsih_slave_destroy(struct scsi_device *sdev) { struct MPT3SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; @@ -1847,9 +2441,9 @@ scsih_slave_destroy(struct scsi_device *sdev) sas_target_priv_data->num_luns--; shost = dev_to_shost(&starget->dev); - ioc = shost_priv(shost); + ioc = shost_private(shost); - if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)) { spin_lock_irqsave(&ioc->pcie_device_lock, flags); pcie_device = __mpt3sas_get_pdev_from_target(ioc, sas_target_priv_data); @@ -1870,6 +2464,7 @@ scsih_slave_destroy(struct scsi_device *sdev) if (sas_device) sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } @@ -1895,16 +2490,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } @@ -1923,19 +2518,20 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n"); } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) /* * raid transport support - * Enabled for SLES11 and newer, in older kernels the driver will panic when - * unloading the driver followed by a load - I believe that the subroutine + unloading the driver followed by a load - I believe that the subroutine * raid_class_release() is not cleaning up properly. */ /** - * scsih_is_raid - return boolean indicating device is raid volume - * @dev: the device struct object + * _scsih_is_raid - return boolean indicating device is raid volume + * @dev: the device struct object */ static int -scsih_is_raid(struct device *dev) +_scsih_is_raid(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); @@ -1945,23 +2541,15 @@ scsih_is_raid(struct device *dev) return (sdev->channel == RAID_CHANNEL) ? 1 : 0; } -static int -scsih_is_nvme(struct device *dev) -{ - struct scsi_device *sdev = to_scsi_device(dev); - - return (sdev->channel == PCIE_CHANNEL) ? 
1 : 0; -} - /** - * scsih_get_resync - get raid volume resync percent complete - * @dev: the device struct object + * _scsih_get_resync - get raid volume resync percent complete + * @dev: the device struct object */ static void -scsih_get_resync(struct device *dev) +_scsih_get_resync(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + struct MPT3SAS_ADAPTER *ioc = shost_private(sdev->host); static struct _raid_device *raid_device; unsigned long flags; Mpi2RaidVolPage0_t vol_pg0; @@ -1973,7 +2561,7 @@ scsih_get_resync(struct device *dev) percent_complete = 0; handle = 0; if (ioc->is_warpdrive) - goto out; + return; spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, @@ -1990,8 +2578,8 @@ scsih_get_resync(struct device *dev) if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sizeof(Mpi2RaidVolPage0_t))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); percent_complete = 0; goto out; } @@ -2012,17 +2600,18 @@ scsih_get_resync(struct device *dev) raid_set_resync(mpt3sas_raid_template, dev, percent_complete); break; } + } /** - * scsih_get_state - get raid volume level - * @dev: the device struct object + * _scsih_get_state - get raid volume level + * @dev: the device struct object */ static void -scsih_get_state(struct device *dev) +_scsih_get_state(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + struct MPT3SAS_ADAPTER *ioc = shost_private(sdev->host); static struct _raid_device *raid_device; unsigned long flags; Mpi2RaidVolPage0_t vol_pg0; @@ -2044,8 +2633,8 @@ scsih_get_state(struct device *dev) if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sizeof(Mpi2RaidVolPage0_t))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } @@ -2082,7 +2671,6 @@ scsih_get_state(struct device *dev) /** * _scsih_set_level - set raid level - * @ioc: ? 
* @sdev: scsi device struct * @volume_type: volume type */ @@ -2097,10 +2685,8 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, level = RAID_LEVEL_0; break; case MPI2_RAID_VOL_TYPE_RAID10: - level = RAID_LEVEL_10; - break; case MPI2_RAID_VOL_TYPE_RAID1E: - level = RAID_LEVEL_1E; + level = RAID_LEVEL_10; break; case MPI2_RAID_VOL_TYPE_RAID1: level = RAID_LEVEL_1; @@ -2110,23 +2696,23 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, switch (ioc->hba_mpi_version_belonged) { case MPI2_VERSION: raid_set_level(mpt2sas_raid_template, - &sdev->sdev_gendev, level); + &sdev->sdev_gendev, level); break; case MPI25_VERSION: case MPI26_VERSION: raid_set_level(mpt3sas_raid_template, - &sdev->sdev_gendev, level); + &sdev->sdev_gendev, level); break; } } - +#endif /* raid transport support - (2.6.27 and newer) */ /** * _scsih_get_volume_capabilities - volume capabilities * @ioc: per adapter object - * @raid_device: the raid_device object + * @raid_device: the raid_device object - * - * Return: 0 for success, else 1 + * Returns 0 for success, else 1 */ static int _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, @@ -2141,9 +2727,9 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, &num_pds)) || !num_pds) { - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); return 1; } @@ -2152,17 +2738,17 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, sizeof(Mpi2RaidVol0PhysDisk_t)); vol_pg0 = kzalloc(sz, GFP_KERNEL); if (!vol_pg0) { - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); return 1; } if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); kfree(vol_pg0); return 1; } @@ -2199,6 +2785,11 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, static void _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) { + u8 data[30]; + u8 page_len, ii; + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; /* only for TAPE */ if (sdev->type != TYPE_TAPE) @@ -2207,25 +2798,102 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) return; - sas_enable_tlr(sdev); - sdev_printk(KERN_INFO, sdev, "TLR %s\n", - sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); - return; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + return; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + return; + /* is Protocol-specific logical unit information (0x90) present ?? 
*/ + if (_scsih_inquiry_vpd_supported_pages(ioc, + sas_target_priv_data->handle, sdev->lun, data, + sizeof(data)) != DEVICE_READY) { + sas_device = mpt3sas_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + sas_target_priv_data->port); + if (sas_device) { + sdev_printk(KERN_INFO, sdev, "%s: DEVICE NOT READY: handle(0x%04x), " + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx) \n", + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address, sas_device->phy, + (unsigned long long)sas_device->device_name); + + _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); + + sas_device_put(sas_device); + } + return; + } + page_len = data[3]; + for (ii = 4; ii < page_len + 4; ii++) { + if (data[ii] == 0x90) { + sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON; + return; + } + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +/** + * _scsih_enable_ssu_on_sata - Enable drive spin down/up + * @sas_device: sas device object + * @sdev: scsi device struct + * + * Enables START_STOP command for SATA drives (sata drive spin down/up) + * + */ +static void +_scsih_enable_ssu_on_sata(struct _sas_device *sas_device, + struct scsi_device *sdev) +{ + if (!(sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) + return; + + switch (allow_drive_spindown) { + + case 1: + if (sas_device->ssd_device) + sdev->manage_start_stop = 1; + break; + case 2: + if (!sas_device->ssd_device) + sdev->manage_start_stop = 1; + break; + case 3: + sdev->manage_start_stop = 1; + break; + } + +} +#endif + +/** + * _scsih_set_queue_flag - Sets a queue flag. + * @flag: flag to be set + * @q: request queue + * + * Sets queue flag based on flag(QUEUE_FLAG_NOMERGES/QUEUE_FLAG_SG_GAPS) + * + */ +static void +_scsih_set_queue_flag(unsigned int flag, struct request_queue *q) +{ + set_bit(flag, &q->queue_flags); } /** - * scsih_slave_configure - device configure routine. + * _scsih_slave_configure - device configure routine. * @sdev: scsi device struct * - * Return: 0 if ok. Any other return is assumed to be an error and + * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. 
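_scsih_set_queue_flag() above pokes q->queue_flags with a raw set_bit() because old kernels had no accessor for it; later mainline added blk_queue_flag_set(). A hedged compat shim showing both forms (the 4.17 cutoff is an assumption about when the helper appeared, not something this patch encodes):

```c
#include <linux/version.h>
#include <linux/blkdev.h>

static inline void example_queue_flag_set(unsigned int flag,
					  struct request_queue *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)
	blk_queue_flag_set(flag, q);	/* locked helper on newer kernels */
#else
	set_bit(flag, &q->queue_flags);	/* what this driver open-codes */
#endif
}
```

In the NVMe path below the flag of interest is QUEUE_FLAG_NOMERGES, which keeps the block layer from merging bios and thereby creating SG gaps the firmware cannot express.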
*/ static int -scsih_slave_configure(struct scsi_device *sdev) +_scsih_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _sas_device *sas_device; @@ -2238,6 +2906,9 @@ scsih_slave_configure(struct scsi_device *sdev) char *r_level = ""; u16 handle, volume_handle = 0; u64 volume_wwid = 0; + u8 *serial_number = NULL; + enum device_responsive_state retval; + u8 count=0; qdepth = 1; sas_device_priv_data = sdev->hostdata; @@ -2253,22 +2924,19 @@ scsih_slave_configure(struct scsi_device *sdev) raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (!raid_device) { - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); return 1; } if (_scsih_get_volume_capabilities(ioc, raid_device)) { - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); return 1; } - /* - * WARPDRIVE: Initialize the required data for Direct IO - */ mpt3sas_init_warpdrive_properties(ioc, raid_device); /* RAID Queue Depth Support @@ -2282,7 +2950,7 @@ scsih_slave_configure(struct scsi_device *sdev) ds = "SSP"; } else { qdepth = MPT3SAS_SATA_QUEUE_DEPTH; - if (raid_device->device_info & + if (raid_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) ds = "SATA"; else @@ -2318,27 +2986,34 @@ scsih_slave_configure(struct scsi_device *sdev) break; } - if (!ioc->hide_ir_msg) - sdev_printk(KERN_INFO, sdev, - "%s: handle(0x%04x), wwid(0x%016llx)," - " pd_count(%d), type(%s)\n", + if (!ioc->warpdrive_msg) + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " + "wwid(0x%016llx), pd_count(%d), type(%s)\n", r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { blk_queue_max_hw_sectors(sdev->request_queue, MPT3SAS_RAID_MAX_SECTORS); sdev_printk(KERN_INFO, sdev, - "Set queue's max_sector to: %u\n", - MPT3SAS_RAID_MAX_SECTORS); + "Set queue's max_sector to: %u\n", + MPT3SAS_RAID_MAX_SECTORS); } +#endif - mpt3sas_scsih_change_queue_depth(sdev, qdepth); +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); +#else + _scsih_change_queue_depth(sdev, qdepth); +#endif - /* raid transport support */ +/* raid transport support */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) if (!ioc->is_warpdrive) _scsih_set_level(ioc, sdev, raid_device->volume_type); +#endif return 0; } @@ -2346,88 +3021,113 @@ scsih_slave_configure(struct scsi_device *sdev) if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { if (mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle)) { - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); return 1; } if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, volume_handle, &volume_wwid)) { - 
dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); return 1; } } + _scsih_inquiry_vpd_sn(ioc, handle, &serial_number); + /* PCIe handling */ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { spin_lock_irqsave(&ioc->pcie_device_lock, flags); pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, - sas_device_priv_data->sas_target->sas_address); + sas_device_priv_data->sas_target->sas_address); if (!pcie_device) { spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); + kfree(serial_number); return 1; } - + pcie_device->serial_number = serial_number; + /*TODO-right Queue Depth?*/ qdepth = MPT3SAS_NVME_QUEUE_DEPTH; ds = "NVMe"; - sdev_printk(KERN_INFO, sdev, - "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", - ds, handle, (unsigned long long)pcie_device->wwid, + /*TODO-Add device name when defined*/ + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), wwid(0x%016llx), " + "port(%d)\n", ds, handle, (unsigned long long)pcie_device->wwid, pcie_device->port_num); - if (pcie_device->enclosure_handle != 0) - sdev_printk(KERN_INFO, sdev, - "%s: enclosure logical id(0x%016llx), slot(%d)\n", - ds, - (unsigned long long)pcie_device->enclosure_logical_id, - pcie_device->slot); - if (pcie_device->connector_name[0] != '\0') - sdev_printk(KERN_INFO, sdev, - "%s: enclosure level(0x%04x)," - "connector name( %s)\n", ds, - pcie_device->enclosure_level, + if(pcie_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, "%s: enclosure logical id(0x%016llx)," + " slot(%d)\n", ds, (unsigned long long) + pcie_device->enclosure_logical_id, pcie_device->slot); + if(pcie_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, "%s: enclosure level(0x%04x), " + "connector name( %s)\n", ds, pcie_device->enclosure_level, pcie_device->connector_name); - - if (pcie_device->nvme_mdts) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + if (pcie_device->nvme_mdts) { blk_queue_max_hw_sectors(sdev->request_queue, - pcie_device->nvme_mdts/512); - + pcie_device->nvme_mdts/512); + } +#endif pcie_device_put(pcie_device); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); - mpt3sas_scsih_change_queue_depth(sdev, qdepth); + if (serial_number) + sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n", serial_number); + +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); +#else + _scsih_change_queue_depth(sdev, qdepth); +#endif + /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be - ** merged and can eliminate holes created during merging - ** operation. - **/ - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, - sdev->request_queue); + * merged and can eliminate holes created during merging + * operation. + */ + _scsih_set_queue_flag(QUEUE_FLAG_NOMERGES, + sdev->request_queue); + +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR >= 3)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))) blk_queue_virt_boundary(sdev->request_queue, - ioc->page_size - 1); + ioc->page_size - 1); +#else +#ifdef QUEUE_FLAG_SG_GAPS + /* Enable QUEUE_FLAG_SG_GAPS flag, So that kernel won't + * issue any IO's to the driver which has SG GAPs (holes). 
+ */ + _scsih_set_queue_flag(QUEUE_FLAG_SG_GAPS, + sdev->request_queue); +#endif +#endif + /*TODO -- Do we need to support EEDP for NVMe devices*/ return 0; } spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_by_addr(ioc, - sas_device_priv_data->sas_target->sas_address); + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); if (!sas_device) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - dfailprintk(ioc, - ioc_warn(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); + dfailprintk(ioc, printk(MPT3SAS_WARN_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); + kfree(serial_number); return 1; } sas_device->volume_handle = volume_handle; sas_device->volume_wwid = volume_wwid; + sas_device->serial_number = serial_number; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { - qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + qdepth = (int) sdev_queue_depth > 0 ? sdev_queue_depth : + MPT3SAS_SAS_QUEUE_DEPTH; ssp_target = 1; - if (sas_device->device_info & - MPI2_SAS_DEVICE_INFO_SEP) { + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SEP) { sdev_printk(KERN_WARNING, sdev, "set ignore_delay_remove for handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); @@ -2444,7 +3144,7 @@ scsih_slave_configure(struct scsi_device *sdev) ds = "SATA"; } - sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", ds, handle, (unsigned long long)sas_device->sas_address, sas_device->phy, (unsigned long long)sas_device->device_name); @@ -2454,22 +3154,89 @@ scsih_slave_configure(struct scsi_device *sdev) sas_device_put(sas_device); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (!ssp_target) + if (!ssp_target) { _scsih_display_sata_capabilities(ioc, handle, sdev); + do { + retval = _scsih_ata_pass_thru_idd(ioc,handle, + &sas_device->ssd_device, 30, 0); + } while ((retval == DEVICE_RETRY ||retval == DEVICE_RETRY_UA) + && count++ < 3); + } - mpt3sas_scsih_change_queue_depth(sdev, qdepth); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + /* Enable spin-down/up based on module param allow_drive_spindown */ + _scsih_enable_ssu_on_sata(sas_device, sdev); +#endif + if (serial_number) + sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n", + serial_number); + +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); +#else + _scsih_change_queue_depth(sdev, qdepth); +#endif if (ssp_target) { sas_read_port_mode_page(sdev); _scsih_enable_tlr(ioc, sdev); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + if (!ioc->disable_eedp_support) { + if (ssp_target && (!(sas_target_priv_data->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT))) { + struct read_cap_parameter data; + enum device_responsive_state retcode; + u8 retry_count = 0; + + if (!(sdev->inquiry[5] & 1)) + goto out; +retry: + /* issue one retry to handle UA's */ + memset(&data, 0, sizeof(struct read_cap_parameter)); + retcode = _scsih_read_capacity_16(ioc, + sas_target_priv_data->handle, sdev->lun, &data, + sizeof(struct read_cap_parameter)); + + if ((retcode == DEVICE_RETRY || retcode == DEVICE_RETRY_UA) + && (!retry_count++)) + goto retry; + if (retcode != DEVICE_READY) { + sdev_printk(KERN_INFO, sdev, "%s: DEVICE NOT READY: handle(0x%04x), " + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long 
long)sas_device->sas_address, + sas_device->phy, (unsigned long long)sas_device->device_name); + + _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); + + goto out; + } + if (!data.prot_en) + goto out; + sas_device_priv_data->eedp_type = data.p_type + 1; + + if (sas_device_priv_data->eedp_type == 2) { + sdev_printk(KERN_INFO, sdev, "formatted with " + "DIF Type 2 protection which is currently " + "unsupported.\n"); + goto out; + } + + sas_device_priv_data->eedp_enable = 1; + sdev_printk(KERN_INFO, sdev, "Enabling DIF Type %d " + "protection\n", sas_device_priv_data->eedp_type); + } +out: + return 0; + } +#endif return 0; } /** - * scsih_bios_param - fetch head, sector, cylinder info for a disk + * _scsih_bios_param - fetch head, sector, cylinder info for a disk * @sdev: scsi device struct * @bdev: pointer to block device context * @capacity: device size (in 512 byte sectors) @@ -2477,9 +3244,11 @@ scsih_slave_configure(struct scsi_device *sdev) * params[0] number of heads (max 255) * params[1] number of sectors (max 63) * params[2] number of cylinders + * + * Returns 0 always. */ static int -scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, +_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int params[]) { int heads; @@ -2518,6 +3287,8 @@ scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, * _scsih_response_code - translation of device response code * @ioc: per adapter object * @response_code: response code returned by the device + * + * Return nothing. */ static void _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) @@ -2553,7 +3324,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) desc = "unknown"; break; } - ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); + printk(MPT3SAS_WARN_FMT "response_code(0x%01x): %s\n", + ioc->name, response_code, desc); } /** @@ -2566,8 +3338,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) * * The callback handler when using scsih_issue_tm. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ static u8 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) @@ -2648,11 +3420,12 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) /** * mpt3sas_scsih_issue_tm - main routine for sending tm requests * @ioc: per adapter struct - * @handle: device handle + * @handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS * @lun: lun number * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) * @smid_task: smid assigned to the task - * @msix_task: MSIX table index supplied by the OS * @timeout: timeout in seconds * @tr_method: Target Reset Method * Context: user @@ -2660,36 +3433,41 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) * A generic API for sending task management requests to firmware. * * The callback index is set inside `ioc->tm_cb_idx`. - * The caller is responsible to check for outstanding commands. * - * Return: SUCCESS or FAILED. + * Return SUCCESS or FAILED. 
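The bios_param convention documented above derives a legacy CHS geometry from the capacity in 512-byte sectors: 64 heads x 32 sectors for small disks, switching to 255 x 63 once the disk outgrows that layout. A standalone userspace check of the arithmetic; the 1 GiB cutover mirrors the convention long used by SCSI LLDs and is a sketch, not this driver's literal code:

```c
#include <stdio.h>
#include <stdint.h>

/* capacity is in 512-byte sectors, as the midlayer passes it */
static void example_bios_param(uint64_t capacity, int params[3])
{
	int heads = 64, sectors = 32;

	if (capacity >= 0x200000ULL) {	/* >= 1 GiB: use "big" geometry */
		heads = 255;
		sectors = 63;
	}
	params[0] = heads;
	params[1] = sectors;
	params[2] = (int)(capacity / (uint64_t)(heads * sectors));
}

int main(void)
{
	int p[3];

	example_bios_param(1ULL << 31, p);	/* a 1 TiB disk */
	printf("heads=%d sectors=%d cylinders=%d\n", p[0], p[1], p[2]);
	return 0;	/* prints heads=255 sectors=63 cylinders=133674 */
}
```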
*/ int -mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, - u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method) +mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, + uint id, uint lun, u8 type, u16 smid_task, u8 timeout, + u8 tr_method) { Mpi2SCSITaskManagementRequest_t *mpi_request; Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi25SCSIIORequest_t *request; u16 smid = 0; u32 ioc_state; + struct scsiio_tracker *scsi_lookup = NULL; int rc; - + u16 msix_task = 0; lockdep_assert_held(&ioc->tm_cmds.mutex); if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { - ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__); + printk(MPT3SAS_INFO_FMT "%s: tm_cmd busy!!!\n", + ioc->name, __func__); return FAILED; } if (ioc->shost_recovery || ioc->remove_host || - ioc->pci_error_recovery) { - ioc_info(ioc, "%s: host reset in progress!\n", __func__); + ioc->pci_error_recovery) { + printk(MPT3SAS_INFO_FMT "%s: host reset in progress!\n", + ioc->name, __func__); return FAILED; } ioc_state = mpt3sas_base_get_iocstate(ioc, 0); if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "unexpected doorbell " + "active!\n", ioc->name)); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); return (!rc) ? SUCCESS : FAILED; } @@ -2703,13 +3481,16 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); return FAILED; } - dtmprintk(ioc, - ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n", - handle, type, smid_task, timeout, tr_method)); + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = mpt3sas_get_st_from_smid(ioc, smid_task); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "sending tm: handle(0x%04x)," + " task_type(0x%02x), timeout(%d) tr_method(0x%x) smid(%d)\n", ioc->name, handle, type, + timeout, tr_method, smid_task)); ioc->tm_cmds.status = MPT3_CMD_PENDING; mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->tm_cmds.smid = smid; @@ -2723,30 +3504,33 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); mpt3sas_scsih_set_tm_flag(ioc, handle); init_completion(&ioc->tm_cmds.done); + if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) && + (scsi_lookup && (scsi_lookup->msix_io < ioc->reply_queue_count))) + msix_task = scsi_lookup->msix_io; + else + msix_task = 0; ioc->put_smid_hi_priority(ioc, smid, msix_task); wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { if (mpt3sas_base_check_cmd_timeout(ioc, ioc->tm_cmds.status, mpi_request, sizeof(Mpi2SCSITaskManagementRequest_t)/4)) { - rc = mpt3sas_base_hard_reset_handler(ioc, - FORCE_BIG_HAMMER); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); rc = (!rc) ? SUCCESS : FAILED; goto out; } } - /* sync IRQs in case those were busy during flush. 
*/ mpt3sas_base_sync_reply_irqs(ioc); if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); mpi_reply = ioc->tm_cmds.reply; - dtmprintk(ioc, - ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", - le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo), - le32_to_cpu(mpi_reply->TerminationCount))); + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "complete tm: " + "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); if (ioc->logging_level & MPT_DEBUG_TM) { _scsih_response_code(ioc, mpi_reply->ResponseCode); if (mpi_reply->IOCStatus) @@ -2754,26 +3538,90 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, sizeof(Mpi2SCSITaskManagementRequest_t)/4); } } - rc = SUCCESS; + + switch (type) { + case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + rc = SUCCESS; + /* + * If the DevHandle field in smid_task's entry of the request + * pool doesn't match the device handle on which this task + * abort TM was received, the TM successfully aborted the + * timed out command, since smid_task's entry in the request + * pool is memset to zero once the timed out command is + * returned to the SML. If the command was not aborted, + * smid_task's entry won't be cleared; it still holds the + * DevHandle on which this task abort TM was received, and + * the driver returns the TM status as FAILED. + */ + request = mpt3sas_base_get_msg_frame(ioc, smid_task); + if (le16_to_cpu(request->DevHandle) != handle) + break; + rc = FAILED; + break; + + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) { + rc = SUCCESS; + break; + } + if (_scsih_scsi_lookup_find_by_target(ioc, id, channel)) + rc = FAILED; + else + rc = SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) + rc = FAILED; + else + rc = SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; + } out: mpt3sas_scsih_clear_tm_flag(ioc, handle); ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + return rc; } -int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, - u64 lun, u8 type, u16 smid_task, u16 msix_task, - u8 timeout, u8 tr_method) +/** + * mpt3sas_scsih_issue_locked_tm - mutex-protected wrapper for mpt3sas_scsih_issue_tm() + * @ioc: per adapter struct + * @handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * @lun: lun number + * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) + * @smid_task: smid assigned to the task + * @timeout: timeout in seconds + * @tr_method: Target Reset Method + * Context: user + * + * This function acquires the tm_cmds mutex before calling + * mpt3sas_scsih_issue_tm() and releases it afterwards. + * + * Return SUCCESS or FAILED. 
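For callers, the locked variant means a one-off TM can be issued without open-coding the ioc->tm_cmds.mutex handling. A hypothetical caller showing the argument plumbing against the signature documented above (example_abort_one_task is not a real driver entry point):

```c
/* Assumes the driver's own headers (mpt3sas_base.h et al.) are in scope. */
static int example_abort_one_task(struct MPT3SAS_ADAPTER *ioc,
				  struct scsi_cmnd *scmd, u16 handle,
				  u16 smid_task)
{
	/* a LUN or target reset would pass smid_task = 0 instead */
	return mpt3sas_scsih_issue_locked_tm(ioc, handle,
			scmd->device->channel, scmd->device->id,
			scmd->device->lun,
			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			smid_task, 30 /* seconds */, 0 /* tr_method */);
}
```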
+ */ +int +mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, + uint id, uint lun, u8 type, u16 smid_task, u8 timeout, + u8 tr_method) { - int ret; + int ret; - mutex_lock(&ioc->tm_cmds.mutex); - ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task, - msix_task, timeout, tr_method); - mutex_unlock(&ioc->tm_cmds.mutex); + mutex_lock(&ioc->tm_cmds.mutex); + ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type, + smid_task, timeout, tr_method); + mutex_unlock(&ioc->tm_cmds.mutex); - return ret; + return ret; } /** @@ -2795,16 +3643,16 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) if (!priv_target) return; - if (ioc->hide_ir_msg) + + if (ioc->warpdrive_msg) device_str = "WarpDrive"; else device_str = "volume"; scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { - starget_printk(KERN_INFO, starget, - "%s handle(0x%04x), %s wwid(0x%016llx)\n", - device_str, priv_target->handle, + starget_printk(KERN_INFO, starget, "%s handle(0x%04x), " + "%s wwid(0x%016llx)\n", device_str, priv_target->handle, device_str, (unsigned long long)priv_target->sas_address); } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { @@ -2816,17 +3664,15 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) pcie_device->handle, (unsigned long long)pcie_device->wwid, pcie_device->port_num); - if (pcie_device->enclosure_handle != 0) + if(pcie_device->enclosure_handle != 0) starget_printk(KERN_INFO, starget, "enclosure logical id(0x%016llx), slot(%d)\n", - (unsigned long long) - pcie_device->enclosure_logical_id, + (unsigned long long)pcie_device->enclosure_logical_id, pcie_device->slot); - if (pcie_device->connector_name[0] != '\0') + if(pcie_device->connector_name[0] != '\0') starget_printk(KERN_INFO, starget, "enclosure level(0x%04x), connector name( %s)\n", - pcie_device->enclosure_level, - pcie_device->connector_name); + pcie_device->enclosure_level, pcie_device->connector_name); pcie_device_put(pcie_device); } spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); @@ -2844,13 +3690,12 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) (unsigned long long)sas_device->volume_wwid); } starget_printk(KERN_INFO, starget, - "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", - sas_device->handle, - (unsigned long long)sas_device->sas_address, - sas_device->phy); + "%s: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy); - _scsih_display_enclosure_chassis_info(NULL, sas_device, - NULL, starget); + _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget); sas_device_put(sas_device); } @@ -2859,31 +3704,41 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) } /** - * scsih_abort - eh threads main abort routine + * _scsih_abort - eh threads main abort routine * @scmd: pointer to scsi command object * - * Return: SUCCESS if command aborted else FAILED + * Returns SUCCESS if command aborted else FAILED */ static int -scsih_abort(struct scsi_cmnd *scmd) +_scsih_abort(struct scsi_cmnd *scmd) { - struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); struct MPT3SAS_DEVICE *sas_device_priv_data; - struct scsiio_tracker *st = scsi_cmd_priv(scmd); u16 handle; int r; - + struct scsiio_tracker *st = mpt3sas_base_scsi_cmd_priv(scmd); u8 timeout = 30; struct 
_pcie_device *pcie_device = NULL; - sdev_printk(KERN_INFO, scmd->device, - "attempting task abort! scmd(%p)\n", scmd); + + sdev_printk(KERN_INFO, scmd->device, "attempting task abort! " + "scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); + if (mpt3sas_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(%p)\n", + ((ioc->remove_host)?("shost is getting removed!"): + ("pci device been removed!")), scmd); + if (st && st->smid) + mpt3sas_base_free_smid(ioc, st->smid); + scmd->result = DID_NO_CONNECT << 16; + r = mpt3sas_determine_failed_or_fast_io_fail_status(); + goto out; + } + sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target || - ioc->remove_host) { - sdev_printk(KERN_INFO, scmd->device, - "device been deleted! scmd(%p)\n", scmd); + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, "device been deleted! " + "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -2906,19 +3761,18 @@ scsih_abort(struct scsi_cmnd *scmd) goto out; } - mpt3sas_halt_firmware(ioc); + mpt3sas_halt_firmware(ioc, 0); handle = sas_device_priv_data->sas_target->handle; pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); if (pcie_device && (!ioc->tm_custom_handling) && - (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) timeout = ioc->nvme_abort_timeout; - r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, - st->smid, st->msix_io, timeout, 0); - /* Command must be cleared after abort */ - if (r == SUCCESS && st->cb_idx != 0xFF) - r = FAILED; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid, timeout, + 0); + out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); @@ -2927,16 +3781,17 @@ scsih_abort(struct scsi_cmnd *scmd) return r; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) /** - * scsih_dev_reset - eh threads main device reset routine + * _scsih_dev_reset - eh threads main device reset routine * @scmd: pointer to scsi command object * - * Return: SUCCESS if command aborted else FAILED + * Returns SUCCESS if command aborted else FAILED */ static int -scsih_dev_reset(struct scsi_cmnd *scmd) +_scsih_dev_reset(struct scsi_cmnd *scmd) { - struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); struct MPT3SAS_DEVICE *sas_device_priv_data; struct _sas_device *sas_device = NULL; struct _pcie_device *pcie_device = NULL; @@ -2944,19 +3799,31 @@ scsih_dev_reset(struct scsi_cmnd *scmd) u8 tr_method = 0; u8 tr_timeout = 30; int r; - struct scsi_target *starget = scmd->device->sdev_target; struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; - sdev_printk(KERN_INFO, scmd->device, - "attempting device reset! scmd(%p)\n", scmd); + if (ioc->is_warpdrive) { + r = FAILED; + goto out; + } + + sdev_printk(KERN_INFO, scmd->device, "attempting device reset! 
" + "scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); + if (mpt3sas_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(%p)\n", + ((ioc->remove_host)?("shost is getting removed!"): + ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = mpt3sas_determine_failed_or_fast_io_fail_status(); + goto out; + } + sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target || - ioc->remove_host) { - sdev_printk(KERN_INFO, scmd->device, - "device been deleted! scmd(%p)\n", scmd); + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, "device been deleted! " + "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -2983,18 +3850,16 @@ scsih_dev_reset(struct scsi_cmnd *scmd) pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); if (pcie_device && (!ioc->tm_custom_handling) && - (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { tr_timeout = pcie_device->reset_timeout; tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; - } else + } + else tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; - r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, - tr_timeout, tr_method); - /* Check for busy commands after reset */ - if (r == SUCCESS && atomic_read(&scmd->device->device_busy)) - r = FAILED; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, tr_timeout, tr_method); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); @@ -3008,15 +3873,15 @@ scsih_dev_reset(struct scsi_cmnd *scmd) } /** - * scsih_target_reset - eh threads main target reset routine + * _scsih_target_reset - eh threads main target reset routine * @scmd: pointer to scsi command object * - * Return: SUCCESS if command aborted else FAILED + * Returns SUCCESS if command aborted else FAILED */ static int -scsih_target_reset(struct scsi_cmnd *scmd) +_scsih_target_reset(struct scsi_cmnd *scmd) { - struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); struct MPT3SAS_DEVICE *sas_device_priv_data; struct _sas_device *sas_device = NULL; struct _pcie_device *pcie_device = NULL; @@ -3027,15 +3892,23 @@ scsih_target_reset(struct scsi_cmnd *scmd) struct scsi_target *starget = scmd->device->sdev_target; struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; - starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n", - scmd); + starget_printk(KERN_INFO, starget, "attempting target reset! " + "scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); + if (mpt3sas_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(%p)\n", + ((ioc->remove_host)?("shost is getting removed!"): + ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = mpt3sas_determine_failed_or_fast_io_fail_status(); + goto out; + } + sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target || - ioc->remove_host) { - starget_printk(KERN_INFO, starget, "target been deleted! 
scmd(%p)\n", - scmd); + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, "target been deleted! " + "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3062,17 +3935,17 @@ scsih_target_reset(struct scsi_cmnd *scmd) pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); if (pcie_device && (!ioc->tm_custom_handling) && - (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { tr_timeout = pcie_device->reset_timeout; tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; - } else + } + else tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; - r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0, - MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, + + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, tr_timeout, tr_method); - /* Check for busy commands after reset */ - if (r == SUCCESS && atomic_read(&starget->target_busy)) - r = FAILED; + out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); @@ -3081,36 +3954,128 @@ scsih_target_reset(struct scsi_cmnd *scmd) sas_device_put(sas_device); if (pcie_device) pcie_device_put(pcie_device); + return r; } +#else /* prior to 2.6.26 kernel */ /** - * scsih_host_reset - eh threads main host reset routine + * _scsih_dev_reset - eh threads main device reset routine * @scmd: pointer to scsi command object * - * Return: SUCCESS if command aborted else FAILED + * Returns SUCCESS if command aborted else FAILED */ -static int -scsih_host_reset(struct scsi_cmnd *scmd) +int +_scsih_dev_reset(struct scsi_cmnd *scmd) { - struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); - int r, retval; + struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + unsigned long flags; + u16 handle; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct _pcie_device *pcie_device = NULL; + u8 tr_timeout = 30; + u8 tr_method = 0; - ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd); - scsi_print_command(scmd); + starget_printk(KERN_INFO, starget, "attempting target reset! " + "scmd(%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); - if (ioc->is_driver_loading || ioc->remove_host) { - ioc_info(ioc, "Blocking the host reset\n"); + if (mpt3sas_base_pci_device_is_unplugged(ioc)) { + sdev_printk(KERN_INFO, scmd->device, + "pci device been removed! scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, "target been deleted! 
" + "scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, + sas_device_priv_data->sas_target->handle); + if (sas_device) + handle = sas_device->volume_handle; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; r = FAILED; goto out; } + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + tr_timeout = pcie_device->reset_timeout; + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } + else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + tr_timeout, tr_method); + + out: + starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + sas_device_put(sas_device); + if (pcie_device) + pcie_device_put(pcie_device); + return r; +} +#endif + +/** + * _scsih_host_reset - eh threads main host reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +static int +_scsih_host_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); + int r, retval; + + printk(MPT3SAS_INFO_FMT "attempting host reset! scmd(%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + + if (ioc->is_driver_loading || ioc->remove_host){ + printk(MPT3SAS_INFO_FMT "Blocking the host reset\n", + ioc->name); + r = FAILED; + goto out; + } + retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); r = (retval < 0) ? FAILED : SUCCESS; -out: - ioc_info(ioc, "host reset: %s scmd(%p)\n", - r == SUCCESS ? "SUCCESS" : "FAILED", scmd); + + out: + printk(MPT3SAS_INFO_FMT "host reset: %s scmd(%p)\n", + ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; } @@ -3123,6 +4088,8 @@ out: * * This adds the firmware event object into link list, then queues it up to * be processed from user context. + * + * Return nothing. */ static void _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) @@ -3136,7 +4103,11 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) fw_event_work_get(fw_event); INIT_LIST_HEAD(&fw_event->list); list_add_tail(&fw_event->list, &ioc->fw_event_list); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) INIT_WORK(&fw_event->work, _firmware_event_work); +#else + INIT_WORK(&fw_event->work, _firmware_event_work, (void *)fw_event); +#endif fw_event_work_get(fw_event); queue_work(ioc->firmware_event_thread, &fw_event->work); spin_unlock_irqrestore(&ioc->fw_event_lock, flags); @@ -3149,6 +4120,8 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) * Context: This function will acquire ioc->fw_event_lock. * * If the fw_event is on the fw_event_list, remove it and do a put. + * + * Return nothing. 
*/ static void _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work @@ -3164,25 +4137,64 @@ _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } +/** + * _scsih_fw_event_requeue - requeue an event + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * Return nothing. + */ +static void +_scsih_fw_event_requeue(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work + *fw_event, unsigned long delay) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + fw_event_work_get(fw_event); + list_add_tail(&fw_event->list, &ioc->fw_event_list); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) + if (!fw_event->delayed_work_active) { + fw_event->delayed_work_active = 1; + INIT_DELAYED_WORK(&fw_event->delayed_work, + _firmware_event_work_delayed); + } + queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, + msecs_to_jiffies(delay)); +#else + queue_delayed_work(ioc->firmware_event_thread, &fw_event->work, + msecs_to_jiffies(delay)); +#endif + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} /** * mpt3sas_send_trigger_data_event - send event for processing trigger data * @ioc: per adapter object * @event_data: trigger event data + * + * Return nothing. */ void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) { struct fw_event_work *fw_event; - u16 sz; if (ioc->is_driver_loading) return; - sz = sizeof(*event_data); - fw_event = alloc_fw_event_work(sz); + fw_event = alloc_fw_event_work(sizeof(*event_data)); if (!fw_event) return; + fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC); + if (!fw_event->event_data) { + fw_event_work_put(fw_event); + return; + } fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; fw_event->ioc = ioc; memcpy(fw_event->event_data, event_data, sizeof(*event_data)); @@ -3193,6 +4205,8 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, /** * _scsih_error_recovery_delete_devices - remove devices not responding * @ioc: per adapter object + * + * Return nothing. */ static void _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) @@ -3213,6 +4227,8 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_port_enable_complete - port enable completed (fake event) * @ioc: per adapter object + * + * Return nothing. */ void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) @@ -3228,6 +4244,14 @@ mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) fw_event_work_put(fw_event); } +/** + * dequeue_next_fw_event - Dequeue the firmware event from list + * @ioc: per adapter object + * + * dequeue the firmware event from firmware event list + * + * Return firmware event. + */ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) { unsigned long flags; @@ -3250,6 +4274,8 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) * * Walk the firmware event queue, either killing timers, or waiting * for outstanding events to complete + * + * Return nothing. */ static void _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) @@ -3261,22 +4287,28 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) return; while ((fw_event = dequeue_next_fw_event(ioc))) { - /* - * Wait on the fw_event to complete. 
If this returns 1, then - * the event was never executed, and we need a put for the - * reference the work had on the fw_event. - * - * If it did execute, we wait for it to finish, and the put will - * happen from _firmware_event_work() - */ - if (cancel_work_sync(&fw_event->work)) - fw_event_work_put(fw_event); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) + if (fw_event->delayed_work_active) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) + cancel_delayed_work_sync(&fw_event->delayed_work); +#else + cancel_delayed_work(&fw_event->delayed_work); +#endif + else + cancel_work_sync(&fw_event->work); fw_event_work_put(fw_event); + +#else + cancel_delayed_work(&fw_event->work); + fw_event_work_put(fw_event); + +#endif + fw_event_work_put(fw_event); } } -/** + /** * _scsih_internal_device_block - block the sdev device * @sdev: per device object * @sas_device_priv_data : per device driver private data @@ -3294,7 +4326,16 @@ _scsih_internal_device_block(struct scsi_device *sdev, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)) || \ + (defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR > 3)) || \ + (defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14))) r = scsi_internal_device_block_nowait(sdev); +#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,59)) || LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) + r = scsi_internal_device_block(sdev, false); +#else + r = scsi_internal_device_block(sdev); +#endif + if (r == -EINVAL) sdev_printk(KERN_WARNING, sdev, "device_block failed with return(%d) for handle(0x%04x)\n", @@ -3318,7 +4359,15 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, " "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) || \ + (defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14))) r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); +#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) + r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); +#else + r = scsi_internal_device_unblock(sdev); +#endif + if (r == -EINVAL) { /* The device has been set to SDEV_RUNNING by SD layer during * device addition but the request queue is still stopped by @@ -3330,14 +4379,29 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, "performing a block followed by an unblock\n", r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)) || \ + (defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR > 3)) || \ + (defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14))) r = scsi_internal_device_block_nowait(sdev); +#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,59)) || LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) + r = scsi_internal_device_block(sdev, false); +#else + r = scsi_internal_device_block(sdev); +#endif if (r) sdev_printk(KERN_WARNING, sdev, "retried device_block " "failed with return(%d) for handle(0x%04x)\n", r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) || \ + (defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14))) r = 
scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+		r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+#else
+		r = scsi_internal_device_unblock(sdev);
+#endif
 		if (r)
 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
 			    " failed with return(%d) for handle(0x%04x)\n",
@@ -3348,39 +4412,259 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
 /**
  * _scsih_ublock_io_all_device - unblock every device
  * @ioc: per adapter object
+ * @no_turs: don't issue TEST_UNIT_READY
  *
- * change the device state from block to running
+ * make sure the device is responding before unblocking
  */
 static void
-_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc, u8 no_turs)
 {
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target;
+	enum device_responsive_state rc;
 	struct scsi_device *sdev;
+	struct _sas_device *sas_device = NULL;
+	struct _pcie_device *pcie_device = NULL;
+	int count;
+	u8 tr_timeout = 30;
+	u8 tr_method = 0;
 
 	shost_for_each_device(sdev, ioc->shost) {
 		sas_device_priv_data = sdev->hostdata;
 		if (!sas_device_priv_data)
 			continue;
+		sas_target = sas_device_priv_data->sas_target;
+		if (!sas_target || sas_target->deleted)
+			continue;
 		if (!sas_device_priv_data->block)
 			continue;
+		count = 0;
+		if ((no_turs) || (!issue_scsi_cmd_to_bringup_drive)) {
+			sdev_printk(KERN_WARNING, sdev, "device_unblocked, "
+			    "handle(0x%04x)\n",
+			    sas_device_priv_data->sas_target->handle);
+			_scsih_internal_device_unblock(sdev, sas_device_priv_data);
+			continue;
+		}
+
+		do {
+			pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle);
+			if (pcie_device && (!ioc->tm_custom_handling) &&
+			    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
+				tr_timeout = pcie_device->reset_timeout;
+				tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+			}
+			rc = _scsih_wait_for_device_to_become_ready(ioc,
+			    sas_target->handle, 0, (sas_target->flags &
+			    MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method);
+			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+				ssleep(1);
+			if (pcie_device)
+				pcie_device_put(pcie_device);
+		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+		    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+		    && count++ < command_retry_count);
+		sas_device_priv_data->block = 0;
+		if (rc != DEVICE_READY)
+			sas_device_priv_data->deleted = 1;
-		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
-		    "device_running, handle(0x%04x)\n",
-		    sas_device_priv_data->sas_target->handle));
 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
+
+		if (rc != DEVICE_READY) {
+			sdev_printk(KERN_WARNING, sdev, "%s: device_offlined, "
+			    "handle(0x%04x)\n",
+			    __func__, sas_device_priv_data->sas_target->handle);
+			scsi_device_set_state(sdev, SDEV_OFFLINE);
+			sas_device = mpt3sas_get_sdev_by_addr(ioc,
+			    sas_device_priv_data->sas_target->sas_address,
+			    sas_device_priv_data->sas_target->port);
+			if (sas_device) {
+				_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
+				sas_device_put(sas_device);
+			} else {
+				pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
+				    sas_device_priv_data->sas_target->sas_address);
+				if (pcie_device) {
+					if (pcie_device->enclosure_handle != 0)
+						sdev_printk(KERN_INFO, sdev, "enclosure logical id"
+						    "(0x%016llx), slot(%d)\n", (unsigned long long)
+						    pcie_device->enclosure_logical_id,
+						    pcie_device->slot);
+					if (pcie_device->connector_name[0] != '\0')
+						sdev_printk(KERN_INFO, sdev, "enclosure level(0x%04x),"
+						    " connector name( %s)\n",
+						    pcie_device->enclosure_level,
+						    pcie_device->connector_name);
+					pcie_device_put(pcie_device);
+				}
+			}
+		} else
+			sdev_printk(KERN_WARNING, sdev, "device_unblocked, "
+			    "handle(0x%04x)\n",
+			    sas_device_priv_data->sas_target->handle);
 	}
 }
 
+/**
+ * _scsih_ublock_io_device_wait - unblock IO for target
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @port: hba port entry
+ *
+ * make sure the device is responding before unblocking
+ */
+static void
+_scsih_ublock_io_device_wait(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+    struct hba_port *port)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target;
+	enum device_responsive_state rc;
+	struct scsi_device *sdev;
+	int count, host_reset_completion_count;
+	struct _sas_device *sas_device;
+	struct _pcie_device *pcie_device;
+	u8 tr_timeout = 30;
+	u8 tr_method = 0;
+
+	/* moving devices from SDEV_OFFLINE to SDEV_BLOCK */
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		sas_target = sas_device_priv_data->sas_target;
+		if (!sas_target)
+			continue;
+		if (sas_target->sas_address != sas_address ||
+		    sas_target->port != port)
+			continue;
+		if (sdev->sdev_state == SDEV_OFFLINE) {
+			sas_device_priv_data->block = 1;
+			sas_device_priv_data->deleted = 0;
+			scsi_device_set_state(sdev, SDEV_RUNNING);
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)) || \
+	(defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR > 3)) || \
+	(defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14)))
+			scsi_internal_device_block_nowait(sdev);
+#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,59)) || LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
+			scsi_internal_device_block(sdev, false);
+#else
+			scsi_internal_device_block(sdev);
+#endif
+		}
+	}
+
+	/* moving devices from SDEV_BLOCK to SDEV_RUNNING state */
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		sas_target = sas_device_priv_data->sas_target;
+		if (!sas_target)
+			continue;
+		if (sas_target->sas_address != sas_address ||
+		    sas_target->port != port)
+			continue;
+		if (!sas_device_priv_data->block)
+			continue;
+		count = 0;
+		do {
+			host_reset_completion_count = 0;
+			pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle);
+			if (pcie_device && (!ioc->tm_custom_handling) &&
+			    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
+				tr_timeout = pcie_device->reset_timeout;
+				tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+			}
+			rc = _scsih_wait_for_device_to_become_ready(ioc,
+			    sas_target->handle, 0, (sas_target->flags &
+			    MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method);
+			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) {
+				do {
+					msleep(500);
+					host_reset_completion_count++;
+				} while (rc == DEVICE_RETRY &&
+				    ioc->shost_recovery);
+				if (host_reset_completion_count > 1) {
+					rc = _scsih_wait_for_device_to_become_ready(ioc,
+					    sas_target->handle, 0, (sas_target->flags &
+					    MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun,
+					    tr_timeout, tr_method);
+					if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+					    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+						msleep(500);
+				}
+				continue;
+			}
+			if (pcie_device)
+				pcie_device_put(pcie_device);
+		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+		    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+		    && count++ <= command_retry_count);
+
+		sas_device_priv_data->block = 0;
+		if (rc != DEVICE_READY)
+			sas_device_priv_data->deleted = 1;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) || \
+	(defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14)))
+		scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+		scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+#else
+		scsi_internal_device_unblock(sdev);
+#endif
+
+		if (rc != DEVICE_READY) {
+			sdev_printk(KERN_WARNING, sdev,
+			    "%s: device_offlined, handle(0x%04x)\n",
+			    __func__, sas_device_priv_data->sas_target->handle);
+
+			sas_device = mpt3sas_get_sdev_by_handle(ioc,
+			    sas_device_priv_data->sas_target->handle);
+			if (sas_device) {
+				_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
+				sas_device_put(sas_device);
+			} else {
+				pcie_device = mpt3sas_get_pdev_by_handle(ioc,
+				    sas_device_priv_data->sas_target->handle);
+				if (pcie_device) {
+					if (pcie_device->enclosure_handle != 0)
+						sdev_printk(KERN_INFO, sdev,
+						    "device_offlined, enclosure logical id(0x%016llx),"
+						    " slot(%d)\n", (unsigned long long)
+						    pcie_device->enclosure_logical_id,
+						    pcie_device->slot);
+					if (pcie_device->connector_name[0] != '\0')
+						sdev_printk(KERN_WARNING, sdev,
+						    "device_offlined, enclosure level(0x%04x), "
+						    "connector name( %s)\n",
+						    pcie_device->enclosure_level,
+						    pcie_device->connector_name);
+					pcie_device_put(pcie_device);
+				}
+			}
+			scsi_device_set_state(sdev, SDEV_OFFLINE);
+		} else {
+			sdev_printk(KERN_WARNING, sdev,
+			    "device_unblocked, handle(0x%04x)\n",
+			    sas_device_priv_data->sas_target->handle);
+		}
+	}
+}
 
 /**
  * _scsih_ublock_io_device - prepare device to be deleted
  * @ioc: per adapter object
  * @sas_address: sas address
+ * @port: hba port entry
  *
  * unblock then put device in offline state
  */
 static void
-_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+    struct hba_port *port)
 {
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	struct scsi_device *sdev;
@@ -3390,19 +4674,65 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
 		if (!sas_device_priv_data)
 			continue;
 		if (sas_device_priv_data->sas_target->sas_address
-		    != sas_address)
+		    != sas_address ||
+		    sas_device_priv_data->sas_target->port
+		    != port)
 			continue;
-		if (sas_device_priv_data->block)
-			_scsih_internal_device_unblock(sdev,
-			    sas_device_priv_data);
+		if (sas_device_priv_data->block) {
+			_scsih_internal_device_unblock(sdev, sas_device_priv_data);
+		}
+		scsi_device_set_state(sdev, SDEV_OFFLINE);
 	}
 }
 
+/**
+ * _scsih_ublock_io_device_to_running - set the device state to SDEV_RUNNING
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @port: hba port entry
+ *
+ * unblock the device to receive IO during device addition. Device
+ * responsiveness is not checked before unblocking
+ */
+static void
+_scsih_ublock_io_device_to_running(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+    struct hba_port *port)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		if (sas_device_priv_data->sas_target->sas_address
+		    != sas_address ||
+		    sas_device_priv_data->sas_target->port
+		    != port)
+			continue;
+		if (sas_device_priv_data->block) {
+			sas_device_priv_data->block = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) || \
+	(defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14)))
+			scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+			scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+#else
+			scsi_internal_device_unblock(sdev);
+#endif
+			sdev_printk(KERN_WARNING, sdev,
+			    "device_unblocked, handle(0x%04x)\n",
+			    sas_device_priv_data->sas_target->handle);
+		}
 	}
 }
 
 /**
  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
  * @ioc: per adapter object
  *
  * During device pull we need to appropriately set the sdev state.
  */
 static void
 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -3431,7 +4761,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
  * @ioc: per adapter object
  * @handle: device handle
  *
  * During device pull we need to appropriately set the sdev state.
  */
 static void
 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3460,7 +4790,6 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 		}
 		_scsih_internal_device_block(sdev, sas_device_priv_data);
 	}
-
 	if (sas_device)
 		sas_device_put(sas_device);
 }
@@ -3492,10 +4821,11 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
 		    SAS_END_DEVICE) {
 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
-			    mpt3sas_port->remote_identify.sas_address);
+			    mpt3sas_port->remote_identify.sas_address,
+			    mpt3sas_port->hba_port);
 			if (sas_device) {
 				set_bit(sas_device->handle,
 				    ioc->blocking_handles);
 				sas_device_put(sas_device);
 			}
 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -3511,7 +4841,8 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
 		    SAS_FANOUT_EXPANDER_DEVICE) {
 			expander_sibling =
 			    mpt3sas_scsih_expander_find_by_sas_address(
-			    ioc, mpt3sas_port->remote_identify.sas_address);
+			    ioc, mpt3sas_port->remote_identify.sas_address,
+			    mpt3sas_port->hba_port);
 			_scsih_block_io_to_children_attached_to_ex(ioc,
 			    expander_sibling);
 		}
@@ -3524,7 +4855,7 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
  * @event_data: topology change event data
  *
  * This routine set sdev state to SDEV_BLOCK for all devices
- * direct attached during device pull.
+ * direct attached during device pull/reconnect.
  */
 static void
 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
@@ -3554,21 +4885,19 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
 * direct attached during device pull/reconnect.
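/*
 * A condensed sketch (not the driver's literal code) of the compatibility
 * ladder the patch repeats at every block/unblock call site above. The helper
 * name is illustrative only; the version cut-offs are exactly the ones the
 * surrounding hunks test for, and the scsi_internal_device_* routines are
 * SCSI-midlayer internals that this out-of-tree driver already reaches into.
 */
#include <linux/version.h>
#include <scsi/scsi_device.h>

static int compat_scsi_device_unblock(struct scsi_device *sdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) || \
	(defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14)))
	/* 4.13+ exposes a variant that does not sleep, usable here */
	return scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
#elif ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || \
	LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	/* 3.6+ added the explicit new-state argument */
	return scsi_internal_device_unblock(sdev, SDEV_RUNNING);
#else
	/* older kernels: single-argument form, always resumes the queue */
	return scsi_internal_device_unblock(sdev);
#endif
}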
*/ static void -_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, - Mpi26EventDataPCIeTopologyChangeList_t *event_data) +_scsih_block_io_to_pcie_children_attached_directly( struct MPT3SAS_ADAPTER *ioc, + Mpi26EventDataPCIeTopologyChangeList_t *event_data) { int i; u16 handle; u16 reason_code; for (i = 0; i < event_data->NumEntries; i++) { - handle = - le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + handle = le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); if (!handle) continue; reason_code = event_data->PortEntry[i].PortStatus; - if (reason_code == - MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) + if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) _scsih_block_io_device(ioc, handle); } } @@ -3599,19 +4928,20 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) unsigned long flags; struct _tr_list *delayed_tr; u32 ioc_state; + struct hba_port *port = NULL; u8 tr_method = 0; if (ioc->pci_error_recovery) { - dewtprintk(ioc, - ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n", - __func__, handle)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host in pci " + "error recovery: handle(0x%04x)\n", __func__, ioc->name, + handle)); return; } ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { - dewtprintk(ioc, - ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n", - __func__, handle)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host is not " + "operational: handle(0x%04x)\n", __func__, ioc->name, + handle)); return; } @@ -3628,6 +4958,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_target_priv_data = sas_device->starget->hostdata; sas_target_priv_data->deleted = 1; sas_address = sas_device->sas_address; + port = sas_device->port; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!sas_device) { @@ -3637,45 +4968,34 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) pcie_device->starget->hostdata) { sas_target_priv_data = pcie_device->starget->hostdata; sas_target_priv_data->deleted = 1; - sas_address = pcie_device->wwid; + sas_address =pcie_device->wwid; } spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (pcie_device && (!ioc->tm_custom_handling) && - (!(mpt3sas_scsih_is_pcie_scsi_device( - pcie_device->device_info)))) - tr_method = - MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; else tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; } if (sas_target_priv_data) { - dewtprintk(ioc, - ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", - handle, (u64)sas_address)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting delete flag: " + "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, + handle, (unsigned long long)sas_address)); if (sas_device) { - if (sas_device->enclosure_handle != 0) - dewtprintk(ioc, - ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n", - (u64)sas_device->enclosure_logical_id, - sas_device->slot)); - if (sas_device->connector_name[0] != '\0') - dewtprintk(ioc, - ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n", - sas_device->enclosure_level, - sas_device->connector_name)); + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); } else if (pcie_device) { if (pcie_device->enclosure_handle != 0) - dewtprintk(ioc, - 
ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n", - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot)); - if (pcie_device->connector_name[0] != '\0') - dewtprintk(ioc, - ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n", - pcie_device->enclosure_level, - pcie_device->connector_name)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "setting delete flag: " + "enclosure logical id(0x%016llx), slot(%d) \n",ioc->name, + (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if(pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "setting delete flag: " + "enclosure level(0x%04x), connector name( %s)\n", ioc->name, + pcie_device->enclosure_level, pcie_device->connector_name)); } - _scsih_ublock_io_device(ioc, sas_address); + _scsih_ublock_io_device(ioc, sas_address, port); sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; } @@ -3687,15 +5007,15 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) INIT_LIST_HEAD(&delayed_tr->list); delayed_tr->handle = handle; list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); - dewtprintk(ioc, - ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", - handle)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); goto out; } - dewtprintk(ioc, - ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", - handle, smid, ioc->tm_tr_cb_idx)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "tr_send:handle(0x%04x), " + "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid, + ioc->tm_tr_cb_idx)); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; @@ -3705,7 +5025,6 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) set_bit(handle, ioc->device_remove_in_progress); ioc->put_smid_hi_priority(ioc, smid, 0); mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); - out: if (sas_device) sas_device_put(sas_device); @@ -3724,10 +5043,12 @@ out: * This is the target reset completion routine. * This code is part of the code to initiate the device removal * handshake protocol with controller firmware. - * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) + * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) or + * iounit control request (MPI26_CTRL_OP_REMOVE_DEVICE) based on controller's + * MPI version * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
*/ static u8 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -3743,39 +5064,37 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, struct _sc_list *delayed_sc; if (ioc->pci_error_recovery) { - dewtprintk(ioc, - ioc_info(ioc, "%s: host in pci error recovery\n", - __func__)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host in pci " + "error recovery\n", __func__, ioc->name)); return 1; } ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { - dewtprintk(ioc, - ioc_info(ioc, "%s: host is not operational\n", - __func__)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host is not " + "operational\n", __func__, ioc->name)); return 1; } if (unlikely(!mpi_reply)) { - ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 1; } mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); handle = le16_to_cpu(mpi_request_tm->DevHandle); if (handle != le16_to_cpu(mpi_reply->DevHandle)) { - dewtprintk(ioc, - ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", - handle, - le16_to_cpu(mpi_reply->DevHandle), smid)); + dewtprintk(ioc, printk(MPT3SAS_ERR_FMT "spurious interrupt: " + "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); return 0; } mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); - dewtprintk(ioc, - ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", - handle, smid, le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo), - le32_to_cpu(mpi_reply->TerminationCount))); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); if (!smid_sas_ctrl) { @@ -3785,19 +5104,25 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, INIT_LIST_HEAD(&delayed_sc->list); delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle); list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); - dewtprintk(ioc, - ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n", - handle)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "DELAYED:sc:handle(0x%04x), (open)\n", + ioc->name, handle)); return _scsih_check_for_pending_tm(ioc, smid); } - dewtprintk(ioc, - ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", - handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "sc_send:handle(0x%04x), " + "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl, + ioc->tm_sas_control_cb_idx)); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); - memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); - mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; - mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + if (ioc->hba_mpi_version_belonged < MPI26_VERSION ) { + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + } else { + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + 
mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_REMOVE_DEVICE; + } mpi_request->DevHandle = mpi_request_tm->DevHandle; ioc->put_smid_default(ioc, smid_sas_ctrl); @@ -3825,7 +5150,12 @@ inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, return true; } + if (ioc->adapter_over_temp) + return false; + if (ioc->remove_host) { + if (mpt3sas_base_pci_device_is_unplugged(ioc)) + return false; switch (scmd->cmnd[0]) { case SYNCHRONIZE_CACHE: @@ -3847,34 +5177,56 @@ inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, * @reply: reply message frame(lower 32bit addr) * Context: interrupt time. * - * This is the sas iounit control completion routine. + * This is the sas iounit / iounit control completion routine. * This code is part of the code to initiate the device removal * handshake protocol with controller firmware. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ static u8 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { - Mpi2SasIoUnitControlReply_t *mpi_reply = - mpt3sas_base_get_reply_virt_addr(ioc, reply); + MPI2DefaultReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + u16 dev_handle; + if (likely(mpi_reply)) { - dewtprintk(ioc, - ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", - le16_to_cpu(mpi_reply->DevHandle), smid, - le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo))); - if (le16_to_cpu(mpi_reply->IOCStatus) == - MPI2_IOCSTATUS_SUCCESS) { - clear_bit(le16_to_cpu(mpi_reply->DevHandle), + if (ioc->hba_mpi_version_belonged < MPI26_VERSION ) { + dev_handle = ((Mpi2SasIoUnitControlReply_t *)mpi_reply)->DevHandle; + } else { + dev_handle = ((Mpi26IoUnitControlReply_t *)mpi_reply)->DevHandle; + } + + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "sc_complete:handle(0x%04x), (open) " + "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(dev_handle), smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + if (le16_to_cpu(mpi_reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) { + clear_bit(le16_to_cpu(dev_handle), + ioc->device_remove_in_progress); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0; + } else if (ioc->tm_tr_retry[le16_to_cpu(dev_handle)] < 3){ + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "re-initiating tm_tr_send:handle(0x%04x)\n", + ioc->name, le16_to_cpu(dev_handle))); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)]++; + _scsih_tm_tr_send(ioc, le16_to_cpu(dev_handle)); + } else { + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "Exiting out of tm_tr_send retries:handle(0x%04x)\n", + ioc->name, le16_to_cpu(dev_handle))); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0; + clear_bit(le16_to_cpu(dev_handle), ioc->device_remove_in_progress); } } else { - ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); } return mpt3sas_check_for_pending_internal_cmds(ioc, smid); } @@ -3897,9 +5249,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) struct _tr_list *delayed_tr; if (ioc->pci_error_recovery) { - dewtprintk(ioc, - ioc_info(ioc, "%s: host reset in progress!\n", - __func__)); + dewtprintk(ioc, 
printk(MPT3SAS_INFO_FMT "%s: host reset in " + "progress!\n", __func__, ioc->name)); return; } @@ -3911,15 +5262,15 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) INIT_LIST_HEAD(&delayed_tr->list); delayed_tr->handle = handle; list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); - dewtprintk(ioc, - ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", - handle)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); return; } - dewtprintk(ioc, - ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", - handle, smid, ioc->tm_tr_volume_cb_idx)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "tr_send:handle(0x%04x), " + "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid, + ioc->tm_tr_volume_cb_idx)); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; @@ -3936,8 +5287,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) * @reply: reply message frame(lower 32bit addr) * Context: interrupt time. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ static u8 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, @@ -3949,36 +5300,120 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, mpt3sas_base_get_reply_virt_addr(ioc, reply); if (ioc->shost_recovery || ioc->pci_error_recovery) { - dewtprintk(ioc, - ioc_info(ioc, "%s: host reset in progress!\n", - __func__)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host reset in " + "progress!\n", __func__, ioc->name)); return 1; } if (unlikely(!mpi_reply)) { - ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 1; } mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); handle = le16_to_cpu(mpi_request_tm->DevHandle); if (handle != le16_to_cpu(mpi_reply->DevHandle)) { - dewtprintk(ioc, - ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", - handle, le16_to_cpu(mpi_reply->DevHandle), - smid)); + dewtprintk(ioc, printk(MPT3SAS_ERR_FMT "spurious interrupt: " + "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); return 0; } - dewtprintk(ioc, - ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", - handle, smid, le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo), - le32_to_cpu(mpi_reply->TerminationCount))); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); return _scsih_check_for_pending_tm(ioc, smid); } +/** + * _scsih_tm_internal_tr_send - send target reset request + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This is designed to send multiple task management request (TR)at the same + * time to the fifo. If the fifo is full, we will append the request, + * and process it in a future completion. 
+ */ +static void +_scsih_tm_internal_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _tr_list *delayed_tr; + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _pcie_device *pcie_device; + u8 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_internal_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_internal_tm_list); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "tr_send:handle(0x%04x), " + "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid, + ioc->tm_tr_internal_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + ioc->put_smid_hi_priority(ioc, smid, 0); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); +} + +/** + * _scsih_tm_internal_tr_complete - internal target reset completion + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */
+static u8
+_scsih_tm_internal_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u8 msix_index, u32 reply)
+{
+	Mpi2SCSITaskManagementReply_t *mpi_reply =
+	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+	if (likely(mpi_reply)) {
+		dewtprintk(ioc, printk(MPT3SAS_INFO_FMT
+		    "tr_complete:handle(0x%04x), (open) "
+		    "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+		    ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+		    le16_to_cpu(mpi_reply->IOCStatus),
+		    le32_to_cpu(mpi_reply->IOCLogInfo)));
+	} else {
+		printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return 1;
+	}
+	return _scsih_check_for_pending_tm(ioc, smid);
+}
+
 /**
  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
  * @ioc: per adapter object
@@ -4004,9 +5439,9 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
-	dewtprintk(ioc,
-	    ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
-	    le16_to_cpu(event), smid, ioc->base_cb_idx));
+	dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "EVENT ACK: event(0x%04x), "
+	    "smid(%d), cb(%d)\n", ioc->name, le16_to_cpu(event), smid,
+	    ioc->base_cb_idx));
 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -4018,8 +5453,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
 }
 
 /**
- * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
- *	sas_io_unit_ctrl messages
+ * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed sas_io_unit_ctrl messages
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
@@ -4027,8 +5461,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
  * Context - processed in interrupt context.
 */
 static void
-_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
-	u16 smid, u16 handle)
+_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
 {
 	Mpi2SasIoUnitControlRequest_t *mpi_request;
 	u32 ioc_state;
@@ -4036,21 +5469,18 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
 	unsigned long flags;
 
 	if (ioc->remove_host) {
-		dewtprintk(ioc,
-		    ioc_info(ioc, "%s: host has been removed\n",
-		    __func__));
+		dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host has been "
+		    "removed\n", __func__, ioc->name));
 		return;
 	} else if (ioc->pci_error_recovery) {
-		dewtprintk(ioc,
-		    ioc_info(ioc, "%s: host in pci error recovery\n",
-		    __func__));
+		dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host in pci "
+		    "error recovery\n", __func__, ioc->name));
 		return;
 	}
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		dewtprintk(ioc,
-		    ioc_info(ioc, "%s: host is not operational\n",
-		    __func__));
+		dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: host is not "
+		    "operational\n", __func__, ioc->name));
 		return;
 	}
@@ -4062,13 +5492,19 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
-	dewtprintk(ioc,
-	    ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-	    handle, smid, ioc->tm_sas_control_cb_idx));
+	dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "sc_send:handle(0x%04x), "
+	    "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
+	    ioc->tm_sas_control_cb_idx));
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
-	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
-	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
-	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+	if (ioc->hba_mpi_version_belonged < MPI26_VERSION) {
+		memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+		mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+		mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+	} else {
+		memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+		mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+		mpi_request->Operation = MPI26_CTRL_OP_REMOVE_DEVICE;
+	}
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	ioc->put_smid_default(ioc, smid);
 }
@@ -4083,8 +5519,8 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
  * This will check delayed internal messages list, and process the
  * next request.
  *
- * Return: 1 meaning mf should be freed from _base_interrupt
- *	   0 means the mf is freed from this function.
+ * Return 1 meaning mf should be freed from _base_interrupt
+ *	  0 means the mf is freed from this function.
  */
 u8
 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4103,7 +5539,7 @@ mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 	}
 
 	if (!list_empty(&ioc->delayed_sc_list)) {
 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
 		    struct _sc_list, list);
 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
 		    delayed_sc->handle);
@@ -4122,8 +5558,8 @@ mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  * This will check delayed target reset list, and feed the
 * next request.
 *
- * Return: 1 meaning mf should be freed from _base_interrupt
- *	   0 means the mf is freed from this function.
+ * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) @@ -4150,6 +5586,16 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) return 0; } + if (!list_empty(&ioc->delayed_internal_tm_list)) { + delayed_tr = list_entry(ioc->delayed_internal_tm_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_internal_tr_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + return 1; } @@ -4163,6 +5609,8 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) * This handles the case where driver receives multiple expander * add and delete events in a single shot. When there is a delete event * the routine will void any pending add events waiting in the event queue. + * + * Return nothing. */ static void _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, @@ -4217,16 +5665,15 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || fw_event->ignore) continue; - local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) - fw_event->event_data; + local_event_data = fw_event->event_data; if (local_event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED || local_event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { if (le16_to_cpu(local_event_data->ExpanderDevHandle) == expander_handle) { - dewtprintk(ioc, - ioc_info(ioc, "setting ignoring flag\n")); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "setting ignoring flag\n", ioc->name)); fw_event->ignore = 1; } } @@ -4244,6 +5691,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, * or device add and delete events in a single shot. When there * is a delete event the routine will void any pending add * events waiting in the event queue. + * + * Return nothing. 
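/*
 * The coalescing rule the topology-event checkers implement, reduced to its
 * core: a later "device gone" event makes any still-queued "device arrived"
 * event for the same handle moot, so the pending add is only flagged, never
 * processed. Sketch only; matches_add() abstracts the SAS/PCIe-specific
 * status decoding, and the sketch assumes the caller holds ioc->fw_event_lock
 * as the driver's event-list walkers arrange.
 */
static void ignore_pending_adds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u16 event, bool (*matches_add)(struct fw_event_work *, u16))
{
	struct fw_event_work *fw_event;

	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != event || fw_event->ignore)
			continue;
		if (matches_add(fw_event, handle))
			fw_event->ignore = 1;	/* add superseded by delete */
	}
}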
*/ static void _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, @@ -4255,9 +5704,8 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, int i, reason_code; u16 handle, switch_handle; - for (i = 0; i < event_data->NumEntries; i++) { - handle = - le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + for (i = 0 ; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); if (!handle) continue; reason_code = event_data->PortEntry[i].PortStatus; @@ -4267,17 +5715,14 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, switch_handle = le16_to_cpu(event_data->SwitchDevHandle); if (!switch_handle) { - _scsih_block_io_to_pcie_children_attached_directly( - ioc, event_data); + _scsih_block_io_to_pcie_children_attached_directly(ioc, event_data); return; } /* TODO We are not supporting cascaded PCIe Switch removal yet*/ - if ((event_data->SwitchStatus + if ((event_data->SwitchStatus == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) || - (event_data->SwitchStatus == - MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) - _scsih_block_io_to_pcie_children_attached_directly( - ioc, event_data); + (event_data->SwitchStatus == MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) + _scsih_block_io_to_pcie_children_attached_directly(ioc, event_data); if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) return; @@ -4288,17 +5733,15 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || fw_event->ignore) continue; - local_event_data = - (Mpi26EventDataPCIeTopologyChangeList_t *) - fw_event->event_data; + local_event_data = fw_event->event_data; if (local_event_data->SwitchStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED || local_event_data->SwitchStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { if (le16_to_cpu(local_event_data->SwitchDevHandle) == switch_handle) { - dewtprintk(ioc, - ioc_info(ioc, "setting ignoring flag for switch event\n")); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "setting ignoring flag for switch event\n", ioc->name)); fw_event->ignore = 1; } } @@ -4327,9 +5770,10 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_target_priv_data = raid_device->starget->hostdata; sas_target_priv_data->deleted = 1; - dewtprintk(ioc, - ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", - handle, (u64)raid_device->wwid)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "setting delete flag: handle(0x%04x), " + "wwid(0x%016llx)\n", ioc->name, handle, + (unsigned long long) raid_device->wwid)); } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } @@ -4366,6 +5810,8 @@ _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) * volume has been deleted or removed. When the target reset is sent * to volume, the PD target resets need to be queued to start upon * completion of the volume target reset. + * + * Return nothing. 
 */
 static void
 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4431,9 +5877,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
 			INIT_LIST_HEAD(&delayed_tr->list);
 			delayed_tr->handle = handle;
 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
-			dewtprintk(ioc,
-			    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
-			    handle));
+			dewtprintk(ioc, printk(MPT3SAS_INFO_FMT
+			    "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
+			    handle));
 		} else
 			_scsih_tm_tr_send(ioc, handle);
 	}
@@ -4449,6 +5895,8 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
  * This will handle the case when the cable connected to entire volume is
  * pulled. We will take care of setting the deleted flag so normal IO will
  * not be sent.
+ *
+ * Return nothing.
  */
 static void
 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4470,135 +5918,351 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
  * @ioc: per adapter object
  * @event_data: the temp threshold event data
  * Context: interrupt time.
+ *
+ * Return nothing.
  */
 static void
 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2EventDataTemperature_t *event_data)
 {
+	u32 doorbell;
+
 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
-		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
-			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
-			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
-			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
-			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
-		ioc_err(ioc, "Current Temp In Celsius: %d\n",
-			event_data->CurrentTemperature);
+		printk(MPT3SAS_ERR_FMT "Temperature Threshold flags %s%s%s%s "
+		    "exceeded for Sensor: %d !!!\n", ioc->name,
+		    ((event_data->Status & 0x1) == 1) ? "0 " : " ",
+		    ((event_data->Status & 0x2) == 2) ? "1 " : " ",
+		    ((event_data->Status & 0x4) == 4) ? "2 " : " ",
+		    ((event_data->Status & 0x8) == 8) ? "3 " : " ",
+		    event_data->SensorNum);
+		printk(MPT3SAS_ERR_FMT "Current Temp In Celsius: %d\n",
+		    ioc->name, event_data->CurrentTemperature);
+
+		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+			if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+				mpt3sas_base_fault_info(ioc, doorbell &
+				    MPI2_DOORBELL_DATA_MASK);
+		}
 	}
 }
 
+/**
+ * _scsih_set_satl_pending - sets variable as per bool,
+ * indicating whether an ATA command is pending or not.
+ *
+ * @scmd: pointer to scsi command object
+ * @pending: boolean (true or false)
+ *
+ * Return 1 if an ATA command is already pending, else 0.
+ */
 static int
 _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
 {
 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
 
 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
 		return 0;
 
 	if (pending)
-		return test_and_set_bit(0, &priv->ata_command_pending);
+		return test_and_set_bit(CMND_PENDING_BIT, &priv->ata_command_pending);
 
-	clear_bit(0, &priv->ata_command_pending);
+	clear_bit(CMND_PENDING_BIT, &priv->ata_command_pending);
 	return 0;
 }
 
 /**
- * _scsih_flush_running_cmds - completing outstanding commands.
+ * mpt3sas_scsih_flush_running_cmds - completing outstanding commands.
  * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.
+ *
+ * Return nothing.
 */
-static void
-_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
 {
 	struct scsi_cmnd *scmd;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
+	Mpi25SCSIIORequest_t *mpi_request;
+#endif
 	struct scsiio_tracker *st;
 	u16 smid;
-	int count = 0;
+	u16 count = 0;
 
-	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 		if (!scmd)
 			continue;
 		count++;
+
+		st = mpt3sas_base_scsi_cmd_priv(scmd);
+		/* It may be possible that a SCSI scmd got prepared by the
+		 * SML but was never issued to the driver; for that kind of
+		 * scmd don't do anything. */
+		if (st && st->smid == 0)
+			continue;
 		_scsih_set_satl_pending(scmd, false);
-		st = scsi_cmd_priv(scmd);
-		mpt3sas_base_clear_st(ioc, st);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
+		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+		if (scmd->use_sg) {
+			pci_unmap_sg(ioc->pdev,
+			    (struct scatterlist *) scmd->request_buffer,
+			    scmd->use_sg, scmd->sc_data_direction);
+		} else if (scmd->request_bufflen) {
+			pci_unmap_single(ioc->pdev,
+			    scmd->SCp.dma_handle, scmd->request_bufflen,
+			    scmd->sc_data_direction);
+		}
+#else
+		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+			if (mpi_request->DMAFlags == MPI25_TA_DMAFLAGS_OP_D_H_D_D) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
+				dma_unmap_sg(scmd->device->host->dma_dev, scsi_prot_sglist(scmd),
+				    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+#else
+				dma_unmap_sg(scmd->device->host->shost_gendev.parent, scsi_prot_sglist(scmd),
+				    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+#endif
+			}
		}
 		scsi_dma_unmap(scmd);
-		if (ioc->pci_error_recovery || ioc->remove_host)
+#endif
+		mpt3sas_base_clear_st(ioc, st);
+
+		if ((!mpt3sas_base_pci_device_is_available(ioc)) ||
+		    (ioc->ioc_reset_status != 0) ||
+		    ioc->adapter_over_temp || ioc->remove_host)
 			scmd->result = DID_NO_CONNECT << 16;
 		else
-			scmd->result = DID_RESET << 16;
+			mpt3sas_set_requeue_or_reset(scmd);
+
 		scmd->scsi_done(scmd);
 	}
-	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
+	dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "completing %d cmds\n",
+	    ioc->name, count));
 }
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
+static u8 opcode_protection[256] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+#endif
 /**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supporting protection 1 and
3. + * + * Returns nothing */ static void _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, Mpi25SCSIIORequest_t *mpi_request) { u16 eedp_flags; - unsigned char prot_op = scsi_get_prot_op(scmd); - unsigned char prot_type = scsi_get_prot_type(scmd); - Mpi25SCSIIORequest_t *mpi_request_3v = - (Mpi25SCSIIORequest_t *)mpi_request; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + struct MPT3SAS_DEVICE *sas_device_priv_data; + u8 scsi_opcode; - if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) + sas_device_priv_data = scmd->device->hostdata; + + if (!sas_device_priv_data->eedp_enable) return; - if (prot_op == SCSI_PROT_READ_STRIP) - eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; - else if (prot_op == SCSI_PROT_WRITE_INSERT) - eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; - else + /* check whether scsi opcode supports eedp transfer */ + scsi_opcode = scmd->cmnd[0]; + eedp_flags = opcode_protection[scsi_opcode]; + if (!eedp_flags) return; - switch (prot_type) { - case SCSI_PROT_DIF_TYPE1: - case SCSI_PROT_DIF_TYPE2: + /* set RDPROTECT, WRPROTECT, VRPROTECT bits to (001b) */ + scmd->cmnd[1] = (scmd->cmnd[1] & 0x1F) | 0x20; + + switch (sas_device_priv_data->eedp_type) { + case 1: /* type 1 */ /* * enable ref/guard checking * auto increment ref tag */ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; mpi_request->CDB.EEDP32.PrimaryReferenceTag = - cpu_to_be32(t10_pi_ref_tag(scmd->request)); + cpu_to_be32(scsi_get_lba(scmd)); + break; - case SCSI_PROT_DIF_TYPE3: + case 3: /* type 3 */ /* * enable guard checking */ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; - + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; break; } - mpi_request_3v->EEDPBlockSize = - cpu_to_le16(scmd->device->sector_size); +#else /* sles11 and newer */ - if (ioc->is_gen35_ioc) - eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; + unsigned char prot_op = scsi_get_prot_op(scmd); + unsigned char prot_type = scsi_get_prot_type(scmd); + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { + if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) + return; + + if (prot_op == SCSI_PROT_READ_STRIP) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + else if (prot_op == SCSI_PROT_WRITE_INSERT) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + else + return; + } + else { + + if (prot_op == SCSI_PROT_NORMAL) + return; + + /* Translate SCSI opcode to a protection opcode */ + switch (prot_op) { +// The prints are added only for testing purpose. 
Will be removed before GCA + case SCSI_PROT_READ_INSERT: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_H_D_D; + + break; + + case SCSI_PROT_WRITE_STRIP: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_H_D_D; + break; + + case SCSI_PROT_READ_STRIP: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_D_D_D; + break; + + + case SCSI_PROT_WRITE_INSERT: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_D_D_D; + break; + + + case SCSI_PROT_READ_PASS: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_OP| MPI25_TA_EEDPFLAGS_CHECK_REFTAG |MPI25_TA_EEDPFLAGS_CHECK_APPTAG |MPI25_TA_EEDPFLAGS_CHECK_GUARD; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_H_D_D; + break; + + case SCSI_PROT_WRITE_PASS: + if(scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP){ + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP|MPI25_TA_EEDPFLAGS_CHECK_APPTAG |MPI25_TA_EEDPFLAGS_CHECK_GUARD|MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_H_D_D; + mpi_request->ApplicationTagTranslationMask = 0xffff; + + } else { + + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_OP| MPI25_TA_EEDPFLAGS_CHECK_REFTAG |MPI25_TA_EEDPFLAGS_CHECK_APPTAG |MPI25_TA_EEDPFLAGS_CHECK_GUARD; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_H_D_D; + + } + break; + + default : + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_NOOP_OP; + mpi_request->DMAFlags = MPI25_TA_DMAFLAGS_OP_D_D_D_D; + break; + } + + /* In case of T10 CRC algorithm, bits 5:4 of eedp_flags should be set to 0 + * if(scsi_host_get_guard(scmd->device->host) == SHOST_DIX_GUARD_CRC) + * eedp_flags |= (0<<4); + */ + if(scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP) + eedp_flags |= (1<<4); + } + switch (prot_type) { + case SCSI_PROT_DIF_TYPE0: + if (ioc->hba_mpi_version_belonged != MPI2_VERSION){ + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG; + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_get_lba(scmd)); + } + break; + + case SCSI_PROT_DIF_TYPE1: + case SCSI_PROT_DIF_TYPE2: + + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG; + mpi_request->CDB.EEDP32.PrimaryReferenceTag = +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) || ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 8))) \ + || (defined(CONFIG_SUSE_KERNEL) && ((CONFIG_SUSE_VERSION == 15) && (CONFIG_SUSE_PATCHLEVEL >= 1))) \ + || (defined(CONFIG_SUSE_KERNEL) && ((CONFIG_SUSE_VERSION == 12) && (CONFIG_SUSE_PATCHLEVEL >= 5)))) + + cpu_to_be32(t10_pi_ref_tag(scmd->request)); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)) + cpu_to_be32(scsi_prot_ref_tag(scmd)); +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0))) + cpu_to_be32(mpt3sas_scsi_prot_ref_tag(scmd)); +#else + cpu_to_be32(scsi_get_lba(scmd)); +#endif + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; + break; + + case SCSI_PROT_DIF_TYPE3: + /* + * enable guard checking + */ + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; + break; + } + +#endif + mpi_request->EEDPBlockSize = + cpu_to_le32(scmd->device->sector_size); mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); + } /** * _scsih_eedp_error_handling - return sense code for EEDP errors * @scmd: 
pointer to scsi command object * @ioc_status: ioc status + * + * Returns nothing */ static void _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) @@ -4619,53 +6283,182 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) ascq = 0x00; break; } - scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, + mpt_scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, ascq); scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +/** _scsih_build_nvme_unmap - Build Native NVMe DSM command equivalent + * to SCSI Unmap. + * Return 0 - for success, + * 1 - to immediately return back the command with success status to SML + * negative value - to fallback to firmware path i.e. issue scsi unmap + * to FW without any translation. + */ +int _scsih_build_nvme_unmap(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + u16 handle, u64 lun, u32 nvme_mdts) +{ + Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; + struct scsi_unmap_parm_list *plist; + struct nvme_dsm_range *nvme_dsm_ranges = NULL; + struct nvme_command *c; + int i, res=0; + u16 ndesc, list_len, data_length, smid; + dma_addr_t nvme_dsm_ranges_dma_handle = 0; + + list_len = get_unaligned_be16(&scmd->cmnd[7]); + if (!list_len) + return -EINVAL; + + plist = kmalloc(list_len, GFP_KERNEL); + if (!plist) + return -ENOMEM; + + /* Copy SCSI unmap data to a local buffer */ + scsi_sg_copy_to_buffer(scmd, plist, list_len); + + /* return back the unmap command to SML with success status, + * if number of descripts is zero. + */ + ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4; + if (!ndesc) { + res = 1; + goto out; + } else if (ndesc > 256) { + res = -EINVAL; + goto out; + } + + data_length = ndesc * sizeof(*nvme_dsm_ranges); + if (nvme_mdts && (data_length > nvme_mdts)) { + res = -EINVAL; + goto out; + } + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + res = -ENOMEM; + goto out; + } + + nvme_dsm_ranges = + (struct nvme_dsm_range *)mpt3sas_base_get_pcie_sgl(ioc, smid); + nvme_dsm_ranges_dma_handle = + (dma_addr_t)mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + + memset(nvme_dsm_ranges, 0, data_length); + + /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data + * for each descriptors contained in SCSI UNMAP data. 
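/*
 * The per-descriptor translation performed by the loop that follows, in
 * isolation: SCSI UNMAP block descriptors are big-endian on the wire, NVMe
 * DSM ranges little-endian, so each field is byte-swapped on the way across.
 * Minimal sketch with locally defined stand-ins for the two wire formats;
 * the real driver uses struct nvme_dsm_range and its own unmap parameter
 * list layout.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct scsi_unmap_desc_sketch {		/* one SCSI UNMAP block descriptor */
	__be64 slba;
	__be32 nlb;
	__be32 reserved;
};

struct nvme_dsm_range_sketch {		/* mirrors struct nvme_dsm_range */
	__le32 cattr;
	__le32 nlb;
	__le64 slba;
};

static void unmap_desc_to_dsm_range(const struct scsi_unmap_desc_sketch *desc,
	struct nvme_dsm_range_sketch *range)
{
	range->slba  = cpu_to_le64(be64_to_cpu(desc->slba));
	range->nlb   = cpu_to_le32(be32_to_cpu(desc->nlb));
	range->cattr = 0;	/* no context attributes for deallocate */
}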
+ */ + for (i = 0; i < ndesc; i++) { + nvme_dsm_ranges[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb)); + nvme_dsm_ranges[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba)); + nvme_dsm_ranges[i].cattr = 0; + } + + /* Build MPI2.6's NVMe Encapsulated Request Message */ + nvme_encap_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(nvme_encap_request, 0, sizeof(Mpi26NVMeEncapsulatedRequest_t)); + + nvme_encap_request->Function = MPI2_FUNCTION_NVME_ENCAPSULATED; + nvme_encap_request->ErrorResponseBaseAddress = + cpu_to_le64(mpt3sas_base_get_sense_buffer_dma_64(ioc, smid)); + nvme_encap_request->ErrorResponseAllocationLength = + cpu_to_le16(sizeof(struct nvme_completion)); + nvme_encap_request->EncapsulatedCommandLength = + cpu_to_le16(sizeof(struct nvme_command)); + nvme_encap_request->DataLength = cpu_to_le32(data_length); + nvme_encap_request->DevHandle = cpu_to_le16(handle); + nvme_encap_request->Flags = MPI26_NVME_FLAGS_WRITE; + + /* Build NVMe DSM command */ + c = (struct nvme_command *) nvme_encap_request->NVMe_Command; + c->dsm.opcode = nvme_cmd_dsm; + c->dsm.nsid = cpu_to_le32(lun + 1); + c->dsm.nr = cpu_to_le32(ndesc - 1); + c->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + ioc->build_nvme_prp(ioc, smid, nvme_encap_request, + nvme_dsm_ranges_dma_handle, data_length, 0, 0); + + ioc->put_smid_nvme_encap(ioc, smid); +out: + kfree(plist); + return res; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) +static inline u8 scsih_is_io_belongs_to_RT_class(struct scsi_cmnd *scmd) +{ + struct request *rq = scmd->request; + return (IOPRIO_PRIO_CLASS(req_get_ioprio(rq)) == IOPRIO_CLASS_RT); +} +#endif + /** * scsih_qcmd - main scsi request entry point - * @shost: SCSI host pointer * @scmd: pointer to scsi command object + * @done: function pointer to be invoked on completion * * The callback index is set inside `ioc->scsi_io_cb_idx`. * - * Return: 0 on success. If there's a failure, return either: + * Returns 0 on success. 
If there's a failure, return either: * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)) +scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +#else scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +#endif { - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; - struct request *rq = scmd->request; - int class; Mpi25SCSIIORequest_t *mpi_request; struct _pcie_device *pcie_device = NULL; u32 mpi_control; u16 smid; u16 handle; + int rc = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + unsigned long irq_flags = 0; +#endif if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + if (host_lock_mode) + { + spin_lock_irqsave(shost->host_lock, irq_flags); + } +#else + scmd->scsi_done = done; +#endif + sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); - return 0; + goto out; } if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); - return 0; - } + goto out; + } sas_target_priv_data = sas_device_priv_data->sas_target; @@ -4674,22 +6467,42 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); - return 0; + goto out; } + /* + * Avoid error handling escalation when blocked + */ + if (sas_device_priv_data->block && + scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY) { + scmd->result = (DRIVER_SENSE << 24) | + SAM_STAT_CHECK_CONDITION; + scmd->sense_buffer[0] = 0x70; + scmd->sense_buffer[2] = UNIT_ATTENTION; + scmd->sense_buffer[12] = 0x29; + /* ASCQ = I_T NEXUS LOSS OCCURRED */ + scmd->sense_buffer[13] = 0x07; + scmd->scsi_done(scmd); + goto out; + } + /* host recovery or link resets sent via IOCTLs */ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { - /* host recovery or link resets sent via IOCTLs */ - return SCSI_MLQUEUE_HOST_BUSY; - } else if (sas_target_priv_data->deleted) { - /* device has been deleted */ + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + /* device has been deleted */ + else if (sas_target_priv_data->deleted || + sas_device_priv_data->deleted) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); - return 0; + goto out; + /* device busy with task management */ } else if (sas_target_priv_data->tm_busy || - sas_device_priv_data->block) { - /* device busy with task management */ - return SCSI_MLQUEUE_DEVICE_BUSY; + sas_device_priv_data->block) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; } /* @@ -4698,10 +6511,38 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) * since we're lockless at this point */ do { - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) - return SCSI_MLQUEUE_DEVICE_BUSY; + if (test_bit(CMND_PENDING_BIT, + &sas_device_priv_data->ata_command_pending)) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } } while (_scsih_set_satl_pending(scmd, true)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + /* For NVMe devices (PCIe devices that are not PCIe SCSI devices), issue + * the UNMAP command directly to
NVMe drives by constructing an equivalent native NVMe + * DataSetManagement command. + * For PCIe SCSI devices, fall back to the firmware path, + * i.e. issue the SCSI UNMAP without any translation. + */ + if ((sas_target_priv_data->pcie_dev && scmd->cmnd[0] == UNMAP) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + sas_target_priv_data->pcie_dev->device_info)))) { + pcie_device = sas_target_priv_data->pcie_dev; + rc = _scsih_build_nvme_unmap(ioc, scmd, handle, sas_device_priv_data->lun, + pcie_device->nvme_mdts); + if (rc == 1) { /* return command to SML with success status */ + scmd->result = DID_OK << 16; + scmd->scsi_done(scmd); + rc = 0; + goto out; + } else if (!rc) { /* Issued NVMe Encapsulated Request Message */ + goto out; + } else /* Issue a normal scsi UNMAP command to FW */ + rc = 0; + } +#endif + if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; else if (scmd->sc_data_direction == DMA_TO_DEVICE) @@ -4710,31 +6551,46 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; /* set tags */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) + if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { + if (scmd->device->tagged_supported) { + if (scmd->device->ordered_tags) + mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; + else + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + } else + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + } else + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; +#else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) /* NCQ Prio supported, make sure control indicated high priority */ if (sas_device_priv_data->ncq_prio_enable) { - class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); - if (class == IOPRIO_CLASS_RT) + if (scsih_is_io_belongs_to_RT_class(scmd)) mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; } - /* Make sure Device is not raid volume. - * We do not expose raid functionality to upper layer for warpdrive.
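To make the translation concrete, here is a worked, illustrative expansion (not driver code) of what _scsih_build_nvme_unmap() emits for a single-descriptor UNMAP of 8 blocks starting at LBA 0x1000 on LUN 0, mirroring the field assignments shown above:

/* One SCSI UNMAP block descriptor becomes one NVMe DSM range. */
struct nvme_dsm_range range = {
	.cattr = cpu_to_le32(0),
	.nlb   = cpu_to_le32(8),	/* blocks in this range */
	.slba  = cpu_to_le64(0x1000),	/* starting LBA */
};

/* And the encapsulated NVMe command itself, as built above: */
c->dsm.opcode     = nvme_cmd_dsm;
c->dsm.nsid       = cpu_to_le32(0 + 1);		/* LUN 0 maps to NSID 1 */
c->dsm.nr         = cpu_to_le32(1 - 1);		/* 0-based range count */
c->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);	/* deallocate */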
- */ - if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) - && !scsih_is_nvme(&scmd->device->sdev_gendev)) - && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) +#endif + + if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && + scmd->cmd_len != 32) mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = SCSI_MLQUEUE_HOST_BUSY; _scsih_set_satl_pending(scmd, false); goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); - memset(mpi_request, 0, ioc->request_sz); - _scsih_setup_eedp(ioc, scmd, mpi_request); + /* the message frame allocated will be of sizeof(MPT3SAS_SCSIIORequest2SGLS_t) */ + if (!ioc->disable_eedp_support) { + _scsih_setup_eedp(ioc, scmd, mpi_request); + } if (scmd->cmd_len == 32) mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; @@ -4744,7 +6600,11 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) else mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; mpi_request->DevHandle = cpu_to_le16(handle); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + mpi_request->DataLength = cpu_to_le32(scmd->request_bufflen); +#else mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); +#endif mpi_request->Control = cpu_to_le32(mpi_control); mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; @@ -4760,19 +6620,22 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) pcie_device = sas_target_priv_data->pcie_dev; if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { mpt3sas_base_free_smid(ioc, smid); + rc = SCSI_MLQUEUE_HOST_BUSY; _scsih_set_satl_pending(scmd, false); goto out; } } else ioc->build_zero_len_sge(ioc, &mpi_request->SGL); - raid_device = sas_target_priv_data->raid_device; - if (raid_device && raid_device->direct_io_enabled) - mpt3sas_setup_direct_io(ioc, scmd, - raid_device, mpi_request); - + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { + raid_device = sas_target_priv_data->raid_device; + if (raid_device && raid_device->direct_io_enabled) + mpt3sas_setup_direct_io(ioc, scmd, raid_device, + mpi_request); + } if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { - if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && + (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO)) { mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | MPI25_SCSIIO_IOFLAGS_FAST_PATH); ioc->put_smid_fast_path(ioc, smid, handle); @@ -4781,16 +6644,22 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) le16_to_cpu(mpi_request->DevHandle)); } else ioc->put_smid_default(ioc, smid); - return 0; out: - return SCSI_MLQUEUE_HOST_BUSY; + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + if(host_lock_mode) + spin_unlock_irqrestore(shost->host_lock, irq_flags); + #endif + return rc; + } /** * _scsih_normalize_sense - normalize descriptor and fixed format sense data * @sense_buffer: sense data returned by target * @data: normalized skey/asc/ascq + * + * Return nothing. 
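For reference, sense data reaches the driver in one of two SPC formats, and the normalization routine defined next pulls skey/asc/ascq from the corresponding offsets. An illustrative sketch of the distinction (the driver's own logic follows in _scsih_normalize_sense()):

u8 *buf = scmd->sense_buffer;

if ((buf[0] & 0x7f) >= 0x72) {	/* descriptor format (0x72/0x73) */
	data->skey = buf[1] & 0x0f;
	data->asc  = buf[2];
	data->ascq = buf[3];
} else {			/* fixed format (0x70/0x71) */
	data->skey = buf[2] & 0x0f;
	data->asc  = buf[12];
	data->ascq = buf[13];
}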
*/ static void _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) @@ -4813,22 +6682,23 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) * @ioc: per adapter object * @scmd: pointer to scsi command object * @mpi_reply: reply mf payload returned from firmware - * @smid: ? * * scsi_status - SCSI Status code returned from target device * scsi_state - state info associated with SCSI_IO determined by ioc * ioc_status - ioc supplied status info + * + * Return nothing. */ static void _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, - Mpi2SCSIIOReply_t *mpi_reply, u16 smid) + Mpi2SCSIIOReply_t *mpi_reply, u16 smid, u8 scsi_status, + u16 error_response_count) { u32 response_info; u8 *response_bytes; u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; u8 scsi_state = mpi_reply->SCSIState; - u8 scsi_status = mpi_reply->SCSIStatus; char *desc_ioc_state = NULL; char *desc_scsi_status = NULL; char *desc_scsi_state = ioc->tmp_string; @@ -4838,10 +6708,12 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct scsi_target *starget = scmd->device->sdev_target; struct MPT3SAS_TARGET *priv_target = starget->hostdata; char *device_str = NULL; + u8 function = mpi_reply->Function; if (!priv_target) return; - if (ioc->hide_ir_msg) + + if (ioc->warpdrive_msg) device_str = "WarpDrive"; else device_str = "volume"; @@ -4893,17 +6765,23 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, desc_ioc_state = "scsi ext terminated"; break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: - desc_ioc_state = "eedp guard error"; - break; + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp guard error"; + break; + } case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: - desc_ioc_state = "eedp ref tag error"; - break; + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp ref tag error"; + break; + } case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: - desc_ioc_state = "eedp app tag error"; - break; + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp app tag error"; + break; + } case MPI2_IOCSTATUS_INSUFFICIENT_POWER: - desc_ioc_state = "insufficient power"; - break; + desc_ioc_state = "insufficient power"; + break; default: desc_ioc_state = "unknown"; break; @@ -4965,54 +6843,81 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { - ioc_warn(ioc, "\t%s wwid(0x%016llx)\n", - device_str, (u64)priv_target->sas_address); - } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + printk(MPT3SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name, + device_str, (unsigned long long)priv_target->sas_address); + } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE){ pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target); if (pcie_device) { - ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n", - (u64)pcie_device->wwid, pcie_device->port_num); - if (pcie_device->enclosure_handle != 0) - ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n", - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot); + printk(MPT3SAS_WARN_FMT "\twwid(0x%016llx), port(%d)\n", ioc->name, + (unsigned long long)pcie_device->wwid, pcie_device->port_num); + if(pcie_device->enclosure_handle != 0) + printk(MPT3SAS_WARN_FMT "\tenclosure logical id(0x%016llx), " + "slot(%d)\n", ioc->name, + (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot); if (pcie_device->connector_name[0]) - ioc_info(ioc, 
"\tenclosure level(0x%04x), connector name( %s)\n", - pcie_device->enclosure_level, - pcie_device->connector_name); + printk(MPT3SAS_WARN_FMT + "\tenclosure level(0x%04x), connector name( %s)\n", + ioc->name, pcie_device->enclosure_level, + pcie_device->connector_name); pcie_device_put(pcie_device); } } else { sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); if (sas_device) { - ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n", - (u64)sas_device->sas_address, sas_device->phy); - - _scsih_display_enclosure_chassis_info(ioc, sas_device, - NULL, NULL); - + printk(MPT3SAS_WARN_FMT "\t%s: sas_address(0x%016llx), " + "phy(%d)\n", ioc->name, __func__, (unsigned long long) + sas_device->sas_address, sas_device->phy); + _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); sas_device_put(sas_device); } } - ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", - le16_to_cpu(mpi_reply->DevHandle), - desc_ioc_state, ioc_status, smid); - ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n", - scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd)); - ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", - le16_to_cpu(mpi_reply->TaskTag), - le32_to_cpu(mpi_reply->TransferCount), scmd->result); - ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", - desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); + printk(MPT3SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), " + "smid(%d)\n", ioc->name, le16_to_cpu(mpi_reply->DevHandle), + desc_ioc_state, ioc_status, smid); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + printk(MPT3SAS_WARN_FMT "\trequest_len(%d), underflow(%d), " + "resid(%d)\n", ioc->name, scmd->request_bufflen, scmd->underflow, + scmd->resid); +#else + printk(MPT3SAS_WARN_FMT "\trequest_len(%d), underflow(%d), " + "resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow, + scsi_get_resid(scmd)); +#endif + if (function == MPI2_FUNCTION_NVME_ENCAPSULATED) { + printk(MPT3SAS_WARN_FMT "\tsc->result(0x%08x)\n", ioc->name, + scmd->result); + } else { + printk(MPT3SAS_WARN_FMT "\ttag(%d), transfer_count(%d), " + "sc->result(0x%08x)\n", ioc->name, + le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + } + + printk(MPT3SAS_WARN_FMT "\tscsi_status(%s)(0x%02x), " + "scsi_state(%s)(0x%02x)\n", ioc->name, desc_scsi_status, + scsi_status, desc_scsi_state, scsi_state); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { struct sense_info data; _scsih_normalize_sense(scmd->sense_buffer, &data); - ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", - data.skey, data.asc, data.ascq, - le32_to_cpu(mpi_reply->SenseCount)); + printk(MPT3SAS_WARN_FMT "\t[sense_key,asc,ascq]: " + "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey, + data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount)); } + + if (function == MPI2_FUNCTION_NVME_ENCAPSULATED) { + if(error_response_count) { + struct sense_info data; + _scsih_normalize_sense(scmd->sense_buffer, &data); + printk(MPT3SAS_WARN_FMT "\t[sense_key,asc,ascq]: " + "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, + data.skey, data.asc, data.ascq, + error_response_count); + } + } + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32_to_cpu(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; @@ -5025,6 +6930,8 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, * @ioc: per adapter object * @handle: device handle * Context: process + * + 
* Return nothing. */ static void _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -5046,29 +6953,31 @@ _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, &mpi_request)) != 0) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); goto out; } sas_device->pfa_led_on = 1; if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { - dewtprintk(ioc, - ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", - le16_to_cpu(mpi_reply.IOCStatus), - le32_to_cpu(mpi_reply.IOCLogInfo))); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "enclosure_processor: " + "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); goto out; } out: sas_device_put(sas_device); } - /** - * _scsih_turn_off_pfa_led - turn off Fault LED + * _scsih_turn_off_pfa_led - turn off PFA LED * @ioc: per adapter object * @sas_device: sas device whose PFA LED has to turned off * Context: process + * + * Return nothing. */ +/* TODO -- no support for turning off the PFA LED yet */ static void _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device) @@ -5085,26 +6994,27 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, - &mpi_request)) != 0) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + &mpi_request)) != 0) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); return; } if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { - dewtprintk(ioc, - ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", - le16_to_cpu(mpi_reply.IOCStatus), - le32_to_cpu(mpi_reply.IOCLogInfo))); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "enclosure_processor: " + "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} - /** * _scsih_send_event_to_turn_on_pfa_led - fire delayed event * @ioc: per adapter object * @handle: device handle * Context: interrupt. + * + * Return nothing. */ static void _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -5126,9 +7036,12 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) * @ioc: per adapter object * @handle: device handle * Context: interrupt. + * + * Return nothing.
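For context, both LED helpers above are thin wrappers around an MPI SCSI Enclosure Processor (SEP) request; the fields visible in the hunks combine roughly as follows (an abbreviated, illustrative sketch, not a copy of the driver's full setup):

Mpi2SepRequest_t mpi_request;

memset(&mpi_request, 0, sizeof(mpi_request));
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
mpi_request.SlotStatus =
	cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
mpi_request.DevHandle = cpu_to_le16(handle);
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, &mpi_request);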
*/ static void -_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) +_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 from_sata_smart_polling) { struct scsi_target *starget; struct MPT3SAS_TARGET *sas_target_priv_data; @@ -5141,30 +7054,32 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) /* only handle non-raid devices */ spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); - if (!sas_device) + if (!sas_device) { goto out_unlock; - + } starget = sas_device->starget; sas_target_priv_data = starget->hostdata; if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) goto out_unlock; - _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget); - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + /* Issue SEP request message to turn on the PFA LED whenever this + * function is invoked from the SATA SMART polling thread or when the + * subsystem vendor is IBM */ + if (from_sata_smart_polling || + ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) _scsih_send_event_to_turn_on_pfa_led(ioc, handle); /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + sizeof(Mpi2EventDataSasDeviceStatusChange_t); - event_reply = kzalloc(sz, GFP_KERNEL); + event_reply = kzalloc(sz, GFP_ATOMIC); if (!event_reply) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } @@ -5192,6 +7107,218 @@ out_unlock: goto out; } +/** _scsih_nvme_trans_status_code + * + * Convert Native NVMe command error status to + * equivalent SCSI error status.
+ * + * Returns appropriate scsi_status + */ +static u8 _scsih_nvme_trans_status_code(u16 nvme_status, struct sense_info *data) +{ + u8 status = MPI2_SCSI_STATUS_GOOD; + + switch (get_nvme_sct_with_sc(nvme_status)) { + /* Generic Command Status */ + case NVME_SC_SUCCESS: + status = MPI2_SCSI_STATUS_GOOD; + data->skey = NO_SENSE; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_OPCODE: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_ILLEGAL_COMMAND; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_FIELD: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_INVALID_CDB; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_DATA_XFER_ERROR: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_POWER_LOSS: + status = MPI2_SCSI_STATUS_TASK_ABORTED; + data->skey = ABORTED_COMMAND; + data->asc = SCSI_ASC_WARNING; + data->ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED; + break; + case NVME_SC_INTERNAL: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = HARDWARE_ERROR; + data->asc = SCSI_ASC_INTERNAL_TARGET_FAILURE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ABORT_REQ: + status = MPI2_SCSI_STATUS_TASK_ABORTED; + data->skey = ABORTED_COMMAND; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ABORT_QUEUE: + status = MPI2_SCSI_STATUS_TASK_ABORTED; + data->skey = ABORTED_COMMAND; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_FUSED_FAIL: + status = MPI2_SCSI_STATUS_TASK_ABORTED; + data->skey = ABORTED_COMMAND; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_FUSED_MISSING: + status = MPI2_SCSI_STATUS_TASK_ABORTED; + data->skey = ABORTED_COMMAND; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_NS: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; + data->ascq = SCSI_ASCQ_INVALID_LUN_ID; + break; + case NVME_SC_LBA_RANGE: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_ILLEGAL_BLOCK; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_CAP_EXCEEDED: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_NS_NOT_READY: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = NOT_READY; + data->asc = SCSI_ASC_LUN_NOT_READY; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + + /* Command Specific Status */ + case NVME_SC_INVALID_FORMAT: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_FORMAT_COMMAND_FAILED; + data->ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED; + break; + case NVME_SC_BAD_ATTRIBUTES: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_INVALID_CDB; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + + /* Media Errors */ + case NVME_SC_WRITE_FAULT: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = 
SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_READ_ERROR: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = SCSI_ASC_UNRECOVERED_READ_ERROR; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_GUARD_CHECK: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED; + data->ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED; + break; + case NVME_SC_APPTAG_CHECK: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED; + data->ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED; + break; + case NVME_SC_REFTAG_CHECK: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MEDIUM_ERROR; + data->asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED; + data->ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED; + break; + case NVME_SC_COMPARE_FAILED: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = MISCOMPARE; + data->asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ACCESS_DENIED: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; + data->ascq = SCSI_ASCQ_INVALID_LUN_ID; + break; + + /* Unspecified/Default */ + case NVME_SC_CMDID_CONFLICT: + case NVME_SC_CMD_SEQ_ERROR: + case NVME_SC_CQ_INVALID: + case NVME_SC_QID_INVALID: + case NVME_SC_QUEUE_SIZE: + case NVME_SC_ABORT_LIMIT: + case NVME_SC_ABORT_MISSING: + case NVME_SC_ASYNC_LIMIT: + case NVME_SC_FIRMWARE_SLOT: + case NVME_SC_FIRMWARE_IMAGE: + case NVME_SC_INVALID_VECTOR: + case NVME_SC_INVALID_LOG_PAGE: + default: + status = MPI2_SCSI_STATUS_CHECK_CONDITION; + data->skey = ILLEGAL_REQUEST; + data->asc = SCSI_ASC_NO_SENSE; + data->ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + } + + return status; +} + +/** _scsih_complete_nvme_unmap + * + * Complete native NVMe command issued using NVMe Encapsulated + * Request Message. + */ +static u8 +_scsih_complete_nvme_unmap(struct MPT3SAS_ADAPTER *ioc, u16 smid, + struct scsi_cmnd *scmd, u32 reply, u16 *error_response_count) +{ + Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply; + struct nvme_completion *nvme_completion = NULL; + u8 scsi_status = MPI2_SCSI_STATUS_GOOD; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + *error_response_count = le16_to_cpu(mpi_reply->ErrorResponseCount); + if (*error_response_count) { + struct sense_info data; + nvme_completion = + (struct nvme_completion *) mpt3sas_base_get_sense_buffer(ioc, + smid); + scsi_status = _scsih_nvme_trans_status_code( + le16_to_cpu(nvme_completion->status), &data); + + scmd->sense_buffer[0] = FIXED_SENSE_DATA; + scmd->sense_buffer[2] = data.skey; + scmd->sense_buffer[12] = data.asc; + scmd->sense_buffer[13] = data.ascq; + } + + return scsi_status; +} + /** * _scsih_io_done - scsi request callback * @ioc: per adapter object @@ -5199,10 +7326,10 @@ out_unlock: * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * - * Callback handler when using _scsih_qcmd. + * Callback handler when using scsih_qcmd. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
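Before the completion handler itself, a worked illustration of the two helpers above (illustrative only; it assumes the status argument already carries the combined SCT/SC value that get_nvme_sct_with_sc() extracts). An NVMe end-to-end guard-check failure surfaces to the midlayer as a medium error in fixed-format sense, much as a T10 DIF-aware SAS target would report it:

struct sense_info data;
u8 scsi_status;

scsi_status = _scsih_nvme_trans_status_code(NVME_SC_GUARD_CHECK, &data);
/* scsi_status == MPI2_SCSI_STATUS_CHECK_CONDITION,
 * data.skey   == MEDIUM_ERROR,
 * data.asc    == SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED,
 * data.ascq   == SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED
 */

/* Packed into SPC fixed-format sense, exactly as the code above does: */
scmd->sense_buffer[0]  = FIXED_SENSE_DATA;	/* 0x70, fixed format */
scmd->sense_buffer[2]  = data.skey;		/* sense key */
scmd->sense_buffer[12] = data.asc;		/* additional sense code */
scmd->sense_buffer[13] = data.ascq;		/* qualifier */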
*/ static u8 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) @@ -5210,16 +7337,14 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) Mpi25SCSIIORequest_t *mpi_request; Mpi2SCSIIOReply_t *mpi_reply; struct scsi_cmnd *scmd; - struct scsiio_tracker *st; - u16 ioc_status; + u16 ioc_status, error_response_count = 0; u32 xfer_cnt; u8 scsi_state; u8 scsi_status; u32 log_info; struct MPT3SAS_DEVICE *sas_device_priv_data; u32 response_code = 0; - - mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + struct scsiio_tracker *st; scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); if (scmd == NULL) @@ -5229,6 +7354,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (mpi_reply == NULL) { scmd->result = DID_OK << 16; goto out; } @@ -5246,12 +7372,22 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) * WARPDRIVE: If direct_io is set then it is directIO, * the failed direct I/O should be redirected to volume */ - st = scsi_cmd_priv(scmd); + st = mpt3sas_base_scsi_cmd_priv(scmd); if (st->direct_io && - ((ioc_status & MPI2_IOCSTATUS_MASK) - != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { - st->direct_io = 0; + ((ioc_status & MPI2_IOCSTATUS_MASK) + != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { +#ifdef MPT2SAS_WD_DDIOCOUNT + ioc->ddio_err_count++; +#endif +#ifdef MPT2SAS_WD_LOGGING + if (ioc->logging_level & MPT_DEBUG_SCSI) { + printk(MPT3SAS_INFO_FMT "scmd(%p) failed when issued " + "as direct IO, retrying\n", ioc->name, scmd); + scsi_print_command(scmd); + } +#endif + st->scmd = scmd; + st->direct_io = 0; memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); mpi_request->DevHandle = cpu_to_le16(sas_device_priv_data->sas_target->handle); @@ -5259,6 +7395,7 @@ sas_device_priv_data->sas_target->handle); return 0; } + /* turning off TLR */ scsi_state = mpi_reply->SCSIState; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; if (!sas_device_priv_data->tlr_snoop_check) { sas_device_priv_data->tlr_snoop_check++; - if ((!ioc->is_warpdrive && - !scsih_is_raid(&scmd->device->sdev_gendev) && - !scsih_is_nvme(&scmd->device->sdev_gendev)) - && sas_is_tlr_enabled(scmd->device) && - response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { - sas_disable_tlr(scmd->device); - sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); - } + if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && + response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) + sas_device_priv_data->flags &= + ~MPT_DEVICE_TLR_ON; } - xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); - scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) log_info = le32_to_cpu(mpi_reply->IOCLogInfo); else @@ -5285,6 +7416,22 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) ioc_status &= MPI2_IOCSTATUS_MASK; scsi_status = mpi_reply->SCSIStatus; + if (mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED && + sas_device_priv_data->sas_target->pcie_dev && + scmd->cmnd[0] == UNMAP) { + scsi_status = + _scsih_complete_nvme_unmap(ioc, smid, scmd, reply, + &error_response_count); + xfer_cnt =
le32_to_cpu(mpi_request->DataLength); + } else + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + scmd->resid = scmd->request_bufflen - xfer_cnt; +#else + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); +#endif + if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && (scsi_status == MPI2_SCSI_STATUS_BUSY || scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || @@ -5303,14 +7450,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) /* failure prediction threshold exceeded */ if (data.asc == 0x5D) _scsih_smart_predicted_fault(ioc, - le16_to_cpu(mpi_reply->DevHandle)); + le16_to_cpu(mpi_reply->DevHandle), 0); mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); - if ((ioc->logging_level & MPT_DEBUG_REPLY) && - ((scmd->sense_buffer[2] == UNIT_ATTENTION) || - (scmd->sense_buffer[2] == MEDIUM_ERROR) || - (scmd->sense_buffer[2] == HARDWARE_ERROR))) - _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); } switch (ioc_status) { case MPI2_IOCSTATUS_BUSY: @@ -5337,20 +7479,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) scmd->device->expecting_cc_ua = 1; } break; - } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { - scmd->result = DID_RESET << 16; + } else if(log_info == 0x32010081) { + mpt3sas_set_requeue_or_reset(scmd); break; } else if ((scmd->device->channel == RAID_CHANNEL) && - (scsi_state == (MPI2_SCSI_STATE_TERMINATED | - MPI2_SCSI_STATE_NO_SCSI_STATUS))) { - scmd->result = DID_RESET << 16; + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + mpt3sas_set_requeue_or_reset(scmd); break; } scmd->result = DID_SOFT_ERROR << 16; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: - scmd->result = DID_RESET << 16; + mpt3sas_set_requeue_or_reset(scmd); break; case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: @@ -5375,7 +7517,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) MPI2_SCSI_STATE_NO_SCSI_STATUS)) scmd->result = DID_SOFT_ERROR << 16; else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) - scmd->result = DID_RESET << 16; + mpt3sas_set_requeue_or_reset(scmd); else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID; mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; @@ -5389,8 +7531,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + scmd->resid = 0; +#else scsi_set_resid(scmd, 0); - /* fall through */ +#endif case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SUCCESS: scmd->result = (DID_OK << 16) | scsi_status; @@ -5400,14 +7545,16 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) MPI2_SCSI_STATE_NO_SCSI_STATUS))) scmd->result = DID_SOFT_ERROR << 16; else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) - scmd->result = DID_RESET << 16; + mpt3sas_set_requeue_or_reset(scmd); break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: - _scsih_eedp_error_handling(scmd, ioc_status); - break; + if (!ioc->disable_eedp_support) { + _scsih_eedp_error_handling(scmd, ioc_status); + break; + } case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_INVALID_FUNCTION: @@ -5425,16 +7572,390 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) 
} if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY)) - _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid); + _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid, scsi_status, + error_response_count); out: +#if defined(CRACK_MONKEY_EEDP) + if (!ioc->disable_eedp_support) { + if (scmd->cmnd[0] == INQUIRY && scmd->host_scribble) { + char *some_data = scmd->host_scribble; + char inq_str[16]; + + memset(inq_str, 0, 16); + strncpy(inq_str, &some_data[16], 10); + if (!strcmp(inq_str, "Harpy Disk")) + some_data[5] |= 1; + scmd->host_scribble = NULL; + } + } +#endif /* CRACK_MONKEY_EEDP */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + if (scmd->use_sg) + pci_unmap_sg(ioc->pdev, (struct scatterlist *) + scmd->request_buffer, scmd->use_sg, + scmd->sc_data_direction); + else if (scmd->request_bufflen) + pci_unmap_single(ioc->pdev, scmd->SCp.dma_handle, + scmd->request_bufflen, scmd->sc_data_direction); +#else scsi_dma_unmap(scmd); + + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + if (mpi_request->DMAFlags == MPI25_TA_DMAFLAGS_OP_D_H_D_D) + { + dma_unmap_sg(&ioc->pdev->dev, scsi_prot_sglist(scmd), + scsi_prot_sg_count(scmd), scmd->sc_data_direction); + } + } +#endif + mpt3sas_base_free_smid(ioc, smid); scmd->scsi_done(scmd); return 0; } +/** + * _scsih_get_port_table_after_reset - Construct temporary port table + * @ioc: per adapter object + * @port_table: address where port table needs to be constructed + * + * return number of HBA port entries available after reset. + */ +static u8 +_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_table) +{ + u16 sz, ioc_status; + int i, j; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found=0, port_count=0, port_id; + + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return port_count; + } + + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + found = 0; + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + MPI2_SAS_NEG_LINK_RATE_1_5) + continue; + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. 
+ AttachedDevHandle); + if (_scsih_get_sas_address(ioc, attached_handle, &attached_sas_addr) + != 0) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + + for (j = 0; j < port_count; j++) { + port_id = sas_iounit_pg0->PhyData[i].Port; + if ((port_table[j].port_id == port_id) && + (port_table[j].sas_address == attached_sas_addr)) { + port_table[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + + if (found) + continue; + + port_id = sas_iounit_pg0->PhyData[i].Port; + port_table[port_count].port_id = port_id; + port_table[port_count].phy_mask = (1 << i); + port_table[port_count].sas_address = attached_sas_addr; + port_count++; + } +out: + kfree(sas_iounit_pg0); + return port_count; +} + +#define MATCHED_WITH_ADDR_AND_PHYMASK 1 +#define MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT 2 +#define MATCHED_WITH_ADDR_AND_SUBPHYMASK 3 +#define MATCHED_WITH_ADDR 4 + +/** + * _scsih_look_and_get_matched_port_entry - Get matched port entry + * from HBA port table + * @ioc: per adapter object + * @port_entry - port entry from temporary port table which needs to be + * searched for matched entry in the HBA port table + * @matched_port_entry - save matched port entry here + * @count - count of matched entries + * + * return type of matched entry found. + */ +static int +_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_entry, + struct hba_port **matched_port_entry, int *count) +{ + struct hba_port *port_table_entry = NULL; + struct hba_port *port_sasaddr_phymask = NULL; + struct hba_port *port_sasaddr_subphymask_port = NULL; + struct hba_port *port_sasaddr_subphymask = NULL; + struct hba_port *port_sasaddr = NULL; + int lcount = 0; + *matched_port_entry = NULL; + + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT)) + continue; + + if ((port_table_entry->sas_address == port_entry->sas_address) && + (port_table_entry->phy_mask == port_entry->phy_mask)) { + port_sasaddr_phymask = port_table_entry; + break; + } + + if ((port_table_entry->sas_address == port_entry->sas_address) && + (port_table_entry->phy_mask & port_entry->phy_mask) && + (port_table_entry->port_id == port_entry->port_id)) { + port_sasaddr_subphymask_port = port_table_entry; + continue; + } + + if ((port_table_entry->sas_address == port_entry->sas_address) && + (port_table_entry->phy_mask & port_entry->phy_mask)) { + port_sasaddr_subphymask = port_table_entry; + continue; + } + + if (port_table_entry->sas_address == port_entry->sas_address) { + port_sasaddr = port_table_entry; + lcount++; + } + } + + if (port_sasaddr_phymask) { + *matched_port_entry = port_sasaddr_phymask; + return MATCHED_WITH_ADDR_AND_PHYMASK; + } + + if (port_sasaddr_subphymask_port) { + *matched_port_entry = port_sasaddr_subphymask_port; + return MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + } + + if (port_sasaddr_subphymask) { + *matched_port_entry = port_sasaddr_subphymask; + return MATCHED_WITH_ADDR_AND_SUBPHYMASK; + } + + if (port_sasaddr) { + *matched_port_entry = port_sasaddr; + *count = lcount; + return MATCHED_WITH_ADDR; + } + + return 0; +} + +/** + * _scsih_del_phy_part_of_another_port - remove phy if it + * is a part of another port + * @ioc: per adapter object + * @port_table: port table after reset + * @index: port entry index + * @port_count: number of ports available after host reset + * @offset: HBA phy bit offset + * + */ +static void +_scsih_del_phy_part_of_another_port(struct MPT3SAS_ADAPTER
*ioc, + struct hba_port *port_table, + int index, u8 port_count, int offset) +{ + struct _sas_node *sas_node = &ioc->sas_hba; + u32 i, found = 0; + + for (i = 0; i < port_count; i++) + { + if (i == index) + continue; + + if (port_table[i].phy_mask & (1 << offset)) { + mpt3sas_transport_del_phy_from_an_existing_port( + ioc, sas_node, &sas_node->phy[offset]); + found = 1; + break; + } + } + if (!found) + port_table[index].phy_mask |= (1 << offset); +} + +/** + * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from + * the right port + * @ioc: per adapter object + * @hba_port_entry: hba port table entry + * @port_table: temporary port table + * @index: port entry index + * @port_count: number of ports available after host reset + * + */ +static void +_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *hba_port_entry, struct hba_port *port_table, + int index, u8 port_count) +{ + u32 phy_mask, offset = 0; + struct _sas_node *sas_node = &ioc->sas_hba; + phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; + for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { + if (phy_mask & (1 << offset)) { + if (!(port_table[index].phy_mask & (1 << offset))) { + _scsih_del_phy_part_of_another_port( + ioc, port_table, index, port_count, offset); + } else { + #if defined(MPT_WIDE_PORT_API) + if (sas_node->phy[offset].phy_belongs_to_port) + mpt3sas_transport_del_phy_from_an_existing_port( + ioc, sas_node, &sas_node->phy[offset]); + mpt3sas_transport_add_phy_to_an_existing_port( + ioc, sas_node, &sas_node->phy[offset], + hba_port_entry->sas_address, + hba_port_entry); + #endif + } + } + } +} + +/** + * _scsih_del_dirty_port_entries - delete dirty port entries from port list + * after host reset + * @ioc: per adapter object + * + */ +static void +_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) +{ + struct hba_port *port, *port_next; + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) || + port->flags & HBA_PORT_FLAG_NEW_PORT) + continue; + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "Deleting port table entry having" + " Port: %d\t Phy_mask 0x%08x\n", ioc->name, + port->port_id, + port->phy_mask)); + list_del(&port->list); + kfree(port); + } +} + +/** + * _scsih_sas_port_refresh - Update HBA port table after host reset + * @ioc: per adapter object + */ +static void +_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) +{ + u8 port_count = 0; + struct hba_port port_table[ioc->sas_hba.num_phys]; + struct hba_port *port_table_entry; + struct hba_port *port_entry = NULL; + int i, j, ret, count = 0, lcount = 0; + u64 sas_addr; + + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "updating ports for sas_host(0x%016llx)\n", + ioc->name, (unsigned long long)ioc->sas_hba.sas_address)); + + port_count = _scsih_get_port_table_after_reset(ioc, &port_table[0]); + if (!port_count) + return; + + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "New Port table\n", ioc->name)); + for (j = 0; j < port_count; j++) + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + ioc->name, port_table[j].port_id, + port_table[j].phy_mask, port_table[j].sas_address)); + + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; + } + + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "Old Port table\n", ioc->name)); + port_table_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + drsprintk(ioc,
printk(MPT3SAS_INFO_FMT + "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + ioc->name, port_table_entry->port_id, + port_table_entry->phy_mask, + port_table_entry->sas_address)); + } + + for (j = 0; j < port_count; j++) { + + ret = _scsih_look_and_get_matched_port_entry(ioc, + &port_table[j], &port_entry, &count); + if (!port_entry) { + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "No matched entry for sas_addr(0x%016llx), Port: %d\n", + ioc->name, port_table[j].sas_address, + port_table[j].port_id)); + continue; + } + + switch (ret) { + case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: + case MATCHED_WITH_ADDR_AND_SUBPHYMASK: + _scsih_add_or_del_phys_from_existing_port(ioc, + port_entry, &port_table[0], j, port_count); + break; + case MATCHED_WITH_ADDR: + sas_addr = port_table[j].sas_address; + for (i = 0; i < port_count; i++) { + if (port_table[i].sas_address == sas_addr) + lcount++; + } + + if ((count > 1) || (lcount > 1)) + port_entry = NULL; + else + _scsih_add_or_del_phys_from_existing_port(ioc, + port_entry, &port_table[0], j, port_count); + } + + if (!port_entry) + continue; + + if (port_entry->port_id != port_table[j].port_id) + port_entry->port_id = port_table[j].port_id; + port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; + port_entry->phy_mask = port_table[j].phy_mask; + } + + port_table_entry = NULL; + +}
AttachedDevHandle); if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; + ioc->sas_hba.phy[i].port = mpt3sas_get_port_by_id(ioc, port_id); mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, - attached_handle, i, link_rate); + attached_handle, i, link_rate, ioc->sas_hba.phy[i].port); } out: kfree(sas_iounit_pg0); @@ -5496,6 +8038,8 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) * @ioc: per adapter object * * Creating host side data object, stored in ioc->sas_hba + * + * Return nothing. */ static void _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) @@ -5510,21 +8054,23 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) u16 ioc_status; u16 sz; u8 device_missing_delay; - u8 num_phys; + u8 num_phys, port_id; + struct hba_port *port; mpt3sas_config_get_number_hba_phys(ioc, &num_phys); if (!num_phys) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } - ioc->sas_hba.phy = kcalloc(num_phys, - sizeof(struct _sas_phy), GFP_KERNEL); + + ioc->sas_hba.phy = kcalloc(num_phys, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - goto out; + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; } + ioc->sas_hba.num_phys = num_phys; /* sas_iounit page 0 */ @@ -5532,21 +8078,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) sizeof(Mpi2SasIOUnit0PhyData_t)); sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg0) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, sas_iounit_pg0, sz))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } @@ -5555,21 +8101,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) sizeof(Mpi2SasIOUnit1PhyData_t)); sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg1) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } @@ -5585,49 +8131,66 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + for (i = 0; i < ioc->sas_hba.num_phys ; 
i++) { if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, i))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } if (i == 0) ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> PhyData[0].ControllerDevHandle); + + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(mpt3sas_get_port_by_id(ioc, port_id))) { + port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!port) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + port->port_id = port_id; + printk(MPT3SAS_INFO_FMT "hba_port entry: %p," + " port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + } ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; ioc->sas_hba.phy[i].phy_id = i; + ioc->sas_hba.phy[i].port = mpt3sas_get_port_by_id(ioc, port_id); mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], phy_pg0, ioc->sas_hba.parent_dev); } if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc->sas_hba.enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); - ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", - ioc->sas_hba.handle, - (u64)ioc->sas_hba.sas_address, - ioc->sas_hba.num_phys); + printk(MPT3SAS_INFO_FMT "host_add: handle(0x%04x), " + "sas_addr(0x%016llx), phys(%d)\n", ioc->name, ioc->sas_hba.handle, + (unsigned long long) ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys); if (ioc->sas_hba.enclosure_handle) { if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, - &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, - ioc->sas_hba.enclosure_handle))) + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) ioc->sas_hba.enclosure_logical_id = - le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); } out: @@ -5642,7 +8205,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) * * Creating expander object, stored in ioc->sas_expander_list. * - * Return: 0 for success, else error. + * Return 0 for success, else error. 
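The host-add and expander code in this area keys every phy and every lookup by both SAS address and hba_port. A minimal sketch of the port-by-id lookup it relies on (illustrative only; the driver's real mpt3sas_get_port_by_id() may additionally take locking and the dirty flag into account):

static struct hba_port *
get_port_by_id_sketch(struct MPT3SAS_ADAPTER *ioc, u8 port_id)
{
	struct hba_port *port;

	/* Linear scan of the adapter's port table, matching on port id. */
	list_for_each_entry(port, &ioc->port_table_list, list)
		if (port->port_id == port_id)
			return port;
	return NULL;
}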
*/ static int _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -5657,6 +8220,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) u64 sas_address, sas_address_parent = 0; int i; unsigned long flags; + u8 port_id; struct _sas_port *mpt3sas_port = NULL; int rc = 0; @@ -5669,16 +8233,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -1; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -1; } @@ -5686,14 +8250,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) != 0) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -1; } + + port_id = expander_pg0.PhysicalPort; if (sas_address_parent != ioc->sas_hba.sas_address) { spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, - sas_address_parent); + sas_expander = + mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address_parent, mpt3sas_get_port_by_id(ioc, port_id)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_expander) { rc = _scsih_expander_add(ioc, parent_handle); @@ -5705,7 +8272,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_address = le64_to_cpu(expander_pg0.SASAddress); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, - sas_address); + sas_address, mpt3sas_get_port_by_id(ioc, port_id)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) @@ -5714,8 +8281,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_expander = kzalloc(sizeof(struct _sas_node), GFP_KERNEL); if (!sas_expander) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -1; } @@ -5723,28 +8290,36 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_expander->num_phys = expander_pg0.NumPhys; sas_expander->sas_address_parent = sas_address_parent; sas_expander->sas_address = sas_address; + sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id); + if (!sas_expander->port) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } - ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", - handle, parent_handle, - (u64)sas_expander->sas_address, sas_expander->num_phys); + printk(MPT3SAS_INFO_FMT "expander_add: handle(0x%04x)," + " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, + handle, parent_handle, (unsigned long long) + sas_expander->sas_address, sas_expander->num_phys); if (!sas_expander->num_phys) goto out_fail; sas_expander->phy = kcalloc(sas_expander->num_phys, 
sizeof(struct _sas_phy), GFP_KERNEL); if (!sas_expander->phy) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } INIT_LIST_HEAD(&sas_expander->sas_port_list); mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, - sas_address_parent); + sas_address_parent, sas_expander->port); if (!mpt3sas_port) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } @@ -5753,19 +8328,20 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, &expander_pg1, i, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } sas_expander->phy[i].handle = handle; sas_expander->phy[i].phy_id = i; + sas_expander->phy[i].port = mpt3sas_get_port_by_id(ioc, port_id); if ((mpt3sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], expander_pg1, sas_expander->parent_dev))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } @@ -5781,13 +8357,14 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) } _scsih_expander_node_add(ioc, sas_expander); - return 0; + return 0; out_fail: if (mpt3sas_port) - mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, - sas_address_parent); + mpt3sas_transport_port_remove(ioc, + sas_expander->sas_address, + sas_address_parent, sas_expander->port); kfree(sas_expander); return rc; } @@ -5796,9 +8373,12 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) * mpt3sas_expander_remove - removing expander object * @ioc: per adapter object * @sas_address: expander sas_address + * + * Return nothing. */ void -mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) +mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) { struct _sas_node *sas_expander; unsigned long flags; @@ -5806,9 +8386,12 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) if (ioc->shost_recovery) return; + if (!port) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, - sas_address); + sas_address, port); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) _scsih_expander_node_remove(ioc, sas_expander); @@ -5824,8 +8407,8 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) * Callback handler when sending internal generated SCSI_IO. * The callback index passed is `ioc->scsih_cb_idx` * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
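+ * (Editor's note: the internal-command helpers added below, _scsi_send_scsi_io + * and the wrappers built on it, block on ioc->scsih_cmds.done; this callback + * appears to be what completes that wait - inferred from the callers, the + * body is elided by this hunk.)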
*/ static u8 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) @@ -5848,20 +8431,1194 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) return 1; } +/** + * _scsi_send_scsi_io - send internal SCSI_IO to target + * @ioc: per adapter object + * @transfer_packet: packet describing the transfer + * @tr_timeout: Target Reset Timeout + * @tr_method: Target Reset Method + * Context: user + * + * Returns 0 for success, non-zero for failure. + */ +static int +_scsi_send_scsi_io(struct MPT3SAS_ADAPTER *ioc, struct _scsi_io_transfer + *transfer_packet, u8 tr_timeout, u8 tr_method) +{ + Mpi2SCSIIOReply_t *mpi_reply; + Mpi2SCSIIORequest_t *mpi_request; + u16 smid; + unsigned long timeleft; + u8 issue_reset = 0; + int rc; + void *priv_sense; + u32 mpi_control; + void *psge; + dma_addr_t data_out_dma = 0; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + size_t data_out_sz = 0; + u16 handle; + u8 retry_count = 0, host_reset_count = 0; + int tm_return_code; + if (ioc->pci_error_recovery) { + printk(MPT3SAS_INFO_FMT "%s: pci error recovery in progress!\n", + ioc->name, __func__); + return -EFAULT; + } + if (ioc->shost_recovery) { + printk(MPT3SAS_INFO_FMT "%s: host recovery in progress!\n", + ioc->name, __func__); + return -EAGAIN; + } + + handle = transfer_packet->handle; + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { + printk(MPT3SAS_INFO_FMT "%s: no device!\n", + ioc->name, __func__); + return -EFAULT; + } + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + retry_loop: + if (test_bit(handle, ioc->device_remove_in_progress)) { + printk(MPT3SAS_INFO_FMT "%s: device removal in progress\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + + /* Use second reserved smid for discovery related IOs */ + smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY; + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); + if (transfer_packet->is_raid) + mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + + switch (transfer_packet->dir) { + case DMA_TO_DEVICE: + mpi_control = MPI2_SCSIIO_CONTROL_WRITE; + data_out_dma = transfer_packet->data_dma; + data_out_sz = transfer_packet->data_length; + break; + case DMA_FROM_DEVICE: + mpi_control = MPI2_SCSIIO_CONTROL_READ; + data_in_dma = transfer_packet->data_dma; + data_in_sz = transfer_packet->data_length; + break; + case DMA_BIDIRECTIONAL: + mpi_control = MPI2_SCSIIO_CONTROL_BIDIRECTIONAL; + /* TODO - is BIDI support needed ??
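+ * (editor's note: none of the internal commands built in these hunks use + * DMA_BIDIRECTIONAL, so this arm is effectively unreachable; the BUG() + * below makes that assumption explicit)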
*/ + BUG(); + break; + default: + case DMA_NONE: + mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; + break; + } + + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + + mpi_request->Control = cpu_to_le32(mpi_control | + MPI2_SCSIIO_CONTROL_SIMPLEQ); + mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length); + mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + priv_sense = mpt3sas_base_get_sense_buffer(ioc, smid); + mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; + mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length); + int_to_scsilun(transfer_packet->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb, + transfer_packet->cdb_length); + init_completion(&ioc->scsih_cmds.done); + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) + ioc->put_smid_scsi_io(ioc, smid, handle); + else + ioc->put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->scsih_cmds.done, + transfer_packet->timeout*HZ); + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + issue_reset = + mpt3sas_base_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2SCSIIORequest_t)/4); + goto issue_target_reset; + } + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + transfer_packet->valid_reply = 1; + mpi_reply = ioc->scsih_cmds.reply; + transfer_packet->sense_length = + le32_to_cpu(mpi_reply->SenseCount); + if (transfer_packet->sense_length) + memcpy(transfer_packet->sense, priv_sense, + transfer_packet->sense_length); + transfer_packet->transfer_length = + le32_to_cpu(mpi_reply->TransferCount); + transfer_packet->ioc_status = + le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + transfer_packet->scsi_state = mpi_reply->SCSIState; + transfer_packet->scsi_status = mpi_reply->SCSIStatus; + transfer_packet->log_info = + le32_to_cpu(mpi_reply->IOCLogInfo); + } + goto out; + + issue_target_reset: + if (issue_reset) { + printk(MPT3SAS_INFO_FMT "issue target reset: handle" + "(0x%04x)\n", ioc->name, handle); + tm_return_code = + mpt3sas_scsih_issue_locked_tm(ioc, handle, + 0xFFFFFFFF, 0xFFFFFFFF, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + tr_timeout, tr_method); + + if (tm_return_code == SUCCESS) { + printk(MPT3SAS_INFO_FMT "target reset completed: handle" + "(0x%04x)\n", ioc->name, handle); + /* If the command is successfully aborted due to + * target reset TM then do up to three retries else + * command will be terminated by the host reset TM and + * hence retry once. 
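+ * Editor's summary: MPT3_CMD_COMPLETE set after the reset means the IO + * was cleanly aborted, so up to three retries are allowed via + * retry_count; MPT3_CMD_RESET means a host reset terminated it, so a + * single retry is allowed via host_reset_count.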
+ */ + if (((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) && + retry_count++ < 3) || + ((ioc->scsih_cmds.status & MPT3_CMD_RESET) && + host_reset_count++ == 0)) { + printk(MPT3SAS_INFO_FMT "issue retry: " + "handle (0x%04x)\n", ioc->name, handle); + goto retry_loop; + } + } else + printk(MPT3SAS_INFO_FMT "target reset didn't complete:" + " handle(0x%04x)\n", ioc->name, handle); + rc = -EFAULT; + } else + rc = -EAGAIN; + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + return rc; +} + +/** + * _scsih_determine_disposition - classify an internal SCSI_IO result + * @ioc: per adapter object + * @transfer_packet: packet describing the transfer + * Context: user + * + * Determines if an internally generated scsi_io returned good data, or + * whether it needs to be retried or treated as an error. + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_determine_disposition(struct MPT3SAS_ADAPTER *ioc, + struct _scsi_io_transfer *transfer_packet) +{ + enum device_responsive_state rc; + struct sense_info sense_info = {0, 0, 0}; + u8 check_sense = 0; + char *desc = NULL; + + if (!transfer_packet->valid_reply) + return DEVICE_READY; + + switch (transfer_packet->ioc_status) { + case MPI2_IOCSTATUS_BUSY: + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + rc = DEVICE_RETRY; + break; + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + if (transfer_packet->log_info == 0x31170000) { + rc = DEVICE_RETRY; + break; + } + if (transfer_packet->cdb[0] == REPORT_LUNS) + rc = DEVICE_READY; + else + rc = DEVICE_RETRY; + break; + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SUCCESS: + if (!transfer_packet->scsi_state && + !transfer_packet->scsi_status) { + rc = DEVICE_READY; + break; + } + if (transfer_packet->scsi_state & + MPI2_SCSI_STATE_AUTOSENSE_VALID) { + rc = DEVICE_ERROR; + check_sense = 1; + break; + } + if (transfer_packet->scsi_state & + (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS | + MPI2_SCSI_STATE_TERMINATED)) { + rc = DEVICE_RETRY; + break; + } + if (transfer_packet->scsi_status >= + MPI2_SCSI_STATUS_BUSY) { + rc = DEVICE_RETRY; + break; + } + rc = DEVICE_READY; + break; + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + if (transfer_packet->scsi_state & + MPI2_SCSI_STATE_TERMINATED) + rc = DEVICE_RETRY; + else + rc = DEVICE_ERROR; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + default: + rc = DEVICE_ERROR; + break; + } + + if (check_sense) { + _scsih_normalize_sense(transfer_packet->sense, &sense_info); + if (sense_info.skey == UNIT_ATTENTION) + rc = DEVICE_RETRY_UA; + else if (sense_info.skey == NOT_READY) { + /* medium isn't present */ + if (sense_info.asc == 0x3a) + rc = DEVICE_READY; + /* LOGICAL UNIT NOT READY */ + else if (sense_info.asc == 0x04) { + if (sense_info.ascq == 0x03 || + sense_info.ascq == 0x0b || + sense_info.ascq == 0x0c) { + rc = DEVICE_ERROR; + } else + rc = DEVICE_START_UNIT; + } + /* LOGICAL UNIT HAS NOT SELF-CONFIGURED YET */ + else if (sense_info.asc == 0x3e && !sense_info.ascq) + rc = DEVICE_START_UNIT; + } else if (sense_info.skey == ILLEGAL_REQUEST && + transfer_packet->cdb[0] == REPORT_LUNS) { + rc = DEVICE_READY; + } else if (sense_info.skey == MEDIUM_ERROR) { + + /* medium is corrupt, let's add the device so + * users can collect some info as needed + */ + + if (sense_info.asc == 0x31) + rc = DEVICE_READY; + } else if
(sense_info.skey == HARDWARE_ERROR) { + /* Defect List Error, still add the device */ + if (sense_info.asc == 0x19) + rc = DEVICE_READY; + } + } + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { + switch (rc) { + case DEVICE_READY: + desc = "ready"; + break; + case DEVICE_RETRY: + desc = "retry"; + break; + case DEVICE_RETRY_UA: + desc = "retry_ua"; + break; + case DEVICE_START_UNIT: + desc = "start_unit"; + break; + case DEVICE_STOP_UNIT: + desc = "stop_unit"; + break; + case DEVICE_ERROR: + desc = "error"; + break; + } + + printk(MPT3SAS_INFO_FMT "\tioc_status(0x%04x), " + "loginfo(0x%08x), scsi_status(0x%02x), " + "scsi_state(0x%02x), rc(%s)\n", + ioc->name, transfer_packet->ioc_status, + transfer_packet->log_info, transfer_packet->scsi_status, + transfer_packet->scsi_state, desc); + + if (check_sense) + printk(MPT3SAS_INFO_FMT "\t[sense_key,asc,ascq]: " + "[0x%02x,0x%02x,0x%02x]\n", ioc->name, + sense_info.skey, sense_info.asc, sense_info.ascq); + } + return rc; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) +/** + * _scsih_read_capacity_16 - send READ_CAPACITY_16 to target + * @ioc: per adapter object + * @handle: expander handle + * @data: report luns data payload + * @data_length: length of data in bytes + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_read_capacity_16(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, + void *data, u32 data_length) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *parameter_data; + int return_code; + + parameter_data = NULL; + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + parameter_data = pci_alloc_consistent(ioc->pdev, data_length, + &transfer_packet->data_dma); + if (!parameter_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + memset(parameter_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->lun = lun; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 16; + transfer_packet->cdb[0] = SERVICE_ACTION_IN; + transfer_packet->cdb[1] = 0x10; + transfer_packet->cdb[13] = data_length; + transfer_packet->timeout = 10; + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_READY) + memcpy(data, parameter_data, data_length); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } + + out: + if (parameter_data) + pci_free_consistent(ioc->pdev, data_length, parameter_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} +#endif + +/** + * _scsih_inquiry_vpd_sn - obtain device serial number + * @ioc: per adapter object + * @handle: device handle + * @serial_number: returns pointer to serial_number + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_inquiry_vpd_sn(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 **serial_number) +{ + struct _scsi_io_transfer *transfer_packet; + enum 
device_responsive_state rc; + u8 *inq_data; + int return_code; + u32 data_length; + u8 len; + struct _pcie_device *pcie_device = NULL; + u8 tr_timeout = 30; + u8 tr_method = 0; + + inq_data = NULL; + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + data_length = 252; + inq_data = pci_alloc_consistent(ioc->pdev, data_length, + &transfer_packet->data_dma); + if (!inq_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + memset(inq_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = INQUIRY; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[2] = 0x80; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 5; + + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + tr_timeout = pcie_device->reset_timeout; + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } + else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_READY) { + len = strlen(&inq_data[4]) + 1; + *serial_number = kmalloc(len, GFP_KERNEL); + if (*serial_number) + strncpy(*serial_number, &inq_data[4], len); + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } + + out: + if (pcie_device) + pcie_device_put(pcie_device); + if (inq_data) + pci_free_consistent(ioc->pdev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +/** + * _scsih_inquiry_vpd_supported_pages - get supported pages + * @ioc: per adapter object + * @handle: device handle + * @data: report luns data payload + * @data_length: length of data in bytes + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_inquiry_vpd_supported_pages(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u32 lun, void *data, u32 data_length) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *inq_data; + int return_code; + + inq_data = NULL; + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + inq_data = pci_alloc_consistent(ioc->pdev, data_length, + &transfer_packet->data_dma); + if (!inq_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + memset(inq_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 6; + transfer_packet->lun = lun; + transfer_packet->cdb[0] = INQUIRY; + 
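/* Editor's note: cdb[1] = 1 below sets the EVPD bit; cdb[2] selects the + * VPD page and is left at 0x00 here (Supported VPD Pages), whereas + * _scsih_inquiry_vpd_sn() above uses page 0x80 (Unit Serial Number). */ +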
transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 5; + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_READY) + memcpy(data, inq_data, data_length); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } + + out: + if (inq_data) + pci_free_consistent(ioc->pdev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +/** + * _scsih_report_luns - send REPORT_LUNS to target + * @ioc: per adapter object + * @handle: expander handle + * @data: report luns data payload + * @data_length: length of data in bytes + * @is_pd: is this hidden raid component + * @tr_timeout: Target Reset Timeout + * @tr_method: Target Reset Method + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_report_luns(struct MPT3SAS_ADAPTER *ioc, u16 handle, void *data, + u32 data_length, u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *lun_data; + int return_code; + int retries; + + lun_data = NULL; + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + lun_data = pci_alloc_consistent(ioc->pdev, data_length, + &transfer_packet->data_dma); + if (!lun_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + for (retries = 0; retries < 4; retries++) { + rc = DEVICE_ERROR; + printk(MPT3SAS_INFO_FMT "REPORT_LUNS: handle(0x%04x), " + "retries(%d)\n", ioc->name, handle, retries); + memset(lun_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = REPORT_LUNS; + transfer_packet->cdb[6] = (data_length >> 24) & 0xFF; + transfer_packet->cdb[7] = (data_length >> 16) & 0xFF; + transfer_packet->cdb[8] = (data_length >> 8) & 0xFF; + transfer_packet->cdb[9] = data_length & 0xFF; + transfer_packet->timeout = 5; + transfer_packet->is_raid = is_pd; + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_READY) { + memcpy(data, lun_data, data_length); + goto out; + } else if (rc == DEVICE_ERROR) + goto out; + break; + case -EAGAIN: + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + break; + } + } + out: + + if (rc == DEVICE_RETRY) { + rc = DEVICE_ERROR; + } + if (lun_data) + pci_free_consistent(ioc->pdev, data_length, lun_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +/** + * _scsih_issue_logsense - obtain page 0x2f info by issuing Log Sense CMD + * @ioc: per adapter object + * @handle: device handle + * @lun: lun number + * Context: User. 
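+ * Editor's note: in the CDB built below, byte 2 = 0x6F encodes PC 01b + * (current cumulative values) in bits 7:6 and log page 0x2F (Informational + * Exceptions) in bits 5:0, matching the inline comments on the assignments.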
+ * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_issue_logsense(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u8 *log_sense_data; + int return_code; + u32 data_length; + + log_sense_data = NULL; + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + /* Allocate 16 byte DMA buffer to receive the LOG Sense Data */ + data_length = 16; + log_sense_data = pci_alloc_consistent(ioc->pdev, data_length, + &transfer_packet->data_dma); + if (!log_sense_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + memset(log_sense_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->lun = lun; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 10; + transfer_packet->cdb[0] = LOG_SENSE; /* Operation Code: 0x4D */ + transfer_packet->cdb[2] = 0x6F; /* Page Code: 0x2F & PC: 0x1 */ + transfer_packet->cdb[8] = data_length; /* Allocation Length: 16 bytes */ + transfer_packet->timeout = 5; + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_READY) { + /* Check for ASC field value in the Log Sense Data, + * if this value is 0x5D then issue SEP message to + * turn on PFA LED for this drive */ + if (log_sense_data[8] == 0x5D) + _scsih_smart_predicted_fault(ioc, handle, 1); + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (transfer_packet) { + if (log_sense_data) + pci_free_consistent(ioc->pdev, data_length, + log_sense_data, transfer_packet->data_dma); + kfree(transfer_packet); + } + return rc; +} + +/** + * _scsih_start_unit - send START_UNIT to target + * @ioc: per adapter object + * @handle: device handle + * @lun: lun number + * @is_pd: is this hidden raid component + * @tr_timeout: Target Reset Timeout + * @tr_method: Target Reset Method + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_start_unit(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, u8 is_pd, + u8 tr_timeout, u8 tr_method) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; + + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = START_STOP; /* START STOP UNIT: 0x1B */ + transfer_packet->cdb[1] = 1; /* IMMED */ + transfer_packet->cdb[4] = 1; /* START bit */ + transfer_packet->timeout = 15; + transfer_packet->is_raid = is_pd; + + printk(MPT3SAS_INFO_FMT "START_UNIT: handle(0x%04x), " + "lun(%d)\n", ioc->name, handle, lun); + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) {
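+ /* Editor's note: 0 means the IO itself completed and the disposition + * logic decides READY vs. retry; -EAGAIN is a transient + * transport/recovery condition worth retrying; anything else is + * treated as fatal for this probe. */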
+ case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } + out: + kfree(transfer_packet); + return rc; +} + +/** + * mpt3sas_scsih_sata_smart_polling - loop over every device attached to + * the host and issue the LOG SENSE command to SATA drives. + * @ioc: per adapter object + * Context: + */ +void +mpt3sas_scsih_sata_smart_polling(struct MPT3SAS_ADAPTER *ioc) +{ + struct scsi_device *sdev; + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device; + static u32 reset_count; + u32 reset_occurred = 0; + + if (reset_count < ioc->ioc_reset_count) { + reset_occurred = 1; + reset_count = ioc->ioc_reset_count; + } + + __shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + + if (!sas_device_priv_data->sas_target->port) + continue; + + sas_device = mpt3sas_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + + /* Don't send the Log Sense CMD + * if the device doesn't support SATA SMART */ + if (!sas_device) + continue; + + if (!sas_device->supports_sata_smart) { + sas_device_put(sas_device); + continue; + } + + /* Clear the pfa_led_on flag when polling for the first time + * after a host reset */ + if (reset_occurred) + sas_device->pfa_led_on = 0; + else if (sas_device->pfa_led_on) { + sas_device_put(sas_device); + continue; + } + + sas_device_put(sas_device); + + /* Don't send the Log Sense CMD if the device is part of a volume + * or is a RAID component */ + if ((sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) || + (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_VOLUME)) + continue; + + if (sas_device_priv_data->block) + continue; + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + _scsih_issue_logsense(ioc, sas_device_priv_data->sas_target->handle, sdev->lun); + } +} + +/** + * _scsih_test_unit_ready - send TUR to target + * @ioc: per adapter object + * @handle: device handle + * @lun: lun number + * @is_pd: is this hidden raid component + * @tr_timeout: Target Reset timeout value for a PCIe device + * @tr_method: PCIe device Target Reset method + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_test_unit_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; + int sata_init_failure = 0; + + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = TEST_UNIT_READY; + transfer_packet->timeout = 10; + transfer_packet->is_raid = is_pd; + + sata_init_retry: + printk(MPT3SAS_INFO_FMT "TEST_UNIT_READY: handle(0x%04x), " + "lun(%d)\n", ioc->name, handle, lun); + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_RETRY && +
transfer_packet->log_info == 0x31111000) { + if (!sata_init_failure++) { + printk(MPT3SAS_INFO_FMT + "SATA Initialization Timeout, " + "sending a retry\n", ioc->name); + rc = DEVICE_READY; + goto sata_init_retry; + } else { + printk(MPT3SAS_ERR_FMT + "SATA Initialization Failed\n", ioc->name); + rc = DEVICE_ERROR; + } + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } + out: + kfree(transfer_packet); + return rc; +} + +/** + * _scsih_ata_pass_thru_idd - obtain SATA device Identify Device Data + * @ioc: per adapter object + * @handle: device handle + * @is_ssd_device: is this SATA SSD device + * @tr_timeout: Target Reset Timeout + * @tr_method: Target Reset Method + * Context: user + * + * Returns device_responsive_state + */ +static enum device_responsive_state +_scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *is_ssd_device, u8 tr_timeout, u8 tr_method) +{ + struct _scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u8 *idd_data; + int return_code; + u32 data_length; + + idd_data = NULL; + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + data_length = 512; + idd_data = pci_alloc_consistent(ioc->pdev, data_length, + &transfer_packet->data_dma); + if (!idd_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + memset(idd_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = ATA_12; /* ATA PASS-THROUGH(12) */ + transfer_packet->cdb[1] = 0x8; + transfer_packet->cdb[2] = 0xd; + transfer_packet->cdb[3] = 0x1; + transfer_packet->cdb[9] = 0xec; /* IDENTIFY DEVICE */ + transfer_packet->timeout = 5; + + return_code = _scsi_send_scsi_io(ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = _scsih_determine_disposition(ioc, transfer_packet); + if (rc == DEVICE_READY) { + /* IDENTIFY DEVICE word 217 (Nominal Media Rotation + * Rate): a value of 1 means non-rotating media, i.e. + * an SSD; byte 434 is the little-endian low byte of + * word 217. */ + if (idd_data[434] == 1) + *is_ssd_device = 1; + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } + + out: + if (idd_data) { + pci_free_consistent(ioc->pdev, data_length, idd_data, + transfer_packet->data_dma); + } + kfree(transfer_packet); + return rc; +} #define MPT3_MAX_LUNS (255) +/** + * _scsih_wait_for_device_to_become_ready - handle busy devices + * @ioc: per adapter object + * @handle: device handle + * @retry_count: number of times this event has been retried + * @is_pd: is this hidden raid component + * @lun: lun number + * @tr_timeout: Target Reset Timeout + * @tr_method: Target Reset Method + * + * Some devices spend too much time in the busy state; queue the event later. + * + * Return the device_responsive_state.
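+ * Editor's summary of the flow below: TEST UNIT READY first; a + * DEVICE_START_UNIT disposition (not ready, start required) triggers + * START STOP UNIT followed by another TUR; once retry_count reaches + * command_retry_count a still-busy device is reported as DEVICE_ERROR.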
+ */ +static enum device_responsive_state +_scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + + if (ioc->pci_error_recovery) + return DEVICE_ERROR; + + if (ioc->shost_recovery) + return DEVICE_RETRY; + + rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method); + if (rc == DEVICE_READY || rc == DEVICE_ERROR) + return rc; + else if (rc == DEVICE_START_UNIT) { + rc = _scsih_start_unit(ioc, handle, lun, is_pd, tr_timeout, tr_method); + if (rc == DEVICE_ERROR) + return rc; + rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method); + } + + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count) + rc = DEVICE_ERROR; + return rc; +} + +/** + * _scsih_wait_for_target_to_become_ready - handle busy devices + * @ioc: per adapter object + * @handle: device handle + * @retry_count: number of times this event has been retried + * @is_pd: is this hidden raid component + * @tr_timeout: Target Reset timeout value + * @tr_method: Target Reset method, hot / protocol level. + * + * Some devices spend too much time in the busy state; queue the event later. + * + * Return the device_responsive_state. + */ +static enum device_responsive_state +_scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + struct scsi_lun *lun_data; + u32 length, num_luns; + u8 *data; + int lun; + struct scsi_lun *lunp; + + lun_data = kcalloc(MPT3_MAX_LUNS, sizeof(struct scsi_lun), GFP_KERNEL); + if (!lun_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return DEVICE_RETRY; + } + + rc = _scsih_report_luns(ioc, handle, lun_data, + MPT3_MAX_LUNS * sizeof(struct scsi_lun), is_pd, tr_timeout, tr_method); + + if (rc != DEVICE_READY) + goto out; + + /* decode the LUN list: the REPORT LUNS payload starts with a 4-byte + * big-endian LUN list length */ + data = (u8 *)lun_data; + length = ((data[0] << 24) | (data[1] << 16) | + (data[2] << 8) | (data[3] << 0)); + + num_luns = (length / sizeof(struct scsi_lun)); +#if 0 /* debug */ + if (num_luns) { + struct scsi_lun *lunp; + for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; + lunp++) + printk(KERN_INFO "%x\n", mpt_scsilun_to_int(lunp)); + } +#endif + lunp = &lun_data[1]; + lun = (num_luns) ?
mpt_scsilun_to_int(&lun_data[1]) : 0; + rc = _scsih_wait_for_device_to_become_ready(ioc, handle, retry_count, + is_pd, lun, tr_timeout, tr_method); + + if (rc == DEVICE_ERROR) + { + struct scsi_lun *lunq; + for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) + { + rc = _scsih_wait_for_device_to_become_ready(ioc, handle, + retry_count, is_pd, mpt_scsilun_to_int(lunq), tr_timeout, tr_method); + if (rc != DEVICE_ERROR) + goto out; + } + } +out: + kfree(lun_data); + return rc; +} /** * _scsih_check_access_status - check access flags * @ioc: per adapter object * @sas_address: sas address * @handle: sas device handle * @access_status: errors returned during discovery of the device * - * Return: 0 for success, else failure + * Return 0 for success, else failure */ static u8 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, @@ -5912,8 +9669,9 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, if (!rc) return 0; - ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n", - desc, (u64)sas_address, handle); + printk(MPT3SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), " + "handle(0x%04x)\n", ioc->name, desc, + (unsigned long long)sas_address, handle); return rc; } @@ -5922,8 +9680,10 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, * @ioc: per adapter object * @parent_sas_address: sas address of parent expander or sas host * @handle: attached device handle * @phy_number: phy number * @link_rate: new link rate + * + * Returns nothing. */ static void _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, @@ -5931,7 +9691,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, { Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; - struct _sas_device *sas_device; + struct _sas_device *sas_device = NULL; struct _enclosure_node *enclosure_dev = NULL; u32 ioc_status; unsigned long flags; @@ -5939,6 +9699,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, struct scsi_target *starget; struct MPT3SAS_TARGET *sas_target_priv_data; u32 device_info; + u8 *serial_number = NULL; + u8 *original_serial_number = NULL; + int rc; + struct hba_port *port; if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) @@ -5956,13 +9720,16 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, /* check if this is end device */ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); - if (!(_scsih_is_end_device(device_info))) + if (!(_scsih_is_sas_end_device(device_info))) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(sas_device_pg0.SASAddress); - sas_device = __mpt3sas_get_sdev_by_addr(ioc, - sas_address); + port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort); + if (!port) + goto out_unlock; + + sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port); if (!sas_device) goto out_unlock; @@ -5970,36 +9737,34 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, if (unlikely(sas_device->handle != handle)) { starget = sas_device->starget; sas_target_priv_data = starget->hostdata; - starget_printk(KERN_INFO, starget, - "handle changed from(0x%04x) to (0x%04x)!!!\n", - sas_device->handle, handle); + starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)" + " to (0x%04x)!!!\n", sas_device->handle, handle); sas_target_priv_data->handle = handle; sas_device->handle = handle; - if
(le16_to_cpu(sas_device_pg0.Flags) & - MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { - sas_device->enclosure_level = - sas_device_pg0.EnclosureLevel; - memcpy(sas_device->connector_name, - sas_device_pg0.ConnectorName, 4); + if ((le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) + && (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { + sas_device->enclosure_level = sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, sas_device_pg0.ConnectorName, 4); sas_device->connector_name[4] = '\0'; - } else { + } + else { sas_device->enclosure_level = 0; sas_device->connector_name[0] = '\0'; } sas_device->enclosure_handle = - le16_to_cpu(sas_device_pg0.EnclosureHandle); + le16_to_cpu(sas_device_pg0.EnclosureHandle); sas_device->is_chassis_slot_valid = 0; enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, - sas_device->enclosure_handle); + sas_device->enclosure_handle); if (enclosure_dev) { sas_device->enclosure_logical_id = - le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); if (le16_to_cpu(enclosure_dev->pg0.Flags) & MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { sas_device->is_chassis_slot_valid = 1; sas_device->chassis_slot = - enclosure_dev->pg0.ChassisSlot; + enclosure_dev->pg0.ChassisSlot; } } } @@ -6007,8 +9772,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, /* check if device is present */ if (!(le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { - ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n", - handle); + printk(MPT3SAS_ERR_FMT "device is not present " + "handle(0x%04x), flags!!!\n", ioc->name, handle); goto out_unlock; } @@ -6017,15 +9782,46 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, sas_device_pg0.AccessStatus)) goto out_unlock; + original_serial_number = sas_device->serial_number; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - _scsih_ublock_io_device(ioc, sas_address); + if (issue_scsi_cmd_to_bringup_drive) + _scsih_ublock_io_device_wait(ioc, sas_address, port); + else + _scsih_ublock_io_device_to_running(ioc, sas_address, port); - if (sas_device) - sas_device_put(sas_device); - return; + /* check to see if the serial number is still the same; if not, + * delete and re-add the device + */ + if (!original_serial_number) + goto out; + if (_scsih_inquiry_vpd_sn(ioc, handle, &serial_number) == DEVICE_READY + && serial_number) { + rc = strcmp(original_serial_number, serial_number); + kfree(serial_number); + if (!rc) { + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + + /* Turn on the PFA LED for a SATA SMART drive in an + * enclosure whose PFA LED was turned on before it + * went into the blocked state */ + if (sata_smart_polling) { + if (sas_device->pfa_led_on && sas_device->supports_sata_smart) + _scsih_send_event_to_turn_on_pfa_led(ioc, handle); + } + } + goto out; + } + mpt3sas_device_remove_by_sas_address(ioc, + sas_address, port); + mpt3sas_transport_update_links(ioc, parent_sas_address, + handle, phy_number, link_rate, port); + _scsih_add_device(ioc, handle, 0, 0); + } + goto out; out_unlock: spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: if (sas_device) sas_device_put(sas_device); } @@ -6034,15 +9830,15 @@ out_unlock: * _scsih_add_device - creating sas device object * @ioc: per adapter object * @handle: sas device handle - * @phy_num: phy number end device attached to + * @retry_count: number of times this event has been retried * @is_pd: is this hidden raid component * * Creating end device object, stored in
ioc->sas_device_list. * - * Return: 0 for success, non-zero for failure. + * Return 1 means queue the event later, 0 means complete the event */ static int -_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, +_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count, u8 is_pd) { Mpi2ConfigReply_t mpi_reply; @@ -6052,64 +9848,113 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u32 ioc_status; u64 sas_address; u32 device_info; + enum device_responsive_state rc; + u8 connector_name[5], port_id; + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - return -1; + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - return -1; + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; } /* check if this is end device */ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); - if (!(_scsih_is_end_device(device_info))) - return -1; + if (!(_scsih_is_sas_end_device(device_info))) + return 0; set_bit(handle, ioc->pend_os_device_add); sas_address = le64_to_cpu(sas_device_pg0.SASAddress); /* check if device is present */ if (!(le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { - ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", - handle); - return -1; + printk(MPT3SAS_ERR_FMT "device is not present " + "handle(0x%04x)!!!\n", ioc->name, handle); + return 0; } /* check if there were any issues with discovery */ if (_scsih_check_access_status(ioc, sas_address, handle, sas_device_pg0.AccessStatus)) - return -1; - + return 0; + port_id = sas_device_pg0.PhysicalPort; sas_device = mpt3sas_get_sdev_by_addr(ioc, - sas_address); + sas_address, + mpt3sas_get_port_by_id(ioc, port_id)); + if (sas_device) { clear_bit(handle, ioc->pend_os_device_add); sas_device_put(sas_device); - return -1; + return 0; } - if (sas_device_pg0.EnclosureHandle) { + if (le16_to_cpu(sas_device_pg0.EnclosureHandle)) { enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, - le16_to_cpu(sas_device_pg0.EnclosureHandle)); - if (enclosure_dev == NULL) - ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", - sas_device_pg0.EnclosureHandle); + le16_to_cpu(sas_device_pg0.EnclosureHandle)); + if (enclosure_dev == NULL) + printk(MPT3SAS_INFO_FMT "Enclosure handle(0x%04x) " + "doesn't match with enclosure device!\n", + ioc->name, le16_to_cpu(sas_device_pg0.EnclosureHandle)); } + /* + * Wait for a device that is becoming ready; + * queue the request later if the device is busy.
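+ * (Editor's note: the dispositions map onto the new return contract as + * follows: DEVICE_RETRY, DEVICE_START_UNIT, DEVICE_STOP_UNIT and + * DEVICE_RETRY_UA requeue the event, i.e. return 1, while DEVICE_READY + * and DEVICE_ERROR complete it, i.e. return 0.)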
+ */ + if ((!ioc->wait_for_discovery_to_complete) && + (issue_scsi_cmd_to_bringup_drive)) { + printk(MPT3SAS_INFO_FMT "detecting: handle(0x%04x), " + "sas_address(0x%016llx), phy(%d)\n", ioc->name, handle, + (unsigned long long)sas_address, sas_device_pg0.PhyNum); + rc = _scsih_wait_for_target_to_become_ready(ioc, handle, + retry_count, is_pd, 30, 0); + if (rc != DEVICE_READY) { + if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "device not ready: slot(%d) \n", ioc->name, + __func__, + le16_to_cpu(sas_device_pg0.Slot))); + if ((le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) && + (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { + memcpy(connector_name, + sas_device_pg0.ConnectorName, 4); + connector_name[4] = '\0'; + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "device not ready: " + "enclosure level(0x%04x), " + "connector name( %s)\n", ioc->name, + __func__, sas_device_pg0.EnclosureLevel, + connector_name)); + } + + if ((enclosure_dev) && (le16_to_cpu(enclosure_dev->pg0.Flags) & + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID)) + printk(MPT3SAS_INFO_FMT "chassis slot(0x%04x)\n", + ioc->name, enclosure_dev->pg0.ChassisSlot); + + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + return 1; + else if (rc == DEVICE_ERROR) + return 0; + } + } sas_device = kzalloc(sizeof(struct _sas_device), GFP_KERNEL); if (!sas_device) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 0; } @@ -6118,23 +9963,34 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, if (_scsih_get_sas_address(ioc, le16_to_cpu(sas_device_pg0.ParentDevHandle), &sas_device->sas_address_parent) != 0) - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); sas_device->enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); if (sas_device->enclosure_handle != 0) - sas_device->slot = - le16_to_cpu(sas_device_pg0.Slot); + sas_device->slot = le16_to_cpu(sas_device_pg0.Slot); sas_device->device_info = device_info; sas_device->sas_address = sas_address; + sas_device->port = mpt3sas_get_port_by_id(ioc, port_id); + if (!sas_device->port) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } sas_device->phy = sas_device_pg0.PhyNum; - sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & - MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & + MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; - if (le16_to_cpu(sas_device_pg0.Flags) - & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { - sas_device->enclosure_level = - sas_device_pg0.EnclosureLevel; + sas_device->supports_sata_smart = + (le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED); + } + + if ((le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) + && (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { + sas_device->enclosure_level = sas_device_pg0.EnclosureLevel; memcpy(sas_device->connector_name, sas_device_pg0.ConnectorName, 4); sas_device->connector_name[4] = '\0'; @@ -6154,7 +10010,6 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, enclosure_dev->pg0.ChassisSlot; } } - /* get device name */ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); @@ -6163,6 +10018,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, else _scsih_sas_device_add(ioc, sas_device); +out: sas_device_put(sas_device); return 0; } @@ -6170,7 +10026,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, /** * _scsih_remove_device - removing sas device object * @ioc: per adapter object - * @sas_device: the sas_device object + * @sas_device_delete: the sas_device object + * + * Return nothing. */ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, @@ -6178,24 +10036,24 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, { struct MPT3SAS_TARGET *sas_target_priv_data; - if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && - (sas_device->pfa_led_on)) { + if (((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + || ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && sata_smart_polling)) + && (sas_device->pfa_led_on)) { _scsih_turn_off_pfa_led(ioc, sas_device); sas_device->pfa_led_on = 0; } - dewtprintk(ioc, - ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", - __func__, - sas_device->handle, (u64)sas_device->sas_address)); - - dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter: " + "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, + sas_device->handle, (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL)); if (sas_device->starget && sas_device->starget->hostdata) { sas_target_priv_data = sas_device->starget->hostdata; sas_target_priv_data->deleted = 1; - _scsih_ublock_io_device(ioc, sas_device->sas_address); + _scsih_ublock_io_device(ioc, sas_device->sas_address, + sas_device->port); sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; } @@ -6203,19 +10061,24 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, if (!ioc->hide_drives) mpt3sas_transport_port_remove(ioc, sas_device->sas_address, - sas_device->sas_address_parent); - - ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", - sas_device->handle, (u64)sas_device->sas_address); + sas_device->sas_address_parent, + sas_device->port); + printk(MPT3SAS_INFO_FMT "removing handle(0x%04x), sas_addr" + "(0x%016llx)\n", ioc->name, sas_device->handle, + (unsigned long long) sas_device->sas_address); _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); - dewtprintk(ioc, - ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", - __func__, - sas_device->handle, (u64)sas_device->sas_address)); - dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit: " + "handle(0x%04x), 
sas_addr(0x%016llx)\n", ioc->name, __func__, + sas_device->handle, (unsigned long long) + sas_device->sas_address)); + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL)); + + kfree(sas_device->serial_number); + + } /** @@ -6253,8 +10116,9 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, status_str = "unknown status"; break; } - ioc_info(ioc, "sas topology change: (%s)\n", status_str); - pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ + printk(MPT3SAS_INFO_FMT "sas topology change: (%s)\n", + ioc->name, status_str); + printk(KERN_INFO "\thandle(0x%04x), enclosure_handle(0x%04x) " "start_phy(%02d), count(%d)\n", le16_to_cpu(event_data->ExpanderDevHandle), le16_to_cpu(event_data->EnclosureHandle), @@ -6288,7 +10152,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, } link_rate = event_data->PHY[i].LinkRate >> 4; prev_link_rate = event_data->PHY[i].LinkRate & 0xF; - pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ + printk(KERN_INFO "\tphy(%02d), attached_handle(0x%04x): %s:" " link rate: new(0x%02x), old(0x%02x)\n", phy_number, handle, status_str, link_rate, prev_link_rate); @@ -6311,12 +10175,14 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, u16 reason_code; u8 phy_number, max_phys; struct _sas_node *sas_expander; + struct _sas_device *sas_device; u64 sas_address; unsigned long flags; u8 link_rate, prev_link_rate; - Mpi2EventDataSasTopologyChangeList_t *event_data = - (Mpi2EventDataSasTopologyChangeList_t *) - fw_event->event_data; + int rc; + int requeue_event; + struct hba_port *port; + Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_topology_change_event_debug(ioc, event_data); @@ -6330,11 +10196,13 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, _scsih_sas_host_refresh(ioc); if (fw_event->ignore) { - dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n")); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "ignoring expander " + "event\n", ioc->name)); return 0; } parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); + port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort); /* handle expander add */ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) @@ -6343,10 +10211,11 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, - parent_handle); + parent_handle); if (sas_expander) { sas_address = sas_expander->sas_address; max_phys = sas_expander->num_phys; + port = sas_expander->port; } else if (parent_handle < ioc->sas_hba.num_phys) { sas_address = ioc->sas_hba.sas_address; max_phys = ioc->sas_hba.num_phys; @@ -6357,10 +10226,10 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->sas_node_lock, flags); /* handle siblings events */ - for (i = 0; i < event_data->NumEntries; i++) { + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { if (fw_event->ignore) { - dewtprintk(ioc, - ioc_info(ioc, "ignoring expander event\n")); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "ignoring " + "expander event\n", ioc->name)); return 0; } if (ioc->remove_host || ioc->pci_error_recovery) @@ -6374,6 +10243,15 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) continue; +#if (LINUX_VERSION_CODE > 
KERNEL_VERSION(2,6,19)) + if (fw_event->delayed_work_active && (reason_code == + MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) { + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "ignoring " + "Targ not responding event phy in re-queued event " + "processing\n", ioc->name)); + continue; + } +#endif handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; @@ -6389,7 +10267,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, break; mpt3sas_transport_update_links(ioc, sas_address, - handle, phy_number, link_rate); + handle, phy_number, link_rate, port); if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) break; @@ -6397,10 +10275,33 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, _scsih_check_device(ioc, sas_address, handle, phy_number, link_rate); + /* This code after this point handles the test case + * where a device has been added, however its returning + * BUSY for sometime. Then before the Device Missing + * Delay expires and the device becomes READY, the + * device is removed and added back. + */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, + handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + + if (sas_device) { + sas_device_put(sas_device); + break; + } + if (!test_bit(handle, ioc->pend_os_device_add)) break; - /* fall through */ + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "handle(0x%04x) device not found: convert " + "event to a device add\n", ioc->name, + handle)); + event_data->PHY[i].PhyStatus &= 0xF0; + event_data->PHY[i].PhyStatus |= + MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED; case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: @@ -6408,10 +10309,20 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, break; mpt3sas_transport_update_links(ioc, sas_address, - handle, phy_number, link_rate); + handle, phy_number, link_rate, port); - _scsih_add_device(ioc, handle, phy_number, 0); + if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + break; + rc = _scsih_add_device(ioc, handle, + fw_event->retries[i], 0); + if (rc) {/* retry due to busy device */ + fw_event->retries[i]++; + requeue_event = 1; + } else {/* mark entry vacant */ + event_data->PHY[i].PhyStatus |= + MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT; + } break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: @@ -6423,16 +10334,17 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, /* handle expander removal */ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && sas_expander) - mpt3sas_expander_remove(ioc, sas_address); + mpt3sas_expander_remove(ioc, sas_address, port); - return 0; + return requeue_event; } /** * _scsih_sas_device_status_change_event_debug - debug for device event - * @ioc: ? * @event_data: event data payload * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, @@ -6484,21 +10396,25 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, reason_str = "unknown reason"; break; } - ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)", - reason_str, le16_to_cpu(event_data->DevHandle), - (u64)le64_to_cpu(event_data->SASAddress), - le16_to_cpu(event_data->TaskTag)); + printk(MPT3SAS_INFO_FMT "device status change: (%s)\n" + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) - pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", - event_data->ASC, event_data->ASCQ); - pr_cont("\n"); + printk(MPT3SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, + event_data->ASC, event_data->ASCQ); + printk(KERN_INFO "\n"); } + /** * _scsih_sas_device_status_change_event - handle device status change * @ioc: per adapter object - * @event_data: The fw event + * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. */ static void _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, @@ -6523,8 +10439,8 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(event_data->SASAddress); - sas_device = __mpt3sas_get_sdev_by_addr(ioc, - sas_address); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, + mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort)); if (!sas_device || !sas_device->starget) goto out; @@ -6539,12 +10455,11 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, else target_priv_data->tm_busy = 0; - if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) - ioc_info(ioc, - "%s tm_busy flag for handle(0x%04x)\n", + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + printk(MPT3SAS_INFO_FMT "%s tm_busy flag for handle(0x%04x)\n", + ioc->name, (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", target_priv_data->handle); - out: if (sas_device) sas_device_put(sas_device); @@ -6558,9 +10473,9 @@ out: * @ioc: per adapter object * @wwid: wwid * @handle: sas device handle - * @access_status: errors returned during discovery of the device + * @access_flags: errors returned during discovery of the device * - * Return: 0 for success, else failure + * Return 0 for success, else failure */ static u8 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, @@ -6579,10 +10494,9 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, break; case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: desc = "PCIe device blocked"; - ioc_info(ioc, - "Device with Access Status (%s): wwid(0x%016llx), " - "handle(0x%04x)\n ll only be added to the internal list", - desc, (u64)wwid, handle); + printk(MPT3SAS_INFO_FMT "Device with Access Status (%s): wwid(0x%016llx), " + "handle(0x%04x)\n ll be added to the internal list", + ioc->name, desc, (unsigned long long)wwid, handle); rc = 0; break; case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: @@ -6609,7 +10523,7 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED: desc = "nvme identify failed"; break; - case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED: + case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED: desc = "nvme qconfig failed"; break; case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED: @@ -6628,16 +10542,17 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, desc = "nvme failure status"; break; default: - ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n", - access_status, (u64)wwid, handle); + printk(MPT3SAS_ERR_FMT "NVMe discovery error(0x%02x): wwid(0x%016llx)," + " handle(0x%04x)\n", ioc->name, access_status, + (unsigned long long)wwid, handle); return rc; } if (!rc) return rc; - ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n", - desc, (u64)wwid, handle); + printk(MPT3SAS_ERR_FMT "NVMe discovery error(%s): wwid(0x%016llx), " + "handle(0x%04x)\n", ioc->name, desc, (unsigned long long)wwid, handle); return rc; } @@ -6646,6 +10561,8 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, * from SML and free up associated memory * @ioc: per adapter object * @pcie_device: the pcie_device object + * + * Return nothing. 
*/ static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, @@ -6653,60 +10570,69 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, { struct MPT3SAS_TARGET *sas_target_priv_data; - dewtprintk(ioc, - ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", - __func__, - pcie_device->handle, (u64)pcie_device->wwid)); - if (pcie_device->enclosure_handle != 0) - dewtprintk(ioc, - ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", - __func__, - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot)); - if (pcie_device->connector_name[0] != '\0') - dewtprintk(ioc, - ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n", - __func__, - pcie_device->enclosure_level, - pcie_device->connector_name)); +#if 0 + if (((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) || + (sata_smart_polling)) && (pcie_device->pfa_led_on)) { + _scsih_turn_off_pfa_led(ioc, pcie_device); + pcie_device->pfa_led_on = 0; + } +#endif + + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter: " + "handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, + pcie_device->handle, (unsigned long long) + pcie_device->wwid)); + if(pcie_device->enclosure_handle != 0) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter: " + "enclosure logical id(0x%016llx), slot(%d) \n", + ioc->name, __func__, + (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if(pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter: " + "enclosure level(0x%04x), connector name( %s)\n", + ioc->name, __func__, + pcie_device->enclosure_level, pcie_device->connector_name)); if (pcie_device->starget && pcie_device->starget->hostdata) { sas_target_priv_data = pcie_device->starget->hostdata; sas_target_priv_data->deleted = 1; - _scsih_ublock_io_device(ioc, pcie_device->wwid); + _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL); sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; } - ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", - pcie_device->handle, (u64)pcie_device->wwid); - if (pcie_device->enclosure_handle != 0) - ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n", - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot); - if (pcie_device->connector_name[0] != '\0') - ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n", - pcie_device->enclosure_level, - pcie_device->connector_name); + printk(MPT3SAS_INFO_FMT "removing handle(0x%04x), wwid" + "(0x%016llx)\n", ioc->name, pcie_device->handle, + (unsigned long long) pcie_device->wwid); + if(pcie_device->enclosure_handle != 0) + printk(MPT3SAS_INFO_FMT "removing :" + "enclosure logical id(0x%016llx), slot(%d) \n", + ioc->name, (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot); + if(pcie_device->connector_name[0] != '\0') + printk(MPT3SAS_INFO_FMT "removing :" + "enclosure level(0x%04x), connector name( %s)\n", + ioc->name, pcie_device->enclosure_level, + pcie_device->connector_name); - if (pcie_device->starget && (pcie_device->access_status != - MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) + if (pcie_device->starget && + (pcie_device->access_status != MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) scsi_remove_target(&pcie_device->starget->dev); - dewtprintk(ioc, - ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", - __func__, - pcie_device->handle, (u64)pcie_device->wwid)); - if (pcie_device->enclosure_handle != 0) - dewtprintk(ioc, - ioc_info(ioc, "%s: exit: enclosure 
logical id(0x%016llx), slot(%d)\n", - __func__, - (u64)pcie_device->enclosure_logical_id, - pcie_device->slot)); - if (pcie_device->connector_name[0] != '\0') - dewtprintk(ioc, - ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n", - __func__, - pcie_device->enclosure_level, - pcie_device->connector_name)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit: " + "handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, + pcie_device->handle, (unsigned long long) + pcie_device->wwid)); + if(pcie_device->enclosure_handle != 0) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit: " + "enclosure logical id(0x%016llx), slot(%d) \n", + ioc->name, __func__, + (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if(pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit: " + "enclosure level(0x%04x), connector name( %s)\n", + ioc->name, __func__, pcie_device->enclosure_level, + pcie_device->connector_name)); kfree(pcie_device->serial_number); } @@ -6716,6 +10642,8 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, * _scsih_pcie_check_device - checking device responsiveness * @ioc: per adapter object * @handle: attached device handle + * + * Returns nothing. */ static void _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -6729,9 +10657,12 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) struct scsi_target *starget; struct MPT3SAS_TARGET *sas_target_priv_data; u32 device_info; + u8 *serial_number = NULL; + u8 *original_serial_number = NULL; + int rc; - if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, - &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) + if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, &pcie_device_pg0, + MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) return; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; @@ -6756,9 +10687,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) starget = pcie_device->starget; sas_target_priv_data = starget->hostdata; pcie_device->access_status = pcie_device_pg0.AccessStatus; - starget_printk(KERN_INFO, starget, - "handle changed from(0x%04x) to (0x%04x)!!!\n", - pcie_device->handle, handle); + starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)" + " to (0x%04x)!!!\n", pcie_device->handle, handle); sas_target_priv_data->handle = handle; pcie_device->handle = handle; @@ -6766,8 +10696,9 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; - memcpy(&pcie_device->connector_name[0], - &pcie_device_pg0.ConnectorName[0], 4); + memcpy(pcie_device->connector_name, + pcie_device_pg0.ConnectorName, 4); + pcie_device->connector_name[4] = '\0'; } else { pcie_device->enclosure_level = 0; pcie_device->connector_name[0] = '\0'; @@ -6777,8 +10708,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) /* check if device is present */ if (!(le32_to_cpu(pcie_device_pg0.Flags) & MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { - ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n", - handle); + printk(MPT3SAS_ERR_FMT "device is not present " + "handle(0x%04x), flags!!!\n", ioc->name, handle); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); pcie_device_put(pcie_device); return; @@ -6792,11 +10723,30 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) return; } + original_serial_number = 
pcie_device->serial_number; spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); pcie_device_put(pcie_device); + if (issue_scsi_cmd_to_bringup_drive) + _scsih_ublock_io_device_wait(ioc, wwid, NULL); + else + _scsih_ublock_io_device_to_running(ioc, wwid, NULL); - _scsih_ublock_io_device(ioc, wwid); - + /* check to see if serial number still the same, if not, delete + * and re-add new device + */ + if (!original_serial_number) + return; + + if (_scsih_inquiry_vpd_sn(ioc, handle, &serial_number) == DEVICE_READY + && serial_number) { + rc = strcmp(original_serial_number, serial_number); + kfree(serial_number); + if (!rc) { + return; + } + _scsih_pcie_device_remove_by_wwid(ioc, wwid); + _scsih_pcie_add_device(ioc, handle, 0); + } return; } @@ -6804,33 +10754,42 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) * _scsih_pcie_add_device - creating pcie device object * @ioc: per adapter object * @handle: pcie device handle + * @retry_count: number of times this event has been retried * * Creating end device object, stored in ioc->pcie_device_list. * - * Return: 1 means queue the event later, 0 means complete the event + * Return 1 means queue the event later, 0 means complete the event */ static int -_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) +_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count) { Mpi26PCIeDevicePage0_t pcie_device_pg0; Mpi26PCIeDevicePage2_t pcie_device_pg2; Mpi2ConfigReply_t mpi_reply; struct _pcie_device *pcie_device; struct _enclosure_node *enclosure_dev; + u32 pcie_device_type; u32 ioc_status; u64 wwid; + enum device_responsive_state rc; + u8 connector_name[5]; + u8 tr_timeout = 30; + u8 tr_method = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + u8 protection_mask; +#endif if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 0; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 0; } @@ -6840,8 +10799,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) /* check if device is present */ if (!(le32_to_cpu(pcie_device_pg0.Flags) & MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { - ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", - handle); + printk(MPT3SAS_ERR_FMT "device is not present " + "handle(0x04%x)!!!\n", ioc->name, handle); return 0; } @@ -6850,8 +10809,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) pcie_device_pg0.AccessStatus)) return 0; - if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu - (pcie_device_pg0.DeviceInfo)))) + if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu(pcie_device_pg0.DeviceInfo)))) return 0; pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); @@ -6867,29 +10825,74 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) */ if (!(mpt3sas_scsih_is_pcie_scsi_device( le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { - if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, - &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, - handle)) { - ioc_err(ioc, - "failure at %s:%d/%s()!\n", __FILE__, - __LINE__, __func__); + if 
(mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, &pcie_device_pg2, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__); return 0; } - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & - MPI2_IOCSTATUS_MASK; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, - "failure at %s:%d/%s()!\n", __FILE__, - __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 0; } + + if (!ioc->tm_custom_handling) { + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + if (pcie_device_pg2.ControllerResetTO) + tr_timeout = pcie_device_pg2.ControllerResetTO; + + } + } + + /* + * Wait for device that is becoming ready + * queue request later if device is busy. + */ + if ((!ioc->wait_for_discovery_to_complete) && + (issue_scsi_cmd_to_bringup_drive) && + (pcie_device_pg0.AccessStatus != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) { + printk(MPT3SAS_INFO_FMT "detecting: handle(0x%04x), " + "wwid(0x%016llx), port(%d)\n", ioc->name, handle, + (unsigned long long)wwid, pcie_device_pg0.PortNum); + + rc = _scsih_wait_for_target_to_become_ready(ioc, handle, + retry_count, 0, tr_timeout, tr_method); + if (rc != DEVICE_READY) { + if (le16_to_cpu(pcie_device_pg0.EnclosureHandle) != 0) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "device not ready: slot(%d) \n", ioc->name, + __func__, + le16_to_cpu(pcie_device_pg0.Slot))); + + if (le32_to_cpu(pcie_device_pg0.Flags) & + MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { + memcpy(connector_name, + pcie_device_pg0.ConnectorName, 4); + connector_name[4] = '\0'; + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "device not ready: enclosure " + "level(0x%04x), connector name( %s)\n", + ioc->name, __func__, + pcie_device_pg0.EnclosureLevel, + connector_name)); + } + + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_STOP_UNIT ||rc == DEVICE_RETRY_UA) + return 1; + else if (rc == DEVICE_ERROR) + return 0; + } } pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); if (!pcie_device) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 0; } @@ -6903,6 +10906,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) pcie_device->port_num = pcie_device_pg0.PortNum; pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; + pcie_device_type = pcie_device->device_info & + MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE; pcie_device->enclosure_handle = le16_to_cpu(pcie_device_pg0.EnclosureHandle); @@ -6912,8 +10917,9 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) if (le32_to_cpu(pcie_device_pg0.Flags) & MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; - memcpy(&pcie_device->connector_name[0], - &pcie_device_pg0.ConnectorName[0], 4); + memcpy(pcie_device->connector_name, + pcie_device_pg0.ConnectorName, 4); + pcie_device->connector_name[4] = '\0'; } else { pcie_device->enclosure_level = 0; pcie_device->connector_name[0] = '\0'; @@ -6928,19 +10934,38 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) pcie_device->enclosure_logical_id = le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); } - /* TODO -- Add device name once FW supports it */ + /*TODO -- Add device name once FW supports it*/ +#if 0 + /* get device name */ + pcie_device->device_name = le64_to_cpu(pcie_device_pg0.DeviceName); +#endif if (!(mpt3sas_scsih_is_pcie_scsi_device( - le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { pcie_device->nvme_mdts = - le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); + le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); if (pcie_device_pg2.ControllerResetTO) pcie_device->reset_timeout = - pcie_device_pg2.ControllerResetTO; + pcie_device_pg2.ControllerResetTO; else pcie_device->reset_timeout = 30; - } else + } + else pcie_device->reset_timeout = 30; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + if (!ioc->disable_eedp_support) { + /* Disable DIX0 protection capability */ + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, protection_mask & 0x77); + printk(MPT3SAS_INFO_FMT ": Disabling DIX0 prot capability because HBA does " + "not support DIX0 operation on NVME drives\n", ioc->name); + } + } + } +#endif + if (ioc->wait_for_discovery_to_complete) _scsih_pcie_device_init_add(ioc, pcie_device); else @@ -6986,15 +11011,15 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, status_str = "unknown status"; break; } - ioc_info(ioc, "pcie topology change: (%s)\n", status_str); - pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)" + printk(MPT3SAS_INFO_FMT "pcie topology change: (%s)\n", + ioc->name, status_str); + printk(KERN_INFO "\tswitch_handle(0x%04x), enclosure_handle(0x%04x) " "start_port(%02d), count(%d)\n", le16_to_cpu(event_data->SwitchDevHandle), le16_to_cpu(event_data->EnclosureHandle), event_data->StartPortNum, event_data->NumEntries); for (i = 0; i < event_data->NumEntries; i++) { - handle = - le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + handle = le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); if (!handle) continue; port_number = event_data->StartPortNum + i; @@ -7023,7 +11048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; - pr_info("\tport(%02d), attached_handle(0x%04x): %s:" + printk(KERN_INFO "\tport(%02d), attached_handle(0x%04x): %s:" " link rate: new(0x%02x), old(0x%02x)\n", port_number, handle, status_str, link_rate, prev_link_rate); } @@ -7037,7 +11062,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER 
*ioc, * Context: user. * */ -static void +static int _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { @@ -7047,34 +11072,33 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, u8 link_rate, prev_link_rate; unsigned long flags; int rc; - Mpi26EventDataPCIeTopologyChangeList_t *event_data = - (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data; + int requeue_event; + Mpi26EventDataPCIeTopologyChangeList_t *event_data = fw_event->event_data; struct _pcie_device *pcie_device; if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_pcie_topology_change_event_debug(ioc, event_data); - if (ioc->shost_recovery || ioc->remove_host || - ioc->pci_error_recovery) - return; + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) + return 0; if (fw_event->ignore) { - dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n")); - return; + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "ignoring switch event\n", + ioc->name)); + return 0; } /* handle siblings events */ - for (i = 0; i < event_data->NumEntries; i++) { + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { if (fw_event->ignore) { - dewtprintk(ioc, - ioc_info(ioc, "ignoring switch event\n")); - return; + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "ignoring switch event\n", + ioc->name)); + return 0; } if (ioc->remove_host || ioc->pci_error_recovery) - return; + return 0; reason_code = event_data->PortEntry[i].PortStatus; - handle = - le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + handle = le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); if (!handle) continue; @@ -7112,42 +11136,46 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, if (!test_bit(handle, ioc->pend_os_device_add)) break; - dewtprintk(ioc, - ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n", - handle)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "handle(0x%04x) device not found: convert " + "event to a device add\n", ioc->name, handle)); event_data->PortEntry[i].PortStatus &= 0xF0; event_data->PortEntry[i].PortStatus |= MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; - /* fall through */ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: if (ioc->shost_recovery) break; if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5) break; - rc = _scsih_pcie_add_device(ioc, handle); - if (!rc) { - /* mark entry vacant */ - /* TODO This needs to be reviewed and fixed, - * we dont have an entry - * to make an event void like vacant - */ + rc = _scsih_pcie_add_device(ioc, handle, fw_event->retries[i]); + if (rc) {/* retry due to busy device */ + fw_event->retries[i]++; + requeue_event = 1; + } else {/* mark entry vacant */ + /* TODO This needs to be reviewed and fixed, we dont have an entry + * to make an event void like vacant + */ event_data->PortEntry[i].PortStatus |= MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE; } break; - case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: + case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: _scsih_pcie_device_remove_by_handle(ioc, handle); break; } } + return requeue_event; } + /** - * _scsih_pcie_device_status_change_event_debug - debug for device event - * @ioc: ? + * _scsih_pcie_device_status_change_event_debug - debug for + * device event * @event_data: event data payload * Context: user. + * + * Return nothing. 
*/ static void _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, @@ -7197,15 +11225,15 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, break; } - ioc_info(ioc, "PCIE device status change: (%s)\n" - "\thandle(0x%04x), WWID(0x%016llx), tag(%d)", - reason_str, le16_to_cpu(event_data->DevHandle), - (u64)le64_to_cpu(event_data->WWID), - le16_to_cpu(event_data->TaskTag)); + printk(MPT3SAS_INFO_FMT "PCIE device status change: (%s)\n" + "\thandle(0x%04x), WWID(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->WWID), + le16_to_cpu(event_data->TaskTag)); if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA) - pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", + printk(MPT3SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, event_data->ASC, event_data->ASCQ); - pr_cont("\n"); + printk(KERN_INFO "\n"); } /** @@ -7214,6 +11242,8 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. */ static void _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, @@ -7223,8 +11253,9 @@ _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, struct _pcie_device *pcie_device; u64 wwid; unsigned long flags; - Mpi26EventDataPCIeDeviceStatusChange_t *event_data = - (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data; + Mpi26EventDataPCIeDeviceStatusChange_t *event_data = fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_pcie_device_status_change_event_debug(ioc, event_data); @@ -7239,12 +11270,14 @@ _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, wwid = le64_to_cpu(event_data->WWID); pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); - if (!pcie_device || !pcie_device->starget) + if (!pcie_device || !pcie_device->starget) { goto out; + } target_priv_data = pcie_device->starget->hostdata; - if (!target_priv_data) + if (!target_priv_data) { goto out; + } if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET) @@ -7258,12 +11291,15 @@ out: spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); } + /** * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure * event * @ioc: per adapter object * @event_data: event data payload * Context: user. + * + * Return nothing. */ static void _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, @@ -7283,12 +11319,12 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, break; } - ioc_info(ioc, "enclosure status change: (%s)\n" - "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n", - reason_str, - le16_to_cpu(event_data->EnclosureHandle), - (u64)le64_to_cpu(event_data->EnclosureLogicalID), - le16_to_cpu(event_data->StartSlot)); + printk(MPT3SAS_INFO_FMT "enclosure status change: (%s)\n" + "\thandle(0x%04x), enclosure logical id(0x%016llx)" + " number slots(%d)\n", ioc->name, reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); } /** @@ -7296,6 +11332,8 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, @@ -7303,40 +11341,39 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, { Mpi2ConfigReply_t mpi_reply; struct _enclosure_node *enclosure_dev = NULL; - Mpi2EventDataSasEnclDevStatusChange_t *event_data = - (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data; + Mpi2EventDataSasEnclDevStatusChange_t *event_data = fw_event->event_data; int rc; - u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle); if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_enclosure_dev_status_change_event_debug(ioc, - (Mpi2EventDataSasEnclDevStatusChange_t *) - fw_event->event_data); + fw_event->event_data); + if (ioc->shost_recovery) return; - if (enclosure_handle) + event_data->EnclosureHandle = le16_to_cpu(event_data->EnclosureHandle); + + if (event_data->EnclosureHandle) enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, - enclosure_handle); + event_data->EnclosureHandle); switch (event_data->ReasonCode) { case MPI2_EVENT_SAS_ENCL_RC_ADDED: if (!enclosure_dev) { enclosure_dev = - kzalloc(sizeof(struct _enclosure_node), - GFP_KERNEL); + kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL); if (!enclosure_dev) { - ioc_info(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); return; } rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, - &enclosure_dev->pg0, - MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, - enclosure_handle); + &enclosure_dev->pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + event_data->EnclosureHandle); if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & - MPI2_IOCSTATUS_MASK)) { + MPI2_IOCSTATUS_MASK)) { kfree(enclosure_dev); return; } @@ -7361,6 +11398,8 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, @@ -7368,27 +11407,26 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, { struct scsi_cmnd *scmd; struct scsi_device *sdev; - struct scsiio_tracker *st; u16 smid, handle; u32 lun; struct MPT3SAS_DEVICE *sas_device_priv_data; u32 termination_count; u32 query_count; Mpi2SCSITaskManagementReply_t *mpi_reply; - Mpi2EventDataSasBroadcastPrimitive_t *event_data = - (Mpi2EventDataSasBroadcastPrimitive_t *) - fw_event->event_data; + Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; u16 ioc_status; unsigned long flags; int r; u8 max_retries = 0; u8 task_abort_retries; + struct scsiio_tracker *st; mutex_lock(&ioc->tm_cmds.mutex); - ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n", - __func__, event_data->PhyNum, event_data->PortWidth); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter: phy number(%d), " + "width(%d)\n", ioc->name, __func__, event_data->PhyNum, + event_data->PortWidth)); - _scsih_block_io_all_device(ioc); + _scsih_block_io_all_device(ioc); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); mpi_reply = ioc->tm_cmds.reply; @@ -7396,22 +11434,25 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, /* sanity checks for retrying this loop */ if (max_retries++ == 5) { - dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: giving up\n", + ioc->name, __func__)); goto out; } else if (max_retries > 1) - dewtprintk(ioc, - ioc_info(ioc, "%s: %d retry\n", - __func__, max_retries - 1)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: %d retry\n", + ioc->name, __func__, max_retries - 1)); termination_count = 0; query_count = 0; - for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { if (ioc->shost_recovery) goto out; + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); if (!scmd) continue; - st = scsi_cmd_priv(scmd); + st = mpt3sas_base_scsi_cmd_priv(scmd); + if (!st || st->smid == 0) + continue; sdev = scmd->device; sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) @@ -7428,7 +11469,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) continue; - + handle = sas_device_priv_data->sas_target->handle; lun = sas_device_priv_data->lun; query_count++; @@ -7437,9 +11478,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, goto out; spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - r = mpt3sas_scsih_issue_tm(ioc, handle, lun, - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, - st->msix_io, 30, 0); + r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, 30, 0); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: FAILED when sending " @@ -7450,9 +11490,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - sdev_printk(KERN_WARNING, sdev, - "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", - ioc_status, scmd); + sdev_printk(KERN_WARNING, sdev, "query task: FAILED " + "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status, + scmd); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } @@ -7468,9 +11508,9 @@ _scsih_sas_broadcast_primitive_event(struct 
MPT3SAS_ADAPTER *ioc, task_abort_retries = 0; tm_retry: if (task_abort_retries++ == 60) { - dewtprintk(ioc, - ioc_info(ioc, "%s: ABORT_TASK: giving up\n", - __func__)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s: ABORT_TASK: giving up\n", ioc->name, + __func__)); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } @@ -7478,10 +11518,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, if (ioc->shost_recovery) goto out_no_lock; - r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid, - st->msix_io, 30, 0); - if (r == FAILED || st->cb_idx != 0xFF) { + r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid, 30, + 0); + if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " "scmd(%p)\n", scmd); @@ -7499,10 +11539,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, } if (ioc->broadcast_aen_pending) { - dewtprintk(ioc, - ioc_info(ioc, - "%s: loop back due to pending AEN\n", - __func__)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: loop back due to" + " pending AEN\n", ioc->name, __func__)); ioc->broadcast_aen_pending = 0; goto broadcast_aen_retry; } @@ -7511,13 +11549,13 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); out_no_lock: - dewtprintk(ioc, - ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n", - __func__, query_count, termination_count)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s - exit, query_count = %d termination_count = %d\n", + ioc->name, __func__, query_count, termination_count)); ioc->broadcast_aen_busy = 0; if (!ioc->shost_recovery) - _scsih_ublock_io_all_device(ioc); + _scsih_ublock_io_all_device(ioc, 1); mutex_unlock(&ioc->tm_cmds.mutex); } @@ -7526,22 +11564,23 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. */ static void _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { - Mpi2EventDataSasDiscovery_t *event_data = - (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; + Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data; if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { - ioc_info(ioc, "discovery event: (%s)", - event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? - "start" : "stop"); - if (event_data->DiscoveryStatus) - pr_cont("discovery_status(0x%08x)", - le32_to_cpu(event_data->DiscoveryStatus)); - pr_cont("\n"); + printk(MPT3SAS_INFO_FMT "sas discovery event: (%s)", ioc->name, + (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + printk("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + printk("\n"); } if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && @@ -7556,31 +11595,37 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, } /** - * _scsih_sas_device_discovery_error_event - display SAS device discovery error - * events + * _scsih_sas_device_discovery_error_event - display SAS device discovery error events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { - Mpi25EventDataSasDeviceDiscoveryError_t *event_data = - (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data; + Mpi25EventDataSasDeviceDiscoveryError_t *event_data = fw_event->event_data; switch (event_data->ReasonCode) { + case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED: - ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n", - le16_to_cpu(event_data->DevHandle), - (u64)le64_to_cpu(event_data->SASAddress), - event_data->PhysicalPort); + printk(MPT3SAS_WARN_FMT "SMP command sent to the expander " \ + "(handle:0x%04x, sas_address:0x%016llx, " \ + "physical_port:0x%02x) has failed\n", \ + ioc->name, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); break; + case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT: - ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n", - le16_to_cpu(event_data->DevHandle), - (u64)le64_to_cpu(event_data->SASAddress), - event_data->PhysicalPort); + printk(MPT3SAS_WARN_FMT "SMP command sent to the expander " \ + "(handle:0x%04x, sas_address:0x%016llx, " \ + "physical_port:0x%02x) has timed out\n", \ + ioc->name, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); break; default: break; @@ -7592,25 +11637,25 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. */ static void _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { - Mpi26EventDataPCIeEnumeration_t *event_data = - (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data; + Mpi26EventDataPCIeEnumeration_t *event_data = fw_event->event_data; - if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)) - return; - - ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x", - (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? - "started" : "completed", - event_data->Flags); + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { + printk(MPT3SAS_INFO_FMT "pcie enumeration event: (%s) Flag 0x%02x", + ioc->name, + ((event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? + "started" : "completed"), event_data->Flags); if (event_data->EnumerationStatus) - pr_cont("enumeration_status(0x%08x)", - le32_to_cpu(event_data->EnumerationStatus)); - pr_cont("\n"); + printk("enumeration_status(0x%08x)", + le32_to_cpu(event_data->EnumerationStatus)); + printk("\n"); + } } /** @@ -7619,7 +11664,7 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, * @handle: device handle for physical disk * @phys_disk_num: physical disk number * - * Return: 0 for success, else failure. + * Return 0 for success, else failure. 
*/ static int _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) @@ -7632,13 +11677,11 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) u16 ioc_status; u32 log_info; - if (ioc->hba_mpi_version_belonged == MPI2_VERSION) - return rc; - mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } @@ -7646,7 +11689,8 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; rc = -EAGAIN; goto out; @@ -7660,16 +11704,15 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN; mpi_request->PhysDiskNum = phys_disk_num; - dewtprintk(ioc, - ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n", - handle, phys_disk_num)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "IR RAID_ACTION: turning fast " + "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name, + handle, phys_disk_num)); init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); - if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, ioc->scsih_cmds.status, mpi_request, sizeof(Mpi2RaidActionRequest_t)/4); @@ -7687,13 +11730,15 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) log_info = 0; ioc_status &= MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - dewtprintk(ioc, - ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n", - ioc_status, log_info)); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "IR RAID_ACTION: failed: ioc_status(0x%04x), " + "loginfo(0x%08x)!!!\n", ioc->name, ioc_status, + log_info)); rc = -EFAULT; } else - dewtprintk(ioc, - ioc_info(ioc, "IR RAID_ACTION: completed successfully\n")); + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT + "IR RAID_ACTION: completed successfully\n", + ioc->name)); } out: @@ -7714,10 +11759,17 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) static void _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)) + int rc; +#endif sdev->no_uld_attach = no_uld_attach ? 1 : 0; sdev_printk(KERN_INFO, sdev, "%s raid component\n", sdev->no_uld_attach ? "hiding" : "exposing"); - WARN_ON(scsi_device_reprobe(sdev)); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)) + rc = scsi_device_reprobe(sdev); +#else + scsi_device_reprobe(sdev); +#endif } /** @@ -7725,6 +11777,8 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) * @ioc: per adapter object * @element: IR config element data * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, @@ -7738,8 +11792,9 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); return; } @@ -7752,8 +11807,9 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); if (!raid_device) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); return; } @@ -7769,7 +11825,7 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, _scsih_raid_device_remove(ioc, raid_device); } else { spin_lock_irqsave(&ioc->raid_device_lock, flags); - _scsih_determine_boot_device(ioc, raid_device, 1); + _scsih_determine_boot_device(ioc, raid_device, RAID_CHANNEL); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } } @@ -7779,6 +11835,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @handle: volume device handle * Context: user. + * + * Return nothing. */ static void _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -7787,7 +11845,9 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) unsigned long flags; struct MPT3SAS_TARGET *sas_target_priv_data; struct scsi_target *starget = NULL; - +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + u8 protection_mask; +#endif spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); if (raid_device) { @@ -7796,14 +11856,31 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_target_priv_data = starget->hostdata; sas_target_priv_data->deleted = 1; } - ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", - raid_device->handle, (u64)raid_device->wwid); + printk(MPT3SAS_INFO_FMT "removing handle(0x%04x), wwid" + "(0x%016llx)\n", ioc->name, raid_device->handle, + (unsigned long long) raid_device->wwid); list_del(&raid_device->list); kfree(raid_device); } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (starget) scsi_remove_target(&starget->dev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if ((!ioc->disable_eedp_support) && (prot_mask & SHOST_DIX_TYPE0_PROTECTION) + && (ioc->hba_mpi_version_belonged != MPI2_VERSION)){ + /* are there any volumes ? */ + if (list_empty(&ioc->raid_device_list)){ + /* Enabling DIX0 protection capability */ + protection_mask = scsi_host_get_prot(ioc->shost); + if (!(protection_mask & SHOST_DIX_TYPE0_PROTECTION)) { + scsi_host_set_prot(ioc->shost, protection_mask | 8); + printk(MPT3SAS_INFO_FMT ": Enabling DIX0 prot capability \n", + ioc->name); + } + } + } +#endif } /** @@ -7811,6 +11888,8 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) * @ioc: per adapter object * @element: IR config element data * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, @@ -7833,6 +11912,8 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, sas_target_priv_data = starget->hostdata; sas_target_priv_data->flags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT; + sas_device->pfa_led_on = 0; + sas_device_put(sas_device); } } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); @@ -7842,8 +11923,6 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, /* exposing raid component */ if (starget) starget_for_each_device(starget, NULL, _scsih_reprobe_lun); - - sas_device_put(sas_device); } /** @@ -7851,6 +11930,8 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @element: IR config element data * Context: user. + * + * Return nothing. */ static void _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, @@ -7880,19 +11961,19 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, MPT_TARGET_FLAGS_RAID_COMPONENT; sas_device->volume_handle = volume_handle; sas_device->volume_wwid = volume_wwid; + sas_device_put(sas_device); } } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!sas_device) return; - /* hiding raid component */ - _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + if(ioc->hba_mpi_version_belonged != MPI2_VERSION) + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); if (starget) starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); - sas_device_put(sas_device); } /** @@ -7900,6 +11981,8 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @element: IR config element data * Context: user. + * + * Return nothing. */ static void _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, @@ -7915,6 +11998,8 @@ _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @element: IR config element data * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, @@ -7929,35 +12014,36 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, u16 parent_handle; set_bit(handle, ioc->pd_handles); - sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); if (sas_device) { - _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + if(ioc->hba_mpi_version_belonged != MPI2_VERSION) + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); sas_device_put(sas_device); return; } if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) mpt3sas_transport_update_links(ioc, sas_address, handle, - sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); - - _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort)); + if(ioc->hba_mpi_version_belonged != MPI2_VERSION) + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); _scsih_add_device(ioc, handle, 0, 1); } @@ -7966,6 +12052,8 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @event_data: event data payload * Context: user. + * + * Return nothing. */ static void _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, @@ -7978,10 +12066,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; - ioc_info(ioc, "raid config change: (%s), elements(%d)\n", - le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? - "foreign" : "native", - event_data->NumElements); + printk(MPT3SAS_INFO_FMT "raid config change: (%s), elements(%d)\n", + ioc->name, (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? + "foreign" : "native", event_data->NumElements); for (i = 0; i < event_data->NumElements; i++, element++) { switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_ADDED: @@ -8031,7 +12119,7 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, element_str = "unknown element"; break; } - pr_info("\t(%s:%s), vol handle(0x%04x), " \ + printk(KERN_INFO "\t(%s:%s), vol handle(0x%04x), " "pd handle(0x%04x), pd num(0x%02x)\n", element_str, reason_str, le16_to_cpu(element->VolDevHandle), le16_to_cpu(element->PhysDiskDevHandle), @@ -8044,6 +12132,8 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, @@ -8052,12 +12142,10 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, Mpi2EventIrConfigElement_t *element; int i; u8 foreign_config; - Mpi2EventDataIrConfigChangeList_t *event_data = - (Mpi2EventDataIrConfigChangeList_t *) - fw_event->event_data; + Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; - if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && - (!ioc->hide_ir_msg)) + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) _scsih_sas_ir_config_change_event_debug(ioc, event_data); foreign_config = (le32_to_cpu(event_data->Flags) & @@ -8065,16 +12153,13 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; if (ioc->shost_recovery && - ioc->hba_mpi_version_belonged != MPI2_VERSION) { + ioc->hba_mpi_version_belonged != MPI2_VERSION) { for (i = 0; i < event_data->NumElements; i++, element++) { - if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) - _scsih_ir_fastpath(ioc, - le16_to_cpu(element->PhysDiskDevHandle), - element->PhysDiskNum); + if(element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) + _scsih_ir_fastpath(ioc, le16_to_cpu(element->PhysDiskDevHandle), element->PhysDiskNum); } return; } - for (i = 0; i < event_data->NumElements; i++, element++) { switch (element->ReasonCode) { @@ -8114,6 +12199,8 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. */ static void _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, @@ -8125,8 +12212,7 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, u16 handle; u32 state; int rc; - Mpi2EventDataIrVolume_t *event_data = - (Mpi2EventDataIrVolume_t *) fw_event->event_data; + Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; if (ioc->shost_recovery) return; @@ -8136,12 +12222,10 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, handle = le16_to_cpu(event_data->VolDevHandle); state = le32_to_cpu(event_data->NewValue); - if (!ioc->hide_ir_msg) - dewtprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", - __func__, handle, - le32_to_cpu(event_data->PreviousValue), - state)); + if (!ioc->warpdrive_msg) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle(0x%04x), " + "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_VOL_STATE_MISSING: case MPI2_RAID_VOL_STATE_FAILED: @@ -8161,15 +12245,17 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); break; } raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); if (!raid_device) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); break; } @@ -8195,6 +12281,8 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, @@ -8206,8 +12294,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u32 ioc_status; - Mpi2EventDataIrPhysicalDisk_t *event_data = - (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; + Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; u64 sas_address; if (ioc->shost_recovery) @@ -8219,13 +12306,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, handle = le16_to_cpu(event_data->PhysDiskDevHandle); state = le32_to_cpu(event_data->NewValue); - if (!ioc->hide_ir_msg) - dewtprintk(ioc, - ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", - __func__, handle, - le32_to_cpu(event_data->PreviousValue), - state)); - + if (!ioc->warpdrive_msg) + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: handle(0x%04x), " + "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_PD_STATE_ONLINE: case MPI2_RAID_PD_STATE_DEGRADED: @@ -8237,31 +12321,33 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, set_bit(handle, ioc->pd_handles); sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { sas_device_put(sas_device); return; } - if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) - mpt3sas_transport_update_links(ioc, sas_address, handle, - sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + mpt3sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort)); _scsih_add_device(ioc, handle, 0, 1); @@ -8280,6 +12366,8 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @event_data: event data payload * Context: user. + * + * Return nothing. */ static void _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, @@ -8308,10 +12396,11 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, if (!reason_str) return; - ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", - reason_str, - le16_to_cpu(event_data->VolDevHandle), - event_data->PercentComplete); + printk(MPT3SAS_INFO_FMT "raid operational status: (%s)" + "\thandle(0x%04x), percent complete(%d)\n", + ioc->name, reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); } /** @@ -8319,20 +12408,20 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
*/ static void _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { - Mpi2EventDataIrOperationStatus_t *event_data = - (Mpi2EventDataIrOperationStatus_t *) - fw_event->event_data; + Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; static struct _raid_device *raid_device; unsigned long flags; u16 handle; - if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && - (!ioc->hide_ir_msg)) + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) _scsih_sas_ir_operation_status_event_debug(ioc, event_data); @@ -8376,6 +12465,8 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) * * After host reset, find out whether devices are still responding. * Used in _scsih_remove_unresponsive_sas_devices. + * + * Return nothing. */ static void _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, @@ -8383,23 +12474,29 @@ Mpi2SasDevicePage0_t *sas_device_pg0) { struct MPT3SAS_TARGET *sas_target_priv_data = NULL; struct scsi_target *starget; - struct _sas_device *sas_device = NULL; + struct _sas_device *sas_device; struct _enclosure_node *enclosure_dev = NULL; unsigned long flags; + struct hba_port *port; + + port = mpt3sas_get_port_by_id(ioc, sas_device_pg0->PhysicalPort); if (sas_device_pg0->EnclosureHandle) { enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, le16_to_cpu(sas_device_pg0->EnclosureHandle)); - if (enclosure_dev == NULL) - ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", - sas_device_pg0->EnclosureHandle); + if (enclosure_dev == NULL) + printk(MPT3SAS_INFO_FMT "Enclosure handle(0x%04x) " "doesn't match with enclosure device!\n", + ioc->name, sas_device_pg0->EnclosureHandle); } + spin_lock_irqsave(&ioc->sas_device_lock, flags); list_for_each_entry(sas_device, &ioc->sas_device_list, list) { if ((sas_device->sas_address == le64_to_cpu( sas_device_pg0->SASAddress)) && (sas_device->slot == - le16_to_cpu(sas_device_pg0->Slot))) { + le16_to_cpu(sas_device_pg0->Slot)) && + (sas_device->port == port)) { sas_device->responding = 1; starget = sas_device->starget; if (starget && starget->hostdata) { @@ -8410,25 +12507,23 @@ Mpi2SasDevicePage0_t *sas_device_pg0) sas_target_priv_data = NULL; if (starget) { starget_printk(KERN_INFO, starget, - "handle(0x%04x), sas_addr(0x%016llx)\n", - le16_to_cpu(sas_device_pg0->DevHandle), - (unsigned long long) - sas_device->sas_address); - + "handle(0x%04x), sas_address(0x%016llx)," + " port: %d\n", sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->port->port_id); if (sas_device->enclosure_handle != 0) starget_printk(KERN_INFO, starget, - "enclosure logical id(0x%016llx)," - " slot(%d)\n", - (unsigned long long) - sas_device->enclosure_logical_id, - sas_device->slot); + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); } + if (le16_to_cpu(sas_device_pg0->Flags) & - MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { - sas_device->enclosure_level = - sas_device_pg0->EnclosureLevel; - memcpy(&sas_device->connector_name[0], - &sas_device_pg0->ConnectorName[0], 4); + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID && + (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { + sas_device->enclosure_level = sas_device_pg0->EnclosureLevel; + memcpy(sas_device->connector_name, sas_device_pg0->ConnectorName, 4); + sas_device->connector_name[4] = '\0'; } else { sas_device->enclosure_level = 0; sas_device->connector_name[0] = '\0'; @@ -8438,25 +12533,25 @@
Mpi2SasDevicePage0_t *sas_device_pg0) le16_to_cpu(sas_device_pg0->EnclosureHandle); sas_device->is_chassis_slot_valid = 0; if (enclosure_dev) { - sas_device->enclosure_logical_id = le64_to_cpu( - enclosure_dev->pg0.EnclosureLogicalID); + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); if (le16_to_cpu(enclosure_dev->pg0.Flags) & - MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { sas_device->is_chassis_slot_valid = 1; sas_device->chassis_slot = - enclosure_dev->pg0.ChassisSlot; + enclosure_dev->pg0.ChassisSlot; } } if (sas_device->handle == le16_to_cpu( sas_device_pg0->DevHandle)) goto out; - pr_info("\thandle changed from(0x%04x)!!!\n", + printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", sas_device->handle); sas_device->handle = le16_to_cpu( sas_device_pg0->DevHandle); if (sas_target_priv_data) - sas_target_priv_data->handle = + sas_target_priv_data->handle = le16_to_cpu(sas_device_pg0->DevHandle); goto out; } @@ -8469,6 +12564,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0) * _scsih_create_enclosure_list_after_reset - Free Existing list, * And create enclosure list by scanning all Enclosure Page(0)s * @ioc: per adapter object + * + * Return nothing. */ static void _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) @@ -8487,7 +12584,8 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) enclosure_dev = kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL); if (!enclosure_dev) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } @@ -8497,7 +12595,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) enclosure_handle); if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & - MPI2_IOCSTATUS_MASK)) { + MPI2_IOCSTATUS_MASK)) { kfree(enclosure_dev); return; } @@ -8505,7 +12603,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) &ioc->enclosure_list); enclosure_handle = le16_to_cpu(enclosure_dev->pg0.EnclosureHandle); - } while (1); + } while(1); } /** @@ -8514,6 +12612,8 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) * * After host reset, find out whether devices are still responding. * If not remove. + * + * Return nothing. 
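The search routines that follow all page through firmware config pages with the same idiom: seed the handle cursor with 0xFFFF, request the page after the current handle, and stop as soon as the masked IOCStatus stops reporting success. A self-contained sketch of that loop against a mocked page table; the constants and page layout are simplified stand-ins for the MPI2 definitions, not the real headers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IOCSTATUS_SUCCESS		0x0000
#define IOCSTATUS_CONFIG_INVALID_PAGE	0x0021	/* stand-in for "no more pages" */

struct dev_page { uint16_t dev_handle; uint64_t sas_address; };

static const struct dev_page pages[] = {
	{ 0x0009, 0x5000c50012345678ULL },
	{ 0x000a, 0x5000c500deadbeefULL },
	{ 0x000c, 0x5000c500cafef00dULL },
};

/* Mocked GET_NEXT_HANDLE form: return the page whose handle follows
 * @handle; 0xFFFF means "start from the beginning". */
static uint16_t get_next_page(uint16_t handle, struct dev_page *out)
{
	for (size_t i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		if (handle == 0xFFFF || pages[i].dev_handle > handle) {
			*out = pages[i];
			return IOCSTATUS_SUCCESS;
		}
	}
	return IOCSTATUS_CONFIG_INVALID_PAGE;
}

int main(void)
{
	struct dev_page pg;
	uint16_t handle = 0xFFFF;

	/* Walk every page; a non-SUCCESS status terminates the scan,
	 * exactly like the breaks in the driver's while loops. */
	while (get_next_page(handle, &pg) == IOCSTATUS_SUCCESS) {
		handle = pg.dev_handle;	/* advance the cursor */
		printf("handle(0x%04x), sas_addr(0x%016llx)\n", handle,
		       (unsigned long long)pg.sas_address);
	}
	return 0;
}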
*/ static void _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) @@ -8524,7 +12624,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) u16 handle; u32 device_info; - ioc_info(ioc, "search for end-devices: start\n"); + printk(MPT3SAS_INFO_FMT "search for end-devices: start\n", ioc->name); if (list_empty(&ioc->sas_device_list)) goto out; @@ -8535,17 +12635,23 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_INFO_FMT "\tbreak from %s: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } handle = le16_to_cpu(sas_device_pg0.DevHandle); device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); - if (!(_scsih_is_end_device(device_info))) + if (!(_scsih_is_sas_end_device(device_info))) continue; _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); } out: - ioc_info(ioc, "search for end-devices: complete\n"); + printk(MPT3SAS_INFO_FMT "search for end-devices: complete\n", + ioc->name); } /** @@ -8555,10 +12661,12 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) * * After host reset, find out whether devices are still responding. * Used in _scsih_remove_unresponding_devices. + * + * Return nothing. */ static void _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, - Mpi26PCIeDevicePage0_t *pcie_device_pg0) + Mpi26PCIeDevicePage0_t *pcie_device_pg0) { struct MPT3SAS_TARGET *sas_target_priv_data = NULL; struct scsi_target *starget; @@ -8570,8 +12678,7 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) && (pcie_device->slot == le16_to_cpu( pcie_device_pg0->Slot))) { - pcie_device->access_status = - pcie_device_pg0->AccessStatus; + pcie_device->access_status = pcie_device_pg0->AccessStatus; pcie_device->responding = 1; starget = pcie_device->starget; if (starget && starget->hostdata) { @@ -8594,13 +12701,14 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, pcie_device->slot); } - if (((le32_to_cpu(pcie_device_pg0->Flags)) & + if ((le32_to_cpu(pcie_device_pg0->Flags) & MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { pcie_device->enclosure_level = pcie_device_pg0->EnclosureLevel; - memcpy(&pcie_device->connector_name[0], - &pcie_device_pg0->ConnectorName[0], 4); + memcpy(pcie_device->connector_name, + pcie_device_pg0->ConnectorName, 4); + pcie_device->connector_name[4] = '\0'; } else { pcie_device->enclosure_level = 0; pcie_device->connector_name[0] = '\0'; @@ -8609,7 +12717,7 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, if (pcie_device->handle == le16_to_cpu( pcie_device_pg0->DevHandle)) goto out; - pr_info("\thandle changed from(0x%04x)!!!\n", + printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", pcie_device->handle); pcie_device->handle = le16_to_cpu( pcie_device_pg0->DevHandle); @@ -8630,6 +12738,8 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, * * After host reset, find out whether devices are still responding. * If not remove. + * + * Return nothing. 
*/ static void _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) @@ -8640,7 +12750,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) u16 handle; u32 device_info; - ioc_info(ioc, "search for end-devices: start\n"); + printk(MPT3SAS_INFO_FMT "search for end-devices: start\n", ioc->name); if (list_empty(&ioc->pcie_device_list)) goto out; @@ -8652,9 +12762,10 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", - __func__, ioc_status, - le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from %s: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, + __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } handle = le16_to_cpu(pcie_device_pg0.DevHandle); @@ -8664,7 +12775,8 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); } out: - ioc_info(ioc, "search for PCIe end-devices: complete\n"); + printk(MPT3SAS_INFO_FMT "search for PCIe end-devices: complete\n", + ioc->name); } /** @@ -8674,13 +12786,15 @@ out: * @handle: device handle * * After host reset, find out whether devices are still responding. - * Used in _scsih_remove_unresponsive_raid_devices. + * Used in _scsih_remove_unresponding_devices. + * + * Return nothing. */ static void _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, u16 handle) { - struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + struct MPT3SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; struct _raid_device *raid_device; unsigned long flags; @@ -8705,14 +12819,16 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, * across the host reset so re-initialize the * required data for Direct IO */ - mpt3sas_init_warpdrive_properties(ioc, raid_device); + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + mpt3sas_init_warpdrive_properties(ioc, raid_device); + spin_lock_irqsave(&ioc->raid_device_lock, flags); if (raid_device->handle == handle) { spin_unlock_irqrestore(&ioc->raid_device_lock, flags); return; } - pr_info("\thandle changed from(0x%04x)!!!\n", + printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", raid_device->handle); raid_device->handle = handle; if (sas_target_priv_data) @@ -8730,6 +12846,8 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, * * After host reset, find out whether devices are still responding. * If not remove. + * + * Return nothing. 
*/ static void _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) @@ -8745,7 +12863,8 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) if (!ioc->ir_firmware) return; - ioc_info(ioc, "search for raid volumes: start\n"); + printk(MPT3SAS_INFO_FMT "search for raid volumes: start\n", + ioc->name); if (list_empty(&ioc->raid_device_list)) goto out; @@ -8755,8 +12874,13 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_INFO_FMT "\tbreak from %s: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } handle = le16_to_cpu(volume_pg1.DevHandle); if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, @@ -8780,32 +12904,42 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_INFO_FMT "\tbreak from %s: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); set_bit(handle, ioc->pd_handles); } } out: - ioc_info(ioc, "search for responding raid volumes: complete\n"); + printk(MPT3SAS_INFO_FMT "search for responding raid volumes: " + "complete\n", ioc->name); } /** * _scsih_mark_responding_expander - mark an expander as responding * @ioc: per adapter object - * @expander_pg0:SAS Expander Config Page0 + * @expander_pg0: SAS Expander Config Page0 * * After host reset, find out whether devices are still responding. - * Used in _scsih_remove_unresponsive_expanders. + * Used in _scsih_remove_unresponding_devices. + * + * Return nothing.
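The mark-responding helpers share one rule: a freshly read config page is matched against the cached object by stable identity (SAS address plus slot, and with this patch the hba_port too), never by handle, because firmware may hand out different handles after a reset; on a match the entry is flagged responding and its cached handle is refreshed. A reduced model of that matching step, with illustrative field names rather than the driver's structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cached_dev {
	uint64_t sas_address;	/* stable across resets */
	uint16_t slot;		/* stable across resets */
	uint16_t handle;	/* may be reassigned by firmware */
	bool responding;
};

static struct cached_dev cache[] = {
	{ 0x5000c50012345678ULL, 1, 0x0009, false },
	{ 0x5000c500deadbeefULL, 2, 0x000a, false },
};

/* Match by stable identity and refresh the (possibly reassigned)
 * handle -- the core of the _scsih_mark_responding_*() helpers. */
static void mark_responding(uint64_t sas_address, uint16_t slot,
			    uint16_t new_handle)
{
	for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++) {
		struct cached_dev *dev = &cache[i];

		if (dev->sas_address != sas_address || dev->slot != slot)
			continue;
		dev->responding = true;
		if (dev->handle != new_handle) {
			printf("\thandle changed from(0x%04x)!!!\n",
			       dev->handle);
			dev->handle = new_handle;
		}
		return;
	}
}

int main(void)
{
	/* First device came back under a new handle; the second never
	 * returned and stays unmarked for the sweep that follows. */
	mark_responding(0x5000c50012345678ULL, 1, 0x0010);

	for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++)
		if (!cache[i].responding)
			printf("removing stale handle(0x%04x)\n",
			       cache[i].handle);
	return 0;
}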
*/ static void _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, - Mpi2ExpanderPage0_t *expander_pg0) + Mpi2ExpanderPage0_t *expander_pg0) { - struct _sas_node *sas_expander = NULL; + struct _sas_node *sas_expander; unsigned long flags; int i; + u8 port_id = expander_pg0->PhysicalPort; + struct hba_port *port = mpt3sas_get_port_by_id(ioc, port_id); struct _enclosure_node *enclosure_dev = NULL; u16 handle = le16_to_cpu(expander_pg0->DevHandle); u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); @@ -8818,20 +12952,21 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->sas_node_lock, flags); list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { - if (sas_expander->sas_address != sas_address) + if (sas_expander->sas_address != sas_address || + (sas_expander->port != port)) continue; sas_expander->responding = 1; if (enclosure_dev) { sas_expander->enclosure_logical_id = - le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); sas_expander->enclosure_handle = - le16_to_cpu(expander_pg0->EnclosureHandle); + le16_to_cpu(expander_pg0->EnclosureHandle); } if (sas_expander->handle == handle) goto out; - pr_info("\texpander(0x%016llx): handle changed" \ + printk(KERN_INFO "\texpander(0x%016llx): handle changed" " from(0x%04x) to (0x%04x)!!!\n", (unsigned long long)sas_expander->sas_address, sas_expander->handle, handle); @@ -8850,6 +12985,8 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, * * After host reset, find out whether devices are still responding. * If not remove. + * + * Return nothing. */ static void _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) @@ -8859,8 +12996,9 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) u16 ioc_status; u64 sas_address; u16 handle; + u8 port; - ioc_info(ioc, "search for expanders: start\n"); + printk(MPT3SAS_INFO_FMT "search for expanders: start\n", ioc->name); if (list_empty(&ioc->sas_expander_list)) goto out; @@ -8871,24 +13009,34 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_INFO_FMT "\tbreak from %s: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } handle = le16_to_cpu(expander_pg0.DevHandle); sas_address = le64_to_cpu(expander_pg0.SASAddress); - pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n", - handle, - (unsigned long long)sas_address); + port = expander_pg0.PhysicalPort; + printk(KERN_INFO "\texpander present: handle(0x%04x), " + "sas_addr(0x%016llx), port:%d\n", handle, + (unsigned long long)sas_address, + ((ioc->multipath_on_hba)? + (port):(MULTIPATH_DISABLED_PORT_ID))); _scsih_mark_responding_expander(ioc, &expander_pg0); } out: - ioc_info(ioc, "search for expanders: complete\n"); + printk(MPT3SAS_INFO_FMT "search for expanders: complete\n", ioc->name); } /** * _scsih_remove_unresponding_devices - removing unresponding devices * @ioc: per adapter object + * + * Return nothing. 
*/ static void _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) @@ -8901,17 +13049,21 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) unsigned long flags; LIST_HEAD(head); - ioc_info(ioc, "removing unresponding devices: start\n"); + + printk(MPT3SAS_INFO_FMT "removing unresponding devices: start\n", + ioc->name); /* removing unresponding end devices */ - ioc_info(ioc, "removing unresponding devices: end-devices\n"); + printk(MPT3SAS_INFO_FMT "removing unresponding devices: sas end-devices\n", + ioc->name); + /* * Iterate, pulling off devices marked as non-responding. We become the * owner for the reference the list had on any object we prune. */ spin_lock_irqsave(&ioc->sas_device_lock, flags); list_for_each_entry_safe(sas_device, sas_device_next, - &ioc->sas_device_list, list) { + &ioc->sas_device_list, list) { if (!sas_device->responding) list_move_tail(&sas_device->list, &head); else @@ -8928,7 +13080,8 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) sas_device_put(sas_device); } - ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n"); + printk(MPT3SAS_INFO_FMT "removing unresponding devices: pcie end-devices\n" + ,ioc->name); INIT_LIST_HEAD(&head); spin_lock_irqsave(&ioc->pcie_device_lock, flags); list_for_each_entry_safe(pcie_device, pcie_device_next, @@ -8948,7 +13101,8 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) /* removing unresponding volumes */ if (ioc->ir_firmware) { - ioc_info(ioc, "removing unresponding devices: volumes\n"); + printk(MPT3SAS_INFO_FMT "removing unresponding devices: " + "volumes\n", ioc->name); list_for_each_entry_safe(raid_device, raid_device_next, &ioc->raid_device_list, list) { if (!raid_device->responding) @@ -8960,7 +13114,8 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) } /* removing unresponding expanders */ - ioc_info(ioc, "removing unresponding devices: expanders\n"); + printk(MPT3SAS_INFO_FMT "removing unresponding devices: expanders\n", + ioc->name); spin_lock_irqsave(&ioc->sas_node_lock, flags); INIT_LIST_HEAD(&tmp_list); list_for_each_entry_safe(sas_expander, sas_expander_next, @@ -8976,10 +13131,11 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) _scsih_expander_node_remove(ioc, sas_expander); } - ioc_info(ioc, "removing unresponding devices: complete\n"); + printk(MPT3SAS_INFO_FMT "removing unresponding devices: complete\n", + ioc->name); /* unblock devices */ - _scsih_ublock_io_all_device(ioc); + _scsih_ublock_io_all_device(ioc, 0); } static void @@ -8993,20 +13149,24 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, &expander_pg1, i, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return; } - mpt3sas_transport_update_links(ioc, sas_expander->sas_address, + mpt3sas_transport_update_links(ioc, + sas_expander->sas_address, le16_to_cpu(expander_pg1.AttachedDevHandle), i, - expander_pg1.NegotiatedLinkRate >> 4); + expander_pg1.NegotiatedLinkRate >> 4, + sas_expander->port); } } /** * _scsih_scan_for_devices_after_reset - scan for devices after host reset * @ioc: per adapter object + * + * Return nothing. 
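_scsih_remove_unresponding_devices() never tears a device down while holding sas_device_lock: stale entries are spliced onto a private list under the lock and only freed after it is dropped, because the removal path may sleep. The same two-phase shape in miniature, with a pthread mutex standing in for the spinlock and malloc'd nodes for the device objects:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	int handle;
	bool responding;
	struct dev *next;
};

static struct dev *dev_list;
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_dev(int handle, bool responding)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		exit(1);
	d->handle = handle;
	d->responding = responding;
	d->next = dev_list;
	dev_list = d;
}

int main(void)
{
	struct dev *stale = NULL, *d, **pp;

	add_dev(0x9, true);
	add_dev(0xa, false);
	add_dev(0xc, false);

	/* Phase 1: under the lock, unlink non-responding entries onto a
	 * private list; nothing slow happens while the lock is held. */
	pthread_mutex_lock(&dev_lock);
	for (pp = &dev_list; (d = *pp) != NULL; ) {
		if (!d->responding) {
			*pp = d->next;
			d->next = stale;
			stale = d;
		} else {
			pp = &d->next;
		}
	}
	pthread_mutex_unlock(&dev_lock);

	/* Phase 2: the lock is dropped, so teardown may block safely. */
	while ((d = stale) != NULL) {
		stale = d->next;
		printf("removing handle(0x%04x)\n", d->handle);
		free(d);
	}
	return 0;
}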
*/ static void _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) @@ -9019,7 +13179,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) Mpi2RaidPhysDiskPage0_t pd_pg0; Mpi2EventIrConfigElement_t element; Mpi2ConfigReply_t mpi_reply; - u8 phys_disk_num; + u8 phys_disk_num, port_id; u16 ioc_status; u16 handle, parent_handle; u64 sas_address; @@ -9030,11 +13190,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) u8 retry_count; unsigned long flags; - ioc_info(ioc, "scan devices: start\n"); + printk(MPT3SAS_INFO_FMT "scan devices: start\n", ioc->name); _scsih_sas_host_refresh(ioc); - ioc_info(ioc, "\tscan devices: expanders start\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: expanders start\n", ioc->name); /* expanders */ handle = 0xFFFF; @@ -9043,35 +13203,43 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from expander scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } handle = le16_to_cpu(expander_pg0.DevHandle); spin_lock_irqsave(&ioc->sas_node_lock, flags); - expander_device = mpt3sas_scsih_expander_find_by_sas_address( - ioc, le64_to_cpu(expander_pg0.SASAddress)); + port_id = expander_pg0.PhysicalPort; + expander_device = + mpt3sas_scsih_expander_find_by_sas_address( + ioc, le64_to_cpu(expander_pg0.SASAddress), + mpt3sas_get_port_by_id(ioc, port_id)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (expander_device) _scsih_refresh_expander_links(ioc, expander_device, handle); else { - ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", - handle, - (u64)le64_to_cpu(expander_pg0.SASAddress)); + printk(MPT3SAS_INFO_FMT "\tBEFORE adding expander: " + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); _scsih_expander_add(ioc, handle); - ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", - handle, - (u64)le64_to_cpu(expander_pg0.SASAddress)); + printk(MPT3SAS_INFO_FMT "\tAFTER adding expander: " + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); } } - ioc_info(ioc, "\tscan devices: expanders complete\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: expanders complete\n", + ioc->name); if (!ioc->ir_firmware) goto skip_to_sas; - ioc_info(ioc, "\tscan devices: phys disk start\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: phys disk start\n", ioc->name); /* phys disk */ phys_disk_num = 0xFF; @@ -9081,8 +13249,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from phys disk scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } phys_disk_num = pd_pg0.PhysDiskNum; @@ -9099,38 +13269,45 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 
MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from phys disk scan " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { - ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", - handle, - (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + printk(MPT3SAS_INFO_FMT "\tBEFORE adding phys disk: " + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + port_id = sas_device_pg0.PhysicalPort; mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, - MPI2_SAS_NEG_LINK_RATE_1_5); + MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, port_id)); set_bit(handle, ioc->pd_handles); retry_count = 0; /* This will retry adding the end device. * _scsih_add_device() will decide on retries and * return "1" when it should be retried */ - while (_scsih_add_device(ioc, handle, retry_count++, + while(_scsih_add_device(ioc, handle, retry_count++, 1)) { ssleep(1); } - ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", - handle, - (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + printk(MPT3SAS_INFO_FMT "\tAFTER adding phys disk: " + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); } } - ioc_info(ioc, "\tscan devices: phys disk complete\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: phys disk complete\n", + ioc->name); - ioc_info(ioc, "\tscan devices: volumes start\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name); /* volumes */ handle = 0xFFFF; @@ -9139,8 +13316,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from volume scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } handle = le16_to_cpu(volume_pg1.DevHandle); @@ -9157,8 +13336,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from volume scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || @@ -9167,19 +13348,23 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; element.VolDevHandle = volume_pg1.DevHandle; - ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", - volume_pg1.DevHandle); + printk(MPT3SAS_INFO_FMT "\tBEFORE adding volume: " + " handle (0x%04x)\n", ioc->name, + volume_pg1.DevHandle); 
_scsih_sas_volume_add(ioc, &element); - ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", - volume_pg1.DevHandle); + printk(MPT3SAS_INFO_FMT "\tAFTER adding volume: " + " handle (0x%04x)\n", ioc->name, + volume_pg1.DevHandle); } } - ioc_info(ioc, "\tscan devices: volumes complete\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: volumes complete\n", + ioc->name); skip_to_sas: - ioc_info(ioc, "\tscan devices: end devices start\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: sas end devices start\n", + ioc->name); /* sas devices */ handle = 0xFFFF; @@ -9189,98 +13374,97 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from sas end device scan:" + " ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; } handle = le16_to_cpu(sas_device_pg0.DevHandle); - if (!(_scsih_is_end_device( + if (!(_scsih_is_sas_end_device( le32_to_cpu(sas_device_pg0.DeviceInfo)))) continue; + port_id = sas_device_pg0.PhysicalPort; sas_device = mpt3sas_get_sdev_by_addr(ioc, - le64_to_cpu(sas_device_pg0.SASAddress)); + le64_to_cpu(sas_device_pg0.SASAddress), + mpt3sas_get_port_by_id(ioc, port_id)); if (sas_device) { sas_device_put(sas_device); continue; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { - ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", - handle, - (u64)le64_to_cpu(sas_device_pg0.SASAddress)); - mpt3sas_transport_update_links(ioc, sas_address, handle, - sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + printk(MPT3SAS_INFO_FMT "\tBEFORE adding sas end device: " + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, port_id)); retry_count = 0; /* This will retry adding the end device. 
* _scsih_add_device() will decide on retries and * return "1" when it should be retried */ - while (_scsih_add_device(ioc, handle, retry_count++, + while(_scsih_add_device(ioc, handle, retry_count++, 0)) { ssleep(1); } - ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", - handle, - (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + printk(MPT3SAS_INFO_FMT "\tAFTER adding sas end device: " + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); } } - ioc_info(ioc, "\tscan devices: end devices complete\n"); - ioc_info(ioc, "\tscan devices: pcie end devices start\n"); + printk(MPT3SAS_INFO_FMT "\tscan devices: sas end devices complete\n", + ioc->name); + + printk(MPT3SAS_INFO_FMT "\tscan devices: pcie end devices start\n", + ioc->name); /* pcie devices */ handle = 0xFFFF; while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) { - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) - & MPI2_IOCSTATUS_MASK; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "\tbreak from pcie end device scan:" + " ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); break; } handle = le16_to_cpu(pcie_device_pg0.DevHandle); - if (!(_scsih_is_nvme_pciescsi_device( - le32_to_cpu(pcie_device_pg0.DeviceInfo)))) + if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu(pcie_device_pg0.DeviceInfo)))) continue; pcie_device = mpt3sas_get_pdev_by_wwid(ioc, - le64_to_cpu(pcie_device_pg0.WWID)); + le64_to_cpu(pcie_device_pg0.WWID)); if (pcie_device) { pcie_device_put(pcie_device); continue; } retry_count = 0; parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); - _scsih_pcie_add_device(ioc, handle); + while(_scsih_pcie_add_device(ioc, handle, retry_count++)) { + ssleep(1); + } + printk(MPT3SAS_INFO_FMT "\tAFTER adding pcie end device: " + "handle (0x%04x), wwid(0x%016llx)\n", ioc->name, + handle, (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID)); + } + printk(MPT3SAS_INFO_FMT "\tpcie devices: pcie end devices complete\n", + ioc->name); - ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", - handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); - } - ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); - ioc_info(ioc, "scan devices: complete\n"); + printk(MPT3SAS_INFO_FMT "scan devices: complete\n", ioc->name); } -/** - * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) - * @ioc: per adapter object - * - * The handler for doing any required cleanup or initialization. - */ -void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) -{ - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); -} - -/** - * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih) - * @ioc: per adapter object - * - * The handler for doing any required cleanup or initialization. 
- */ void -mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) { - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + struct _internal_qcmd *scsih_qcmd, *scsih_qcmd_next; + unsigned long flags; + if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { ioc->scsih_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); @@ -9292,32 +13476,67 @@ mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) complete(&ioc->tm_cmds.done); } + spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags); + list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next, &ioc->scsih_q_intenal_cmds, list) { + scsih_qcmd->status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, scsih_qcmd->smid); + } + spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags); + memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); memset(ioc->device_remove_in_progress, 0, ioc->device_remove_in_progress_sz); + memset(ioc->tm_tr_retry, 0, ioc->tm_tr_retry_sz); +#ifdef MPT2SAS_WD_DDIOCOUNT + ioc->ddio_count = 0; + ioc->ddio_err_count = 0; +#endif _scsih_fw_event_cleanup_queue(ioc); - _scsih_flush_running_cmds(ioc); + mpt3sas_scsih_flush_running_cmds(ioc); } /** * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) * @ioc: per adapter object + * @reset_phase: phase * * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET + * + * Return nothing. */ void -mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) { - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); - if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && - !ioc->sas_hba.num_phys)) { - _scsih_prep_device_scan(ioc); - _scsih_create_enclosure_list_after_reset(ioc); - _scsih_search_responding_sas_devices(ioc); - _scsih_search_responding_pcie_devices(ioc); - _scsih_search_responding_raid_devices(ioc); - _scsih_search_responding_expanders(ioc); - _scsih_error_recovery_delete_devices(ioc); + + switch (reset_phase) { + case MPT3_IOC_PRE_RESET: + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT3_IOC_AFTER_RESET: + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); + mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + break; + case MPT3_IOC_DONE_RESET: + dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: " + "MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); + if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && + !ioc->sas_hba.num_phys)) { + if (ioc->multipath_on_hba) + _scsih_sas_port_refresh(ioc); + _scsih_prep_device_scan(ioc); + _scsih_create_enclosure_list_after_reset(ioc); + _scsih_search_responding_sas_devices(ioc); + _scsih_search_responding_pcie_devices(ioc); + _scsih_search_responding_raid_devices(ioc); + _scsih_search_responding_expanders(ioc); + _scsih_error_recovery_delete_devices(ioc); + } + break; } } @@ -9326,6 +13545,8 @@ mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. + * + * Return nothing. 
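The consolidated mpt3sas_scsih_reset_handler() above folds the three separate upstream callbacks into one entry point that switches on the phase argument: log at PRE_RESET, abort outstanding internal commands at AFTER_RESET, and rescan at DONE_RESET unless the initial driver load is still in flight. Its control flow, reduced to a standalone sketch in which the phase names mirror the hunk and the actions are placeholders:

#include <stdio.h>

enum reset_phase { IOC_PRE_RESET, IOC_AFTER_RESET, IOC_DONE_RESET };

static void clear_outstanding_commands(void)
{
	puts("aborting internal scsih/tm commands");
}

static void rescan_after_reset(void)
{
	puts("refresh ports, mark responding devices, delete the rest");
}

/* One handler, three phases -- the shape of
 * mpt3sas_scsih_reset_handler(ioc, reset_phase). */
static void reset_handler(enum reset_phase phase, int driver_loading)
{
	switch (phase) {
	case IOC_PRE_RESET:
		puts("PRE_RESET: nothing to undo yet, just log");
		break;
	case IOC_AFTER_RESET:
		clear_outstanding_commands();
		break;
	case IOC_DONE_RESET:
		/* Skip the rescan while the initial load is running;
		 * the probe path will discover devices on its own. */
		if (!driver_loading)
			rescan_after_reset();
		break;
	}
}

int main(void)
{
	reset_handler(IOC_PRE_RESET, 0);
	reset_handler(IOC_AFTER_RESET, 0);
	reset_handler(IOC_DONE_RESET, 0);
	return 0;
}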
*/ static void _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) @@ -9340,37 +13561,47 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) switch (fw_event->event) { case MPT3SAS_PROCESS_TRIGGER_DIAG: - mpt3sas_process_trigger_data(ioc, - (struct SL_WH_TRIGGERS_EVENT_DATA_T *) - fw_event->event_data); + mpt3sas_process_trigger_data(ioc, fw_event->event_data); break; case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: - while (scsi_host_in_recovery(ioc->shost) || - ioc->shost_recovery) { + while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) + { /* * If we're unloading, bail. Otherwise, this can become * an infinite loop. */ if (ioc->remove_host) goto out; + ssleep(1); } _scsih_remove_unresponding_devices(ioc); + _scsih_del_dirty_port_entries(ioc); _scsih_scan_for_devices_after_reset(ioc); + if(ioc->hba_mpi_version_belonged == MPI2_VERSION) + _scsih_hide_unhide_sas_devices(ioc); break; case MPT3SAS_PORT_ENABLE_COMPLETE: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) ioc->start_scan = 0; +#else + complete(&ioc->port_enable_cmds.done); +#endif if (missing_delay[0] != -1 && missing_delay[1] != -1) mpt3sas_base_update_missing_delay(ioc, missing_delay[0], missing_delay[1]); - dewtprintk(ioc, - ioc_info(ioc, "port enable: complete from worker thread\n")); + + dewtprintk(ioc, printk(MPT3SAS_INFO_FMT "port enable: complete " + "from worker thread\n", ioc->name)); break; case MPT3SAS_TURN_ON_PFA_LED: _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); break; case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: - _scsih_sas_topology_change_event(ioc, fw_event); + if (_scsih_sas_topology_change_event(ioc, fw_event)) { + _scsih_fw_event_requeue(ioc, fw_event, 1000); + return; + } break; case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) @@ -9388,8 +13619,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) _scsih_sas_broadcast_primitive_event(ioc, fw_event); break; case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: - _scsih_sas_enclosure_dev_status_change_event(ioc, - fw_event); + _scsih_sas_enclosure_dev_status_change_event(ioc, fw_event); break; case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: _scsih_sas_ir_config_change_event(ioc, fw_event); @@ -9410,8 +13640,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) _scsih_pcie_enumeration_event(ioc, fw_event); break; case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: - _scsih_pcie_topology_change_event(ioc, fw_event); + if (_scsih_pcie_topology_change_event(ioc, fw_event)) { + _scsih_fw_event_requeue(ioc, fw_event, 1000); return; + } break; } out: @@ -9419,13 +13651,17 @@ out: } /** - * _firmware_event_work + * _firmware_event_work and _firmware_event_work_delayed + * @ioc: per adapter object * @work: The fw_event_work object * Context: user. * * wrappers for the work thread handling firmware events + * + * Return nothing. 
*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) static void _firmware_event_work(struct work_struct *work) { @@ -9434,6 +13670,23 @@ _firmware_event_work(struct work_struct *work) _mpt3sas_fw_work(fw_event->ioc, fw_event); } +static void +_firmware_event_work_delayed(struct work_struct *work) +{ + struct fw_event_work *fw_event = container_of(work, + struct fw_event_work, delayed_work.work); + + _mpt3sas_fw_work(fw_event->ioc, fw_event); +} +#else +static void +_firmware_event_work(void *arg) +{ + struct fw_event_work *fw_event = (struct fw_event_work *)arg; + + _mpt3sas_fw_work(fw_event->ioc, fw_event); +} +#endif /** * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) @@ -9445,8 +13698,8 @@ _firmware_event_work(struct work_struct *work) * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, @@ -9458,15 +13711,15 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u16 sz; Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; - /* events turned off due to host reset */ + /* events turned off due to host reset*/ if (ioc->pci_error_recovery) return 1; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (unlikely(!mpi_reply)) { - ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 1; } @@ -9515,39 +13768,16 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, (Mpi2EventDataIrVolume_t *) mpi_reply->EventData); break; + case MPI2_EVENT_LOG_ENTRY_ADDED: - { - Mpi2EventDataLogEntryAdded_t *log_entry; - u32 *log_code; - - if (!ioc->is_warpdrive) - break; - - log_entry = (Mpi2EventDataLogEntryAdded_t *) - mpi_reply->EventData; - log_code = (u32 *)log_entry->LogData; - - if (le16_to_cpu(log_entry->LogEntryQualifier) - != MPT2_WARPDRIVE_LOGENTRY) - break; - - switch (le32_to_cpu(*log_code)) { - case MPT2_WARPDRIVE_LC_SSDT: - ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n"); - break; - case MPT2_WARPDRIVE_LC_SSDLW: - ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n"); - break; - case MPT2_WARPDRIVE_LC_SSDLF: - ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n"); - break; - case MPT2_WARPDRIVE_LC_BRMF: - ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. 
Check WarpDrive documentation for additional details.\n"); + if(ioc->hba_mpi_version_belonged == MPI2_VERSION) { + if (!ioc->is_warpdrive) + break; + _scsih_log_entry_add_event(ioc, + (Mpi2EventDataLogEntryAdded_t *) + mpi_reply->EventData); break; } - - break; - } case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: _scsih_sas_device_status_change_event(ioc, (Mpi2EventDataSasDeviceStatusChange_t *) @@ -9567,40 +13797,75 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, (Mpi2EventDataTemperature_t *) mpi_reply->EventData); break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: ActiveCableEventData = - (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; + (Mpi26EventDataActiveCableExcept_t*) mpi_reply->EventData; switch (ActiveCableEventData->ReasonCode) { + case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: - ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n", - ActiveCableEventData->ReceptacleID); - pr_notice("cannot be powered and devices connected\n"); - pr_notice("to this active cable will not be seen\n"); - pr_notice("This active cable requires %d mW of power\n", - ActiveCableEventData->ActiveCablePowerRequirement); + printk(MPT3SAS_INFO_FMT "Currently an active cable with ReceptacleID %d\n" + ,ioc->name, ActiveCableEventData->ReceptacleID); + printk(KERN_INFO "cannot be powered and devices connected to this active cable\n"); + printk(KERN_INFO "will not be seen. This active cable\n"); + printk(KERN_INFO "requires %d mW of power\n", ActiveCableEventData->ActiveCablePowerRequirement); break; case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: - ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n", - ActiveCableEventData->ReceptacleID); - pr_notice( - "is not running at optimal speed(12 Gb/s rate)\n"); + printk(MPT3SAS_INFO_FMT "Currently a cable with ReceptacleID %d" \ + " is not running at optimal speed(12 Gb/s rate)\n", + ioc->name, ActiveCableEventData->ReceptacleID); break; } - break; default: /* ignore the rest */ return 1; } - sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; - fw_event = alloc_fw_event_work(sz); + fw_event = alloc_fw_event_work(0); if (!fw_event) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return 1; } + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event->event_data = kzalloc(sz, GFP_ATOMIC); + if (!fw_event->event_data) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + fw_event_work_put(fw_event); + return 1; + } + + if (event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST) { + Mpi2EventDataSasTopologyChangeList_t *topo_event_data = + (Mpi2EventDataSasTopologyChangeList_t *) + mpi_reply->EventData; + fw_event->retries = kzalloc(topo_event_data->NumEntries, + GFP_ATOMIC); + if (!fw_event->retries) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + kfree(fw_event->event_data); + fw_event_work_put(fw_event); + return 1; + } + } + + if (event == MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST) { + Mpi26EventDataPCIeTopologyChangeList_t *topo_event_data = + (Mpi26EventDataPCIeTopologyChangeList_t *) mpi_reply->EventData; + fw_event->retries = kzalloc(topo_event_data->NumEntries, + GFP_ATOMIC); + if (!fw_event->retries) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + kfree(fw_event->event_data); + fw_event_work_put(fw_event); + return 1; + } + } 
memcpy(fw_event->event_data, mpi_reply->EventData, sz); fw_event->ioc = ioc; @@ -9619,6 +13884,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, * * Removing object and freeing associated memory from the * ioc->sas_expander_list. + * + * Return nothing. */ static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, @@ -9635,21 +13902,24 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) mpt3sas_device_remove_by_sas_address(ioc, - mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); else if (mpt3sas_port->remote_identify.device_type == SAS_EDGE_EXPANDER_DEVICE || mpt3sas_port->remote_identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) mpt3sas_expander_remove(ioc, - mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); } mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, - sas_expander->sas_address_parent); + sas_expander->sas_address_parent, sas_expander->port); - ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n", - sas_expander->handle, (unsigned long long) - sas_expander->sas_address); + printk(MPT3SAS_INFO_FMT "expander_remove: handle" + "(0x%04x), sas_addr(0x%016llx), port:%d\n", ioc->name, + sas_expander->handle, (unsigned long long) + sas_expander->sas_address, sas_expander->port->port_id); spin_lock_irqsave(&ioc->sas_node_lock, flags); list_del(&sas_expander->list); @@ -9659,12 +13929,74 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, kfree(sas_expander); } +static void +_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; + + /* are there any NVMe devices ? 
*/ + if (list_empty(&ioc->pcie_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); + goto out; + } + + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for six seconds */ + wait_for_completion_timeout(&ioc->scsih_cmds.done, + IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + printk(MPT3SAS_INFO_FMT "Io Unit Control shutdown (complete): " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + /** * _scsih_ir_shutdown - IR shutdown notification * @ioc: per adapter object * * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that * the host system is shutting down. + * + * Return nothing. */ static void _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) @@ -9681,17 +14013,22 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) if (list_empty(&ioc->raid_device_list)) return; + if (mpt3sas_base_pci_device_is_unplugged(ioc)) + return; + mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); goto out; } ioc->scsih_cmds.status = MPT3_CMD_PENDING; smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; goto out; } @@ -9703,23 +14040,25 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; - if (!ioc->hide_ir_msg) - ioc_info(ioc, "IR shutdown (sending)\n"); + if (!ioc->warpdrive_msg) + printk(MPT3SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); goto out; } if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { mpi_reply = ioc->scsih_cmds.reply; - if (!ioc->hide_ir_msg) - ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", - le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo)); + if (!ioc->warpdrive_msg) + printk(MPT3SAS_INFO_FMT "IR shutdown (complete): " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + 
ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); } out: @@ -9727,30 +14066,90 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) mutex_unlock(&ioc->scsih_cmds.mutex); } +/** + * _scsih_get_shost_and_ioc - get shost and ioc + * and verify whether they are NULL or not + * @pdev: PCI device struct + * @shost: address of scsi host pointer + * @ioc: address of HBA adapter pointer + * + * Return zero if *shost and *ioc are not NULL, otherwise return an error number. + */ +static int +_scsih_get_shost_and_ioc(struct pci_dev *pdev, + struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc) +{ + *shost = pci_get_drvdata(pdev); + if (*shost == NULL) { + dev_err(&pdev->dev, "pdev's driver data is null\n"); + return -ENXIO; + } + + *ioc = shost_private(*shost); + if (*ioc == NULL) { + dev_err(&pdev->dev, "shost's private data is null\n"); + return -ENXIO; + } + + return 0; +} + + /** * scsih_remove - detach and remove the host * @pdev: PCI device struct * * Routine called when unloading the driver. + * Return nothing. */ -static void scsih_remove(struct pci_dev *pdev) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +static void +#else +static void __devexit +#endif +scsih_remove(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; struct _sas_port *mpt3sas_port, *next_port; struct _raid_device *raid_device, *next; struct MPT3SAS_TARGET *sas_target_priv_data; struct _pcie_device *pcie_device, *pcienext; struct workqueue_struct *wq; unsigned long flags; + struct hba_port *port, *port_next; Mpi2ConfigReply_t mpi_reply; + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to remove device\n"); + return; + } + +/* Kashyap - don't port this to upstream + * This is a fix for CQ 186643 + * + * The reason for this fix is the Fast Load feature (CQ 176830), + * which comes from the Async Scanning implementation. + * This allows the driver to load before the initialization has completed. + * Under Red Hat, rmmod does not do reference count checking, so it + * will allow driver unload even though async scanning is still active. + * This issue doesn't occur with SuSE. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + while (ioc->is_driver_loading) + ssleep(1); +#endif ioc->remove_host = 1; mpt3sas_wait_for_commands_to_complete(ioc); - _scsih_flush_running_cmds(ioc); + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && + (sata_smart_polling)) + mpt3sas_base_stop_smart_polling(ioc); + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + mpt3sas_scsih_flush_running_cmds(ioc); _scsih_fw_event_cleanup_queue(ioc); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); spin_lock_irqsave(&ioc->fw_event_lock, flags); wq = ioc->firmware_event_thread; @@ -9758,16 +14157,21 @@ static void scsih_remove(struct pci_dev *pdev) spin_unlock_irqrestore(&ioc->fw_event_lock, flags); if (wq) destroy_workqueue(wq); + /* * Copy back the unmodified ioc page1 so that on next driver load, * current modified changes on ioc page1 won't take effect.
*/ if (ioc->is_aero_ioc) mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, - &ioc->ioc_pg1_copy); + &ioc->ioc_pg1_copy); + /* release all the volumes */ _scsih_ir_shutdown(ioc); + sas_remove_host(shost); + scsi_remove_host(shost); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list) { if (raid_device->starget) { @@ -9776,8 +14180,9 @@ static void scsih_remove(struct pci_dev *pdev) sas_target_priv_data->deleted = 1; scsi_remove_target(&raid_device->starget->dev); } - ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", - raid_device->handle, (u64)raid_device->wwid); + printk(MPT3SAS_INFO_FMT "removing handle(0x%04x), wwid" + "(0x%016llx)\n", ioc->name, raid_device->handle, + (unsigned long long) raid_device->wwid); _scsih_raid_device_remove(ioc, raid_device); } list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list, @@ -9793,13 +14198,22 @@ static void scsih_remove(struct pci_dev *pdev) if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) mpt3sas_device_remove_by_sas_address(ioc, - mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); else if (mpt3sas_port->remote_identify.device_type == SAS_EDGE_EXPANDER_DEVICE || mpt3sas_port->remote_identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) mpt3sas_expander_remove(ioc, - mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + } + + list_for_each_entry_safe(port, + port_next, + &ioc->port_table_list, list) { + list_del(&port->list); + kfree(port); } /* free phys attached to the sas_host */ @@ -9819,20 +14233,26 @@ static void scsih_remove(struct pci_dev *pdev) /** * scsih_shutdown - routine call during system shutdown * @pdev: PCI device struct + * + * Return nothing. */ static void scsih_shutdown(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; struct workqueue_struct *wq; unsigned long flags; Mpi2ConfigReply_t mpi_reply; + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to shutdown device\n"); + return; + } + ioc->remove_host = 1; mpt3sas_wait_for_commands_to_complete(ioc); - _scsih_flush_running_cmds(ioc); _scsih_fw_event_cleanup_queue(ioc); @@ -9842,19 +14262,25 @@ scsih_shutdown(struct pci_dev *pdev) spin_unlock_irqrestore(&ioc->fw_event_lock, flags); if (wq) destroy_workqueue(wq); + /* - * Copy back the unmodified ioc page1 so that on next driver load, + * Copy back the unmodified ioc page1. so that on next driver load, * current modified changes on ioc page1 won't take effect. 
*/ if (ioc->is_aero_ioc) mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, - &ioc->ioc_pg1_copy); + &ioc->ioc_pg1_copy); _scsih_ir_shutdown(ioc); - mpt3sas_base_detach(ioc); + _scsih_nvme_shutdown(ioc); + mpt3sas_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); } - /** * _scsih_probe_boot_devices - reports 1st device * @ioc: per adapter object @@ -9877,6 +14303,10 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) unsigned long flags; int rc; int tid; + struct hba_port *port; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + u8 protection_mask; +#endif /* no Bios, return immediately */ if (!ioc->bios_pg3.BiosVersion) @@ -9899,6 +14329,19 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) if (channel == RAID_CHANNEL) { raid_device = device; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if ((!ioc->disable_eedp_support) && + (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { + /* Disable DIX0 protection capability */ + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, protection_mask & 0x77); + printk(MPT3SAS_INFO_FMT ": Disabling DIX0 prot capability " + "because HBA does not support DIX0 operation on volumes\n", + ioc->name); + } + } +#endif rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) @@ -9912,27 +14355,58 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0); if (rc) _scsih_pcie_device_remove(ioc, pcie_device); + else if (!pcie_device->starget) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + /* CQ 206770: + * When async scanning is enabled, it's not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device() + * ->sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { +#endif + /* TODO - need to find out whether this condition will occur + * or not */ + //_scsih_pcie_device_remove(ioc, pcie_device); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + } +#endif + } } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = device; handle = sas_device->handle; sas_address_parent = sas_device->sas_address_parent; sas_address = sas_device->sas_address; + port = sas_device->port; list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!port) + return; + if (ioc->hide_drives) return; + if (!mpt3sas_transport_port_add(ioc, handle, - sas_address_parent)) { + sas_address_parent, port)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + /* CQ 206770: + * When async scanning is enabled, it's not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device() + * ->sysfs_addrm_start() + */ if (!ioc->is_driver_loading) { +#endif mpt3sas_transport_port_remove(ioc, - sas_address, - sas_address_parent); + sas_address, sas_address_parent, port); _scsih_sas_device_remove(ioc, sas_device); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) } +#endif } } } @@ -9960,6 +14434,15 @@ _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) } } +/** + * get_next_sas_device - Get the next sas device + * @ioc: per adapter object + * + * Get the next sas device from sas_device_init_list list.
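As a side note on the `protection_mask & 0x77` above: the SCSI host protection capabilities form a bitmask, and 0x77 clears exactly bit 3, which is SHOST_DIX_TYPE0_PROTECTION in include/scsi/scsi_host.h. A short sketch of the arithmetic:

/* SHOST_DIF_TYPE1..3_PROTECTION are bits 0-2 and
 * SHOST_DIX_TYPE0..3_PROTECTION are bits 3-6, so 0x77 (0111 0111b)
 * keeps every capability except DIX type 0.
 */
unsigned int mask = scsi_host_get_prot(ioc->shost);	/* e.g. 0x7f: all types */
unsigned int no_dix0 = mask & 0x77;			/* 0x7f -> 0x77 */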
+ * + * Returns sas device structure if sas_device_init_list list is + * not empty, otherwise returns NULL + */ static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) { struct _sas_device *sas_device = NULL; @@ -9976,6 +14459,14 @@ static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) return sas_device; } +/** + * sas_device_make_active - Add sas device to sas_device_list list + * @ioc: per adapter object + * @sas_device: sas device object + * + * Add the sas device which has been registered with the SCSI Transport Layer + * to sas_device_list list + */ static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device) { @@ -10013,31 +14504,41 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) { struct _sas_device *sas_device; - if (ioc->hide_drives) - return; - while ((sas_device = get_next_sas_device(ioc))) { + if (ioc->hide_drives) { + sas_device_make_active(ioc, sas_device); + sas_device_put(sas_device); + continue; + } + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, - sas_device->sas_address_parent)) { + sas_device->sas_address_parent, + sas_device->port)) { _scsih_sas_device_remove(ioc, sas_device); sas_device_put(sas_device); continue; } else if (!sas_device->starget) { - /* +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + /* CQ 206770: * When asyn scanning is enabled, its not possible to * remove devices while scanning is turned on due to an * oops in scsi_sysfs_add_sdev()->add_device()-> * sysfs_addrm_start() */ if (!ioc->is_driver_loading) { +#endif mpt3sas_transport_port_remove(ioc, sas_device->sas_address, - sas_device->sas_address_parent); + sas_device->sas_address_parent, + sas_device->port); _scsih_sas_device_remove(ioc, sas_device); sas_device_put(sas_device); continue; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) } +#endif } + sas_device_make_active(ioc, sas_device); sas_device_put(sas_device); } @@ -10049,7 +14550,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) * * Get the next pcie device from pcie_device_init_list list. * - * Return: pcie device structure if pcie_device_init_list list is not empty + * Returns pcie device structure if pcie_device_init_list list is not empty * otherwise returns NULL */ static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) @@ -10094,50 +14595,52 @@ static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, } /** - * _scsih_probe_pcie - reporting PCIe devices to scsi-ml - * @ioc: per adapter object - * - * Called during initial loading of the driver. - */ +* _scsih_probe_pcie - reporting PCIe devices to scsi-ml +* @ioc: per adapter object +* +* Called during initial loading of the driver.
+*/ static void _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) { - struct _pcie_device *pcie_device; int rc; - - /* PCIe Device List */ + struct _pcie_device *pcie_device; + while ((pcie_device = get_next_pcie_device(ioc))) { if (pcie_device->starget) { pcie_device_put(pcie_device); continue; } if (pcie_device->access_status == - MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { pcie_device_make_active(ioc, pcie_device); pcie_device_put(pcie_device); continue; } rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, - pcie_device->id, 0); + pcie_device->id, 0); if (rc) { _scsih_pcie_device_remove(ioc, pcie_device); pcie_device_put(pcie_device); continue; } else if (!pcie_device->starget) { - /* - * When async scanning is enabled, its not possible to +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + /* CQ 206770: + * When async scanning is enabled, it's not possible to * remove devices while scanning is turned on due to an * oops in scsi_sysfs_add_sdev()->add_device()-> * sysfs_addrm_start() */ if (!ioc->is_driver_loading) { - /* TODO-- Need to find out whether this condition will - * occur or not - */ +#endif + /* TODO - need to find out whether this condition will occur + * or not */ _scsih_pcie_device_remove(ioc, pcie_device); pcie_device_put(pcie_device); continue; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) } +#endif } pcie_device_make_active(ioc, pcie_device); pcie_device_put(pcie_device); @@ -10178,8 +14681,9 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) } } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) /** - * scsih_scan_start - scsi lld callback for .scan_start + * _scsih_scan_start - scsi lld callback for .scan_start * @shost: SCSI host pointer * * The shost has the ability to discover targets on its own instead @@ -10187,12 +14691,17 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) * firmware discovery. */ static void -scsih_scan_start(struct Scsi_Host *shost) +_scsih_scan_start(struct Scsi_Host *shost) { struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); int rc; - if (diag_buffer_enable != -1 && diag_buffer_enable != 0) + + if (ioc->is_warpdrive) + mpt3sas_enable_diag_buffer(ioc, 1); + else if (diag_buffer_enable != -1 && diag_buffer_enable != 0) mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) + mpt3sas_enable_diag_buffer(ioc, 1); if (disable_discovery > 0) return; @@ -10201,11 +14710,11 @@ scsih_scan_start(struct Scsi_Host *shost) rc = mpt3sas_port_enable(ioc); if (rc != 0) - ioc_info(ioc, "port enable: FAILED\n"); + printk(MPT3SAS_INFO_FMT "port enable: FAILED\n", ioc->name); } /** - * scsih_scan_finished - scsi lld callback for .scan_finished + * _scsih_scan_finished - scsi lld callback for .scan_finished * @shost: SCSI host pointer * @time: elapsed time of the scan in jiffies * @@ -10214,36 +14723,37 @@ scsih_scan_start(struct Scsi_Host *shost) * we wait for firmware discovery to complete, then return 1.
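For reference, the SCSI midlayer drives these two hooks roughly as sketched below when asynchronous scanning is in effect; this is a simplification of the loop in drivers/scsi/scsi_scan.c, and the poll interval shown is an assumption:

static void example_async_scan(struct Scsi_Host *shost)
{
	unsigned long start = jiffies;

	/* scan_start() kicks off discovery (port enable here); then
	 * scan_finished() is polled with the elapsed time until it
	 * returns nonzero (success, failure, or timeout).
	 */
	shost->hostt->scan_start(shost);
	while (!shost->hostt->scan_finished(shost, jiffies - start))
		msleep(10);
}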
*/ static int -scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) +_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); if (disable_discovery > 0) { ioc->is_driver_loading = 0; ioc->wait_for_discovery_to_complete = 0; - return 1; + goto out; } if (time >= (300 * HZ)) { ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; - ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n"); + printk(MPT3SAS_INFO_FMT "port enable: FAILED with timeout " + "(timeout=300s)\n", ioc->name); ioc->is_driver_loading = 0; - return 1; + goto out; } if (ioc->start_scan) return 0; if (ioc->start_scan_failed) { - ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n", - ioc->start_scan_failed); + printk(MPT3SAS_INFO_FMT "port enable: FAILED with " + "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed); ioc->is_driver_loading = 0; ioc->wait_for_discovery_to_complete = 0; ioc->remove_host = 1; - return 1; + goto out; } - ioc_info(ioc, "port enable: SUCCESS\n"); + printk(MPT3SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name); ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; if (ioc->wait_for_discovery_to_complete) { @@ -10252,98 +14762,139 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) } mpt3sas_base_start_watchdog(ioc); ioc->is_driver_loading = 0; + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) + && (sata_smart_polling)) + mpt3sas_base_start_smart_polling(ioc); +out: return 1; } +#endif /* shost template for SAS 2.0 HBA devices */ static struct scsi_host_template mpt2sas_driver_template = { - .module = THIS_MODULE, - .name = "Fusion MPT SAS Host", - .proc_name = MPT2SAS_DRIVER_NAME, - .queuecommand = scsih_qcmd, - .target_alloc = scsih_target_alloc, - .slave_alloc = scsih_slave_alloc, - .slave_configure = scsih_slave_configure, - .target_destroy = scsih_target_destroy, - .slave_destroy = scsih_slave_destroy, - .scan_finished = scsih_scan_finished, - .scan_start = scsih_scan_start, - .change_queue_depth = scsih_change_queue_depth, - .eh_abort_handler = scsih_abort, - .eh_device_reset_handler = scsih_dev_reset, - .eh_target_reset_handler = scsih_target_reset, - .eh_host_reset_handler = scsih_host_reset, - .bios_param = scsih_bios_param, - .can_queue = 1, - .this_id = -1, - .sg_tablesize = MPT2SAS_SG_DEPTH, - .max_sectors = 32767, - .cmd_per_lun = 7, - .shost_attrs = mpt3sas_host_attrs, - .sdev_attrs = mpt3sas_dev_attrs, - .track_queue_depth = 1, - .cmd_size = sizeof(struct scsiio_tracker), + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT2SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = _scsih_target_alloc, + .slave_alloc = _scsih_slave_alloc, + .slave_configure = _scsih_slave_configure, + .target_destroy = _scsih_target_destroy, + .slave_destroy = _scsih_slave_destroy, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + .scan_finished = _scsih_scan_finished, + .scan_start = _scsih_scan_start, +#endif + .change_queue_depth = _scsih_change_queue_depth, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) + .change_queue_type = _scsih_change_queue_type, +#endif + .eh_abort_handler = _scsih_abort, + .eh_device_reset_handler = _scsih_dev_reset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + .eh_target_reset_handler = _scsih_target_reset, +#endif + .eh_host_reset_handler = _scsih_host_reset, + .bios_param = _scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT2SAS_SG_DEPTH, + .max_sectors = 32767, +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(5,3,0)) + .max_segment_size = 0xffffffff, +#endif + .cmd_per_lun = 7, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) + .use_clustering = ENABLE_CLUSTERING, +#endif + .shost_attrs = mpt3sas_host_attrs, + .sdev_attrs = mpt3sas_dev_attrs, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)) + .track_queue_depth = 1, +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) + .cmd_size = sizeof(struct scsiio_tracker), +#endif }; /* raid transport support for SAS 2.0 HBA devices */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) static struct raid_function_template mpt2sas_raid_functions = { - .cookie = &mpt2sas_driver_template, - .is_raid = scsih_is_raid, - .get_resync = scsih_get_resync, - .get_state = scsih_get_state, + .cookie = &mpt2sas_driver_template, + .is_raid = _scsih_is_raid, + .get_resync = _scsih_get_resync, + .get_state = _scsih_get_state, }; +#endif /* shost template for SAS 3.0 HBA devices */ static struct scsi_host_template mpt3sas_driver_template = { - .module = THIS_MODULE, - .name = "Fusion MPT SAS Host", - .proc_name = MPT3SAS_DRIVER_NAME, - .queuecommand = scsih_qcmd, - .target_alloc = scsih_target_alloc, - .slave_alloc = scsih_slave_alloc, - .slave_configure = scsih_slave_configure, - .target_destroy = scsih_target_destroy, - .slave_destroy = scsih_slave_destroy, - .scan_finished = scsih_scan_finished, - .scan_start = scsih_scan_start, - .change_queue_depth = scsih_change_queue_depth, - .eh_abort_handler = scsih_abort, - .eh_device_reset_handler = scsih_dev_reset, - .eh_target_reset_handler = scsih_target_reset, - .eh_host_reset_handler = scsih_host_reset, - .bios_param = scsih_bios_param, - .can_queue = 1, - .this_id = -1, - .sg_tablesize = MPT3SAS_SG_DEPTH, - .max_sectors = 32767, + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT3SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = _scsih_target_alloc, + .slave_alloc = _scsih_slave_alloc, + .slave_configure = _scsih_slave_configure, + .target_destroy = _scsih_target_destroy, + .slave_destroy = _scsih_slave_destroy, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) + .scan_finished = _scsih_scan_finished, + .scan_start = _scsih_scan_start, +#endif + .change_queue_depth = _scsih_change_queue_depth, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) + .change_queue_type = _scsih_change_queue_type, +#endif + .eh_abort_handler = _scsih_abort, + .eh_device_reset_handler = _scsih_dev_reset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + .eh_target_reset_handler = _scsih_target_reset, +#endif + .eh_host_reset_handler = _scsih_host_reset, + .bios_param = _scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT3SAS_SG_DEPTH, + .max_sectors = 32767, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0)) .max_segment_size = 0xffffffff, - .cmd_per_lun = 7, - .shost_attrs = mpt3sas_host_attrs, - .sdev_attrs = mpt3sas_dev_attrs, - .track_queue_depth = 1, - .cmd_size = sizeof(struct scsiio_tracker), +#endif + .cmd_per_lun = 7, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) + .use_clustering = ENABLE_CLUSTERING, +#endif + .shost_attrs = mpt3sas_host_attrs, + .sdev_attrs = mpt3sas_dev_attrs, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)) + .track_queue_depth = 1, +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) + .cmd_size = sizeof(struct scsiio_tracker), +#endif }; /* raid transport support for SAS 3.0 HBA devices */ -static struct raid_function_template mpt3sas_raid_functions = { - .cookie = &mpt3sas_driver_template, - .is_raid = 
scsih_is_raid, - .get_resync = scsih_get_resync, - .get_state = scsih_get_state, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) +static struct raid_function_template mpt3sas_raid_functions = { + .cookie = &mpt3sas_driver_template, + .is_raid = _scsih_is_raid, + .get_resync = _scsih_get_resync, + .get_state = _scsih_get_state, }; +#endif /** * _scsih_determine_hba_mpi_version - determine in which MPI version class - * this device belongs to. + * this device belongs to. * @pdev: PCI device struct * * return MPI2_VERSION for SAS 2.0 HBA devices, - * MPI25_VERSION for SAS 3.0 HBA devices, and - * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices + * MPI25_VERSION for SAS 3.0 HBA devices, and + * MPI26_VERSION for SAS 3.5 and newer (tri-mode) HBA devices. */ + static u16 -_scsih_determine_hba_mpi_version(struct pci_dev *pdev) -{ +_scsih_determine_hba_mpi_version(struct pci_dev *pdev) { switch (pdev->device) { case MPI2_MFGPAGE_DEVID_SSS6200: @@ -10363,8 +14914,8 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) case MPI2_MFGPAGE_DEVID_SAS2308_1: case MPI2_MFGPAGE_DEVID_SAS2308_2: case MPI2_MFGPAGE_DEVID_SAS2308_3: - case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: - case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: + case MPI26_MFGPAGE_DEVID_SWCH_MPI_EP: + case MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1: return MPI2_VERSION; case MPI25_MFGPAGE_DEVID_SAS3004: case MPI25_MFGPAGE_DEVID_SAS3008: @@ -10389,15 +14940,22 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) case MPI26_MFGPAGE_DEVID_SAS3516: case MPI26_MFGPAGE_DEVID_SAS3516_1: case MPI26_MFGPAGE_DEVID_SAS3416: + case MPI26_MFGPAGE_DEVID_SAS3716: case MPI26_MFGPAGE_DEVID_SAS3616: - case MPI26_ATLAS_PCIe_SWITCH_DEVID: - case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: - case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: + case MPI26_MFGPAGE_DEVID_SAS3708: + case MPI26_MFGPAGE_DEVID_PEX88000: + case MPI26_MFGPAGE_DEVID_INVALID0_3816: case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: + case MPI26_MFGPAGE_DEVID_INVALID1_3816: + case MPI26_MFGPAGE_DEVID_INVALID0_3916: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: + case MPI26_MFGPAGE_DEVID_INVALID1_3916: return MPI26_VERSION; } return 0; + } /** @@ -10405,7 +14963,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) * @pdev: PCI device struct * @id: pci device id * - * Return: 0 success, anything else error. + * Returns 0 success, anything else error. */ static int _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -10414,32 +14972,59 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost = NULL; int rv; u16 hba_mpi_version; + u8 revision; + int enumerate_hba = 0; /* Determine in which MPI version class this pci device belongs */ hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); + if (hba_mpi_version == 0) return -ENODEV; + if (hbas_to_enumerate == -1) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) + enumerate_hba = 0; +#else + enumerate_hba = 2; +#endif + } else + enumerate_hba = hbas_to_enumerate; + /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, * for other generation HBA's return with -ENODEV */ - if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) + if ((enumerate_hba == 1) && (hba_mpi_version != MPI2_VERSION)) return -ENODEV; - /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, - * for other generation HBA's return with -ENODEV + /* Enumerate PCIe HBA (MPI Endpoint), SAS 3.0 & above generation HBA's + * if hbas_to_enumerate is two, for other HBA's return with -ENODEV.
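The hbas_to_enumerate handling above collapses to a small mapping that is repeated in module init/exit below; a standalone sketch of the same logic (the helper name is illustrative only):

/* hbas_to_enumerate -> enumerate_hba:
 *   -1 : default; 0 (all generations) on kernels >= 4.4, else 2
 *    0 : enumerate every supported HBA generation
 *    1 : SAS 2.0 (MPI2) HBAs only
 *    2 : MPI Endpoint plus SAS 3.0 and newer HBAs
 */
static int example_resolve_enumerate_hba(int hbas_to_enumerate)
{
	if (hbas_to_enumerate != -1)
		return hbas_to_enumerate;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	return 0;
#else
	return 2;
#endif
}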
*/ - if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION - || hba_mpi_version == MPI26_VERSION))) - return -ENODEV; + if (enumerate_hba == 2) { + switch (hba_mpi_version) { + case MPI2_VERSION: + if (pdev->device == MPI26_MFGPAGE_DEVID_SWCH_MPI_EP || + pdev->device == MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1) + break; + else + return -ENODEV; + case MPI25_VERSION: + case MPI26_VERSION: + break; + default: + return -ENODEV; + } + } switch (hba_mpi_version) { case MPI2_VERSION: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); +#endif /* Use mpt2sas driver host template for SAS 2.0 HBA's */ shost = scsi_host_alloc(&mpt2sas_driver_template, - sizeof(struct MPT3SAS_ADAPTER)); + sizeof(struct MPT3SAS_ADAPTER)); if (!shost) return -ENODEV; ioc = shost_priv(shost); @@ -10447,32 +15032,36 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->hba_mpi_version_belonged = hba_mpi_version; ioc->id = mpt2_ids++; sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); - switch (pdev->device) { - case MPI2_MFGPAGE_DEVID_SSS6200: + if (pdev->device == MPI2_MFGPAGE_DEVID_SSS6200) { ioc->is_warpdrive = 1; ioc->hide_ir_msg = 1; - break; - case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: - case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: - ioc->is_mcpu_endpoint = 1; - break; - default: - ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; - break; } + else if (pdev->device == MPI26_MFGPAGE_DEVID_SWCH_MPI_EP || + pdev->device == MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1) + ioc->is_mcpu_endpoint = 1; + else + ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; + + if(multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + break; case MPI25_VERSION: case MPI26_VERSION: /* Use mpt3sas driver host template for SAS 3.0 HBA's */ shost = scsi_host_alloc(&mpt3sas_driver_template, - sizeof(struct MPT3SAS_ADAPTER)); + sizeof(struct MPT3SAS_ADAPTER)); if (!shost) return -ENODEV; + ioc = shost_priv(shost); memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); ioc->hba_mpi_version_belonged = hba_mpi_version; ioc->id = mpt3_ids++; sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); + switch (pdev->device) { case MPI26_MFGPAGE_DEVID_SAS3508: case MPI26_MFGPAGE_DEVID_SAS3508_1: @@ -10480,15 +15069,23 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) case MPI26_MFGPAGE_DEVID_SAS3516: case MPI26_MFGPAGE_DEVID_SAS3516_1: case MPI26_MFGPAGE_DEVID_SAS3416: + case MPI26_MFGPAGE_DEVID_SAS3716: case MPI26_MFGPAGE_DEVID_SAS3616: - case MPI26_ATLAS_PCIe_SWITCH_DEVID: + case MPI26_MFGPAGE_DEVID_SAS3708: + case MPI26_MFGPAGE_DEVID_PEX88000: ioc->is_gen35_ioc = 1; break; + case MPI26_MFGPAGE_DEVID_INVALID0_3816: + case MPI26_MFGPAGE_DEVID_INVALID0_3916: + case MPI26_MFGPAGE_DEVID_INVALID1_3816: + case MPI26_MFGPAGE_DEVID_INVALID1_3916: + dev_printk(KERN_ERR, &pdev->dev, + "HBA is in Non Secure mode"); + return 1; case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: - dev_info(&pdev->dev, - "HBA is in Configurable Secure mode\n"); - /* fall through */ + dev_printk(KERN_INFO, &pdev->dev, + "HBA is in Configurable Secure mode"); case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; @@ -10496,22 +15093,35 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) default: ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; } - if ((ioc->hba_mpi_version_belonged == 
MPI25_VERSION && - pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || - (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); +#else + revision = pdev->revision; +#endif + + if ((revision >= SAS3_PCI_DEVICE_C0_REVISION) || + (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { ioc->combined_reply_queue = 1; - if (ioc->is_gen35_ioc) - ioc->combined_reply_index_count = - MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; - else - ioc->combined_reply_index_count = - MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; + if (ioc->is_gen35_ioc) { + ioc->nc_reply_index_count = NUM_REPLY_POST_INDEX_REGISTERS_16; + } else { + ioc->nc_reply_index_count = NUM_REPLY_POST_INDEX_REGISTERS_12; + } } + + if(multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + break; default: return -ENODEV; } + /* init local params */ + ioc = shost_private(shost); INIT_LIST_HEAD(&ioc->list); spin_lock(&gioc_lock); list_add_tail(&ioc->list, &mpt3sas_ioc_list); @@ -10521,6 +15131,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->scsi_io_cb_idx = scsi_io_cb_idx; ioc->tm_cb_idx = tm_cb_idx; ioc->ctl_cb_idx = ctl_cb_idx; + ioc->ctl_tm_cb_idx = ctl_tm_cb_idx; + ioc->ctl_diag_cb_idx = ctl_diag_cb_idx; ioc->base_cb_idx = base_cb_idx; ioc->port_enable_cb_idx = port_enable_cb_idx; ioc->transport_cb_idx = transport_cb_idx; @@ -10528,16 +15140,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->config_cb_idx = config_cb_idx; ioc->tm_tr_cb_idx = tm_tr_cb_idx; ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_tr_internal_cb_idx = tm_tr_internal_cb_idx; ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; ioc->logging_level = logging_level; - ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + ioc->schedule_dead_ioc_flush_running_cmds = &mpt3sas_scsih_flush_running_cmds; + /* * Enable MEMORY MOVE support flag. 
*/ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; - ioc->enable_sdev_max_qd = enable_sdev_max_qd; - /* misc semaphores and spin locks */ mutex_init(&ioc->reset_in_progress_mutex); /* initializing pci_access_mutex lock */ @@ -10547,11 +15159,13 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&ioc->sas_device_lock); spin_lock_init(&ioc->sas_node_lock); spin_lock_init(&ioc->fw_event_lock); - spin_lock_init(&ioc->raid_device_lock); spin_lock_init(&ioc->pcie_device_lock); + spin_lock_init(&ioc->raid_device_lock); spin_lock_init(&ioc->diag_trigger_lock); + spin_lock_init(&ioc->scsih_q_internal_lock); INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->port_table_list); INIT_LIST_HEAD(&ioc->sas_device_init_list); INIT_LIST_HEAD(&ioc->sas_expander_list); INIT_LIST_HEAD(&ioc->enclosure_list); @@ -10564,64 +15178,98 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->delayed_sc_list); INIT_LIST_HEAD(&ioc->delayed_event_ack_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->delayed_internal_tm_list); + INIT_LIST_HEAD(&ioc->scsih_q_intenal_cmds); INIT_LIST_HEAD(&ioc->reply_queue_list); sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); - /* init shost parameters */ shost->max_cmd_len = 32; shost->max_lun = max_lun; shost->transportt = mpt3sas_transport_template; shost->unique_id = ioc->id; - if (ioc->is_mcpu_endpoint) { - /* mCPU MPI support 64K max IO */ - shost->max_sectors = 128; - ioc_info(ioc, "The max_sectors value is set to %d\n", - shost->max_sectors); - } else { - if (max_sectors != 0xFFFF) { - if (max_sectors < 64) { - shost->max_sectors = 64; - ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", - max_sectors); - } else if (max_sectors > 32767) { - shost->max_sectors = 32767; - ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", - max_sectors); - } else { - shost->max_sectors = max_sectors & 0xFFFE; - ioc_info(ioc, "The max_sectors value is set to %d\n", - shost->max_sectors); - } +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR == 2)) || \ +((LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) && \ +(LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)))) + /* Disable blk mq support on these kernels. + * Refer CQ# 1351060 for more details. + */ + if (shost->use_blk_mq) { + shost_printk(KERN_INFO, shost, "Disabled use_blk_mq flag"); + shost->use_blk_mq = 0; + } +#endif + +#ifdef RHEL_MAJOR +#if (RHEL_MAJOR == 6 && RHEL_MINOR >= 2) + if(!host_lock_mode) + shost->hostt->lockless = 1; +#endif +#endif + + if (max_sectors != 0xFFFF) { + if (max_sectors < 64) { + shost->max_sectors = 64; + printk(MPT3SAS_WARN_FMT "Invalid value %d passed " + "for max_sectors, range is 64 to 32767. Assigning " + "value of 64.\n", ioc->name, max_sectors); + } else if (max_sectors > 32767) { + shost->max_sectors = 32767; + printk(MPT3SAS_WARN_FMT "Invalid value %d passed " + "for max_sectors, range is 64 to 32767. 
Assigning " + "default value of 32767.\n", ioc->name, + max_sectors); + } else { + shost->max_sectors = max_sectors & 0xFFFE; + printk(MPT3SAS_INFO_FMT "The max_sectors value is " + "set to %d\n", ioc->name, shost->max_sectors); } } - /* register EEDP capabilities with SCSI layer */ - if (prot_mask > 0) - scsi_host_set_prot(shost, prot_mask); - else - scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION - | SHOST_DIF_TYPE2_PROTECTION - | SHOST_DIF_TYPE3_PROTECTION); - scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + if(ioc->is_mcpu_endpoint) { + /* mCPU MPI support 64K max IO */ + shost->max_sectors = 128; + printk(MPT3SAS_INFO_FMT "The max_sectors value is " + "set to %d\n", ioc->name, shost->max_sectors); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + rv = scsi_add_host(shost, &pdev->dev); + if(rv) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + goto out_add_shost_fail; + } +#endif + + ioc->disable_eedp_support = disable_eedp; + /* event thread */ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), - "fw_event_%s%d", ioc->driver_name, ioc->id); + "fw_event_%s%d", ioc->driver_name, ioc->id); +#if defined(alloc_ordered_workqueue) ioc->firmware_event_thread = alloc_ordered_workqueue( ioc->firmware_event_name, 0); +#else + ioc->firmware_event_thread = create_singlethread_workqueue( + ioc->firmware_event_name); +#endif if (!ioc->firmware_event_thread) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rv = -ENODEV; goto out_thread_fail; } ioc->is_driver_loading = 1; if ((mpt3sas_base_attach(ioc))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rv = -ENODEV; goto out_attach_fail; } @@ -10640,20 +15288,84 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } else ioc->hide_drives = 0; - rv = scsi_add_host(shost, &pdev->dev); - if (rv) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - goto out_add_shost_fail; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (!ioc->disable_eedp_support) { + /* register EEDP capabilities with SCSI layer */ + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + if (prot_mask) + scsi_host_set_prot(shost, prot_mask); + else + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + break; + case MPI25_VERSION: + case MPI26_VERSION: + if (prot_mask) { + if (list_empty(&ioc->raid_device_list)) + scsi_host_set_prot(shost, (prot_mask & 0x7f)); + else + scsi_host_set_prot(shost, (prot_mask & 0x77)); + + printk(MPT3SAS_INFO_FMT + ": host protection capabilities enabled %s%s%s%s%s%s%s\n", + ioc->name, + (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", + (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", + (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", + (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", + (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", + (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", + (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? 
" DIX3" : ""); + if (protection_guard_mask) + scsi_host_set_guard(shost, (protection_guard_mask & 3)); + else + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + } + break; + } } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) + rv = scsi_init_shared_tag_map(shost, shost->can_queue); + if (rv) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_add_shost_fail; + } +#endif + rv = scsi_add_host(shost, &pdev->dev); + if(rv) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + goto out_add_shost_fail; + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) scsi_scan_host(shost); +#else + ioc->wait_for_discovery_to_complete = 0; + _scsih_probe_devices(ioc); + mpt3sas_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; + + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) + && (sata_smart_polling)) + mpt3sas_base_start_smart_polling(ioc); +#endif return 0; + out_add_shost_fail: mpt3sas_base_detach(ioc); - out_attach_fail: +out_attach_fail: destroy_workqueue(ioc->firmware_event_thread); - out_thread_fail: +out_thread_fail: spin_lock(&gioc_lock); list_del(&ioc->list); spin_unlock(&gioc_lock); @@ -10667,21 +15379,35 @@ out_add_shost_fail: * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * - * Return: 0 success, anything else error. + * Returns 0 success, anything else error. */ static int scsih_suspend(struct pci_dev *pdev, pm_message_t state) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; pci_power_t device_state; + int rc; + rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); + if (rc) { + dev_err(&pdev->dev, "unable to suspend device\n"); + return rc; + } + + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && (sata_smart_polling)) + mpt3sas_base_stop_smart_polling(ioc); mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_stop_hba_unplug_watchdog(ioc); flush_scheduled_work(); scsi_block_requests(shost); device_state = pci_choose_state(pdev, state); - ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", - pdev, pci_name(pdev), device_state); + _scsih_ir_shutdown(ioc); + _scsih_nvme_shutdown(ioc); + + printk(MPT3SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " + "operating state [D%d]\n", ioc->name, pdev, + pci_name(pdev), device_state); pci_save_state(pdev); mpt3sas_base_free_resources(ioc); @@ -10693,18 +15419,25 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state) * scsih_resume - power management resume main entry point * @pdev: PCI device struct * - * Return: 0 success, anything else error. + * Returns 0 success, anything else error. 
*/ static int scsih_resume(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; pci_power_t device_state = pdev->current_state; int r; - ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", - pdev, pci_name(pdev), device_state); + r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); + if (r) { + dev_err(&pdev->dev, "unable to resume device\n"); + return r; + } + + printk(MPT3SAS_INFO_FMT "pdev=0x%p, slot=%s, previous " + "operating state [D%d]\n", ioc->name, pdev, + pci_name(pdev), device_state); pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); @@ -10717,6 +15450,9 @@ scsih_resume(struct pci_dev *pdev) mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); scsi_unblock_requests(shost); mpt3sas_base_start_watchdog(ioc); + mpt3sas_base_start_hba_unplug_watchdog(ioc); + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && (sata_smart_polling)) + mpt3sas_base_start_smart_polling(ioc); return 0; } #endif /* CONFIG_PM */ @@ -10728,15 +15464,22 @@ scsih_resume(struct pci_dev *pdev) * * Description: Called when a PCI error is detected. * - * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. + * Return value: + * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; - ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "device unavailable\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + printk(MPT3SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n", + ioc->name, state); switch (state) { case pci_channel_io_normal: @@ -10745,14 +15488,20 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) /* Fatal error, prepare for slot reset */ ioc->pci_error_recovery = 1; scsi_block_requests(ioc->shost); + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && (sata_smart_polling)) + mpt3sas_base_stop_smart_polling(ioc); mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_stop_hba_unplug_watchdog(ioc); mpt3sas_base_free_resources(ioc); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent error, prepare for device removal */ ioc->pci_error_recovery = 1; + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && (sata_smart_polling)) + mpt3sas_base_stop_smart_polling(ioc); mpt3sas_base_stop_watchdog(ioc); - _scsih_flush_running_cmds(ioc); + mpt3sas_base_stop_hba_unplug_watchdog(ioc); + mpt3sas_scsih_flush_running_cmds(ioc); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; @@ -10769,23 +15518,35 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) static pci_ers_result_t scsih_pci_slot_reset(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; int rc; - ioc_info(ioc, "PCI error: slot reset callback!!\n"); + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to perform slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + printk(MPT3SAS_INFO_FMT "PCI error: slot reset callback!!\n", + 
ioc->name); ioc->pci_error_recovery = 0; ioc->pdev = pdev; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) pci_restore_state(pdev); +#endif rc = mpt3sas_base_map_resources(ioc); if (rc) return PCI_ERS_RESULT_DISCONNECT; + else { + if(ioc->is_warpdrive) + ioc->pci_error_recovery = 0; + } rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); - ioc_warn(ioc, "hard reset: %s\n", - (rc == 0) ? "success" : "failed"); + printk(MPT3SAS_WARN_FMT "hard reset: %s\n", ioc->name, + (rc == 0) ? "success" : "failed"); if (!rc) return PCI_ERS_RESULT_RECOVERED; @@ -10804,13 +15565,24 @@ scsih_pci_slot_reset(struct pci_dev *pdev) static void scsih_pci_resume(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; - ioc_info(ioc, "PCI error: resume callback!!\n"); + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to resume device\n"); + return; + } + printk(MPT3SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + pci_cleanup_aer_uncorrect_error_status(pdev); +#endif mpt3sas_base_start_watchdog(ioc); + mpt3sas_base_start_hba_unplug_watchdog(ioc); scsi_unblock_requests(ioc->shost); + if ((ioc->hba_mpi_version_belonged != MPI2_VERSION) && (sata_smart_polling)) + mpt3sas_base_start_smart_polling(ioc); } /** @@ -10820,31 +15592,35 @@ scsih_pci_resume(struct pci_dev *pdev) static pci_ers_result_t scsih_pci_mmio_enabled(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct Scsi_Host *shost = NULL; + struct MPT3SAS_ADAPTER *ioc = NULL; - ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to enable mmio\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + printk(MPT3SAS_INFO_FMT "PCI error: mmio enabled callback!!\n", + ioc->name); /* TODO - dump whatever for debugging purposes */ - /* This called only if scsih_pci_error_detected returns - * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still - * works, no need to reset slot. - */ + /* Driver ready to resume IOs. */ return PCI_ERS_RESULT_RECOVERED; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) /** - * scsih__ncq_prio_supp - Check for NCQ command priority support + * mpt3sas_scsih_ncq_prio_supp - Check for NCQ command priority support * @sdev: scsi device struct * * This is called when a user indicates they would like to enable * ncq command priorities. This works only on SATA devices. */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev) +u8 mpt3sas_scsih_ncq_prio_supp(struct scsi_device *sdev) { unsigned char *buf; - bool ncq_prio_supp = false; + u8 ncq_prio_supp = 0; if (!scsi_device_supports_vpd(sdev)) return ncq_prio_supp; @@ -10859,16 +15635,24 @@ bool scsih_ncq_prio_supp(struct scsi_device *sdev) kfree(buf); return ncq_prio_supp; } +#endif + /* * The pci device ids are defined in mpi/mpi2_cnfg.h. 
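A sketch of how a sysfs store handler might consume the NCQ-priority capability check above; the handler and attribute here are hypothetical and not part of this patch:

static ssize_t example_ncq_prio_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	bool enable;

	if (kstrtobool(buf, &enable))
		return -EINVAL;
	/* refuse to enable priority on devices that cannot honor it */
	if (enable && !mpt3sas_scsih_ncq_prio_supp(sdev))
		return -EINVAL;
	/* ... record the setting in per-device driver data ... */
	return count;
}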
*/ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))) +static DEFINE_PCI_DEVICE_TABLE(mpt3sas_pci_table) = { +#else static const struct pci_device_id mpt3sas_pci_table[] = { +#endif /* Spitfire ~ 2004 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, PCI_ANY_ID, PCI_ANY_ID }, + /* Falcon ~ 2008 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, PCI_ANY_ID, PCI_ANY_ID }, + /* Liberator ~ 2108 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, PCI_ANY_ID, PCI_ANY_ID }, @@ -10876,11 +15660,13 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, PCI_ANY_ID, PCI_ANY_ID }, + /* Meteor ~ 2116 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, PCI_ANY_ID, PCI_ANY_ID }, + /* Thunderbolt ~ 2208 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, PCI_ANY_ID, PCI_ANY_ID }, @@ -10894,6 +15680,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, PCI_ANY_ID, PCI_ANY_ID }, + /* Mustang ~ 2308 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, PCI_ANY_ID, PCI_ANY_ID }, @@ -10901,18 +15688,20 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SWCH_MPI_EP, PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1, PCI_ANY_ID, PCI_ANY_ID }, /* SSS6200 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, PCI_ANY_ID, PCI_ANY_ID }, + /* Fury ~ 3004 and 3008 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, PCI_ANY_ID, PCI_ANY_ID }, + /* Invader ~ 3108 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, PCI_ANY_ID, PCI_ANY_ID }, @@ -10922,12 +15711,12 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, PCI_ANY_ID, PCI_ANY_ID }, - /* Cutlass ~ 3216 and 3224 */ + + /* Intruder & Cutlass ~ 3316 and 3324 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, PCI_ANY_ID, PCI_ANY_ID }, - /* Intruder ~ 3316 and 3324 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, @@ -10944,6 +15733,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, PCI_ANY_ID, PCI_ANY_ID }, + /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, PCI_ANY_ID, PCI_ANY_ID }, @@ -10957,58 +15747,74 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, PCI_ANY_ID, PCI_ANY_ID }, - /* Mercator ~ 3616*/ + + /* Marlin, Mercator & Marut ~ 3716, 3616 and 3708*/ + { MPI2_MFGPAGE_VENDORID_LSI, 
MPI26_MFGPAGE_DEVID_SAS3716, + PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3708, + PCI_ANY_ID, PCI_ANY_ID }, - /* Aero SI 0x00E1 Configurable Secure - * 0x00E2 Hard Secure - */ + /* Atlas PCIe Switch Management Port */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_PEX88000, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Aero SI -> 0x00E0 Invalid, 0x00E1 Configurable Secure, + * 0x00E2 Hard Secure, 0x00E3 Tampered */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916, + PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916, PCI_ANY_ID, PCI_ANY_ID }, - - /* Atlas PCIe Switch Management Port */ - { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916, PCI_ANY_ID, PCI_ANY_ID }, - /* Sea SI 0x00E5 Configurable Secure - * 0x00E6 Hard Secure - */ + /* Sea SI -> 0x00E4 Invalid, 0x00E5 Configurable Secure, + * 0x00E6 Hard Secure, 0x00E7 Tampered */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816, + PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816, + PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; + MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); static struct pci_error_handlers _mpt3sas_err_handler = { - .error_detected = scsih_pci_error_detected, - .mmio_enabled = scsih_pci_mmio_enabled, - .slot_reset = scsih_pci_slot_reset, - .resume = scsih_pci_resume, + .error_detected = scsih_pci_error_detected, + .mmio_enabled = scsih_pci_mmio_enabled, + .slot_reset = scsih_pci_slot_reset, + .resume = scsih_pci_resume, }; static struct pci_driver mpt3sas_driver = { - .name = MPT3SAS_DRIVER_NAME, - .id_table = mpt3sas_pci_table, - .probe = _scsih_probe, - .remove = scsih_remove, - .shutdown = scsih_shutdown, - .err_handler = &_mpt3sas_err_handler, + .name = MPT3SAS_DRIVER_NAME, + .id_table = mpt3sas_pci_table, + .probe = _scsih_probe, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) + .remove = scsih_remove, +#else + .remove = __devexit_p(scsih_remove), +#endif + .shutdown = scsih_shutdown, + .err_handler = &_mpt3sas_err_handler, #ifdef CONFIG_PM - .suspend = scsih_suspend, - .resume = scsih_resume, + .suspend = scsih_suspend, + .resume = scsih_resume, #endif }; /** * scsih_init - main entry point for this driver. * - * Return: 0 success, anything else error. + * Returns 0 success, anything else error.
*/ static int scsih_init(void) @@ -11021,7 +15827,7 @@ scsih_init(void) /* queuecommand callback hander */ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); /* task management callback handler */ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); /* base internal commands callback handler */ @@ -11042,6 +15848,10 @@ scsih_init(void) /* ctl module callback handler */ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); + ctl_tm_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_ctl_tm_done); + ctl_diag_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_ctl_diag_done); tm_tr_cb_idx = mpt3sas_base_register_callback_handler( _scsih_tm_tr_complete); @@ -11049,8 +15859,15 @@ scsih_init(void) tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( _scsih_tm_volume_tr_complete); + tm_tr_internal_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_internal_tr_complete); + tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( - _scsih_sas_control_complete); + _scsih_sas_control_complete); + +#if defined(TARGET_MODE) + mpt3sas_base_stm_initialize_callback_handler(); +#endif return 0; } @@ -11058,11 +15875,17 @@ scsih_init(void) /** * scsih_exit - exit point for this driver (when it is a module). * - * Return: 0 success, anything else error. + * Returns 0 success, anything else error. */ static void scsih_exit(void) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + int enumerate_hba = 0; +#endif +#if defined(TARGET_MODE) + mpt3sas_base_stm_release_callback_handler(); +#endif mpt3sas_base_release_callback_handler(scsi_io_cb_idx); mpt3sas_base_release_callback_handler(tm_cb_idx); @@ -11072,73 +15895,95 @@ scsih_exit(void) mpt3sas_base_release_callback_handler(scsih_cb_idx); mpt3sas_base_release_callback_handler(config_cb_idx); mpt3sas_base_release_callback_handler(ctl_cb_idx); + mpt3sas_base_release_callback_handler(ctl_tm_cb_idx); + mpt3sas_base_release_callback_handler(ctl_diag_cb_idx); mpt3sas_base_release_callback_handler(tm_tr_cb_idx); mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); + mpt3sas_base_release_callback_handler(tm_tr_internal_cb_idx); mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); /* raid transport support */ - if (hbas_to_enumerate != 1) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (hbas_to_enumerate == -1) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) + enumerate_hba = 0; +#else + enumerate_hba = 2; +#endif + } else + enumerate_hba = hbas_to_enumerate; + + if (enumerate_hba != 1) raid_class_release(mpt3sas_raid_template); - if (hbas_to_enumerate != 2) + if (enumerate_hba != 2) raid_class_release(mpt2sas_raid_template); +#endif sas_release_transport(mpt3sas_transport_template); } /** * _mpt3sas_init - main entry point for this driver. * - * Return: 0 success, anything else error. + * Returns 0 success, anything else error. */ static int __init _mpt3sas_init(void) { int error; - + int enumerate_hba = 0; pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, - MPT3SAS_DRIVER_VERSION); - + MPT3SAS_DRIVER_VERSION); mpt3sas_transport_template = - sas_attach_transport(&mpt3sas_transport_functions); + sas_attach_transport(&mpt3sas_transport_functions); if (!mpt3sas_transport_template) return -ENODEV; - /* No need attach mpt3sas raid functions template - * if hbas_to_enumarate value is one.
+ */ + if (hbas_to_enumerate == -1) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) + enumerate_hba = 0; +#else + enumerate_hba = 2; +#endif + } else + enumerate_hba = hbas_to_enumerate; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (enumerate_hba != 1) { mpt3sas_raid_template = - raid_class_attach(&mpt3sas_raid_functions); + raid_class_attach(&mpt3sas_raid_functions); if (!mpt3sas_raid_template) { sas_release_transport(mpt3sas_transport_template); return -ENODEV; } } - /* No need to attach mpt2sas raid functions template - * if hbas_to_enumarate value is two - */ - if (hbas_to_enumerate != 2) { + * if hbas_to_enumerate value is two + */ + if (enumerate_hba != 2) { mpt2sas_raid_template = - raid_class_attach(&mpt2sas_raid_functions); + raid_class_attach(&mpt2sas_raid_functions); if (!mpt2sas_raid_template) { sas_release_transport(mpt3sas_transport_template); return -ENODEV; } } - +#endif error = scsih_init(); if (error) { scsih_exit(); return error; } - mpt3sas_ctl_init(hbas_to_enumerate); + mpt3sas_ctl_init(enumerate_hba); error = pci_register_driver(&mpt3sas_driver); if (error) scsih_exit(); - return error; } @@ -11149,11 +15994,20 @@ _mpt3sas_init(void) static void __exit _mpt3sas_exit(void) { - pr_info("mpt3sas version %s unloading\n", - MPT3SAS_DRIVER_VERSION); + int enumerate_hba = 0; + pr_info("mpt3sas version %s unloading\n", MPT3SAS_DRIVER_VERSION); - mpt3sas_ctl_exit(hbas_to_enumerate); + if (hbas_to_enumerate == -1) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) + enumerate_hba = 0; +#else + enumerate_hba = 2; +#endif + } else + enumerate_hba = hbas_to_enumerate; + mpt3sas_ctl_exit(enumerate_hba); pci_unregister_driver(&mpt3sas_driver); scsih_exit(); @@ -11161,3 +16015,4 @@ _mpt3sas_exit(void) module_init(_mpt3sas_init); module_exit(_mpt3sas_exit); + diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c old mode 100644 new mode 100755 index 5324662751bf..ede8194ef366 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -2,9 +2,10 @@ * SAS Transport Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -60,24 +61,102 @@ #include "mpt3sas_base.h" +#if ((defined(CONFIG_SUSE_KERNEL) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,70)) || \ + (defined(RHEL_MAJOR) && (RHEL_MAJOR == 6) && (RHEL_MINOR >= 3)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))) + + #define SAS_PHY_HOSTDATA +#endif + /** * _transport_sas_node_find_by_sas_address - sas node search * @ioc: per adapter object * @sas_address: sas address of expander or sas host + * @port: port entry * Context: Calling function should acquire ioc->sas_node_lock. * - * Search for either hba phys or expander device based on handle, then returns - * the sas_node object. + * Search for either hba phys or expander device based on sas address and + * port number, then returns the sas_node object.
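With multipath_on_hba, a SAS address alone is no longer a unique key, because the same expander can be reachable through more than one HBA-wide port. A sketch of the (address, port) match this lookup implies, assuming the node carries the hba_port it was enumerated on, as the structures in this patch do:

static bool example_same_sas_node(struct _sas_node *node,
	u64 sas_address, struct hba_port *port)
{
	/* both the address and the enumerating port must match */
	return node->sas_address == sas_address && node->port == port;
}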
*/ static struct _sas_node * _transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address) + u64 sas_address, struct hba_port *port) { if (ioc->sas_hba.sas_address == sas_address) return &ioc->sas_hba; else return mpt3sas_scsih_expander_find_by_sas_address(ioc, - sas_address); + sas_address, port); +} + +#if !defined(SAS_PHY_HOSTDATA) +static int +_transport_phy_belonged_to_sas_node(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node * sas_node, struct sas_phy *sphy) +{ + int i; + + for (i=0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].phy == sphy) + return 1; + } + + return 0; +} + +static struct _sas_node * +_transport_sas_node_find_by_sas_phy(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *sphy) +{ + struct _sas_node *sas_node, *sas_expander; + u64 sas_address = sphy->identify.sas_address; + + if (ioc->sas_hba.sas_address == sas_address) { + sas_node = &ioc->sas_hba; + if (_transport_phy_belonged_to_sas_node(ioc, sas_node, sphy)) + return sas_node; + } else { + list_for_each_entry(sas_expander, &ioc->sas_expander_list, + list) { + if (sas_expander->sas_address != sas_address) + continue; + + if (_transport_phy_belonged_to_sas_node(ioc, + sas_expander, sphy)) + return sas_expander; + } + } + + return NULL; +} +#endif + +static int +_transport_find_parent_node(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy) +{ + unsigned long flags; +#if defined(SAS_PHY_HOSTDATA) + struct hba_port *port = phy->hostdata; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + port) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#else + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if ((_transport_sas_node_find_by_sas_phy(ioc, phy)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#endif + return 0; } /** @@ -102,7 +181,13 @@ _transport_convert_phy_link_rate(u8 link_rate) rc = SAS_LINK_RATE_6_0_GBPS; break; case MPI25_SAS_NEG_LINK_RATE_12_0: +#if ((defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,76)) \ + || (defined(RHEL_MAJOR) && (RHEL_MAJOR == 6 && RHEL_MINOR >= 5)) \ + || LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) rc = SAS_LINK_RATE_12_0_GBPS; +#else + rc = SAS_LINK_RATE_6_0_GBPS; +#endif break; case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED: rc = SAS_PHY_DISABLED; @@ -114,9 +199,10 @@ _transport_convert_phy_link_rate(u8 link_rate) rc = SAS_SATA_PORT_SELECTOR; break; case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: - rc = SAS_PHY_RESET_IN_PROGRESS; - break; - + /* only supported in SLES10 SP1 kernels, not RHEL5 */ +/* rc = SAS_PHY_RESET_IN_PROGRESS; + * break; + */ default: case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: @@ -134,7 +220,7 @@ _transport_convert_phy_link_rate(u8 link_rate) * * Populates sas identify info. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
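The config-page calls in this file share one completion idiom, visible in the function body that follows: mask the reply's IOCStatus down to the status field and compare it against MPI2_IOCSTATUS_SUCCESS. A condensed fragment of that pattern:

Mpi2ConfigReply_t mpi_reply;
u32 ioc_status;

/* ... issue mpt3sas_config_get_sas_device_pg0(...) first ... */
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
	return -EIO;	/* reply delivered, but firmware reported failure */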
*/ static int _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, @@ -146,22 +232,24 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 ioc_status; if (ioc->shost_recovery || ioc->pci_error_recovery) { - ioc_info(ioc, "%s: host reset in progress!\n", __func__); + printk(MPT3SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); return -EFAULT; } if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -ENXIO; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n", - handle, ioc_status, __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" + "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); return -EIO; } @@ -223,8 +311,8 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, * Callback handler when sending internal generated transport cmds. * The callback index passed is `ioc->transport_cb_idx` * - * Return: 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. */ u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -248,6 +336,7 @@ mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, return 1; } +#if defined(MPT_WIDE_PORT_API) /* report manufacture request structure */ struct rep_manu_request { u8 smp_frame_type; @@ -284,7 +373,7 @@ struct rep_manu_reply { * * Fills in the sas_expander_device object when SMP port is created. * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
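+ *
+ * Implementation note: issues an SMP REPORT MANUFACTURER INFORMATION
+ * request through the MPI SMP passthrough interface and copies the
+ * vendor/product/revision strings into the sas_expander_device.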
*/ static int _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, @@ -305,26 +394,29 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, size_t data_out_sz; if (ioc->shost_recovery || ioc->pci_error_recovery) { - ioc_info(ioc, "%s: host reset in progress!\n", __func__); + printk(MPT3SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); return -EFAULT; } mutex_lock(&ioc->transport_cmds.mutex); if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: transport_cmds in use\n", __func__); - rc = -EAGAIN; - goto out; + printk(MPT3SAS_ERR_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; } ioc->transport_cmds.status = MPT3_CMD_PENDING; - rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); if (rc) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } @@ -335,10 +427,11 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, data_out_sz = sizeof(struct rep_manu_request); data_in_sz = sizeof(struct rep_manu_reply); - data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, - &data_out_dma, GFP_KERNEL); + data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz, + &data_out_dma); + if (!data_out) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); rc = -ENOMEM; mpt3sas_base_free_smid(ioc, smid); @@ -363,15 +456,17 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - dtransportprintk(ioc, - ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n", - (u64)sas_address)); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "report_manufacture - " + "send to sas_addr(0x%016llx)\n", ioc->name, + (unsigned long long)sas_address)); init_completion(&ioc->transport_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2SmpPassthroughRequest_t)/4); if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) @@ -379,16 +474,17 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, goto issue_host_reset; } - dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n")); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "report_manufacture - " + "complete\n", ioc->name)); if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { u8 *tmp; mpi_reply = ioc->transport_cmds.reply; - dtransportprintk(ioc, - ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n", - le16_to_cpu(mpi_reply->ResponseDataLength))); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "report_manufacture - reply data transfer size(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); if (le16_to_cpu(mpi_reply->ResponseDataLength) != sizeof(struct rep_manu_reply)) @@ -412,8 +508,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, 
manufacture_reply->component_revision_id; } } else - dtransportprintk(ioc, - ioc_info(ioc, "report_manufacture - no reply\n")); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "report_manufacture - no reply\n", ioc->name)); issue_host_reset: if (issue_reset) @@ -421,45 +517,54 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, out: ioc->transport_cmds.status = MPT3_CMD_NOT_USED; if (data_out) - dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz, data_out, data_out_dma); mutex_unlock(&ioc->transport_cmds.mutex); return rc; } - +#endif /** * _transport_delete_port - helper function to removing a port * @ioc: per adapter object * @mpt3sas_port: mpt3sas per port object + * + * Returns nothing. */ static void _transport_delete_port(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port) { u64 sas_address = mpt3sas_port->remote_identify.sas_address; + struct hba_port *port = mpt3sas_port->hba_port; enum sas_device_type device_type = mpt3sas_port->remote_identify.device_type; +#if defined(MPT_WIDE_PORT_API) dev_printk(KERN_INFO, &mpt3sas_port->port->dev, "remove: sas_addr(0x%016llx)\n", (unsigned long long) sas_address); +#endif ioc->logging_level |= MPT_DEBUG_TRANSPORT; if (device_type == SAS_END_DEVICE) - mpt3sas_device_remove_by_sas_address(ioc, sas_address); + mpt3sas_device_remove_by_sas_address(ioc, + sas_address, port); else if (device_type == SAS_EDGE_EXPANDER_DEVICE || device_type == SAS_FANOUT_EXPANDER_DEVICE) - mpt3sas_expander_remove(ioc, sas_address); + mpt3sas_expander_remove(ioc, sas_address, port); ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; } +#if defined(MPT_WIDE_PORT_API) /** * _transport_delete_phy - helper function to removing single phy from port * @ioc: per adapter object * @mpt3sas_port: mpt3sas per port object * @mpt3sas_phy: mpt3sas per phy object + * + * Returns nothing. */ static void _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, @@ -482,6 +587,8 @@ _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @mpt3sas_port: mpt3sas per port object * @mpt3sas_phy: mpt3sas per phy object + * + * Returns nothing. */ static void _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, @@ -500,16 +607,18 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, } /** - * _transport_add_phy_to_an_existing_port - adding new phy to existing port + * mpt3sas_transport_add_phy_to_an_existing_port - adding new phy to existing port * @ioc: per adapter object * @sas_node: sas node object (either expander or sas host) * @mpt3sas_phy: mpt3sas per phy object * @sas_address: sas address of device/expander were phy needs to be added to + * + * Returns nothing. 
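+ *
+ * Hedged usage sketch (mirrors the call made from
+ * mpt3sas_transport_update_links() later in this file):
+ *
+ *	mpt3sas_transport_add_phy_to_an_existing_port(ioc, sas_node,
+ *	    mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address, port);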
*/ -static void -_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, +void +mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy, - u64 sas_address) + u64 sas_address, struct hba_port *port) { struct _sas_port *mpt3sas_port; struct _sas_phy *phy_srch; @@ -517,30 +626,38 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, if (mpt3sas_phy->phy_belongs_to_port == 1) return; + if (!port) + return; + list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list, port_list) { if (mpt3sas_port->remote_identify.sas_address != sas_address) continue; + if (mpt3sas_port->hba_port != port) + continue; list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, port_siblings) { if (phy_srch == mpt3sas_phy) return; } _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy); - return; + return; } } +#endif /** - * _transport_del_phy_from_an_existing_port - delete phy from existing port + * mpt3sas_transport_del_phy_from_an_existing_port - delete phy from existing port * @ioc: per adapter object * @sas_node: sas node object (either expander or sas host) * @mpt3sas_phy: mpt3sas per phy object + * + * Returns nothing. */ -static void -_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, +void +mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy) { struct _sas_port *mpt3sas_port, *next; @@ -555,12 +672,18 @@ _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, port_siblings) { if (phy_srch != mpt3sas_phy) continue; - - if (mpt3sas_port->num_phys == 1) +#if defined(MPT_WIDE_PORT_API) + /* don't delete port during host reset, + * just delete phy. + */ + if (mpt3sas_port->num_phys == 1 && !ioc->shost_recovery) _transport_delete_port(ioc, mpt3sas_port); else _transport_delete_phy(ioc, mpt3sas_port, mpt3sas_phy); +#else + _transport_delete_port(ioc, mpt3sas_port); +#endif return; } } @@ -571,21 +694,23 @@ _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, * @ioc: per adapter object * @sas_node: sas node object (either expander or sas host) * @sas_address: sas address of device being added + * @port: hba port entry * * See the explanation above from _transport_delete_duplicate_port */ static void _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, - u64 sas_address) + u64 sas_address, struct hba_port *port) { int i; for (i = 0; i < sas_node->num_phys; i++) { - if (sas_node->phy[i].remote_identify.sas_address != sas_address) + if (sas_node->phy[i].remote_identify.sas_address != sas_address || + sas_node->phy[i].port != port) continue; if (sas_node->phy[i].phy_belongs_to_port == 1) - _transport_del_phy_from_an_existing_port(ioc, sas_node, - &sas_node->phy[i]); + mpt3sas_transport_del_phy_from_an_existing_port(ioc, + sas_node, &sas_node->phy[i]); } } @@ -594,15 +719,16 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, * @ioc: per adapter object * @handle: handle of attached device * @sas_address: sas address of parent expander or sas host + * @port: hba port entry * Context: This function will acquire ioc->sas_node_lock. * * Adding new port object to the sas_node->sas_port_list. * - * Return: mpt3sas_port. + * Returns mpt3sas_port. 
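+ *
+ * Hypothetical caller sketch (error handling shortened; scsih treats a
+ * NULL return as "device not reachable through the transport layer"):
+ *
+ *	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
+ *	    sas_address_parent, port);
+ *	if (!mpt3sas_port)
+ *		return;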
*/ struct _sas_port * mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, - u64 sas_address) + u64 sas_address, struct hba_port *hba_port) { struct _sas_phy *mpt3sas_phy, *next; struct _sas_port *mpt3sas_port; @@ -611,124 +737,156 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, struct sas_rphy *rphy; struct _sas_device *sas_device = NULL; int i; +#if defined(MPT_WIDE_PORT_API) struct sas_port *port; +#endif + + if (!hba_port) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } mpt3sas_port = kzalloc(sizeof(struct _sas_port), GFP_KERNEL); if (!mpt3sas_port) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return NULL; } INIT_LIST_HEAD(&mpt3sas_port->port_list); INIT_LIST_HEAD(&mpt3sas_port->phy_list); spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address, hba_port); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_node) { - ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n", - __func__, (u64)sas_address); + printk(MPT3SAS_ERR_FMT "%s: Could not find " + "parent sas_address(0x%016llx)!\n", ioc->name, + __func__, (unsigned long long)sas_address); goto out_fail; } if ((_transport_set_identify(ioc, handle, &mpt3sas_port->remote_identify))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out_fail; } if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out_fail; } + mpt3sas_port->hba_port = hba_port; _transport_sanity_check(ioc, sas_node, - mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->remote_identify.sas_address, hba_port); for (i = 0; i < sas_node->num_phys; i++) { if (sas_node->phy[i].remote_identify.sas_address != - mpt3sas_port->remote_identify.sas_address) + mpt3sas_port->remote_identify.sas_address || + sas_node->phy[i].port != hba_port) continue; list_add_tail(&sas_node->phy[i].port_siblings, &mpt3sas_port->phy_list); + if (sas_node->handle <= ioc->sas_hba.num_phys) { + hba_port->phy_mask |= (1 << i); + } mpt3sas_port->num_phys++; } if (!mpt3sas_port->num_phys) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out_fail; } + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = mpt3sas_get_sdev_by_addr(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + if (!sas_device) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } +#if defined(MPT_WIDE_PORT_API) if (!sas_node->parent_dev) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out_fail; } + port = sas_port_alloc_num(sas_node->parent_dev); if ((sas_port_add(port))) { - ioc_err(ioc, "failure at 
%s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); goto out_fail; } list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list, port_siblings) { if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &port->dev, - "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", - handle, (unsigned long long) + dev_printk(KERN_INFO, &port->dev, "add: handle(0x%04x)" + ", sas_addr(0x%016llx), phy(%d)\n", handle, + (unsigned long long) mpt3sas_port->remote_identify.sas_address, mpt3sas_phy->phy_id); sas_port_add_phy(port, mpt3sas_phy->phy); mpt3sas_phy->phy_belongs_to_port = 1; + mpt3sas_phy->port = hba_port; } mpt3sas_port->port = port; - if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { rphy = sas_end_device_alloc(port); - else + sas_device->rphy=rphy; + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = sas_device->sas_address; + } else { rphy = sas_expander_alloc(port, mpt3sas_port->remote_identify.device_type); - + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = + mpt3sas_port->remote_identify.sas_address; + } +#else + mpt3sas_phy = list_entry(mpt3sas_port->phy_list.next, struct _sas_phy, + port_siblings); + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(mpt3sas_phy->phy); + sas_device->rphy=rphy; + } else + rphy = sas_expander_alloc(mpt3sas_phy->phy, + mpt3sas_port->remote_identify.device_type); +#endif rphy->identify = mpt3sas_port->remote_identify; - if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { - sas_device = mpt3sas_get_sdev_by_addr(ioc, - mpt3sas_port->remote_identify.sas_address); - if (!sas_device) { - dfailprintk(ioc, - ioc_info(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__)); - goto out_fail; - } - sas_device->pend_sas_rphy_add = 1; - } - if ((sas_rphy_add(rphy))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); } - if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { sas_device->pend_sas_rphy_add = 0; sas_device_put(sas_device); } - if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &rphy->dev, - "add: handle(0x%04x), sas_addr(0x%016llx)\n", - handle, (unsigned long long) + dev_printk(KERN_INFO, &rphy->dev, "add: handle(0x%04x), " + "sas_addr(0x%016llx)\n", handle, + (unsigned long long) mpt3sas_port->remote_identify.sas_address); mpt3sas_port->rphy = rphy; spin_lock_irqsave(&ioc->sas_node_lock, flags); list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(MPT_WIDE_PORT_API) /* fill in report manufacture */ if (mpt3sas_port->remote_identify.device_type == MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || @@ -737,6 +895,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, _transport_expander_report_manufacture(ioc, mpt3sas_port->remote_identify.sas_address, rphy_to_expander_device(rphy)); +#endif return mpt3sas_port; out_fail: @@ -752,25 +911,34 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, * @ioc: per adapter object * @sas_address: sas address of attached device * @sas_address_parent: sas address of parent expander or sas host + * @port: port entry * Context: This function will acquire 
ioc->sas_node_lock. * * Removing object and freeing associated memory from the * ioc->sas_port_list. + * + * Return nothing. */ void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, - u64 sas_address_parent) + u64 sas_address_parent, struct hba_port *port) { int i; unsigned long flags; struct _sas_port *mpt3sas_port, *next; struct _sas_node *sas_node; u8 found = 0; +#if defined(MPT_WIDE_PORT_API) struct _sas_phy *mpt3sas_phy, *next_phy; +#endif + struct hba_port *hba_port, *hba_port_next=NULL; + + if (!port) + return; spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_node = _transport_sas_node_find_by_sas_address(ioc, - sas_address_parent); + sas_address_parent, port); if (!sas_node) { spin_unlock_irqrestore(&ioc->sas_node_lock, flags); return; @@ -779,16 +947,35 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, port_list) { if (mpt3sas_port->remote_identify.sas_address != sas_address) continue; + if (mpt3sas_port->hba_port != port) + continue; found = 1; list_del(&mpt3sas_port->port_list); goto out; } + out: if (!found) { spin_unlock_irqrestore(&ioc->sas_node_lock, flags); return; } + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry_safe(hba_port, hba_port_next, + &ioc->port_table_list, list) { + if (hba_port != port) + continue; + if (hba_port->sas_address != sas_address) + continue; + printk(MPT3SAS_INFO_FMT "remove hba_port entry: %p" + " port: %d from hba_port list\n", ioc->name, + hba_port, hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } + } + for (i = 0; i < sas_node->num_phys; i++) { if (sas_node->phy[i].remote_identify.sas_address == sas_address) memset(&sas_node->phy[i].remote_identify, 0 , @@ -797,6 +984,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(MPT_WIDE_PORT_API) list_for_each_entry_safe(mpt3sas_phy, next_phy, &mpt3sas_port->phy_list, port_siblings) { if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) @@ -806,13 +994,20 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, mpt3sas_port->remote_identify.sas_address, mpt3sas_phy->phy_id); mpt3sas_phy->phy_belongs_to_port = 0; - if (!ioc->remove_host) - sas_port_delete_phy(mpt3sas_port->port, - mpt3sas_phy->phy); + if(!ioc->remove_host) + sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); list_del(&mpt3sas_phy->port_siblings); } - if (!ioc->remove_host) + if(!ioc->remove_host) sas_port_delete(mpt3sas_port->port); +#else + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_port->rphy->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); + if(!ioc->remove_host) + sas_rphy_delete(mpt3sas_port->rphy); +#endif kfree(mpt3sas_port); } @@ -823,7 +1018,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, * @phy_pg0: sas phy page 0 * @parent_dev: parent device class object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
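+ *
+ * Hypothetical caller sketch (the real caller walks the HBA phys while
+ * building the sas host; the names below are assumptions, not a verbatim
+ * call site):
+ *
+ *	for (i = 0; i < ioc->sas_hba.num_phys; i++)
+ *		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
+ *		    phy_pg0, parent_dev);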
 */ int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy @@ -836,14 +1031,14 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); phy = sas_phy_alloc(parent_dev, phy_index); if (!phy) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -1; } if ((_transport_set_identify(ioc, mpt3sas_phy->handle, &mpt3sas_phy->identify))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); sas_phy_free(phy); return -1; } @@ -863,10 +1058,20 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); phy->maximum_linkrate = _transport_convert_phy_link_rate( phy_pg0.ProgrammedLinkRate >> 4); +#if defined(SAS_PHY_HOSTDATA) + phy->hostdata = mpt3sas_phy->port; +#endif + +#if !defined(MPT_WIDE_PORT_API_PLUS) + phy->local_attached = 1; +#endif +#if !defined(MPT_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif if ((sas_phy_add(phy))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); sas_phy_free(phy); return -1; } @@ -891,7 +1096,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy * @expander_pg1: expander page 1 * @parent_dev: parent device class object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. */ int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy @@ -904,14 +1109,14 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); phy = sas_phy_alloc(parent_dev, phy_index); if (!phy) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -1; } if ((_transport_set_identify(ioc, mpt3sas_phy->handle, &mpt3sas_phy->identify))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); sas_phy_free(phy); return -1; } @@ -933,10 +1138,17 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); phy->maximum_linkrate = _transport_convert_phy_link_rate( expander_pg1.ProgrammedLinkRate >> 4); +#if defined(SAS_PHY_HOSTDATA) + phy->hostdata = mpt3sas_phy->port; +#endif + +#if !defined(MPT_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif if ((sas_phy_add(phy))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); sas_phy_free(phy); return -1; } @@ -958,22 +1170,28 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy * @ioc: per adapter object * @sas_address: sas address of parent expander or sas host * @handle: attached device handle - * @phy_number: phy number + * @phy_number: phy number * @link_rate: new link rate + * @port: hba port entry + * + * Returns nothing.
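+ *
+ * Called from firmware event context when a phy changes state: a valid
+ * @handle with link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5 (re)attaches the
+ * phy to its wide port, anything else clears its remote_identify.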
*/ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address, u16 handle, u8 phy_number, u8 link_rate) + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate, + struct hba_port *port) { unsigned long flags; struct _sas_node *sas_node; struct _sas_phy *mpt3sas_phy; + struct hba_port *hba_port = NULL; if (ioc->shost_recovery || ioc->pci_error_recovery) return; spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address, port); if (!sas_node) { spin_unlock_irqrestore(&ioc->sas_node_lock, flags); return; @@ -985,8 +1203,21 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { _transport_set_identify(ioc, handle, &mpt3sas_phy->remote_identify); - _transport_add_phy_to_an_existing_port(ioc, sas_node, - mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); +#if defined(MPT_WIDE_PORT_API) + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry(hba_port, + &ioc->port_table_list, list) { + if (hba_port->sas_address == sas_address && + hba_port == port) + hba_port->phy_mask |= + (1 << mpt3sas_phy->phy_id); + } + } + mpt3sas_transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address, + port); +#endif } else memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); @@ -1009,14 +1240,14 @@ static inline void * phy_to_ioc(struct sas_phy *phy) { struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); - return shost_priv(shost); + return shost_private(shost); } static inline void * rphy_to_ioc(struct sas_rphy *rphy) { struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); - return shost_priv(shost); + return shost_private(shost); } /* report phy error log structure */ @@ -1051,7 +1282,7 @@ struct phy_error_log_reply { * @ioc: per adapter object * @phy: The sas phy object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
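+ *
+ * Implementation note: sends an SMP REPORT PHY ERROR LOG request via MPI
+ * SMP passthrough and copies the four 32-bit error counters into the
+ * sas_phy object.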
* */ static int @@ -1071,26 +1302,29 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, u32 sz; if (ioc->shost_recovery || ioc->pci_error_recovery) { - ioc_info(ioc, "%s: host reset in progress!\n", __func__); + printk(MPT3SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); return -EFAULT; } mutex_lock(&ioc->transport_cmds.mutex); if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: transport_cmds in use\n", __func__); - rc = -EAGAIN; - goto out; + printk(MPT3SAS_ERR_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; } ioc->transport_cmds.status = MPT3_CMD_PENDING; - rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); if (rc) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } @@ -1100,10 +1334,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, sz = sizeof(struct phy_error_log_request) + sizeof(struct phy_error_log_reply); - data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, - GFP_KERNEL); + data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma); if (!data_out) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); rc = -ENOMEM; mpt3sas_base_free_smid(ioc, smid); @@ -1129,21 +1362,21 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, cpu_to_le16(sizeof(struct phy_error_log_request)); psge = &mpi_request->SGL; - ioc->build_sg(ioc, psge, data_out_dma, - sizeof(struct phy_error_log_request), + ioc->build_sg(ioc, psge, data_out_dma, sizeof(struct phy_error_log_request), data_out_dma + sizeof(struct phy_error_log_request), sizeof(struct phy_error_log_reply)); - - dtransportprintk(ioc, - ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", - (u64)phy->identify.sas_address, - phy->number)); + + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "phy_error_log - " + "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name, + (unsigned long long)phy->identify.sas_address, phy->number)); init_completion(&ioc->transport_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2SmpPassthroughRequest_t)/4); if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) @@ -1151,15 +1384,16 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, goto issue_host_reset; } - dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n")); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "phy_error_log - " + "complete\n", ioc->name)); if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { mpi_reply = ioc->transport_cmds.reply; - dtransportprintk(ioc, - ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n", - le16_to_cpu(mpi_reply->ResponseDataLength))); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "phy_error_log - reply data transfer size(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); if (le16_to_cpu(mpi_reply->ResponseDataLength) 
 != sizeof(struct phy_error_log_reply)) @@ -1168,9 +1402,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, phy_error_log_reply = data_out + sizeof(struct phy_error_log_request); - dtransportprintk(ioc, - ioc_info(ioc, "phy_error_log - function_result(%d)\n", - phy_error_log_reply->function_result)); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "phy_error_log - function_result(%d)\n", + ioc->name, phy_error_log_reply->function_result)); phy->invalid_dword_count = be32_to_cpu(phy_error_log_reply->invalid_dword); @@ -1182,8 +1416,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, be32_to_cpu(phy_error_log_reply->phy_reset_problem); rc = 0; } else - dtransportprintk(ioc, - ioc_info(ioc, "phy_error_log - no reply\n")); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "phy_error_log - no reply\n", ioc->name)); issue_host_reset: if (issue_reset) @@ -1191,7 +1425,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, out: ioc->transport_cmds.status = MPT3_CMD_NOT_USED; if (data_out) - dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma); mutex_unlock(&ioc->transport_cmds.mutex); return rc; @@ -1201,24 +1435,20 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, * _transport_get_linkerrors - return phy counters for both hba and expanders * @phy: The sas phy object * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. * */ static int _transport_get_linkerrors(struct sas_phy *phy) { struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); - unsigned long flags; Mpi2ConfigReply_t mpi_reply; Mpi2SasPhyPage1_t phy_pg1; + int rc = 0; - spin_lock_irqsave(&ioc->sas_node_lock, flags); - if (_transport_sas_node_find_by_sas_address(ioc, - phy->identify.sas_address) == NULL) { - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); - return -EINVAL; - } - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + rc = _transport_find_parent_node(ioc, phy); + if (rc) + return rc; if (phy->identify.sas_address != ioc->sas_hba.sas_address) return _transport_get_expander_phy_error_log(ioc, phy); @@ -1226,16 +1456,16 @@ _transport_get_linkerrors(struct sas_phy *phy) /* get hba phy error logs */ if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, phy->number))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -ENXIO; } if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) - ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n", - phy->number, - le16_to_cpu(mpi_reply.IOCStatus), - le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "phy(%d), ioc_status" "(0x%04x), loginfo(0x%08x)\n", ioc->name, + phy->number, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); phy->running_disparity_error_count = @@ -1249,11 +1479,10 @@ _transport_get_linkerrors(struct sas_phy *phy) /** * _transport_get_enclosure_identifier - - * @rphy: The sas phy object - * @identifier: ? + * @rphy: The sas rphy object + * @identifier: enclosure logical id (out) * * Obtain the enclosure logical id for an expander. - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure.
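+ *
+ * The lookup is keyed by both SAS address and rphy, presumably so that
+ * the same address reachable through two HBA ports (multipath_on_hba)
+ * resolves to the correct device entry.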
 */ static int _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) @@ -1264,8 +1493,8 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) int rc; spin_lock_irqsave(&ioc->sas_device_lock, flags); - sas_device = __mpt3sas_get_sdev_by_addr(ioc, - rphy->identify.sas_address); + sas_device = __mpt3sas_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); if (sas_device) { *identifier = sas_device->enclosure_logical_id; rc = 0; @@ -1281,9 +1510,9 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) /** * _transport_get_bay_identifier - - * @rphy: The sas phy object + * @rphy: The sas rphy object * - * Return: the slot id for a device that resides inside an enclosure. + * Returns the slot id for a device that resides inside an enclosure. */ static int _transport_get_bay_identifier(struct sas_rphy *rphy) @@ -1294,8 +1523,8 @@ _transport_get_bay_identifier(struct sas_rphy *rphy) int rc; spin_lock_irqsave(&ioc->sas_device_lock, flags); - sas_device = __mpt3sas_get_sdev_by_addr(ioc, - rphy->identify.sas_address); + sas_device = __mpt3sas_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); if (sas_device) { rc = sas_device->slot; sas_device_put(sas_device); @@ -1339,9 +1568,8 @@ struct phy_control_reply { * _transport_expander_phy_control - expander phy control * @ioc: per adapter object * @phy: The sas phy object - * @phy_operation: ? + * @phy_operation: SMP phy control opcode (link reset, hard reset or disable) * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. * */ static int @@ -1361,26 +1589,29 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, u32 sz; if (ioc->shost_recovery || ioc->pci_error_recovery) { - ioc_info(ioc, "%s: host reset in progress!\n", __func__); + printk(MPT3SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); return -EFAULT; } mutex_lock(&ioc->transport_cmds.mutex); if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: transport_cmds in use\n", __func__); - rc = -EAGAIN; - goto out; + printk(MPT3SAS_ERR_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; } ioc->transport_cmds.status = MPT3_CMD_PENDING; - rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); if (rc) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto out; } @@ -1390,10 +1621,9 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, sz = sizeof(struct phy_control_request) + sizeof(struct phy_control_reply); - data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, - GFP_KERNEL); + data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma); if (!data_out) { - pr_err("failure at %s:%d/%s()!\n", __FILE__, + printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); rc = -ENOMEM; mpt3sas_base_free_smid(ioc, smid); @@ -1424,21 +1654,22 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, cpu_to_le16(sizeof(struct phy_error_log_request)); psge = &mpi_request->SGL; - ioc->build_sg(ioc, psge, data_out_dma, - sizeof(struct phy_control_request), + ioc->build_sg(ioc, psge, data_out_dma, sizeof(struct phy_control_request), data_out_dma + sizeof(struct phy_control_request), sizeof(struct phy_control_reply)); - dtransportprintk(ioc, -
ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", - (u64)phy->identify.sas_address, - phy->number, phy_operation)); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "phy_control - " + "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name, + (unsigned long long)phy->identify.sas_address, phy->number, + phy_operation)); init_completion(&ioc->transport_cmds.done); ioc->put_smid_default(ioc, smid); - wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + printk(MPT3SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2SmpPassthroughRequest_t)/4); if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) @@ -1446,15 +1677,16 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, goto issue_host_reset; } - dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n")); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "phy_control - " + "complete\n", ioc->name)); if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { mpi_reply = ioc->transport_cmds.reply; - dtransportprintk(ioc, - ioc_info(ioc, "phy_control - reply data transfer size(%d)\n", - le16_to_cpu(mpi_reply->ResponseDataLength))); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "phy_control - reply data transfer size(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); if (le16_to_cpu(mpi_reply->ResponseDataLength) != sizeof(struct phy_control_reply)) @@ -1463,14 +1695,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, phy_control_reply = data_out + sizeof(struct phy_control_request); - dtransportprintk(ioc, - ioc_info(ioc, "phy_control - function_result(%d)\n", - phy_control_reply->function_result)); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "phy_control - function_result(%d)\n", + ioc->name, phy_control_reply->function_result)); rc = 0; } else - dtransportprintk(ioc, - ioc_info(ioc, "phy_control - no reply\n")); + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "phy_control - no reply\n", ioc->name)); issue_host_reset: if (issue_reset) @@ -1478,8 +1710,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, out: ioc->transport_cmds.status = MPT3_CMD_NOT_USED; if (data_out) - dma_free_coherent(&ioc->pdev->dev, sz, data_out, - data_out_dma); + pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma); mutex_unlock(&ioc->transport_cmds.mutex); return rc; @@ -1490,7 +1721,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, * @phy: The sas phy object * @hard_reset: * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
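+ *
+ * Expander-attached phys are reset through the SMP PHY CONTROL path
+ * above; HBA phys use a SAS IO unit control request, roughly (sketch
+ * matching the mainline driver, not verbatim from this patch):
+ *
+ *	mpi_request.Operation = hard_reset ?
+ *	    MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;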
*/ static int _transport_phy_reset(struct sas_phy *phy, int hard_reset) @@ -1498,15 +1729,11 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); Mpi2SasIoUnitControlReply_t mpi_reply; Mpi2SasIoUnitControlRequest_t mpi_request; - unsigned long flags; + int rc = 0; - spin_lock_irqsave(&ioc->sas_node_lock, flags); - if (_transport_sas_node_find_by_sas_address(ioc, - phy->identify.sas_address) == NULL) { - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); - return -EINVAL; - } - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + rc = _transport_find_parent_node(ioc, phy); + if (rc) + return rc; /* handle expander phys */ if (phy->identify.sas_address != ioc->sas_hba.sas_address) @@ -1522,26 +1749,28 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) mpi_request.PhyNum = phy->number; if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); return -ENXIO; } if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) - ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", - phy->number, le16_to_cpu(mpi_reply.IOCStatus), - le32_to_cpu(mpi_reply.IOCLogInfo)); + printk(MPT3SAS_INFO_FMT "phy(%d), ioc_status" + "(0x%04x), loginfo(0x%08x)\n", ioc->name, + phy->number, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); return 0; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) /** * _transport_phy_enable - enable/disable phys * @phy: The sas phy object * @enable: enable phy when true * * Only support sas_host direct attached phys. - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
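+ *
+ * Note: the request is refused while any port has
+ * MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS set (see the
+ * discovery_active check below).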
*/ static int _transport_phy_enable(struct sas_phy *phy, int enable) @@ -1553,16 +1782,11 @@ _transport_phy_enable(struct sas_phy *phy, int enable) u16 ioc_status; u16 sz; int rc = 0; - unsigned long flags; int i, discovery_active; - spin_lock_irqsave(&ioc->sas_node_lock, flags); - if (_transport_sas_node_find_by_sas_address(ioc, - phy->identify.sas_address) == NULL) { - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); - return -EINVAL; - } - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + rc = _transport_find_parent_node(ioc, phy); + if (rc) + return rc; /* handle expander phys */ if (phy->identify.sas_address != ioc->sas_hba.sas_address) @@ -1577,23 +1801,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable) sizeof(Mpi2SasIOUnit0PhyData_t)); sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg0) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENOMEM; goto out; } if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, sas_iounit_pg0, sz))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENXIO; goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -EIO; goto out; } @@ -1602,8 +1826,10 @@ _transport_phy_enable(struct sas_phy *phy, int enable) for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) { if (sas_iounit_pg0->PhyData[i].PortFlags & MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) { - ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n", - sas_iounit_pg0->PhyData[i].Port, i); + printk(MPT3SAS_ERR_FMT "discovery is active on " + "port = %d, phy = %d: unable to enable/disable " + "phys, try again later!\n", ioc->name, + sas_iounit_pg0->PhyData[i].Port, i); discovery_active = 1; } } @@ -1618,23 +1844,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable) sizeof(Mpi2SasIOUnit1PhyData_t)); sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg1) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENOMEM; goto out; } if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENXIO; goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -EIO; goto out; } @@ -1677,8 +1903,7 @@ _transport_phy_enable(struct sas_phy *phy, int enable) * @rates: rates defined in sas_phy_linkrates * * Only support sas_host direct attached phys. - * - * Return: 0 for success, non-zero for failure. + * Returns 0 for success, non-zero for failure. 
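+ *
+ * Only the programmed min/max rates of a direct-attached HBA phy are
+ * changed: the new range is written back through SAS IO unit page 1 and
+ * a link reset is expected to renegotiate the rate.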
*/ static int _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) @@ -1691,15 +1916,10 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) u16 sz; int i; int rc = 0; - unsigned long flags; - - spin_lock_irqsave(&ioc->sas_node_lock, flags); - if (_transport_sas_node_find_by_sas_address(ioc, - phy->identify.sas_address) == NULL) { - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); - return -EINVAL; - } - spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + rc = _transport_find_parent_node(ioc, phy); + if (rc) + return rc; if (!rates->minimum_linkrate) rates->minimum_linkrate = phy->minimum_linkrate; @@ -1726,23 +1946,23 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) sizeof(Mpi2SasIOUnit1PhyData_t)); sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg1) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENOMEM; goto out; } if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz))) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENXIO; goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -EIO; goto out; } @@ -1761,8 +1981,8 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz)) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); rc = -ENXIO; goto out; } @@ -1786,7 +2006,11 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) kfree(sas_iounit_pg1); return rc; } +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) \ + || (defined(CONFIG_SUSE_KERNEL) && ((CONFIG_SUSE_VERSION == 15) && (CONFIG_SUSE_PATCHLEVEL >= 1))) \ + || (defined(CONFIG_SUSE_KERNEL) && ((CONFIG_SUSE_VERSION == 12) && (CONFIG_SUSE_PATCHLEVEL >= 5)))) static int _transport_map_smp_buffer(struct device *dev, struct bsg_buffer *buf, dma_addr_t *dma_addr, size_t *dma_len, void **p) @@ -1821,7 +2045,6 @@ _transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf, /** * _transport_smp_handler - transport portal for smp passthru - * @job: ? 
* @shost: shost object * @rphy: sas transport rphy object * @@ -1838,6 +2061,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, Mpi2SmpPassthroughReply_t *mpi_reply; int rc; u16 smid; + u32 ioc_state; void *psge; dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; @@ -1845,10 +2069,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, void *addr_out = NULL; size_t dma_len_in; size_t dma_len_out; + u16 wait_state_count; unsigned int reslen = 0; if (ioc->shost_recovery || ioc->pci_error_recovery) { - ioc_info(ioc, "%s: host reset in progress!\n", __func__); + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); rc = -EFAULT; goto job_done; } @@ -1858,10 +2084,11 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, goto job_done; if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { - ioc_err(ioc, "%s: transport_cmds in use\n", - __func__); + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, + __func__); + mutex_unlock(&ioc->transport_cmds.mutex); rc = -EAGAIN; - goto out; + goto job_done; } ioc->transport_cmds.status = MPT3_CMD_PENDING; @@ -1880,13 +2107,30 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, if (rc) goto unmap_out; - rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); - if (rc) - goto unmap_in; + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto unmap_in; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); if (!smid) { - ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); rc = -EAGAIN; goto unmap_in; } @@ -1907,15 +2151,16 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, dma_len_in - 4); - dtransportprintk(ioc, - ioc_info(ioc, "%s: sending smp request\n", __func__)); + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - sending smp request\n", ioc->name, __func__)); init_completion(&ioc->transport_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { - ioc_err(ioc, "%s: timeout\n", __func__); + pr_err(MPT3SAS_FMT "%s : timeout\n", + __func__, ioc->name); _debug_dump_mf(mpi_request, sizeof(Mpi2SmpPassthroughRequest_t)/4); if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) { @@ -1925,11 +2170,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, } } - dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__)); + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - complete\n", ioc->name, __func__)); if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) { - dtransportprintk(ioc, - ioc_info(ioc, "%s: no reply\n", __func__)); + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - no reply\n", ioc->name, __func__)); rc = -ENXIO; goto unmap_in; } @@ -1937,16 +2183,16 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, mpi_reply = 
ioc->transport_cmds.reply; dtransportprintk(ioc, - ioc_info(ioc, "%s: reply data transfer size(%d)\n", - __func__, - le16_to_cpu(mpi_reply->ResponseDataLength))); + pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); job->reply_len = sizeof(*mpi_reply); reslen = le16_to_cpu(mpi_reply->ResponseDataLength); if (addr_in) { - sg_copy_to_buffer(job->reply_payload.sg_list, + sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, addr_in, job->reply_payload.payload_len); } @@ -1961,18 +2207,326 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, out: ioc->transport_cmds.status = MPT3_CMD_NOT_USED; mutex_unlock(&ioc->transport_cmds.mutex); -job_done: + job_done: bsg_job_done(job, rc, reslen); } +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) && LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +/** + * _transport_smp_handler - transport portal for smp passthru + * @shost: shost object + * @rphy: sas transport rphy object + * @req: + * + * This used primarily for smp_utils. + * Example: + * smp_rep_general /sys/class/bsg/expander-5:0 + */ +static int +_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, + struct request *req) +{ + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + int rc; +#else + int rc, i; +#endif + u16 smid; + void *psge; + u8 issue_reset = 0; + dma_addr_t dma_addr_in = 0; + dma_addr_t dma_addr_out = 0; + dma_addr_t pci_dma_in = 0; + dma_addr_t pci_dma_out = 0; + void *pci_addr_in = NULL; + void *pci_addr_out = NULL; + struct request *rsp = req->next_rq; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + struct bio_vec bvec; + struct bvec_iter iter; +#else + struct bio_vec *bvec = NULL; +#endif + if (!rsp) { + printk(MPT3SAS_ERR_FMT "%s: the smp response space is " + "missing\n", ioc->name, __func__); + return -EINVAL; + } + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + printk(MPT3SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + return rc; + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + printk(MPT3SAS_ERR_FMT "%s: transport_cmds in use\n", ioc->name, + __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + /* Check if the request is split across multiple segments */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + if (bio_multiple_segments(req->bio)) { +#else + if (req->bio->bi_vcnt > 1) { +#endif + u32 offset = 0; + + /* Allocate memory and copy the request */ + pci_addr_out = pci_alloc_consistent(ioc->pdev, + blk_rq_bytes(req), &pci_dma_out); + if (!pci_addr_out) { + printk(MPT3SAS_INFO_FMT "%s(): PCI Addr out = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto out; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + bio_for_each_segment(bvec, req->bio, iter) { + memcpy(pci_addr_out + offset, + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; +#else + bio_for_each_segment(bvec, req->bio, i) { + memcpy(pci_addr_out + offset, + page_address(bvec->bv_page) + bvec->bv_offset, + bvec->bv_len); + offset += bvec->bv_len; +#endif + } + } else { + dma_addr_out = pci_map_single(ioc->pdev, 
bio_data(req->bio), + blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) { +#else + if (pci_dma_mapping_error(dma_addr_out)) { +#endif + printk(MPT3SAS_INFO_FMT "%s(): DMA Addr out = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto free_pci; + } + } + + /* Check if the response needs to be populated across + * multiple segments */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + if (bio_multiple_segments(rsp->bio)) { +#else + if (rsp->bio->bi_vcnt > 1) { +#endif + pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), + &pci_dma_in); + if (!pci_addr_in) { + printk(MPT3SAS_INFO_FMT "%s(): PCI Addr in = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto unmap; + } + } else { + dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), + blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) { +#else + if (pci_dma_mapping_error(dma_addr_in)) { +#endif + printk(MPT3SAS_INFO_FMT "%s(): DMA Addr in = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto unmap; + } + } + + rc = mpt3sas_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto unmap; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + printk(MPT3SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto unmap; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = 0xFF; + mpi_request->SASAddress = (rphy) ? + cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); + psge = &mpi_request->SGL; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + if (bio_multiple_segments(req->bio)) +#else + if (req->bio->bi_vcnt > 1) +#endif + ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), + pci_dma_in, (blk_rq_bytes(rsp) + 4)); + else + ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4), + dma_addr_in, (blk_rq_bytes(rsp) + 4)); + + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "%s - " + "sending smp request\n", ioc->name, __func__)); + + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + printk(MPT3SAS_ERR_FMT "%s : timeout\n", + __func__, ioc->name); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT "%s - " + "complete\n", ioc->name, __func__)); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT + "%s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) + memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply)); + scsi_req(req)->sense_len = sizeof(*mpi_reply); +#else + memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); + req->sense_len = sizeof(*mpi_reply); +#endif +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(4,11,0))
+ scsi_req(req)->resid_len = 0;
+ scsi_req(rsp)->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) && LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
+ req->resid_len = 0;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+#else
+ req->data_len = 0;
+ rsp->data_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+#endif
+
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
+ if (bio_multiple_segments(rsp->bio)) {
+#else
+ if (rsp->bio->bi_vcnt > 1) {
+#endif
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
+ }
+ offset += bvec.bv_len;
+#else
+ bio_for_each_segment(bvec, rsp->bio, i) {
+ if (bytes_to_copy <= bvec->bv_len) {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bvec->bv_len);
+ bytes_to_copy -= bvec->bv_len;
+ }
+ offset += bvec->bv_len;
+#endif
+ }
+ }
+ } else {
+ dtransportprintk(ioc, printk(MPT3SAS_INFO_FMT
+ "%s - no reply\n", ioc->name, __func__));
+ rc = -ENXIO;
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ rc = -ETIMEDOUT;
+ }
+
+ unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+#endif
+
 struct sas_function_template mpt3sas_transport_functions = {
 .get_linkerrors = _transport_get_linkerrors,
 .get_enclosure_identifier = _transport_get_enclosure_identifier,
 .get_bay_identifier = _transport_get_bay_identifier,
 .phy_reset = _transport_phy_reset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
 .phy_enable = _transport_phy_enable,
 .set_phy_speed = _transport_phy_speed,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
 .smp_handler = _transport_smp_handler,
+#endif
 };
 struct scsi_transport_template *mpt3sas_transport_template;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
old mode 100644
new mode 100755
index 6ac453fd5937..5eab03e8ac99
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -1,11 +1,12 @@
 /*
- * This module provides common API to set Diagnostic trigger for MPT
+ * This module provides common API to set Diagnostic trigger for MPT
 * (Message Passing Technology) based controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
- * Copyright (C) 2012-2014 LSI Corporation
- * Copyright (C) 2013-2014 Avago Technologies
- * (mailto: 
MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -43,6 +44,7 @@ * USA. */ +#include #include #include #include @@ -55,14 +57,14 @@ #include #include -#include +#include #include "mpt3sas_base.h" /** * _mpt3sas_raise_sigio - notifiy app * @ioc: per adapter object - * @event_data: ? + * @event_data: */ static void _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, @@ -72,7 +74,8 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, u16 sz, event_data_sz; unsigned long flags; - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", + ioc->name, __func__)); sz = offsetof(Mpi2EventNotificationReply_t, EventData) + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4; @@ -84,52 +87,57 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, mpi_reply->EventDataLength = cpu_to_le16(event_data_sz); memcpy(&mpi_reply->EventData, event_data, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: add to driver event log\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: add to driver " + "event log\n", ioc->name, __func__)); mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); kfree(mpi_reply); out: /* clearing the diag_trigger_active flag */ spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: clearing diag_trigger_active flag\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: clearing " + "diag_trigger_active flag\n", ioc->name, __func__)); ioc->diag_trigger_active = 0; spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); } /** * mpt3sas_process_trigger_data - process the event data for the trigger * @ioc: per adapter object - * @event_data: ? + * @event_data: */ void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) { u8 issue_reset = 0; + u32 *trig_data = (u32*)&event_data->u.master; - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", + ioc->name, __func__)); /* release the diag buffer trace */ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: release trace diag buffer\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: release " + "trace diag buffer\n", ioc->name, __func__)); + + /* add a log message so that user knows which event caused the release */ + printk(MPT3SAS_INFO_FMT "%s: Releasing the trace buffer." 
+ "Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n", ioc->name, + __func__, event_data->trigger_type, trig_data[0], trig_data[1]); + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset); } _mpt3sas_raise_sigio(ioc, event_data); - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); } /** @@ -167,9 +175,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) by_pass_checks: - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n", - __func__, trigger_bitmask)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "trigger_bitmask = 0x%08x\n", ioc->name, __func__, + trigger_bitmask)); /* don't send trigger if an trigger is currently active */ if (ioc->diag_trigger_active) { @@ -181,9 +189,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) if (ioc->diag_trigger_master.MasterData & trigger_bitmask) { found_match = 1; ioc->diag_trigger_active = 1; - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: setting diag_trigger_active flag\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); } spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); @@ -201,15 +208,15 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) mpt3sas_send_trigger_data_event(ioc, &event_data); out: - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); } /** * mpt3sas_trigger_event - Event trigger handler * @ioc: per adapter object - * @event: ? - * @log_entry_qualifier: ? + * @event: + * @log_entry_qualifier: * */ void @@ -238,9 +245,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, return; } - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n", - __func__, event, log_entry_qualifier)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "event = 0x%04x, log_entry_qualifier = 0x%04x\n", ioc->name, + __func__, event, log_entry_qualifier)); /* don't send trigger if an trigger is currently active */ if (ioc->diag_trigger_active) { @@ -262,34 +269,32 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, } found_match = 1; ioc->diag_trigger_active = 1; - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: setting diag_trigger_active flag\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); } spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); if (!found_match) goto out; - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: setting diag_trigger_active flag\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); event_data.trigger_type = MPT3SAS_TRIGGER_EVENT; event_data.u.event.EventValue = event; event_data.u.event.LogEntryQualifier = log_entry_qualifier; mpt3sas_send_trigger_data_event(ioc, &event_data); out: - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); } /** * mpt3sas_trigger_scsi - SCSI trigger handler * @ioc: per adapter object - * @sense_key: ? - * @asc: ? - * @ascq: ? 
+ * @sense_key: + * @asc: + * @ascq: * */ void @@ -318,9 +323,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, return; } - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", - __func__, sense_key, asc, ascq)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", ioc->name, + __func__, sense_key, asc, ascq)); /* don't send trigger if an trigger is currently active */ if (ioc->diag_trigger_active) { @@ -346,9 +351,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, if (!found_match) goto out; - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: setting diag_trigger_active flag\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); event_data.trigger_type = MPT3SAS_TRIGGER_SCSI; event_data.u.scsi.SenseKey = sense_key; @@ -356,15 +360,15 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, event_data.u.scsi.ASCQ = ascq; mpt3sas_send_trigger_data_event(ioc, &event_data); out: - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); } /** * mpt3sas_trigger_mpi - MPI trigger handler * @ioc: per adapter object - * @ioc_status: ? - * @loginfo: ? + * @ioc_status: + * @loginfo: * */ void @@ -392,9 +396,9 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) return; } - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n", - __func__, ioc_status, loginfo)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "ioc_status = 0x%04x, loginfo = 0x%08x\n", ioc->name, __func__, + ioc_status, loginfo)); /* don't send trigger if an trigger is currently active */ if (ioc->diag_trigger_active) { @@ -419,15 +423,14 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) if (!found_match) goto out; - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: setting diag_trigger_active flag\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); event_data.trigger_type = MPT3SAS_TRIGGER_MPI; event_data.u.mpi.IOCStatus = ioc_status; event_data.u.mpi.IocLogInfo = loginfo; mpt3sas_send_trigger_data_event(ioc, &event_data); out: - dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", - __func__)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h old mode 100644 new mode 100755 index 6586a463bea9..be8e748f46f0 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h @@ -1,12 +1,13 @@ /* * This is the Fusion MPT base driver providing common API layer interface - * to set Diagnostic triggers for MPT (Message Passing Technology) based + * to set Diagnostic triggers for MPT (Message Passing Technology) based * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2014 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2013-2018 LSI Corporation + * 
Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c old mode 100644 new mode 100755 index cc07ba41f507..f21d50a2a904 --- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c +++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c @@ -1,9 +1,10 @@ /* * Scsi Host Layer for MPT (Message Passing Technology) based controllers * - * Copyright (C) 2012-2014 LSI Corporation - * Copyright (C) 2013-2015 Avago Technologies - * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * Copyright (C) 2012-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto: MPT-FusionLinux.pdl@broadcom.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -36,8 +37,11 @@ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * You should have received a copy of the GNU General Public License - * along with this program. + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. */ + #include #include #include @@ -46,13 +50,62 @@ #include "mpt3sas_base.h" +/** + * _scsih_hide_unhide_sas_devices - add/remove device to/from OS + * @ioc: per adapter object + * + * Return nothing. + */ +void +_scsih_hide_unhide_sas_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device, *sas_device_next; + + if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag != + MFG_PAGE10_HIDE_IF_VOL_PRESENT) + return; + + if (ioc->hide_drives) { + if (mpt3sas_get_num_volumes(ioc)) + return; + ioc->hide_drives = 0; + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!mpt3sas_transport_port_add(ioc, + sas_device->handle, + sas_device->sas_address_parent, + sas_device->port)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + _scsih_sas_device_remove(ioc, sas_device); + } + } + } else { + if (!mpt3sas_get_num_volumes(ioc)) + return; + ioc->hide_drives = 1; + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + } + } +} + + /** * _warpdrive_disable_ddio - Disable direct I/O for all the volumes * @ioc: per adapter object */ + static void -_warpdrive_disable_ddio(struct MPT3SAS_ADAPTER *ioc) -{ +_warpdrive_disable_ddio(struct MPT3SAS_ADAPTER *ioc) { Mpi2RaidVolPage1_t vol_pg1; Mpi2ConfigReply_t mpi_reply; struct _raid_device *raid_device; @@ -83,8 +136,7 @@ _warpdrive_disable_ddio(struct MPT3SAS_ADAPTER *ioc) * @ioc: per adapter object */ u8 -mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc) -{ +mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc) { Mpi2RaidVolPage1_t vol_pg1; Mpi2ConfigReply_t mpi_reply; u16 handle; @@ -108,9 +160,9 @@ mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_init_warpdrive_properties - Set properties for warpdrive direct I/O. 
* @ioc: per adapter object - * @raid_device: the raid_device object - */ -void + * @raid_device: the raid_device object + */ +void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, struct _raid_device *raid_device) { @@ -127,17 +179,20 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, return; if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n"); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "globally as drives are exposed\n", ioc->name); return; } if (mpt3sas_get_num_volumes(ioc) > 1) { _warpdrive_disable_ddio(ioc); - ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n"); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "globally as number of drives > 1\n", ioc->name); return; } if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, &num_pds)) || !num_pds) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n"); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "Failure in computing number of drives\n", ioc->name); return; } @@ -145,13 +200,15 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, sizeof(Mpi2RaidVol0PhysDisk_t)); vol_pg0 = kzalloc(sz, GFP_KERNEL); if (!vol_pg0) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n"); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "Memory allocation failure for RVPG0\n", ioc->name); return; } if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n"); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "Failure in retrieving RVPG0\n", ioc->name); kfree(vol_pg0); return; } @@ -161,8 +218,10 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, * assumed for WARPDRIVE, disable direct I/O */ if (num_pds > MPT_MAX_WARPDRIVE_PDS) { - ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n", - raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS); + printk(MPT3SAS_WARN_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x): num_mem=%d, " + "max_mem_allowed=%d\n", ioc->name, raid_device->handle, + num_pds, MPT_MAX_WARPDRIVE_PDS); kfree(vol_pg0); return; } @@ -172,21 +231,24 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, vol_pg0->PhysDisk[count].PhysDiskNum) || le16_to_cpu(pd_pg0.DevHandle) == MPT3SAS_INVALID_DEVICE_HANDLE) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n", - raid_device->handle, - vol_pg0->PhysDisk[count].PhysDiskNum); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is " + "disabled for the drive with handle(0x%04x) member" + "handle retrieval failed for member number=%d\n", + ioc->name, raid_device->handle, + vol_pg0->PhysDisk[count].PhysDiskNum); goto out_error; } /* Disable direct I/O if member drive lba exceeds 4 bytes */ dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA); if (dev_max_lba >> 32) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n", - raid_device->handle, - le16_to_cpu(pd_pg0.DevHandle), - (u64)dev_max_lba); + printk(MPT3SAS_INFO_FMT "WarpDrive : 
Direct IO is " + "disabled for the drive with handle(0x%04x) member" + "handle (0x%04x) unsupported max lba 0x%016llx\n", + ioc->name, raid_device->handle, + le16_to_cpu(pd_pg0.DevHandle), + (unsigned long long)dev_max_lba); goto out_error; } - raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle); } @@ -195,36 +257,44 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, * not RAID0 */ if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n", - raid_device->handle, raid_device->volume_type, - (le32_to_cpu(vol_pg0->StripeSize) * - le16_to_cpu(vol_pg0->BlockSize)) / 1024, - le16_to_cpu(vol_pg0->BlockSize)); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x): type=%d, " + "s_sz=%uK, blk_size=%u\n", ioc->name, + raid_device->handle, raid_device->volume_type, + (le32_to_cpu(vol_pg0->StripeSize) * + le16_to_cpu(vol_pg0->BlockSize)) / 1024, + le16_to_cpu(vol_pg0->BlockSize)); goto out_error; } + stripe_sz = le32_to_cpu(vol_pg0->StripeSize); stripe_exp = find_first_bit(&stripe_sz, 32); if (stripe_exp == 32) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n", - raid_device->handle, - (le32_to_cpu(vol_pg0->StripeSize) * - le16_to_cpu(vol_pg0->BlockSize)) / 1024); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x) invalid stripe sz %uK\n", + ioc->name, raid_device->handle, + (le32_to_cpu(vol_pg0->StripeSize) * + le16_to_cpu(vol_pg0->BlockSize)) / 1024); goto out_error; } raid_device->stripe_exponent = stripe_exp; + block_sz = le16_to_cpu(vol_pg0->BlockSize); block_exp = find_first_bit(&block_sz, 16); if (block_exp == 16) { - ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n", - raid_device->handle, le16_to_cpu(vol_pg0->BlockSize)); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x) invalid block sz %u\n", + ioc->name, raid_device->handle, + le16_to_cpu(vol_pg0->BlockSize)); goto out_error; } + raid_device->block_exponent = block_exp; raid_device->direct_io_enabled = 1; - ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n", - raid_device->handle); + printk(MPT3SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive" + " with handle(0x%04x)\n", ioc->name, raid_device->handle); /* * WARPDRIVE: Though the following fields are not used for direct IO, * stored for future purpose: @@ -246,54 +316,300 @@ out_error: } /** - * mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O + * _scsih_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O * @ioc: per adapter object * @scmd: pointer to scsi command object * @raid_device: pointer to raid device data structure * @mpi_request: pointer to the SCSI_IO reqest message frame + * @smid: system request message index + * + * Returns nothing */ void mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request) { - sector_t v_lba, p_lba, stripe_off, column, io_size; + u32 p_lba, stripe_off, stripe_unit, column, io_size; + u64 v_lba; u32 stripe_sz, stripe_exp; - u8 num_pds, cmd = scmd->cmnd[0]; - struct scsiio_tracker *st = scsi_cmd_priv(scmd); + u8 num_pds, *cdb_ptr, i; + u8 cdb0 = scmd->cmnd[0]; + u64 v_llba; + struct 
scsiio_tracker *st = mpt3sas_base_scsi_cmd_priv(scmd);
- if (cmd != READ_10 && cmd != WRITE_10 &&
- cmd != READ_16 && cmd != WRITE_16)
- return;
+ /*
+ * Try Direct I/O to RAID member disks
+ */
+ if (cdb0 == READ_16 || cdb0 == READ_10 ||
+ cdb0 == WRITE_16 || cdb0 == WRITE_10) {
+ cdb_ptr = mpi_request->CDB.CDB32;
- if (cmd == READ_10 || cmd == WRITE_10)
- v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]);
- else
- v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]);
+ if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
+ | cdb_ptr[5])) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+ io_size = scmd->request_bufflen >>
+ raid_device->block_exponent;
+#else
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+#endif
- io_size = scsi_bufflen(scmd) >> raid_device->block_exponent;
+ /* get virtual lba */
+ if ((cdb0 < READ_16)) {
+ v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[2]));
+ }
+ else {
+ v_lba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+ }
- if (v_lba + io_size - 1 > raid_device->max_lba)
- return;
+ i = (cdb0 < READ_16) ? 2 : 6;
- stripe_sz = raid_device->stripe_sz;
- stripe_exp = raid_device->stripe_exponent;
- stripe_off = v_lba & (stripe_sz - 1);
+ if (((u64)v_lba + (u64)io_size - 1) <= raid_device->max_lba) {
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = v_lba & (stripe_sz - 1);
- /* Return unless IO falls within a stripe */
- if (stripe_off + io_size > stripe_sz)
- return;
+ /* Check whether IO falls within a stripe */
+ if ((stripe_off + io_size) <= stripe_sz) {
+ num_pds = raid_device->num_pds;
+ p_lba = v_lba >> stripe_exp;
+ stripe_unit = p_lba / num_pds;
+ column = p_lba % num_pds;
+ p_lba = (stripe_unit << stripe_exp) +
+ stripe_off;
+ mpi_request->DevHandle =
+ cpu_to_le16(raid_device->
+ pd_handle[column]);
+ (*(__be32 *)(&cdb_ptr[i])) =
+ cpu_to_be32(p_lba);
+ /*
+ * WD: To indicate this I/O is directI/O
+ */
+ st->direct_io = 1;
+#ifdef MPT2SAS_WD_DDIOCOUNT
+ ioc->ddio_count++;
+#endif
+#ifdef MPT2SAS_WD_LOGGING
+ if (ioc->logging_level &
+ MPT_DEBUG_SCSI) {
+ printk(MPT3SAS_INFO_FMT
+ "scmd(%p) as direct IO\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+ }
+#endif
+ }
+ }
+ } else {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+ io_size = scmd->request_bufflen >>
+ raid_device->block_exponent;
+#else
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+#endif
+ /* get virtual lba */
+ v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
- num_pds = raid_device->num_pds;
- p_lba = v_lba >> stripe_exp;
- column = sector_div(p_lba, num_pds);
- p_lba = (p_lba << stripe_exp) + stripe_off;
- mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]);
+ if ((v_llba + (u64)io_size - 1) <=
+ raid_device->max_lba) {
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = (u32) (v_llba & (stripe_sz - 1));
- if (cmd == READ_10 || cmd == WRITE_10)
- put_unaligned_be32(lower_32_bits(p_lba),
- &mpi_request->CDB.CDB32[2]);
- else
- put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
+ /* Check whether IO falls within a stripe */
+ if ((stripe_off + io_size) <= stripe_sz) {
+ num_pds = raid_device->num_pds;
+ p_lba = (u32)(v_llba >> stripe_exp);
+ stripe_unit = p_lba / num_pds;
+ column = p_lba % num_pds;
+ p_lba = (stripe_unit << stripe_exp) +
+ stripe_off;
+ mpi_request->DevHandle =
+ cpu_to_le16(raid_device->
+ pd_handle[column]);
+ (*(__be64 *)(&cdb_ptr[2])) =
+ cpu_to_be64((u64)p_lba);
+ /*
+ * WD: To indicate this I/O is 
directI/O + */ + st->direct_io = 1; +#ifdef MPT2SAS_WD_DDIOCOUNT + ioc->ddio_count++; +#endif +#ifdef MPT2SAS_WD_LOGGING + if (ioc->logging_level & + MPT_DEBUG_SCSI) { + printk(MPT3SAS_INFO_FMT + "scmd(%p) as direct IO\n", + ioc->name, scmd); + scsi_print_command(scmd); + } +#endif + } + } - st->direct_io = 1; + } + } +} + +void +_scsih_log_entry_add_event(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataLogEntryAdded_t *log_entry) +{ + __le32 *log_code; + + if (!ioc->is_warpdrive) + return; + + log_code = (__le32 *)log_entry->LogData; + + mpt3sas_trigger_event(ioc, MPI2_EVENT_LOG_ENTRY_ADDED, + le16_to_cpu(log_entry->LogEntryQualifier)); + + if (le16_to_cpu(log_entry->LogEntryQualifier) + != MPT2_WARPDRIVE_LOGENTRY) + return; + + switch (le32_to_cpu(*log_code)) { + case MPT2_WARPDRIVE_LC_SSDT: + printk(MPT3SAS_WARN_FMT "WarpDrive Warning: " + "IO Throttling has occurred in the WarpDrive " + "subsystem. Check WarpDrive documentation for " + "additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_SSDLW: + printk(MPT3SAS_WARN_FMT "WarpDrive Warning: " + "Program/Erase Cycles for the WarpDrive subsystem " + "in degraded range. Check WarpDrive documentation " + "for additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_SSDLF: + printk(MPT3SAS_ERR_FMT "WarpDrive Fatal Error: " + "There are no Program/Erase Cycles for the " + "WarpDrive subsystem. The storage device will be " + "in read-only mode. Check WarpDrive documentation " + "for additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_BRMF: + printk(MPT3SAS_ERR_FMT "WarpDrive Fatal Error: " + "The Backup Rail Monitor has failed on the " + "WarpDrive subsystem. Check WarpDrive " + "documentation for additional details.\n", + ioc->name); + break; + } +} + +#ifdef MPT2SAS_WD_DDIOCOUNT +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_ioc_ddio_count_show(struct device *cdev, struct device_attribute *attr, + char *buf) +#else +ssize_t +_ctl_ioc_ddio_count_show(struct class_device *cdev, char *buf) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)ioc->ddio_count); +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_ioc_ddio_err_count_show(struct device *cdev, struct device_attribute *attr, + char *buf) +#else +ssize_t +_ctl_ioc_ddio_err_count_show(struct class_device *cdev, char *buf) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long) ioc->ddio_err_count); +} +#endif + +/** + * _ctl_BRM_status_show - Backup Rail Monitor Status + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is number of reply queues + * + * A sysfs 'read-only' shost attribute. 
+ */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +ssize_t +_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, + char *buf) +#else +ssize_t +_ctl_BRM_status_show(struct class_device *cdev, char *buf) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; + Mpi2ConfigReply_t mpi_reply; + u16 backup_rail_monitor_status = 0; + u16 ioc_status; + int sz; + ssize_t rc = 0; + + if (!ioc->is_warpdrive) { + printk(MPT3SAS_ERR_FMT "%s: BRM attribute is only for " + "warpdrive\n", ioc->name, __func__); + return 0; + } else { + if (ioc->pci_error_recovery || ioc->remove_host) { + return 0; + } + } + + mutex_lock(&ioc->pci_access_mutex); + /* allocate upto GPIOVal 36 entries */ + sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); + io_unit_pg3 = kzalloc(sz, GFP_KERNEL); + if (!io_unit_pg3) { + printk(MPT3SAS_ERR_FMT "%s: failed allocating memory " + "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); + goto out; + } + + if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != + 0) { + printk(MPT3SAS_ERR_FMT + "%s: failed reading iounit_pg3\n", ioc->name, + __func__); + goto out; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT3SAS_ERR_FMT "%s: iounit_pg3 failed with " + "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); + goto out; + } + + if (io_unit_pg3->GPIOCount < 25) { + printk(MPT3SAS_ERR_FMT "%s: iounit_pg3->GPIOCount less than" + " 25 entries, detected (%d) entries\n", ioc->name, __func__, + io_unit_pg3->GPIOCount); + goto out; + } + + /* BRM status is in bit zero of GPIOVal[24] */ + backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); + rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1)); + + out: + kfree(io_unit_pg3); + mutex_unlock(&ioc->pci_access_mutex); + return rc; } diff --git a/drivers/scsi/mpt3sas/scripts/expander_attributes.sh b/drivers/scsi/mpt3sas/scripts/expander_attributes.sh new file mode 100755 index 000000000000..cfa2175af292 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/expander_attributes.sh @@ -0,0 +1,26 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +cd /sys/class/sas_expander +for i in *; do + echo -e \\n + echo $i + cd $i + for f in *; do + if [ -f $f -a -r $f ]; then + echo -n -e \\t "$f:"; + cat $f ; + fi; + done; + cd .. +done; +echo -e \\n diff --git a/drivers/scsi/mpt3sas/scripts/get_sas_address.sh b/drivers/scsi/mpt3sas/scripts/get_sas_address.sh new file mode 100755 index 000000000000..5c4d4d2e9989 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/get_sas_address.sh @@ -0,0 +1,19 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. 
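For reference, the RAID0 address translation that mpt3sas_setup_direct_io() in mpt3sas_warpdrive.c performs above reduces to a few lines of arithmetic. A minimal standalone C sketch of the same computation (illustrative only; the struct and function names here are hypothetical, and the driver additionally verifies that the I/O does not cross a stripe boundary before translating):

#include <stdint.h>

struct wd_layout {
	uint32_t stripe_sz;	/* stripe size in blocks, power of two */
	uint32_t stripe_exp;	/* log2(stripe_sz) */
	uint8_t num_pds;	/* number of member drives */
};

/* Map a virtual LBA to (member column, physical LBA), assuming the I/O
 * fits entirely inside one stripe, as the driver checks beforehand. */
static uint32_t wd_translate(const struct wd_layout *l, uint64_t v_lba,
	uint8_t *column)
{
	uint32_t stripe_off = (uint32_t)(v_lba & (l->stripe_sz - 1));
	uint32_t stripe_no = (uint32_t)(v_lba >> l->stripe_exp);
	uint32_t stripe_unit = stripe_no / l->num_pds;

	*column = stripe_no % l->num_pds;
	return (stripe_unit << l->stripe_exp) + stripe_off;
}

/* Worked example: stripe_sz = 64 (exp 6), 4 members, v_lba = 1000:
 * stripe_off = 40, stripe_no = 15, stripe_unit = 3, column = 3,
 * physical LBA = (3 << 6) + 40 = 232 on member drive 3. */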
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash +if (( $# != 1 )) +then + clear + echo Usage: ./get_sas_address /dev/sg + echo -e This is not supported for direct attached SATA drives.\\n + exit 1 +fi; +sg_vpd -p di_port -q $1 diff --git a/drivers/scsi/mpt3sas/scripts/hba_properties.sh b/drivers/scsi/mpt3sas/scripts/hba_properties.sh new file mode 100755 index 000000000000..fa48b1f6378b --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/hba_properties.sh @@ -0,0 +1,46 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +clear +scsi_host="/sys/class/scsi_host" +cd ${scsi_host} +subfolders=`ls -1` +for i in ${subfolders}; do + cd ${i}; + if [ `cat proc_name` == "mpt3sas" ]; then + board=`cat board_name` + assembly=`cat board_assembly` + tracer=`cat board_tracer` + ioc=`cat unique_id`; + bios=`cat version_bios` + fw=`cat version_fw` + driver=`cat /sys/module/mpt3sas/version` + product=`cat version_product` + mpi=`cat version_mpi` + nvdata_d=`cat version_nvdata_default` + nvdata_p=`cat version_nvdata_persistent` + logging_level=`cat logging_level` + io_delay=`cat io_delay` + device_delay=`cat device_delay` + fw_queue_depth=`cat fw_queue_depth` + sas_address=`cat host_sas_address` + echo -e \\n${i}: ioc${ioc}: fw=${fw} bios=${bios} driver=${driver} mpi=${mpi} + echo -e \\t ${product}: board_name=${board} assembly=${assembly} tracer=${tracer} + echo -e \\t nvdata_persistent=${nvdata_p} nvdata_default=${nvdata_d} + echo -e \\t io_delay=${io_delay} device_delay=${device_delay} + echo -e \\t logging_level=${logging_level} + echo -e \\t fw_queue_depth=${fw_queue_depth} + echo -e \\t sas_address=${sas_address} + fi; + cd ${scsi_host} +done; +echo -e \\n diff --git a/drivers/scsi/mpt3sas/scripts/sas_device_attributes.sh b/drivers/scsi/mpt3sas/scripts/sas_device_attributes.sh new file mode 100755 index 000000000000..79a93de33c4d --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/sas_device_attributes.sh @@ -0,0 +1,43 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. 
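Usage note for the two scripts above (device names illustrative): get_sas_address.sh takes a single sg node, for example "./get_sas_address.sh /dev/sg1", and prints the drive's port SAS address via sg_vpd; hba_properties.sh takes no arguments and dumps the board, firmware, BIOS, driver, and NVDATA versions for every host whose proc_name is mpt3sas.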
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +declare -r CURRENT_PATH=$PWD +declare -r SCSI_HOST="/sys/class/scsi_host" +declare -r SAS_DEVICE="/sys/class/sas_device" + +function display() +{ + cd ${SAS_DEVICE} + for i in end_device-$1:*; do + echo -e \\n + echo $i + cd $i + for f in *; do + if [ -f $f -a -r $f ]; then + echo -n -e \\t "$f:"; + cat $f ; + fi; + done; + cd .. + done; + echo -e \\n +} + +for HOST in $(ls ${SCSI_HOST}); do + cd ${SCSI_HOST}/${HOST}; + if [ `cat proc_name` != "mpt3sas" ]; then + continue; + fi; + host_number=${HOST//host/} + display ${host_number} + cd ${CURRENT_PATH} +done; diff --git a/drivers/scsi/mpt3sas/scripts/sas_dump_rhel5.sh b/drivers/scsi/mpt3sas/scripts/sas_dump_rhel5.sh new file mode 100755 index 000000000000..73f9a69db942 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/sas_dump_rhel5.sh @@ -0,0 +1,181 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash +#set -x +current_path=`pwd`; +count=0; +none_count=0; +smp_count=0; +dev_count=0; +phy_count=0; +stp_count=0; +ssp_count=0; +sata_count=0; +port_count=0 + +port_info() +{ +clear; +echo +echo --------------------------------- port info ------------------------------------ +echo -e \\t\\tindex\\tnum_phys\\tdevice +cd /sys/class/sas_port +for i in $( ls -1); do + cd ${i}; + num_phys=`cat num_phys` + device_type="undefined" + if [ `ls device | grep -c expander` != 0 ]; then + device_type=`ls device | grep expander`; + elif [ `ls device | grep -c host` != 0 ]; then + device_type=`ls device | grep host`; + elif [ `ls device | grep -c end_device` != 0 ]; then + device_type=`ls device | grep end_device`; + fi; + echo -e ${i}\\t${port_count}\\t${num_phys}\\t${device_type} + cd .. + let port_count=port_count+1 +done; +} + +phy_info() +{ +echo +echo --------------------------------- Phy info ------------------------------------ +echo -e \\t\\tindex\\ttype\\tphy\\tport\\tSAS_ADDR\\t\\trate +cd /sys/class/sas_phy +for i in $( ls -1); do + cd ${i}; + sas_address=`cat sas_address` + phy=`cat phy_identifier` + rate=`cat negotiated_linkrate` + sas_type=`cat target_port_protocols` + echo -e ${i}\\t${phy_count}\\t${sas_type}\\t${phy}\\t${sas_address}\\t${rate} + cd .. 
+ let phy_count=${phy_count}+1 +done; +} + + +initiator_info() +{ +echo +echo ---------------------------- Initiator info ------------------------------------ +echo -e \\t\\tindex\\ttype\\tphy\\tSAS_ADDR +count=0; +#cd /sys/class/sas_rphy; +cd /sys/class/sas_device; +for i in $( ls -1); do + cd ${i}; + if [ `cat target_port_protocols` == "none" ]; then + let none_count=${none_count}+1 + sas_address=`cat sas_address` + phy=`cat phy_identifier` + bay_id=`cat bay_identifier`; + enclosure_id=`cat enclosure_identifier`; + echo ${i}: index=${count} phy_id=${phy} SAS=${sas_address} + echo -e \\t enclosure slot=${bay_id} WWN=${enclosure_id} + fi; + cd .. + let count=${count}+1 +done; +} + +expander_info() +{ +# smp +echo +echo ---------------------------- Expander info ------------------------------------ +echo -e \\t\\tindex\\ttype\\tphy\\tSAS_ADDR +count=0; +#cd /sys/class/sas_rphy; +cd /sys/class/sas_device; +for i in $( ls -1); do + cd ${i}; + if [ `cat target_port_protocols` == "smp" ]; then + let smp_count=${smp_count}+1 + sas_address=`cat sas_address` + sas_type=`cat target_port_protocols` + phy=`cat phy_identifier` + echo -e ${i}\\t${count}\\t${sas_type}\\t${phy}\\t${sas_address} + fi; + cd .. + let count=${count}+1 +done; +} + + +target_info() +{ +# devices +echo +echo ---------------------------- Target info ------------------------------------ +echo -e \\t\\t\\tindex\\ttype\\tphy\\tslot\\tSAS_ADDR\\t\\tEnclosure ID +count=0; +#cd /sys/class/sas_rphy; +cd /sys/class/sas_device; +for i in $( ls -1); do + cd ${i}; + if [ `cat target_port_protocols` == "none" ]; then + cd .. + let count=${count}+1 + continue; + fi; + if [ `cat target_port_protocols` == "smp" ]; then + cd .. + let count=${count}+1 + continue; + fi; + if [ `cat target_port_protocols` == "stp" ]; then + let stp_count=${stp_count}+1 + fi; + if [ `cat target_port_protocols` == "ssp" ]; then + let ssp_count=${ssp_count}+1 + fi; + if [ `cat target_port_protocols` == "sata" ]; then + let sata_count=${sata_count}+1 + fi; + let dev_count=${dev_count}+1 + bay_id=`cat bay_identifier`; + enclosure_id=`cat enclosure_identifier`; + sas_address=`cat sas_address` + sas_type=`cat target_port_protocols` + phy=`cat phy_identifier` + echo -e ${i}\\t${count}\\t${sas_type}\\t${phy}\\t${bay_id}\\t${sas_address}\\t${enclosure_id} + cd .. + let count=${count}+1 +done; +} + +statistics_info() +{ +echo +echo -------- Statistics ------------------ +echo phy count = ${phy_count} +echo rphy count = ${count} +echo none = ${none_count} +echo smp = ${smp_count} +echo ssp = ${ssp_count} +echo stp = ${stp_count} +echo sata = ${sata_count} +echo total devices = ${dev_count} +cd ${current_path} +} + + + +clear; +port_info +phy_info +initiator_info +expander_info +target_info +statistics_info + diff --git a/drivers/scsi/mpt3sas/scripts/sas_dump_sles10.sh b/drivers/scsi/mpt3sas/scripts/sas_dump_sles10.sh new file mode 100755 index 000000000000..c55b4cc229f0 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/sas_dump_sles10.sh @@ -0,0 +1,141 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. 
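A note on the two dump scripts: sas_dump_rhel5.sh above factors the passes into functions and derives port membership by walking /sys/class/sas_port, while sas_dump_sles10.sh below inlines the same phy, initiator, expander, and target passes and instead reads the port_identifier attribute that the older SLES 10 sas_phy class exposes; the closing statistics block is identical in both.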
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash +#set -x +current_path=`pwd`; +count=0; +none_count=0; +smp_count=0; +dev_count=0; +phy_count=0; +stp_count=0; +ssp_count=0; +sata_count=0; + +clear; +echo +echo --------------------------------- Phy info ------------------------------------ +echo -e \\t\\tindex\\ttype\\tphy\\tport\\tSAS_ADDR\\t\\trate +cd /sys/class/sas_phy +for i in $( ls -1); do + cd ${i}; + sas_address=`cat sas_address` + phy=`cat phy_identifier` + port=`cat port_identifier` + rate=`cat negotiated_linkrate` + sas_type=`cat target_port_protocols` + echo -e ${i}\\t${phy_count}\\t${sas_type}\\t${phy}\\t${port}\\t${sas_address}\\t${rate} + cd .. + let phy_count=${phy_count}+1 +done; + + + +####################################################### +# none +echo +echo ---------------------------- Initiator info ------------------------------------ +echo -e \\t\\tindex\\ttype\\tphy\\tSAS_ADDR +count=0; +#cd /sys/class/sas_rphy; +cd /sys/class/sas_device; +for i in $( ls -1); do + cd ${i}; + if [ `cat target_port_protocols` == "none" ]; then + let none_count=${none_count}+1 + sas_address=`cat sas_address` + phy=`cat phy_identifier` + bay_id=`cat bay_identifier`; + enclosure_id=`cat enclosure_identifier`; + echo ${i}: index=${count} phy_id=${phy} SAS=${sas_address} + echo -e \\t enclosure slot=${bay_id} WWN=${enclosure_id} + fi; + cd .. + let count=${count}+1 +done; + +# smp +echo +echo ---------------------------- Expander info ------------------------------------ +echo -e \\t\\tindex\\ttype\\tphy\\tSAS_ADDR +count=0; +#cd /sys/class/sas_rphy; +cd /sys/class/sas_device; +for i in $( ls -1); do + cd ${i}; + if [ `cat target_port_protocols` == "smp" ]; then + let smp_count=${smp_count}+1 + sas_address=`cat sas_address` + sas_type=`cat target_port_protocols` + phy=`cat phy_identifier` + echo -e ${i}\\t${count}\\t${sas_type}\\t${phy}\\t${sas_address} + fi; + cd .. + let count=${count}+1 +done; + +stp_count=0; +ssp_count=0; +sata_count=0; + +# devices +echo +echo ---------------------------- Target info ------------------------------------ +echo -e \\t\\t\\tindex\\ttype\\tphy\\tslot\\tSAS_ADDR\\t\\tEnclosure ID +count=0; +#cd /sys/class/sas_rphy; +cd /sys/class/sas_device; +for i in $( ls -1); do + cd ${i}; + if [ `cat target_port_protocols` == "none" ]; then + cd .. + let count=${count}+1 + continue; + fi; + if [ `cat target_port_protocols` == "smp" ]; then + cd .. + let count=${count}+1 + continue; + fi; + if [ `cat target_port_protocols` == "stp" ]; then + let stp_count=${stp_count}+1 + fi; + if [ `cat target_port_protocols` == "ssp" ]; then + let ssp_count=${ssp_count}+1 + fi; + if [ `cat target_port_protocols` == "sata" ]; then + let sata_count=${sata_count}+1 + fi; + let dev_count=${dev_count}+1 + bay_id=`cat bay_identifier`; + enclosure_id=`cat enclosure_identifier`; + sas_address=`cat sas_address` + sas_type=`cat target_port_protocols` + phy=`cat phy_identifier` + echo -e ${i}\\t${count}\\t${sas_type}\\t${phy}\\t${bay_id}\\t${sas_address}\\t${enclosure_id} + cd .. 
+ let count=${count}+1 +done; + + +echo +echo -------- Statistics ------------------ +echo phy count = ${phy_count} +echo rphy count = ${count} +echo none = ${none_count} +echo smp = ${smp_count} +echo ssp = ${ssp_count} +echo stp = ${stp_count} +echo sata = ${sata_count} +echo total devices = ${dev_count} +cd ${current_path} + + diff --git a/drivers/scsi/mpt3sas/scripts/sdev_attributes.sh b/drivers/scsi/mpt3sas/scripts/sdev_attributes.sh new file mode 100755 index 000000000000..95948b3897f3 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/sdev_attributes.sh @@ -0,0 +1,26 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +cd /sys/class/scsi_device +for i in *; do + echo -e \\n + echo $i + cd $i/device + for f in *; do + if [ -f $f -a -r $f ]; then + echo -n -e \\t "$f:"; + cat $f ; + fi; + done; + cd ../.. +done; +echo -e \\n diff --git a/drivers/scsi/mpt3sas/scripts/set_affinity.sh b/drivers/scsi/mpt3sas/scripts/set_affinity.sh new file mode 100755 index 000000000000..fa2bf00fc628 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/set_affinity.sh @@ -0,0 +1,140 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES +#!/bin/bash + +#set -x + +if [ -f /etc/SuSE-release ] && [ -f /etc/init.d/irq_balancer ] ; then + /etc/init.d/irq_balancer stop +elif [ -f /etc/redhat-release ] && [ -f /etc/init.d/irqbalance ]; then + /etc/init.d/irqbalance stop +else + exit 1; +fi; + +# this is count of the number cpu's we have set the affinity for +cpu_affinity_count=0 + +# This is total number of cores +cpu_count=0 +for i in /sys/devices/system/cpu/cpu[0-9]* ; do + let cpu_count=${cpu_count}+1 +done + +# line below for is for debuging +#cpu_count=24 + +scsi_host="/sys/class/scsi_host" +cd ${scsi_host} +subfolders=`ls -1` +for i in ${subfolders}; do + cd ${scsi_host}/${i}; + if [ `cat proc_name` != "mpt3sas" ]; then + continue; + fi; + if [ ! 
-f reply_queue_count ]; then + echo "reply_queue_count sysfs attribute doesn't exist" + continue; + fi; + if [ `cat reply_queue_count` -lt 2 ]; then + continue; + fi; + ioc=`cat unique_id` + index=0 + affinity=1 + cpu_affinity_count=0 + msix_count=`cat /proc/interrupts | grep -c mpt3sas${ioc}-msix` + if [ ${cpu_count} -gt ${msix_count} ]; then + let grouping=${cpu_count}/${msix_count} + let grouping_mod=${cpu_count}%${msix_count} +# line below for is for debuging +# echo grouping = ${grouping}, grouping_mod = ${grouping_mod} + if [ ${grouping} -lt 2 ]; then + cpu_grouping=2; + elif [ ${grouping} -eq 2 ] && [ ${grouping_mod} -eq 0 ]; then + cpu_grouping=2; + elif [ ${grouping} -lt 4 ]; then + cpu_grouping=4; + elif [ ${grouping} -eq 4 ] && [ ${grouping_mod} -eq 0 ]; then + cpu_grouping=4; + elif [ ${grouping} -lt 8 ]; then + cpu_grouping=8; + elif [ ${grouping} -eq 8 ] && [ ${grouping_mod} -eq 0 ]; then + cpu_grouping=8; + else + cpu_grouping=16; + fi; + else + cpu_grouping=0; + fi; + echo -e "\n" + echo ioc number = ${ioc} + echo number of core processors = ${cpu_count} + echo msix vector count = ${msix_count} + if [ ${cpu_grouping} -eq 0 ] ; then + echo number of cores per msix vector = 1 + else + echo number of cores per msix vector = ${cpu_grouping} + fi; + echo -e "\n" + while [ ${msix_count} -gt ${index} ] && \ + ( [ ${cpu_affinity_count} -lt ${cpu_count} ] || \ + [ ${cpu_affinity_count} -eq ${cpu_count} ] ); do + b=0 + a=`cat /proc/interrupts | grep -w mpt3sas${ioc}-msix${index} | cut -d : -f 1` + irq_number=`basename ${a}` + cd /proc/irq/${irq_number} + if [ ${cpu_grouping} -ne 0 ]; then + loop=1 + calculate_affinity=${affinity} + let cpu_affinity_count=${cpu_affinity_count}+1 + while [ ${loop} -lt ${cpu_grouping} ]; do + let affinity=${affinity}*2 + let calculate_affinity=${calculate_affinity}+${affinity} + let loop=${loop}+1 + let cpu_affinity_count=${cpu_affinity_count}+1 + done; + b=`printf "%x" ${calculate_affinity}` + else + let cpu_affinity_count=${cpu_affinity_count}+1 + b=`printf "%x" ${affinity}` + fi; +# line below for is for debuging +# echo cpu_affinity_count = ${cpu_affinity_count} + if [ ${cpu_affinity_count} -lt ${cpu_count} ] || \ + [ ${cpu_affinity_count} -eq ${cpu_count} ]; then + let length=${#b} + let number_of_words=${length}/8 + let extra_word=${length}%8 + let i=0 + if [ ${extra_word} -gt 0 ]; then + echo -n "${b:0:${extra_word}}" > /tmp/smp_affinity-${index} + if [ ${number_of_words} -gt 0 ]; then + echo -n "," >> /tmp/smp_affinity-${index} + fi + fi + while [ $i -lt ${number_of_words} ]; do + echo -n "${b:${i}*8+${extra_word}:8}" >> /tmp/smp_affinity-${index} + let i=${i}+1 + if [ $i -lt ${number_of_words} ]; then + echo -n "," >> /tmp/smp_affinity-${index} + fi + done + cat /tmp/smp_affinity-${index} > smp_affinity + rm -f /tmp/smp_affinity-${index} +# line below for is for debuging +# echo -e "\tmsix index = ${index}, irq number = ${irq_number}, cpu affinity mask = ${b}" + echo -e "\tmsix index = ${index}, irq number = ${irq_number}, cpu affinity mask = `cat smp_affinity`" + let index=${index}+1 + let affinity=${affinity}*2 + fi; + done + echo -e "\nWe have set affinity for ${index} msix vectors and ${cpu_count} core processors\n" +done diff --git a/drivers/scsi/mpt3sas/scripts/set_device_queue_depth.sh b/drivers/scsi/mpt3sas/scripts/set_device_queue_depth.sh new file mode 100755 index 000000000000..73d4d7871288 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/set_device_queue_depth.sh @@ -0,0 +1,27 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. 
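To make the mask arithmetic in set_affinity.sh above concrete: with 24 cores and 8 MSI-X vectors the script computes grouping = 3, rounds up to cpu_grouping = 4, and pins vector n to cores 4n through 4n+3 until the cores are exhausted. A minimal C sketch of the mask construction (a hypothetical helper, not part of the driver or the script; assumes fewer than 64 CPUs per vector):

#include <stdio.h>
#include <stdint.h>

/* One MSI-X vector gets cpus_per_vector consecutive CPU bits, starting
 * at vector * cpus_per_vector; the script builds the same value by
 * repeatedly doubling its affinity variable. */
static uint64_t affinity_mask(unsigned int vector, unsigned int cpus_per_vector)
{
	uint64_t group = (1ULL << cpus_per_vector) - 1;

	return group << (vector * cpus_per_vector);
}

int main(void)
{
	/* vector 2 with 4 CPUs per vector -> f00, the hex string the
	 * script writes to /proc/irq/<irq>/smp_affinity */
	printf("%llx\n", (unsigned long long)affinity_mask(2, 4));
	return 0;
}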
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+
+if (( $# != 1 )); then
+ echo -e \\n"usage: set_device_queue_depth <queue_depth>"
+ echo -e \\n
+ exit 1
+fi
+
+cd /sys/class/scsi_device
+for i in *; do
+ cd $i/device
+ if [ `cat type` == "0" ]; then
+ echo $1 > queue_depth
+ echo [$i] queue_depth=`cat queue_depth`
+ fi;
+ cd -
+done;
diff --git a/drivers/scsi/mpt3sas/scripts/set_device_timeout.sh b/drivers/scsi/mpt3sas/scripts/set_device_timeout.sh
new file mode 100755
index 000000000000..21173cfc7b76
--- /dev/null
+++ b/drivers/scsi/mpt3sas/scripts/set_device_timeout.sh
@@ -0,0 +1,27 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+
+if (( $# != 1 )); then
+ echo -e \\n"usage: set_device_timeout <timeout>"
+ echo -e \\n
+ exit 1
+fi
+
+cd /sys/class/scsi_device
+for i in *; do
+ cd $i/device
+ if [ `cat type` == "0" ]; then
+ echo $1 > timeout
+ echo [$i] timeout=`cat timeout`
+ fi;
+ cd -
+done;
diff --git a/drivers/scsi/mpt3sas/scripts/set_driver_debug.pl b/drivers/scsi/mpt3sas/scripts/set_driver_debug.pl
new file mode 100755
index 000000000000..db877be2494e
--- /dev/null
+++ b/drivers/scsi/mpt3sas/scripts/set_driver_debug.pl
@@ -0,0 +1,316 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT. 
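The set_driver_debug.pl script that follows lists each MPT_DEBUG_* bit as a menu entry, flips the chosen bit with an XOR, and writes the result back in hex to the host's logging_level (or debug_level) sysfs attribute. The same core operation as a short C illustration (the level and bit values are examples only):

#include <stdio.h>

int main(void)
{
	unsigned int level = 0x00000811;	/* current logging_level, example */
	unsigned int bit = 0x00000200;		/* MPT_DEBUG_REPLY in the mpt3sas table */

	level ^= bit;				/* XOR toggles exactly that bit */
	printf("%X\n", level);			/* the script writes this back via sysfs */
	return 0;
}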
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/usr/bin/perl +use strict; +use warnings; + +# generate the section by running the following (can be shorted more if somebody is bored): +# fgrep "#define MPT_DEBUG" mpt3sas_debug.h | sed 's/#define //;s/\s\s*/ /g;s/\(MPT_DEBUG.*\) \(0x.*\)/\t\t\2 => "\1",/' +# fgrep "#define MPT_DEBUG" mpt3sas_debug.h | sed 's/#define //;s/\s\s*/ /g;s/\(MPT_DEBUG_.*\) \(0x.*\)/\t{level=>"\2",name=>"\1"},/' +my $gen_debug = { + mptsas => [ +{mask=>"0x00000001",name=>"MPT_DEBUG"}, +{mask=>"0x00000002",name=>"MPT_DEBUG_MSG_FRAME"}, +{mask=>"0x00000004",name=>"MPT_DEBUG_SG"}, +{mask=>"0x00000008",name=>"MPT_DEBUG_EVENTS"}, +{mask=>"0x00000010",name=>"MPT_DEBUG_VERBOSE_EVENTS"}, +{mask=>"0x00000020",name=>"MPT_DEBUG_INIT"}, +{mask=>"0x00000040",name=>"MPT_DEBUG_EXIT"}, +{mask=>"0x00000080",name=>"MPT_DEBUG_FAIL"}, +{mask=>"0x00000100",name=>"MPT_DEBUG_TM"}, +{mask=>"0x00000200",name=>"MPT_DEBUG_DV"}, +{mask=>"0x00000400",name=>"MPT_DEBUG_REPLY"}, +{mask=>"0x00000800",name=>"MPT_DEBUG_HANDSHAKE"}, +{mask=>"0x00001000",name=>"MPT_DEBUG_CONFIG"}, +{mask=>"0x00002000",name=>"MPT_DEBUG_DL"}, +{mask=>"0x00008000",name=>"MPT_DEBUG_RESET"}, +{mask=>"0x00010000",name=>"MPT_DEBUG_SCSI"}, +{mask=>"0x00020000",name=>"MPT_DEBUG_IOCTL"}, +{mask=>"0x00080000",name=>"MPT_DEBUG_FC"}, +{mask=>"0x00100000",name=>"MPT_DEBUG_SAS"}, +{mask=>"0x00200000",name=>"MPT_DEBUG_SAS_WIDE"}, +{mask=>"0x00400000",name=>"MPT_DEBUG_36GB_MEM"}, + ], + mpt2sasbtm => [ +{mask=>"0x00000001",name=>"MPT_DEBUG"}, +{mask=>"0x00000002",name=>"MPT_DEBUG_MSG_FRAME"}, +{mask=>"0x00000004",name=>"MPT_DEBUG_SG"}, +{mask=>"0x00000008",name=>"MPT_DEBUG_EVENTS"}, +{mask=>"0x00000010",name=>"MPT_DEBUG_EVENT_WORK_TASK"}, +{mask=>"0x00000020",name=>"MPT_DEBUG_INIT"}, +{mask=>"0x00000040",name=>"MPT_DEBUG_EXIT"}, +{mask=>"0x00000080",name=>"MPT_DEBUG_FAIL"}, +{mask=>"0x00000100",name=>"MPT_DEBUG_TM"}, +{mask=>"0x00000200",name=>"MPT_DEBUG_REPLY"}, +{mask=>"0x00000400",name=>"MPT_DEBUG_HANDSHAKE"}, +{mask=>"0x00000800",name=>"MPT_DEBUG_CONFIG"}, +{mask=>"0x00001000",name=>"MPT_DEBUG_DL"}, +{mask=>"0x00002000",name=>"MPT_DEBUG_RESET"}, +{mask=>"0x00004000",name=>"MPT_DEBUG_SCSI"}, +{mask=>"0x00008000",name=>"MPT_DEBUG_IOCTL"}, +{mask=>"0x00020000",name=>"MPT_DEBUG_SAS"}, +{mask=>"0x00040000",name=>"MPT_DEBUG_HOST_MAPPING"}, +{mask=>"0x00080000",name=>"MPT_DEBUG_TASK_SET_FULL"}, + ], + mpt2sas => [ +{mask=>"0x00000001",name=>"MPT_DEBUG"}, +{mask=>"0x00000002",name=>"MPT_DEBUG_MSG_FRAME"}, +{mask=>"0x00000004",name=>"MPT_DEBUG_SG"}, +{mask=>"0x00000008",name=>"MPT_DEBUG_EVENTS"}, +{mask=>"0x00000010",name=>"MPT_DEBUG_EVENT_WORK_TASK"}, +{mask=>"0x00000020",name=>"MPT_DEBUG_INIT"}, +{mask=>"0x00000040",name=>"MPT_DEBUG_EXIT"}, +{mask=>"0x00000080",name=>"MPT_DEBUG_FAIL"}, +{mask=>"0x00000100",name=>"MPT_DEBUG_TM"}, +{mask=>"0x00000200",name=>"MPT_DEBUG_REPLY"}, +{mask=>"0x00000400",name=>"MPT_DEBUG_HANDSHAKE"}, +{mask=>"0x00000800",name=>"MPT_DEBUG_CONFIG"}, +{mask=>"0x00001000",name=>"MPT_DEBUG_DL"}, +{mask=>"0x00002000",name=>"MPT_DEBUG_RESET"}, 
+{mask=>"0x00004000",name=>"MPT_DEBUG_SCSI"}, +{mask=>"0x00008000",name=>"MPT_DEBUG_IOCTL"}, +{mask=>"0x00020000",name=>"MPT_DEBUG_SAS"}, +{mask=>"0x00040000",name=>"MPT_DEBUG_TRANSPORT"}, +{mask=>"0x00080000",name=>"MPT_DEBUG_TASK_SET_FULL"}, +{mask=>"0x00100000",name=>"MPT_DEBUG_TARGET_MODE"}, + ], + mpt3sas => [ +{mask=>"0x00000001",name=>"MPT_DEBUG"}, +{mask=>"0x00000002",name=>"MPT_DEBUG_MSG_FRAME"}, +{mask=>"0x00000004",name=>"MPT_DEBUG_SG"}, +{mask=>"0x00000008",name=>"MPT_DEBUG_EVENTS"}, +{mask=>"0x00000010",name=>"MPT_DEBUG_EVENT_WORK_TASK"}, +{mask=>"0x00000020",name=>"MPT_DEBUG_INIT"}, +{mask=>"0x00000040",name=>"MPT_DEBUG_EXIT"}, +{mask=>"0x00000080",name=>"MPT_DEBUG_FAIL"}, +{mask=>"0x00000100",name=>"MPT_DEBUG_TM"}, +{mask=>"0x00000200",name=>"MPT_DEBUG_REPLY"}, +{mask=>"0x00000400",name=>"MPT_DEBUG_HANDSHAKE"}, +{mask=>"0x00000800",name=>"MPT_DEBUG_CONFIG"}, +{mask=>"0x00001000",name=>"MPT_DEBUG_DL"}, +{mask=>"0x00002000",name=>"MPT_DEBUG_RESET"}, +{mask=>"0x00004000",name=>"MPT_DEBUG_SCSI"}, +{mask=>"0x00008000",name=>"MPT_DEBUG_IOCTL"}, +{mask=>"0x00020000",name=>"MPT_DEBUG_SAS"}, +{mask=>"0x00040000",name=>"MPT_DEBUG_TRANSPORT"}, +{mask=>"0x00080000",name=>"MPT_DEBUG_TASK_SET_FULL"}, +{mask=>"0x00100000",name=>"MPT_DEBUG_TARGET_MODE"}, + ] + }; + +while(do_card_menu() >= 0) { +} + +sub do_card_menu +{ + my @hosts = glob("/sys/class/scsi_host/*"); + my @lsi_hosts; + my $next; + my $host; + my $index; + my $option = -1; + my $tmpstr; + my $selected_card = -1; + + printf("\nCard Selection\n"); + foreach $host (@hosts) { + open( FILE, "< ${host}/proc_name") or die "can't open $host/proc_name : $!"; + $tmpstr = ; + if ($tmpstr =~ /mpi*/){ + push(@lsi_hosts, substr($host, 25)); + } + close(FILE); + } + + while ($option < 0) { + $index = 1; + foreach $host (@lsi_hosts) { + open(FILE, "< /sys/class/scsi_host/host${host}/board_name") or die "can't open $host/board_name : $!"; + $tmpstr = ; + chomp($tmpstr); + close(FILE); + printf("%2d. Adapter %2d - %s\n", $index, $host, $tmpstr); + $index++; + } + if (-e "/sys/module/mptbase/parameters/mpt_debug_level") { + printf("%2d. Global mptlinux\n", 97); + } + if (-e "/sys/module/mpt2sas/parameters/logging_level") { + printf("%2d. Global mpt3sas\n", 98); + } + if (-e "/sys/module/mpt2sasbtm/parameters/logging_level") { + printf("%2d. Global mpt3sas\n", 99); + } + if (-e "/sys/module/mpt3sas/parameters/logging_level") { + printf("%2d. 
+sub do_card_menu
+{
+    my @hosts = glob("/sys/class/scsi_host/*");
+    my @lsi_hosts;
+    my $host;
+    my $index;
+    my $option = -1;
+    my $tmpstr;
+    my $selected_card = -1;
+
+    printf("\nCard Selection\n");
+    foreach $host (@hosts) {
+        open(FILE, "< ${host}/proc_name") or die "can't open $host/proc_name : $!";
+        $tmpstr = <FILE>;
+        if ($tmpstr =~ /^mpt/) {
+            push(@lsi_hosts, substr($host, 25));
+        }
+        close(FILE);
+    }
+
+    while ($option < 0) {
+        $index = 1;
+        foreach $host (@lsi_hosts) {
+            open(FILE, "< /sys/class/scsi_host/host${host}/board_name") or die "can't open $host/board_name : $!";
+            $tmpstr = <FILE>;
+            chomp($tmpstr);
+            close(FILE);
+            printf("%2d. Adapter %2d - %s\n", $index, $host, $tmpstr);
+            $index++;
+        }
+        if (-e "/sys/module/mptbase/parameters/mpt_debug_level") {
+            printf("%2d. Global mptlinux\n", 97);
+        }
+        if (-e "/sys/module/mpt2sas/parameters/logging_level") {
+            printf("%2d. Global mpt2sas\n", 98);
+        }
+        if (-e "/sys/module/mpt2sasbtm/parameters/logging_level") {
+            printf("%2d. Global mpt2sasbtm\n", 99);
+        }
+        if (-e "/sys/module/mpt3sas/parameters/logging_level") {
+            printf("%2d. Global mpt3sas\n", 100);
+        }
+        printf("\nSelect a card or global driver [0 to exit] > ");
+        $option = <STDIN>;
+        if (!($option =~ /^\n/) && $option == 0) {
+            $selected_card = -1;
+        } elsif ($option =~ /^\n/ || $option < 0 || ($option >= $index && $option < 97) || $option > 100) {
+            $option = -1;
+        } elsif ($option == 97) {
+            do_global_driver("/sys/module/mptbase/parameters/mpt_debug_level");
+            $selected_card = $option;
+        } elsif ($option == 98) {
+            do_global_driver("/sys/module/mpt2sas/parameters/logging_level");
+            $selected_card = $option;
+        } elsif ($option == 99) {
+            do_global_driver("/sys/module/mpt2sasbtm/parameters/logging_level");
+            $selected_card = $option;
+        } elsif ($option == 100) {
+            do_global_driver("/sys/module/mpt3sas/parameters/logging_level");
+            $selected_card = $option;
+        } else {
+            $selected_card = $lsi_hosts[$option - 1];
+            do_debug_menu($selected_card);
+        }
+    }
+    return $selected_card;
+}
+
+
+sub do_debug_menu
+{
+    my $card = $_[0];
+    my $option = -1;
+    my $curlevel = get_current_debug_level($card);
+    my $gen = get_gen($card);
+    my $index;
+
+    while ($option != 0) {
+        printf("\nDebug Options for Adapter $card \n");
+        $index = 1;
+        foreach my $hash (@{$gen_debug->{$gen}}) {
+            printf("%2d. %-28s %s: %s\n", $index, $hash->{name}, $hash->{mask}, ($curlevel & hex($hash->{mask})) ? "ON" : "OFF");
+            $index++;
+        }
+        printf("\n100. Set Hex Debug Value 0x%08X\n", $curlevel);
+        printf("\nToggle debug option [1-%d, 100 or 0 to exit] > ", $index - 1);
+
+        $option = <STDIN>;
+        if ($option =~ /^\n/ || $option < 0 || ($option >= $index && $option < 100) || $option > 100) {
+            $option = -1;
+        } elsif ($option == 0) {
+            # fall through; the while condition ends the menu
+        } elsif ($option == 100) {
+            my $tmp_level;
+            printf("\nEnter desired hex debug value (ie 0x811) > ");
+            $tmp_level = <STDIN>;
+            chomp($tmp_level);
+            if ($tmp_level =~ /0x/) {
+                $curlevel = hex($tmp_level);
+            } else {
+                $curlevel = $tmp_level;
+            }
+            set_debug_level($card, $curlevel);
+        } else {
+            $curlevel = $curlevel ^ hex($gen_debug->{$gen}[$option - 1]{mask});
+            set_debug_level($card, $curlevel);
+        }
+    }
+}
+
+sub get_gen
+{
+    my $card = $_[0];
+    my $file = "/sys/class/scsi_host/host${card}/proc_name";
+    my $gen;
+
+    open(FILE, "< $file") or die "can't open $file : $!";
+    $gen = <FILE>;
+    close(FILE);
+    chomp($gen);
+    return $gen;
+}
+
+sub set_debug_level
+{
+    my $card = $_[0];
+    my $level = $_[1];
+    my $file = "/sys/class/scsi_host/host${card}";
+
+    if (-e "$file/debug_level") {
+        $file .= "/debug_level";
+    } elsif (-e "$file/logging_level") {
+        $file .= "/logging_level";
+    } else {
+        print("Unable to find logging or debug file\n");
+        exit(1);
+    }
+
+    open(FILE, "> $file") or die "can't open $file : $!";
+
+    printf(FILE "%X\n", $level);
+
+    close(FILE);
+}
+
+sub get_current_debug_level
+{
+    my $card = $_[0];
+    my $file = "/sys/class/scsi_host/host${card}";
+    my $current_level = 0;
+    my $input;
+
+    if (-e "$file/debug_level") {
+        $file .= "/debug_level";
+    } elsif (-e "$file/logging_level") {
+        $file .= "/logging_level";
+    } else {
+        print("Unable to find logging or debug file\n");
+        exit(1);
+    }
+
+    open(FILE, "< $file") or die "can't open $file : $!";
+
+    $input = <FILE>;
+    $input = substr($input, 0, 8);
+    $current_level = hex($input);
+
+    close(FILE);
+
+    return $current_level;
+}
+
+
+sub do_global_driver
+{
+    my $file = shift;
+    my $input;
+    my $current_level;
+    my $tmp_level;
+
+    open(FILE, "< $file") or die "can't open $file : $!";
+
+    $input = <FILE>;
+    chomp($input);
+    $current_level = $input;
+
+    close(FILE);
+
+    printf("\nCurrent Debug Level 0x%08X\n", $current_level);
+    printf("\nEnter desired hex debug value (ie 0x811) > ");
+    $tmp_level = <STDIN>;
+    chomp($tmp_level);
+    if ($tmp_level =~ /0x/) {
+        $current_level = hex($tmp_level);
+    } else {
+        $current_level = $tmp_level;
+    }
+
+    open(FILE, "> $file") or die "can't open $file : $!";
+    printf(FILE "0x%X\n", $current_level);
+    close(FILE);
+
+    return 0;
+}
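Under the menus, set_driver_debug.pl only ever writes a hex mask to one sysfs file. A minimal non-interactive sketch of the same write for mpt3sas hosts (assuming, as the Perl code does, that the attribute takes hex digits without a 0x prefix):

    #!/bin/bash
    # enable MPT_DEBUG_INIT (0x20) and MPT_DEBUG_FAIL (0x80) on all mpt3sas hosts
    level=A0
    for host in /sys/class/scsi_host/host*; do
            [ "$(cat "$host/proc_name")" = "mpt3sas" ] || continue
            echo "$level" > "$host/logging_level"
            echo "${host##*/}: logging_level=$(cat "$host/logging_level")"
    done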
diff --git a/drivers/scsi/mpt3sas/scripts/set_logging_level.sh b/drivers/scsi/mpt3sas/scripts/set_logging_level.sh
new file mode 100755
index 000000000000..13709caaa6f7
--- /dev/null
+++ b/drivers/scsi/mpt3sas/scripts/set_logging_level.sh
@@ -0,0 +1,32 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+#set -x
+if (( $# != 1 )); then
+    echo -e \\n"usage: set_logging_level <logging_level>"
+    echo -e \\n
+    exit 1
+fi
+
+scsi_host="/sys/class/scsi_host"
+cd ${scsi_host}
+subfolders=`ls -1`
+for i in ${subfolders}; do
+    cd ${i}
+    if [ `cat proc_name` != "mpt3sas" ]; then
+        cd ${scsi_host}
+        continue;
+    fi;
+    echo $1 > logging_level
+    logging_level=`cat logging_level`
+    echo for ${i} logging_level=$logging_level
+    cd ${scsi_host}
+done;
diff --git a/drivers/scsi/mpt3sas/scripts/shost_attributes.pl b/drivers/scsi/mpt3sas/scripts/shost_attributes.pl
new file mode 100755
index 000000000000..e12f645187d5
--- /dev/null
+++ b/drivers/scsi/mpt3sas/scripts/shost_attributes.pl
@@ -0,0 +1,61 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/usr/bin/perl + +#main +my($path) = "/sys/class/scsi_host"; +my(@line); +my($shost); +my($proc_name); + +chdir($path); +@line = `ls -1`; + +foreach (@line) { + chomp; + $shost = join("/", $path, $_); + $proc_name = `cat $shost/proc_name`; + chomp $proc_name; + &print_shost($shost, $_) if ($proc_name eq 'mptspi'); + &print_shost($shost, $_) if ($proc_name eq 'mptsas'); + &print_shost($shost, $_) if ($proc_name eq 'mpt2sas'); + &print_shost($shost, $_) if ($proc_name eq 'mpt3sas'); +} + +# subroutine: print_shost +sub print_shost +{ + my($shost_path, $shost_name) = @_; + my($attribute); + my(@line); + + chdir($shost_path); + print $shost_name, "\n"; + @line = `ls -1`; + foreach $attribute (@line) { + chomp $attribute; + &print_attribute($attribute) if (-f $attribute); + } +} + +#subroutine: print_attribute +sub print_attribute +{ + my($attribute_name) = @_; + my($contents); + + return if ($attribute_name eq 'scan'); + return if ($attribute_name eq 'uevent'); + + $contents = `cat $attribute_name`; + print "\t$attribute_name = $contents"; +} + diff --git a/drivers/scsi/mpt3sas/scripts/shost_attributes.sh b/drivers/scsi/mpt3sas/scripts/shost_attributes.sh new file mode 100755 index 000000000000..6c849f57d024 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/shost_attributes.sh @@ -0,0 +1,25 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +cd /sys/class/scsi_host +for i in *; do + echo -e \\n + echo $i + cd $i + for f in *; do + if [ -f $f -a -r $f ]; then + echo -n -e \\t "$f:"; + cat $f ; + fi; + done; + cd .. +done; diff --git a/drivers/scsi/mpt3sas/scripts/statistics.sh b/drivers/scsi/mpt3sas/scripts/statistics.sh new file mode 100755 index 000000000000..8759f48dbe3a --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/statistics.sh @@ -0,0 +1,30 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. 
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +while true ; do + cd /sys/class/scsi_device + for i in `ls -1 .`; do + iodone_cnt=`cat ${i}/device/iodone_cnt` + iorequest_cnt=`cat ${i}/device/iorequest_cnt` + state=`cat ${i}/device/state` + let pending_count=${iorequest_cnt}-${iodone_cnt} + echo ${i}: pending = ${pending_count}, state = ${state} + done + echo -e \\n + cd /sys/class/scsi_host + for i in `ls -1 .`; do + host_busy=`cat ${i}/host_busy` + echo ${i}: host_busy = ${host_busy} + done + sleep 1 + clear +done diff --git a/drivers/scsi/mpt3sas/scripts/task_management.sh b/drivers/scsi/mpt3sas/scripts/task_management.sh new file mode 100755 index 000000000000..3613a0b9c171 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/task_management.sh @@ -0,0 +1,38 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +DR=1 +MUR=2 +TASKTYPE_ABORT_TASK=3 +TASKTYPE_TARGET_RESET=4 +TASKTYPE_LOGICAL_UNIT_RESET=5 +TASKTYPE_ABRT_TASK_SET=6 + +dmesg -c > /dev/null +clear +scsi_host="/sys/class/scsi_host" +cd ${scsi_host} +subfolders=`ls -1` +for i in ${subfolders}; do + cd ${i}; + if [ `cat proc_name` == "mpt3sas" ]; then +# echo ${TASKTYPE_ABORT_TASK} > task_management + echo ${TASKTYPE_TARGET_RESET} > task_management +# echo ${TASKTYPE_LOGICAL_UNIT_RESET} > task_management +# echo ${TASKTYPE_ABRT_TASK_SET} > task_management +# echo ${MUR} > task_management +# echo ${DR} > task_management + fi; + cd ${scsi_host} +done; +echo -e \\n +dmesg diff --git a/drivers/scsi/mpt3sas/trace_buffer/kill_tb.sh b/drivers/scsi/mpt3sas/trace_buffer/kill_tb.sh new file mode 100755 index 000000000000..761117cf01f3 --- /dev/null +++ b/drivers/scsi/mpt3sas/trace_buffer/kill_tb.sh @@ -0,0 +1,13 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. 
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+
+kill -s HUP `ps -ef | egrep start_tb.sh | grep bash | awk '{print $2}'`
diff --git a/drivers/scsi/mpt3sas/trace_buffer/pull_trace_buffer.sh b/drivers/scsi/mpt3sas/trace_buffer/pull_trace_buffer.sh
new file mode 100755
index 000000000000..15360d3391d5
--- /dev/null
+++ b/drivers/scsi/mpt3sas/trace_buffer/pull_trace_buffer.sh
@@ -0,0 +1,38 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+
+#set -x
+scsi_host="/sys/class/scsi_host"
+page_size=4095
+offset=0;
+total_size=0;
+CWD=`pwd`;
+file_name='trace_buffer'
+
+cd ${scsi_host};
+subfolders=`ls -1`;
+for i in ${subfolders}; do
+    cd ${i};
+    if [ `cat proc_name` != "mpt3sas" ]; then
+        cd -;
+        continue;
+    fi;
+    rm -f ${CWD}/${i}.${file_name};
+    echo release > host_trace_buffer_enable;
+    total_size=`cat host_trace_buffer_size`;
+    offset=0;
+    while [ "${offset}" -lt "${total_size}" ] ; do
+        echo $offset > host_trace_buffer;
+        cat host_trace_buffer >> ${CWD}/${i}.${file_name};
+        let offset=${offset}+${page_size};
+    done;
+    cd -;
+done;
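The three trace-buffer helpers are meant to be used together: start_tb.sh (added next) posts the buffers and polls for diagnostic resets, kill_tb.sh stops it via SIGHUP (forcing a final dump), and pull_trace_buffer.sh grabs a one-shot snapshot. A typical session, assuming the adapter is host0 and the scripts are run from the trace_buffer directory:

    ./start_tb.sh 0 &        # post buffers and poll host0 for firmware resets
    # ... reproduce the problem ...
    ./kill_tb.sh             # SIGHUP the poller; it dumps the buffer and exits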
diff --git a/drivers/scsi/mpt3sas/trace_buffer/start_tb.sh b/drivers/scsi/mpt3sas/trace_buffer/start_tb.sh
new file mode 100755
index 000000000000..400ddbc5e63b
--- /dev/null
+++ b/drivers/scsi/mpt3sas/trace_buffer/start_tb.sh
@@ -0,0 +1,173 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+#
+#
+
+#debugging parameters
+# shopt -o -s xtrace
+
+# global parameters
+declare -r SCRIPT=${0##*/}
+declare -r SCSI_HOST="/sys/class/scsi_host"
+declare -ir PAGE_SIZE=4095
+declare -r CURRENT_PATH=$PWD
+declare -r FILE_NAME="trace_buffer"
+declare -ir HOST_NUMBER=$1
+declare -i IOC_RESET_COUNT=0
+
+# release_buffers: turn OFF trace buffers, and flush to host memory
+function release_buffers()
+{
+    declare -r CURRENT_STATUS=$(cat host_trace_buffer_enable)
+
+    if [ -z "${CURRENT_STATUS}" ] ; then
+        return 0;
+    fi;
+    if [ "${CURRENT_STATUS}" != "release" ] ; then
+        printf "\n%s: releasing trace buffers for host%d\n" ${SCRIPT} \
+            ${HOST_NUMBER}
+        echo release > host_trace_buffer_enable;
+    fi;
+
+    return 0;
+}
+
+# post_buffers: turn ON trace buffers
+function post_buffers()
+{
+    declare -r CURRENT_STATUS=$(cat host_trace_buffer_enable)
+
+    if [ -z "${CURRENT_STATUS}" ] ; then
+        return 0;
+    fi;
+    if [ "${CURRENT_STATUS}" != "post" ] ; then
+        printf "\n%s: posting trace buffers for host%d\n" ${SCRIPT} \
+            ${HOST_NUMBER}
+        echo post > host_trace_buffer_enable;
+    fi;
+
+    return 0;
+}
+
+# dump_trace_buffer: dump trace buffer to file (naming appended with timestamp)
+function dump_trace_buffer()
+{
+    declare DATE=$(date +%Y:%m:%d_%H:%M:%S)
+    declare -i TOTAL_SIZE=$(cat host_trace_buffer_size);
+    declare -i OFFSET=0
+
+    printf "\n"
+    release_buffers
+    UNIQUE_FILE_NAME="${FILE_NAME}_host${HOST_NUMBER}_${DATE}"
+    printf "%s: reading into filename = %s " ${SCRIPT} ${UNIQUE_FILE_NAME}
+    while [ ${OFFSET} -lt ${TOTAL_SIZE} ] ; do
+        echo ${OFFSET} > host_trace_buffer;
+        cat host_trace_buffer >> ${CURRENT_PATH}/${UNIQUE_FILE_NAME}
+        OFFSET=OFFSET+PAGE_SIZE;
+        printf "."
+    done;
+    printf "\ndone\n"
+
+    return 0;
+}
+
+# check_for_diag_reset: check whether sysfs attribute "ioc_reset_count" has incremented
+function check_for_diag_reset()
+{
+    declare -i ioc_reset_count=$(cat ioc_reset_count)
+
+    if [ ${ioc_reset_count} -gt ${IOC_RESET_COUNT} ] ; then
+        IOC_RESET_COUNT=ioc_reset_count
+        return 1
+    fi;
+
+    return 0
+}
+
+# killing_script: called when program is getting killed
+function killing_script()
+{
+    dump_trace_buffer
+    printf "\n%s: script is stopping for host%d!!!\n" \
+        ${SCRIPT} ${HOST_NUMBER}
+    exit 0;
+}
+
+# polling_loop: this is where we will be most of the time
+function polling_loop()
+{
+    trap "killing_script" SIGHUP SIGINT SIGTERM
+
+    # posting buffers if they have not been already
+    post_buffers
+
+    printf "%s: starting polling for reset on host%d" ${SCRIPT} \
+        ${HOST_NUMBER}
+    while true ; do
+        if test ! -d ${SCSI_HOST}/host${HOST_NUMBER} ; then
+            printf "\n%s: stopping script for host%d!!!\n" \
+                ${SCRIPT} ${HOST_NUMBER}
+            exit 0;
+        fi;
+        printf "."
+        check_for_diag_reset
+        if [ $? -ne 0 ] ; then
+            printf "\n"
+            printf "%s: reset has occurred on host%d!!!\n" \
+                ${SCRIPT} ${HOST_NUMBER}
+            dump_trace_buffer
+            post_buffers
+            printf "%s: resuming polling for host%d" ${SCRIPT} \
+                ${HOST_NUMBER}
+        fi;
+        sleep 1
+    done
+    printf "\n"
+}
+
+# main routine
+
+# usage banner
+clear
+if [ $# != 1 ]; then
+    printf "usage: %s <host_number>\n" ${SCRIPT}
+    exit 1
+fi;
+
+# sanity check to make sure folder exists
+if test ! -d ${SCSI_HOST}/host${HOST_NUMBER} ; then
+    printf "%s: FAILURE: host%d controller doesn't exist\n" ${SCRIPT} \
+        ${HOST_NUMBER}
+    exit 1
+fi;
+
+cd ${SCSI_HOST}/host${HOST_NUMBER};
+
+# sanity check to make sure proc_name is mpt3sas
+if [ `cat proc_name` != "mpt3sas" ]; then
+    printf "%s: FAILURE: host%d controller is not mpt3sas\n" ${SCRIPT} \
+        ${HOST_NUMBER}
+    exit 1
+fi;
+
+# delete existing trace buffers
+rm -fr ${CURRENT_PATH}/${FILE_NAME}_*
+
+# initialize the diag reset count
+IOC_RESET_COUNT=$(cat ioc_reset_count)
+
+# call the routine that will be polling for diag resets
+printf "\n"
+polling_loop
+
+cd ${CURRENT_PATH}
+exit 0
diff --git a/drivers/scsi/mpt3sas/uload.sh b/drivers/scsi/mpt3sas/uload.sh
new file mode 100755
index 000000000000..b9f8baae5b8d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/uload.sh
@@ -0,0 +1,18 @@
+# DISCLAIMER OF LIABILITY
+# THIS IS SAMPLE SCRIPT.
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+#!/bin/bash
+#
+# uload.sh: a helper script for unloading the drivers
+#
+
+rmmod mpt3sas
+
+exit 0
\ No newline at end of file
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 73261902d75d..161bf4760eac 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2382,6 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 				" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 				t, status, ts->resp, ts->stat));
+		if (t->slow_task)
+			complete(&t->slow_task->completion);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7259bce85e0e..1fbc5c6c6c14 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -176,6 +176,7 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
 		faddr = ha->flt_region_nvram;
 		if (IS_QLA28XX(ha)) {
+			qla28xx_get_aux_images(vha, &active_regions);
 			if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
 				faddr = ha->flt_region_nvram_sec;
 		}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 99f0a1a08143..cbaf178fc979 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2399,7 +2399,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
 	struct qla_active_regions regions = { };
 	struct active_regions active_regions = { };
 
-	qla28xx_get_aux_images(vha, &active_regions);
+	qla27xx_get_active_image(vha, &active_regions);
 	regions.global_image = active_regions.global;
 
 	if (IS_QLA28XX(ha)) {
diff
--git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 30afc59c1870..7bbff91f8883 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2519,12 +2519,6 @@ qla83xx_fw_dump_failed: /* Driver Debug Functions. */ /****************************************************************************/ -static inline int -ql_mask_match(uint level) -{ - return (level & ql2xextended_error_logging) == level; -} - /* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index bb01b680ce9f..433e95502808 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, struct qla_hw_data *); extern int qla24xx_soft_reset(struct qla_hw_data *); + +static inline int +ql_mask_match(uint level) +{ + return (level & ql2xextended_error_logging) == level; +} diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6ffa9877c28b..c57b95a20688 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -591,19 +591,23 @@ typedef struct srb { */ uint8_t cmd_type; uint8_t pad[3]; - atomic_t ref_count; struct kref cmd_kref; /* need to migrate ref_count over to this */ void *priv; wait_queue_head_t nvme_ls_waitq; struct fc_port *fcport; struct scsi_qla_host *vha; unsigned int start_timer:1; + unsigned int abort:1; + unsigned int aborted:1; + unsigned int completed:1; + uint32_t handle; uint16_t flags; uint16_t type; const char *name; int iocbs; struct qla_qpair *qpair; + struct srb *cmd_sp; struct list_head elem; u32 gen1; /* scratch */ u32 gen2; /* scratch */ @@ -2397,6 +2401,8 @@ typedef struct fc_port { unsigned int id_changed:1; unsigned int scan_needed:1; unsigned int n2n_flag:1; + unsigned int explicit_logout:1; + unsigned int prli_pend_timer:1; struct completion nvme_del_done; uint32_t nvme_prli_service_param; @@ -2423,6 +2429,7 @@ typedef struct fc_port { struct work_struct free_work; struct work_struct reg_work; uint64_t jiffies_at_registration; + unsigned long prli_expired; struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; uint16_t tgt_id; @@ -4816,6 +4823,9 @@ struct sff_8247_a0 { ha->current_topology == ISP_CFG_N || \ !ha->current_topology) +#define PRLI_PHASE(_cls) \ + ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP)) + #include "qla_target.h" #include "qla_gbl.h" #include "qla_dbg.h" diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 732bb871c433..dc2366a29665 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1523,6 +1523,10 @@ struct qla_flt_header { #define FLT_REG_NVRAM_SEC_28XX_1 0x10F #define FLT_REG_NVRAM_SEC_28XX_2 0x111 #define FLT_REG_NVRAM_SEC_28XX_3 0x113 +#define FLT_REG_MPI_PRI_28XX 0xD3 +#define FLT_REG_MPI_SEC_28XX 0xF0 +#define FLT_REG_PEP_PRI_28XX 0xD1 +#define FLT_REG_PEP_SEC_28XX 0xF1 struct qla_flt_region { uint16_t code; diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5298ed10059f..84bb4a048016 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3005,7 +3005,7 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (res == QLA_FUNCTION_TIMEOUT) - return; + goto done; 
if (res == (DID_ERROR << 16)) { /* entry status error */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 1d041313ec52..ac4c47fc5f4c 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -101,8 +101,22 @@ static void qla24xx_abort_iocb_timeout(void *data) u32 handle; unsigned long flags; + if (sp->cmd_sp) + ql_dbg(ql_dbg_async, sp->vha, 0x507c, + "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n", + sp->cmd_sp->handle, sp->cmd_sp->type, + sp->handle, sp->type); + else + ql_dbg(ql_dbg_async, sp->vha, 0x507c, + "Abort timeout 2 - hdl=%x, type=%x\n", + sp->handle, sp->type); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { + if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] == + sp->cmd_sp)) + qpair->req->outstanding_cmds[handle] = NULL; + /* removing the abort */ if (qpair->req->outstanding_cmds[handle] == sp) { qpair->req->outstanding_cmds[handle] = NULL; @@ -111,6 +125,9 @@ static void qla24xx_abort_iocb_timeout(void *data) } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + if (sp->cmd_sp) + sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); + abt->u.abt.comp_status = CS_TIMEOUT; sp->done(sp, QLA_OS_TIMER_EXPIRED); } @@ -142,6 +159,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) sp->type = SRB_ABT_CMD; sp->name = "abort"; sp->qpair = cmd_sp->qpair; + sp->cmd_sp = cmd_sp; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; @@ -516,6 +534,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; + fcport->disc_state = DSC_LOGIN_PEND; return qla2x00_post_work(vha, e); } @@ -667,7 +686,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, port_id_t id; u64 wwn; u16 data[2]; - u8 current_login_state; + u8 current_login_state, nvme_cls; fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, @@ -726,10 +745,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, loop_id = le16_to_cpu(e->nport_handle); loop_id = (loop_id & 0x7fff); - if (fcport->fc4f_nvme) - current_login_state = e->current_login_state >> 4; - else - current_login_state = e->current_login_state & 0xf; + nvme_cls = e->current_login_state >> 4; + current_login_state = e->current_login_state & 0xf; + + if (PRLI_PHASE(nvme_cls)) { + current_login_state = nvme_cls; + fcport->fc4_type &= ~FS_FC4TYPE_FCP; + fcport->fc4_type |= FS_FC4TYPE_NVME; + } else if (PRLI_PHASE(current_login_state)) { + fcport->fc4_type |= FS_FC4TYPE_FCP; + fcport->fc4_type &= ~FS_FC4TYPE_NVME; + } ql_dbg(ql_dbg_disc, vha, 0x20e2, @@ -1135,19 +1161,18 @@ static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res) "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", sp->name, res, fcport->port_name, mb[1], mb[2]); - if (res == QLA_FUNCTION_TIMEOUT) { - dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, - sp->u.iocb_cmd.u.mbx.in_dma); - return; - } - fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + if (res == QLA_FUNCTION_TIMEOUT) + goto done; + memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; ea.sp = sp; qla24xx_handle_gpdb_event(vha, &ea); +done: dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, sp->u.iocb_cmd.u.mbx.in_dma); @@ -1201,12 +1226,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online) + if (!vha->flags.online) { + ql_dbg(ql_dbg_disc, vha, 0xffff, 
"%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); return rval; + } - if (fcport->fw_login_state == DSC_LS_PLOGI_PEND || - fcport->fw_login_state == DSC_LS_PRLI_PEND) + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || + fcport->fw_login_state == DSC_LS_PRLI_PEND) && + qla_dual_mode_enabled(vha)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); return rval; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -1584,6 +1616,10 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) break; default: if (fcport->login_pause) { + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -4830,6 +4866,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_WORK(&fcport->free_work, qlt_free_session_done); INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); @@ -4908,14 +4945,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) set_bit(RSCN_UPDATE, &flags); clear_bit(LOCAL_LOOP_UPDATE, &flags); - } else if (ha->current_topology == ISP_CFG_N) { - clear_bit(RSCN_UPDATE, &flags); - if (qla_tgt_mode_enabled(vha)) { - /* allow the other side to start the login */ - clear_bit(LOCAL_LOOP_UPDATE, &flags); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } - } else if (ha->current_topology == ISP_CFG_NL) { + } else if (ha->current_topology == ISP_CFG_NL || + ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || @@ -5032,7 +5063,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb, sizeof(ha->plogi_els_payld.data)); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } else { ql_dbg(ql_dbg_init, vha, 0x00d1, "PLOGI ELS param read fail.\n"); @@ -5879,8 +5909,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || - (fcport->flags & FCF_LOGIN_NEEDED) == 0) + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; if (fcport->scan_state == QLA_FCPORT_SCAN) { @@ -5903,7 +5932,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) } } - if (fcport->scan_state == QLA_FCPORT_FOUND) + if (fcport->scan_state == QLA_FCPORT_FOUND && + (fcport->flags & FCF_LOGIN_NEEDED) != 0) qla24xx_fcport_handle_login(vha, fcport); } return (rval); @@ -9003,8 +9033,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) struct qla_hw_data *ha = qpair->hw; qpair->delete_in_progress = 1; - while (atomic_read(&qpair->ref_count)) - msleep(500); ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 518eb954cf42..bdf1994251b9 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2405,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) static void qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) { + u16 control_flags = LCF_COMMAND_LOGO; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; - logio->control_flags = - cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); - if (!sp->fcport->keep_nport_handle) - logio->control_flags |= 
cpu_to_le16(LCF_FREE_NPORT); + + if (sp->fcport->explicit_logout) { + control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; + } else { + control_flags |= LCF_IMPL_LOGO; + + if (!sp->fcport->keep_nport_handle) + control_flags |= LCF_FREE_NPORT; + } + + logio->control_flags = cpu_to_le16(control_flags); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; @@ -2676,7 +2684,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI ELS IOCB:\n"); ql_dump_buffer(ql_log_info, vha, 0x0109, - (uint8_t *)els_iocb, 0x70); + (uint8_t *)els_iocb, + sizeof(*els_iocb)); } else { els_iocb->control_flags = 1 << 13; els_iocb->tx_byte_count = @@ -2842,7 +2851,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, - (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); + (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, + sizeof(*elsio->u.els_plogi.els_plogi_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 009fd5a33fcd..3e9c5768815e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1061,8 +1061,6 @@ global_port_update: ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); - - qlt_async_event(mb[0], vha, mb); break; } @@ -1079,8 +1077,6 @@ global_port_update: set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); - - qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ @@ -1901,6 +1897,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, inbuf = (uint32_t *)&sts->nvme_ersp_data; outbuf = (uint32_t *)fd->rspaddr; iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); + if (unlikely(iocb->u.nvme.rsp_pyld_len > + sizeof(struct nvme_fc_ersp_iu))) { + if (ql_mask_match(ql_dbg_io)) { + WARN_ONCE(1, "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + ql_log(ql_log_warn, fcport->vha, 0x5100, + "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + } + iocb->u.nvme.rsp_pyld_len = + sizeof(struct nvme_fc_ersp_iu); + } iter = iocb->u.nvme.rsp_pyld_len >> 2; for (; iter; iter--) *outbuf++ = swab32(*inbuf++); @@ -2466,6 +2474,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } + if (sp->abort) + sp->aborted = 1; + else + sp->completed = 1; + if (sp->cmd_type != TYPE_SRB) { req->outstanding_cmds[handle] = NULL; ql_dbg(ql_dbg_io, vha, 0x3015, @@ -3624,7 +3637,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) skip_msix: ql_log(ql_log_info, vha, 0x0037, - "Falling back-to MSI mode -%d.\n", ret); + "Falling back-to MSI mode -- ret=%d.\n", ret); if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && @@ -3632,13 +3645,13 @@ skip_msix: goto skip_msi; ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); - if (!ret) { + if (ret > 0) { ql_dbg(ql_dbg_init, vha, 0x0038, "MSI: Enabled.\n"); ha->flags.msi_enabled = 1; } else ql_log(ql_log_warn, vha, 0x0039, - "Falling back-to INTa mode -- %d.\n", ret); + "Falling back-to INTa mode -- ret=%d.\n", 
ret); skip_msi: /* Skip INTx on ISP82xx. */ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 4a1f21c11758..1ef8907314e5 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3920,6 +3920,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, vha->d_id.b24 = 0; vha->d_id.b.al_pa = 1; ha->flags.n2n_bigger = 1; + ha->flags.n2n_ae = 0; id.b.al_pa = 2; ql_dbg(ql_dbg_async, vha, 0x5075, @@ -3930,6 +3931,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, "Format 1: Remote login - Waiting for WWPN %8phC.\n", rptid_entry->u.f1.port_name); ha->flags.n2n_bigger = 0; + ha->flags.n2n_ae = 1; } qla24xx_post_newsess_work(vha, &id, rptid_entry->u.f1.port_name, @@ -3941,7 +3943,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, /* if our portname is higher then initiate N2N login */ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); - ha->flags.n2n_ae = 1; return; break; case TOPO_FL: @@ -6150,9 +6151,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, mcp->mb[7] = LSW(MSD(req_dma)); mcp->mb[8] = MSW(addr); /* Setting RAM ID to valid */ - mcp->mb[10] |= BIT_7; /* For MCTP RAM ID is 0x40 */ - mcp->mb[10] |= 0x40; + mcp->mb[10] = BIT_7 | 0x40; mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| MBX_0; @@ -6287,17 +6287,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) case QLA_SUCCESS: ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", __func__, sp->name); - sp->free(sp); break; default: ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", __func__, sp->name, rval); - sp->free(sp); break; } - return rval; - done_free_sp: sp->free(sp); done: diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 238240984bc1..eabc5127174e 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -946,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); if (!sp) - goto done; + return rval; sp->type = SRB_CTRL_VP; sp->name = "ctrl_vp"; @@ -962,7 +962,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) ql_dbg(ql_dbg_async, vha, 0xffff, "%s: %s Failed submission. %x.\n", __func__, sp->name, rval); - goto done_free_sp; + goto done; } ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n", @@ -980,16 +980,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) case QLA_SUCCESS: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n", __func__, sp->name); - goto done_free_sp; + break; default: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. 
%x.\n", __func__, sp->name, rval); - goto done_free_sp; + break; } done: - return rval; - -done_free_sp: sp->free(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 6cc19e060afc..bfcd02fdf2b8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -224,8 +224,8 @@ static void qla_nvme_abort_work(struct work_struct *work) if (ha->flags.host_shutting_down) { ql_log(ql_log_info, sp->fcport->vha, 0xffff, - "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n", - __func__, sp, sp->type, atomic_read(&sp->ref_count)); + "%s Calling done on sp: %p, type: 0x%x\n", + __func__, sp, sp->type); sp->done(sp, 0); goto out; } @@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) } static struct nvme_fc_port_template qla_nvme_fc_transport = { + .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 2b2028f2383e..c855d013ba8a 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha) return (u8 *)&ha->hablob->fw->data[offset]; } -static __le32 -qla82xx_get_fw_size(struct qla_hw_data *ha) +static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; @@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha) return cpu_to_le32(uri_desc->size); } - return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); + return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * @@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha) } flashaddr = FLASH_ADDR_START; - size = (__force u32)qla82xx_get_fw_size(ha) / 8; + size = qla82xx_get_fw_size(ha) / 8; ptr64 = (u64 *)qla82xx_get_fw_offs(ha); for (i = 0; i < size; i++) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 726ad4cbf4a6..06037e3c7854 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -698,11 +698,6 @@ void qla2x00_sp_compl(srb_t *sp, int res) struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; - if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) - return; - - atomic_dec(&sp->ref_count); - sp->free(sp); cmd->result = res; CMD_SP(cmd) = NULL; @@ -794,11 +789,6 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res) struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; - if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) - return; - - atomic_dec(&sp->ref_count); - sp->free(sp); cmd->result = res; CMD_SP(cmd) = NULL; @@ -903,7 +893,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; - atomic_set(&sp->ref_count, 1); + CMD_SP(cmd) = (void *)sp; sp->free = qla2x00_sp_free_dma; sp->done = qla2x00_sp_compl; @@ -985,18 +975,16 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; - atomic_set(&sp->ref_count, 1); CMD_SP(cmd) = (void *)sp; sp->free = qla2xxx_qpair_sp_free_dma; sp->done = qla2xxx_qpair_sp_compl; - sp->qpair = qpair; rval = ha->isp_ops->start_scsi_mq(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); if (rval == QLA_INTERFACE_ERROR) - goto 
qc24_fail_command; + goto qc24_free_sp_fail_command; goto qc24_host_busy_free_sp; } @@ -1008,6 +996,11 @@ qc24_host_busy_free_sp: qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; +qc24_free_sp_fail_command: + sp->free(sp); + CMD_SP(cmd) = NULL; + qla2xxx_rel_qpair_sp(sp->qpair, sp); + qc24_fail_command: cmd->scsi_done(cmd); @@ -1184,16 +1177,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) return return_status; } -static int -sp_get(struct srb *sp) -{ - if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count)) - /* kref get fail */ - return ENXIO; - else - return 0; -} - #define ISP_REG_DISCONNECT 0xffffffffU /************************************************************************** * qla2x00_isp_reg_stat @@ -1249,6 +1232,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) uint64_t lun; int rval; struct qla_hw_data *ha = vha->hw; + uint32_t ratov_j; + struct qla_qpair *qpair; + unsigned long flags; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8042, @@ -1261,13 +1247,26 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) return ret; sp = scsi_cmd_priv(cmd); + qpair = sp->qpair; - if (sp->fcport && sp->fcport->deleted) + if ((sp->fcport && sp->fcport->deleted) || !qpair) return SUCCESS; - /* Return if the command has already finished. */ - if (sp_get(sp)) + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + if (sp->completed) { + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return SUCCESS; + } + + if (sp->abort || sp->aborted) { + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + return FAILED; + } + + sp->abort = 1; + sp->comp = ∁ + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + id = cmd->device->id; lun = cmd->device->lun; @@ -1276,47 +1275,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", vha->host_no, id, lun, sp, cmd, sp->handle); + /* + * Abort will release the original Command/sp from FW. Let the + * original command call scsi_done. In return, he will wakeup + * this sleeping thread. + */ rval = ha->isp_ops->abort_command(sp); + ql_dbg(ql_dbg_taskm, vha, 0x8003, "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); + /* Wait for the command completion. */ + ratov_j = ha->r_a_tov/10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); switch (rval) { case QLA_SUCCESS: - /* - * The command has been aborted. That means that the firmware - * won't report a completion. - */ - sp->done(sp, DID_ABORT << 16); - ret = SUCCESS; - break; - case QLA_FUNCTION_PARAMETER_ERROR: { - /* Wait for the command completion. */ - uint32_t ratov = ha->r_a_tov/10; - uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000); - - WARN_ON_ONCE(sp->comp); - sp->comp = ∁ if (!wait_for_completion_timeout(&comp, ratov_j)) { ql_dbg(ql_dbg_taskm, vha, 0xffff, "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", - __func__, ha->r_a_tov); + __func__, ha->r_a_tov/10); ret = FAILED; } else { ret = SUCCESS; } break; - } default: - /* - * Either abort failed or abort and completion raced. Let - * the SCSI core retry the abort in the former case. 
- */ ret = FAILED; break; } sp->comp = NULL; - atomic_dec(&sp->ref_count); + ql_log(ql_log_info, vha, 0x801c, "Abort command issued nexus=%ld:%d:%llu -- %x.\n", vha->host_no, id, lun, ret); @@ -1708,32 +1697,53 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; int rval; + bool ret_cmd; + uint32_t ratov_j; - if (sp_get(sp)) + if (qla2x00_chip_is_down(vha)) { + sp->done(sp, res); return; + } if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !qla2x00_isp_reg_stat(ha))) { - sp->comp = ∁ - spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); - rval = ha->isp_ops->abort_command(sp); + if (sp->comp) { + sp->done(sp, res); + return; + } + sp->comp = ∁ + sp->abort = 1; + spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); + + rval = ha->isp_ops->abort_command(sp); + /* Wait for command completion. */ + ret_cmd = false; + ratov_j = ha->r_a_tov/10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); switch (rval) { case QLA_SUCCESS: - sp->done(sp, res); + if (wait_for_completion_timeout(&comp, ratov_j)) { + ql_dbg(ql_dbg_taskm, vha, 0xffff, + "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", + __func__, ha->r_a_tov/10); + ret_cmd = true; + } + /* else FW return SP to driver */ break; - case QLA_FUNCTION_PARAMETER_ERROR: - wait_for_completion(&comp); + default: + ret_cmd = true; break; } spin_lock_irqsave(qp->qp_lock_ptr, *flags); - sp->comp = NULL; + if (ret_cmd && (!sp->completed || !sp->aborted)) + sp->done(sp, res); + } else { + sp->done(sp, res); } - - atomic_dec(&sp->ref_count); } static void @@ -1755,7 +1765,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - req->outstanding_cmds[cnt] = NULL; switch (sp->cmd_type) { case TYPE_SRB: qla2x00_abort_srb(qp, sp, res, &flags); @@ -1777,6 +1786,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) default: break; } + req->outstanding_cmds[cnt] = NULL; } } spin_unlock_irqrestore(qp->qp_lock_ptr, flags); @@ -4666,7 +4676,8 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->sfp_data = NULL; if (ha->flt) - dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + dma_free_coherent(&ha->pdev->dev, + sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, ha->flt, ha->flt_dma); ha->flt = NULL; ha->flt_dma = 0; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index f2d5115b2d8d..bbe90354f49b 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -847,15 +847,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_sec = start; break; case FLT_REG_FW_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_fw_sec = start; break; case FLT_REG_BOOTLOAD_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_boot_sec = start; break; case FLT_REG_AUX_IMG_PRI_28XX: @@ -2725,8 +2725,11 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Region %x is secure\n", region.code); - if (region.code == FLT_REG_FW || - region.code == FLT_REG_FW_SEC_27XX) { + 
switch (region.code) { + case FLT_REG_FW: + case FLT_REG_FW_SEC_27XX: + case FLT_REG_MPI_PRI_28XX: + case FLT_REG_MPI_SEC_28XX: fw_array = dwptr; /* 1st fw array */ @@ -2757,9 +2760,23 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, buf_size_without_sfub += risc_size; fw_array += risc_size; } - } else { - ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, - "Secure region %x not supported\n", + break; + + case FLT_REG_PEP_PRI_28XX: + case FLT_REG_PEP_SEC_28XX: + fw_array = dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); + risc_attr = be32_to_cpu(fw_array[9]); + + buf_size_without_sfub = risc_size; + fw_array += risc_size; + break; + + default: + ql_log(ql_log_warn + ql_dbg_verbose, vha, + 0xffff, "Secure region %x not supported\n", region.code); rval = QLA_COMMAND_ERROR; goto done; @@ -2880,7 +2897,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, "Sending Secure Flash MB Cmd\n"); rval = qla28xx_secure_flash_update(vha, 0, region.code, buf_size_without_sfub, sfub_dma, - sizeof(struct secure_flash_update_block)); + sizeof(struct secure_flash_update_block) >> 2); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Secure Flash MB Cmd failed %x.", rval); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a06e56224a55..cb8a892e2d39 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1104,6 +1104,7 @@ void qlt_free_session_done(struct work_struct *work) } } + sess->explicit_logout = 0; spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess->free_pending = 0; @@ -1160,7 +1161,6 @@ void qlt_unreg_sess(struct fc_port *sess) sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; - INIT_WORK(&sess->free_work, qlt_free_session_done); queue_work(sess->vha->hw->wq, &sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); @@ -1257,6 +1257,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; spin_unlock_irqrestore(&sess->vha->work_lock, flags); + sess->prli_pend_timer = 0; sess->disc_state = DSC_DELETE_PEND; qla24xx_chk_fcp_state(sess); @@ -1265,7 +1266,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) "Scheduling sess %p for deletion %8phC\n", sess, sess->port_name); - INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } @@ -4804,6 +4804,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha, switch (sess->disc_state) { case DSC_DELETED: + case DSC_LOGIN_PEND: qlt_plogi_ack_unref(vha, pla); break; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 042a24314edc..abe7f79bb789 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) */ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) { + if (!mcmd) + return; INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); } @@ -348,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) target_sess_cmd_list_set_waiting(se_sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sess->explicit_logout = 1; tcm_qla2xxx_put_sess(sess); } diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index dac9a7013208..02636b4785c5 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ 
b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { - dma_free_coherent(&ha->pdev->dev, - sizeof(struct addr_ctrl_blk), - init_fw_cb, init_fw_cb_dma); goto exit_init_fw_cb; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 8c674eca09f1..5504ab11decc 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, ha->queues_dma); - if (ha->fw_dump) + if (ha->fw_dump) vfree(ha->fw_dump); ha->queues_len = 0; @@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) return QLA_SUCCESS; mem_alloc_error_exit: - qla4xxx_mem_free(ha); return QLA_ERROR; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d323523f5f9d..32965ec76965 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void) return -EINVAL; } + if (sdebug_num_tgts < 0) { + pr_err("num_tgts must be >= 0\n"); + return -EINVAL; + } + if (sdebug_guard > 1) { pr_err("guard must be 0 or 1\n"); return -EINVAL; diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 0f17e7dac1b0..ac35c301c792 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -9,7 +9,7 @@ #include #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) -#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9]) +#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8])) static const char * scsi_trace_misc(struct trace_seq *, unsigned char *, int); @@ -18,15 +18,18 @@ static const char * scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba = 0, txlen; lba |= ((cdb[1] & 0x1F) << 16); lba |= (cdb[2] << 8); lba |= cdb[3]; - txlen = cdb[4]; + /* + * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be read (READ(6)) or written (WRITE(6)). + */ + txlen = cdb[4] ? 
cdb[4] : 256; - trace_seq_printf(p, "lba=%llu txlen=%llu", - (unsigned long long)lba, (unsigned long long)txlen); + trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen); trace_seq_putc(p, 0); return ret; @@ -36,17 +39,12 @@ static const char * scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba, txlen; - lba |= (cdb[2] << 24); - lba |= (cdb[3] << 16); - lba |= (cdb[4] << 8); - lba |= cdb[5]; - txlen |= (cdb[7] << 8); - txlen |= cdb[8]; + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be16(&cdb[7]); - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME) @@ -61,19 +59,12 @@ static const char * scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba, txlen; - lba |= (cdb[2] << 24); - lba |= (cdb[3] << 16); - lba |= (cdb[4] << 8); - lba |= cdb[5]; - txlen |= (cdb[6] << 24); - txlen |= (cdb[7] << 16); - txlen |= (cdb[8] << 8); - txlen |= cdb[9]; + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be32(&cdb[6]); - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); trace_seq_putc(p, 0); @@ -84,23 +75,13 @@ static const char * scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u64 lba; + u32 txlen; - lba |= ((u64)cdb[2] << 56); - lba |= ((u64)cdb[3] << 48); - lba |= ((u64)cdb[4] << 40); - lba |= ((u64)cdb[5] << 32); - lba |= (cdb[6] << 24); - lba |= (cdb[7] << 16); - lba |= (cdb[8] << 8); - lba |= cdb[9]; - txlen |= (cdb[10] << 24); - txlen |= (cdb[11] << 16); - txlen |= (cdb[12] << 8); - txlen |= cdb[13]; + lba = get_unaligned_be64(&cdb[2]); + txlen = get_unaligned_be32(&cdb[10]); - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME_16) @@ -115,8 +96,8 @@ static const char * scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; - sector_t lba = 0, txlen = 0; - u32 ei_lbrt = 0; + u64 lba; + u32 ei_lbrt, txlen; switch (SERVICE_ACTION32(cdb)) { case READ_32: @@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) goto out; } - lba |= ((u64)cdb[12] << 56); - lba |= ((u64)cdb[13] << 48); - lba |= ((u64)cdb[14] << 40); - lba |= ((u64)cdb[15] << 32); - lba |= (cdb[16] << 24); - lba |= (cdb[17] << 16); - lba |= (cdb[18] << 8); - lba |= cdb[19]; - ei_lbrt |= (cdb[20] << 24); - ei_lbrt |= (cdb[21] << 16); - ei_lbrt |= (cdb[22] << 8); - ei_lbrt |= cdb[23]; - txlen |= (cdb[28] << 24); - txlen |= (cdb[29] << 16); - txlen |= (cdb[30] << 8); - txlen |= cdb[31]; + lba = get_unaligned_be64(&cdb[12]); + ei_lbrt = get_unaligned_be32(&cdb[20]); + txlen = get_unaligned_be32(&cdb[28]); - trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u", - cmd, (unsigned long long)lba, - (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt); + trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u", + cmd, lba, txlen, cdb[10] >> 5, ei_lbrt); if 
(SERVICE_ACTION32(cdb) == WRITE_SAME_32) trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); @@ -170,7 +137,7 @@ static const char * scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - unsigned int regions = cdb[7] << 8 | cdb[8]; + unsigned int regions = get_unaligned_be16(&cdb[7]); trace_seq_printf(p, "regions=%u", (regions - 8) / 16); trace_seq_putc(p, 0); @@ -182,8 +149,8 @@ static const char * scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; - sector_t lba = 0; - u32 alloc_len = 0; + u64 lba; + u32 alloc_len; switch (SERVICE_ACTION16(cdb)) { case SAI_READ_CAPACITY_16: @@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) goto out; } - lba |= ((u64)cdb[2] << 56); - lba |= ((u64)cdb[3] << 48); - lba |= ((u64)cdb[4] << 40); - lba |= ((u64)cdb[5] << 32); - lba |= (cdb[6] << 24); - lba |= (cdb[7] << 16); - lba |= (cdb[8] << 8); - lba |= cdb[9]; - alloc_len |= (cdb[10] << 24); - alloc_len |= (cdb[11] << 16); - alloc_len |= (cdb[12] << 8); - alloc_len |= cdb[13]; + lba = get_unaligned_be64(&cdb[2]); + alloc_len = get_unaligned_be32(&cdb[10]); - trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, - (unsigned long long)lba, alloc_len); + trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len); out: trace_seq_putc(p, 0); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 417b868d8735..271afea654e2 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -24,6 +24,8 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" +#define ISCSI_SEND_MAX_ALLOWED 10 + #define CREATE_TRACE_POINTS #include <trace/events/iscsi.h> @@ -2945,6 +2947,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) return err; } +static int iscsi_session_has_conns(int sid) +{ + struct iscsi_cls_conn *conn; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if (iscsi_conn_get_sid(conn) == sid) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&connlock, flags); + + return found; +} + static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) @@ -3522,10 +3542,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); - if (session) - transport->destroy_session(session); - else + if (!session) err = -EINVAL; + else if (iscsi_session_has_conns(ev->u.d_session.sid)) + err = -EBUSY; + else + transport->destroy_session(session); break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); @@ -3682,6 +3704,7 @@ iscsi_if_rx(struct sk_buff *skb) struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; + int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || @@ -3712,6 +3735,10 @@ iscsi_if_rx(struct sk_buff *skb) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); + if (err == -EAGAIN && --retries < 0) { + printk(KERN_WARNING "Send reply failed, error %d\n", err); + break; + } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index ef138c57e2a6..182fd25c7c43 100644 ---
a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1391,9 +1391,6 @@ static void sas_expander_release(struct device *dev) struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_expander_device *edev = rphy_to_expander_device(rphy); - if (rphy->q) - blk_cleanup_queue(rphy->q); - put_device(dev->parent); kfree(edev); } @@ -1403,9 +1400,6 @@ static void sas_end_device_release(struct device *dev) struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_end_device *edev = rphy_to_end_device(rphy); - if (rphy->q) - blk_cleanup_queue(rphy->q); - put_device(dev->parent); kfree(edev); } @@ -1634,8 +1628,7 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); - if (rphy->q) - bsg_unregister_queue(rphy->q); + bsg_remove_queue(rphy->q); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index ebb40160539f..6a2f8bacface 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1694,20 +1694,30 @@ static void sd_rescan(struct device *dev) static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; + struct gendisk *disk = bdev->bd_disk; + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdev = sdkp->device; + void __user *p = compat_ptr(arg); int error; + error = scsi_verify_blk_ioctl(bdev, cmd); + if (error < 0) + return error; + error = scsi_ioctl_block_when_processing_errors(sdev, cmd, (mode & FMODE_NDELAY) != 0); if (error) return error; + + if (is_sed_ioctl(cmd)) + return sed_ioctl(sdkp->opal_dev, cmd, p); /* * Let the static ioctl translation table take care of it. */ if (!sdev->host->hostt->compat_ioctl) return -ENOIOCTLCMD; - return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); + return sdev->host->hostt->compat_ioctl(sdev, cmd, p); } #endif @@ -2192,8 +2202,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer u8 type; int ret = 0; - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; return ret; + } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ @@ -3159,9 +3171,11 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sd_validate_opt_xfer_size(sdkp, dev_max)) { q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); - } else + } else { + q->limits.io_opt = 0; rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), (sector_t)BLK_DEF_MAX_SECTORS); + } /* Do not exceed controller limit */ rw_max = min(rw_max, queue_max_hw_sectors(q)); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 542d2bac2922..5087ed6afbdc 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1835,9 +1835,11 @@ static int storvsc_probe(struct hv_device *device, */ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); /* + * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. */ - host->nr_hw_queues = num_present_cpus(); + if (!dev_is_ide) + host->nr_hw_queues = num_present_cpus(); /* * Set the error handler work queue. 
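For reference, the scsi_trace.c hunks earlier in this section replace open-coded shift/OR chains with the kernel's get_unaligned_be{16,32,64}() helpers and encode the SBC-2 rule that a TRANSFER LENGTH of zero in a READ(6)/WRITE(6) CDB means 256 blocks. The sketch below is a self-contained user-space rendering of that decode logic, for illustration only; the helper names (be16_from, be32_from, decode_rw10, rw6_txlen) are made up here and are not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* Assemble big-endian values byte by byte: exactly what the deleted
 * shift/OR chains in scsi_trace.c did before the conversion to
 * get_unaligned_be16()/get_unaligned_be32(). */
static uint16_t be16_from(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t be32_from(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

/* READ(10)/WRITE(10): LBA in CDB bytes 2..5, transfer length in 7..8. */
static void decode_rw10(const uint8_t *cdb)
{
        uint32_t lba = be32_from(&cdb[2]);
        uint16_t txlen = be16_from(&cdb[7]);

        printf("lba=%u txlen=%u\n", (unsigned int)lba, (unsigned int)txlen);
}

/* READ(6)/WRITE(6): per SBC-2, a zero TRANSFER LENGTH means 256 blocks. */
static uint32_t rw6_txlen(const uint8_t *cdb)
{
        return cdb[4] ? cdb[4] : 256;
}

int main(void)
{
        const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x00, 0x12, 0x34,
                                  0, 0x00, 0x08, 0 };
        const uint8_t rw6[6] = { 0x08, 0, 0, 0, 0, 0 };

        decode_rw10(cdb);                        /* prints lba=4660 txlen=8 */
        printf("txlen=%u\n", (unsigned int)rw6_txlen(rw6)); /* txlen=256 */
        return 0;
}

The byte offsets match those in the trace_seq_printf() hunks above, so the sketch doubles as a quick cross-check of the lba/txlen values a trace is expected to report.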
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 955e4c938d49..701b842296f0 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = { .eh_host_reset_handler = sun3scsi_host_reset, .can_queue = 16, .this_id = 7, - .sg_tablesize = SG_NONE, + .sg_tablesize = 1, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = NCR5380_CMD_SIZE, @@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) sun3_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun; - if (setup_sg_tablesize >= 0) + if (setup_sg_tablesize > 0) sun3_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) sun3_scsi_template.this_id = setup_hostid & 7; diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c index b2af04c57a39..6feeb0faf123 100644 --- a/drivers/scsi/ufs/cdns-pltfrm.c +++ b/drivers/scsi/ufs/cdns-pltfrm.c @@ -99,6 +99,12 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, */ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + /* + * Disabling Autohibern8 feature in cadence UFS + * to mask unexpected interrupt trigger. + */ + hba->ahit = 0; + return 0; } diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index 0f6ff33ce52e..d4a8be5ffd52 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -13,6 +13,7 @@ #include "ufshcd.h" #include "ufshcd-pltfrm.h" +#include "ufs_quirks.h" #include "unipro.h" #include "ufs-mediatek.h" @@ -286,6 +287,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) return 0; } +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba, + struct ufs_dev_desc *card) +{ + if (card->wmanufacturerid == UFS_VENDOR_SAMSUNG) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); + + return 0; +} + /** * struct ufs_hba_mtk_vops - UFS MTK specific variant operations * @@ -298,6 +308,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .setup_clocks = ufs_mtk_setup_clocks, .link_startup_notify = ufs_mtk_link_startup_notify, .pwr_change_notify = ufs_mtk_pwr_change_notify, + .apply_dev_quirks = ufs_mtk_apply_dev_quirks, .suspend = ufs_mtk_suspend, .resume = ufs_mtk_resume, }; diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index a5b71487a206..411ef60b2c14 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -905,7 +905,8 @@ out: return err; } -static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) +static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba, + struct ufs_dev_desc *card) { int err = 0; diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index 969a36b15897..ad2abc96c0f1 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) return; spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ahit == ahit) - goto out_unlock; - hba->ahit = ahit; - if (!pm_runtime_suspended(hba->dev)) - ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); -out_unlock: + if (hba->ahit != ahit) + hba->ahit = ahit; spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!pm_runtime_suspended(hba->dev)) { + pm_runtime_get_sync(hba->dev); + ufshcd_hold(hba, false); + ufshcd_auto_hibern8_enable(hba); + ufshcd_release(hba); + pm_runtime_put(hba->dev); + } } /* Convert Auto-Hibernate 
Idle Timer register value to microseconds */ diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index dc2f6d2b46ed..d2197a31abe5 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -202,7 +202,7 @@ int ufs_bsg_probe(struct ufs_hba *hba) bsg_dev->parent = get_device(parent); bsg_dev->release = ufs_bsg_node_release; - dev_set_name(bsg_dev, "ufs-bsg"); + dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no); ret = device_add(bsg_dev); if (ret) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 11a87f51c442..d9ea0ae4f374 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2986,10 +2986,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out_unlock; } - hba->dev_cmd.query.descriptor = NULL; *buf_len = be16_to_cpu(response->upiu_res.length); out_unlock: + hba->dev_cmd.query.descriptor = NULL; mutex_unlock(&hba->dev_cmd.lock); out: ufshcd_release(hba); @@ -3885,15 +3885,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) ktime_to_us(ktime_sub(ktime_get(), start)), ret); if (ret) { + int err; + dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", __func__, ret); /* - * If link recovery fails then return error so that caller - * don't retry the hibern8 enter again. + * If link recovery fails then return error code returned from + * ufshcd_link_recovery(). + * If link recovery succeeds then return -EAGAIN to attempt + * hibern8 enter retry again. */ - if (ufshcd_link_recovery(hba)) - ret = -ENOLINK; + err = ufshcd_link_recovery(hba); + if (err) { + dev_err(hba->dev, "%s: link recovery failed", __func__); + ret = err; + } else { + ret = -EAGAIN; + } } else ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, POST_CHANGE); @@ -3907,7 +3916,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { ret = __ufshcd_uic_hibern8_enter(hba); - if (!ret || ret == -ENOLINK) + if (!ret) goto out; } out: @@ -3941,7 +3950,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) return ret; } -static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) { unsigned long flags; @@ -4779,7 +4788,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } /* end of switch */ - if (host_byte(result) != DID_OK) + if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); return result; } @@ -5014,6 +5023,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) hba->auto_bkops_enabled = false; trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); + hba->is_urgent_bkops_lvl_checked = false; out: return err; } @@ -5038,6 +5048,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; ufshcd_disable_auto_bkops(hba); } + hba->is_urgent_bkops_lvl_checked = false; } static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) @@ -5084,6 +5095,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); + hba->urgent_bkops_lvl = curr_status; out: return err; } @@ -5309,8 +5321,8 @@ static void ufshcd_err_handler(struct work_struct *work) /* * if host reset is required then skip clearing the pending - * transfers forcefully because they will automatically get - * cleared after link startup. 
+ * transfers forcefully because they will get cleared during + * host reset and restore */ if (needs_reset) goto skip_pending_xfer_clear; @@ -6193,9 +6205,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) int err; unsigned long flags; - /* Reset the host controller */ + /* + * Stop the host controller and complete the requests + * cleared by h/w + */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_hba_stop(hba, false); + hba->silence_err_logs = true; + ufshcd_complete_requests(hba); + hba->silence_err_logs = false; spin_unlock_irqrestore(hba->host->host_lock, flags); /* scale up clocks to max frequency before full reinitialization */ @@ -6229,7 +6247,6 @@ out: static int ufshcd_reset_and_restore(struct ufs_hba *hba) { int err = 0; - unsigned long flags; int retries = MAX_HOST_RESET_RETRIES; do { @@ -6239,15 +6256,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba) err = ufshcd_host_reset_and_restore(hba); } while (err && --retries); - /* - * After reset the door-bell might be cleared, complete - * outstanding requests in s/w here. - */ - spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); - return err; } @@ -6713,7 +6721,8 @@ out: return ret; } -static void ufshcd_tune_unipro_params(struct ufs_hba *hba) +static void ufshcd_tune_unipro_params(struct ufs_hba *hba, + struct ufs_dev_desc *card) { if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { ufshcd_tune_pa_tactivate(hba); @@ -6727,7 +6736,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) ufshcd_quirk_tune_host_pa_tactivate(hba); - ufshcd_vops_apply_dev_quirks(hba); + ufshcd_vops_apply_dev_quirks(hba, card); } static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) @@ -6770,23 +6779,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba) &hba->desc_size.geom_desc); if (err) hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, &hba->desc_size.hlth_desc); if (err) hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; } -static void ufshcd_def_desc_sizes(struct ufs_hba *hba) -{ - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; -} - static struct ufs_ref_clk ufs_ref_clk_freqs[] = { {19200000, REF_CLK_FREQ_19_2_MHZ}, {26000000, REF_CLK_FREQ_26_MHZ}, @@ -6881,9 +6880,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* UniPro link is active now */ ufshcd_set_link_active(hba); - /* Enable Auto-Hibernate if configured */ - ufshcd_auto_hibern8_enable(hba); - ret = ufshcd_verify_dev_init(hba); if (ret) goto out; @@ -6903,10 +6899,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) } ufs_fixup_device_setup(hba, &card); + ufshcd_tune_unipro_params(hba, &card); ufs_put_device_desc(&card); - ufshcd_tune_unipro_params(hba); - /* UFS device is also active now */ ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); @@ -6934,6 +6929,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* set the state as operational after switching to desired gear */ hba->ufshcd_state = 
UFSHCD_STATE_OPERATIONAL; + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); + /* * If we are in error handling context or in power management callbacks * context, no need to scan the host @@ -6951,7 +6949,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_init_icc_levels(hba); /* Add required well known logical units to scsi mid layer */ - if (ufshcd_scsi_add_wlus(hba)) + ret = ufshcd_scsi_add_wlus(hba); + if (ret) goto out; /* Initialize devfreq after UFS device is detected */ @@ -7950,12 +7949,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (hba->clk_scaling.is_allowed) ufshcd_resume_clkscaling(hba); - /* Schedule clock gating in case of no access to UFS device yet */ - ufshcd_release(hba); - /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + /* Schedule clock gating in case of no access to UFS device yet */ + ufshcd_release(hba); + goto out; set_old_link_state: @@ -8274,9 +8273,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->mmio_base = mmio_base; hba->irq = irq; - /* Set descriptor lengths to specification defaults */ - ufshcd_def_desc_sizes(hba); - err = ufshcd_hba_init(hba); if (err) goto out_error; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index c94cfda52829..5260e594e0b9 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -322,7 +322,7 @@ struct ufs_hba_variant_ops { void (*setup_task_mgmt)(struct ufs_hba *, int, u8); void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, enum ufs_notify_change_status); - int (*apply_dev_quirks)(struct ufs_hba *); + int (*apply_dev_quirks)(struct ufs_hba *, struct ufs_dev_desc *); int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *hba); @@ -513,6 +513,7 @@ struct ufs_stats { * @uic_error: UFS interconnect layer error status * @saved_err: sticky error mask * @saved_uic_err: sticky UIC error mask + * @silence_err_logs: flag to silence error logs * @dev_cmd: ufs device management command information * @last_dme_cmd_tstamp: time stamp of the last completed DME command * @auto_bkops_enabled: to track whether bkops is enabled in device @@ -670,6 +671,7 @@ struct ufs_hba { u32 saved_err; u32 saved_uic_err; struct ufs_stats ufs_stats; + bool silence_err_logs; /* Device management request data */ struct ufs_dev_cmd dev_cmd; @@ -916,6 +918,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba); + #define SD_ASCII_STD true #define SD_RAW false int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, @@ -1043,10 +1047,11 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba, return hba->vops->hibern8_notify(hba, cmd, status); } -static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba) +static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba, + struct ufs_dev_desc *card) { if (hba->vops && hba->vops->apply_dev_quirks) - return hba->vops->apply_dev_quirks(hba); + return hba->vops->apply_dev_quirks(hba, card); return 0; } diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c index ca8e3abeb2c7..a23a8e5794f5 100644 --- a/drivers/scsi/zorro_esp.c +++ b/drivers/scsi/zorro_esp.c @@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp) static u32 
zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { - return dma_len > 0xFFFF ? 0xFFFF : dma_len; + return dma_len > (1U << 16) ? (1U << 16) : dma_len; +} + +static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr, + u32 dma_len) +{ + /* The old driver used 0xfffc as limit, so do that here too */ + return dma_len > 0xfffc ? 0xfffc : dma_len; } static void zorro_esp_reset_dma(struct esp *esp) @@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = fastlane_esp_irq_pending, - .dma_length_limit = zorro_esp_dma_length_limit, + .dma_length_limit = fastlane_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = fastlane_esp_dma_invalidate, diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index 5823f5b67d16..3f0261d53ad9 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, struct meson_ee_pwrc *pwrc, struct meson_ee_pwrc_domain *dom) { + int ret; + dom->pwrc = pwrc; dom->num_rstc = dom->desc.reset_names_count; dom->num_clks = dom->desc.clk_names_count; @@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, * prepare/enable counters won't be in sync. */ if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) { - int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); + ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); if (ret) return ret; - pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false); - } else - pm_genpd_init(&dom->base, NULL, - (dom->desc.get_power ? - dom->desc.get_power(dom) : true)); + ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov, + false); + if (ret) + return ret; + } else { + ret = pm_genpd_init(&dom->base, NULL, + (dom->desc.get_power ? + dom->desc.get_power(dom) : true)); + if (ret) + return ret; + } return 0; } @@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev) pwrc->xlate.domains[i] = &dom->base; } - of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); - - return 0; + return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); } static void meson_ee_pwrc_shutdown(struct platform_device *pdev) diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c index 48f7ac238861..f3d8d53ab84d 100644 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c @@ -97,13 +97,13 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer, return ret ? ret : copied; } -static unsigned int snoop_file_poll(struct file *file, +static __poll_t snoop_file_poll(struct file *file, struct poll_table_struct *pt) { struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file); poll_wait(file, &chan->wq, pt); - return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0; + return !kfifo_is_empty(&chan->fifo) ? 
EPOLLIN : 0; } static const struct file_operations snoop_fops = { diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c index c68882eb80f7..f638e7790470 100644 --- a/drivers/soc/imx/soc-imx-scu.c +++ b/drivers/soc/imx/soc-imx-scu.c @@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id { u32 id; } resp; } data; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_misc_get_soc_uid { struct imx_sc_rpc_msg hdr; diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 7aa0517ff2f3..73a852b2f417 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -38,6 +38,7 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout) client->pkt_cnt = 0; client->client.dev = dev; client->client.tx_block = false; + client->client.knows_txdone = true; client->chan = mbox_request_channel(&client->client, index); if (IS_ERR(client->chan)) { @@ -155,7 +156,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask); offset_mask |= CMDQ_WRITE_ENABLE_MASK; } - err |= cmdq_pkt_write(pkt, value, subsys, offset_mask); + err |= cmdq_pkt_write(pkt, subsys, offset_mask, value); return err; } diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c index 9090ea12eaf3..4a6111635f82 100644 --- a/drivers/soc/qcom/llcc-slice.c +++ b/drivers/soc/qcom/llcc-slice.c @@ -48,7 +48,7 @@ static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; -static const struct regmap_config llcc_regmap_config = { +static struct regmap_config llcc_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, @@ -323,6 +323,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, if (IS_ERR(base)) return ERR_CAST(base); + llcc_regmap_config.name = name; return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config); } diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 5741ec3fa814..51850cc68b70 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = { static struct rpmhpd sdm845_mx_ao = { .pd = { .name = "mx_ao", }, + .active_only = true, .peer = &sdm845_mx, .res_name = "mx.lvl", }; @@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = { static struct rpmhpd sdm845_cx_ao = { .pd = { .name = "cx_ao", }, + .active_only = true, .peer = &sdm845_cx, .parent = &sdm845_mx_ao.pd, .res_name = "cx.lvl", diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 3299cf5365f3..6651755e9f20 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -326,7 +326,7 @@ static int __init renesas_soc_init(void) if (np) { chipid = of_iomap(np, 0); of_node_put(np); - } else if (soc->id) { + } else if (soc->id && family->reg) { chipid = ioremap(family->reg, 4); } if (chipid) { diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index be9424a87173..9c3ef0a02fd4 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -35,7 +35,8 @@ defined(CONFIG_ARCH_TEGRA_124_SOC) || \ defined(CONFIG_ARCH_TEGRA_132_SOC) || \ defined(CONFIG_ARCH_TEGRA_210_SOC) || \ - defined(CONFIG_ARCH_TEGRA_186_SOC) + defined(CONFIG_ARCH_TEGRA_186_SOC) || \ + defined(CONFIG_ARCH_TEGRA_194_SOC) static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset) { if (WARN_ON(!fuse->base)) diff --git 
a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index df76778af601..f8b9c4058926 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -123,7 +123,7 @@ void __init tegra_init_apbmisc(void) apbmisc.flags = IORESOURCE_MEM; /* strapping options */ - if (tegra_get_chip_id() == TEGRA124) { + if (of_machine_is_compatible("nvidia,tegra124")) { straps.start = 0x7000e864; straps.end = 0x7000e867; } else { diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 9f9c1c677cf4..0447afa970f5 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1899,6 +1899,20 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq, event->id, &pmc->irq, pmc); + /* + * GPIOs don't have an equivalent interrupt in the + * parent controller (GIC). However some code, such + * as the one in irq_get_irqchip_state(), require a + * valid IRQ chip to be set. Make sure that's the + * case by passing NULL here, which will install a + * dummy IRQ chip for the interrupt in the parent + * domain. + */ + if (domain->parent) + irq_domain_set_hwirq_and_chip(domain->parent, + virq, 0, NULL, + NULL); + break; } } @@ -1908,10 +1922,22 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq, * dummy hardware IRQ number. This is used in the ->irq_set_type() * and ->irq_set_wake() callbacks to return early for these IRQs. */ - if (i == soc->num_wake_events) + if (i == soc->num_wake_events) { err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX, &pmc->irq, pmc); + /* + * Interrupts without a wake event don't have a corresponding + * interrupt in the parent controller (GIC). Pass NULL for the + * chip here, which causes a dummy IRQ chip to be installed + * for the interrupt in the parent domain, to make this + * explicit. 
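+ * (This mirrors the wake-event GPIO case handled above, so both + * branches leave the parent domain with the same dummy chip and + * helpers such as irq_get_irqchip_state() keep working.)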
+ */ + if (domain->parent) + irq_domain_set_hwirq_and_chip(domain->parent, virq, 0, + NULL, NULL); + } + return err; } diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 378369d9364a..e9ece45d7a33 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc) ret = rproc_boot(m3_ipc->rproc); if (ret) dev_err(dev, "rproc_boot failed\n"); + else + m3_ipc_state = m3_ipc; do_exit(0); } @@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) goto err_put_rproc; } - m3_ipc_state = m3_ipc; - return 0; err_put_rproc: diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 13c54eac0cc3..d1839707128a 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -479,7 +479,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) unsigned int link_id = sdw->instance; int pdi_conf = 0; - pdi->intel_alh_id = (link_id * 16) + pdi->num + 5; + /* the Bulk and PCM streams are not contiguous */ + pdi->intel_alh_id = (link_id * 16) + pdi->num + 3; + if (pdi->num >= 2) + pdi->intel_alh_id += 2; /* * Program stream parameters to stream SHIM register @@ -508,7 +511,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) unsigned int link_id = sdw->instance; unsigned int conf; - pdi->intel_alh_id = (link_id * 16) + pdi->num + 5; + /* the Bulk and PCM streams are not contiguous */ + pdi->intel_alh_id = (link_id * 16) + pdi->num + 3; + if (pdi->num >= 2) + pdi->intel_alh_id += 2; /* Program Stream config ALH register */ conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id)); diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index fd8007ebb145..13def7f78b9e 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -149,6 +149,7 @@ struct atmel_qspi { struct clk *qspick; struct platform_device *pdev; const struct atmel_qspi_caps *caps; + resource_size_t mmap_size; u32 pending; u32 mr; u32 scr; @@ -329,6 +330,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) u32 sr, offset; int err; + /* + * Check if the address exceeds the MMIO window size. An improvement + * would be to add support for regular SPI mode and fall back to it + * when the flash memories overrun the controller's memory space. 
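+ * (op->addr.val is the offset of the access inside the mapped window, + * so the sum above is one past the last byte the transfer would touch; + * anything beyond aq->mmap_size is unreachable through this window.)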
+ */ + if (op->addr.val + op->data.nbytes > aq->mmap_size) + return -ENOTSUPP; + err = atmel_qspi_set_cfg(aq, op, &offset); if (err) return err; @@ -480,6 +489,8 @@ static int atmel_qspi_probe(struct platform_device *pdev) goto exit; } + aq->mmap_size = resource_size(res); + /* Get the peripheral clock */ aq->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(aq->pclk)) diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index acf318e7330c..abbc1582f457 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -302,7 +302,6 @@ struct atmel_spi { bool use_cs_gpios; bool keep_cs; - bool cs_active; u32 fifo_size; }; @@ -1183,10 +1182,8 @@ static int atmel_spi_setup(struct spi_device *spi) as = spi_master_get_devdata(spi->master); /* see notes above re chipselect */ - if (!atmel_spi_is_v2(as) - && spi->chip_select == 0 - && (spi->mode & SPI_CS_HIGH)) { - dev_dbg(&spi->dev, "setup: can't be active-high\n"); + if (!as->use_cs_gpios && (spi->mode & SPI_CS_HIGH)) { + dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n"); return -EINVAL; } @@ -1376,11 +1373,9 @@ static int atmel_spi_one_transfer(struct spi_master *master, &msg->transfers)) { as->keep_cs = true; } else { - as->cs_active = !as->cs_active; - if (as->cs_active) - cs_activate(as, msg->spi); - else - cs_deactivate(as, msg->spi); + cs_deactivate(as, msg->spi); + udelay(10); + cs_activate(as, msg->spi); } } @@ -1403,7 +1398,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master, atmel_spi_lock(as); cs_activate(as, spi); - as->cs_active = true; as->keep_cs = false; msg->status = 0; diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index c6836a931dbf..36f7eb8ab2df 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -367,7 +367,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) goto out_disable_clk; rate = clk_get_rate(pll_clk); - clk_disable_unprepare(pll_clk); if (!rate) { ret = -EINVAL; goto out_disable_pll_clk; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index c36587b42e95..82a0ee09cbe1 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -168,16 +168,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi) /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @enable: Select (1) or deselect (0) the chip select line + * @is_high: Select(0) or deselect (1) the chip select line */ -static void cdns_spi_chipselect(struct spi_device *spi, bool enable) +static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); - if (!enable) { + if (is_high) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index d12e149f1a41..fd6b9caffaf0 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -82,6 +82,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev, error: clk_disable_unprepare(p->clk); + pci_release_regions(pdev); spi_master_put(master); return ret; } @@ -96,6 +97,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev) return; clk_disable_unprepare(p->clk); + pci_release_regions(pdev); /* Put everything in a known state. 
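* (The writeq() below zeroes OCTEON_SPI_CFG so no stale configuration * survives across the unbind.)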
*/ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 9a49e073e8b7..11cac7e10663 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -129,10 +129,11 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); + /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) - chip->cs_control(enable); + chip->cs_control(!enable); - if (enable) + if (!enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -171,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws) static void dw_writer(struct dw_spi *dws) { - u32 max = tx_max(dws); + u32 max; u16 txw = 0; + spin_lock(&dws->buf_lock); + max = tx_max(dws); while (max--) { /* Set the tx word if the transfer's original "tx" is not null */ if (dws->tx_end - dws->len) { @@ -185,13 +188,16 @@ static void dw_writer(struct dw_spi *dws) dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void dw_reader(struct dw_spi *dws) { - u32 max = rx_max(dws); + u32 max; u16 rxw; + spin_lock(&dws->buf_lock); + max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ @@ -203,6 +209,7 @@ static void dw_reader(struct dw_spi *dws) } dws->rx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void int_error_stop(struct dw_spi *dws, const char *msg) @@ -275,18 +282,20 @@ static int dw_spi_transfer_one(struct spi_controller *master, { struct dw_spi *dws = spi_controller_get_devdata(master); struct chip_data *chip = spi_get_ctldata(spi); + unsigned long flags; u8 imask = 0; u16 txlevel = 0; u32 cr0; int ret; dws->dma_mapped = 0; - + spin_lock_irqsave(&dws->buf_lock, flags); dws->tx = (void *)transfer->tx_buf; dws->tx_end = dws->tx + transfer->len; dws->rx = transfer->rx_buf; dws->rx_end = dws->rx + transfer->len; dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); spi_enable_chip(dws, 0); @@ -308,7 +317,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, cr0 = (transfer->bits_per_word - 1) | (chip->type << SPI_FRF_OFFSET) | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) | - (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET)) + (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) | + (((spi->mode & SPI_LOOP) ? 
1 : 0) << SPI_SRL_OFFSET)) | (chip->tmode << SPI_TMOD_OFFSET); /* @@ -469,6 +479,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->type = SSI_MOTO_SPI; dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); + spin_lock_init(&dws->buf_lock); spi_controller_set_devdata(master, dws); diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index c9c15881e982..f3a2f157a2b1 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -120,6 +120,7 @@ struct dw_spi { size_t len; void *tx; void *tx_end; + spinlock_t buf_lock; void *rx; void *rx_end; int dma_mapped; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index bec758e978fb..d47bd26577b3 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -583,21 +583,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi) dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { - /* Write two TX FIFO entries first, and then the corresponding - * CMD FIFO entry. + /* Write the CMD FIFO entry first, and then the two + * corresponding TX FIFO entries. */ u32 data = dspi_pop_tx(dspi); - if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) { - /* LSB */ - tx_fifo_write(dspi, data & 0xFFFF); - tx_fifo_write(dspi, data >> 16); - } else { - /* MSB */ - tx_fifo_write(dspi, data >> 16); - tx_fifo_write(dspi, data & 0xFFFF); - } cmd_fifo_write(dspi); + tx_fifo_write(dspi, data & 0xFFFF); + tx_fifo_write(dspi, data >> 16); } else { /* Write one entry to both TX FIFO and CMD FIFO * simultaneously. diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index d08e9324140e..92e460d4f3d1 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev) fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + controller->transfer_one = fsl_lpspi_transfer_one; + controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; + controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + controller->dev.of_node = pdev->dev.of_node; + controller->bus_num = pdev->id; + controller->slave_abort = fsl_lpspi_slave_abort; + + ret = devm_spi_register_controller(&pdev->dev, controller); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_controller error.\n"); + goto out_controller_put; + } + if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); @@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) controller->prepare_message = fsl_lpspi_prepare_message; } - controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - controller->transfer_one = fsl_lpspi_transfer_one; - controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; - controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; - controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; - controller->dev.of_node = pdev->dev.of_node; - controller->bus_num = pdev->id; - controller->slave_abort = fsl_lpspi_slave_abort; - init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -938,7 +944,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = 
pm_runtime_get_sync(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); - return ret; + goto out_controller_put; } temp = readl(fsl_lpspi->base + IMX7ULP_PARAM); @@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); - ret = devm_spi_register_controller(&pdev->dev, controller); - if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); - goto out_controller_put; - } - return 0; out_controller_put: diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index c02e24c01136..43078ba3def5 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -63,6 +63,11 @@ #define QUADSPI_IPCR 0x08 #define QUADSPI_IPCR_SEQID(x) ((x) << 24) +#define QUADSPI_FLSHCR 0x0c +#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0) +#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8) +#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16) + #define QUADSPI_BUF3CR 0x1c #define QUADSPI_BUF3CR_ALLMST_MASK BIT(31) #define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8) @@ -95,6 +100,9 @@ #define QUADSPI_FR 0x160 #define QUADSPI_FR_TFF_MASK BIT(0) +#define QUADSPI_RSER 0x164 +#define QUADSPI_RSER_TFIE BIT(0) + #define QUADSPI_SPTRCLR 0x16c #define QUADSPI_SPTRCLR_IPPTRC BIT(8) #define QUADSPI_SPTRCLR_BFPTRC BIT(0) @@ -112,9 +120,6 @@ #define QUADSPI_LCKER_LOCK BIT(0) #define QUADSPI_LCKER_UNLOCK BIT(1) -#define QUADSPI_RSER 0x164 -#define QUADSPI_RSER_TFIE BIT(0) - #define QUADSPI_LUT_BASE 0x310 #define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) #define QUADSPI_LUT_REG(idx) \ @@ -181,6 +186,12 @@ */ #define QUADSPI_QUIRK_BASE_INTERNAL BIT(4) +/* + * Controller uses TDH bits in register QUADSPI_FLSHCR. + * They need to be set in accordance with the DDR/SDR mode. + */ +#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5) + struct fsl_qspi_devtype_data { unsigned int rxfifo; unsigned int txfifo; @@ -209,7 +220,8 @@ static const struct fsl_qspi_devtype_data imx7d_data = { .rxfifo = SZ_128, .txfifo = SZ_512, .ahb_buf_size = SZ_1K, - .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK | + QUADSPI_QUIRK_USE_TDH_SETTING, .little_endian = true, }; @@ -217,7 +229,8 @@ static const struct fsl_qspi_devtype_data imx6ul_data = { .rxfifo = SZ_128, .txfifo = SZ_512, .ahb_buf_size = SZ_1K, - .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK | + QUADSPI_QUIRK_USE_TDH_SETTING, .little_endian = true, }; @@ -275,6 +288,11 @@ static inline int needs_amba_base_offset(struct fsl_qspi *q) return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL); } +static inline int needs_tdh_setting(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING; +} + /* * An IC bug makes it necessary to rearrange the 32-bit data. * Later chips, such as IMX6SLX, have fixed this bug. @@ -380,7 +398,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem, op->data.nbytes > q->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } static void fsl_qspi_prepare_lut(struct fsl_qspi *q, @@ -710,6 +728,16 @@ static int fsl_qspi_default_setup(struct fsl_qspi *q) qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, base + QUADSPI_MCR); + /* + * Previous boot stages (BootROM, bootloader) might have used DDR + * mode and did not clear the TDH bits. 
As we currently use SDR mode + * only, clear the TDH bits if necessary. + */ + if (needs_tdh_setting(q)) + qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) & + ~QUADSPI_FLSHCR_TDH_MASK, + base + QUADSPI_FLSHCR); + reg = qspi_readl(q, base + QUADSPI_SMPR); qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK | QUADSPI_SMPR_FSPHS_MASK diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 4b80ace1d137..be7c6ba73072 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -612,6 +612,7 @@ static struct spi_master * fsl_spi_probe(struct device *dev, master->setup = fsl_spi_setup; master->cleanup = fsl_spi_cleanup; master->transfer_one_message = fsl_spi_do_one_msg; + master->use_gpio_descriptors = true; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->max_bits_per_word = 32; @@ -728,17 +729,27 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) } } #endif - - pdata->cs_control = fsl_spi_cs_control; + /* + * Handle the case where we have one hardwired (always selected) + * device on the first "chipselect". Else we let the core code + * handle any GPIOs or native chip selects and assign the + * appropriate callback for dealing with the CS lines. This isn't + * supported on the GRLIB variant. + */ + ret = gpiod_count(dev, "cs"); + if (ret <= 0) + pdata->max_chipselect = 1; + else + pdata->cs_control = fsl_spi_cs_control; } ret = of_address_to_resource(np, 0, &mem); if (ret) goto err; - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - ret = -EINVAL; + irq = platform_get_irq(ofdev, 0); + if (irq < 0) { + ret = irq; goto err; } @@ -751,7 +762,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) return 0; err: - irq_dispose_mapping(irq); return ret; } diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 1d3e23ec20a6..f9c5bbb74714 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -371,8 +371,10 @@ static int spi_gpio_probe(struct platform_device *pdev) return -ENOMEM; status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master); - if (status) + if (status) { + spi_master_put(master); return status; + } if (of_id) status = spi_gpio_probe_dt(pdev, master); diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 439b01e4a2c8..f4a8f470aecc 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -673,6 +673,8 @@ static int img_spfi_probe(struct platform_device *pdev) dma_release_channel(spfi->tx_ch); if (spfi->rx_ch) dma_release_channel(spfi->rx_ch); + spfi->tx_ch = NULL; + spfi->rx_ch = NULL; dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n"); } else { master->dma_tx = spfi->tx_ch; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 501b923f2c27..28ae5229f889 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -439,7 +439,7 @@ static bool nxp_fspi_supports_op(struct spi_mem *mem, op->data.nbytes > f->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } /* Instead of busy looping invoke readl_poll_timeout functionality. */ diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 848e03e5f42d..4433cb4de564 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -985,20 +985,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, * Note that we currently allow DMA only if we get a channel * for both rx and tx. Otherwise we'll do PIO for both rx and tx. 
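* (With this patch the rx/tx channel pair is requested once per * controller at probe time and released in omap2_mcspi_release_dma(), * instead of being managed per spi_device in setup/cleanup.)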
*/ -static int omap2_mcspi_request_dma(struct spi_device *spi) +static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi, + struct omap2_mcspi_dma *mcspi_dma) { - struct spi_master *master = spi->master; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; int ret = 0; - mcspi = spi_master_get_devdata(master); - mcspi_dma = mcspi->dma_channels + spi->chip_select; - - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - - mcspi_dma->dma_rx = dma_request_chan(&master->dev, + mcspi_dma->dma_rx = dma_request_chan(mcspi->dev, mcspi_dma->dma_rx_ch_name); if (IS_ERR(mcspi_dma->dma_rx)) { ret = PTR_ERR(mcspi_dma->dma_rx); @@ -1006,7 +998,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) goto no_dma; } - mcspi_dma->dma_tx = dma_request_chan(&master->dev, + mcspi_dma->dma_tx = dma_request_chan(mcspi->dev, mcspi_dma->dma_tx_ch_name); if (IS_ERR(mcspi_dma->dma_tx)) { ret = PTR_ERR(mcspi_dma->dma_tx); @@ -1015,20 +1007,40 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) mcspi_dma->dma_rx = NULL; } + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + no_dma: return ret; } +static void omap2_mcspi_release_dma(struct spi_master *master) +{ + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi_dma *mcspi_dma; + int i; + + for (i = 0; i < master->num_chipselect; i++) { + mcspi_dma = &mcspi->dma_channels[i]; + + if (mcspi_dma->dma_rx) { + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; + } + if (mcspi_dma->dma_tx) { + dma_release_channel(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; + } + } +} + static int omap2_mcspi_setup(struct spi_device *spi) { int ret; struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); struct omap2_mcspi_regs *ctx = &mcspi->ctx; - struct omap2_mcspi_dma *mcspi_dma; struct omap2_mcspi_cs *cs = spi->controller_state; - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) @@ -1053,13 +1065,6 @@ static int omap2_mcspi_setup(struct spi_device *spi) } } - if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { - ret = omap2_mcspi_request_dma(spi); - if (ret) - dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n", - ret); - } - ret = pm_runtime_get_sync(mcspi->dev); if (ret < 0) { pm_runtime_put_noidle(mcspi->dev); @@ -1076,12 +1081,8 @@ static int omap2_mcspi_setup(struct spi_device *spi) static void omap2_mcspi_cleanup(struct spi_device *spi) { - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; struct omap2_mcspi_cs *cs; - mcspi = spi_master_get_devdata(spi->master); - if (spi->controller_state) { /* Unlink controller state from context save list */ cs = spi->controller_state; @@ -1090,19 +1091,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) kfree(cs); } - if (spi->chip_select < spi->master->num_chipselect) { - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - - if (mcspi_dma->dma_rx) { - dma_release_channel(mcspi_dma->dma_rx); - mcspi_dma->dma_rx = NULL; - } - if (mcspi_dma->dma_tx) { - dma_release_channel(mcspi_dma->dma_tx); - mcspi_dma->dma_tx = NULL; - } - } - if (gpio_is_valid(spi->cs_gpio)) gpio_free(spi->cs_gpio); } @@ -1313,6 +1301,9 @@ static bool omap2_mcspi_can_dma(struct spi_master *master, if (spi_controller_is_slave(master)) return true; + master->dma_rx = mcspi_dma->dma_rx; + master->dma_tx = mcspi_dma->dma_tx; + return (xfer->len >= DMA_MIN_BYTES); } @@ -1475,6 +1466,11 @@ static int 
omap2_mcspi_probe(struct platform_device *pdev) for (i = 0; i < master->num_chipselect; i++) { sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i); sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i); + + status = omap2_mcspi_request_dma(mcspi, + &mcspi->dma_channels[i]); + if (status == -EPROBE_DEFER) + goto free_master; } status = platform_get_irq(pdev, 0); @@ -1512,6 +1508,7 @@ disable_pm: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); free_master: + omap2_mcspi_release_dma(master); spi_master_put(master); return status; } @@ -1521,6 +1518,8 @@ static int omap2_mcspi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + omap2_mcspi_release_dma(master); + pm_runtime_dont_use_autosuspend(mcspi->dev); pm_runtime_put_sync(mcspi->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index bb6a14d1ab0f..723145673206 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -68,6 +68,10 @@ MODULE_ALIAS("platform:pxa2xx-spi"); #define LPSS_CAPS_CS_EN_SHIFT 9 #define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT) +#define LPSS_PRIV_CLOCK_GATE 0x38 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3 + struct lpss_config { /* LPSS offset from drv_data->ioaddr */ unsigned offset; @@ -84,6 +88,8 @@ struct lpss_config { unsigned cs_sel_shift; unsigned cs_sel_mask; unsigned cs_num; + /* Quirks */ + unsigned cs_clk_stays_gated : 1; }; /* Keep these sorted with enum pxa_ssp_type */ @@ -154,6 +160,7 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 56, .cs_sel_shift = 8, .cs_sel_mask = 3 << 8, + .cs_clk_stays_gated = true, }, }; @@ -381,6 +388,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable) else value |= LPSS_CS_CONTROL_CS_HIGH; __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); + if (config->cs_clk_stays_gated) { + u32 clkgate; + + /* + * Changing CS alone when dynamic clock gating is on won't + * actually flip CS at that time. This ruins SPI transfers + * that specify delays, or have no data. Toggle the clock mode + * to force on briefly to poke the CS pin to move. 
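+ * (The saved clkgate value is written back immediately after the + * forced-on value, so the clock is ungated just long enough for the + * CS change to latch.)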
+ */ + clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE); + value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) | + LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON; + + __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value); + __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate); + } } static void cs_assert(struct spi_device *spi) @@ -1441,6 +1464,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP }, + /* JSL */ + { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP }, /* APL */ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, @@ -1457,6 +1484,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP }, + /* CML-H */ + { PCI_VDEVICE(INTEL, 0x06aa), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x06ab), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x06fb), LPSS_CNL_SSP }, /* TGL-LP */ { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP }, @@ -1565,7 +1596,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) #endif ssp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(ssp->clk)) + return NULL; + ssp->irq = platform_get_irq(pdev, 0); + if (ssp->irq < 0) + return NULL; + ssp->type = type; ssp->pdev = pdev; ssp->port_id = pxa2xx_spi_get_port_id(adev); @@ -1602,6 +1639,11 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller, return cs; } +static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi) +{ + return MAX_DMA_LEN; +} + static int pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1707,6 +1749,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } else { controller->can_dma = pxa2xx_spi_can_dma; controller->max_dma_len = MAX_DMA_LEN; + controller->max_transfer_size = + pxa2xx_spi_max_dma_transfer_size; } } diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 2f559e531100..fa8079fbea77 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1217,6 +1217,11 @@ static int spi_qup_suspend(struct device *device) struct spi_qup *controller = spi_master_get_devdata(master); int ret; + if (pm_runtime_suspended(device)) { + ret = spi_qup_pm_resume_runtime(device); + if (ret) + return ret; + } ret = spi_master_suspend(master); if (ret) return ret; @@ -1225,10 +1230,8 @@ static int spi_qup_suspend(struct device *device) if (ret) return ret; - if (!pm_runtime_suspended(device)) { - clk_disable_unprepare(controller->cclk); - clk_disable_unprepare(controller->iclk); - } + clk_disable_unprepare(controller->cclk); + clk_disable_unprepare(controller->iclk); return 0; } diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 15f5723d9f95..7222c7689c3c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -1257,9 +1257,9 @@ static int rspi_probe(struct platform_device *pdev) ctlr->flags = ops->flags; ctlr->dev.of_node = pdev->dev.of_node; - ret = platform_get_irq_byname(pdev, "rx"); + ret = platform_get_irq_byname_optional(pdev, "rx"); if (ret < 0) { - ret = platform_get_irq_byname(pdev, "mux"); + ret = platform_get_irq_byname_optional(pdev, "mux"); if (ret < 0) ret = platform_get_irq(pdev, 0); if 
(ret >= 0) @@ -1270,10 +1270,6 @@ static int rspi_probe(struct platform_device *pdev) if (ret >= 0) rspi->tx_irq = ret; } - if (ret < 0) { - dev_err(&pdev->dev, "platform_get_irq error\n"); - goto error2; - } if (rspi->rx_irq == rspi->tx_irq) { /* Single multiplexed interrupt */ diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c index 35254bdc42c4..f7c1e20432e0 100644 --- a/drivers/spi/spi-sifive.c +++ b/drivers/spi/spi-sifive.c @@ -357,14 +357,14 @@ static int sifive_spi_probe(struct platform_device *pdev) if (!cs_bits) { dev_err(&pdev->dev, "Could not auto probe CS lines\n"); ret = -EINVAL; - goto put_master; + goto disable_clk; } num_cs = ilog2(cs_bits) + 1; if (num_cs > SIFIVE_SPI_MAX_CS) { dev_err(&pdev->dev, "Invalid number of spi slaves\n"); ret = -EINVAL; - goto put_master; + goto disable_clk; } /* Define our master */ @@ -393,7 +393,7 @@ static int sifive_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), spi); if (ret) { dev_err(&pdev->dev, "Unable to bind to interrupt\n"); - goto put_master; + goto disable_clk; } dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n", @@ -402,11 +402,13 @@ static int sifive_spi_probe(struct platform_device *pdev) ret = devm_spi_register_master(&pdev->dev, master); if (ret < 0) { dev_err(&pdev->dev, "spi_register_master failed\n"); - goto put_master; + goto disable_clk; } return 0; +disable_clk: + clk_disable_unprepare(spi->clk); put_master: spi_master_put(master); @@ -420,6 +422,7 @@ static int sifive_spi_remove(struct platform_device *pdev) /* Disable all the interrupts just in case */ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + clk_disable_unprepare(spi->clk); return 0; } diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index 9a051286f120..9613cfe3c0a2 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -393,6 +393,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this, val |= BIT_WDG_RUN | BIT_WDG_RST; sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val); + /* Lock the watchdog */ + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY); + mdelay(1000); dev_emerg(sadi->dev, "Unable to restart system\n"); diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index 8c9021b7f7a9..fa597e27be17 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -674,7 +674,7 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t) u16 word_delay, interval; u32 val; - val = readl_relaxed(ss->base + SPRD_SPI_CTL7); + val = readl_relaxed(ss->base + SPRD_SPI_CTL0); val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX); /* Set default chip selection, clock phase and clock polarity */ val |= ss->hw_mode & SPI_CPHA ? 
SPRD_SPI_NG_RX : SPRD_SPI_NG_TX; diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 0c24c494f386..77d26d64541a 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -381,6 +381,7 @@ static int spi_st_probe(struct platform_device *pdev) return 0; clk_disable: + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(spi_st->clk); put_master: spi_master_put(master); @@ -392,6 +393,8 @@ static int spi_st_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct spi_st *spi_st = spi_master_get_devdata(master); + pm_runtime_disable(&pdev->dev); + clk_disable_unprepare(spi_st->clk); pinctrl_pm_select_sleep_state(&pdev->dev); diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 9ac6f9fe13cf..4e726929bb4f 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -528,7 +528,6 @@ static void stm32_qspi_release(struct stm32_qspi *qspi) stm32_qspi_dma_free(qspi); mutex_destroy(&qspi->lock); clk_disable_unprepare(qspi->clk); - spi_master_put(qspi->ctrl); } static int stm32_qspi_probe(struct platform_device *pdev) @@ -626,6 +625,8 @@ static int stm32_qspi_probe(struct platform_device *pdev) err: stm32_qspi_release(qspi); + spi_master_put(qspi->ctrl); + return ret; } diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 111fffc91435..374a2a32edcd 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1073,7 +1073,7 @@ static int tegra_slink_probe(struct platform_device *pdev) ret = clk_enable(tspi->clk); if (ret < 0) { dev_err(&pdev->dev, "Clock enable failed %d\n", ret); - goto exit_free_master; + goto exit_clk_unprepare; } spi_irq = platform_get_irq(pdev, 0); @@ -1146,6 +1146,8 @@ exit_free_irq: free_irq(spi_irq, tspi); exit_clk_disable: clk_disable(tspi->clk); +exit_clk_unprepare: + clk_unprepare(tspi->clk); exit_free_master: spi_master_put(master); return ret; @@ -1159,6 +1161,7 @@ static int tegra_slink_remove(struct platform_device *pdev) free_irq(tspi->irq, tspi); clk_disable(tspi->clk); + clk_unprepare(tspi->clk); if (tspi->tx_dma_chan) tegra_slink_deinit_dma_param(tspi, false); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 3cb65371ae3b..66dcb6128539 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -62,6 +62,7 @@ struct ti_qspi { u32 dc; bool mmap_enabled; + int current_cs; }; #define QSPI_PID (0x0) @@ -487,6 +488,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) MEM_CS_EN(spi->chip_select)); } qspi->mmap_enabled = true; + qspi->current_cs = spi->chip_select; } static void ti_qspi_disable_memory_map(struct spi_device *spi) @@ -498,6 +500,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, MEM_CS_MASK, 0); qspi->mmap_enabled = false; + qspi->current_cs = -1; } static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, @@ -543,7 +546,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); - if (!qspi->mmap_enabled) + if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) ti_qspi_enable_memory_map(mem->spi); ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, op->addr.nbytes, op->dummy.nbytes); @@ -799,6 +802,7 @@ no_dma: } } qspi->mmap_enabled = false; + qspi->current_cs = -1; ret = devm_spi_register_master(&pdev->dev, master); if (!ret) diff --git a/drivers/spi/spi-uniphier.c 
b/drivers/spi/spi-uniphier.c index 47cde1864630..ce9b30112e26 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv) } } -static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv, + unsigned int threshold) { - unsigned int fifo_threshold, fill_bytes; u32 val; - fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, - bytes_per_word(priv->bits_per_word)); - fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); - - fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes); - - /* set fifo threshold */ val = readl(priv->base + SSI_FC); val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK); - val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold); - val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold); + val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold); + val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold); writel(val, priv->base + SSI_FC); +} - while (fill_bytes--) +static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +{ + unsigned int fifo_threshold, fill_words; + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw); + fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); + + uniphier_spi_set_fifo_threshold(priv, fifo_threshold); + + fill_words = fifo_threshold - + DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw); + + while (fill_words--) uniphier_spi_send(priv); } diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 60c4de4e4485..7412a3042a8d 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -401,9 +401,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high) zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); - /* Dummy generic FIFO entry */ - zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); - /* Manually start the generic FIFO command */ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) | diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f9502dbbb5c1..c186d3a944cd 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1711,15 +1711,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, spi->mode |= SPI_3WIRE; if (of_property_read_bool(nc, "spi-lsb-first")) spi->mode |= SPI_LSB_FIRST; - - /* - * For descriptors associated with the device, polarity inversion is - * handled in the gpiolib, so all chip selects are "active high" in - * the logical sense, the gpiolib will invert the line if need be. - */ - if (ctlr->use_gpio_descriptors) - spi->mode |= SPI_CS_HIGH; - else if (of_property_read_bool(nc, "spi-cs-high")) + if (of_property_read_bool(nc, "spi-cs-high")) spi->mode |= SPI_CS_HIGH; /* Device DUAL/QUAD mode */ @@ -1783,6 +1775,15 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, } spi->chip_select = value; + /* + * For descriptors associated with the device, polarity inversion is + * handled in the gpiolib, so all gpio chip selects are "active high" + * in the logical sense, the gpiolib will invert the line if need be. 
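The of_spi_parse_dt() change above defers forcing SPI_CS_HIGH until the chip select number is known, and applies it only when a GPIO descriptor actually backs that chip select. A minimal sketch of why descriptor-backed lines can be treated as logically active-high (sketch_set_cs() is a made-up name, not the in-tree helper):

#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

static void sketch_set_cs(struct spi_device *spi, bool assert)
{
	struct gpio_desc *cs = spi->controller->cs_gpiods ?
			spi->controller->cs_gpiods[spi->chip_select] : NULL;

	if (!cs)
		return;		/* native chip select, handled elsewhere */

	/*
	 * 1 means "asserted" here; if the firmware marked the line
	 * GPIO_ACTIVE_LOW, gpiolib drives the physical level low on
	 * our behalf, which is why such devices get SPI_CS_HIGH in
	 * the core.
	 */
	gpiod_set_value_cansleep(cs, assert);
}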
+ */ + if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods && + ctlr->cs_gpiods[spi->chip_select]) + spi->mode |= SPI_CS_HIGH; + /* Device speed */ rc = of_property_read_u32(nc, "spi-max-frequency", &value); if (rc) { @@ -2451,7 +2452,7 @@ int spi_register_controller(struct spi_controller *ctlr) if (ctlr->use_gpio_descriptors) { status = spi_get_gpio_descs(ctlr); if (status) - return status; + goto free_bus_id; /* * A controller using GPIO descriptors always * supports SPI_CS_HIGH if need be. @@ -2461,7 +2462,7 @@ int spi_register_controller(struct spi_controller *ctlr) /* Legacy code path for GPIOs from DT */ status = of_spi_get_gpio_numbers(ctlr); if (status) - return status; + goto free_bus_id; } } @@ -2469,17 +2470,14 @@ int spi_register_controller(struct spi_controller *ctlr) * Even if it's just one always-selected device, there must * be at least one chipselect. */ - if (!ctlr->num_chipselect) - return -EINVAL; + if (!ctlr->num_chipselect) { + status = -EINVAL; + goto free_bus_id; + } status = device_add(&ctlr->dev); - if (status < 0) { - /* free bus id */ - mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); - mutex_unlock(&board_lock); - goto done; - } + if (status < 0) + goto free_bus_id; dev_dbg(dev, "registered %s %s\n", spi_controller_is_slave(ctlr) ? "slave" : "master", dev_name(&ctlr->dev)); @@ -2495,11 +2493,7 @@ int spi_register_controller(struct spi_controller *ctlr) status = spi_controller_initialize_queue(ctlr); if (status) { device_del(&ctlr->dev); - /* free bus id */ - mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); - mutex_unlock(&board_lock); - goto done; + goto free_bus_id; } } /* add statistics */ @@ -2514,7 +2508,12 @@ int spi_register_controller(struct spi_controller *ctlr) /* Register devices from the device tree and ACPI */ of_register_spi_devices(ctlr); acpi_register_spi_devices(ctlr); -done: + return status; + +free_bus_id: + mutex_lock(&board_lock); + idr_remove(&spi_master_idr, ctlr->bus_num); + mutex_unlock(&board_lock); return status; } EXPORT_SYMBOL_GPL(spi_register_controller); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 255786f2e844..ab2c3848f5bf 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -394,6 +394,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) else retval = get_user(tmp, (u32 __user *)arg); if (retval == 0) { + struct spi_controller *ctlr = spi->controller; u32 save = spi->mode; if (tmp & ~SPI_MODE_MASK) { @@ -401,6 +402,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } + if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && + ctlr->cs_gpiods[spi->chip_select]) + tmp |= SPI_CS_HIGH; + tmp |= spi->mode & ~SPI_MODE_MASK; spi->mode = (u16)tmp; retval = spi_setup(spi); @@ -627,6 +632,9 @@ static int spidev_release(struct inode *inode, struct file *filp) if (dofree) kfree(spidev); } +#ifdef CONFIG_SPI_SLAVE + spi_slave_abort(spidev->spi); +#endif mutex_unlock(&device_list_lock); return 0; diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 97acc2ba2912..de844b412110 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d, return 0; } +static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class; static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb, struct irq_domain *domain, unsigned int virq, @@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct 
spmi_pmic_arb *pmic_arb, else handler = handle_level_irq; + + irq_set_lockdep_class(virq, &qpnpint_irq_lock_class, + &qpnpint_irq_request_class); irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb, handler, NULL, NULL); } diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 74d497d39c5a..c6695354b123 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot) _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); } +static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma) +{ + /* do not allow to mmap ashmem backing shmem file directly */ + return -EPERM; +} + +static unsigned long +ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +} + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) { + static struct file_operations vmfile_fops; struct ashmem_area *asma = file->private_data; int ret = 0; @@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } vmfile->f_mode |= FMODE_LSEEK; asma->file = vmfile; + /* + * override mmap operation of the vmfile so that it can't be + * remapped which would lead to creation of a new vma with no + * asma permission checks. Have to override get_unmapped_area + * as well to prevent VM_BUG_ON check for f_ops modification. + */ + if (!vmfile_fops.mmap) { + vmfile_fops = *vmfile->f_op; + vmfile_fops.mmap = ashmem_vmfile_mmap; + vmfile_fops.get_unmapped_area = + ashmem_vmfile_get_unmapped_area; + } + vmfile->f_op = &vmfile_fops; } get_file(asma->file); diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig index 3fffe4d6f327..f180a8e9f58a 100644 --- a/drivers/staging/axis-fifo/Kconfig +++ b/drivers/staging/axis-fifo/Kconfig @@ -4,7 +4,7 @@ # config XIL_AXIS_FIFO tristate "Xilinx AXI-Stream FIFO IP core driver" - depends on OF + depends on OF && HAS_IOMEM help This adds support for the Xilinx AXI-Stream FIFO IP core driver. 
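The ashmem hunk above guards against a direct remap of the backing shmem file by swapping in a patched copy of its file_operations. The pattern in isolation (restrict_backing_file() and blocked_mmap() are illustrative names; the real change also overrides get_unmapped_area to satisfy the VM_BUG_ON check on f_op modification):

#include <linux/fs.h>
#include <linux/mm.h>

static int blocked_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -EPERM;	/* refuse mmap on the backing file itself */
}

static void restrict_backing_file(struct file *vmfile)
{
	/* One patched copy shared by every backing file. */
	static struct file_operations patched_fops;

	if (!patched_fops.mmap) {	/* first caller fills it in */
		patched_fops = *vmfile->f_op;
		patched_fops.mmap = blocked_mmap;
	}
	vmfile->f_op = &patched_fops;
}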
The AXI Streaming FIFO allows memory mapped access to a AXI Streaming diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index dbff0f7e7cf5..ddc0dc93d08b 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -46,8 +46,8 @@ #define PCI171X_RANGE_UNI BIT(4) #define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0) #define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */ -#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8) -#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0) +#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8) +#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0) #define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x)) #define PCI171X_STATUS_REG 0x06 /* R: status register */ #define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */ diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c index 4bdf44d82879..dc62db1ee1dd 100644 --- a/drivers/staging/comedi/drivers/gsc_hpdi.c +++ b/drivers/staging/comedi/drivers/gsc_hpdi.c @@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE, &devpriv->dio_buffer_phys_addr[i], GFP_KERNEL); + if (!devpriv->dio_buffer[i]) { + dev_warn(dev->class_dev, + "failed to allocate DMA buffer\n"); + return -ENOMEM; + } } /* allocate dma descriptors */ devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev, @@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, NUM_DMA_DESCRIPTORS, &devpriv->dma_desc_phys_addr, GFP_KERNEL); + if (!devpriv->dma_desc) { + dev_warn(dev->class_dev, + "failed to allocate DMA descriptors\n"); + return -ENOMEM; + } if (devpriv->dma_desc_phys_addr & 0xf) { dev_warn(dev->class_dev, " dma descriptors not quad-word aligned (bug)\n"); diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c index eb61494dc2bd..88beb0d6c42b 100644 --- a/drivers/staging/comedi/drivers/ni_routes.c +++ b/drivers/staging/comedi/drivers/ni_routes.c @@ -74,9 +74,6 @@ static int ni_find_device_routes(const char *device_family, } } - if (!rv) - return -ENODATA; - /* Second, find the set of routes valid for this device. 
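The adv_pci1710 change above widens the A/D multiplexer channel masks from 4 to 8 bits. A worked value makes the bug concrete: any channel number of 16 or more was silently truncated by the old mask (macro bodies copied from the hunk, with OLD_/NEW_ prefixes added here for comparison):

#define OLD_PCI171X_MUX_CHANH(x)	(((x) & 0xf) << 8)
#define NEW_PCI171X_MUX_CHANH(x)	(((x) & 0xff) << 8)

/* Channel 16: the 4-bit mask wraps it back to channel 0. */
_Static_assert(OLD_PCI171X_MUX_CHANH(16) == 0x0000, "old mask wraps");
_Static_assert(NEW_PCI171X_MUX_CHANH(16) == 0x1000, "new mask is intact");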
*/ for (i = 0; ni_device_routes_list[i]; ++i) { if (memcmp(ni_device_routes_list[i]->device, board_name, @@ -86,12 +83,12 @@ static int ni_find_device_routes(const char *device_family, } } - if (!dr) - return -ENODATA; - tables->route_values = rv; tables->valid_routes = dr; + if (!rv || !dr) + return -ENODATA; + return 0; } @@ -489,6 +486,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest, { int src; + if (!tables->route_values) + return -EINVAL; + dest = B(dest); /* subtract NI names offset */ /* ensure we are not going to under/over run the route value table */ if (dest < 0 || dest >= NI_NUM_NAMES) diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index 04bc488385e6..4af012968cb6 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk + * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk */ /* @@ -8,7 +8,7 @@ * Description: University of Stirling USB DAQ & INCITE Technology Limited * Devices: [ITL] USB-DUX-FAST (usbduxfast) * Author: Bernd Porr - * Updated: 10 Oct 2014 + * Updated: 16 Nov 2019 * Status: stable */ @@ -22,6 +22,7 @@ * * * Revision history: + * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest * 0.9: Dropping the first data packet which seems to be from the last transfer. * Buffer overflows in the FX2 are handed over to comedi. * 0.92: Dropping now 4 packets. The quad buffer has to be emptied. @@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, struct comedi_cmd *cmd) { int err = 0; + int err2 = 0; unsigned int steps; unsigned int arg; @@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, */ steps = (cmd->convert_arg * 30) / 1000; if (cmd->chanlist_len != 1) - err |= comedi_check_trigger_arg_min(&steps, - MIN_SAMPLING_PERIOD); - err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); - arg = (steps * 1000) / 30; - err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); + err2 |= comedi_check_trigger_arg_min(&steps, + MIN_SAMPLING_PERIOD); + else + err2 |= comedi_check_trigger_arg_min(&steps, 1); + err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); + if (err2) { + err |= err2; + arg = (steps * 1000) / 30; + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); + } if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h index 3abab33e932c..4973c9edc26e 100644 --- a/drivers/staging/exfat/exfat.h +++ b/drivers/staging/exfat/exfat.h @@ -943,8 +943,8 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir, s32 create_file(struct inode *inode, struct chain_t *p_dir, struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid); void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry); -s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry, - struct uni_name_t *p_uniname, struct file_id_t *fid); +s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry, + struct uni_name_t *p_uniname, struct file_id_t *fid); s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry, struct chain_t *p_newdir, struct uni_name_t *p_uniname, struct file_id_t *fid); diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c index 79174e5c4145..f3774a1912d1 
100644 --- a/drivers/staging/exfat/exfat_core.c +++ b/drivers/staging/exfat/exfat_core.c @@ -3381,8 +3381,8 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry) fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries); } -s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry, - struct uni_name_t *p_uniname, struct file_id_t *fid) +s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry, + struct uni_name_t *p_uniname, struct file_id_t *fid) { s32 ret, newentry = -1, num_old_entries, num_new_entries; sector_t sector_old, sector_new; diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c index 3b2b0ceb7297..58c7d66060f7 100644 --- a/drivers/staging/exfat/exfat_super.c +++ b/drivers/staging/exfat/exfat_super.c @@ -1308,8 +1308,8 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid, fs_set_vol_flags(sb, VOL_DIRTY); if (olddir.dir == newdir.dir) - ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name, - fid); + ret = exfat_rename_file(new_parent_inode, &olddir, dentry, + &uni_name, fid); else ret = move_file(new_parent_inode, &olddir, dentry, &newdir, &uni_name, fid); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index a0a67aa517f0..61f0286fb157 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -666,7 +666,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, fbdefio->deferred_io = fbtft_deferred_io; fb_deferred_io_init(info); - strncpy(info->fix.id, dev->driver->name, 16); + snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.xpanstep = 0; diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c index 9b19ea9d3fa1..9a3f7c034ab4 100644 --- a/drivers/staging/greybus/audio_manager.c +++ b/drivers/staging/greybus/audio_manager.c @@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void) list_for_each_entry_safe(module, next, &modules_list, list) { list_del(&module->list); - kobject_put(&module->kobj); ida_simple_remove(&module_id, module->id); + kobject_put(&module->kobj); } is_empty = list_empty(&modules_list); diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c index ba6f905f26fa..69c6dce9be31 100644 --- a/drivers/staging/greybus/tools/loopback_test.c +++ b/drivers/staging/greybus/tools/loopback_test.c @@ -19,6 +19,7 @@ #include #define MAX_NUM_DEVICES 10 +#define MAX_SYSFS_PREFIX 0x80 #define MAX_SYSFS_PATH 0x200 #define CSV_MAX_LINE 0x1000 #define SYSFS_MAX_INT 0x20 @@ -67,7 +68,7 @@ struct loopback_results { }; struct loopback_device { - char name[MAX_SYSFS_PATH]; + char name[MAX_STR_LEN]; char sysfs_entry[MAX_SYSFS_PATH]; char debugfs_entry[MAX_SYSFS_PATH]; struct loopback_results results; @@ -93,8 +94,8 @@ struct loopback_test { int stop_all; int poll_count; char test_name[MAX_STR_LEN]; - char sysfs_prefix[MAX_SYSFS_PATH]; - char debugfs_prefix[MAX_SYSFS_PATH]; + char sysfs_prefix[MAX_SYSFS_PREFIX]; + char debugfs_prefix[MAX_SYSFS_PREFIX]; struct timespec poll_timeout; struct loopback_device devices[MAX_NUM_DEVICES]; struct loopback_results aggregate_results; @@ -637,7 +638,7 @@ baddir: static int open_poll_files(struct loopback_test *t) { struct loopback_device *dev; - char buf[MAX_STR_LEN]; + char buf[MAX_SYSFS_PATH + MAX_STR_LEN]; char dummy; int fds_idx = 
0; int i; @@ -655,7 +656,7 @@ static int open_poll_files(struct loopback_test *t) goto err; } read(t->fds[fds_idx].fd, &dummy, 1); - t->fds[fds_idx].events = EPOLLERR|EPOLLPRI; + t->fds[fds_idx].events = POLLERR | POLLPRI; t->fds[fds_idx].revents = 0; fds_idx++; } @@ -748,7 +749,7 @@ static int wait_for_complete(struct loopback_test *t) } for (i = 0; i < t->poll_count; i++) { - if (t->fds[i].revents & EPOLLPRI) { + if (t->fds[i].revents & POLLPRI) { /* Dummy read to clear the event */ read(t->fds[i].fd, &dummy, 1); number_of_events++; @@ -907,10 +908,10 @@ int main(int argc, char *argv[]) t.iteration_max = atoi(optarg); break; case 'S': - snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg); + snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg); break; case 'D': - snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg); + snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg); break; case 'm': t.mask = atol(optarg); @@ -961,10 +962,10 @@ int main(int argc, char *argv[]) } if (!strcmp(t.sysfs_prefix, "")) - snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix); + snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix); if (!strcmp(t.debugfs_prefix, "")) - snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix); + snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix); ret = find_loopback_devices(&t); if (ret) diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c index 038d6732c3fd..23026978a5a5 100644 --- a/drivers/staging/iio/frequency/ad9834.c +++ b/drivers/staging/iio/frequency/ad9834.c @@ -417,6 +417,10 @@ static int ad9834_probe(struct spi_device *spi) st = iio_priv(indio_dev); mutex_init(&st->lock); st->mclk = devm_clk_get(&spi->dev, NULL); + if (IS_ERR(st->mclk)) { + ret = PTR_ERR(st->mclk); + goto error_disable_reg; + } ret = clk_prepare_enable(st->mclk); if (ret) { diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c index 1b9b43659bdf..a20c0bfa68f3 100644 --- a/drivers/staging/isdn/gigaset/usb-gigaset.c +++ b/drivers/staging/isdn/gigaset/usb-gigaset.c @@ -571,8 +571,7 @@ static int gigaset_initcshw(struct cardstate *cs) { struct usb_cardstate *ucs; - cs->hw.usb = ucs = - kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); + cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); return -ENOMEM; @@ -584,9 +583,6 @@ static int gigaset_initcshw(struct cardstate *cs) ucs->bchars[3] = 0; ucs->bchars[4] = 0x11; ucs->bchars[5] = 0x13; - ucs->bulk_out_buffer = NULL; - ucs->bulk_out_urb = NULL; - ucs->read_urb = NULL; tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); @@ -685,6 +681,11 @@ static int gigaset_probe(struct usb_interface *interface, return -ENODEV; } + if (hostif->desc.bNumEndpoints < 2) { + dev_err(&interface->dev, "missing endpoints\n"); + return -ENODEV; + } + dev_info(&udev->dev, "%s: Device matched ... 
!\n", __func__); /* allocate memory for our device state and initialize it */ @@ -704,6 +705,12 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[0].desc; + if (!usb_endpoint_is_bulk_out(endpoint)) { + dev_err(&interface->dev, "missing bulk-out endpoint\n"); + retval = -ENODEV; + goto error; + } + buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->bulk_out_size = buffer_size; ucs->bulk_out_epnum = usb_endpoint_num(endpoint); @@ -723,6 +730,12 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[1].desc; + if (!usb_endpoint_is_int_in(endpoint)) { + dev_err(&interface->dev, "missing int-in endpoint\n"); + retval = -ENODEV; + goto error; + } + ucs->busy = 0; ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c index 0a23727d0dc3..871441658f0e 100644 --- a/drivers/staging/kpc2000/kpc2000/core.c +++ b/drivers/staging/kpc2000/kpc2000/core.c @@ -110,10 +110,10 @@ static ssize_t cpld_reconfigure(struct device *dev, const char *buf, size_t count) { struct kp2000_device *pcard = dev_get_drvdata(dev); - long wr_val; + unsigned long wr_val; int rv; - rv = kstrtol(buf, 0, &wr_val); + rv = kstrtoul(buf, 0, &wr_val); if (rv < 0) return rv; if (wr_val > 7) diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c index 6d9d41170832..32e5966ba5c5 100644 --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c @@ -553,13 +553,13 @@ static int hantro_attach_func(struct hantro_dev *vpu, goto err_rel_entity1; /* Connect the three entities */ - ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1, + ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rel_entity2; - ret = media_create_pad_link(&func->proc, 0, &func->sink, 0, + ret = media_create_pad_link(&func->proc, 1, &func->sink, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c index 7ab534936843..5f29b7a836db 100644 --- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c +++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c @@ -34,9 +34,11 @@ static void set_params(struct hantro_ctx *ctx) reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0); if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD) reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E; - reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E; - if (dec_param->nal_ref_idc) - reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E; + if (sps->profile_idc > 66) { + reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E; + if (dec_param->nal_ref_idc) + reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E; + } if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) && (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD || @@ -61,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx) /* always use the matrix sent from userspace */ reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E; - if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) + if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)) reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E; vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2); @@ -246,7 +248,7 @@ static void set_buffers(struct hantro_ctx *ctx) vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST); /* Higher profiles require DMV buffer appended to reference frames. 
*/ - if (ctrls->sps->profile_idc > 66) { + if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) { size_t pic_size = ctx->h264_dec.pic_size; size_t mv_offset = round_up(pic_size, 8); diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c index 0d758e0c0f99..a9c134204351 100644 --- a/drivers/staging/media/hantro/hantro_h264.c +++ b/drivers/staging/media/hantro/hantro_h264.c @@ -20,7 +20,7 @@ /* Size with u32 units. */ #define CABAC_INIT_BUFFER_SIZE (460 * 2) #define POC_BUFFER_SIZE 34 -#define SCALING_LIST_SIZE (6 * 16 + 6 * 64) +#define SCALING_LIST_SIZE (6 * 16 + 2 * 64) #define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1) @@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = { 0x1f0c2517, 0x1f261440 }; -/* - * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process - * to get the values in matrix order. In addition, the hardware requires bytes - * swapped within each subsequent 4 bytes. Both arrays below include both - * transformations. - */ -static const u32 zig_zag_4x4[] = { - 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12 -}; - -static const u32 zig_zag_8x8[] = { - 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6, - 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31, - 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48, - 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60 -}; - static void reorder_scaling_list(struct hantro_ctx *ctx) { @@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx) const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling; const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4); const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]); - const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8); const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]); struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu; - u8 *dst = tbl->scaling_list; - const u8 *src; + u32 *dst = (u32 *)tbl->scaling_list; + const u32 *src; int i, j; - BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4); - BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8); - BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) != - num_list_4x4 * list_len_4x4 + - num_list_8x8 * list_len_8x8); - - src = &scaling->scaling_list_4x4[0][0]; - for (i = 0; i < num_list_4x4; ++i) { - for (j = 0; j < list_len_4x4; ++j) - dst[zig_zag_4x4[j]] = src[j]; - src += list_len_4x4; - dst += list_len_4x4; + for (i = 0; i < num_list_4x4; i++) { + src = (u32 *)&scaling->scaling_list_4x4[i]; + for (j = 0; j < list_len_4x4 / 4; j++) + *dst++ = swab32(src[j]); } - src = &scaling->scaling_list_8x8[0][0]; - for (i = 0; i < num_list_8x8; ++i) { - for (j = 0; j < list_len_8x8; ++j) - dst[zig_zag_8x8[j]] = src[j]; - src += list_len_8x8; - dst += list_len_8x8; + /* Only Intra/Inter Y lists */ + for (i = 0; i < 2; i++) { + src = (u32 *)&scaling->scaling_list_8x8[i]; + for (j = 0; j < list_len_8x8 / 4; j++) + *dst++ = swab32(src[j]); } } @@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder { const struct v4l2_h264_dpb_entry *dpb; s32 pocs[HANTRO_H264_DPB_SIZE]; u8 unordered_reflist[HANTRO_H264_DPB_SIZE]; + int frame_nums[HANTRO_H264_DPB_SIZE]; s32 curpoc; u8 num_valid; }; @@ -294,13 +268,20 @@ static void init_reflist_builder(struct hantro_ctx *ctx, struct hantro_h264_reflist_builder *b) { + const struct v4l2_ctrl_h264_slice_params *slice_params; const struct v4l2_ctrl_h264_decode_params *dec_param; + const struct v4l2_ctrl_h264_sps *sps; struct 
vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx); const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb; struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; + int cur_frame_num, max_frame_num; unsigned int i; dec_param = ctx->h264_dec.ctrls.decode; + slice_params = &ctx->h264_dec.ctrls.slices[0]; + sps = ctx->h264_dec.ctrls.sps; + max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4); + cur_frame_num = slice_params->frame_num; memset(b, 0, sizeof(*b)); b->dpb = dpb; @@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx, continue; buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx)); + + /* + * Handle frame_num wraparound as described in section + * '8.2.4.1 Decoding process for picture numbers' of the spec. + * TODO: This logic will have to be adjusted when we start + * supporting interlaced content. + */ + if (dpb[i].frame_num > cur_frame_num) + b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num; + else + b->frame_nums[i] = dpb[i].frame_num; + b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt, dpb[i].bottom_field_order_cnt); b->unordered_reflist[b->num_valid] = i; @@ -353,7 +346,7 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data) * ascending order. */ if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) - return b->frame_num - a->frame_num; + return builder->frame_nums[idxb] - builder->frame_nums[idxa]; return a->pic_num - b->pic_num; } diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c index 3dae52abb96c..fcf95c1d39ca 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c @@ -367,19 +367,26 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; struct hantro_ctx *ctx = fh_to_ctx(priv); + struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); const struct hantro_fmt *formats; unsigned int num_fmts; - struct vb2_queue *vq; int ret; - /* Change not allowed if queue is busy. */ - vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); - if (vb2_is_busy(vq)) - return -EBUSY; + ret = vidioc_try_fmt_out_mplane(file, priv, f); + if (ret) + return ret; if (!hantro_is_encoder_ctx(ctx)) { struct vb2_queue *peer_vq; + /* + * In order to support dynamic resolution change, + * the decoder admits a resolution change, as long + * as the pixelformat remains. Can't be done if streaming. + */ + if (vb2_is_streaming(vq) || (vb2_is_busy(vq) && + pix_mp->pixelformat != ctx->src_fmt.pixelformat)) + return -EBUSY; /* * Since format change on the OUTPUT queue will reset * the CAPTURE queue, we can't allow doing so @@ -389,12 +396,15 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f) V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (vb2_is_busy(peer_vq)) return -EBUSY; + } else { + /* + * The encoder doesn't admit a format change if + * there are OUTPUT buffers allocated. 
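The reshuffled vidioc_s_fmt_out_mplane() checks amount to one small rule per direction; out_fmt_change_allowed() below is a made-up condensation of that rule, using the same vb2 helpers the hunk relies on:

#include <linux/errno.h>
#include <media/videobuf2-core.h>

static int out_fmt_change_allowed(struct vb2_queue *vq, bool is_encoder,
				  u32 new_pixfmt, u32 cur_pixfmt)
{
	/* Encoders refuse any change once OUTPUT buffers exist. */
	if (is_encoder)
		return vb2_is_busy(vq) ? -EBUSY : 0;

	/*
	 * Decoders allow a resolution-only change while buffers are
	 * allocated (dynamic resolution change), but never while
	 * streaming and never a pixelformat change on a busy queue.
	 */
	if (vb2_is_streaming(vq))
		return -EBUSY;
	if (vb2_is_busy(vq) && new_pixfmt != cur_pixfmt)
		return -EBUSY;

	return 0;
}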
+ */ + if (vb2_is_busy(vq)) + return -EBUSY; } - ret = vidioc_try_fmt_out_mplane(file, priv, f); - if (ret) - return ret; - formats = hantro_get_formats(ctx, &num_fmts); ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts, pix_mp->pixelformat); diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index b33a07bc9105..46576e32581f 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -26,6 +26,8 @@ #include #include "imx-media.h" +#define IMX_CAPTURE_NAME "imx-capture" + struct capture_priv { struct imx_media_video_dev vdev; @@ -69,8 +71,8 @@ static int vidioc_querycap(struct file *file, void *fh, { struct capture_priv *priv = video_drvdata(file); - strscpy(cap->driver, "imx-media-capture", sizeof(cap->driver)); - strscpy(cap->card, "imx-media-capture", sizeof(cap->card)); + strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver)); + strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", priv->src_sd->name); diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c index 73d8354e618c..e50b1f88e25b 100644 --- a/drivers/staging/media/imx/imx7-mipi-csis.c +++ b/drivers/staging/media/imx/imx7-mipi-csis.c @@ -350,6 +350,8 @@ static void mipi_csis_sw_reset(struct csi_state *state) static int mipi_csis_phy_init(struct csi_state *state) { state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy"); + if (IS_ERR(state->mipi_phy_regulator)) + return PTR_ERR(state->mipi_phy_regulator); return regulator_set_voltage(state->mipi_phy_regulator, 1000000, 1000000); @@ -966,7 +968,10 @@ static int mipi_csis_probe(struct platform_device *pdev) return ret; } - mipi_csis_phy_init(state); + ret = mipi_csis_phy_init(state); + if (ret < 0) + return ret; + mipi_csis_phy_reset(state); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h index c7cd27efac8a..0b1cb9f9cbd1 100644 --- a/drivers/staging/media/ipu3/include/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/intel-ipu3.h @@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s { __u16 reserved1; __u32 bayer_sign; __u8 bayer_nf; - __u8 reserved2[3]; + __u8 reserved2[7]; } __attribute__((aligned(32))) __packed; /** diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c index 0a1a04fd5d13..8dd1396909d7 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -133,6 +133,8 @@ vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb) struct amvdec_buffer *new_buf; new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL); + if (!new_buf) + return; new_buf->vb = vb; mutex_lock(&sess->bufs_recycle_lock); diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 2d3ea8b74dfd..3439f6ad6338 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -357,6 +357,8 @@ static int cedrus_probe(struct platform_device *pdev) dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model)); + strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME, + sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->mdev.ops = &cedrus_m2m_media_ops; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h 
b/drivers/staging/media/sunxi/cedrus/cedrus.h index 2f017a651848..3758a1c4e2d0 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.h +++ b/drivers/staging/media/sunxi/cedrus/cedrus.h @@ -179,12 +179,16 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf, static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx, int index, unsigned int plane) { - struct vb2_buffer *buf; + struct vb2_buffer *buf = NULL; + struct vb2_queue *vq; if (index < 0) return 0; - buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index]; + vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (vq) + buf = vb2_get_buffer(vq, index); + return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0; } diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index d6a782703c9b..c07526c12629 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -96,7 +96,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx, const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params; const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params; const struct v4l2_ctrl_h264_sps *sps = run->h264.sps; - struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; + struct vb2_queue *cap_q; struct cedrus_buffer *output_buf; struct cedrus_dev *dev = ctx->dev; unsigned long used_dpbs = 0; @@ -104,6 +104,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx, unsigned int output = 0; unsigned int i; + cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + memset(pic_list, 0, sizeof(pic_list)); for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) { @@ -167,12 +169,14 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx, enum cedrus_h264_sram_off sram) { const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params; - struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; + struct vb2_queue *cap_q; struct cedrus_dev *dev = ctx->dev; u8 sram_array[CEDRUS_MAX_REF_IDX]; unsigned int i; size_t size; + cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + memset(sram_array, 0, sizeof(sram_array)); for (i = 0; i < num_ref; i++) { @@ -240,8 +244,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx, sizeof(scaling->scaling_list_8x8[0])); cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1, - scaling->scaling_list_8x8[3], - sizeof(scaling->scaling_list_8x8[3])); + scaling->scaling_list_8x8[1], + sizeof(scaling->scaling_list_8x8[1])); cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4, scaling->scaling_list_4x4, diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h index ddd29788d685..f9dd8cbf3458 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h +++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h @@ -10,6 +10,9 @@ #ifndef _CEDRUS_REGS_H_ #define _CEDRUS_REGS_H_ +#define SHIFT_AND_MASK_BITS(v, h, l) \ + (((unsigned long)(v) << (l)) & GENMASK(h, l)) + /* * Common acronyms and contractions used in register descriptions: * * VLD : Variable-Length Decoder @@ -37,8 +40,8 @@ #define VE_PRIMARY_CHROMA_BUF_LEN 0xc4 #define VE_PRIMARY_FB_LINE_STRIDE 0xc8 -#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16)) -#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0)) +#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16) +#define 
VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0) #define VE_CHROMA_BUF_LEN 0xe8 @@ -46,7 +49,7 @@ #define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30) #define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30) #define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30) -#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0)) +#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0) #define VE_PRIMARY_OUT_FMT 0xec @@ -69,15 +72,15 @@ #define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00) -#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28)) +#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28) #define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x)) #define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \ - (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y)) + (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y)) #define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \ - (((p) << 10) & GENMASK(11, 10)) + SHIFT_AND_MASK_BITS(p, 11, 10) #define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \ - (((s) << 8) & GENMASK(9, 8)) + SHIFT_AND_MASK_BITS(s, 9, 8) #define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \ ((v) ? BIT(7) : 0) #define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \ @@ -98,19 +101,19 @@ #define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08) #define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \ - ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8)) + SHIFT_AND_MASK_BITS(DIV_ROUND_UP((w), 16), 15, 8) #define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \ - ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0)) + SHIFT_AND_MASK_BITS(DIV_ROUND_UP((h), 16), 7, 0) #define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c) -#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16)) -#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0)) +#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16) +#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0) #define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10) -#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8)) -#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0)) +#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8) +#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0) #define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14) @@ -225,7 +228,7 @@ #define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14) #define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14) #define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \ - (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8))) + (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0)) #define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4) #define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8) diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c index 26a31854c636..c48956f0ef29 100644 --- a/drivers/staging/most/net/net.c +++ b/drivers/staging/most/net/net.c @@ -81,6 +81,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo) unsigned int payload_len = skb->len - ETH_HLEN; unsigned int mdp_len = payload_len + MDP_HDR_LEN; + if (mdp_len < skb->len) { + pr_err("drop: too large packet! (%u)\n", skb->len); + return -EINVAL; + } + if (mbo->buffer_length < mdp_len) { pr_err("drop: too small buffer! 
(%d for %d)\n", mbo->buffer_length, mdp_len); @@ -128,6 +133,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo) u8 *buff = mbo->virt_address; unsigned int mep_len = skb->len + MEP_HDR_LEN; + if (mep_len < skb->len) { + pr_err("drop: too large packet! (%u)\n", skb->len); + return -EINVAL; + } + if (mbo->buffer_length < mep_len) { pr_err("drop: too small buffer! (%d for %d)\n", mbo->buffer_length, mep_len); diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig index af928b75a940..ce58042f2f21 100644 --- a/drivers/staging/mt7621-pci/Kconfig +++ b/drivers/staging/mt7621-pci/Kconfig @@ -2,7 +2,6 @@ config PCI_MT7621 tristate "MediaTek MT7621 PCI Controller" depends on RALINK - depends on PCI select PCI_DRIVERS_GENERIC help This selects a driver for the MediaTek MT7621 PCI Controller. diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c index 6b98827da57f..3633c924848e 100644 --- a/drivers/staging/mt7621-pci/pci-mt7621.c +++ b/drivers/staging/mt7621-pci/pci-mt7621.c @@ -29,15 +29,14 @@ #include #include #include +#include #include #include #include "../../pci/pci.h" /* sysctl */ -#define MT7621_CHIP_REV_ID 0x0c #define MT7621_GPIO_MODE 0x60 -#define CHIP_REV_MT7621_E2 0x0101 /* MediaTek specific configuration registers */ #define PCIE_FTS_NUM 0x70c @@ -126,6 +125,8 @@ struct mt7621_pcie_port { * @ports: pointer to PCIe port information * @perst: gpio reset * @rst: pointer to pcie reset + * @resets_inverted: depends on chip revision + * reset lines are inverted. */ struct mt7621_pcie { void __iomem *base; @@ -140,6 +141,7 @@ struct mt7621_pcie { struct list_head ports; struct gpio_desc *perst; struct reset_control *rst; + bool resets_inverted; }; static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg) @@ -229,9 +231,9 @@ static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port) static inline void mt7621_control_assert(struct mt7621_pcie_port *port) { - u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID); + struct mt7621_pcie *pcie = port->pcie; - if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2) + if (pcie->resets_inverted) reset_control_assert(port->pcie_rst); else reset_control_deassert(port->pcie_rst); @@ -239,9 +241,9 @@ static inline void mt7621_control_assert(struct mt7621_pcie_port *port) static inline void mt7621_control_deassert(struct mt7621_pcie_port *port) { - u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID); + struct mt7621_pcie *pcie = port->pcie; - if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2) + if (pcie->resets_inverted) reset_control_deassert(port->pcie_rst); else reset_control_assert(port->pcie_rst); @@ -641,9 +643,14 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host, return pci_host_probe(host); } +static const struct soc_device_attribute mt7621_pci_quirks_match[] = { + { .soc_id = "mt7621", .revision = "E2" } +}; + static int mt7621_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct soc_device_attribute *attr; struct mt7621_pcie *pcie; struct pci_host_bridge *bridge; int err; @@ -661,6 +668,10 @@ static int mt7621_pci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); INIT_LIST_HEAD(&pcie->ports); + attr = soc_device_match(mt7621_pci_quirks_match); + if (attr) + pcie->resets_inverted = true; + err = mt7621_pcie_parse_dt(pcie); if (err) { dev_err(dev, "Parsing DT failed\n"); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 
a8a864b40913..042220d86d33 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -14,7 +14,7 @@ #include #include -#ifdef CONFIG_MIPS +#ifdef CONFIG_CAVIUM_OCTEON_SOC #include diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h index b78ce9eaab85..ae014265064a 100644 --- a/drivers/staging/octeon/octeon-stubs.h +++ b/drivers/staging/octeon/octeon-stubs.h @@ -1,5 +1,8 @@ #define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512 -#define XKPHYS_TO_PHYS(p) (p) + +#ifndef XKPHYS_TO_PHYS +# define XKPHYS_TO_PHYS(p) (p) +#endif #define OCTEON_IRQ_WORKQ0 0 #define OCTEON_IRQ_RML 0 diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c index 952f2ab51347..c37591657bac 100644 --- a/drivers/staging/rtl8188eu/core/rtw_xmit.c +++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c @@ -776,7 +776,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN); memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN); - if (psta->qos_option) + if (psta && psta->qos_option) qos_option = true; } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { @@ -784,7 +784,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); - if (psta->qos_option) + if (psta && psta->qos_option) qos_option = true; } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv))); diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index ec5835d1aa8c..630e7d933b10 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -229,18 +229,21 @@ static char *translate_scan(struct adapter *padapter, /* parsing WPA/WPA2 IE */ { - u8 buf[MAX_WPA_IE_LEN]; + u8 *buf; u8 wpa_ie[255], rsn_ie[255]; u16 wpa_len = 0, rsn_len = 0; u8 *p; + buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC); + if (!buf) + return start; + rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid)); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len)); if (wpa_len > 0) { p = buf; - memset(buf, 0, MAX_WPA_IE_LEN); p += sprintf(p, "wpa_ie="); for (i = 0; i < wpa_len; i++) p += sprintf(p, "%02x", wpa_ie[i]); @@ -257,7 +260,6 @@ static char *translate_scan(struct adapter *padapter, } if (rsn_len > 0) { p = buf; - memset(buf, 0, MAX_WPA_IE_LEN); p += sprintf(p, "rsn_ie="); for (i = 0; i < rsn_len; i++) p += sprintf(p, "%02x", rsn_ie[i]); @@ -271,6 +273,7 @@ static char *translate_scan(struct adapter *padapter, iwe.u.data.length = rsn_len; start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie); } + kfree(buf); } {/* parsing WPS IE */ @@ -2022,7 +2025,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p) struct ieee_param *param; uint ret = 0; - if (p->length < sizeof(struct ieee_param) || !p->pointer) { + if (!p->pointer || p->length != sizeof(struct ieee_param)) { ret = -EINVAL; goto out; } @@ -2809,7 +2812,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) goto out; } - if 
(!p->pointer) { + if (!p->pointer || p->length != sizeof(struct ieee_param)) { ret = -EINVAL; goto out; } diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 4fac9dca798e..f7f09c0d273f 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -32,11 +32,14 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { /****** 8188EUS ********/ {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ + {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ + {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ @@ -70,7 +73,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf) phost_conf = pusbd->actconfig; pconf_desc = &phost_conf->desc; - phost_iface = &usb_intf->altsetting[0]; + phost_iface = usb_intf->cur_altsetting; piface_desc = &phost_iface->desc; pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index f932cb15e4e5..c702ee9691b1 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -1616,14 +1616,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); skb_push(skb, priv->rtllib->tx_headroom); ret = _rtl92e_tx(dev, skb); - if (ret != 0) - kfree_skb(skb); if (queue_index != MGNT_QUEUE) { priv->rtllib->stats.tx_bytes += (skb->len - priv->rtllib->tx_headroom); priv->rtllib->stats.tx_packets++; } + + if (ret != 0) + kfree_skb(skb); } static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 2821411878ce..511136dce3a4 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1422,7 +1422,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) (struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN); struct usb_device *udev = priv->udev; int pend; - int status; + int status, rt = -1; struct urb *tx_urb = NULL, *tx_urb_zero = NULL; unsigned int idx_pipe; @@ -1566,8 +1566,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) } if (bSend0Byte) { tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC); - if (!tx_urb_zero) - return -ENOMEM; + if (!tx_urb_zero) { + rt = -ENOMEM; + goto error; + } usb_fill_bulk_urb(tx_urb_zero, udev, usb_sndbulkpipe(udev, idx_pipe), &zero, 0, tx_zero_isr, dev); @@ -1577,7 +1579,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) "Error TX URB for zero byte %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status); - return -1; + goto error; } } netif_trans_update(dev); @@ -1588,7 +1590,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) RT_TRACE(COMP_ERR, "Error TX URB %d, error %d", 
atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status); - return -1; + +error: + dev_kfree_skb_any(skb); + usb_free_urb(tx_urb); + usb_free_urb(tx_urb_zero); + return rt; } static short rtl8192_usb_initendpoints(struct net_device *dev) diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index ba1288297ee4..a87562f632a7 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -247,7 +247,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter) pdvobjpriv->padapter = padapter; padapter->eeprom_address_size = 6; - phost_iface = &pintf->altsetting[0]; + phost_iface = pintf->cur_altsetting; piface_desc = &phost_iface->desc; pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints; if (pusbd->speed == USB_SPEED_HIGH) { diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index b44e902ed338..b6d56cfb0a19 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context) s32 ret; struct adapter *padapter; struct xmit_priv *pxmitpriv; - u8 thread_name[20] = "RTWHALXT"; - + u8 thread_name[20]; ret = _SUCCESS; padapter = context; pxmitpriv = &padapter->xmitpriv; - rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter)); + rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter)); thread_enter(thread_name); DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter)); diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index d1b199e3e5bd..d8d44fd9a92f 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -3379,7 +3379,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p) /* down(&ieee->wx_sem); */ - if (p->length < sizeof(struct ieee_param) || !p->pointer) { + if (!p->pointer || p->length != sizeof(struct ieee_param)) { ret = -EINVAL; goto out; } @@ -4213,7 +4213,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */ - if (!p->pointer) { + if (!p->pointer || p->length != sizeof(*param)) { ret = -EINVAL; goto out; } diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index d3784c44f6d0..3784a27641a6 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -18,18 +18,13 @@ static const struct sdio_device_id sdio_ids[] = { { SDIO_DEVICE(0x024c, 0x0523), }, + { SDIO_DEVICE(0x024c, 0x0525), }, { SDIO_DEVICE(0x024c, 0x0623), }, { SDIO_DEVICE(0x024c, 0x0626), }, { SDIO_DEVICE(0x024c, 0xb723), }, { /* end: all zeroes */ }, }; -static const struct acpi_device_id acpi_ids[] = { - {"OBDA8723", 0x0000}, - {} -}; - MODULE_DEVICE_TABLE(sdio, sdio_ids); -MODULE_DEVICE_TABLE(acpi, acpi_ids); static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id); static void rtw_dev_remove(struct sdio_func *func); diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 488f2539aa9a..81ecfd1a200d 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -561,7 +561,7 @@ static u_long get_word(struct vc_data *vc) return 0; } else if (tmpx < vc->vc_cols - 2 && (ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) && - get_char(vc, (u_short 
*)&tmp_pos + 1, &temp) > SPACE) { + get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) { tmp_pos += 2; tmpx++; } else { diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c index a8b4d0c5ab7e..032f3264fba1 100644 --- a/drivers/staging/speakup/selection.c +++ b/drivers/staging/speakup/selection.c @@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work) goto unref; } - console_lock(); set_selection_kernel(&sel, tty); - console_unlock(); unref: tty_kref_put(tty); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index b1595b13dea8..af6bf0736b52 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -3299,7 +3299,7 @@ static int __init vchiq_driver_init(void) return 0; region_unregister: - platform_driver_unregister(&vchiq_driver); + unregister_chrdev_region(vchiq_devid, 1); class_destroy: class_destroy(vchiq_class); diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c index 8d19ae71e7cc..4e651b698617 100644 --- a/drivers/staging/vt6656/baseband.c +++ b/drivers/staging/vt6656/baseband.c @@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv) memcpy(array, addr, length); - ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, - MESSAGE_REQUEST_BBREG, length, array); + ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE, + MESSAGE_REQUEST_BBREG, length, array); if (ret) goto end; diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 56cd77fd9ea0..7958fc165462 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -719,7 +719,7 @@ end: */ int vnt_radio_power_on(struct vnt_private *priv) { - int ret = true; + int ret = 0; vnt_exit_deep_sleep(priv); diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index 6074ceda78bf..e2fabe818b19 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -52,6 +52,8 @@ #define RATE_AUTO 12 #define MAX_RATE 12 +#define VNT_B_RATES (BIT(RATE_1M) | BIT(RATE_2M) |\ + BIT(RATE_5M) | BIT(RATE_11M)) /* * device specific @@ -259,6 +261,7 @@ struct vnt_private { u8 mac_hw; /* netdev */ struct usb_device *usb; + struct usb_interface *intf; u64 tsf_time; u8 rx_rate; diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c index 3b94e80f1d5e..879ceef517fb 100644 --- a/drivers/staging/vt6656/dpc.c +++ b/drivers/staging/vt6656/dpc.c @@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb, vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm); - priv->bb_pre_ed_rssi = (u8)rx_dbm + 1; + priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1; priv->current_rssi = priv->bb_pre_ed_rssi; skb_pull(skb, 8); diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c index f40947955675..af215860be4c 100644 --- a/drivers/staging/vt6656/int.c +++ b/drivers/staging/vt6656/int.c @@ -99,9 +99,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr) info->status.rates[0].count = tx_retry; - if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) { + if (!(tsr & TSR_TMO)) { info->status.rates[0].idx = idx; - info->flags |= IEEE80211_TX_STAT_ACK; + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + info->flags |= IEEE80211_TX_STAT_ACK; } ieee80211_tx_status_irqsafe(priv->hw, context->skb); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 
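
[Annotation] The int.c hunk above changes TX-status reporting in two ways: only TSR_TMO now counts as a hard timeout (TSR_RETRYTMO no longer forces a failure), and IEEE80211_TX_STAT_ACK is withheld for frames sent with IEEE80211_TX_CTL_NO_ACK (e.g. multicast), which mac80211 would otherwise misaccount as acknowledged. A minimal sketch of the resulting logic, with hypothetical names since it condenses the driver code:

#include <net/mac80211.h>

/* Sketch only: report one completed frame back to mac80211.
 * 'timed_out' stands in for the driver's TSR_TMO bit. */
static void report_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb,
			   bool timed_out)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (!timed_out && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK; /* frame was ACKed */

	/* safe from hard-irq context, as in the driver's interrupt path */
	ieee80211_tx_status_irqsafe(hw, skb);
}
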
856ba97aec4f..69a48383611f 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -950,7 +950,7 @@ static const struct ieee80211_ops vnt_mac_ops = { int vnt_init(struct vnt_private *priv) { - if (!(vnt_init_registers(priv))) + if (vnt_init_registers(priv)) return -EAGAIN; SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr); @@ -993,6 +993,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) priv = hw->priv; priv->hw = hw; priv->usb = udev; + priv->intf = intf; vnt_set_options(priv); @@ -1015,6 +1016,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS); ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(priv->hw, SUPPORTS_PS); + ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK); priv->hw->max_signal = 100; diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index 4e9cfacf75f2..ab6141f361af 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -278,11 +278,9 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context, PK_TYPE_11B, &buf->b); /* Get Duration and TimeStamp */ - if (ieee80211_is_pspoll(hdr->frame_control)) { - __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15)); - - buf->duration_a = dur; - buf->duration_b = dur; + if (ieee80211_is_nullfunc(hdr->frame_control)) { + buf->duration_a = hdr->duration_id; + buf->duration_b = hdr->duration_id; } else { buf->duration_a = vnt_get_duration_le(priv, tx_context->pkt_type, need_ack); @@ -371,10 +369,8 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context, tx_context->pkt_type, &buf->ab); /* Get Duration and TimeStampOff */ - if (ieee80211_is_pspoll(hdr->frame_control)) { - __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15)); - - buf->duration = dur; + if (ieee80211_is_nullfunc(hdr->frame_control)) { + buf->duration = hdr->duration_id; } else { buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type, need_ack); @@ -815,10 +811,14 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) if (info->band == NL80211_BAND_5GHZ) { pkt_type = PK_TYPE_11A; } else { - if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) - pkt_type = PK_TYPE_11GB; - else - pkt_type = PK_TYPE_11GA; + if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + if (priv->basic_rates & VNT_B_RATES) + pkt_type = PK_TYPE_11GB; + else + pkt_type = PK_TYPE_11GA; + } else { + pkt_type = PK_TYPE_11A; + } } } else { pkt_type = PK_TYPE_11B; diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index d3304df6bd53..d977d4777e4f 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: @@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data) reg_off, reg, sizeof(u8), &data); } +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 length, u8 *data) +{ + int ret = 0, i; + + for (i = 0; i < length; i += block) { + u16 len = min_t(int, length - i, block); + + ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, + i, reg, len, data + i); + if (ret) + goto end; + } +end: + return ret; +} + int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer) { @@ 
-103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h index 95147ec7b96a..b65d9c01a211 100644 --- a/drivers/staging/vt6656/usbpipe.h +++ b/drivers/staging/vt6656/usbpipe.h @@ -18,6 +18,8 @@ #include "device.h" +#define VNT_REG_BLOCK_SIZE 64 + int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer); int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, @@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data); int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data); +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 len, u8 *data); + int vnt_start_interrupt_urb(struct vnt_private *priv); int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb); int vnt_tx_context(struct vnt_private *priv, diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 3eb2f11a5de1..2c5250ca2801 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work) if (vnt_init(priv)) { /* If fail all ends TODO retry */ dev_err(&priv->usb->dev, "failed to start\n"); + usb_set_intfdata(priv->intf, NULL); ieee80211_free_hw(priv->hw); return; } diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c index f2b7d5a1be17..77d0732f451b 100644 --- a/drivers/staging/wilc1000/wilc_hif.c +++ b/drivers/staging/wilc1000/wilc_hif.c @@ -473,20 +473,27 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len); if (rates_ie) { rates_len = rates_ie[1]; + if (rates_len > WILC_MAX_RATES_SUPPORTED) + rates_len = WILC_MAX_RATES_SUPPORTED; param->supp_rates[0] = rates_len; memcpy(&param->supp_rates[1], rates_ie + 2, rates_len); } - supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data, - ies->len); - if (supp_rates_ie) { - if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len)) - param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED; - else - param->supp_rates[0] += supp_rates_ie[1]; + if (rates_len < WILC_MAX_RATES_SUPPORTED) { + supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, + ies->data, ies->len); + if (supp_rates_ie) { + u8 ext_rates = supp_rates_ie[1]; - memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2, - (param->supp_rates[0] - rates_len)); + if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len)) + param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED; + else + param->supp_rates[0] += ext_rates; + + memcpy(&param->supp_rates[rates_len + 1], + supp_rates_ie + 2, + (param->supp_rates[0] - rates_len)); + } } ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len); diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 22f21831649b..c3cd6f389a98 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -1419,8 +1419,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE) wilc_wfi_deinit_mon_interface(wl, true); vif->iftype =
WILC_STATION_MODE; - wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), - WILC_STATION_MODE, vif->idx); + + if (wl->initialized) + wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), + WILC_STATION_MODE, vif->idx); memset(priv->assoc_stainfo.sta_associated_bss, 0, WILC_MAX_NUM_STA * ETH_ALEN); @@ -1432,8 +1434,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, priv->wdev.iftype = type; vif->monitor_flag = 0; vif->iftype = WILC_CLIENT_MODE; - wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), - WILC_STATION_MODE, vif->idx); + + if (wl->initialized) + wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), + WILC_STATION_MODE, vif->idx); break; case NL80211_IFTYPE_AP: @@ -1450,8 +1454,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, dev->ieee80211_ptr->iftype = type; priv->wdev.iftype = type; vif->iftype = WILC_GO_MODE; - wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), - WILC_AP_MODE, vif->idx); + + if (wl->initialized) + wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), + WILC_AP_MODE, vif->idx); break; default: diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig index ac136663fa8e..082c16a31616 100644 --- a/drivers/staging/wlan-ng/Kconfig +++ b/drivers/staging/wlan-ng/Kconfig @@ -4,6 +4,7 @@ config PRISM2_USB depends on WLAN && USB && CFG80211 select WIRELESS_EXT select WEXT_PRIV + select CRC32 help This is the wlan-ng prism 2.5/3 USB driver for a wide range of old USB wireless devices. diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 28d372a0663a..e29c14e0ed49 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3374,6 +3374,8 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) { pr_debug("overlen frm: len=%zd\n", skblen - sizeof(struct p80211_caphdr)); + + return; } skb = dev_alloc_skb(skblen); diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 7350fe5d96a3..a8860d2aee68 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -959,7 +959,7 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp) } } - return 0; + return result; } /*---------------------------------------------------------------- diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index b5ba176004c1..d8d86761b790 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -180,6 +180,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface) cancel_work_sync(&hw->link_bh); cancel_work_sync(&hw->commsqual_bh); + cancel_work_sync(&hw->usb_work); /* Now we complete any outstanding commands * and tell everyone who is waiting for their diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 51ddca2033e0..8fe9b12a07a4 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -70,7 +70,7 @@ static int chap_check_algorithm(const char *a_str) if (!token) goto out; - if (!strncmp(token, "5", 1)) { + if (!strcmp(token, "5")) { pr_debug("Selected MD5 Algorithm\n"); kfree(orig); return CHAP_DIGEST_MD5; diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 3c79411c4cd0..6b4b354c88aa 100644 --- a/drivers/target/target_core_fabric_lib.c +++ 
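
[Annotation] The iscsi_target_auth.c change above is subtle but worth spelling out: strncmp(token, "5", 1) compares only the first character, so any algorithm token beginning with '5' (for example "55" or "5abc") would silently select MD5, while strcmp() accepts exactly "5". A tiny userspace demonstration with hypothetical inputs, not driver code:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *tokens[] = { "5", "55", "5abc" };

	for (int i = 0; i < 3; i++)
		printf("token=%-4s strncmp-match=%d strcmp-match=%d\n",
		       tokens[i],
		       strncmp(tokens[i], "5", 1) == 0, /* 1 for all three */
		       strcmp(tokens[i], "5") == 0);    /* 1 only for "5" */
	return 0;
}
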
b/drivers/target/target_core_fabric_lib.c @@ -118,7 +118,7 @@ static int srp_get_pr_transport_id( memset(buf + 8, 0, leading_zero_bytes); rc = hex2bin(buf + 8 + leading_zero_bytes, p, count); if (rc < 0) { - pr_debug("hex2bin failed for %s: %d\n", __func__, rc); + pr_debug("hex2bin failed for %s: %d\n", p, rc); return rc; } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 6949ea8bc387..51ffd5c002de 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, } bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); - bip_set_seed(bip, bio->bi_iter.bi_sector); + /* virtual start sector must be in integrity interval units */ + bip_set_seed(bip, bio->bi_iter.bi_sector >> + (bi->interval_exp - SECTOR_SHIFT)); pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, (unsigned long long)bip->bip_iter.bi_sector); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7f06a62f8661..d542e26ca56a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess) } EXPORT_SYMBOL(transport_free_session); +static int target_release_res(struct se_device *dev, void *data) +{ + struct se_session *sess = data; + + if (dev->reservation_holder == sess) + target_release_reservation(dev); + return 0; +} + void transport_deregister_session(struct se_session *se_sess) { struct se_portal_group *se_tpg = se_sess->se_tpg; @@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess) se_sess->fabric_sess_ptr = NULL; spin_unlock_irqrestore(&se_tpg->session_lock, flags); + /* + * Since the session is being removed, release SPC-2 + * reservations held by the session that is disappearing. + */ + target_for_each_device(target_release_res, se_sess); + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", se_tpg->se_tpg_tfo->fabric_name); /* @@ -651,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) target_remove_from_state_list(cmd); + /* + * Clear struct se_cmd->se_lun before the handoff to FE. 
+ */ + cmd->se_lun = NULL; + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if frontend context caller is requesting the stopping of @@ -678,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) return cmd->se_tfo->check_stop_free(cmd); } +static void transport_lun_remove_cmd(struct se_cmd *cmd) +{ + struct se_lun *lun = cmd->se_lun; + + if (!lun) + return; + + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); +} + static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -768,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd) WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); } @@ -1680,6 +1713,7 @@ static void target_complete_tmr_failure(struct work_struct *work) se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; se_cmd->se_tfo->queue_tm_rsp(se_cmd); + transport_lun_remove_cmd(se_cmd); transport_cmd_check_stop_to_fabric(se_cmd); } @@ -1870,6 +1904,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, goto queue_full; check_stop: + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -2167,6 +2202,7 @@ queue_status: transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } @@ -2261,6 +2297,7 @@ static void target_complete_ok_work(struct work_struct *work) if (ret) goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } @@ -2286,6 +2323,7 @@ static void target_complete_ok_work(struct work_struct *work) if (ret) goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } @@ -2321,6 +2359,7 @@ queue_rsp: if (ret) goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } @@ -2356,6 +2395,7 @@ queue_status: break; } + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -2682,6 +2722,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) */ if (cmd->state_active) target_remove_from_state_list(cmd); + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); } if (aborted) cmd->free_compl = &compl; @@ -2753,9 +2796,6 @@ static void target_release_cmd_kref(struct kref *kref) struct completion *abrt_compl = se_cmd->abrt_compl; unsigned long flags; - if (se_cmd->lun_ref_active) - percpu_ref_put(&se_cmd->se_lun->lun_ref); - if (se_sess) { spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_del_init(&se_cmd->se_cmd_list); diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index d1ad512e1708..3ca71e3812ed 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -3,6 +3,7 @@ config OPTEE tristate "OP-TEE" depends on HAVE_ARM_SMCCC + depends on MMU help This implements the OP-TEE Trusted Execution Environment (TEE) driver. diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index 13b0269a0abc..cf2367ba08d6 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -554,6 +554,13 @@ static int check_mem_type(unsigned long start, size_t num_pages) struct mm_struct *mm = current->mm; int rc; + /* + * Allow kernel address to register with OP-TEE as kernel + * pages are configured as normal memory only. 
+ */ + if (virt_addr_valid(start)) + return 0; + down_read(&mm->mmap_sem); rc = __check_mem_type(find_vma(mm, start), start + num_pages * PAGE_SIZE); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 1854a3db7345..b830e0a87fba 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -643,11 +643,6 @@ static struct optee *optee_probe(struct device_node *np) if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) pr_info("dynamic shared memory is enabled\n"); - rc = optee_enumerate_devices(); - if (rc) - goto err; - - pr_info("initialized driver\n"); return optee; err: if (optee) { @@ -702,9 +697,10 @@ static struct optee *optee_svc; static int __init optee_driver_init(void) { - struct device_node *fw_np; - struct device_node *np; - struct optee *optee; + struct device_node *fw_np = NULL; + struct device_node *np = NULL; + struct optee *optee = NULL; + int rc = 0; /* Node is supposed to be below /firmware */ fw_np = of_find_node_by_name(NULL, "firmware"); @@ -723,6 +719,14 @@ static int __init optee_driver_init(void) if (IS_ERR(optee)) return PTR_ERR(optee); + rc = optee_enumerate_devices(); + if (rc) { + optee_remove(optee); + return rc; + } + + pr_info("initialized driver\n"); + optee_svc = optee; return 0; diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c index de1d9b8fad90..d767eebf30bd 100644 --- a/drivers/tee/optee/shm_pool.c +++ b/drivers/tee/optee/shm_pool.c @@ -17,6 +17,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, { unsigned int order = get_order(size); struct page *page; + int rc = 0; page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!page) @@ -26,12 +27,34 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, shm->paddr = page_to_phys(page); shm->size = PAGE_SIZE << order; - return 0; + if (shm->flags & TEE_SHM_DMA_BUF) { + unsigned int nr_pages = 1 << order, i; + struct page **pages; + + pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < nr_pages; i++) { + pages[i] = page; + page++; + } + + shm->flags |= TEE_SHM_REGISTER; + rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, + (unsigned long)shm->kaddr); + kfree(pages); + } + + return rc; } static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) { + if (shm->flags & TEE_SHM_DMA_BUF) + optee_shm_unregister(shm->ctx, shm); + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); shm->kaddr = NULL; } diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c index 5825ac581f56..680f1a070606 100644 --- a/drivers/thermal/broadcom/brcmstb_thermal.c +++ b/drivers/thermal/broadcom/brcmstb_thermal.c @@ -49,7 +49,7 @@ #define AVS_TMON_TP_TEST_ENABLE 0x20 /* Default coefficients */ -#define AVS_TMON_TEMP_SLOPE -487 +#define AVS_TMON_TEMP_SLOPE 487 #define AVS_TMON_TEMP_OFFSET 410040 /* HW related temperature constants */ @@ -108,23 +108,12 @@ struct brcmstb_thermal_priv { struct thermal_zone_device *thermal; }; -static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope, - int *offset) -{ - *slope = thermal_zone_get_slope(tz); - *offset = thermal_zone_get_offset(tz); -} - /* Convert a HW code to a temperature reading (millidegree celsius) */ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz, u32 code) { - const int val = code & AVS_TMON_TEMP_MASK; - int slope, offset; - - avs_tmon_get_coeffs(tz, &slope, &offset); - - return slope * val + offset; + return (AVS_TMON_TEMP_OFFSET - + (int)((code & 
AVS_TMON_TEMP_MAX) * AVS_TMON_TEMP_SLOPE)); } /* @@ -136,20 +125,18 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz, static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz, int temp, bool low) { - int slope, offset; - if (temp < AVS_TMON_TEMP_MIN) - return AVS_TMON_TEMP_MAX; /* Maximum code value */ + return AVS_TMON_TEMP_MAX; /* Maximum code value */ - avs_tmon_get_coeffs(tz, &slope, &offset); - - if (temp >= offset) + if (temp >= AVS_TMON_TEMP_OFFSET) return 0; /* Minimum code value */ if (low) - return (u32)(DIV_ROUND_UP(offset - temp, abs(slope))); + return (u32)(DIV_ROUND_UP(AVS_TMON_TEMP_OFFSET - temp, + AVS_TMON_TEMP_SLOPE)); else - return (u32)((offset - temp) / abs(slope)); + return (u32)((AVS_TMON_TEMP_OFFSET - temp) / + AVS_TMON_TEMP_SLOPE); } static int brcmstb_get_temp(void *data, int *temp) diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index 372dbbaaafb8..21d4d6e6409a 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -152,8 +152,8 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data) db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING, next_low, next_high); - dev_info(&th->tz->device, - "PRCMU set max %ld, min %ld\n", next_high, next_low); + dev_dbg(&th->tz->device, + "PRCMU set max %ld, min %ld\n", next_high, next_low); } else if (idx == num_points - 1) /* So we roof out 1 degree over the max point */ th->interpolated_temp = db8500_thermal_points[idx] + 1; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index d4481cc8958f..c28271817e43 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -304,7 +304,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, &tz->poll_queue, msecs_to_jiffies(delay)); else - cancel_delayed_work_sync(&tz->poll_queue); + cancel_delayed_work(&tz->poll_queue); } static void monitor_thermal_zone(struct thermal_zone_device *tz) @@ -1414,7 +1414,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) mutex_unlock(&thermal_list_lock); - thermal_zone_device_set_polling(tz, 0); + cancel_delayed_work_sync(&tz->poll_queue); thermal_set_governor(tz, NULL); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 5ea8db667e83..e53932d27ac5 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw) static int nvm_authenticate_host(struct tb_switch *sw) { - int ret; + int ret = 0; /* * Root switch NVM upgrade requires that we disconnect the @@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw) * already). */ if (!sw->safe_mode) { + u32 status; + ret = tb_domain_disconnect_all_paths(sw->tb); if (ret) return ret; @@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw) * everything goes well so getting timeout is expected. */ ret = dma_port_flash_update_auth(sw->dma_port); - return ret == -ETIMEDOUT ? 0 : ret; + if (!ret || ret == -ETIMEDOUT) + return 0; + + /* + * Any error from update auth operation requires power + * cycling of the host router. + */ + tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); + if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) + nvm_set_auth_status(sw, status); } /* @@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw) * switch. 
*/ dma_port_power_cycle(sw->dma_port); - return 0; + return ret; } static int nvm_authenticate_device(struct tb_switch *sw) @@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw) int ret, retries = 10; ret = dma_port_flash_update_auth(sw->dma_port); - if (ret && ret != -ETIMEDOUT) + switch (ret) { + case 0: + case -ETIMEDOUT: + case -EACCES: + case -EINVAL: + /* Power cycle is required */ + break; + default: return ret; + } /* * Poll here for the authentication status. It takes some time @@ -255,6 +274,12 @@ out: return ret; } +static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + return -EPERM; +} + static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) { @@ -300,6 +325,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, config.read_only = true; } else { config.name = "nvm_non_active"; + config.reg_read = tb_switch_nvm_no_read; config.reg_write = tb_switch_nvm_write; config.root_only = true; } @@ -1246,8 +1272,6 @@ static ssize_t nvm_authenticate_store(struct device *dev, */ nvm_authenticate_start(sw); ret = nvm_authenticate_host(sw); - if (ret) - nvm_authenticate_complete(sw); } else { ret = nvm_authenticate_device(sw); } @@ -1690,13 +1714,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) int ret; switch (sw->generation) { - case 3: - break; - case 2: /* Only root switch can be upgraded */ if (tb_route(sw)) return 0; + + /* fallthrough */ + case 3: + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; break; default: @@ -1720,6 +1747,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (sw->no_nvm_upgrade) return 0; + /* + * If there is status already set then authentication failed + * when the dma_port_flash_update_auth() returned. Power cycling + * is not needed (it was done already) so only thing we do here + * is to unblock runtime PM of the root port. + */ + nvm_get_auth_status(sw, &status); + if (status) { + if (!tb_route(sw)) + nvm_authenticate_complete(sw); + return 0; + } + /* * Check status of the previous flash authentication. 
If there * is one we need to power cycle the switch in any case to make @@ -1735,9 +1775,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (status) { tb_sw_info(sw, "switch flash authentication failed\n"); - ret = tb_switch_set_uuid(sw); - if (ret) - return ret; nvm_set_auth_status(sw, status); } diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index a0ac16ee6575..a9719858c950 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -582,6 +582,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl, return AE_OK; } +static const struct acpi_device_id serdev_acpi_devices_blacklist[] = { + { "INT3511", 0 }, + { "INT3512", 0 }, + { }, +}; + static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -591,6 +597,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, if (acpi_bus_get_device(handle, &adev)) return AE_OK; + /* Skip if black listed */ + if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist)) + return AE_OK; + return acpi_serdev_register_device(ctrl, adev); } diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index d1cdd2ab8b4c..d367803e2044 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx) { - const struct tty_port_client_operations *old_ops; struct serdev_controller *ctrl; struct serport *serport; int ret; @@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port, ctrl->ops = &ctrl_ops; - old_ops = port->client_ops; port->client_ops = &client_ops; port->client_data = ctrl; @@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port, err_reset_data: port->client_data = NULL; - port->client_ops = old_ops; + port->client_ops = &tty_port_default_client_ops; serdev_controller_put(ctrl); return ERR_PTR(ret); @@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port) return -ENODEV; serdev_controller_remove(ctrl); - port->client_ops = NULL; port->client_data = NULL; + port->client_ops = &tty_port_default_client_ops; serdev_controller_put(ctrl); return 0; diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index 0438d9a905ce..6ba2efde7252 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -379,7 +379,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev) port.port.line = rc; port.port.irq = irq_of_parse_and_map(np, 0); - port.port.irqflags = IRQF_SHARED; port.port.handle_irq = aspeed_vuart_handle_irq; port.port.iotype = UPIO_MEM; port.port.type = PORT_16550A; diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c index 8ce700c1a7fc..4997c519ebb3 100644 --- a/drivers/tty/serial/8250/8250_bcm2835aux.c +++ b/drivers/tty/serial/8250/8250_bcm2835aux.c @@ -113,7 +113,7 @@ static int bcm2835aux_serial_remove(struct platform_device *pdev) { struct bcm2835aux_data *data = platform_get_drvdata(pdev); - serial8250_unregister_port(data->uart.port.line); + serial8250_unregister_port(data->line); clk_disable_unprepare(data->clk); return 0; diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e682390ce0de..28bdbd7b4ab2 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ 
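
[Annotation] One easy misreading in the serdev core.c hunk above: acpi_match_device_ids() follows the kernel's 0-on-success convention, so !acpi_match_device_ids(adev, serdev_acpi_devices_blacklist) is true when the device does appear in the blacklist, and enumeration is skipped. A sketch that makes the polarity explicit (the helper name is hypothetical):

#include <linux/acpi.h>

static bool serdev_acpi_is_blacklisted(struct acpi_device *adev,
				       const struct acpi_device_id *ids)
{
	/* acpi_match_device_ids() returns 0 when an entry matches */
	return acpi_match_device_ids(adev, ids) == 0;
}
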
b/drivers/tty/serial/8250/8250_core.c @@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up) struct hlist_head *h; struct hlist_node *n; struct irq_info *i; - int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0; + int ret; mutex_lock(&hash_mutex); @@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up) INIT_LIST_HEAD(&up->list); i->head = &up->list; spin_unlock_irq(&i->lock); - irq_flags |= up->port.irqflags; ret = request_irq(up->port.irq, serial8250_interrupt, - irq_flags, up->port.name, i); + up->port.irqflags, up->port.name, i); if (ret < 0) serial_do_unlink(i, up); } diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 1c72fdc2dd37..51a7d3b19b39 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -386,10 +386,10 @@ static int dw8250_probe(struct platform_device *pdev) { struct uart_8250_port uart = {}, *up = &uart; struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - int irq = platform_get_irq(pdev, 0); struct uart_port *p = &up->port; struct device *dev = &pdev->dev; struct dw8250_data *data; + int irq; int err; u32 val; @@ -398,11 +398,9 @@ static int dw8250_probe(struct platform_device *pdev) return -EINVAL; } - if (irq < 0) { - if (irq != -EPROBE_DEFER) - dev_err(dev, "cannot get irq\n"); + irq = platform_get_irq(pdev, 0); + if (irq < 0) return irq; - } spin_lock_init(&p->lock); p->mapbase = regs->start; diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 597eb9d16f21..e1268646ee56 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -25,6 +25,14 @@ #include "8250.h" +#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052 +#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d +#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c +#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8 +#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2 +#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db +#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a @@ -658,6 +666,22 @@ static int __maybe_unused exar_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume); +static const struct exar8250_board acces_com_2x = { + .num_ports = 2, + .setup = pci_xr17c154_setup, +}; + +static const struct exar8250_board acces_com_4x = { + .num_ports = 4, + .setup = pci_xr17c154_setup, +}; + +static const struct exar8250_board acces_com_8x = { + .num_ports = 8, + .setup = pci_xr17c154_setup, +}; + + static const struct exar8250_board pbn_fastcom335_2 = { .num_ports = 2, .setup = pci_fastcom335_setup, @@ -726,6 +750,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = { } static const struct pci_device_id exar_pci_tbl[] = { + EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x), + EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x), + + CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect), CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect), CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect), diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 
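
[Annotation] Taken together with the 8250_aspeed_vuart.c hunk above and the 8250_of.c and 8250_port.c hunks further down, the serial_link_irq_chain() change moves the shared-interrupt decision to one place: serial8250_do_startup() now sets IRQF_SHARED in port->irqflags when UPF_SHARE_IRQ is set, and request_irq() simply consumes port->irqflags instead of each bus glue forcing the flag itself. The shape of that decision, as a sketch:

#include <linux/interrupt.h>
#include <linux/serial_core.h>

/* Sketch: derive request_irq() flags from the port's capability bit
 * rather than hard-coding IRQF_SHARED in every probe path. */
static unsigned long serial8250_irq_flags(struct uart_port *port)
{
	return (port->flags & UPF_SHARE_IRQ) ? IRQF_SHARED : 0;
}
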
b411ba4eb5e9..4d067f515f74 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -544,7 +544,7 @@ static int mtk8250_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - data->rx_wakeup_irq = platform_get_irq(pdev, 1); + data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1); return 0; } diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 0826cfdbd406..9ba31701a372 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -172,7 +172,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->type = type; port->uartclk = clk; - port->irqflags |= IRQF_SHARED; if (of_property_read_bool(np, "no-loopback-test")) port->flags |= UPF_SKIP_TEST; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 6adbadd6a56a..8a01d034f9d1 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -745,16 +745,8 @@ static int pci_ni8430_init(struct pci_dev *dev) } /* UART Port Control Register */ -#define NI16550_PCR_OFFSET 0x0f -#define NI16550_PCR_RS422 0x00 -#define NI16550_PCR_ECHO_RS485 0x01 -#define NI16550_PCR_DTR_RS485 0x02 -#define NI16550_PCR_AUTO_RS485 0x03 -#define NI16550_PCR_WIRE_MODE_MASK 0x03 -#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3) -#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6) -#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3) -#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3) +#define NI8430_PORTCON 0x0f +#define NI8430_PORTCON_TXVR_ENABLE (1 << 3) static int pci_ni8430_setup(struct serial_private *priv, @@ -776,117 +768,14 @@ pci_ni8430_setup(struct serial_private *priv, return -ENOMEM; /* enable the transceiver */ - writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT, - p + offset + NI16550_PCR_OFFSET); + writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE, + p + offset + NI8430_PORTCON); iounmap(p); return setup_port(priv, port, bar, offset, board->reg_shift); } -static int pci_ni8431_config_rs485(struct uart_port *port, - struct serial_rs485 *rs485) -{ - u8 pcr, acr; - struct uart_8250_port *up; - - up = container_of(port, struct uart_8250_port, port); - acr = up->acr; - pcr = port->serial_in(port, NI16550_PCR_OFFSET); - pcr &= ~NI16550_PCR_WIRE_MODE_MASK; - - if (rs485->flags & SER_RS485_ENABLED) { - /* RS-485 */ - if ((rs485->flags & SER_RS485_RX_DURING_TX) && - (rs485->flags & SER_RS485_RTS_ON_SEND)) { - dev_dbg(port->dev, "Invalid 2-wire mode\n"); - return -EINVAL; - } - - if (rs485->flags & SER_RS485_RX_DURING_TX) { - /* Echo */ - dev_vdbg(port->dev, "2-wire DTR with echo\n"); - pcr |= NI16550_PCR_ECHO_RS485; - acr |= NI16550_ACR_DTR_MANUAL_DTR; - } else { - /* Auto or DTR */ - if (rs485->flags & SER_RS485_RTS_ON_SEND) { - /* Auto */ - dev_vdbg(port->dev, "2-wire Auto\n"); - pcr |= NI16550_PCR_AUTO_RS485; - acr |= NI16550_ACR_DTR_AUTO_DTR; - } else { - /* DTR-controlled */ - /* No Echo */ - dev_vdbg(port->dev, "2-wire DTR no echo\n"); - pcr |= NI16550_PCR_DTR_RS485; - acr |= NI16550_ACR_DTR_MANUAL_DTR; - } - } - } else { - /* RS-422 */ - dev_vdbg(port->dev, "4-wire\n"); - pcr |= NI16550_PCR_RS422; - acr |= NI16550_ACR_DTR_MANUAL_DTR; - } - - dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr); - port->serial_out(port, NI16550_PCR_OFFSET, pcr); - - up->acr = acr; - port->serial_out(port, UART_SCR, UART_ACR); - port->serial_out(port, UART_ICR, up->acr); - - /* Update the cache. 
*/ - port->rs485 = *rs485; - - return 0; -} - -static int pci_ni8431_setup(struct serial_private *priv, - const struct pciserial_board *board, - struct uart_8250_port *uart, int idx) -{ - u8 pcr, acr; - struct pci_dev *dev = priv->dev; - void __iomem *addr; - unsigned int bar, offset = board->first_offset; - - if (idx >= board->num_ports) - return 1; - - bar = FL_GET_BASE(board->flags); - offset += idx * board->uart_offset; - - addr = pci_ioremap_bar(dev, bar); - if (!addr) - return -ENOMEM; - - /* enable the transceiver */ - writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT, - addr + NI16550_PCR_OFFSET); - - pcr = readb(addr + NI16550_PCR_OFFSET); - pcr &= ~NI16550_PCR_WIRE_MODE_MASK; - - /* set wire mode to default RS-422 */ - pcr |= NI16550_PCR_RS422; - acr = NI16550_ACR_DTR_MANUAL_DTR; - - /* write port configuration to register */ - writeb(pcr, addr + NI16550_PCR_OFFSET); - - /* access and write to UART acr register */ - writeb(UART_ACR, addr + UART_SCR); - writeb(acr, addr + UART_ICR); - - uart->port.rs485_config = &pci_ni8431_config_rs485; - - iounmap(addr); - - return setup_port(priv, uart, bar, offset, board->reg_shift); -} - static int pci_netmos_9900_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) @@ -2023,15 +1912,6 @@ pci_moxa_setup(struct serial_private *priv, #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9 #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8 -#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2 -#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1 -#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081 -#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE -#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3 -#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9 -#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED -#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4 -#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3 #define PCI_DEVICE_ID_MOXA_CP102E 0x1024 #define PCI_DEVICE_ID_MOXA_CP102EL 0x1025 @@ -2269,87 +2149,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .setup = pci_ni8430_setup, .exit = pci_ni8430_exit, }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCIE_DEVICE_ID_NI_PXIE8430_2328, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8430_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCIE_DEVICE_ID_NI_PXIE8430_23216, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8430_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCI_DEVICE_ID_NI_PXI8431_4852, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCI_DEVICE_ID_NI_PXI8431_4854, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCI_DEVICE_ID_NI_PXI8431_4858, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCI_DEVICE_ID_NI_PXI8433_4852, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCI_DEVICE_ID_NI_PXI8433_4854, - .subvendor = PCI_ANY_ID, - 
.subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCIE_DEVICE_ID_NI_PXIE8431_4858, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, - { - .vendor = PCI_VENDOR_ID_NI, - .device = PCIE_DEVICE_ID_NI_PXIE8431_48516, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .init = pci_ni8430_init, - .setup = pci_ni8431_setup, - .exit = pci_ni8430_exit, - }, /* Quatech */ { .vendor = PCI_VENDOR_ID_QUATECH, @@ -3106,13 +2905,6 @@ enum pci_board_num_t { pbn_ni8430_4, pbn_ni8430_8, pbn_ni8430_16, - pbn_ni8430_pxie_8, - pbn_ni8430_pxie_16, - pbn_ni8431_2, - pbn_ni8431_4, - pbn_ni8431_8, - pbn_ni8431_pxie_8, - pbn_ni8431_pxie_16, pbn_ADDIDATA_PCIe_1_3906250, pbn_ADDIDATA_PCIe_2_3906250, pbn_ADDIDATA_PCIe_4_3906250, @@ -3765,55 +3557,6 @@ static struct pciserial_board pci_boards[] = { .uart_offset = 0x10, .first_offset = 0x800, }, - [pbn_ni8430_pxie_16] = { - .flags = FL_BASE0, - .num_ports = 16, - .base_baud = 3125000, - .uart_offset = 0x10, - .first_offset = 0x800, - }, - [pbn_ni8430_pxie_8] = { - .flags = FL_BASE0, - .num_ports = 8, - .base_baud = 3125000, - .uart_offset = 0x10, - .first_offset = 0x800, - }, - [pbn_ni8431_8] = { - .flags = FL_BASE0, - .num_ports = 8, - .base_baud = 3686400, - .uart_offset = 0x10, - .first_offset = 0x800, - }, - [pbn_ni8431_4] = { - .flags = FL_BASE0, - .num_ports = 4, - .base_baud = 3686400, - .uart_offset = 0x10, - .first_offset = 0x800, - }, - [pbn_ni8431_2] = { - .flags = FL_BASE0, - .num_ports = 2, - .base_baud = 3686400, - .uart_offset = 0x10, - .first_offset = 0x800, - }, - [pbn_ni8431_pxie_16] = { - .flags = FL_BASE0, - .num_ports = 16, - .base_baud = 3125000, - .uart_offset = 0x10, - .first_offset = 0x800, - }, - [pbn_ni8431_pxie_8] = { - .flags = FL_BASE0, - .num_ports = 8, - .base_baud = 3125000, - .uart_offset = 0x10, - .first_offset = 0x800, - }, /* * ADDI-DATA GmbH PCI-Express communication cards */ @@ -5567,33 +5310,6 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_4 }, - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8430_pxie_8 }, - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8430_pxie_16 }, - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_2 }, - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_4 }, - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_8 }, - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_pxie_8 }, - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_pxie_16 }, - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_2 }, - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_ni8431_4 }, /* * MOXA diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 8407166610ce..2c65c775bf5a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2192,6 +2192,10 @@ int serial8250_do_startup(struct uart_port *port) } } + /* Check if we need to have shared IRQs */ + if 
(port->irq && (up->port.flags & UPF_SHARE_IRQ)) + up->port.irqflags |= IRQF_SHARED; + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; /* diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 3a7d1a66f79c..b0b689546395 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -813,10 +813,8 @@ __acquires(&uap->port.lock) if (!uap->using_tx_dma) return; - /* Avoid deadlock with the DMA engine callback */ - spin_unlock(&uap->port.lock); - dmaengine_terminate_all(uap->dmatx.chan); - spin_lock(&uap->port.lock); + dmaengine_terminate_async(uap->dmatx.chan); + if (uap->dmatx.queued) { dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, DMA_TO_DEVICE); diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index 3bdd56a1021b..ea12f10610b6 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port, ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, AR933X_UART_CS_HOST_INT_EN); + /* enable RX and TX ready overide */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); + /* reenable the UART */ ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S, @@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port) ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, AR933X_UART_CS_HOST_INT_EN); + /* enable RX and TX ready overide */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); + /* Enable RX interrupts */ up->ier = AR933X_UART_INT_RX_VALID; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index a8dc8af83f39..8a909d556185 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -574,7 +574,8 @@ static void atmel_stop_tx(struct uart_port *port) atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); if (atmel_uart_is_half_duplex(port)) - atmel_start_rx(port); + if (!atomic_read(&atmel_port->tasklet_shutdown)) + atmel_start_rx(port); } @@ -2270,27 +2271,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_NORMAL; } - /* set the mode, clock divisor, parity, stop bits and data size */ - atmel_uart_writel(port, ATMEL_US_MR, mode); - - /* - * when switching the mode, set the RTS line state according to the - * new mode, otherwise keep the former state - */ - if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { - unsigned int rts_state; - - if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { - /* let the hardware control the RTS line */ - rts_state = ATMEL_US_RTSDIS; - } else { - /* force RTS line to low level */ - rts_state = ATMEL_US_RTSEN; - } - - atmel_uart_writel(port, ATMEL_US_CR, rts_state); - } - /* * Set the baud rate: * Fractional baudrate allows to setup output frequency more @@ -2317,6 +2297,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) atmel_uart_writel(port, ATMEL_US_BRGR, quot); + + /* set the mode, clock divisor, parity, stop bits and data size */ + atmel_uart_writel(port, ATMEL_US_MR, mode); + + /* + * when switching the mode, set the RTS line state according to the + * new mode, otherwise keep the former state + */ + if 
((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { + unsigned int rts_state; + + if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { + /* let the hardware control the RTS line */ + rts_state = ATMEL_US_RTSDIS; + } else { + /* force RTS line to low level */ + rts_state = ATMEL_US_RTSEN; + } + + atmel_uart_writel(port, ATMEL_US_CR, rts_state); + } + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); atmel_port->tx_stopped = false; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 537896c4d887..d2fc050a3445 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -268,6 +268,7 @@ struct lpuart_port { int rx_dma_rng_buf_len; unsigned int dma_tx_nents; wait_queue_head_t dma_wait; + bool id_allocated; }; struct lpuart_soc_data { @@ -437,8 +438,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport) } sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl, - sport->dma_tx_nents, - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + ret, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT); if (!sport->dma_tx_desc) { dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); dev_err(dev, "Cannot prepare TX slave DMA!\n"); @@ -2382,19 +2383,6 @@ static int lpuart_probe(struct platform_device *pdev) if (!sport) return -ENOMEM; - ret = of_alias_get_id(np, "serial"); - if (ret < 0) { - ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL); - if (ret < 0) { - dev_err(&pdev->dev, "port line is full, add device failed\n"); - return ret; - } - } - if (ret >= ARRAY_SIZE(lpuart_ports)) { - dev_err(&pdev->dev, "serial%d out of range\n", ret); - return -EINVAL; - } - sport->port.line = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sport->port.membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sport->port.membase)) @@ -2435,9 +2423,25 @@ static int lpuart_probe(struct platform_device *pdev) } } + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL); + if (ret < 0) { + dev_err(&pdev->dev, "port line is full, add device failed\n"); + return ret; + } + sport->id_allocated = true; + } + if (ret >= ARRAY_SIZE(lpuart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", ret); + ret = -EINVAL; + goto failed_out_of_range; + } + sport->port.line = ret; + ret = lpuart_enable_clks(sport); if (ret) - return ret; + goto failed_clock_enable; sport->port.uartclk = lpuart_get_baud_clk_rate(sport); lpuart_ports[sport->port.line] = sport; @@ -2487,6 +2491,10 @@ static int lpuart_probe(struct platform_device *pdev) failed_attach_port: failed_irq_request: lpuart_disable_clks(sport); +failed_clock_enable: +failed_out_of_range: + if (sport->id_allocated) + ida_simple_remove(&fsl_lpuart_ida, sport->port.line); return ret; } @@ -2496,7 +2504,8 @@ static int lpuart_remove(struct platform_device *pdev) uart_remove_one_port(&lpuart_reg, &sport->port); - ida_simple_remove(&fsl_lpuart_ida, sport->port.line); + if (sport->id_allocated) + ida_simple_remove(&fsl_lpuart_ida, sport->port.line); lpuart_disable_clks(sport); diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index ffefd218761e..31033d517e82 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi) struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi); /* stop activity */ tasklet_kill(&ifx_dev->io_work_tasklet); 
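
[Annotation] The ifx6x60.c hunk continuing below adds the pm_runtime_disable() that was missing from the SPI remove path; probe enables runtime PM, and leaving the enable unbalanced triggers a warning the next time the device binds. The usual pairing looks like this (a sketch with hypothetical function names, not the driver's exact code):

#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	pm_runtime_enable(&spi->dev);	/* must be undone in remove */
	return 0;
}

static int example_spi_remove(struct spi_device *spi)
{
	pm_runtime_disable(&spi->dev);	/* balances probe's enable */
	return 0;
}
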
+ + pm_runtime_disable(&spi->dev); + /* free irq */ free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev); free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 5e08f2657b90..22d8705cd5cd 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -603,7 +603,7 @@ static void imx_uart_dma_tx(struct imx_port *sport) sport->tx_bytes = uart_circ_chars_pending(xmit); - if (xmit->tail < xmit->head) { + if (xmit->tail < xmit->head || xmit->head == 0) { sport->dma_tx_nents = 1; sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); } else { @@ -619,7 +619,7 @@ static void imx_uart_dma_tx(struct imx_port *sport) dev_err(dev, "DMA mapping error for TX.\n"); return; } - desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents, + desc = dmaengine_prep_slave_sg(chan, sgl, ret, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!desc) { dma_unmap_sg(dev, sgl, sport->dma_tx_nents, @@ -700,22 +700,33 @@ static void imx_uart_start_tx(struct uart_port *port) } } -static irqreturn_t imx_uart_rtsint(int irq, void *dev_id) +static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id) { struct imx_port *sport = dev_id; u32 usr1; - spin_lock(&sport->port.lock); - imx_uart_writel(sport, USR1_RTSD, USR1); usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS; uart_handle_cts_change(&sport->port, !!usr1); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); - spin_unlock(&sport->port.lock); return IRQ_HANDLED; } +static irqreturn_t imx_uart_rtsint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + irqreturn_t ret; + + spin_lock(&sport->port.lock); + + ret = __imx_uart_rtsint(irq, dev_id); + + spin_unlock(&sport->port.lock); + + return ret; +} + static irqreturn_t imx_uart_txint(int irq, void *dev_id) { struct imx_port *sport = dev_id; @@ -726,14 +737,12 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t imx_uart_rxint(int irq, void *dev_id) +static irqreturn_t __imx_uart_rxint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int rx, flg, ignored = 0; struct tty_port *port = &sport->port.state->port; - spin_lock(&sport->port.lock); - while (imx_uart_readl(sport, USR2) & USR2_RDR) { u32 usr2; @@ -792,11 +801,25 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id) } out: - spin_unlock(&sport->port.lock); tty_flip_buffer_push(port); + return IRQ_HANDLED; } +static irqreturn_t imx_uart_rxint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + irqreturn_t ret; + + spin_lock(&sport->port.lock); + + ret = __imx_uart_rxint(irq, dev_id); + + spin_unlock(&sport->port.lock); + + return ret; +} + static void imx_uart_clear_rx_errors(struct imx_port *sport); /* @@ -855,6 +878,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id) unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4; irqreturn_t ret = IRQ_NONE; + spin_lock(&sport->port.lock); + usr1 = imx_uart_readl(sport, USR1); usr2 = imx_uart_readl(sport, USR2); ucr1 = imx_uart_readl(sport, UCR1); @@ -888,27 +913,25 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id) usr2 &= ~USR2_ORE; if (usr1 & (USR1_RRDY | USR1_AGTIM)) { - imx_uart_rxint(irq, dev_id); + __imx_uart_rxint(irq, dev_id); ret = IRQ_HANDLED; } if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) { - imx_uart_txint(irq, dev_id); + imx_uart_transmit_buffer(sport); ret = IRQ_HANDLED; } if (usr1 & USR1_DTRD) { imx_uart_writel(sport, USR1_DTRD, USR1); - spin_lock(&sport->port.lock); imx_uart_mctrl_check(sport); - 
spin_unlock(&sport->port.lock); ret = IRQ_HANDLED; } if (usr1 & USR1_RTSD) { - imx_uart_rtsint(irq, dev_id); + __imx_uart_rtsint(irq, dev_id); ret = IRQ_HANDLED; } @@ -923,6 +946,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id) ret = IRQ_HANDLED; } + spin_unlock(&sport->port.lock); + return ret; } diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 3657a24913fc..e0718ee5d42a 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -980,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port) static void msm_reset(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + unsigned int mr; /* reset everything */ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR); @@ -987,7 +988,10 @@ static void msm_reset(struct uart_port *port) msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR); msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR); msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); - msm_write(port, UART_CR_CMD_SET_RFR, UART_CR); + msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR); + mr = msm_read(port, UART_MR1); + mr &= ~UART_MR1_RX_RDY_CTL; + msm_write(port, mr, UART_MR1); /* Disable DM modes */ if (msm_port->is_uartdm) @@ -1576,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s, int num_newlines = 0; bool replaced = false; void __iomem *tf; + int locked = 1; if (is_uartdm) tf = port->membase + UARTDM_TF; @@ -1588,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s, num_newlines++; count += num_newlines; - spin_lock(&port->lock); + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&port->lock); + else + spin_lock(&port->lock); + if (is_uartdm) msm_reset_dm_count(port, count); @@ -1624,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s, iowrite32_rep(tf, buf, 1); i += num_chars; } - spin_unlock(&port->lock); + + if (locked) + spin_unlock(&port->lock); } static void msm_console_write(struct console *co, const char *s, diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index c12a12556339..4e9a590712cb 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev) port->membase = devm_ioremap_resource(&pdev->dev, reg); if (IS_ERR(port->membase)) - return -PTR_ERR(port->membase); + return PTR_ERR(port->membase); mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart), GFP_KERNEL); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 6157213a8359..c16234bca78f 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -233,6 +233,7 @@ struct eg20t_port { struct dma_chan *chan_rx; struct scatterlist *sg_tx_p; int nent; + int orig_nent; struct scatterlist sg_rx; int tx_dma_use; void *rx_buf_virt; @@ -787,9 +788,10 @@ static void pch_dma_tx_complete(void *arg) } xmit->tail &= UART_XMIT_SIZE - 1; async_tx_ack(priv->desc_tx); - dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE); + dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE); priv->tx_dma_use = 0; priv->nent = 0; + priv->orig_nent = 0; kfree(priv->sg_tx_p); pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT); } @@ -1010,6 +1012,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__); return 0; } + priv->orig_nent = num; priv->nent = nent; for (i = 0; i < nent; 
i++, sg++) { diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 14c6306bc462..f98a79172ad2 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -125,6 +125,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop); static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop); static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port); static void qcom_geni_serial_stop_rx(struct uart_port *uport); +static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop); static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200, 32000000, 48000000, 64000000, 80000000, @@ -615,7 +616,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport) u32 irq_en; u32 status; struct qcom_geni_serial_port *port = to_dev_port(uport, uport); - u32 irq_clear = S_CMD_DONE_EN; + u32 s_irq_status; irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN); irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN); @@ -631,10 +632,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport) return; geni_se_cancel_s_cmd(&port->se); - qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, - S_GENI_CMD_CANCEL, false); + qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS, + S_CMD_CANCEL_EN, true); + /* + * If a timeout occurs, the secondary engine remains active + * and the abort sequence is executed. + */ + s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS); + /* Flush the Rx buffer */ + if (s_irq_status & S_RX_FIFO_LAST_EN) + qcom_geni_serial_handle_rx(uport, true); + writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR); + status = readl(uport->membase + SE_GENI_STATUS); - writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR); if (status & S_GENI_CMD_ACTIVE) qcom_geni_serial_abort_rx(uport); } diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index c4a414a46c7f..7c2782785736 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1111,7 +1111,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state) if (!uport) goto out; - if (uport->type != PORT_UNKNOWN) + if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl) uport->ops->break_ctl(uport, break_state); ret = 0; out: @@ -2834,6 +2834,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) if (uport->cons && uport->dev) of_console_check(uport->dev->of_node, uport->cons->name, uport->line); + tty_port_link_device(port, drv->tty_driver, uport->line); uart_configure_port(drv, state, uport); port->console = uart_console(uport); diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 771d11196523..07573de70445 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -679,6 +679,9 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id) if (ims & SPRD_IMSR_TIMEOUT) serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT); + if (ims & SPRD_IMSR_BREAK_DETECT) + serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT); + if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT)) sprd_rx(port); @@ -1100,14 +1103,13 @@ static int sprd_remove(struct platform_device *dev) if (sup) { uart_remove_one_port(&sprd_uart_driver, &sup->port); sprd_port[sup->port.line] = NULL; + sprd_rx_free_buf(sup); sprd_ports_num--; } if (!sprd_ports_num) uart_unregister_driver(&sprd_uart_driver); - sprd_rx_free_buf(sup); - return 0; } diff --git
a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index df90747ee3a8..2f72514d63ed 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -240,8 +240,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded) * cleared by the sequence [read SR - read DR]. */ if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG) - stm32_clr_bits(port, ofs->icr, USART_ICR_ORECF | - USART_ICR_PECF | USART_ICR_FECF); + writel_relaxed(sr & USART_SR_ERR_MASK, + port->membase + ofs->icr); c = stm32_get_char(port, &sr, &stm32_port->last_res); port->icount.rx++; @@ -435,7 +435,7 @@ static void stm32_transmit_chars(struct uart_port *port) if (ofs->icr == UNDEF_REG) stm32_clr_bits(port, ofs->isr, USART_SR_TC); else - stm32_set_bits(port, ofs->icr, USART_ICR_TCCF); + writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr); if (stm32_port->tx_ch) stm32_transmit_chars_dma(port); diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index e8a9047de451..36f1a4d870eb 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -1334,10 +1334,10 @@ static void throttle(struct tty_struct * tty) DBGINFO(("%s throttle\n", info->device_name)); if (I_IXOFF(tty)) send_xchar(tty, STOP_CHAR(tty)); - if (C_CRTSCTS(tty)) { + if (C_CRTSCTS(tty)) { spin_lock_irqsave(&info->lock,flags); info->signals &= ~SerialSignal_RTS; - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } @@ -1359,10 +1359,10 @@ static void unthrottle(struct tty_struct * tty) else send_xchar(tty, START_CHAR(tty)); } - if (C_CRTSCTS(tty)) { + if (C_CRTSCTS(tty)) { spin_lock_irqsave(&info->lock,flags); info->signals |= SerialSignal_RTS; - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } @@ -2560,8 +2560,8 @@ static void change_params(struct slgt_info *info) info->read_status_mask = IRQ_RXOVER; if (I_INPCK(info->port.tty)) info->read_status_mask |= MASK_PARITY | MASK_FRAMING; - if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) - info->read_status_mask |= MASK_BREAK; + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) + info->read_status_mask |= MASK_BREAK; if (I_IGNPAR(info->port.tty)) info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING; if (I_IGNBRK(info->port.tty)) { @@ -3192,7 +3192,7 @@ static int tiocmset(struct tty_struct *tty, info->signals &= ~SerialSignal_DTR; spin_lock_irqsave(&info->lock,flags); - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); return 0; } @@ -3203,7 +3203,7 @@ static int carrier_raised(struct tty_port *port) struct slgt_info *info = container_of(port, struct slgt_info, port); spin_lock_irqsave(&info->lock,flags); - get_signals(info); + get_signals(info); spin_unlock_irqrestore(&info->lock,flags); return (info->signals & SerialSignal_DCD) ? 
1 : 0; } @@ -3218,7 +3218,7 @@ static void dtr_rts(struct tty_port *port, int on) info->signals |= SerialSignal_RTS | SerialSignal_DTR; else info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR); - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index fcb91bf7a15b..54b897a646d0 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -1453,10 +1453,10 @@ static void throttle(struct tty_struct * tty) if (I_IXOFF(tty)) send_xchar(tty, STOP_CHAR(tty)); - if (C_CRTSCTS(tty)) { + if (C_CRTSCTS(tty)) { spin_lock_irqsave(&info->lock,flags); info->serial_signals &= ~SerialSignal_RTS; - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } @@ -1482,10 +1482,10 @@ static void unthrottle(struct tty_struct * tty) send_xchar(tty, START_CHAR(tty)); } - if (C_CRTSCTS(tty)) { + if (C_CRTSCTS(tty)) { spin_lock_irqsave(&info->lock,flags); info->serial_signals |= SerialSignal_RTS; - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } @@ -2470,7 +2470,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status ) if (status & SerialSignal_CTS) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("CTS tx start..."); - info->port.tty->hw_stopped = 0; + info->port.tty->hw_stopped = 0; tx_start(info); info->pending_bh |= BH_TRANSMIT; return; @@ -2479,7 +2479,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status ) if (!(status & SerialSignal_CTS)) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("CTS tx stop..."); - info->port.tty->hw_stopped = 1; + info->port.tty->hw_stopped = 1; tx_stop(info); } } @@ -2806,8 +2806,8 @@ static void change_params(SLMP_INFO *info) info->read_status_mask2 = OVRN; if (I_INPCK(info->port.tty)) info->read_status_mask2 |= PE | FRME; - if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) - info->read_status_mask1 |= BRKD; + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) + info->read_status_mask1 |= BRKD; if (I_IGNPAR(info->port.tty)) info->ignore_status_mask2 |= PE | FRME; if (I_IGNBRK(info->port.tty)) { @@ -3177,7 +3177,7 @@ static int tiocmget(struct tty_struct *tty) unsigned long flags; spin_lock_irqsave(&info->lock,flags); - get_signals(info); + get_signals(info); spin_unlock_irqrestore(&info->lock,flags); result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) | @@ -3215,7 +3215,7 @@ static int tiocmset(struct tty_struct *tty, info->serial_signals &= ~SerialSignal_DTR; spin_lock_irqsave(&info->lock,flags); - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); return 0; @@ -3227,7 +3227,7 @@ static int carrier_raised(struct tty_port *port) unsigned long flags; spin_lock_irqsave(&info->lock,flags); - get_signals(info); + get_signals(info); spin_unlock_irqrestore(&info->lock,flags); return (info->serial_signals & SerialSignal_DCD) ? 
1 : 0; @@ -3243,7 +3243,7 @@ static void dtr_rts(struct tty_port *port, int on) info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; else info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); - set_signals(info); + set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 573b2055173c..843b5af424dc 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -57,6 +57,7 @@ /* Whether we react on sysrq keys or just ignore them */ static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static bool __read_mostly sysrq_always_enabled; +static int __read_mostly sysrq_use_leftctrl = 1; static bool sysrq_on(void) { @@ -749,18 +750,21 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) container_of(work, struct sysrq_state, reinject_work); struct input_handle *handle = &sysrq->handle; unsigned int alt_code = sysrq->alt_use; + unsigned int sysrq_code; if (sysrq->need_reinject) { /* we do not want the assignment to be reordered */ sysrq->reinjecting = true; mb(); + sysrq_code = sysrq_use_leftctrl ? KEY_LEFTCTRL : KEY_SYSRQ; + /* Simulate press and release of Alt + SysRq */ input_inject_event(handle, EV_KEY, alt_code, 1); - input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); + input_inject_event(handle, EV_KEY, sysrq_code, 1); input_inject_event(handle, EV_SYN, SYN_REPORT, 1); - input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); + input_inject_event(handle, EV_KEY, sysrq_code, 0); input_inject_event(handle, EV_KEY, alt_code, 0); input_inject_event(handle, EV_SYN, SYN_REPORT, 1); @@ -815,7 +819,23 @@ static bool sysrq_handle_keypress(struct sysrq_state *sysrq, clear_bit(KEY_SYSRQ, sysrq->handle.dev->key); break; + case KEY_LEFTCTRL: + if (sysrq_use_leftctrl && !sysrq->active) { + if (value == 1 && sysrq->alt != KEY_RESERVED) { + sysrq->active = true; + sysrq->alt_use = sysrq->alt; + /* + * If nothing else will be pressed we'll need + * to re-inject Alt-SysRq keystroke.
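Together with the sysrq_use_leftctrl flag and the KEY_LEFTCTRL case introduced here, the reinject worker above now re-injects whichever trigger key is configured instead of a hard-coded KEY_SYSRQ. A hedged sketch of how the sysrq_toggle_sysrq_key() hook added just below might be driven by a board file; the initcall wrapper is hypothetical, and it assumes the declaration is made visible through a header, which this excerpt does not show:

	#include <linux/init.h>
	#include <linux/sysrq.h>

	/* Hypothetical: keyboards without a dedicated SysRq key switch the
	 * chord to Alt+Left-Ctrl at boot. */
	static int __init my_board_sysrq_setup(void)
	{
		return sysrq_toggle_sysrq_key(1);	/* non-zero: use KEY_LEFTCTRL */
	}
	late_initcall(my_board_sysrq_setup);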
+ */ + sysrq->need_reinject = true; + } + if (sysrq->active) + clear_bit(KEY_LEFTCTRL, sysrq->handle.dev->key); + + break; + } default: if (sysrq->active && value && value != 2) { sysrq->need_reinject = false; @@ -1054,6 +1074,12 @@ int sysrq_toggle_support(int enable_mask) return 0; } +int sysrq_toggle_sysrq_key(int __sysrq_use_leftctrl) +{ + sysrq_use_leftctrl = !!__sysrq_use_leftctrl; + return 0; +} + static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p, struct sysrq_key_op *remove_op_p) { diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 802c1210558f..36c1c59cc72a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2731,9 +2731,11 @@ static int compat_tty_tiocgserial(struct tty_struct *tty, struct serial_struct32 v32; struct serial_struct v; int err; - memset(&v, 0, sizeof(struct serial_struct)); - if (!tty->ops->set_serial) + memset(&v, 0, sizeof(v)); + memset(&v32, 0, sizeof(v32)); + + if (!tty->ops->get_serial) return -ENOTTY; err = tty->ops->get_serial(tty, &v); if (!err) { diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 044c3cbdcfa4..ea80bf872f54 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port) } } -static const struct tty_port_client_operations default_client_ops = { +const struct tty_port_client_operations tty_port_default_client_ops = { .receive_buf = tty_port_default_receive_buf, .write_wakeup = tty_port_default_wakeup, }; +EXPORT_SYMBOL_GPL(tty_port_default_client_ops); void tty_port_init(struct tty_port *port) { @@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port) spin_lock_init(&port->lock); port->close_delay = (50 * HZ) / 100; port->closing_wait = (3000 * HZ) / 100; - port->client_ops = &default_client_ops; + port->client_ops = &tty_port_default_client_ops; kref_init(&port->kref); } EXPORT_SYMBOL(tty_port_init); diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 515fc095e3b4..15d33fa0c925 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -1491,7 +1491,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type, if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev)) kbd_rawcode(value); - if (event_type == EV_KEY) + if (event_type == EV_KEY && event_code <= KEY_MAX) kbd_keycode(event_code, value, HW_RAW(handle->dev)); spin_unlock(&kbd_event_lock); diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index 78732feaf65b..7556139cd0da 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -29,6 +30,8 @@ #include #include +#include + /* Don't take this from : 011-015 on the screen aren't spaces */ #define isspace(c) ((c) == ' ') @@ -43,6 +46,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */ static int sel_end; static int sel_buffer_lth; static char *sel_buffer; +static DEFINE_MUTEX(sel_lock); /* clear_selection, highlight and highlight_pointer can be called from interrupt (via scrollback/front) */ @@ -84,6 +88,11 @@ void clear_selection(void) } EXPORT_SYMBOL_GPL(clear_selection); +bool vc_is_sel(struct vc_data *vc) +{ + return vc == sel_cons; +} + /* * User settable table: what characters are to be considered alphabetic? * 128 bits. Locked by the console lock. 
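Two small hardening idioms sit in the hunks above. compat_tty_tiocgserial() now tests the operation it actually calls (get_serial, not set_serial) and zeroes both the native and the 32-bit structs, so struct padding can never leak kernel stack to user space; kbd_event() additionally ignores keycodes above KEY_MAX. The zero-then-fill rule for compat thunks, as a self-contained sketch with illustrative types (not the real serial_struct32):

	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct foo_info {		/* native form */
		unsigned long flags;
		unsigned int baud;
	};

	struct foo_info32 {		/* 32-bit ABI form; may contain padding */
		u32 flags;
		u32 baud;
	};

	static int compat_copy_foo_info(const struct foo_info *in,
					struct foo_info32 __user *up)
	{
		struct foo_info32 v32;

		/* Zero first: any field or padding left unset stays 0 instead
		 * of exposing stale stack contents to user space. */
		memset(&v32, 0, sizeof(v32));
		v32.flags = in->flags;
		v32.baud = in->baud;

		return copy_to_user(up, &v32, sizeof(v32)) ? -EFAULT : 0;
	}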
@@ -177,14 +186,14 @@ int set_selection_user(const struct tiocl_selection __user *sel, return set_selection_kernel(&v, tty); } -int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) +static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) { struct vc_data *vc = vc_cons[fg_console].d; int new_sel_start, new_sel_end, spc; char *bp, *obp; int i, ps, pe, multiplier; u32 c; - int mode; + int mode, ret = 0; poke_blanked_console(); @@ -332,7 +341,21 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) } } sel_buffer_lth = bp - sel_buffer; - return 0; + + return ret; +} + +int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) +{ + int ret; + + mutex_lock(&sel_lock); + console_lock(); + ret = __set_selection_kernel(v, tty); + console_unlock(); + mutex_unlock(&sel_lock); + + return ret; } EXPORT_SYMBOL_GPL(set_selection_kernel); @@ -350,6 +373,7 @@ int paste_selection(struct tty_struct *tty) unsigned int count; struct tty_ldisc *ld; DECLARE_WAITQUEUE(wait, current); + int ret = 0; console_lock(); poke_blanked_console(); @@ -361,10 +385,17 @@ int paste_selection(struct tty_struct *tty) tty_buffer_lock_exclusive(&vc->port); add_wait_queue(&vc->paste_wait, &wait); + mutex_lock(&sel_lock); while (sel_buffer && sel_buffer_lth > pasted) { set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { + ret = -EINTR; + break; + } if (tty_throttled(tty)) { + mutex_unlock(&sel_lock); schedule(); + mutex_lock(&sel_lock); continue; } __set_current_state(TASK_RUNNING); @@ -373,11 +404,12 @@ int paste_selection(struct tty_struct *tty) count); pasted += count; } + mutex_unlock(&sel_lock); remove_wait_queue(&vc->paste_wait, &wait); __set_current_state(TASK_RUNNING); tty_buffer_unlock_exclusive(&vc->port); tty_ldisc_deref(ld); - return 0; + return ret; } EXPORT_SYMBOL_GPL(paste_selection); diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 1f042346e722..778f83ea2249 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -456,6 +456,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) size_t ret; char *con_buf; + if (use_unicode(inode)) + return -EOPNOTSUPP; + con_buf = (char *) __get_free_page(GFP_KERNEL); if (!con_buf) return -ENOMEM; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 34aa39d1aed9..fa9433e6cdc7 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -890,8 +890,9 @@ static void hide_softcursor(struct vc_data *vc) static void hide_cursor(struct vc_data *vc) { - if (vc == sel_cons) + if (vc_is_sel(vc)) clear_selection(); + vc->vc_sw->con_cursor(vc, CM_ERASE); hide_softcursor(vc); } @@ -901,7 +902,7 @@ static void set_cursor(struct vc_data *vc) if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS) return; if (vc->vc_deccm) { - if (vc == sel_cons) + if (vc_is_sel(vc)) clear_selection(); add_softcursor(vc); if ((vc->vc_cursor_type & 0x0f) != 1) @@ -936,10 +937,21 @@ static void flush_scrollback(struct vc_data *vc) WARN_CONSOLE_UNLOCKED(); set_origin(vc); - if (vc->vc_sw->con_flush_scrollback) + if (vc->vc_sw->con_flush_scrollback) { vc->vc_sw->con_flush_scrollback(vc); - else + } else if (con_is_visible(vc)) { + /* + * When no con_flush_scrollback method is provided then the + * legacy way for flushing the scrollback buffer is to use + * a side effect of the con_switch method. 
We do it only on + * the foreground console as background consoles have no + * scrollback buffers in that case and we obviously don't + * want to switch to them. + */ + hide_cursor(vc); vc->vc_sw->con_switch(vc); + set_cursor(vc); + } } /* @@ -1063,6 +1075,17 @@ static void visual_deinit(struct vc_data *vc) module_put(vc->vc_sw->owner); } +static void vc_port_destruct(struct tty_port *port) +{ + struct vc_data *vc = container_of(port, struct vc_data, port); + + kfree(vc); +} + +static const struct tty_port_operations vc_port_ops = { + .destruct = vc_port_destruct, +}; + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; @@ -1088,6 +1111,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ vc_cons[currcons].d = vc; tty_port_init(&vc->port); + vc->port.ops = &vc_port_ops; INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); visual_init(vc, currcons, 1); @@ -1196,7 +1220,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, } } - if (vc == sel_cons) + if (vc_is_sel(vc)) clear_selection(); old_rows = vc->vc_rows; @@ -3035,10 +3059,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) switch (type) { case TIOCL_SETSEL: - console_lock(); ret = set_selection_user((struct tiocl_selection __user *)(p+1), tty); - console_unlock(); break; case TIOCL_PASTESEL: ret = paste_selection(tty); @@ -3244,6 +3266,7 @@ static int con_install(struct tty_driver *driver, struct tty_struct *tty) tty->driver_data = vc; vc->port.tty = tty; + tty_port_get(&vc->port); if (!tty->winsize.ws_row && !tty->winsize.ws_col) { tty->winsize.ws_row = vc_cons[currcons].d->vc_rows; @@ -3279,6 +3302,13 @@ static void con_shutdown(struct tty_struct *tty) console_unlock(); } +static void con_cleanup(struct tty_struct *tty) +{ + struct vc_data *vc = tty->driver_data; + + tty_port_put(&vc->port); +} + static int default_color = 7; /* white */ static int default_italic_color = 2; // green (ASCII) static int default_underline_color = 3; // cyan (ASCII) @@ -3403,7 +3433,8 @@ static const struct tty_operations con_ops = { .throttle = con_throttle, .unthrottle = con_unthrottle, .resize = vt_resize, - .shutdown = con_shutdown + .shutdown = con_shutdown, + .cleanup = con_cleanup, }; static struct cdev vc0_cdev; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 8b0ed139592f..daf61c28ba76 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -39,11 +39,32 @@ #include #include -char vt_dont_switch; -extern struct tty_driver *console_driver; +bool vt_dont_switch; -#define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count) -#define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons) +static inline bool vt_in_use(unsigned int i) +{ + const struct vc_data *vc = vc_cons[i].d; + + /* + * console_lock must be held to prevent the vc from being deallocated + * while we're checking whether it's in-use. 
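The vt_in_use() helper whose body follows replaces the old VT_IS_IN_USE() macro, which peeked at console_driver->ttys[] and its count field. The new test leans on reference counting instead: con_install() above now takes a tty_port reference, so while the console table's own reference keeps the kref at one, any value above one means an open tty. The idea in isolation (a simplification; as the comment here says, the real check is only valid under console_lock):

	#include <linux/kref.h>

	struct obj {
		struct kref kref;	/* one reference belongs to the owner table */
	};

	/* Anything beyond the table's own reference is a live user. */
	static bool obj_in_use(const struct obj *o)
	{
		return o && kref_read(&o->kref) > 1;
	}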
+ */ + WARN_CONSOLE_UNLOCKED(); + + return vc && kref_read(&vc->port.kref) > 1; +} + +static inline bool vt_busy(int i) +{ + if (vt_in_use(i)) + return true; + if (i == fg_console) + return true; + if (vc_is_sel(vc_cons[i].d)) + return true; + + return false; +} /* * Console (vt and kd) routines, as defined by USL SVR4 manual, and by @@ -289,16 +310,14 @@ static int vt_disallocate(unsigned int vc_num) int ret = 0; console_lock(); - if (VT_BUSY(vc_num)) + if (vt_busy(vc_num)) ret = -EBUSY; else if (vc_num) vc = vc_deallocate(vc_num); console_unlock(); - if (vc && vc_num >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc->port); - kfree(vc); - } + if (vc && vc_num >= MIN_NR_CONSOLES) + tty_port_put(&vc->port); return ret; } @@ -311,17 +330,15 @@ static void vt_disallocate_all(void) console_lock(); for (i = 1; i < MAX_NR_CONSOLES; i++) - if (!VT_BUSY(i)) + if (!vt_busy(i)) vc[i] = vc_deallocate(i); else vc[i] = NULL; console_unlock(); for (i = 1; i < MAX_NR_CONSOLES; i++) { - if (vc[i] && i >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc[i]->port); - kfree(vc[i]); - } + if (vc[i] && i >= MIN_NR_CONSOLES) + tty_port_put(&vc[i]->port); } } @@ -335,22 +352,13 @@ int vt_ioctl(struct tty_struct *tty, { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ - unsigned int console; + unsigned int console = vc->vc_num; unsigned char ucval; unsigned int uival; void __user *up = (void __user *)arg; int i, perm; int ret = 0; - console = vc->vc_num; - - - if (!vc_cons_allocated(console)) { /* impossible? */ - ret = -ENOIOCTLCMD; - goto out; - } - - /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. @@ -641,15 +649,16 @@ int vt_ioctl(struct tty_struct *tty, struct vt_stat __user *vtstat = up; unsigned short state, mask; - /* Review: FIXME: Console lock ? */ if (put_user(fg_console + 1, &vtstat->v_active)) ret = -EFAULT; else { state = 1; /* /dev/tty0 is always open */ + console_lock(); /* required by vt_in_use() */ for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1) - if (VT_IS_IN_USE(i)) + if (vt_in_use(i)) state |= mask; + console_unlock(); ret = put_user(state, &vtstat->v_state); } break; @@ -659,10 +668,11 @@ int vt_ioctl(struct tty_struct *tty, * Returns the first available (non-opened) console. */ case VT_OPENQRY: - /* FIXME: locking ? - but then this is a stupid API */ + console_lock(); /* required by vt_in_use() */ for (i = 0; i < MAX_NR_CONSOLES; ++i) - if (! VT_IS_IN_USE(i)) + if (!vt_in_use(i)) break; + console_unlock(); uival = i < MAX_NR_CONSOLES ? 
(i+1) : -1; goto setint; @@ -876,15 +886,20 @@ int vt_ioctl(struct tty_struct *tty, return -EINVAL; for (i = 0; i < MAX_NR_CONSOLES; i++) { + struct vc_data *vcp; + if (!vc_cons[i].d) continue; console_lock(); - if (v.v_vlin) - vc_cons[i].d->vc_scan_lines = v.v_vlin; - if (v.v_clin) - vc_cons[i].d->vc_font.height = v.v_clin; - vc_cons[i].d->vc_resize_user = 1; - vc_resize(vc_cons[i].d, v.v_cols, v.v_rows); + vcp = vc_cons[i].d; + if (vcp) { + if (v.v_vlin) + vcp->vc_scan_lines = v.v_vlin; + if (v.v_clin) + vcp->vc_font.height = v.v_clin; + vcp->vc_resize_user = 1; + vc_resize(vcp, v.v_cols, v.v_rows); + } console_unlock(); } break; @@ -1006,12 +1021,12 @@ int vt_ioctl(struct tty_struct *tty, case VT_LOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; - vt_dont_switch = 1; + vt_dont_switch = true; break; case VT_UNLOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; - vt_dont_switch = 0; + vt_dont_switch = false; break; case VT_GETHIFONTMASK: ret = put_user(vc->vc_hi_font_mask, @@ -1175,14 +1190,9 @@ long vt_compat_ioctl(struct tty_struct *tty, { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ - unsigned int console = vc->vc_num; void __user *up = compat_ptr(arg); int perm; - - if (!vc_cons_allocated(console)) /* impossible? */ - return -ENOIOCTLCMD; - /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index ebcf1434e296..44858f70f5f5 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -132,11 +132,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); + spin_unlock_irqrestore(&priv->lock, flags); } else { - if (!test_and_set_bit(0, &priv->flags)) + if (!test_and_set_bit(0, &priv->flags)) { + spin_unlock_irqrestore(&priv->lock, flags); disable_irq(dev_info->irq); + } } - spin_unlock_irqrestore(&priv->lock, flags); return 0; } diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 8b0ea8c70d73..635cf0466b59 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -2124,10 +2124,11 @@ resubmit: /* * Start the modem : init the data and start kernel thread */ -static int uea_boot(struct uea_softc *sc) +static int uea_boot(struct uea_softc *sc, struct usb_interface *intf) { - int ret, size; struct intr_pkt *intr; + int ret = -ENOMEM; + int size; uea_enters(INS_TO_USBDEV(sc)); @@ -2152,6 +2153,11 @@ static int uea_boot(struct uea_softc *sc) if (UEA_CHIP_VERSION(sc) == ADI930) load_XILINX_firmware(sc); + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + ret = -ENODEV; + goto err0; + } + intr = kmalloc(size, GFP_KERNEL); if (!intr) goto err0; @@ -2163,8 +2169,7 @@ static int uea_boot(struct uea_softc *sc) usb_fill_int_urb(sc->urb_int, sc->usb_dev, usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE), intr, size, uea_intr, sc, - sc->usb_dev->actconfig->interface[0]->altsetting[0]. 
- endpoint[0].desc.bInterval); + intf->cur_altsetting->endpoint[0].desc.bInterval); ret = usb_submit_urb(sc->urb_int, GFP_KERNEL); if (ret < 0) { @@ -2179,6 +2184,7 @@ static int uea_boot(struct uea_softc *sc) sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm"); if (IS_ERR(sc->kthread)) { uea_err(INS_TO_USBDEV(sc), "failed to create thread\n"); + ret = PTR_ERR(sc->kthread); goto err2; } @@ -2193,7 +2199,7 @@ err1: kfree(intr); err0: uea_leaves(INS_TO_USBDEV(sc)); - return -ENOMEM; + return ret; } /* @@ -2548,7 +2554,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf, } } - ret = uea_boot(sc); + ret = uea_boot(sc, intf); if (ret < 0) goto error; diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 4c1e75509303..f624cc87cbab 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, */ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; u32 reg; - priv_dev = cdns->gadget_dev; - /* check USB device interrupt */ reg = readl(&priv_dev->regs->usb_ists); if (reg) { @@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) */ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; unsigned long flags; int bit; u32 reg; - priv_dev = cdns->gadget_dev; spin_lock_irqsave(&priv_dev->lock, flags); reg = readl(&priv_dev->regs->usb_ists); @@ -2112,7 +2107,7 @@ found: /* Update ring only if removed request is on pending_req_list list */ if (req_on_hw_ring) { link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + - (priv_req->start_trb * TRB_SIZE)); + ((priv_req->end_trb + 1) * TRB_SIZE)); link_trb->control = (link_trb->control & TRB_CYCLE) | TRB_TYPE(TRB_LINK) | TRB_CHAIN; @@ -2157,11 +2152,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; struct usb_request *request; + struct cdns3_request *priv_req; + struct cdns3_trb *trb = NULL; int ret; int val; trace_cdns3_halt(priv_ep, 0, 0); + request = cdns3_next_request(&priv_ep->pending_req_list); + if (request) { + priv_req = to_cdns3_request(request); + trb = priv_req->trb; + if (trb) + trb->control = trb->control ^ TRB_CYCLE; + } + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); /* wait for EPRST cleared */ @@ -2172,10 +2177,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); - request = cdns3_next_request(&priv_ep->pending_req_list); - - if (request) + if (request) { + if (trb) + trb->control = trb->control ^ TRB_CYCLE; cdns3_rearm_transfer(priv_ep, 1); + } cdns3_start_all_request(priv_dev, priv_ep); return ret; @@ -2539,7 +2545,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns) priv_dev = cdns->gadget_dev; - devm_free_irq(cdns->dev, cdns->dev_irq, cdns); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); pm_runtime_mark_last_busy(cdns->dev); pm_runtime_put_autosuspend(cdns->dev); @@ -2710,7 +2716,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns) ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, cdns3_device_irq_handler, cdns3_device_thread_irq_handler, - IRQF_SHARED, dev_name(cdns->dev), cdns); + 
IRQF_SHARED, dev_name(cdns->dev), + cdns->gadget_dev); if (ret) goto err0; diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index b45ceb91c735..48e4a5ca1835 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd); struct ehci_ci_priv { struct regulator *reg_vbus; + bool enabled; }; static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) @@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) int ret = 0; int port = HCS_N_PORTS(ehci->hcs_params); - if (priv->reg_vbus) { + if (priv->reg_vbus && enable != priv->enabled) { if (port > 1) { dev_warn(dev, "Not support multi-port regulator control\n"); @@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) enable ? "enable" : "disable", ret); return ret; } + priv->enabled = enable; } if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 62f4fb9b362f..84d6f7df09a4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -896,10 +896,10 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) ss->xmit_fifo_size = acm->writesize; ss->baud_base = le32_to_cpu(acm->line.dwDTERate); - ss->close_delay = acm->port.close_delay / 10; + ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : - acm->port.closing_wait / 10; + jiffies_to_msecs(acm->port.closing_wait) / 10; return 0; } @@ -907,17 +907,25 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct acm *acm = tty->driver_data; unsigned int closing_wait, close_delay; + unsigned int old_closing_wait, old_close_delay; int retval = 0; - close_delay = ss->close_delay * 10; + close_delay = msecs_to_jiffies(ss->close_delay * 10); closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ? - ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10; + ASYNC_CLOSING_WAIT_NONE : + msecs_to_jiffies(ss->closing_wait * 10); + + /* we must redo the rounding here, so that the values match */ + old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; + old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
+ ASYNC_CLOSING_WAIT_NONE : + jiffies_to_msecs(acm->port.closing_wait) / 10; mutex_lock(&acm->port.mutex); if (!capable(CAP_SYS_ADMIN)) { - if ((close_delay != acm->port.close_delay) || - (closing_wait != acm->port.closing_wait)) + if ((ss->close_delay != old_close_delay) || + (ss->closing_wait != old_closing_wait)) retval = -EPERM; else retval = -EOPNOTSUPP; diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 87338f9eb5be..ed204cbb63ea 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -156,7 +156,8 @@ static int usb_conn_probe(struct platform_device *pdev) info->vbus = devm_regulator_get(dev, "vbus"); if (IS_ERR(info->vbus)) { - dev_err(dev, "failed to get vbus\n"); + if (PTR_ERR(info->vbus) != -EPROBE_DEFER) + dev_err(dev, "failed to get vbus\n"); return PTR_ERR(info->vbus); } diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 1ac1095bfeac..c68217b7dace 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -203,10 +203,60 @@ static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_INT] = 1024, }; -static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, - int asnum, struct usb_host_interface *ifp, int num_ep, - unsigned char *buffer, int size) +static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1, + struct usb_endpoint_descriptor *e2) { + if (e1->bEndpointAddress == e2->bEndpointAddress) + return true; + + if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) { + if (usb_endpoint_num(e1) == usb_endpoint_num(e2)) + return true; + } + + return false; +} + +/* + * Check for duplicate endpoint addresses in other interfaces and in the + * altsetting currently being parsed. 
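The cdc-acm hunks above store close_delay and closing_wait in jiffies, but compare TIOCSSERIAL input against the old values re-rounded into the same centisecond units that TIOCGSERIAL reports. Without that re-rounding, an unprivileged program echoing back exactly what it just read could fail the no-change check, and thus get -EPERM, purely through jiffies conversion loss. The comparison rule condensed into a sketch (not the driver function itself):

	#include <linux/jiffies.h>
	#include <linux/serial.h>
	#include <linux/tty.h>

	/* Compare in the user-visible unit (centiseconds), never raw jiffies. */
	static bool close_delay_changed(const struct tty_port *port,
					const struct serial_struct *ss)
	{
		unsigned int old_cs = jiffies_to_msecs(port->close_delay) / 10;

		return ss->close_delay != old_cs;
	}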
+ */ +static bool config_endpoint_is_duplicate(struct usb_host_config *config, + int inum, int asnum, struct usb_endpoint_descriptor *d) +{ + struct usb_endpoint_descriptor *epd; + struct usb_interface_cache *intfc; + struct usb_host_interface *alt; + int i, j, k; + + for (i = 0; i < config->desc.bNumInterfaces; ++i) { + intfc = config->intf_cache[i]; + + for (j = 0; j < intfc->num_altsetting; ++j) { + alt = &intfc->altsetting[j]; + + if (alt->desc.bInterfaceNumber == inum && + alt->desc.bAlternateSetting != asnum) + continue; + + for (k = 0; k < alt->desc.bNumEndpoints; ++k) { + epd = &alt->endpoint[k].desc; + + if (endpoint_is_duplicate(epd, d)) + return true; + } + } + } + + return false; +} + +static int usb_parse_endpoint(struct device *ddev, int cfgno, + struct usb_host_config *config, int inum, int asnum, + struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) +{ + struct usb_device *udev = to_usb_device(ddev); unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; @@ -242,11 +292,18 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, goto skip_to_next_endpoint_or_interface_descriptor; /* Check for duplicate endpoint addresses */ - for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { - if (ifp->endpoint[i].desc.bEndpointAddress == - d->bEndpointAddress) { - dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", - cfgno, inum, asnum, d->bEndpointAddress); + if (config_endpoint_is_duplicate(config, inum, asnum, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; + } + + /* Ignore blacklisted endpoints */ + if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) { + if (usb_endpoint_is_blacklisted(udev, ifp, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, + d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } } @@ -346,12 +403,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } - /* Validate the wMaxPacketSize field */ + /* + * Validate the wMaxPacketSize field. + * Some devices have isochronous endpoints in altsetting 0; + * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 + * (see the end of section 5.6.3), so don't warn about them. 
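endpoint_is_duplicate() above encodes a ch9 detail: bEndpointAddress combines the endpoint number with a direction bit, but control endpoints are bidirectional, so for them a matching number alone is a clash even when the direction bits differ. Condensed to its two rules, using the same ch9.h helpers the patch uses:

	#include <linux/usb/ch9.h>

	static bool eps_clash(const struct usb_endpoint_descriptor *a,
			      const struct usb_endpoint_descriptor *b)
	{
		/* Full address match: same number and same direction. */
		if (a->bEndpointAddress == b->bEndpointAddress)
			return true;

		/* Control endpoints own their number in both directions. */
		if (usb_endpoint_xfer_control(a) || usb_endpoint_xfer_control(b))
			return usb_endpoint_num(a) == usb_endpoint_num(b);

		return false;
	}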
+ */ maxp = usb_endpoint_maxp(&endpoint->desc); - if (maxp == 0) { - dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; } /* Find the highest legal maxpacket size for this endpoint */ @@ -522,8 +583,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno, if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; - retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, - num_ep, buffer, size); + retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum, + alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 3f899552f6e3..6ca40d135430 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -764,8 +764,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; - else + else { + unsigned int old_suppress; + + /* suppress uevents while claiming interface */ + old_suppress = dev_get_uevent_suppress(&intf->dev); + dev_set_uevent_suppress(&intf->dev, 1); err = usb_driver_claim_interface(&usbfs_driver, intf, ps); + dev_set_uevent_suppress(&intf->dev, old_suppress); + } if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; @@ -785,7 +792,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum) if (!intf) err = -ENOENT; else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) { + unsigned int old_suppress; + + /* suppress uevents while releasing interface */ + old_suppress = dev_get_uevent_suppress(&intf->dev); + dev_set_uevent_suppress(&intf->dev, 1); usb_driver_release_interface(&usbfs_driver, intf); + dev_set_uevent_suppress(&intf->dev, old_suppress); err = 0; } return err; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index f225eaa98ff8..d0f45600b669 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1409,7 +1409,17 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, if (usb_endpoint_xfer_control(&urb->ep->desc)) { if (hcd->self.uses_pio_for_control) return ret; - if (hcd_uses_dma(hcd)) { + if (hcd->localmem_pool) { + ret = hcd_alloc_coherent( + urb->dev->bus, mem_flags, + &urb->setup_dma, + (void **)&urb->setup_packet, + sizeof(struct usb_ctrlrequest), + DMA_TO_DEVICE); + if (ret) + return ret; + urb->transfer_flags |= URB_SETUP_MAP_LOCAL; + } else if (hcd_uses_dma(hcd)) { if (is_vmalloc_addr(urb->setup_packet)) { WARN_ONCE(1, "setup packet is not dma capable\n"); return -EAGAIN; @@ -1427,23 +1437,22 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, urb->setup_dma)) return -EAGAIN; urb->transfer_flags |= URB_SETUP_MAP_SINGLE; - } else if (hcd->localmem_pool) { - ret = hcd_alloc_coherent( - urb->dev->bus, mem_flags, - &urb->setup_dma, - (void **)&urb->setup_packet, - sizeof(struct usb_ctrlrequest), - DMA_TO_DEVICE); - if (ret) - return ret; - urb->transfer_flags |= URB_SETUP_MAP_LOCAL; } } dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; if (urb->transfer_buffer_length != 0 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { - if (hcd_uses_dma(hcd)) { + if (hcd->localmem_pool) { + ret = hcd_alloc_coherent( + urb->dev->bus, mem_flags, + &urb->transfer_dma, + &urb->transfer_buffer, + urb->transfer_buffer_length, + dir); + if (ret == 0) + urb->transfer_flags |= URB_MAP_LOCAL; + } else if (hcd_uses_dma(hcd)) { if (urb->num_sgs) { int n; @@ -1497,15 +1506,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, else urb->transfer_flags |= URB_DMA_MAP_SINGLE; } - } else if (hcd->localmem_pool) { - ret = hcd_alloc_coherent( - urb->dev->bus, mem_flags, - &urb->transfer_dma, - &urb->transfer_buffer, - urb->transfer_buffer_length, - dir); - if (ret == 0) - urb->transfer_flags |= URB_MAP_LOCAL; } if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL))) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 236313f41f4a..243577656177 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -37,7 +37,9 @@ #include "otg_whitelist.h" #define USB_VENDOR_GENESYS_LOGIC 0x05e3 +#define USB_VENDOR_SMSC 0x0424 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 +#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ @@ -985,13 +987,17 @@ int usb_remove_device(struct usb_device *udev) { struct usb_hub *hub; struct usb_interface *intf; + int ret; if (!udev->parent) /* Can't remove a root hub */ return -EINVAL; hub = usb_hub_to_struct_hub(udev->parent); intf = to_usb_interface(hub->intfdev); - usb_autopm_get_interface(intf); + ret = usb_autopm_get_interface(intf); + if (ret < 0) + return ret; + set_bit(udev->portnum, hub->removed_bits); hub_port_logical_disconnect(hub, udev->portnum); usb_autopm_put_interface(intf); @@ -1191,6 +1197,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) * PORT_OVER_CURRENT is not. So check for any of them. */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || + (portchange & USB_PORT_STAT_C_CONNECTION) || (portstatus & USB_PORT_STAT_OVERCURRENT) || (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); @@ -1215,11 +1222,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) #ifdef CONFIG_PM udev->reset_resume = 1; #endif - /* Don't set the change_bits when the device - * was powered off. 
- */ - if (test_bit(port1, hub->power_bits)) - set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ @@ -1729,6 +1731,10 @@ static void hub_disconnect(struct usb_interface *intf) kfree(hub->buffer); pm_suspend_ignore_children(&intf->dev, false); + + if (hub->quirk_disable_autosuspend) + usb_autopm_put_interface(intf); + kref_put(&hub->kref, hub_release); } @@ -1861,6 +1867,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND) hub->quirk_check_port_auto_suspend = 1; + if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) { + hub->quirk_disable_autosuspend = 1; + usb_autopm_get_interface_no_resume(intf); + } + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) return 0; @@ -2691,7 +2702,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) -#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme) +#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme)) #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 @@ -5483,6 +5494,10 @@ out_hdev_lock: } static const struct usb_device_id hub_id_table[] = { + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, + .idVendor = USB_VENDOR_SMSC, + .bInterfaceClass = USB_CLASS_HUB, + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_GENESYS_LOGIC, @@ -5814,7 +5829,7 @@ re_enumerate_no_bos: /** * usb_reset_device - warn interface drivers and perform a USB port reset - * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) + * @udev: device to reset (not in NOTATTACHED state) * * Warns all drivers bound to registered interfaces (using their pre_reset * method), performs the port reset, and then lets the drivers know that @@ -5842,8 +5857,7 @@ int usb_reset_device(struct usb_device *udev) struct usb_host_config *config = udev->actconfig; struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); - if (udev->state == USB_STATE_NOTATTACHED || - udev->state == USB_STATE_SUSPENDED) { + if (udev->state == USB_STATE_NOTATTACHED) { dev_dbg(&udev->dev, "device reset not allowed in state %d\n", udev->state); return -EINVAL; diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index a9e24e4b8df1..a97dd1ba964e 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -61,6 +61,7 @@ struct usb_hub { unsigned quiescing:1; unsigned disconnected:1; unsigned in_reset:1; + unsigned quirk_disable_autosuspend:1; unsigned quirk_check_port_auto_suspend:1; diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index bbbb35fa639f..235a7c645503 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev) if (!port_dev->is_superspeed && peer) pm_runtime_get_sync(&peer->dev); - usb_autopm_get_interface(intf); + retval = usb_autopm_get_interface(intf); + if (retval < 0) + return retval; + retval = usb_hub_set_port_power(hdev, hub, port1, true); msleep(hub_power_on_good_delay(hub)); if (udev && !retval) { @@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev) if (usb_port_block_power_off) return -EBUSY; - usb_autopm_get_interface(intf); + retval = usb_autopm_get_interface(intf); + if (retval < 0) + return retval; + retval = usb_hub_set_port_power(hdev, hub, 
port1, false); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION); if (!port_dev->is_superspeed) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6b6413073584..da30b5664ff3 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech PTZ Pro Camera */ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Screen Share */ + { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -354,6 +357,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0904, 0x6103), .driver_info = USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + /* Sound Devices USBPre2 */ + { USB_DEVICE(0x0926, 0x0202), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -371,6 +378,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0b05, 0x17e0), .driver_info = USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Realtek hub in Dell WD19 (Type-C) */ + { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, + + /* Generic RTL8153 based ethernet adapters */ + { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -445,6 +458,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, + /* novation SoundControl XL */ + { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + { } /* terminating entry must be last */ }; @@ -472,6 +488,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { { } /* terminating entry must be last */ }; +/* + * Entries for blacklisted endpoints that should be ignored when parsing + * configuration descriptors. + * + * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. 
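The port.c hunks above stop ignoring usb_autopm_get_interface() failures: a failed resume now aborts the operation instead of proceeding against an un-resumed interface with unbalanced PM counts. The rule as a sketch (hypothetical callback name):

	#include <linux/usb.h>

	static int my_port_op(struct usb_interface *intf)
	{
		int ret;

		ret = usb_autopm_get_interface(intf);	/* may fail to resume */
		if (ret < 0)
			return ret;	/* never touch the device un-resumed */

		/* ... operate on the resumed device ... */

		usb_autopm_put_interface(intf);
		return 0;
	}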
+ */ +static const struct usb_device_id usb_endpoint_blacklist[] = { + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { } +}; + +bool usb_endpoint_is_blacklisted(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd) +{ + const struct usb_device_id *id; + unsigned int address; + + for (id = usb_endpoint_blacklist; id->match_flags; ++id) { + if (!usb_match_device(udev, id)) + continue; + + if (!usb_match_one_id_intf(udev, intf, id)) + continue; + + address = id->driver_info; + if (address == epd->bEndpointAddress) + return true; + } + + return false; +} + static bool usb_match_any_interface(struct usb_device *udev, const struct usb_device_id *id) { diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 0eab79f82ce4..da923ec17612 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -45,6 +45,7 @@ void usb_init_urb(struct urb *urb) if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); + INIT_LIST_HEAD(&urb->urb_list); INIT_LIST_HEAD(&urb->anchor_list); } } diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index cf4783cf661a..3ad0ee57e859 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *); extern void usb_detect_quirks(struct usb_device *udev); extern void usb_detect_interface_quirks(struct usb_device *udev); extern void usb_release_quirk_list(void); +extern bool usb_endpoint_is_blacklisted(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); extern int usb_get_device_descriptor(struct usb_device *dev, diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 8e41d70fd298..78a4925aa118 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait) greset |= GRSTCTL_CSFTRST; dwc2_writel(hsotg, greset, GRSTCTL); - if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) { + if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) { dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n", __func__); return -EBUSY; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 6be10e496e10..7fd0900a9cb0 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, else packets = 1; /* send one packet if length is zero. */ - if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) { - dev_err(hsotg->dev, "req length > maxpacket*mc\n"); - return; - } - if (dir_in && index != 0) if (hs_ep->isochronous) epsize = DXEPTSIZ_MC(packets); @@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, req->actual = 0; req->status = -EINPROGRESS; + /* Don't queue ISOC request if length greater than mps*mc */ + if (hs_ep->isochronous && + req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) { + dev_err(hs->dev, "req length > maxpacket*mc\n"); + return -EINVAL; + } + /* In DDMA mode for ISOC's don't queue request if length greater * than descriptor limits. 
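usb_endpoint_is_blacklisted() above also shows a compact usbcore idiom: the driver_info slot of a usb_device_id carries a small per-match payload, here the endpoint address to drop, so future offenders need only a new table entry. A sketch of the same idiom with hypothetical IDs; usb_match_device() and usb_match_one_id_intf() are usbcore-internal helpers, exactly as in the patch:

	#include <linux/usb.h>
	#include "usb.h"	/* usbcore-private: declares usb_match_device() et al. */

	/* Hypothetical: drop endpoint 0x85 on interface 1 of device 1234:5678. */
	static const struct usb_device_id ep_ignore_list[] = {
		{ USB_DEVICE_INTERFACE_NUMBER(0x1234, 0x5678, 1),
		  .driver_info = 0x85 },
		{ }	/* zeroed match_flags terminates the walk */
	};

	static bool ep_should_ignore(struct usb_device *udev,
				     struct usb_host_interface *alt, u8 epaddr)
	{
		const struct usb_device_id *id;

		for (id = ep_ignore_list; id->match_flags; ++id)
			if (usb_match_device(udev, id) &&
			    usb_match_one_id_intf(udev, alt, id) &&
			    (u8)id->driver_info == epaddr)
				return true;

		return false;
	}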
*/ @@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; struct dwc2_hsotg_ep *ep; __le16 reply; + u16 status; int ret; dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__); @@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: - /* - * bit 0 => self powered - * bit 1 => remote wakeup - */ - reply = cpu_to_le16(0); + status = 1 << USB_DEVICE_SELF_POWERED; + status |= hsotg->remote_wakeup_allowed << + USB_DEVICE_REMOTE_WAKEUP; + reply = cpu_to_le16(status); break; case USB_RECIP_INTERFACE: @@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, case USB_RECIP_DEVICE: switch (wValue) { case USB_DEVICE_REMOTE_WAKEUP: - hsotg->remote_wakeup_allowed = 1; + if (set) + hsotg->remote_wakeup_allowed = 1; + else + hsotg->remote_wakeup_allowed = 0; break; case USB_DEVICE_TEST_MODE: @@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, return -EINVAL; hsotg->test_mode = wIndex >> 8; - ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); - if (ret) { - dev_err(hsotg->dev, - "%s: failed to send reply\n", __func__); - return ret; - } break; default: return -ENOENT; } + + ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); + if (ret) { + dev_err(hsotg->dev, + "%s: failed to send reply\n", __func__); + return ret; + } break; case USB_RECIP_ENDPOINT: @@ -4056,11 +4062,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, * a unique tx-fifo even if it is non-periodic. */ if (dir_in && hsotg->dedicated_fifos) { + unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg); u32 fifo_index = 0; u32 fifo_size = UINT_MAX; size = hs_ep->ep.maxpacket * hs_ep->mc; - for (i = 1; i < hsotg->num_of_eps; ++i) { + for (i = 1; i <= fifo_count; ++i) { if (hsotg->fifo_map & (1 << i)) continue; val = dwc2_readl(hsotg, DPTXFSIZN(i)); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 97d6ae3c4df2..cede7a8e3605 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1209,6 +1209,9 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) /* do nothing */ break; } + + /* de-assert DRVVBUS for HOST and OTG mode */ + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); } static void dwc3_get_properties(struct dwc3 *dwc) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 1c8b349379af..77c4a9abe365 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -688,7 +688,9 @@ struct dwc3_ep { #define DWC3_EP_STALL BIT(1) #define DWC3_EP_WEDGE BIT(2) #define DWC3_EP_TRANSFER_STARTED BIT(3) +#define DWC3_EP_END_TRANSFER_PENDING BIT(4) #define DWC3_EP_PENDING_REQUEST BIT(5) +#define DWC3_EP_DELAY_START BIT(6) /* This last one is specific to EP0 */ #define DWC3_EP0_DIR_IN BIT(31) diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 9baabed87d61..f2c97058a00b 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size, u8 epnum = event->endpoint_number; size_t len; int status; - int ret; - ret = snprintf(str, size, "ep%d%s: ", epnum >> 1, + len = scnprintf(str, size, "ep%d%s: ", epnum >> 1, (epnum & 1) ? 
"in" : "out"); - if (ret < 0) - return "UNKNOWN"; status = event->status; switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: - len = strlen(str); - snprintf(str + len, size - len, "Transfer Complete (%c%c%c)", + len += scnprintf(str + len, size - len, + "Transfer Complete (%c%c%c)", status & DEPEVT_STATUS_SHORT ? 'S' : 's', status & DEPEVT_STATUS_IOC ? 'I' : 'i', status & DEPEVT_STATUS_LST ? 'L' : 'l'); - len = strlen(str); - if (epnum <= 1) - snprintf(str + len, size - len, " [%s]", + scnprintf(str + len, size - len, " [%s]", dwc3_ep0_state_string(ep0state)); break; case DWC3_DEPEVT_XFERINPROGRESS: - len = strlen(str); - - snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)", + scnprintf(str + len, size - len, + "Transfer In Progress [%d] (%c%c%c)", event->parameters, status & DEPEVT_STATUS_SHORT ? 'S' : 's', status & DEPEVT_STATUS_IOC ? 'I' : 'i', status & DEPEVT_STATUS_LST ? 'M' : 'm'); break; case DWC3_DEPEVT_XFERNOTREADY: - len = strlen(str); - - snprintf(str + len, size - len, "Transfer Not Ready [%d]%s", + len += scnprintf(str + len, size - len, + "Transfer Not Ready [%d]%s", event->parameters, status & DEPEVT_STATUS_TRANSFER_ACTIVE ? " (Active)" : " (Not Active)"); - len = strlen(str); - /* Control Endpoints */ if (epnum <= 1) { int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status); switch (phase) { case DEPEVT_STATUS_CONTROL_DATA: - snprintf(str + ret, size - ret, + scnprintf(str + len, size - len, " [Data Phase]"); break; case DEPEVT_STATUS_CONTROL_STATUS: - snprintf(str + ret, size - ret, + scnprintf(str + len, size - len, " [Status Phase]"); } } break; case DWC3_DEPEVT_RXTXFIFOEVT: - snprintf(str + ret, size - ret, "FIFO"); + scnprintf(str + len, size - len, "FIFO"); break; case DWC3_DEPEVT_STREAMEVT: status = event->status; switch (status) { case DEPEVT_STREAMEVT_FOUND: - snprintf(str + ret, size - ret, " Stream %d Found", + scnprintf(str + len, size - len, " Stream %d Found", event->parameters); break; case DEPEVT_STREAMEVT_NOTFOUND: default: - snprintf(str + ret, size - ret, " Stream Not Found"); + scnprintf(str + len, size - len, " Stream Not Found"); break; } break; case DWC3_DEPEVT_EPCMDCMPLT: - snprintf(str + ret, size - ret, "Endpoint Command Complete"); + scnprintf(str + len, size - len, "Endpoint Command Complete"); break; default: - snprintf(str, size, "UNKNOWN"); + scnprintf(str + len, size - len, "UNKNOWN"); } return str; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 023f0357efd7..7051611229c9 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -29,10 +29,12 @@ #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 -#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee +#define PCI_DEVICE_ID_INTEL_CMLLP 0x02ee +#define PCI_DEVICE_ID_INTEL_CMLH 0x06ee #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e +#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0 #define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee @@ -308,6 +310,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), (kernel_ulong_t) &dwc3_pci_intel_properties, }, @@ -338,6 +343,9 @@ static const 
struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
+	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3996b9c4ff8d..6dee4dabc0a4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1117,6 +1117,9 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event)
 {
+	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+	u8 cmd;
+
 	switch (event->endpoint_event) {
 	case DWC3_DEPEVT_XFERCOMPLETE:
 		dwc3_ep0_xfer_complete(dwc, event);
@@ -1129,7 +1132,14 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
 	case DWC3_DEPEVT_XFERINPROGRESS:
 	case DWC3_DEPEVT_RXTXFIFOEVT:
 	case DWC3_DEPEVT_STREAMEVT:
+		break;
 	case DWC3_DEPEVT_EPCMDCMPLT:
+		cmd = DEPEVT_PARAMETER_CMD(event->parameters);
+
+		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+		}
 		break;
 	}
 }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a9aba716bf80..09d6b11246c9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1068,7 +1068,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 		unsigned int rem = length % maxp;
 		unsigned chain = true;
 
-		if (sg_is_last(s))
+		/*
+		 * The IOMMU driver may coalesce sgs that share a page
+		 * boundary into one before handing the list to the USB
+		 * driver, so the number of sgs mapped is not always equal
+		 * to the number of sgs passed. Mark the chain bit false
+		 * on the last mapped sg instead.
+		 */
+		if (i == remaining - 1)
 			chain = false;
 
 		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
@@ -1447,6 +1454,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 	list_add_tail(&req->list, &dep->pending_list);
 	req->status = DWC3_REQUEST_STATUS_QUEUED;
 
+	/* Start the transfer only after the END_TRANSFER is completed */
+	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+		dep->flags |= DWC3_EP_DELAY_START;
+		return 0;
+	}
+
 	/*
 	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
 	 * wait for a XferNotReady event so we will know what's the current
@@ -1505,7 +1518,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
 	for (i = 0; i < req->num_trbs; i++) {
 		struct dwc3_trb *trb;
 
-		trb = req->trb + i;
+		trb = &dep->trb_pool[dep->trb_dequeue];
 		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
 		dwc3_ep_inc_deq(dep);
 	}
@@ -2420,7 +2433,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
 	if (event->status & DEPEVT_STATUS_SHORT && !chain)
 		return 1;
 
-	if (event->status & DEPEVT_STATUS_IOC)
+	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
+	    (trb->ctrl & DWC3_TRB_CTRL_LST))
 		return 1;
 
 	return 0;
@@ -2467,6 +2481,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
 
 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
 {
+	/*
+	 * For OUT direction, host may send less than the setup
+	 * length. Return true for all OUT requests.
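/*
 * Illustrative sketch of the two completion rules this file now applies:
 * direction decides whether a short transfer is final, and pending mapped
 * sgs keep a transfer going either way. Names match the hunks around this
 * point; the combined helper is an editorial condensation, not code from
 * the patch:
 *
 *	static bool sketch_request_done(struct dwc3_request *req)
 *	{
 *		if (req->num_pending_sgs)	// more mapped segments to submit
 *			return false;
 *		if (!req->direction)		// OUT: short host packet is normal
 *			return true;
 *		return req->request.actual == req->request.length;	// IN
 *	}
 */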
+ */ + if (!req->direction) + return true; + return req->request.actual == req->request.length; } @@ -2491,7 +2512,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, req->request.actual = req->request.length - req->remaining; - if (!dwc3_gadget_ep_request_completed(req) && + if (!dwc3_gadget_ep_request_completed(req) || req->num_pending_sgs) { __dwc3_gadget_kick_transfer(dep); goto out; @@ -2618,8 +2639,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, cmd = DEPEVT_PARAMETER_CMD(event->parameters); if (cmd == DWC3_DEPCMD_ENDTRANSFER) { + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; dep->flags &= ~DWC3_EP_TRANSFER_STARTED; dwc3_gadget_ep_cleanup_cancelled_requests(dep); + if ((dep->flags & DWC3_EP_DELAY_START) && + !usb_endpoint_xfer_isoc(dep->endpoint.desc)) + __dwc3_gadget_kick_transfer(dep); + + dep->flags &= ~DWC3_EP_DELAY_START; } break; case DWC3_DEPEVT_STREAMEVT: @@ -2676,7 +2703,8 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, u32 cmd; int ret; - if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) + if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) || + (dep->flags & DWC3_EP_END_TRANSFER_PENDING)) return; /* @@ -2719,6 +2747,11 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, WARN_ON_ONCE(ret); dep->resource_index = 0; + if (!interrupt) + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; + else + dep->flags |= DWC3_EP_END_TRANSFER_PENDING; + if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) udelay(100); } diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 5567ed2cddbe..fa252870c926 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc) memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); if (dwc->usb3_lpm_capable) - props[prop_idx++].name = "usb3-lpm-capable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); if (dwc->usb2_lpm_disable) - props[prop_idx++].name = "usb2-lpm-disable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable"); /** * WORKAROUND: dwc3 revisions <=3.00a have a limitation @@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc) * This following flag tells XHCI to do just that. */ if (dwc->revision <= DWC3_REVISION_300A) - props[prop_idx++].name = "quirk-broken-port-ped"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); if (prop_idx) { ret = platform_device_add_properties(xhci, props); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5ec54b69c29c..d7871636fced 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -437,12 +437,14 @@ static u8 encode_bMaxPower(enum usb_device_speed speed, val = CONFIG_USB_GADGET_VBUS_DRAW; if (!val) return 0; - switch (speed) { - case USB_SPEED_SUPER: - return DIV_ROUND_UP(val, 8); - default: - return DIV_ROUND_UP(val, 2); - } + if (speed < USB_SPEED_SUPER) + return min(val, 500U) / 2; + else + /* + * USB 3.x supports up to 900mA, but since 900 isn't divisible + * by 8 the integral division will effectively cap to 896mA. + */ + return min(val, 900U) / 8; } static int config_buf(struct usb_configuration *config, @@ -854,6 +856,10 @@ static int set_config(struct usb_composite_dev *cdev, /* when we return, be sure our power usage is valid */ power = c->MaxPower ? 
c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; + if (gadget->speed < USB_SPEED_SUPER) + power = min(power, 500U); + else + power = min(power, 900U); done: usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) @@ -2280,7 +2286,7 @@ void composite_resume(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_function *f; - u16 maxpower; + unsigned maxpower; /* REVISIT: should we have config level * suspend/resume callbacks? @@ -2294,10 +2300,14 @@ void composite_resume(struct usb_gadget *gadget) f->resume(f); } - maxpower = cdev->config->MaxPower; + maxpower = cdev->config->MaxPower ? + cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; + if (gadget->speed < USB_SPEED_SUPER) + maxpower = min(maxpower, 500U); + else + maxpower = min(maxpower, 900U); - usb_gadget_vbus_draw(gadget, maxpower ? - maxpower : CONFIG_USB_GADGET_VBUS_DRAW); + usb_gadget_vbus_draw(gadget, maxpower); } cdev->suspended = 0; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 33852c2b29d1..ab9ac48a751a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1544,6 +1544,7 @@ static struct config_group *gadgets_make( gi->composite.resume = NULL; gi->composite.max_speed = USB_SPEED_SUPER; + spin_lock_init(&gi->spinlock); mutex_init(&gi->lock); INIT_LIST_HEAD(&gi->string_list); INIT_LIST_HEAD(&gi->available_func); diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 6ce044008cf6..7f5cf488b2b1 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -52,6 +52,7 @@ struct f_ecm { struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; + atomic_t notify_count; bool is_open; /* FIXME is_open needs some irq-ish locking @@ -380,7 +381,7 @@ static void ecm_do_notify(struct f_ecm *ecm) int status; /* notification already in flight? 
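/*
 * Note on the pattern this file introduces, sketched for clarity: the
 * atomic in-flight counter replaces the old "NULL out ecm->notify_req"
 * handoff, so submission and completion pair up as
 *
 *	atomic_inc(&ecm->notify_count);
 *	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
 *	if (status < 0)
 *		atomic_dec(&ecm->notify_count);	   (submit failed, not in flight)
 *
 * with the matching atomic_dec() in ecm_notify_complete(). Teardown paths
 * (unbind, or a completion ending in -ECONNRESET/-ESHUTDOWN) reset the
 * counter to zero; unbind additionally dequeues any request still in flight.
 */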
*/ - if (!req) + if (atomic_read(&ecm->notify_count)) return; event = req->buf; @@ -420,10 +421,10 @@ static void ecm_do_notify(struct f_ecm *ecm) event->bmRequestType = 0xA1; event->wIndex = cpu_to_le16(ecm->ctrl_id); - ecm->notify_req = NULL; + atomic_inc(&ecm->notify_count); status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC); if (status < 0) { - ecm->notify_req = req; + atomic_dec(&ecm->notify_count); DBG(cdev, "notify --> %d\n", status); } } @@ -448,17 +449,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req) switch (req->status) { case 0: /* no fault */ + atomic_dec(&ecm->notify_count); break; case -ECONNRESET: case -ESHUTDOWN: + atomic_set(&ecm->notify_count, 0); ecm->notify_state = ECM_NOTIFY_NONE; break; default: DBG(cdev, "event %02x --> %d\n", event->bNotificationType, req->status); + atomic_dec(&ecm->notify_count); break; } - ecm->notify_req = req; ecm_do_notify(ecm); } @@ -621,8 +624,12 @@ static void ecm_disable(struct usb_function *f) DBG(cdev, "ecm deactivated\n"); - if (ecm->port.in_ep->enabled) + if (ecm->port.in_ep->enabled) { gether_disconnect(&ecm->port); + } else { + ecm->port.in_ep->desc = NULL; + ecm->port.out_ep->desc = NULL; + } usb_ep_disable(ecm->notify); ecm->notify->desc = NULL; @@ -903,6 +910,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f) usb_free_all_descriptors(f); + if (atomic_read(&ecm->notify_count)) { + usb_ep_dequeue(ecm->notify, ecm->notify_req); + atomic_set(&ecm->notify_count, 0); + } + kfree(ecm->notify_req->buf); usb_ep_free_request(ecm->notify, ecm->notify_req); } diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 59d9d512dcda..a9a711e04614 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1062,6 +1062,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) req->num_sgs = io_data->sgt.nents; } else { req->buf = data; + req->num_sgs = 0; } req->length = data_len; @@ -1105,6 +1106,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) req->num_sgs = io_data->sgt.nents; } else { req->buf = data; + req->num_sgs = 0; } req->length = data_len; @@ -1160,18 +1162,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + unsigned long flags; int value; ENTER(); - spin_lock_irq(&epfile->ffs->eps_lock); + spin_lock_irqsave(&epfile->ffs->eps_lock, flags); if (likely(io_data && io_data->ep && io_data->req)) value = usb_ep_dequeue(io_data->ep, io_data->req); else value = -EINVAL; - spin_unlock_irq(&epfile->ffs->eps_lock); + spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags); return value; } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 2d6e76e4cffa..1d900081b1f0 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -53,6 +53,7 @@ struct f_ncm { struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; + atomic_t notify_count; bool is_open; const struct ndp_parser_opts *parser_opts; @@ -547,7 +548,7 @@ static void ncm_do_notify(struct f_ncm *ncm) int status; /* notification already in flight? 
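/*
 * Note on the f_fs hunks above, illustrative: a struct usb_request is
 * recycled between submissions, so one that last carried a scatterlist
 * still has req->num_sgs set from that I/O. When the next submission uses
 * a linear buffer the pairing must be restored explicitly:
 *
 *	req->buf = data;
 *	req->num_sgs = 0;	   (buffer mode: no sg table this time)
 *	req->length = data_len;
 *
 * otherwise a UDC that honours num_sgs would interpret stale sg state
 * alongside the new buffer pointer.
 */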
*/ - if (!req) + if (atomic_read(&ncm->notify_count)) return; event = req->buf; @@ -587,7 +588,8 @@ static void ncm_do_notify(struct f_ncm *ncm) event->bmRequestType = 0xA1; event->wIndex = cpu_to_le16(ncm->ctrl_id); - ncm->notify_req = NULL; + atomic_inc(&ncm->notify_count); + /* * In double buffering if there is a space in FIFO, * completion callback can be called right after the call, @@ -597,7 +599,7 @@ static void ncm_do_notify(struct f_ncm *ncm) status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC); spin_lock(&ncm->lock); if (status < 0) { - ncm->notify_req = req; + atomic_dec(&ncm->notify_count); DBG(cdev, "notify --> %d\n", status); } } @@ -632,17 +634,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req) case 0: VDBG(cdev, "Notification %02x sent\n", event->bNotificationType); + atomic_dec(&ncm->notify_count); break; case -ECONNRESET: case -ESHUTDOWN: + atomic_set(&ncm->notify_count, 0); ncm->notify_state = NCM_NOTIFY_NONE; break; default: DBG(cdev, "event %02x --> %d\n", event->bNotificationType, req->status); + atomic_dec(&ncm->notify_count); break; } - ncm->notify_req = req; ncm_do_notify(ncm); spin_unlock(&ncm->lock); } @@ -1649,6 +1653,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) ncm_string_defs[0].id = 0; usb_free_all_descriptors(f); + if (atomic_read(&ncm->notify_count)) { + usb_ep_dequeue(ncm->notify, ncm->notify_req); + atomic_set(&ncm->notify_count, 0); + } + kfree(ncm->notify_req->buf); usb_ep_free_request(ncm->notify, ncm->notify_req); } diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index d48df36622b7..0d8e4a364ca6 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f) gether_disconnect(&rndis->port); usb_ep_disable(rndis->notify); + rndis->notify->desc = NULL; } /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 65f634ec7fc2..038c445a4e9b 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -560,8 +560,10 @@ static int gs_start_io(struct gs_port *port) port->n_read = 0; started = gs_start_rx(port); - /* unblock any pending writes into our circular buffer */ if (started) { + gs_start_tx(port); + /* Unblock any pending writes into our circular buffer, in case + * we didn't in gs_start_tx() */ tty_wakeup(port->port.tty); } else { gs_free_requests(ep, head, &port->read_allocated); @@ -1239,8 +1241,10 @@ int gserial_alloc_line(unsigned char *line_num) __func__, port_num, PTR_ERR(tty_dev)); ret = PTR_ERR(tty_dev); + mutex_lock(&ports[port_num].lock); port = ports[port_num].port; ports[port_num].port = NULL; + mutex_unlock(&ports[port_num].lock); gserial_free_port(port); goto err; } diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index da1c37933ca1..8d7a556ece30 100644 --- a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -225,7 +225,7 @@ static struct usb_composite_driver cdc_driver = { .name = "g_cdc", .dev = &device_desc, .strings = dev_strings, - .max_speed = USB_SPEED_HIGH, + .max_speed = USB_SPEED_SUPER, .bind = cdc_bind, .unbind = cdc_unbind, }; diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index b640ed3fcf70..ae6d8f7092b8 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ 
b/drivers/usb/gadget/legacy/g_ffs.c @@ -149,7 +149,7 @@ static struct usb_composite_driver gfs_driver = { .name = DRIVER_NAME, .dev = &gfs_dev_desc, .strings = gfs_dev_strings, - .max_speed = USB_SPEED_HIGH, + .max_speed = USB_SPEED_SUPER, .bind = gfs_bind, .unbind = gfs_unbind, }; diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c index 50515f9e1022..ec9749845660 100644 --- a/drivers/usb/gadget/legacy/multi.c +++ b/drivers/usb/gadget/legacy/multi.c @@ -482,7 +482,7 @@ static struct usb_composite_driver multi_driver = { .name = "g_multi", .dev = &device_desc, .strings = dev_strings, - .max_speed = USB_SPEED_HIGH, + .max_speed = USB_SPEED_SUPER, .bind = multi_bind, .unbind = multi_unbind, .needs_serial = 1, diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index 8465f081e921..c61e71ba7045 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -197,7 +197,7 @@ static struct usb_composite_driver ncm_driver = { .name = "g_ncm", .dev = &device_desc, .strings = dev_strings, - .max_speed = USB_SPEED_HIGH, + .max_speed = USB_SPEED_SUPER, .bind = gncm_bind, .unbind = gncm_unbind, }; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 3d499d93c083..4c9d1e49d5ed 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -1321,7 +1321,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req, u32 this_sg; bool next_sg; - to_host = usb_pipein(urb->pipe); + to_host = usb_urb_dir_in(urb); rbuf = req->req.buf + req->req.actual; if (!urb->num_sgs) { @@ -1409,7 +1409,7 @@ top: /* FIXME update emulated data toggle too */ - to_host = usb_pipein(urb->pipe); + to_host = usb_urb_dir_in(urb); if (unlikely(len == 0)) is_short = 1; else { @@ -1830,7 +1830,7 @@ restart: /* find the gadget's ep for this request (if configured) */ address = usb_pipeendpoint (urb->pipe); - if (usb_pipein(urb->pipe)) + if (usb_urb_dir_in(urb)) address |= USB_DIR_IN; ep = find_endpoint(dum, address); if (!ep) { @@ -2385,7 +2385,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb) s = "?"; break; } s; }), - ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "", + ep, ep ? (usb_urb_dir_in(urb) ? 
"in" : "out") : "", ({ char *s; \ switch (usb_pipetype(urb->pipe)) { \ case PIPE_CONTROL: \ @@ -2725,7 +2725,7 @@ static struct platform_driver dummy_hcd_driver = { }; /*-------------------------------------------------------------------------*/ -#define MAX_NUM_UDC 2 +#define MAX_NUM_UDC 32 static struct platform_device *the_udc_pdev[MAX_NUM_UDC]; static struct platform_device *the_hcd_pdev[MAX_NUM_UDC]; diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 7a0e9a58c2d8..116d386472ef 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -2176,8 +2176,6 @@ static int gr_probe(struct platform_device *pdev) return -ENOMEM; } - spin_lock(&dev->lock); - /* Inside lock so that no gadget can use this udc until probe is done */ retval = usb_add_gadget_udc(dev->dev, &dev->gadget); if (retval) { @@ -2186,15 +2184,21 @@ static int gr_probe(struct platform_device *pdev) } dev->added = 1; - retval = gr_udc_init(dev); - if (retval) - goto out; + spin_lock(&dev->lock); - gr_dfs_create(dev); + retval = gr_udc_init(dev); + if (retval) { + spin_unlock(&dev->lock); + goto out; + } /* Clear all interrupt enables that might be left on since last boot */ gr_disable_interrupts_and_pullup(dev); + spin_unlock(&dev->lock); + + gr_dfs_create(dev); + retval = gr_request_irq(dev, dev->irq); if (retval) { dev_err(dev->dev, "Failed to request irq %d\n", dev->irq); @@ -2223,8 +2227,6 @@ static int gr_probe(struct platform_device *pdev) dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq); out: - spin_unlock(&dev->lock); - if (retval) gr_remove(pdev); diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index 265dab2bbfac..3344fb8c4181 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -1519,7 +1519,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, td = phys_to_virt(addr); addr2 = (dma_addr_t)td->next; dma_pool_free(dev->data_requests, td, addr); - td->next = 0x00; addr = addr2; } req->chain_len = 1; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 79b2e79dddd0..b1834c22b7db 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -4,6 +4,12 @@ # comment "USB Host Controller Drivers" +# tuzhi added begin +config ARCH_PHYTIUM_1500A_USB_RESET_PROBE_REMOVE + bool "ft1500a remove the usbcard reset in probe" +# tuzhi added end + + config USB_C67X00_HCD tristate "Cypress C67x00 HCD support" depends on HAS_IOMEM diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index aa2f77f1506d..8a5c9b3ebe1e 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -27,6 +27,10 @@ /*-------------------------------------------------------------------------*/ +/* PID Codes that are used here, from EHCI specification, Table 3-16. */ +#define PID_CODE_IN 1 +#define PID_CODE_SETUP 2 + /* fill a qtd, returning how much of the buffer we were able to queue up */ static int @@ -190,7 +194,7 @@ static int qtd_copy_status ( int status = -EINPROGRESS; /* count IN/OUT bytes, not SETUP (even short packets) */ - if (likely (QTD_PID (token) != 2)) + if (likely(QTD_PID(token) != PID_CODE_SETUP)) urb->actual_length += length - QTD_LENGTH (token); /* don't modify error codes */ @@ -206,6 +210,13 @@ static int qtd_copy_status ( if (token & QTD_STS_BABBLE) { /* FIXME "must" disable babbling device's port too */ status = -EOVERFLOW; + /* + * When MMF is active and PID Code is IN, queue is halted. + * EHCI Specification, Table 4-13. 
+ */ + } else if ((token & QTD_STS_MMF) && + (QTD_PID(token) == PID_CODE_IN)) { + status = -EPROTO; /* CERR nonzero + halt --> stall */ } else if (QTD_CERR(token)) { status = -EPIPE; diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 38183ac438c6..1371b0c249ec 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev) } da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN); - if (IS_ERR(da8xx_ohci->oc_gpio)) + if (IS_ERR(da8xx_ohci->oc_gpio)) { + error = PTR_ERR(da8xx_ohci->oc_gpio); goto err; + } if (da8xx_ohci->oc_gpio) { oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio); - if (oc_irq < 0) + if (oc_irq < 0) { + error = oc_irq; goto err; + } error = devm_request_threaded_irq(dev, oc_irq, NULL, ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING | diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index b7d23c438756..af92b2576fe9 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = { static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, u16 wLength) { + struct xhci_port_cap *port_cap = NULL; int i, ssa_count; u32 temp; u16 desc_size, ssp_cap_size, ssa_size = 0; @@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size; /* does xhci support USB 3.1 Enhanced SuperSpeed */ - if (xhci->usb3_rhub.min_rev >= 0x01) { + for (i = 0; i < xhci->num_port_caps; i++) { + if (xhci->port_caps[i].maj_rev == 0x03 && + xhci->port_caps[i].min_rev >= 0x01) { + usb3_1 = true; + port_cap = &xhci->port_caps[i]; + break; + } + } + + if (usb3_1) { /* does xhci provide a PSI table for SSA speed attributes? 
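/*
 * Worked example for the sizing above, illustrative: a host reporting
 * three unique PSI IDs needs ssa_count = 3 * 2 = 6 sublink speed
 * attributes (one RX and one TX per ID), hence ssa_size = 6 * sizeof(u32)
 * = 24 bytes, while the 16-byte default SSA baked into the descriptor
 * template is skipped (ssp_cap_size -= 16).
 */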
*/ - if (xhci->usb3_rhub.psi_count) { + if (port_cap->psi_count) { /* two SSA entries for each unique PSI ID, RX and TX */ - ssa_count = xhci->usb3_rhub.psi_uid_count * 2; + ssa_count = port_cap->psi_uid_count * 2; ssa_size = ssa_count * sizeof(u32); ssp_cap_size -= 16; /* skip copying the default SSA */ } desc_size += ssp_cap_size; - usb3_1 = true; } memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength)); @@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, } /* If PSI table exists, add the custom speed attributes from it */ - if (usb3_1 && xhci->usb3_rhub.psi_count) { + if (usb3_1 && port_cap->psi_count) { u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp; int offset; @@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */ bm_attrib = (ssa_count - 1) & 0x1f; - bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5; + bm_attrib |= (port_cap->psi_uid_count - 1) << 5; put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]); if (wLength < desc_size + ssa_size) @@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, * USB 3.1 requires two SSA entries (RX and TX) for every link */ offset = desc_size; - for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { - psi = xhci->usb3_rhub.psi[i]; + for (i = 0; i < port_cap->psi_count; i++) { + psi = port_cap->psi[i]; psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; psi_exp = XHCI_EXT_PORT_PSIE(psi); psi_mant = XHCI_EXT_PORT_PSIM(psi); @@ -806,7 +815,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, u32 *status, u32 portsc, - unsigned long flags) + unsigned long *flags) { struct xhci_bus_state *bus_state; struct xhci_hcd *xhci; @@ -860,11 +869,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, xhci_test_and_clear_bit(xhci, port, PORT_PLC); xhci_set_link_state(xhci, port, XDEV_U0); - spin_unlock_irqrestore(&xhci->lock, flags); + spin_unlock_irqrestore(&xhci->lock, *flags); time_left = wait_for_completion_timeout( &bus_state->rexit_done[wIndex], msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS)); - spin_lock_irqsave(&xhci->lock, flags); + spin_lock_irqsave(&xhci->lock, *flags); if (time_left) { slot_id = xhci_find_slot_id_by_port(hcd, xhci, @@ -920,11 +929,13 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status, { struct xhci_bus_state *bus_state; struct xhci_hcd *xhci; + struct usb_hcd *hcd; u32 link_state; u32 portnum; bus_state = &port->rhub->bus_state; xhci = hcd_to_xhci(port->rhub->hcd); + hcd = port->rhub->hcd; link_state = portsc & PORT_PLS_MASK; portnum = port->hcd_portnum; @@ -952,12 +963,20 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status, bus_state->suspended_ports &= ~(1 << portnum); } + /* remote wake resume signaling complete */ + if (bus_state->port_remote_wakeup & (1 << portnum) && + link_state != XDEV_RESUME && + link_state != XDEV_RECOVERY) { + bus_state->port_remote_wakeup &= ~(1 << portnum); + usb_hcd_end_port_resume(&hcd->self, portnum); + } + xhci_hub_report_usb3_link_state(xhci, status, portsc); xhci_del_comp_mod_timer(xhci, portsc, portnum); } static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, - u32 portsc, unsigned long flags) + u32 portsc, unsigned long *flags) { struct xhci_bus_state *bus_state; u32 link_state; @@ -1007,7 +1026,7 @@ static void xhci_get_usb2_port_status(struct xhci_port 
*port, u32 *status, static u32 xhci_get_port_status(struct usb_hcd *hcd, struct xhci_bus_state *bus_state, u16 wIndex, u32 raw_port_status, - unsigned long flags) + unsigned long *flags) __releases(&xhci->lock) __acquires(&xhci->lock) { @@ -1130,7 +1149,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } trace_xhci_get_port_status(wIndex, temp); status = xhci_get_port_status(hcd, bus_state, wIndex, temp, - flags); + &flags); if (status == 0xffffffff) goto error; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index e16eda6e2b8b..884c601bfa15 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Allow 3 retries for everything but isoc, set CErr = 3 */ if (!usb_endpoint_xfer_isoc(&ep->desc)) err_count = 3; - /* Some devices get this wrong */ - if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH) - max_packet = 512; + /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */ + if (usb_endpoint_xfer_bulk(&ep->desc)) { + if (udev->speed == USB_SPEED_HIGH) + max_packet = 512; + if (udev->speed == USB_SPEED_FULL) { + max_packet = rounddown_pow_of_two(max_packet); + max_packet = clamp_val(max_packet, 8, 64); + } + } /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) avg_trb_len = 8; @@ -1913,6 +1919,10 @@ no_bw: kfree(xhci->hw_ports); kfree(xhci->rh_bw); kfree(xhci->ext_caps); + for (i = 0; i < xhci->num_port_caps; i++) + kfree(xhci->port_caps[i].psi); + kfree(xhci->port_caps); + xhci->num_port_caps = 0; xhci->usb2_rhub.ports = NULL; xhci->usb3_rhub.ports = NULL; @@ -2116,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, u8 major_revision, minor_revision; struct xhci_hub *rhub; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_port_cap *port_cap; temp = readl(addr); major_revision = XHCI_EXT_PORT_MAJOR(temp); @@ -2150,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, /* WTF? 
"Valid values are ‘1’ to MaxPorts" */ return; - rhub->psi_count = XHCI_EXT_PORT_PSIC(temp); - if (rhub->psi_count) { - rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi), - GFP_KERNEL, dev_to_node(dev)); - if (!rhub->psi) - rhub->psi_count = 0; + port_cap = &xhci->port_caps[xhci->num_port_caps++]; + if (xhci->num_port_caps > max_caps) + return; - rhub->psi_uid_count++; - for (i = 0; i < rhub->psi_count; i++) { - rhub->psi[i] = readl(addr + 4 + i); + port_cap->maj_rev = major_revision; + port_cap->min_rev = minor_revision; + port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp); + + if (port_cap->psi_count) { + port_cap->psi = kcalloc_node(port_cap->psi_count, + sizeof(*port_cap->psi), + GFP_KERNEL, dev_to_node(dev)); + if (!port_cap->psi) + port_cap->psi_count = 0; + + port_cap->psi_uid_count++; + for (i = 0; i < port_cap->psi_count; i++) { + port_cap->psi[i] = readl(addr + 4 + i); /* count unique ID values, two consecutive entries can * have the same ID if link is assymetric */ - if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) != - XHCI_EXT_PORT_PSIV(rhub->psi[i - 1]))) - rhub->psi_uid_count++; + if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) != + XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1]))) + port_cap->psi_uid_count++; xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", - XHCI_EXT_PORT_PSIV(rhub->psi[i]), - XHCI_EXT_PORT_PSIE(rhub->psi[i]), - XHCI_EXT_PORT_PLT(rhub->psi[i]), - XHCI_EXT_PORT_PFD(rhub->psi[i]), - XHCI_EXT_PORT_LP(rhub->psi[i]), - XHCI_EXT_PORT_PSIM(rhub->psi[i])); + XHCI_EXT_PORT_PSIV(port_cap->psi[i]), + XHCI_EXT_PORT_PSIE(port_cap->psi[i]), + XHCI_EXT_PORT_PLT(port_cap->psi[i]), + XHCI_EXT_PORT_PFD(port_cap->psi[i]), + XHCI_EXT_PORT_LP(port_cap->psi[i]), + XHCI_EXT_PORT_PSIM(port_cap->psi[i])); } } /* cache usb2 port capabilities */ @@ -2209,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, continue; } hw_port->rhub = rhub; + hw_port->port_cap = port_cap; rhub->num_ports++; } /* FIXME: Should we disable ports not in the Extended Capabilities? 
*/ @@ -2299,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->ext_caps) return -ENOMEM; + xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), + flags, dev_to_node(dev)); + if (!xhci->port_caps) + return -ENOMEM; + offset = cap_start; while (offset) { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1e0236e90687..1fddc41fa1f3 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -48,6 +48,8 @@ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0 +#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13 +#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba @@ -134,7 +136,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_AMD && - (pdev->device == 0x15e0 || + (pdev->device == 0x145c || + pdev->device == 0x15e0 || pdev->device == 0x15e1 || pdev->device == 0x43bb)) xhci->quirks |= XHCI_SUSPEND_DELAY; @@ -186,7 +189,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && @@ -212,7 +216,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && @@ -300,6 +305,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (!usb_hcd_is_primary_hcd(hcd)) return 0; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_acpi_rtd3_enable(pdev); + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); /* Find any debug ports */ @@ -357,9 +365,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_acpi_rtd3_enable(dev); - /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ pm_runtime_put_noidle(&dev->dev); @@ -517,6 +522,18 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) retval = xhci_resume(xhci, hibernated); return retval; } + +static void xhci_pci_shutdown(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + xhci_shutdown(hcd); + + /* Yet another workaround for spurious wakeups at shutdown with HSW */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + pci_set_power_state(pdev, PCI_D3hot); +} #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ @@ -554,6 +571,7 @@ static int __init xhci_pci_init(void) #ifdef CONFIG_PM xhci_pci_hc_driver.pci_suspend = 
xhci_pci_suspend; xhci_pci_hc_driver.pci_resume = xhci_pci_resume; + xhci_pci_hc_driver.shutdown = xhci_pci_shutdown; #endif return pci_register_driver(&xhci_pci_driver); } diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index d90cd5ec09cf..315b4552693c 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -445,6 +445,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = &xhci_plat_pm_ops, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index e7aab31fd9a5..f7a190fb2353 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1624,7 +1624,6 @@ static void handle_port_status(struct xhci_hcd *xhci, slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); if (slot_id && xhci->devs[slot_id]) xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; - bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); } if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { @@ -1644,6 +1643,7 @@ static void handle_port_status(struct xhci_hcd *xhci, */ bus_state->port_remote_wakeup |= 1 << hcd_portnum; xhci_test_and_clear_bit(xhci, port, PORT_PLC); + usb_hcd_start_port_resume(&hcd->self, hcd_portnum); xhci_set_link_state(xhci, port, XDEV_U0); /* Need to wait until the next link state change * indicates the device is actually in U0. @@ -1684,7 +1684,6 @@ static void handle_port_status(struct xhci_hcd *xhci, if (slot_id && xhci->devs[slot_id]) xhci_ring_device(xhci, slot_id); if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { - bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); xhci_test_and_clear_bit(xhci, port, PORT_PLC); usb_wakeup_notification(hcd->self.root_hub, hcd_portnum + 1); @@ -2378,7 +2377,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_SUCCESS: if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) break; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + if (xhci->quirks & XHCI_TRUST_TX_LENGTH || + ep_ring->last_td_was_short) trb_comp_code = COMP_SHORT_PACKET; else xhci_warn_ratelimited(xhci, @@ -2740,6 +2740,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci) return 1; } +/* + * Update Event Ring Dequeue Pointer: + * - When all events have finished + * - To avoid "Event Ring Full Error" condition + */ +static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + union xhci_trb *event_ring_deq) +{ + u64 temp_64; + dma_addr_t deq; + + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + /* If necessary, update the HW's version of the event ring deq ptr. */ + if (event_ring_deq != xhci->event_ring->dequeue) { + deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, + xhci->event_ring->dequeue); + if (deq == 0) + xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); + /* + * Per 4.9.4, Software writes to the ERDP register shall + * always advance the Event Ring Dequeue Pointer value. 
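/*
 * Usage note, illustrative: the interrupt handler below calls this helper
 * every TRBS_PER_SEGMENT / 2 events and once more when the ring is
 * drained. With the default 256-TRB segment the hardware dequeue pointer
 * is therefore refreshed every 128 events, which is what keeps a long
 * event storm from triggering the Event Ring Full Error this comment
 * describes.
 */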
+ */ + if ((temp_64 & (u64) ~ERST_PTR_MASK) == + ((u64) deq & (u64) ~ERST_PTR_MASK)) + return; + + /* Update HC event ring dequeue pointer */ + temp_64 &= ERST_PTR_MASK; + temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); + } + + /* Clear the event handler busy flag (RW1C) */ + temp_64 |= ERST_EHB; + xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); +} + /* * xHCI spec says we can get an interrupt, and if the HC has an error condition, * we might get bad data out of the event ring. Section 4.10.2.7 has a list of @@ -2751,9 +2787,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) union xhci_trb *event_ring_deq; irqreturn_t ret = IRQ_NONE; unsigned long flags; - dma_addr_t deq; u64 temp_64; u32 status; + int event_loop = 0; spin_lock_irqsave(&xhci->lock, flags); /* Check if the xHC generated the interrupt, or the irq is shared */ @@ -2807,24 +2843,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) /* FIXME this should be a delayed service routine * that clears the EHB. */ - while (xhci_handle_event(xhci) > 0) {} - - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - /* If necessary, update the HW's version of the event ring deq ptr. */ - if (event_ring_deq != xhci->event_ring->dequeue) { - deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, - xhci->event_ring->dequeue); - if (deq == 0) - xhci_warn(xhci, "WARN something wrong with SW event " - "ring dequeue ptr.\n"); - /* Update HC event ring dequeue pointer */ - temp_64 &= ERST_PTR_MASK; - temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); + while (xhci_handle_event(xhci) > 0) { + if (event_loop++ < TRBS_PER_SEGMENT / 2) + continue; + xhci_update_erst_dequeue(xhci, event_ring_deq); + event_loop = 0; } - /* Clear the event handler busy flag (RW1C); event ring is empty. */ - temp_64 |= ERST_EHB; - xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); + xhci_update_erst_dequeue(xhci, event_ring_deq); ret = IRQ_HANDLED; out: diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 2ff7c911fbd0..b8e24ccba9f3 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -755,7 +755,6 @@ static int tegra_xusb_runtime_suspend(struct device *dev) { struct tegra_xusb *tegra = dev_get_drvdata(dev); - tegra_xusb_phy_disable(tegra); regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies); tegra_xusb_clk_disable(tegra); @@ -779,16 +778,8 @@ static int tegra_xusb_runtime_resume(struct device *dev) goto disable_clk; } - err = tegra_xusb_phy_enable(tegra); - if (err < 0) { - dev_err(dev, "failed to enable PHYs: %d\n", err); - goto disable_regulator; - } - return 0; -disable_regulator: - regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies); disable_clk: tegra_xusb_clk_disable(tegra); return err; @@ -1181,6 +1172,12 @@ static int tegra_xusb_probe(struct platform_device *pdev) */ platform_set_drvdata(pdev, tegra); + err = tegra_xusb_phy_enable(tegra); + if (err < 0) { + dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err); + goto put_hcd; + } + pm_runtime_enable(&pdev->dev); if (pm_runtime_enabled(&pdev->dev)) err = pm_runtime_get_sync(&pdev->dev); @@ -1189,7 +1186,7 @@ static int tegra_xusb_probe(struct platform_device *pdev) if (err < 0) { dev_err(&pdev->dev, "failed to enable device: %d\n", err); - goto disable_rpm; + goto disable_phy; } tegra_xusb_config(tegra, regs); @@ -1275,9 +1272,11 @@ remove_usb2: put_rpm: if (!pm_runtime_status_suspended(&pdev->dev)) tegra_xusb_runtime_suspend(&pdev->dev); -disable_rpm: - pm_runtime_disable(&pdev->dev); +put_hcd: 
usb_put_hcd(tegra->hcd); +disable_phy: + tegra_xusb_phy_disable(tegra); + pm_runtime_disable(&pdev->dev); put_powerdomains: if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) { tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC); @@ -1314,6 +1313,8 @@ static int tegra_xusb_remove(struct platform_device *pdev) tegra_xusb_powerdomain_remove(&pdev->dev, tegra); } + tegra_xusb_phy_disable(tegra); + tegra_xusb_padctl_put(tegra->padctl); return 0; @@ -1412,6 +1413,7 @@ MODULE_FIRMWARE("nvidia/tegra210/xusb.bin"); static const char * const tegra186_supply_names[] = { }; +MODULE_FIRMWARE("nvidia/tegra186/xusb.bin"); static const struct tegra_xusb_phy_type tegra186_phy_types[] = { { .name = "usb3", .num = 3, }, diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index 052a269d86f2..87da9098fb34 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -289,23 +289,12 @@ DECLARE_EVENT_CLASS(xhci_log_urb, ), TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x", __entry->epnum, __entry->dir_in ? "in" : "out", - ({ char *s; - switch (__entry->type) { - case USB_ENDPOINT_XFER_INT: - s = "intr"; - break; - case USB_ENDPOINT_XFER_CONTROL: - s = "control"; - break; - case USB_ENDPOINT_XFER_BULK: - s = "bulk"; - break; - case USB_ENDPOINT_XFER_ISOC: - s = "isoc"; - break; - default: - s = "UNKNOWN"; - } s; }), __entry->urb, __entry->pipe, __entry->slot_id, + __print_symbolic(__entry->type, + { USB_ENDPOINT_XFER_INT, "intr" }, + { USB_ENDPOINT_XFER_CONTROL, "control" }, + { USB_ENDPOINT_XFER_BULK, "bulk" }, + { USB_ENDPOINT_XFER_ISOC, "isoc" }), + __entry->urb, __entry->pipe, __entry->slot_id, __entry->actual, __entry->length, __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream, __entry->flags ) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6c17e3fe181a..9b3b1b16eafb 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -770,7 +770,7 @@ static void xhci_stop(struct usb_hcd *hcd) * * This will only ever be called with the main usb_hcd (the USB3 roothub). */ -static void xhci_shutdown(struct usb_hcd *hcd) +void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -789,11 +789,8 @@ static void xhci_shutdown(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_shutdown completed - status = %x", readl(&xhci->op_regs->status)); - - /* Yet another workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot); } +EXPORT_SYMBOL_GPL(xhci_shutdown); #ifdef CONFIG_PM static void xhci_save_registers(struct xhci_hcd *xhci) @@ -973,7 +970,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci) int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; - unsigned int delay = XHCI_MAX_HALT_USEC; + unsigned int delay = XHCI_MAX_HALT_USEC * 2; struct usb_hcd *hcd = xhci_to_hcd(xhci); u32 command; u32 res; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f9f88626a57a..98b98a0cd2a8 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1702,12 +1702,20 @@ struct xhci_bus_state { * Intel Lynx Point LP xHCI host. 
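/*
 * Note on the xhci-trace hunk above, illustrative: __print_symbolic() is
 * not just shorter than the open-coded switch -- the value/name pairs are
 * exported in the event's format file, so userspace decoders such as
 * trace-cmd and perf can resolve __entry->type offline, which a GCC
 * statement expression evaluated inside TP_printk() never could.
 */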
*/ #define XHCI_MAX_REXIT_TIMEOUT_MS 20 +struct xhci_port_cap { + u32 *psi; /* array of protocol speed ID entries */ + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; +}; struct xhci_port { __le32 __iomem *addr; int hw_portnum; int hcd_portnum; struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; }; struct xhci_hub { @@ -1719,9 +1727,6 @@ struct xhci_hub { /* supported prococol extended capabiliy values */ u8 maj_rev; u8 min_rev; - u32 *psi; /* array of protocol speed ID entries */ - u8 psi_count; - u8 psi_uid_count; }; /* There is one xhci_hcd structure per controller */ @@ -1880,6 +1885,9 @@ struct xhci_hcd { /* cached usb2 extened protocol capabilites */ u32 *ext_caps; unsigned int num_ext_caps; + /* cached extended protocol port capabilities */ + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; /* Compliance Mode Recovery Data */ struct timer_list comp_mode_recovery_timer; u32 port_status_u0; @@ -2050,6 +2058,7 @@ int xhci_start(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); +void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 6f5edb9fc61e..d8d157c4c271 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -669,7 +669,7 @@ static int adu_probe(struct usb_interface *interface, init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); - res = usb_find_common_endpoints_reverse(&interface->altsetting[0], + res = usb_find_common_endpoints_reverse(interface->cur_altsetting, NULL, NULL, &dev->interrupt_in_endpoint, &dev->interrupt_out_endpoint); diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index ac92725458b5..ba1eaabc7796 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd) 0, pdata->msgdata, 2, ACD_USB_TIMEOUT); - brightness = pdata->msgdata[1]; + if (retval < 2) { + if (retval >= 0) + retval = -EMSGSIZE; + } else { + brightness = pdata->msgdata[1]; + } mutex_unlock(&pdata->sysfslock); if (retval < 0) @@ -299,6 +304,7 @@ error: if (pdata) { if (pdata->urb) { usb_kill_urb(pdata->urb); + cancel_delayed_work_sync(&pdata->work); if (pdata->urbdata) usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN, pdata->urbdata, pdata->urb->transfer_dma); diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 34e6cd6f40d3..87067c3d6109 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev) !dev->reading, (started ? 
NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); - if (result < 0) + if (result < 0) { + usb_kill_urb(dev->urb); goto out; + } - if (result == 0) + if (result == 0) { result = -ETIMEDOUT; - else + usb_kill_urb(dev->urb); + } else { result = dev->valid; + } out: /* Let the device go back to sleep eventually */ usb_autopm_put_interface(dev->interface); @@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface, static int chaoskey_resume(struct usb_interface *interface) { + struct chaoskey *dev; + struct usb_device *udev = interface_to_usbdev(interface); + usb_dbg(interface, "resume"); + dev = usb_get_intfdata(interface); + + /* + * We may have lost power. + * In that case the device that needs a long time + * for the first requests needs an extended timeout + * again + */ + if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID) + dev->reads_started = false; + return 0; } #else diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 20b0f91a5d9b..bb24527f3c70 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -337,7 +337,7 @@ static int idmouse_probe(struct usb_interface *interface, int result; /* check if we have gotten the data or the hid interface */ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index dce44fbf031f..dce20301e367 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -33,6 +33,14 @@ #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512 /* full speed iowarrior */ #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503 +/* fuller speed iowarrior */ +#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504 +#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505 +#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506 + +/* OEMed devices */ +#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a +#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b /* Get a minor range for your devices from the usb maintainer */ #ifdef CONFIG_USB_DYNAMIC_MINORS @@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = { {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, iowarrior_ids); @@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file, } switch (dev->product_id) { case USB_DEVICE_ID_CODEMERCS_IOW24: + case USB_DEVICE_ID_CODEMERCS_IOW24SAG: case USB_DEVICE_ID_CODEMERCS_IOWPV1: case USB_DEVICE_ID_CODEMERCS_IOWPV2: case USB_DEVICE_ID_CODEMERCS_IOW40: @@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file, goto exit; break; case USB_DEVICE_ID_CODEMERCS_IOW56: + case USB_DEVICE_ID_CODEMERCS_IOW56AM: + case USB_DEVICE_ID_CODEMERCS_IOW28: + case USB_DEVICE_ID_CODEMERCS_IOW28L: + case USB_DEVICE_ID_CODEMERCS_IOW100: /* The IOW56 uses asynchronous IO and more urbs */ if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) { /* Wait until we are below the limit for submitted urbs */ @@ 
-493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case IOW_WRITE: if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 || + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) { @@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface, goto error; } - if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { + if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) { res = usb_find_last_int_out_endpoint(iface_desc, &dev->int_out_endpoint); if (res) { @@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface, /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56)) + ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100))) /* IOWarrior56 has wMaxPacketSize different from report size */ dev->report_size = 7; diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index ac2b4fcc265f..f48a23adbc35 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1039,12 +1039,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); - mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); - kfree(rp->b_vec); - rp->b_vec = vec; - rp->b_size = size; - rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; - rp->cnt_lost = 0; + if (rp->mmap_active) { + mon_free_buff(vec, size/CHUNK_SIZE); + kfree(vec); + ret = -EBUSY; + } else { + mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); + kfree(rp->b_vec); + rp->b_vec = vec; + rp->b_size = size; + rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; + rp->cnt_lost = 0; + } spin_unlock_irqrestore(&rp->b_lock, flags); mutex_unlock(&rp->fetch_lock); } @@ -1216,13 +1222,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait) static void mon_bin_vma_open(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; + unsigned long flags; + + spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active++; + spin_unlock_irqrestore(&rp->b_lock, flags); } static void mon_bin_vma_close(struct vm_area_struct *vma) { + unsigned long flags; + struct mon_reader_bin *rp = vma->vm_private_data; + spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active--; + spin_unlock_irqrestore(&rp->b_lock, flags); } /* @@ -1234,16 +1248,12 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) unsigned long offset, chunk_idx; struct page *pageptr; - mutex_lock(&rp->fetch_lock); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) { - mutex_unlock(&rp->fetch_lock); + if (offset >= rp->b_size) return VM_FAULT_SIGBUS; - } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); - 
mutex_unlock(&rp->fetch_lock); vmf->page = pageptr; return 0; } diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index 5261f8dfedec..e3b8c84ccdb8 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = { static int jz4740_musb_init(struct musb *musb) { struct device *dev = musb->controller->parent; + int err; if (dev->of_node) musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); else musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (IS_ERR(musb->xceiv)) { - dev_err(dev, "No transceiver configured\n"); - return PTR_ERR(musb->xceiv); + err = PTR_ERR(musb->xceiv); + if (err != -EPROBE_DEFER) + dev_err(dev, "No transceiver configured: %d", err); + return err; } /* Silicon does not implement ConfigData register. diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index bd63450af76a..bf083c1f997f 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb); #define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \ (2 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) +#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \ + (3 << MUSB_DEVCTL_VBUS_SHIFT) | \ + MUSB_DEVCTL_SESSION) #define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) @@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb) s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV | MUSB_DEVCTL_HR; switch (devctl & ~s) { + case MUSB_QUIRK_B_DISCONNECT_99: + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + break; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, @@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */ + musb_writeb(musb->mregs, MUSB_POWER, 0); + /* Init IRQ workqueue before request_irq */ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 5a44b70372d9..fa9922c0c910 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -1462,10 +1462,7 @@ done: * We need to map sg if the transfer_buffer is * NULL. 
*/ - if (!urb->transfer_buffer) - qh->use_sg = true; - - if (qh->use_sg) { + if (!urb->transfer_buffer) { /* sg_miter_start is already done in musb_ep_program */ if (!sg_miter_next(&qh->sg_miter)) { dev_err(musb->controller, "error: sg list empty\n"); @@ -1473,9 +1470,8 @@ done: status = -EINVAL; goto done; } - urb->transfer_buffer = qh->sg_miter.addr; length = min_t(u32, length, qh->sg_miter.length); - musb_write_fifo(hw_ep, length, urb->transfer_buffer); + musb_write_fifo(hw_ep, length, qh->sg_miter.addr); qh->sg_miter.consumed = length; sg_miter_stop(&qh->sg_miter); } else { @@ -1484,11 +1480,6 @@ done: qh->segsize = length; - if (qh->use_sg) { - if (offset + length >= urb->transfer_buffer_length) - qh->use_sg = false; - } - musb_ep_select(mbase, epnum); musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); @@ -2003,8 +1994,10 @@ finish: urb->actual_length += xfer_len; qh->offset += xfer_len; if (done) { - if (qh->use_sg) + if (qh->use_sg) { qh->use_sg = false; + urb->transfer_buffer = NULL; + } if (urb->status == -EINPROGRESS) urb->status = status; diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 5fc6825745f2..2d3751d885b4 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb, controller->controller.channel_abort = dma_channel_abort; if (request_irq(irq, dma_controller_irq, 0, - dev_name(musb->controller), &controller->controller)) { + dev_name(musb->controller), controller)) { dev_err(dev, "request_irq %d failed!\n", irq); musb_dma_controller_destroy(&controller->controller); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index a3d2fef67746..5c93226e0e20 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -361,8 +361,6 @@ static const struct musb_platform_ops omap2430_ops = { .init = omap2430_musb_init, .exit = omap2430_musb_exit, - .set_vbus = omap2430_musb_set_vbus, - .enable = omap2430_musb_enable, .disable = omap2430_musb_disable, diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h index 0824099b905e..ef1735d014da 100644 --- a/drivers/usb/renesas_usbhs/common.h +++ b/drivers/usb/renesas_usbhs/common.h @@ -161,11 +161,12 @@ struct usbhs_priv; #define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */ #define VALID (1 << 3) /* USB Request Receive */ -#define DVSQ_MASK (0x3 << 4) /* Device State */ +#define DVSQ_MASK (0x7 << 4) /* Device State */ #define POWER_STATE (0 << 4) #define DEFAULT_STATE (1 << 4) #define ADDRESS_STATE (2 << 4) #define CONFIGURATION_STATE (3 << 4) +#define SUSPENDED_STATE (4 << 4) #define CTSQ_MASK (0x7) /* Control Transfer Stage */ #define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */ diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index cd38d74b3223..53489cafecc1 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -457,12 +457,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv, { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); + int state = usbhs_status_get_device_state(irq_state); gpriv->gadget.speed = usbhs_bus_get_speed(priv); - dev_dbg(dev, "state = %x : speed : %d\n", - usbhs_status_get_device_state(irq_state), - gpriv->gadget.speed); + dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed); + + if (gpriv->gadget.speed != 
USB_SPEED_UNKNOWN && + (state & SUSPENDED_STATE)) { + if (gpriv->driver && gpriv->driver->suspend) + gpriv->driver->suspend(&gpriv->gadget); + usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED); + } return 0; } diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index 94b4e7db2b94..97e3d75b19a3 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -169,8 +169,8 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get); void usb_role_switch_put(struct usb_role_switch *sw) { if (!IS_ERR_OR_NULL(sw)) { - put_device(&sw->dev); module_put(sw->dev.parent->driver->owner); + put_device(&sw->dev); } } EXPORT_SYMBOL_GPL(usb_role_switch_put); diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 3bb1fff02bed..955ab97b9b22 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -589,9 +589,13 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; - struct ch341_private *priv = usb_get_serial_port_data(port); + struct ch341_private *priv; int ret; + priv = usb_get_serial_port_data(port); + if (!priv) + return 0; + /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 979bef9bfb6b..f5143eedbc48 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 25e81faf4c24..9ad44a96dfe3 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = { /* Sienna devices */ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, + /* U-Blox devices */ + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 22d66217cb41..e8373528264c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1558,3 +1558,10 @@ */ #define UNJO_VID 0x22B7 #define UNJO_ISODEBUG_V1_PID 0x150D + +/* + * U-Blox products (http://www.u-blox.com). 
+ */ +#define UBLOX_VID 0x1546 +#define UBLOX_C099F9P_ZED_PID 0x0502 +#define UBLOX_C099F9P_ODIN_PID 0x0503 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 48a439298a68..4cca0b836f43 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -710,13 +710,13 @@ static void edge_interrupt_callback(struct urb *urb) /* grab the txcredits for the ports if available */ position = 2; portNumber = 0; - while ((position < length) && + while ((position < length - 1) && (portNumber < edge_serial->serial->num_ports)) { txCredits = data[position] | (data[position+1] << 8); if (txCredits) { port = edge_serial->serial->port[portNumber]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { spin_lock_irqsave(&edge_port->ep_lock, flags); edge_port->txCredits += txCredits; @@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state) static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength) { - struct device *dev = &edge_serial->serial->dev->dev; + struct usb_serial *serial = edge_serial->serial; + struct device *dev = &serial->dev->dev; struct usb_serial_port *port; struct edgeport_port *edge_port; __u16 lastBufferLength; @@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, /* spit this data back into the tty driver if this port is open */ - if (rxLen) { - port = edge_serial->serial->port[ - edge_serial->rxPort]; + if (rxLen && edge_serial->rxPort < serial->num_ports) { + port = serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n", __func__, rxLen, edge_serial->rxPort); @@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, rxLen); edge_port->port->icount.rx += rxLen; } - buffer += rxLen; } + buffer += rxLen; break; case EXPECT_HDR3: /* Expect 3rd byte of status header */ @@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 code = edge_serial->rxStatusCode; /* switch the port pointer to the one being currently talked about */ + if (edge_serial->rxPort >= edge_serial->serial->num_ports) + return; port = edge_serial->serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port == NULL) { @@ -2901,16 +2903,18 @@ static int edge_startup(struct usb_serial *serial) response = 0; if (edge_serial->is_epic) { + struct usb_host_interface *alt; + + alt = serial->interface->cur_altsetting; + /* EPIC thing, set up our interrupt polling now and our read * urb, so that the device knows it really is connected. */ interrupt_in_found = bulk_in_found = bulk_out_found = false; - for (i = 0; i < serial->interface->altsetting[0] - .desc.bNumEndpoints; ++i) { + for (i = 0; i < alt->desc.bNumEndpoints; ++i) { struct usb_endpoint_descriptor *endpoint; int buffer_size; - endpoint = &serial->interface->altsetting[0]. 
- endpoint[i].desc; + endpoint = &alt->endpoint[i].desc; buffer_size = usb_endpoint_maxp(endpoint); if (!interrupt_in_found && (usb_endpoint_is_int_in(endpoint))) { diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 302eb9530859..627bea7e6cfb 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c @@ -45,9 +45,10 @@ static int buffer_size; static int xbof = -1; static int ir_startup (struct usb_serial *serial); -static int ir_open(struct tty_struct *tty, struct usb_serial_port *port); -static int ir_prepare_write_buffer(struct usb_serial_port *port, - void *dest, size_t size); +static int ir_write(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); +static int ir_write_room(struct tty_struct *tty); +static void ir_write_bulk_callback(struct urb *urb); static void ir_process_read_urb(struct urb *urb); static void ir_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios); @@ -77,8 +78,9 @@ static struct usb_serial_driver ir_device = { .num_ports = 1, .set_termios = ir_set_termios, .attach = ir_startup, - .open = ir_open, - .prepare_write_buffer = ir_prepare_write_buffer, + .write = ir_write, + .write_room = ir_write_room, + .write_bulk_callback = ir_write_bulk_callback, .process_read_urb = ir_process_read_urb, }; @@ -195,6 +197,9 @@ static int ir_startup(struct usb_serial *serial) struct usb_irda_cs_descriptor *irda_desc; int rates; + if (serial->num_bulk_in < 1 || serial->num_bulk_out < 1) + return -ENODEV; + irda_desc = irda_usb_find_class_desc(serial, 0); if (!irda_desc) { dev_err(&serial->dev->dev, @@ -251,35 +256,102 @@ static int ir_startup(struct usb_serial *serial) return 0; } -static int ir_open(struct tty_struct *tty, struct usb_serial_port *port) +static int ir_write(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count) { - int i; + struct urb *urb = NULL; + unsigned long flags; + int ret; - for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) - port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET; + if (port->bulk_out_size == 0) + return -EINVAL; - /* Start reading from the device */ - return usb_serial_generic_open(tty, port); -} + if (count == 0) + return 0; -static int ir_prepare_write_buffer(struct usb_serial_port *port, - void *dest, size_t size) -{ - unsigned char *buf = dest; - int count; + count = min(count, port->bulk_out_size - 1); + + spin_lock_irqsave(&port->lock, flags); + if (__test_and_clear_bit(0, &port->write_urbs_free)) { + urb = port->write_urbs[0]; + port->tx_bytes += count; + } + spin_unlock_irqrestore(&port->lock, flags); + + if (!urb) + return 0; /* * The first byte of the packet we send to the device contains an - * inbound header which indicates an additional number of BOFs and + * outbound header which indicates an additional number of BOFs and * a baud rate change. * * See section 5.4.2.2 of the USB IrDA spec. 
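+ * + * The speed code and the extra-BOF count occupy disjoint bits of + * that byte, so the header is built below by simply OR'ing ir_xbof + * and ir_baud together; with illustrative values ir_baud = 0x02 and + * ir_xbof = 0x10 the byte sent ahead of the payload would be 0x12.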
*/ - *buf = ir_xbof | ir_baud; + *(u8 *)urb->transfer_buffer = ir_xbof | ir_baud; - count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1, - &port->lock); - return count + 1; + memcpy(urb->transfer_buffer + 1, buf, count); + + urb->transfer_buffer_length = count + 1; + urb->transfer_flags = URB_ZERO_PACKET; + + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + dev_err(&port->dev, "failed to submit write urb: %d\n", ret); + + spin_lock_irqsave(&port->lock, flags); + __set_bit(0, &port->write_urbs_free); + port->tx_bytes -= count; + spin_unlock_irqrestore(&port->lock, flags); + + return ret; + } + + return count; +} + +static void ir_write_bulk_callback(struct urb *urb) +{ + struct usb_serial_port *port = urb->context; + int status = urb->status; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + __set_bit(0, &port->write_urbs_free); + port->tx_bytes -= urb->transfer_buffer_length - 1; + spin_unlock_irqrestore(&port->lock, flags); + + switch (status) { + case 0: + break; + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + dev_dbg(&port->dev, "write urb stopped: %d\n", status); + return; + case -EPIPE: + dev_err(&port->dev, "write urb stopped: %d\n", status); + return; + default: + dev_err(&port->dev, "nonzero write-urb status: %d\n", status); + break; + } + + usb_serial_port_softint(port); +} + +static int ir_write_room(struct tty_struct *tty) +{ + struct usb_serial_port *port = tty->driver_data; + int count = 0; + + if (port->bulk_out_size == 0) + return 0; + + if (test_bit(0, &port->write_urbs_free)) + count = port->bulk_out_size - 1; + + return count; } static void ir_process_read_urb(struct urb *urb) @@ -332,34 +404,34 @@ static void ir_set_termios(struct tty_struct *tty, switch (baud) { case 2400: - ir_baud = USB_IRDA_BR_2400; + ir_baud = USB_IRDA_LS_2400; break; case 9600: - ir_baud = USB_IRDA_BR_9600; + ir_baud = USB_IRDA_LS_9600; break; case 19200: - ir_baud = USB_IRDA_BR_19200; + ir_baud = USB_IRDA_LS_19200; break; case 38400: - ir_baud = USB_IRDA_BR_38400; + ir_baud = USB_IRDA_LS_38400; break; case 57600: - ir_baud = USB_IRDA_BR_57600; + ir_baud = USB_IRDA_LS_57600; break; case 115200: - ir_baud = USB_IRDA_BR_115200; + ir_baud = USB_IRDA_LS_115200; break; case 576000: - ir_baud = USB_IRDA_BR_576000; + ir_baud = USB_IRDA_LS_576000; break; case 1152000: - ir_baud = USB_IRDA_BR_1152000; + ir_baud = USB_IRDA_LS_1152000; break; case 4000000: - ir_baud = USB_IRDA_BR_4000000; + ir_baud = USB_IRDA_LS_4000000; break; default: - ir_baud = USB_IRDA_BR_9600; + ir_baud = USB_IRDA_LS_9600; baud = 9600; } diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index e66a59ef43a1..aa3dbce22cfb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); @@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 18110225d506..2ec4eeacebc7 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -1833,10 +1833,6 @@ static 
int mos7720_startup(struct usb_serial *serial) product = le16_to_cpu(serial->dev->descriptor.idProduct); dev = serial->dev; - /* setting configuration feature to one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); - if (product == MOSCHIP_DEVICE_ID_7715) { struct urb *urb = serial->port[0]->interrupt_in_urb; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index a698d46ba773..ab4bf8d6d7df 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -119,11 +119,15 @@ /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 * ATEN UC2322 device using Moschip MCS7820 + * MOXA UPort 2210 device using Moschip MCS7820 */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 #define ATENINTL_DEVICE_ID_UC2322 0x7820 +#define USB_VENDOR_ID_MOXA 0x110a +#define MOXA_DEVICE_ID_2210 0x2210 + /* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 @@ -195,6 +199,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, + {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)}, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); @@ -2020,6 +2025,7 @@ static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); + u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor); u8 *buf; int device_type; @@ -2030,6 +2036,11 @@ static int mos7840_probe(struct usb_serial *serial, goto out; } + if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) { + device_type = MOSCHIP_DEVICE_ID_7820; + goto out; + } + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -2279,11 +2290,6 @@ out: goto error; } else dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); - - /* setting configuration feature to one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - 0x03, 0x00, 0x01, 0x00, NULL, 0x00, - MOS_WDR_TIMEOUT); } return 0; error: diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index cb7aac9cd9e7..ed2b4e6dca38 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -113,7 +113,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, - 0, 0, buffer, 1, 0); + 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT); kfree(buffer); if (retval < 0) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 06ab016be0b6..8bfffca3e4ae 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define DELL_PRODUCT_5821E 0x81d7 +#define DELL_PRODUCT_5821E_ESIM 0x81e0 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -247,6 +248,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 +#define QUECTEL_PRODUCT_RM500Q 0x0800 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -566,6 +568,9 @@ static void option_instat_callback(struct 
urb *urb); /* Interface must have two endpoints */ #define NUMEP2 BIT(16) +/* Device needs ZLP */ +#define ZLP BIT(17) + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -1044,6 +1049,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1098,6 +1105,11 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), + .driver_info = ZLP }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1169,6 +1181,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */ + .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff), /* Telit ME910G1 (ECM) */ + .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1193,6 +1209,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1974,8 +1992,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ + { USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */ + .driver_info = RSVD(1) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */ + .driver_info = RSVD(1) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */ + 
.driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ @@ -1990,6 +2014,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) }, + { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ @@ -2090,6 +2118,9 @@ static int option_attach(struct usb_serial *serial) if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) data->use_send_setup = 1; + if (device_flags & ZLP) + data->use_zlp = 1; + spin_lock_init(&data->susp_lock); usb_set_serial_data(serial, data); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 9d27b76c5c6e..e6d9b79d3521 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index b0175f17d1a2..c98db6b650a5 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -124,6 +124,7 @@ #define HP_LM920_PRODUCT_ID 0x026b #define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 +#define HP_LD381_PRODUCT_ID 0x0f7f #define HP_LCM220_PRODUCT_ID 0x3139 #define HP_LCM960_PRODUCT_ID 0x3239 #define HP_LD220_PRODUCT_ID 0x3524 diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index a62981ca7a73..f93b81a297d6 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) u8 newMSR = (u8) *ch; unsigned long flags; + /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowMSR = newMSR; @@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) unsigned long flags; u8 newLSR = (u8) *ch; + /* May be called from qt2_process_read_urb() for an unbound port. 
*/ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; if (newLSR & UART_LSR_BI) newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index edbbb13d6de6..bd23a7cb1be2 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS); #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \ + { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8f066bb55d7d..dc7a65b9ec98 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver) return -EINVAL; } + /* Prevent individual ports from being unbound. */ + driver->driver.suppress_bind_attrs = true; + usb_serial_operations_init(driver); /* Add this device to our list of devices */ diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h index 1c120eaf4091..934e9361cf6b 100644 --- a/drivers/usb/serial/usb-wwan.h +++ b/drivers/usb/serial/usb-wwan.h @@ -38,6 +38,7 @@ struct usb_wwan_intf_private { spinlock_t susp_lock; unsigned int suspended:1; unsigned int use_send_setup:1; + unsigned int use_zlp:1; int in_flight; unsigned int open_ports; void *private; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 7e855c87e4f7..13be21aad2f4 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, void (*callback) (struct urb *)) { struct usb_serial *serial = port->serial; + struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ @@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); + if (intfdata->use_zlp && dir == USB_DIR_OUT) + urb->transfer_flags |= URB_ZERO_PACKET; + return urb; } diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 54a3c8195c96..2adcabe060c5 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -135,7 +135,8 @@ static int slave_configure(struct scsi_device *sdev) * For such controllers we need to make sure the block layer sets * up bounce buffers in addressable memory. 
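+ * The same restriction applies when the host controller is confined + * to a dedicated local memory pool (see the localmem_pool check + * below), so bounce buffering is forced in that case as well.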
*/ - if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus))) + if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || + (bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL)) blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH); /* diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 34538253f12c..bb2198496f42 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -45,6 +45,7 @@ struct uas_dev_info { struct scsi_cmnd *cmnd[MAX_CMNDS]; spinlock_t lock; struct work_struct work; + struct work_struct scan_work; /* for async scanning */ }; enum { @@ -114,6 +115,17 @@ out: spin_unlock_irqrestore(&devinfo->lock, flags); } +static void uas_scan_work(struct work_struct *work) +{ + struct uas_dev_info *devinfo = + container_of(work, struct uas_dev_info, scan_work); + struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf); + + dev_dbg(&devinfo->intf->dev, "starting scan\n"); + scsi_scan_host(shost); + dev_dbg(&devinfo->intf->dev, "scan complete\n"); +} + static void uas_add_work(struct uas_cmd_info *cmdinfo) { struct scsi_pointer *scp = (void *)cmdinfo; @@ -825,6 +837,10 @@ static int uas_slave_configure(struct scsi_device *sdev) sdev->wce_default_on = 1; } + /* Some disks cannot handle READ_CAPACITY_16 */ + if (devinfo->flags & US_FL_NO_READ_CAPACITY_16) + sdev->no_read_capacity_16 = 1; + /* * Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. @@ -833,6 +849,12 @@ if (devinfo->flags & US_FL_FIX_CAPACITY) sdev->fix_capacity = 1; + /* + * In some cases we have to guess the capacity. + */ + if (devinfo->flags & US_FL_CAPACITY_HEURISTICS) + sdev->guess_capacity = 1; + /* * Some devices don't like MODE SENSE with page=0x3f, * which is the command used for checking if a device @@ -973,6 +995,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) init_usb_anchor(&devinfo->data_urbs); spin_lock_init(&devinfo->lock); INIT_WORK(&devinfo->work, uas_do_work); + INIT_WORK(&devinfo->scan_work, uas_scan_work); result = uas_configure_endpoints(devinfo); if (result) @@ -989,7 +1012,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) if (result) goto free_streams; - scsi_scan_host(shost); + /* Schedule the scan_work for async SCSI-device scanning */ + schedule_work(&devinfo->scan_work); + return result; free_streams: @@ -1157,6 +1182,12 @@ static void uas_disconnect(struct usb_interface *intf) usb_kill_anchored_urbs(&devinfo->data_urbs); uas_zap_pending(devinfo, DID_NO_CONNECT); + /* + * Prevent SCSI scanning (if it hasn't started yet) + * or wait for the SCSI-scanning routine to stop.
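+ * cancel_work_sync() covers both cases: it removes the work item + * if it is still queued and otherwise waits for a running instance + * to finish.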
+ */ + cancel_work_sync(&devinfo->scan_work); + scsi_remove_host(shost); uas_free_streams(devinfo); scsi_host_put(shost); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1cd9b6305b06..1880f3e13f57 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, USB_SC_RBC, USB_PR_BULK, NULL, 0 ), +UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100, + "Samsung", + "Flash Drive FIT", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + /* aeb */ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, "Feiya", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index d0bdebd87ce3..1b23741036ee 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -87,12 +87,15 @@ UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_UAS), -/* Reported-by: Takeo Nakayama */ +/* + * Initially Reported-by: Takeo Nakayama + * UAS Ignore Reported by Steven Ellis + */ UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, "JMicron", "JMS566", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_NO_REPORT_OPCODES), + US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS), /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 94a3eda62add..a400b65cf17b 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1592,14 +1592,16 @@ struct typec_port *typec_register_port(struct device *parent, port->sw = typec_switch_get(&port->dev); if (IS_ERR(port->sw)) { + ret = PTR_ERR(port->sw); put_device(&port->dev); - return ERR_CAST(port->sw); + return ERR_PTR(ret); } port->mux = typec_mux_get(&port->dev, NULL); if (IS_ERR(port->mux)) { + ret = PTR_ERR(port->mux); put_device(&port->dev); - return ERR_CAST(port->mux); + return ERR_PTR(ret); } ret = device_add(&port->dev); diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 72481bbb2af3..5b986d6c801d 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -32,6 +32,7 @@ endif # TYPEC_TCPCI config TYPEC_FUSB302 tristate "Fairchild FUSB302 Type-C chip driver" depends on I2C + depends on EXTCON || !EXTCON help The Fairchild FUSB302 Type-C chip driver that works with Type-C Port Controller Manager to provide USB PD and USB diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index ed8655c6af8c..b498960ff72b 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -1666,7 +1666,7 @@ static const struct property_entry port_props[] = { PROPERTY_ENTRY_STRING("try-power-role", "sink"), PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo), PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo), - PROPERTY_ENTRY_U32("op-sink-microwatt", 2500), + PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000), { } }; diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index c1f7073a56de..753645bb2527 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci) if (status & TCPC_ALERT_RX_STATUS) { struct pd_message msg; - unsigned int cnt; + unsigned int cnt, payload_cnt; u16 header; regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt); + /* + * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14 + * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is + * defined in table 4-36 
as one greater than the number of + * bytes received. And that number includes the header. So: + */ + if (cnt > 3) + payload_cnt = cnt - (1 + sizeof(msg.header)); + else + payload_cnt = 0; tcpci_read16(tcpci, TCPC_RX_HDR, &header); msg.header = cpu_to_le16(header); - if (WARN_ON(cnt > sizeof(msg.payload))) - cnt = sizeof(msg.payload); + if (WARN_ON(payload_cnt > sizeof(msg.payload))) + payload_cnt = sizeof(msg.payload); - if (cnt > 0) + if (payload_cnt > 0) regmap_raw_read(tcpci->regmap, TCPC_RX_DATA, - &msg.payload, cnt); + &msg.payload, payload_cnt); /* Read complete, clear RX status alert bit */ tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS); @@ -581,6 +591,12 @@ static int tcpci_probe(struct i2c_client *client, static int tcpci_remove(struct i2c_client *client) { struct tcpci_chip *chip = i2c_get_clientdata(client); + int err; + + /* Disable chip interrupts before unregistering port */ + err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0); + if (err < 0) + return err; tcpci_unregister_port(chip->tcpci); diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c index edc271da14f4..9b745f432c91 100644 --- a/drivers/usb/typec/tcpm/wcove.c +++ b/drivers/usb/typec/tcpm/wcove.c @@ -597,7 +597,7 @@ static const struct property_entry wcove_props[] = { PROPERTY_ENTRY_STRING("try-power-role", "sink"), PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo), PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo), - PROPERTY_ENTRY_U32("op-sink-microwatt", 15000), + PROPERTY_ENTRY_U32("op-sink-microwatt", 15000000), { } }; diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index d99700cb4dca..166c2aabb512 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -273,6 +273,9 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) return; dp = typec_altmode_get_drvdata(alt); + if (!dp) + return; + dp->data.conf = 0; dp->data.status = 0; dp->initialized = false; @@ -287,6 +290,8 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, struct typec_altmode *alt; struct ucsi_dp *dp; + mutex_lock(&con->lock); + /* We can't rely on the firmware with the capabilities. */ desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE; @@ -295,12 +300,15 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, desc->vdo |= all_assignments << 16; alt = typec_port_register_altmode(con->port, desc); - if (IS_ERR(alt)) + if (IS_ERR(alt)) { + mutex_unlock(&con->lock); return alt; + } dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL); if (!dp) { typec_unregister_altmode(alt); + mutex_unlock(&con->lock); return ERR_PTR(-ENOMEM); } @@ -313,5 +321,7 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, alt->ops = &ucsi_displayport_ops; typec_altmode_set_drvdata(alt, dp); + mutex_unlock(&con->lock); + return alt; } diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig index 2f86b28fa3da..7bbae7a08642 100644 --- a/drivers/usb/usbip/Kconfig +++ b/drivers/usb/usbip/Kconfig @@ -4,6 +4,7 @@ config USBIP_CORE tristate "USB/IP support" depends on NET select USB_COMMON + select SGL_ALLOC ---help--- This enables pushing USB packets over IP to allow remote machines direct access to USB devices. 
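To make the byte-count rule in the tcpci_irq() hunk above concrete, here is an illustrative sketch; rx_payload_bytes() is a hypothetical helper, not part of the driver, and the constants mirror the patch's payload_cnt computation (the 2 is the size of the 16-bit PD message header):

	/* One for the register's off-by-one definition, two for the header. */
	static unsigned int rx_payload_bytes(unsigned int readable_byte_count)
	{
		if (readable_byte_count <= 3)
			return 0;	/* header only, no payload to read */
		return readable_byte_count - 1 - 2;
	}

For example, a read reporting cnt = 9 describes 8 received bytes, two of them header, leaving 9 - (1 + 2) = 6 payload bytes, which matches payload_cnt above.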
It provides the diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 66edfeea68fe..e2b019532234 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, if (pipe == -1) return; + /* + * Smatch reported the error case where use_sg is true and buf_len is 0. + * In this case, it adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be + * released by the stub event handler and the connection will be shut down. + */ priv = stub_priv_alloc(sdev, pdu); if (!priv) return; buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length; + if (use_sg && !buf_len) { + dev_err(&udev->dev, "sg buffer with zero length\n"); + goto err_malloc; + } + /* allocate urb transfer buffer, if needed */ if (buf_len) { if (use_sg) { sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents); if (!sgl) goto err_malloc; + + /* Check if the server's HCD supports SG */ + if (!udev->bus->sg_tablesize) { + /* + * If the server's HCD doesn't support SG, break + * a single SG request into several URBs and map + * each SG list entry to the corresponding URB + * buffer. The previously allocated SG list is + * stored in priv->sgl (if the server's HCD + * supports SG, the SG list is stored only in + * urb->sg) and it is used as an indicator that + * the server split a single SG request into + * several URBs. Later, priv->sgl is used by + * stub_complete() and stub_send_ret_submit() to + * reassemble the divided URBs. + */ + support_sg = 0; + num_urbs = nents; + priv->completed_urbs = 0; + pdu->u.cmd_submit.transfer_flags &= + ~URB_DMA_MAP_SG; + } } else { buffer = kzalloc(buf_len, GFP_KERNEL); if (!buffer) @@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, } } - /* Check if the server's HCD supports SG */ - if (use_sg && !udev->bus->sg_tablesize) { - /* - * If the server's HCD doesn't support SG, break a single SG - * request into several URBs and map each SG list entry to - * corresponding URB buffer. The previously allocated SG - * list is stored in priv->sgl (If the server's HCD support SG, - * SG list is stored only in urb->sg) and it is used as an - * indicator that the server split single SG request into - * several URBs. Later, priv->sgl is used by stub_complete() and - * stub_send_ret_submit() to reassemble the divied URBs.
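A minimal sketch of the mapping step that the comment above describes, with hypothetical names (the driver's real loop lives further down in stub_recv_cmd_submit(); this assumes <linux/usb.h> and <linux/scatterlist.h>):

	/* Give each SG entry its own URB buffer when the HCD lacks SG support. */
	static void map_sg_to_urbs(struct urb **urbs, struct scatterlist *sgl,
				   int nents)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			urbs[i]->transfer_buffer = sg_virt(sg);
			urbs[i]->transfer_buffer_length = sg->length;
		}
	}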
- */ - support_sg = 0; - num_urbs = nents; - priv->completed_urbs = 0; - pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG; - } - /* allocate urb array */ priv->num_urbs = num_urbs; priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL); diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 6532d68e8808..e4b96674c405 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) copy -= recv; ret += recv; + + if (!copy) + break; } if (ret != size) diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 33f8972ba842..00fc98741c5d 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0); /* recv transfer buffer */ - if (usbip_recv_xbuff(ud, urb) < 0) - return; + if (usbip_recv_xbuff(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* recv iso_packet_descriptor */ - if (usbip_recv_iso(ud, urb) < 0) - return; + if (usbip_recv_iso(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* restore the padding in iso packets */ usbip_pad_iso(ud, urb); +error: if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 3fa3f728fb39..2056f3f85f59 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -294,8 +294,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, irq = pci_irq_vector(pdev, vector); if (vdev->ctx[vector].trigger) { - free_irq(irq, vdev->ctx[vector].trigger); irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + free_irq(irq, vdev->ctx[vector].trigger); kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index f2983f0f84be..3f5f8198a6bb 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c @@ -97,8 +97,10 @@ static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev, /* If there were any mappings at all... 
*/ if (data->mm) { - ret = mm_iommu_put(data->mm, data->mem); - WARN_ON(ret); + if (data->mem) { + ret = mm_iommu_put(data->mm, data->mem); + WARN_ON(ret); + } mmdrop(data->mm); } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 1a2dd53caade..b53b6528d6ce 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1414,10 +1414,6 @@ static int vhost_net_release(struct inode *inode, struct file *f) static struct socket *get_raw_socket(int fd) { - struct { - struct sockaddr_ll sa; - char buf[MAX_ADDR_LEN]; - } uaddr; int r; struct socket *sock = sockfd_lookup(fd, &r); @@ -1430,11 +1426,7 @@ static struct socket *get_raw_socket(int fd) goto err; } - r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0); - if (r < 0) - goto err; - - if (uaddr.sa.sll_family != AF_PACKET) { + if (sock->sk->sk_family != AF_PACKET) { r = -EPFNOSUPPORT; goto err; } diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 9f57736fe15e..88a5aa6624b4 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -437,7 +437,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) virtio_transport_deliver_tap_pkt(pkt); /* Only accept correctly addressed packets */ - if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) + if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && + le64_to_cpu(pkt->hdr.dst_cid) == + vhost_transport_get_local_cid()) virtio_transport_recv_pkt(pkt); else virtio_transport_free_pkt(pkt); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index c6b3bdbbdbc9..bfaa9ec4bc1f 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font) static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { + if ((width << 1) * height > vga_vram_size) + return -EINVAL; + if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ c->vc_font.height) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index c9235a2f42f8..22070cfea1d0 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1276,6 +1276,9 @@ finished: if (!con_is_bound(&fb_con)) fbcon_exit(); + if (vc->vc_num == logo_shown) + logo_shown = FBCON_LOGO_CANSHOW; + return; } diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index 1410f476e135..1fc50fc0694b 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -766,8 +766,8 @@ failed_free_cmap: failed_free_clk: clk_disable_unprepare(fbi->clk); failed_free_fbmem: - dma_free_coherent(fbi->dev, info->fix.smem_len, - info->screen_base, fbi->fb_start_dma); + dma_free_wc(fbi->dev, info->fix.smem_len, + info->screen_base, fbi->fb_start_dma); failed_free_info: kfree(info); @@ -801,7 +801,7 @@ static int pxa168fb_remove(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); - dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len), + dma_free_wc(fbi->dev, info->fix.smem_len, info->screen_base, info->fix.smem_start); clk_disable_unprepare(fbi->clk); diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index b939bc28d886..9c82e2a0a411 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -1576,12 +1576,12 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame, if (ptr[0] & 0x10) frame->active_aspect = ptr[1] & 0xf; if (ptr[0] & 0x8) { - frame->top_bar = (ptr[5] << 8) + 
ptr[6]; - frame->bottom_bar = (ptr[7] << 8) + ptr[8]; + frame->top_bar = (ptr[6] << 8) | ptr[5]; + frame->bottom_bar = (ptr[8] << 8) | ptr[7]; } if (ptr[0] & 0x4) { - frame->left_bar = (ptr[9] << 8) + ptr[10]; - frame->right_bar = (ptr[11] << 8) + ptr[12]; + frame->left_bar = (ptr[10] << 8) | ptr[9]; + frame->right_bar = (ptr[12] << 8) | ptr[11]; } frame->scan_mode = ptr[0] & 0x3; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index e05679c478e2..7aaf150f89ba 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -157,6 +157,8 @@ static void set_page_pfns(struct virtio_balloon *vb, { unsigned int i; + BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX); + /* * Set balloon pfns pointing at this page. * Note that the first pfn points at start of the page. @@ -474,7 +476,9 @@ static int init_vqs(struct virtio_balloon *vb) names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate"; callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack; names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate"; + callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL; names[VIRTIO_BALLOON_VQ_STATS] = NULL; + callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { @@ -721,6 +725,17 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, get_page(newpage); /* balloon reference */ + /* + * When we migrate a page to a different zone and adjusted the + * managed page count when inflating, we have to fixup the count of + * both involved zones. + */ + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) && + page_zone(page) != page_zone(newpage)) { + adjust_managed_page_count(page, 1); + adjust_managed_page_count(newpage, -1); + } + /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(vb_dev_info, newpage); @@ -887,8 +902,7 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb); if (IS_ERR(vb->vb_dev_info.inode)) { err = PTR_ERR(vb->vb_dev_info.inode); - kern_unmount(balloon_mnt); - goto out_del_vqs; + goto out_kern_unmount; } vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops; #endif @@ -899,13 +913,13 @@ static int virtballoon_probe(struct virtio_device *vdev) */ if (virtqueue_get_vring_size(vb->free_page_vq) < 2) { err = -ENOSPC; - goto out_del_vqs; + goto out_iput; } vb->balloon_wq = alloc_workqueue("balloon-wq", WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0); if (!vb->balloon_wq) { err = -ENOMEM; - goto out_del_vqs; + goto out_iput; } INIT_WORK(&vb->report_free_page_work, report_free_page_func); vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; @@ -939,7 +953,13 @@ static int virtballoon_probe(struct virtio_device *vdev) out_del_balloon_wq: if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) destroy_workqueue(vb->balloon_wq); +out_iput: +#ifdef CONFIG_BALLOON_COMPACTION + iput(vb->vb_dev_info.inode); +out_kern_unmount: + kern_unmount(balloon_mnt); out_del_vqs: +#endif vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); @@ -954,6 +974,10 @@ static void remove_common(struct virtio_balloon *vb) leak_balloon(vb, vb->num_pages); update_balloon_size(vb); + /* There might be free pages that are being reported: release them. 
*/ + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + return_free_pages_to_mm(vb, ULONG_MAX); + /* Now we reset the device so we can clean up the queues. */ vb->vdev->config->reset(vb->vdev); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index f2862f66c2ac..222d630c41fc 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -294,7 +294,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) - if (callbacks[i]) + if (names[i] && callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. */ diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 867c7ebd3f10..58b96baa8d48 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq) vq->split.queue_size_in_bytes, vq->split.vring.desc, vq->split.queue_dma_addr); - - kfree(vq->split.desc_state); } } + if (!vq->packed_ring) + kfree(vq->split.desc_state); list_del(&_vq->list); kfree(vq); } diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c index ca752b8f495f..cb1eb7e05f87 100644 --- a/drivers/visorbus/visorchipset.c +++ b/drivers/visorbus/visorchipset.c @@ -1210,14 +1210,17 @@ static void setup_crash_devices_work_queue(struct work_struct *work) { struct controlvm_message local_crash_bus_msg; struct controlvm_message local_crash_dev_msg; - struct controlvm_message msg; + struct controlvm_message msg = { + .hdr.id = CONTROLVM_CHIPSET_INIT, + .cmd.init_chipset = { + .bus_count = 23, + .switch_count = 0, + }, + }; u32 local_crash_msg_offset; u16 local_crash_msg_count; /* send init chipset msg */ - msg.hdr.id = CONTROLVM_CHIPSET_INIT; - msg.cmd.init_chipset.bus_count = 23; - msg.cmd.init_chipset.switch_count = 0; chipset_init(&msg); /* get saved message count */ if (visorchannel_read(chipset_dev->controlvm_channel, diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c index 3208a4409e44..6a1bc284f297 100644 --- a/drivers/vme/bridges/vme_fake.c +++ b/drivers/vme/bridges/vme_fake.c @@ -414,8 +414,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr, } } -static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr, - u32 aspace, u32 cycle) +static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) { u8 retval = 0xff; int i; @@ -446,8 +447,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr, return retval; } -static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr, - u32 aspace, u32 cycle) +static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) { u16 retval = 0xffff; int i; @@ -478,8 +480,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr, return retval; } -static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr, - u32 aspace, u32 cycle) +static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge, + unsigned long long addr, + u32 aspace, u32 cycle) { u32 retval = 0xffffffff; int i; @@ -609,8 +612,9 @@ out: return retval; } -static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf, - unsigned long long addr, u32 aspace, u32 cycle) +static noinline_for_stack 
void fake_vmewrite8(struct fake_driver *bridge, + u8 *buf, unsigned long long addr, + u32 aspace, u32 cycle) { int i; unsigned long long start, end, offset; @@ -639,8 +643,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf, } -static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf, - unsigned long long addr, u32 aspace, u32 cycle) +static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge, + u16 *buf, unsigned long long addr, + u32 aspace, u32 cycle) { int i; unsigned long long start, end, offset; @@ -669,8 +674,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf, } -static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf, - unsigned long long addr, u32 aspace, u32 cycle) +static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge, + u32 *buf, unsigned long long addr, + u32 aspace, u32 cycle) { int i; unsigned long long start, end, offset; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 58e7c100b6ad..e2745f686196 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG config MAX77620_WATCHDOG tristate "Maxim Max77620 Watchdog Timer" depends on MFD_MAX77620 || COMPILE_TEST + select WATCHDOG_CORE help This is the driver for the Max77620 watchdog timer. Say 'Y' here to enable the watchdog timer support for @@ -1444,6 +1445,7 @@ config SMSC37B787_WDT config TQMX86_WDT tristate "TQ-Systems TQMX86 Watchdog Timer" depends on X86 + select WATCHDOG_CORE help This is the driver for the hardware watchdog timer in the TQMX86 IO controller found on some of their ComExpress Modules. diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index 4ec0906bf12c..7e00960651fa 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -258,11 +258,6 @@ static int aspeed_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdt->base)) return PTR_ERR(wdt->base); - /* - * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only - * runs at 1MHz. We chose to always run at 1MHz, as there's no - * good reason to have a faster watchdog counter. - */ wdt->wdd.info = &aspeed_wdt_info; wdt->wdd.ops = &aspeed_wdt_ops; wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS; @@ -278,7 +273,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev) return -EINVAL; config = ofdid->data; - wdt->ctrl = WDT_CTRL_1MHZ_CLK; + /* + * On clock rates: + * - ast2400 wdt can run at PCLK, or 1MHz + * - ast2500 only runs at 1MHz, hard coding bit 4 to 1 + * - ast2600 always runs at 1MHz + * + * Set the ast2400 to run at 1MHz as it simplifies the driver. 
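
The noinline_for_stack annotations on the fake_vmeread*/fake_vmewrite* helpers above address stack growth, not performance: when the compiler inlines several of these helpers into one caller, each copy's locals land in a single combined frame, which can trip the frame-size warning. Keeping them out of line confines each helper's locals to its own short-lived frame. A hedged sketch of the shape (the window-lookup body is elided to a comment):

static noinline_for_stack u8 sketch_vmeread8(struct fake_driver *bridge,
					     unsigned long long addr,
					     u32 aspace, u32 cycle)
{
	u8 retval = 0xff;	/* bus-error fill pattern when nothing maps */

	/* ... walk the emulated windows, several unsigned long long locals ... */
	return retval;
}
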
+ */ + if (of_device_is_compatible(np, "aspeed,ast2400-wdt")) + wdt->ctrl = WDT_CTRL_1MHZ_CLK; /* * Control reset on a per-device basis to ensure the diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index e149e66a6ea9..e92f38fcb7a4 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -94,13 +94,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd) struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); int ret; - ret = da9062_reset_watchdog_timer(wdt); - if (ret) { - dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n", - ret); - return ret; - } - ret = regmap_update_bits(wdt->hw->regmap, DA9062AA_CONTROL_D, DA9062AA_TWDSCALE_MASK, diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h index 0f7373ba10d5..69e92e692ae0 100644 --- a/drivers/watchdog/iTCO_vendor.h +++ b/drivers/watchdog/iTCO_vendor.h @@ -1,10 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* iTCO Vendor Specific Support hooks */ #ifdef CONFIG_ITCO_VENDOR_SUPPORT +extern int iTCO_vendorsupport; extern void iTCO_vendor_pre_start(struct resource *, unsigned int); extern void iTCO_vendor_pre_stop(struct resource *); extern int iTCO_vendor_check_noreboot_on(void); #else +#define iTCO_vendorsupport 0 #define iTCO_vendor_pre_start(acpibase, heartbeat) {} #define iTCO_vendor_pre_stop(acpibase) {} #define iTCO_vendor_check_noreboot_on() 1 diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c index 4f1b96f59349..cf0eaa04b064 100644 --- a/drivers/watchdog/iTCO_vendor_support.c +++ b/drivers/watchdog/iTCO_vendor_support.c @@ -39,8 +39,10 @@ /* Broken BIOS */ #define BROKEN_BIOS 911 -static int vendorsupport; -module_param(vendorsupport, int, 0); +int iTCO_vendorsupport; +EXPORT_SYMBOL(iTCO_vendorsupport); + +module_param_named(vendorsupport, iTCO_vendorsupport, int, 0); MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=" "0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS"); @@ -152,7 +154,7 @@ static void broken_bios_stop(struct resource *smires) void iTCO_vendor_pre_start(struct resource *smires, unsigned int heartbeat) { - switch (vendorsupport) { + switch (iTCO_vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_start(smires); break; @@ -165,7 +167,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_start); void iTCO_vendor_pre_stop(struct resource *smires) { - switch (vendorsupport) { + switch (iTCO_vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_stop(smires); break; @@ -178,7 +180,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop); int iTCO_vendor_check_noreboot_on(void) { - switch (vendorsupport) { + switch (iTCO_vendorsupport) { case SUPERMICRO_OLD_BOARD: return 0; default: @@ -189,13 +191,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on); static int __init iTCO_vendor_init_module(void) { - if (vendorsupport == SUPERMICRO_NEW_BOARD) { + if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) { pr_warn("Option vendorsupport=%d is no longer supported, " "please use the w83627hf_wdt driver instead\n", SUPERMICRO_NEW_BOARD); return -EINVAL; } - pr_info("vendor-support=%d\n", vendorsupport); + pr_info("vendor-support=%d\n", iTCO_vendorsupport); return 0; } diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 156360e37714..e707c4797f76 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -459,13 +459,25 @@ static int iTCO_wdt_probe(struct platform_device *pdev) if (!p->tco_res) return -ENODEV; - p->smi_res = platform_get_resource(pdev, 
IORESOURCE_IO, ICH_RES_IO_SMI); - if (!p->smi_res) - return -ENODEV; - p->iTCO_version = pdata->version; p->pci_dev = to_pci_dev(dev->parent); + p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI); + if (p->smi_res) { + /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ + if (!devm_request_region(dev, p->smi_res->start, + resource_size(p->smi_res), + pdev->name)) { + pr_err("I/O address 0x%04llx already in use, device disabled\n", + (u64)SMI_EN(p)); + return -EBUSY; + } + } else if (iTCO_vendorsupport || + turn_SMI_watchdog_clear_off >= p->iTCO_version) { + pr_err("SMI I/O resource is missing\n"); + return -ENODEV; + } + iTCO_wdt_no_reboot_bit_setup(p, pdata); /* @@ -492,14 +504,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev) /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ p->update_no_reboot_bit(p->no_reboot_priv, true); - /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ - if (!devm_request_region(dev, p->smi_res->start, - resource_size(p->smi_res), - pdev->name)) { - pr_err("I/O address 0x%04llx already in use, device disabled\n", - (u64)SMI_EN(p)); - return -EBUSY; - } if (turn_SMI_watchdog_clear_off >= p->iTCO_version) { /* * Bit 13: TCO_EN -> 0 diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c index 5ce51026989a..ba5d535a6db2 100644 --- a/drivers/watchdog/imx7ulp_wdt.c +++ b/drivers/watchdog/imx7ulp_wdt.c @@ -106,12 +106,28 @@ static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog, return 0; } +static int imx7ulp_wdt_restart(struct watchdog_device *wdog, + unsigned long action, void *data) +{ + struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); + + imx7ulp_wdt_enable(wdt->base, true); + imx7ulp_wdt_set_timeout(&wdt->wdd, 1); + + /* wait for wdog to fire */ + while (true) + ; + + return NOTIFY_DONE; +} + static const struct watchdog_ops imx7ulp_wdt_ops = { .owner = THIS_MODULE, .start = imx7ulp_wdt_start, .stop = imx7ulp_wdt_stop, .ping = imx7ulp_wdt_ping, .set_timeout = imx7ulp_wdt_set_timeout, + .restart = imx7ulp_wdt_restart, }; static const struct watchdog_info imx7ulp_wdt_info = { diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 1cccf8eb1c5d..8e6dfe76f9c9 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev) set_bit(WDOG_HW_RUNNING, &dev->wdt.status); /* Request the IRQ only after the watchdog is disabled */ - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { /* * Not all supported platforms specify an interrupt for the @@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev) } /* Optional 2nd interrupt for pretimeout */ - irq = platform_get_irq(pdev, 1); + irq = platform_get_irq_optional(pdev, 1); if (irq > 0) { orion_wdt_info.options |= WDIOF_PRETIMEOUT; ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq, diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index a494543d3ae1..eb47fe5ed280 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev) } /* check if there is pretimeout support */ - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { ret = devm_request_irq(dev, irq, qcom_wdt_isr, IRQF_TRIGGER_RISING, diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c index 
234876047431..6e524c8e26a8 100644 --- a/drivers/watchdog/rn5t618_wdt.c +++ b/drivers/watchdog/rn5t618_wdt.c @@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = { module_platform_driver(rn5t618_wdt_driver); +MODULE_ALIAS("platform:rn5t618-wdt"); MODULE_AUTHOR("Beniamino Galvani "); MODULE_DESCRIPTION("RN5T618 watchdog driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c index 0bb17b046140..65cb55f3916f 100644 --- a/drivers/watchdog/sprd_wdt.c +++ b/drivers/watchdog/sprd_wdt.c @@ -327,10 +327,9 @@ static int sprd_wdt_probe(struct platform_device *pdev) static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev) { - struct watchdog_device *wdd = dev_get_drvdata(dev); struct sprd_wdt *wdt = dev_get_drvdata(dev); - if (watchdog_active(wdd)) + if (watchdog_active(&wdt->wdd)) sprd_wdt_stop(&wdt->wdd); sprd_wdt_disable(wdt); @@ -339,7 +338,6 @@ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev) static int __maybe_unused sprd_wdt_pm_resume(struct device *dev) { - struct watchdog_device *wdd = dev_get_drvdata(dev); struct sprd_wdt *wdt = dev_get_drvdata(dev); int ret; @@ -347,7 +345,7 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev) if (ret) return ret; - if (watchdog_active(wdd)) { + if (watchdog_active(&wdt->wdd)) { ret = sprd_wdt_start(&wdt->wdd); if (ret) { sprd_wdt_disable(wdt); diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c index a3a329011a06..25188d6bbe15 100644 --- a/drivers/watchdog/stm32_iwdg.c +++ b/drivers/watchdog/stm32_iwdg.c @@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev) watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT); watchdog_init_timeout(wdd, 0, dev); + /* + * In case of CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set + * (Means U-Boot/bootloaders leaves the watchdog running) + * When we get here we should make a decision to prevent + * any side effects before user space daemon will take care of it. + * The best option, taking into consideration that there is no + * way to read values back from hardware, is to enforce watchdog + * being run with deterministic values. 
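
The sprd_wdt suspend/resume fix above is a drvdata type mismatch: probe stored a struct sprd_wdt * with platform_set_drvdata(), yet the old code also read the very same pointer back as a struct watchdog_device *. Only one interpretation can be right; the embedded watchdog_device has to be reached through its container. Minimal sketch of the corrected shape, with field names as in the patch:

static int sketch_suspend(struct device *dev)
{
	struct sprd_wdt *wdt = dev_get_drvdata(dev);	/* what probe stored */

	if (watchdog_active(&wdt->wdd))	/* reach wdd through the container */
		sprd_wdt_stop(&wdt->wdd);
	sprd_wdt_disable(wdt);
	return 0;
}
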
+ */ + if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) { + ret = stm32_iwdg_start(wdd); + if (ret) + return ret; + + /* Make sure the watchdog is serviced */ + set_bit(WDOG_HW_RUNNING, &wdd->status); + } + ret = devm_watchdog_register_device(dev, wdd); if (ret) return ret; diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 21e8085b848b..861daf4f37b2 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -147,6 +147,25 @@ int watchdog_init_timeout(struct watchdog_device *wdd, } EXPORT_SYMBOL_GPL(watchdog_init_timeout); +static int watchdog_reboot_notifier(struct notifier_block *nb, + unsigned long code, void *data) +{ + struct watchdog_device *wdd; + + wdd = container_of(nb, struct watchdog_device, reboot_nb); + if (code == SYS_DOWN || code == SYS_HALT) { + if (watchdog_active(wdd)) { + int ret; + + ret = wdd->ops->stop(wdd); + if (ret) + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} + static int watchdog_restart_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -235,6 +254,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd) } } + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { + wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; + + ret = register_reboot_notifier(&wdd->reboot_nb); + if (ret) { + pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", + wdd->id, ret); + watchdog_dev_unregister(wdd); + ida_simple_remove(&watchdog_ida, id); + return ret; + } + } + if (wdd->ops->restart) { wdd->restart_nb.notifier_call = watchdog_restart_notifier; @@ -289,6 +321,9 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd) if (wdd->ops->restart) unregister_restart_handler(&wdd->restart_nb); + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) + unregister_reboot_notifier(&wdd->reboot_nb); + watchdog_dev_unregister(wdd); ida_simple_remove(&watchdog_ida, wdd->id); } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index dbd2ad4c9294..ce04edc69e5f 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -34,12 +34,10 @@ #include /* For __init/__exit/... */ #include /* For hrtimers */ #include /* For printk/panic/... */ -#include /* For data references */ #include /* For kthread_work */ #include /* For handling misc devices */ #include /* For module stuff/... */ #include /* For mutexes */ -#include /* For reboot notifier */ #include /* For memory functions */ #include /* For standard types (like size_t) */ #include /* For watchdog specific items */ @@ -52,14 +50,14 @@ /* * struct watchdog_core_data - watchdog core internal data - * @kref: Reference count. + * @dev: The watchdog's internal device * @cdev: The watchdog's Character device. * @wdd: Pointer to watchdog device. * @lock: Lock for watchdog core. * @status: Watchdog core internal status bits. 
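
watchdog_reboot_notifier() above (moved into watchdog_core.c) follows the usual notifier_block-embedded-in-owner pattern: the callback recovers its watchdog_device with container_of() and stops the hardware on shutdown, vetoing the transition if the stop fails. Condensed sketch, assuming the field names used in the patch:

static int sketch_reboot_cb(struct notifier_block *nb, unsigned long code,
			    void *data)
{
	struct watchdog_device *wdd =
		container_of(nb, struct watchdog_device, reboot_nb);

	if ((code == SYS_DOWN || code == SYS_HALT) && watchdog_active(wdd) &&
	    wdd->ops->stop(wdd))
		return NOTIFY_BAD;	/* stop failed: hardware still ticking */
	return NOTIFY_DONE;
}
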
*/ struct watchdog_core_data { - struct kref kref; + struct device dev; struct cdev cdev; struct watchdog_device *wdd; struct mutex lock; @@ -158,7 +156,8 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd) ktime_t t = watchdog_next_keepalive(wdd); if (t > 0) - hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL); + hrtimer_start(&wd_data->timer, t, + HRTIMER_MODE_REL_HARD); } else { hrtimer_cancel(&wd_data->timer); } @@ -177,7 +176,7 @@ static int __watchdog_ping(struct watchdog_device *wdd) if (ktime_after(earliest_keepalive, now)) { hrtimer_start(&wd_data->timer, ktime_sub(earliest_keepalive, now), - HRTIMER_MODE_REL); + HRTIMER_MODE_REL_HARD); return 0; } @@ -839,7 +838,7 @@ static int watchdog_open(struct inode *inode, struct file *file) file->private_data = wd_data; if (!hw_running) - kref_get(&wd_data->kref); + get_device(&wd_data->dev); /* * open_timeout only applies for the first open from @@ -860,11 +859,11 @@ out_clear: return err; } -static void watchdog_core_data_release(struct kref *kref) +static void watchdog_core_data_release(struct device *dev) { struct watchdog_core_data *wd_data; - wd_data = container_of(kref, struct watchdog_core_data, kref); + wd_data = container_of(dev, struct watchdog_core_data, dev); kfree(wd_data); } @@ -924,7 +923,7 @@ done: */ if (!running) { module_put(wd_data->cdev.owner); - kref_put(&wd_data->kref, watchdog_core_data_release); + put_device(&wd_data->dev); } return 0; } @@ -943,17 +942,22 @@ static struct miscdevice watchdog_miscdev = { .fops = &watchdog_fops, }; +static struct class watchdog_class = { + .name = "watchdog", + .owner = THIS_MODULE, + .dev_groups = wdt_groups, +}; + /* * watchdog_cdev_register: register watchdog character device * @wdd: watchdog device - * @devno: character device number * * Register a watchdog character device including handling the legacy * /dev/watchdog node. /dev/watchdog is actually a miscdevice and * thus we set it up like that. 
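
The kref-to-struct-device conversion above works because struct device already embeds a refcounted kobject: get_device()/put_device() replace kref_get()/kref_put(), and the release callback frees the container when the last reference drops. Hedged sketch of the lifecycle (setup details from watchdog_cdev_register() are condensed):

static void sketch_release(struct device *dev)
{
	/* the final put_device() lands here; free the embedding structure */
	kfree(container_of(dev, struct watchdog_core_data, dev));
}

static void sketch_init(struct watchdog_core_data *wd_data)
{
	device_initialize(&wd_data->dev);	/* refcount starts at one */
	wd_data->dev.release = sketch_release;	/* invoked on the last put */
}
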
*/ -static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) +static int watchdog_cdev_register(struct watchdog_device *wdd) { struct watchdog_core_data *wd_data; int err; @@ -961,7 +965,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL); if (!wd_data) return -ENOMEM; - kref_init(&wd_data->kref); mutex_init(&wd_data->lock); wd_data->wdd = wdd; @@ -971,7 +974,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) return -ENODEV; kthread_init_work(&wd_data->work, watchdog_ping_work); - hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); wd_data->timer.function = watchdog_timer_expired; if (wdd->id == 0) { @@ -990,23 +993,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) } } + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); - wd_data->cdev.owner = wdd->ops->owner; /* Add the device */ - err = cdev_add(&wd_data->cdev, devno, 1); + err = cdev_device_add(&wd_data->cdev, &wd_data->dev); if (err) { pr_err("watchdog%d unable to add device %d:%d\n", wdd->id, MAJOR(watchdog_devt), wdd->id); if (wdd->id == 0) { misc_deregister(&watchdog_miscdev); old_wd_data = NULL; - kref_put(&wd_data->kref, watchdog_core_data_release); + put_device(&wd_data->dev); } return err; } + wd_data->cdev.owner = wdd->ops->owner; + /* Record time of most recent heartbeat as 'just before now'. 
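
Switching to cdev_device_add() above folds the old two-step cdev_add() plus device_create() into one registration, so the character device and its sysfs node appear together and a single put_device() unwinds the failure path through the release callback. Condensed call-site sketch (the wdd->id == 0 legacy-miscdevice handling is omitted):

static int sketch_cdev_register(struct watchdog_core_data *wd_data)
{
	int err;

	cdev_init(&wd_data->cdev, &watchdog_fops);
	err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
	if (err)
		put_device(&wd_data->dev);	/* release callback frees wd_data */
	return err;
}
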
*/ wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1); watchdog_set_open_deadline(wd_data); @@ -1017,9 +1030,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) */ if (watchdog_hw_running(wdd)) { __module_get(wdd->ops->owner); - kref_get(&wd_data->kref); + get_device(&wd_data->dev); if (handle_boot_enabled) - hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL); + hrtimer_start(&wd_data->timer, 0, + HRTIMER_MODE_REL_HARD); else pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n", wdd->id); @@ -1040,7 +1054,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) { struct watchdog_core_data *wd_data = wdd->wd_data; - cdev_del(&wd_data->cdev); + cdev_device_del(&wd_data->cdev, &wd_data->dev); if (wdd->id == 0) { misc_deregister(&watchdog_miscdev); old_wd_data = NULL; @@ -1059,32 +1073,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) hrtimer_cancel(&wd_data->timer); kthread_cancel_work_sync(&wd_data->work); - kref_put(&wd_data->kref, watchdog_core_data_release); -} - -static struct class watchdog_class = { - .name = "watchdog", - .owner = THIS_MODULE, - .dev_groups = wdt_groups, -}; - -static int watchdog_reboot_notifier(struct notifier_block *nb, - unsigned long code, void *data) -{ - struct watchdog_device *wdd; - - wdd = container_of(nb, struct watchdog_device, reboot_nb); - if (code == SYS_DOWN || code == SYS_HALT) { - if (watchdog_active(wdd)) { - int ret; - - ret = wdd->ops->stop(wdd); - if (ret) - return NOTIFY_BAD; - } - } - - return NOTIFY_DONE; + put_device(&wd_data->dev); } /* @@ -1098,41 +1087,15 @@ static int watchdog_reboot_notifier(struct notifier_block *nb, int watchdog_dev_register(struct watchdog_device *wdd) { - struct device *dev; - dev_t devno; int ret; - devno = MKDEV(MAJOR(watchdog_devt), wdd->id); - - ret = watchdog_cdev_register(wdd, devno); + ret = watchdog_cdev_register(wdd); if (ret) return ret; - dev = device_create_with_groups(&watchdog_class, wdd->parent, - devno, wdd, wdd->groups, - "watchdog%d", wdd->id); - if (IS_ERR(dev)) { - watchdog_cdev_unregister(wdd); - return PTR_ERR(dev); - } - ret = watchdog_register_pretimeout(wdd); - if (ret) { - device_destroy(&watchdog_class, devno); + if (ret) watchdog_cdev_unregister(wdd); - return ret; - } - - if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { - wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; - - ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb); - if (ret) { - pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", - wdd->id, ret); - watchdog_dev_unregister(wdd); - } - } return ret; } @@ -1148,7 +1111,6 @@ int watchdog_dev_register(struct watchdog_device *wdd) void watchdog_dev_unregister(struct watchdog_device *wdd) { watchdog_unregister_pretimeout(wdd); - device_destroy(&watchdog_class, wdd->wd_data->cdev.dev); watchdog_cdev_unregister(wdd); } diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index e7cf41aa26c3..88c5e6361aa0 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -54,6 +54,13 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +#define WDAT_DEFAULT_TIMEOUT 30 + +static int timeout = WDAT_DEFAULT_TIMEOUT; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default=" + __MODULE_STRING(WDAT_DEFAULT_TIMEOUT) ")"); + static int wdat_wdt_read(struct wdat_wdt *wdat, const struct wdat_instruction 
*instr, u32 *value) { @@ -389,7 +396,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) memset(&r, 0, sizeof(r)); r.start = gas->address; - r.end = r.start + gas->access_width - 1; + r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { r.flags = IORESOURCE_MEM; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { @@ -438,6 +445,22 @@ static int wdat_wdt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, wdat); + /* + * Set initial timeout so that userspace has time to configure the + * watchdog properly after it has opened the device. In some cases + * the BIOS default is too short and causes immediate reboot. + */ + if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms || + timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) { + dev_warn(dev, "Invalid timeout %d given, using %d\n", + timeout, WDAT_DEFAULT_TIMEOUT); + timeout = WDAT_DEFAULT_TIMEOUT; + } + + ret = wdat_wdt_set_timeout(&wdat->wdd, timeout); + if (ret) + return ret; + watchdog_set_nowayout(&wdat->wdd, nowayout); return devm_watchdog_register_device(dev, &wdat->wdd); } diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 79cc75096f42..a50dadd01093 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -141,7 +141,8 @@ config XEN_GNTDEV config XEN_GNTDEV_DMABUF bool "Add support for dma-buf grant access device driver extension" - depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER + depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC + select DMA_SHARED_BUFFER help Allows userspace processes and kernel modules to use Xen backed dma-buf implementation. With this extension grant references to diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 5bae515c8e25..bed90d612e48 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -395,7 +395,8 @@ static struct notifier_block xen_memory_nb = { #else static enum bp_state reserve_additional_memory(void) { - balloon_stats.target_pages = balloon_stats.current_pages; + balloon_stats.target_pages = balloon_stats.current_pages + + balloon_stats.target_unpopulated; return BP_ECANCELED; } #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 8b9919c26095..456a164364a2 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void) * cpu. */ __this_cpu_write(xen_in_preemptible_hcall, false); - _cond_resched(); + local_irq_enable(); + cond_resched(); + local_irq_disable(); __this_cpu_write(xen_in_preemptible_hcall, true); } } diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 6d12fc368210..a8d24433c8e9 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -94,7 +94,7 @@ static void watch_target(struct xenbus_watch *watch, "%llu", &static_max) == 1)) static_max >>= PAGE_SHIFT - 10; else - static_max = new_target; + static_max = balloon_stats.current_pages; target_diff = (xen_pv_domain() || xen_initial_domain()) ? 
0 : static_max - balloon_stats.target_pages; diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index d239fc3c5e3d..eb5151fc8efa 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -313,6 +313,8 @@ static int process_msg(void) req->msg.type = state.msg.type; req->msg.len = state.msg.len; req->body = state.body; + /* write body, then update state */ + virt_wmb(); req->state = xb_req_state_got_reply; req->cb(req); } else @@ -395,6 +397,8 @@ static int process_writes(void) if (state.req->state == xb_req_state_aborted) kfree(state.req); else { + /* write err, then update state */ + virt_wmb(); state.req->state = xb_req_state_got_reply; wake_up(&state.req->wq); } diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index ddc18da61834..3a06eb699f33 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -191,8 +191,11 @@ static bool xenbus_ok(void) static bool test_reply(struct xb_req_data *req) { - if (req->state == xb_req_state_got_reply || !xenbus_ok()) + if (req->state == xb_req_state_got_reply || !xenbus_ok()) { + /* read req->state before all other fields */ + virt_rmb(); return true; + } /* Make sure to reread req->state each time. */ barrier(); @@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req) static void *read_reply(struct xb_req_data *req) { - while (req->state != xb_req_state_got_reply) { + do { wait_event(req->wq, test_reply(req)); if (!xenbus_ok()) @@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req) if (req->err) return ERR_PTR(req->err); - } + } while (req->state != xb_req_state_got_reply); return req->body; } diff --git a/fs/affs/super.c b/fs/affs/super.c index cc463ae47c12..3812f7bc3a7f 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -561,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data) int root_block; unsigned long mount_flags; int res = 0; - char *new_opts; char volume[32]; char *prefix = NULL; - new_opts = kstrdup(data, GFP_KERNEL); - if (data && !new_opts) - return -ENOMEM; - pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); sync_filesystem(sb); @@ -579,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data) &blocksize, &prefix, volume, &mount_flags)) { kfree(prefix); - kfree(new_opts); return -EINVAL; } diff --git a/fs/afs/cell.c b/fs/afs/cell.c index fd5133e26a38..78ba5f932287 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } - if (namelen == 5 && memcmp(name, "@cell", 5) == 0) + + /* Prohibit cell names that contain unprintable chars, '/' and '@' or + * that begin with a dot. This also precludes "@cell". + */ + if (name[0] == '.') return ERR_PTR(-EINVAL); + for (i = 0; i < namelen; i++) { + char ch = name[i]; + if (!isprint(ch) || ch == '/' || ch == '@') + return ERR_PTR(-EINVAL); + } _enter("%*.*s,%s", namelen, namelen, name, addresses); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index b86195e4dc6c..b378cd780ed5 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -243,6 +243,17 @@ static void afs_cm_destructor(struct afs_call *call) call->buffer = NULL; } +/* + * Abort a service call from within an action function. 
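
The xenbus_comms/xenbus_xs barriers above pair in the classic publish/consume pattern: the responder fully writes the reply fields, issues virt_wmb(), then flips req->state; the waiter checks req->state first and issues virt_rmb() before reading the other fields, so a reply is never observed half-written. Minimal sketch, with process() standing in as a hypothetical consumer:

static void sketch_publish(struct xb_req_data *req, void *body, u32 len)
{
	req->body = body;
	req->msg.len = len;
	virt_wmb();		/* payload visible before the state flips */
	req->state = xb_req_state_got_reply;
}

static bool sketch_consume(struct xb_req_data *req)
{
	if (req->state != xb_req_state_got_reply)
		return false;
	virt_rmb();		/* order the state read before payload reads */
	process(req->body, req->msg.len);	/* hypothetical consumer */
	return true;
}
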
+ */ +static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error, + const char *why) +{ + rxrpc_kernel_abort_call(call->net->socket, call->rxcall, + abort_code, error, why); + afs_set_call_complete(call, error, 0); +} + /* * The server supplied a list of callbacks that it wanted to break. */ @@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work) if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) afs_send_empty_reply(call); else - rxrpc_kernel_abort_call(call->net->socket, call->rxcall, - 1, 1, "K-1"); + afs_abort_service_call(call, 1, 1, "K-1"); afs_put_call(call); _leave(""); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 497f979018c2..5c794f4b051a 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -908,6 +908,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct afs_vnode *dvnode = AFS_FS_I(dir); + struct afs_fid fid = {}; struct inode *inode; struct dentry *d; struct key *key; @@ -951,21 +952,18 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, afs_stat_v(dvnode, n_lookup); inode = afs_do_lookup(dir, dentry, key); key_put(key); - if (inode == ERR_PTR(-ENOENT)) { + if (inode == ERR_PTR(-ENOENT)) inode = afs_try_auto_mntpt(dentry, dir); - } else { - dentry->d_fsdata = - (void *)(unsigned long)dvnode->status.data_version; - } + + if (!IS_ERR_OR_NULL(inode)) + fid = AFS_FS_I(inode)->fid; + d = d_splice_alias(inode, dentry); if (!IS_ERR_OR_NULL(d)) { d->d_fsdata = dentry->d_fsdata; - trace_afs_lookup(dvnode, &d->d_name, - inode ? AFS_FS_I(inode) : NULL); + trace_afs_lookup(dvnode, &d->d_name, &fid); } else { - trace_afs_lookup(dvnode, &dentry->d_name, - IS_ERR_OR_NULL(inode) ? NULL - : AFS_FS_I(inode)); + trace_afs_lookup(dvnode, &dentry->d_name, &fid); } return d; } diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c index d4fbe5f85f1b..b108528bf010 100644 --- a/fs/afs/dir_edit.c +++ b/fs/afs/dir_edit.c @@ -68,13 +68,11 @@ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_ static void afs_set_contig_bits(union afs_xdr_dir_block *block, int bit, unsigned int nr_slots) { - u64 mask, before, after; + u64 mask; mask = (1 << nr_slots) - 1; mask <<= bit; - before = *(u64 *)block->hdr.bitmap; - block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8); block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8); block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8); @@ -83,8 +81,6 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block, block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8); block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8); block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8); - - after = *(u64 *)block->hdr.bitmap; } /* @@ -93,13 +89,11 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block, static void afs_clear_contig_bits(union afs_xdr_dir_block *block, int bit, unsigned int nr_slots) { - u64 mask, before, after; + u64 mask; mask = (1 << nr_slots) - 1; mask <<= bit; - before = *(u64 *)block->hdr.bitmap; - block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8); block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8); block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8); @@ -108,8 +102,6 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block, block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8); block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8); block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8); - - after = *(u64 *)block->hdr.bitmap; } /* diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 4150280509ff..7503899c0a1b 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -136,6 +136,9 @@ 
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr ASSERTCMP(d_inode(dentry), ==, NULL); + if (flags & LOOKUP_CREATE) + return ERR_PTR(-EOPNOTSUPP); + if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index cfe62b154f68..e1b9ed679045 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -145,6 +145,7 @@ static int afs_do_probe_fileserver(struct afs_net *net, read_lock(&server->fs_lock); ac.alist = rcu_dereference_protected(server->addresses, lockdep_is_held(&server->fs_lock)); + afs_get_addrlist(ac.alist); read_unlock(&server->fs_lock); atomic_set(&server->probe_outstanding, ac.alist->nr_addrs); @@ -163,6 +164,7 @@ static int afs_do_probe_fileserver(struct afs_net *net, if (!in_progress) afs_fs_probe_done(server); + afs_put_addrlist(ac.alist); return in_progress; } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 759e0578012c..d5efb1debebf 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -154,7 +154,7 @@ struct afs_call { }; unsigned char unmarshall; /* unmarshalling phase */ unsigned char addr_ix; /* Address in ->alist */ - bool incoming; /* T if incoming call */ + bool drop_ref; /* T if need to drop ref for incoming call */ bool send_pages; /* T if data from mapping should be sent */ bool need_attention; /* T if RxRPC poked us */ bool async; /* T if asynchronous */ @@ -1203,8 +1203,16 @@ static inline void afs_set_call_complete(struct afs_call *call, ok = true; } spin_unlock_bh(&call->state_lock); - if (ok) + if (ok) { trace_afs_call_done(call); + + /* Asynchronous calls have two refs to release - one from the alloc and + * one queued with the work item - and we can't just deallocate the + * call because the work item may be queued again. 
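
The drop_ref flag above replaces the old incoming flag and the delete-work trampoline: an asynchronous call holds one extra reference on behalf of its queued work item, and completion is now the single point that drops it, rather than re-queueing the work item just to put the ref. Condensed sketch of the completion tail (cf. afs_set_call_complete() above):

static void sketch_call_done(struct afs_call *call)
{
	trace_afs_call_done(call);
	if (call->drop_ref)		/* the work item's reference... */
		afs_put_call(call);	/* ...dropped exactly once, here */
}
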
+ */ + if (call->drop_ref) + afs_put_call(call); + } } /* diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index f532d6d3bd28..79bc5f1338ed 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -126,7 +126,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) if (src_as->cell) ctx->cell = afs_get_cell(src_as->cell); - if (size > PAGE_SIZE - 1) + if (size < 2 || size > PAGE_SIZE - 1) return -EINVAL; page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL); @@ -140,7 +140,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) } buf = kmap(page); - ret = vfs_parse_fs_string(fc, "source", buf, size); + ret = -EINVAL; + if (buf[size - 1] == '.') + ret = vfs_parse_fs_string(fc, "source", buf, size - 1); kunmap(page); put_page(page); if (ret < 0) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 61498d9f06ef..ef1d09f8920b 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls; static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); -static void afs_delete_async_call(struct work_struct *); static void afs_process_async_call(struct work_struct *); static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); @@ -169,7 +168,7 @@ void afs_put_call(struct afs_call *call) int n = atomic_dec_return(&call->usage); int o = atomic_read(&net->nr_outstanding_calls); - trace_afs_call(call, afs_call_trace_put, n + 1, o, + trace_afs_call(call, afs_call_trace_put, n, o, __builtin_return_address(0)); ASSERTCMP(n, >=, 0); @@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) /* If the call is going to be asynchronous, we need an extra ref for * the call to hold itself so the caller need not hang on to its ref. */ - if (call->async) + if (call->async) { afs_get_call(call, afs_call_trace_get); + call->drop_ref = true; + } /* create a call */ rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, @@ -584,8 +585,6 @@ static void afs_deliver_to_call(struct afs_call *call) done: if (call->type->done) call->type->done(call); - if (state == AFS_CALL_COMPLETE && call->incoming) - afs_put_call(call); out: _leave(""); return; @@ -604,11 +603,7 @@ call_complete: long afs_wait_for_call_to_complete(struct afs_call *call, struct afs_addr_cursor *ac) { - signed long rtt2, timeout; long ret; - bool stalled = false; - u64 rtt; - u32 life, last_life; bool rxrpc_complete = false; DECLARE_WAITQUEUE(myself, current); @@ -619,14 +614,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call, if (ret < 0) goto out; - rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); - rtt2 = nsecs_to_jiffies64(rtt) * 2; - if (rtt2 < 2) - rtt2 = 2; - - timeout = rtt2; - rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life); - add_wait_queue(&call->waitq, &myself); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); @@ -637,37 +624,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call, call->need_attention = false; __set_current_state(TASK_RUNNING); afs_deliver_to_call(call); - timeout = rtt2; continue; } if (afs_check_call_state(call, AFS_CALL_COMPLETE)) break; - if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) { + if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) { /* rxrpc terminated the call. 
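
The rewritten afs_wait_for_call_to_complete() above drops the RTT-derived timeout and stall detection in favor of a plain open-coded wait-queue sleep: park on the call's waitqueue, re-check the wake conditions after every wakeup, and let rxrpc_kernel_check_life() (now returning a simple bool) report a dead call. Sketch of the loop's shape:

static void sketch_wait(struct afs_call *call)
{
	DECLARE_WAITQUEUE(myself, current);

	add_wait_queue(&call->waitq, &myself);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (call->need_attention) {
			call->need_attention = false;
			__set_current_state(TASK_RUNNING);
			afs_deliver_to_call(call);	/* then loop and re-check */
			continue;
		}
		if (afs_check_call_state(call, AFS_CALL_COMPLETE) ||
		    !rxrpc_kernel_check_life(call->net->socket, call->rxcall))
			break;		/* finished, or rxrpc ended the call */
		schedule();		/* sleep until the next wakeup */
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);
}
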
*/ rxrpc_complete = true; break; } - if (call->intr && timeout == 0 && - life == last_life && signal_pending(current)) { - if (stalled) - break; - __set_current_state(TASK_RUNNING); - rxrpc_kernel_probe_life(call->net->socket, call->rxcall); - timeout = rtt2; - stalled = true; - continue; - } - - if (life != last_life) { - timeout = rtt2; - last_life = life; - stalled = false; - } - - timeout = schedule_timeout(timeout); + schedule(); } remove_wait_queue(&call->waitq, &myself); @@ -735,7 +704,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, u = atomic_fetch_add_unless(&call->usage, 1, 0); if (u != 0) { - trace_afs_call(call, afs_call_trace_wake, u, + trace_afs_call(call, afs_call_trace_wake, u + 1, atomic_read(&call->net->nr_outstanding_calls), __builtin_return_address(0)); @@ -744,21 +713,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, } } -/* - * Delete an asynchronous call. The work item carries a ref to the call struct - * that we need to release. - */ -static void afs_delete_async_call(struct work_struct *work) -{ - struct afs_call *call = container_of(work, struct afs_call, async_work); - - _enter(""); - - afs_put_call(call); - - _leave(""); -} - /* * Perform I/O processing on an asynchronous call. The work item carries a ref * to the call struct that we either need to release or to pass on. @@ -774,16 +728,6 @@ static void afs_process_async_call(struct work_struct *work) afs_deliver_to_call(call); } - if (call->state == AFS_CALL_COMPLETE) { - /* We have two refs to release - one from the alloc and one - * queued with the work item - and we can't just deallocate the - * call because the work item may be queued again. - */ - call->async_work.func = afs_delete_async_call; - if (!queue_work(afs_async_calls, &call->async_work)) - afs_put_call(call); - } - afs_put_call(call); _leave(""); } @@ -810,6 +754,7 @@ void afs_charge_preallocation(struct work_struct *work) if (!call) break; + call->drop_ref = true; call->async = true; call->state = AFS_CALL_SV_AWAIT_OP_ID; init_waitqueue_head(&call->waitq); diff --git a/fs/afs/server.c b/fs/afs/server.c index 64d440aaabc0..ca8115ba1724 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -32,18 +32,11 @@ static void afs_dec_servers_outstanding(struct afs_net *net) struct afs_server *afs_find_server(struct afs_net *net, const struct sockaddr_rxrpc *srx) { - const struct sockaddr_in6 *a = &srx->transport.sin6, *b; const struct afs_addr_list *alist; struct afs_server *server = NULL; unsigned int i; - bool ipv6 = true; int seq = 0, diff; - if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 || - srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 || - srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff)) - ipv6 = false; - rcu_read_lock(); do { @@ -52,7 +45,8 @@ struct afs_server *afs_find_server(struct afs_net *net, server = NULL; read_seqbegin_or_lock(&net->fs_addr_lock, &seq); - if (ipv6) { + if (srx->transport.family == AF_INET6) { + const struct sockaddr_in6 *a = &srx->transport.sin6, *b; hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) { alist = rcu_dereference(server->addresses); for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { @@ -68,15 +62,16 @@ struct afs_server *afs_find_server(struct afs_net *net, } } } else { + const struct sockaddr_in *a = &srx->transport.sin, *b; hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) { alist = rcu_dereference(server->addresses); for (i = 0; i < alist->nr_ipv4; i++) { - b = 
&alist->addrs[i].transport.sin6; - diff = ((u16 __force)a->sin6_port - - (u16 __force)b->sin6_port); + b = &alist->addrs[i].transport.sin; + diff = ((u16 __force)a->sin_port - + (u16 __force)b->sin_port); if (diff == 0) - diff = ((u32 __force)a->sin6_addr.s6_addr32[3] - - (u32 __force)b->sin6_addr.s6_addr32[3]); + diff = ((u32 __force)a->sin_addr.s_addr - + (u32 __force)b->sin_addr.s_addr); if (diff == 0) goto found; } diff --git a/fs/afs/super.c b/fs/afs/super.c index 488641b1a418..7f8a9b3137bf 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -404,6 +404,7 @@ static int afs_test_super(struct super_block *sb, struct fs_context *fc) return (as->net_ns == fc->net_ns && as->volume && as->volume->vid == ctx->volume->vid && + as->cell == ctx->cell && !as->dyn_root); } @@ -448,7 +449,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) /* allocate the root inode and dentry */ if (as->dyn_root) { inode = afs_iget_pseudo_dir(sb, true); - sb->s_flags |= SB_RDONLY; } else { sprintf(sb->s_id, "%llu", as->volume->vid); afs_activate_volume(as->volume); diff --git a/fs/aio.c b/fs/aio.c index 0d9a559d488c..4115d5ad6b90 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1610,6 +1610,14 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, return 0; } +static void aio_poll_put_work(struct work_struct *work) +{ + struct poll_iocb *req = container_of(work, struct poll_iocb, work); + struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); + + iocb_put(iocb); +} + static void aio_poll_complete_work(struct work_struct *work) { struct poll_iocb *req = container_of(work, struct poll_iocb, work); @@ -1674,6 +1682,8 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, list_del_init(&req->wait.entry); if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { + struct kioctx *ctx = iocb->ki_ctx; + /* * Try to complete the iocb inline if we can. Use * irqsave/irqrestore because not all filesystems (e.g. 
fuse) @@ -1683,8 +1693,14 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, list_del(&iocb->ki_list); iocb->ki_res.res = mangle_poll(mask); req->done = true; - spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); - iocb_put(iocb); + if (iocb->ki_eventfd && eventfd_signal_count()) { + iocb = NULL; + INIT_WORK(&req->work, aio_poll_put_work); + schedule_work(&req->work); + } + spin_unlock_irqrestore(&ctx->ctx_lock, flags); + if (iocb) + iocb_put(iocb); } else { schedule_work(&req->work); } diff --git a/fs/attr.c b/fs/attr.c index df28035aa23e..b4bbdbd4c8ca 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -183,18 +183,12 @@ void setattr_copy(struct inode *inode, const struct iattr *attr) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; - if (ia_valid & ATTR_ATIME) { - inode->i_atime = timestamp_truncate(attr->ia_atime, - inode); - } - if (ia_valid & ATTR_MTIME) { - inode->i_mtime = timestamp_truncate(attr->ia_mtime, - inode); - } - if (ia_valid & ATTR_CTIME) { - inode->i_ctime = timestamp_truncate(attr->ia_ctime, - inode); - } + if (ia_valid & ATTR_ATIME) + inode->i_atime = attr->ia_atime; + if (ia_valid & ATTR_MTIME) + inode->i_mtime = attr->ia_mtime; + if (ia_valid & ATTR_CTIME) + inode->i_ctime = attr->ia_ctime; if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; @@ -268,8 +262,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de attr->ia_ctime = now; if (!(ia_valid & ATTR_ATIME_SET)) attr->ia_atime = now; + else + attr->ia_atime = timestamp_truncate(attr->ia_atime, inode); if (!(ia_valid & ATTR_MTIME_SET)) attr->ia_mtime = now; + else + attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode); + if (ia_valid & ATTR_KILL_PRIV) { error = security_inode_need_killpriv(dentry); if (error < 0) diff --git a/fs/block_dev.c b/fs/block_dev.c index 9c073dbdc1b0..d612468ee66b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1403,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty) "resized disk %s\n", bdev->bd_disk ? bdev->bd_disk->disk_name : ""); } - - if (!bdev->bd_disk) - return; - if (disk_part_scan_enabled(bdev->bd_disk)) - bdev->bd_invalidated = 1; + bdev->bd_invalidated = 1; } /** @@ -1512,6 +1508,19 @@ EXPORT_SYMBOL(bd_set_size); static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); +static void bdev_disk_changed(struct block_device *bdev, bool invalidate) +{ + if (disk_part_scan_enabled(bdev->bd_disk)) { + if (invalidate) + invalidate_partitions(bdev->bd_disk, bdev); + else + rescan_partitions(bdev->bd_disk, bdev); + } else { + check_disk_size_change(bdev->bd_disk, bdev, !invalidate); + bdev->bd_invalidated = 0; + } +} + /* * bd_mutex locking: * @@ -1594,12 +1603,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) * The latter is necessary to prevent ghost * partitions on a removed medium. 
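
The aio_poll_wake() change above dodges a re-entrancy hazard: dropping the final iocb reference can signal an eventfd, and this wakeup may itself be running from inside an eventfd signal. When eventfd_signal_count() says we are nested, the final put is bounced to a workqueue instead of taken inline. Condensed sketch of that branch:

	/* inside the wakeup, with ctx->ctx_lock held */
	if (iocb->ki_eventfd && eventfd_signal_count()) {
		/* nested inside an eventfd signal: defer the final put */
		INIT_WORK(&req->work, aio_poll_put_work);
		schedule_work(&req->work);
		iocb = NULL;	/* nothing left to put inline */
	}
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (iocb)
		iocb_put(iocb);	/* safe: not nested in an eventfd signal */
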
*/ - if (bdev->bd_invalidated) { - if (!ret) - rescan_partitions(disk, bdev); - else if (ret == -ENOMEDIUM) - invalidate_partitions(disk, bdev); - } + if (bdev->bd_invalidated && + (!ret || ret == -ENOMEDIUM)) + bdev_disk_changed(bdev, ret == -ENOMEDIUM); if (ret) goto out_clear; @@ -1632,12 +1638,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (bdev->bd_disk->fops->open) ret = bdev->bd_disk->fops->open(bdev, mode); /* the same as first opener case, read comment there */ - if (bdev->bd_invalidated) { - if (!ret) - rescan_partitions(bdev->bd_disk, bdev); - else if (ret == -ENOMEDIUM) - invalidate_partitions(bdev->bd_disk, bdev); - } + if (bdev->bd_invalidated && + (!ret || ret == -ENOMEDIUM)) + bdev_disk_changed(bdev, ret == -ENOMEDIUM); if (ret) goto out_unlock_bdev; } diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 2e9e13ffbd08..3f3110975f88 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -53,16 +53,6 @@ struct btrfs_workqueue { struct __btrfs_workqueue *high; }; -static void normal_work_helper(struct btrfs_work *work); - -#define BTRFS_WORK_HELPER(name) \ -noinline_for_stack void btrfs_##name(struct work_struct *arg) \ -{ \ - struct btrfs_work *work = container_of(arg, struct btrfs_work, \ - normal_work); \ - normal_work_helper(work); \ -} - struct btrfs_fs_info * btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) { @@ -89,29 +79,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; } -BTRFS_WORK_HELPER(worker_helper); -BTRFS_WORK_HELPER(delalloc_helper); -BTRFS_WORK_HELPER(flush_delalloc_helper); -BTRFS_WORK_HELPER(cache_helper); -BTRFS_WORK_HELPER(submit_helper); -BTRFS_WORK_HELPER(fixup_helper); -BTRFS_WORK_HELPER(endio_helper); -BTRFS_WORK_HELPER(endio_meta_helper); -BTRFS_WORK_HELPER(endio_meta_write_helper); -BTRFS_WORK_HELPER(endio_raid56_helper); -BTRFS_WORK_HELPER(endio_repair_helper); -BTRFS_WORK_HELPER(rmw_helper); -BTRFS_WORK_HELPER(endio_write_helper); -BTRFS_WORK_HELPER(freespace_write_helper); -BTRFS_WORK_HELPER(delayed_meta_helper); -BTRFS_WORK_HELPER(readahead_helper); -BTRFS_WORK_HELPER(qgroup_rescan_helper); -BTRFS_WORK_HELPER(extent_refs_helper); -BTRFS_WORK_HELPER(scrub_helper); -BTRFS_WORK_HELPER(scrubwrc_helper); -BTRFS_WORK_HELPER(scrubnc_helper); -BTRFS_WORK_HELPER(scrubparity_helper); - static struct __btrfs_workqueue * __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, unsigned int flags, int limit_active, int thresh) @@ -252,16 +219,17 @@ out: } } -static void run_ordered_work(struct __btrfs_workqueue *wq) +static void run_ordered_work(struct __btrfs_workqueue *wq, + struct btrfs_work *self) { struct list_head *list = &wq->ordered_list; struct btrfs_work *work; spinlock_t *lock = &wq->list_lock; unsigned long flags; + void *wtag; + bool free_self = false; while (1) { - void *wtag; - spin_lock_irqsave(lock, flags); if (list_empty(list)) break; @@ -287,20 +255,54 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) list_del(&work->ordered_list); spin_unlock_irqrestore(lock, flags); - /* - * We don't want to call the ordered free functions with the - * lock held though. Save the work as tag for the trace event, - * because the callback could free the structure. - */ - wtag = work; - work->ordered_free(work); - trace_btrfs_all_work_done(wq->fs_info, wtag); + if (work == self) { + /* + * This is the work item that the worker is currently + * executing. 
+ * + * The kernel workqueue code guarantees non-reentrancy + * of work items. I.e., if a work item with the same + * address and work function is queued twice, the second + * execution is blocked until the first one finishes. A + * work item may be freed and recycled with the same + * work function; the workqueue code assumes that the + * original work item cannot depend on the recycled work + * item in that case (see find_worker_executing_work()). + * + * Note that different types of Btrfs work can depend on + * each other, and one type of work on one Btrfs + * filesystem may even depend on the same type of work + * on another Btrfs filesystem via, e.g., a loop device. + * Therefore, we must not allow the current work item to + * be recycled until we are really done, otherwise we + * break the above assumption and can deadlock. + */ + free_self = true; + } else { + /* + * We don't want to call the ordered free functions with + * the lock held though. Save the work as tag for the + * trace event, because the callback could free the + * structure. + */ + wtag = work; + work->ordered_free(work); + trace_btrfs_all_work_done(wq->fs_info, wtag); + } } spin_unlock_irqrestore(lock, flags); + + if (free_self) { + wtag = self; + self->ordered_free(self); + trace_btrfs_all_work_done(wq->fs_info, wtag); + } } -static void normal_work_helper(struct btrfs_work *work) +static void btrfs_work_helper(struct work_struct *normal_work) { + struct btrfs_work *work = container_of(normal_work, struct btrfs_work, + normal_work); struct __btrfs_workqueue *wq; void *wtag; int need_order = 0; @@ -324,21 +326,19 @@ static void normal_work_helper(struct btrfs_work *work) work->func(work); if (need_order) { set_bit(WORK_DONE_BIT, &work->flags); - run_ordered_work(wq); + run_ordered_work(wq, work); } if (!need_order) trace_btrfs_all_work_done(wq->fs_info, wtag); } -void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, - btrfs_func_t func, - btrfs_func_t ordered_func, - btrfs_func_t ordered_free) +void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, + btrfs_func_t ordered_func, btrfs_func_t ordered_free) { work->func = func; work->ordered_func = ordered_func; work->ordered_free = ordered_free; - INIT_WORK(&work->normal_work, uniq_func); + INIT_WORK(&work->normal_work, btrfs_work_helper); INIT_LIST_HEAD(&work->ordered_list); work->flags = 0; } diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 7861c9feba5f..c5bf2b117c05 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -29,42 +29,13 @@ struct btrfs_work { unsigned long flags; }; -#define BTRFS_WORK_HELPER_PROTO(name) \ -void btrfs_##name(struct work_struct *arg) - -BTRFS_WORK_HELPER_PROTO(worker_helper); -BTRFS_WORK_HELPER_PROTO(delalloc_helper); -BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); -BTRFS_WORK_HELPER_PROTO(cache_helper); -BTRFS_WORK_HELPER_PROTO(submit_helper); -BTRFS_WORK_HELPER_PROTO(fixup_helper); -BTRFS_WORK_HELPER_PROTO(endio_helper); -BTRFS_WORK_HELPER_PROTO(endio_meta_helper); -BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); -BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); -BTRFS_WORK_HELPER_PROTO(endio_repair_helper); -BTRFS_WORK_HELPER_PROTO(rmw_helper); -BTRFS_WORK_HELPER_PROTO(endio_write_helper); -BTRFS_WORK_HELPER_PROTO(freespace_write_helper); -BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); -BTRFS_WORK_HELPER_PROTO(readahead_helper); -BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); -BTRFS_WORK_HELPER_PROTO(extent_refs_helper); -BTRFS_WORK_HELPER_PROTO(scrub_helper); 
-BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); -BTRFS_WORK_HELPER_PROTO(scrubnc_helper); -BTRFS_WORK_HELPER_PROTO(scrubparity_helper); - - struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, unsigned int flags, int limit_active, int thresh); -void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, - btrfs_func_t func, - btrfs_func_t ordered_func, - btrfs_func_t ordered_free); +void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, + btrfs_func_t ordered_func, btrfs_func_t ordered_free); void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work); void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 670700cb1110..7dcfa7d7632a 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -695,8 +695,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, caching_ctl->block_group = cache; caching_ctl->progress = cache->key.objectid; refcount_set(&caching_ctl->count, 1); - btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, - caching_thread, NULL, NULL); + btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); spin_lock(&cache->lock); /* @@ -2662,7 +2661,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * is because we need the unpinning stage to actually add the * space back to the block group, otherwise we will leak space. */ - if (!alloc && cache->cached == BTRFS_CACHE_NO) + if (!alloc && !btrfs_block_group_cache_done(cache)) btrfs_cache_block_group(cache, 1); byte_in_group = bytenr - cache->key.objectid; diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 0b52ab4cb964..72c70f59fc60 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -629,7 +629,6 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, static int btrfsic_process_superblock(struct btrfsic_state *state, struct btrfs_fs_devices *fs_devices) { - struct btrfs_fs_info *fs_info = state->fs_info; struct btrfs_super_block *selected_super; struct list_head *dev_head = &fs_devices->devices; struct btrfs_device *device; @@ -700,7 +699,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state, break; } - num_copies = btrfs_num_copies(fs_info, next_bytenr, + num_copies = btrfs_num_copies(state->fs_info, next_bytenr, state->metablock_size); if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) pr_info("num_copies(log_bytenr=%llu) = %d\n", diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index e59cde204b2f..a989105d39c8 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -330,12 +330,10 @@ u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem) { write_lock(&fs_info->tree_mod_log_lock); - spin_lock(&fs_info->tree_mod_seq_lock); if (!elem->seq) { elem->seq = btrfs_inc_tree_mod_seq(fs_info); list_add_tail(&elem->list, &fs_info->tree_mod_seq_list); } - spin_unlock(&fs_info->tree_mod_seq_lock); write_unlock(&fs_info->tree_mod_log_lock); return elem->seq; @@ -355,7 +353,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, if (!seq_putting) return; - spin_lock(&fs_info->tree_mod_seq_lock); + write_lock(&fs_info->tree_mod_log_lock); list_del(&elem->list); elem->seq = 0; @@ -366,24 +364,22 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, * blocker with lower sequence number exists, we * cannot remove anything from the log */ - spin_unlock(&fs_info->tree_mod_seq_lock); + 
write_unlock(&fs_info->tree_mod_log_lock); return; } min_seq = cur_elem->seq; } } - spin_unlock(&fs_info->tree_mod_seq_lock); /* * anything that's lower than the lowest existing (read: blocked) * sequence number can be removed from the tree. */ - write_lock(&fs_info->tree_mod_log_lock); tm_root = &fs_info->tree_mod_log; for (node = rb_first(tm_root); node; node = next) { next = rb_next(node); tm = rb_entry(node, struct tree_mod_elem, node); - if (tm->seq > min_seq) + if (tm->seq >= min_seq) continue; rb_erase(node, tm_root); kfree(tm); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index fe2b8765d9e6..169075550a5a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -671,14 +671,12 @@ struct btrfs_fs_info { atomic_t nr_delayed_iputs; wait_queue_head_t delayed_iputs_wait; - /* this protects tree_mod_seq_list */ - spinlock_t tree_mod_seq_lock; atomic64_t tree_mod_seq; - struct list_head tree_mod_seq_list; - /* this protects tree_mod_log */ + /* this protects tree_mod_log and tree_mod_seq_list */ rwlock_t tree_mod_log_lock; struct rb_root tree_mod_log; + struct list_head tree_mod_seq_list; atomic_t async_delalloc_pages; @@ -2785,7 +2783,7 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( /* file-item.c */ struct btrfs_dio_private; int btrfs_del_csums(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); + struct btrfs_root *root, u64 bytenr, u64 len); blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst); blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, @@ -3109,17 +3107,21 @@ do { \ rcu_read_unlock(); \ } while (0) -__cold -static inline void assfail(const char *expr, const char *file, int line) +#ifdef CONFIG_BTRFS_ASSERT +__cold __noreturn +static inline void assertfail(const char *expr, const char *file, int line) { - if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) { - pr_err("assertion failed: %s, in %s:%d\n", expr, file, line); - BUG(); - } + pr_err("assertion failed: %s, in %s:%d\n", expr, file, line); + BUG(); } -#define ASSERT(expr) \ - (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) +#define ASSERT(expr) \ + (likely(expr) ? 
(void)0 : assertfail(#expr, __FILE__, __LINE__)) + +#else +static inline void assertfail(const char *expr, const char* file, int line) { } +#define ASSERT(expr) (void)(expr) +#endif /* * Use that for functions that are conditionally exported for sanity tests but diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 1f7f39b10bd0..c7a53e79c66d 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1367,8 +1367,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, return -ENOMEM; async_work->delayed_root = delayed_root; - btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, - btrfs_async_run_delayed_root, NULL, NULL); + btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL, + NULL); async_work->nr = nr; btrfs_queue_work(fs_info->delayed_workers, &async_work->work); @@ -1949,12 +1949,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) } inode_id = delayed_nodes[n - 1]->inode_id + 1; - - for (i = 0; i < n; i++) - refcount_inc(&delayed_nodes[i]->refs); + for (i = 0; i < n; i++) { + /* + * Don't increase refs in case the node is dead and + * about to be removed from the tree in the loop below + */ + if (!refcount_inc_not_zero(&delayed_nodes[i]->refs)) + delayed_nodes[i] = NULL; + } spin_unlock(&root->inode_lock); for (i = 0; i < n; i++) { + if (!delayed_nodes[i]) + continue; __btrfs_kill_delayed_node(delayed_nodes[i]); btrfs_release_delayed_node(delayed_nodes[i]); } diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index df3bd880061d..dfdb7d4f8406 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -492,7 +492,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, if (head->is_data) return; - spin_lock(&fs_info->tree_mod_seq_lock); + read_lock(&fs_info->tree_mod_log_lock); if (!list_empty(&fs_info->tree_mod_seq_list)) { struct seq_list *elem; @@ -500,7 +500,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, struct seq_list, list); seq = elem->seq; } - spin_unlock(&fs_info->tree_mod_seq_lock); + read_unlock(&fs_info->tree_mod_log_lock); again: for (node = rb_first_cached(&head->ref_tree); node; @@ -518,7 +518,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq) struct seq_list *elem; int ret = 0; - spin_lock(&fs_info->tree_mod_seq_lock); + read_lock(&fs_info->tree_mod_log_lock); if (!list_empty(&fs_info->tree_mod_seq_list)) { elem = list_first_entry(&fs_info->tree_mod_seq_list, struct seq_list, list); @@ -531,7 +531,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq) } } - spin_unlock(&fs_info->tree_mod_seq_lock); + read_unlock(&fs_info->tree_mod_log_lock); return ret; } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 402b61bf345c..5cdd1b51285b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -706,43 +706,31 @@ static void end_workqueue_bio(struct bio *bio) struct btrfs_end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; struct btrfs_workqueue *wq; - btrfs_work_func_t func; fs_info = end_io_wq->info; end_io_wq->status = bio->bi_status; if (bio_op(bio) == REQ_OP_WRITE) { - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) wq = fs_info->endio_meta_write_workers; - func = btrfs_endio_meta_write_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } 
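The ASSERT() split above follows a well-known C idiom: with the config option enabled the macro funnels into a cold, noreturn report-and-BUG helper, and with it disabled it still expands to (void)(expr), so the expression stays type-checked and variables used only in assertions do not become unused. A minimal userspace sketch of the same shape, assuming nothing kernel-specific (ENABLE_ASSERT, assertfail() and main() are illustrative stand-ins, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

#define ENABLE_ASSERT 1   /* stand-in for CONFIG_BTRFS_ASSERT; set to 0 to compile assertions out */

#if ENABLE_ASSERT
static void assertfail(const char *expr, const char *file, int line)
{
	fprintf(stderr, "assertion failed: %s, in %s:%d\n", expr, file, line);
	abort();
}
#define ASSERT(expr) \
	((expr) ? (void)0 : assertfail(#expr, __FILE__, __LINE__))
#else
/* still evaluates expr: variables used only inside ASSERT() stay "used" */
#define ASSERT(expr) ((void)(expr))
#endif

int main(void)
{
	int ret = 0;

	ASSERT(ret == 0);	/* passes silently either way here */
	puts("ok");
	return 0;
}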
else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; - func = btrfs_endio_raid56_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } } else { - if (unlikely(end_io_wq->metadata == - BTRFS_WQ_ENDIO_DIO_REPAIR)) { + if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) wq = fs_info->endio_repair_workers; - func = btrfs_endio_repair_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; - func = btrfs_endio_raid56_helper; - } else if (end_io_wq->metadata) { + else if (end_io_wq->metadata) wq = fs_info->endio_meta_workers; - func = btrfs_endio_meta_helper; - } else { + else wq = fs_info->endio_workers; - func = btrfs_endio_helper; - } } - btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); + btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); btrfs_queue_work(wq, &end_io_wq->work); } @@ -835,8 +823,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, async->mirror_num = mirror_num; async->submit_bio_start = submit_bio_start; - btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, - run_one_async_done, run_one_async_free); + btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, + run_one_async_free); async->bio_offset = bio_offset; @@ -1657,8 +1645,8 @@ static void end_workqueue_fn(struct btrfs_work *work) bio->bi_status = end_io_wq->status; bio->bi_private = end_io_wq->private; bio->bi_end_io = end_io_wq->end_io; - kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); bio_endio(bio); + kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); } static int cleaner_kthread(void *arg) @@ -2028,7 +2016,7 @@ static void free_root_extent_buffers(struct btrfs_root *root) } /* helper to cleanup tree roots */ -static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) +static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) { free_root_extent_buffers(info->tree_root); @@ -2037,7 +2025,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) free_root_extent_buffers(info->csum_root); free_root_extent_buffers(info->quota_root); free_root_extent_buffers(info->uuid_root); - if (chunk_root) + if (free_chunk_root) free_root_extent_buffers(info->chunk_root); free_root_extent_buffers(info->free_space_root); } @@ -2664,7 +2652,6 @@ int open_ctree(struct super_block *sb, spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); - spin_lock_init(&fs_info->tree_mod_seq_lock); spin_lock_init(&fs_info->super_lock); spin_lock_init(&fs_info->buffer_lock); spin_lock_init(&fs_info->unused_bgs_lock); @@ -3180,6 +3167,7 @@ retry_root_backup: /* do not make disk changes in broken FS or nologreplay is given */ if (btrfs_super_log_root(disk_super) != 0 && !btrfs_test_opt(fs_info, NOLOGREPLAY)) { + btrfs_info(fs_info, "start tree-log replay"); ret = btrfs_replay_log(fs_info, fs_devices); if (ret) { err = ret; @@ -3215,6 +3203,7 @@ retry_root_backup: if (IS_ERR(fs_info->fs_root)) { err = PTR_ERR(fs_info->fs_root); btrfs_warn(fs_info, "failed to read fs tree: %d", err); + fs_info->fs_root = NULL; goto fail_qgroup; } @@ -3336,7 +3325,7 @@ fail_block_groups: btrfs_put_block_group_cache(fs_info); fail_tree_roots: - free_root_pointers(fs_info, 
1); + free_root_pointers(fs_info, true); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_sb_buffer: @@ -3368,7 +3357,7 @@ recovery_tree_root: if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) goto fail_tree_roots; - free_root_pointers(fs_info, 0); + free_root_pointers(fs_info, false); /* don't use the log in recovery mode, it won't be valid */ btrfs_set_super_log_root(disk_super, 0); @@ -4059,10 +4048,17 @@ void close_ctree(struct btrfs_fs_info *fs_info) invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); - btrfs_free_block_groups(fs_info); - clear_bit(BTRFS_FS_OPEN, &fs_info->flags); - free_root_pointers(fs_info, 1); + free_root_pointers(fs_info, true); + + /* + * We must free the block groups after dropping the fs_roots as we could + * have had an IO error and have left over tree log blocks that aren't + * cleaned up until the fs roots are freed. This makes the block group + * accounting appear to be wrong because there's pending reserved bytes, + * so make sure we do the block group cleanup afterwards. + */ + btrfs_free_block_groups(fs_info); iput(fs_info->btree_inode); @@ -4298,6 +4294,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, cond_resched(); spin_lock(&delayed_refs->lock); } + btrfs_qgroup_destroy_extent_records(trans); spin_unlock(&delayed_refs->lock); @@ -4523,7 +4520,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, wake_up(&fs_info->transaction_wait); btrfs_destroy_delayed_inodes(fs_info); - btrfs_assert_delayed_root_empty(fs_info); btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, EXTENT_DIRTY); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 49cb26fa7c63..47ecf7216b3e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1848,8 +1848,8 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, btrfs_pin_extent(fs_info, head->bytenr, head->num_bytes, 1); if (head->is_data) { - ret = btrfs_del_csums(trans, fs_info, head->bytenr, - head->num_bytes); + ret = btrfs_del_csums(trans, fs_info->csum_root, + head->bytenr, head->num_bytes); } } @@ -3155,7 +3155,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, btrfs_release_path(path); if (is_data) { - ret = btrfs_del_csums(trans, info, bytenr, num_bytes); + ret = btrfs_del_csums(trans, info->csum_root, bytenr, + num_bytes); if (ret) { btrfs_abort_transaction(trans, ret); goto out; @@ -3780,6 +3781,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, u64 flags, int delalloc) { int ret = 0; + int cache_block_group_error = 0; struct btrfs_free_cluster *last_ptr = NULL; struct btrfs_block_group_cache *block_group = NULL; struct find_free_extent_ctl ffe_ctl = {0}; @@ -3939,7 +3941,20 @@ have_block_group: if (unlikely(!ffe_ctl.cached)) { ffe_ctl.have_caching_bg = true; ret = btrfs_cache_block_group(block_group, 0); - BUG_ON(ret < 0); + + /* + * If we get ENOMEM here or something else we want to + * try other block groups, because it may not be fatal. + * However if we can't find anything else we need to + * save our return here so that we return the actual + * error that caused problems, not ENOSPC. + */ + if (ret < 0) { + if (!cache_block_group_error) + cache_block_group_error = ret; + ret = 0; + goto loop; + } ret = 0; } @@ -4026,7 +4041,7 @@ loop: if (ret > 0) goto search; - if (ret == -ENOSPC) { + if (ret == -ENOSPC && !cache_block_group_error) { /* * Use ffe_ctl->total_free_space as fallback if we can't find * any contiguous hole. 
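The find_free_extent() fix in the hunk above (its tail continues just below) is a keep-the-first-error pattern: a failed btrfs_cache_block_group() no longer BUGs, the search moves on to other block groups, and the saved error is only surfaced if the allocation would otherwise end in a misleading -ENOSPC. A self-contained sketch of that control flow, with stand-in cache and allocation helpers (none of these names are the btrfs implementation):

#include <errno.h>
#include <stdio.h>

/* Pretend to cache one block group; group 1 fails to show the flow. */
static int cache_block_group(int group)
{
	return group == 1 ? -ENOMEM : 0;
}

/* Pretend allocation: always misses, so the loop visits every group. */
static int try_alloc(int group)
{
	(void)group;
	return -ENOSPC;
}

static int find_free_extent(int nr_groups)
{
	int cache_block_group_error = 0;
	int ret = -ENOSPC;

	for (int i = 0; i < nr_groups; i++) {
		int err = cache_block_group(i);

		if (err < 0) {
			/* remember the first real error, try other groups */
			if (!cache_block_group_error)
				cache_block_group_error = err;
			continue;
		}
		ret = try_alloc(i);
		if (ret == 0)
			return 0;
	}
	/* report the real cause instead of a misleading ENOSPC */
	if (ret == -ENOSPC && cache_block_group_error)
		ret = cache_block_group_error;
	return ret;
}

int main(void)
{
	printf("find_free_extent() = %d\n", find_free_extent(3)); /* -12, i.e. -ENOMEM */
	return 0;
}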
@@ -4037,6 +4052,8 @@ loop: space_info->max_extent_size = ffe_ctl.max_extent_size; spin_unlock(&space_info->lock); ins->offset = ffe_ctl.max_extent_size; + } else if (ret == -ENOSPC) { + ret = cache_block_group_error; } return ret; } @@ -4394,6 +4411,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, offset, ins, 1); + if (ret) + btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1); btrfs_put_block_group(block_group); return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cceaf05aada2..284540cdbbd9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1583,21 +1583,25 @@ void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, /* Find first extent with bits cleared */ while (1) { node = __etree_search(tree, start, &next, &prev, NULL, NULL); - if (!node) { + if (!node && !next && !prev) { + /* + * Tree is completely empty, send full range and let + * caller deal with it + */ + *start_ret = 0; + *end_ret = -1; + goto out; + } else if (!node && !next) { + /* + * We are past the last allocated chunk, set start at + * the end of the last extent. + */ + state = rb_entry(prev, struct extent_state, rb_node); + *start_ret = state->end + 1; + *end_ret = -1; + goto out; + } else if (!node) { node = next; - if (!node) { - /* - * We are past the last allocated chunk, - * set start at the end of the last extent. The - * device alloc tree should never be empty so - * prev is always set. - */ - ASSERT(prev); - state = rb_entry(prev, struct extent_state, rb_node); - *start_ret = state->end + 1; - *end_ret = -1; - goto out; - } } /* * At this point 'node' either contains 'start' or start is @@ -1899,7 +1903,7 @@ static int __process_pages_contig(struct address_space *mapping, if (page_ops & PAGE_SET_PRIVATE2) SetPagePrivate2(pages[i]); - if (pages[i] == locked_page) { + if (locked_page && pages[i] == locked_page) { put_page(pages[i]); pages_locked++; continue; @@ -3938,6 +3942,11 @@ int btree_write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; + /* + * Start from the beginning does not need to cycle over the + * range, mark it as scanned. + */ + scanned = (index == 0); } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; @@ -3955,7 +3964,6 @@ retry: tag))) { unsigned i; - scanned = 1; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -4084,6 +4092,11 @@ static int extent_write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; + /* + * Start from the beginning does not need to cycle over the + * range, mark it as scanned. + */ + scanned = (index == 0); } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; @@ -4117,11 +4130,10 @@ retry: &index, end, tag))) { unsigned i; - scanned = 1; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; - done_index = page->index; + done_index = page->index + 1; /* * At this point we hold neither the i_pages lock nor * the page lock: the page may be truncated or @@ -4156,16 +4168,6 @@ retry: ret = __extent_writepage(page, wbc, epd); if (ret < 0) { - /* - * done_index is set past this page, - * so media errors will not choke - * background writeout for the entire - * file. This has consequences for - * range_cyclic semantics (ie. 
it may - * not be suitable for data integrity - * writeout). - */ - done_index = page->index + 1; done = 1; break; } @@ -4187,7 +4189,16 @@ retry: */ scanned = 1; index = 0; - goto retry; + + /* + * If we're looping we could run into a page that is locked by a + * writer and that writer could be waiting on writeback for a + * page in our current bio, and thus deadlock, so flush the + * write bio here. + */ + ret = flush_write_bio(epd); + if (!ret) + goto retry; } if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) @@ -5076,12 +5087,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, return eb; eb = alloc_dummy_extent_buffer(fs_info, start); if (!eb) - return NULL; + return ERR_PTR(-ENOMEM); eb->fs_info = fs_info; again: ret = radix_tree_preload(GFP_NOFS); - if (ret) + if (ret) { + exists = ERR_PTR(ret); goto free_eb; + } spin_lock(&fs_info->buffer_lock); ret = radix_tree_insert(&fs_info->buffer_radix, start >> PAGE_SHIFT, eb); diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 9d30acca55e1..043eec682ccd 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -233,6 +233,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) struct extent_map *merge = NULL; struct rb_node *rb; + /* + * We can't modify an extent map that is in the tree and that is being + * used by another task, as it can cause that other task to see it in + * inconsistent state during the merging. We always have 1 reference for + * the tree and 1 for this task (which is unpinning the extent map or + * clearing the logging flag), so anything > 2 means it's being used by + * other tasks too. + */ + if (refcount_read(&em->refs) > 2) + return; + if (em->start != 0) { rb = rb_prev(&em->rb_node); if (rb) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 1a599f50837b..f62a179f85bb 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -274,7 +274,8 @@ found: csum += count * csum_size; nblocks -= count; next: - while (count--) { + while (count > 0) { + count--; disk_bytenr += fs_info->sectorsize; offset += fs_info->sectorsize; page_bytes_left -= fs_info->sectorsize; @@ -590,9 +591,9 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info, * range of bytes. 
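One detail in the file-item.c hunk above deserves a note: while (count--) on an unsigned counter performs one final post-decrement when the test fails, leaving the variable wrapped to its maximum after the loop, which breaks any later reuse of it; while (count > 0) { count--; ... } exits with the counter at zero. A tiny standalone demonstration (plain C, none of the btrfs types):

#include <stdio.h>

int main(void)
{
	unsigned int count = 3;

	while (count--)
		;
	/* count wrapped to UINT_MAX on the final, failing test */
	printf("after while (count--):  %u\n", count);

	count = 3;
	while (count > 0) {
		count--;
	}
	printf("after while (count > 0): %u\n", count); /* 0 */
	return 0;
}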
*/ int btrfs_del_csums(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 bytenr, u64 len) + struct btrfs_root *root, u64 bytenr, u64 len) { - struct btrfs_root *root = fs_info->csum_root; + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_path *path; struct btrfs_key key; u64 end_byte = bytenr + len; @@ -602,6 +603,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); int blocksize_bits = fs_info->sb->s_blocksize_bits; + ASSERT(root == fs_info->csum_root || + root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); + path = btrfs_alloc_path(); if (!path) return -ENOMEM; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 435a502a3226..5739b8fc7fff 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1636,6 +1636,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, break; } + only_release_metadata = false; sector_offset = pos & (fs_info->sectorsize - 1); reserve_bytes = round_up(write_bytes + sector_offset, fs_info->sectorsize); @@ -1791,7 +1792,6 @@ again: set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, EXTENT_NORESERVE, NULL, NULL, GFP_NOFS); - only_release_metadata = false; } btrfs_drop_pages(pages, num_pages); @@ -1903,9 +1903,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, (iocb->ki_flags & IOCB_NOWAIT)) return -EOPNOTSUPP; - if (!inode_trylock(inode)) { - if (iocb->ki_flags & IOCB_NOWAIT) + if (iocb->ki_flags & IOCB_NOWAIT) { + if (!inode_trylock(inode)) return -EAGAIN; + } else { inode_lock(inode); } @@ -2601,8 +2602,8 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, } } - if (clone_info) { - u64 clone_len = drop_end - cur_offset; + if (clone_info && drop_end > clone_info->file_offset) { + u64 clone_len = drop_end - clone_info->file_offset; ret = btrfs_insert_clone_extent(trans, inode, path, clone_info, clone_len); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d54dcd0ab230..d86ada9c3c54 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -385,6 +385,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode if (uptodate && !PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); + if (page->mapping != inode->i_mapping) { + btrfs_err(BTRFS_I(inode)->root->fs_info, + "free space cache page truncated"); + io_ctl_drop_pages(io_ctl); + return -EIO; + } if (!PageUptodate(page)) { btrfs_err(BTRFS_I(inode)->root->fs_info, "error reading free space cache"); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 015910079e73..94b0df3fb3c8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -712,10 +712,12 @@ cleanup_and_bail_uncompressed: * to our extent and set things up for the async work queue to run * cow_file_range to do the normal delalloc dance. 
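The btrfs_file_write_iter() locking rework above reads more naturally once restated: decide on the nowait flag first, then either trylock and bail with -EAGAIN or take the lock blocking, rather than always trylocking and special-casing the failure. A minimal pthread analogue of the corrected shape (a plain mutex stands in for inode_lock()/inode_trylock(), and a bool for IOCB_NOWAIT; all names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_write(bool nowait)
{
	if (nowait) {
		if (pthread_mutex_trylock(&inode_lock) != 0)
			return -EAGAIN;		/* caller retries later */
	} else {
		pthread_mutex_lock(&inode_lock);	/* block until available */
	}
	/* ... the write path runs under the lock here ... */
	pthread_mutex_unlock(&inode_lock);
	return 0;
}

int main(void)
{
	printf("blocking: %d\n", do_write(false));
	printf("nowait:   %d\n", do_write(true));
	return 0;
}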
*/ - if (page_offset(async_chunk->locked_page) >= start && - page_offset(async_chunk->locked_page) <= end) + if (async_chunk->locked_page && + (page_offset(async_chunk->locked_page) >= start && + page_offset(async_chunk->locked_page) <= end)) { __set_page_dirty_nobuffers(async_chunk->locked_page); /* unlocked later on in the async handlers */ + } if (redirty) extent_range_redirty_for_io(inode, start, end); @@ -795,7 +797,7 @@ retry: async_extent->start + async_extent->ram_size - 1, WB_SYNC_ALL); - else if (ret) + else if (ret && async_chunk->locked_page) unlock_page(async_chunk->locked_page); kfree(async_extent); cond_resched(); @@ -1264,14 +1266,27 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_chunk[i].inode = inode; async_chunk[i].start = start; async_chunk[i].end = cur_end; - async_chunk[i].locked_page = locked_page; async_chunk[i].write_flags = write_flags; INIT_LIST_HEAD(&async_chunk[i].extents); - btrfs_init_work(&async_chunk[i].work, - btrfs_delalloc_helper, - async_cow_start, async_cow_submit, - async_cow_free); + /* + * The locked_page comes all the way from writepage and it's + * the original page we were actually given. As we spread + * this large delalloc region across multiple async_chunk + * structs, only the first struct needs a pointer to locked_page. + * + * This way we don't need racy decisions about who is supposed + * to unlock it. + */ + if (locked_page) { + async_chunk[i].locked_page = locked_page; + locked_page = NULL; + } else { + async_chunk[i].locked_page = NULL; + } + + btrfs_init_work(&async_chunk[i].work, async_cow_start, + async_cow_submit, async_cow_free); nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); atomic_add(nr_pages, &fs_info->async_delalloc_pages); @@ -1439,10 +1454,10 @@ next_slot: disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); /* - * If extent we got ends before our range starts, skip - * to next extent + * If the extent we got ends before our current offset, + * skip to the next extent. */ - if (extent_end <= start) { + if (extent_end <= cur_offset) { path->slots[0]++; goto next_slot; } @@ -2153,6 +2168,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, /* see btrfs_writepage_start_hook for details on why this is required */ struct btrfs_writepage_fixup { struct page *page; + struct inode *inode; struct btrfs_work work; }; @@ -2166,27 +2182,71 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) struct inode *inode; u64 page_start; u64 page_end; - int ret; + int ret = 0; + bool free_delalloc_space = true; fixup = container_of(work, struct btrfs_writepage_fixup, work); page = fixup->page; + inode = fixup->inode; + page_start = page_offset(page); + page_end = page_offset(page) + PAGE_SIZE - 1; + + /* + * This is similar to page_mkwrite, we need to reserve the space before + * we take the page lock. + */ + ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, + PAGE_SIZE); again: lock_page(page); + + /* + * Before we queued this fixup, we took a reference on the page. + * page->mapping may go NULL, but it shouldn't be moved to a different + * address space. + */ if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { - ClearPageChecked(page); + /* + * Unfortunately this is a little tricky, either + * + * 1) We got here and our page had already been dealt with and + * we reserved our space, thus ret == 0, so we need to just + * drop our space reservation and bail.
This can happen the + * first time we come into the fixup worker, or could happen + * while waiting for the ordered extent. + * 2) Our page was already dealt with, but we happened to get an + * ENOSPC above from the btrfs_delalloc_reserve_space. In + * this case we obviously don't have anything to release, but + * because the page was already dealt with we don't want to + * mark the page with an error, so make sure we're resetting + * ret to 0. This is why we have this check _before_ the ret + * check, because we do not want to have a surprise ENOSPC + * when the page was already properly dealt with. + */ + if (!ret) { + btrfs_delalloc_release_extents(BTRFS_I(inode), + PAGE_SIZE); + btrfs_delalloc_release_space(inode, data_reserved, + page_start, PAGE_SIZE, + true); + } + ret = 0; goto out_page; } - inode = page->mapping->host; - page_start = page_offset(page); - page_end = page_offset(page) + PAGE_SIZE - 1; + /* + * We can't mess with the page state unless it is locked, so now that + * it is locked bail if we failed to make our space reservation. + */ + if (ret) + goto out_page; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state); /* already ordered? We're done */ if (PagePrivate2(page)) - goto out; + goto out_reserved; ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); @@ -2199,35 +2259,49 @@ again: goto again; } - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, - PAGE_SIZE); - if (ret) { - mapping_set_error(page->mapping, ret); - end_extent_writepage(page, ret, page_start, page_end); - ClearPageChecked(page); - goto out; - } - ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state); - if (ret) { - mapping_set_error(page->mapping, ret); - end_extent_writepage(page, ret, page_start, page_end); - ClearPageChecked(page); - goto out; - } + if (ret) + goto out_reserved; - ClearPageChecked(page); - set_page_dirty(page); + /* + * Everything went as planned, we're now the owner of a dirty page with + * delayed allocation bits set and space reserved for our COW + * destination. + * + * The page was dirty when we started, nothing should have cleaned it. + */ + BUG_ON(!PageDirty(page)); + free_delalloc_space = false; +out_reserved: btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); -out: + if (free_delalloc_space) + btrfs_delalloc_release_space(inode, data_reserved, page_start, + PAGE_SIZE, true); unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state); out_page: + if (ret) { + /* + * We hit ENOSPC or other errors. Update the mapping and page + * to reflect the errors and clean the page. + */ + mapping_set_error(page->mapping, ret); + end_extent_writepage(page, ret, page_start, page_end); + clear_page_dirty_for_io(page); + SetPageError(page); + } + ClearPageChecked(page); unlock_page(page); put_page(page); kfree(fixup); extent_changeset_free(data_reserved); + /* + * As a precaution, do a delayed iput in case it would be the last iput + * that could need flushing space. Recursing back to fixup worker would + * deadlock. + */ + btrfs_add_delayed_iput(inode); } /* @@ -2251,6 +2325,13 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) if (TestClearPagePrivate2(page)) return 0; + /* + * PageChecked is set below when we create a fixup worker for this page, + * don't try to create another one if we're already PageChecked() + * + * The extent_io writepage code will redirty the page if we send back + * EAGAIN. 
+ */ if (PageChecked(page)) return -EAGAIN; @@ -2258,13 +2339,21 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) if (!fixup) return -EAGAIN; + /* + * We are already holding a reference to this inode from + * write_cache_pages. We need to hold it because the space reservation + * takes place outside of the page lock, and we can't trust + * page->mapping outside of the page lock. + */ + ihold(inode); SetPageChecked(page); get_page(page); - btrfs_init_work(&fixup->work, btrfs_fixup_helper, - btrfs_writepage_fixup_worker, NULL, NULL); + btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); fixup->page = page; + fixup->inode = inode; btrfs_queue_work(fs_info->fixup_workers, &fixup->work); - return -EBUSY; + + return -EAGAIN; } static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, @@ -3254,7 +3343,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ordered_extent *ordered_extent = NULL; struct btrfs_workqueue *wq; - btrfs_work_func_t func; trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); @@ -3263,16 +3351,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, end - start + 1, uptodate)) return; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } - btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, - NULL); + btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered_extent->work); } @@ -4202,18 +4286,30 @@ out: } static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, - struct inode *dir, u64 objectid, - const char *name, int name_len) + struct inode *dir, struct dentry *dentry) { struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; struct btrfs_key key; + const char *name = dentry->d_name.name; + int name_len = dentry->d_name.len; u64 index; int ret; + u64 objectid; u64 dir_ino = btrfs_ino(BTRFS_I(dir)); + if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { + objectid = inode->root->root_key.objectid; + } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { + objectid = inode->location.objectid; + } else { + WARN_ON(1); + return -EINVAL; + } + path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -4235,13 +4331,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, } btrfs_release_path(path); - ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid, - dir_ino, &index, name, name_len); - if (ret < 0) { - if (ret != -ENOENT) { - btrfs_abort_transaction(trans, ret); - goto out; - } + /* + * This is a placeholder inode for a subvolume we didn't have a + * reference to at the time of the snapshot creation. In the meantime + * we could have renamed the real subvol link into our snapshot, so + * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. + * Instead simply look up the dir_index_item for this entry so we can + * remove it. Otherwise we know we have a ref to the root and we can + * call btrfs_del_root_ref, and it _shouldn't_ fail.
+ */ + if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { di = btrfs_search_dir_index_item(root, path, dir_ino, name, name_len); if (IS_ERR_OR_NULL(di)) { @@ -4256,8 +4355,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); index = key.offset; + btrfs_release_path(path); + } else { + ret = btrfs_del_root_ref(trans, objectid, + root->root_key.objectid, dir_ino, + &index, name, name_len); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } } - btrfs_release_path(path); ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index); if (ret) { @@ -4451,8 +4558,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); - ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid, - dentry->d_name.name, dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, dir, dentry); if (ret) { err = ret; btrfs_abort_transaction(trans, ret); @@ -4547,10 +4653,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) return PTR_ERR(trans); if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - err = btrfs_unlink_subvol(trans, dir, - BTRFS_I(inode)->location.objectid, - dentry->d_name.name, - dentry->d_name.len); + err = btrfs_unlink_subvol(trans, dir, dentry); goto out; } @@ -4631,6 +4734,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u64 bytes_deleted = 0; bool be_nice = false; bool should_throttle = false; + const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); + struct extent_state *cached_state = NULL; BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); @@ -4647,6 +4752,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->reada = READA_BACK; + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, + &cached_state); + /* * We want to drop from the next block forward in case this new size is * not block aligned since we will be keeping the last block of the @@ -4683,7 +4792,6 @@ search_again: goto out; } - path->leave_spinning = 1; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) goto out; @@ -4835,7 +4943,6 @@ delete: root == fs_info->tree_root)) { struct btrfs_ref ref = { 0 }; - btrfs_set_path_blocking(path); bytes_deleted += extent_num_bytes; btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, @@ -4911,6 +5018,8 @@ out: if (!ret && last_size > new_size) last_size = new_size; btrfs_ordered_update_i_size(inode, last_size, NULL); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, + (u64)-1, &cached_state); } btrfs_free_path(path); @@ -5693,7 +5802,6 @@ static void inode_tree_add(struct inode *inode) static void inode_tree_del(struct inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; int empty = 0; @@ -5706,7 +5814,6 @@ static void inode_tree_del(struct inode *inode) spin_unlock(&root->inode_lock); if (empty && btrfs_root_refs(&root->root_item) == 0) { - synchronize_srcu(&fs_info->subvol_srcu); spin_lock(&root->inode_lock); empty = RB_EMPTY_ROOT(&root->inode_tree); spin_unlock(&root->inode_lock); @@ -8211,18 +8318,14 @@ static void __endio_write_update_ordered(struct inode *inode, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ordered_extent *ordered = NULL; struct btrfs_workqueue *wq; - btrfs_work_func_t func; u64 
ordered_offset = offset; u64 ordered_bytes = bytes; u64 last_offset; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } while (ordered_offset < offset + bytes) { last_offset = ordered_offset; @@ -8230,9 +8333,8 @@ static void __endio_write_update_ordered(struct inode *inode, &ordered_offset, ordered_bytes, uptodate)) { - btrfs_init_work(&ordered->work, func, - finish_ordered_fn, - NULL, NULL); + btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, + NULL); btrfs_queue_work(wq, &ordered->work); } /* @@ -8324,6 +8426,7 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, { struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); + u16 csum_size; blk_status_t ret; /* @@ -8343,7 +8446,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, file_offset -= dip->logical_offset; file_offset >>= inode->i_sb->s_blocksize_bits; - io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset); + csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy); + io_bio->csum = orig_io_bio->csum + csum_size * file_offset; return 0; } @@ -9513,7 +9617,6 @@ static int btrfs_rename_exchange(struct inode *old_dir, u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); u64 old_idx = 0; u64 new_idx = 0; - u64 root_objectid; int ret; bool root_log_pinned = false; bool dest_log_pinned = false; @@ -9531,9 +9634,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, btrfs_init_log_ctx(&ctx_dest, new_inode); /* close the race window with snapshot create/destroy ioctl */ - if (old_ino == BTRFS_FIRST_FREE_OBJECTID) - down_read(&fs_info->subvol_sem); - if (new_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID || + new_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&fs_info->subvol_sem); /* @@ -9550,6 +9652,9 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_notrans; } + if (dest != root) + btrfs_record_root_in_trans(trans, dest); + /* * We need to find a free sequence number both in the source and * in the destination directory for the exchange. @@ -9617,10 +9722,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, /* src is a subvolume */ if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { - root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; - ret = btrfs_unlink_subvol(trans, old_dir, root_objectid, - old_dentry->d_name.name, - old_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); } else { /* src is an inode */ ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), BTRFS_I(old_dentry->d_inode), @@ -9636,10 +9738,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, /* dest is a subvolume */ if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { - root_objectid = BTRFS_I(new_inode)->root->root_key.objectid; - ret = btrfs_unlink_subvol(trans, new_dir, root_objectid, - new_dentry->d_name.name, - new_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); } else { /* dest is an inode */ ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), BTRFS_I(new_dentry->d_inode), @@ -9764,9 +9863,8 @@ out_fail: ret = ret ? 
ret : ret2; } out_notrans: - if (new_ino == BTRFS_FIRST_FREE_OBJECTID) - up_read(&fs_info->subvol_sem); - if (old_ino == BTRFS_FIRST_FREE_OBJECTID) + if (new_ino == BTRFS_FIRST_FREE_OBJECTID || + old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); ASSERT(list_empty(&ctx_root.list)); @@ -9838,7 +9936,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_inode = d_inode(new_dentry); struct inode *old_inode = d_inode(old_dentry); u64 index = 0; - u64 root_objectid; int ret; u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); bool log_pinned = false; @@ -9946,10 +10043,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, BTRFS_I(old_inode), 1); if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { - root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; - ret = btrfs_unlink_subvol(trans, old_dir, root_objectid, - old_dentry->d_name.name, - old_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); } else { ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), BTRFS_I(d_inode(old_dentry)), @@ -9968,10 +10062,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_inode->i_ctime = current_time(new_inode); if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - root_objectid = BTRFS_I(new_inode)->location.objectid; - ret = btrfs_unlink_subvol(trans, new_dir, root_objectid, - new_dentry->d_name.name, - new_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); BUG_ON(new_inode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), @@ -10051,6 +10142,10 @@ out_fail: ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx); if (ret) commit_transaction = true; + } else if (sync_log) { + mutex_lock(&root->log_mutex); + list_del(&ctx.list); + mutex_unlock(&root->log_mutex); } if (commit_transaction) { ret = btrfs_commit_transaction(trans); @@ -10116,8 +10211,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode init_completion(&work->completion); INIT_LIST_HEAD(&work->list); work->inode = inode; - btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, - btrfs_run_delalloc_work, NULL, NULL); + btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); return work; } @@ -10382,6 +10476,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key ins; u64 cur_offset = start; + u64 clear_offset = start; u64 i_size; u64 cur_bytes; u64 last_alloc = (u64)-1; @@ -10416,6 +10511,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, btrfs_end_transaction(trans); break; } + + /* + * We've reserved this space, and thus converted it from + * ->bytes_may_use to ->bytes_reserved. Any error that happens + * from here on out we will only need to clear our reservation + * for the remaining unreserved area, so advance our + * clear_offset by our extent size. 
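A short note on the btrfs_lookup_and_bind_dio_csum() change a bit further up: the old expression stepped through the checksum array with u32 pointer arithmetic, hard-coding 4-byte checksums, while the fix scales a byte pointer by the real csum_size. A standalone illustration of the difference, assuming a 32-byte checksum purely for demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t csums[8 * 32];
	unsigned int csum_size = 32;	/* e.g. a sha256-sized checksum */
	uint64_t file_offset = 3;	/* block index into the csum array */

	/* old style: advances by 4 bytes per block regardless of csum size */
	uint8_t *wrong = (uint8_t *)(((uint32_t *)csums) + file_offset);
	/* fixed style: advances by the actual per-block checksum size */
	uint8_t *right = csums + csum_size * file_offset;

	printf("wrong offset: %td\n", wrong - csums);	/* 12 */
	printf("right offset: %td\n", right - csums);	/* 96 */
	return 0;
}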
+ */ + clear_offset += ins.offset; btrfs_dec_block_group_reservations(fs_info, ins.objectid); last_alloc = ins.offset; @@ -10496,9 +10600,9 @@ next: if (own_trans) btrfs_end_transaction(trans); } - if (cur_offset < end) - btrfs_free_reserved_data_space(inode, NULL, cur_offset, - end - cur_offset + 1); + if (clear_offset < end) + btrfs_free_reserved_data_space(inode, NULL, clear_offset, + end - clear_offset + 1); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 23272d9154f3..d88b8d8897cc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -705,11 +705,17 @@ static noinline int create_subvol(struct inode *dir, btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); ret = btrfs_update_inode(trans, root, dir); - BUG_ON(ret); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto fail; + } ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid, btrfs_ino(BTRFS_I(dir)), index, name, namelen); - BUG_ON(ret); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto fail; + } ret = btrfs_uuid_tree_add(trans, root_item->uuid, BTRFS_UUID_KEY_SUBVOL, objectid); @@ -3238,6 +3244,7 @@ static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, struct inode *dst, u64 dst_loff) { + const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; int ret; /* @@ -3245,7 +3252,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, * source range to serialize with relocation. */ btrfs_double_extent_lock(src, loff, dst, dst_loff, len); - ret = btrfs_clone(src, dst, loff, len, len, dst_loff, 1); + ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1); btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); return ret; @@ -3721,24 +3728,18 @@ process_slot: ret = 0; if (last_dest_end < destoff + len) { - struct btrfs_clone_extent_info clone_info = { 0 }; /* - * We have an implicit hole (NO_HOLES feature is enabled) that - * fully or partially overlaps our cloning range at its end. + * We have an implicit hole that fully or partially overlaps our + * cloning range at its end. This means that we either have the + * NO_HOLES feature enabled or the implicit hole happened due to + * mixing buffered and direct IO writes against this file. */ btrfs_release_path(path); path->leave_spinning = 0; - /* - * We are dealing with a hole and our clone_info already has a - * disk_offset of 0, we only need to fill the data length and - * file offset. - */ - clone_info.data_len = destoff + len - last_dest_end; - clone_info.file_offset = last_dest_end; ret = btrfs_punch_hole_range(inode, path, last_dest_end, destoff + len - 1, - &clone_info, &trans); + NULL, &trans); if (ret) goto out; @@ -4254,7 +4255,19 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg) &sa->progress, sa->flags & BTRFS_SCRUB_READONLY, 0); - if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa))) + /* + * Copy scrub args to user space even if btrfs_scrub_dev() returned an + * error. This is important as it allows user space to know how much + * progress scrub has done. For example, if scrub is canceled we get + * -ECANCELED from btrfs_scrub_dev() and return that error back to user + * space. Later user space can inspect the progress from the structure + * btrfs_ioctl_scrub_args and resume scrub from where it left off + * previously (btrfs-progs does this). 
+ If we fail to copy the btrfs_ioctl_scrub_args structure to user space + then return -EFAULT to signal the structure was not copied or it may + be corrupt and unreliable due to a partial copy. + */ + if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; if (!(sa->flags & BTRFS_SCRUB_READONLY)) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 24b6c72b9a59..00e1ef4f7979 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, spin_unlock(&root->ordered_extent_lock); btrfs_init_work(&ordered->flush_work, - btrfs_flush_delalloc_helper, btrfs_run_ordered_extent_work, NULL, NULL); list_add_tail(&ordered->work_list, &works); btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work); @@ -691,10 +690,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) } btrfs_start_ordered_extent(inode, ordered, 1); end = ordered->file_offset; + /* + * If the ordered extent had an error, save the error but don't + * exit without waiting first for all other ordered extents in + * the range to complete. + */ if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) ret = -EIO; btrfs_put_ordered_extent(ordered); - if (ret || end == 0 || end == start) + if (end == 0 || end == start) break; end--; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 3ad151655eb8..286c8c11c8d3 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2423,8 +2423,12 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 nr_old_roots = 0; int ret = 0; + /* + * If quotas get disabled meanwhile, the resources need to be freed and + * we can't just exit here. + */ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) - return 0; + goto out_free; if (new_roots) { if (!maybe_fs_roots(new_roots)) @@ -3232,12 +3236,12 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { btrfs_warn(fs_info, - "qgroup rescan init failed, qgroup is not enabled"); + "qgroup rescan init failed, qgroup rescan is not queued"); ret = -EINVAL; } else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) { btrfs_warn(fs_info, - "qgroup rescan init failed, qgroup rescan is not queued"); + "qgroup rescan init failed, qgroup is not enabled"); ret = -EINVAL; } @@ -3280,7 +3284,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, memset(&fs_info->qgroup_rescan_work, 0, sizeof(fs_info->qgroup_rescan_work)); btrfs_init_work(&fs_info->qgroup_rescan_work, - btrfs_qgroup_rescan_helper, btrfs_qgroup_rescan_worker, NULL, NULL); return 0; } @@ -4015,3 +4018,16 @@ out: } return ret; } + +void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans) +{ + struct btrfs_qgroup_extent_record *entry; + struct btrfs_qgroup_extent_record *next; + struct rb_root *root; + + root = &trans->delayed_refs.dirty_extent_root; + rbtree_postorder_for_each_entry_safe(entry, next, root, node) { + ulist_free(entry->old_roots); + kfree(entry); + } +} diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 46ba7bd2961c..17e8ac992c50 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, u64 last_snapshot); int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *eb); +void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
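btrfs_qgroup_destroy_extent_records() above relies on the property that a postorder walk can free the node it is standing on: rbtree_postorder_for_each_entry_safe() visits children before parents and fetches the next entry before the body runs. A userspace analogue of that teardown on a plain binary tree (recursive rather than the kernel's iterator, and every name here is illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

/* Free every node; children are released before their parent, which is
 * the same guarantee the kernel's postorder iteration provides. */
static void free_postorder(struct node *n)
{
	if (!n)
		return;
	free_postorder(n->left);
	free_postorder(n->right);
	free(n);
}

static struct node *new_node(int key)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		abort();
	n->key = key;
	return n;
}

int main(void)
{
	struct node *root = new_node(2);

	root->left = new_node(1);
	root->right = new_node(3);
	free_postorder(root);
	puts("tree torn down");
	return 0;
}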
#endif diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 57a2ac721985..8f47a85944eb 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work); static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) { - btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); + btrfs_init_work(&rbio->work, work_func, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } @@ -1743,8 +1743,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) plug = container_of(cb, struct btrfs_plug_cb, cb); if (from_schedule) { - btrfs_init_work(&plug->work, btrfs_rmw_helper, - unplug_work, NULL, NULL); + btrfs_init_work(&plug->work, unplug_work, NULL, NULL); btrfs_queue_work(plug->info->rmw_workers, &plug->work); return; diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index ee6f60547a8d..1feaeadc8cf5 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -752,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev) static void reada_start_machine_worker(struct btrfs_work *work) { struct reada_machine_work *rmw; - struct btrfs_fs_info *fs_info; int old_ioprio; rmw = container_of(work, struct reada_machine_work, work); - fs_info = rmw->fs_info; - - kfree(rmw); old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current), task_nice_ioprio(current)); set_task_ioprio(current, BTRFS_IOPRIO_READA); - __reada_start_machine(fs_info); + __reada_start_machine(rmw->fs_info); set_task_ioprio(current, old_ioprio); - atomic_dec(&fs_info->reada_works_cnt); + atomic_dec(&rmw->fs_info->reada_works_cnt); + + kfree(rmw); } static void __reada_start_machine(struct btrfs_fs_info *fs_info) @@ -821,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info) /* FIXME we cannot handle this properly right now */ BUG(); } - btrfs_init_work(&rmw->work, btrfs_readahead_helper, - reada_start_machine_worker, NULL, NULL); + btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); rmw->fs_info = fs_info; btrfs_queue_work(fs_info->readahead_workers, &rmw->work); diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index b57f3618e58e..454a1015d026 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, */ be = add_block_entry(fs_info, bytenr, num_bytes, ref_root); if (IS_ERR(be)) { + kfree(ref); kfree(ra); ret = PTR_ERR(be); goto out; @@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, "re-allocated a block that still has references to it!"); dump_block_entry(fs_info, be); dump_ref_action(fs_info, ra); + kfree(ref); + kfree(ra); goto out_unlock; } @@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, "dropping a ref for a existing root that doesn't have a ref on the block"); dump_block_entry(fs_info, be); dump_ref_action(fs_info, ra); + kfree(ref); kfree(ra); goto out_unlock; } @@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, "attempting to add another ref for an existing ref on a tree block"); dump_block_entry(fs_info, be); dump_ref_action(fs_info, ra); + kfree(ref); kfree(ra); goto out_unlock; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 5cd42b66818c..bc1d7f144ace 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -517,6 +517,34 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, return 1; } +static bool reloc_root_is_dead(struct btrfs_root *root) 
+{ + /* + * Pair with set_bit/clear_bit in clean_dirty_subvols and + * btrfs_update_reloc_root. We need to see the updated bit before + * trying to access reloc_root. + */ + smp_rmb(); + if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)) + return true; + return false; +} + +/* + * Check if this subvolume tree has a valid reloc tree. + * + * A reloc tree after swap is considered dead, thus not considered valid. + * This is enough for most callers, as they don't distinguish a dead reloc + * root from no reloc root. But should_ignore_root() below is a special case. + */ +static bool have_reloc_root(struct btrfs_root *root) +{ + if (reloc_root_is_dead(root)) + return false; + if (!root->reloc_root) + return false; + return true; +} static int should_ignore_root(struct btrfs_root *root) { @@ -525,6 +553,10 @@ static int should_ignore_root(struct btrfs_root *root) if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) return 0; + /* This root has been merged with its reloc tree, we can ignore it */ + if (reloc_root_is_dead(root)) + return 1; + reloc_root = root->reloc_root; if (!reloc_root) return 0; @@ -1439,7 +1471,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, * The subvolume has reloc tree but the swap is finished, no need to * create/update the dead reloc tree */ - if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)) + if (reloc_root_is_dead(root)) return 0; if (root->reloc_root) { @@ -1478,8 +1510,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root_item *root_item; int ret; - if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) || - !root->reloc_root) + if (!have_reloc_root(root)) goto out; reloc_root = root->reloc_root; @@ -1489,6 +1520,11 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, if (fs_info->reloc_ctl->merge_reloc_tree && btrfs_root_refs(root_item) == 0) { set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); + /* + * Mark the tree as dead before we change reloc_root so + * have_reloc_root will not touch it from now on. + */ + smp_wmb(); __del_reloc_root(reloc_root); } @@ -2202,6 +2238,11 @@ static int clean_dirty_subvols(struct reloc_control *rc) if (ret2 < 0 && !ret) ret = ret2; } + /* + * Need barrier to ensure clear_bit() only happens after + * root->reloc_root = NULL. Pairs with have_reloc_root.
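The smp_wmb()/smp_rmb() pair introduced above makes the writer publish the DEAD_RELOC_TREE bit before reloc_root is torn down, and makes readers observe that bit before trusting the pointer. A portable C11 sketch of the same publish-then-check intent, using release/acquire on the pointer rather than raw barriers (the struct, the names and the single-threaded main() are illustrative; the kernel's exact barrier placement differs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct root {
	atomic_bool dead;		/* BTRFS_ROOT_DEAD_RELOC_TREE analogue */
	_Atomic(void *) reloc_root;
};

/* Writer side: flag first, then tear down the pointer. The release store
 * guarantees a reader that sees the NULL pointer also sees dead == true. */
static void kill_reloc_root(struct root *r)
{
	atomic_store_explicit(&r->dead, true, memory_order_relaxed);
	atomic_store_explicit(&r->reloc_root, NULL, memory_order_release);
}

/* Reader side: a root only counts as having a live reloc tree if the
 * pointer is set and the dead flag has not been observed. */
static bool have_reloc_root(struct root *r)
{
	if (atomic_load_explicit(&r->reloc_root, memory_order_acquire) == NULL)
		return false;
	return !atomic_load_explicit(&r->dead, memory_order_relaxed);
}

int main(void)
{
	static int dummy;
	struct root r;

	atomic_init(&r.dead, false);
	atomic_init(&r.reloc_root, &dummy);
	printf("before: %d\n", have_reloc_root(&r));	/* 1 */
	kill_reloc_root(&r);
	printf("after:  %d\n", have_reloc_root(&r));	/* 0 */
	return 0;
}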
+ */ + smp_wmb(); clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); btrfs_put_fs_root(root); } else { @@ -4555,6 +4596,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) fs_root = read_fs_root(fs_info, reloc_root->root_key.offset); if (IS_ERR(fs_root)) { err = PTR_ERR(fs_root); + list_add_tail(&reloc_root->root_list, &reloc_roots); goto out_free; } @@ -4720,7 +4762,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, struct btrfs_root *root = pending->root; struct reloc_control *rc = root->fs_info->reloc_ctl; - if (!root->reloc_root || !rc) + if (!rc || !have_reloc_root(root)) return; if (!rc->merge_reloc_tree) @@ -4754,7 +4796,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct reloc_control *rc = root->fs_info->reloc_ctl; int ret; - if (!root->reloc_root || !rc) + if (!rc || !have_reloc_root(root)) return 0; rc = root->fs_info->reloc_ctl; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 3b17b647d002..612411c74550 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -376,11 +376,13 @@ again: leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - - WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid); - WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len); ptr = (unsigned long)(ref + 1); - WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len)); + if ((btrfs_root_ref_dirid(leaf, ref) != dirid) || + (btrfs_root_ref_name_len(leaf, ref) != name_len) || + memcmp_extent_buffer(leaf, name, ptr, name_len)) { + err = -ENOENT; + goto out; + } *sequence = btrfs_root_ref_sequence(leaf, ref); ret = btrfs_del_item(trans, tree_root, path); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f7d4e03f4c5d..a7b043fd7a57 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -598,8 +598,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; - btrfs_init_work(&sbio->work, btrfs_scrub_helper, - scrub_bio_end_io_worker, NULL, NULL); + btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, + NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; @@ -1720,8 +1720,7 @@ static void scrub_wr_bio_end_io(struct bio *bio) sbio->status = bio->bi_status; sbio->bio = bio; - btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, - scrub_wr_bio_end_io_worker, NULL, NULL); + btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } @@ -2149,14 +2148,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work) scrub_write_block_to_dev_replace(sblock); } - scrub_block_put(sblock); - if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } + scrub_block_put(sblock); scrub_pending_bio_dec(sctx); } @@ -2204,8 +2202,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock) raid56_add_scrub_pages(rbio, spage->page, spage->logical); } - btrfs_init_work(&sblock->work, btrfs_scrub_helper, - scrub_missing_raid56_worker, NULL, NULL); + btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); @@ -2743,8 +2740,8 @@ static void scrub_parity_bio_endio(struct bio *bio) bio_put(bio); - btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, - scrub_parity_bio_endio_worker, NULL, NULL); + btrfs_init_work(&sparity->work, 
scrub_parity_bio_endio_worker, NULL, + NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 123ac54af071..3eb0fec2488a 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -24,6 +24,14 @@ #include "transaction.h" #include "compression.h" +/* + * Maximum number of references an extent can have in order for us to attempt to + * issue clone operations instead of write operations. This currently exists to + * avoid hitting limitations of the backreference walking code (taking a lot of + * time and using too much memory for extents with large number of references). + */ +#define SEND_MAX_EXTENT_REFS 64 + /* * A fs_path is a helper to dynamically build path names with unknown size. * It reallocates the internal buffer on demand. @@ -1302,6 +1310,7 @@ static int find_extent_clone(struct send_ctx *sctx, struct clone_root *cur_clone_root; struct btrfs_key found_key; struct btrfs_path *tmp_path; + struct btrfs_extent_item *ei; int compressed; u32 i; @@ -1349,7 +1358,6 @@ static int find_extent_clone(struct send_ctx *sctx, ret = extent_from_logical(fs_info, disk_byte, tmp_path, &found_key, &flags); up_read(&fs_info->commit_root_sem); - btrfs_release_path(tmp_path); if (ret < 0) goto out; @@ -1358,6 +1366,21 @@ static int find_extent_clone(struct send_ctx *sctx, goto out; } + ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0], + struct btrfs_extent_item); + /* + * Backreference walking (iterate_extent_inodes() below) is currently + * too expensive when an extent has a large number of references, both + * in time spent and used memory. So for now just fallback to write + * operations instead of clone operations when an extent has more than + * a certain amount of references. + */ + if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) { + ret = -ENOENT; + goto out; + } + btrfs_release_path(tmp_path); + /* * Setup the clone roots. */ @@ -7052,12 +7075,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) send_root->send_in_progress++; spin_unlock(&send_root->root_item_lock); - /* - * This is done when we lookup the root, it should already be complete - * by the time we get here. - */ - WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE); - /* * Userspace tools do the checks and warn the user if it's * not RO. diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 1b151af25772..aea24202cd35 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1804,6 +1804,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) } if (btrfs_super_log_root(fs_info->super_copy) != 0) { + btrfs_warn(fs_info, + "mount required to replay tree-log, cannot remount read-write"); ret = -EINVAL; goto restore; } @@ -2102,7 +2104,15 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) */ thresh = SZ_4M; - if (!mixed && total_free_meta - thresh < block_rsv->size) + /* + * We only want to claim there's no available space if we can no longer + * allocate chunks for our metadata profile and our global reserve will + * not fit in the free metadata space. If we aren't ->full then we + * still can allocate chunks and thus are fine using the currently + * calculated f_bavail. 
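The statfs() comment above boils down to one predicate, applied in the condition that follows just below: f_bavail is only forced to zero when no further metadata chunk can be allocated and the global reserve no longer fits in the free metadata space. A hedged restatement of that predicate as a standalone function (all names and the sample values are illustrative, and like the kernel code it assumes total_free_meta is at least thresh):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t adjust_bavail(uint64_t bavail, bool mixed, bool meta_full,
			      uint64_t total_free_meta, uint64_t rsv_size)
{
	const uint64_t thresh = UINT64_C(4) << 20;	/* SZ_4M */

	if (!mixed && meta_full && total_free_meta - thresh < rsv_size)
		return 0;
	return bavail;
}

int main(void)
{
	/* metadata exhausted: 4M free, reserve needs more than that */
	printf("%llu\n", (unsigned long long)
	       adjust_bavail(1000, false, true, UINT64_C(4) << 20, 1));
	/* chunks can still be allocated: keep the computed value */
	printf("%llu\n", (unsigned long long)
	       adjust_bavail(1000, false, false, UINT64_C(4) << 20, 1));
	return 0;
}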
+ */ + if (!mixed && block_rsv->space_info->full && + total_free_meta - thresh < block_rsv->size) buf->f_bavail = 0; buf->f_type = BTRFS_SUPER_MAGIC; diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 99fe9bf3fdac..98f9684e7ffc 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -121,7 +121,6 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) spin_lock_init(&fs_info->qgroup_lock); spin_lock_init(&fs_info->super_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); - spin_lock_init(&fs_info->tree_mod_seq_lock); mutex_init(&fs_info->qgroup_ioctl_lock); mutex_init(&fs_info->qgroup_rescan_lock); rwlock_init(&fs_info->tree_mod_log_lock); diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 123d9a614357..df7ce874a74b 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -441,8 +441,17 @@ static int test_find_first_clear_extent_bit(void) int ret = -EINVAL; test_msg("running find_first_clear_extent_bit test"); + extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL); + /* Test correct handling of empty tree */ + find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED); + if (start != 0 || end != -1) { + test_err( + "error getting a range from completely empty tree: start %llu end %llu", + start, end); + goto out; + } /* * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between * 4M-32M diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index bc92df977630..6e774d055402 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -463,9 +463,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, root->fs_info->tree_root = root; root->node = alloc_test_extent_buffer(root->fs_info, nodesize); - if (!root->node) { + if (IS_ERR(root->node)) { test_std_err(TEST_ALLOC_EXTENT_BUFFER); - ret = -ENOMEM; + ret = PTR_ERR(root->node); goto out; } btrfs_set_header_level(root->node, 0); diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 09aaca1efd62..ac035a6fa003 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -484,9 +484,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) * *cough*backref walking code*cough* */ root->node = alloc_test_extent_buffer(root->fs_info, nodesize); - if (!root->node) { + if (IS_ERR(root->node)) { test_err("couldn't allocate dummy buffer"); - ret = -ENOMEM; + ret = PTR_ERR(root->node); goto out; } btrfs_set_header_level(root->node, 0); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 8624bdee8c5b..98b6903e3938 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -51,6 +51,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction) BUG_ON(!list_empty(&transaction->list)); WARN_ON(!RB_EMPTY_ROOT( &transaction->delayed_refs.href_root.rb_root)); + WARN_ON(!RB_EMPTY_ROOT( + &transaction->delayed_refs.dirty_extent_root)); if (transaction->delayed_refs.pending_csums) btrfs_err(transaction->fs_info, "pending csums is %llu", @@ -77,13 +79,14 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction) } } -static noinline void switch_commit_roots(struct btrfs_transaction *trans) +static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) { + struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root, *tmp; 
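Both selftest hunks above follow from the same convention change: alloc_test_extent_buffer() now returns an ERR_PTR() on failure instead of NULL, so a plain NULL check would let an error pointer through and crash on a later dereference. The caller pattern, as a sketch:

	eb = alloc_test_extent_buffer(fs_info, nodesize);
	if (IS_ERR(eb))
		return PTR_ERR(eb);	/* propagate the encoded errno, not a
					 * hardcoded -ENOMEM */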
down_write(&fs_info->commit_root_sem); - list_for_each_entry_safe(root, tmp, &trans->switch_commits, + list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits, dirty_list) { list_del_init(&root->dirty_list); free_extent_buffer(root->commit_root); @@ -95,16 +98,17 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans) } /* We can free old roots now. */ - spin_lock(&trans->dropped_roots_lock); - while (!list_empty(&trans->dropped_roots)) { - root = list_first_entry(&trans->dropped_roots, + spin_lock(&cur_trans->dropped_roots_lock); + while (!list_empty(&cur_trans->dropped_roots)) { + root = list_first_entry(&cur_trans->dropped_roots, struct btrfs_root, root_list); list_del_init(&root->root_list); - spin_unlock(&trans->dropped_roots_lock); + spin_unlock(&cur_trans->dropped_roots_lock); + btrfs_free_log(trans, root); btrfs_drop_and_free_fs_root(fs_info, root); - spin_lock(&trans->dropped_roots_lock); + spin_lock(&cur_trans->dropped_roots_lock); } - spin_unlock(&trans->dropped_roots_lock); + spin_unlock(&cur_trans->dropped_roots_lock); up_write(&fs_info->commit_root_sem); } @@ -1359,7 +1363,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, ret = commit_cowonly_roots(trans); if (ret) goto out; - switch_commit_roots(trans->transaction); + switch_commit_roots(trans); ret = btrfs_write_and_wait_transaction(trans); if (ret) btrfs_handle_fs_error(fs_info, ret, @@ -1949,6 +1953,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) struct btrfs_transaction *prev_trans = NULL; int ret; + /* + * Some places just start a transaction to commit it. We need to make + * sure that if this commit fails that the abort code actually marks the + * transaction as failed, so set trans->dirty to make the abort code do + * the right thing. 
+ */ + trans->dirty = true; + /* Stop the commit early if ->aborted is set */ if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; @@ -2237,7 +2249,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) list_add_tail(&fs_info->chunk_root->dirty_list, &cur_trans->switch_commits); - switch_commit_roots(cur_trans); + switch_commit_roots(trans); ASSERT(list_empty(&cur_trans->dirty_bgs)); ASSERT(list_empty(&cur_trans->io_bgs)); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 076d5b8014fb..0e44db066641 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -243,7 +243,7 @@ static int check_extent_data_item(struct extent_buffer *leaf, } static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, - int slot) + int slot, struct btrfs_key *prev_key) { struct btrfs_fs_info *fs_info = leaf->fs_info; u32 sectorsize = fs_info->sectorsize; @@ -267,6 +267,20 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, btrfs_item_size_nr(leaf, slot), csumsize); return -EUCLEAN; } + if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) { + u64 prev_csum_end; + u32 prev_item_size; + + prev_item_size = btrfs_item_size_nr(leaf, slot - 1); + prev_csum_end = (prev_item_size / csumsize) * sectorsize; + prev_csum_end += prev_key->offset; + if (prev_csum_end > key->offset) { + generic_err(leaf, slot - 1, +"csum end range (%llu) goes beyond the start range (%llu) of the next csum item", + prev_csum_end, key->offset); + return -EUCLEAN; + } + } return 0; } @@ -1239,7 +1253,7 @@ static int check_leaf_item(struct extent_buffer *leaf, ret = check_extent_data_item(leaf, key, slot, prev_key); break; case BTRFS_EXTENT_CSUM_KEY: - ret = check_csum_item(leaf, key, slot); + ret = check_csum_item(leaf, key, slot, prev_key); break; case BTRFS_DIR_ITEM_KEY: case BTRFS_DIR_INDEX_KEY: diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8a6cc600bf18..6f2178618c22 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -808,7 +808,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = btrfs_del_csums(trans, fs_info, + ret = btrfs_del_csums(trans, + fs_info->csum_root, sums->bytenr, sums->len); if (!ret) @@ -3927,10 +3928,32 @@ static int log_inode_item(struct btrfs_trans_handle *trans, return 0; } +static int log_csums(struct btrfs_trans_handle *trans, + struct btrfs_root *log_root, + struct btrfs_ordered_sum *sums) +{ + int ret; + + /* + * Due to extent cloning, we might have logged a csum item that covers a + * subrange of a cloned extent, and later we can end up logging a csum + * item for a larger subrange of the same extent or the entire range. + * This would leave csum items in the log tree that cover the same range + * and break the searches for checksums in the log tree, resulting in + * some checksums missing in the fs/subvolume tree. So just delete (or + * trim and adjust) any existing csum items in the log for this range. 
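A worked example of the coverage arithmetic behind the overlap check added to check_csum_item() above (the numbers are illustrative: csumsize == 4 assumes crc32c checksums, sectorsize == 4K):

	/* Every csumsize bytes of a csum item checksum one sector, so a
	 * previous item of prev_item_size bytes starting at prev_key->offset
	 * covers:
	 *
	 *	prev_csum_end = prev_key->offset +
	 *			(prev_item_size / csumsize) * sectorsize
	 *
	 * A 32-byte item at offset 1M therefore ends at 1M + (32 / 4) * 4K =
	 * 1M + 32K; any following csum item whose key.offset is below that
	 * overlaps it and the leaf is rejected with -EUCLEAN. */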
+ */ + ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len); + if (ret) + return ret; + + return btrfs_csum_file_blocks(trans, log_root, sums); +} + static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *dst_path, - struct btrfs_path *src_path, u64 *last_extent, + struct btrfs_path *src_path, int start_slot, int nr, int inode_only, u64 logged_isize) { @@ -3941,7 +3964,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item *extent; struct btrfs_inode_item *inode_item; struct extent_buffer *src = src_path->nodes[0]; - struct btrfs_key first_key, last_key, key; int ret; struct btrfs_key *ins_keys; u32 *ins_sizes; @@ -3949,9 +3971,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, int i; struct list_head ordered_sums; int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; - bool has_extents = false; - bool need_find_last_extent = true; - bool done = false; INIT_LIST_HEAD(&ordered_sums); @@ -3960,8 +3979,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, if (!ins_data) return -ENOMEM; - first_key.objectid = (u64)-1; - ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); @@ -3982,9 +3999,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, src_offset = btrfs_item_ptr_offset(src, start_slot + i); - if (i == nr - 1) - last_key = ins_keys[i]; - if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_path->slots[0], @@ -3998,20 +4012,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, src_offset, ins_sizes[i]); } - /* - * We set need_find_last_extent here in case we know we were - * processing other items and then walk into the first extent in - * the inode. If we don't hit an extent then nothing changes, - * we'll do the last search the next time around. - */ - if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { - has_extents = true; - if (first_key.objectid == (u64)-1) - first_key = ins_keys[i]; - } else { - need_find_last_extent = false; - } - /* take a reference on file data extents so that truncates * or deletes of this inode don't have to relog the inode * again @@ -4072,172 +4072,11 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = btrfs_csum_file_blocks(trans, log, sums); + ret = log_csums(trans, log, sums); list_del(&sums->list); kfree(sums); } - if (!has_extents) - return ret; - - if (need_find_last_extent && *last_extent == first_key.offset) { - /* - * We don't have any leafs between our current one and the one - * we processed before that can have file extent items for our - * inode (and have a generation number smaller than our current - * transaction id). - */ - need_find_last_extent = false; - } - - /* - * Because we use btrfs_search_forward we could skip leaves that were - * not modified and then assume *last_extent is valid when it really - * isn't. So back up to the previous leaf and read the end of the last - * extent before we go and fill in holes. 
- */ - if (need_find_last_extent) { - u64 len; - - ret = btrfs_prev_leaf(inode->root, src_path); - if (ret < 0) - return ret; - if (ret) - goto fill_holes; - if (src_path->slots[0]) - src_path->slots[0]--; - src = src_path->nodes[0]; - btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); - if (key.objectid != btrfs_ino(inode) || - key.type != BTRFS_EXTENT_DATA_KEY) - goto fill_holes; - extent = btrfs_item_ptr(src, src_path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(src, extent) == - BTRFS_FILE_EXTENT_INLINE) { - len = btrfs_file_extent_ram_bytes(src, extent); - *last_extent = ALIGN(key.offset + len, - fs_info->sectorsize); - } else { - len = btrfs_file_extent_num_bytes(src, extent); - *last_extent = key.offset + len; - } - } -fill_holes: - /* So we did prev_leaf, now we need to move to the next leaf, but a few - * things could have happened - * - * 1) A merge could have happened, so we could currently be on a leaf - * that holds what we were copying in the first place. - * 2) A split could have happened, and now not all of the items we want - * are on the same leaf. - * - * So we need to adjust how we search for holes, we need to drop the - * path and re-search for the first extent key we found, and then walk - * forward until we hit the last one we copied. - */ - if (need_find_last_extent) { - /* btrfs_prev_leaf could return 1 without releasing the path */ - btrfs_release_path(src_path); - ret = btrfs_search_slot(NULL, inode->root, &first_key, - src_path, 0, 0); - if (ret < 0) - return ret; - ASSERT(ret == 0); - src = src_path->nodes[0]; - i = src_path->slots[0]; - } else { - i = start_slot; - } - - /* - * Ok so here we need to go through and fill in any holes we may have - * to make sure that holes are punched for those areas in case they had - * extents previously. - */ - while (!done) { - u64 offset, len; - u64 extent_end; - - if (i >= btrfs_header_nritems(src_path->nodes[0])) { - ret = btrfs_next_leaf(inode->root, src_path); - if (ret < 0) - return ret; - ASSERT(ret == 0); - src = src_path->nodes[0]; - i = 0; - need_find_last_extent = true; - } - - btrfs_item_key_to_cpu(src, &key, i); - if (!btrfs_comp_cpu_keys(&key, &last_key)) - done = true; - if (key.objectid != btrfs_ino(inode) || - key.type != BTRFS_EXTENT_DATA_KEY) { - i++; - continue; - } - extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(src, extent) == - BTRFS_FILE_EXTENT_INLINE) { - len = btrfs_file_extent_ram_bytes(src, extent); - extent_end = ALIGN(key.offset + len, - fs_info->sectorsize); - } else { - len = btrfs_file_extent_num_bytes(src, extent); - extent_end = key.offset + len; - } - i++; - - if (*last_extent == key.offset) { - *last_extent = extent_end; - continue; - } - offset = *last_extent; - len = key.offset - *last_extent; - ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), - offset, 0, 0, len, 0, len, 0, 0, 0); - if (ret) - break; - *last_extent = extent_end; - } - - /* - * Check if there is a hole between the last extent found in our leaf - * and the first extent in the next leaf. If there is one, we need to - * log an explicit hole so that at replay time we can punch the hole. 
- */ - if (ret == 0 && - key.objectid == btrfs_ino(inode) && - key.type == BTRFS_EXTENT_DATA_KEY && - i == btrfs_header_nritems(src_path->nodes[0])) { - ret = btrfs_next_leaf(inode->root, src_path); - need_find_last_extent = true; - if (ret > 0) { - ret = 0; - } else if (ret == 0) { - btrfs_item_key_to_cpu(src_path->nodes[0], &key, - src_path->slots[0]); - if (key.objectid == btrfs_ino(inode) && - key.type == BTRFS_EXTENT_DATA_KEY && - *last_extent < key.offset) { - const u64 len = key.offset - *last_extent; - - ret = btrfs_insert_file_extent(trans, log, - btrfs_ino(inode), - *last_extent, 0, - 0, len, 0, len, - 0, 0, 0); - *last_extent += len; - } - } - } - /* - * Need to let the callers know we dropped the path so they should - * re-search. - */ - if (!ret && need_find_last_extent) - ret = 1; return ret; } @@ -4292,7 +4131,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = btrfs_csum_file_blocks(trans, log_root, sums); + ret = log_csums(trans, log_root, sums); list_del(&sums->list); kfree(sums); } @@ -4402,7 +4241,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, const u64 i_size = i_size_read(&inode->vfs_inode); const u64 ino = btrfs_ino(inode); struct btrfs_path *dst_path = NULL; - u64 last_extent = (u64)-1; + bool dropped_extents = false; int ins_nr = 0; int start_slot; int ret; @@ -4424,8 +4263,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, if (slot >= btrfs_header_nritems(leaf)) { if (ins_nr > 0) { ret = copy_items(trans, inode, dst_path, path, - &last_extent, start_slot, - ins_nr, 1, 0); + start_slot, ins_nr, 1, 0); if (ret < 0) goto out; ins_nr = 0; @@ -4449,8 +4287,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, path->slots[0]++; continue; } - if (last_extent == (u64)-1) { - last_extent = key.offset; + if (!dropped_extents) { /* * Avoid logging extent items logged in past fsync calls * and leading to duplicate keys in the log tree. @@ -4464,6 +4301,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, } while (ret == -EAGAIN); if (ret) goto out; + dropped_extents = true; } if (ins_nr == 0) start_slot = slot; @@ -4478,7 +4316,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, } } if (ins_nr > 0) { - ret = copy_items(trans, inode, dst_path, path, &last_extent, + ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); if (ret > 0) ret = 0; @@ -4665,13 +4503,8 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, if (slot >= nritems) { if (ins_nr > 0) { - u64 last_extent = 0; - ret = copy_items(trans, inode, dst_path, path, - &last_extent, start_slot, - ins_nr, 1, 0); - /* can't be 1, extent items aren't processed */ - ASSERT(ret <= 0); + start_slot, ins_nr, 1, 0); if (ret < 0) return ret; ins_nr = 0; @@ -4695,13 +4528,8 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, cond_resched(); } if (ins_nr > 0) { - u64 last_extent = 0; - ret = copy_items(trans, inode, dst_path, path, - &last_extent, start_slot, - ins_nr, 1, 0); - /* can't be 1, extent items aren't processed */ - ASSERT(ret <= 0); + start_slot, ins_nr, 1, 0); if (ret < 0) return ret; } @@ -4710,100 +4538,119 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, } /* - * If the no holes feature is enabled we need to make sure any hole between the - * last extent and the i_size of our inode is explicitly marked in the log. 
This - * is to make sure that doing something like: - * - * 1) create file with 128Kb of data - * 2) truncate file to 64Kb - * 3) truncate file to 256Kb - * 4) fsync file - * 5) - * 6) mount fs and trigger log replay - * - * Will give us a file with a size of 256Kb, the first 64Kb of data match what - * the file had in its first 64Kb of data at step 1 and the last 192Kb of the - * file correspond to a hole. The presence of explicit holes in a log tree is - * what guarantees that log replay will remove/adjust file extent items in the - * fs/subvol tree. - * - * Here we do not need to care about holes between extents, that is already done - * by copy_items(). We also only need to do this in the full sync path, where we - * lookup for extents from the fs/subvol tree only. In the fast path case, we - * lookup the list of modified extent maps and if any represents a hole, we - * insert a corresponding extent representing a hole in the log tree. + * When using the NO_HOLES feature if we punched a hole that causes the + * deletion of entire leafs or all the extent items of the first leaf (the one + * that contains the inode item and references) we may end up not processing + * any extents, because there are no leafs with a generation matching the + * current transaction that have extent items for our inode. So we need to find + * if any holes exist and then log them. We also need to log holes after any + * truncate operation that changes the inode's size. */ -static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_inode *inode, - struct btrfs_path *path) +static int btrfs_log_holes(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_inode *inode, + struct btrfs_path *path) { struct btrfs_fs_info *fs_info = root->fs_info; - int ret; struct btrfs_key key; - u64 hole_start; - u64 hole_size; - struct extent_buffer *leaf; - struct btrfs_root *log = root->log_root; const u64 ino = btrfs_ino(inode); const u64 i_size = i_size_read(&inode->vfs_inode); + u64 prev_extent_end = 0; + int ret; - if (!btrfs_fs_incompat(fs_info, NO_HOLES)) + if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0) return 0; key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = (u64)-1; + key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - ASSERT(ret != 0); if (ret < 0) return ret; - ASSERT(path->slots[0] > 0); - path->slots[0]--; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - - if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { - /* inode does not have any extents */ - hole_start = 0; - hole_size = i_size; - } else { + while (true) { struct btrfs_file_extent_item *extent; + struct extent_buffer *leaf = path->nodes[0]; u64 len; - /* - * If there's an extent beyond i_size, an explicit hole was - * already inserted by copy_items(). - */ - if (key.offset >= i_size) - return 0; + if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + return ret; + if (ret > 0) { + ret = 0; + break; + } + leaf = path->nodes[0]; + } + + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) + break; + + /* We have a hole, log it. */ + if (prev_extent_end < key.offset) { + const u64 hole_len = key.offset - prev_extent_end; + + /* + * Release the path to avoid deadlocks with other code + * paths that search the root while holding locks on + * leafs from the log root. 
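A hypothetical file layout makes the hole-logging walk concrete (not taken from the patch): extents at [0, 4K) and [16K, 20K), with i_size = 32K.

	/* First extent: prev_extent_end == 0 == key.offset, no hole.
	 * Second extent: prev_extent_end (4K) < key.offset (16K), so the
	 * loop logs the hole [4K, 16K). After the loop, prev_extent_end
	 * (20K) < i_size (32K), so the trailing check logs [20K, 32K). */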
+ */ + btrfs_release_path(path); + ret = btrfs_insert_file_extent(trans, root->log_root, + ino, prev_extent_end, 0, + 0, hole_len, 0, hole_len, + 0, 0, 0); + if (ret < 0) + return ret; + + /* + * Search for the same key again in the root. Since it's + * an extent item and we are holding the inode lock, the + * key must still exist. If it doesn't just emit a warning + * and return an error to fall back to a transaction + * commit. + */ + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + return ret; + if (WARN_ON(ret > 0)) + return -ENOENT; + leaf = path->nodes[0]; + } extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, extent) == - BTRFS_FILE_EXTENT_INLINE) - return 0; + BTRFS_FILE_EXTENT_INLINE) { + len = btrfs_file_extent_ram_bytes(leaf, extent); + prev_extent_end = ALIGN(key.offset + len, + fs_info->sectorsize); + } else { + len = btrfs_file_extent_num_bytes(leaf, extent); + prev_extent_end = key.offset + len; + } - len = btrfs_file_extent_num_bytes(leaf, extent); - /* Last extent goes beyond i_size, no need to log a hole. */ - if (key.offset + len > i_size) - return 0; - hole_start = key.offset + len; - hole_size = i_size - hole_start; + path->slots[0]++; + cond_resched(); } - btrfs_release_path(path); - /* Last extent ends at i_size. */ - if (hole_size == 0) - return 0; + if (prev_extent_end < i_size) { + u64 hole_len; - hole_size = ALIGN(hole_size, fs_info->sectorsize); - ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, - hole_size, 0, hole_size, 0, 0, 0); - return ret; + btrfs_release_path(path); + hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize); + ret = btrfs_insert_file_extent(trans, root->log_root, + ino, prev_extent_end, 0, 0, + hole_len, 0, hole_len, + 0, 0, 0); + if (ret < 0) + return ret; + } + + return 0; } /* @@ -5007,6 +4854,50 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, } continue; } + /* + * If the inode was already logged skip it - otherwise we can + * hit an infinite loop. Example: + * + * From the commit root (previous transaction) we have the + * following inodes: + * + * inode 257 a directory + * inode 258 with references "zz" and "zz_link" on inode 257 + * inode 259 with reference "a" on inode 257 + * + * And in the current (uncommitted) transaction we have: + * + * inode 257 a directory, unchanged + * inode 258 with references "a" and "a2" on inode 257 + * inode 259 with reference "zz_link" on inode 257 + * inode 261 with reference "zz" on inode 257 + * + * When logging inode 261 the following infinite loop could + * happen if we don't skip already logged inodes: + * + * - we detect inode 258 as a conflicting inode, with inode 261 + * on reference "zz", and log it; + * + * - we detect inode 259 as a conflicting inode, with inode 258 + * on reference "a", and log it; + * + * - we detect inode 258 as a conflicting inode, with inode 259 + * on reference "zz_link", and log it - again! After this we + * repeat the above steps forever. + */ + spin_lock(&BTRFS_I(inode)->lock); + /* + * Check the inode's logged_trans only instead of + * btrfs_inode_in_log(). This is because the last_log_commit of + * the inode is not updated when we only log that it exists and + * it has the full sync bit set (see btrfs_log_inode()).
+ */ + if (BTRFS_I(inode)->logged_trans == trans->transid) { + spin_unlock(&BTRFS_I(inode)->lock); + btrfs_add_delayed_iput(inode); + continue; + } + spin_unlock(&BTRFS_I(inode)->lock); /* * We are safe logging the other inode without acquiring its * lock as long as we log with the LOG_INODE_EXISTS mode. We @@ -5106,7 +4997,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = root->log_root; - u64 last_extent = 0; int err = 0; int ret; int nritems; @@ -5284,7 +5174,7 @@ again: ins_start_slot = path->slots[0]; } ret = copy_items(trans, inode, dst_path, path, - &last_extent, ins_start_slot, + ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { @@ -5307,17 +5197,13 @@ again: if (ins_nr == 0) goto next_slot; ret = copy_items(trans, inode, dst_path, path, - &last_extent, ins_start_slot, + ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } ins_nr = 0; - if (ret) { - btrfs_release_path(path); - continue; - } goto next_slot; } @@ -5330,18 +5216,13 @@ again: goto next_slot; } - ret = copy_items(trans, inode, dst_path, path, &last_extent, + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } - if (ret) { - ins_nr = 0; - btrfs_release_path(path); - continue; - } ins_nr = 1; ins_start_slot = path->slots[0]; next_slot: @@ -5355,13 +5236,12 @@ next_slot: } if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, - &last_extent, ins_start_slot, + ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } - ret = 0; ins_nr = 0; } btrfs_release_path(path); @@ -5376,14 +5256,13 @@ next_key: } } if (ins_nr) { - ret = copy_items(trans, inode, dst_path, path, &last_extent, + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } - ret = 0; ins_nr = 0; } @@ -5396,7 +5275,7 @@ next_key: if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { btrfs_release_path(path); btrfs_release_path(dst_path); - err = btrfs_log_trailing_hole(trans, root, inode, path); + err = btrfs_log_holes(trans, root, inode, path); if (err) goto out_unlock; } @@ -6314,9 +6193,28 @@ again: wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); if (IS_ERR(wc.replay_dest)) { ret = PTR_ERR(wc.replay_dest); + + /* + * We didn't find the subvol, likely because it was + * deleted. This is ok, simply skip this log and go to + * the next one. + * + * We need to exclude the root because we can't have + * other log replays overwriting this log as we'll read + * it back in a few more times. This will keep our + * block from being modified, and we'll just bail for + * each subsequent pass. 
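Since the control flow of the recovery change above is easy to lose in the hunk, here is its condensed shape (the comments are mine; the calls come from the patch):

	if (IS_ERR(wc.replay_dest)) {
		ret = PTR_ERR(wc.replay_dest);
		if (ret == -ENOENT)	/* subvol deleted: pin the log blocks */
			ret = btrfs_pin_extent_for_log_replay(fs_info,
					log->node->start, log->node->len);
		/* ... free the log buffers ... */
		if (!ret)
			goto next;	/* skip this log, continue with the next */
	}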
+ */ + if (ret == -ENOENT) + ret = btrfs_pin_extent_for_log_replay(fs_info, + log->node->start, + log->node->len); free_extent_buffer(log->node); free_extent_buffer(log->commit_root); kfree(log); + + if (!ret) + goto next; btrfs_handle_fs_error(fs_info, ret, "Couldn't read target root for tree log recovery."); goto error; } @@ -6348,7 +6246,6 @@ again: &root->highest_objectid); } - key.offset = found_key.offset - 1; wc.replay_dest->log_root = NULL; free_extent_buffer(log->node); free_extent_buffer(log->commit_root); @@ -6356,9 +6253,10 @@ again: if (ret) goto error; - +next: if (found_key.offset == 0) break; + key.offset = found_key.offset - 1; } btrfs_release_path(path); diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 91caab63bdf5..76b84f2397b1 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -324,6 +324,8 @@ again_search_slot: } if (ret < 0 && ret != -ENOENT) goto out; + key.offset++; + goto again_search_slot; } item_size -= sizeof(subid_le); offset += sizeof(subid_le); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e04409f85063..3e64f49c394b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -881,17 +881,54 @@ static struct btrfs_fs_devices *find_fsid_changed( /* * Handles the case where scanned device is part of an fs that had * multiple successful changes of FSID but curently device didn't - * observe it. Meaning our fsid will be different than theirs. + * observe it. Meaning our fsid will be different than theirs. We need + * to handle two subcases: + * 1 - The fs still continues to have different METADATA/FSID uuids. + * 2 - The fs is switched back to its original FSID (METADATA/FSID + * are equal). */ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { + /* Changed UUIDs */ if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, BTRFS_FSID_SIZE) == 0 && memcmp(fs_devices->fsid, disk_super->fsid, - BTRFS_FSID_SIZE) != 0) { + BTRFS_FSID_SIZE) != 0) + return fs_devices; + + /* Unchanged UUIDs */ + if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, + BTRFS_FSID_SIZE) == 0 && + memcmp(fs_devices->fsid, disk_super->metadata_uuid, + BTRFS_FSID_SIZE) == 0) + return fs_devices; + } + + return NULL; +} + +static struct btrfs_fs_devices *find_fsid_reverted_metadata( + struct btrfs_super_block *disk_super) +{ + struct btrfs_fs_devices *fs_devices; + + /* + * Handle the case where the scanned device is part of an fs whose last + * metadata UUID change reverted it to the original FSID. At the same + * time fs_devices was first created by another constituent device + * which didn't fully observe the operation. This results in a + * btrfs_fs_devices created with metadata/fsid different AND + * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the + * fs_devices equal to the FSID of the disk.
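A hypothetical timeline that produces exactly this state (UUIDs A and B are placeholders): the fs starts with fsid A; a metadata UUID change leaves metadata_uuid at A while fsid becomes B; a device scanned at that point creates a fs_devices with fsid B, metadata_uuid A and fsid_change set; the fs is then reverted to A, so a newly scanned device reports fsid A, matching that fs_devices' metadata_uuid. Restated as a predicate (sketch only; fd and sb stand for the fs_devices and the disk super):

	match = memcmp(fd->fsid, fd->metadata_uuid, BTRFS_FSID_SIZE) != 0 &&
		memcmp(fd->metadata_uuid, sb->fsid, BTRFS_FSID_SIZE) == 0 &&
		fd->fsid_change;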
+ */ + list_for_each_entry(fs_devices, &fs_uuids, fs_list) { + if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, + BTRFS_FSID_SIZE) != 0 && + memcmp(fs_devices->metadata_uuid, disk_super->fsid, + BTRFS_FSID_SIZE) == 0 && + fs_devices->fsid_change) return fs_devices; - } } return NULL; @@ -935,7 +972,9 @@ static noinline struct btrfs_device *device_list_add(const char *path, fs_devices = find_fsid(disk_super->fsid, disk_super->metadata_uuid); } else { - fs_devices = find_fsid(disk_super->fsid, NULL); + fs_devices = find_fsid_reverted_metadata(disk_super); + if (!fs_devices) + fs_devices = find_fsid(disk_super->fsid, NULL); } @@ -965,12 +1004,18 @@ static noinline struct btrfs_device *device_list_add(const char *path, * a device which had the CHANGING_FSID_V2 flag then replace the * metadata_uuid/fsid values of the fs_devices. */ - if (has_metadata_uuid && fs_devices->fsid_change && + if (fs_devices->fsid_change && found_transid > fs_devices->latest_generation) { memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE); - memcpy(fs_devices->metadata_uuid, - disk_super->metadata_uuid, BTRFS_FSID_SIZE); + + if (has_metadata_uuid) + memcpy(fs_devices->metadata_uuid, + disk_super->metadata_uuid, + BTRFS_FSID_SIZE); + else + memcpy(fs_devices->metadata_uuid, + disk_super->fsid, BTRFS_FSID_SIZE); fs_devices->fsid_change = false; } @@ -4066,7 +4111,11 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, } } - num_devices = btrfs_num_devices(fs_info); + /* + * rw_devices will not change at the moment, device add/delete/replace + * are excluded by EXCL_OP + */ + num_devices = fs_info->fs_devices->rw_devices; /* * SINGLE profile on-disk has no profile bit, but in-memory we have a @@ -6676,8 +6725,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, else generate_random_uuid(dev->uuid); - btrfs_init_work(&dev->work, btrfs_submit_helper, - pending_bios_fn, NULL, NULL); + btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); return dev; } @@ -7547,6 +7595,8 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, else btrfs_dev_stat_set(dev, i, 0); } + btrfs_info(fs_info, "device stats zeroed by %s (%d)", + current->comm, task_pid_nr(current)); } else { for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) if (stats->nr_items > i) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a7da1f3e3627..5acf5c507ec2 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -330,7 +330,6 @@ struct btrfs_bio { u64 map_type; /* get from map_lookup->type */ bio_end_io_t *end_io; struct bio *orig_bio; - unsigned long flags; void *private; atomic_t error; int max_errors; diff --git a/fs/buffer.c b/fs/buffer.c index 86a38b979323..91ceca52d14f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2991,11 +2991,9 @@ static void end_bio_bh_io_sync(struct bio *bio) * errors, this only handles the "we need to be able to * do IO at the final sector" case. */ -void guard_bio_eod(int op, struct bio *bio) +void guard_bio_eod(struct bio *bio) { sector_t maxsector; - struct bio_vec *bvec = bio_last_bvec_all(bio); - unsigned truncated_bytes; struct hd_struct *part; rcu_read_lock(); @@ -3021,28 +3019,7 @@ void guard_bio_eod(int op, struct bio *bio) if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) return; - /* Uhhuh. We've got a bio that straddles the device size! 
*/ - truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); - - /* - * The bio contains more than one segment which spans EOD, just return - * and let IO layer turn it into an EIO - */ - if (truncated_bytes > bvec->bv_len) - return; - - /* Truncate the bio.. */ - bio->bi_iter.bi_size -= truncated_bytes; - bvec->bv_len -= truncated_bytes; - - /* ..and clear the end of the buffer for reads */ - if (op == REQ_OP_READ) { - struct bio_vec bv; - - mp_bvec_last_segment(bvec, &bv); - zero_user(bv.bv_page, bv.bv_offset + bv.bv_len, - truncated_bytes); - } + bio_truncate(bio, maxsector << 9); } static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, @@ -3078,15 +3055,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; - /* Take care of bh's that straddle the end of the device */ - guard_bio_eod(op, bio); - if (buffer_meta(bh)) op_flags |= REQ_META; if (buffer_prio(bh)) op_flags |= REQ_PRIO; bio_set_op_attrs(bio, op, op_flags); + /* Take care of bh's that straddle the end of the device */ + guard_bio_eod(bio); + if (wbc) { wbc_init_bio(wbc, bio); wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index facb387c2735..97d47357512d 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -117,6 +117,7 @@ static int mdsc_show(struct seq_file *s, void *p) seq_printf(s, " %s", req->r_path2); } + seq_printf(s, "\tpid:%d", req->r_pid); seq_puts(s, "\n"); } mutex_unlock(&mdsc->mutex); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index d17a789fd856..2e4764fd1872 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1809,6 +1809,7 @@ const struct file_operations ceph_dir_fops = { .open = ceph_open, .release = ceph_release, .unlocked_ioctl = ceph_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fsync = ceph_fsync, .lock = ceph_lock, .flock = ceph_flock, diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8de633964dc3..ce54a1b12819 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1415,9 +1415,13 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); + struct ceph_osd_client *osdc = &fsc->client->osdc; struct ceph_cap_flush *prealloc_cf; ssize_t count, written = 0; int err, want, got; + bool direct_lock = false; + u32 map_flags; + u64 pool_flags; loff_t pos; loff_t limit = max(i_size_read(inode), fsc->max_file_size); @@ -1428,8 +1432,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) if (!prealloc_cf) return -ENOMEM; + if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT) + direct_lock = true; + retry_snap: - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_start_io_direct(inode); else ceph_start_io_write(inode); @@ -1477,8 +1484,12 @@ retry_snap: goto out; } - /* FIXME: not complete since it doesn't account for being at quota */ - if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) { + down_read(&osdc->lock); + map_flags = osdc->osdmap->flags; + pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id); + up_read(&osdc->lock); + if ((map_flags & CEPH_OSDMAP_FULL) || + (pool_flags & CEPH_POOL_FLAG_FULL)) { err = -ENOSPC; goto out; } @@ -1519,14 +1530,15 @@ retry_snap: /* we might need to revert back to that point */ data = *from; - if (iocb->ki_flags & IOCB_DIRECT) { + if (iocb->ki_flags & IOCB_DIRECT) written = 
ceph_direct_read_write(iocb, &data, snapc, &prealloc_cf); - ceph_end_io_direct(inode); - } else { + else written = ceph_sync_write(iocb, &data, pos, snapc); + if (direct_lock) + ceph_end_io_direct(inode); + else ceph_end_io_write(inode); - } if (written > 0) iov_iter_advance(from, written); ceph_put_snap_context(snapc); @@ -1570,14 +1582,15 @@ retry_snap: } if (written >= 0) { - if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL)) + if ((map_flags & CEPH_OSDMAP_NEARFULL) || + (pool_flags & CEPH_POOL_FLAG_NEARFULL)) iocb->ki_flags |= IOCB_DSYNC; written = generic_write_sync(iocb, written); } goto out_unlocked; out: - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_end_io_direct(inode); else ceph_end_io_write(inode); @@ -2188,7 +2201,7 @@ const struct file_operations ceph_file_fops = { .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = ceph_ioctl, - .compat_ioctl = ceph_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fallocate = ceph_fallocate, .copy_file_range = ceph_copy_file_range, }; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a5163296d9d9..5c84545bb8e8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "super.h" #include "mds_client.h" @@ -708,8 +709,10 @@ void ceph_mdsc_release_request(struct kref *kref) /* avoid calling iput_final() in mds dispatch threads */ ceph_async_iput(req->r_inode); } - if (req->r_parent) + if (req->r_parent) { ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); + ceph_async_iput(req->r_parent); + } ceph_async_iput(req->r_target_inode); if (req->r_dentry) dput(req->r_dentry); @@ -785,6 +788,11 @@ static void __register_request(struct ceph_mds_client *mdsc, req->r_uid = current_fsuid(); req->r_gid = current_fsgid(); + if (pid_alive(current)) { + req->r_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); + } else { + req->r_pid = -1; + } if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) mdsc->oldest_tid = req->r_tid; @@ -2550,8 +2558,7 @@ static void __do_request(struct ceph_mds_client *mdsc, if (!(mdsc->fsc->mount_options->flags & CEPH_MOUNT_OPT_MOUNTWAIT) && !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { - err = -ENOENT; - pr_info("probably no mds server is up\n"); + err = -EHOSTUNREACH; goto finish; } } @@ -2670,8 +2677,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ if (req->r_inode) ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); - if (req->r_parent) + if (req->r_parent) { ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); + ihold(req->r_parent); + } if (req->r_old_dentry_dir) ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), CEPH_CAP_PIN); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 5cd131b41d84..e18328cf6d3d 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -252,6 +252,7 @@ struct ceph_mds_request { int r_fmode; /* file mode, if expecting cap */ kuid_t r_uid; kgid_t r_gid; + pid_t r_pid; struct timespec64 r_stamp; /* for choosing which mds to send this request to */ diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index ccfcc66aaf44..923be9399b21 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -1155,5 +1155,6 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc) pr_err("snapid map %llx -> %x still in use\n", sm->snap, sm->dev); } + kfree(sm); } } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 
b47f43fc2d68..d40658d5e808 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -106,7 +106,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } - static int ceph_sync_fs(struct super_block *sb, int wait) { struct ceph_fs_client *fsc = ceph_sb_to_client(sb); @@ -215,6 +214,26 @@ static match_table_t fsopt_tokens = { {-1, NULL} }; +/* + * Remove adjacent slashes and then the trailing slash, unless it is + * the only remaining character. + * + * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/". + */ +static void canonicalize_path(char *path) +{ + int i, j = 0; + + for (i = 0; path[i] != '\0'; i++) { + if (path[i] != '/' || j < 1 || path[j - 1] != '/') + path[j++] = path[i]; + } + + if (j > 1 && path[j - 1] == '/') + j--; + path[j] = '\0'; +} + static int parse_fsopt_token(char *c, void *private) { struct ceph_mount_options *fsopt = private; @@ -446,12 +465,15 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt, ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name); if (ret) return ret; + ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace); if (ret) return ret; + ret = strcmp_null(fsopt1->server_path, fsopt2->server_path); if (ret) return ret; + ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq); if (ret) return ret; @@ -507,13 +529,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt, */ dev_name_end = strchr(dev_name, '/'); if (dev_name_end) { - if (strlen(dev_name_end) > 1) { - fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL); - if (!fsopt->server_path) { - err = -ENOMEM; - goto out; - } + /* + * The server_path will include the whole chars from userland + * including the leading '/'. + */ + fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL); + if (!fsopt->server_path) { + err = -ENOMEM; + goto out; } + + canonicalize_path(fsopt->server_path); } else { dev_name_end = dev_name + strlen(dev_name); } @@ -842,7 +868,6 @@ static void destroy_caches(void) ceph_fscache_unregister(); } - /* * ceph_umount_begin - initiate forced umount. Tear down down the * mount, skipping steps that may hang while waiting for server(s). @@ -929,9 +954,6 @@ out: return root; } - - - /* * mount: join the ceph cluster, and open root directory. */ @@ -945,7 +967,9 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) mutex_lock(&fsc->client->mount_mutex); if (!fsc->sb->s_root) { - const char *path; + const char *path = fsc->mount_options->server_path ? 
+ fsc->mount_options->server_path + 1 : ""; + err = __ceph_open_session(fsc->client, started); if (err < 0) goto out; @@ -957,13 +981,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) goto out; } - if (!fsc->mount_options->server_path) { - path = ""; - dout("mount opening path \\t\n"); - } else { - path = fsc->mount_options->server_path + 1; - dout("mount opening path %s\n", path); - } + dout("mount opening path '%s'\n", path); ceph_fs_debugfs_init(fsc); @@ -1137,6 +1155,11 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, return res; out_splat: + if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) { + pr_info("No mds server is up or the cluster is laggy\n"); + err = -EHOSTUNREACH; + } + ceph_mdsc_close_sessions(fsc->mdsc); deactivate_locked_super(sb); goto out_final; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index f98d9247f9cb..bb12c9f3a218 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -92,7 +92,7 @@ struct ceph_mount_options { char *snapdir_name; /* default ".snap" */ char *mds_namespace; /* default NULL */ - char *server_path; /* default "/" */ + char *server_path; /* default NULL (means "/") */ char *fscache_uniq; /* default NULL */ }; diff --git a/fs/char_dev.c b/fs/char_dev.c index 00dfe17871ac..c5e6eff5a381 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -352,7 +352,7 @@ static struct kobject *cdev_get(struct cdev *p) if (owner && !try_module_get(owner)) return NULL; - kobj = kobject_get(&p->kobj); + kobj = kobject_get_unless_zero(&p->kobj); if (!kobj) module_put(owner); return kobj; diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 0b4eee3bed66..efb2928ff6c8 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -256,6 +256,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) if (!server->rdma) goto skip_rdma; + if (!server->smbd_conn) { + seq_printf(m, "\nSMBDirect transport not available"); + goto skip_rdma; + } + seq_printf(m, "\nSMBDirect (in hex) protocol version: %x " "transport status: %x", server->smbd_conn->protocol, diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 41957b82d796..cc3ada12848d 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -120,17 +120,17 @@ cifs_build_devname(char *nodename, const char *prepath) /** - * cifs_compose_mount_options - creates mount options for refferral + * cifs_compose_mount_options - creates mount options for referral * @sb_mountdata: parent/root DFS mount options (template) * @fullpath: full path in UNC format - * @ref: server's referral + * @ref: optional server's referral * @devname: optional pointer for saving device name * * creates mount options for submount based on template options sb_mountdata * and replacing unc,ip,prefixpath options with ones we've got form ref_unc. * * Returns: pointer to new mount options or ERR_PTR. - * Caller is responcible for freeing retunrned value if it is not error. + * Caller is responsible for freeing returned value if it is not error. 
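With the now-optional @ref, the function documented above has two call shapes (illustrative, not real call sites):

	/* DFS referral available: the device name is built from
	 * ref->node_name plus whatever part of @fullpath the referral
	 * did not consume. */
	md = cifs_compose_mount_options(sb_mountdata, fullpath, ref, &devname);

	/* No referral (ref == NULL): the full UNC path is used as-is. */
	md = cifs_compose_mount_options(sb_mountdata, fullpath, NULL, NULL);

When the caller passes a NULL devname, the composed device name is freed internally (the kfree(name) hunk below).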
*/ char *cifs_compose_mount_options(const char *sb_mountdata, const char *fullpath, @@ -150,18 +150,27 @@ char *cifs_compose_mount_options(const char *sb_mountdata, if (sb_mountdata == NULL) return ERR_PTR(-EINVAL); - if (strlen(fullpath) - ref->path_consumed) { - prepath = fullpath + ref->path_consumed; - /* skip initial delimiter */ - if (*prepath == '/' || *prepath == '\\') - prepath++; - } + if (ref) { + if (strlen(fullpath) - ref->path_consumed) { + prepath = fullpath + ref->path_consumed; + /* skip initial delimiter */ + if (*prepath == '/' || *prepath == '\\') + prepath++; + } - name = cifs_build_devname(ref->node_name, prepath); - if (IS_ERR(name)) { - rc = PTR_ERR(name); - name = NULL; - goto compose_mount_options_err; + name = cifs_build_devname(ref->node_name, prepath); + if (IS_ERR(name)) { + rc = PTR_ERR(name); + name = NULL; + goto compose_mount_options_err; + } + } else { + name = cifs_build_devname((char *)fullpath, NULL); + if (IS_ERR(name)) { + rc = PTR_ERR(name); + name = NULL; + goto compose_mount_options_err; + } } rc = dns_resolve_server_name_to_ip(name, &srvIP); @@ -225,6 +234,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, if (devname) *devname = name; + else + kfree(name); /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/ /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/ @@ -241,23 +252,23 @@ compose_mount_options_err: } /** - * cifs_dfs_do_refmount - mounts specified path using provided refferal + * cifs_dfs_do_mount - mounts specified path using DFS full path + * + * Always pass down @fullpath to smb3_do_mount() so we can use the root server + * to perform failover in case we failed to connect to the first target in the + * referral. + * * @cifs_sb: parent/root superblock * @fullpath: full path in UNC format - * @ref: server's referral */ -static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt, - struct cifs_sb_info *cifs_sb, - const char *fullpath, const struct dfs_info3_param *ref) +static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt, + struct cifs_sb_info *cifs_sb, + const char *fullpath) { struct vfsmount *mnt; char *mountdata; char *devname; - /* - * Always pass down the DFS full path to smb3_do_mount() so we - * can use it later for failover. 
- */ devname = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL); if (!devname) return ERR_PTR(-ENOMEM); @@ -266,7 +277,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt, /* strip first '\' from fullpath */ mountdata = cifs_compose_mount_options(cifs_sb->mountdata, - fullpath + 1, ref, NULL); + fullpath + 1, NULL, NULL); if (IS_ERR(mountdata)) { kfree(devname); return (struct vfsmount *)mountdata; @@ -278,28 +289,16 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt, return mnt; } -static void dump_referral(const struct dfs_info3_param *ref) -{ - cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name); - cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name); - cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n", - ref->flags, ref->server_type); - cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n", - ref->ref_flag, ref->path_consumed); -} - /* * Create a vfsmount that we can automount */ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) { - struct dfs_info3_param referral = {0}; struct cifs_sb_info *cifs_sb; struct cifs_ses *ses; struct cifs_tcon *tcon; char *full_path, *root_path; unsigned int xid; - int len; int rc; struct vfsmount *mnt; @@ -325,6 +324,8 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) if (full_path == NULL) goto cdda_exit; + convert_delimiter(full_path, '\\'); + cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path); if (!cifs_sb_master_tlink(cifs_sb)) { @@ -357,7 +358,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) if (!rc) { rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), full_path + 1, - &referral, NULL); + NULL, NULL); } free_xid(xid); @@ -366,26 +367,16 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) mnt = ERR_PTR(rc); goto free_root_path; } - - dump_referral(&referral); - - len = strlen(referral.node_name); - if (len < 2) { - cifs_dbg(VFS, "%s: Net Address path too short: %s\n", - __func__, referral.node_name); - mnt = ERR_PTR(-EINVAL); - goto free_dfs_ref; - } /* - * cifs_mount() will retry every available node server in case - * of failures. + * OK - we were able to get and cache a referral for @full_path. + * + * Now, pass it down to cifs_mount() and it will retry every available + * node server in case of failures - no need to do it here. 
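The resulting automount flow, condensed (calls as in the patch):

	/* cache the referral for @full_path; no dfs_info3_param is returned */
	rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
			    full_path + 1, NULL, NULL);

	/* hand the full DFS path to the mount; cifs_mount() retries every
	 * cached target on failure */
	mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);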
*/ - mnt = cifs_dfs_do_refmount(mntpt, cifs_sb, full_path, &referral); - cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n", __func__, - referral.node_name, mnt); + mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path); + cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, + full_path + 1, mnt); -free_dfs_ref: - free_dfs_info_param(&referral); free_root_path: kfree(root_path); free_full_path: diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index f842944a5c76..1619af216677 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) *pmode |= (S_IXUGO & (*pbits_to_set)); - cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode); + cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode); return; } @@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, if (mode & S_IXUGO) *pace_flags |= SET_FILE_EXEC_RIGHTS; - cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n", + cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n", mode, *pace_flags); return; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 1a135d1b85bd..115f063497ff 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -119,6 +119,7 @@ extern mempool_t *cifs_mid_poolp; struct workqueue_struct *cifsiod_wq; struct workqueue_struct *decrypt_wq; +struct workqueue_struct *fileinfo_put_wq; struct workqueue_struct *cifsoplockd_wq; __u32 cifs_lock_secret; @@ -413,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses) seq_puts(s, "ntlm"); break; case Kerberos: - seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid)); + seq_puts(s, "krb5"); break; case RawNTLMSSP: seq_puts(s, "ntlmssp"); @@ -426,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses) if (ses->sign) seq_puts(s, "i"); + + if (ses->sectype == Kerberos) + seq_printf(s, ",cruid=%u", + from_kuid_munged(&init_user_ns, ses->cred_uid)); } static void @@ -525,6 +530,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) if (tcon->seal) seq_puts(s, ",seal"); + else if (tcon->ses->server->ignore_signature) + seq_puts(s, ",signloosely"); if (tcon->nocase) seq_puts(s, ",nocase"); if (tcon->local_lease) @@ -1554,11 +1561,18 @@ init_cifs(void) goto out_destroy_cifsiod_wq; } + fileinfo_put_wq = alloc_workqueue("cifsfileinfoput", + WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); + if (!fileinfo_put_wq) { + rc = -ENOMEM; + goto out_destroy_decrypt_wq; + } + cifsoplockd_wq = alloc_workqueue("cifsoplockd", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!cifsoplockd_wq) { rc = -ENOMEM; - goto out_destroy_decrypt_wq; + goto out_destroy_fileinfo_put_wq; } rc = cifs_fscache_register(); @@ -1624,6 +1638,8 @@ out_unreg_fscache: cifs_fscache_unregister(); out_destroy_cifsoplockd_wq: destroy_workqueue(cifsoplockd_wq); +out_destroy_fileinfo_put_wq: + destroy_workqueue(fileinfo_put_wq); out_destroy_decrypt_wq: destroy_workqueue(decrypt_wq); out_destroy_cifsiod_wq: @@ -1653,6 +1669,7 @@ exit_cifs(void) cifs_fscache_unregister(); destroy_workqueue(cifsoplockd_wq); destroy_workqueue(decrypt_wq); + destroy_workqueue(fileinfo_put_wq); destroy_workqueue(cifsiod_wq); cifs_proc_clean(); } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d78bfcc19156..f9cbdfc1591b 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1229,6 +1229,7 @@ struct cifs_fid { __u64 volatile_fid; /* volatile file id for smb2 */ __u8 
lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */ __u8 create_guid[16]; + __u32 access; struct cifs_pending_open *pending_open; unsigned int epoch; #ifdef CONFIG_CIFS_DEBUG2 @@ -1265,6 +1266,7 @@ struct cifsFileInfo { struct mutex fh_mutex; /* prevents reopen race after dead ses*/ struct cifs_search_info srch_inf; struct work_struct oplock_break; /* work for oplock breaks */ + struct work_struct put; /* work for the final part of _put */ }; struct cifs_io_parms { @@ -1370,7 +1372,8 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file) } struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file); -void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr); +void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr, + bool offload); void cifsFileInfo_put(struct cifsFileInfo *cifs_file); #define CIFS_CACHE_READ_FLG 1 @@ -1524,6 +1527,7 @@ struct mid_q_entry { struct TCP_Server_Info *server; /* server corresponding to this mid */ __u64 mid; /* multiplex id */ __u16 credits; /* number of credits consumed by this mid */ + __u16 credits_received; /* number of credits from the response */ __u32 pid; /* process id */ __u32 sequence_number; /* for CIFS signing */ unsigned long when_alloc; /* when mid was created */ @@ -1535,6 +1539,7 @@ struct mid_q_entry { mid_callback_t *callback; /* call completion callback */ mid_handle_t *handle; /* call handle mid callback */ void *callback_data; /* general purpose pointer for callback */ + struct task_struct *creator; void *resp_buf; /* pointer to received SMB header */ unsigned int resp_buf_size; int mid_state; /* wish this were enum but can not pass to wait_event */ @@ -1696,6 +1701,12 @@ static inline bool is_retryable_error(int error) return false; } + +/* cifs_get_writable_file() flags */ +#define FIND_WR_ANY 0 +#define FIND_WR_FSUID_ONLY 1 +#define FIND_WR_WITH_DELETE 2 + #define MID_FREE 0 #define MID_REQUEST_ALLOCATED 1 #define MID_REQUEST_SUBMITTED 2 @@ -1907,6 +1918,7 @@ void cifs_queue_oplock_break(struct cifsFileInfo *cfile); extern const struct slow_work_ops cifs_oplock_break_ops; extern struct workqueue_struct *cifsiod_wq; extern struct workqueue_struct *decrypt_wq; +extern struct workqueue_struct *fileinfo_put_wq; extern struct workqueue_struct *cifsoplockd_wq; extern __u32 cifs_lock_secret; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index fe597d3d5208..f18da99a6b55 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -133,11 +133,12 @@ extern bool backup_cred(struct cifs_sb_info *); extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, unsigned int bytes_written); -extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool); +extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int); extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, - bool fsuid_only, + int flags, struct cifsFileInfo **ret_file); extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, + int flags, struct cifsFileInfo **ret_file); extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 4f554f019a98..c8494fa5e19d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1489,6 +1489,7 @@ openRetry: *oplock = rsp->OplockLevel; /* cifs fid stays in le */ oparms->fid->netfid = rsp->Fid; + 
oparms->fid->access = desired_access; /* Let caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? */ @@ -2112,7 +2113,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) wdata2->tailsz = tailsz; wdata2->bytes = cur_len; - rc = cifs_get_writable_file(CIFS_I(inode), false, + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &wdata2->cfile); if (!wdata2->cfile) { cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ccaa8bad336f..bcda48c03882 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -387,7 +387,7 @@ static inline int reconn_set_ipaddr(struct TCP_Server_Info *server) #ifdef CONFIG_CIFS_DFS_UPCALL struct super_cb_data { struct TCP_Server_Info *server; - struct cifs_sb_info *cifs_sb; + struct super_block *sb; }; /* These functions must be called with server->srv_mutex held */ @@ -398,25 +398,39 @@ static void super_cb(struct super_block *sb, void *arg) struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; - if (d->cifs_sb) + if (d->sb) return; cifs_sb = CIFS_SB(sb); tcon = cifs_sb_master_tcon(cifs_sb); if (tcon->ses->server == d->server) - d->cifs_sb = cifs_sb; + d->sb = sb; } -static inline struct cifs_sb_info * -find_super_by_tcp(struct TCP_Server_Info *server) +static struct super_block *get_tcp_super(struct TCP_Server_Info *server) { struct super_cb_data d = { .server = server, - .cifs_sb = NULL, + .sb = NULL, }; iterate_supers_type(&cifs_fs_type, super_cb, &d); - return d.cifs_sb ? d.cifs_sb : ERR_PTR(-ENOENT); + + if (unlikely(!d.sb)) + return ERR_PTR(-ENOENT); + /* + * Grab an active reference in order to prevent automounts (DFS links) + * from expiring and then freeing up our cifs superblock pointer while + * we're doing failover.
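[Example] The comment just above describes pinning the superblock with an active reference before the slow failover work so a DFS automount expiry cannot free it mid-use. As a hedged illustration of that acquire/use/release discipline, here is a minimal userspace C analogue built on C11 atomics; every name below (sbox, sb_grab, sb_put, do_failover) is invented for the sketch and none of it is cifs API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sbox {                          /* stand-in for struct super_block */
	atomic_int active;             /* stand-in for the s_active count  */
	char id[16];
};

static struct sbox *sb_grab(struct sbox *s)     /* like cifs_sb_active()   */
{
	if (s)
		atomic_fetch_add(&s->active, 1);
	return s;
}

static void sb_put(struct sbox *s)              /* like cifs_sb_deactive() */
{
	if (s && atomic_fetch_sub(&s->active, 1) == 1)
		free(s);               /* dropped the last reference */
}

static void do_failover(struct sbox *s)
{
	struct sbox *pinned = sb_grab(s);       /* pin before the slow work */

	if (!pinned)
		return;
	printf("failing over %s\n", pinned->id);  /* safe to dereference */
	sb_put(pinned);
}

int main(void)
{
	struct sbox *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	atomic_init(&s->active, 1);
	strcpy(s->id, "share0");
	do_failover(s);
	sb_put(s);                     /* drop the original reference */
	return 0;
}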
+ */ + cifs_sb_active(d.sb); + return d.sb; +} + +static inline void put_tcp_super(struct super_block *sb) +{ + if (!IS_ERR_OR_NULL(sb)) + cifs_sb_deactive(sb); } static void reconn_inval_dfs_target(struct TCP_Server_Info *server, @@ -480,6 +494,7 @@ cifs_reconnect(struct TCP_Server_Info *server) struct mid_q_entry *mid_entry; struct list_head retry_list; #ifdef CONFIG_CIFS_DFS_UPCALL + struct super_block *sb = NULL; struct cifs_sb_info *cifs_sb = NULL; struct dfs_cache_tgt_list tgt_list = {0}; struct dfs_cache_tgt_iterator *tgt_it = NULL; @@ -489,13 +504,15 @@ cifs_reconnect(struct TCP_Server_Info *server) server->nr_targets = 1; #ifdef CONFIG_CIFS_DFS_UPCALL spin_unlock(&GlobalMid_Lock); - cifs_sb = find_super_by_tcp(server); - if (IS_ERR(cifs_sb)) { - rc = PTR_ERR(cifs_sb); + sb = get_tcp_super(server); + if (IS_ERR(sb)) { + rc = PTR_ERR(sb); cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n", __func__, rc); - cifs_sb = NULL; + sb = NULL; } else { + cifs_sb = CIFS_SB(sb); + rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it); if (rc && (rc != -EOPNOTSUPP)) { cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n", @@ -512,6 +529,10 @@ cifs_reconnect(struct TCP_Server_Info *server) /* the demux thread will exit normally next time through the loop */ spin_unlock(&GlobalMid_Lock); +#ifdef CONFIG_CIFS_DFS_UPCALL + dfs_cache_free_tgts(&tgt_list); + put_tcp_super(sb); +#endif return rc; } else server->tcpStatus = CifsNeedReconnect; @@ -638,7 +659,10 @@ cifs_reconnect(struct TCP_Server_Info *server) __func__, rc); } dfs_cache_free_tgts(&tgt_list); + } + + put_tcp_super(sb); #endif if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); @@ -905,6 +929,20 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) spin_unlock(&GlobalMid_Lock); } +static unsigned int +smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) +{ + struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buffer; + + /* + * SMB1 does not use credits. + */ + if (server->vals->header_preamble_size) + return 0; + + return le16_to_cpu(shdr->CreditRequest); +} + static void handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, char *buf, int malformed) @@ -912,6 +950,7 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, if (server->ops->check_trans2 && server->ops->check_trans2(mid, server, buf, malformed)) return; + mid->credits_received = smb2_get_credits_from_hdr(buf, server); mid->resp_buf = buf; mid->large_buf = server->large_buf; /* Was previous buf put in mpx struct for multi-rsp? 
*/ @@ -1222,12 +1261,6 @@ next_pdu: for (i = 0; i < num_mids; i++) { if (mids[i] != NULL) { mids[i]->resp_buf_size = server->pdu_size; - if ((mids[i]->mid_flags & MID_WAIT_CANCELLED) && - mids[i]->mid_state == MID_RESPONSE_RECEIVED && - server->ops->handle_cancelled_mid) - server->ops->handle_cancelled_mid( - mids[i]->resp_buf, - server); if (!mids[i]->multiRsp || mids[i]->multiEnd) mids[i]->callback(mids[i]); @@ -3619,8 +3652,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) { struct cifs_sb_info *old = CIFS_SB(sb); struct cifs_sb_info *new = mnt_data->cifs_sb; - bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH; - bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH; + bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && + old->prepath; + bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && + new->prepath; if (old_set && new_set && !strcmp(new->prepath, old->prepath)) return 1; @@ -4059,7 +4094,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, cifs_sb->mnt_gid = pvolume_info->linux_gid; cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; - cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n", + cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); cifs_sb->actimeo = pvolume_info->actimeo; @@ -4700,6 +4735,17 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol, } #ifdef CONFIG_CIFS_DFS_UPCALL +static inline void set_root_tcon(struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, + struct cifs_tcon **root) +{ + spin_lock(&cifs_tcp_ses_lock); + tcon->tc_count++; + tcon->remap = cifs_remap(cifs_sb); + spin_unlock(&cifs_tcp_ses_lock); + *root = tcon; +} + int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol) { int rc = 0; @@ -4801,18 +4847,10 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol) /* Cache out resolved root server */ (void)dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), root_path + 1, NULL, NULL); - /* - * Save root tcon for additional DFS requests to update or create a new - * DFS cache entry, or even perform DFS failover. - */ - spin_lock(&cifs_tcp_ses_lock); - tcon->tc_count++; - tcon->dfs_path = root_path; + kfree(root_path); root_path = NULL; - tcon->remap = cifs_remap(cifs_sb); - spin_unlock(&cifs_tcp_ses_lock); - root_tcon = tcon; + set_root_tcon(cifs_sb, tcon, &root_tcon); for (count = 1; ;) { if (!rc && tcon) { @@ -4849,6 +4887,15 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol) mount_put_conns(cifs_sb, xid, server, ses, tcon); rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses, &tcon); + /* + * Ensure that DFS referrals go through new root server. 
+ */ + if (!rc && tcon && + (tcon->share_flags & (SHI1005_FLAGS_DFS | + SHI1005_FLAGS_DFS_ROOT))) { + cifs_put_tcon(root_tcon); + set_root_tcon(cifs_sb, tcon, &root_tcon); + } } if (rc) { if (rc == -EACCES || rc == -EOPNOTSUPP) diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index 1692c0c6c23a..cf6cec59696c 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -1317,10 +1317,9 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi, int rc; struct dfs_info3_param ref = {0}; char *mdata = NULL, *devname = NULL; - bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0; struct TCP_Server_Info *server; struct cifs_ses *ses; - struct smb_vol vol; + struct smb_vol vol = {NULL}; rpath = get_dfs_root(path); if (IS_ERR(rpath)) @@ -1344,7 +1343,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi, goto out; } - rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3); + rc = cifs_setup_volume_info(&vol, mdata, devname, false); kfree(devname); if (rc) { diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 7ce689d31aa2..5a35850ccb1a 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -560,7 +560,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, if (server->ops->close) server->ops->close(xid, tcon, &fid); cifs_del_pending_open(&open); - fput(file); rc = -ENOMEM; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index fa7b0fa72bb3..35c55cf38a35 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -288,6 +288,8 @@ cifs_down_write(struct rw_semaphore *sem) msleep(10); } +static void cifsFileInfo_put_work(struct work_struct *work); + struct cifsFileInfo * cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock) @@ -313,9 +315,6 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, INIT_LIST_HEAD(&fdlocks->locks); fdlocks->cfile = cfile; cfile->llist = fdlocks; - cifs_down_write(&cinode->lock_sem); - list_add(&fdlocks->llist, &cinode->llist); - up_write(&cinode->lock_sem); cfile->count = 1; cfile->pid = current->tgid; @@ -325,6 +324,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, cfile->invalidHandle = false; cfile->tlink = cifs_get_tlink(tlink); INIT_WORK(&cfile->oplock_break, cifs_oplock_break); + INIT_WORK(&cfile->put, cifsFileInfo_put_work); mutex_init(&cfile->fh_mutex); spin_lock_init(&cfile->file_info_lock); @@ -339,6 +339,10 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, oplock = 0; } + cifs_down_write(&cinode->lock_sem); + list_add(&fdlocks->llist, &cinode->llist); + up_write(&cinode->lock_sem); + spin_lock(&tcon->open_file_lock); if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) oplock = fid->pending_open->oplock; @@ -375,6 +379,41 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file) return cifs_file; } +static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file) +{ + struct inode *inode = d_inode(cifs_file->dentry); + struct cifsInodeInfo *cifsi = CIFS_I(inode); + struct cifsLockInfo *li, *tmp; + struct super_block *sb = inode->i_sb; + + /* + * Delete any outstanding lock records. We'll lose them when the file + * is closed anyway. 
+ */ + cifs_down_write(&cifsi->lock_sem); + list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { + list_del(&li->llist); + cifs_del_lock_waiters(li); + kfree(li); + } + list_del(&cifs_file->llist->llist); + kfree(cifs_file->llist); + up_write(&cifsi->lock_sem); + + cifs_put_tlink(cifs_file->tlink); + dput(cifs_file->dentry); + cifs_sb_deactive(sb); + kfree(cifs_file); +} + +static void cifsFileInfo_put_work(struct work_struct *work) +{ + struct cifsFileInfo *cifs_file = container_of(work, + struct cifsFileInfo, put); + + cifsFileInfo_put_final(cifs_file); +} + /** * cifsFileInfo_put - release a reference of file priv data * @@ -382,15 +421,15 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file) */ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) { - _cifsFileInfo_put(cifs_file, true); + _cifsFileInfo_put(cifs_file, true, true); } /** * _cifsFileInfo_put - release a reference of file priv data * * This may involve closing the filehandle @cifs_file out on the - * server. Must be called without holding tcon->open_file_lock and - * cifs_file->file_info_lock. + * server. Must be called without holding tcon->open_file_lock, + * cinode->open_file_lock and cifs_file->file_info_lock. * * If @wait_for_oplock_handler is true and we are releasing the last * reference, wait for any running oplock break handler of the file * and cancel any pending one. If calling this function from the * oplock break handler, you need to pass false. * */ -void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) +void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, + bool wait_oplock_handler, bool offload) { struct inode *inode = d_inode(cifs_file->dentry); struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); @@ -406,7 +446,6 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) struct cifsInodeInfo *cifsi = CIFS_I(inode); struct super_block *sb = inode->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; bool oplock_break_cancelled; @@ -467,24 +506,10 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) cifs_del_pending_open(&open); - /* - * Delete any outstanding lock records. We'll lose them when the file - * is closed anyway.
- */ - cifs_down_write(&cifsi->lock_sem); - list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { - list_del(&li->llist); - cifs_del_lock_waiters(li); - kfree(li); - } - list_del(&cifs_file->llist->llist); - kfree(cifs_file->llist); - up_write(&cifsi->lock_sem); - - cifs_put_tlink(cifs_file->tlink); - dput(cifs_file->dentry); - cifs_sb_deactive(sb); - kfree(cifs_file); + if (offload) + queue_work(fileinfo_put_wq, &cifs_file->put); + else + cifsFileInfo_put_final(cifs_file); } int cifs_open(struct inode *inode, struct file *file) @@ -728,6 +753,13 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT; + /* O_SYNC also has bit for O_DSYNC so following check picks up either */ + if (cfile->f_flags & O_SYNC) + create_options |= CREATE_WRITE_THROUGH; + + if (cfile->f_flags & O_DIRECT) + create_options |= CREATE_NO_BUFFER; + if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &cfile->fid); @@ -808,7 +840,7 @@ reopen_error_exit: int cifs_close(struct inode *inode, struct file *file) { if (file->private_data != NULL) { - cifsFileInfo_put(file->private_data); + _cifsFileInfo_put(file->private_data, true, false); file->private_data = NULL; } @@ -1141,7 +1173,8 @@ try_again: rc = posix_lock_file(file, flock, NULL); up_write(&cinode->lock_sem); if (rc == FILE_LOCK_DEFERRED) { - rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker); + rc = wait_event_interruptible(flock->fl_wait, + list_empty(&flock->fl_blocked_member)); if (!rc) goto try_again; locks_delete_block(flock); @@ -1884,7 +1917,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, /* Return -EBADF if no handle is found and general rc otherwise */ int -cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only, +cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, struct cifsFileInfo **ret_file) { struct cifsFileInfo *open_file, *inv_file = NULL; @@ -1892,7 +1925,8 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only, bool any_available = false; int rc = -EBADF; unsigned int refind = 0; - + bool fsuid_only = flags & FIND_WR_FSUID_ONLY; + bool with_delete = flags & FIND_WR_WITH_DELETE; *ret_file = NULL; /* @@ -1924,6 +1958,8 @@ refind_writable: continue; if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) continue; + if (with_delete && !(open_file->fid.access & DELETE)) + continue; if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { if (!open_file->invalidHandle) { /* found a good writable file */ @@ -1971,12 +2007,12 @@ refind_writable: } struct cifsFileInfo * -find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) +find_writable_file(struct cifsInodeInfo *cifs_inode, int flags) { struct cifsFileInfo *cfile; int rc; - rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile); + rc = cifs_get_writable_file(cifs_inode, flags, &cfile); if (rc) cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc); @@ -1985,6 +2021,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, + int flags, struct cifsFileInfo **ret_file) { struct list_head *tmp; @@ -2011,7 +2048,7 @@ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, kfree(full_path); cinode = CIFS_I(d_inode(cfile->dentry)); spin_unlock(&tcon->open_file_lock); - return cifs_get_writable_file(cinode, 0, ret_file); + return cifs_get_writable_file(cinode, flags, ret_file); } 
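[Example] The FIND_WR_* values threaded through the lookups above are a plain flag word. A hedged sketch of how such a filter behaves, in standalone C with a mock handle table; only the three flag values mirror the patch, DELETE uses the standard SMB access-mask bit, and everything else is invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define FIND_WR_ANY         0
#define FIND_WR_FSUID_ONLY  1
#define FIND_WR_WITH_DELETE 2

#define DELETE 0x00010000u          /* SMB standard-rights DELETE bit */

struct handle { unsigned uid, access; bool writable; };

/* Return the first open handle that satisfies the requested filter. */
static const struct handle *find_writable(const struct handle *h, int n,
					  unsigned fsuid, int flags)
{
	bool fsuid_only  = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;

	for (int i = 0; i < n; i++) {
		if (!h[i].writable)
			continue;
		if (fsuid_only && h[i].uid != fsuid)
			continue;
		if (with_delete && !(h[i].access & DELETE))
			continue;
		return &h[i];
	}
	return NULL;
}

int main(void)
{
	const struct handle open_files[] = {
		{ 1000, 0x0002,          true },   /* write, no DELETE */
		{ 1000, 0x0002 | DELETE, true },   /* write + DELETE   */
	};

	/* A rename-style caller must get the second handle. */
	const struct handle *h = find_writable(open_files, 2, 1000,
					       FIND_WR_WITH_DELETE);
	printf("picked handle with access %#x\n", h ? h->access : 0);
	return 0;
}

The point of the third flag is visible in the smb2_rename_path() hunk: a cached handle opened without DELETE access must not be reused for a rename, which the old bool argument could not express.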
spin_unlock(&tcon->open_file_lock); @@ -2088,7 +2125,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) if (mapping->host->i_size - offset < (loff_t)to) to = (unsigned)(mapping->host->i_size - offset); - rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file); + rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY, + &open_file); if (!rc) { bytes_written = cifs_write(open_file, open_file->pid, write_data, to - from, &offset); @@ -2281,7 +2319,7 @@ retry: if (cfile) cifsFileInfo_put(cfile); - rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile); + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile); /* in case of an error store it to return later */ if (rc) @@ -4680,7 +4718,7 @@ void cifs_oplock_break(struct work_struct *work) cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } - _cifsFileInfo_put(cfile, false /* do not wait for ourself */); + _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); cifs_done_oplock_break(cinode); } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index df9377828e2f..7c5e983fe385 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -163,7 +163,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) spin_lock(&inode->i_lock); /* we do not want atime to be less than mtime, it broke some apps */ - if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime)) + if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0) inode->i_atime = fattr->cf_mtime; else inode->i_atime = fattr->cf_atime; @@ -1586,7 +1586,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) struct TCP_Server_Info *server; char *full_path; - cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n", + cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n", mode, inode); cifs_sb = CIFS_SB(inode->i_sb); @@ -2011,6 +2011,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry) struct inode *inode = d_inode(dentry); struct super_block *sb = dentry->d_sb; char *full_path = NULL; + int count = 0; if (inode == NULL) return -ENOENT; @@ -2032,15 +2033,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry) full_path, inode, inode->i_count.counter, dentry, cifs_get_time(dentry), jiffies); +again: if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); else rc = cifs_get_inode_info(&inode, full_path, NULL, sb, xid, NULL); - + if (rc == -EAGAIN && count++ < 10) + goto again; out: kfree(full_path); free_xid(xid); + return rc; } @@ -2216,7 +2220,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, * writebehind data than the SMB timeout for the SetPathInfo * request would allow */ - open_file = find_writable_file(cifsInode, true); + open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); if (open_file) { tcon = tlink_tcon(open_file->tlink); server = tcon->ses->server; @@ -2366,7 +2370,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) args->ctime = NO_CHANGE_64; args->device = 0; - open_file = find_writable_file(cifsInode, true); + open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); if (open_file) { u16 nfid = open_file->fid.netfid; u32 npid = open_file->pid; @@ -2469,7 +2473,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) rc = 0; if (attrs->ia_valid & ATTR_MTIME) { - rc = cifs_get_writable_file(cifsInode, false, &wfile); + rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile); if (!rc) { tcon = tlink_tcon(wfile->tlink); rc = 
tcon->ses->server->ops->flush(xid, tcon, &wfile->fid); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 514810694c0f..195766221a7a 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -767,7 +767,7 @@ smb_set_file_info(struct inode *inode, const char *full_path, struct cifs_tcon *tcon; /* if the file is already open for write, just use that fileid */ - open_file = find_writable_file(cinode, true); + open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); if (open_file) { fid.netfid = open_file->fid.netfid; netpid = open_file->pid; diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 8b0b512c5792..afe1f03aabe3 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, goto out; - if (oparms->tcon->use_resilient) { + if (oparms->tcon->use_resilient) { /* default timeout is 0, servers pick default (120 seconds) */ nr_ioctl_req.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 4121ac1163ca..f2a6f7f28340 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -525,7 +525,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name, cifs_i = CIFS_I(inode); dosattrs = cifs_i->cifsAttrs | ATTR_READONLY; data.Attributes = cpu_to_le32(dosattrs); - cifs_get_writable_path(tcon, name, &cfile); + cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile); tmprc = smb2_compound_op(xid, tcon, cifs_sb, name, FILE_WRITE_ATTRIBUTES, FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE, @@ -581,7 +581,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon, { struct cifsFileInfo *cfile; - cifs_get_writable_path(tcon, from_name, &cfile); + cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile); return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, DELETE, SMB2_OP_RENAME, cfile); diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index e311f58dc1c8..14265b4bbcc0 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -673,10 +673,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifs_ses, smb_ses_list); + list_for_each(tmp1, &ses->tcon_list) { tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); - cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); spin_lock(&tcon->open_file_lock); list_for_each(tmp2, &tcon->openFileList) { cfile = list_entry(tmp2, struct cifsFileInfo, @@ -688,6 +688,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) continue; cifs_dbg(FYI, "file id match, oplock break\n"); + cifs_stats_inc( + &tcon->stats.cifs_stats.num_oplock_brks); cinode = CIFS_I(d_inode(cfile->dentry)); spin_lock(&cfile->file_info_lock); if (!CIFS_CACHE_WRITE(cinode) && @@ -720,9 +722,6 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) return true; } spin_unlock(&tcon->open_file_lock); - spin_unlock(&cifs_tcp_ses_lock); - cifs_dbg(FYI, "No matching file for oplock break\n"); - return true; } } spin_unlock(&cifs_tcp_ses_lock); @@ -744,36 +743,67 @@ smb2_cancelled_close_fid(struct work_struct *work) kfree(cancelled); } +/* Caller should already have an extra reference to @tcon */ +static int +__smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, + __u64 volatile_fid) +{ + struct close_cancelled_open *cancelled; + + cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC); + if (!cancelled) + return -ENOMEM; + +
cancelled->fid.persistent_fid = persistent_fid; + cancelled->fid.volatile_fid = volatile_fid; + cancelled->tcon = tcon; + INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); + WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false); + + return 0; +} + +int +smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, + __u64 volatile_fid) +{ + int rc; + + cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); + spin_lock(&cifs_tcp_ses_lock); + tcon->tc_count++; + spin_unlock(&cifs_tcp_ses_lock); + + rc = __smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid); + if (rc) + cifs_put_tcon(tcon); + + return rc; +} + int smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server) { struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer; struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer; struct cifs_tcon *tcon; - struct close_cancelled_open *cancelled; + int rc; if (sync_hdr->Command != SMB2_CREATE || sync_hdr->Status != STATUS_SUCCESS) return 0; - cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL); - if (!cancelled) - return -ENOMEM; - tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId, sync_hdr->TreeId); - if (!tcon) { - kfree(cancelled); + if (!tcon) return -ENOENT; - } - cancelled->fid.persistent_fid = rsp->PersistentFileId; - cancelled->fid.volatile_fid = rsp->VolatileFileId; - cancelled->tcon = tcon; - INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); - queue_work(cifsiod_wq, &cancelled->work); + rc = __smb2_handle_cancelled_close(tcon, rsp->PersistentFileId, + rsp->VolatileFileId); + if (rc) + cifs_put_tcon(tcon); - return 0; + return rc; } /** diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index cd55af9b7cc5..76eacffb24d8 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -151,13 +151,7 @@ smb2_get_credits_field(struct TCP_Server_Info *server, const int optype) static unsigned int smb2_get_credits(struct mid_q_entry *mid) { - struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf; - - if (mid->mid_state == MID_RESPONSE_RECEIVED - || mid->mid_state == MID_RESPONSE_MALFORMED) - return le16_to_cpu(shdr->CreditRequest); - - return 0; + return mid->credits_received; } static int @@ -1093,7 +1087,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, void *data[1]; struct smb2_file_full_ea_info *ea = NULL; struct kvec close_iov[1]; - int rc; + struct smb2_query_info_rsp *rsp; + int rc, used_len = 0; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; @@ -1116,6 +1111,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, cifs_sb); if (rc == -ENODATA) goto sea_exit; + } else { + /* If we are adding an attribute we should first check + * if there will be enough space available to store + * the new EA. If not, we should not add it since we + * would not be able to even read the EAs back. + */ + rc = smb2_query_info_compound(xid, tcon, utf16_path, + FILE_READ_EA, + FILE_FULL_EA_INFORMATION, + SMB2_O_INFO_FILE, + CIFSMaxBufSize - + MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE, + &rsp_iov[1], &resp_buftype[1], cifs_sb); + if (rc == 0) { + rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; + used_len = le32_to_cpu(rsp->OutputBufferLength); + } + free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); + resp_buftype[1] = CIFS_NO_BUFFER; + memset(&rsp_iov[1], 0, sizeof(rsp_iov[1])); + rc = 0; + + /* Use a fudge factor of 256 bytes in case we collide + * with a different set_EAs command.
+ */ + if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 < + used_len + ea_name_len + ea_value_len + 1) { + rc = -ENOSPC; + goto sea_exit; + } } } @@ -1311,6 +1338,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) cfile->fid.persistent_fid = fid->persistent_fid; cfile->fid.volatile_fid = fid->volatile_fid; + cfile->fid.access = fid->access; #ifdef CONFIG_CIFS_DEBUG2 cfile->fid.mid = fid->mid; #endif /* CIFS_DEBUG2 */ @@ -1467,7 +1495,9 @@ smb2_ioctl_query_info(const unsigned int xid, COMPOUND_FID, COMPOUND_FID, qi.info_type, true, buffer, qi.output_buffer_length, - CIFSMaxBufSize); + CIFSMaxBufSize - + MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE); } } else if (qi.flags == PASSTHRU_SET_INFO) { /* Can eventually relax perm check since server enforces too */ @@ -2640,7 +2670,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid, fid.volatile_fid, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, CIFSMaxBufSize); + true /* is_fctl */, NULL, 0, + CIFSMaxBufSize - + MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE); if (rc) goto querty_exit; @@ -3130,7 +3163,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs * some servers (Windows2016) will not reflect recent writes in * QUERY_ALLOCATED_RANGES until SMB2_flush is called. */ - wrcfile = find_writable_file(cifsi, false); + wrcfile = find_writable_file(cifsi, FIND_WR_ANY); if (wrcfile) { filemap_write_and_wait(inode->i_mapping); smb2_flush_file(xid, tcon, &wrcfile->fid); @@ -3219,7 +3252,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon, if (rc) goto out; - if (out_data_len < sizeof(struct file_allocated_range_buffer)) { + if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) { rc = -EINVAL; goto out; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 05149862aea4..c8f304cae3f3 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) if (tcon == NULL) return 0; - if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) + if (smb2_command == SMB2_TREE_CONNECT) return 0; if (tcon->tidStatus == CifsExiting) { @@ -312,7 +312,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) if (server->tcpStatus != CifsNeedReconnect) break; - if (--retries) + if (retries && --retries) continue; /* @@ -350,9 +350,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) } rc = cifs_negotiate_protocol(0, tcon->ses); - if (!rc && tcon->ses->need_reconnect) + if (!rc && tcon->ses->need_reconnect) { rc = cifs_setup_session(0, tcon->ses, nls_codepage); - + if ((rc == -EACCES) && !tcon->retry) { + rc = -EHOSTDOWN; + mutex_unlock(&tcon->ses->session_mutex); + goto failed; + } + } if (rc || !tcon->need_reconnect) { mutex_unlock(&tcon->ses->session_mutex); goto out; @@ -397,6 +402,7 @@ out: case SMB2_SET_INFO: rc = -EAGAIN; } +failed: unload_nls(nls_codepage); return rc; } @@ -426,16 +432,9 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, * SMB information in the SMB header. If the return code is zero, this * function must have filled in request_buf pointer.
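[Example] The -ENOSPC guard in the smb2_set_ea() hunk above is pure headroom arithmetic on the compounded open/query/close request. A hedged sketch of the same computation in standalone C, with invented constant values (the real macro values live in the CIFS headers, and ea_fits() is a name made up for this sketch):

#include <stdio.h>

/* Illustrative stand-ins; real values come from the CIFS headers. */
#define CIFS_MAX_BUF   16384
#define CREATE_RSP_MAX 1024
#define CLOSE_RSP_MAX  128
#define SET_EA_FUDGE   256    /* slack in case of a concurrent set-EA */

/* Return nonzero if a new EA of the given sizes still fits. */
static int ea_fits(unsigned used_len, unsigned name_len, unsigned value_len)
{
	unsigned room = CIFS_MAX_BUF - CREATE_RSP_MAX - CLOSE_RSP_MAX
			- SET_EA_FUDGE;

	/* +1 for the NUL that terminates the EA name on the wire */
	return used_len + name_len + value_len + 1 <= room;
}

int main(void)
{
	printf("fits: %d\n", ea_fits(12000, 16, 64));   /* 1: still fits */
	printf("fits: %d\n", ea_fits(15000, 16, 512));  /* 0: -ENOSPC    */
	return 0;
}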
*/ -static int -smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, - void **request_buf, unsigned int *total_len) +static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, + void **request_buf, unsigned int *total_len) { - int rc; - - rc = smb2_reconnect(smb2_command, tcon); - if (rc) - return rc; - /* BB eventually switch this to SMB2 specific small buf size */ if (smb2_command == SMB2_SET_INFO) *request_buf = cifs_buf_get(); @@ -456,7 +455,31 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, cifs_stats_inc(&tcon->num_smbs_sent); } - return rc; + return 0; +} + +static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, + void **request_buf, unsigned int *total_len) +{ + int rc; + + rc = smb2_reconnect(smb2_command, tcon); + if (rc) + return rc; + + return __smb2_plain_req_init(smb2_command, tcon, request_buf, + total_len); +} + +static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon, + void **request_buf, unsigned int *total_len) +{ + /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */ + if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) { + return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, + total_len); + } + return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len); } /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */ @@ -2627,6 +2650,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, atomic_inc(&tcon->num_remote_opens); oparms->fid->persistent_fid = rsp->PersistentFileId; oparms->fid->volatile_fid = rsp->VolatileFileId; + oparms->fid->access = oparms->desired_access; #ifdef CONFIG_CIFS_DEBUG2 oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId); #endif /* CIFS_DEBUG2 */ @@ -2661,7 +2685,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, int rc; char *in_data_buf; - rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len); + rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len); if (rc) return rc; @@ -2972,7 +2996,21 @@ int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { - return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0); + int rc; + int tmp_rc; + + rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0); + + /* retry close in a worker thread if this one is interrupted */ + if (rc == -EINTR) { + tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, + volatile_fid); + if (tmp_rc) + cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", + persistent_fid, tmp_rc); + } + + return rc; } int @@ -3880,6 +3918,9 @@ smb2_writev_callback(struct mid_q_entry *mid) wdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes, wdata->result); + if (wdata->result == -ENOSPC) + printk_once(KERN_WARNING "Out of space writing to %s\n", + tcon->treeName); } else trace_smb3_write_done(0 /* no xid */, wdata->cfile->fid.persistent_fid, diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 71b2930b8e0b..2a12a2fa38a2 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -212,6 +212,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, const __u8 oplock_level); +extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon, + __u64 persistent_fid, + __u64 volatile_fid); extern int 
smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server); void smb2_cancelled_close_fid(struct work_struct *work); diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 148d7942c796..805652969065 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -599,6 +599,8 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, * The default is for the mid to be synchronous, so the * default callback just wakes up the current task. */ + get_task_struct(current); + temp->creator = current; temp->callback = cifs_wake_up_task; temp->callback_data = current; diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 3c91fa97c9a8..5b1b97e9e0c9 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -1069,7 +1069,7 @@ static int smbd_post_send_data( if (n_vec > SMBDIRECT_MAX_SGE) { cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec); - return -ENOMEM; + return -EINVAL; } sg_init_table(sgl, n_vec); @@ -1476,6 +1476,7 @@ void smbd_destroy(struct TCP_Server_Info *server) info->transport_status = SMBD_DESTROYED; destroy_workqueue(info->workqueue); + log_rdma_event(INFO, "rdma session destroyed\n"); kfree(info); } @@ -1505,8 +1506,9 @@ create_conn: log_rdma_event(INFO, "creating rdma session\n"); server->smbd_conn = smbd_get_connection( server, (struct sockaddr *) &server->dstaddr); - log_rdma_event(INFO, "created rdma session info=%p\n", - server->smbd_conn); + + if (server->smbd_conn) + cifs_dbg(VFS, "RDMA transport re-established\n"); return server->smbd_conn ? 0 : -ENOENT; } @@ -1970,7 +1972,7 @@ read_rfc1002_done: if (info->transport_status != SMBD_CONNECTED) { log_read(ERR, "disconnected\n"); - return 0; + return -ECONNABORTED; } goto again; @@ -2269,12 +2271,7 @@ static void smbd_mr_recovery_work(struct work_struct *work) int rc; list_for_each_entry(smbdirect_mr, &info->mr_list, list) { - if (smbdirect_mr->state == MR_INVALIDATED) - ib_dma_unmap_sg( - info->id->device, smbdirect_mr->sgl, - smbdirect_mr->sgl_count, - smbdirect_mr->dir); - else if (smbdirect_mr->state == MR_ERROR) { + if (smbdirect_mr->state == MR_ERROR) { /* recover this MR entry */ rc = ib_dereg_mr(smbdirect_mr->mr); @@ -2602,11 +2599,20 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) */ smbdirect_mr->state = MR_INVALIDATED; - /* - * Schedule the work to do MR recovery for future I/Os - * MR recovery is slow and we don't want it to block the current I/O - */ - queue_work(info->workqueue, &info->mr_recovery_work); + if (smbdirect_mr->state == MR_INVALIDATED) { + ib_dma_unmap_sg( + info->id->device, smbdirect_mr->sgl, + smbdirect_mr->sgl_count, + smbdirect_mr->dir); + smbdirect_mr->state = MR_READY; + if (atomic_inc_return(&info->mr_ready_count) == 1) + wake_up_interruptible(&info->wait_mr); + } else + /* + * Schedule the work to do MR recovery for future I/Os. MR + * recovery is slow and we don't want it to block the current I/O. + */ + queue_work(info->workqueue, &info->mr_recovery_work); done: if (atomic_dec_and_test(&info->mr_used_count)) diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index ca3de62688d6..e67a43fd037c 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -76,6 +76,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) * The default is for the mid to be synchronous, so the * default callback just wakes up the current task.
*/ + get_task_struct(current); + temp->creator = current; temp->callback = cifs_wake_up_task; temp->callback_data = current; @@ -93,8 +95,14 @@ static void _cifs_mid_q_entry_release(struct kref *refcount) __u16 smb_cmd = le16_to_cpu(midEntry->command); unsigned long now; unsigned long roundtrip_time; - struct TCP_Server_Info *server = midEntry->server; #endif + struct TCP_Server_Info *server = midEntry->server; + + if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) && + midEntry->mid_state == MID_RESPONSE_RECEIVED && + server->ops->handle_cancelled_mid) + server->ops->handle_cancelled_mid(midEntry->resp_buf, server); + midEntry->mid_state = MID_FREE; atomic_dec(&midCount); if (midEntry->large_buf) @@ -152,6 +160,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount) } } #endif + put_task_struct(midEntry->creator); mempool_free(midEntry, cifs_mid_poolp); } @@ -319,8 +328,11 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, int val = 1; __be32 rfc1002_marker; - if (cifs_rdma_enabled(server) && server->smbd_conn) { - rc = smbd_send(server, num_rqst, rqst); + if (cifs_rdma_enabled(server)) { + /* return -EAGAIN when connecting or reconnecting */ + rc = -EAGAIN; + if (server->smbd_conn) + rc = smbd_send(server, num_rqst, rqst); goto smbd_done; } @@ -1119,8 +1131,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(server, &rqst[i], midQ[i]); spin_lock(&GlobalMid_Lock); + midQ[i]->mid_flags |= MID_WAIT_CANCELLED; if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { - midQ[i]->mid_flags |= MID_WAIT_CANCELLED; midQ[i]->callback = cifs_cancelled_callback; cancelled_mid[i] = true; credits[i].value = 0; diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index a7ec2d3dff92..e0226b2138d6 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -1032,10 +1032,11 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, #endif case FICLONE: + goto do_ioctl; case FICLONERANGE: case FIDEDUPERANGE: case FS_IOC_FIEMAP: - goto do_ioctl; + goto found_handler; case FIBMAP: case FIGETBSZ: diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 680aba9c00d5..fd0b5dd68f9e 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -76,14 +76,11 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr) if (ia_valid & ATTR_GID) sd_iattr->ia_gid = iattr->ia_gid; if (ia_valid & ATTR_ATIME) - sd_iattr->ia_atime = timestamp_truncate(iattr->ia_atime, - inode); + sd_iattr->ia_atime = iattr->ia_atime; if (ia_valid & ATTR_MTIME) - sd_iattr->ia_mtime = timestamp_truncate(iattr->ia_mtime, - inode); + sd_iattr->ia_mtime = iattr->ia_mtime; if (ia_valid & ATTR_CTIME) - sd_iattr->ia_ctime = timestamp_truncate(iattr->ia_ctime, - inode); + sd_iattr->ia_ctime = iattr->ia_ctime; if (ia_valid & ATTR_MODE) { umode_t mode = iattr->ia_mode; diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index c34fa7c61b43..4ee65b2b6247 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -664,9 +664,6 @@ static int check_for_busy_inodes(struct super_block *sb, struct list_head *pos; size_t busy_count = 0; unsigned long ino; - struct dentry *dentry; - char _path[256]; - char *path = NULL; spin_lock(&mk->mk_decrypted_inodes_lock); @@ -685,22 +682,14 @@ static int check_for_busy_inodes(struct super_block *sb, struct fscrypt_info, ci_master_key_link)->ci_inode; ino = inode->i_ino; - dentry = d_find_alias(inode); } spin_unlock(&mk->mk_decrypted_inodes_lock); - if (dentry) { - path = 
dentry_path(dentry, _path, sizeof(_path)); - dput(dentry); - } - if (IS_ERR_OR_NULL(path)) - path = "(unknown)"; - fscrypt_warn(NULL, - "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)", + "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu", sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, - ino, path); + ino); return -EBUSY; } diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index d71c2d6dd162..75898340eb46 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -578,6 +578,15 @@ int fscrypt_drop_inode(struct inode *inode) return 0; mk = ci->ci_master_key->payload.data[0]; + /* + * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes + * protected by the key were cleaned by sync_filesystem(). But if + * userspace is still using the files, inodes can be dirtied between + * then and now. We mustn't lose any writes, so skip dirty inodes here. + */ + if (inode->i_state & I_DIRTY_ALL) + return 0; + /* * Note: since we aren't holding ->mk_secret_sem, the result here can * immediately become outdated. But there's no correctness problem with diff --git a/fs/dax.c b/fs/dax.c index 2cc43cd914eb..cc56313c6b3b 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1207,6 +1207,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, lockdep_assert_held(&inode->i_rwsem); } + if (iocb->ki_flags & IOCB_NOWAIT) + flags |= IOMAP_NOWAIT; + while (iov_iter_count(iter)) { ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, iter, dax_iomap_actor); diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 87846aad594b..8fd45eb89424 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -142,18 +142,21 @@ EXPORT_SYMBOL_GPL(debugfs_file_put); * We also need to exclude any file that has ways to write or alter it as root * can bypass the permissions check. 
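[Example] The dax_iomap_rw() hunk above forwards the caller's nowait intent into the iomap layer as IOMAP_NOWAIT, so the mapping code fails with -EAGAIN instead of blocking. The translation is a one-bit mapping; here is a hedged standalone sketch of the pattern, with flag values that are illustrative only and not the kernel's:

#include <stdio.h>

#define IOCB_NOWAIT  (1u << 7)   /* illustrative value, not the kernel's */
#define IOMAP_WRITE  (1u << 0)
#define IOMAP_NOWAIT (1u << 5)   /* illustrative value, not the kernel's */

/* Derive iomap-style flags from incoming kiocb-style flags. */
static unsigned iomap_flags_from_iocb(unsigned ki_flags, int write)
{
	unsigned flags = write ? IOMAP_WRITE : 0;

	if (ki_flags & IOCB_NOWAIT)
		flags |= IOMAP_NOWAIT;   /* prefer -EAGAIN over blocking */
	return flags;
}

int main(void)
{
	printf("%#x\n", iomap_flags_from_iocb(IOCB_NOWAIT, 1));  /* 0x21 */
	return 0;
}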
*/ -static bool debugfs_is_locked_down(struct inode *inode, - struct file *filp, - const struct file_operations *real_fops) +static int debugfs_locked_down(struct inode *inode, + struct file *filp, + const struct file_operations *real_fops) { if ((inode->i_mode & 07777) == 0444 && !(filp->f_mode & FMODE_WRITE) && !real_fops->unlocked_ioctl && !real_fops->compat_ioctl && !real_fops->mmap) - return false; + return 0; - return security_locked_down(LOCKDOWN_DEBUGFS); + if (security_locked_down(LOCKDOWN_DEBUGFS)) + return -EPERM; + + return 0; } static int open_proxy_open(struct inode *inode, struct file *filp) @@ -168,7 +171,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp) real_fops = debugfs_real_fops(filp); - r = debugfs_is_locked_down(inode, filp, real_fops); + r = debugfs_locked_down(inode, filp, real_fops); if (r) goto out; @@ -298,7 +301,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp) real_fops = debugfs_real_fops(filp); - r = debugfs_is_locked_down(inode, filp, real_fops); + r = debugfs_locked_down(inode, filp, real_fops); if (r) goto out; diff --git a/fs/drop_caches.c b/fs/drop_caches.c index d31b6c72b476..dc1a1d5d825b 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); - cond_resched(); invalidate_mapping_pages(inode->i_mapping, 0, -1); iput(toput_inode); toput_inode = inode; + cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index f91db24bbf3b..a064b408d841 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, struct extent_crypt_result ecr; int rc = 0; - BUG_ON(!crypt_stat || !crypt_stat->tfm - || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); + if (!crypt_stat || !crypt_stat->tfm + || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) + return -EINVAL; + if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", crypt_stat->key_size); diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 216fbe6a4837..4dc09638de8f 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat, printk(KERN_WARNING "Tag 1 packet contains key larger " "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n"); rc = -EINVAL; - goto out; + goto out_free; } memcpy((*new_auth_tok)->session_key.encrypted_key, &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2))); diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index d668e60b85b5..c05ca39aa449 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void) * ecryptfs_message_buf_len), GFP_KERNEL); if (!ecryptfs_msg_ctx_arr) { + kfree(ecryptfs_daemon_hash); rc = -ENOMEM; goto out; } diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 19f89f9fb10c..23b74b8e8f96 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq, } src = kmap_atomic(*rq->in); - if (!rq->out[0]) { - dst = NULL; - } else { + if (rq->out[0]) { dst = kmap_atomic(rq->out[0]); memcpy(dst + rq->pageofs_out, src, righthalf); + kunmap_atomic(dst); } - if (rq->out[1] == *rq->in) { 
- memmove(src, src + righthalf, rq->pageofs_out); - } else if (nrpages_out == 2) { - if (dst) - kunmap_atomic(dst); + if (nrpages_out == 2) { DBG_BUGON(!rq->out[1]); - dst = kmap_atomic(rq->out[1]); - memcpy(dst, src + righthalf, rq->pageofs_out); + if (rq->out[1] == *rq->in) { + memmove(src, src + righthalf, rq->pageofs_out); + } else { + dst = kmap_atomic(rq->out[1]); + memcpy(dst, src + righthalf, rq->pageofs_out); + kunmap_atomic(dst); + } } - if (dst) - kunmap_atomic(dst); kunmap_atomic(src); return 0; } diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index a13a78725c57..b766c3ee5fa8 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -649,6 +649,8 @@ ssize_t erofs_listxattr(struct dentry *dentry, struct listxattr_iter it; ret = init_inode_xattrs(d_inode(dentry)); + if (ret == -ENOATTR) + return 0; if (ret) return ret; diff --git a/fs/eventfd.c b/fs/eventfd.c index 8aa0ea8c55e8..78e41c7c3d05 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -24,6 +24,8 @@ #include #include +DEFINE_PER_CPU(int, eventfd_wake_count); + static DEFINE_IDA(eventfd_ida); struct eventfd_ctx { @@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) { unsigned long flags; + /* + * Deadlock or stack overflow issues can happen if we recurse here + * through waitqueue wakeup handlers. If the caller uses potentially + * nested waitqueues with custom wakeup handlers, then it should + * check eventfd_signal_count() before calling this function. If + * it returns true, the eventfd_signal() call should be deferred to a + * safe context. + */ + if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count))) + return 0; + spin_lock_irqsave(&ctx->wqh.lock, flags); + this_cpu_inc(eventfd_wake_count); if (ULLONG_MAX - ctx->count < n) n = ULLONG_MAX - ctx->count; ctx->count += n; if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, EPOLLIN); + this_cpu_dec(eventfd_wake_count); spin_unlock_irqrestore(&ctx->wqh.lock, flags); return n; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index c4159bcc05d9..84a6b61b9829 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1881,9 +1881,9 @@ fetch_events: waiter = true; init_waitqueue_entry(&wait, current); - spin_lock_irq(&ep->wq.lock); + write_lock_irq(&ep->lock); __add_wait_queue_exclusive(&ep->wq, &wait); - spin_unlock_irq(&ep->wq.lock); + write_unlock_irq(&ep->lock); } for (;;) { @@ -1931,9 +1931,9 @@ send_events: goto fetch_events; if (waiter) { - spin_lock_irq(&ep->wq.lock); + write_lock_irq(&ep->lock); __remove_wait_queue(&ep->wq, &wait); - spin_unlock_irq(&ep->wq.lock); + write_unlock_irq(&ep->lock); } return res; @@ -2252,6 +2252,9 @@ error_return: return error; } +/* limit the minimal epoll_wait() timeout to one jiffy (1000/HZ ms) */ +int sysctl_min_epoll_wait_time = 1000/HZ; + /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_wait(2). @@ -2284,6 +2287,10 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events, if (!is_file_epoll(f.file)) goto error_fput; + if (timeout > 0 && sysctl_min_epoll_wait_time > 0 && + timeout < sysctl_min_epoll_wait_time) + timeout = sysctl_min_epoll_wait_time; + /* * At this point it is safe to assume that the "private_data" contains * our own data structure.
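[Example] The eventfd hunk above guards against re-entering eventfd_signal() through nested wakeup handlers with a per-CPU depth counter. Here is a hedged userspace analogue of the same guard, using a thread-local counter in place of the per-CPU one (C11; every name is invented for the sketch):

#include <stdio.h>

static _Thread_local int signal_depth;

/* Returns the amount signalled, or 0 if called recursively. */
static unsigned long long eventfd_like_signal(unsigned long long *count,
					      unsigned long long n)
{
	if (signal_depth)       /* would recurse: bail out, like the patch */
		return 0;

	signal_depth++;
	*count += n;            /* the "wakeup" here could call back into us */
	signal_depth--;
	return n;
}

int main(void)
{
	unsigned long long c = 0;

	printf("%llu\n", eventfd_like_signal(&c, 1));   /* 1 */
	return 0;
}

The kernel version must be per-CPU rather than per-thread because the signal path can run in interrupt context, which is why the patch brackets the increment/decrement with the wqh lock held and IRQs disabled.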
diff --git a/fs/exec.c b/fs/exec.c index 555e93c7dec8..0caab9269cff 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -78,6 +78,24 @@ int suid_dumpable = 0; static LIST_HEAD(formats); static DEFINE_RWLOCK(binfmt_lock); +static DEFINE_PER_CPU(long, execve_count) = { 0 }; + +extern int hook_info_flag; +extern int execve_info_flag; + +long nr_execve_count(void) +{ + int cpu; + long total = 0; + + for_each_possible_cpu(cpu) + { + total += per_cpu(execve_count, cpu); + } + return total; +} +EXPORT_SYMBOL(nr_execve_count); + void __register_binfmt(struct linux_binfmt * fmt, int insert) { BUG_ON(!fmt); @@ -492,6 +510,18 @@ static int prepare_arg_pages(struct linux_binprm *bprm, return 0; } +typedef void (*GET_EXECVE_INFO)(int argc, struct user_arg_ptr argv, const char *filename); +static GET_EXECVE_INFO get_execve_info_func; + +int set_get_execve_info_func(GET_EXECVE_INFO func_addr) +{ + if (hook_info_flag != 0 || nr_execve_count() != 0) + return -EINVAL; + get_execve_info_func = func_addr; + return 0; +} +EXPORT_SYMBOL(set_get_execve_info_func); + /* * 'copy_strings()' copies argument/environment strings from the old * processes's memory to the new process's stack. The call to get_user_pages() @@ -1015,7 +1045,7 @@ static int exec_mmap(struct mm_struct *mm) /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; - mm_release(tsk, old_mm); + exec_mm_release(tsk, old_mm); if (old_mm) { sync_mm_rss(old_mm); @@ -1800,6 +1830,13 @@ static int __do_execve_file(int fd, struct filename *filename, if (retval < 0) goto out; + if (execve_info_flag && hook_info_flag) { + __this_cpu_inc(execve_count); + if (get_execve_info_func != NULL) + get_execve_info_func(bprm->argc, argv, filename->name); + __this_cpu_dec(execve_count); + } + retval = prepare_binprm(bprm); if (retval < 0) goto out; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 7004ce581a32..a16c53655e77 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -701,10 +701,13 @@ static int ext2_get_blocks(struct inode *inode, if (!partial) { count++; mutex_unlock(&ei->truncate_mutex); - if (err) - goto cleanup; goto got_it; } + + if (err) { + mutex_unlock(&ei->truncate_mutex); + goto cleanup; + } } /* diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 30c630d73f0f..065cd2d1bdc6 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1082,9 +1082,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) if (EXT2_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext2; - sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - - le32_to_cpu(es->s_first_data_block) - 1) - / EXT2_BLOCKS_PER_GROUP(sb)) + 1; + sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - + le32_to_cpu(es->s_first_data_block) - 1) + / EXT2_BLOCKS_PER_GROUP(sb)) + 1; db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / EXT2_DESC_PER_BLOCK(sb); sbi->s_group_desc = kmalloc_array (db_count, diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 0b202e00d93f..5aba67a504cf 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, ext4_group_t ngroups = ext4_get_groups_count(sb); struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); + struct buffer_head *bh_p; if (block_group >= ngroups) { ext4_error(sb, "block_group >= groups_count - block_group = %u," @@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); offset = block_group & 
(EXT4_DESC_PER_BLOCK(sb) - 1); - if (!sbi->s_group_desc[group_desc]) { + bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc); + /* + * sbi_array_rcu_deref returns with rcu unlocked; this is ok since + * the pointer being dereferenced won't be dereferenced again. By + * looking at the usage in add_new_gdb() the value isn't modified, + * just the pointer, and so it remains valid. + */ + if (!bh_p) { ext4_error(sb, "Group descriptor not loaded - " "block_group = %u, group_desc = %u, desc = %u", block_group, group_desc, offset); @@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, } desc = (struct ext4_group_desc *)( - (__u8 *)sbi->s_group_desc[group_desc]->b_data + + (__u8 *)bh_p->b_data + offset * EXT4_DESC_SIZE(sb)); if (bh) - *bh = sbi->s_group_desc[group_desc]; + *bh = bh_p; return desc; } diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index d4d4fdfac1a6..ff8e1205127e 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -203,6 +203,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb, return PTR_ERR(inode); num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; while (i < num) { + cond_resched(); map.m_lblk = i; map.m_len = num - i; n = ext4_map_blocks(NULL, inode, &map, 0); diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 9fdd2b269d61..2743c6f8a457 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -81,6 +81,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, error_msg = "rec_len is too small for name_len"; else if (unlikely(((char *) de - buf) + rlen > size)) error_msg = "directory entry overrun"; + else if (unlikely(((char *) de - buf) + rlen > + size - EXT4_DIR_REC_LEN(1) && + ((char *) de - buf) + rlen != size)) { + error_msg = "directory entry too close to block end"; + } else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; @@ -125,12 +130,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) if (err != ERR_BAD_DX_DIR) { return err; } - /* - * We don't set the inode dirty flag since it's not - * critical that it get flushed back to the disk. - */ - ext4_clear_inode_flag(file_inode(file), - EXT4_INODE_INDEX); + /* Can we just clear INDEX flag to ignore htree information? */ + if (!ext4_has_metadata_csum(sb)) { + /* + * We don't set the inode dirty flag since it's not + * critical that it gets flushed back to the disk.
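[Example] The new dir.c check above rejects an entry whose rec_len runs past the last point where one more minimal entry could still fit, without landing exactly on the block end. A hedged standalone rendering of the full rec_len validation chain; DIR_REC_LEN here matches the usual ext4 rounding (name_len plus an 8-byte header, rounded up to 4), but treat it as an approximation for the sketch:

#include <stddef.h>
#include <stdio.h>

/* Illustrative: name_len + 8-byte fixed header, rounded up to 4 bytes. */
#define DIR_REC_LEN(name_len) (((name_len) + 8 + 3) & ~3u)

/* Return NULL if the entry at offset 'off' is sane, else a short reason
 * string -- mirroring the order of checks in __ext4_check_dir_entry(). */
static const char *check_dirent(size_t off, unsigned rlen,
				unsigned name_len, size_t blocksize)
{
	if (rlen < DIR_REC_LEN(1))
		return "rec_len is smaller than minimal";
	if (rlen % 4 != 0)
		return "rec_len % 4 != 0";
	if (rlen < DIR_REC_LEN(name_len))
		return "rec_len is too small for name_len";
	if (off + rlen > blocksize)
		return "directory entry overrun";
	if (off + rlen > blocksize - DIR_REC_LEN(1) &&
	    off + rlen != blocksize)
		return "directory entry too close to block end";
	return NULL;
}

int main(void)
{
	/* 4080 + 12 = 4092: 4 bytes short of a 4096 block -> rejected */
	const char *why = check_dirent(4080, 12, 1, 4096);

	printf("%s\n", why ? why : "ok");
	return 0;
}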
+ */ + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); + } } if (ext4_has_inline_data(inode)) { @@ -668,9 +675,11 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct qstr qstr = {.name = str, .len = len }; - struct inode *inode = dentry->d_parent->d_inode; + const struct dentry *parent = READ_ONCE(dentry->d_parent); + const struct inode *inode = READ_ONCE(parent->d_inode); - if (!IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) { + if (!inode || !IS_CASEFOLDED(inode) || + !EXT4_SB(inode->i_sb)->s_encoding) { if (len != name->len) return -1; return memcmp(str, name->name, len); @@ -683,10 +692,11 @@ static int ext4_d_hash(const struct dentry *dentry, struct qstr *str) { const struct ext4_sb_info *sbi = EXT4_SB(dentry->d_sb); const struct unicode_map *um = sbi->s_encoding; + const struct inode *inode = READ_ONCE(dentry->d_inode); unsigned char *norm; int len, ret = 0; - if (!IS_CASEFOLDED(dentry->d_inode) || !um) + if (!inode || !IS_CASEFOLDED(inode) || !um) return 0; norm = kmalloc(PATH_MAX, GFP_ATOMIC); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 03db3e71676c..69a7535b7982 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1028,6 +1028,7 @@ struct ext4_inode_info { struct timespec64 i_crtime; /* mballoc */ + atomic_t i_prealloc_active; struct list_head i_prealloc_list; spinlock_t i_prealloc_lock; @@ -1396,7 +1397,7 @@ struct ext4_sb_info { loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */ struct buffer_head * s_sbh; /* Buffer containing the super block */ struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */ - struct buffer_head **s_group_desc; + struct buffer_head * __rcu *s_group_desc; unsigned int s_mount_opt; unsigned int s_mount_opt2; unsigned int s_mount_flags; @@ -1458,7 +1459,7 @@ struct ext4_sb_info { #endif /* for buddy allocator */ - struct ext4_group_info ***s_group_info; + struct ext4_group_info ** __rcu *s_group_info; struct inode *s_buddy_cache; spinlock_t s_md_lock; unsigned short *s_mb_offsets; @@ -1476,6 +1477,7 @@ struct ext4_sb_info { unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; + unsigned int s_mb_max_inode_prealloc; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; @@ -1508,7 +1510,7 @@ struct ext4_sb_info { unsigned int s_extent_max_zeroout_kb; unsigned int s_log_groups_per_flex; - struct flex_groups *s_flex_groups; + struct flex_groups * __rcu *s_flex_groups; ext4_group_t s_flex_groups_allocated; /* workqueue for reserved extent conversions (buffered io) */ @@ -1548,8 +1550,11 @@ struct ext4_sb_info { struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; - /* Barrier between changing inodes' journal flags and writepages ops. */ - struct percpu_rw_semaphore s_journal_flag_rwsem; + /* + * Barrier between writepages ops and changing any inode's JOURNAL_DATA + * or EXTENTS flag. 
+ */ + struct percpu_rw_semaphore s_writepages_rwsem; struct dax_device *s_daxdev; }; @@ -1569,6 +1574,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); } +/* + * Returns: sbi->field[index] + * Used to access an array element from the following sbi fields which require + * rcu protection to avoid dereferencing an invalid pointer due to reassignment + * - s_group_desc + * - s_group_info + * - s_flex_groups + */ +#define sbi_array_rcu_deref(sbi, field, index) \ +({ \ + typeof(*((sbi)->field)) _v; \ + rcu_read_lock(); \ + _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \ + rcu_read_unlock(); \ + _v; \ +}) + /* * Inode dynamic state flags */ @@ -1581,6 +1603,8 @@ enum { EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ EXT4_STATE_NEWENTRY, /* File just added to dir */ + EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read + nolocking */ EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */ EXT4_STATE_EXT_PRECACHED, /* extents have been precached */ EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */ @@ -2476,8 +2500,11 @@ void ext4_insert_dentry(struct inode *inode, struct ext4_filename *fname); static inline void ext4_update_dx_flag(struct inode *inode) { - if (!ext4_has_feature_dir_index(inode->i_sb)) + if (!ext4_has_feature_dir_index(inode->i_sb)) { + /* ext4_iget() should have caught this... */ + WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb)); ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); + } } static const unsigned char ext4_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; @@ -2535,7 +2562,7 @@ extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, struct ext4_allocation_request *, int *); extern int ext4_mb_reserve_blocks(struct super_block *, int); -extern void ext4_discard_preallocations(struct inode *); +extern void ext4_discard_preallocations(struct inode *, unsigned int); extern int __init ext4_init_mballoc(void); extern void ext4_exit_mballoc(void); extern void ext4_free_blocks(handle_t *handle, struct inode *inode, @@ -2663,6 +2690,7 @@ extern int ext4_generic_delete_entry(handle_t *handle, extern bool ext4_empty_dir(struct inode *inode); /* resize.c */ +extern void ext4_kvfree_array_rcu(void *to_free); extern int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input); extern int ext4_group_extend(struct super_block *sb, @@ -2910,13 +2938,13 @@ static inline struct ext4_group_info *ext4_get_group_info(struct super_block *sb, ext4_group_t group) { - struct ext4_group_info ***grp_info; + struct ext4_group_info **grp_info; long indexv, indexh; BUG_ON(group >= EXT4_SB(sb)->s_groups_count); - grp_info = EXT4_SB(sb)->s_group_info; indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); - return grp_info[indexv][indexh]; + grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); + return grp_info[indexh]; } /* @@ -2966,7 +2994,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize) !inode_is_locked(inode)); down_write(&EXT4_I(inode)->i_data_sem); if (newsize > EXT4_I(inode)->i_disksize) - EXT4_I(inode)->i_disksize = newsize; + WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize); up_write(&EXT4_I(inode)->i_data_sem); } @@ -3346,6 +3374,21 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) set_bit(BH_BITMAP_UPTODATE, 
&(bh)->b_state); } +/* + * Disable DIO read nolock optimization, so new dioreaders will be forced + * to grab i_mutex + */ +static inline void ext4_inode_block_unlocked_dio(struct inode *inode) +{ + ext4_set_inode_state(inode, EXT4_STATE_DIOREAD_LOCK); + smp_mb(); +} +static inline void ext4_inode_resume_unlocked_dio(struct inode *inode) +{ + smp_mb(); + ext4_clear_inode_state(inode, EXT4_STATE_DIOREAD_LOCK); +} + #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) /* For ioend & aio unwritten conversion wait queues */ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index fb0f99dc8c22..6208b36a2256 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4492,7 +4492,7 @@ got_allocated_blocks: /* free data blocks we just allocated */ /* not a good idea to call discard here directly, * but otherwise we'd need to call it every free() */ - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ext4_free_blocks(handle, inode, NULL, newblock, EXT4_C2B(sbi, allocated_clusters), fb_flags); goto out2; @@ -4757,6 +4757,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, flags |= EXT4_GET_BLOCKS_KEEP_SIZE; /* Wait all existing dio workers, newcomers will block on i_mutex */ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* Preallocate the range including the unaligned edges */ @@ -4767,7 +4768,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, round_down(offset, 1 << blkbits)) >> blkbits, new_size, flags); if (ret) - goto out_mutex; + goto out_dio; } @@ -4785,13 +4786,13 @@ static long ext4_zero_range(struct file *file, loff_t offset, ret = ext4_break_layouts(inode); if (ret) { up_write(&EXT4_I(inode)->i_mmap_sem); - goto out_mutex; + goto out_dio; } ret = ext4_update_disksize_before_punch(inode, offset, len); if (ret) { up_write(&EXT4_I(inode)->i_mmap_sem); - goto out_mutex; + goto out_dio; } /* Now release the pages and zero block aligned part of pages */ truncate_pagecache_range(inode, start, end - 1); @@ -4801,10 +4802,10 @@ static long ext4_zero_range(struct file *file, loff_t offset, flags); up_write(&EXT4_I(inode)->i_mmap_sem); if (ret) - goto out_mutex; + goto out_dio; } if (!partial_begin && !partial_end) - goto out_mutex; + goto out_dio; /* * In worst case we have to writeout two nonadjacent unwritten @@ -4817,7 +4818,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(inode->i_sb, ret); - goto out_mutex; + goto out_dio; } inode->i_mtime = inode->i_ctime = current_time(inode); @@ -4842,6 +4843,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, ext4_handle_sync(handle); ext4_journal_stop(handle); +out_dio: + ext4_inode_resume_unlocked_dio(inode); out_mutex: inode_unlock(inode); return ret; @@ -4929,9 +4932,11 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) } /* Wait all existing dio workers, newcomers will block on i_mutex */ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); + ext4_inode_resume_unlocked_dio(inode); if (ret) goto out; @@ -5489,6 +5494,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) } /* Wait for existing dio to complete */ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* @@ -5532,7 +5538,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) } down_write(&EXT4_I(inode)->i_data_sem); - 
ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); @@ -5546,7 +5552,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_ext_shift_extents(inode, handle, punch_stop, punch_stop - punch_start, SHIFT_LEFT); @@ -5570,6 +5576,7 @@ out_stop: ext4_journal_stop(handle); out_mmap: up_write(&EXT4_I(inode)->i_mmap_sem); + ext4_inode_resume_unlocked_dio(inode); out_mutex: inode_unlock(inode); return ret; @@ -5642,6 +5649,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) } /* Wait for existing dio to complete */ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* @@ -5682,7 +5690,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { @@ -5748,6 +5756,7 @@ out_stop: ext4_journal_stop(handle); out_mmap: up_write(&EXT4_I(inode)->i_mmap_sem); + ext4_inode_resume_unlocked_dio(inode); out_mutex: inode_unlock(inode); return ret; diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index d996b44d2265..809d1f337537 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -153,6 +153,9 @@ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, static void __revise_pending(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len); +unsigned int ext4_shrink_es_timeout; +unsigned int ext4_shrink_es_timeout_min; + int __init ext4_init_es(void) { ext4_es_cachep = kmem_cache_create("ext4_extent_status", @@ -160,6 +163,9 @@ int __init ext4_init_es(void) 0, (SLAB_RECLAIM_ACCOUNT), NULL); if (ext4_es_cachep == NULL) return -ENOMEM; + + ext4_shrink_es_timeout = jiffies_to_msecs(HZ/4); + ext4_shrink_es_timeout_min = jiffies_to_msecs(HZ/10); return 0; } @@ -1452,6 +1458,7 @@ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, int nr_to_walk; int nr_shrunk = 0; int retried = 0, nr_skipped = 0; + unsigned long end_time = 0; es_stats = &sbi->s_es_stats; start_time = ktime_get(); @@ -1459,6 +1466,7 @@ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, retry: spin_lock(&sbi->s_es_lock); nr_to_walk = sbi->s_es_nr_inode; + end_time = jiffies + msecs_to_jiffies(ext4_shrink_es_timeout); while (nr_to_walk-- > 0) { if (list_empty(&sbi->s_es_list)) { spin_unlock(&sbi->s_es_lock); @@ -1494,6 +1502,15 @@ retry: if (nr_to_scan <= 0) goto out; + + /* + * Since tkernel runs with preemption disabled, dropping + * sbi->s_es_lock alone would not let other tasks run; call + * cond_resched() here instead to avoid a soft lockup. + */ + if (time_after_eq(jiffies, end_time)) + cond_resched(); + spin_lock(&sbi->s_es_lock); } spin_unlock(&sbi->s_es_lock); diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index 825313c59752..80a62ee17a81 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -238,6 +238,9 @@ extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); extern int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v); +extern unsigned int ext4_shrink_es_timeout; +extern unsigned int ext4_shrink_es_timeout_min; + extern int __init ext4_init_pending(void); extern void ext4_exit_pending(void); extern void ext4_init_pending_tree(struct 
ext4_pending_tree *tree); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8d2bbcc2d813..f7f8ca6689ef 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -40,9 +40,10 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; - if (!inode_trylock_shared(inode)) { - if (iocb->ki_flags & IOCB_NOWAIT) + if (iocb->ki_flags & IOCB_NOWAIT) { + if (!inode_trylock_shared(inode)) return -EAGAIN; + } else { inode_lock_shared(inode); } /* @@ -94,7 +95,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp) !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) @@ -190,9 +191,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; - if (!inode_trylock(inode)) { - if (iocb->ki_flags & IOCB_NOWAIT) + if (iocb->ki_flags & IOCB_NOWAIT) { + if (!inode_trylock(inode)) return -EAGAIN; + } else { inode_lock(inode); } ret = ext4_write_checks(iocb, from); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 764ff4c56233..07a548dccb15 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -265,13 +265,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) ext4_debug("freeing inode %lu\n", ino); trace_ext4_free_inode(inode); - /* - * Note: we must free any quota before locking the superblock, - * as writing the quota to disk may need the lock as well. - */ dquot_initialize(inode); dquot_free_inode(inode); - dquot_drop(inode); is_directory = S_ISDIR(inode->i_mode); @@ -330,11 +325,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) percpu_counter_inc(&sbi->s_freeinodes_counter); if (sbi->s_log_groups_per_flex) { - ext4_group_t f = ext4_flex_group(sbi, block_group); + struct flex_groups *fg; - atomic_inc(&sbi->s_flex_groups[f].free_inodes); + fg = sbi_array_rcu_deref(sbi, s_flex_groups, + ext4_flex_group(sbi, block_group)); + atomic_inc(&fg->free_inodes); if (is_directory) - atomic_dec(&sbi->s_flex_groups[f].used_dirs); + atomic_dec(&fg->used_dirs); } BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); fatal = ext4_handle_dirty_metadata(handle, NULL, bh2); @@ -370,12 +367,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, int flex_size, struct orlov_stats *stats) { struct ext4_group_desc *desc; - struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups; if (flex_size > 1) { - stats->free_inodes = atomic_read(&flex_group[g].free_inodes); - stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); - stats->used_dirs = atomic_read(&flex_group[g].used_dirs); + struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb), + s_flex_groups, g); + stats->free_inodes = atomic_read(&fg->free_inodes); + stats->free_clusters = atomic64_read(&fg->free_clusters); + stats->used_dirs = atomic_read(&fg->used_dirs); return; } @@ -991,6 +989,9 @@ got: block_bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(block_bitmap_bh)) { err = PTR_ERR(block_bitmap_bh); + printk(KERN_ERR "%s: failed to update group #%d due to EIO on %s," + " meta-data may be inconsistent.\n", + __func__, group, sbi->s_journal->j_devname); goto out; } BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); @@ -1056,7 +1057,8 @@ got: if (sbi->s_log_groups_per_flex) { ext4_group_t f = ext4_flex_group(sbi, group); - 
atomic_inc(&sbi->s_flex_groups[f].used_dirs); + atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups, + f)->used_dirs); } } if (ext4_has_group_desc_csum(sb)) { @@ -1079,7 +1081,8 @@ got: if (sbi->s_log_groups_per_flex) { flex_group = ext4_flex_group(sbi, group); - atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes); + atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups, + flex_group)->free_inodes); } inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 516faa280ced..55ac694005dc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -184,7 +184,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, up_write(&EXT4_I(inode)->i_data_sem); ret = ext4_journal_restart(handle, nblocks); down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); return ret; } @@ -196,7 +196,12 @@ void ext4_evict_inode(struct inode *inode) { handle_t *handle; int err; - int extra_credits = 3; + /* + * Credits for final inode cleanup and freeing: + * sb + inode (ext4_orphan_del()), block bitmap, group descriptor + * (xattr block freeing), bitmap, group descriptor (inode freeing) + */ + int extra_credits = 6; struct ext4_xattr_inode_array *ea_inode_array = NULL; trace_ext4_evict_inode(inode); @@ -252,8 +257,12 @@ void ext4_evict_inode(struct inode *inode) if (!IS_NOQUOTA(inode)) extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb); + /* + * Block bitmap, group descriptor, and inode are accounted in both + * ext4_blocks_for_truncate() and extra_credits. So subtract 3. + */ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, - ext4_blocks_for_truncate(inode)+extra_credits); + ext4_blocks_for_truncate(inode) + extra_credits - 3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); /* @@ -392,7 +401,7 @@ void ext4_da_update_reserve_space(struct inode *inode, */ if ((ei->i_reserved_data_blocks == 0) && !inode_is_open_for_write(inode)) - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); } static int __check_block_validity(struct inode *inode, const char *func, @@ -2564,7 +2573,7 @@ update_disksize: * truncate are avoided by checking i_size under i_data_sem. 
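The hunk that follows pairs with the WRITE_ONCE() change to ext4_update_i_disksize() in the ext4.h hunk above. The pattern, sketched with invented helper names: the writer updates i_disksize under i_data_sem using WRITE_ONCE(), and the lockless reader in the writepages path uses READ_ONCE() so the compiler cannot re-load or cache the value mid-check:

static void set_disksize(struct ext4_inode_info *ei, loff_t newsize)
{
	/* caller holds ei->i_data_sem for writing */
	if (newsize > ei->i_disksize)
		WRITE_ONCE(ei->i_disksize, newsize);
}

static loff_t peek_disksize(struct ext4_inode_info *ei)
{
	/* lockless; pairs with the WRITE_ONCE() above */
	return READ_ONCE(ei->i_disksize);
}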
*/ disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; - if (disksize > EXT4_I(inode)->i_disksize) { + if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) { int err2; loff_t i_size; @@ -2725,7 +2734,7 @@ static int ext4_writepages(struct address_space *mapping, if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; - percpu_down_read(&sbi->s_journal_flag_rwsem); + percpu_down_read(&sbi->s_writepages_rwsem); trace_ext4_writepages(inode, wbc); /* @@ -2946,7 +2955,7 @@ unplug: out_writepages: trace_ext4_writepages_result(inode, wbc, ret, nr_to_write - wbc->nr_to_write); - percpu_up_read(&sbi->s_journal_flag_rwsem); + percpu_up_read(&sbi->s_writepages_rwsem); return ret; } @@ -2961,13 +2970,13 @@ static int ext4_dax_writepages(struct address_space *mapping, if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; - percpu_down_read(&sbi->s_journal_flag_rwsem); + percpu_down_read(&sbi->s_writepages_rwsem); trace_ext4_writepages(inode, wbc); ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc); trace_ext4_writepages_result(inode, wbc, ret, nr_to_write - wbc->nr_to_write); - percpu_up_read(&sbi->s_journal_flag_rwsem); + percpu_up_read(&sbi->s_writepages_rwsem); return ret; } @@ -3523,8 +3532,14 @@ retry: return ret; } + /* + * Writes that span EOF might trigger an I/O size update on completion, + * so consider them to be dirty for the purposes of O_DSYNC, even if + * no other metadata changes are being made or are pending here. + */ iomap->flags = 0; - if (ext4_inode_datasync_dirty(inode)) + if (ext4_inode_datasync_dirty(inode) || + offset + length > i_size_read(inode)) iomap->flags |= IOMAP_F_DIRTY; iomap->bdev = inode->i_sb->s_bdev; iomap->dax_dev = sbi->s_daxdev; @@ -3817,25 +3832,32 @@ out: static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) { - struct address_space *mapping = iocb->ki_filp->f_mapping; - struct inode *inode = mapping->host; - size_t count = iov_iter_count(iter); + int unlocked = 0; + struct inode *inode = iocb->ki_filp->f_mapping->host; ssize_t ret; - /* - * Shared inode_lock is enough for us - it protects against concurrent - * writes & truncates and since we take care of writing back page cache, - * we are protected against page writeback as well. - */ - inode_lock_shared(inode); - ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, - iocb->ki_pos + count - 1); - if (ret) - goto out_unlock; + if (ext4_should_dioread_nolock(inode)) { + /* + * Nolock dioread optimization may be dynamically disabled + * via ext4_inode_block_unlocked_dio(). Check inode's state + * while holding extra i_dio_count ref. + */ + inode_dio_begin(inode); + smp_mb(); + if (unlikely(ext4_test_inode_state(inode, + EXT4_STATE_DIOREAD_LOCK))) + inode_dio_end(inode); + else + unlocked = 1; + } + ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, - iter, ext4_dio_get_block, NULL, NULL, 0); -out_unlock: - inode_unlock_shared(inode); + iter, ext4_dio_get_block, + NULL, NULL, + unlocked ? 
0 : DIO_LOCKING); + if (unlocked) + inode_dio_end(inode); + return ret; } @@ -4302,6 +4324,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) } /* Wait all existing dio workers, newcomers will block on i_mutex */ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* @@ -4350,7 +4373,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) if (stop_block > first_block) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); @@ -4379,6 +4402,7 @@ out_stop: ext4_journal_stop(handle); out_dio: up_write(&EXT4_I(inode)->i_mmap_sem); + ext4_inode_resume_unlocked_dio(inode); out_mutex: inode_unlock(inode); return ret; @@ -4505,7 +4529,7 @@ int ext4_truncate(struct inode *inode) down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) err = ext4_ext_truncate(handle, inode); @@ -4951,6 +4975,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ret = -EFSCORRUPTED; goto bad_inode; } + /* + * If dir_index is not enabled but there's dir with INDEX flag set, + * we'd normally treat htree data as empty space. But with metadata + * checksumming that corrupts checksums so forbid that. + */ + if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) && + ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) { + ext4_error_inode(inode, function, line, 0, + "iget: Dir with htree data on filesystem without dir_index feature."); + ret = -EFSCORRUPTED; + goto bad_inode; + } ei->i_disksize = inode->i_size; #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; @@ -5450,11 +5486,15 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) offset = inode->i_size & (PAGE_SIZE - 1); /* - * All buffers in the last page remain valid? Then there's nothing to - * do. We do the check mainly to optimize the common PAGE_SIZE == - * blocksize case + * If the page is fully truncated, we don't need to wait for any commit + * (and we even should not as __ext4_journalled_invalidatepage() may + * strip all buffers from the page but keep the page dirty which can then + * confuse e.g. concurrent ext4_writepage() seeing dirty page without + * buffers). Also we don't need to wait for any commit if all buffers in + * the page remain valid. This is most beneficial for the common case of + * blocksize == PAGESIZE. */ - if (offset > PAGE_SIZE - i_blocksize(inode)) + if (!offset || offset > (PAGE_SIZE - i_blocksize(inode))) return; while (1) { page = find_lock_page(inode->i_mapping, @@ -5599,7 +5639,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) * Blocks are going to be removed from the inode. Wait * for dio in flight. 
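This is one of several sites in the patch restoring the block/resume bracket around inode_dio_wait(). The handshake, sketched with the helpers this patch adds to ext4.h (the surrounding function shape is illustrative):

static void shrink_inode_blocks(struct inode *inode)
{
	ext4_inode_block_unlocked_dio(inode);	/* set state bit + smp_mb() */
	inode_dio_wait(inode);			/* drain in-flight unlocked DIO */
	/* ... remove blocks safely ... */
	ext4_inode_resume_unlocked_dio(inode);	/* smp_mb() + clear bit */
}

The smp_mb() in the helpers pairs with the one in ext4_direct_IO_read(): either the reader sees the state bit and falls back to locked DIO, or the blocker sees the elevated i_dio_count and waits.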
*/ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); + ext4_inode_resume_unlocked_dio(inode); } down_write(&EXT4_I(inode)->i_mmap_sem); @@ -5912,8 +5954,23 @@ static int __ext4_expand_extra_isize(struct inode *inode, { struct ext4_inode *raw_inode; struct ext4_xattr_ibody_header *header; + unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); int error; + /* this was checked at iget time, but double check for good measure */ + if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) || + (ei->i_extra_isize & 3)) { + EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)", + ei->i_extra_isize, + EXT4_INODE_SIZE(inode->i_sb)); + return -EFSCORRUPTED; + } + if ((new_extra_isize < ei->i_extra_isize) || + (new_extra_isize < 4) || + (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE)) + return -EINVAL; /* Should never happen */ + raw_inode = ext4_raw_inode(iloc); header = IHDR(inode, raw_inode); @@ -6007,7 +6064,7 @@ int ext4_expand_extra_isize(struct inode *inode, error = ext4_journal_get_write_access(handle, iloc->bh); if (error) { brelse(iloc->bh); - goto out_stop; + goto out_unlock; } error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc, @@ -6017,8 +6074,8 @@ int ext4_expand_extra_isize(struct inode *inode, if (!error) error = rc; +out_unlock: ext4_write_unlock_xattr(inode, &no_expand); -out_stop: ext4_journal_stop(handle); return error; } @@ -6114,6 +6171,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) return -EROFS; /* Wait for all existing dio workers */ + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* @@ -6129,11 +6187,12 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) err = filemap_write_and_wait(inode->i_mapping); if (err < 0) { up_write(&EXT4_I(inode)->i_mmap_sem); + ext4_inode_resume_unlocked_dio(inode); return err; } } - percpu_down_write(&sbi->s_journal_flag_rwsem); + percpu_down_write(&sbi->s_writepages_rwsem); jbd2_journal_lock_updates(journal); /* @@ -6150,7 +6209,8 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) err = jbd2_journal_flush(journal); if (err < 0) { jbd2_journal_unlock_updates(journal); - percpu_up_write(&sbi->s_journal_flag_rwsem); + percpu_up_write(&sbi->s_writepages_rwsem); + ext4_inode_resume_unlocked_dio(inode); return err; } ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); @@ -6158,10 +6218,11 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) ext4_set_aops(inode); jbd2_journal_unlock_updates(journal); - percpu_up_write(&sbi->s_journal_flag_rwsem); + percpu_up_write(&sbi->s_writepages_rwsem); if (val) up_write(&EXT4_I(inode)->i_mmap_sem); + ext4_inode_resume_unlocked_dio(inode); /* Finally we can mark the inode as dirty. 
*/ diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 0b7f316fd30f..be4fd8f3d58c 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -154,6 +154,8 @@ static long swap_inode_boot_loader(struct super_block *sb, goto err_out; /* Wait for all existing dio workers */ + ext4_inode_block_unlocked_dio(inode); + ext4_inode_block_unlocked_dio(inode_bl); inode_dio_wait(inode); inode_dio_wait(inode_bl); @@ -202,7 +204,7 @@ static long swap_inode_boot_loader(struct super_block *sb, reset_inode_seed(inode); reset_inode_seed(inode_bl); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); err = ext4_mark_inode_dirty(handle, inode); if (err < 0) { @@ -252,6 +254,8 @@ err_out1: err_out: up_write(&EXT4_I(inode)->i_mmap_sem); journal_err_out: + ext4_inode_resume_unlocked_dio(inode); + ext4_inode_resume_unlocked_dio(inode_bl); unlock_two_nondirectories(inode, inode_bl); iput(inode_bl); return err; @@ -364,6 +368,7 @@ static int ext4_ioctl_setflags(struct inode *inode, */ if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) && (flags & EXT4_IMMUTABLE_FL)) { + ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); err = filemap_write_and_wait(inode->i_mapping); if (err) @@ -424,6 +429,7 @@ flags_err: } flags_out: + ext4_inode_resume_unlocked_dio(inode); return err; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index a3e2767bdf2f..c92cdf24238c 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned size; - struct ext4_group_info ***new_groupinfo; + struct ext4_group_info ***old_groupinfo, ***new_groupinfo; size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); @@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); return -ENOMEM; } - if (sbi->s_group_info) { - memcpy(new_groupinfo, sbi->s_group_info, + rcu_read_lock(); + old_groupinfo = rcu_dereference(sbi->s_group_info); + if (old_groupinfo) + memcpy(new_groupinfo, old_groupinfo, sbi->s_group_info_size * sizeof(*sbi->s_group_info)); - kvfree(sbi->s_group_info); - } - sbi->s_group_info = new_groupinfo; + rcu_read_unlock(); + rcu_assign_pointer(sbi->s_group_info, new_groupinfo); sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); + if (old_groupinfo) + ext4_kvfree_array_rcu(old_groupinfo); ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", sbi->s_group_info_size); return 0; @@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, { int i; int metalen = 0; + int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_info **meta_group_info; struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); @@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, "for a buddy group"); goto exit_meta_group_info; } - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = - meta_group_info; + rcu_read_lock(); + rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; + rcu_read_unlock(); } - meta_group_info = - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; + meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); @@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t 
group, exit_group_info: /* If a meta_group_info table has been allocated, release it now */ if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { - kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; + struct ext4_group_info ***group_info; + + rcu_read_lock(); + group_info = rcu_dereference(sbi->s_group_info); + kfree(group_info[idx]); + group_info[idx] = NULL; + rcu_read_unlock(); } exit_meta_group_info: return -ENOMEM; @@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb) struct ext4_sb_info *sbi = EXT4_SB(sb); int err; struct ext4_group_desc *desc; + struct ext4_group_info ***group_info; struct kmem_cache *cachep; err = ext4_mb_alloc_groupinfo(sb, ngroups); @@ -2507,11 +2517,16 @@ err_freebuddy: while (i-- > 0) kmem_cache_free(cachep, ext4_get_group_info(sb, i)); i = sbi->s_group_info_size; + rcu_read_lock(); + group_info = rcu_dereference(sbi->s_group_info); while (i-- > 0) - kfree(sbi->s_group_info[i]); + kfree(group_info[i]); + rcu_read_unlock(); iput(sbi->s_buddy_cache); err_freesgi: - kvfree(sbi->s_group_info); + rcu_read_lock(); + kvfree(rcu_dereference(sbi->s_group_info)); + rcu_read_unlock(); return -ENOMEM; } @@ -2618,6 +2633,7 @@ int ext4_mb_init(struct super_block *sb) sbi->s_mb_stats = MB_DEFAULT_STATS; sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; + sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC; /* * The default group preallocation is 512, which for 4k block * sizes translates to 2 megabytes. However for bigalloc file @@ -2700,7 +2716,7 @@ int ext4_mb_release(struct super_block *sb) ext4_group_t ngroups = ext4_get_groups_count(sb); ext4_group_t i; int num_meta_group_infos; - struct ext4_group_info *grinfo; + struct ext4_group_info *grinfo, ***group_info; struct ext4_sb_info *sbi = EXT4_SB(sb); struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); @@ -2719,9 +2735,12 @@ int ext4_mb_release(struct super_block *sb) num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); + rcu_read_lock(); + group_info = rcu_dereference(sbi->s_group_info); for (i = 0; i < num_meta_group_infos; i++) - kfree(sbi->s_group_info[i]); - kvfree(sbi->s_group_info); + kfree(group_info[i]); + kvfree(group_info); + rcu_read_unlock(); } kfree(sbi->s_mb_offsets); kfree(sbi->s_mb_maxs); @@ -3020,7 +3039,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, ext4_group_t flex_group = ext4_flex_group(sbi, ac->ac_b_ex.fe_group); atomic64_sub(ac->ac_b_ex.fe_len, - &sbi->s_flex_groups[flex_group].free_clusters); + &sbi_array_rcu_deref(sbi, s_flex_groups, + flex_group)->free_clusters); } err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); @@ -3548,6 +3568,26 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, mb_debug(1, "preallocated %u for group %u\n", preallocated, group); } +static void ext4_mb_mark_pa_deleted(struct super_block *sb, + struct ext4_prealloc_space *pa) +{ + struct ext4_inode_info *ei; + + if (pa->pa_deleted) { + ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", + pa->pa_type, pa->pa_pstart, pa->pa_lstart, + pa->pa_len); + return; + } + + pa->pa_deleted = 1; + + if (pa->pa_type == MB_INODE_PA) { + ei = EXT4_I(pa->pa_inode); + atomic_dec(&ei->i_prealloc_active); + } +} + static void ext4_mb_pa_callback(struct rcu_head *head) { struct ext4_prealloc_space *pa; @@ -3580,7 +3620,7 @@ static void ext4_mb_put_pa(struct 
ext4_allocation_context *ac, return; } - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); grp_blk = pa->pa_pstart; @@ -3708,6 +3748,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); + atomic_inc(&ei->i_prealloc_active); return 0; } @@ -3928,7 +3969,7 @@ repeat: } /* seems this one can be freed ... */ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); /* we can trust pa_free ... */ free += pa->pa_free; @@ -3986,7 +4027,7 @@ out: * * FIXME!! Make sure it is valid at all the call sites */ -void ext4_discard_preallocations(struct inode *inode) +void ext4_discard_preallocations(struct inode *inode, unsigned int needed) { struct ext4_inode_info *ei = EXT4_I(inode); struct super_block *sb = inode->i_sb; @@ -4003,15 +4044,19 @@ void ext4_discard_preallocations(struct inode *inode) } mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); - trace_ext4_discard_preallocations(inode); + trace_ext4_discard_preallocations(inode, + atomic_read(&ei->i_prealloc_active), needed); INIT_LIST_HEAD(&list); + if (needed == 0) + needed = UINT_MAX; + repeat: /* first, collect all pa's in the inode */ spin_lock(&ei->i_prealloc_lock); - while (!list_empty(&ei->i_prealloc_list)) { - pa = list_entry(ei->i_prealloc_list.next, + while (!list_empty(&ei->i_prealloc_list) && needed) { + pa = list_entry(ei->i_prealloc_list.prev, struct ext4_prealloc_space, pa_inode_list); BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); spin_lock(&pa->pa_lock); @@ -4028,10 +4073,11 @@ repeat: } if (pa->pa_deleted == 0) { - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &list); + needed--; continue; } @@ -4299,7 +4345,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, BUG_ON(pa->pa_type != MB_GROUP_PA); /* seems this one can be freed ... */ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); @@ -4396,11 +4442,31 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) return ; } +/* + * If the per-inode prealloc list grows too long, trim some PAs. + */ +static void +ext4_mb_trim_inode_pa(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + int count, delta; + + count = atomic_read(&ei->i_prealloc_active); + delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; + if (count > sbi->s_mb_max_inode_prealloc + delta) { + count -= sbi->s_mb_max_inode_prealloc; + ext4_discard_preallocations(inode, count); + } +} + /* * release all resource we used in allocation */ static int ext4_mb_release_context(struct ext4_allocation_context *ac) { + struct inode *inode = ac->ac_inode; + struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { @@ -4427,6 +4493,17 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) spin_unlock(pa->pa_obj_lock); ext4_mb_add_n_trim(ac); } + + if (pa->pa_type == MB_INODE_PA) { + /* + * Treat the per-inode prealloc list as an LRU list, then try + * to trim the least recently used PA. 
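The trimming this comment refers to has deliberate hysteresis. With the default s_mb_max_inode_prealloc of 512, ext4_mb_trim_inode_pa() computes delta = 512/4 + 1 = 129, so nothing is discarded until the list exceeds 641 entries, at which point it is cut back to 512. A standalone sketch of the arithmetic (helper name invented):

static int pas_to_trim(int active, int limit)
{
	int delta = (limit >> 2) + 1;

	/* trim back to 'limit' only once we overshoot by delta */
	return active > limit + delta ? active - limit : 0;
}

The slack keeps a steady-state workload from calling ext4_discard_preallocations() on every release.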
+ */ + spin_lock(pa->pa_obj_lock); + list_move(&pa->pa_inode_list, &ei->i_prealloc_list); + spin_unlock(pa->pa_obj_lock); + } + ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) @@ -4436,6 +4513,7 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); + ext4_mb_trim_inode_pa(inode); return 0; } @@ -4914,7 +4992,8 @@ do_more: if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic64_add(count_clusters, - &sbi->s_flex_groups[flex_group].free_clusters); + &sbi_array_rcu_deref(sbi, s_flex_groups, + flex_group)->free_clusters); } /* @@ -5071,7 +5150,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic64_add(clusters_freed, - &sbi->s_flex_groups[flex_group].free_clusters); + &sbi_array_rcu_deref(sbi, s_flex_groups, + flex_group)->free_clusters); } ext4_mb_unload_buddy(&e4b); diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 88c98f17e3d9..d794ca8fa2d0 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -77,6 +77,10 @@ do { \ */ #define MB_DEFAULT_GROUP_PREALLOC 512 +/* + * maximum length of inode prealloc list + */ +#define MB_DEFAULT_MAX_INODE_PREALLOC 512 struct ext4_free_data { /* this links the free block information from sb_info */ diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index b1e4d359f73b..be4ee3dcc5cf 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode) int ext4_ext_migrate(struct inode *inode) { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); handle_t *handle; int retval = 0, i; __le32 *i_data; @@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode) */ return retval; + percpu_down_write(&sbi->s_writepages_rwsem); + /* * Worst case we can touch the allocation bitmaps, a bgd * block, and a block to link in the orphan list. 
We do need @@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode) if (IS_ERR(handle)) { retval = PTR_ERR(handle); - return retval; + goto out_unlock; } goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) * EXT4_INODES_PER_GROUP(inode->i_sb)) + 1; @@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode) if (IS_ERR(tmp_inode)) { retval = PTR_ERR(tmp_inode); ext4_journal_stop(handle); - return retval; + goto out_unlock; } i_size_write(tmp_inode, i_size_read(inode)); /* @@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode) */ ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); - goto out; + goto out_tmp_inode; } ei = EXT4_I(inode); @@ -595,10 +598,11 @@ err_out: /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); ext4_journal_stop(handle); -out: +out_tmp_inode: unlock_new_inode(tmp_inode); iput(tmp_inode); - +out_unlock: + percpu_up_write(&sbi->s_writepages_rwsem); return retval; } @@ -608,7 +612,8 @@ out: int ext4_ind_migrate(struct inode *inode) { struct ext4_extent_header *eh; - struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_super_block *es = sbi->s_es; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_extent *ex; unsigned int i, len; @@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode) if (test_opt(inode->i_sb, DELALLOC)) ext4_alloc_da_blocks(inode); + percpu_down_write(&sbi->s_writepages_rwsem); + handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out_unlock; + } down_write(&EXT4_I(inode)->i_data_sem); ret = ext4_ext_check_inode(inode); @@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode) errout: ext4_journal_stop(handle); up_write(&EXT4_I(inode)->i_data_sem); +out_unlock: + percpu_up_write(&sbi->s_writepages_rwsem); return ret; } diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 2305b4374fd3..9d00e0dd2ba9 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp, { __ext4_warning(sb, function, line, "%s", msg); __ext4_warning(sb, function, line, - "MMP failure info: last update time: %llu, last update " - "node: %s, last update device: %s", - (long long unsigned int) le64_to_cpu(mmp->mmp_time), - mmp->mmp_nodename, mmp->mmp_bdevname); + "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s", + (unsigned long long)le64_to_cpu(mmp->mmp_time), + (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename, + (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname); } /* @@ -154,6 +154,7 @@ static int kmmpd(void *data) mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval, EXT4_MMP_MIN_CHECK_INTERVAL); mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); + BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE); bdevname(bh->b_bdev, mmp->mmp_bdevname); memcpy(mmp->mmp_nodename, init_utsname()->nodename, @@ -375,7 +376,8 @@ skip: /* * Start a kernel thread to update the MMP block periodically. 
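The mmp.c hunks around here share one idea: mmp_nodename and mmp_bdevname are fixed-size on-disk character arrays with no guaranteed NUL terminator, so they must be printed with an explicit "%.*s" precision rather than "%s". A sketch under that assumption (the struct and sizes here are illustrative, not the real struct mmp_struct layout):

struct mmp_example {
	char nodename[64];
	char bdevname[32];
};

static void dump_mmp_names(const struct mmp_example *mmp)
{
	pr_warn("node: %.*s, device: %.*s\n",
		(int)sizeof(mmp->nodename), mmp->nodename,
		(int)sizeof(mmp->bdevname), mmp->bdevname);
}

The BUILD_BUG_ON() added to kmmpd() guards the other direction: the on-disk field must be large enough that bdevname() cannot overrun it.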
*/ - EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s", + EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s", + (int)sizeof(mmp->mmp_bdevname), bdevname(bh->b_bdev, mmp->mmp_bdevname)); if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) { diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 30ce3dc69378..7a1bf02c43ab 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -603,6 +603,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, lock_two_nondirectories(orig_inode, donor_inode); /* Wait for all existing dio workers */ + ext4_inode_block_unlocked_dio(orig_inode); + ext4_inode_block_unlocked_dio(donor_inode); inode_dio_wait(orig_inode); inode_dio_wait(donor_inode); @@ -686,13 +688,15 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, out: if (*moved_len) { - ext4_discard_preallocations(orig_inode); - ext4_discard_preallocations(donor_inode); + ext4_discard_preallocations(orig_inode, 0); + ext4_discard_preallocations(donor_inode, 0); } ext4_ext_drop_refs(path); kfree(path); ext4_double_up_write_data_sem(orig_inode, donor_inode); + ext4_inode_resume_unlocked_dio(orig_inode); + ext4_inode_resume_unlocked_dio(donor_inode); unlock_two_nondirectories(orig_inode, donor_inode); return ret; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a427d2031a8d..a564d0289a70 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1507,6 +1507,7 @@ restart: /* * We deal with the read-ahead logic here. */ + cond_resched(); if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; @@ -2205,6 +2206,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, retval = ext4_dx_add_entry(handle, &fname, dir, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) goto out; + /* Can we just ignore htree data? */ + if (ext4_has_metadata_csum(sb)) { + EXT4_ERROR_INODE(dir, + "Directory has corrupted htree index."); + retval = -EFSCORRUPTED; + goto out; + } ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); @@ -2808,7 +2816,7 @@ bool ext4_empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; - struct ext4_dir_entry_2 *de, *de1; + struct ext4_dir_entry_2 *de; struct super_block *sb; if (ext4_has_inline_data(inode)) { @@ -2833,19 +2841,25 @@ bool ext4_empty_dir(struct inode *inode) return true; de = (struct ext4_dir_entry_2 *) bh->b_data; - de1 = ext4_next_entry(de, sb->s_blocksize); - if (le32_to_cpu(de->inode) != inode->i_ino || - le32_to_cpu(de1->inode) == 0 || - strcmp(".", de->name) || strcmp("..", de1->name)) { - ext4_warning_inode(inode, "directory missing '.' 
and/or '..'"); + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, + 0) || + le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { + ext4_warning_inode(inode, "directory missing '.'"); brelse(bh); return true; } - offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) + - ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize); - de = ext4_next_entry(de1, sb->s_blocksize); + offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); + de = ext4_next_entry(de, sb->s_blocksize); + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, + offset) || + le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { + ext4_warning_inode(inode, "directory missing '..'"); + brelse(bh); + return true; + } + offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); while (offset < inode->i_size) { - if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { + if (!(offset & (sb->s_blocksize - 1))) { unsigned int lblock; brelse(bh); lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); @@ -2856,12 +2870,11 @@ bool ext4_empty_dir(struct inode *inode) } if (IS_ERR(bh)) return true; - de = (struct ext4_dir_entry_2 *) bh->b_data; } + de = (struct ext4_dir_entry_2 *) (bh->b_data + + (offset & (sb->s_blocksize - 1))); if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset)) { - de = (struct ext4_dir_entry_2 *)(bh->b_data + - sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; continue; } @@ -2870,7 +2883,6 @@ bool ext4_empty_dir(struct inode *inode) return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); - de = ext4_next_entry(de, sb->s_blocksize); } brelse(bh); return true; @@ -3182,18 +3194,17 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); - if (inode->i_nlink == 0) { - ext4_warning_inode(inode, "Deleting file '%.*s' with no links", - dentry->d_name.len, dentry->d_name.name); - set_nlink(inode, 1); - } retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = current_time(dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); - drop_nlink(inode); + if (inode->i_nlink == 0) + ext4_warning_inode(inode, "Deleting file '%.*s' with no links", + dentry->d_name.len, dentry->d_name.name); + else + drop_nlink(inode); if (!inode->i_nlink) ext4_orphan_add(handle, inode); inode->i_ctime = current_time(inode); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 12ceadef32c5..2cc9f2168b9e 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -478,17 +478,26 @@ int ext4_bio_write_page(struct ext4_io_submit *io, gfp_t gfp_flags = GFP_NOFS; unsigned int enc_bytes = round_up(len, i_blocksize(inode)); + /* + * Since bounce page allocation uses a mempool, we can only use + * a waiting mask (i.e. request guaranteed allocation) on the + * first page of the bio. Otherwise it can deadlock. 
+ */ + if (io->io_bio) + gfp_flags = GFP_NOWAIT | __GFP_NOWARN; retry_encrypt: bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes, 0, gfp_flags); if (IS_ERR(bounce_page)) { ret = PTR_ERR(bounce_page); - if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) { - if (io->io_bio) { + if (ret == -ENOMEM && + (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) { + gfp_flags = GFP_NOFS; + if (io->io_bio) ext4_io_submit(io); - congestion_wait(BLK_RW_ASYNC, HZ/50); - } - gfp_flags |= __GFP_NOFAIL; + else + gfp_flags |= __GFP_NOFAIL; + congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry_encrypt; } bounce_page = NULL; diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index a30b203fa461..a5f55fece9b0 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -57,6 +57,7 @@ enum bio_post_read_step { STEP_INITIAL = 0, STEP_DECRYPT, STEP_VERITY, + STEP_MAX, }; struct bio_post_read_ctx { @@ -106,10 +107,22 @@ static void verity_work(struct work_struct *work) { struct bio_post_read_ctx *ctx = container_of(work, struct bio_post_read_ctx, work); + struct bio *bio = ctx->bio; - fsverity_verify_bio(ctx->bio); + /* + * fsverity_verify_bio() may call readpages() again, and although verity + * will be disabled for that, decryption may still be needed, causing + * another bio_post_read_ctx to be allocated. So to guarantee that + * mempool_alloc() never deadlocks we must free the current ctx first. + * This is safe because verity is the last post-read step. + */ + BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX); + mempool_free(ctx, bio_post_read_ctx_pool); + bio->bi_private = NULL; - bio_post_read_processing(ctx); + fsverity_verify_bio(bio); + + __read_end_io(bio); } static void bio_post_read_processing(struct bio_post_read_ctx *ctx) diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index c0e9aef376a7..080e25f6ef56 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -17,6 +17,33 @@ #include "ext4_jbd2.h" +struct ext4_rcu_ptr { + struct rcu_head rcu; + void *ptr; +}; + +static void ext4_rcu_ptr_callback(struct rcu_head *head) +{ + struct ext4_rcu_ptr *ptr; + + ptr = container_of(head, struct ext4_rcu_ptr, rcu); + kvfree(ptr->ptr); + kfree(ptr); +} + +void ext4_kvfree_array_rcu(void *to_free) +{ + struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); + + if (ptr) { + ptr->ptr = to_free; + call_rcu(&ptr->rcu, ext4_rcu_ptr_callback); + return; + } + synchronize_rcu(); + kvfree(to_free); +} + int ext4_resize_begin(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb, brelse(gdb); goto out; } - memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, - gdb->b_size); + memcpy(gdb->b_data, sbi_array_rcu_deref(sbi, + s_group_desc, j)->b_data, gdb->b_size); set_buffer_uptodate(gdb); err = ext4_handle_dirty_metadata(handle, NULL, gdb); @@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, } brelse(dind); - o_group_desc = EXT4_SB(sb)->s_group_desc; + rcu_read_lock(); + o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); memcpy(n_group_desc, o_group_desc, EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); + rcu_read_unlock(); n_group_desc[gdb_num] = gdb_bh; - EXT4_SB(sb)->s_group_desc = n_group_desc; + rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); EXT4_SB(sb)->s_gdb_count++; - kvfree(o_group_desc); + ext4_kvfree_array_rcu(o_group_desc); le16_add_cpu(&es->s_reserved_gdt_blocks, -1); err = ext4_handle_dirty_super(handle, sb); @@ -929,9 +958,11 @@ static int 
add_new_gdb_meta_bg(struct super_block *sb, return err; } - o_group_desc = EXT4_SB(sb)->s_group_desc; + rcu_read_lock(); + o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); memcpy(n_group_desc, o_group_desc, EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); + rcu_read_unlock(); n_group_desc[gdb_num] = gdb_bh; BUFFER_TRACE(gdb_bh, "get_write_access"); @@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb, return err; } - EXT4_SB(sb)->s_group_desc = n_group_desc; + rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); EXT4_SB(sb)->s_gdb_count++; - kvfree(o_group_desc); + ext4_kvfree_array_rcu(o_group_desc); return err; } @@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, * use non-sparse filesystems anymore. This is already checked above. */ if (gdb_off) { - gdb_bh = sbi->s_group_desc[gdb_num]; + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, + gdb_num); BUFFER_TRACE(gdb_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, gdb_bh); @@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, /* * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). */ - gdb_bh = sbi->s_group_desc[gdb_num]; + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num); /* Update group descriptor block for new group */ gdp = (struct ext4_group_desc *)(gdb_bh->b_data + gdb_off * EXT4_DESC_SIZE(sb)); @@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb, percpu_counter_read(&sbi->s_freeclusters_counter)); if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { ext4_group_t flex_group; + struct flex_groups *fg; + flex_group = ext4_flex_group(sbi, group_data[0].group); + fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), - &sbi->s_flex_groups[flex_group].free_clusters); + &fg->free_clusters); atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, - &sbi->s_flex_groups[flex_group].free_inodes); + &fg->free_inodes); } /* @@ -1519,7 +1554,8 @@ exit_journal: for (; gdb_num <= gdb_num_end; gdb_num++) { struct buffer_head *gdb_bh; - gdb_bh = sbi->s_group_desc[gdb_num]; + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, + gdb_num); if (old_gdb == gdb_bh->b_blocknr) continue; update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index dd654e53ba3d..539d63503e99 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -61,6 +61,7 @@ static struct ext4_lazy_init *ext4_li_info; static struct mutex ext4_li_mtx; static struct ratelimit_state ext4_mount_msg_ratelimit; +static struct ctl_table_header *ext4_sysctl_table; static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); @@ -970,6 +971,8 @@ static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; + struct buffer_head **group_desc; + struct flex_groups **flex_groups; int aborted = 0; int i, err; @@ -1000,15 +1003,23 @@ static void ext4_put_super(struct super_block *sb) if (!sb_rdonly(sb)) ext4_commit_super(sb, 1); + rcu_read_lock(); + group_desc = rcu_dereference(sbi->s_group_desc); for (i = 0; i < sbi->s_gdb_count; i++) - brelse(sbi->s_group_desc[i]); - kvfree(sbi->s_group_desc); - kvfree(sbi->s_flex_groups); + brelse(group_desc[i]); + kvfree(group_desc); + flex_groups = rcu_dereference(sbi->s_flex_groups); + if (flex_groups) { + for (i = 0; i < 
sbi->s_flex_groups_allocated; i++) + kvfree(flex_groups[i]); + kvfree(flex_groups); + } + rcu_read_unlock(); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); - percpu_free_rwsem(&sbi->s_journal_flag_rwsem); + percpu_free_rwsem(&sbi->s_writepages_rwsem); #ifdef CONFIG_QUOTA for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(get_qf_name(sb, sbi, i)); @@ -1076,6 +1087,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) inode_set_iversion(&ei->vfs_inode, 1); spin_lock_init(&ei->i_raw_lock); + atomic_set(&ei->i_prealloc_active, 0); INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); @@ -1172,9 +1184,9 @@ void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); - dquot_drop(inode); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); + dquot_drop(inode); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); @@ -1887,6 +1899,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, } sbi->s_commit_interval = HZ * arg; } else if (token == Opt_debug_want_extra_isize) { + if ((arg & 1) || + (arg < 4) || + (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) { + ext4_msg(sb, KERN_ERR, + "Invalid want_extra_isize %d", arg); + return -1; + } sbi->s_want_extra_isize = arg; } else if (token == Opt_max_batch_time) { sbi->s_max_batch_time = arg; @@ -2245,6 +2264,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PUTS("data_err=abort"); if (DUMMY_ENCRYPTION_ENABLED(sbi)) SEQ_OPTS_PUTS("test_dummy_encryption"); + if (test_opt(sb, BARRIER)) + SEQ_OPTS_PUTS("barrier"); ext4_show_quota_options(seq, sb); return 0; @@ -2325,8 +2346,8 @@ done: int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) { struct ext4_sb_info *sbi = EXT4_SB(sb); - struct flex_groups *new_groups; - int size; + struct flex_groups **old_groups, **new_groups; + int size, i, j; if (!sbi->s_log_groups_per_flex) return 0; @@ -2335,22 +2356,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) if (size <= sbi->s_flex_groups_allocated) return 0; - size = roundup_pow_of_two(size * sizeof(struct flex_groups)); - new_groups = kvzalloc(size, GFP_KERNEL); + new_groups = kvzalloc(roundup_pow_of_two(size * + sizeof(*sbi->s_flex_groups)), GFP_KERNEL); if (!new_groups) { - ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", - size / (int) sizeof(struct flex_groups)); + ext4_msg(sb, KERN_ERR, + "not enough memory for %d flex group pointers", size); return -ENOMEM; } - - if (sbi->s_flex_groups) { - memcpy(new_groups, sbi->s_flex_groups, - (sbi->s_flex_groups_allocated * - sizeof(struct flex_groups))); - kvfree(sbi->s_flex_groups); + for (i = sbi->s_flex_groups_allocated; i < size; i++) { + new_groups[i] = kvzalloc(roundup_pow_of_two( + sizeof(struct flex_groups)), + GFP_KERNEL); + if (!new_groups[i]) { + for (j = sbi->s_flex_groups_allocated; j < i; j++) + kvfree(new_groups[j]); + kvfree(new_groups); + ext4_msg(sb, KERN_ERR, + "not enough memory for %d flex groups", size); + return -ENOMEM; + } } - sbi->s_flex_groups = new_groups; - sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); + rcu_read_lock(); + old_groups = 
rcu_dereference(sbi->s_flex_groups); + if (old_groups) + memcpy(new_groups, old_groups, + (sbi->s_flex_groups_allocated * + sizeof(struct flex_groups *))); + rcu_read_unlock(); + rcu_assign_pointer(sbi->s_flex_groups, new_groups); + sbi->s_flex_groups_allocated = size; + if (old_groups) + ext4_kvfree_array_rcu(old_groups); return 0; } @@ -2358,6 +2394,7 @@ static int ext4_fill_flex_info(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; + struct flex_groups *fg; ext4_group_t flex_group; int i, err; @@ -2375,12 +2412,11 @@ static int ext4_fill_flex_info(struct super_block *sb) gdp = ext4_get_group_desc(sb, i, NULL); flex_group = ext4_flex_group(sbi, i); - atomic_add(ext4_free_inodes_count(sb, gdp), - &sbi->s_flex_groups[flex_group].free_inodes); + fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); + atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes); atomic64_add(ext4_free_group_clusters(sb, gdp), - &sbi->s_flex_groups[flex_group].free_clusters); - atomic_add(ext4_used_dirs_count(sb, gdp), - &sbi->s_flex_groups[flex_group].used_dirs); + &fg->free_clusters); + atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs); } return 1; @@ -2954,17 +2990,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly) return 0; } -#ifndef CONFIG_QUOTA - if (ext4_has_feature_quota(sb) && !readonly) { +#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2) + if (!readonly && (ext4_has_feature_quota(sb) || + ext4_has_feature_project(sb))) { ext4_msg(sb, KERN_ERR, - "Filesystem with quota feature cannot be mounted RDWR " - "without CONFIG_QUOTA"); - return 0; - } - if (ext4_has_feature_project(sb) && !readonly) { - ext4_msg(sb, KERN_ERR, - "Filesystem with project quota feature cannot be mounted RDWR " - "without CONFIG_QUOTA"); + "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2"); return 0; } #endif /* CONFIG_QUOTA */ @@ -3551,37 +3581,6 @@ int ext4_calculate_overhead(struct super_block *sb) return 0; } -static void ext4_clamp_want_extra_isize(struct super_block *sb) -{ - struct ext4_sb_info *sbi = EXT4_SB(sb); - struct ext4_super_block *es = sbi->s_es; - - /* determine the minimum size of new large inodes, if present */ - if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE && - sbi->s_want_extra_isize == 0) { - sbi->s_want_extra_isize = sizeof(struct ext4_inode) - - EXT4_GOOD_OLD_INODE_SIZE; - if (ext4_has_feature_extra_isize(sb)) { - if (sbi->s_want_extra_isize < - le16_to_cpu(es->s_want_extra_isize)) - sbi->s_want_extra_isize = - le16_to_cpu(es->s_want_extra_isize); - if (sbi->s_want_extra_isize < - le16_to_cpu(es->s_min_extra_isize)) - sbi->s_want_extra_isize = - le16_to_cpu(es->s_min_extra_isize); - } - } - /* Check if enough inode space is available */ - if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > - sbi->s_inode_size) { - sbi->s_want_extra_isize = sizeof(struct ext4_inode) - - EXT4_GOOD_OLD_INODE_SIZE; - ext4_msg(sb, KERN_INFO, - "required extra inode space not available"); - } -} - static void ext4_set_resv_clusters(struct super_block *sb) { ext4_fsblk_t resv_clusters; @@ -3616,9 +3615,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) { struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev); char *orig_data = kstrdup(data, GFP_KERNEL); - struct buffer_head *bh; + struct buffer_head *bh, **group_desc; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + struct flex_groups **flex_groups; ext4_fsblk_t block; 
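The resize and superblock hunks above all apply one pattern: readers fetch `s_group_desc`/`s_flex_groups` through `rcu_dereference()` inside an RCU read-side section, while writers build a larger copy, publish it with `rcu_assign_pointer()`, and free the old array only after a grace period (`ext4_kvfree_array_rcu()`). A minimal sketch of that grow-and-publish pattern, using a hypothetical `struct table` in place of `ext4_sb_info` and assuming a single writer serialized by the caller:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct item;

struct table {
	struct item __rcu **slots;	/* RCU-published pointer array */
	unsigned int nr;		/* slots currently allocated */
};

/* Reader: only dereference the array inside an RCU read-side section. */
static struct item *table_lookup(struct table *t, unsigned int i)
{
	struct item **slots, *it = NULL;

	rcu_read_lock();
	slots = rcu_dereference(t->slots);
	if (slots && i < t->nr)
		it = slots[i];
	rcu_read_unlock();
	return it;
}

/* Writer: build a larger copy, publish it, and free the old array only
 * after all pre-existing readers are done with it. */
static int table_grow(struct table *t, unsigned int new_nr)
{
	struct item **old, **new;

	new = kcalloc(new_nr, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	rcu_read_lock();
	old = rcu_dereference(t->slots);
	if (old)
		memcpy(new, old, t->nr * sizeof(*old));
	rcu_read_unlock();

	rcu_assign_pointer(t->slots, new);	/* pairs with rcu_dereference() */
	t->nr = new_nr;
	if (old) {
		synchronize_rcu();	/* simplest grace-period wait */
		kfree(old);
	}
	return 0;
}
```

The `synchronize_rcu()` call blocks; ext4's helper avoids that by deferring the free past the grace period instead, which is why the hunks call `ext4_kvfree_array_rcu()` rather than freeing inline.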
ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; @@ -3772,7 +3772,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; - if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) + if ((def_mount_opts & EXT4_MOUNT_BARRIER)) set_opt(sb, BARRIER); /* @@ -3789,6 +3789,78 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); + if (blocksize < EXT4_MIN_BLOCK_SIZE || + blocksize > EXT4_MAX_BLOCK_SIZE) { + ext4_msg(sb, KERN_ERR, + "Unsupported filesystem blocksize %d (%d log_block_size)", + blocksize, le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } + + if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { + sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; + sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; + } else { + sbi->s_inode_size = le16_to_cpu(es->s_inode_size); + sbi->s_first_ino = le32_to_cpu(es->s_first_ino); + if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { + ext4_msg(sb, KERN_ERR, "invalid first ino: %u", + sbi->s_first_ino); + goto failed_mount; + } + if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || + (!is_power_of_2(sbi->s_inode_size)) || + (sbi->s_inode_size > blocksize)) { + ext4_msg(sb, KERN_ERR, + "unsupported inode size: %d", + sbi->s_inode_size); + ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize); + goto failed_mount; + } + /* + * i_atime_extra is the last extra field available for + * [acm]times in struct ext4_inode. Checking for that + * field should suffice to ensure we have extra space + * for all three. + */ + if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + + sizeof(((struct ext4_inode *)0)->i_atime_extra)) { + sb->s_time_gran = 1; + sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; + } else { + sb->s_time_gran = NSEC_PER_SEC; + sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; + } + sb->s_time_min = EXT4_TIMESTAMP_MIN; + } + if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { + sbi->s_want_extra_isize = sizeof(struct ext4_inode) - + EXT4_GOOD_OLD_INODE_SIZE; + if (ext4_has_feature_extra_isize(sb)) { + unsigned v, max = (sbi->s_inode_size - + EXT4_GOOD_OLD_INODE_SIZE); + + v = le16_to_cpu(es->s_want_extra_isize); + if (v > max) { + ext4_msg(sb, KERN_ERR, + "bad s_want_extra_isize: %d", v); + goto failed_mount; + } + if (sbi->s_want_extra_isize < v) + sbi->s_want_extra_isize = v; + + v = le16_to_cpu(es->s_min_extra_isize); + if (v > max) { + ext4_msg(sb, KERN_ERR, + "bad s_min_extra_isize: %d", v); + goto failed_mount; + } + if (sbi->s_want_extra_isize < v) + sbi->s_want_extra_isize = v; + } + } + if (sbi->s_es->s_mount_opts[0]) { char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, sizeof(sbi->s_es->s_mount_opts), @@ -3947,14 +4019,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) goto failed_mount; - blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); - if (blocksize < EXT4_MIN_BLOCK_SIZE || - blocksize > EXT4_MAX_BLOCK_SIZE) { - ext4_msg(sb, KERN_ERR, - "Unsupported filesystem blocksize %d (%d log_block_size)", - blocksize, le32_to_cpu(es->s_log_block_size)); - goto failed_mount; - } if (le32_to_cpu(es->s_log_block_size) > (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ext4_msg(sb, KERN_ERR, @@ -4027,42 +4091,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, 
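Both validation sites above — the `debug_want_extra_isize` mount option and the superblock's `s_want_extra_isize`/`s_min_extra_isize` fields — enforce the same bounds before accepting a value. A standalone restatement of that check; the names here are illustrative, not ext4's:

```c
#include <stdbool.h>

#define GOOD_OLD_INODE_SIZE 128	/* fixed portion of the on-disk inode */

/* A candidate extra_isize must be even, at least 4 bytes, and must fit
 * in the space between the fixed inode and the full on-disk inode. */
static bool extra_isize_ok(unsigned int arg, unsigned int inode_size)
{
	if (inode_size <= GOOD_OLD_INODE_SIZE)
		return false;	/* no extra space exists at all */
	if (arg & 1)
		return false;	/* must be 2-byte aligned */
	if (arg < 4)
		return false;	/* lower bound used by the patch */
	if (arg > inode_size - GOOD_OLD_INODE_SIZE)
		return false;	/* would run past the end of the inode */
	return true;
}
/* e.g. extra_isize_ok(32, 256) is true; extra_isize_ok(32, 128) is false. */
```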
int silent) has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); - if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { - sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; - sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; - } else { - sbi->s_inode_size = le16_to_cpu(es->s_inode_size); - sbi->s_first_ino = le32_to_cpu(es->s_first_ino); - if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { - ext4_msg(sb, KERN_ERR, "invalid first ino: %u", - sbi->s_first_ino); - goto failed_mount; - } - if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || - (!is_power_of_2(sbi->s_inode_size)) || - (sbi->s_inode_size > blocksize)) { - ext4_msg(sb, KERN_ERR, - "unsupported inode size: %d", - sbi->s_inode_size); - goto failed_mount; - } - /* - * i_atime_extra is the last extra field available for [acm]times in - * struct ext4_inode. Checking for that field should suffice to ensure - * we have extra space for all three. - */ - if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + - sizeof(((struct ext4_inode *)0)->i_atime_extra)) { - sb->s_time_gran = 1; - sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; - } else { - sb->s_time_gran = NSEC_PER_SEC; - sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; - } - - sb->s_time_min = EXT4_TIMESTAMP_MIN; - } - sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (ext4_has_feature_64bit(sb)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || @@ -4244,9 +4272,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } } - sbi->s_group_desc = kvmalloc_array(db_count, - sizeof(struct buffer_head *), - GFP_KERNEL); + rcu_assign_pointer(sbi->s_group_desc, + kvmalloc_array(db_count, + sizeof(struct buffer_head *), + GFP_KERNEL)); if (sbi->s_group_desc == NULL) { ext4_msg(sb, KERN_ERR, "not enough memory"); ret = -ENOMEM; @@ -4262,14 +4291,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } for (i = 0; i < db_count; i++) { + struct buffer_head *bh; + block = descriptor_loc(sb, logical_sb_block, i); - sbi->s_group_desc[i] = sb_bread_unmovable(sb, block); - if (!sbi->s_group_desc[i]) { + bh = sb_bread_unmovable(sb, block); + if (!bh) { ext4_msg(sb, KERN_ERR, "can't read group descriptor %d", i); db_count = i; goto failed_mount2; } + rcu_read_lock(); + rcu_dereference(sbi->s_group_desc)[i] = bh; + rcu_read_unlock(); } sbi->s_gdb_count = db_count; if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { @@ -4381,6 +4415,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* We have now updated the journal if required, so we can * validate the data journaling mode. 
*/ + if (IS_EXT3_SB(sb) && test_opt(sb, DATA_FLAGS) == 0) + set_opt(sb, WRITEBACK_DATA); switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal @@ -4518,8 +4554,6 @@ no_journal: } else if (ret) goto failed_mount4a; - ext4_clamp_want_extra_isize(sb); - ext4_set_resv_clusters(sb); err = ext4_setup_system_zone(sb); @@ -4557,7 +4591,7 @@ no_journal: err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, GFP_KERNEL); if (!err) - err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem); + err = percpu_init_rwsem(&sbi->s_writepages_rwsem); if (err) { ext4_msg(sb, KERN_ERR, "insufficient memory"); @@ -4645,13 +4679,19 @@ failed_mount7: ext4_unregister_li_request(sb); failed_mount6: ext4_mb_release(sb); - if (sbi->s_flex_groups) - kvfree(sbi->s_flex_groups); + rcu_read_lock(); + flex_groups = rcu_dereference(sbi->s_flex_groups); + if (flex_groups) { + for (i = 0; i < sbi->s_flex_groups_allocated; i++) + kvfree(flex_groups[i]); + kvfree(flex_groups); + } + rcu_read_unlock(); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); - percpu_free_rwsem(&sbi->s_journal_flag_rwsem); + percpu_free_rwsem(&sbi->s_writepages_rwsem); failed_mount5: ext4_ext_release(sb); ext4_release_system_zone(sb); @@ -4680,9 +4720,12 @@ failed_mount3: if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: + rcu_read_lock(); + group_desc = rcu_dereference(sbi->s_group_desc); for (i = 0; i < db_count; i++) - brelse(sbi->s_group_desc[i]); - kvfree(sbi->s_group_desc); + brelse(group_desc[i]); + kvfree(group_desc); + rcu_read_unlock(); failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); @@ -5307,8 +5350,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - ext4_clamp_want_extra_isize(sb); - if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " @@ -5546,9 +5587,15 @@ static int ext4_statfs_project(struct super_block *sb, return PTR_ERR(dquot); spin_lock(&dquot->dq_dqb_lock); - limit = (dquot->dq_dqb.dqb_bsoftlimit ? - dquot->dq_dqb.dqb_bsoftlimit : - dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits; + limit = 0; + if (dquot->dq_dqb.dqb_bsoftlimit && + (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit)) + limit = dquot->dq_dqb.dqb_bsoftlimit; + if (dquot->dq_dqb.dqb_bhardlimit && + (!limit || dquot->dq_dqb.dqb_bhardlimit < limit)) + limit = dquot->dq_dqb.dqb_bhardlimit; + limit >>= sb->s_blocksize_bits; + if (limit && buf->f_blocks > limit) { curblock = (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; @@ -5558,9 +5605,14 @@ static int ext4_statfs_project(struct super_block *sb, (buf->f_blocks - curblock) : 0; } - limit = dquot->dq_dqb.dqb_isoftlimit ? 
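The `ext4_statfs_project()` rewrite here replaces "soft limit if set, else hard limit" with "the smaller non-zero of the two": a soft limit above the hard limit must not win, and zero means unlimited. The selection logic, stated on its own:

```c
#include <stdint.h>

/* Pick the smaller of two limits, treating 0 as "unlimited".  This is
 * equivalent to the open-coded if-chains in the hunks above. */
static uint64_t effective_limit(uint64_t soft, uint64_t hard)
{
	uint64_t limit = 0;

	if (soft && (!limit || soft < limit))
		limit = soft;
	if (hard && (!limit || hard < limit))
		limit = hard;
	return limit;	/* 0 when both limits are unset */
}
/* effective_limit(0, 100) == 100; effective_limit(50, 100) == 50. */
```

The f2fs counterpart later in this series expresses the same selection with the kernel's `min_not_zero()` helper.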
- dquot->dq_dqb.dqb_isoftlimit : - dquot->dq_dqb.dqb_ihardlimit; + limit = 0; + if (dquot->dq_dqb.dqb_isoftlimit && + (!limit || dquot->dq_dqb.dqb_isoftlimit < limit)) + limit = dquot->dq_dqb.dqb_isoftlimit; + if (dquot->dq_dqb.dqb_ihardlimit && + (!limit || dquot->dq_dqb.dqb_ihardlimit < limit)) + limit = dquot->dq_dqb.dqb_ihardlimit; + if (limit && buf->f_files > limit) { buf->f_files = limit; buf->f_ffree = @@ -6107,6 +6159,36 @@ MODULE_ALIAS_FS("ext4"); /* Shared across all ext4 file systems */ wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; +static struct ctl_table ext4_sysctls[] = { + { + .procname = "shrink_es_timeout", + .data = &ext4_shrink_es_timeout, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &ext4_shrink_es_timeout_min, + }, + { } +}; + +static struct ctl_table ext4_sysctl_dir[] = { + { + .procname = "ext4", + .mode = 0555, + .child = ext4_sysctls, + }, + { } +}; + +static struct ctl_table ext4_sysctl_root[] = { + { + .procname = "fs", + .mode = 0555, + .child = ext4_sysctl_dir, + }, + { } +}; + static int __init ext4_init_fs(void) { int i, err; @@ -6148,9 +6230,14 @@ static int __init ext4_init_fs(void) err = ext4_init_mballoc(); if (err) goto out2; + + ext4_sysctl_table = register_sysctl_table(ext4_sysctl_root); + if (ext4_sysctl_table == NULL) + goto out1; + err = init_inodecache(); if (err) - goto out1; + goto out0; register_as_ext3(); register_as_ext2(); err = register_filesystem(&ext4_fs_type); @@ -6162,6 +6249,8 @@ out: unregister_as_ext2(); unregister_as_ext3(); destroy_inodecache(); +out0: + unregister_sysctl_table(ext4_sysctl_table); out1: ext4_exit_mballoc(); out2: @@ -6187,6 +6276,7 @@ static void __exit ext4_exit_fs(void) unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); destroy_inodecache(); + unregister_sysctl_table(ext4_sysctl_table); ext4_exit_mballoc(); ext4_exit_sysfs(); ext4_exit_system_zone(); diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index eb1efad0e20a..9c4ce4d681e9 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -186,6 +186,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); +EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error); EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval); @@ -215,6 +216,7 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), + ATTR_LIST(mb_max_inode_prealloc), ATTR_LIST(max_writeback_mb_bump), ATTR_LIST(extent_max_zeroout_kb), ATTR_LIST(trigger_fs_error), diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 491f9ee4040e..894a61010ae9 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1820,8 +1820,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. 
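The `ext4_sysctls`/`ext4_sysctl_dir`/`ext4_sysctl_root` tables above build the `fs/ext4/shrink_es_timeout` path out of nested `ctl_table` arrays registered once at init. A sketch of that registration shape for a hypothetical knob, using the `.child`-based API of kernels contemporary with this series (newer kernels take a path string via `register_sysctl()` instead):

```c
#include <linux/errno.h>
#include <linux/sysctl.h>

static unsigned int demo_timeout = 30;	/* hypothetical tunable */
static unsigned int demo_timeout_min = 1;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_timeout",
		.data		= &demo_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &demo_timeout_min,	/* NULL extra2: no upper bound */
	},
	{ }	/* sentinel terminates each table */
};

static struct ctl_table demo_dir[] = {
	{ .procname = "demo", .mode = 0555, .child = demo_table, },
	{ }
};

static struct ctl_table demo_root[] = {
	{ .procname = "fs", .mode = 0555, .child = demo_dir, },
	{ }
};

static struct ctl_table_header *demo_sysctl;

static int __init demo_sysctl_init(void)
{
	demo_sysctl = register_sysctl_table(demo_root);	/* /proc/sys/fs/demo/demo_timeout */
	return demo_sysctl ? 0 : -ENOMEM;
	/* teardown mirrors this: unregister_sysctl_table(demo_sysctl); */
}
```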
*/ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); - if (IS_ERR(bs->bh)) - return PTR_ERR(bs->bh); + if (IS_ERR(bs->bh)) { + error = PTR_ERR(bs->bh); + bs->bh = NULL; + return error; + } ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 5755e897a5f0..ec9a1f9ce2dd 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1074,19 +1074,6 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) int err = 0; bool direct_io = iocb->ki_flags & IOCB_DIRECT; - /* convert inline data for Direct I/O*/ - if (direct_io) { - err = f2fs_convert_inline_inode(inode); - if (err) - return err; - } - - if (direct_io && allow_outplace_dio(inode, iocb, from)) - return 0; - - if (is_inode_flag_set(inode, FI_NO_PREALLOC)) - return 0; - map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); if (map.m_len > map.m_lblk) @@ -2098,7 +2085,7 @@ static int __write_data_page(struct page *page, bool *submitted, loff_t i_size = i_size_read(inode); const pgoff_t end_index = ((unsigned long long) i_size) >> PAGE_SHIFT; - loff_t psize = (page->index + 1) << PAGE_SHIFT; + loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT; unsigned offset = 0; bool need_balance_fs = false; int err = 0; @@ -3043,7 +3030,8 @@ int f2fs_migrate_page(struct address_space *mapping, #ifdef CONFIG_SWAP /* Copied from generic_swapfile_activate() to check any holes */ -static int check_swap_activate(struct file *swap_file, unsigned int max) +static int check_swap_activate(struct swap_info_struct *sis, + struct file *swap_file, sector_t *span) { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; @@ -3054,6 +3042,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max) sector_t last_block; sector_t lowest_block = -1; sector_t highest_block = 0; + int nr_extents = 0; + int ret; blkbits = inode->i_blkbits; blocks_per_page = PAGE_SIZE >> blkbits; @@ -3065,7 +3055,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max) probe_block = 0; page_no = 0; last_block = i_size_read(inode) >> blkbits; - while ((probe_block + blocks_per_page) <= last_block && page_no < max) { + while ((probe_block + blocks_per_page) <= last_block && + page_no < sis->max) { unsigned block_in_page; sector_t first_block; @@ -3105,13 +3096,27 @@ static int check_swap_activate(struct file *swap_file, unsigned int max) highest_block = first_block; } + /* + * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks + */ + ret = add_swap_extent(sis, page_no, 1, first_block); + if (ret < 0) + goto out; + nr_extents += ret; page_no++; probe_block += blocks_per_page; reprobe: continue; } - return 0; - + ret = nr_extents; + *span = 1 + highest_block - lowest_block; + if (page_no == 0) + page_no = 1; /* force Empty message */ + sis->max = page_no; + sis->pages = page_no - 1; + sis->highest_bit = page_no - 1; +out: + return ret; bad_bmap: pr_err("swapon: swapfile has holes\n"); return -EINVAL; @@ -3133,14 +3138,14 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, if (ret) return ret; - ret = check_swap_activate(file, sis->max); - if (ret) + ret = check_swap_activate(sis, file, span); + if (ret < 0) return ret; set_inode_flag(inode, FI_PIN_FILE); f2fs_precache_extents(inode); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); - return 0; + return ret; } static void 
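The `psize` fix above (and the matching `new_size` cast a little further on) inserts a `(loff_t)` cast before shifting a page index into a byte offset: `page->index` is an `unsigned long`, so on 32-bit systems the shift would wrap before being widened. A self-contained demonstration, using a 32-bit type so the wrap is visible on any host:

```c
#include <stdio.h>

typedef long long loff_t;	/* 64-bit file offset, as in the kernel */
#define PAGE_SHIFT 12

int main(void)
{
	/* Stand-in for a 32-bit pgoff_t: page 0x100000 sits at byte 4 GiB. */
	unsigned int index = 0x100000u;

	loff_t bad  = (index + 1) << PAGE_SHIFT;	 /* shifts in 32 bits, wraps */
	loff_t good = (loff_t)(index + 1) << PAGE_SHIFT; /* widen first, then shift */

	printf("bad=%lld good=%lld\n", (long long)bad, (long long)good);
	/* prints: bad=4096 good=4294971392 */
	return 0;
}
```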
f2fs_swap_deactivate(struct file *file) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 4033778bcbbf..84280ad3786c 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -1068,24 +1068,27 @@ static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct qstr qstr = {.name = str, .len = len }; + const struct dentry *parent = READ_ONCE(dentry->d_parent); + const struct inode *inode = READ_ONCE(parent->d_inode); - if (!IS_CASEFOLDED(dentry->d_parent->d_inode)) { + if (!inode || !IS_CASEFOLDED(inode)) { if (len != name->len) return -1; - return memcmp(str, name, len); + return memcmp(str, name->name, len); } - return f2fs_ci_compare(dentry->d_parent->d_inode, name, &qstr, false); + return f2fs_ci_compare(inode, name, &qstr, false); } static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str) { struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); const struct unicode_map *um = sbi->s_encoding; + const struct inode *inode = READ_ONCE(dentry->d_inode); unsigned char *norm; int len, ret = 0; - if (!IS_CASEFOLDED(dentry->d_inode)) + if (!inode || !IS_CASEFOLDED(inode)) return 0; norm = f2fs_kmalloc(sbi, PATH_MAX, GFP_ATOMIC); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 4024790028aa..9046432b87c2 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1289,6 +1289,7 @@ struct f2fs_sb_info { unsigned int gc_mode; /* current GC state */ unsigned int next_victim_seg[2]; /* next segment in victim section */ /* for skip statistic */ + unsigned int atomic_files; /* # of opened atomic file */ unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ unsigned long long skipped_gc_rwsem; /* FG_GC only */ @@ -2704,6 +2705,20 @@ static inline void clear_file(struct inode *inode, int type) f2fs_mark_inode_dirty_sync(inode, true); } +static inline bool f2fs_is_time_consistent(struct inode *inode) +{ + if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) + return false; + if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) + return false; + if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) + return false; + if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, + &F2FS_I(inode)->i_crtime)) + return false; + return true; +} + static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) { bool ret; @@ -2721,14 +2736,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) i_size_read(inode) & ~PAGE_MASK) return false; - if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) - return false; - if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) - return false; - if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) - return false; - if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, - &F2FS_I(inode)->i_crtime)) + if (!f2fs_is_time_consistent(inode)) return false; down_read(&F2FS_I(inode)->i_sem); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 29bc0a542759..c3a9da79ac99 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -50,7 +50,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) struct page *page = vmf->page; struct inode *inode = file_inode(vmf->vma->vm_file); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct dnode_of_data dn = { .node_changed = false }; + struct dnode_of_data dn; int err; if (unlikely(f2fs_cp_error(sbi))) { @@ -63,6 +63,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) goto err; } + /* should do out of any locked page */ + 
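The `f2fs_d_compare()`/`f2fs_d_hash()` hunks above harden against RCU-walk: `d_parent` and `d_inode` are snapshotted once with `READ_ONCE()`, and a NULL inode (a negative or dying dentry) falls back to a plain comparison instead of being dereferenced. The same hunk also fixes a latent bug — the fallback compared against the `qstr` struct itself (`name`) rather than its string (`name->name`). A sketch of the hardened shape:

```c
#include <linux/dcache.h>
#include <linux/string.h>

/* Sketch of an RCU-safe ->d_compare(): snapshot racy pointers once and
 * tolerate a NULL inode, rather than chasing dentry->d_parent->d_inode
 * repeatedly while the dentry may be moving. */
static int demo_d_compare(const struct dentry *dentry, unsigned int len,
			  const char *str, const struct qstr *name)
{
	const struct dentry *parent = READ_ONCE(dentry->d_parent);
	const struct inode *dir = READ_ONCE(parent->d_inode);

	if (!dir) {		/* negative/dying: plain byte comparison */
		if (len != name->len)
			return 1;	/* nonzero means "no match" */
		return memcmp(str, name->name, len);
	}
	/* ...otherwise a casefold-aware comparison keyed off dir... */
	return 0;
}
```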
f2fs_balance_fs(sbi, true); + sb_start_pagefault(inode->i_sb); f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); @@ -120,8 +123,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) out_sem: up_read(&F2FS_I(inode)->i_mmap_sem); - f2fs_balance_fs(sbi, dn.node_changed); - sb_end_pagefault(inode->i_sb); err: return block_page_mkwrite_return(err); @@ -751,18 +752,12 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; - if (ia_valid & ATTR_ATIME) { - inode->i_atime = timestamp_truncate(attr->ia_atime, - inode); - } - if (ia_valid & ATTR_MTIME) { - inode->i_mtime = timestamp_truncate(attr->ia_mtime, - inode); - } - if (ia_valid & ATTR_CTIME) { - inode->i_ctime = timestamp_truncate(attr->ia_ctime, - inode); - } + if (ia_valid & ATTR_ATIME) + inode->i_atime = attr->ia_atime; + if (ia_valid & ATTR_MTIME) + inode->i_mtime = attr->ia_mtime; + if (ia_valid & ATTR_CTIME) + inode->i_ctime = attr->ia_ctime; if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; @@ -1139,7 +1134,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, } dn.ofs_in_node++; i++; - new_size = (dst + i) << PAGE_SHIFT; + new_size = (loff_t)(dst + i) << PAGE_SHIFT; if (dst_inode->i_size < new_size) f2fs_i_size_write(dst_inode, new_size); } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR)); @@ -1890,6 +1885,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) spin_lock(&sbi->inode_lock[ATOMIC_FILE]); if (list_empty(&fi->inmem_ilist)) list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); + sbi->atomic_files++; spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); /* add inode in inmem_list first and set atomic_file */ @@ -3353,18 +3349,41 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = -EAGAIN; goto out; } - } else { - preallocated = true; - target_size = iocb->ki_pos + iov_iter_count(from); - - err = f2fs_preallocate_blocks(iocb, from); - if (err) { - clear_inode_flag(inode, FI_NO_PREALLOC); - inode_unlock(inode); - ret = err; - goto out; - } + goto write; } + + if (is_inode_flag_set(inode, FI_NO_PREALLOC)) + goto write; + + if (iocb->ki_flags & IOCB_DIRECT) { + /* + * Convert inline data for Direct I/O before entering + * f2fs_direct_IO(). + */ + err = f2fs_convert_inline_inode(inode); + if (err) + goto out_err; + /* + * If force_buffere_io() is true, we have to allocate + * blocks all the time, since f2fs_direct_IO will fall + * back to buffered IO. 
+ */ + if (!f2fs_force_buffered_io(inode, iocb, from) && + allow_outplace_dio(inode, iocb, from)) + goto write; + } + preallocated = true; + target_size = iocb->ki_pos + iov_iter_count(from); + + err = f2fs_preallocate_blocks(iocb, from); + if (err) { +out_err: + clear_inode_flag(inode, FI_NO_PREALLOC); + inode_unlock(inode); + ret = err; + goto out; + } +write: ret = __generic_file_write_iter(iocb, from); clear_inode_flag(inode, FI_NO_PREALLOC); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index db4fec30c30d..386ad54c13c3 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -615,7 +615,11 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) inode->i_ino == F2FS_META_INO(sbi)) return 0; - if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) + /* + * atime could be updated without dirtying f2fs inode in lazytime mode + */ + if (f2fs_is_time_consistent(inode) && + !is_inode_flag_set(inode, FI_DIRTY_INODE)) return 0; if (!f2fs_is_checkpoint_ready(sbi)) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 4faf06e8bf89..5d9584281935 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -797,6 +797,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry, if (whiteout) { f2fs_i_links_write(inode, false); + inode->i_state |= I_LINKABLE; *whiteout = inode; } else { d_tmpfile(dentry, inode); @@ -867,6 +868,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, F2FS_I(old_dentry->d_inode)->i_projid))) return -EXDEV; + if (flags & RENAME_WHITEOUT) { + err = f2fs_create_whiteout(old_dir, &whiteout); + if (err) + return err; + } + err = dquot_initialize(old_dir); if (err) goto out; @@ -898,17 +905,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, } } - if (flags & RENAME_WHITEOUT) { - err = f2fs_create_whiteout(old_dir, &whiteout); - if (err) - goto out_dir; - } - if (new_inode) { err = -ENOTEMPTY; if (old_dir_entry && !f2fs_empty_dir(new_inode)) - goto out_whiteout; + goto out_dir; err = -ENOENT; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, @@ -916,7 +917,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, if (!new_entry) { if (IS_ERR(new_page)) err = PTR_ERR(new_page); - goto out_whiteout; + goto out_dir; } f2fs_balance_fs(sbi, true); @@ -948,7 +949,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, err = f2fs_add_link(new_dentry, old_inode); if (err) { f2fs_unlock_op(sbi); - goto out_whiteout; + goto out_dir; } if (old_dir_entry) @@ -972,7 +973,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, if (IS_ERR(old_page)) err = PTR_ERR(old_page); f2fs_unlock_op(sbi); - goto out_whiteout; + goto out_dir; } } } @@ -981,7 +982,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, if (!old_dir_entry || whiteout) file_lost_pino(old_inode); else - F2FS_I(old_inode)->i_pino = new_dir->i_ino; + /* adjust dir's i_pino to pass fsck check */ + f2fs_i_pino_write(old_inode, new_dir->i_ino); up_write(&F2FS_I(old_inode)->i_sem); old_inode->i_ctime = current_time(old_inode); @@ -990,7 +992,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, f2fs_delete_entry(old_entry, old_page, old_dir, NULL); if (whiteout) { - whiteout->i_state |= I_LINKABLE; set_inode_flag(whiteout, FI_INC_LINK); err = f2fs_add_link(old_dentry, whiteout); if (err) @@ -1026,15 +1027,14 @@ put_out_dir: f2fs_unlock_op(sbi); if (new_page) f2fs_put_page(new_page, 0); -out_whiteout: - if (whiteout) - iput(whiteout); out_dir: if 
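The `f2fs_rename()` restructure around this point moves whiteout creation to before any locks or journal state are taken and retires the dedicated `out_whiteout:` label: every failure now funnels through the shared exit just below, which drops the whiteout reference unconditionally. The shape of that cleanup discipline, with hypothetical `demo_*` stand-ins:

```c
#include <errno.h>
#include <stdlib.h>

struct inode { int dummy; };

/* Stand-ins for f2fs_create_whiteout()/iput()/the rename body. */
static int make_whiteout(struct inode **out)
{
	*out = calloc(1, sizeof(**out));
	return *out ? 0 : -ENOMEM;
}
static void iput(struct inode *inode) { free(inode); }
static int do_rename_steps(void) { return 0; }

/* Allocate the whiteout before any other resource is taken, and release
 * it on the single shared exit path, so no individual failure branch
 * needs its own cleanup label. */
static int demo_rename(int want_whiteout)
{
	struct inode *whiteout = NULL;
	int err = 0;

	if (want_whiteout) {
		err = make_whiteout(&whiteout);
		if (err)
			return err;	/* nothing else held yet */
	}

	err = do_rename_steps();	/* failures fall through to cleanup */

	if (whiteout)
		iput(whiteout);		/* dropped on success and failure alike */
	return err;
}
```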
(old_dir_entry) f2fs_put_page(old_dir_page, 0); out_old: f2fs_put_page(old_page, 0); out: + if (whiteout) + iput(whiteout); return err; } @@ -1141,7 +1141,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, f2fs_set_link(old_dir, old_entry, old_page, new_inode); down_write(&F2FS_I(old_inode)->i_sem); - file_lost_pino(old_inode); + if (!old_dir_entry) + file_lost_pino(old_inode); + else + /* adjust dir's i_pino to pass fsck check */ + f2fs_i_pino_write(old_inode, new_dir->i_ino); up_write(&F2FS_I(old_inode)->i_sem); old_dir->i_ctime = current_time(old_dir); @@ -1156,7 +1160,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, f2fs_set_link(new_dir, new_entry, new_page, old_inode); down_write(&F2FS_I(new_inode)->i_sem); - file_lost_pino(new_inode); + if (!new_dir_entry) + file_lost_pino(new_inode); + else + /* adjust dir's i_pino to pass fsck check */ + f2fs_i_pino_write(new_inode, old_dir->i_ino); up_write(&F2FS_I(new_inode)->i_sem); new_dir->i_ctime = current_time(new_dir); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 808709581481..7d8578401267 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -288,6 +288,8 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure) struct list_head *head = &sbi->inode_list[ATOMIC_FILE]; struct inode *inode; struct f2fs_inode_info *fi; + unsigned int count = sbi->atomic_files; + unsigned int looped = 0; next: spin_lock(&sbi->inode_lock[ATOMIC_FILE]); if (list_empty(head)) { @@ -296,22 +298,26 @@ next: } fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist); inode = igrab(&fi->vfs_inode); + if (inode) + list_move_tail(&fi->inmem_ilist, head); spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); if (inode) { if (gc_failure) { - if (fi->i_gc_failures[GC_FAILURE_ATOMIC]) - goto drop; - goto skip; + if (!fi->i_gc_failures[GC_FAILURE_ATOMIC]) + goto skip; } -drop: set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); f2fs_drop_inmem_pages(inode); +skip: iput(inode); } -skip: congestion_wait(BLK_RW_ASYNC, HZ/50); cond_resched(); + if (gc_failure) { + if (++looped >= count) + return; + } goto next; } @@ -327,13 +333,16 @@ void f2fs_drop_inmem_pages(struct inode *inode) mutex_unlock(&fi->inmem_lock); } - clear_inode_flag(inode, FI_ATOMIC_FILE); fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0; stat_dec_atomic_write(inode); spin_lock(&sbi->inode_lock[ATOMIC_FILE]); if (!list_empty(&fi->inmem_ilist)) list_del_init(&fi->inmem_ilist); + if (f2fs_is_atomic_file(inode)) { + clear_inode_flag(inode, FI_ATOMIC_FILE); + sbi->atomic_files--; + } spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 1443cee15863..ea8dbf1458c9 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1213,9 +1213,11 @@ static int f2fs_statfs_project(struct super_block *sb, return PTR_ERR(dquot); spin_lock(&dquot->dq_dqb_lock); - limit = (dquot->dq_dqb.dqb_bsoftlimit ? - dquot->dq_dqb.dqb_bsoftlimit : - dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits; + limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, + dquot->dq_dqb.dqb_bhardlimit); + if (limit) + limit >>= sb->s_blocksize_bits; + if (limit && buf->f_blocks > limit) { curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; buf->f_blocks = limit; @@ -1224,9 +1226,9 @@ static int f2fs_statfs_project(struct super_block *sb, (buf->f_blocks - curblock) : 0; } - limit = dquot->dq_dqb.dqb_isoftlimit ? 
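The `f2fs_statfs_project()` hunk here gets the same soft/hard quota fix as ext4's, but via the kernel's `min_not_zero()` helper. A userspace rendition of the helper and its edge cases:

```c
#include <assert.h>

/* Userspace rendition of the kernel's min_not_zero() (a GCC statement
 * expression): the smaller of two values, except that a zero operand,
 * meaning "no limit", is ignored. */
#define min_not_zero(x, y) ({			\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	_x == 0 ? _y : (_y == 0 ? _x : (_x < _y ? _x : _y)); })

int main(void)
{
	assert(min_not_zero(0, 100) == 100);	/* soft unset: hard wins  */
	assert(min_not_zero(50, 0) == 50);	/* hard unset: soft wins  */
	assert(min_not_zero(50, 100) == 50);	/* both set: smaller wins */
	assert(min_not_zero(0, 0) == 0);	/* neither set: unlimited */
	return 0;
}
```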
- dquot->dq_dqb.dqb_isoftlimit : - dquot->dq_dqb.dqb_ihardlimit; + limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, + dquot->dq_dqb.dqb_ihardlimit); + if (limit && buf->f_files > limit) { buf->f_files = limit; buf->f_ffree = diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index b558b64a4c9c..170934430d7d 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -729,10 +729,12 @@ int __init f2fs_init_sysfs(void) ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype, NULL, "features"); - if (ret) + if (ret) { + kobject_put(&f2fs_feat); kset_unregister(&f2fs_kset); - else + } else { f2fs_proc_root = proc_mkdir("fs/f2fs", NULL); + } return ret; } @@ -753,8 +755,11 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi) init_completion(&sbi->s_kobj_unregister); err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL, "%s", sb->s_id); - if (err) + if (err) { + kobject_put(&sbi->s_kobj); + wait_for_completion(&sbi->s_kobj_unregister); return err; + } if (f2fs_proc_root) sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root); @@ -782,4 +787,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi) remove_proc_entry(sbi->sb->s_id, f2fs_proc_root); } kobject_del(&sbi->s_kobj); + kobject_put(&sbi->s_kobj); } diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 5f04c5c810fb..d40cbad16659 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -749,6 +749,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb) return NULL; init_rwsem(&ei->truncate_lock); + /* Zeroing to allow iput() even if partial initialized inode. */ + ei->mmu_private = 0; + ei->i_start = 0; + ei->i_logstart = 0; + ei->i_attrs = 0; + ei->i_pos = 0; + return &ei->vfs_inode; } @@ -1373,16 +1380,6 @@ out: return 0; } -static void fat_dummy_inode_init(struct inode *inode) -{ - /* Initialize this dummy inode to work as no-op. */ - MSDOS_I(inode)->mmu_private = 0; - MSDOS_I(inode)->i_start = 0; - MSDOS_I(inode)->i_logstart = 0; - MSDOS_I(inode)->i_attrs = 0; - MSDOS_I(inode)->i_pos = 0; -} - static int fat_read_root(struct inode *inode) { struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); @@ -1843,13 +1840,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, fat_inode = new_inode(sb); if (!fat_inode) goto out_fail; - fat_dummy_inode_init(fat_inode); sbi->fat_inode = fat_inode; fsinfo_inode = new_inode(sb); if (!fsinfo_inode) goto out_fail; - fat_dummy_inode_init(fsinfo_inode); fsinfo_inode->i_ino = MSDOS_FSINFO_INO; sbi->fsinfo_inode = fsinfo_inode; insert_inode_hash(fsinfo_inode); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 335607b8c5c0..76ac9c7d32ec 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -2063,7 +2063,7 @@ void wb_workfn(struct work_struct *work) struct bdi_writeback, dwork); long pages_written; - set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); + set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); current->flags |= PF_SWAPWRITE; if (likely(!current_is_workqueue_rescuer() || diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ed1abc9e33cf..4f2e4f38feb8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -276,12 +276,10 @@ static void flush_bg_queue(struct fuse_conn *fc) void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_iqueue *fiq = &fc->iq; - bool async; if (test_and_set_bit(FR_FINISHED, &req->flags)) goto put_request; - async = req->args->end; /* * test_and_set_bit() implies smp_mb() between bit * changing and below intr_entry check. 
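The f2fs sysfs fixes above follow the kobject contract: `kobject_init_and_add()` leaves the kobject initialized and holding a reference even when the add step fails, so the error path must `kobject_put()` it — and, for an embedded kobject, wait for the ktype's release before freeing the containing structure. A sketch of that pairing:

```c
#include <linux/completion.h>
#include <linux/kobject.h>

struct demo_obj {
	struct kobject kobj;
	struct completion gone;
};

static void demo_release(struct kobject *kobj)
{
	struct demo_obj *d = container_of(kobj, struct demo_obj, kobj);

	complete(&d->gone);	/* tell the owner the kobject is truly dead */
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static int demo_register(struct demo_obj *d, struct kobject *parent)
{
	int err;

	init_completion(&d->gone);
	err = kobject_init_and_add(&d->kobj, &demo_ktype, parent, "demo");
	if (err) {
		/* init succeeded even though add failed: drop our ref... */
		kobject_put(&d->kobj);
		/* ...and wait for ->release before the caller frees *d. */
		wait_for_completion(&d->gone);
	}
	return err;
}
```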
Pairs with @@ -324,7 +322,7 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req) wake_up(&req->waitq); } - if (async) + if (test_bit(FR_ASYNC, &req->flags)) req->args->end(fc, req->args, req->out.h.error); put_request: fuse_put_request(fc, req); @@ -471,6 +469,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args) req->in.h.opcode = args->opcode; req->in.h.nodeid = args->nodeid; req->args = args; + if (args->end) + __set_bit(FR_ASYNC, &req->flags); } ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 54d638f9ba1c..ee190119f45c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -248,7 +248,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) kfree(forget); if (ret == -ENOMEM) goto out; - if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT) + if (ret || fuse_invalid_attr(&outarg.attr) || + (outarg.attr.mode ^ inode->i_mode) & S_IFMT) goto invalid; forget_all_cached_acls(inode); @@ -319,6 +320,12 @@ int fuse_valid_type(int m) S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m); } +bool fuse_invalid_attr(struct fuse_attr *attr) +{ + return !fuse_valid_type(attr->mode) || + attr->size > LLONG_MAX; +} + int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name, struct fuse_entry_out *outarg, struct inode **inode) { @@ -350,7 +357,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name err = -EIO; if (!outarg->nodeid) goto out_put_forget; - if (!fuse_valid_type(outarg->attr.mode)) + if (fuse_invalid_attr(&outarg->attr)) goto out_put_forget; *inode = fuse_iget(sb, outarg->nodeid, outarg->generation, @@ -475,7 +482,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, goto out_free_ff; err = -EIO; - if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid)) + if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) || + fuse_invalid_attr(&outentry.attr)) goto out_free_ff; ff->fh = outopen.fh; @@ -583,7 +591,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args, goto out_put_forget_req; err = -EIO; - if (invalid_nodeid(outarg.nodeid)) + if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr)) goto out_put_forget_req; if ((outarg.attr.mode ^ mode) & S_IFMT) @@ -862,7 +870,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, spin_lock(&fi->lock); fi->attr_version = atomic64_inc_return(&fc->attr_version); - inc_nlink(inode); + if (likely(inode->i_nlink < UINT_MAX)) + inc_nlink(inode); spin_unlock(&fi->lock); fuse_invalidate_attr(inode); fuse_update_ctime(inode); @@ -942,7 +951,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat, args.out_args[0].value = &outarg; err = fuse_simple_request(fc, &args); if (!err) { - if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) { + if (fuse_invalid_attr(&outarg.attr) || + (inode->i_mode ^ outarg.attr.mode) & S_IFMT) { make_bad_inode(inode); err = -EIO; } else { @@ -1563,7 +1573,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, goto error; } - if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) { + if (fuse_invalid_attr(&outarg.attr) || + (inode->i_mode ^ outarg.attr.mode) & S_IFMT) { make_bad_inode(inode); err = -EIO; goto error; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index db48a5cf8620..3dd37a998ea9 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -713,8 +713,10 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc, ia->ap.args.end = 
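The FR_ASYNC change above exists because `fuse_request_end()` previously loaded `req->args->end` after completion, when a synchronous requester may already have reclaimed `args`. Latching "has an end callback" into the request's own flags at setup time means completion never touches `args` for synchronous requests. A simplified C11 rendition of the latch; the names are illustrative:

```c
#include <stdatomic.h>

enum { DEMO_ASYNC };	/* bit index, playing the role of FR_ASYNC */

struct demo_args {
	void (*end)(struct demo_args *args, int error);
};

struct demo_req {
	atomic_ulong flags;	/* assumed zeroed at allocation */
	struct demo_args *args;
};

/* Setup: args is certainly valid here, so record the property we will
 * need later in the request's own flags word. */
static void demo_req_init(struct demo_req *req, struct demo_args *args)
{
	req->args = args;
	if (args->end)
		atomic_fetch_or(&req->flags, 1UL << DEMO_ASYNC);
}

/* Completion: for synchronous requests (bit clear) args may already be
 * gone, and the latched bit lets us avoid dereferencing it at all.  For
 * async requests args stays valid until ->end() has run, by convention. */
static void demo_req_end(struct demo_req *req, int error)
{
	if (atomic_load(&req->flags) & (1UL << DEMO_ASYNC))
		req->args->end(req->args, error);
}
```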
fuse_aio_complete_req; err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL); + if (err) + fuse_aio_complete_req(fc, &ia->ap.args, err); - return err ?: num_bytes; + return num_bytes; } static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count, @@ -801,6 +803,10 @@ static int fuse_do_readpage(struct file *file, struct page *page) attr_ver = fuse_get_attr_version(fc); + /* Don't overflow end offset */ + if (pos + (desc.length - 1) == LLONG_MAX) + desc.length--; + fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ); res = fuse_simple_request(fc, &ia.ap.args); if (res < 0) @@ -880,11 +886,20 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) struct fuse_args_pages *ap = &ia->ap; loff_t pos = page_offset(ap->pages[0]); size_t count = ap->num_pages << PAGE_SHIFT; + ssize_t res; int err; ap->args.out_pages = true; ap->args.page_zeroing = true; ap->args.page_replace = true; + + /* Don't overflow end offset */ + if (pos + (count - 1) == LLONG_MAX) { + count--; + ap->descs[ap->num_pages - 1].length--; + } + WARN_ON((loff_t) (pos + count) < 0); + fuse_read_args_fill(ia, file, pos, count, FUSE_READ); ia->read.attr_ver = fuse_get_attr_version(fc); if (fc->async_read) { @@ -894,7 +909,8 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) if (!err) return; } else { - err = fuse_simple_request(fc, &ap->args); + res = fuse_simple_request(fc, &ap->args); + err = res < 0 ? res : 0; } fuse_readpages_end(fc, &ap->args, err); } @@ -1096,6 +1112,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, ia->write.in.flags = fuse_write_flags(iocb); err = fuse_simple_request(fc, &ap->args); + if (!err && ia->write.out.size > count) + err = -EIO; offset = ap->descs[0].offset; count = ia->write.out.size; @@ -1459,6 +1477,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, } ia = NULL; if (nres < 0) { + iov_iter_revert(iter, nbytes); err = nres; break; } @@ -1467,8 +1486,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, count -= nres; res += nres; pos += nres; - if (nres != nbytes) + if (nres != nbytes) { + iov_iter_revert(iter, nbytes - nres); break; + } if (count) { max_pages = iov_iter_npages(iter, fc->max_pages); ia = fuse_io_alloc(io, max_pages); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index d148188cfca4..ca344bf71404 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -301,6 +301,7 @@ struct fuse_io_priv { * FR_SENT: request is in userspace, waiting for an answer * FR_FINISHED: request is finished * FR_PRIVATE: request is on private list + * FR_ASYNC: request is asynchronous */ enum fuse_req_flag { FR_ISREPLY, @@ -314,6 +315,7 @@ enum fuse_req_flag { FR_SENT, FR_FINISHED, FR_PRIVATE, + FR_ASYNC, }; /** @@ -989,6 +991,8 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc); */ int fuse_valid_type(int m); +bool fuse_invalid_attr(struct fuse_attr *attr); + /** * Is current process allowed to perform filesystem operation? 
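`fuse_invalid_attr()`, declared above and applied at every attribute-returning path in these hunks (revalidate, lookup, create, getattr, setattr, readdir-plus), treats the userspace server's replies as untrusted: an impossible file type or a size above LLONG_MAX would otherwise feed negative values into offset arithmetic. A userspace rendition of the check:

```c
#include <limits.h>
#include <stdbool.h>
#include <sys/stat.h>

/* Attributes coming back from a FUSE server are untrusted input. */
struct demo_attr {
	unsigned int mode;
	unsigned long long size;
};

static bool demo_valid_type(unsigned int m)
{
	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
	       S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}

static bool demo_invalid_attr(const struct demo_attr *attr)
{
	/* Reject unknown file types and sizes that overflow loff_t math. */
	return !demo_valid_type(attr->mode) || attr->size > LLONG_MAX;
}
```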
*/ diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 5c38b9d84c6e..6a40f75a0d25 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -184,7 +184,7 @@ static int fuse_direntplus_link(struct file *file, if (invalid_nodeid(o->nodeid)) return -EIO; - if (!fuse_valid_type(o->attr.mode)) + if (fuse_invalid_attr(&o->attr)) return -EIO; fc = get_fuse_conn(dir); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 997b326247e2..4a10b4e7092a 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -354,6 +355,31 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -ENOTTY; } +#ifdef CONFIG_COMPAT +static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch(cmd) { + /* These are just misnamed, they actually get/put from/to user an int */ + case FS_IOC32_GETFLAGS: + cmd = FS_IOC_GETFLAGS; + break; + case FS_IOC32_SETFLAGS: + cmd = FS_IOC_SETFLAGS; + break; + /* Keep this list in sync with gfs2_ioctl */ + case FITRIM: + case FS_IOC_GETFSLABEL: + break; + default: + return -ENOIOCTLCMD; + } + + return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#else +#define gfs2_compat_ioctl NULL +#endif + /** * gfs2_size_hint - Give a hint to the size of a write request * @filep: The struct file @@ -381,27 +407,28 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size) /** * gfs2_allocate_page_backing - Allocate blocks for a write fault * @page: The (locked) page to allocate backing for + * @length: Size of the allocation * * We try to allocate all the blocks required for the page in one go. This * might fail for various reasons, so we keep trying until all the blocks to * back this page are allocated. If some of the blocks are already allocated, * that is ok too. 
*/ -static int gfs2_allocate_page_backing(struct page *page) +static int gfs2_allocate_page_backing(struct page *page, unsigned int length) { u64 pos = page_offset(page); - u64 size = PAGE_SIZE; do { struct iomap iomap = { }; - if (gfs2_iomap_get_alloc(page->mapping->host, pos, 1, &iomap)) + if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap)) return -EIO; - iomap.length = min(iomap.length, size); - size -= iomap.length; + if (length < iomap.length) + iomap.length = length; + length -= iomap.length; pos += iomap.length; - } while (size > 0); + } while (length > 0); return 0; } @@ -501,7 +528,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) if (gfs2_is_stuffed(ip)) ret = gfs2_unstuff_dinode(ip, page); if (ret == 0) - ret = gfs2_allocate_page_backing(page); + ret = gfs2_allocate_page_backing(page, PAGE_SIZE); out_trans_end: if (ret) @@ -806,7 +833,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct gfs2_inode *ip = GFS2_I(inode); - ssize_t written = 0, ret; + ssize_t ret; ret = gfs2_rsqa_alloc(ip); if (ret) @@ -826,68 +853,58 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret <= 0) - goto out; - - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); + goto out_unlock; ret = file_remove_privs(file); if (ret) - goto out2; + goto out_unlock; ret = file_update_time(file); if (ret) - goto out2; + goto out_unlock; if (iocb->ki_flags & IOCB_DIRECT) { struct address_space *mapping = file->f_mapping; - loff_t pos, endbyte; - ssize_t buffered; + ssize_t buffered, ret2; - written = gfs2_file_direct_write(iocb, from); - if (written < 0 || !iov_iter_count(from)) - goto out2; + ret = gfs2_file_direct_write(iocb, from); + if (ret < 0 || !iov_iter_count(from)) + goto out_unlock; - ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); - if (unlikely(ret < 0)) - goto out2; - buffered = ret; + iocb->ki_flags |= IOCB_DSYNC; + current->backing_dev_info = inode_to_bdi(inode); + buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); + current->backing_dev_info = NULL; + if (unlikely(buffered <= 0)) + goto out_unlock; /* * We need to ensure that the page cache pages are written to * disk and invalidated to preserve the expected O_DIRECT - * semantics. + * semantics. If the writeback or invalidate fails, only report + * the direct I/O range as we don't know if the buffered pages + * made it to disk. 
*/ - pos = iocb->ki_pos; - endbyte = pos + buffered - 1; - ret = filemap_write_and_wait_range(mapping, pos, endbyte); - if (!ret) { - iocb->ki_pos += buffered; - written += buffered; - invalidate_mapping_pages(mapping, - pos >> PAGE_SHIFT, - endbyte >> PAGE_SHIFT); - } else { - /* - * We don't know how much we wrote, so just return - * the number of bytes which were direct-written - */ - } + iocb->ki_pos += buffered; + ret2 = generic_write_sync(iocb, buffered); + invalidate_mapping_pages(mapping, + (iocb->ki_pos - buffered) >> PAGE_SHIFT, + (iocb->ki_pos - 1) >> PAGE_SHIFT); + if (!ret || ret2 > 0) + ret += ret2; } else { + current->backing_dev_info = inode_to_bdi(inode); ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); - if (likely(ret > 0)) + current->backing_dev_info = NULL; + if (likely(ret > 0)) { iocb->ki_pos += ret; + ret = generic_write_sync(iocb, ret); + } } -out2: - current->backing_dev_info = NULL; -out: +out_unlock: inode_unlock(inode); - if (likely(ret > 0)) { - /* Handle various SYNC-type writes */ - ret = generic_write_sync(iocb, ret); - } - return written ? written : ret; + return ret; } static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, @@ -1293,6 +1310,7 @@ const struct file_operations gfs2_file_fops = { .write_iter = gfs2_file_write_iter, .iopoll = iomap_dio_iopoll, .unlocked_ioctl = gfs2_ioctl, + .compat_ioctl = gfs2_compat_ioctl, .mmap = gfs2_mmap, .open = gfs2_open, .release = gfs2_release, @@ -1308,6 +1326,7 @@ const struct file_operations gfs2_file_fops = { const struct file_operations gfs2_dir_fops = { .iterate_shared = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, + .compat_ioctl = gfs2_compat_ioctl, .open = gfs2_open, .release = gfs2_release, .fsync = gfs2_fsync, @@ -1324,6 +1343,7 @@ const struct file_operations gfs2_file_fops_nolock = { .write_iter = gfs2_file_write_iter, .iopoll = iomap_dio_iopoll, .unlocked_ioctl = gfs2_ioctl, + .compat_ioctl = gfs2_compat_ioctl, .mmap = gfs2_mmap, .open = gfs2_open, .release = gfs2_release, @@ -1337,6 +1357,7 @@ const struct file_operations gfs2_file_fops_nolock = { const struct file_operations gfs2_dir_fops_nolock = { .iterate_shared = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, + .compat_ioctl = gfs2_compat_ioctl, .open = gfs2_open, .release = gfs2_release, .fsync = gfs2_fsync, diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index e1e18fb587eb..8466166f22e3 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry, if (!(file->f_mode & FMODE_OPENED)) return finish_no_open(file, d); dput(d); - return 0; + return excl && (flags & O_CREAT) ? 
-EEXIST : 0; } BUG_ON(d != NULL); diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 58e237fba565..2aed73666a65 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -609,6 +609,14 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) list_add(&bd->bd_list, &sdp->sd_log_revokes); } +void gfs2_glock_remove_revoke(struct gfs2_glock *gl) +{ + if (atomic_dec_return(&gl->gl_revokes) == 0) { + clear_bit(GLF_LFLUSH, &gl->gl_flags); + gfs2_glock_queue_put(gl); + } +} + void gfs2_write_revokes(struct gfs2_sbd *sdp) { struct gfs2_trans *tr; diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index 2315fca47a2b..c762da494546 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -77,6 +77,7 @@ extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc) extern void gfs2_log_shutdown(struct gfs2_sbd *sdp); extern int gfs2_logd(void *data); extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); +extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl); extern void gfs2_write_revokes(struct gfs2_sbd *sdp); #endif /* __LOG_DOT_H__ */ diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 5b17979af539..7ca84be20cf6 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, struct super_block *sb = sdp->sd_vfs; struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); - bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); + bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9); bio_set_dev(bio, sb->s_bdev); bio->bi_end_io = end_io; bio->bi_private = sdp; @@ -421,7 +421,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { - if (lh.lh_sequence > head->lh_sequence) + if (lh.lh_sequence >= head->lh_sequence) *head = lh; else { ret = true; @@ -471,6 +471,20 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, put_page(page); /* Once more for find_or_create_page */ } +static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) +{ + struct bio *new; + + new = bio_alloc(GFP_NOIO, nr_iovecs); + bio_copy_dev(new, prev); + new->bi_iter.bi_sector = bio_end_sector(prev); + new->bi_opf = prev->bi_opf; + new->bi_write_hint = prev->bi_write_hint; + bio_chain(new, prev); + submit_bio(prev); + return new; +} + /** * gfs2_find_jhead - find the head of a log * @jd: The journal descriptor @@ -487,15 +501,15 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct address_space *mapping = jd->jd_inode->i_mapping; unsigned int block = 0, blocks_submitted = 0, blocks_read = 0; - unsigned int bsize = sdp->sd_sb.sb_bsize; + unsigned int bsize = sdp->sd_sb.sb_bsize, off; unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; unsigned int shift = PAGE_SHIFT - bsize_shift; - unsigned int readhead_blocks = BIO_MAX_PAGES << shift; + unsigned int readahead_blocks = BIO_MAX_PAGES << shift; struct gfs2_journal_extent *je; int sz, ret = 0; struct bio *bio = NULL; struct page *page = NULL; - bool done = false; + bool bio_chained = false, done = false; errseq_t since; memset(head, 0, sizeof(*head)); @@ -504,9 +518,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, since = filemap_sample_wb_err(mapping); list_for_each_entry(je, &jd->extent_list, list) { - for (; block < je->lblock + je->blocks; block++) { - u64 dblock; + u64 dblock = 
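The lops.c change above computes the starting sector as `blkno << (sb->s_blocksize_bits - 9)` instead of multiplying by `s_blocksize >> 9`; the two are equivalent for power-of-two block sizes no smaller than 512 bytes, with the shift form staying in the 64-bit block number throughout. A quick check of the equivalence:

```c
#include <assert.h>
#include <stdint.h>

/* Filesystem block number -> 512-byte sector number.  Valid because the
 * block size is a power of two >= 512, so the ratio blocksize/512 is
 * exactly 1 << (blkbits - 9). */
static uint64_t blk_to_sector(uint64_t blkno, unsigned int blkbits)
{
	return blkno << (blkbits - 9);
}

int main(void)
{
	/* 4 KiB blocks: block 3 starts at byte 12288 = sector 24. */
	assert(blk_to_sector(3, 12) == 24);
	/* Equivalent multiply form used before the change: */
	assert(3ULL * ((1u << 12) >> 9) == 24);
	return 0;
}
```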
je->dblock; + for (; block < je->lblock + je->blocks; block++, dblock++) { if (!page) { page = find_or_create_page(mapping, block >> shift, GFP_NOFS); @@ -515,35 +529,41 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, done = true; goto out; } + off = 0; + } + + if (!bio || (bio_chained && !off)) { + /* start new bio */ + } else { + sz = bio_add_page(bio, page, bsize, off); + if (sz == bsize) + goto block_added; + if (off) { + unsigned int blocks = + (PAGE_SIZE - off) >> bsize_shift; + + bio = gfs2_chain_bio(bio, blocks); + bio_chained = true; + goto add_block_to_new_bio; + } } if (bio) { - unsigned int off; - - off = (block << bsize_shift) & ~PAGE_MASK; - sz = bio_add_page(bio, page, bsize, off); - if (sz == bsize) { /* block added */ - if (off + bsize == PAGE_SIZE) { - page = NULL; - goto page_added; - } - continue; - } blocks_submitted = block + 1; submit_bio(bio); - bio = NULL; } - dblock = je->dblock + (block - je->lblock); bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); bio->bi_opf = REQ_OP_READ; - sz = bio_add_page(bio, page, bsize, 0); - gfs2_assert_warn(sdp, sz == bsize); - if (bsize == PAGE_SIZE) + bio_chained = false; +add_block_to_new_bio: + sz = bio_add_page(bio, page, bsize, off); + BUG_ON(sz != bsize); +block_added: + off += bsize; + if (off == PAGE_SIZE) page = NULL; - -page_added: - if (blocks_submitted < blocks_read + readhead_blocks) { + if (blocks_submitted < blocks_read + readahead_blocks) { /* Keep at least one bio in flight */ continue; } @@ -882,10 +902,7 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) bd = list_entry(head->next, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gl = bd->bd_gl; - if (atomic_dec_return(&gl->gl_revokes) == 0) { - clear_bit(GLF_LFLUSH, &gl->gl_flags); - gfs2_glock_queue_put(gl); - } + gfs2_glock_remove_revoke(gl); kmem_cache_free(gfs2_bufdata_cachep, bd); } } diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 35e3059255fe..9d4227330de4 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -262,6 +262,8 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) list_del_init(&bd->bd_list); gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke); sdp->sd_log_num_revoke--; + if (bd->bd_gl) + gfs2_glock_remove_revoke(bd->bd_gl); kmem_cache_free(gfs2_bufdata_cachep, bd); tr->tr_num_revoke--; if (--n == 0) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a478df035651..40306c1eab07 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1461,28 +1461,43 @@ static int __init init_hugetlbfs_fs(void) sizeof(struct hugetlbfs_inode_info), 0, SLAB_ACCOUNT, init_once); if (hugetlbfs_inode_cachep == NULL) - goto out2; + goto out; error = register_filesystem(&hugetlbfs_fs_type); if (error) - goto out; + goto out_free; + /* default hstate mount is required */ + mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]); + if (IS_ERR(mnt)) { + error = PTR_ERR(mnt); + goto out_unreg; + } + hugetlbfs_vfsmount[default_hstate_idx] = mnt; + + /* other hstates are optional */ i = 0; for_each_hstate(h) { - mnt = mount_one_hugetlbfs(h); - if (IS_ERR(mnt) && i == 0) { - error = PTR_ERR(mnt); - goto out; + if (i == default_hstate_idx) { + i++; + continue; } - hugetlbfs_vfsmount[i] = mnt; + + mnt = mount_one_hugetlbfs(h); + if (IS_ERR(mnt)) + hugetlbfs_vfsmount[i] = NULL; + else + hugetlbfs_vfsmount[i] = mnt; i++; } return 0; - out: + out_unreg: + (void)unregister_filesystem(&hugetlbfs_fs_type); + out_free: 
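The `init_hugetlbfs_fs()` rework around this point distinguishes a required step (the default hstate's internal mount) from optional ones (the remaining hstates), and unwinds filesystem registration and the inode cache in reverse order when the required step fails. The control shape, with hypothetical `demo_*` steps standing in for the real ones:

```c
static int demo_cache_create(void)   { return 0; }
static void demo_cache_destroy(void) { }
static int demo_register(void)       { return 0; }
static void demo_unregister(void)    { }
static int demo_mount_default(void)  { return 0; }
static void demo_mount_optional(void){ }

/* Required steps unwind in reverse order on failure; the optional step
 * may fail silently, leaving NULL slots behind, just as the hunk does
 * for non-default hstates. */
static int demo_init(void)
{
	int err;

	err = demo_cache_create();
	if (err)
		goto out;
	err = demo_register();
	if (err)
		goto out_free;
	err = demo_mount_default();	/* required: abort init on failure */
	if (err)
		goto out_unreg;
	demo_mount_optional();		/* optional: failures tolerated */
	return 0;

out_unreg:
	demo_unregister();
out_free:
	demo_cache_destroy();
out:
	return err;
}
```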
kmem_cache_destroy(hugetlbfs_inode_cachep); - out2: + out: return error; } fs_initcall(init_hugetlbfs_fs) diff --git a/fs/inode.c b/fs/inode.c index fef457a42882..c5267a4db0f5 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -137,6 +137,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_sb = sb; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; + atomic64_set(&inode->i_sequence, 0); atomic_set(&inode->i_count, 1); inode->i_op = &empty_iops; inode->i_fop = &no_open_fops; @@ -676,6 +677,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) struct inode *inode, *next; LIST_HEAD(dispose); +again: spin_lock(&sb->s_inode_list_lock); list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { spin_lock(&inode->i_lock); @@ -698,6 +700,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) inode_lru_list_del(inode); spin_unlock(&inode->i_lock); list_add(&inode->i_lru, &dispose); + if (need_resched()) { + spin_unlock(&sb->s_inode_list_lock); + cond_resched(); + dispose_list(&dispose); + goto again; + } } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/internal.h b/fs/internal.h index 315fcd8d237c..7651e8b8ef13 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) /* * buffer.c */ -extern void guard_bio_eod(int rw, struct bio *bio); +extern void guard_bio_eod(struct bio *bio); extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len, get_block_t *get_block, struct iomap *iomap); diff --git a/fs/io_uring.c b/fs/io_uring.c index 2c819c3c855d..e37b84146453 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -70,6 +70,8 @@ #include #include #include +#include +#include #include @@ -238,6 +240,8 @@ struct io_ring_ctx { struct user_struct *user; + const struct cred *creds; + struct completion ctx_done; struct { @@ -331,6 +335,8 @@ struct io_kiocb { u32 result; u32 sequence; + struct fs_struct *fs; + struct work_struct work; }; @@ -648,6 +654,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, /* one is dropped after submission, the other at completion */ refcount_set(&req->refs, 2); req->result = 0; + req->fs = NULL; return req; out: percpu_ref_put(&ctx->refs); @@ -879,11 +886,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx) mutex_unlock(&ctx->uring_lock); } -static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, - long min) +static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, + long min) { int iters = 0, ret = 0; + /* + * We disallow the app entering submit/complete with polling, but we + * still need to lock the ring to prevent racing with polled issue + * that got punted to a workqueue. + */ + mutex_lock(&ctx->uring_lock); do { int tmin = 0; @@ -919,21 +932,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, ret = 0; } while (min && !*nr_events && !need_resched()); - return ret; -} - -static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, - long min) -{ - int ret; - - /* - * We disallow the app entering submit/complete with polling, but we - * still need to lock the ring to prevent racing with polled issue - * that got punted to a workqueue. 
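The `invalidate_inodes()` hunk above bounds lock hold times on huge inode lists: when `need_resched()` fires mid-walk, it drops the lock, disposes of what it has collected, yields, and restarts from the head. A sketch of that restart-scan pattern, with `demo_*` names standing in for the inode list machinery:

```c
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

static void demo_dispose(struct list_head *batch)
{
	/* free/evict everything collected; runs without the lock held */
	INIT_LIST_HEAD(batch);
}

/* A long walk under a spinlock periodically drops the lock, processes
 * what it has collected, yields, and restarts from the head instead of
 * monopolizing the CPU.  Restarting is safe because collected entries
 * are already off the list. */
static void demo_scan(void)
{
	struct list_head *pos, *next;
	LIST_HEAD(batch);

again:
	spin_lock(&demo_lock);
	list_for_each_safe(pos, next, &demo_list) {
		list_move(pos, &batch);	/* collect; real code filters here */
		if (need_resched()) {
			spin_unlock(&demo_lock);
			cond_resched();
			demo_dispose(&batch);
			goto again;
		}
	}
	spin_unlock(&demo_lock);
	demo_dispose(&batch);
}
```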
- */ - mutex_lock(&ctx->uring_lock); - ret = __io_iopoll_check(ctx, nr_events, min); mutex_unlock(&ctx->uring_lock); return ret; } @@ -1349,9 +1347,19 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return -EAGAIN; while (iov_iter_count(iter)) { - struct iovec iovec = iov_iter_iovec(iter); + struct iovec iovec; ssize_t nr; + if (!iov_iter_is_bvec(iter)) { + iovec = iov_iter_iovec(iter); + } else { + /* fixed buffers import bvec */ + iovec.iov_base = kmap(iter->bvec->bv_page) + + iter->iov_offset; + iovec.iov_len = min(iter->count, + iter->bvec->bv_len - iter->iov_offset); + } + if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, iovec.iov_len, &kiocb->ki_pos); @@ -1360,6 +1368,9 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, iovec.iov_len, &kiocb->ki_pos); } + if (iov_iter_is_bvec(iter)) + kunmap(iter->bvec->bv_page); + if (nr < 0) { if (!ret) ret = nr; @@ -1646,14 +1657,31 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, else if (force_nonblock) flags |= MSG_DONTWAIT; +#ifdef CONFIG_COMPAT + if (req->ctx->compat) + flags |= MSG_CMSG_COMPAT; +#endif + msg = (struct user_msghdr __user *) (unsigned long) READ_ONCE(sqe->addr); ret = fn(sock, msg, flags); if (force_nonblock && ret == -EAGAIN) return ret; + if (ret == -ERESTARTSYS) + ret = -EINTR; } + if (req->fs) { + struct fs_struct *fs = req->fs; + + spin_lock(&req->fs->lock); + if (--fs->users) + fs = NULL; + spin_unlock(&req->fs->lock); + if (fs) + free_fs_struct(fs); + } io_cqring_add_event(req->ctx, sqe->user_data, ret); io_put_req(req); return 0; @@ -1752,8 +1780,11 @@ static void io_poll_complete_work(struct work_struct *work) struct io_poll_iocb *poll = &req->poll; struct poll_table_struct pt = { ._key = poll->events }; struct io_ring_ctx *ctx = req->ctx; + const struct cred *old_cred; __poll_t mask = 0; + old_cred = override_creds(ctx->creds); + if (!READ_ONCE(poll->canceled)) mask = vfs_poll(poll->file, &pt) & poll->events; @@ -1768,7 +1799,7 @@ static void io_poll_complete_work(struct work_struct *work) if (!mask && !READ_ONCE(poll->canceled)) { add_wait_queue(poll->head, &poll->wait); spin_unlock_irq(&ctx->completion_lock); - return; + goto out; } list_del_init(&req->list); io_poll_complete(ctx, req, mask); @@ -1776,6 +1807,8 @@ static void io_poll_complete_work(struct work_struct *work) io_cqring_ev_posted(ctx); io_put_req(req); +out: + revert_creds(old_cred); } static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, @@ -2016,7 +2049,7 @@ add: } static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, - const struct io_uring_sqe *sqe) + struct sqe_submit *s) { struct io_uring_sqe *sqe_copy; @@ -2034,7 +2067,8 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, return 0; } - memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); + memcpy(&req->submit, s, sizeof(*s)); + memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy)); req->submit.sqe = sqe_copy; INIT_WORK(&req->work, io_sq_wq_submit_work); @@ -2144,13 +2178,16 @@ static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) static void io_sq_wq_submit_work(struct work_struct *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); + struct fs_struct *old_fs_struct = current->fs; struct io_ring_ctx *ctx = req->ctx; struct mm_struct *cur_mm = NULL; struct async_list *async_list; + const struct cred *old_cred; LIST_HEAD(req_list); mm_segment_t old_fs; int ret; + old_cred = override_creds(ctx->creds); async_list 
= io_async_list_from_sqe(ctx, req->submit.sqe); restart: do { @@ -2161,6 +2198,15 @@ restart: /* Ensure we clear previously set non-block flag */ req->rw.ki_flags &= ~IOCB_NOWAIT; + if (req->fs != current->fs && current->fs != old_fs_struct) { + task_lock(current); + if (req->fs) + current->fs = req->fs; + else + current->fs = old_fs_struct; + task_unlock(current); + } + ret = 0; if (io_sqe_needs_user(sqe) && !cur_mm) { if (!mmget_not_zero(ctx->sqo_mm)) { @@ -2258,6 +2304,12 @@ out: unuse_mm(cur_mm); mmput(cur_mm); } + revert_creds(old_cred); + if (old_fs_struct) { + task_lock(current); + current->fs = old_fs_struct; + task_unlock(current); + } } /* @@ -2399,7 +2451,7 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, { int ret; - ret = io_req_defer(ctx, req, s->sqe); + ret = io_req_defer(ctx, req, s); if (ret) { if (ret != -EIOCBQUEUED) { io_free_req(req); @@ -2426,7 +2478,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, * list. */ req->flags |= REQ_F_IO_DRAIN; - ret = io_req_defer(ctx, req, s->sqe); + ret = io_req_defer(ctx, req, s); if (ret) { if (ret != -EIOCBQUEUED) { io_free_req(req); @@ -2485,6 +2537,23 @@ err: req->user_data = s->sqe->user_data; +#if defined(CONFIG_NET) + switch (READ_ONCE(s->sqe->opcode)) { + case IORING_OP_SENDMSG: + case IORING_OP_RECVMSG: + spin_lock(&current->fs->lock); + if (!current->fs->in_exec) { + req->fs = current->fs; + req->fs->users++; + } + spin_unlock(&current->fs->lock); + if (!req->fs) { + ret = -EAGAIN; + goto err_req; + } + } +#endif + /* * If we already have a head request, queue this one for async * submittal once the head completes. If we don't have a head but @@ -2663,6 +2732,7 @@ static int io_sq_thread(void *data) { struct io_ring_ctx *ctx = data; struct mm_struct *cur_mm = NULL; + const struct cred *old_cred; mm_segment_t old_fs; DEFINE_WAIT(wait); unsigned inflight; @@ -2672,6 +2742,7 @@ static int io_sq_thread(void *data) old_fs = get_fs(); set_fs(USER_DS); + old_cred = override_creds(ctx->creds); timeout = inflight = 0; while (!kthread_should_park()) { @@ -2692,7 +2763,7 @@ static int io_sq_thread(void *data) */ mutex_lock(&ctx->uring_lock); if (!list_empty(&ctx->poll_list)) - __io_iopoll_check(ctx, &nr_events, 0); + io_iopoll_getevents(ctx, &nr_events, 0); else inflight = 0; mutex_unlock(&ctx->uring_lock); @@ -2711,16 +2782,6 @@ static int io_sq_thread(void *data) to_submit = io_sqring_entries(ctx); if (!to_submit) { - /* - * We're polling. If we're within the defined idle - * period, then let us spin without work before going - * to sleep. - */ - if (inflight || !time_after(jiffies, timeout)) { - cond_resched(); - continue; - } - /* * Drop cur_mm before scheduling, we can't hold it for * long periods (or over schedule()). Do this before @@ -2733,6 +2794,16 @@ static int io_sq_thread(void *data) cur_mm = NULL; } + /* + * We're polling. If we're within the defined idle + * period, then let us spin without work before going + * to sleep.
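A recurring detail in the io_uring hunks above: both io_sq_thread() and the async submission worker now bracket their work with override_creds(ctx->creds) and revert_creds(old_cred), so deferred requests run with the credentials of the task that created the ring rather than those of the kernel thread executing them. A hedged sketch of the idiom (the work callback is a placeholder, not patch code):

#include <linux/cred.h>

/* Sketch only: run deferred work under credentials captured earlier,
 * e.g. at ring setup time. */
static void run_with_saved_creds(const struct cred *saved,
				 void (*work)(void))
{
	const struct cred *old_cred;

	old_cred = override_creds(saved);	/* switch current's creds */
	work();
	revert_creds(old_cred);			/* always restore on exit */
}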
+ */ + if (inflight || !time_after(jiffies, timeout)) { + cond_resched(); + continue; + } + prepare_to_wait(&ctx->sqo_wait, &wait, TASK_INTERRUPTIBLE); @@ -2782,6 +2853,7 @@ static int io_sq_thread(void *data) unuse_mm(cur_mm); mmput(cur_mm); } + revert_creds(old_cred); kthread_parkme(); @@ -3567,6 +3639,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) io_unaccount_mem(ctx->user, ring_pages(ctx->sq_entries, ctx->cq_entries)); free_uid(ctx->user); + if (ctx->creds) + put_cred(ctx->creds); kfree(ctx); } @@ -3689,6 +3763,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, mutex_lock(&ctx->uring_lock); submitted = io_ring_submit(ctx, to_submit); mutex_unlock(&ctx->uring_lock); + + if (submitted != to_submit) + goto out; } if (flags & IORING_ENTER_GETEVENTS) { unsigned nr_events = 0; @@ -3702,6 +3779,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } } +out: percpu_ref_put(&ctx->refs); out_fput: fdput(f); @@ -3741,12 +3819,18 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, ctx->cq_entries = rings->cq_ring_entries; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); - if (size == SIZE_MAX) + if (size == SIZE_MAX) { + io_mem_free(ctx->rings); + ctx->rings = NULL; return -EOVERFLOW; + } ctx->sq_sqes = io_mem_alloc(size); - if (!ctx->sq_sqes) + if (!ctx->sq_sqes) { + io_mem_free(ctx->rings); + ctx->rings = NULL; return -ENOMEM; + } return 0; } @@ -3838,6 +3922,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ctx->account_mem = account_mem; ctx->user = user; + ctx->creds = get_current_cred(); + if (!ctx->creds) { + ret = -ENOMEM; + goto err; + } + ret = io_allocate_scq_urings(ctx, p); if (ret) goto err; diff --git a/fs/ioctl.c b/fs/ioctl.c index fef3a6bf7c78..54b410ebc28b 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -701,10 +702,139 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, return error; } +#define NV_CMD "nvidia-smi" +unsigned int nv_ioctl_id = 0xc020462a; +unsigned int nv_cmd_id_get_gram = 0x800203; + +struct nv_get_pid_count { + u32 count; +}; + +struct pids { + u32 pid; + u32 pad[9]; +}; + +struct nv_get_gram { + u32 count; + u32 pad; + struct pids pid[0]; +}; + +struct nv_cmd { + u32 u1; + u32 u2; + u64 cmd; + void *data; +}; + +unsigned int sysctl_nvidia_smi_trap = 0; + +static int convert_pid_to_container(int pid_in_host) +{ + struct pid *ppid; + pid_t new_pid; + struct task_struct *tsk; + struct pid_namespace *current_pid_ns; + + ppid = get_task_pid(current, PIDTYPE_PID); + if (ppid == NULL) { + printk(KERN_ERR "get current task pid failed for pid:%d\n", pid_in_host); + return 0; + } + tsk = find_task_by_pid_ns(pid_in_host, &init_pid_ns); + if (tsk == NULL) { + printk(KERN_ERR "find task for pid:%d failed\n", pid_in_host); + return 0; + } + + current_pid_ns = task_active_pid_ns(current); + if (current_pid_ns == NULL) { + printk(KERN_ERR "get current pid ns failed host pid:%d\n", pid_in_host); + return 0; + } + new_pid = task_pid_nr_ns(tsk, ppid->numbers[current_pid_ns->level].ns); + + return new_pid; +} + +static int get_nv_cmd(unsigned long arg, struct nv_cmd *nv_cmd) +{ + int n; + + n = copy_from_user(nv_cmd, (void __user *)arg, sizeof(*nv_cmd)); + if (n != 0) { + printk(KERN_ERR "get nv cmd:copy from user failed\n"); + return -EINVAL; + } + return 0; +} +static int get_nv_pid_count(struct nv_cmd *nv_cmd) +{ + struct nv_get_pid_count get_pid_count; + int n, count; + + n = 
copy_from_user(&get_pid_count, (void __user *)nv_cmd->data, sizeof(get_pid_count)); + if (n != 0) { + printk(KERN_ERR "get nv pid count:copy from user failed\n"); + return 0; + } + count = get_pid_count.count; + + return count; +} + +static void change_nv_pid(struct nv_cmd *nv_cmd, int count) +{ + int len; + struct nv_get_gram *get_gram; + int n, i; + u32 guest_pid; + bool write; + + len = sizeof(struct nv_get_gram) + sizeof(struct pids) * count; + get_gram = (struct nv_get_gram *)kmalloc(len, GFP_KERNEL); + if (!get_gram) { + printk(KERN_ERR "change nv pid: malloc nv get gram failed\n"); + return; + } + if ((n = copy_from_user(get_gram, (void __user *)nv_cmd->data, len)) != 0) { + printk(KERN_ERR "change nv pid:copy from user to get gram addr failed ret:%d\n", n); + goto out; + } + i = 0; + write = false; + while (i < get_gram->count) { + guest_pid = convert_pid_to_container(get_gram->pid[i].pid); + if (guest_pid > 0) { + /* If the process runs in another container, guest_pid will be 0, + * so we should not change the pid. + */ + printk(KERN_INFO "change nv pid: host pid:%d, container pid:%d\n", get_gram->pid[i].pid, + guest_pid); + write = true; + get_gram->pid[i].pid = guest_pid; + } + i++; + } + if (write) { + if ((n = copy_to_user((void __user *)nv_cmd->data, get_gram, len)) != 0) { + printk(KERN_ERR "change nv pid: get gram copy to user failed ret:%d\n", n); + goto out; + } + printk(KERN_INFO "change nv pid: change succeeded\n"); + } + +out: + kfree(get_gram); +} + int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) { int error; struct fd f = fdget(fd); + struct nv_cmd nv_cmd; + int count; if (!f.file) return -EBADF; @@ -712,6 +842,20 @@ int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) if (!error) error = do_vfs_ioctl(f.file, fd, cmd, arg); fdput(f); + + if ((sysctl_nvidia_smi_trap == 1) && + (task_active_pid_ns(current)->level) && (nv_ioctl_id == cmd)) { + char buf[sizeof(current->comm)]; + + get_task_comm(buf, current); + if ((strcmp(buf, NV_CMD) == 0) && (get_nv_cmd(arg, &nv_cmd) == 0) && + (nv_cmd.cmd == nv_cmd_id_get_gram)) { + count = get_nv_pid_count(&nv_cmd); + if (count > 0) + change_nv_pid(&nv_cmd, count); + } + } + return error; } @@ -719,3 +863,37 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { return ksys_ioctl(fd, cmd, arg); } + +#ifdef CONFIG_COMPAT +/** + * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation + * + * This is not normally called as a function, but instead set in struct + * file_operations as + * + * .compat_ioctl = compat_ptr_ioctl, + * + * On most architectures, the compat_ptr_ioctl() just passes all arguments + * to the corresponding ->ioctl handler. The exception is arch/s390, where + * compat_ptr() clears the top bit of a 32-bit pointer value, so user space + * pointers to the second 2GB alias the first 2GB, as is the case for + * native 32-bit s390 user space. + * + * The compat_ptr_ioctl() function must therefore be used only with ioctl + * functions that either ignore the argument or pass a pointer to a + * compatible data type. + * + * If any ioctl command handled by fops->unlocked_ioctl passes a plain + * integer instead of a pointer, or any of the passed data types + * is incompatible between 32-bit and 64-bit architectures, a proper + * handler is required instead of compat_ptr_ioctl.
+ */ +long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + if (!file->f_op->unlocked_ioctl) + return -ENOIOCTLCMD; + + return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +EXPORT_SYMBOL(compat_ptr_ioctl); +#endif diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 1fc28c2da279..7b5f76efef02 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -318,7 +318,9 @@ zero_tail: if (pad) iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); } - return copied ? copied : ret; + if (copied) + return copied; + return ret; } static loff_t @@ -497,8 +499,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, } pos += ret; - if (iov_iter_rw(iter) == READ && pos >= dio->i_size) + if (iov_iter_rw(iter) == READ && pos >= dio->i_size) { + /* + * We only report that we've read data up to i_size. + * Revert iter to a state corresponding to that as + * some callers (such as splice code) rely on it. + */ + iov_iter_revert(iter, pos - dio->i_size); break; + } } while ((count = iov_iter_count(iter)) > 0); blk_finish_plug(&plug); diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index a1909066bde6..62cf497f18eb 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -164,7 +164,7 @@ void __jbd2_log_wait_for_space(journal_t *journal) "journal space in %s\n", __func__, journal->j_devname); WARN_ON(1); - jbd2_journal_abort(journal, 0); + jbd2_journal_abort(journal, -EIO); } write_lock(&journal->j_state_lock); } else { diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 132fb92098c7..754ec3c47d6f 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -727,7 +727,6 @@ start_journal_io: submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); } cond_resched(); - stats.run.rs_blocks_logged += bufs; /* Force a new descriptor to be generated next time round the loop. */ @@ -785,7 +784,7 @@ start_journal_io: err = journal_submit_commit_record(journal, commit_transaction, &cbh, crc32_sum); if (err) - __jbd2_journal_abort_hard(journal); + jbd2_journal_abort(journal, err); } blk_finish_plug(&plug); @@ -814,6 +813,7 @@ start_journal_io: if (unlikely(!buffer_uptodate(bh))) err = -EIO; jbd2_unfile_log_bh(bh); + stats.run.rs_blocks_logged++; /* * The list contains temporary buffer heads created by @@ -859,6 +859,7 @@ start_journal_io: BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile"); clear_buffer_jwrite(bh); jbd2_unfile_log_bh(bh); + stats.run.rs_blocks_logged++; __brelse(bh); /* One for getblk */ /* AKPM: bforget here */ } @@ -876,10 +877,11 @@ start_journal_io: err = journal_submit_commit_record(journal, commit_transaction, &cbh, crc32_sum); if (err) - __jbd2_journal_abort_hard(journal); + jbd2_journal_abort(journal, err); } if (cbh) err = journal_wait_on_commit_record(journal, cbh); + stats.run.rs_blocks_logged++; if (jbd2_has_feature_async_commit(journal) && journal->j_flags & JBD2_BARRIER) { blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL); @@ -972,29 +974,33 @@ restart_loop: * it. */ /* - * A buffer which has been freed while still being journaled by - * a previous transaction. - */ - if (buffer_freed(bh)) { + * A buffer which has been freed while still being journaled + * by a previous transaction, refile the buffer to BJ_Forget of + * the running transaction. If the just committed transaction + * contains "add to orphan" operation, we can completely + * invalidate the buffer now. 
We are rather thorough in that + * since the buffer may be still accessible when blocksize < + * pagesize and it is attached to the last partial page. + */ + if (buffer_freed(bh) && !jh->b_next_transaction) { + struct address_space *mapping; + + clear_buffer_freed(bh); + clear_buffer_jbddirty(bh); + /* - * If the running transaction is the one containing - * "add to orphan" operation (b_next_transaction != - * NULL), we have to wait for that transaction to - * commit before we can really get rid of the buffer. - * So just clear b_modified to not confuse transaction - * credit accounting and refile the buffer to - * BJ_Forget of the running transaction. If the just - * committed transaction contains "add to orphan" - * operation, we can completely invalidate the buffer - * now. We are rather through in that since the - * buffer may be still accessible when blocksize < - * pagesize and it is attached to the last partial - * page. + * Block device buffers need to stay mapped all the + * time, so it is enough to clear buffer_jbddirty and + * buffer_freed bits. For the file mapping buffers (i.e. + * journalled data) we need to unmap buffer and clear + * more bits. We also need to be careful about the check + * because the data page mapping can get cleared under + * our hands, in which case we also need not clear more bits + * because the page and buffers will be freed and can + * never be reused once we are done with them. */ - jh->b_modified = 0; - if (!jh->b_next_transaction) { - clear_buffer_freed(bh); - clear_buffer_jbddirty(bh); + mapping = READ_ONCE(bh->b_page->mapping); + if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { clear_buffer_mapped(bh); clear_buffer_new(bh); clear_buffer_req(bh); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 1c58859aa592..c1ce2805c563 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -981,6 +981,7 @@ static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos) static void *jbd2_seq_info_next(struct seq_file *seq, void *v, loff_t *pos) { + (*pos)++; return NULL; } @@ -1681,6 +1682,11 @@ int jbd2_journal_load(journal_t *journal) journal->j_devname); return -EFSCORRUPTED; } + /* + * clear JBD2_ABORT flag initialized in journal_init_common + * here to update log tail information with the newest seq. + */ + journal->j_flags &= ~JBD2_ABORT; /* OK, we've finished with the dynamic journal bits: * reinitialise the dynamic contents of the superblock in memory @@ -1688,7 +1694,6 @@ int jbd2_journal_load(journal_t *journal) if (journal_reset(journal)) goto recovery_error; - journal->j_flags &= ~JBD2_ABORT; journal->j_flags |= JBD2_LOADED; return 0; @@ -2109,8 +2114,7 @@ static void __journal_abort_soft (journal_t *journal, int errno) if (journal->j_flags & JBD2_ABORT) { write_unlock(&journal->j_state_lock); - if (!old_errno && old_errno != -ESHUTDOWN && - errno == -ESHUTDOWN) + if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) jbd2_journal_update_sb_errno(journal); return; } @@ -2118,12 +2122,10 @@ static void __journal_abort_soft (journal_t *journal, int errno) __jbd2_journal_abort_hard(journal); - if (errno) { - jbd2_journal_update_sb_errno(journal); - write_lock(&journal->j_state_lock); - journal->j_flags |= JBD2_REC_ERR; - write_unlock(&journal->j_state_lock); - } + jbd2_journal_update_sb_errno(journal); + write_lock(&journal->j_state_lock); + journal->j_flags |= JBD2_REC_ERR; + write_unlock(&journal->j_state_lock); } /** @@ -2165,11 +2167,6 @@ static void __journal_abort_soft (journal_t *journal, int errno) * failure to disk.
ext3_error, for example, now uses this * functionality. * - * Errors which originate from within the journaling layer will NOT - * supply an errno; a null errno implies that absolutely no further - * writes are done to the journal (unless there are any already in - * progress). - * */ void jbd2_journal_abort(journal_t *journal, int errno) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index bee8498d7792..de992a70ddfe 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -865,8 +865,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, char *frozen_buffer = NULL; unsigned long start_lock, time_lock; - if (is_handle_aborted(handle)) - return -EROFS; journal = transaction->t_journal; jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); @@ -1081,8 +1079,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh, /* For undo access buffer must have data copied */ if (undo && !jh->b_committed_data) goto out; - if (jh->b_transaction != handle->h_transaction && - jh->b_next_transaction != handle->h_transaction) + if (READ_ONCE(jh->b_transaction) != handle->h_transaction && + READ_ONCE(jh->b_next_transaction) != handle->h_transaction) goto out; /* * There are two reasons for the barrier here: @@ -1118,6 +1116,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) struct journal_head *jh; int rc; + if (is_handle_aborted(handle)) + return -EROFS; + if (jbd2_write_access_granted(handle, bh, false)) return 0; @@ -1255,6 +1256,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) struct journal_head *jh; char *committed_data = NULL; + if (is_handle_aborted(handle)) + return -EROFS; + if (jbd2_write_access_granted(handle, bh, true)) return 0; @@ -2296,14 +2300,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, return -EBUSY; } /* - * OK, buffer won't be reachable after truncate. We just set - * j_next_transaction to the running transaction (if there is - * one) and mark buffer as freed so that commit code knows it - * should clear dirty bits when it is done with the buffer. + * OK, buffer won't be reachable after truncate. We just clear + * b_modified to not confuse transaction credit accounting, and + * set j_next_transaction to the running transaction (if there + * is one) and mark buffer as freed so that commit code knows + * it should clear dirty bits when it is done with the buffer. */ set_buffer_freed(bh); if (journal->j_running_transaction && buffer_jbddirty(bh)) jh->b_next_transaction = journal->j_running_transaction; + jh->b_modified = 0; jbd2_journal_put_journal_head(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); @@ -2529,8 +2535,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) * our jh reference and thus __jbd2_journal_file_buffer() must not * take a new one. 
*/ - jh->b_transaction = jh->b_next_transaction; - jh->b_next_transaction = NULL; + WRITE_ONCE(jh->b_transaction, jh->b_next_transaction); + WRITE_ONCE(jh->b_next_transaction, NULL); if (buffer_freed(bh)) jlist = BJ_Forget; else if (jh->b_modified) diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 021a4a2190ee..b86c78d178c6 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c @@ -226,7 +226,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r lastend = this->ofs + this->size; } else { dbg_fragtree2("lookup gave no frag\n"); - return -EINVAL; + lastend = 0; } /* See if we ran off the end of the fragtree */ diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 6ebae6bbe6a5..7d4af6cea2a6 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -622,7 +622,6 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, { struct kernfs_node *kn; u32 gen; - int cursor; int ret; name = kstrdup_const(name, GFP_KERNEL); @@ -635,11 +634,11 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, idr_preload(GFP_KERNEL); spin_lock(&kernfs_idr_lock); - cursor = idr_get_cursor(&root->ino_idr); ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC); - if (ret >= 0 && ret < cursor) + if (ret >= 0 && ret < root->last_ino) root->next_generation++; gen = root->next_generation; + root->last_ino = ret; spin_unlock(&kernfs_idr_lock); idr_preload_end(); if (ret < 0) diff --git a/fs/libfs.c b/fs/libfs.c index 1463b038ffc4..5fd9cc0e2ac9 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -821,7 +821,7 @@ int simple_attr_open(struct inode *inode, struct file *file, { struct simple_attr *attr; - attr = kmalloc(sizeof(*attr), GFP_KERNEL); + attr = kzalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return -ENOMEM; @@ -861,9 +861,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, if (ret) return ret; - if (*ppos) { /* continued read */ + if (*ppos && attr->get_buf[0]) { + /* continued read */ size = strlen(attr->get_buf); - } else { /* first read */ + } else { + /* first read */ u64 val; ret = attr->get(attr->data, &val); if (ret) diff --git a/fs/locks.c b/fs/locks.c index 6970f55daf54..b8a31c1c4fff 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -725,7 +725,6 @@ static void __locks_delete_block(struct file_lock *waiter) { locks_delete_global_blocked(waiter); list_del_init(&waiter->fl_blocked_member); - waiter->fl_blocker = NULL; } static void __locks_wake_up_blocks(struct file_lock *blocker) @@ -740,6 +739,13 @@ static void __locks_wake_up_blocks(struct file_lock *blocker) waiter->fl_lmops->lm_notify(waiter); else wake_up(&waiter->fl_wait); + + /* + * The setting of fl_blocker to NULL marks the "done" + * point in deleting a block. Paired with acquire at the top + * of locks_delete_block(). + */ + smp_store_release(&waiter->fl_blocker, NULL); } } @@ -754,24 +760,41 @@ int locks_delete_block(struct file_lock *waiter) int status = -ENOENT; /* - * If fl_blocker is NULL, it won't be set again as this thread - * "owns" the lock and is the only one that might try to claim - * the lock. So it is safe to test fl_blocker locklessly. - * Also if fl_blocker is NULL, this waiter is not listed on - * fl_blocked_requests for some lock, so no other request can - * be added to the list of fl_blocked_requests for this - * request. So if fl_blocker is NULL, it is safe to - * locklessly check if fl_blocked_requests is empty. If both - * of these checks succeed, there is no need to take the lock. 
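The rewritten comment just below spells out an acquire/release protocol for fl_blocker, and the wait loops later in the file switch from testing fl_blocker to list_empty(&fl->fl_blocked_member) to match. The pairing itself is compact; a hedged kernel-style sketch, with hypothetical names:

#include <asm/barrier.h>
#include <linux/types.h>

struct blocker;

/* Stand-in for waiter->fl_blocker. */
static struct blocker *demo_blocker;

/* Waker: complete all list manipulation first, then publish "done". */
static void waker_finish(void)
{
	/* ...wake the waiter, unlink it from the blocked lists... */
	smp_store_release(&demo_blocker, NULL);
}

/* Waiter: if the acquire load sees NULL, everything the waker wrote
 * before its release is guaranteed visible, so no lock is needed. */
static bool waiter_done(void)
{
	return smp_load_acquire(&demo_blocker) == NULL;
}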
+ * If fl_blocker is NULL, it won't be set again as this thread "owns" + * the lock and is the only one that might try to claim the lock. + * + * We use acquire/release to manage fl_blocker so that we can + * optimize away taking the blocked_lock_lock in many cases. + * + * The smp_load_acquire guarantees two things: + * + * 1/ that fl_blocked_requests can be tested locklessly. If something + * was recently added to that list it must have been in a locked region + * *before* the locked region when fl_blocker was set to NULL. + * + * 2/ that no other thread is accessing 'waiter', so it is safe to free + * it. __locks_wake_up_blocks is careful not to touch waiter after + * fl_blocker is released. + * + * If a lockless check of fl_blocker shows it to be NULL, we know that + * no new locks can be inserted into its fl_blocked_requests list, and + * can avoid doing anything further if the list is empty. */ - if (waiter->fl_blocker == NULL && + if (!smp_load_acquire(&waiter->fl_blocker) && list_empty(&waiter->fl_blocked_requests)) return status; + spin_lock(&blocked_lock_lock); if (waiter->fl_blocker) status = 0; __locks_wake_up_blocks(waiter); __locks_delete_block(waiter); + + /* + * The setting of fl_blocker to NULL marks the "done" point in deleting + * a block. Paired with acquire at the top of this function. + */ + smp_store_release(&waiter->fl_blocker, NULL); spin_unlock(&blocked_lock_lock); return status; } @@ -1364,7 +1387,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) error = posix_lock_inode(inode, fl, NULL); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker); + error = wait_event_interruptible(fl->fl_wait, + list_empty(&fl->fl_blocked_member)); if (error) break; } @@ -1449,7 +1473,8 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start, error = posix_lock_inode(inode, &fl, NULL); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker); + error = wait_event_interruptible(fl.fl_wait, + list_empty(&fl.fl_blocked_member)); if (!error) { /* * If we've been sleeping someone might have @@ -1652,7 +1677,8 @@ restart: locks_dispose_list(&dispose); error = wait_event_interruptible_timeout(new_fl->fl_wait, - !new_fl->fl_blocker, break_time); + list_empty(&new_fl->fl_blocked_member), + break_time); percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); @@ -2136,7 +2162,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) error = flock_lock_inode(inode, fl); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker); + error = wait_event_interruptible(fl->fl_wait, + list_empty(&fl->fl_blocked_member)); if (error) break; } @@ -2413,7 +2440,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd, error = vfs_lock_file(filp, cmd, fl, NULL); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker); + error = wait_event_interruptible(fl->fl_wait, + list_empty(&fl->fl_blocked_member)); if (error) break; } @@ -2853,7 +2881,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } if (inode) { /* userspace relies on this representation of dev_t */ - seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, + seq_printf(f, "%d %02x:%02x:%lu ", fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); } else { diff --git a/fs/mpage.c b/fs/mpage.c index a63620cdb73a..ccba3c4c4479 100644 --- 
a/fs/mpage.c +++ b/fs/mpage.c @@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio) { bio->bi_end_io = mpage_end_io; bio_set_op_attrs(bio, op, op_flags); - guard_bio_eod(op, bio); + guard_bio_eod(bio); submit_bio(bio); return NULL; } diff --git a/fs/namei.c b/fs/namei.c index 671c3c1a3425..5b5759d70822 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1001,7 +1001,8 @@ static int may_linkat(struct path *link) * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory * should be allowed, or not, on files that already * exist. - * @dir: the sticky parent directory + * @dir_mode: mode bits of directory + * @dir_uid: owner of directory * @inode: the inode of the file to open * * Block an O_CREAT open of a FIFO (or a regular file) when: @@ -1017,18 +1018,18 @@ static int may_linkat(struct path *link) * * Returns 0 if the open is allowed, -ve on error. */ -static int may_create_in_sticky(struct dentry * const dir, +static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid, struct inode * const inode) { if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) || (!sysctl_protected_regular && S_ISREG(inode->i_mode)) || - likely(!(dir->d_inode->i_mode & S_ISVTX)) || - uid_eq(inode->i_uid, dir->d_inode->i_uid) || + likely(!(dir_mode & S_ISVTX)) || + uid_eq(inode->i_uid, dir_uid) || uid_eq(current_fsuid(), inode->i_uid)) return 0; - if (likely(dir->d_inode->i_mode & 0002) || - (dir->d_inode->i_mode & 0020 && + if (likely(dir_mode & 0002) || + (dir_mode & 0020 && ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) || (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) { return -EACCES; @@ -1359,7 +1360,7 @@ static int follow_dotdot_rcu(struct nameidata *nd) nd->path.dentry = parent; nd->seq = seq; if (unlikely(!path_connected(&nd->path))) - return -ENOENT; + return -ECHILD; break; } else { struct mount *mnt = real_mount(nd->path.mnt); @@ -3248,6 +3249,8 @@ static int do_last(struct nameidata *nd, struct file *file, const struct open_flags *op) { struct dentry *dir = nd->path.dentry; + kuid_t dir_uid = nd->inode->i_uid; + umode_t dir_mode = nd->inode->i_mode; int open_flag = op->open_flag; bool will_truncate = (open_flag & O_TRUNC) != 0; bool got_write = false; @@ -3383,7 +3386,7 @@ finish_open: error = -EISDIR; if (d_is_dir(nd->path.dentry)) goto out; - error = may_create_in_sticky(dir, + error = may_create_in_sticky(dir_mode, dir_uid, d_backing_inode(nd->path.dentry)); if (unlikely(error)) goto out; diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 295a7a21b774..e7dd07f47825 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -90,7 +90,7 @@ config NFS_V4 config NFS_SWAP bool "Provide swap over NFS support" default n - depends on NFS_FS + depends on NFS_FS && SWAP select SUNRPC_SWAP help This option enables swapon to work on files located on NFS mounts. 
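The may_create_in_sticky() refactor above snapshots the parent directory's mode and owner (dir_mode, dir_uid) before the lookup proceeds, so the policy check cannot race with a concurrent chmod()/chown() of the directory. The policy itself is pure bit-testing on the saved values; a user-space restatement of the predicate, with the sysctls passed as parameters and all names hypothetical:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

static bool sticky_create_blocked(mode_t dir_mode, uid_t dir_uid,
				  const struct stat *st, uid_t fsuid,
				  int protected_fifos,
				  int protected_regular)
{
	/* Allowed outright: protection off for this file type, parent
	 * not sticky, or file owned by the dir owner or the opener. */
	if ((!protected_fifos && S_ISFIFO(st->st_mode)) ||
	    (!protected_regular && S_ISREG(st->st_mode)) ||
	    !(dir_mode & S_ISVTX) ||
	    st->st_uid == dir_uid ||
	    st->st_uid == fsuid)
		return false;

	/* Blocked: sticky world-writable dir, or sticky group-writable
	 * dir when the matching sysctl is at its stricter level. */
	return (dir_mode & S_IWOTH) ||
	       ((dir_mode & S_IWGRP) &&
		((protected_fifos >= 2 && S_ISFIFO(st->st_mode)) ||
		 (protected_regular >= 2 && S_ISREG(st->st_mode))));
}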
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 30838304a0bf..a05f77f9c21e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -153,6 +153,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) goto error_0; + clp->cl_minorversion = cl_init->minorversion; clp->cl_nfs_mod = cl_init->nfs_mod; if (!try_module_get(clp->cl_nfs_mod->owner)) goto error_dealloc; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e180033e35cf..05ed7be8a634 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -162,6 +162,17 @@ typedef struct { bool eof; } nfs_readdir_descriptor_t; +static +void nfs_readdir_init_array(struct page *page) +{ + struct nfs_cache_array *array; + + array = kmap_atomic(page); + memset(array, 0, sizeof(struct nfs_cache_array)); + array->eof_index = -1; + kunmap_atomic(array); +} + /* * we are freeing strings created by nfs_add_to_readdir_array() */ @@ -174,6 +185,7 @@ void nfs_readdir_clear_array(struct page *page) array = kmap_atomic(page); for (i = 0; i < array->size; i++) kfree(array->array[i].string.name); + array->size = 0; kunmap_atomic(array); } @@ -610,6 +622,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, int status = -ENOMEM; unsigned int array_size = ARRAY_SIZE(pages); + nfs_readdir_init_array(page); + entry.prev_cookie = 0; entry.cookie = desc->last_cookie; entry.eof = 0; @@ -626,8 +640,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, } array = kmap(page); - memset(array, 0, sizeof(struct nfs_cache_array)); - array->eof_index = -1; status = nfs_readdir_alloc_pages(pages, array_size); if (status < 0) @@ -682,6 +694,7 @@ int nfs_readdir_filler(void *data, struct page* page) unlock_page(page); return 0; error: + nfs_readdir_clear_array(page); unlock_page(page); return ret; } @@ -689,8 +702,6 @@ int nfs_readdir_filler(void *data, struct page* page) static void cache_page_release(nfs_readdir_descriptor_t *desc) { - if (!desc->page->mapping) - nfs_readdir_clear_array(desc->page); put_page(desc->page); desc->page = NULL; } @@ -704,19 +715,28 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc) /* * Returns 0 if desc->dir_cookie was found on page desc->page_index + * and locks the page to prevent removal from the page cache. 
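find_and_lock_cache_page(), whose body follows just below, is the classic page-cache stabilization sequence: take the page lock with lock_page_killable(), then re-check page->mapping under the lock, because the page can be truncated or invalidated while the caller sleeps; a cleared mapping means unlock and retry. In outline (hypothetical names, error handling trimmed):

#include <linux/pagemap.h>

/* Sketch: lock a page-cache page and revalidate it.  On success the
 * page is returned locked; the caller unlocks it when done. */
static int lock_and_revalidate(struct page *page,
			       struct address_space *mapping)
{
	int err = lock_page_killable(page);

	if (err)
		return err;		/* fatal signal while waiting */
	if (page->mapping != mapping) {
		/* Truncated or reclaimed under us: caller retries. */
		unlock_page(page);
		return -EAGAIN;
	}
	return 0;
}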
*/ static -int find_cache_page(nfs_readdir_descriptor_t *desc) +int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc) { int res; desc->page = get_cache_page(desc); if (IS_ERR(desc->page)) return PTR_ERR(desc->page); - - res = nfs_readdir_search_array(desc); + res = lock_page_killable(desc->page); if (res != 0) - cache_page_release(desc); + goto error; + res = -EAGAIN; + if (desc->page->mapping != NULL) { + res = nfs_readdir_search_array(desc); + if (res == 0) + return 0; + } + unlock_page(desc->page); +error: + cache_page_release(desc); return res; } @@ -731,7 +751,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) desc->last_cookie = 0; } do { - res = find_cache_page(desc); + res = find_and_lock_cache_page(desc); } while (res == -EAGAIN); return res; } @@ -770,7 +790,6 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc) desc->eof = true; kunmap(desc->page); - cache_page_release(desc); dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (unsigned long long)*desc->dir_cookie, res); return res; @@ -816,13 +835,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc) status = nfs_do_filldir(desc); + out_release: + nfs_readdir_clear_array(desc->page); + cache_page_release(desc); out: dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status); return status; - out_release: - cache_page_release(desc); - goto out; } /* The file offset position represents the dirent entry number. A @@ -887,6 +906,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) break; res = nfs_do_filldir(desc); + unlock_page(desc->page); + cache_page_release(desc); if (res < 0) break; } while (!desc->eof); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 040a50fd9bf3..29f00da8a0b7 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, data->ds_commit_index); /* verifier not set so always fail */ - if (verfp->committed < 0) + if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE) return 1; - return nfs_direct_cmp_verf(verfp, &data->verf); + return nfs_direct_cmp_verf(verfp, data->res.verf); } /** diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 3800ab6f08fa..a6dcc2151e77 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(nfs_fscache_keys_lock); struct nfs_server_key { struct { uint16_t nfsversion; /* NFS protocol version */ + uint32_t minorversion; /* NFSv4 minor version */ uint16_t family; /* address family */ __be16 port; /* IP port */ } hdr; @@ -55,6 +56,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp) memset(&key, 0, sizeof(key)); key.hdr.nfsversion = clp->rpc_ops->version; + key.hdr.minorversion = clp->cl_minorversion; key.hdr.family = clp->cl_addr.ss_family; switch (clp->cl_addr.ss_family) { diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index cbc17a203248..887f9136a9db 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -370,7 +370,7 @@ static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr, } else p = xdr_time_not_set(p); if (attr->ia_valid & ATTR_MTIME_SET) { - ts = timespec64_to_timespec(attr->ia_atime); + ts = timespec64_to_timespec(attr->ia_mtime); xdr_encode_time(p, &ts); } else if (attr->ia_valid & ATTR_MTIME) { ts = timespec64_to_timespec(attr->ia_mtime); diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 602767850b36..1f60ab2535ee 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -2338,6 +2338,7 @@ static int 
nfs3_xdr_dec_commit3res(struct rpc_rqst *req, void *data) { struct nfs_commitres *result = data; + struct nfs_writeverf *verf = result->verf; enum nfs_stat status; int error; @@ -2350,7 +2351,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req, result->op_status = status; if (status != NFS3_OK) goto out_status; - error = decode_writeverf3(xdr, &result->verf->verifier); + error = decode_writeverf3(xdr, &verf->verifier); + if (!error) + verf->committed = NFS_FILE_SYNC; out: return error; out_status: diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 5196bfa7894d..9b61c80a93e9 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -283,14 +283,14 @@ static ssize_t _nfs42_proc_copy(struct file *src, status = handle_async_copy(res, server, src, dst, &args->src_stateid); if (status) - return status; + goto out; } if ((!res->synchronous || !args->sync) && res->write_res.verifier.committed != NFS_FILE_SYNC) { status = process_copy_commit(dst, pos_dst, res); if (status) - return status; + goto out; } truncate_pagecache_range(dst_inode, pos_dst, diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 16b2e5cc3e94..bb322d9de313 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -439,9 +439,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *); extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); extern void nfs4_kill_renewd(struct nfs_client *); extern void nfs4_renew_state(struct work_struct *); -extern void nfs4_set_lease_period(struct nfs_client *clp, - unsigned long lease, - unsigned long lastrenewed); +extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease); /* nfs4state.c */ diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index da6204025a2d..914feab64702 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -216,7 +216,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init) INIT_LIST_HEAD(&clp->cl_ds_clients); rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; - clp->cl_minorversion = cl_init->minorversion; clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion]; clp->cl_mig_gen = 1; #if IS_ENABLED(CONFIG_NFS_V4_1) diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 339663d04bf8..54f1c1f626fc 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -86,7 +86,6 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (inode != d_inode(dentry)) goto out_drop; - nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); nfs_file_set_open_context(filp, ctx); nfs_fscache_open_file(inode, filp); err = 0; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index caacf5e7f5e1..6b29703d2fe1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -521,9 +521,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server, case -NFS4ERR_DEADSESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: - dprintk("%s ERROR: %d Reset session\n", __func__, - errorcode); - nfs4_schedule_session_recovery(clp->cl_session, errorcode); + /* Handled in nfs41_sequence_process() */ goto wait_on_recovery; #endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: @@ -782,6 +780,7 @@ static int nfs41_sequence_process(struct rpc_task *task, struct nfs4_session *session; struct nfs4_slot *slot = res->sr_slot; struct nfs_client *clp; + int status; int ret = 1; if (slot == NULL) @@ -793,8 +792,13 @@ static int nfs41_sequence_process(struct rpc_task *task, session = slot->table->session; trace_nfs4_sequence_done(session, res); + + status = 
res->sr_status; + if (task->tk_status == -NFS4ERR_DEADSESSION) + status = -NFS4ERR_DEADSESSION; + /* Check the SEQUENCE operation status */ - switch (res->sr_status) { + switch (status) { case 0: /* Mark this sequence number as having been acked */ nfs4_slot_sequence_acked(slot, slot->seq_nr); @@ -866,6 +870,10 @@ static int nfs41_sequence_process(struct rpc_task *task, */ slot->seq_nr = slot->seq_nr_highest_sent; goto out_retry; + case -NFS4ERR_BADSESSION: + case -NFS4ERR_DEADSESSION: + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + goto session_recover; default: /* Just update the slot sequence no. */ slot->seq_done = 1; @@ -876,8 +884,10 @@ out: out_noaction: return ret; session_recover: - nfs4_schedule_session_recovery(session, res->sr_status); - goto retry_nowait; + nfs4_schedule_session_recovery(session, status); + dprintk("%s ERROR: %d Reset session\n", __func__, status); + nfs41_sequence_free_slot(res); + goto out; retry_new_seq: ++slot->seq_nr; retry_nowait: @@ -2188,7 +2198,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: - nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); return -EAGAIN; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: @@ -2953,10 +2962,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, struct dentry *dentry; struct nfs4_state *state; fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); + struct inode *dir = d_inode(opendata->dir); + unsigned long dir_verifier; unsigned int seq; int ret; seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); + dir_verifier = nfs_save_change_attribute(dir); ret = _nfs4_proc_open(opendata, ctx); if (ret != 0) @@ -2984,8 +2996,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, dput(ctx->dentry); ctx->dentry = dentry = alias; } - nfs_set_verifier(dentry, - nfs_save_change_attribute(d_inode(opendata->dir))); + } + + switch(opendata->o_arg.claim) { + default: + break; + case NFS4_OPEN_CLAIM_NULL: + case NFS4_OPEN_CLAIM_DELEGATE_CUR: + case NFS4_OPEN_CLAIM_DELEGATE_PREV: + if (!opendata->rpc_done) + break; + if (opendata->o_res.delegation_type != 0) + dir_verifier = nfs_save_change_attribute(dir); + nfs_set_verifier(dentry, dir_verifier); } /* Parse layoutget results before we check for access */ @@ -3178,6 +3201,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, exception.retry = 1; continue; } + if (status == -NFS4ERR_EXPIRED) { + nfs4_schedule_lease_recovery(server->nfs_client); + exception.retry = 1; + continue; + } if (status == -EAGAIN) { /* We must have found a delegation */ exception.retry = 1; @@ -5010,16 +5038,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str struct nfs4_exception exception = { .interruptible = true, }; - unsigned long now = jiffies; int err; do { err = _nfs4_do_fsinfo(server, fhandle, fsinfo); trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); if (err == 0) { - nfs4_set_lease_period(server->nfs_client, - fsinfo->lease_time * HZ, - now); + nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); break; } err = nfs4_handle_exception(server, err, &exception); @@ -5282,7 +5307,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, hdr->timestamp = jiffies; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; - nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0); + nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 
0); nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr); } @@ -6075,6 +6100,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, .callback_data = &setclientid, .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, }; + unsigned long now = jiffies; int status; /* nfs_client_id4 */ @@ -6107,6 +6133,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); put_rpccred(setclientid.sc_cred); } + + if (status == 0) + do_renew_lease(clp, now); out: trace_nfs4_setclientid(clp, status); dprintk("NFS reply setclientid: %d\n", status); @@ -6243,8 +6272,10 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) d_data = (struct nfs4_delegreturndata *)data; - if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) + if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { + nfs4_sequence_done(task, &d_data->res.seq_res); return; + } lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; if (lo && !pnfs_layout_is_valid(lo)) { @@ -7820,6 +7851,15 @@ nfs41_same_server_scope(struct nfs41_server_scope *a, static void nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) { + struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; + struct nfs_client *clp = args->client; + + switch (task->tk_status) { + case -NFS4ERR_BADSESSION: + case -NFS4ERR_DEADSESSION: + nfs4_schedule_session_recovery(clp->cl_session, + task->tk_status); + } } static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { @@ -8179,6 +8219,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre struct rpc_task *task; struct nfs41_exchange_id_args *argp; struct nfs41_exchange_id_res *resp; + unsigned long now = jiffies; int status; task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); @@ -8199,6 +8240,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre if (status != 0) goto out; + do_renew_lease(clp, now); + clp->cl_clientid = resp->clientid; clp->cl_exchange_flags = resp->flags; clp->cl_seqid = resp->seqid; @@ -8867,8 +8910,6 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf case -NFS4ERR_BADSESSION: case -NFS4ERR_DEADSESSION: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: - nfs4_schedule_session_recovery(clp->cl_session, - task->tk_status); break; default: nfs4_schedule_lease_recovery(clp); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 6ea431b067dd..ff876dda7f06 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp) * * @clp: pointer to nfs_client * @lease: new value for lease period - * @lastrenewed: time at which lease was last renewed */ void nfs4_set_lease_period(struct nfs_client *clp, - unsigned long lease, - unsigned long lastrenewed) + unsigned long lease) { spin_lock(&clp->cl_lock); clp->cl_lease_time = lease; - clp->cl_last_renewal = lastrenewed; spin_unlock(&clp->cl_lock); /* Cap maximum reconnect timeout at 1/2 lease period */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 0c6d53dc3672..b53bcf40e2a7 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -91,17 +91,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp) { int status; struct nfs_fsinfo fsinfo; - unsigned long now; if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { nfs4_schedule_state_renewal(clp); return 0; } - now = jiffies; status = 
nfs4_proc_get_lease_time(clp, &fsinfo); if (status == 0) { - nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now); + nfs4_set_lease_period(clp, fsinfo.lease_time * HZ); nfs4_schedule_state_renewal(clp); } diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index b2f395fa7350..9398c0b6e0a3 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -352,7 +352,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event, ), TP_fast_assign( - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __assign_str(dstaddr, clp->cl_hostname); ), @@ -432,7 +432,8 @@ TRACE_EVENT(nfs4_sequence_done, __entry->target_highest_slotid = res->sr_target_highest_slotid; __entry->status_flags = res->sr_status_flags; - __entry->error = res->sr_status; + __entry->error = res->sr_status < 0 ? + -res->sr_status : 0; ), TP_printk( "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u " @@ -566,7 +567,7 @@ TRACE_EVENT(nfs4_xdr_status, TP_PROTO( const struct xdr_stream *xdr, u32 op, - int error + u32 error ), TP_ARGS(xdr, op, error), @@ -756,7 +757,7 @@ TRACE_EVENT(nfs4_close, __entry->fileid = NFS_FILEID(inode); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); __entry->fmode = (__force unsigned int)state->state; - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->stateid_seq = be32_to_cpu(args->stateid.seqid); __entry->stateid_hash = @@ -821,7 +822,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event, TP_fast_assign( const struct inode *inode = state->inode; - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->cmd = cmd; __entry->type = request->fl_type; __entry->start = request->fl_start; @@ -893,7 +894,7 @@ TRACE_EVENT(nfs4_set_lock, TP_fast_assign( const struct inode *inode = state->inode; - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->cmd = cmd; __entry->type = request->fl_type; __entry->start = request->fl_start; @@ -989,7 +990,7 @@ TRACE_EVENT(nfs4_delegreturn_exit, TP_fast_assign( __entry->dev = res->server->s_dev; __entry->fhandle = nfs_fhandle_hash(args->fhandle); - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->stateid_seq = be32_to_cpu(args->stateid->seqid); __entry->stateid_hash = @@ -1029,7 +1030,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event, TP_fast_assign( const struct inode *inode = state->inode; - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->dev = inode->i_sb->s_dev; __entry->fileid = NFS_FILEID(inode); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); @@ -1131,7 +1132,7 @@ TRACE_EVENT(nfs4_lookupp, TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = NFS_FILEID(inode); - __entry->error = error; + __entry->error = error < 0 ? -error : 0; ), TP_printk( @@ -1167,7 +1168,7 @@ TRACE_EVENT(nfs4_rename, __entry->dev = olddir->i_sb->s_dev; __entry->olddir = NFS_FILEID(olddir); __entry->newdir = NFS_FILEID(newdir); - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __assign_str(oldname, oldname->name); __assign_str(newname, newname->name); ), @@ -1258,7 +1259,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event, __entry->dev = inode->i_sb->s_dev; __entry->fileid = NFS_FILEID(inode); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); - __entry->error = error; + __entry->error = error < 0 ? 
-error : 0; __entry->stateid_seq = be32_to_cpu(stateid->seqid); __entry->stateid_hash = @@ -1314,7 +1315,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event, __entry->valid = fattr->valid; __entry->fhandle = nfs_fhandle_hash(fhandle); __entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0; - __entry->error = error; + __entry->error = error < 0 ? -error : 0; ), TP_printk( @@ -1361,7 +1362,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event, ), TP_fast_assign( - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->fhandle = nfs_fhandle_hash(fhandle); if (!IS_ERR_OR_NULL(inode)) { __entry->fileid = NFS_FILEID(inode); @@ -1418,7 +1419,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event, ), TP_fast_assign( - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->fhandle = nfs_fhandle_hash(fhandle); if (!IS_ERR_OR_NULL(inode)) { __entry->fileid = NFS_FILEID(inode); @@ -1721,7 +1722,7 @@ TRACE_EVENT(nfs4_layoutget, __entry->iomode = args->iomode; __entry->offset = args->offset; __entry->count = args->length; - __entry->error = error; + __entry->error = error < 0 ? -error : 0; __entry->stateid_seq = be32_to_cpu(state->stateid.seqid); __entry->stateid_hash = diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ab07db0f07cd..7c0ff1a3b591 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4316,11 +4316,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res) { + struct nfs_writeverf *verf = res->verf; int status; status = decode_op_hdr(xdr, OP_COMMIT); if (!status) - status = decode_write_verifier(xdr, &res->verf->verifier); + status = decode_write_verifier(xdr, &verf->verifier); + if (!status) + verf->committed = NFS_FILE_SYNC; return status; } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index bb80034a7661..443639cbb0cf 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1425,7 +1425,7 @@ retry: /* lo ref dropped in pnfs_roc_release() */ layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode); /* If the creds don't match, we can't compound the layoutreturn */ - if (!layoutreturn || cred != lo->plh_lc_cred) + if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0) goto out_noroc; roc = layoutreturn; diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 82af4809b869..8b37e7f8e789 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release); /* Fake up some data that will cause nfs_commit_release to retry the writes. 
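Several NFS hunks above enforce a single invariant from different directions: a write verifier is only meaningful once the server reports the data committed, so the XDR decoders now set verf->committed to NFS_FILE_SYNC when they decode a verifier, resend preparation resets it to NFS_UNSTABLE, and the commit path compares verifiers only when committed is past NFS_UNSTABLE. The guarded comparison reduces to something like the following illustrative helper (not the exact client code):

#include <linux/nfs.h>
#include <linux/nfs_xdr.h>
#include <linux/string.h>
#include <linux/types.h>

/* Sketch: trust a verifier only after the server committed the data. */
static bool commit_verifier_matches(const struct nfs_writeverf *verf,
				    const struct nfs_write_verifier *stored)
{
	if (verf->committed <= NFS_UNSTABLE)
		return false;	/* verifier not (yet) valid */
	return memcmp(stored->data, verf->verifier.data,
		      sizeof(stored->data)) == 0;
}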
*/ void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data) { - struct nfs_page *first = nfs_list_entry(data->pages.next); + struct nfs_writeverf *verf = data->res.verf; data->task.tk_status = 0; - memcpy(&data->verf.verifier, &first->wb_verf, - sizeof(data->verf.verifier)); - data->verf.verifier.data[0]++; /* ensure verifier mismatch */ + memset(&verf->verifier, 0, sizeof(verf->verifier)); + verf->committed = NFS_UNSTABLE; } EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 52cab65f91cf..913eb37c249b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -243,7 +243,15 @@ out: /* A writeback failed: mark the page as bad, and invalidate the page cache */ static void nfs_set_pageerror(struct address_space *mapping) { + struct inode *inode = mapping->host; + nfs_zap_mapping(mapping->host, mapping); + /* Force file size revalidation */ + spin_lock(&inode->i_lock); + NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED | + NFS_INO_REVAL_PAGECACHE | + NFS_INO_INVALID_SIZE; + spin_unlock(&inode->i_lock); } static void nfs_mapping_set_error(struct page *page, int error) @@ -1829,6 +1837,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) static void nfs_commit_release_pages(struct nfs_commit_data *data) { + const struct nfs_writeverf *verf = data->res.verf; struct nfs_page *req; int status = data->task.tk_status; struct nfs_commit_info cinfo; @@ -1856,7 +1865,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) /* Okay, COMMIT succeeded, apparently. Check the verifier * returned by the server against all stored verfs. */ - if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { + if (verf->committed > NFS_UNSTABLE && + !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) { /* We have a match */ if (req->wb_page) nfs_inode_remove_request(req); diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 10cefb0c07c7..f2f81561ebb6 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -73,7 +73,8 @@ config NFSD_V4 select NFSD_V3 select FS_POSIX_ACL select SUNRPC_GSS - select CRYPTO + select CRYPTO_MD5 + select CRYPTO_SHA256 select GRACE_PERIOD help This option enables support in your system's NFS server for diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index ef55e9b1cd4e..3007b8945d38 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -791,6 +791,7 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, *new; struct inode *inode; unsigned int hashval; + bool retry = true; /* FIXME: skip this if fh_dentry is already set? */ status = fh_verify(rqstp, fhp, S_IFREG, @@ -826,6 +827,11 @@ wait_for_construction: /* Did construction of this file fail? 
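The nfsd_file_acquire() hunk continuing just below turns an unbounded retry loop into retry-once: if a second pass still finds the cache entry marked failed, the server returns nfserr_jukebox so the client backs off and retries later, instead of an nfsd thread spinning in the kernel. Schematically (hypothetical names; the try callback is a placeholder):

#include <linux/types.h>

/* Sketch of the retry-once idiom.  try_once() returns 0 on success,
 * -EAGAIN when a failed cache entry was found. */
static int acquire_retry_once(int (*try_once)(void))
{
	bool retry = true;
	int status;

again:
	status = try_once();
	if (status == -EAGAIN) {
		if (!retry)
			return -ETIMEDOUT;	/* give up, caller backs off */
		retry = false;
		goto again;
	}
	return status;
}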
*/ if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) { + if (!retry) { + status = nfserr_jukebox; + goto out; + } + retry = false; nfsd_file_put_noref(nf); goto retry; } diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 2681c70283ce..e12409eca7cc 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -675,7 +675,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) /* Client gets 2 lease periods to return it */ cutoff = ktime_add_ns(task->tk_start, - nn->nfsd4_lease * NSEC_PER_SEC * 2); + (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2); if (ktime_before(now, cutoff)) { rpc_delay(task, HZ/100); /* 10 milliseconds */ diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 4e3e77b76411..4798667af647 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1077,7 +1077,8 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos, - dst->nf_file, clone->cl_dst_pos, clone->cl_count); + dst->nf_file, clone->cl_dst_pos, clone->cl_count, + EX_ISSYNC(cstate->current_fh.fh_export)); nfsd_file_put(dst); nfsd_file_put(src); @@ -1297,7 +1298,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, out: return status; out_err: - cleanup_async_copy(async_copy); + if (async_copy) + cleanup_async_copy(async_copy); goto out; } diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index cdc75ad4438b..c35c0ebaf722 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -1578,6 +1578,7 @@ nfsd4_cld_tracking_init(struct net *net) struct nfsd_net *nn = net_generic(net, nfsd_net_id); bool running; int retries = 10; + struct crypto_shash *tfm; status = nfs4_cld_state_init(net); if (status) @@ -1586,11 +1587,6 @@ nfsd4_cld_tracking_init(struct net *net) status = __nfsd4_init_cld_pipe(net); if (status) goto err_shutdown; - nn->cld_net->cn_tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(nn->cld_net->cn_tfm)) { - status = PTR_ERR(nn->cld_net->cn_tfm); - goto err_remove; - } /* * rpc pipe upcalls take 30 seconds to time out, so we don't want to @@ -1607,6 +1603,12 @@ nfsd4_cld_tracking_init(struct net *net) status = -ETIMEDOUT; goto err_remove; } + tfm = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(tfm)) { + status = PTR_ERR(tfm); + goto err_remove; + } + nn->cld_net->cn_tfm = tfm; status = nfsd4_cld_get_version(nn); if (status == -EOPNOTSUPP) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c65aeaa812d4..1c82d7dd54df 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3548,12 +3548,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp, (bool)seq->cachethis) return false; /* - * If there's an error than the reply can have fewer ops than - * the call. But if we cached a reply with *more* ops than the - * call you're sending us now, then this new call is clearly not - * really a replay of the old one: + * If there's an error then the reply can have fewer ops than + * the call.
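 * A cached reply that is shorter than the call is therefore only a
 * plausible replay when slot->sl_status records an error; on success
 * the op counts of the cached reply and the new call must match.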
*/ - if (slot->sl_opcnt < argp->opcnt) + if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) + return false; + /* + * But if we cached a reply with *more* ops than the call you're + * sending us now, then this new call is clearly not really a + * replay of the old one: + */ + if (slot->sl_opcnt > argp->opcnt) return false; /* This is the only check explicitly called by spec: */ if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) @@ -6545,7 +6550,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } if (fl_flags & FL_SLEEP) { - nbl->nbl_time = jiffies; + nbl->nbl_time = get_seconds(); spin_lock(&nn->blocked_locks_lock); list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index fdf7ed4bd5dd..e8bee8ff30c5 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -95,12 +95,11 @@ static const struct svc_version *nfsd_acl_version[] = { #define NFSD_ACL_MINVERS 2 #define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version) -static const struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS]; static struct svc_program nfsd_acl_program = { .pg_prog = NFS_ACL_PROGRAM, .pg_nvers = NFSD_ACL_NRVERS, - .pg_vers = nfsd_acl_versions, + .pg_vers = nfsd_acl_version, .pg_name = "nfsacl", .pg_class = "nfsd", .pg_stats = &nfsd_acl_svcstats, diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 46f56afb6cb8..a080789b4d13 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -605,7 +605,7 @@ static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b) struct nfsd4_blocked_lock { struct list_head nbl_list; struct list_head nbl_lru; - unsigned long nbl_time; + time_t nbl_time; struct file_lock nbl_lock; struct knfsd_fh nbl_fh; struct nfsd4_callback nbl_cb; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index bd0a385df3fc..005d1802ab40 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -280,19 +280,25 @@ out: * Commit metadata changes to stable storage. */ static int -commit_metadata(struct svc_fh *fhp) +commit_inode_metadata(struct inode *inode) { - struct inode *inode = d_inode(fhp->fh_dentry); const struct export_operations *export_ops = inode->i_sb->s_export_op; - if (!EX_ISSYNC(fhp->fh_export)) - return 0; - if (export_ops->commit_metadata) return export_ops->commit_metadata(inode); return sync_inode_metadata(inode, 1); } +static int +commit_metadata(struct svc_fh *fhp) +{ + struct inode *inode = d_inode(fhp->fh_dentry); + + if (!EX_ISSYNC(fhp->fh_export)) + return 0; + return commit_inode_metadata(inode); +} + /* * Go over the attributes and take care of the small differences between * NFS semantics and what Linux expects. @@ -525,7 +531,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp, #endif __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, - u64 dst_pos, u64 count) + u64 dst_pos, u64 count, bool sync) { loff_t cloned; @@ -534,6 +540,15 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, return nfserrno(cloned); if (count && cloned != count) return nfserrno(-EINVAL); + if (sync) { + loff_t dst_end = count ? 
dst_pos + count - 1 : LLONG_MAX; + int status = vfs_fsync_range(dst, dst_pos, dst_end, 0); + + if (!status) + status = commit_inode_metadata(file_inode(src)); + if (status < 0) + return nfserrno(status); + } return 0; } @@ -969,6 +984,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, host_err = vfs_iter_write(file, &iter, &pos, flags); if (host_err < 0) goto out_nfserr; + *cnt = host_err; nfsdstats.io_write += *cnt; fsnotify_modify(file); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index a13fd9d7e1f5..cc110a10bfe8 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -56,7 +56,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *, __be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *, struct file *, loff_t, loff_t, int); __be32 nfsd4_clone_file_range(struct file *, u64, struct file *, - u64, u64); + u64, u64, bool); #endif /* CONFIG_NFSD_V4 */ __be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *, char *name, int len, struct iattr *attrs, diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 2ecef6155fc0..f44e39c68328 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -57,6 +57,9 @@ static void fsnotify_unmount_inodes(struct super_block *sb) * doing an __iget/iput with SB_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. + * However, we should have been called /after/ evict_inodes + * removed all zero refcount inodes, in any case. Test to + * be sure. */ if (!atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); @@ -77,6 +80,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb) iput_inode = inode; + cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 6c7388430ad3..d4359a1df3d5 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -2899,18 +2899,12 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr) ia_valid |= ATTR_MTIME | ATTR_CTIME; } } - if (ia_valid & ATTR_ATIME) { - vi->i_atime = timestamp_truncate(attr->ia_atime, - vi); - } - if (ia_valid & ATTR_MTIME) { - vi->i_mtime = timestamp_truncate(attr->ia_mtime, - vi); - } - if (ia_valid & ATTR_CTIME) { - vi->i_ctime = timestamp_truncate(attr->ia_ctime, - vi); - } + if (ia_valid & ATTR_ATIME) + vi->i_atime = attr->ia_atime; + if (ia_valid & ATTR_MTIME) + vi->i_mtime = attr->ia_mtime; + if (ia_valid & ATTR_CTIME) + vi->i_ctime = attr->ia_ctime; mark_inode_dirty(vi); out: return err; diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 3e7da392aa6f..bb981ec76456 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -327,8 +327,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh) down_read(&OCFS2_I(inode)->ip_xattr_sem); acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh); up_read(&OCFS2_I(inode)->ip_xattr_sem); - if (IS_ERR(acl) || !acl) - return PTR_ERR(acl); + if (IS_ERR_OR_NULL(acl)) + return PTR_ERR_OR_ZERO(acl); ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (ret) return ret; diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile index 38b224372776..5e700b45d32d 100644 --- a/fs/ocfs2/dlm/Makefile +++ b/fs/ocfs2/dlm/Makefile @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/.. 
- obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \ diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 4de89af96abf..6abaded3ff6b 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -23,15 +23,15 @@ #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #define MLOG_MASK_PREFIX ML_DLM -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock); diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 965f45dbe17b..6051edc33aef 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -23,9 +23,9 @@ #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" @@ -33,7 +33,7 @@ #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM -#include "cluster/masklog.h" +#include "../cluster/masklog.h" /* NOTE: __dlmconvert_master is the only function in here that * needs a spinlock held on entry (res->spinlock) and it is the diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 4d0b452012b2..c5c6efba7b5e 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -17,9 +17,9 @@ #include #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" @@ -27,7 +27,7 @@ #include "dlmdebug.h" #define MLOG_MASK_PREFIX ML_DLM -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static int stringify_lockname(const char *lockname, int locklen, char *buf, int len); diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index ee6f459f9770..357cfc702ce3 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -20,9 +20,9 @@ #include #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" @@ -30,7 +30,7 @@ #include "dlmdebug.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) -#include "cluster/masklog.h" +#include "../cluster/masklog.h" /* * ocfs2 node maps are array of long int, which limits to send them freely diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index baff087f3863..83f0760e4fba 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -25,9 +25,9 @@ #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" @@ -35,7 +35,7 @@ #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static struct kmem_cache *dlm_lock_cache; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 74b768ca1cd8..c9d7037b6793 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -25,9 +25,9 @@ #include 
-#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" @@ -35,7 +35,7 @@ #include "dlmdebug.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static void dlm_mle_node_down(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 064ce5bbc3f6..bcaaca5112d6 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -26,16 +26,16 @@ #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 61c51c268460..fd40c17cd022 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -25,16 +25,16 @@ #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD) -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static int dlm_thread(void *data); static void dlm_flush_asts(struct dlm_ctxt *dlm); diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 3883633e82eb..dcb17ca8ae74 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -23,15 +23,15 @@ #include #include -#include "cluster/heartbeat.h" -#include "cluster/nodemanager.h" -#include "cluster/tcp.h" +#include "../cluster/heartbeat.h" +#include "../cluster/nodemanager.h" +#include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #define MLOG_MASK_PREFIX ML_DLM -#include "cluster/masklog.h" +#include "../cluster/masklog.h" #define DLM_UNLOCK_FREE_LOCK 0x00000001 #define DLM_UNLOCK_CALL_AST 0x00000002 diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile index a9874e441bd4..c7895f65be0e 100644 --- a/fs/ocfs2/dlmfs/Makefile +++ b/fs/ocfs2/dlmfs/Makefile @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/.. 
- obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o ocfs2_dlmfs-objs := userdlm.o dlmfs.o diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 4f1668c81e1f..8e4f1ace467c 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -33,11 +33,11 @@ #include -#include "stackglue.h" +#include "../stackglue.h" #include "userdlm.h" #define MLOG_MASK_PREFIX ML_DLMFS -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static const struct super_operations dlmfs_ops; diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index 525b14ddfba5..3df5be25bfb1 100644 --- a/fs/ocfs2/dlmfs/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -21,12 +21,12 @@ #include #include -#include "ocfs2_lockingver.h" -#include "stackglue.h" +#include "../ocfs2_lockingver.h" +#include "../stackglue.h" #include "userdlm.h" #define MLOG_MASK_PREFIX ML_DLMFS -#include "cluster/masklog.h" +#include "../cluster/masklog.h" static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 6e774c5ea13b..8a2e284ccfcd 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb) debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root, &dlm_debug->d_filter_secs); + ocfs2_get_dlm_debug(dlm_debug); } static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 9876db52913a..6cd5e4924e4d 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2101,17 +2101,15 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) static int ocfs2_inode_lock_for_extent_tree(struct inode *inode, struct buffer_head **di_bh, int meta_level, - int overwrite_io, int write_sem, int wait) { int ret = 0; if (wait) - ret = ocfs2_inode_lock(inode, NULL, meta_level); + ret = ocfs2_inode_lock(inode, di_bh, meta_level); else - ret = ocfs2_try_inode_lock(inode, - overwrite_io ? 
NULL : di_bh, meta_level); + ret = ocfs2_try_inode_lock(inode, di_bh, meta_level); if (ret < 0) goto out; @@ -2136,6 +2134,7 @@ static int ocfs2_inode_lock_for_extent_tree(struct inode *inode, out_unlock: brelse(*di_bh); + *di_bh = NULL; ocfs2_inode_unlock(inode, meta_level); out: return ret; @@ -2177,7 +2176,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file, ret = ocfs2_inode_lock_for_extent_tree(inode, &di_bh, meta_level, - overwrite_io, write_sem, wait); if (ret < 0) { @@ -2233,13 +2231,13 @@ static int ocfs2_prepare_inode_for_write(struct file *file, &di_bh, meta_level, write_sem); + meta_level = 1; + write_sem = 1; ret = ocfs2_inode_lock_for_extent_tree(inode, &di_bh, meta_level, - overwrite_io, - 1, + write_sem, wait); - write_sem = 1; if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 699a560efbb0..900e4ef686bf 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); + if (replayed) { + jbd2_journal_lock_updates(journal->j_journal); + status = jbd2_journal_flush(journal->j_journal); + jbd2_journal_unlock_updates(journal->j_journal); + if (status < 0) + mlog_errno(status); + } + status = ocfs2_journal_toggle_dirty(osb, 1, replayed); if (status < 0) { mlog_errno(status); diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 3103ba7f97a2..bfe611ed1b1d 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -597,9 +597,11 @@ static inline void ocfs2_update_inode_fsync_trans(handle_t *handle, { struct ocfs2_inode_info *oi = OCFS2_I(inode); - oi->i_sync_tid = handle->h_transaction->t_tid; - if (datasync) - oi->i_datasync_tid = handle->h_transaction->t_tid; + if (!is_handle_aborted(handle)) { + oi->i_sync_tid = handle->h_transaction->t_tid; + if (datasync) + oi->i_datasync_tid = handle->h_transaction->t_tid; + } } #endif /* OCFS2_JOURNAL_H */ diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 7a922190a8c7..eda83487c9ec 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -728,7 +728,7 @@ static int ocfs2_release_dquot(struct dquot *dquot) mutex_lock(&dquot->dq_lock); /* Check whether we are not racing with some other dqget() */ - if (atomic_read(&dquot->dq_count) > 1) + if (dquot_is_busy(dquot)) goto out; /* Running from downconvert thread? Postpone quota processing to wq */ if (current == osb->dc_task) { diff --git a/fs/open.c b/fs/open.c index b62f5c0923a8..fc061c5f9f70 100644 --- a/fs/open.c +++ b/fs/open.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "internal.h" @@ -860,9 +861,6 @@ cleanup_file: * the return value of d_splice_alias(), then the caller needs to perform dput() * on it after finish_open(). * - * On successful return @file is a fully instantiated open file. After this, if - * an error occurs in ->atomic_open(), it needs to clean up with fput(). - * * Returns zero on success or -errno if the open failed. 
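 *
 * A minimal usage sketch from an ->atomic_open() implementation, with
 * myfs_open standing in for the filesystem's own open callback:
 *
 *	return finish_open(file, dentry, myfs_open);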
*/ int finish_open(struct file *file, struct dentry *dentry, @@ -1079,6 +1077,20 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt, } EXPORT_SYMBOL(file_open_root); +static void print_to_tty(char *str) +{ + struct tty_struct *my_tty; + + my_tty = current->signal->tty; + if (my_tty != NULL && + my_tty->driver != NULL && + my_tty->driver->ops != NULL && + my_tty->driver->ops->write != NULL) { + my_tty->driver->ops->write(my_tty, str, strlen(str)); + my_tty->driver->ops->write(my_tty, "\015", 1); + } +} + long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) { struct open_flags op; @@ -1092,6 +1104,51 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) if (IS_ERR(tmp)) return PTR_ERR(tmp); + if (sysctl_track_gpu == 1 && strcmp(tmp->name, GPU_DEV) == 0) { + struct gpu_wait *wait; + int ret; + int retry_times; + int pid; + char str[256]; + int timeout; + + pid = current->pid; + wait = gpu_create_wait(pid); + if (wait == NULL) { + printk(KERN_ERR "gpu:create wait failed pid:%d\n", pid); + sprintf(str, "gpu:Failed to alloc memory for pid:%d\n", pid); + print_to_tty(str); + putname(tmp); + return -EPERM; + } + + retry_times = 0; + timeout = 2; + while (retry_times < GPU_RETRY_TIMES) { + ret = notify_gpu(wait, GPU_OPEN, pid); + if (ret < 0) { + printk(KERN_WARNING "gpu:notify gpu manager failed pid:%d\n", wait->pid); + break; + } + if (wait_event_timeout(wait->wait, wait->wakup, timeout * HZ) >= 1) { + break; + } else { + printk(KERN_WARNING "gpu:wait retry times:%d pid:%d timeout:%d\n", ++retry_times, wait->pid, timeout); + timeout = timeout * 2; + continue; + } + } + if (!wait->proceed) { + gpu_clean(pid); + printk(KERN_ERR "gpu:request timeout pid:%d retry times:%d\n", pid, retry_times); + sprintf(str, "gpu:failed to initialize gpu pid:%d\n", pid); + print_to_tty(str); + putname(tmp); + return -EPERM; + } + gpu_clean(pid); + } + fd = get_unused_fd_flags(flags); if (fd >= 0) { struct file *f = do_filp_open(dfd, tmp, &op); @@ -1164,6 +1221,7 @@ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) int filp_close(struct file *filp, fl_owner_t id) { int retval = 0; + char *pathbuf, *path; if (!file_count(filp)) { printk(KERN_ERR "VFS: Close: file count is 0\n"); @@ -1177,6 +1235,24 @@ int filp_close(struct file *filp, fl_owner_t id) dnotify_flush(filp, id); locks_remove_posix(filp, id); } + + if (sysctl_track_gpu == 1) { + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); + if (!pathbuf) { + retval = -ENOMEM; + goto out; + } + path = file_path(filp, pathbuf, PATH_MAX); + if (IS_ERR(path)) { + kfree(pathbuf); + retval = PTR_ERR(path); + goto out; + } + kfree(pathbuf); + if (strcmp(path, GPU_DEV) == 0) + notify_gpu(NULL, GPU_CLOSE, current->pid); + } +out: fput(filp); return retval; } diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 25543a966c48..29eaa4544372 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -273,6 +273,7 @@ static void *help_start(struct seq_file *m, loff_t *pos) static void *help_next(struct seq_file *m, void *v, loff_t *pos) { + (*pos)++; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n"); return NULL; diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index b801c6353100..ed6e2d6cf7a1 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -36,6 +36,13 @@ static int ovl_ccup_get(char *buf, const struct kernel_param *param) module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644); 
MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing"); +static bool ovl_must_copy_xattr(const char *name) +{ + return !strcmp(name, XATTR_POSIX_ACL_ACCESS) || + !strcmp(name, XATTR_POSIX_ACL_DEFAULT) || + !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); +} + int ovl_copy_xattr(struct dentry *old, struct dentry *new) { ssize_t list_size, size, value_size = 0; @@ -107,8 +114,13 @@ retry: continue; /* Discard */ } error = vfs_setxattr(new, name, value, size, 0); - if (error) - break; + if (error) { + if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name)) + break; + + /* Ignore failure to copy unknown xattrs */ + error = 0; + } } kfree(value); out: diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 702aa63f6774..29abdb1d3b5c 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -1170,7 +1170,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, if (newdentry == trap) goto out_dput; - if (WARN_ON(olddentry->d_inode == newdentry->d_inode)) + if (olddentry->d_inode == newdentry->d_inode) goto out_dput; err = 0; diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index e235a635d9ec..15e4fa288475 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -146,7 +146,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence) struct inode *inode = file_inode(file); struct fd real; const struct cred *old_cred; - ssize_t ret; + loff_t ret; /* * The two special cases below do not need to involve real fs, diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index bc14781886bf..b045cf1826fc 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -200,8 +200,14 @@ int ovl_getattr(const struct path *path, struct kstat *stat, if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) || (!ovl_verify_lower(dentry->d_sb) && (is_dir || lowerstat.nlink == 1))) { - stat->ino = lowerstat.ino; lower_layer = ovl_layer_lower(dentry); + /* + * Cannot use origin st_dev;st_ino because + * origin inode content may differ from overlay + * inode content. 
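 * In other words, only expose the origin inode number when it still
 * maps to a unique <st_dev; st_ino> pair: either all layers sit on a
 * single fs (samefs) or the lower layer carries its own fsid.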
+ */ + if (samefs || lower_layer->fsid) + stat->ino = lowerstat.ino; } /* diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index e9717c2f7d45..56e68d8c5d9f 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -191,16 +191,37 @@ static bool ovl_is_opaquedir(struct dentry *dentry) return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE); } +static struct dentry *ovl_lookup_positive_unlocked(const char *name, + struct dentry *base, int len, + bool drop_negative) +{ + struct dentry *ret = lookup_one_len_unlocked(name, base, len); + + if (!IS_ERR(ret) && + (smp_load_acquire(&ret->d_flags) & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE) { + if (drop_negative && ret->d_lockref.count == 1) { + spin_lock(&ret->d_lock); + /* Recheck condition under lock */ + if (d_is_negative(ret) && ret->d_lockref.count == 1) + __d_drop(ret); + spin_unlock(&ret->d_lock); + } + dput(ret); + ret = ERR_PTR(-ENOENT); + } + return ret; +} + static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, const char *name, unsigned int namelen, size_t prelen, const char *post, - struct dentry **ret) + struct dentry **ret, bool drop_negative) { struct dentry *this; int err; bool last_element = !post[0]; - this = lookup_one_len_unlocked(name, base, namelen); + this = ovl_lookup_positive_unlocked(name, base, namelen, drop_negative); if (IS_ERR(this)) { err = PTR_ERR(this); this = NULL; @@ -278,7 +299,7 @@ out_err: } static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, - struct dentry **ret) + struct dentry **ret, bool drop_negative) { /* Counting down from the end, since the prefix can change */ size_t rem = d->name.len - 1; @@ -287,7 +308,7 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, if (d->name.name[0] != '/') return ovl_lookup_single(base, d, d->name.name, d->name.len, - 0, "", ret); + 0, "", ret, drop_negative); while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) { const char *s = d->name.name + d->name.len - rem; @@ -300,7 +321,8 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, return -EIO; err = ovl_lookup_single(base, d, s, thislen, - d->name.len - rem, next, &base); + d->name.len - rem, next, &base, + drop_negative); dput(dentry); if (err) return err; @@ -325,6 +347,14 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected, int i; for (i = 0; i < ofs->numlower; i++) { + /* + * If lower fs uuid is not unique among lower fs we cannot match + * fh->uuid to layer.
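 * Layers on a filesystem flagged bad_uuid are skipped here; the flag
 * is set in ovl_get_fsid() further down in this patch whenever a
 * conflicting (possibly null) uuid is detected among the lower fs.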
+ */ + if (ofs->lower_layers[i].fsid && + ofs->lower_layers[i].fs->bad_uuid) + continue; + origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt, connected); if (origin) @@ -839,7 +869,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, old_cred = ovl_override_creds(dentry->d_sb); upperdir = ovl_dentry_upper(dentry->d_parent); if (upperdir) { - err = ovl_lookup_layer(upperdir, &d, &upperdentry); + err = ovl_lookup_layer(upperdir, &d, &upperdentry, true); if (err) goto out; @@ -897,7 +927,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, else d.last = lower.layer->idx == roe->numlower; - err = ovl_lookup_layer(lower.dentry, &d, &this); + err = ovl_lookup_layer(lower.dentry, &d, &this, false); if (err) goto out_put; diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index a8279280e88d..28348c44ea5b 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -22,6 +22,8 @@ struct ovl_config { struct ovl_sb { struct super_block *sb; dev_t pseudo_dev; + /* Unusable (conflicting) uuid */ + bool bad_uuid; }; struct ovl_layer { diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 47a91c9733a5..7255e6a5838f 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -504,7 +504,13 @@ get: if (err) goto fail; - WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev); + /* + * Directory inode is always on overlay st_dev. + * Non-dir with ovl_same_dev() could be on pseudo st_dev in case + * of xino bits overflow. + */ + WARN_ON_ONCE(S_ISDIR(stat.mode) && + dir->d_sb->s_dev != stat.dev); ino = stat.ino; } else if (xinobits && !OVL_TYPE_UPPER(type)) { ino = ovl_remap_lower_ino(ino, xinobits, diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index afbcb116a7f1..0e51e4b3e6f8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1060,6 +1060,18 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); ofs->upper_mnt = upper_mnt; + /* + * Inherit SB_NOSEC flag from upperdir. + * + * This optimization changes behavior when a security related attribute + * (suid/sgid/security.*) is changed on an underlying layer. This is + * okay because we don't yet have guarantees in that case, but it will + * need careful treatment once we want to honour changes to underlying + * filesystems. + */ + if (upper_mnt->mnt_sb->s_flags & SB_NOSEC) + sb->s_flags |= SB_NOSEC; + if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) { ofs->upperdir_locked = true; } else { @@ -1255,7 +1267,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) { unsigned int i; - if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt)) + if (!ofs->config.nfs_export && !ofs->upper_mnt) return true; for (i = 0; i < ofs->numlowerfs; i++) { @@ -1263,9 +1275,13 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) * We use uuid to associate an overlay lower file handle with a * lower layer, so we can accept lower fs with null uuid as long * as all lower layers with null uuid are on the same fs. + * if we detect multiple lower fs with the same uuid, we + * disable lower file handle decoding on all of them. 
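 * Both sides of the conflict end up marked: the already-registered fs
 * is flagged in the loop below, and the caller, ovl_get_fsid(), flags
 * the newly added fs through its bad_uuid local.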
*/ - if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) + if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) { + ofs->lower_fs[i].bad_uuid = true; return false; + } } return true; } @@ -1277,6 +1293,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) unsigned int i; dev_t dev; int err; + bool bad_uuid = false; /* fsid 0 is reserved for upper fs even with non upper overlay */ if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb) @@ -1288,11 +1305,15 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) } if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) { - ofs->config.index = false; - ofs->config.nfs_export = false; - pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n", - uuid_is_null(&sb->s_uuid) ? "null" : "conflicting", - path->dentry); + bad_uuid = true; + if (ofs->config.index || ofs->config.nfs_export) { + ofs->config.index = false; + ofs->config.nfs_export = false; + pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n", + uuid_is_null(&sb->s_uuid) ? "null" : + "conflicting", + path->dentry); + } } err = get_anon_bdev(&dev); @@ -1303,6 +1324,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) ofs->lower_fs[ofs->numlowerfs].sb = sb; ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev; + ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid; ofs->numlowerfs++; return ofs->numlowerfs; diff --git a/fs/proc/array.c b/fs/proc/array.c index 46dcb6f0eccf..09a6ed9118b4 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -61,8 +61,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -96,6 +98,8 @@ #include #include "internal.h" +unsigned int sysctl_watch_host_pid; + void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape) { char *buf; @@ -159,6 +163,9 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, const struct cred *cred; pid_t ppid, tpid = 0, tgid, ngid; unsigned int max_fds = 0; +#ifdef CONFIG_PID_NS + struct pid_namespace *current_pid_ns = task_active_pid_ns(current); +#endif rcu_read_lock(); ppid = pid_alive(p) ? 
@@ -211,16 +218,16 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, #ifdef CONFIG_PID_NS seq_puts(m, "\nNStgid:"); - for (g = ns->level; g <= pid->level; g++) + for (g = current_pid_ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSpid:"); - for (g = ns->level; g <= pid->level; g++) + for (g = current_pid_ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSpgid:"); - for (g = ns->level; g <= pid->level; g++) + for (g = current_pid_ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSsid:"); - for (g = ns->level; g <= pid->level; g++) + for (g = current_pid_ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_session_nr_ns(p, pid->numbers[g].ns)); #endif seq_putc(m, '\n'); @@ -401,6 +408,41 @@ static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm) seq_printf(m, "THP_enabled:\t%d\n", thp_enabled); } +int host_pid_info(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + int g; + struct task_struct *p; + struct pid_namespace *current_pid_ns; + + if (!sysctl_watch_host_pid) + return 0; + + p = task; + current_pid_ns = task_active_pid_ns(current); + + seq_puts(m, "Name:\t"); + proc_task_name(m, task, true); + seq_putc(m, '\n'); + + rcu_read_lock(); + seq_puts(m, "NStgid:"); + for (g = current_pid_ns->level; g >= 0; g--) + seq_printf(m, "\t%d", + task_tgid_nr_ns(p, pid->numbers[g].ns)); + seq_puts(m, "\nNSpid:"); + for (g = current_pid_ns->level; g >= 0; g--) + seq_printf(m, "\t%d", + task_pid_nr_ns(p, pid->numbers[g].ns)); + seq_puts(m, "\nNSsid:"); + for (g = current_pid_ns->level; g >= 0; g--) + seq_printf(m, "\t%d", + task_session_nr_ns(p, pid->numbers[g].ns)); + rcu_read_unlock(); + seq_putc(m, '\n'); + return 0; +} + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { diff --git a/fs/proc/base.c b/fs/proc/base.c index ebea9501afb8..ced7d0684ec0 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3008,6 +3008,7 @@ static const struct pid_entry tgid_base_stuff[] = { REG("environ", S_IRUSR, proc_environ_operations), REG("auxv", S_IRUSR, proc_auxv_operations), ONE("status", S_IRUGO, proc_pid_status), + ONE("hostinfo", S_IRUGO, host_pid_info), ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits), #ifdef CONFIG_SCHED_DEBUG @@ -3407,6 +3408,7 @@ static const struct pid_entry tid_base_stuff[] = { REG("environ", S_IRUSR, proc_environ_operations), REG("auxv", S_IRUSR, proc_auxv_operations), ONE("status", S_IRUGO, proc_pid_status), + ONE("hostinfo", S_IRUGO, host_pid_info), ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits), #ifdef CONFIG_SCHED_DEBUG diff --git a/fs/proc/internal.h b/fs/proc/internal.h index cd0c8d5ce9a1..92db21bdebad 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -146,6 +146,8 @@ extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_pid_status(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); +extern int host_pid_info(struct seq_file *, struct pid_namespace *, + struct pid *, struct task_struct *); extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); diff --git a/fs/proc/stat.c 
b/fs/proc/stat.c index 80c305f206bb..abfc287d761f 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -195,7 +195,7 @@ static int show_stat(struct seq_file *p, void *v) "procs_blocked %lu\n", nr_context_switches(), (unsigned long long)boottime.tv_sec, - total_forks, + nr_forks(), nr_running(), nr_iowait()); diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 273ee82d8aa9..2cb7917cce79 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include "proc/internal.h" /* only for get_proc_task() in ->open() */ @@ -94,13 +96,50 @@ static void show_type(struct seq_file *m, struct super_block *sb) } } +static inline bool is_in_container(void) +{ + if (task_active_pid_ns(current)->level) + return true; + else + return false; +} + +static bool mnt_need_shield(struct vfsmount *mnt) +{ + struct mount *r = real_mount(mnt); + struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; + char *p; + bool ret = false; + + char *buf = (char *)__get_free_pages(GFP_KERNEL, 0); + if (!buf) + goto out; + + p = d_path(&mnt_path, buf, PAGE_SIZE); + if (IS_ERR(p)) + goto out; + + ret = is_mount_shielded(r->mnt_devname ? r->mnt_devname : "none", p); + +out: + if (buf) + free_pages((unsigned long) buf, 0); + return ret; +} + static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) { struct proc_mounts *p = m->private; struct mount *r = real_mount(mnt); struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; struct super_block *sb = mnt_path.dentry->d_sb; - int err; + int err = 0; + + if (is_in_container() && mnt_need_shield(mnt)) + goto out; if (sb->s_op->show_devname) { err = sb->s_op->show_devname(m, mnt_path.dentry); @@ -134,7 +173,10 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) struct mount *r = real_mount(mnt); struct super_block *sb = mnt->mnt_sb; struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; - int err; + int err = 0; + + if (is_in_container() && mnt_need_shield(mnt)) + goto out; seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id, MAJOR(sb->s_dev), MINOR(sb->s_dev)); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 8caff834f002..013486b5125e 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record) prz = cxt->dprzs[cxt->dump_write_cnt]; + /* + * Since this is a new crash dump, we need to reset the buffer in + * case it still has an old dump present. Without this, the new dump + * will get appended, which would seriously confuse anything trying + * to check dump file contents. Specifically, ramoops_read_kmsg_hdr() + * expects to find a dump header in the beginning of buffer data, so + * we must reset the buffer values, in order to ensure that the + * header will be written to the beginning of the buffer. + */ + persistent_ram_zap(prz); + /* Build header and append record contents.
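 * (Assuming the on-disk format is unchanged here, this is the header
 * that ramoops_read_kmsg_hdr() parses back after the next boot,
 * roughly "====<sec>.<usec>-<C|D>", with C/D marking whether the dump
 * is compressed.)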
*/ hlen = ramoops_write_kmsg_hdr(prz, record); if (!hlen) @@ -572,6 +583,7 @@ static int ramoops_init_przs(const char *name, prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig, &cxt->ecc_info, cxt->memtype, flags, label); + kfree(label); if (IS_ERR(prz_ar[i])) { err = PTR_ERR(prz_ar[i]); dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n", @@ -617,6 +629,7 @@ static int ramoops_init_prz(const char *name, label = kasprintf(GFP_KERNEL, "ramoops:%s", name); *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype, PRZ_FLAG_ZAP_OLD, label); + kfree(label); if (IS_ERR(*prz)) { int err = PTR_ERR(*prz); diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index 8823f65888f0..1f4d8c06f9be 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -574,7 +574,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, /* Initialize general buffer state. */ raw_spin_lock_init(&prz->buffer_lock); prz->flags = flags; - prz->label = label; + prz->label = kstrdup(label, GFP_KERNEL); ret = persistent_ram_buffer_map(start, size, prz, memtype); if (ret) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 6e826b454082..7abc3230c21a 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -497,7 +497,7 @@ int dquot_release(struct dquot *dquot) mutex_lock(&dquot->dq_lock); /* Check whether we are not racing with some other dqget() */ - if (atomic_read(&dquot->dq_count) > 1) + if (dquot_is_busy(dquot)) goto out_dqlock; if (dqopt->ops[dquot->dq_id.type]->release_dqblk) { ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot); @@ -623,7 +623,7 @@ EXPORT_SYMBOL(dquot_scan_active); /* Write all dquot structures to quota files */ int dquot_writeback_dquots(struct super_block *sb, int type) { - struct list_head *dirty; + struct list_head dirty; struct dquot *dquot; struct quota_info *dqopt = sb_dqopt(sb); int cnt; @@ -637,9 +637,10 @@ int dquot_writeback_dquots(struct super_block *sb, int type) if (!sb_has_quota_active(sb, cnt)) continue; spin_lock(&dq_list_lock); - dirty = &dqopt->info[cnt].dqi_dirty_list; - while (!list_empty(dirty)) { - dquot = list_first_entry(dirty, struct dquot, + /* Move list away to avoid livelock. */ + list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty); + while (!list_empty(&dirty)) { + dquot = list_first_entry(&dirty, struct dquot, dq_dirty); WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)); @@ -985,6 +986,7 @@ static int add_dquot_ref(struct super_block *sb, int type) * later. 
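 * The cond_resched() added below also keeps this walk preemptible:
 * s_inode_list_lock has been dropped at this point, and on a sb with
 * millions of cached inodes the scan could otherwise hog the CPU.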
*/ old_inode = inode; + cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); @@ -2860,68 +2862,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); static int do_proc_dqstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - unsigned int type = (int *)table->data - dqstats.stat; + unsigned int type = (unsigned long *)table->data - dqstats.stat; + s64 value = percpu_counter_sum(&dqstats.counter[type]); + + /* Filter negative values for non-monotonic counters */ + if (value < 0 && (type == DQST_ALLOC_DQUOTS || + type == DQST_FREE_DQUOTS)) + value = 0; /* Update global table */ - dqstats.stat[type] = - percpu_counter_sum_positive(&dqstats.counter[type]); - return proc_dointvec(table, write, buffer, lenp, ppos); + dqstats.stat[type] = value; + return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } static struct ctl_table fs_dqstats_table[] = { { .procname = "lookups", .data = &dqstats.stat[DQST_LOOKUPS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "drops", .data = &dqstats.stat[DQST_DROPS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "reads", .data = &dqstats.stat[DQST_READS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "writes", .data = &dqstats.stat[DQST_WRITES], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "cache_hits", .data = &dqstats.stat[DQST_CACHE_HITS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "allocated_dquots", .data = &dqstats.stat[DQST_ALLOC_DQUOTS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "free_dquots", .data = &dqstats.stat[DQST_FREE_DQUOTS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "syncs", .data = &dqstats.stat[DQST_SYNCS], - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, diff --git a/fs/read_write.c b/fs/read_write.c index 5bbf587f5bc1..7458fccc59e1 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1777,10 +1777,9 @@ static int remap_verify_area(struct file *file, loff_t pos, loff_t len, * else. Assume that the offsets have already been checked for block * alignment. * - * For deduplication we always scale down to the previous block because we - * can't meaningfully compare post-EOF contents. - * - * For clone we only link a partial EOF block above the destination file's EOF. + * For clone we only link a partial EOF block above or at the destination file's + * EOF. For deduplication we accept a partial EOF block only if it ends at the + * destination file's EOF (can not link it into the middle of a file). * * Shorten the request if possible. 
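 *
 * For example, with 4k blocks and a 10k destination file: a request
 * ending exactly at offset 10k may keep its partial tail block, while
 * one ending at 6k falls under the i_size check below and is trimmed
 * to 4k, a whole number of blocks.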
*/ @@ -1796,8 +1795,7 @@ static int generic_remap_check_len(struct inode *inode_in, if ((*len & blkmask) == 0) return 0; - if ((remap_flags & REMAP_FILE_DEDUP) || - pos_out + *len < i_size_read(inode_out)) + if (pos_out + *len < i_size_read(inode_out)) new_len &= ~blkmask; if (new_len == *len) diff --git a/fs/readdir.c b/fs/readdir.c index d26d5ea4de7b..de2eceffdee8 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir); * filename length, and the above "soft error" worry means * that it's probably better left alone until we have that * issue clarified. + * + * Note the PATH_MAX check - it's arbitrary but the real + * kernel limit on a possible path component, not NAME_MAX, + * which is the technical standard limit. */ static int verify_dirent_name(const char *name, int len) { - if (!len) + if (len <= 0 || len >= PATH_MAX) return -EIO; if (memchr(name, '/', len)) return -EIO; @@ -206,7 +210,7 @@ struct linux_dirent { struct getdents_callback { struct dir_context ctx; struct linux_dirent __user * current_dir; - struct linux_dirent __user * previous; + int prev_reclen; int count; int error; }; @@ -214,12 +218,13 @@ struct getdents_callback { static int filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct linux_dirent __user * dirent; + struct linux_dirent __user *dirent, *prev; struct getdents_callback *buf = container_of(ctx, struct getdents_callback, ctx); unsigned long d_ino; int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2, sizeof(long)); + int prev_reclen; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) @@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen, buf->error = -EOVERFLOW; return -EOVERFLOW; } - dirent = buf->previous; - if (dirent && signal_pending(current)) + prev_reclen = buf->prev_reclen; + if (prev_reclen && signal_pending(current)) return -EINTR; - - /* - * Note! This range-checks 'previous' (which may be NULL). 
- * The real range was checked in getdents - */ - if (!user_access_begin(dirent, sizeof(*dirent))) - goto efault; - if (dirent) - unsafe_put_user(offset, &dirent->d_off, efault_end); dirent = buf->current_dir; + prev = (void __user *) dirent - prev_reclen; + if (!user_access_begin(prev, reclen + prev_reclen)) + goto efault; + + /* This might be 'dirent->d_off', but if so it will get overwritten */ + unsafe_put_user(offset, &prev->d_off, efault_end); unsafe_put_user(d_ino, &dirent->d_ino, efault_end); unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_access_end(); - buf->previous = dirent; - dirent = (void __user *)dirent + reclen; - buf->current_dir = dirent; + buf->current_dir = (void __user *)dirent + reclen; + buf->prev_reclen = reclen; buf->count -= reclen; return 0; efault_end: @@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, struct linux_dirent __user *, dirent, unsigned int, count) { struct fd f; - struct linux_dirent __user * lastdirent; struct getdents_callback buf = { .ctx.actor = filldir, .count = count, @@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; - lastdirent = buf.previous; - if (lastdirent) { + if (buf.prev_reclen) { + struct linux_dirent __user * lastdirent; + lastdirent = (void __user *)buf.current_dir - buf.prev_reclen; + if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else @@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, struct getdents_callback64 { struct dir_context ctx; struct linux_dirent64 __user * current_dir; - struct linux_dirent64 __user * previous; + int prev_reclen; int count; int error; }; @@ -307,11 +309,12 @@ struct getdents_callback64 { static int filldir64(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct linux_dirent64 __user *dirent; + struct linux_dirent64 __user *dirent, *prev; struct getdents_callback64 *buf = container_of(ctx, struct getdents_callback64, ctx); int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1, sizeof(u64)); + int prev_reclen; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) @@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen, buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; - dirent = buf->previous; - if (dirent && signal_pending(current)) + prev_reclen = buf->prev_reclen; + if (prev_reclen && signal_pending(current)) return -EINTR; - - /* - * Note! This range-checks 'previous' (which may be NULL). 
- * The real range was checked in getdents - */ - if (!user_access_begin(dirent, sizeof(*dirent))) - goto efault; - if (dirent) - unsafe_put_user(offset, &dirent->d_off, efault_end); dirent = buf->current_dir; + prev = (void __user *)dirent - prev_reclen; + if (!user_access_begin(prev, reclen + prev_reclen)) + goto efault; + + /* This might be 'dirent->d_off', but if so it will get overwritten */ + unsafe_put_user(offset, &prev->d_off, efault_end); unsafe_put_user(ino, &dirent->d_ino, efault_end); unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, &dirent->d_type, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_access_end(); - buf->previous = dirent; - dirent = (void __user *)dirent + reclen; - buf->current_dir = dirent; + buf->prev_reclen = reclen; + buf->current_dir = (void __user *)dirent + reclen; buf->count -= reclen; return 0; + efault_end: user_access_end(); efault: @@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, unsigned int count) { struct fd f; - struct linux_dirent64 __user * lastdirent; struct getdents_callback64 buf = { .ctx.actor = filldir64, .count = count, @@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; - lastdirent = buf.previous; - if (lastdirent) { + if (buf.prev_reclen) { + struct linux_dirent64 __user * lastdirent; typeof(lastdirent->d_off) d_off = buf.ctx.pos; + + lastdirent = (void __user *) buf.current_dir - buf.prev_reclen; if (__put_user(d_off, &lastdirent->d_off)) error = -EFAULT; else diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 132ec4406ed0..6419e6dacc39 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2097,6 +2097,15 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, goto out_inserted_sd; } + /* + * Mark it private if we're creating the privroot + * or something under it. + */ + if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) { + inode->i_flags |= S_PRIVATE; + inode->i_opflags &= ~IOP_XATTR; + } + if (reiserfs_posixacl(inode->i_sb)) { reiserfs_write_unlock(inode->i_sb); retval = reiserfs_inherit_default_acl(th, dir, dentry, inode); @@ -2111,8 +2120,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, reiserfs_warning(inode->i_sb, "jdm-13090", "ACLs aren't enabled in the fs, " "but vfs thinks they are!"); - } else if (IS_PRIVATE(dir)) - inode->i_flags |= S_PRIVATE; + } if (security->name) { reiserfs_write_unlock(inode->i_sb); diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 97f3fc4fdd79..959a066b7bb0 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -377,10 +377,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry, /* * Propagate the private flag so we know we're - * in the priv tree + * in the priv tree. Also clear IOP_XATTR + * since we don't have xattrs on xattr files. 
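 * With IOP_XATTR clear, the VFS xattr entry points fail early with
 * -EOPNOTSUPP before consulting sb->s_xattr at all, which is what lets
 * this patch install reiserfs_xattr_handlers unconditionally in
 * reiserfs_fill_super().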
*/ - if (IS_PRIVATE(dir)) + if (IS_PRIVATE(dir)) { inode->i_flags |= S_PRIVATE; + inode->i_opflags &= ~IOP_XATTR; + } } reiserfs_write_unlock(dir->i_sb); if (retval == IO_ERROR) { diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index e5ca9ed79e54..726580114d55 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -1168,6 +1168,8 @@ static inline int bmap_would_wrap(unsigned bmap_nr) return bmap_nr > ((1LL << 16) - 1); } +extern const struct xattr_handler *reiserfs_xattr_handlers[]; + /* * this says about version of key of all items (but stat data) the * object consists of diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index da9ebe33882b..bb4973aefbb1 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -2246,7 +2246,8 @@ error_out: /* also releases the path */ unfix_nodes(&s_ins_balance); #ifdef REISERQUOTA_DEBUG - reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE, + if (inode) + reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE, "reiserquota insert_item(): freeing %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(ih)); #endif diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index d69b4ac0ae2f..a6bce5b1fb1d 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -629,6 +629,7 @@ static void reiserfs_put_super(struct super_block *s) reiserfs_write_unlock(s); mutex_destroy(&REISERFS_SB(s)->lock); destroy_workqueue(REISERFS_SB(s)->commit_wq); + kfree(REISERFS_SB(s)->s_jdev); kfree(s->s_fs_info); s->s_fs_info = NULL; } @@ -1947,7 +1948,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) if (!sbi->s_jdev) { SWARN(silent, s, "", "Cannot allocate memory for " "journal device name"); - goto error; + goto error_unlocked; } } #ifdef CONFIG_QUOTA @@ -2049,6 +2050,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) if (replay_only(s)) goto error_unlocked; + s->s_xattr = reiserfs_xattr_handlers; + if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) { SWARN(silent, s, "clm-7000", "Detected readonly device, marking FS readonly"); @@ -2238,6 +2241,7 @@ error_unlocked: kfree(qf_names[j]); } #endif + kfree(sbi->s_jdev); kfree(sbi); s->s_fs_info = NULL; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index b5b26d8a192c..28b241cd6987 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -122,13 +122,13 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags) struct dentry *xaroot; if (d_really_is_negative(privroot)) - return ERR_PTR(-ENODATA); + return ERR_PTR(-EOPNOTSUPP); inode_lock_nested(d_inode(privroot), I_MUTEX_XATTR); xaroot = dget(REISERFS_SB(sb)->xattr_root); if (!xaroot) - xaroot = ERR_PTR(-ENODATA); + xaroot = ERR_PTR(-EOPNOTSUPP); else if (d_really_is_negative(xaroot)) { int err = -ENODATA; @@ -319,8 +319,12 @@ static int reiserfs_for_each_xattr(struct inode *inode, out_dir: dput(dir); out: - /* -ENODATA isn't an error */ - if (err == -ENODATA) + /* + * -ENODATA: this object doesn't have any xattrs + * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk. + * Neither are errors + */ + if (err == -ENODATA || err == -EOPNOTSUPP) err = 0; return err; } @@ -619,6 +623,10 @@ int reiserfs_xattr_set(struct inode *inode, const char *name, int error, error2; size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size); + /* Check before we start a transaction and then do nothing. 
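 * Without a positive priv_root there is nowhere to store the xattr,
 * so fail with -EOPNOTSUPP up front rather than opening a journal
 * transaction that would only be torn down again.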
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index b5b26d8a192c..28b241cd6987 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -122,13 +122,13 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags)
     struct dentry *xaroot;
 
     if (d_really_is_negative(privroot))
-        return ERR_PTR(-ENODATA);
+        return ERR_PTR(-EOPNOTSUPP);
 
     inode_lock_nested(d_inode(privroot), I_MUTEX_XATTR);
 
     xaroot = dget(REISERFS_SB(sb)->xattr_root);
     if (!xaroot)
-        xaroot = ERR_PTR(-ENODATA);
+        xaroot = ERR_PTR(-EOPNOTSUPP);
     else if (d_really_is_negative(xaroot)) {
         int err = -ENODATA;
 
@@ -319,8 +319,12 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 out_dir:
     dput(dir);
 out:
-    /* -ENODATA isn't an error */
-    if (err == -ENODATA)
+    /*
+     * -ENODATA: this object doesn't have any xattrs
+     * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+     * Neither are errors
+     */
+    if (err == -ENODATA || err == -EOPNOTSUPP)
         err = 0;
     return err;
 }
@@ -619,6 +623,10 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
     int error, error2;
     size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size);
 
+    /* Check before we start a transaction and then do nothing. */
+    if (!d_really_is_positive(REISERFS_SB(inode->i_sb)->priv_root))
+        return -EOPNOTSUPP;
+
     if (!(flags & XATTR_REPLACE))
         jbegin_count += reiserfs_xattr_jcreate_nblocks(inode);
 
@@ -841,8 +849,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
     if (d_really_is_negative(dentry))
         return -EINVAL;
 
-    if (!dentry->d_sb->s_xattr ||
-        get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
+    if (get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
         return -EOPNOTSUPP;
 
     dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE);
@@ -882,6 +889,7 @@ static int create_privroot(struct dentry *dentry)
     }
 
     d_inode(dentry)->i_flags |= S_PRIVATE;
+    d_inode(dentry)->i_opflags &= ~IOP_XATTR;
     reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr "
               "storage.\n", PRIVROOT_NAME);
 
@@ -895,7 +903,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
 #endif
 
 /* Actual operations that are exported to VFS-land */
-static const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler *reiserfs_xattr_handlers[] = {
 #ifdef CONFIG_REISERFS_FS_XATTR
     &reiserfs_xattr_user_handler,
     &reiserfs_xattr_trusted_handler,
@@ -966,8 +974,10 @@ int reiserfs_lookup_privroot(struct super_block *s)
     if (!IS_ERR(dentry)) {
         REISERFS_SB(s)->priv_root = dentry;
         d_set_d_op(dentry, &xattr_lookup_poison_ops);
-        if (d_really_is_positive(dentry))
+        if (d_really_is_positive(dentry)) {
             d_inode(dentry)->i_flags |= S_PRIVATE;
+            d_inode(dentry)->i_opflags &= ~IOP_XATTR;
+        }
     } else
         err = PTR_ERR(dentry);
     inode_unlock(d_inode(s->s_root));
@@ -996,7 +1006,6 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
     }
 
     if (d_really_is_positive(privroot)) {
-        s->s_xattr = reiserfs_xattr_handlers;
         inode_lock(d_inode(privroot));
         if (!REISERFS_SB(s)->xattr_root) {
             struct dentry *dentry;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index aa9380bac196..05f666794561 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -320,10 +320,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
      * would be useless since permissions are ignored, and a pain because
      * it introduces locking cycles
      */
-    if (IS_PRIVATE(dir)) {
-        inode->i_flags |= S_PRIVATE;
+    if (IS_PRIVATE(inode))
         goto apply_umask;
-    }
 
     err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
     if (err)
diff --git a/fs/splice.c b/fs/splice.c
index 98412721f056..e509239d7e06 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -945,12 +945,13 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
     WARN_ON_ONCE(pipe->nrbufs != 0);
 
     while (len) {
+        unsigned int pipe_pages;
         size_t read_len;
         loff_t pos = sd->pos, prev_pos = pos;
 
         /* Don't try to read more than the pipe has space for. */
-        read_len = min_t(size_t, len,
-                 (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+        pipe_pages = pipe->buffers - pipe->nrbufs;
+        read_len = min(len, (size_t)pipe_pages << PAGE_SHIFT);
         ret = do_splice_to(in, &pos, pipe, read_len, flags);
         if (unlikely(ret <= 0))
             goto out_release;
@@ -1180,8 +1181,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 
         pipe_lock(opipe);
         ret = wait_for_space(opipe, flags);
-        if (!ret)
+        if (!ret) {
+            unsigned int pipe_pages;
+
+            /* Don't try to read more than the pipe has space for. */
+            pipe_pages = opipe->buffers - opipe->nrbufs;
+            len = min(len, (size_t)pipe_pages << PAGE_SHIFT);
+
             ret = do_splice_to(in, &offset, opipe, len, flags);
+        }
         pipe_unlock(opipe);
         if (ret > 0)
             wakeup_pipe_readers(opipe);
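Both splice hunks above clamp the request to the free pipe slots and widen to size_t before shifting, so the byte count cannot overflow a narrower type. A self-contained sketch of that arithmetic follows; the PAGE_SHIFT value and function name are assumptions for illustration.

/*
 * Sketch of the clamping done in the splice hunks: never ask to read
 * more than the free pipe slots can hold, and widen to size_t before
 * shifting the page count into a byte count.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

static size_t clamp_to_pipe(size_t len, unsigned int buffers,
                            unsigned int nrbufs)
{
    unsigned int pipe_pages = buffers - nrbufs;       /* free slots */
    size_t room = (size_t)pipe_pages << PAGE_SHIFT;   /* widen first */

    return len < room ? len : room;
}

int main(void)
{
    /* 16-slot pipe with 3 slots in use: at most 13 pages per read */
    printf("%zu\n", clamp_to_pipe(1 << 20, 16, 3));   /* 53248 */
    return 0;
}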
diff --git a/fs/super.c b/fs/super.c
index cfadab2cbf35..cd352530eca9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -448,10 +448,12 @@ void generic_shutdown_super(struct super_block *sb)
         sync_filesystem(sb);
         sb->s_flags &= ~SB_ACTIVE;
 
-        fsnotify_sb_delete(sb);
         cgroup_writeback_umount();
 
+        /* evict all inodes with zero refcount */
         evict_inodes(sb);
+        /* only nonzero refcount inodes can have marks */
+        fsnotify_sb_delete(sb);
 
         if (sb->s_dio_done_wq) {
             destroy_workqueue(sb->s_dio_done_wq);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 0b98e3c8b461..6c0e19f7a21f 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -228,6 +228,8 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
     if (nm.hash) {
         ubifs_assert(c, fname_len(&nm) == 0);
         ubifs_assert(c, fname_name(&nm) == NULL);
+        if (nm.hash & ~UBIFS_S_KEY_HASH_MASK)
+            goto done; /* ENOENT */
         dent_key_init_hash(c, &key, dir->i_ino, nm.hash);
         err = ubifs_tnc_lookup_dh(c, &key, dent, nm.minor_hash);
     } else {
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index cd52585c8f4f..a771273fba7e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -786,7 +786,9 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 
         if (page_offset > end_index)
             break;
-        page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
+        page = pagecache_get_page(mapping, page_offset,
+                FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
+                ra_gfp_mask);
         if (!page)
             break;
         if (!PageUptodate(page))
@@ -1078,18 +1080,12 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
         inode->i_uid = attr->ia_uid;
     if (attr->ia_valid & ATTR_GID)
         inode->i_gid = attr->ia_gid;
-    if (attr->ia_valid & ATTR_ATIME) {
-        inode->i_atime = timestamp_truncate(attr->ia_atime,
-                            inode);
-    }
-    if (attr->ia_valid & ATTR_MTIME) {
-        inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-                            inode);
-    }
-    if (attr->ia_valid & ATTR_CTIME) {
-        inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-                            inode);
-    }
+    if (attr->ia_valid & ATTR_ATIME)
+        inode->i_atime = attr->ia_atime;
+    if (attr->ia_valid & ATTR_MTIME)
+        inode->i_mtime = attr->ia_mtime;
+    if (attr->ia_valid & ATTR_CTIME)
+        inode->i_ctime = attr->ia_ctime;
     if (attr->ia_valid & ATTR_MODE) {
         umode_t mode = attr->ia_mode;
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 5dc5abca11c7..eeb1be259888 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -113,7 +113,8 @@ static int setflags(struct inode *inode, int flags)
     if (err)
         goto out_unlock;
 
-    ui->flags = ioctl2ubifs(flags);
+    ui->flags &= ~ioctl2ubifs(UBIFS_SUPPORTED_IOCTL_FLAGS);
+    ui->flags |= ioctl2ubifs(flags);
     ubifs_set_inode_flags(inode);
     inode->i_ctime = current_time(inode);
     release = ui->dirty;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 4fd9683b8245..826dad0243dc 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -899,7 +899,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
             fname_name(&nm) = xent->name;
             fname_len(&nm) = le16_to_cpu(xent->nlen);
 
-            xino = ubifs_iget(c->vfs_sb, xent->inum);
+            xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
             if (IS_ERR(xino)) {
                 err = PTR_ERR(xino);
                 ubifs_err(c, "dead directory entry '%s', error %d",
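The setflags() hunk above stops overwriting the whole inode flags word: it clears only the bits the ioctl controls and ORs in the new values, so unrelated flags survive. A minimal sketch of that mask-and-merge, with invented flag values:

/*
 * Illustration (invented mask and values) of the setflags fix: clear
 * only the ioctl-controlled bits, then merge in the requested ones.
 */
#include <stdio.h>

#define IOCTL_MASK 0x00ffu   /* bits the ioctl may change */

static unsigned apply_flags(unsigned cur, unsigned requested)
{
    cur &= ~IOCTL_MASK;             /* keep non-ioctl bits intact */
    cur |= requested & IOCTL_MASK;
    return cur;
}

int main(void)
{
    unsigned flags = 0x1204;        /* the 0x1200 bits must survive */

    flags = apply_flags(flags, 0x0011);
    printf("0x%04x\n", flags);      /* prints 0x1211 */
    return 0;
}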
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 3b4b4114f208..edf43ddd7dce 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,7 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
 static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
 {
     if (orph->del) {
-        dbg_gen("deleted twice ino %lu", orph->inum);
+        dbg_gen("deleted twice ino %lu", (unsigned long)orph->inum);
         return;
     }
 
@@ -137,7 +137,7 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
         orph->del = 1;
         orph->dnext = c->orph_dnext;
         c->orph_dnext = orph;
-        dbg_gen("delete later ino %lu", orph->inum);
+        dbg_gen("delete later ino %lu", (unsigned long)orph->inum);
         return;
     }
 
@@ -631,12 +631,17 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
     ino_t inum;
     int i, n, err, first = 1;
 
+    ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
+    if (!ino)
+        return -ENOMEM;
+
     list_for_each_entry(snod, &sleb->nodes, list) {
         if (snod->type != UBIFS_ORPH_NODE) {
             ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
                   snod->type, sleb->lnum, snod->offs);
             ubifs_dump_node(c, snod->node);
-            return -EINVAL;
+            err = -EINVAL;
+            goto out_free;
         }
 
         orph = snod->node;
@@ -663,20 +668,18 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                 ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
                       cmt_no, sleb->lnum, snod->offs);
                 ubifs_dump_node(c, snod->node);
-                return -EINVAL;
+                err = -EINVAL;
+                goto out_free;
             }
             dbg_rcvry("out of date LEB %d", sleb->lnum);
             *outofdate = 1;
-            return 0;
+            err = 0;
+            goto out_free;
         }
 
         if (first)
             first = 0;
 
-        ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
-        if (!ino)
-            return -ENOMEM;
-
         n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
         for (i = 0; i < n; i++) {
             union ubifs_key key1, key2;
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index a551eb3e9b89..6681c18e52b8 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -161,7 +161,7 @@ static int create_default_filesystem(struct ubifs_info *c)
     sup = kzalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_KERNEL);
     mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
     idx_node_size = ubifs_idx_node_sz(c, 1);
-    idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
+    idx = kzalloc(ALIGN(idx_node_size, c->min_io_size), GFP_KERNEL);
     ino = kzalloc(ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size), GFP_KERNEL);
     cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 7d4547e5202d..7fc2f3f07c16 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1599,6 +1599,7 @@ out_free:
     vfree(c->ileb_buf);
     vfree(c->sbuf);
     kfree(c->bottom_up_buf);
+    kfree(c->sup_node);
     ubifs_debugging_exit(c);
     return err;
 }
@@ -1641,6 +1642,7 @@ static void ubifs_umount(struct ubifs_info *c)
     vfree(c->ileb_buf);
     vfree(c->sbuf);
     kfree(c->bottom_up_buf);
+    kfree(c->sup_node);
     ubifs_debugging_exit(c);
 }
 
@@ -2267,10 +2269,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
         }
     } else {
         err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
-        if (err) {
-            kfree(c);
+        if (err)
             goto out_deact;
-        }
         /* We do not support atime */
         sb->s_flags |= SB_ACTIVE;
         if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
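The do_kill_orphans() hunks above hoist the scratch allocation out of the scan loop and funnel every exit through one label that frees it, instead of allocating on a later iteration and leaking on the early error returns. The same shape in miniature, with invented names and plain errno-style codes:

/*
 * Miniature of the orphan-scan fix: allocate once before the loop,
 * and route every exit through a single out_free label.
 */
#include <stdlib.h>
#include <stdio.h>

static int process(const int *items, int n)
{
    int err = 0;
    char *scratch = malloc(64);

    if (!scratch)
        return -12;                 /* -ENOMEM */

    for (int i = 0; i < n; i++) {
        if (items[i] < 0) {         /* "invalid node" */
            err = -22;              /* -EINVAL */
            goto out_free;
        }
        snprintf(scratch, 64, "item %d", items[i]);
    }
out_free:
    free(scratch);                  /* freed on every path */
    return err;
}

int main(void)
{
    int good[] = { 1, 2 }, bad[] = { 1, -1 };

    printf("%d %d\n", process(good, 2), process(bad, 2));  /* 0 -22 */
    return 0;
}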
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index a384a0f9ff32..234be1c4dc87 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -212,7 +212,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
 /**
  * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
  * @c: UBIFS file-system description object
- * @p: return LEB number here
+ * @p: return LEB number in @c->gap_lebs[p]
  *
  * This function lays out new index nodes for dirty znodes using in-the-gaps
  * method of TNC commit.
@@ -221,7 +221,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
  * This function returns the number of index nodes written into the gaps, or a
  * negative error code on failure.
  */
-static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
+static int layout_leb_in_gaps(struct ubifs_info *c, int p)
 {
     struct ubifs_scan_leb *sleb;
     struct ubifs_scan_node *snod;
@@ -236,7 +236,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int p)
          * filled, however we do not check there at present.
          */
         return lnum; /* Error code */
-    *p = lnum;
+    c->gap_lebs[p] = lnum;
     dbg_gc("LEB %d", lnum);
     /*
      * Scan the index LEB.  We use the generic scan for this even though
@@ -355,7 +355,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt)
  */
 static int layout_in_gaps(struct ubifs_info *c, int cnt)
 {
-    int err, leb_needed_cnt, written, *p;
+    int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
 
     dbg_gc("%d znodes to write", cnt);
 
@@ -364,9 +364,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
     if (!c->gap_lebs)
         return -ENOMEM;
 
-    p = c->gap_lebs;
+    old_idx_lebs = c->lst.idx_lebs;
     do {
-        ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
+        ubifs_assert(c, p < c->lst.idx_lebs);
         written = layout_leb_in_gaps(c, p);
         if (written < 0) {
             err = written;
@@ -392,9 +392,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
             leb_needed_cnt = get_leb_cnt(c, cnt);
             dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
                    leb_needed_cnt, c->ileb_cnt);
+            /*
+             * Dynamically change the size of @c->gap_lebs to prevent
+             * oob, because @c->lst.idx_lebs could be increased by
+             * function @get_idx_gc_leb (called by layout_leb_in_gaps->
+             * ubifs_find_dirty_idx_leb) during loop. Only enlarge
+             * @c->gap_lebs when needed.
+             *
+             */
+            if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
+                old_idx_lebs < c->lst.idx_lebs) {
+                old_idx_lebs = c->lst.idx_lebs;
+                gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
+                            (old_idx_lebs + 1), GFP_NOFS);
+                if (!gap_lebs) {
+                    kfree(c->gap_lebs);
+                    c->gap_lebs = NULL;
+                    return -ENOMEM;
+                }
+                c->gap_lebs = gap_lebs;
+            }
         }
     } while (leb_needed_cnt > c->ileb_cnt);
 
-    *p = -1;
+    c->gap_lebs[p] = -1;
     return 0;
 }
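The gap_lebs fix above grows the array mid-loop with krealloc because the bound it was sized from can increase while the loop runs, and it is careful not to leak the old buffer when the resize fails. A userspace miniature using realloc (which, like krealloc, leaves the original allocation alive on failure); names are invented:

/*
 * Miniature of the mid-loop array growth: resize, and on failure free
 * the still-valid old buffer instead of leaking it.
 */
#include <stdlib.h>
#include <stdio.h>

static int grow(int **arr, int *cap, int needed)
{
    if (needed <= *cap)
        return 0;
    int *tmp = realloc(*arr, sizeof(int) * (needed + 1));
    if (!tmp) {
        free(*arr);     /* old buffer is still ours to free */
        *arr = NULL;
        return -12;     /* -ENOMEM */
    }
    *arr = tmp;
    *cap = needed;
    return 0;
}

int main(void)
{
    int cap = 4, *lebs = malloc(sizeof(int) * (cap + 1));

    if (!lebs || grow(&lebs, &cap, 9))
        return 1;
    lebs[9] = -1;       /* terminator, like c->gap_lebs[p] = -1 */
    printf("cap=%d\n", cap);
    free(lebs);
    return 0;
}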
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8c28e93e9b73..4baa1ca91e9b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1035,7 +1035,6 @@ static int check_partition_desc(struct super_block *sb,
     switch (le32_to_cpu(p->accessType)) {
     case PD_ACCESS_TYPE_READ_ONLY:
     case PD_ACCESS_TYPE_WRITE_ONCE:
-    case PD_ACCESS_TYPE_REWRITABLE:
     case PD_ACCESS_TYPE_NONE:
         goto force_ro;
     }
@@ -2492,17 +2491,29 @@ static unsigned int udf_count_free_table(struct super_block *sb,
 static unsigned int udf_count_free(struct super_block *sb)
 {
     unsigned int accum = 0;
-    struct udf_sb_info *sbi;
+    struct udf_sb_info *sbi = UDF_SB(sb);
     struct udf_part_map *map;
+    unsigned int part = sbi->s_partition;
+    int ptype = sbi->s_partmaps[part].s_partition_type;
+
+    if (ptype == UDF_METADATA_MAP25) {
+        part = sbi->s_partmaps[part].s_type_specific.s_metadata.
+                            s_phys_partition_ref;
+    } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
+        /*
+         * Filesystems with VAT are append-only and we cannot write to
+         * them. Let's just report 0 here.
+         */
+        return 0;
+    }
 
-    sbi = UDF_SB(sb);
     if (sbi->s_lvid_bh) {
         struct logicalVolIntegrityDesc *lvid =
             (struct logicalVolIntegrityDesc *)
             sbi->s_lvid_bh->b_data;
-        if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
+        if (le32_to_cpu(lvid->numOfPartitions) > part) {
             accum = le32_to_cpu(
-                    lvid->freeSpaceTable[sbi->s_partition]);
+                    lvid->freeSpaceTable[part]);
             if (accum == 0xFFFFFFFF)
                 accum = 0;
         }
@@ -2511,7 +2522,7 @@ static unsigned int udf_count_free(struct super_block *sb)
     if (accum)
         return accum;
 
-    map = &sbi->s_partmaps[sbi->s_partition];
+    map = &sbi->s_partmaps[part];
     if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
         accum += udf_count_free_bitmap(sb,
                            map->s_uspace.s_bitmap);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f9fd18670e22..d99d166fd892 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1834,13 +1834,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
     if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
         goto out;
     features = uffdio_api.features;
-    if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
-        memset(&uffdio_api, 0, sizeof(uffdio_api));
-        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
-            goto out;
-        ret = -EINVAL;
-        goto out;
-    }
+    ret = -EINVAL;
+    if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+        goto err_out;
+    ret = -EPERM;
+    if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+        goto err_out;
     /* report all available features and ioctls to userland */
     uffdio_api.features = UFFD_API_FEATURES;
     uffdio_api.ioctls = UFFD_API_IOCTLS;
@@ -1853,6 +1852,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
     ret = 0;
 out:
     return ret;
+err_out:
+    memset(&uffdio_api, 0, sizeof(uffdio_api));
+    if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+        ret = -EFAULT;
+    goto out;
 }
 
 static long userfaultfd_ioctl(struct file *file, unsigned cmd,
diff --git a/fs/utimes.c b/fs/utimes.c
index 1ba3f7883870..090739322463 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -36,14 +36,14 @@ static int utimes_common(const struct path *path, struct timespec64 *times)
         if (times[0].tv_nsec == UTIME_OMIT)
             newattrs.ia_valid &= ~ATTR_ATIME;
         else if (times[0].tv_nsec != UTIME_NOW) {
-            newattrs.ia_atime = timestamp_truncate(times[0], inode);
+            newattrs.ia_atime = times[0];
             newattrs.ia_valid |= ATTR_ATIME_SET;
         }
 
         if (times[1].tv_nsec == UTIME_OMIT)
             newattrs.ia_valid &= ~ATTR_MTIME;
         else if (times[1].tv_nsec != UTIME_NOW) {
-            newattrs.ia_mtime = timestamp_truncate(times[1], inode);
+            newattrs.ia_mtime = times[1];
             newattrs.ia_valid |= ATTR_MTIME_SET;
         }
         /*
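The userfaultfd_api() rework above validates the handshake first, adds a privilege check, and on any failure reports a zeroed reply before returning an error through one err_out path. A compressed userspace model of that flow; the names, magic value, and error codes are illustrative, not the real UAPI:

/*
 * Model of the validate-then-zero error path: on any rejection the
 * caller-visible struct is wiped before the error is returned.
 */
#include <string.h>
#include <stdio.h>

struct api_req { unsigned long api, features; };

#define API_MAGIC  0xaaUL
#define SUPPORTED  0x0fUL
#define PRIVILEGED 0x08UL   /* stands in for UFFD_FEATURE_EVENT_FORK */

static int handshake(struct api_req *req, int has_cap)
{
    int ret;

    if (req->api != API_MAGIC || (req->features & ~SUPPORTED)) {
        ret = -22;              /* -EINVAL */
        goto err_out;
    }
    if ((req->features & PRIVILEGED) && !has_cap) {
        ret = -1;               /* -EPERM */
        goto err_out;
    }
    req->features = SUPPORTED;  /* report what we support */
    return 0;
err_out:
    memset(req, 0, sizeof(*req));   /* never leak stale fields */
    return ret;
}

int main(void)
{
    struct api_req r = { API_MAGIC, PRIVILEGED };

    printf("%d\n", handshake(&r, 0));   /* -1, reply zeroed */
    return 0;
}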
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index da031b93e182..2ce8004d8c21 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -5,8 +5,10 @@
  */
 #include "xfs.h"
 #include <linux/backing-dev.h>
+#include <linux/nmi.h>
 #include "xfs_message.h"
 #include "xfs_trace.h"
+#include "xfs_linux.h"
 
 void *
 kmem_alloc(size_t size, xfs_km_flags_t flags)
@@ -17,15 +19,33 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 
     trace_kmem_alloc(size, flags, _RET_IP_);
 
+    if (xfs_kmem_alloc_by_vmalloc &&
+        size > (PAGE_SIZE * xfs_kmem_alloc_by_vmalloc) &&
+        xfs_kmem_alloc_large_dump_stack) {
+        xfs_warn(NULL, "%s size: %ld larger than %ld\n",
+             __func__, size, PAGE_SIZE * xfs_kmem_alloc_by_vmalloc);
+        dump_stack();
+    }
+
     do {
-        ptr = kmalloc(size, lflags);
+        if (xfs_kmem_alloc_by_vmalloc && (size > PAGE_SIZE * xfs_kmem_alloc_by_vmalloc))
+            ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+        else
+            ptr = kmalloc(size, lflags);
         if (ptr || (flags & KM_MAYFAIL))
             return ptr;
-        if (!(++retries % 100))
+        if (!(++retries % 100)) {
             xfs_err(NULL,
-    "%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
+    "%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x), flags: 0x%x",
                 current->comm, current->pid,
-                (unsigned int)size, __func__, lflags);
+                (unsigned int)size, __func__, lflags, flags);
+            if (xfs_kmem_fail_dump_stack == 1)
+                dump_stack();
+            else if (xfs_kmem_fail_dump_stack == 2)
+                trigger_all_cpu_backtrace();
+            else if (xfs_kmem_fail_dump_stack == 3)
+                show_mem(0, NULL);
+        }
         congestion_wait(BLK_RW_ASYNC, HZ/50);
     } while (1);
 }
@@ -128,11 +148,18 @@ kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
         ptr = kmem_cache_alloc(zone, lflags);
         if (ptr || (flags & KM_MAYFAIL))
             return ptr;
-        if (!(++retries % 100))
+        if (!(++retries % 100)) {
             xfs_err(NULL,
-    "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
+    "%s(%u) possible memory allocation deadlock in %s (mode:0x%x), size: 0x%x, flags: 0x%x",
                 current->comm, current->pid,
-                __func__, lflags);
+                __func__, lflags, kmem_cache_size(zone), flags);
+            if (xfs_kmem_fail_dump_stack == 1)
+                dump_stack();
+            else if (xfs_kmem_fail_dump_stack == 2)
+                trigger_all_cpu_backtrace();
+            else if (xfs_kmem_fail_dump_stack == 3)
+                show_mem(0, NULL);
+        }
         congestion_wait(BLK_RW_ASYNC, HZ/50);
     } while (1);
 }
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 94badfa1743e..91c2cb14276e 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -26,7 +26,7 @@ struct xfs_attr_list_context;
  *========================================================================*/
 
-#define ATTR_DONTFOLLOW 0x0001  /* -- unused, from IRIX -- */
+#define ATTR_DONTFOLLOW 0x0001  /* -- ignored, from IRIX -- */
 #define ATTR_ROOT       0x0002  /* use attrs in root (trusted) namespace */
 #define ATTR_TRUST      0x0004  /* -- unused, from IRIX -- */
 #define ATTR_SECURE     0x0008  /* use attrs in security namespace */
@@ -37,7 +37,10 @@ struct xfs_attr_list_context;
 #define ATTR_KERNOVAL   0x2000  /* [kernel] get attr size only, not value */
 
 #define ATTR_INCOMPLETE 0x4000  /* [kernel] return INCOMPLETE attr keys */
-#define ATTR_ALLOC      0x8000  /* allocate xattr buffer on demand */
+#define ATTR_ALLOC      0x8000  /* [kernel] allocate xattr buffer on demand */
+
+#define ATTR_KERNEL_FLAGS \
+    (ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
 
 #define XFS_ATTR_FLAGS \
     { ATTR_DONTFOLLOW,  "DONTFOLLOW" }, \
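ATTR_KERNEL_FLAGS collects the kernel-internal attr bits so the attrmulti ioctl hunks further below can strip them from untrusted userspace input before acting on it. A minimal model of that sanitization, with invented flag values:

/*
 * Model of masking kernel-only bits out of an ioctl argument before
 * use; the values are illustrative, not the real XFS flag encoding.
 */
#include <stdio.h>

#define FLAG_USER_A  0x0001
#define FLAG_USER_B  0x0008
#define KERNEL_FLAGS 0xf000     /* in-kernel bookkeeping bits */

int main(void)
{
    unsigned am_flags = 0x8009; /* userspace smuggled 0x8000 in */

    am_flags &= ~KERNEL_FLAGS;  /* as the ioctl hunks below do */
    printf("0x%04x\n", am_flags);   /* prints 0x0009 */
    return 0;
}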
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 02469d59c787..3f76da11197c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5300,7 +5300,7 @@ __xfs_bunmapi(
          * Make sure we don't touch multiple AGF headers out of order
          * in a single transaction, as that could cause AB-BA deadlocks.
          */
-        if (!wasdel) {
+        if (!wasdel && !isrt) {
             agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
             if (prev_agno != NULLAGNUMBER && prev_agno > agno)
                 break;
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index f54244779492..01b1722333a9 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -124,6 +124,8 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
 extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
         struct xfs_name *name, xfs_ino_t ino,
         xfs_extlen_t tot);
+extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
+        xfs_ino_t inum);
 extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
         struct xfs_name *name, xfs_ino_t inum,
         xfs_extlen_t tot);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 85f14fc2a8da..0e112e1d3e6b 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -944,6 +944,27 @@ xfs_dir2_sf_removename(
     return 0;
 }
 
+/*
+ * Check whether the sf dir replace operation needs more blocks.
+ */
+bool
+xfs_dir2_sf_replace_needblock(
+    struct xfs_inode    *dp,
+    xfs_ino_t           inum)
+{
+    int                 newsize;
+    struct xfs_dir2_sf_hdr  *sfp;
+
+    if (dp->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+        return false;
+
+    sfp = (struct xfs_dir2_sf_hdr *)dp->i_df.if_u1.if_data;
+    newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
+
+    return inum > XFS_DIR2_MAX_SHORT_INUM &&
+           sfp->i8count == 0 && newsize > XFS_IFORK_DSIZE(dp);
+}
+
 /*
  * Replace the inode number of an entry in a shortform directory.
  */
@@ -980,17 +1001,14 @@ xfs_dir2_sf_replace(
      */
     if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
         int     error;          /* error return value */
-        int     newsize;        /* new inode size */
 
-        newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
         /*
          * Won't fit as shortform, convert to block then do replace.
          */
-        if (newsize > XFS_IFORK_DSIZE(dp)) {
+        if (xfs_dir2_sf_replace_needblock(dp, args->inumber)) {
             error = xfs_dir2_sf_to_block(args);
-            if (error) {
+            if (error)
                 return error;
-            }
             return xfs_dir2_block_replace(args);
         }
         /*
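The new xfs_dir2_sf_replace_needblock() helper lets the xfs_rename() hunk below predict, before modifying anything, whether the directory update will allocate blocks, so the AGI lock can be taken first and the AGI -> AGF ordering preserved. A skeleton of that predict-then-lock pattern, with locks modeled as plain prints and all names invented:

/*
 * Skeleton of lock-ordering by prediction: if the operation *might*
 * allocate, take the outer lock up front rather than mid-operation.
 */
#include <stdbool.h>
#include <stdio.h>

struct dir { int bytes, entries; };

/* rough analogue of xfs_dir2_sf_replace_needblock() */
static bool replace_needs_block(const struct dir *d, int inline_max)
{
    int newsize = d->bytes + (d->entries + 1) * 4;

    return newsize > inline_max;    /* would outgrow the inline form */
}

static void rename_entry(struct dir *d)
{
    bool agi_locked = false;

    if (replace_needs_block(d, 128)) {
        agi_locked = true;          /* lock AGI first: AGI -> AGF */
        printf("AGI locked before replace\n");
    }
    /* ... do the replace; any allocation may now lock the AGF ... */
    if (agi_locked)
        printf("AGI released\n");
}

int main(void)
{
    struct dir big = { .bytes = 120, .entries = 10 };

    rename_entry(&big);
    return 0;
}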
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 003a772cd26c..2e50d146105d 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -14,8 +14,15 @@ static inline bool
 xchk_should_terminate(
     struct xfs_scrub    *sc,
-    int                 *error)
+    int                 *error)
 {
+    /*
+     * If preemption is disabled, we need to yield to the scheduler every
+     * few seconds so that we don't run afoul of the soft lockup watchdog
+     * or RCU stall detector.
+     */
+    cond_resched();
+
     if (fatal_signal_pending(current)) {
         if (*error == 0)
             *error = -EAGAIN;
diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c
index fa55ab8b8d80..542de249bf79 100644
--- a/fs/xfs/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
@@ -16,7 +16,7 @@ xfs_param_t xfs_params = {
     .sgid_inherit   = { 0,      0,      1 },
     .symlink_mode   = { 0,      0,      1 },
     .panic_mask     = { 0,      0,      256 },
-    .error_level    = { 0,      3,      11 },
+    .error_level    = { 0,      5,      11 },
     .syncd_timer    = { 1*100,  30*100, 7200*100},
     .stats_clear    = { 0,      0,      1 },
     .inherit_sync   = { 0,      1,      1 },
@@ -30,6 +30,9 @@ xfs_param_t xfs_params = {
     .fstrm_timer    = { 1,      30*100, 3600*100},
     .eofb_timer     = { 1,      300,    3600*24},
     .cowb_timer     = { 1,      1800,   3600*24},
+    .kmem_fail_dump_stack        = { 0, 0, 3 },
+    .kmem_alloc_by_vmalloc       = { 0, 1, 4 },
+    .kmem_alloc_large_dump_stack = { 0, 0, 1 },
 };
 
 struct xfs_globals xfs_globals = {
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 18f4b262e61c..90bf983a34e8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3196,6 +3196,7 @@ xfs_rename(
     struct xfs_trans    *tp;
     struct xfs_inode    *wip = NULL;    /* whiteout inode */
     struct xfs_inode    *inodes[__XFS_SORT_INODES];
+    struct xfs_buf      *agibp;
     int                 num_inodes = __XFS_SORT_INODES;
     bool                new_parent = (src_dp != target_dp);
     bool                src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
@@ -3361,6 +3362,22 @@ xfs_rename(
          * In case there is already an entry with the same
          * name at the destination directory, remove it first.
          */
+
+        /*
+         * Check whether the replace operation will need to allocate
+         * blocks.  This happens when the shortform directory lacks
+         * space and we have to convert it to a block format directory.
+         * When more blocks are necessary, we must lock the AGI first
+         * to preserve locking order (AGI -> AGF).
+         */
+        if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
+            error = xfs_read_agi(mp, tp,
+                    XFS_INO_TO_AGNO(mp, target_ip->i_ino),
+                    &agibp);
+            if (error)
+                goto out_trans_cancel;
+        }
+
         error = xfs_dir_replace(tp, target_dp, target_name,
                     src_ip->i_ino, spaceres);
         if (error)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d58f0d6a699e..2a1909397cb4 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -536,6 +536,8 @@ xfs_attrmulti_by_handle(
 
     error = 0;
     for (i = 0; i < am_hreq.opcount; i++) {
+        ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
+
         ops[i].am_error = strncpy_from_user((char *)attr_name,
                 ops[i].am_attrname, MAXNAMELEN);
         if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 1e08bf79b478..e61cc41189f8 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -450,6 +450,8 @@ xfs_compat_attrmulti_by_handle(
 
     error = 0;
     for (i = 0; i < am_hreq.opcount; i++) {
+        ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
+
         ops[i].am_error = strncpy_from_user((char *)attr_name,
                 compat_ptr(ops[i].am_attrname),
                 MAXNAMELEN);
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ca15105681ca..d823ee672e5c 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -99,6 +99,9 @@ typedef __u32 xfs_nlink_t;
 #define xfs_fstrm_centisecs xfs_params.fstrm_timer.val
 #define xfs_eofb_secs       xfs_params.eofb_timer.val
 #define xfs_cowb_secs       xfs_params.cowb_timer.val
+#define xfs_kmem_fail_dump_stack        xfs_params.kmem_fail_dump_stack.val
+#define xfs_kmem_alloc_by_vmalloc       xfs_params.kmem_alloc_by_vmalloc.val
+#define xfs_kmem_alloc_large_dump_stack xfs_params.kmem_alloc_large_dump_stack.val
 
 #define current_cpu()       (raw_smp_processor_id())
 #define current_pid()       (current->pid)
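The xchk_should_terminate() hunk above adds a cond_resched() so a long-running scrub loop yields to the scheduler and stays clear of the soft lockup watchdog, while still honoring fatal signals. A userspace analogue with sched_yield() standing in for cond_resched() and a flag standing in for the pending-signal check:

/*
 * Analogue of a preemption-friendly termination check: yield on every
 * call, and report -EAGAIN once a stop has been requested.
 */
#include <sched.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t stop;

static int should_terminate(int *error)
{
    sched_yield();          /* like cond_resched(): stay preemptible */
    if (stop) {
        if (*error == 0)
            *error = -11;   /* -EAGAIN */
        return 1;
    }
    return 0;
}

int main(void)
{
    int err = 0;

    for (long i = 0; i < 10000; i++)
        if (should_terminate(&err))
            break;
    printf("err=%d\n", err);
    return 0;
}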
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 641d07f30a27..7b0d9ad8cb1a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1495,6 +1495,8 @@ out_free_iclog:
         prev_iclog = iclog->ic_next;
         kmem_free(iclog->ic_data);
         kmem_free(iclog);
+        if (prev_iclog == log->l_iclog)
+            break;
     }
 out_free_log:
     kmem_free(log);
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index cd6c7210a373..c7de17deeae6 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -201,6 +201,9 @@ xfs_fs_rm_xquota(
     if (XFS_IS_QUOTA_ON(mp))
         return -EINVAL;
 
+    if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
+        return -EINVAL;
+
     if (uflags & FS_USER_QUOTA)
         flags |= XFS_DQ_USER;
     if (uflags & FS_GROUP_QUOTA)
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index 31b3bdbd2eba..55a7fdf5b189 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -190,6 +190,33 @@ static struct ctl_table xfs_table[] = {
         .extra2     = &xfs_params.stats_clear.max
     },
 #endif /* CONFIG_PROC_FS */
+    {
+        .procname   = "kmem_fail_dump_stack",
+        .data       = &xfs_params.kmem_fail_dump_stack.val,
+        .maxlen     = sizeof(int),
+        .mode       = 0644,
+        .proc_handler   = proc_dointvec_minmax,
+        .extra1     = &xfs_params.kmem_fail_dump_stack.min,
+        .extra2     = &xfs_params.kmem_fail_dump_stack.max,
+    },
+    {
+        .procname   = "kmem_alloc_by_vmalloc",
+        .data       = &xfs_params.kmem_alloc_by_vmalloc.val,
+        .maxlen     = sizeof(int),
+        .mode       = 0644,
+        .proc_handler   = proc_dointvec_minmax,
+        .extra1     = &xfs_params.kmem_alloc_by_vmalloc.min,
+        .extra2     = &xfs_params.kmem_alloc_by_vmalloc.max,
+    },
+    {
+        .procname   = "kmem_alloc_large_dump_stack",
+        .data       = &xfs_params.kmem_alloc_large_dump_stack.val,
+        .maxlen     = sizeof(int),
+        .mode       = 0644,
+        .proc_handler   = proc_dointvec_minmax,
+        .extra1     = &xfs_params.kmem_alloc_large_dump_stack.min,
+        .extra2     = &xfs_params.kmem_alloc_large_dump_stack.max,
+    },
 
     {}
 };
diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h
index 8abf4640f1d5..33b1c8e3f9cb 100644
--- a/fs/xfs/xfs_sysctl.h
+++ b/fs/xfs/xfs_sysctl.h
@@ -37,6 +37,9 @@ typedef struct xfs_param {
     xfs_sysctl_val_t fstrm_timer;   /* Filestream dir-AG assoc'n timeout. */
     xfs_sysctl_val_t eofb_timer;    /* Interval between eofb scan wakeups */
     xfs_sysctl_val_t cowb_timer;    /* Interval between cowb scan wakeups */
+    xfs_sysctl_val_t kmem_fail_dump_stack;
+    xfs_sysctl_val_t kmem_alloc_by_vmalloc;
+    xfs_sysctl_val_t kmem_alloc_large_dump_stack;
 } xfs_param_t;
 
 /*
diff --git a/include/Kbuild b/include/Kbuild
deleted file mode 100644
index ffba79483cc5..000000000000
--- a/include/Kbuild
+++ /dev/null
@@ -1,1185 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Add header-test-$(CONFIG_...) guard to headers that are only compiled
-# for particular architectures.
-#
-# Headers listed in header-test- are excluded from the test coverage.
-# Many headers are excluded for now because they fail to build. Please
-# consider to fix headers first before adding new ones to the blacklist.
-#
-# Sorted alphabetically.
-header-test- += acpi/acbuffer.h -header-test- += acpi/acpi.h -header-test- += acpi/acpi_bus.h -header-test- += acpi/acpi_drivers.h -header-test- += acpi/acpi_io.h -header-test- += acpi/acpi_lpat.h -header-test- += acpi/acpiosxf.h -header-test- += acpi/acpixf.h -header-test- += acpi/acrestyp.h -header-test- += acpi/actbl.h -header-test- += acpi/actbl1.h -header-test- += acpi/actbl2.h -header-test- += acpi/actbl3.h -header-test- += acpi/actypes.h -header-test- += acpi/battery.h -header-test- += acpi/cppc_acpi.h -header-test- += acpi/nfit.h -header-test- += acpi/platform/acenv.h -header-test- += acpi/platform/acenvex.h -header-test- += acpi/platform/acintel.h -header-test- += acpi/platform/aclinux.h -header-test- += acpi/platform/aclinuxex.h -header-test- += acpi/processor.h -header-test-$(CONFIG_X86) += clocksource/hyperv_timer.h -header-test- += clocksource/timer-sp804.h -header-test- += crypto/cast_common.h -header-test- += crypto/internal/cryptouser.h -header-test- += crypto/pkcs7.h -header-test- += crypto/poly1305.h -header-test- += crypto/sha3.h -header-test- += drm/ati_pcigart.h -header-test- += drm/bridge/dw_hdmi.h -header-test- += drm/bridge/dw_mipi_dsi.h -header-test- += drm/drm_audio_component.h -header-test- += drm/drm_auth.h -header-test- += drm/drm_debugfs.h -header-test- += drm/drm_debugfs_crc.h -header-test- += drm/drm_displayid.h -header-test- += drm/drm_encoder_slave.h -header-test- += drm/drm_fb_cma_helper.h -header-test- += drm/drm_fb_helper.h -header-test- += drm/drm_fixed.h -header-test- += drm/drm_format_helper.h -header-test- += drm/drm_lease.h -header-test- += drm/drm_legacy.h -header-test- += drm/drm_panel.h -header-test- += drm/drm_plane_helper.h -header-test- += drm/drm_rect.h -header-test- += drm/i915_component.h -header-test- += drm/intel-gtt.h -header-test- += drm/tinydrm/tinydrm-helpers.h -header-test- += drm/ttm/ttm_debug.h -header-test- += keys/asymmetric-parser.h -header-test- += keys/asymmetric-subtype.h -header-test- += keys/asymmetric-type.h -header-test- += keys/big_key-type.h -header-test- += keys/request_key_auth-type.h -header-test- += keys/trusted.h -header-test- += kvm/arm_arch_timer.h -header-test- += kvm/arm_pmu.h -header-test-$(CONFIG_ARM) += kvm/arm_psci.h -header-test-$(CONFIG_ARM64) += kvm/arm_psci.h -header-test- += kvm/arm_vgic.h -header-test- += linux/8250_pci.h -header-test- += linux/a.out.h -header-test- += linux/adxl.h -header-test- += linux/agpgart.h -header-test- += linux/alcor_pci.h -header-test- += linux/amba/clcd.h -header-test- += linux/amba/pl080.h -header-test- += linux/amd-iommu.h -header-test-$(CONFIG_ARM) += linux/arm-cci.h -header-test-$(CONFIG_ARM64) += linux/arm-cci.h -header-test- += linux/arm_sdei.h -header-test- += linux/asn1_decoder.h -header-test- += linux/ata_platform.h -header-test- += linux/ath9k_platform.h -header-test- += linux/atm_tcp.h -header-test- += linux/atomic-fallback.h -header-test- += linux/avf/virtchnl.h -header-test- += linux/bcm47xx_sprom.h -header-test- += linux/bcma/bcma_driver_gmac_cmn.h -header-test- += linux/bcma/bcma_driver_mips.h -header-test- += linux/bcma/bcma_driver_pci.h -header-test- += linux/bcma/bcma_driver_pcie2.h -header-test- += linux/bit_spinlock.h -header-test- += linux/blk-mq-rdma.h -header-test- += linux/blk-mq.h -header-test- += linux/blktrace_api.h -header-test- += linux/blockgroup_lock.h -header-test- += linux/bma150.h -header-test- += linux/bpf_lirc.h -header-test- += linux/bpf_types.h -header-test- += linux/bsg-lib.h -header-test- += linux/bsg.h -header-test- += linux/btf.h 
-header-test- += linux/btree-128.h -header-test- += linux/btree-type.h -header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h -header-test- += linux/byteorder/generic.h -header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h -header-test- += linux/c2port.h -header-test- += linux/can/dev/peak_canfd.h -header-test- += linux/can/platform/cc770.h -header-test- += linux/can/platform/sja1000.h -header-test- += linux/ceph/ceph_features.h -header-test- += linux/ceph/ceph_frag.h -header-test- += linux/ceph/ceph_fs.h -header-test- += linux/ceph/debugfs.h -header-test- += linux/ceph/msgr.h -header-test- += linux/ceph/rados.h -header-test- += linux/cgroup_subsys.h -header-test- += linux/clk/sunxi-ng.h -header-test- += linux/clk/ti.h -header-test- += linux/cn_proc.h -header-test- += linux/coda_psdev.h -header-test- += linux/compaction.h -header-test- += linux/console_struct.h -header-test- += linux/count_zeros.h -header-test- += linux/cs5535.h -header-test- += linux/cuda.h -header-test- += linux/cyclades.h -header-test- += linux/dcookies.h -header-test- += linux/delayacct.h -header-test- += linux/delayed_call.h -header-test- += linux/device-mapper.h -header-test- += linux/devpts_fs.h -header-test- += linux/dio.h -header-test- += linux/dirent.h -header-test- += linux/dlm_plock.h -header-test- += linux/dm-dirty-log.h -header-test- += linux/dm-region-hash.h -header-test- += linux/dma-debug.h -header-test- += linux/dma/mmp-pdma.h -header-test- += linux/dma/sprd-dma.h -header-test- += linux/dns_resolver.h -header-test- += linux/drbd_genl.h -header-test- += linux/drbd_genl_api.h -header-test- += linux/dw_apb_timer.h -header-test- += linux/dynamic_debug.h -header-test- += linux/dynamic_queue_limits.h -header-test- += linux/ecryptfs.h -header-test- += linux/edma.h -header-test- += linux/eeprom_93cx6.h -header-test- += linux/efs_vh.h -header-test- += linux/elevator.h -header-test- += linux/elfcore-compat.h -header-test- += linux/error-injection.h -header-test- += linux/errseq.h -header-test- += linux/eventpoll.h -header-test- += linux/ext2_fs.h -header-test- += linux/f75375s.h -header-test- += linux/falloc.h -header-test- += linux/fault-inject.h -header-test- += linux/fbcon.h -header-test- += linux/firmware/intel/stratix10-svc-client.h -header-test- += linux/firmware/meson/meson_sm.h -header-test- += linux/firmware/trusted_foundations.h -header-test- += linux/firmware/xlnx-zynqmp.h -header-test- += linux/fixp-arith.h -header-test- += linux/flat.h -header-test- += linux/fs_types.h -header-test- += linux/fs_uart_pd.h -header-test- += linux/fsi-occ.h -header-test- += linux/fsi-sbefifo.h -header-test- += linux/fsl/bestcomm/ata.h -header-test- += linux/fsl/bestcomm/bestcomm.h -header-test- += linux/fsl/bestcomm/bestcomm_priv.h -header-test- += linux/fsl/bestcomm/fec.h -header-test- += linux/fsl/bestcomm/gen_bd.h -header-test- += linux/fsl/bestcomm/sram.h -header-test- += linux/fsl_hypervisor.h -header-test- += linux/fsldma.h -header-test- += linux/ftrace_irq.h -header-test- += linux/gameport.h -header-test- += linux/genl_magic_func.h -header-test- += linux/genl_magic_struct.h -header-test- += linux/gpio/aspeed.h -header-test- += linux/gpio/gpio-reg.h -header-test- += linux/hid-debug.h -header-test- += linux/hiddev.h -header-test- += linux/hippidevice.h -header-test- += linux/hmm.h -header-test- += linux/hp_sdc.h -header-test- += linux/huge_mm.h -header-test- += linux/hugetlb_cgroup.h -header-test- += linux/hugetlb_inline.h -header-test- += linux/hwmon-vid.h -header-test- += 
linux/hyperv.h -header-test- += linux/i2c-algo-pca.h -header-test- += linux/i2c-algo-pcf.h -header-test- += linux/i3c/ccc.h -header-test- += linux/i3c/device.h -header-test- += linux/i3c/master.h -header-test- += linux/i8042.h -header-test- += linux/ide.h -header-test- += linux/idle_inject.h -header-test- += linux/if_frad.h -header-test- += linux/if_rmnet.h -header-test- += linux/if_tap.h -header-test- += linux/iio/accel/kxcjk_1013.h -header-test- += linux/iio/adc/ad_sigma_delta.h -header-test- += linux/iio/buffer-dma.h -header-test- += linux/iio/buffer_impl.h -header-test- += linux/iio/common/st_sensors.h -header-test- += linux/iio/common/st_sensors_i2c.h -header-test- += linux/iio/common/st_sensors_spi.h -header-test- += linux/iio/dac/ad5421.h -header-test- += linux/iio/dac/ad5504.h -header-test- += linux/iio/dac/ad5791.h -header-test- += linux/iio/dac/max517.h -header-test- += linux/iio/dac/mcp4725.h -header-test- += linux/iio/frequency/ad9523.h -header-test- += linux/iio/frequency/adf4350.h -header-test- += linux/iio/hw-consumer.h -header-test- += linux/iio/imu/adis.h -header-test- += linux/iio/sysfs.h -header-test- += linux/iio/timer/stm32-timer-trigger.h -header-test- += linux/iio/trigger.h -header-test- += linux/iio/triggered_event.h -header-test- += linux/imx-media.h -header-test- += linux/inet_diag.h -header-test- += linux/init_ohci1394_dma.h -header-test- += linux/initrd.h -header-test- += linux/input/adp5589.h -header-test- += linux/input/bu21013.h -header-test- += linux/input/cma3000.h -header-test- += linux/input/kxtj9.h -header-test- += linux/input/lm8333.h -header-test- += linux/input/sparse-keymap.h -header-test- += linux/input/touchscreen.h -header-test- += linux/input/tps6507x-ts.h -header-test-$(CONFIG_X86) += linux/intel-iommu.h -header-test- += linux/intel-ish-client-if.h -header-test- += linux/intel-pti.h -header-test- += linux/intel-svm.h -header-test- += linux/interconnect-provider.h -header-test- += linux/ioc3.h -header-test-$(CONFIG_BLOCK) += linux/iomap.h -header-test- += linux/ipack.h -header-test- += linux/irq_cpustat.h -header-test- += linux/irq_poll.h -header-test- += linux/irqchip/arm-gic-v3.h -header-test- += linux/irqchip/arm-gic-v4.h -header-test- += linux/irqchip/irq-madera.h -header-test- += linux/irqchip/irq-sa11x0.h -header-test- += linux/irqchip/mxs.h -header-test- += linux/irqchip/versatile-fpga.h -header-test- += linux/irqdesc.h -header-test- += linux/irqflags.h -header-test- += linux/iscsi_boot_sysfs.h -header-test- += linux/isdn/capiutil.h -header-test- += linux/isdn/hdlc.h -header-test- += linux/isdn_ppp.h -header-test- += linux/jbd2.h -header-test- += linux/jump_label.h -header-test- += linux/jump_label_ratelimit.h -header-test- += linux/jz4740-adc.h -header-test- += linux/kasan.h -header-test- += linux/kcore.h -header-test- += linux/kdev_t.h -header-test- += linux/kernelcapi.h -header-test- += linux/khugepaged.h -header-test- += linux/kobj_map.h -header-test- += linux/kobject_ns.h -header-test- += linux/kvm_host.h -header-test- += linux/kvm_irqfd.h -header-test- += linux/kvm_para.h -header-test- += linux/lantiq.h -header-test- += linux/lapb.h -header-test- += linux/latencytop.h -header-test- += linux/led-lm3530.h -header-test- += linux/leds-bd2802.h -header-test- += linux/leds-lp3944.h -header-test- += linux/leds-lp3952.h -header-test- += linux/leds_pwm.h -header-test- += linux/libata.h -header-test- += linux/license.h -header-test- += linux/lightnvm.h -header-test- += linux/lis3lv02d.h -header-test- += linux/list_bl.h -header-test- += 
linux/list_lru.h -header-test- += linux/list_nulls.h -header-test- += linux/lockd/share.h -header-test- += linux/lzo.h -header-test- += linux/mailbox/zynqmp-ipi-message.h -header-test- += linux/maple.h -header-test- += linux/mbcache.h -header-test- += linux/mbus.h -header-test- += linux/mc146818rtc.h -header-test- += linux/mc6821.h -header-test- += linux/mdev.h -header-test- += linux/mem_encrypt.h -header-test- += linux/memfd.h -header-test- += linux/mfd/88pm80x.h -header-test- += linux/mfd/88pm860x.h -header-test- += linux/mfd/abx500/ab8500-bm.h -header-test- += linux/mfd/abx500/ab8500-gpadc.h -header-test- += linux/mfd/adp5520.h -header-test- += linux/mfd/arizona/pdata.h -header-test- += linux/mfd/as3711.h -header-test- += linux/mfd/as3722.h -header-test- += linux/mfd/da903x.h -header-test- += linux/mfd/da9055/pdata.h -header-test- += linux/mfd/db8500-prcmu.h -header-test- += linux/mfd/dbx500-prcmu.h -header-test- += linux/mfd/dln2.h -header-test- += linux/mfd/dm355evm_msp.h -header-test- += linux/mfd/ds1wm.h -header-test- += linux/mfd/ezx-pcap.h -header-test- += linux/mfd/intel_msic.h -header-test- += linux/mfd/janz.h -header-test- += linux/mfd/kempld.h -header-test- += linux/mfd/lm3533.h -header-test- += linux/mfd/lp8788-isink.h -header-test- += linux/mfd/lpc_ich.h -header-test- += linux/mfd/max77693.h -header-test- += linux/mfd/max8998-private.h -header-test- += linux/mfd/menelaus.h -header-test- += linux/mfd/mt6397/core.h -header-test- += linux/mfd/palmas.h -header-test- += linux/mfd/pcf50633/backlight.h -header-test- += linux/mfd/rc5t583.h -header-test- += linux/mfd/retu.h -header-test- += linux/mfd/samsung/core.h -header-test- += linux/mfd/si476x-platform.h -header-test- += linux/mfd/si476x-reports.h -header-test- += linux/mfd/sky81452.h -header-test- += linux/mfd/smsc.h -header-test- += linux/mfd/sta2x11-mfd.h -header-test- += linux/mfd/stmfx.h -header-test- += linux/mfd/tc3589x.h -header-test- += linux/mfd/tc6387xb.h -header-test- += linux/mfd/tc6393xb.h -header-test- += linux/mfd/tps65090.h -header-test- += linux/mfd/tps6586x.h -header-test- += linux/mfd/tps65910.h -header-test- += linux/mfd/tps80031.h -header-test- += linux/mfd/ucb1x00.h -header-test- += linux/mfd/viperboard.h -header-test- += linux/mfd/wm831x/core.h -header-test- += linux/mfd/wm831x/otp.h -header-test- += linux/mfd/wm831x/pdata.h -header-test- += linux/mfd/wm8994/core.h -header-test- += linux/mfd/wm8994/pdata.h -header-test- += linux/mlx4/doorbell.h -header-test- += linux/mlx4/srq.h -header-test- += linux/mlx5/doorbell.h -header-test- += linux/mlx5/eq.h -header-test- += linux/mlx5/fs_helpers.h -header-test- += linux/mlx5/mlx5_ifc.h -header-test- += linux/mlx5/mlx5_ifc_fpga.h -header-test- += linux/mm-arch-hooks.h -header-test- += linux/mm_inline.h -header-test- += linux/mmu_context.h -header-test- += linux/mpage.h -header-test- += linux/mtd/bbm.h -header-test- += linux/mtd/cfi.h -header-test- += linux/mtd/doc2000.h -header-test- += linux/mtd/flashchip.h -header-test- += linux/mtd/ftl.h -header-test- += linux/mtd/gen_probe.h -header-test- += linux/mtd/jedec.h -header-test- += linux/mtd/nand_bch.h -header-test- += linux/mtd/nand_ecc.h -header-test- += linux/mtd/ndfc.h -header-test- += linux/mtd/onenand.h -header-test- += linux/mtd/pismo.h -header-test- += linux/mtd/plat-ram.h -header-test- += linux/mtd/spi-nor.h -header-test- += linux/mv643xx.h -header-test- += linux/mv643xx_eth.h -header-test- += linux/mvebu-pmsu.h -header-test- += linux/mxm-wmi.h -header-test- += linux/n_r3964.h -header-test- += linux/ndctl.h 
-header-test- += linux/nfs.h -header-test- += linux/nfs_fs_i.h -header-test- += linux/nfs_fs_sb.h -header-test- += linux/nfs_page.h -header-test- += linux/nfs_xdr.h -header-test- += linux/nfsacl.h -header-test- += linux/nl802154.h -header-test- += linux/ns_common.h -header-test- += linux/nsc_gpio.h -header-test- += linux/ntb_transport.h -header-test- += linux/nubus.h -header-test- += linux/nvme-fc-driver.h -header-test- += linux/nvme-fc.h -header-test- += linux/nvme-rdma.h -header-test- += linux/nvram.h -header-test- += linux/objagg.h -header-test- += linux/of_clk.h -header-test- += linux/of_net.h -header-test- += linux/of_pdt.h -header-test- += linux/olpc-ec.h -header-test- += linux/omap-dma.h -header-test- += linux/omap-dmaengine.h -header-test- += linux/omap-gpmc.h -header-test- += linux/omap-iommu.h -header-test- += linux/omap-mailbox.h -header-test- += linux/once.h -header-test- += linux/osq_lock.h -header-test- += linux/overflow.h -header-test- += linux/page-flags-layout.h -header-test- += linux/page-isolation.h -header-test- += linux/page_ext.h -header-test- += linux/page_owner.h -header-test- += linux/parport_pc.h -header-test- += linux/parser.h -header-test- += linux/pci-acpi.h -header-test- += linux/pci-dma-compat.h -header-test- += linux/pci_hotplug.h -header-test- += linux/pda_power.h -header-test- += linux/perf/arm_pmu.h -header-test- += linux/perf_regs.h -header-test- += linux/phy/omap_control_phy.h -header-test- += linux/phy/tegra/xusb.h -header-test- += linux/phy/ulpi_phy.h -header-test- += linux/phy_fixed.h -header-test- += linux/pipe_fs_i.h -header-test- += linux/pktcdvd.h -header-test- += linux/pl320-ipc.h -header-test- += linux/pl353-smc.h -header-test- += linux/platform_data/ad5449.h -header-test- += linux/platform_data/ad5755.h -header-test- += linux/platform_data/ad7266.h -header-test- += linux/platform_data/ad7291.h -header-test- += linux/platform_data/ad7298.h -header-test- += linux/platform_data/ad7303.h -header-test- += linux/platform_data/ad7791.h -header-test- += linux/platform_data/ad7793.h -header-test- += linux/platform_data/ad7887.h -header-test- += linux/platform_data/adau17x1.h -header-test- += linux/platform_data/adp8870.h -header-test- += linux/platform_data/ads1015.h -header-test- += linux/platform_data/ads7828.h -header-test- += linux/platform_data/apds990x.h -header-test- += linux/platform_data/arm-ux500-pm.h -header-test- += linux/platform_data/asoc-s3c.h -header-test- += linux/platform_data/at91_adc.h -header-test- += linux/platform_data/ata-pxa.h -header-test- += linux/platform_data/atmel.h -header-test- += linux/platform_data/bh1770glc.h -header-test- += linux/platform_data/brcmfmac.h -header-test- += linux/platform_data/cros_ec_commands.h -header-test- += linux/platform_data/clk-u300.h -header-test- += linux/platform_data/cyttsp4.h -header-test- += linux/platform_data/dma-coh901318.h -header-test- += linux/platform_data/dma-imx-sdma.h -header-test- += linux/platform_data/dma-mcf-edma.h -header-test- += linux/platform_data/dma-s3c24xx.h -header-test- += linux/platform_data/dmtimer-omap.h -header-test- += linux/platform_data/dsa.h -header-test- += linux/platform_data/edma.h -header-test- += linux/platform_data/elm.h -header-test- += linux/platform_data/emif_plat.h -header-test- += linux/platform_data/fsa9480.h -header-test- += linux/platform_data/g762.h -header-test- += linux/platform_data/gpio-ath79.h -header-test- += linux/platform_data/gpio-davinci.h -header-test- += linux/platform_data/gpio-dwapb.h -header-test- += 
linux/platform_data/gpio-htc-egpio.h -header-test- += linux/platform_data/gpmc-omap.h -header-test- += linux/platform_data/hsmmc-omap.h -header-test- += linux/platform_data/hwmon-s3c.h -header-test- += linux/platform_data/i2c-davinci.h -header-test- += linux/platform_data/i2c-imx.h -header-test- += linux/platform_data/i2c-mux-reg.h -header-test- += linux/platform_data/i2c-ocores.h -header-test- += linux/platform_data/i2c-xiic.h -header-test- += linux/platform_data/intel-spi.h -header-test- += linux/platform_data/invensense_mpu6050.h -header-test- += linux/platform_data/irda-pxaficp.h -header-test- += linux/platform_data/irda-sa11x0.h -header-test- += linux/platform_data/itco_wdt.h -header-test- += linux/platform_data/jz4740/jz4740_nand.h -header-test- += linux/platform_data/keyboard-pxa930_rotary.h -header-test- += linux/platform_data/keypad-omap.h -header-test- += linux/platform_data/leds-lp55xx.h -header-test- += linux/platform_data/leds-omap.h -header-test- += linux/platform_data/lp855x.h -header-test- += linux/platform_data/lp8727.h -header-test- += linux/platform_data/max197.h -header-test- += linux/platform_data/max3421-hcd.h -header-test- += linux/platform_data/max732x.h -header-test- += linux/platform_data/mcs.h -header-test- += linux/platform_data/mdio-bcm-unimac.h -header-test- += linux/platform_data/mdio-gpio.h -header-test- += linux/platform_data/media/si4713.h -header-test- += linux/platform_data/mlxreg.h -header-test- += linux/platform_data/mmc-omap.h -header-test- += linux/platform_data/mmc-sdhci-s3c.h -header-test- += linux/platform_data/mmp_audio.h -header-test- += linux/platform_data/mtd-orion_nand.h -header-test- += linux/platform_data/mv88e6xxx.h -header-test- += linux/platform_data/net-cw1200.h -header-test- += linux/platform_data/omap-twl4030.h -header-test- += linux/platform_data/omapdss.h -header-test- += linux/platform_data/pcf857x.h -header-test- += linux/platform_data/pixcir_i2c_ts.h -header-test- += linux/platform_data/pwm_omap_dmtimer.h -header-test- += linux/platform_data/pxa2xx_udc.h -header-test- += linux/platform_data/pxa_sdhci.h -header-test- += linux/platform_data/remoteproc-omap.h -header-test- += linux/platform_data/sa11x0-serial.h -header-test- += linux/platform_data/sc18is602.h -header-test- += linux/platform_data/sdhci-pic32.h -header-test- += linux/platform_data/serial-sccnxp.h -header-test- += linux/platform_data/sht3x.h -header-test- += linux/platform_data/shtc1.h -header-test- += linux/platform_data/si5351.h -header-test- += linux/platform_data/sky81452-backlight.h -header-test- += linux/platform_data/spi-davinci.h -header-test- += linux/platform_data/spi-ep93xx.h -header-test- += linux/platform_data/spi-mt65xx.h -header-test- += linux/platform_data/st_sensors_pdata.h -header-test- += linux/platform_data/ti-sysc.h -header-test- += linux/platform_data/timer-ixp4xx.h -header-test- += linux/platform_data/touchscreen-s3c2410.h -header-test- += linux/platform_data/tsc2007.h -header-test- += linux/platform_data/tsl2772.h -header-test- += linux/platform_data/uio_pruss.h -header-test- += linux/platform_data/usb-davinci.h -header-test- += linux/platform_data/usb-ehci-mxc.h -header-test- += linux/platform_data/usb-ehci-orion.h -header-test- += linux/platform_data/usb-mx2.h -header-test- += linux/platform_data/usb-ohci-s3c2410.h -header-test- += linux/platform_data/usb-omap.h -header-test- += linux/platform_data/usb-s3c2410_udc.h -header-test- += linux/platform_data/usb3503.h -header-test- += linux/platform_data/ux500_wdt.h -header-test- += 
linux/platform_data/video-clcd-versatile.h -header-test- += linux/platform_data/video-imxfb.h -header-test- += linux/platform_data/video-pxafb.h -header-test- += linux/platform_data/video_s3c.h -header-test- += linux/platform_data/voltage-omap.h -header-test- += linux/platform_data/x86/apple.h -header-test- += linux/platform_data/x86/clk-pmc-atom.h -header-test- += linux/platform_data/x86/pmc_atom.h -header-test- += linux/platform_data/xtalk-bridge.h -header-test- += linux/pm2301_charger.h -header-test- += linux/pm_wakeirq.h -header-test- += linux/pm_wakeup.h -header-test- += linux/pmbus.h -header-test- += linux/pmu.h -header-test- += linux/posix_acl.h -header-test- += linux/posix_acl_xattr.h -header-test- += linux/power/ab8500.h -header-test- += linux/power/bq27xxx_battery.h -header-test- += linux/power/generic-adc-battery.h -header-test- += linux/power/jz4740-battery.h -header-test- += linux/power/max17042_battery.h -header-test- += linux/power/max8903_charger.h -header-test- += linux/ppp-comp.h -header-test- += linux/pps-gpio.h -header-test- += linux/pr.h -header-test- += linux/proc_ns.h -header-test- += linux/processor.h -header-test- += linux/psi.h -header-test- += linux/psp-sev.h -header-test- += linux/pstore.h -header-test- += linux/ptr_ring.h -header-test- += linux/ptrace.h -header-test- += linux/qcom-geni-se.h -header-test- += linux/qed/eth_common.h -header-test- += linux/qed/fcoe_common.h -header-test- += linux/qed/iscsi_common.h -header-test- += linux/qed/iwarp_common.h -header-test- += linux/qed/qed_eth_if.h -header-test- += linux/qed/qed_fcoe_if.h -header-test- += linux/qed/rdma_common.h -header-test- += linux/qed/storage_common.h -header-test- += linux/qed/tcp_common.h -header-test- += linux/qnx6_fs.h -header-test- += linux/quicklist.h -header-test- += linux/ramfs.h -header-test- += linux/range.h -header-test- += linux/rcu_node_tree.h -header-test- += linux/rculist_bl.h -header-test- += linux/rculist_nulls.h -header-test- += linux/rcutiny.h -header-test- += linux/rcutree.h -header-test- += linux/reboot-mode.h -header-test- += linux/regulator/fixed.h -header-test- += linux/regulator/gpio-regulator.h -header-test- += linux/regulator/max8973-regulator.h -header-test- += linux/regulator/of_regulator.h -header-test- += linux/regulator/tps51632-regulator.h -header-test- += linux/regulator/tps62360.h -header-test- += linux/regulator/tps6507x.h -header-test- += linux/regulator/userspace-consumer.h -header-test- += linux/remoteproc/st_slim_rproc.h -header-test- += linux/reset/socfpga.h -header-test- += linux/reset/sunxi.h -header-test- += linux/rtc/m48t59.h -header-test- += linux/rtc/rtc-omap.h -header-test- += linux/rtc/sirfsoc_rtciobrg.h -header-test- += linux/rwlock.h -header-test- += linux/rwlock_types.h -header-test- += linux/scc.h -header-test- += linux/sched/deadline.h -header-test- += linux/sched/smt.h -header-test- += linux/sched/sysctl.h -header-test- += linux/sched_clock.h -header-test- += linux/scpi_protocol.h -header-test- += linux/scx200_gpio.h -header-test- += linux/seccomp.h -header-test- += linux/sed-opal.h -header-test- += linux/seg6_iptunnel.h -header-test- += linux/selection.h -header-test- += linux/set_memory.h -header-test- += linux/shrinker.h -header-test- += linux/sirfsoc_dma.h -header-test- += linux/skb_array.h -header-test- += linux/slab_def.h -header-test- += linux/slub_def.h -header-test- += linux/sm501.h -header-test- += linux/smc91x.h -header-test- += linux/static_key.h -header-test- += linux/soc/actions/owl-sps.h -header-test- += 
linux/soc/amlogic/meson-canvas.h -header-test- += linux/soc/brcmstb/brcmstb.h -header-test- += linux/soc/ixp4xx/npe.h -header-test- += linux/soc/mediatek/infracfg.h -header-test- += linux/soc/qcom/smd-rpm.h -header-test- += linux/soc/qcom/smem.h -header-test- += linux/soc/qcom/smem_state.h -header-test- += linux/soc/qcom/wcnss_ctrl.h -header-test- += linux/soc/renesas/rcar-rst.h -header-test- += linux/soc/samsung/exynos-pmu.h -header-test- += linux/soc/sunxi/sunxi_sram.h -header-test- += linux/soc/ti/ti-msgmgr.h -header-test- += linux/soc/ti/ti_sci_inta_msi.h -header-test- += linux/soc/ti/ti_sci_protocol.h -header-test- += linux/soundwire/sdw.h -header-test- += linux/soundwire/sdw_intel.h -header-test- += linux/soundwire/sdw_type.h -header-test- += linux/spi/ad7877.h -header-test- += linux/spi/ads7846.h -header-test- += linux/spi/at86rf230.h -header-test- += linux/spi/ds1305.h -header-test- += linux/spi/libertas_spi.h -header-test- += linux/spi/lms283gf05.h -header-test- += linux/spi/max7301.h -header-test- += linux/spi/mcp23s08.h -header-test- += linux/spi/rspi.h -header-test- += linux/spi/s3c24xx.h -header-test- += linux/spi/sh_msiof.h -header-test- += linux/spi/spi-fsl-dspi.h -header-test- += linux/spi/spi_bitbang.h -header-test- += linux/spi/spi_gpio.h -header-test- += linux/spi/xilinx_spi.h -header-test- += linux/spinlock_api_smp.h -header-test- += linux/spinlock_api_up.h -header-test- += linux/spinlock_types.h -header-test- += linux/splice.h -header-test- += linux/sram.h -header-test- += linux/srcutiny.h -header-test- += linux/srcutree.h -header-test- += linux/ssb/ssb_driver_chipcommon.h -header-test- += linux/ssb/ssb_driver_extif.h -header-test- += linux/ssb/ssb_driver_mips.h -header-test- += linux/ssb/ssb_driver_pci.h -header-test- += linux/ssbi.h -header-test- += linux/stackdepot.h -header-test- += linux/stmp3xxx_rtc_wdt.h -header-test- += linux/string_helpers.h -header-test- += linux/sungem_phy.h -header-test- += linux/sunrpc/msg_prot.h -header-test- += linux/sunrpc/rpc_pipe_fs.h -header-test- += linux/sunrpc/xprtmultipath.h -header-test- += linux/sunrpc/xprtsock.h -header-test- += linux/sunxi-rsb.h -header-test- += linux/svga.h -header-test- += linux/sw842.h -header-test- += linux/swapfile.h -header-test- += linux/swapops.h -header-test- += linux/swiotlb.h -header-test- += linux/sysv_fs.h -header-test- += linux/t10-pi.h -header-test- += linux/task_io_accounting.h -header-test- += linux/tick.h -header-test- += linux/timb_dma.h -header-test- += linux/timekeeping.h -header-test- += linux/timekeeping32.h -header-test- += linux/ts-nbus.h -header-test- += linux/tsacct_kern.h -header-test- += linux/tty_flip.h -header-test- += linux/tty_ldisc.h -header-test- += linux/ucb1400.h -header-test- += linux/usb/association.h -header-test- += linux/usb/cdc-wdm.h -header-test- += linux/usb/cdc_ncm.h -header-test- += linux/usb/ezusb.h -header-test- += linux/usb/gadget_configfs.h -header-test- += linux/usb/gpio_vbus.h -header-test- += linux/usb/hcd.h -header-test- += linux/usb/iowarrior.h -header-test- += linux/usb/irda.h -header-test- += linux/usb/isp116x.h -header-test- += linux/usb/isp1362.h -header-test- += linux/usb/musb.h -header-test- += linux/usb/net2280.h -header-test- += linux/usb/ohci_pdriver.h -header-test- += linux/usb/otg-fsm.h -header-test- += linux/usb/pd_ado.h -header-test- += linux/usb/r8a66597.h -header-test- += linux/usb/rndis_host.h -header-test- += linux/usb/serial.h -header-test- += linux/usb/sl811.h -header-test- += linux/usb/storage.h -header-test- += linux/usb/uas.h 
-header-test- += linux/usb/usb338x.h -header-test- += linux/usb/usbnet.h -header-test- += linux/usb/wusb-wa.h -header-test- += linux/usb/xhci-dbgp.h -header-test- += linux/usb_usual.h -header-test- += linux/user-return-notifier.h -header-test- += linux/userfaultfd_k.h -header-test- += linux/verification.h -header-test- += linux/vgaarb.h -header-test- += linux/via_core.h -header-test- += linux/via_i2c.h -header-test- += linux/virtio_byteorder.h -header-test- += linux/virtio_ring.h -header-test- += linux/visorbus.h -header-test- += linux/vme.h -header-test- += linux/vmstat.h -header-test- += linux/vmw_vmci_api.h -header-test- += linux/vmw_vmci_defs.h -header-test- += linux/vringh.h -header-test- += linux/vt_buffer.h -header-test- += linux/zorro.h -header-test- += linux/zpool.h -header-test- += math-emu/double.h -header-test- += math-emu/op-common.h -header-test- += math-emu/quad.h -header-test- += math-emu/single.h -header-test- += math-emu/soft-fp.h -header-test- += media/davinci/dm355_ccdc.h -header-test- += media/davinci/dm644x_ccdc.h -header-test- += media/davinci/isif.h -header-test- += media/davinci/vpbe_osd.h -header-test- += media/davinci/vpbe_types.h -header-test- += media/davinci/vpif_types.h -header-test- += media/demux.h -header-test- += media/drv-intf/soc_mediabus.h -header-test- += media/dvb_net.h -header-test- += media/fwht-ctrls.h -header-test- += media/i2c/ad9389b.h -header-test- += media/i2c/adv7343.h -header-test- += media/i2c/adv7511.h -header-test- += media/i2c/adv7842.h -header-test- += media/i2c/m5mols.h -header-test- += media/i2c/mt9m032.h -header-test- += media/i2c/mt9t112.h -header-test- += media/i2c/mt9v032.h -header-test- += media/i2c/ov2659.h -header-test- += media/i2c/ov7670.h -header-test- += media/i2c/rj54n1cb0c.h -header-test- += media/i2c/saa6588.h -header-test- += media/i2c/saa7115.h -header-test- += media/i2c/sr030pc30.h -header-test- += media/i2c/tc358743.h -header-test- += media/i2c/tda1997x.h -header-test- += media/i2c/ths7303.h -header-test- += media/i2c/tvaudio.h -header-test- += media/i2c/tvp514x.h -header-test- += media/i2c/tvp7002.h -header-test- += media/i2c/wm8775.h -header-test- += media/imx.h -header-test- += media/media-dev-allocator.h -header-test- += media/mpeg2-ctrls.h -header-test- += media/rcar-fcp.h -header-test- += media/tuner-types.h -header-test- += media/tveeprom.h -header-test- += media/v4l2-flash-led-class.h -header-test- += misc/altera.h -header-test- += misc/cxl-base.h -header-test- += misc/cxllib.h -header-test- += net/9p/9p.h -header-test- += net/9p/client.h -header-test- += net/9p/transport.h -header-test- += net/af_vsock.h -header-test- += net/ax88796.h -header-test- += net/bluetooth/hci.h -header-test- += net/bluetooth/hci_core.h -header-test- += net/bluetooth/hci_mon.h -header-test- += net/bluetooth/hci_sock.h -header-test- += net/bluetooth/l2cap.h -header-test- += net/bluetooth/mgmt.h -header-test- += net/bluetooth/rfcomm.h -header-test- += net/bluetooth/sco.h -header-test- += net/bond_options.h -header-test- += net/caif/cfsrvl.h -header-test- += net/codel_impl.h -header-test- += net/codel_qdisc.h -header-test- += net/compat.h -header-test- += net/datalink.h -header-test- += net/dcbevent.h -header-test- += net/dcbnl.h -header-test- += net/dn_dev.h -header-test- += net/dn_fib.h -header-test- += net/dn_neigh.h -header-test- += net/dn_nsp.h -header-test- += net/dn_route.h -header-test- += net/erspan.h -header-test- += net/esp.h -header-test- += net/ethoc.h -header-test- += net/firewire.h -header-test- += net/flow_offload.h 
-header-test- += net/fq.h -header-test- += net/fq_impl.h -header-test- += net/garp.h -header-test- += net/gtp.h -header-test- += net/gue.h -header-test- += net/hwbm.h -header-test- += net/ila.h -header-test- += net/inet6_connection_sock.h -header-test- += net/inet_common.h -header-test- += net/inet_frag.h -header-test- += net/ip6_route.h -header-test- += net/ip_vs.h -header-test- += net/ipcomp.h -header-test- += net/ipconfig.h -header-test- += net/iucv/af_iucv.h -header-test- += net/iucv/iucv.h -header-test- += net/lapb.h -header-test- += net/llc_c_ac.h -header-test- += net/llc_c_st.h -header-test- += net/llc_s_ac.h -header-test- += net/llc_s_ev.h -header-test- += net/llc_s_st.h -header-test- += net/mpls_iptunnel.h -header-test- += net/mrp.h -header-test- += net/ncsi.h -header-test- += net/netevent.h -header-test- += net/netns/can.h -header-test- += net/netns/generic.h -header-test- += net/netns/ieee802154_6lowpan.h -header-test- += net/netns/ipv4.h -header-test- += net/netns/ipv6.h -header-test- += net/netns/mpls.h -header-test- += net/netns/nftables.h -header-test- += net/netns/sctp.h -header-test- += net/netrom.h -header-test- += net/p8022.h -header-test- += net/phonet/pep.h -header-test- += net/phonet/phonet.h -header-test- += net/phonet/pn_dev.h -header-test- += net/pptp.h -header-test- += net/psample.h -header-test- += net/psnap.h -header-test- += net/regulatory.h -header-test- += net/rose.h -header-test- += net/sctp/auth.h -header-test- += net/sctp/stream_interleave.h -header-test- += net/sctp/stream_sched.h -header-test- += net/sctp/tsnmap.h -header-test- += net/sctp/ulpevent.h -header-test- += net/sctp/ulpqueue.h -header-test- += net/secure_seq.h -header-test- += net/smc.h -header-test- += net/stp.h -header-test- += net/transp_v6.h -header-test- += net/tun_proto.h -header-test- += net/udplite.h -header-test- += net/xdp.h -header-test- += net/xdp_priv.h -header-test- += pcmcia/cistpl.h -header-test- += pcmcia/ds.h -header-test- += rdma/tid_rdma_defs.h -header-test- += scsi/fc/fc_encaps.h -header-test- += scsi/fc/fc_fc2.h -header-test- += scsi/fc/fc_fcoe.h -header-test- += scsi/fc/fc_fip.h -header-test- += scsi/fc_encode.h -header-test- += scsi/fc_frame.h -header-test- += scsi/iser.h -header-test- += scsi/libfc.h -header-test- += scsi/libfcoe.h -header-test- += scsi/libsas.h -header-test- += scsi/sas_ata.h -header-test- += scsi/scsi_cmnd.h -header-test- += scsi/scsi_dbg.h -header-test- += scsi/scsi_device.h -header-test- += scsi/scsi_dh.h -header-test- += scsi/scsi_eh.h -header-test- += scsi/scsi_host.h -header-test- += scsi/scsi_ioctl.h -header-test- += scsi/scsi_request.h -header-test- += scsi/scsi_tcq.h -header-test- += scsi/scsi_transport.h -header-test- += scsi/scsi_transport_fc.h -header-test- += scsi/scsi_transport_sas.h -header-test- += scsi/scsi_transport_spi.h -header-test- += scsi/scsi_transport_srp.h -header-test- += scsi/scsicam.h -header-test- += scsi/sg.h -header-test- += soc/arc/aux.h -header-test- += soc/arc/mcip.h -header-test- += soc/arc/timers.h -header-test- += soc/brcmstb/common.h -header-test- += soc/fsl/bman.h -header-test- += soc/fsl/qe/qe.h -header-test- += soc/fsl/qe/qe_ic.h -header-test- += soc/fsl/qe/qe_tdm.h -header-test- += soc/fsl/qe/ucc.h -header-test- += soc/fsl/qe/ucc_fast.h -header-test- += soc/fsl/qe/ucc_slow.h -header-test- += soc/fsl/qman.h -header-test- += soc/nps/common.h -header-test-$(CONFIG_ARC) += soc/nps/mtm.h -header-test- += soc/qcom/cmd-db.h -header-test- += soc/qcom/rpmh.h -header-test- += soc/qcom/tcs.h -header-test- += 
soc/tegra/ahb.h -header-test- += soc/tegra/bpmp-abi.h -header-test- += soc/tegra/common.h -header-test- += soc/tegra/flowctrl.h -header-test- += soc/tegra/fuse.h -header-test- += soc/tegra/mc.h -header-test- += sound/ac97/compat.h -header-test- += sound/aci.h -header-test- += sound/ad1843.h -header-test- += sound/adau1373.h -header-test- += sound/ak4113.h -header-test- += sound/ak4114.h -header-test- += sound/ak4117.h -header-test- += sound/cs35l33.h -header-test- += sound/cs35l34.h -header-test- += sound/cs35l35.h -header-test- += sound/cs35l36.h -header-test- += sound/cs4271.h -header-test- += sound/cs42l52.h -header-test- += sound/cs8427.h -header-test- += sound/da7218.h -header-test- += sound/da7219-aad.h -header-test- += sound/da7219.h -header-test- += sound/da9055.h -header-test- += sound/emu8000.h -header-test- += sound/emux_synth.h -header-test- += sound/hda_component.h -header-test- += sound/hda_hwdep.h -header-test- += sound/hda_i915.h -header-test- += sound/hwdep.h -header-test- += sound/i2c.h -header-test- += sound/l3.h -header-test- += sound/max98088.h -header-test- += sound/max98095.h -header-test- += sound/mixer_oss.h -header-test- += sound/omap-hdmi-audio.h -header-test- += sound/pcm_drm_eld.h -header-test- += sound/pcm_iec958.h -header-test- += sound/pcm_oss.h -header-test- += sound/pxa2xx-lib.h -header-test- += sound/rt286.h -header-test- += sound/rt298.h -header-test- += sound/rt5645.h -header-test- += sound/rt5659.h -header-test- += sound/rt5660.h -header-test- += sound/rt5665.h -header-test- += sound/rt5670.h -header-test- += sound/s3c24xx_uda134x.h -header-test- += sound/seq_device.h -header-test- += sound/seq_kernel.h -header-test- += sound/seq_midi_emul.h -header-test- += sound/seq_oss.h -header-test- += sound/soc-acpi-intel-match.h -header-test- += sound/soc-dai.h -header-test- += sound/soc-dapm.h -header-test- += sound/soc-dpcm.h -header-test- += sound/sof/control.h -header-test- += sound/sof/dai-intel.h -header-test- += sound/sof/dai.h -header-test- += sound/sof/header.h -header-test- += sound/sof/info.h -header-test- += sound/sof/pm.h -header-test- += sound/sof/stream.h -header-test- += sound/sof/topology.h -header-test- += sound/sof/trace.h -header-test- += sound/sof/xtensa.h -header-test- += sound/spear_spdif.h -header-test- += sound/sta32x.h -header-test- += sound/sta350.h -header-test- += sound/tea6330t.h -header-test- += sound/tlv320aic32x4.h -header-test- += sound/tlv320dac33-plat.h -header-test- += sound/uda134x.h -header-test- += sound/wavefront.h -header-test- += sound/wm8903.h -header-test- += sound/wm8904.h -header-test- += sound/wm8960.h -header-test- += sound/wm8962.h -header-test- += sound/wm8993.h -header-test- += sound/wm8996.h -header-test- += sound/wm9081.h -header-test- += sound/wm9090.h -header-test- += target/iscsi/iscsi_target_stat.h -header-test- += trace/bpf_probe.h -header-test- += trace/events/9p.h -header-test- += trace/events/afs.h -header-test- += trace/events/asoc.h -header-test- += trace/events/bcache.h -header-test- += trace/events/block.h -header-test- += trace/events/cachefiles.h -header-test- += trace/events/cgroup.h -header-test- += trace/events/clk.h -header-test- += trace/events/cma.h -header-test- += trace/events/ext4.h -header-test- += trace/events/f2fs.h -header-test- += trace/events/fs_dax.h -header-test- += trace/events/fscache.h -header-test- += trace/events/fsi.h -header-test- += trace/events/fsi_master_ast_cf.h -header-test- += trace/events/fsi_master_gpio.h -header-test- += trace/events/huge_memory.h -header-test- 
+= trace/events/ib_mad.h -header-test- += trace/events/ib_umad.h -header-test- += trace/events/iscsi.h -header-test- += trace/events/jbd2.h -header-test- += trace/events/kvm.h -header-test- += trace/events/kyber.h -header-test- += trace/events/libata.h -header-test- += trace/events/mce.h -header-test- += trace/events/mdio.h -header-test- += trace/events/migrate.h -header-test- += trace/events/mmflags.h -header-test- += trace/events/nbd.h -header-test- += trace/events/nilfs2.h -header-test- += trace/events/pwc.h -header-test- += trace/events/rdma.h -header-test- += trace/events/rpcgss.h -header-test- += trace/events/rpcrdma.h -header-test- += trace/events/rxrpc.h -header-test- += trace/events/scsi.h -header-test- += trace/events/siox.h -header-test- += trace/events/spi.h -header-test- += trace/events/swiotlb.h -header-test- += trace/events/syscalls.h -header-test- += trace/events/target.h -header-test- += trace/events/thermal_power_allocator.h -header-test- += trace/events/timer.h -header-test- += trace/events/wbt.h -header-test- += trace/events/xen.h -header-test- += trace/perf.h -header-test- += trace/trace_events.h -header-test- += uapi/drm/vmwgfx_drm.h -header-test- += uapi/linux/a.out.h -header-test- += uapi/linux/coda.h -header-test- += uapi/linux/coda_psdev.h -header-test- += uapi/linux/errqueue.h -header-test- += uapi/linux/eventpoll.h -header-test- += uapi/linux/hdlc/ioctl.h -header-test- += uapi/linux/input.h -header-test- += uapi/linux/kvm.h -header-test- += uapi/linux/kvm_para.h -header-test- += uapi/linux/lightnvm.h -header-test- += uapi/linux/mic_common.h -header-test- += uapi/linux/mman.h -header-test- += uapi/linux/nilfs2_ondisk.h -header-test- += uapi/linux/patchkey.h -header-test- += uapi/linux/ptrace.h -header-test- += uapi/linux/scc.h -header-test- += uapi/linux/seg6_iptunnel.h -header-test- += uapi/linux/smc_diag.h -header-test- += uapi/linux/timex.h -header-test- += uapi/linux/videodev2.h -header-test- += uapi/scsi/scsi_bsg_fc.h -header-test- += uapi/sound/asound.h -header-test- += uapi/sound/sof/eq.h -header-test- += uapi/sound/sof/fw.h -header-test- += uapi/sound/sof/header.h -header-test- += uapi/sound/sof/manifest.h -header-test- += uapi/sound/sof/trace.h -header-test- += uapi/xen/evtchn.h -header-test- += uapi/xen/gntdev.h -header-test- += uapi/xen/privcmd.h -header-test- += vdso/vsyscall.h -header-test- += video/broadsheetfb.h -header-test- += video/cvisionppc.h -header-test- += video/gbe.h -header-test- += video/kyro.h -header-test- += video/maxinefb.h -header-test- += video/metronomefb.h -header-test- += video/neomagic.h -header-test- += video/of_display_timing.h -header-test- += video/omapvrfb.h -header-test- += video/s1d13xxxfb.h -header-test- += video/sstfb.h -header-test- += video/tgafb.h -header-test- += video/udlfb.h -header-test- += video/uvesafb.h -header-test- += video/vga.h -header-test- += video/w100fb.h -header-test- += xen/acpi.h -header-test- += xen/arm/hypercall.h -header-test- += xen/arm/page-coherent.h -header-test- += xen/arm/page.h -header-test- += xen/balloon.h -header-test- += xen/events.h -header-test- += xen/features.h -header-test- += xen/grant_table.h -header-test- += xen/hvm.h -header-test- += xen/interface/callback.h -header-test- += xen/interface/event_channel.h -header-test- += xen/interface/grant_table.h -header-test- += xen/interface/hvm/dm_op.h -header-test- += xen/interface/hvm/hvm_op.h -header-test- += xen/interface/hvm/hvm_vcpu.h -header-test- += xen/interface/hvm/params.h -header-test- += xen/interface/hvm/start_info.h 
-header-test- += xen/interface/io/9pfs.h -header-test- += xen/interface/io/blkif.h -header-test- += xen/interface/io/console.h -header-test- += xen/interface/io/displif.h -header-test- += xen/interface/io/fbif.h -header-test- += xen/interface/io/kbdif.h -header-test- += xen/interface/io/netif.h -header-test- += xen/interface/io/pciif.h -header-test- += xen/interface/io/protocols.h -header-test- += xen/interface/io/pvcalls.h -header-test- += xen/interface/io/ring.h -header-test- += xen/interface/io/sndif.h -header-test- += xen/interface/io/tpmif.h -header-test- += xen/interface/io/vscsiif.h -header-test- += xen/interface/io/xs_wire.h -header-test- += xen/interface/memory.h -header-test- += xen/interface/nmi.h -header-test- += xen/interface/physdev.h -header-test- += xen/interface/platform.h -header-test- += xen/interface/sched.h -header-test- += xen/interface/vcpu.h -header-test- += xen/interface/version.h -header-test- += xen/interface/xen-mca.h -header-test- += xen/interface/xen.h -header-test- += xen/interface/xenpmu.h -header-test- += xen/mem-reservation.h -header-test- += xen/page.h -header-test- += xen/platform_pci.h -header-test- += xen/swiotlb-xen.h -header-test- += xen/xen-front-pgdir-shbuf.h -header-test- += xen/xen-ops.h -header-test- += xen/xen.h -header-test- += xen/xenbus.h - -# Do not include directly -header-test- += linux/compiler-clang.h -header-test- += linux/compiler-gcc.h -header-test- += linux/patchkey.h -header-test- += linux/rwlock_api_smp.h -header-test- += linux/spinlock_types_up.h -header-test- += linux/spinlock_up.h -header-test- += linux/wimax/debug.h -header-test- += rdma/uverbs_named_ioctl.h - -# asm-generic/*.h is used by asm/*.h, and should not be included directly -header-test- += asm-generic/% uapi/asm-generic/% - -# Timestamp files touched by Kconfig -header-test- += config/% - -# Timestamp files touched by scripts/adjust_autoksyms.sh -header-test- += ksym/% - -# You could compile-test these, but maybe not so useful... -header-test- += dt-bindings/% - -# Do not test generated headers. Stale headers are often left over when you -# traverse the git history without cleaning. 
-header-test- += generated/% - -# The rest are compile-tested -header-test-pattern-y += */*.h */*/*.h */*/*/*.h */*/*/*/*.h diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 175f7b40c585..3f6fddeb7519 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -78,9 +78,6 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, bool acpi_dev_found(const char *hid); bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); -struct acpi_device * -acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); - #ifdef CONFIG_ACPI #include @@ -683,6 +680,9 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); } +struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); + static inline void acpi_dev_put(struct acpi_device *adev) { put_device(&adev->dev); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index e5e041413581..4010c42e40bd 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -748,6 +748,8 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_gpe_device(u32 gpe_index, diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 2f3f28c7cea3..9373662cdb44 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -532,11 +532,12 @@ typedef u64 acpi_integer; strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE) /* - * Algorithm to obtain access bit width. + * Algorithm to obtain access bit or byte width. * Can be used with access_width of struct acpi_generic_address and access_size of * struct acpi_resource_generic_register. */ #define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
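/*
 * Worked example (illustration only, not part of the patch): for the
 * encoded access sizes 1..4, ACPI_ACCESS_BIT_WIDTH() evaluates to 8, 16,
 * 32 and 64 bits, and ACPI_ACCESS_BYTE_WIDTH() to the matching 1, 2, 4
 * and 8 bytes, i.e. the byte width is the bit width divided by 8 for the
 * same encoded size.
 */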
/******************************************************************************* * diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index a950a22c4890..cac7404b2bdd 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -11,71 +11,102 @@ * The cache doesn't need to be flushed when TLB entries change when * the cache is mapped to physical memory, not virtual memory */ +#ifndef flush_cache_all static inline void flush_cache_all(void) { } +#endif +#ifndef flush_cache_mm static inline void flush_cache_mm(struct mm_struct *mm) { } +#endif +#ifndef flush_cache_dup_mm static inline void flush_cache_dup_mm(struct mm_struct *mm) { } +#endif +#ifndef flush_cache_range static inline void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +#endif +#ifndef flush_cache_page static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { } +#endif +#ifndef flush_dcache_page static inline void flush_dcache_page(struct page *page) { } +#endif +#ifndef flush_dcache_mmap_lock static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } +#endif +#ifndef flush_dcache_mmap_unlock static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } +#endif +#ifndef flush_icache_range static inline void flush_icache_range(unsigned long start, unsigned long end) { } +#endif +#ifndef flush_icache_page static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { } +#endif +#ifndef flush_icache_user_range static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { } +#endif +#ifndef flush_cache_vmap static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } +#endif +#ifndef flush_cache_vunmap static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { } +#endif -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +#ifndef copy_to_user_page +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ flush_icache_user_range(vma, page, vaddr, len); \ } while (0) +#endif + +#ifndef copy_from_user_page #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) +#endif #endif /* __ASM_CACHEFLUSH_H */
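/*
 * Sketch of the intended use of the new #ifndef guards (an editorial
 * assumption based on the hunk above, not taken from the patch): an
 * architecture that implements one primitive itself now masks only that
 * stub and inherits the remaining no-ops, e.g. in a hypothetical
 * arch/foo/include/asm/cacheflush.h:
 */
extern void flush_dcache_page(struct page *page);	/* arch implementation */
#define flush_dcache_page flush_dcache_page		/* suppress the generic stub */
#include <asm-generic/cacheflush.h>			/* keep the other no-ops */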
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 04c0644006fd..c716ea81e653 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -137,13 +137,6 @@ * When used, an architecture is expected to provide __tlb_remove_table() * which does the actual freeing of these pages. * - * HAVE_RCU_TABLE_NO_INVALIDATE - * - * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before - * freeing the page-table pages. This can be avoided if you use - * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux - * page-tables natively. - * * MMU_GATHER_NO_RANGE * * Use this if your architecture lacks an efficient flush_tlb_range(). @@ -189,8 +182,23 @@ struct mmu_table_batch { extern void tlb_remove_table(struct mmu_gather *tlb, void *table); +/* + * This allows an architecture that does not use the linux page-tables for + * hardware to skip the TLBI when freeing page tables. + */ #ifndef tlb_needs_table_invalidate +#define tlb_needs_table_invalidate() (true) #endif +#else + +#ifdef tlb_needs_table_invalidate +#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE +#endif + +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ + + #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER /* * If we can't allocate a page to make a big batch of page pointers diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h index ce4103208619..cec543d9e87b 100644 --- a/include/asm-generic/vdso/vsyscall.h +++ b/include/asm-generic/vdso/vsyscall.h @@ -12,9 +12,9 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void) #endif /* __arch_get_k_vdso_data */ #ifndef __arch_update_vdso_data -static __always_inline int __arch_update_vdso_data(void) +static __always_inline bool __arch_update_vdso_data(void) { - return 0; + return true; } #endif /* __arch_update_vdso_data */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dae64600ccbf..a9c4e4721434 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -110,17 +110,17 @@ #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD -#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY -#define MCOUNT_REC() . = ALIGN(8); \ - __start_mcount_loc = .; \ - KEEP(*(__patchable_function_entries)) \ - __stop_mcount_loc = .; -#else +/* + * The ftrace call sites are logged to a section whose name depends on the + * compiler option used. A given kernel image will only use one, AKA + * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header + * dependencies for FTRACE_CALLSITE_SECTION's definition. + */ #define MCOUNT_REC() . = ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__mcount_loc)) \ + KEEP(*(__patchable_function_entries)) \ __stop_mcount_loc = .; -#endif #else #define MCOUNT_REC() #endif diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 734b6f7081b8..3175dfeaed2c 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -205,19 +205,6 @@ static inline unsigned int crypto_skcipher_alg_max_keysize( return alg->max_keysize; } -static inline unsigned int crypto_skcipher_alg_chunksize( - struct skcipher_alg *alg) -{ - if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER) - return alg->base.cra_blocksize; - - if (alg->base.cra_ablkcipher.encrypt) - return alg->base.cra_blocksize; - - return alg->chunksize; -} - static inline unsigned int crypto_skcipher_alg_walksize( struct skcipher_alg *alg) { @@ -231,23 +218,6 @@ static inline unsigned int crypto_skcipher_alg_walksize( return alg->walksize; } -/** - * crypto_skcipher_chunksize() - obtain chunk size - * @tfm: cipher handle - * - * The block size is set to one for ciphers such as CTR. However, - * you still need to provide incremental updates in multiples of - * the underlying block size as the IV does not have sub-block - * granularity. This is known in this API as the chunk size.
- * - * Return: chunk size in bytes - */ -static inline unsigned int crypto_skcipher_chunksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); -} - /** * crypto_skcipher_walksize() - obtain walk size * @tfm: cipher handle diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 37c164234d97..aada87916918 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -304,6 +304,36 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + static inline unsigned int crypto_sync_skcipher_blocksize( struct crypto_sync_skcipher *tfm) { diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 2ba6253ea6d3..fc349204a71b 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -334,7 +334,7 @@ struct drm_dp_resource_status_notify { struct drm_dp_query_payload_ack_reply { u8 port_number; - u8 allocated_pbn; + u16 allocated_pbn; }; struct drm_dp_sideband_msg_req_body { diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h index d7b201652f4c..0c7c750fc2c4 100644 --- a/include/dt-bindings/clock/imx8mn-clock.h +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -122,8 +122,8 @@ #define IMX8MN_CLK_I2C1 105 #define IMX8MN_CLK_I2C2 106 #define IMX8MN_CLK_I2C3 107 -#define IMX8MN_CLK_I2C4 118 -#define IMX8MN_CLK_UART1 119 +#define IMX8MN_CLK_I2C4 108 +#define IMX8MN_CLK_UART1 109 #define IMX8MN_CLK_UART2 110 #define IMX8MN_CLK_UART3 111 #define IMX8MN_CLK_UART4 112 diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h index c614438bcbdb..fbc524a900da 100644 --- a/include/dt-bindings/reset/amlogic,meson8b-reset.h +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h @@ -46,9 +46,9 @@ #define RESET_VD_RMEM 64 #define RESET_AUDIN 65 #define RESET_DBLK 66 -#define RESET_PIC_DC 66 -#define RESET_PSC 66 -#define RESET_NAND 66 +#define RESET_PIC_DC 67 +#define RESET_PSC 68 +#define RESET_NAND 69 #define RESET_GE2D 70 #define RESET_PARSER_REG 71 #define RESET_PARSER_FETCH 72 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 8b4e516bac00..ce29a014e591 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -473,6 +473,11 @@ void __init acpi_nvs_nosave_s3(void); void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ +int acpi_register_wakeup_handler( + int wake_irq, bool (*wakeup)(void *context), void *context); +void acpi_unregister_wakeup_handler( + bool (*wakeup)(void *context), void *context); + struct acpi_osc_context { char *uuid_str; 
/* UUID string */ int rev; diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 6782f0d45ebe..49e5383d4222 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -19,6 +19,8 @@ struct ahci_host_priv; struct platform_device; struct scsi_host_template; +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 97967ce06de3..f88197c1ffc2 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) (1 << WB_async_congested)); } +extern const char *bdi_unknown_name; + +static inline const char *bdi_dev_name(struct backing_dev_info *bdi) +{ + if (!bdi || !bdi->dev) + return bdi_unknown_name; + return dev_name(bdi->dev); +} + #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 3cdb84cdc488..853d92ceee64 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, gfp_t); extern int bio_uncopy_user(struct bio *); void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); +void bio_truncate(struct bio *bio, unsigned new_size); static inline void zero_fill_bio(struct bio *bio) { diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index bed9e43f9426..bca7b04eb1ab 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -42,6 +42,18 @@ enum blkg_rwstat_type { BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, }; +struct blkcg_dkstats { +#ifdef CONFIG_SMP + struct disk_stats __percpu *dkstats; + struct list_head alloc_node; +#else + struct disk_stats dkstats; +#endif + struct hd_struct *part; + struct list_head list_node; + struct rcu_head rcu_head; +}; + struct blkcg_gq; struct blkcg { @@ -59,6 +71,10 @@ struct blkcg { struct list_head cgwb_list; refcount_t cgwb_refcnt; #endif + /* disk_stats for per blkcg */ + unsigned int dkstats_on; + struct list_head dkstats_list; + struct blkcg_dkstats *dkstats_hint; }; /* @@ -184,6 +200,60 @@ extern struct blkcg blkcg_root; extern struct cgroup_subsys_state * const blkcg_root_css; extern bool blkcg_debug_stats; +struct disk_stats *blkcg_dkstats_find(struct blkcg *blkcg, int cpu, + struct hd_struct *hd, int *alloc); +struct disk_stats *blkcg_dkstats_find_create(struct blkcg *blkcg, + int cpu, struct hd_struct *hd); + +#define __blkcg_part_stat_add(blkcg, cpu, part, field, addnd) \ +({ \ + struct disk_stats *ds; \ + ds = blkcg_dkstats_find_create((blkcg), (cpu), (part)); \ + if (ds) \ + ds->field += (addnd); \ +}) + +#define blkcg_part_stat_add(blkcg, cpu, part, field, addnd) do { \ + __blkcg_part_stat_add((blkcg), (cpu), (part), field, (addnd)); \ + if ((part)->partno) \ + __blkcg_part_stat_add((blkcg), (cpu), \ + &part_to_disk((part))->part0, \ + field, (addnd)); \ +} while (0) + +#define blkcg_part_stat_dec(blkcg, cpu, gendiskp, field) \ + blkcg_part_stat_add(blkcg, cpu, gendiskp, field, -1) +#define blkcg_part_stat_inc(blkcg, cpu, gendiskp, field) \ + blkcg_part_stat_add(blkcg, cpu, gendiskp, field, 1) +#define blkcg_part_stat_sub(blkcg, cpu, gendiskp, 
field, subnd) \ + blkcg_part_stat_add(blkcg, cpu, gendiskp, field, -subnd) + +#ifdef CONFIG_SMP +#define blkcg_part_stat_read(blkcg, part, field) \ +({ \ + typeof((part)->dkstats->field) res = 0; \ + unsigned int cpu; \ + for_each_possible_cpu(cpu) { \ + struct disk_stats *ds; \ + ds = blkcg_dkstats_find(blkcg, cpu, part, NULL); \ + if (!ds) \ + break; \ + res += ds->field; \ + } \ + res; \ +}) +#else +#define blkcg_part_stat_read(blkcg, part, field) \ +({ \ + typeof((part)->dkstats.field) res = 0; \ + struct disk_stats *ds; \ + ds = blkcg_dkstats_find(blkcg, 0 /* UP: single stats slot */, part, NULL); \ + if (ds) \ + res = ds->field; \ + res; \ +}) +#endif + struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, @@ -872,6 +942,12 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, #define blk_queue_for_each_rl(rl, q) \ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) +#define blkcg_part_stat_add(blkcg, cpu, part, field, addnd) do {} while(0) +#define blkcg_part_stat_dec(blkcg, cpu, gendiskp, field) do {} while(0) +#define blkcg_part_stat_inc(blkcg, cpu, gendiskp, field) do {} while(0) +#define blkcg_part_stat_sub(blkcg, cpu, gendiskp, field, subnd) do {} while(0) +#define blkcg_part_stat_read(blkcg, part, field) do {} while(0) + #endif /* CONFIG_BLOCK */ #endif /* CONFIG_BLK_CGROUP */ #endif /* _BLK_CGROUP_H */
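/*
 * Usage sketch (illustrative assumption, not part of the patch): charging
 * one completed read IO to a cgroup's per-blkcg disk stats with the
 * macros above; blkcg_account_read_io() is a hypothetical caller.
 */
static inline void blkcg_account_read_io(struct blkcg *blkcg,
					 struct hd_struct *part)
{
	int cpu = get_cpu();	/* the macros expect a per-cpu slot index */

	blkcg_part_stat_inc(blkcg, cpu, part, ios[STAT_READ]);
	put_cpu();
}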
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f3ea78b0c91c..b1c591a53c47 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -328,6 +328,7 @@ struct queue_limits { unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; + unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; @@ -337,8 +338,8 @@ struct queue_limits { unsigned int max_write_zeroes_sectors; unsigned int discard_granularity; unsigned int discard_alignment; + unsigned short nbd_ignore_blksize_set; - unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; @@ -531,7 +532,7 @@ struct request_queue { unsigned int sg_reserved_size; int node; #ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace *blk_trace; + struct blk_trace __rcu *blk_trace; struct mutex blk_trace_mutex; #endif /* @@ -683,7 +684,7 @@ static inline bool blk_account_rq(struct request *rq) static inline bool queue_is_mq(struct request_queue *q) { - return q->mq_ops; + return q && q->mq_ops; } static inline enum blk_zoned_model @@ -1080,7 +1081,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); -extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); @@ -1294,7 +1295,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned short queue_logical_block_size(const struct request_queue *q) +static inline unsigned queue_logical_block_size(const struct request_queue *q) { int retval = 512; @@ -1304,7 +1305,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue return retval; } -static inline unsigned short bdev_logical_block_size(struct block_device *bdev) +static inline unsigned int bdev_logical_block_size(struct block_device *bdev) { return queue_logical_block_size(bdev_get_queue(bdev)); } diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7bb2d8de9f30..3b6ff5902edc 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f **/ #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ do { \ - struct blk_trace *bt = (q)->blk_trace; \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ if (unlikely(bt)) \ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ + rcu_read_unlock(); \ } while (0) #define blk_add_trace_msg(q, fmt, ...) \ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) @@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f static inline bool blk_trace_note_message_enabled(struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; - if (likely(!bt)) - return false; - return bt->act_mask & BLK_TC_NOTIFY; + struct blk_trace *bt; + bool ret; + + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + ret = bt && (bt->act_mask & BLK_TC_NOTIFY); + rcu_read_unlock(); + return ret; } extern void blk_add_driver_data(struct request_queue *q, struct request *rq, diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 9b3c720a31b1..5e3d45525bd3 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -18,6 +18,7 @@ #include #include #include +#include #include /* @@ -91,6 +92,36 @@ struct can_priv { #define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static inline bool can_skb_headroom_valid(struct net_device *dev, + struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* perform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + } + + return true; +} + /* Drop a given socketbuffer if it does not contain a valid CAN frame.
*/ static inline bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) @@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev, } else goto inval_skb; + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + return false; inval_skb: diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index b9dbda1c26aa..5227a8ee17dd 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -25,6 +25,9 @@ #include #include +extern int sysctl_ceph_ignore_epoch_barrier; +extern unsigned int sysctl_ceph_epoch_barrier_ceiling; + /* * mount options */ @@ -36,8 +39,9 @@ #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ #define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ #define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ +#define CEPH_OPT_REQ_RESEND (1<<8) /* resend request if timeout */ -#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) +#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY | CEPH_OPT_REQ_RESEND) #define ceph_set_opt(client, opt) \ (client)->options->flags |= CEPH_OPT_##opt; @@ -142,6 +146,7 @@ struct ceph_client { struct dentry *debugfs_monmap; struct dentry *debugfs_osdmap; struct dentry *debugfs_options; + struct dentry *debugfs_req_resend; #endif }; diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index c4458dc6a757..8c816fe8e8d0 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -175,9 +175,10 @@ struct ceph_msg_data { #endif /* CONFIG_BLOCK */ struct ceph_bvec_iter bvec_pos; struct { - struct page **pages; /* NOT OWNER. */ + struct page **pages; size_t length; /* total # bytes */ unsigned int alignment; /* first page */ + bool own_pages; }; struct ceph_pagelist *pagelist; }; @@ -355,9 +356,10 @@ extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); extern void ceph_con_keepalive(struct ceph_connection *con); extern bool ceph_con_keepalive_expired(struct ceph_connection *con, unsigned long interval); +extern void ceph_con_fault_force(struct ceph_connection *con); -extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, - size_t length, size_t alignment); +void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, + size_t length, size_t alignment, bool own_pages); extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index e081b56f1c1d..5e601975745f 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs); #define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id together */ #define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */ +#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota, + will set FULL too */ +#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */ struct ceph_pg_pool_info { struct rb_node node; @@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); #endif diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 3eb0e55665b4..c004bced9b91 100644 --- a/include/linux/ceph/rados.h +++ 
b/include/linux/ceph/rados.h @@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s); /* * osd map flag bits */ -#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ -#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC), + not set since ~luminous */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC), + not set since ~luminous */ #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 3ba3e6da13a6..57577075d204 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -62,6 +62,7 @@ struct css_task_iter { struct list_head *mg_tasks_head; struct list_head *dying_tasks_head; + struct list_head *cur_tasks_head; struct css_set *cur_cset; struct css_set *cur_dcset; struct task_struct *cur_task; diff --git a/include/linux/compat.h b/include/linux/compat.h index 16dafd9f4b86..c4c389c7e1b4 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -410,8 +410,6 @@ struct compat_kexec_segment; struct compat_mq_attr; struct compat_msgbuf; -extern void compat_exit_robust_list(struct task_struct *curr); - #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) #define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 92d5fdc8154e..018dce868de6 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -148,6 +148,20 @@ struct cpufreq_policy { struct notifier_block nb_max; }; +/* + * Used for passing new cpufreq policy data to the cpufreq driver's ->verify() + * callback for sanitization. That callback is only expected to modify the min + * and max values, if necessary, and specifically it must not update the + * frequency table. 
+ */ +struct cpufreq_policy_data { + struct cpufreq_cpuinfo cpuinfo; + struct cpufreq_frequency_table *freq_table; + unsigned int cpu; + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ +}; + struct cpufreq_freqs { struct cpufreq_policy *policy; unsigned int old; @@ -201,8 +215,6 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu); void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); -int cpufreq_set_policy(struct cpufreq_policy *policy, - struct cpufreq_policy *new_policy); void refresh_frequency_limits(struct cpufreq_policy *policy); void cpufreq_update_policy(unsigned int cpu); void cpufreq_update_limits(unsigned int cpu); @@ -284,7 +296,7 @@ struct cpufreq_driver { /* needed by all drivers */ int (*init)(struct cpufreq_policy *policy); - int (*verify)(struct cpufreq_policy *policy); + int (*verify)(struct cpufreq_policy_data *policy); /* define one out of two */ int (*setpolicy)(struct cpufreq_policy *policy); @@ -415,8 +427,9 @@ static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv) (drv->flags & CPUFREQ_IS_COOLING_DEV); } -static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, - unsigned int min, unsigned int max) +static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy, + unsigned int min, + unsigned int max) { if (policy->min < min) policy->min = min; @@ -432,10 +445,10 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, } static inline void -cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy) +cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy) { cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + policy->cpuinfo.max_freq); } #ifdef CONFIG_CPU_FREQ @@ -513,6 +526,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, * CPUFREQ GOVERNORS * *********************************************************************/ +#define CPUFREQ_POLICY_UNKNOWN (0) /* * If (cpufreq_driver->target) exists, the ->governor decides what frequency * within the limits is used. 
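/*
 * Sketch (assumed driver code, not from the patch): with the new
 * struct cpufreq_policy_data, a driver's ->verify() callback typically
 * reduces to clamping the requested range with the
 * cpufreq_verify_within_cpu_limits() helper above; foo_cpufreq_verify()
 * is a hypothetical name.
 */
static int foo_cpufreq_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;	/* min/max now lie within the cpuinfo limits */
}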
If (cpufreq_driver->setpolicy> exists, these @@ -595,17 +609,6 @@ struct governor_attr { size_t count); }; -static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy) -{ - /* - * Allow remote callbacks if: - * - dvfs_possible_from_any_cpu flag is set - * - the local and remote CPUs share cpufreq policy - */ - return policy->dvfs_possible_from_any_cpu || - cpumask_test_cpu(smp_processor_id(), policy->cpus); -} - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -695,9 +698,9 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); -int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, +int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, struct cpufreq_frequency_table *table); -int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); +int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, unsigned int target_freq, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 068793a619ca..2d55cee638fc 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -59,6 +59,7 @@ enum cpuhp_state { CPUHP_IOMMU_INTEL_DEAD, CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, + CPUHP_PADATA_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index adf993a3bd58..6a18a97b76a8 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -3,8 +3,11 @@ #define _LINUX_DMA_DIRECT_H 1 #include +#include /* for min_low_pfn */ #include +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include #else @@ -24,11 +27,16 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr) static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { + dma_addr_t end = addr + size - 1; + if (!dev->dma_mask) return false; - return addr + size - 1 <= - min_not_zero(*dev->dma_mask, dev->bus_dma_mask); + if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn))) + return false; + + return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask); } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 4a1c4fca475a..4d450672b7d6 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, size_t size, int *ret); -void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle); +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle); int dma_release_from_global_coherent(int order, void *vaddr); int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, size_t size, int *ret); @@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, #define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) 
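/*
 * Caller sketch (an assumption mirroring how dma-direct uses the
 * dma_capable() check added above; foo_map() is hypothetical): translate
 * the physical address first, then map only if the device can reach it.
 */
static dma_addr_t foo_map(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (!dma_capable(dev, dma_addr, size))
		return DMA_MAPPING_ERROR;	/* caller falls back, e.g. to swiotlb */
	return dma_addr;
}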
-static inline void *dma_alloc_from_global_coherent(ssize_t size, +static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle) { return NULL; @@ -583,6 +583,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev) static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs) { + /* DMA must never operate on areas that might be remapped. */ + if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr), + "rejecting DMA map of vmalloc memory\n")) + return DMA_MAPPING_ERROR; debug_dma_map_single(dev, ptr, size); return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, attrs); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8fcdee1c0cf9..8013562751a5 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -674,6 +674,7 @@ struct dma_filter { * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api + * @owner: owner module (automatically set based on the provided dev) * @src_addr_widths: bit mask of src addr widths the device supports * Width is specified in bytes, e.g. for a device supporting * a width of 4 the mask should have BIT(4) set. @@ -737,6 +738,7 @@ struct dma_device { int dev_id; struct device *dev; + struct module *owner; u32 src_addr_widths; u32 dst_addr_widths; @@ -1364,8 +1366,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) { struct dma_slave_caps caps; + int ret; - dma_get_slave_caps(tx->chan, &caps); + ret = dma_get_slave_caps(tx->chan, &caps); + if (ret) + return ret; if (caps.descriptor_reuse) { tx->flags |= DMA_CTRL_REUSE; diff --git a/include/linux/dmar.h b/include/linux/dmar.h index a7cf3599d9a1..f397e52c2d9d 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -69,19 +69,23 @@ struct dmar_pci_notify_info { extern struct rw_semaphore dmar_global_lock; extern struct list_head dmar_drhd_units; -#define for_each_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) +#define for_each_drhd_unit(drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) #define for_each_active_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) \ if (drhd->ignored) {} else #define for_each_active_iommu(i, drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) \ if (i=drhd->iommu, drhd->ignored) {} else #define for_each_iommu(i, drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) \ if (i=drhd->iommu, 0) {} else static inline bool dmar_rcu_check(void) diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 0aa803c451a3..c620d9139c28 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb); - #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) -{ - return NULL; -} - 
#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index ffcc7724ca21..dc4fd8a6644d 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); +DECLARE_PER_CPU(int, eventfd_wake_count); + +static inline bool eventfd_signal_count(void) +{ + return this_cpu_read(eventfd_wake_count); +} + #else /* CONFIG_EVENTFD */ /* @@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, return -ENOSYS; } +static inline bool eventfd_signal_count(void) +{ + return false; +} + #endif #endif /* _LINUX_EVENTFD_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index e0d909d35763..391f31472b44 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -83,6 +83,7 @@ extern int sysctl_protected_symlinks; extern int sysctl_protected_hardlinks; extern int sysctl_protected_fifos; extern int sysctl_protected_regular; +extern unsigned int sysctl_nvidia_smi_trap; typedef __kernel_rwf_t rwf_t; @@ -698,6 +699,7 @@ struct inode { struct rcu_head i_rcu; }; atomic64_t i_version; + atomic64_t i_sequence; /* see futex */ atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; @@ -1727,6 +1729,13 @@ int vfs_mkobj(struct dentry *, umode_t, extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +#ifdef CONFIG_COMPAT +extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#else +#define compat_ptr_ioctl NULL +#endif + /* * VFS file helper functions. */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8a8cb3c401b2..9141f2263286 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -499,7 +499,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } /** * ftrace_make_nop - convert code into nop * @mod: module structure if called by module load initialization - * @rec: the mcount call site record + * @rec: the call site record (e.g. mcount/fentry) * @addr: the address that the call site should be calling * * This is a very sensitive operation and great care needs @@ -520,9 +520,38 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } extern int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr); + +/** + * ftrace_init_nop - initialize a nop call site + * @mod: module structure if called by module load initialization + * @rec: the call site record (e.g. mcount/fentry) + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should contain the contents created by + * the compiler + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. 
+ */ +#ifndef ftrace_init_nop +static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + return ftrace_make_nop(mod, rec, MCOUNT_ADDR); +} +#endif + /** * ftrace_make_call - convert a nop call site into a call to addr - * @rec: the mcount call site record + * @rec: the call site record (e.g. mcount/fentry) * @addr: the address that the call site should call * * This is a very sensitive operation and great care needs @@ -545,7 +574,7 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /** * ftrace_modify_call - convert from one addr to another (no nop) - * @rec: the mcount call site record + * @rec: the call site record (e.g. mcount/fentry) * @old_addr: the address expected to be currently called to * @addr: the address to change to * @@ -709,6 +738,11 @@ static inline unsigned long get_lock_parent_ip(void) #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); +#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY +#define FTRACE_CALLSITE_SECTION "__patchable_function_entries" +#else +#define FTRACE_CALLSITE_SECTION "__mcount_loc" +#endif #else static inline void ftrace_init(void) { } #endif diff --git a/include/linux/futex.h b/include/linux/futex.h index ccaef0097785..b70df27d7e85 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -2,7 +2,9 @@ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H +#include #include + #include struct inode; @@ -29,34 +31,57 @@ struct task_struct; union futex_key { struct { + u64 i_seq; unsigned long pgoff; - struct inode *inode; - int offset; + unsigned int offset; } shared; struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; unsigned long address; - struct mm_struct *mm; - int offset; + unsigned int offset; } private; struct { + u64 ptr; unsigned long word; - void *ptr; - int offset; + unsigned int offset; } both; }; -#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } } +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } #ifdef CONFIG_FUTEX -extern void exit_robust_list(struct task_struct *curr); +enum { + FUTEX_STATE_OK, + FUTEX_STATE_EXITING, + FUTEX_STATE_DEAD, +}; + +static inline void futex_init_task(struct task_struct *tsk) +{ + tsk->robust_list = NULL; +#ifdef CONFIG_COMPAT + tsk->compat_robust_list = NULL; +#endif + INIT_LIST_HEAD(&tsk->pi_state_list); + tsk->pi_state_cache = NULL; + tsk->futex_state = FUTEX_STATE_OK; + mutex_init(&tsk->futex_exit_mutex); +} + +void futex_exit_recursive(struct task_struct *tsk); +void futex_exit_release(struct task_struct *tsk); +void futex_exec_release(struct task_struct *tsk); long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); #else -static inline void exit_robust_list(struct task_struct *curr) -{ -} - +static inline void futex_init_task(struct task_struct *tsk) { } +static inline void futex_exit_recursive(struct task_struct *tsk) { } +static inline void futex_exit_release(struct task_struct *tsk) { } +static inline void futex_exec_release(struct task_struct *tsk) { } static inline long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) @@ -65,12 +90,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val, } #endif -#ifdef CONFIG_FUTEX_PI -extern void exit_pi_state_list(struct task_struct *curr); -#else -static inline void exit_pi_state_list(struct task_struct *curr) -{ -} -#endif - #endif diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 
8b5330dd5ac0..2f9f336b8a11 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -29,6 +29,7 @@ extern struct device_type part_type; extern struct kobject *block_depr; extern struct class block_class; +extern const struct device_type disk_type; enum { /* These three have identical behaviour; use the second one if DOS FDISK gets diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index b70af921c614..803bb63dd5ff 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); +void gpiod_toggle_active_low(struct gpio_desc *desc); int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); @@ -479,6 +480,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) return -ENOSYS; } +static inline void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); +} + static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ diff --git a/include/linux/hid.h b/include/linux/hid.h index cd41f209043f..875f71132b14 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -492,7 +492,7 @@ struct hid_report_enum { }; #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ -#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */ +#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */ #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ #define HID_OUTPUT_FIFO_SIZE 64 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 1b9a51a1bccb..1f98b52118f0 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -456,12 +456,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); extern bool hrtimer_active(const struct hrtimer *timer); -/* - * Helper function to check, whether the timer is on one of the queues +/** + * hrtimer_is_queued - check, whether the timer is on one of the queues + * @timer: Timer to check + * + * Returns: True if the timer is queued, false otherwise + * + * The function can be used lockless, but it gives only a current snapshot.
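+ *
+ * Illustrative lockless use (a sketch, not from this patch): the
+ * result may already be stale by the time it is acted upon, so a
+ * caller that needs a stable answer must re-check under the hrtimer
+ * base lock:
+ *
+ *	if (hrtimer_is_queued(timer))
+ *		hrtimer_cancel(timer);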
*/ -static inline int hrtimer_is_queued(struct hrtimer *timer) +static inline bool hrtimer_is_queued(struct hrtimer *timer) { - return timer->state & HRTIMER_STATE_ENQUEUED; + /* The READ_ONCE pairs with the update functions of timer->state */ + return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED); } /* diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 7d3f2ced92d1..73c66a3a33ae 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2102,14 +2102,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) { struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; u8 spr_len = sizeof(struct ieee80211_he_spr); - u32 he_spr_params; + u8 he_spr_params; /* Make sure the input is not NULL */ if (!he_spr_ie) return 0; /* Calc required length */ - he_spr_params = le32_to_cpu(he_spr->he_sr_control); + he_spr_params = he_spr->he_sr_control; if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) spr_len++; if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 76cf11e905e1..8a9792a6427a 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } +/* Prefer this version in TX path, instead of + * skb_reset_mac_header() + eth_hdr() + */ +static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb->data; +} + static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) { return (struct ethhdr *)skb_inner_mac_header(skb); diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 39faaaf843e1..c91cf2dee12a 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -2,15 +2,10 @@ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1 +#include #include -struct net; -struct sock; struct inet_hashinfo; -struct nlattr; -struct nlmsghdr; -struct sk_buff; -struct netlink_callback; struct inet_diag_handler { void (*dump)(struct sk_buff *skb, @@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk); +static inline size_t inet_diag_msg_attrs_size(void) +{ + return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ +#if IS_ENABLED(CONFIG_IPV6) + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ +#endif + + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4); /* INET_DIAG_CLASS_ID */ +} int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, struct user_namespace *user_ns, bool net_admin); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6d8bf4bdf240..1e5dad8b8e59 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -120,6 +120,8 @@ #define dmar_readq(a) readq(a) #define dmar_writeq(a,v) writeq(v,a) +#define dmar_readl(a) readl(a) +#define dmar_writel(a, v) writel(v, a) #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 94f047a8a845..d7c403d0dd27 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid) BUG(); } -static int intel_svm_is_pasid_valid(struct device *dev, int pasid) +static inline int 
intel_svm_is_pasid_valid(struct device *dev, int pasid) { return -EINVAL; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 29bac5345563..87ea30c5b39d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -308,6 +308,12 @@ struct iommu_ops { struct iommu_page_response *msg); unsigned long pgsize_bitmap; + +#ifdef CONFIG_SMMU_BYPASS_DEV +#ifndef __GENKSYMS__ + int (*device_domain_type)(struct device *dev, unsigned int *type); +#endif +#endif }; /** diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 4dc66157d872..deec18b8944a 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -224,10 +224,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd, * is called, and the lower layer must get the interface from that * call. */ -int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, - void *send_info, - struct device *dev, - unsigned char slave_addr); +int ipmi_add_smi(struct module *owner, + const struct ipmi_smi_handlers *handlers, + void *send_info, + struct device *dev, + unsigned char slave_addr); + +#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \ + ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr) /* * Remove a low-level interface from the IPMI driver. This will diff --git a/include/linux/irq.h b/include/linux/irq.h index fb301cf29148..f8755e5fcd74 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -209,6 +209,8 @@ struct irq_data { * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set * IRQD_CAN_RESERVE - Can use reservation mode + * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change + * required */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -231,6 +233,7 @@ enum { IRQD_SINGLE_TARGET = (1 << 24), IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), + IRQD_MSI_NOMASK_QUIRK = (1 << 27), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -390,6 +393,21 @@ static inline bool irqd_can_reserve(struct irq_data *d) return __irqd_to_state(d) & IRQD_CAN_RESERVE; } +static inline void irqd_set_msi_nomask_quirk(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK; +} + +static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK; +} + +static inline bool irqd_msi_nomask_quirk(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 583e7abd07f9..e85f714a623e 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -191,7 +191,7 @@ enum { IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), /* Irq domain name was allocated in __irq_domain_add() */ - IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6), + IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1), /* Irq domain is an IPI domain with virq per cpu */ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), @@ -205,6 +205,13 @@ enum { /* Irq domain implements MSI remapping */ IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5), + /* + * Quirk to handle MSI implementations which do not provide + * masking. Currently known to affect x86, but partially + * handled in core code. 
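+ *
+ * Core code is expected to propagate the domain flag to the
+ * per-interrupt state roughly as follows (illustrative sketch):
+ *
+ *	irq_data = irq_domain_get_irq_data(domain, virq);
+ *	if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
+ *		irqd_set_msi_nomask_quirk(irq_data);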
+ */ + IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 603fbc4e2f70..10e6049c0ba9 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1582,7 +1582,7 @@ static inline int jbd2_space_needed(journal_t *journal) static inline unsigned long jbd2_log_space_left(journal_t *journal) { /* Allow for rounding errors */ - unsigned long free = journal->j_free - 32; + long free = journal->j_free - 32; if (journal->j_committing_transaction) { unsigned long committing = atomic_read(&journal-> @@ -1591,7 +1591,7 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal) /* Transaction + control blocks */ free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT); } - return free; + return max_t(long, free, 0); } /* diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 7ee2bb43b251..bfad33f11abd 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -50,6 +50,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) extern unsigned long long nr_context_switches(void); +extern unsigned long long nr_context_switches_cpu(int cpu); extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); extern void kstat_incr_irq_this_cpu(unsigned int irq); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 936b61bd504e..f797ccc650e7 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -187,6 +187,7 @@ struct kernfs_root { /* private fields, do not use outside kernfs proper */ struct idr ino_idr; + u32 last_ino; u32 next_generation; struct kernfs_syscall_ops *syscall_ops; diff --git a/include/linux/kpatch.h b/include/linux/kpatch.h new file mode 100644 index 000000000000..cf47a157f384 --- /dev/null +++ b/include/linux/kpatch.h @@ -0,0 +1,11 @@ +struct kpatch_patch_dynrela { + unsigned long dest; + unsigned long src; + unsigned long type; + unsigned long sympos; + char *name; + char *objname; + int external; + int addend; +}; + diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d41c521a39da..b81f0f1ded5f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -204,7 +204,7 @@ struct kvm_async_pf { struct list_head queue; struct kvm_vcpu *vcpu; struct mm_struct *mm; - gva_t gva; + gpa_t cr2_or_gpa; unsigned long addr; struct kvm_arch_async_pf arch; bool wakeup_all; @@ -212,8 +212,8 @@ struct kvm_async_pf { void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, - struct kvm_arch_async_pf *arch); +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif @@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); void kvm_get_pfn(kvm_pfn_t pfn); +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, @@ -750,7 +751,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct 
kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); -unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); +unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); @@ -758,8 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); +int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool atomic); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); +int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index bde5374ae021..2382cb58969d 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -18,7 +18,7 @@ struct kvm_memslots; enum kvm_mr_change; -#include +#include /* * Address types: @@ -49,4 +49,11 @@ struct gfn_to_hva_cache { struct kvm_memory_slot *memslot; }; +struct gfn_to_pfn_cache { + u64 generation; + gfn_t gfn; + kvm_pfn_t pfn; + bool dirty; +}; + #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 207e7ee764ce..c44e4cfbcb16 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1174,6 +1174,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); +extern u64 ata_qc_get_active(struct ata_port *ap); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, @@ -1219,6 +1220,7 @@ struct pci_bits { }; extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); +extern void ata_pci_shutdown_one(struct pci_dev *pdev); extern void ata_pci_remove_one(struct pci_dev *pdev); #ifdef CONFIG_PM diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index edb0f0c30904..1adf54aad2df 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -7,6 +7,9 @@ #include +#define INT32_MAX S32_MAX +#define UINT32_MAX U32_MAX + typedef __be16 fdt16_t; typedef __be32 fdt32_t; typedef __be64 fdt64_t; diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index 3ef96743db8d..1ecd35664e0d 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -72,10 +72,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, struct hlist_nulls_node *first = h->first; n->next = first; - n->pprev = &h->first; + WRITE_ONCE(n->pprev, &h->first); h->first = n; if (!is_a_nulls(first)) - first->pprev = &n->next; + WRITE_ONCE(first->pprev, &n->next); } static inline void __hlist_nulls_del(struct hlist_nulls_node 
*n) @@ -85,13 +85,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n) WRITE_ONCE(*pprev, next); if (!is_a_nulls(next)) - next->pprev = pprev; + WRITE_ONCE(next->pprev, pprev); } static inline void hlist_nulls_del(struct hlist_nulls_node *n) { __hlist_nulls_del(n); - n->pprev = LIST_POISON2; + WRITE_ONCE(n->pprev, LIST_POISON2); } /** diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ae703ea3ef48..2de725382784 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -235,6 +235,7 @@ struct mem_cgroup { * Should the accounting and control be hierarchical, per subtree? */ bool use_hierarchy; + bool meminfo_recursive; /* * Should the OOM killer kill all belonging tasks, had it kill one? @@ -705,6 +706,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); +void mod_memcg_obj_state(void *p, int idx, int val); static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) @@ -1128,6 +1130,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, __mod_node_page_state(page_pgdat(page), idx, val); } +static inline void mod_memcg_obj_state(void *p, int idx, int val) +{ +} + static inline unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1432,6 +1438,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } +struct mem_cgroup *mem_cgroup_from_obj(void *p); + #else static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) @@ -1473,6 +1481,11 @@ static inline void memcg_put_cache_ids(void) { } +static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) +{ + return NULL; +} + #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index f46ea71b4ffd..451efd4499cc 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -125,8 +125,8 @@ static inline bool movable_node_is_enabled(void) extern void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap); -extern void __remove_pages(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages, struct vmem_altmap *altmap); +extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap); /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, @@ -345,6 +345,9 @@ extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); +extern void remove_pfn_range_from_zone(struct zone *zone, + unsigned long start_pfn, + unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); extern int sparse_add_section(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap); diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 7cfd2b0504df..a59bf323f713 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -610,7 +610,7 @@ enum { RK808_ID = 0x0000, RK809_ID = 0x8090, RK817_ID = 0x8170, - RK818_ID = 0x8181, + RK818_ID = 0x8180, }; struct 
rk808 { diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h index 1013e60c5b25..b0109ee6dae2 100644 --- a/include/linux/mfd/rohm-bd70528.h +++ b/include/linux/mfd/rohm-bd70528.h @@ -317,7 +317,7 @@ enum { #define BD70528_MASK_RTC_MINUTE 0x7f #define BD70528_MASK_RTC_HOUR_24H 0x80 #define BD70528_MASK_RTC_HOUR_PM 0x20 -#define BD70528_MASK_RTC_HOUR 0x1f +#define BD70528_MASK_RTC_HOUR 0x3f #define BD70528_MASK_RTC_DAY 0x3f #define BD70528_MASK_RTC_WEEK 0x07 #define BD70528_MASK_RTC_MONTH 0x1f diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 3247a3dc7934..b06b75776a32 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -57,6 +57,7 @@ #define UHID_MINOR 239 #define USERIO_MINOR 240 #define VHOST_VSOCK_MINOR 241 +#define RFKILL_MINOR 242 #define MISC_DYNAMIC_MINOR 255 struct device; diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index a858bcb6220b..a1babf811eee 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -66,6 +66,7 @@ struct mlx4_interface { int flags; }; +int mlx4_load_mlx4_en(void); int mlx4_register_interface(struct mlx4_interface *intf); void mlx4_unregister_interface(struct mlx4_interface *intf); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 0836fe232f97..acd859ea09d4 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -857,7 +857,11 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 cqe_checksum_full[0x1]; - u8 reserved_at_24[0x5]; + u8 tunnel_stateless_geneve_tx[0x1]; + u8 tunnel_stateless_mpls_over_udp[0x1]; + u8 tunnel_stateless_mpls_over_gre[0x1]; + u8 tunnel_stateless_vxlan_gpe[0x1]; + u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; u8 reserved_at_2a[0x6]; u8 max_vxlan_udp_ports[0x8]; @@ -1417,14 +1421,15 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_440[0x20]; - u8 tls[0x1]; - u8 reserved_at_461[0x2]; + u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; u8 reserved_at_468[0x3]; u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; - u8 reserved_at_480[0x3]; + u8 reserved_at_480[0x1]; + u8 tls_tx[0x1]; + u8 reserved_at_482[0x1]; u8 log_max_l2_table[0x5]; u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; diff --git a/include/linux/mm.h b/include/linux/mm.h index a2adf95b3f9c..0affadb49092 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -197,6 +197,7 @@ static inline void __mm_zero_struct_page(struct page *page) #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) extern int sysctl_max_map_count; +extern int max_map_count_isolated; extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; @@ -2666,13 +2667,25 @@ static inline bool want_init_on_free(void) !page_poisoning_enabled(); } -#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT -DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); +#ifdef CONFIG_DEBUG_PAGEALLOC +extern void init_debug_pagealloc(void); #else -DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +static inline void init_debug_pagealloc(void) {} #endif +extern bool _debug_pagealloc_enabled_early; +DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); static inline bool debug_pagealloc_enabled(void) +{ + return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && + _debug_pagealloc_enabled_early; +} + +/* + * For use in fast paths after init_debug_pagealloc() has run, or when a + * false negative result is not harmful when called too early. 
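+ *
+ * Illustrative fast-path guard (a sketch, not from this patch):
+ *
+ *	if (debug_pagealloc_enabled_static())
+ *		__kernel_map_pages(page, 1 << order, 0);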
+ */ +static inline bool debug_pagealloc_enabled_static(void) { if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) return false; @@ -2683,6 +2696,10 @@ static inline bool debug_pagealloc_enabled(void) #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) extern void __kernel_map_pages(struct page *page, int numpages, int enable); +/* + * When called in DEBUG_PAGEALLOC context, the call should most likely be + * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static() + */ static inline void kernel_map_pages(struct page *page, int numpages, int enable) { diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 9b6336ad3266..e459b38ef33c 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -291,6 +291,7 @@ struct mmc_card { struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ unsigned int sdio_funcs; /* number of SDIO functions */ + atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ struct sdio_cccr cccr; /* common card info */ struct sdio_cis cis; /* common tuple info */ struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index ba703384bea0..4c5eb3aa8e72 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -333,6 +333,7 @@ struct mmc_host { MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ MMC_CAP_UHS_DDR50) #define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */ +#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index d1a5d5df02f5..08b25c02b5a1 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -71,6 +71,8 @@ #define SDIO_VENDOR_ID_TI 0x0097 #define SDIO_DEVICE_ID_TI_WL1271 0x4076 +#define SDIO_VENDOR_ID_TI_WL1251 0x104c +#define SDIO_DEVICE_ID_TI_WL1251 0x9066 #define SDIO_VENDOR_ID_STE 0x0020 #define SDIO_DEVICE_ID_STE_CW1200 0x2280 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index bda20282746b..089963c52fd9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -215,9 +215,8 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */ - NR_SLAB_UNRECLAIMABLE, /* and this one without looking at - * memcg_flush_percpu_vmstats() first. */ + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, @@ -435,6 +434,14 @@ struct zone { */ long lowmem_reserve[MAX_NR_ZONES]; + /* + * This atomic counter is set when there is pagecache limit + * reclaim going on on this particular zone. Other potential + * reclaiers should back off to prevent from heavy lru_lock + * bouncing. 
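+ *
+ * A cooperating reclaimer would check the counter roughly like this
+ * (illustrative sketch; the field is only a hint, not a lock):
+ *
+ *	for_each_populated_zone(zone) {
+ *		if (atomic_read(&zone->pagecache_reclaim))
+ *			continue;
+ *		...
+ *	}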
+ */ + atomic_t pagecache_reclaim; + #ifdef CONFIG_NUMA int node; #endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5714fd35a83c..e3596db077dc 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -587,9 +587,9 @@ struct platform_device_id { #define MDIO_NAME_SIZE 32 #define MDIO_MODULE_PREFIX "mdio:" -#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" +#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u" #define MDIO_ID_ARGS(_id) \ - (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ + ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \ diff --git a/include/linux/module.h b/include/linux/module.h index 6d20895e7739..a2ba67f2ac65 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -344,6 +344,13 @@ struct klp_modinfo { }; #endif +struct kpatch_modinfo { + Elf_Ehdr hdr; + Elf_Shdr *sechdrs; + char *secstrings; + unsigned int symndx; +}; + struct module { enum module_state state; @@ -485,6 +492,11 @@ struct module { struct klp_modinfo *klp_info; #endif +#if IS_ENABLED(CONFIG_KPATCH) + bool is_kpatch_mod; + struct kpatch_modinfo *kpatch_info; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head source_list; @@ -681,6 +693,18 @@ static inline bool is_livepatch_module(struct module *mod) } #endif /* CONFIG_LIVEPATCH */ +#if IS_ENABLED(CONFIG_KPATCH) +static inline bool is_kpatch_module(struct module *mod) +{ + return mod->is_kpatch_mod; +} +#else /* !CONFIG_KPATCH */ +static inline bool is_kpatch_module(struct module *mod) +{ + return false; +} +#endif /* CONFIG_KPATCH */ + bool is_module_sig_enforced(void); void set_module_sig_enforced(void); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c20f190b4c18..9c1081208b79 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -67,11 +67,14 @@ struct xdp_buff; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); +extern unsigned long snmp_fold_field(void __percpu *mib, int offt); /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ +#define MAX_NEST_DEV 8 + /* * Transmit return codes: transmit return codes originate from three different * namespaces: @@ -1442,6 +1445,15 @@ struct net_device_ops { struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); }; +#define DEV_MIB_MAX __DEV_MIB_MAX +struct dev_mib { + unsigned long mibs[DEV_MIB_MAX]; +}; + +struct netdev_mib { + DEFINE_SNMP_STAT(struct dev_mib, dev_statistics); +}; + /** * enum net_device_priv_flags - &struct net_device priv_flags * @@ -1761,7 +1773,7 @@ enum netdev_priv_flags { * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. 
* @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock - spinlock + * spinlock * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * @qdisc_xmit_lock_key: lockdep class annotating * netdev_queue->_xmit_lock spinlock @@ -1823,6 +1835,7 @@ struct net_device { int group; struct net_device_stats stats; + struct netdev_mib mib; atomic_long_t rx_dropped; atomic_long_t tx_dropped; @@ -1867,6 +1880,11 @@ struct net_device { unsigned char if_port; unsigned char dma; + /* Note : dev->mtu is often read without holding a lock. + * Writers usually hold RTNL. + * It is recommended to use READ_ONCE() to annotate the reads, + * and to use WRITE_ONCE() to annotate the writes. + */ unsigned int mtu; unsigned int min_mtu; unsigned int max_mtu; @@ -3661,6 +3679,8 @@ int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); int __dev_set_mtu(struct net_device *, int); +int dev_validate_mtu(struct net_device *dev, int mtu, + struct netlink_ext_ack *extack); int dev_set_mtu_ext(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); int dev_set_mtu(struct net_device *, int); @@ -3716,6 +3736,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; extern unsigned int netdev_budget_usecs; +extern int netdev_pagefrag_enabled; /* Called by rtnetlink.c:rtnl_unlock() */ void netdev_run_todo(void); @@ -4287,11 +4308,8 @@ void *netdev_lower_get_next(struct net_device *dev, ldev; \ ldev = netdev_lower_get_next(dev, &(iter))) -struct net_device *netdev_all_lower_get_next(struct net_device *dev, +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, struct list_head **iter); -struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, - struct list_head **iter); - int netdev_walk_all_lower_dev(struct net_device *dev, int (*fn)(struct net_device *lower_dev, void *data), diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 77ebb61faf48..eb312e7ca36e 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -199,6 +199,8 @@ extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, const struct nf_hook_entries *e, unsigned int i); +void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state, + const struct nf_hook_entries *e); /** * nf_hook - call a netfilter hook * @@ -311,17 +313,36 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - struct sk_buff *skb, *next; - struct list_head sublist; + struct nf_hook_entries *hook_head = NULL; - INIT_LIST_HEAD(&sublist); - list_for_each_entry_safe(skb, next, head, list) { - list_del(&skb->list); - if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1) - list_add_tail(&skb->list, &sublist); +#ifdef CONFIG_JUMP_LABEL + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook) && + !static_key_false(&nf_hooks_needed[pf][hook])) + return; +#endif + + rcu_read_lock(); + switch (pf) { + case NFPROTO_IPV4: + hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); + break; + case NFPROTO_IPV6: + hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); + break; + default: + WARN_ON_ONCE(1); + break; } - /* Put passed packets back on main list */ - 
list_splice(&sublist, head); + + if (hook_head) { + struct nf_hook_state state; + + nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn); + + nf_hook_slow_list(head, &state, hook_head); + } + rcu_read_unlock(); } /* Call setsockopt() */ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 9bc255a8461b..32658749e9db 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -121,6 +121,7 @@ struct ip_set_ext { u32 timeout; u8 packets_op; u8 bytes_op; + bool target; }; struct ip_set; @@ -187,6 +188,14 @@ struct ip_set_type_variant { /* Return true if "b" set is the same as "a" * according to the create set parameters */ bool (*same_set)(const struct ip_set *a, const struct ip_set *b); + /* Region-locking is used */ + bool region_lock; +}; + +struct ip_set_region { + spinlock_t lock; /* Region lock */ + size_t ext_size; /* Size of the dynamic extensions */ + u32 elements; /* Number of elements vs timeout */ }; /* The core set type structure */ @@ -445,13 +454,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr) sizeof(*addr)); } -/* Calculate the bytes required to store the inclusive range of a-b */ -static inline int -bitmap_bytes(u32 a, u32 b) -{ - return 4 * ((((b - a + 8) / 8) + 3) / 4); -} - /* How often should the gc be run by default */ #define IPSET_GC_TIME (3 * 60) @@ -688,7 +690,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, } #define IP_SET_INIT_KEXT(skb, opt, set) \ - { .bytes = (skb)->len, .packets = 1, \ + { .bytes = (skb)->len, .packets = 1, .target = true,\ .timeout = ip_set_adt_opt_timeout(opt, set) } #define IP_SET_INIT_UEXT(set) \ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index cf09ab37b45b..851425c3178f 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -31,7 +31,7 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); - int (*abort)(struct net *net, struct sk_buff *skb); + int (*abort)(struct net *net, struct sk_buff *skb, bool autoload); void (*cleanup)(struct net *net); bool (*valid_genid)(struct net *net, u32 genid); }; diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 10f81629b9ce..6d0d70f3219c 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -270,6 +270,8 @@ struct nvme_fc_remote_port { * * Host/Initiator Transport Entrypoints/Parameters: * + * @module: The LLDD module using the interface + * * @localport_delete: The LLDD initiates deletion of a localport via * nvme_fc_deregister_localport(). However, the teardown is * asynchronous. This routine is called upon the completion of the @@ -383,6 +385,8 @@ struct nvme_fc_remote_port { * Value is Mandatory. Allowed to be zero. 
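+ *
+ * With the new @module member, an LLDD initializer looks roughly
+ * like this (illustrative sketch; the lldd_* names are made up):
+ *
+ *	static struct nvme_fc_port_template lldd_fc_template = {
+ *		.module			= THIS_MODULE,
+ *		.localport_delete	= lldd_localport_delete,
+ *		.remoteport_delete	= lldd_remoteport_delete,
+ *		...
+ *	};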
*/ struct nvme_fc_port_template { + struct module *module; + /* initiator-based functions */ void (*localport_delete)(struct nvme_fc_local_port *); void (*remoteport_delete)(struct nvme_fc_remote_port *); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index f61d6906e59d..a260cd754f28 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1368,6 +1368,7 @@ enum { NVME_SC_ANA_INACCESSIBLE = 0x302, NVME_SC_ANA_TRANSITION = 0x303, NVME_SC_HOST_PATH_ERROR = 0x370, + NVME_SC_HOST_ABORTED_CMD = 0x371, NVME_SC_CRD = 0x1800, NVME_SC_DNR = 0x4000, diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 8f8be5b00060..5c17cb733224 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -118,7 +118,7 @@ static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) } static inline int nvmem_cell_write(struct nvmem_cell *cell, - const char *buf, size_t len) + void *buf, size_t len) { return -EOPNOTSUPP; } diff --git a/include/linux/padata.h b/include/linux/padata.h index 23717eeaad23..cccab7a59787 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -9,6 +9,7 @@ #ifndef PADATA_H #define PADATA_H +#include #include #include #include @@ -98,7 +99,7 @@ struct padata_cpumask { * struct parallel_data - Internal control structure, covers everything * that depends on the cpumask in use. * - * @pinst: padata instance. + * @sh: padata_shell object. * @pqueue: percpu padata queues used for parallelization. * @squeue: percpu padata queues used for serialuzation. * @reorder_objects: Number of objects waiting in the reorder queues. @@ -111,7 +112,7 @@ struct padata_cpumask { * @lock: Reorder lock. */ struct parallel_data { - struct padata_instance *pinst; + struct padata_shell *ps; struct padata_parallel_queue __percpu *pqueue; struct padata_serial_queue __percpu *squeue; atomic_t reorder_objects; @@ -124,14 +125,33 @@ struct parallel_data { spinlock_t lock ____cacheline_aligned; }; +/** + * struct padata_shell - Wrapper around struct parallel_data, its + * purpose is to allow the underlying control structure to be replaced + * on the fly using RCU. + * + * @pinst: padat instance. + * @pd: Actual parallel_data structure which may be substituted on the fly. + * @opd: Pointer to old pd to be freed by padata_replace. + * @list: List entry in padata_instance list. + */ +struct padata_shell { + struct padata_instance *pinst; + struct parallel_data __rcu *pd; + struct parallel_data *opd; + struct list_head list; +}; + /** * struct padata_instance - The overall control structure. * * @cpu_notifier: cpu hotplug notifier. * @parallel_wq: The workqueue used for parallel work. * @serial_wq: The workqueue used for serial work. - * @pd: The internal control structure. + * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works. + * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask. + * @omask: Temporary storage used to compute the notification mask. * @cpumask_change_notifier: Notifiers chain for user-defined notify * callbacks that will be called when either @pcpu or @cbcpu * or both cpumasks change. 
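+ *
+ * Typical use of the shell interface introduced here (illustrative
+ * sketch):
+ *
+ *	ps = padata_alloc_shell(pinst);
+ *	if (!ps)
+ *		return -ENOMEM;
+ *	err = padata_do_parallel(ps, padata, &cb_cpu);
+ *	...
+ *	padata_free_shell(ps);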
@@ -143,8 +163,10 @@ struct padata_instance { struct hlist_node node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; - struct parallel_data *pd; + struct list_head pslist; struct padata_cpumask cpumask; + struct padata_cpumask rcpumask; + cpumask_var_t omask; struct blocking_notifier_head cpumask_change_notifier; struct kobject kobj; struct mutex lock; @@ -156,7 +178,9 @@ struct padata_instance { extern struct padata_instance *padata_alloc_possible(const char *name); extern void padata_free(struct padata_instance *pinst); -extern int padata_do_parallel(struct padata_instance *pinst, +extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); +extern void padata_free_shell(struct padata_shell *ps); +extern int padata_do_parallel(struct padata_shell *ps, struct padata_priv *padata, int *cb_cpu); extern void padata_do_serial(struct padata_priv *padata); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1bf83c8fcaa7..77de28bfefb0 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } __PAGEFLAG(Locked, locked, PF_NO_TAIL) PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) -PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) +PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) __SETPAGEFLAG(Referenced, referenced, PF_HEAD) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 37a4d9e32cd3..e820ea17586a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -604,6 +604,8 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) return 0; } +int add_to_page_cache(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, @@ -614,22 +616,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); -/* - * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run __SetPageLocked() against it. 
- */ -static inline int add_to_page_cache(struct page *page, - struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) -{ - int error; - - __SetPageLocked(page); - error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); - if (unlikely(error)) - __ClearPageLocked(page); - return error; -} - static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> diff --git a/include/linux/pci.h b/include/linux/pci.h index f9088c89a534..f39f22f9ee47 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2310,9 +2310,11 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); void pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); +bool pci_pr3_present(struct pci_dev *pdev); #else static inline struct irq_domain * pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } +static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } #endif #ifdef CONFIG_EEH @@ -2322,7 +2324,7 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) } #endif -void pci_add_dma_alias(struct pci_dev *dev, u8 devfn); +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns); bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index a6fabd865211..176bfbd52d97 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -175,8 +175,7 @@ * Declaration/definition used for per-CPU variables that should be accessed * as decrypted when memory encryption is enabled in the guest. */ -#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT) - +#ifdef CONFIG_AMD_MEM_ENCRYPT #define DECLARE_PER_CPU_DECRYPTED(type, name) \ DECLARE_PER_CPU_SECTION(type, name, "..decrypted") diff --git a/include/linux/phy.h b/include/linux/phy.h index 9a0e981df502..80750783b5b0 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -336,6 +336,7 @@ struct phy_c45_device_ids { * is_gigabit_capable: Set to true if PHY supports 1000Mbps * has_fixups: Set to true if this phy has fixups/quirks. * suspended: Set to true if this phy has been suspended successfully. + * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus. * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * loopback_enabled: Set true if this phy has been loopbacked successfully. * state: state of the PHY for management purposes @@ -372,6 +373,7 @@ struct phy_device { unsigned is_gigabit_capable:1; unsigned has_fixups:1; unsigned suspended:1; + unsigned suspended_by_mdio_bus:1; unsigned sysfs_links:1; unsigned loopback_enabled:1; @@ -524,6 +526,7 @@ struct phy_driver { /* * Checks if the PHY generated an interrupt. * For multi-PHY devices with shared PHY interrupt pin + * Set interrupt bits have to be cleared. 
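+ *
+ * Illustrative implementation (a sketch; the FOO_* names are made
+ * up, and reading the status register clears the pending bits):
+ *
+ *	static int foo_did_interrupt(struct phy_device *phydev)
+ *	{
+ *		int isr = phy_read(phydev, FOO_INT_STATUS);
+ *
+ *		return isr > 0 && (isr & FOO_INT_PENDING);
+ *	}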
*/ int (*did_interrupt)(struct phy_device *phydev); @@ -993,7 +996,7 @@ int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set); -struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); #if IS_ENABLED(CONFIG_PHYLIB) diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 49538b172483..bb96d43a5135 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -45,6 +45,9 @@ struct pid_namespace { int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; +#ifdef CONFIG_PID_NS + int max_map_count; +#endif } __randomize_layout; extern struct pid_namespace init_pid_ns; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index b5b7a3423ca8..2cbde6542849 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,9 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) +#define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) +#define SYSC_MODULE_QUIRK_AESS BIT(19) #define SYSC_MODULE_QUIRK_SGX BIT(18) #define SYSC_MODULE_QUIRK_HDQ1W BIT(17) #define SYSC_MODULE_QUIRK_I2C BIT(16) diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index f2688404d1cd..569f446502be 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -24,7 +24,7 @@ struct platform_device { int id; bool id_auto; struct device dev; - u64 dma_mask; + u64 platform_dma_mask; u32 num_resources; struct resource *resource; diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index ebf5ef17cc2a..24a6263c9931 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -256,7 +256,7 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) #endif #define FREQ_QOS_MIN_DEFAULT_VALUE 0 -#define FREQ_QOS_MAX_DEFAULT_VALUE (-1) +#define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX enum freq_qos_req_type { FREQ_QOS_MIN = 1, diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index fe6cfdcfbc26..468328b1e1dd 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -69,29 +69,32 @@ struct posix_clock_operations { * * @ops: Functional interface to the clock * @cdev: Character device instance for this clock - * @kref: Reference count. + * @dev: Pointer to the clock's device. * @rwsem: Protects the 'zombie' field from concurrent access. * @zombie: If 'zombie' is true, then the hardware has disappeared. - * @release: A function to free the structure when the reference count reaches - * zero. May be NULL if structure is statically allocated. * * Drivers should embed their struct posix_clock within a private * structure, obtaining a reference to it during callbacks using * container_of(). + * + * Drivers should supply an initialized but not exposed struct device + * to posix_clock_register(). It is used to manage lifetime of the + * driver's private structure. It's 'release' field should be set to + * a release function for this private structure. 
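+ *
+ * Registration then looks roughly like this (illustrative sketch;
+ * the foo_* names are made up):
+ *
+ *	device_initialize(&foo->dev);
+ *	foo->dev.release = foo_release;
+ *	err = posix_clock_register(&foo->clock, &foo->dev);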
*/ struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; - struct kref kref; + struct device *dev; struct rw_semaphore rwsem; bool zombie; - void (*release)(struct posix_clock *clk); }; /** * posix_clock_register() - register a new clock - * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' - * @devid: Allocated device id + * @clk: Pointer to the clock. Caller must provide 'ops' field + * @dev: Pointer to the initialized device. Caller must provide + * 'release' field * * A clock driver calls this function to register itself with the * clock device subsystem. If 'clk' points to dynamically allocated @@ -100,7 +103,7 @@ struct posix_clock { * * Returns zero on success, non-zero otherwise. */ -int posix_clock_register(struct posix_clock *clk, dev_t devid); +int posix_clock_register(struct posix_clock *clk, struct device *dev); /** * posix_clock_unregister() - unregister a clock diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index d0b37e937037..971c9264179e 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -293,6 +293,9 @@ struct omap_sr_data { struct voltagedomain *voltdm; }; + +extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR]; + #ifdef CONFIG_POWER_AVS_OMAP /* Smartreflex module enable/disable interface */ diff --git a/include/linux/processor-type.h b/include/linux/processor-type.h new file mode 100644 index 000000000000..d33c6dd16376 --- /dev/null +++ b/include/linux/processor-type.h @@ -0,0 +1,9 @@ +#ifndef _PROCESS_TYPE_H_ +#define _PROCESS_TYPE_H_ + +extern unsigned int is_phytium2000(void); +extern unsigned int is_phytium1500(void); +extern unsigned int is_huawei(void); +extern unsigned int is_virtual(void); + +#endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 2a9df80ea887..84cfab1570b0 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -53,6 +53,7 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, #define PT_BLOCKSTEP_BIT 30 #define PT_BLOCKSTEP (1<dq_count); return dquot; } + +static inline bool dquot_is_busy(struct dquot *dquot) +{ + if (test_bit(DQ_MOD_B, &dquot->dq_flags)) + return true; + if (atomic_read(&dquot->dq_count) > 1) + return true; + return false; +} + void dqput(struct dquot *dquot); int dquot_scan_active(struct super_block *sb, int (*fn)(struct dquot *dquot, unsigned long priv), diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 0832c9b66852..e0ddb47f4402 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -27,7 +27,6 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #include #include -#include #include #include #include @@ -59,7 +58,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define enable_kernel_altivec() #define disable_kernel_altivec() +#undef EXPORT_SYMBOL #define EXPORT_SYMBOL(sym) +#undef EXPORT_SYMBOL_GPL #define EXPORT_SYMBOL_GPL(sym) #define MODULE_LICENSE(licence) #define MODULE_DESCRIPTION(desc) diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index bc8206a8f30e..90f2e2232c6d 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -34,7 +34,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) { if (!hlist_nulls_unhashed(n)) { __hlist_nulls_del(n); - n->pprev = NULL; + WRITE_ONCE(n->pprev, NULL); } } @@ -66,7 +66,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) static inline void hlist_nulls_del_rcu(struct 
hlist_nulls_node *n) { __hlist_nulls_del(n); - n->pprev = LIST_POISON2; + WRITE_ONCE(n->pprev, LIST_POISON2); } /** @@ -94,10 +94,47 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, struct hlist_nulls_node *first = h->first; n->next = first; - n->pprev = &h->first; + WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) - first->pprev = &n->next; + WRITE_ONCE(first->pprev, &n->next); +} + +/** + * hlist_nulls_add_tail_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_nulls, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *i, *last = NULL; + + /* Note: write side code, so rcu accessors are not needed. */ + for (i = h->first; !is_a_nulls(i); i = i->next) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + rcu_assign_pointer(hlist_next_rcu(last), n); + } else { + hlist_nulls_add_head_rcu(n, h); + } } /** diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h index 7cf8f797e13a..3ab1ddf151a2 100644 --- a/include/linux/regulator/ab8500.h +++ b/include/linux/regulator/ab8500.h @@ -37,14 +37,11 @@ enum ab8505_regulator_id { AB8505_LDO_AUX6, AB8505_LDO_INTCORE, AB8505_LDO_ADC, - AB8505_LDO_USB, AB8505_LDO_AUDIO, AB8505_LDO_ANAMIC1, AB8505_LDO_ANAMIC2, AB8505_LDO_AUX8, AB8505_LDO_ANA, - AB8505_SYSCLKREQ_2, - AB8505_SYSCLKREQ_4, AB8505_NUM_REGULATORS, }; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 337a46391527..6a92fd3105a3 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -287,6 +287,8 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, const char *const *supply_names, unsigned int num_supplies); +bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2); + #else /* @@ -593,6 +595,11 @@ regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, { } +static inline bool +regulator_is_equal(struct regulator *reg1, struct regulator *reg2) +{ + return false; +} #endif static inline int regulator_set_voltage_triplet(struct regulator *regulator, diff --git a/include/linux/sched.h b/include/linux/sched.h index 67a1d86981a9..b968d736833b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1054,6 +1054,8 @@ struct task_struct { #endif struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; #endif #ifdef CONFIG_PERF_EVENTS struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; @@ -1442,7 +1444,6 @@ extern struct pid *cad_pid; */ #define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* Getting shut down */ -#define PF_EXITPIDONE 
0x00000008 /* PI exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ @@ -1914,11 +1915,11 @@ static inline void rseq_migrate(struct task_struct *t) /* * If parent process has a registered restartable sequences area, the - * child inherits. Only applies when forking a process, not a thread. + * child inherits. Unregister rseq for a clone with CLONE_VM set. */ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { - if (clone_flags & CLONE_THREAD) { + if (clone_flags & CLONE_VM) { t->rseq = NULL; t->rseq_sig = 0; t->rseq_event_mask = 0; diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index afa940cd50dc..cc6bcc1e96bc 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -12,6 +12,8 @@ #define SCHED_CPUFREQ_MIGRATION (1U << 1) #ifdef CONFIG_CPU_FREQ +struct cpufreq_policy; + struct update_util_data { void (*func)(struct update_util_data *data, u64 time, unsigned int flags); }; @@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); static inline unsigned long map_util_freq(unsigned long util, unsigned long freq, unsigned long cap) diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index e6770012db18..c49257a3b510 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -117,8 +117,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); * succeeds. */ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); -/* Remove the current tasks stale references to the old mm_struct */ -extern void mm_release(struct task_struct *, struct mm_struct *); +/* Remove the current tasks stale references to the old mm_struct on exit() */ +extern void exit_mm_release(struct task_struct *, struct mm_struct *); +/* Remove the current tasks stale references to the old mm_struct on exec() */ +extern void exec_mm_release(struct task_struct *, struct mm_struct *); #ifdef CONFIG_MEMCG extern void mm_update_next_owner(struct mm_struct *mm); diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index 1abe91ff6e4a..6d67e9a5af6b 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { } #ifdef CONFIG_NO_HZ_COMMON void calc_load_nohz_start(void); +void calc_load_nohz_remote(struct rq *rq); void calc_load_nohz_stop(void); #else static inline void calc_load_nohz_start(void) { } +static inline void calc_load_nohz_remote(struct rq *rq) { } static inline void calc_load_nohz_stop(void) { } #endif /* CONFIG_NO_HZ_COMMON */ diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index 568286411b43..f1185ac29e70 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -12,11 +12,13 @@ * so they can only be relied on in narrow circumstances. 
) */ -extern unsigned long total_forks; +DECLARE_PER_CPU(unsigned long, total_forks); extern int nr_threads; DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_forks(void); extern int nr_processes(void); extern unsigned long nr_running(void); +extern unsigned long nr_running_cpu(int cpu); extern bool single_task_running(void); extern unsigned long nr_iowait(void); extern unsigned long nr_iowait_cpu(int cpu); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index d4f6215ee03f..d57e1ed4ec8d 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -20,6 +20,7 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, enum { sysctl_hung_task_timeout_secs = 0 }; #endif +extern unsigned int sysctl_watch_host_pid; extern unsigned int sysctl_sched_latency; extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; diff --git a/include/linux/selection.h b/include/linux/selection.h index e2c1f96bf059..5b890ef5b59f 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h @@ -11,8 +11,8 @@ #include #include -extern struct vc_data *sel_cons; struct tty_struct; +struct vc_data; extern void clear_selection(void); extern int set_selection_user(const struct tiocl_selection __user *sel, @@ -24,6 +24,8 @@ extern int sel_loadlut(char __user *p); extern int mouse_reporting(void); extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); +bool vc_is_sel(struct vc_data *vc); + extern int console_blanked; extern const unsigned char color_table[]; diff --git a/include/linux/shield_mounts.h b/include/linux/shield_mounts.h new file mode 100644 index 000000000000..24ba760bbeac --- /dev/null +++ b/include/linux/shield_mounts.h @@ -0,0 +1,7 @@ +#ifndef __SHIELD_MOUNTS_H__ +#define __SHIELD_MOUNTS_H__ + +bool is_mount_shielded(const char *dev_path, const char *mount_path); +extern unsigned int sysctl_shield_mounts_max; + +#endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8688f7adfda7..955e1370f033 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -634,8 +634,8 @@ typedef unsigned char *sk_buff_data_t; * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware * @tc_skip_classify: do not classify packet. 
set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress - * @tc_redirected: packet was redirected by a tc action - * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect + * @redirected: packet was redirected by packet classifier + * @from_ingress: packet was redirected from the ingress path * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -816,8 +816,10 @@ struct sk_buff { #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; - __u8 tc_redirected:1; - __u8 tc_from_ingress:1; +#endif +#ifdef CONFIG_NET_REDIRECT + __u8 redirected:1; + __u8 from_ingress:1; #endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; @@ -1795,7 +1797,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, */ static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) { - struct sk_buff *skb = list_->prev; + struct sk_buff *skb = READ_ONCE(list_->prev); if (skb == (struct sk_buff *)list_) skb = NULL; @@ -1861,7 +1863,9 @@ static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) { - /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */ + /* See skb_queue_empty_lockless() and skb_peek_tail() + * for the opposite READ_ONCE() + */ WRITE_ONCE(newsk->next, next); WRITE_ONCE(newsk->prev, prev); WRITE_ONCE(next->prev, newsk); @@ -3527,8 +3531,9 @@ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, - int mac_len); -int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len); + int mac_len, bool ethernet); +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, + bool ethernet); int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); int skb_mpls_dec_ttl(struct sk_buff *skb); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, @@ -4511,5 +4516,31 @@ static inline __wsum lco_csum(struct sk_buff *skb) return csum_partial(l4_hdr, csum_start - l4_hdr, partial); } +static inline bool skb_is_redirected(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_REDIRECT + return skb->redirected; +#else + return false; +#endif +} + +static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) +{ +#ifdef CONFIG_NET_REDIRECT + skb->redirected = 1; + skb->from_ingress = from_ingress; + if (skb->from_ingress) + skb->tstamp = 0; +#endif +} + +static inline void skb_reset_redirect(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_REDIRECT + skb->redirected = 0; +#endif +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index ce7055259877..7eb6a8754f19 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -14,6 +14,7 @@ #include #define MAX_MSG_FRAGS MAX_SKB_FRAGS +#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1) enum __sk_action { __SK_DROP = 0, @@ -29,11 +30,13 @@ struct sk_msg_sg { u32 size; u32 copybreak; bool copy[MAX_MSG_FRAGS]; - /* The extra element is used for chaining the front and sections when - * the list becomes partitioned (e.g. end < start). The crypto APIs - * require the chaining. + /* The extra two elements: + * 1) used for chaining the front and sections when the list becomes + * partitioned (e.g. end < start). 
The crypto APIs require the + * chaining; + * 2) to chain trailer SG entries after the message. */ - struct scatterlist data[MAX_MSG_FRAGS + 1]; + struct scatterlist data[MAX_MSG_FRAGS + 2]; }; /* UAPI in filter.c depends on struct sk_msg_sg being first element. */ @@ -141,13 +144,13 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) static inline u32 sk_msg_iter_dist(u32 start, u32 end) { - return end >= start ? end - start : end + (MAX_MSG_FRAGS - start); + return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start); } #define sk_msg_iter_var_prev(var) \ do { \ if (var == 0) \ - var = MAX_MSG_FRAGS - 1; \ + var = NR_MSG_FRAG_IDS - 1; \ else \ var--; \ } while (0) @@ -155,7 +158,7 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end) #define sk_msg_iter_var_next(var) \ do { \ var++; \ - if (var == MAX_MSG_FRAGS) \ + if (var == NR_MSG_FRAG_IDS) \ var = 0; \ } while (0) @@ -172,9 +175,9 @@ static inline void sk_msg_clear_meta(struct sk_msg *msg) static inline void sk_msg_init(struct sk_msg *msg) { - BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS); + BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); memset(msg, 0, sizeof(*msg)); - sg_init_marker(msg->sg.data, MAX_MSG_FRAGS); + sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); } static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, @@ -195,14 +198,11 @@ static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) static inline bool sk_msg_full(const struct sk_msg *msg) { - return (msg->sg.end == msg->sg.start) && msg->sg.size; + return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; } static inline u32 sk_msg_elem_used(const struct sk_msg *msg) { - if (sk_msg_full(msg)) - return MAX_MSG_FRAGS; - return sk_msg_iter_dist(msg->sg.start, msg->sg.end); } @@ -354,17 +354,22 @@ static inline void sk_psock_update_proto(struct sock *sk, static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - sk->sk_write_space = psock->saved_write_space; + sk->sk_prot->unhash = psock->saved_unhash; if (psock->sk_proto) { struct inet_connection_sock *icsk = inet_csk(sk); bool has_ulp = !!icsk->icsk_ulp_data; - if (has_ulp) - tcp_update_ulp(sk, psock->sk_proto); - else + if (has_ulp) { + tcp_update_ulp(sk, psock->sk_proto, + psock->saved_write_space); + } else { sk->sk_prot = psock->sk_proto; + sk->sk_write_space = psock->saved_write_space; + } psock->sk_proto = NULL; + } else { + sk->sk_write_space = psock->saved_write_space; } } diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 6fc8843f1c9e..cd97d2c8840c 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -191,7 +191,7 @@ struct platform_s2idle_ops { int (*begin)(void); int (*prepare)(void); int (*prepare_late)(void); - void (*wake)(void); + bool (*wake)(void); void (*restore_early)(void); void (*restore)(void); void (*end)(void); diff --git a/include/linux/swab.h b/include/linux/swab.h index e466fd159c85..bcff5149861a 100644 --- a/include/linux/swab.h +++ b/include/linux/swab.h @@ -7,6 +7,7 @@ # define swab16 __swab16 # define swab32 __swab32 # define swab64 __swab64 +# define swab __swab # define swahw32 __swahw32 # define swahb32 __swahb32 # define swab16p __swab16p diff --git a/include/linux/swap.h b/include/linux/swap.h index 063c0c1e112b..d4a727e06986 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -362,6 +362,18 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, unsigned long *nr_scanned); extern unsigned
long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; +#define ADDITIONAL_RECLAIM_RATIO 2 +extern unsigned long pagecache_over_limit(void); +extern void shrink_page_cache(gfp_t mask, struct page *page); +extern unsigned long vm_pagecache_limit_pages; +extern unsigned long vm_pagecache_limit_reclaim_pages; +extern int vm_pagecache_limit_ratio; +extern int vm_pagecache_limit_reclaim_ratio; +extern unsigned int vm_pagecache_ignore_dirty; +extern unsigned int vm_pagecache_limit_async; +extern unsigned int vm_pagecache_ignore_slab; +extern int kpagecache_limitd_run(void); +extern void kpagecache_limitd_stop(void); extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 8c71874e8485..9b4313e618d0 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -50,6 +50,7 @@ int unregister_sysrq_key(int key, struct sysrq_key_op *op); struct sysrq_key_op *__sysrq_get_key_op(int key); int sysrq_toggle_support(int enable_mask); +int sysrq_toggle_sysrq_key(int __sysrq_use_leftctrl); #else diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 668e25a76d69..bcd95a793208 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -395,6 +395,11 @@ struct tcp_sock { */ struct request_sock __rcu *fastopen_rsk; u32 *saved_syn; + + u32 fullnat_real_ip; + u16 fullnat_real_port; + u32 fullnat_first_data_seq; + bool fullnat_real_ip_inserted; }; enum tsq_enum { diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 659a4400517b..e93e249a4e9b 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -147,6 +147,8 @@ check_copy_size(const void *addr, size_t bytes, bool is_source) __bad_copy_to(); return false; } + if (WARN_ON_ONCE(bytes > INT_MAX)) + return false; check_object_size(addr, bytes, is_source); return true; } diff --git a/include/linux/time.h b/include/linux/time.h index 27d83fd2ae61..5f3e49978837 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -96,4 +96,17 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its) */ #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0) #define time_before32(b, a) time_after32(a, b) + +/** + * time_between32 - check if a 32-bit timestamp is within a given time range + * @t: the time which may be within [l,h] + * @l: the lower bound of the range + * @h: the higher bound of the range + * + * time_between32(t, l, h) returns true if @l <= @t <= @h. All operands are + * treated as 32-bit integers. + * + * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
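+ *
+ * Illustrative sketch only (not part of this patch): with a cached
+ * 32-bit timestamp "stamp", a wraparound-safe check that it is at most
+ * HZ ticks old could read:
+ *
+ *	if (time_between32((u32)jiffies, stamp, stamp + HZ))
+ *		use_cached_value();	// hypothetical caller helper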
+ */ +#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) #endif diff --git a/include/linux/tkernel.h b/include/linux/tkernel.h new file mode 100644 index 000000000000..2ddd19b220a2 --- /dev/null +++ b/include/linux/tkernel.h @@ -0,0 +1,5 @@ +#include +#include +extern struct proc_dir_entry *proc_tkernel; +extern const struct ctl_path tkernel_ctl_path[]; +extern unsigned char prot_sock_flag[]; diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c17af77f3fae..ea627d1ab7e3 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift); /* Shift (rsh) a tnum right (by a fixed shift) */ struct tnum tnum_rshift(struct tnum a, u8 shift); /* Shift (arsh) a tnum right (by a fixed min_shift) */ -struct tnum tnum_arshift(struct tnum a, u8 min_shift); +struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness); /* Add two tnums, return @a + @b */ struct tnum tnum_add(struct tnum a, struct tnum b); /* Subtract two tnums, return @a - @b */ diff --git a/include/linux/trackgpu.h b/include/linux/trackgpu.h new file mode 100644 index 000000000000..536be0c101f0 --- /dev/null +++ b/include/linux/trackgpu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TRACKGPU_H +#define _LINUX_TRACKGPU_H + +#define GPU_DEV "/dev/nvidiactl" +#define GPU_TIMER_EXPIRE 2 +#define GPU_RETRY_TIMES 5 +#define GPU_OK "ok" +struct gpu_request_list { + struct list_head list; + struct mutex lock; +}; + +enum GPU_EVENT_TYPE { + GPU_OPEN, + GPU_CLOSE, +}; +extern int sysctl_track_gpu; +struct gpu_wait { + struct list_head node; + wait_queue_head_t wait; + bool proceed; + bool wakup; + int pid; + int type; + bool used; +}; + +extern struct gpu_wait *gpu_create_wait(int pid); +extern int notify_gpu(struct gpu_wait *wait, enum GPU_EVENT_TYPE e, int pid); +extern void gpu_clean(int pid); + + +#endif diff --git a/include/linux/tty.h b/include/linux/tty.h index bfa4e2ee94a9..bd5fe0e907e8 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -225,6 +225,8 @@ struct tty_port_client_operations { void (*write_wakeup)(struct tty_port *port); }; +extern const struct tty_port_client_operations tty_port_default_client_ops; + struct tty_port { struct tty_bufhead buf; /* Locked internally */ struct tty_struct *tty; /* Back pointer */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d4ee6e942562..38555435a64a 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -337,6 +337,18 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size); extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +/* + * probe_user_write(): safely attempt to write to a location in user space + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
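+ *
+ * Hypothetical usage sketch (not part of this patch): poke a 4-byte
+ * flag at a known-valid user address @uaddr from kernel context,
+ * tolerating an unmapped or read-only page:
+ *
+ *	u32 val = 1;
+ *
+ *	if (probe_user_write(uaddr, &val, sizeof(val)))
+ *		pr_debug("user page not writable\n");	// got -EFAULT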
+ */ +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h index 396d2b043e64..556a801efce3 100644 --- a/include/linux/usb/irda.h +++ b/include/linux/usb/irda.h @@ -119,11 +119,22 @@ struct usb_irda_cs_descriptor { * 6 - 115200 bps * 7 - 576000 bps * 8 - 1.152 Mbps - * 9 - 5 mbps + * 9 - 4 Mbps * 10..15 - Reserved */ #define USB_IRDA_STATUS_LINK_SPEED 0x0f +#define USB_IRDA_LS_NO_CHANGE 0 +#define USB_IRDA_LS_2400 1 +#define USB_IRDA_LS_9600 2 +#define USB_IRDA_LS_19200 3 +#define USB_IRDA_LS_38400 4 +#define USB_IRDA_LS_57600 5 +#define USB_IRDA_LS_115200 6 +#define USB_IRDA_LS_576000 7 +#define USB_IRDA_LS_1152000 8 +#define USB_IRDA_LS_4000000 9 + /* The following is a 4-bit value used only for * outbound header: * diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index a1be64c9940f..22c1f579afe3 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -69,4 +69,7 @@ /* Hub needs extra delay after resetting its port. */ #define USB_QUIRK_HUB_SLOW_RESET BIT(14) +/* device has blacklisted endpoints */ +#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 4e7809408073..decac0790fc1 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -126,8 +126,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); -void vmalloc_sync_all(void); - +void vmalloc_sync_mappings(void); +void vmalloc_sync_unmappings(void); + /* * Lowlevel-APIs (not for driver use!) 
*/ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 8dc77e40bc03..ded5c48598f3 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -135,7 +135,7 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt); int vty_init(const struct file_operations *console_fops); -extern char vt_dont_switch; +extern bool vt_dont_switch; extern int default_utf8; extern int global_cursor_default; diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h index 7ce4e8332421..1409230ad3a4 100644 --- a/include/media/dvb-usb-ids.h +++ b/include/media/dvb-usb-ids.h @@ -389,6 +389,7 @@ #define USB_PID_MYGICA_T230 0xc688 #define USB_PID_MYGICA_T230C 0xc689 #define USB_PID_MYGICA_T230C2 0xc68a +#define USB_PID_MYGICA_T230C_LITE 0xc699 #define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 #define USB_PID_ELGATO_EYETV_DTT 0x0021 #define USB_PID_ELGATO_EYETV_DTT_2 0x003f diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index e0b8f2602670..a0e93f0ef62a 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -371,7 +371,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) struct v4l2_subdev *__sd; \ \ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ - !(grpid) || __sd->grp_id == (grpid), o, f , \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ ##args); \ } while (0) @@ -403,7 +403,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) ({ \ struct v4l2_subdev *__sd; \ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ - !(grpid) || __sd->grp_id == (grpid), o, f , \ + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ ##args); \ }) @@ -431,8 +431,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) struct v4l2_subdev *__sd; \ \ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ - !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \ - ##args); \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ } while (0) /** @@ -462,8 +462,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) ({ \ struct v4l2_subdev *__sd; \ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ - !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \ - ##args); \ + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ + f , ##args); \ }) diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h index c86474dc7b55..8800a640c224 100644 --- a/include/media/v4l2-rect.h +++ b/include/media/v4l2-rect.h @@ -63,10 +63,10 @@ static inline void v4l2_rect_map_inside(struct v4l2_rect *r, r->left = boundary->left; if (r->top < boundary->top) r->top = boundary->top; - if (r->left + r->width > boundary->width) - r->left = boundary->width - r->width; - if (r->top + r->height > boundary->height) - r->top = boundary->height - r->height; + if (r->left + r->width > boundary->left + boundary->width) + r->left = boundary->left + boundary->width - r->width; + if (r->top + r->height > boundary->top + boundary->height) + r->top = boundary->top + boundary->height - r->height; } /** diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1abae3c340a5..299240df79e4 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -58,9 +58,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); -bool rxrpc_kernel_check_life(const struct socket *, const struct 
rxrpc_call *, - u32 *); -void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, ktime_t *); diff --git a/include/net/arp.h b/include/net/arp.h index c8f580a0e6b1..4950191f6b2b 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key) unsigned long now = jiffies; /* avoid dirtying neighbour */ - if (n->confirmed != now) - n->confirmed = now; + if (READ_ONCE(n->confirmed) != now) + WRITE_ONCE(n->confirmed, now); } rcu_read_unlock_bh(); } diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c8696a230b7d..9b73d4457d8b 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -259,6 +259,8 @@ struct ad_system { #define BOND_AD_INFO(bond) ((bond)->ad_info) #define SLAVE_AD_INFO(slave) ((slave)->ad_info) +extern int bond_8023ad_up; + struct ad_bond_info { struct ad_system system; /* 802.3ad system structure */ struct bond_3ad_stats stats; diff --git a/include/net/bonding.h b/include/net/bonding.h index 3d56b026bb9e..2f3305315094 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -139,6 +139,11 @@ struct bond_params { struct reciprocal_value reciprocal_packets_per_slave; u16 ad_actor_sys_prio; u16 ad_user_port_key; + int broadcast_arp; + int broadcast_nd; + unsigned long last_na; + int periodic_na; + u32 periodic_na_interval; /* 2 bytes of padding : see ether_addr_equal_64bits() */ u8 ad_actor_system[ETH_ALEN + 2]; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4ab2c49423dc..68782ba8b6e8 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3537,6 +3537,9 @@ struct cfg80211_update_owe_info { * * @start_radar_detection: Start radar detection in the driver. * + * @end_cac: End running CAC, probably because a related CAC + * was finished on another phy. + * * @update_ft_ies: Provide updated Fast BSS Transition information to the * driver. If the SME is in the driver/firmware, this information can be * used in building Authentication and Reassociation Request frames. 
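The new end_cac op documented above gives cfg80211 a way to tell a driver to stop a Channel Availability Check that has become redundant. A minimal sketch of how a driver might wire it up; the drv_priv structure, its dfs_cac_work member and the cac_started flag are hypothetical names, not taken from any real driver:

/* Hypothetical driver glue for the new op (names are illustrative). */
static void drv_end_cac(struct wiphy *wiphy, struct net_device *dev)
{
	struct drv_priv *priv = wiphy_priv(wiphy);

	/* Stop the pending CAC timer and mark the check as finished. */
	cancel_delayed_work_sync(&priv->dfs_cac_work);
	priv->cac_started = false;
}

static const struct cfg80211_ops drv_cfg80211_ops = {
	.end_cac	= drv_end_cac,
};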
@@ -3863,6 +3866,8 @@ struct cfg80211_ops { struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms); + void (*end_cac)(struct wiphy *wiphy, + struct net_device *dev); int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie); int (*crit_proto_start)(struct wiphy *wiphy, diff --git a/include/net/dst.h b/include/net/dst.h index fe62fe2eb781..3448cf865ede 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -82,7 +82,7 @@ struct dst_entry { struct dst_metrics { u32 metrics[RTAX_MAX]; refcount_t refcnt; -}; +} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */ extern const struct dst_metrics dst_default_metrics; u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); @@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) struct dst_entry *dst = skb_dst(skb); if (dst && dst->ops->update_pmtu) - dst->ops->update_pmtu(dst, NULL, skb, mtu); + dst->ops->update_pmtu(dst, NULL, skb, mtu, true); +} + +/* update dst pmtu but not do neighbor confirm */ +static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && dst->ops->update_pmtu) + dst->ops->update_pmtu(dst, NULL, skb, mtu, false); } static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, @@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, u32 encap_mtu = dst_mtu(encap_dst); if (skb->len > encap_mtu - headroom) - skb_dst_update_pmtu(skb, encap_mtu - headroom); + skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom); } #endif /* _NET_DST_H */ diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 5ec645f27ee3..443863c7b8da 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -27,7 +27,8 @@ struct dst_ops { struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu); + struct sk_buff *skb, u32 mtu, + bool confirm_neigh); void (*redirect)(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 20dcadd8eed9..7fed3193f81d 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -108,6 +108,7 @@ struct fib_rule_notifier_info { [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ [FRA_PRIORITY] = { .type = NLA_U32 }, \ [FRA_FWMARK] = { .type = NLA_U32 }, \ + [FRA_TUN_ID] = { .type = NLA_U64 }, \ [FRA_FWMASK] = { .type = NLA_U32 }, \ [FRA_TABLE] = { .type = NLA_U32 }, \ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \ diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 5cd12276ae21..78f6437cbc3a 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -5,6 +5,7 @@ #include #include #include +#include #include /** @@ -229,6 +230,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ + FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */ @@ 
-337,4 +339,12 @@ struct bpf_flow_dissector { void *data_end; }; +static inline void +flow_dissector_init_keys(struct flow_dissector_key_control *key_control, + struct flow_dissector_key_basic *key_basic) +{ + memset(key_control, 0, sizeof(*key_control)); + memset(key_basic, 0, sizeof(*key_basic)); +} + #endif diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 86c567f531f3..c6f7bd22db60 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -380,19 +380,18 @@ static inline void flow_block_init(struct flow_block *flow_block) typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, enum tc_setup_type type, void *type_data); -typedef void flow_indr_block_ing_cmd_t(struct net_device *dev, - flow_indr_block_bind_cb_t *cb, - void *cb_priv, - enum flow_block_command command); +typedef void flow_indr_block_cmd_t(struct net_device *dev, + flow_indr_block_bind_cb_t *cb, void *cb_priv, + enum flow_block_command command); -struct flow_indr_block_ing_entry { - flow_indr_block_ing_cmd_t *cb; +struct flow_indr_block_entry { + flow_indr_block_cmd_t *cb; struct list_head list; }; -void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry); +void flow_indr_add_block_cb(struct flow_indr_block_entry *entry); -void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry); +void flow_indr_del_block_cb(struct flow_indr_block_entry *entry); int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, flow_indr_block_bind_cb_t *cb, diff --git a/include/net/inet_common.h b/include/net/inet_common.h index ae2ba897675c..485d0b59a5e8 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -4,7 +4,7 @@ #include -extern const struct proto_ops inet_stream_ops; +extern struct proto_ops inet_stream_ops; extern const struct proto_ops inet_dgram_ops; /* diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index af2b4c065a04..3c95278d96ff 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -103,17 +103,23 @@ struct inet_bind_hashbucket { struct hlist_head chain; }; -/* - * Sockets can be hashed in established or listening table +/* Sockets can be hashed in established or listening table. + * We must use different 'nulls' end-of-chain value for all hash buckets : + * A socket might transition from ESTABLISH to LISTEN state without + * RCU grace period. A lookup in ehash table needs to handle this case. */ +#define LISTENING_NULLS_BASE (1U << 29) struct inet_listen_hashbucket { spinlock_t lock; unsigned int count; - struct hlist_head head; + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; }; /* This is for listening sockets, thus all sockets which possess wildcards. */ -#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */ +#define INET_LHTABLE_SIZE 4096 /* Yes, really, this is all you need. */ struct inet_hashinfo { /* This is for sockets with full identity only. 
Sockets here will diff --git a/include/net/ip.h b/include/net/ip.h index a2c61c36dc4a..d09a0a805c48 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -293,6 +293,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) #define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) +#define NET_INC_DROPSTATS(net, field) \ + SNMP_INC_STATS((net)->mib.netdrop_statistics, field) +#define __NET_INC_DROPSTATS(net, field) \ + __SNMP_INC_STATS((net)->mib.netdrop_statistics, field) + u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct); unsigned long snmp_fold_field(void __percpu *mib, int offt); #if BITS_PER_LONG==32 @@ -760,4 +765,9 @@ int ip_misc_proc_init(void); int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family, struct netlink_ext_ack *extack); +static inline bool inetdev_valid_mtu(unsigned int mtu) +{ + return likely(mtu >= IPV4_MIN_MTU); +} + #endif /* _IP_H */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 078887c8c586..58952725aff7 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -31,10 +31,12 @@ #include #endif #include /* Netw namespace */ - +#include #define IP_VS_HDR_INVERSE 1 #define IP_VS_HDR_ICMP 2 - +struct net *ip_vs_skb_net(struct sk_buff *skb); +extern struct net init_net; +extern bool share_ns; /* Generic access of ipvs struct */ static inline struct netns_ipvs *net_ipvs(struct net* net) { @@ -925,11 +927,14 @@ struct netns_ipvs { unsigned int sysctl_sync_refresh_period; int sysctl_sync_retries; int sysctl_nat_icmp_send; + int sysctl_estimation_timer_work; int sysctl_pmtu_disc; int sysctl_backup_only; int sysctl_conn_reuse_mode; int sysctl_schedule_icmp; int sysctl_ignore_tunneled; + int sysctl_ignore_no_rs_error; + int sysctl_tunnel_df_force_zero; /* ip_vs_lblc */ int sysctl_lblc_expiration; @@ -1059,6 +1064,11 @@ static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs) return ipvs->sysctl_ignore_tunneled; } +static inline int sysctl_ignore_no_rs_error(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_ignore_no_rs_error; +} + static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) { return ipvs->sysctl_cache_bypass; @@ -1624,18 +1634,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ -/* Really using conntrack? */ -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, - struct sk_buff *skb) +/* Using old conntrack that can not be redirected to another real server? 
*/ +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; - if (!(cp->flags & IP_VS_CONN_F_NFCT)) - return false; ct = nf_ct_get(skb, &ctinfo); - if (ct) + if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 009605c56f20..8ffab8da037a 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1017,7 +1017,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, @@ -1112,7 +1112,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, /* * reassembly.c */ -extern const struct proto_ops inet6_stream_ops; +extern struct proto_ops inet6_stream_ops; extern const struct proto_ops inet6_dgram_ops; extern const struct proto_ops inet6_sockraw_ops; @@ -1161,4 +1161,12 @@ int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, const struct in6_addr *addr, unsigned int mode); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +/* public func in tcp_ipv6.c */ +extern struct sock * tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); +extern struct inet_connection_sock_af_ops ipv6_specific; + #endif /* _NET_IPV6_H */ diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index 5c93e942c50b..3e7d2c0e79ca 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -24,8 +24,10 @@ struct ipv6_stub { const struct in6_addr *addr); int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, const struct in6_addr *addr); - int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, - struct dst_entry **dst, struct flowi6 *fl6); + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst); int (*ipv6_route_input)(struct sk_buff *skb); struct fib6_table *(*fib6_get_table)(struct net *net, u32 id); diff --git a/include/net/ipx.h b/include/net/ipx.h index baf090390998..9d1342807b59 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -47,11 +47,6 @@ struct ipxhdr { /* From af_ipx.c */ extern int sysctl_ipx_pprop_broadcasting; -static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) -{ - return (struct ipxhdr *)skb_transport_header(skb); -} - struct ipx_interface { /* IPX address */ __be32 if_netnum; diff --git a/include/net/ndisc.h b/include/net/ndisc.h index b2f715ca0567..b5ebeb3b0de0 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev, unsigned long now = jiffies; /* avoid dirtying neighbour */ - if (n->confirmed != now) - n->confirmed = now; + if (READ_ONCE(n->confirmed) != now) + WRITE_ONCE(n->confirmed, now); } rcu_read_unlock_bh(); } @@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev, unsigned long now = jiffies; /* avoid dirtying neighbour */ - if (n->confirmed != now) - n->confirmed = now; + if 
(READ_ONCE(n->confirmed) != now) + WRITE_ONCE(n->confirmed, now); } rcu_read_unlock_bh(); } diff --git a/include/net/neighbour.h b/include/net/neighbour.h index b8452cc0e059..8ec77bfdc1a4 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -72,7 +72,6 @@ struct neigh_parms { struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); - void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; @@ -468,7 +467,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb do { seq = read_seqbegin(&hh->hh_lock); - hh_len = hh->hh_len; + hh_len = READ_ONCE(hh->hh_len); if (likely(hh_len <= HH_DATA_MOD)) { hh_alen = HH_DATA_MOD; diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 112a6f40dfaf..993cb68a639f 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -28,6 +28,7 @@ enum nf_ct_ext_id { #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) NF_CT_EXT_SYNPROXY, #endif + NF_CT_EXT_AGENT, NF_CT_EXT_NUM, }; @@ -36,6 +37,7 @@ enum nf_ct_ext_id { #define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj #define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache +#define NF_CT_EXT_AGENT_TYPE struct nf_agent_stats #define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp #define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout #define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index c0c0791b1912..b353d9ec4661 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -213,6 +213,8 @@ struct netns_ipv4 { int sysctl_fib_multipath_hash_policy; #endif + int sysctl_tcp_max_orphans; + struct fib_notifier_ops *notifier_ops; unsigned int fib_seq; /* protected by rtnl_mutex */ @@ -221,5 +223,6 @@ struct netns_ipv4 { atomic_t rt_genid; siphash_key_t ip_id_key; + int sysctl_tcp_wan_timestamps; }; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 022a0fd1a5a4..865515266809 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -29,6 +29,7 @@ struct netns_sysctl_ipv6 { int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; + int ip6_tunnel_neigh_bypass; int multipath_hash_policy; int flowlabel_consistency; int auto_flowlabels; diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h index 830bdf345b17..48b40551095c 100644 --- a/include/net/netns/mib.h +++ b/include/net/netns/mib.h @@ -8,6 +8,7 @@ struct netns_mib { DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics); DEFINE_SNMP_STAT(struct linux_mib, net_statistics); + DEFINE_SNMP_STAT(struct linux_drop_mib, netdrop_statistics); DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index 286fd960896f..a1a8d45adb42 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -7,6 +7,7 @@ struct netns_nftables { struct list_head tables; struct list_head commit_list; + struct list_head module_list; struct mutex commit_mutex; unsigned int base_seq; u8 gencursor; diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 2cbcdbdec254..1121faa99c12 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -70,7 +70,12 @@ struct 
page_pool_params { struct page_pool { struct page_pool_params p; - u32 pages_state_hold_cnt; + struct delayed_work release_dw; + void (*disconnect)(void *); + unsigned long defer_start; + unsigned long defer_warn; + + u32 pages_state_hold_cnt; /* * Data structure for allocation side @@ -129,26 +134,20 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) struct page_pool *page_pool_create(const struct page_pool_params *params); -void __page_pool_free(struct page_pool *pool); -static inline void page_pool_free(struct page_pool *pool) -{ - /* When page_pool isn't compiled-in, net/core/xdp.c doesn't - * allow registering MEM_TYPE_PAGE_POOL, but shield linker. - */ #ifdef CONFIG_PAGE_POOL - __page_pool_free(pool); -#endif -} - -/* Drivers use this instead of page_pool_free */ +void page_pool_destroy(struct page_pool *pool); +void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *)); +#else static inline void page_pool_destroy(struct page_pool *pool) { - if (!pool) - return; - - page_pool_free(pool); } +static inline void page_pool_use_xdp_mem(struct page_pool *pool, + void (*disconnect)(void *)) +{ +} +#endif + /* Never call this directly, use helpers below */ void __page_pool_put_page(struct page_pool *pool, struct page *page, bool allow_direct); @@ -170,24 +169,6 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, __page_pool_put_page(pool, page, true); } -/* API user MUST have disconnected alloc-side (not allowed to call - * page_pool_alloc_pages()) before calling this. The free-side can - * still run concurrently, to handle in-flight packet-pages. - * - * A request to shutdown can fail (with false) if there are still - * in-flight packet-pages. - */ -bool __page_pool_request_shutdown(struct page_pool *pool); -static inline bool page_pool_request_shutdown(struct page_pool *pool) -{ - bool safe_to_remove = false; - -#ifdef CONFIG_PAGE_POOL - safe_to_remove = __page_pool_request_shutdown(pool); -#endif - return safe_to_remove; -} - /* Disconnects a page (from a page_pool). API users can have a need * to disconnect a page (from a page_pool), to allow it to be used as * a regular page (that will eventually be returned to the normal @@ -216,11 +197,6 @@ static inline bool is_page_pool_compiled_in(void) #endif } -static inline void page_pool_get(struct page_pool *pool) -{ - refcount_inc(&pool->user_cnt); -} - static inline bool page_pool_put(struct page_pool *pool) { return refcount_dec_and_test(&pool->user_cnt); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index e553fc80eb23..9976ad2f54fd 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -141,31 +141,38 @@ __cls_set_class(unsigned long *clp, unsigned long cl) return xchg(clp, cl); } -static inline unsigned long -cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl) +static inline void +__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base) { - unsigned long old_cl; + unsigned long cl; - sch_tree_lock(q); - old_cl = __cls_set_class(clp, cl); - sch_tree_unlock(q); - return old_cl; + cl = q->ops->cl_ops->bind_tcf(q, base, r->classid); + cl = __cls_set_class(&r->class, cl); + if (cl) + q->ops->cl_ops->unbind_tcf(q, cl); } static inline void tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base) { struct Qdisc *q = tp->chain->block->q; - unsigned long cl; /* Check q as it is not set for shared blocks. In that case, * setting class is not supported. 
*/ if (!q) return; - cl = q->ops->cl_ops->bind_tcf(q, base, r->classid); - cl = cls_set_class(q, &r->class, cl); - if (cl) + sch_tree_lock(q); + __tcf_bind_filter(q, r, base); + sch_tree_unlock(q); +} + +static inline void +__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r) +{ + unsigned long cl; + + if ((cl = __cls_set_class(&r->class, 0)) != 0) q->ops->cl_ops->unbind_tcf(q, cl); } @@ -173,12 +180,10 @@ static inline void tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r) { struct Qdisc *q = tp->chain->block->q; - unsigned long cl; if (!q) return; - if ((cl = __cls_set_class(&r->class, 0)) != 0) - q->ops->cl_ops->unbind_tcf(q, cl); + __tcf_unbind_filter(q, r); } struct tcf_exts { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d80acda231ae..9fb7cf1cdf36 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -149,8 +149,8 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) static inline bool qdisc_is_empty(const struct Qdisc *qdisc) { if (qdisc_is_percpu_stats(qdisc)) - return qdisc->empty; - return !qdisc->q.qlen; + return READ_ONCE(qdisc->empty); + return !READ_ONCE(qdisc->q.qlen); } static inline bool qdisc_run_begin(struct Qdisc *qdisc) @@ -158,7 +158,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) if (qdisc->flags & TCQ_F_NOLOCK) { if (!spin_trylock(&qdisc->seqlock)) return false; - qdisc->empty = false; + WRITE_ONCE(qdisc->empty, false); } else if (qdisc_is_running(qdisc)) { return false; } @@ -308,6 +308,7 @@ struct tcf_proto_ops { int (*delete)(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *tp); void (*walk)(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held); int (*reoffload)(struct tcf_proto *tp, bool add, @@ -317,7 +318,8 @@ struct tcf_proto_ops { void *type_data); void (*hw_del)(struct tcf_proto *tp, void *type_data); - void (*bind_class)(void *, u32, unsigned long); + void (*bind_class)(void *, u32, unsigned long, + void *, unsigned long); void * (*tmplt_create)(struct net *net, struct tcf_chain *chain, struct nlattr **tca, @@ -336,6 +338,10 @@ struct tcf_proto_ops { int flags; }; +/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags + * are expected to implement tcf_proto_ops->delete_empty(), otherwise race + * conditions can occur when filters are inserted/deleted simultaneously. + */ enum tcf_proto_ops_flags { TCF_PROTO_OPS_DOIT_UNLOCKED = 1, }; @@ -669,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); int skb_do_redirect(struct sk_buff *); -static inline void skb_reset_tc(struct sk_buff *skb) -{ -#ifdef CONFIG_NET_CLS_ACT - skb->tc_redirected = 0; -#endif -} - -static inline bool skb_is_tc_redirected(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_CLS_ACT - return skb->tc_redirected; -#else - return false; -#endif -} - static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 503fbc3cd819..2b6f3f13d5bc 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1239,6 +1239,9 @@ struct sctp_ep_common { /* What socket does this endpoint belong to? */ struct sock *sk; + /* Cache netns and it won't change once set */ + struct net *net; + /* This is where we receive inbound chunks. 
*/ struct sctp_inq inqueue; diff --git a/include/net/snmp.h b/include/net/snmp.h index cb8ced4380a6..5a3f4b3da672 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -111,6 +111,11 @@ struct linux_xfrm_mib { unsigned long mibs[LINUX_MIB_XFRMMAX]; }; +#define LINUX_DROPSTAT_MIB_MAX __LINUX_DROPSTAT_MIB_MAX +struct linux_drop_mib { + unsigned long mibs[LINUX_DROPSTAT_MIB_MAX]; +}; + #define DEFINE_SNMP_STAT(type, name) \ __typeof__(type) __percpu *name #define DEFINE_SNMP_STAT_ATOMIC(type, name) \ diff --git a/include/net/sock.h b/include/net/sock.h index 718e62fbe869..6c5a3809483e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -723,6 +723,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); } +static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); +} + static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { sock_hold(sk); @@ -1940,8 +1945,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); static inline void sk_dst_confirm(struct sock *sk) { - if (!sk->sk_dst_pending_confirm) - sk->sk_dst_pending_confirm = 1; + if (!READ_ONCE(sk->sk_dst_pending_confirm)) + WRITE_ONCE(sk->sk_dst_pending_confirm, 1); } static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) @@ -1951,10 +1956,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) unsigned long now = jiffies; /* avoid dirtying neighbour */ - if (n->confirmed != now) - n->confirmed = now; - if (sk && sk->sk_dst_pending_confirm) - sk->sk_dst_pending_confirm = 0; + if (READ_ONCE(n->confirmed) != now) + WRITE_ONCE(n->confirmed, now); + if (sk && READ_ONCE(sk->sk_dst_pending_confirm)) + WRITE_ONCE(sk->sk_dst_pending_confirm, 0); } } @@ -2584,9 +2589,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) */ static inline void sk_pacing_shift_update(struct sock *sk, int val) { - if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val) + if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) return; - sk->sk_pacing_shift = val; + WRITE_ONCE(sk->sk_pacing_shift, val); } /* if a socket is bound to a device, check that the given device diff --git a/include/net/tcp.h b/include/net/tcp.h index ab4eb5eb5d07..c25b9d4298bf 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -134,10 +134,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_DELACK_MIN 4U #define TCP_ATO_MIN 4U #endif -#define TCP_RTO_MAX ((unsigned)(120*HZ)) -#define TCP_RTO_MIN ((unsigned)(HZ/5)) + +#define TCP_RTO_MAX ((unsigned)(sysctl_tcp_rto_max*HZ)) +#define TCP_RTO_MIN ((unsigned)((sysctl_tcp_rto_min*HZ)/1000)) #define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */ -#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ +//#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ +#define TCP_TIMEOUT_INIT ((unsigned)((sysctl_tcp_init_rto*HZ)/1000)) + #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now * used as a fallback RTO for the * initial data transmission if no @@ -157,7 +160,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_KEEPCNT 127 #define MAX_TCP_SYNCNT 127 -#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */ +//#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK 
timer */ +#define TCP_SYNQ_INTERVAL ((unsigned)((sysctl_tcp_synack_rto_interval*HZ)/1000)) #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24) #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated @@ -240,8 +244,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* sysctl variables for tcp */ -extern int sysctl_tcp_max_orphans; extern long sysctl_tcp_mem[3]; +extern int sysctl_tcp_tw_ignore_syn_tsval_zero; +extern int sysctl_tcp_loss_init_cwnd; +extern int sysctl_tcp_no_delay_ack; +extern int sysctl_tcp_init_cwnd; +extern int sysctl_tcp_inherit_buffsize; +extern int sysctl_tcp_init_rto; +extern int sysctl_tcp_synack_rto_interval; +extern int sysctl_tcp_rto_min; +extern int sysctl_tcp_rto_max; +extern int sysctl_tcp_proc_sched; #define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */ #define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */ @@ -290,11 +303,12 @@ void sk_forced_mem_schedule(struct sock *sk, int size); static inline bool tcp_too_many_orphans(struct sock *sk, int shift) { struct percpu_counter *ocp = sk->sk_prot->orphan_count; + struct net *net = sock_net(sk); int orphans = percpu_counter_read_positive(ocp); - if (orphans << shift > sysctl_tcp_max_orphans) { + if (orphans << shift > net->ipv4.sysctl_tcp_max_orphans) { orphans = percpu_counter_sum_positive(ocp); - if (orphans << shift > sysctl_tcp_max_orphans) + if (orphans << shift > net->ipv4.sysctl_tcp_max_orphans) return true; } return false; @@ -494,15 +508,16 @@ static inline void tcp_synq_overflow(const struct sock *sk) reuse = rcu_dereference(sk->sk_reuseport_cb); if (likely(reuse)) { last_overflow = READ_ONCE(reuse->synq_overflow_ts); - if (time_after32(now, last_overflow + HZ)) + if (!time_between32(now, last_overflow, + last_overflow + HZ)) WRITE_ONCE(reuse->synq_overflow_ts, now); return; } } - last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; - if (time_after32(now, last_overflow + HZ)) - tcp_sk(sk)->rx_opt.ts_recent_stamp = now; + last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); + if (!time_between32(now, last_overflow, last_overflow + HZ)) + WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now); } /* syncookies: no recent synqueue overflow on this listening socket? */ @@ -517,13 +532,23 @@ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) reuse = rcu_dereference(sk->sk_reuseport_cb); if (likely(reuse)) { last_overflow = READ_ONCE(reuse->synq_overflow_ts); - return time_after32(now, last_overflow + - TCP_SYNCOOKIE_VALID); + return !time_between32(now, last_overflow - HZ, + last_overflow + + TCP_SYNCOOKIE_VALID); } } - last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; - return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID); + last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); + + /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID, + * then we're under synflood. However, we have to use + * 'last_overflow - HZ' as lower bound. That's because a concurrent + * tcp_synq_overflow() could update .ts_recent_stamp after we read + * jiffies but before we store .ts_recent_stamp into last_overflow, + * which could lead to rejecting a valid syncookie. 
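+ *
+ * Illustrative numbers (not from the original comment): with HZ=1000,
+ * a reader samples now=5000 while a racing tcp_synq_overflow() stores
+ * last_overflow=5001; without the -HZ slack,
+ * time_between32(5000, 5001, ...) is false and a genuine syncookie
+ * would be rejected.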
+ */ + return !time_between32(now, last_overflow - HZ, + last_overflow + TCP_SYNCOOKIE_VALID); } static inline u32 tcp_cookie_time(void) @@ -1127,6 +1152,23 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, bool is_sack_reneg, struct rate_sample *rs); void tcp_rate_check_app_limited(struct sock *sk); +static inline int is_private_ip(__be32 daddr) { + __u8 i, j; +#if defined(__LITTLE_ENDIAN) + i = daddr & 0xFF; + j = (daddr >> 8) & 0xFF; +#elif defined(__BIG_ENDIAN) + i = (daddr >> 24) & 0xFF; + j = (daddr >> 16) & 0xFF; +#else +#error Unknown Endian +#endif + return i == 11 || i == 30 || i == 9 || i == 127 || i == 10 || + (i == 172 && j >= 16 && j < 32) || + (i == 192 && j == 168) || + (i == 100 && j >= 64 && j <= 127 ); +} + /* These functions determine how the current flow behaves in respect of SACK * handling. SACK is negotiated with the peer, and therefore it can vary * between different flows. @@ -2121,7 +2163,8 @@ struct tcp_ulp_ops { /* initialize ulp */ int (*init)(struct sock *sk); /* update ulp */ - void (*update)(struct sock *sk, struct proto *p); + void (*update)(struct sock *sk, struct proto *p, + void (*write_space)(struct sock *sk)); /* cleanup ulp */ void (*release)(struct sock *sk); /* diagnostic */ @@ -2136,7 +2179,8 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type); int tcp_set_ulp(struct sock *sk, const char *name); void tcp_get_available_ulp(char *buf, size_t len); void tcp_cleanup_ulp(struct sock *sk); -void tcp_update_ulp(struct sock *sk, struct proto *p); +void tcp_update_ulp(struct sock *sk, struct proto *p, + void (*write_space)(struct sock *sk)); #define MODULE_ALIAS_TCP_ULP(name) \ __MODULE_INFO(alias, alias_userspace, name); \ diff --git a/include/net/tls.h b/include/net/tls.h index f4ad831eaa02..093abb5a3dff 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -122,7 +122,6 @@ struct tls_rec { struct list_head list; int tx_ready; int tx_flags; - int inplace_crypto; struct sk_msg msg_plaintext; struct sk_msg msg_encrypted; @@ -396,7 +395,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx, int flags); int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, int flags); -bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx); +void tls_free_partial_record(struct sock *sk, struct tls_context *ctx); static inline struct tls_msg *tls_msg(struct sk_buff *skb) { diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index a8f6020f1196..3f8a5a927108 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -57,7 +57,7 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) /* address family specific functions */ -extern const struct inet_connection_sock_af_ops ipv4_specific; +extern struct inet_connection_sock_af_ops ipv4_specific; void inet6_destroy_sock(struct sock *sk); diff --git a/include/net/udp.h b/include/net/udp.h index bad74f780831..afac782bb166 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -99,6 +99,7 @@ extern atomic_long_t udp_memory_allocated; extern long sysctl_udp_mem[3]; extern int sysctl_udp_rmem_min; extern int sysctl_udp_wmem_min; +extern int sysctl_udp_proc_sched; struct sk_buff; @@ -476,6 +477,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, if (!inet_get_convert_csum(sk)) features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + if (skb->pkt_type == PACKET_LOOPBACK) + skb->ip_summed = CHECKSUM_PARTIAL; + /* the GSO CB lays after the UDP one, no need to save and restore any * 
CB fragment */ diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h index 6a8cba6ea79a..a9d5b7603b89 100644 --- a/include/net/xdp_priv.h +++ b/include/net/xdp_priv.h @@ -12,12 +12,8 @@ struct xdp_mem_allocator { struct page_pool *page_pool; struct zero_copy_allocator *zc_alloc; }; - int disconnect_cnt; - unsigned long defer_start; struct rhash_head node; struct rcu_head rcu; - struct delayed_work defer_wq; - unsigned long defer_warn; }; #endif /* __LINUX_NET_XDP_PRIV_H__ */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e7e733add99f..30d50528d710 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2146,7 +2146,6 @@ struct ib_port_cache { struct ib_cache { rwlock_t lock; - struct ib_event_handler event_handler; }; struct ib_port_immutable { @@ -2590,7 +2589,11 @@ struct ib_device { struct rcu_head rcu_head; struct list_head event_handler_list; - spinlock_t event_handler_lock; + /* Protects event_handler_list */ + struct rw_semaphore event_handler_rwsem; + + /* Protects QP's event_handler calls and open_qp list */ + spinlock_t event_handler_lock; struct rw_semaphore client_data_rwsem; struct xarray client_data; @@ -2897,7 +2900,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, void ib_register_event_handler(struct ib_event_handler *event_handler); void ib_unregister_event_handler(struct ib_event_handler *event_handler); -void ib_dispatch_event(struct ib_event *event); +void ib_dispatch_event(const struct ib_event *event); int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr); @@ -4043,9 +4046,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, */ static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) { - struct device_dma_parameters *p = dev->dma_device->dma_parms; - - return p ? 
p->max_segment_size : UINT_MAX; + return dma_get_max_seg_size(dev->dma_device); } /** @@ -4254,6 +4255,9 @@ static inline int ib_check_mr_access(int flags) !(flags & IB_ACCESS_LOCAL_WRITE)) return -EINVAL; + if (flags & ~IB_ACCESS_SUPPORTED) + return -EINVAL; + return 0; } diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index b260c5fd2337..fb9dce4c6928 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -330,6 +331,7 @@ struct hdac_bus { bool chip_init:1; /* h/w initialized */ /* behavior flags */ + bool aligned_mmio:1; /* aligned MMIO access */ bool sync_write:1; /* sync after verb write */ bool use_posbuf:1; /* use position buffer */ bool snoop:1; /* enable snooping */ @@ -405,34 +407,61 @@ void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus); unsigned int snd_hdac_aligned_read(void __iomem *addr, unsigned int mask); void snd_hdac_aligned_write(unsigned int val, void __iomem *addr, unsigned int mask); -#define snd_hdac_reg_writeb(v, addr) snd_hdac_aligned_write(v, addr, 0xff) -#define snd_hdac_reg_writew(v, addr) snd_hdac_aligned_write(v, addr, 0xffff) -#define snd_hdac_reg_readb(addr) snd_hdac_aligned_read(addr, 0xff) -#define snd_hdac_reg_readw(addr) snd_hdac_aligned_read(addr, 0xffff) -#else /* CONFIG_SND_HDA_ALIGNED_MMIO */ -#define snd_hdac_reg_writeb(val, addr) writeb(val, addr) -#define snd_hdac_reg_writew(val, addr) writew(val, addr) -#define snd_hdac_reg_readb(addr) readb(addr) -#define snd_hdac_reg_readw(addr) readw(addr) -#endif /* CONFIG_SND_HDA_ALIGNED_MMIO */ -#define snd_hdac_reg_writel(val, addr) writel(val, addr) -#define snd_hdac_reg_readl(addr) readl(addr) +#define snd_hdac_aligned_mmio(bus) (bus)->aligned_mmio +#else +#define snd_hdac_aligned_mmio(bus) false +#define snd_hdac_aligned_read(addr, mask) 0 +#define snd_hdac_aligned_write(val, addr, mask) do {} while (0) +#endif + +static inline void snd_hdac_reg_writeb(struct hdac_bus *bus, void __iomem *addr, + u8 val) +{ + if (snd_hdac_aligned_mmio(bus)) + snd_hdac_aligned_write(val, addr, 0xff); + else + writeb(val, addr); +} + +static inline void snd_hdac_reg_writew(struct hdac_bus *bus, void __iomem *addr, + u16 val) +{ + if (snd_hdac_aligned_mmio(bus)) + snd_hdac_aligned_write(val, addr, 0xffff); + else + writew(val, addr); +} + +static inline u8 snd_hdac_reg_readb(struct hdac_bus *bus, void __iomem *addr) +{ + return snd_hdac_aligned_mmio(bus) ? + snd_hdac_aligned_read(addr, 0xff) : readb(addr); +} + +static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr) +{ + return snd_hdac_aligned_mmio(bus) ? 
+ snd_hdac_aligned_read(addr, 0xffff) : readw(addr); +} + +#define snd_hdac_reg_writel(bus, addr, val) writel(val, addr) +#define snd_hdac_reg_readl(bus, addr) readl(addr) /* * macros for easy use */ #define _snd_hdac_chip_writeb(chip, reg, value) \ - snd_hdac_reg_writeb(value, (chip)->remap_addr + (reg)) + snd_hdac_reg_writeb(chip, (chip)->remap_addr + (reg), value) #define _snd_hdac_chip_readb(chip, reg) \ - snd_hdac_reg_readb((chip)->remap_addr + (reg)) + snd_hdac_reg_readb(chip, (chip)->remap_addr + (reg)) #define _snd_hdac_chip_writew(chip, reg, value) \ - snd_hdac_reg_writew(value, (chip)->remap_addr + (reg)) + snd_hdac_reg_writew(chip, (chip)->remap_addr + (reg), value) #define _snd_hdac_chip_readw(chip, reg) \ - snd_hdac_reg_readw((chip)->remap_addr + (reg)) + snd_hdac_reg_readw(chip, (chip)->remap_addr + (reg)) #define _snd_hdac_chip_writel(chip, reg, value) \ - snd_hdac_reg_writel(value, (chip)->remap_addr + (reg)) + snd_hdac_reg_writel(chip, (chip)->remap_addr + (reg), value) #define _snd_hdac_chip_readl(chip, reg) \ - snd_hdac_reg_readl((chip)->remap_addr + (reg)) + snd_hdac_reg_readl(chip, (chip)->remap_addr + (reg)) /* read/write a register, pass without AZX_REG_ prefix */ #define snd_hdac_chip_writel(chip, reg, value) \ @@ -493,6 +522,7 @@ struct hdac_stream { bool prepared:1; bool no_period_wakeup:1; bool locked:1; + bool stripe:1; /* apply stripe control */ /* timestamp */ unsigned long start_wallclk; /* start + minimum wallclk */ @@ -539,17 +569,17 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus, */ /* read/write a register, pass without AZX_REG_ prefix */ #define snd_hdac_stream_writel(dev, reg, value) \ - snd_hdac_reg_writel(value, (dev)->sd_addr + AZX_REG_ ## reg) + snd_hdac_reg_writel((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value) #define snd_hdac_stream_writew(dev, reg, value) \ - snd_hdac_reg_writew(value, (dev)->sd_addr + AZX_REG_ ## reg) + snd_hdac_reg_writew((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value) #define snd_hdac_stream_writeb(dev, reg, value) \ - snd_hdac_reg_writeb(value, (dev)->sd_addr + AZX_REG_ ## reg) + snd_hdac_reg_writeb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value) #define snd_hdac_stream_readl(dev, reg) \ - snd_hdac_reg_readl((dev)->sd_addr + AZX_REG_ ## reg) + snd_hdac_reg_readl((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readw(dev, reg) \ - snd_hdac_reg_readw((dev)->sd_addr + AZX_REG_ ## reg) + snd_hdac_reg_readw((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readb(dev, reg) \ - snd_hdac_reg_readb((dev)->sd_addr + AZX_REG_ ## reg) + snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) /* update a register, pass without AZX_REG_ prefix */ #define snd_hdac_stream_updatel(dev, reg, mask, val) \ diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 40ab20439fee..a36b7227a15a 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -77,9 +77,9 @@ struct snd_rawmidi_substream { struct list_head list; /* list of all substream for given stream */ int stream; /* direction */ int number; /* substream number */ - unsigned int opened: 1, /* open flag */ - append: 1, /* append flag (merge more streams) */ - active_sensing: 1; /* send active sensing when close */ + bool opened; /* open flag */ + bool append; /* append flag (merge more streams) */ + bool active_sensing; /* send active sensing when close */ int use_count; /* use counter (for output) */ size_t bytes; struct snd_rawmidi *rmidi; diff --git a/include/sound/simple_card_utils.h 
b/include/sound/simple_card_utils.h index 31f76b6abf71..bbdd1542d6f1 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -8,6 +8,7 @@ #ifndef __SIMPLE_CARD_UTILS_H #define __SIMPLE_CARD_UTILS_H +#include #include #define asoc_simple_init_hp(card, sjack, prefix) \ diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index d5ec4fac82ae..c612cabbc378 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -233,7 +233,7 @@ enum afs_cb_break_reason { EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ EM(afs_call_trace_wake, "WAKE ") \ - E_(afs_call_trace_work, "WORK ") + E_(afs_call_trace_work, "QUEUE") #define afs_server_traces \ EM(afs_server_trace_alloc, "ALLOC ") \ @@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state, TRACE_EVENT(afs_lookup, TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name, - struct afs_vnode *vnode), + struct afs_fid *fid), - TP_ARGS(dvnode, name, vnode), + TP_ARGS(dvnode, name, fid), TP_STRUCT__entry( __field_struct(struct afs_fid, dfid ) @@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup, TP_fast_assign( int __len = min_t(int, name->len, 23); __entry->dfid = dvnode->fid; - if (vnode) { - __entry->fid = vnode->fid; - } else { - __entry->fid.vid = 0; - __entry->fid.vnode = 0; - __entry->fid.unique = 0; - } + __entry->fid = *fid; memcpy(__entry->name, name->name, __len); __entry->name[__len] = 0; ), diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index d68e9e536814..6fb0085ca0e2 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -726,24 +726,29 @@ TRACE_EVENT(ext4_mb_release_group_pa, ); TRACE_EVENT(ext4_discard_preallocations, - TP_PROTO(struct inode *inode), + TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed), - TP_ARGS(inode), + TP_ARGS(inode, len, needed), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned int, len ) + __field( unsigned int, needed ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; + __entry->len = len; + __entry->needed = needed; ), - TP_printk("dev %d,%d ino %lu", + TP_printk("dev %d,%d ino %lu len: %u needed %u", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino) + (unsigned long) __entry->ino, __entry->len, + __entry->needed) ); TRACE_EVENT(ext4_mb_discard_preallocations, diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index dd4db334bd63..d82a0f4e824d 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -31,7 +31,8 @@ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ - EMe(SCAN_TRUNCATED, "truncated") \ + EM( SCAN_TRUNCATED, "truncated") \ + EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \ #undef EM #undef EMe diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index 95fba0471e5b..3f249e150c0c 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h @@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template, TP_ARGS(ip, parent_ip), TP_STRUCT__entry( - __field(u32, caller_offs) - __field(u32, parent_offs) + __field(s32, caller_offs) + __field(s32, parent_offs) ), TP_fast_assign( - __entry->caller_offs = (u32)(ip - (unsigned long)_stext); - __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext); + 
__entry->caller_offs = (s32)(ip - (unsigned long)_stext); + __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext); ), TP_printk("caller=%pS parent=%pS", diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 694bd040cf51..fdd31c5fd126 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -442,7 +442,7 @@ TRACE_EVENT_RCU(rcu_fqs, */ TRACE_EVENT_RCU(rcu_dyntick, - TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks), + TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks), TP_ARGS(polarity, oldnesting, newnesting, dynticks), @@ -457,7 +457,7 @@ TRACE_EVENT_RCU(rcu_dyntick, __entry->polarity = polarity; __entry->oldnesting = oldnesting; __entry->newnesting = newnesting; - __entry->dynticks = atomic_read(&dynticks); + __entry->dynticks = dynticks; ), TP_printk("%s %lx %lx %#3x", __entry->polarity, diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index a13830616107..7fd11ec1c9a4 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -735,6 +735,31 @@ TRACE_EVENT(xprtrdma_post_recvs, ) ); +TRACE_EVENT(xprtrdma_post_linv, + TP_PROTO( + const struct rpcrdma_req *req, + int status + ), + + TP_ARGS(req, status), + + TP_STRUCT__entry( + __field(const void *, req) + __field(int, status) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->req = req; + __entry->status = status; + __entry->xid = be32_to_cpu(req->rl_slot.rq_xid); + ), + + TP_printk("req=%p xid=0x%08x status=%d", + __entry->req, __entry->xid, __entry->status + ) +); + /** ** Completion events **/ diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h index b048694070e2..37342a13c9cb 100644 --- a/include/trace/events/wbt.h +++ b/include/trace/events/wbt.h @@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->rmean = stat[0].mean; __entry->rmin = stat[0].min; __entry->rmax = stat[0].max; @@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->lat = div_u64(lat, 1000); ), @@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->msg = msg; __entry->step = step; __entry->window = div_u64(window, 1000); @@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->status = status; __entry->step = step; __entry->inflight = inflight; diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index c2ce6480b4b1..66282552db20 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -67,8 +67,8 @@ DECLARE_EVENT_CLASS(writeback_page_template, TP_fast_assign( strscpy_pad(__entry->name, - mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", - 32); + bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : + NULL), 32); __entry->ino = mapping ? 
mapping->host->i_ino : 0; __entry->index = page->index; ), @@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, struct backing_dev_info *bdi = inode_to_bdi(inode); /* may be called for files on pseudo FSes w/ unregistered bdi */ - strscpy_pad(__entry->name, - bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); + strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->flags = flags; @@ -193,7 +192,7 @@ TRACE_EVENT(inode_foreign_history, ), TP_fast_assign( - strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32); + strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); __entry->history = history; @@ -222,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs, ), TP_fast_assign( - strncpy(__entry->name, dev_name(old_wb->bdi->dev), 32); + strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32); __entry->ino = inode->i_ino; __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb); __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb); @@ -255,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty, struct address_space *mapping = page_mapping(page); struct inode *inode = mapping ? mapping->host : NULL; - strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + strncpy(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->bdi_id = wb->bdi->id; __entry->ino = inode ? inode->i_ino : 0; __entry->memcg_id = wb->memcg_css->id; @@ -288,7 +287,7 @@ TRACE_EVENT(flush_foreign, ), TP_fast_assign( - strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + strncpy(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); __entry->frn_bdi_id = frn_bdi_id; __entry->frn_memcg_id = frn_memcg_id; @@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, TP_fast_assign( strscpy_pad(__entry->name, - dev_name(inode_to_bdi(inode)->dev), 32); + bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); @@ -361,9 +360,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - strscpy_pad(__entry->name, - wb->bdi->dev ? dev_name(wb->bdi->dev) : - "(unknown)", 32); + strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->nr_pages = work->nr_pages; __entry->sb_dev = work->sb ? 
work->sb->s_dev : 0; __entry->sync_mode = work->sync_mode; @@ -416,7 +413,7 @@ DECLARE_EVENT_CLASS(writeback_class, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: cgroup_ino=%u", @@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register, __array(char, name, 32) ), TP_fast_assign( - strscpy_pad(__entry->name, dev_name(bdi->dev), 32); + strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); ), TP_printk("bdi %s", __entry->name @@ -463,7 +460,7 @@ DECLARE_EVENT_CLASS(wbc_class, ), TP_fast_assign( - strscpy_pad(__entry->name, dev_name(bdi->dev), 32); + strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->sync_mode = wbc->sync_mode; @@ -514,7 +511,7 @@ TRACE_EVENT(writeback_queue_io, ), TP_fast_assign( unsigned long *older_than_this = work->older_than_this; - strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->older = older_than_this ? *older_than_this : 0; __entry->age = older_than_this ? (jiffies - *older_than_this) * 1000 / HZ : -1; @@ -600,7 +597,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, ), TP_fast_assign( - strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->write_bw = KBps(wb->write_bandwidth); __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); __entry->dirty_rate = KBps(dirty_rate); @@ -665,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages, TP_fast_assign( unsigned long freerun = (thresh + bg_thresh) / 2; - strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->limit = global_wb_domain.dirty_limit; __entry->setpoint = (global_wb_domain.dirty_limit + @@ -726,7 +723,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue, TP_fast_assign( strscpy_pad(__entry->name, - dev_name(inode_to_bdi(inode)->dev), 32); + bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; @@ -800,7 +797,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, TP_fast_assign( strscpy_pad(__entry->name, - dev_name(inode_to_bdi(inode)->dev), 32); + bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 8c8420230a10..c79943e82a54 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -317,19 +317,15 @@ __MEM_TYPE_MAP(__MEM_TYPE_TP_FN) TRACE_EVENT(mem_disconnect, - TP_PROTO(const struct xdp_mem_allocator *xa, - bool safe_to_remove, bool force), + TP_PROTO(const struct xdp_mem_allocator *xa), - TP_ARGS(xa, safe_to_remove, force), + TP_ARGS(xa), TP_STRUCT__entry( __field(const struct xdp_mem_allocator *, xa) __field(u32, mem_id) __field(u32, mem_type) __field(const void *, allocator) - __field(bool, safe_to_remove) - __field(bool, force) - __field(int, disconnect_cnt) ), TP_fast_assign( @@ -337,19 +333,12 @@ TRACE_EVENT(mem_disconnect, __entry->mem_id = xa->mem.id; __entry->mem_type = xa->mem.type; __entry->allocator = xa->allocator; - __entry->safe_to_remove = safe_to_remove; - __entry->force = force; - __entry->disconnect_cnt = xa->disconnect_cnt; ), - TP_printk("mem_id=%d mem_type=%s 
allocator=%p" - " safe_to_remove=%s force=%s disconnect_cnt=%d", + TP_printk("mem_id=%d mem_type=%s allocator=%p", __entry->mem_id, __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), - __entry->allocator, - __entry->safe_to_remove ? "true" : "false", - __entry->force ? "true" : "false", - __entry->disconnect_cnt + __entry->allocator ) ); diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index 9a0e8af21310..a5ccfa67bc5c 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h @@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback, TP_PROTO(xen_mc_callback_fn_t fn, void *data), TP_ARGS(fn, data), TP_STRUCT__entry( - __field(xen_mc_callback_fn_t, fn) + /* + * Use field_struct to avoid is_signed_type() + * comparison of a function pointer. + */ + __field_struct(xen_mc_callback_fn_t, fn) __field(void *, data) ), TP_fast_assign( diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h index 8997d5068c08..4511b85c84df 100644 --- a/include/uapi/linux/cec-funcs.h +++ b/include/uapi/linux/cec-funcs.h @@ -923,7 +923,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg, msg->len = 3; msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS; msg->msg[2] = status_req; - msg->reply = reply ? CEC_MSG_DECK_STATUS : 0; + msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ? + CEC_MSG_DECK_STATUS : 0; } static inline void cec_ops_give_deck_status(const struct cec_msg *msg, @@ -1027,7 +1028,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg, msg->len = 3; msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS; msg->msg[2] = status_req; - msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0; + msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ? + CEC_MSG_TUNER_DEVICE_STATUS : 0; } static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg, diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h index 5704fa0292b5..423859e489c7 100644 --- a/include/uapi/linux/cec.h +++ b/include/uapi/linux/cec.h @@ -768,8 +768,8 @@ struct cec_event { #define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93 #define CEC_MSG_TUNER_DEVICE_STATUS 0x07 /* Recording Flag Operand (rec_flag) */ -#define CEC_OP_REC_FLAG_USED 0 -#define CEC_OP_REC_FLAG_NOT_USED 1 +#define CEC_OP_REC_FLAG_NOT_USED 0 +#define CEC_OP_REC_FLAG_USED 1 /* Tuner Display Info Operand (tuner_display_info) */ #define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0 #define CEC_OP_TUNER_DISPLAY_INFO_NONE 1 diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h index aac550a52f80..8847dbf24151 100644 --- a/include/uapi/linux/coresight-stm.h +++ b/include/uapi/linux/coresight-stm.h @@ -2,8 +2,10 @@ #ifndef __UAPI_CORESIGHT_STM_H_ #define __UAPI_CORESIGHT_STM_H_ -#define STM_FLAG_TIMESTAMPED BIT(3) -#define STM_FLAG_GUARANTEED BIT(7) +#include + +#define STM_FLAG_TIMESTAMPED _BITUL(3) +#define STM_FLAG_GUARANTEED _BITUL(7) /* * The CoreSight STM supports guaranteed and invariant timing diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h index 790585f0e61b..85560927eff7 100644 --- a/include/uapi/linux/if_bonding.h +++ b/include/uapi/linux/if_bonding.h @@ -82,7 +82,7 @@ #define BOND_STATE_ACTIVE 0 /* link is active */ #define BOND_STATE_BACKUP 1 /* link is backup */ -#define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ +#define BOND_DEFAULT_MAX_BONDS 0 /* Default maximum number of devices to support */ #define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ diff --git 
a/include/uapi/linux/input.h b/include/uapi/linux/input.h index f056b2a00d5c..9a61c28ed3ae 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -34,6 +34,7 @@ struct input_event { __kernel_ulong_t __sec; #if defined(__sparc__) && defined(__arch64__) unsigned int __usec; + unsigned int __pad; #else __kernel_ulong_t __usec; #endif diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h index e4a0d9a9a9e8..4d6cf3888255 100644 --- a/include/uapi/linux/msg.h +++ b/include/uapi/linux/msg.h @@ -75,8 +75,8 @@ struct msginfo { */ #define MSGMNI 32000 /* <= IPCMNI */ /* max # of msg queue identifiers */ -#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ -#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ +#define MSGMAX 65536 /* <= INT_MAX */ /* max size of message (bytes) */ +#define MSGMNB 65536 /* <= INT_MAX */ /* default max size of a message queue */ /* unused */ #define MSGPOOL (MSGMNI * MSGMNB / 1024) /* size in kbytes of message pool */ diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h index 4bc6d1a08781..b4d804a9fccb 100644 --- a/include/uapi/linux/netfilter/xt_sctp.h +++ b/include/uapi/linux/netfilter/xt_sctp.h @@ -41,19 +41,19 @@ struct xt_sctp_info { #define SCTP_CHUNKMAP_SET(chunkmap, type) \ do { \ (chunkmap)[type / bytes(__u32)] |= \ - 1 << (type % bytes(__u32)); \ + 1u << (type % bytes(__u32)); \ } while (0) #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ do { \ (chunkmap)[type / bytes(__u32)] &= \ - ~(1 << (type % bytes(__u32))); \ + ~(1u << (type % bytes(__u32))); \ } while (0) #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ ({ \ ((chunkmap)[type / bytes (__u32)] & \ - (1 << (type % bytes (__u32)))) ? 1: 0; \ + (1u << (type % bytes (__u32)))) ? 
1: 0; \ }) #define SCTP_CHUNKMAP_RESET(chunkmap) \ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 0a4d73317759..21db87138514 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -32,6 +32,9 @@ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG +/* For tkernel GPU track feature */ +#define NETLINK_USER 30 + #define MAX_LINKS 32 struct sockaddr_nl { diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index 50e991952c97..ed2a96f43ce4 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -9,7 +9,7 @@ #ifndef _UAPI_SERIO_H #define _UAPI_SERIO_H - +#include #include #define SPIOCSTYPE _IOW('q', 0x01, unsigned long) @@ -18,10 +18,10 @@ /* * bit masks for use in "interrupt" flags (3rd argument) */ -#define SERIO_TIMEOUT BIT(0) -#define SERIO_PARITY BIT(1) -#define SERIO_FRAME BIT(2) -#define SERIO_OOB_DATA BIT(3) +#define SERIO_TIMEOUT _BITUL(0) +#define SERIO_PARITY _BITUL(1) +#define SERIO_FRAME _BITUL(2) +#define SERIO_OOB_DATA _BITUL(3) /* * Serio types diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 549a31c29f7d..454a29de96cd 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -323,4 +323,44 @@ enum __LINUX_MIB_XFRMMAX }; +enum { + LINUX_MIB_DROPSTAT_NUM = 0, + LINUX_MIB_TCPDQNODATADROP, + LINUX_MIB_TCPDQNOMEMDROP, + LINUX_MIB_TCPCHECKSEQDROP, + LINUX_MIB_TCPCHECKRSTDROP, + LINUX_MIB_TCPNOACKDROP, + LINUX_MIB_TCPINVALIDACKDROP, + LINUX_MIB_TCPRSTDROP, + LINUX_MIB_TCPNOSYNDROP, + LINUX_MIB_TCPPAWSDROP, + LINUX_MIB_TCPBADPKTDROP, + LINUX_MIB_TCPNOSOCKETDROP, + LINUX_MIB_TCPXFRMDROP, + LINUX_MIB_TCPFILTERDROP, + LINUX_MIB_TCPMD5DROP, + LINUX_MIB_TCPTWDROP, + LINUX_MIB_TCPRCVESTDROP, + LINUX_MIB_TCPNSKDROP, + LINUX_MIB_TCPCHILDPROCDROP, + LINUX_MIB_TCPRCVSTATEPROCDROP, + LINUX_MIB_TCPOFODUPDROP, + LINUX_MIB_TCPURGDROP, + LINUX_MIB_TCPOOWDROP, + __LINUX_DROPSTAT_MIB_MAX +}; + +enum { + DEV_MIB_NUM = 0, + DEV_MIB_IPV4_RX_PKTS, + DEV_MIB_IPV4_RX_BYTES, + DEV_MIB_IPV4_TX_PKTS, + DEV_MIB_IPV4_TX_BYTES, + DEV_MIB_IPV6_RX_PKTS, + DEV_MIB_IPV6_RX_BYTES, + DEV_MIB_IPV6_TX_PKTS, + DEV_MIB_IPV6_TX_BYTES, + __DEV_MIB_MAX +}; + #endif /* _LINUX_SNMP_H */ diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 23cd84868cc3..7272f85d6d6a 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -4,6 +4,7 @@ #include #include +#include #include /* @@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) __fswab64(x)) #endif +static __always_inline unsigned long __swab(const unsigned long y) +{ +#if __BITS_PER_LONG == 64 + return __swab64(y); +#else /* __BITS_PER_LONG == 32 */ + return __swab32(y); +#endif +} + /** * __swahw32 - return a word-swapped 32-bit value * @x: value to wordswap diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 87aa2a6d9125..a95349ec195c 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -426,6 +426,9 @@ enum NET_TCP_ALLOWED_CONG_CONTROL=123, NET_TCP_MAX_SSTHRESH=124, NET_TCP_FRTO_RESPONSE=125, + NET_TCP_TW_IGNORE_SYN_TSVAL_ZERO=126, + NET_IPV4_TCP_RTO_MIN=127, + NET_IPV4_TCP_RTO_MAX=128, }; enum { diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 81e697978e8b..9066f2e16f7b 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -129,6 +129,12 @@ enum { #define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */ +#define TCP_FULLNAT_REAL 10000 + +struct tcp_fullnat_real_opt { + u32 real_ip; + u16 
real_port; +}; #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h index 5f72af35b3ed..ad22079125bf 100644 --- a/include/uapi/linux/usb/charger.h +++ b/include/uapi/linux/usb/charger.h @@ -14,18 +14,18 @@ * ACA (Accessory Charger Adapters) */ enum usb_charger_type { - UNKNOWN_TYPE, - SDP_TYPE, - DCP_TYPE, - CDP_TYPE, - ACA_TYPE, + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, }; /* USB charger state */ enum usb_charger_state { - USB_CHARGER_DEFAULT, - USB_CHARGER_PRESENT, - USB_CHARGER_ABSENT, + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, }; #endif /* _UAPI__LINUX_USB_CHARGER_H */ diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h deleted file mode 100644 index f80495baa969..000000000000 --- a/include/uapi/rdma/nes-abi.h +++ /dev/null @@ -1,115 +0,0 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ -/* - * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. - * Copyright (c) 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef NES_ABI_USER_H -#define NES_ABI_USER_H - -#include - -#define NES_ABI_USERSPACE_VER 2 -#define NES_ABI_KERNEL_VER 2 - -/* - * Make sure that all structs defined in this file remain laid out so - * that they pack the same way on 32-bit and 64-bit architectures (to - * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in __u64 - * instead. 
- */ - -struct nes_alloc_ucontext_req { - __u32 reserved32; - __u8 userspace_ver; - __u8 reserved8[3]; -}; - -struct nes_alloc_ucontext_resp { - __u32 max_pds; /* maximum pds allowed for this user process */ - __u32 max_qps; /* maximum qps allowed for this user process */ - __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */ - __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */ - __u8 kernel_ver; - __u8 reserved[2]; -}; - -struct nes_alloc_pd_resp { - __u32 pd_id; - __u32 mmap_db_index; -}; - -struct nes_create_cq_req { - __aligned_u64 user_cq_buffer; - __u32 mcrqf; - __u8 reserved[4]; -}; - -struct nes_create_qp_req { - __aligned_u64 user_wqe_buffers; - __aligned_u64 user_qp_buffer; -}; - -enum iwnes_memreg_type { - IWNES_MEMREG_TYPE_MEM = 0x0000, - IWNES_MEMREG_TYPE_QP = 0x0001, - IWNES_MEMREG_TYPE_CQ = 0x0002, - IWNES_MEMREG_TYPE_MW = 0x0003, - IWNES_MEMREG_TYPE_FMR = 0x0004, - IWNES_MEMREG_TYPE_FMEM = 0x0005, -}; - -struct nes_mem_reg_req { - __u32 reg_type; /* indicates if id is memory, QP or CQ */ - __u32 reserved; -}; - -struct nes_create_cq_resp { - __u32 cq_id; - __u32 cq_size; - __u32 mmap_db_index; - __u32 reserved; -}; - -struct nes_create_qp_resp { - __u32 qp_id; - __u32 actual_sq_size; - __u32 actual_rq_size; - __u32 mmap_sq_db_index; - __u32 mmap_rq_db_index; - __u32 nes_drv_opt; -}; - -#endif /* NES_ABI_USER_H */ diff --git a/init/Kconfig b/init/Kconfig index b4daad2bac23..0bffc8fdbf3d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -54,6 +54,7 @@ config CC_DISABLE_WARN_MAYBE_UNINITIALIZED config CONSTRUCTORS bool + depends on !UML config IRQ_WORK bool @@ -104,29 +105,9 @@ config COMPILE_TEST here. If you are a user/distributor, say N here to exclude useless drivers to be distributed. -config HEADER_TEST - bool "Compile test headers that should be standalone compilable" - help - Compile test headers listed in header-test-y target to ensure they are - self-contained, i.e. compilable as standalone units. - - If you are a developer or tester and want to ensure the requested - headers are self-contained, say Y here. Otherwise, choose N. - -config KERNEL_HEADER_TEST - bool "Compile test kernel headers" - depends on HEADER_TEST - help - Headers in include/ are used to build external moduls. - Compile test them to ensure they are self-contained, i.e. - compilable as standalone units. - - If you are a developer or tester and want to ensure the headers - in include/ are self-contained, say Y here. Otherwise, choose N. - config UAPI_HEADER_TEST bool "Compile test UAPI headers" - depends on HEADER_TEST && HEADERS_INSTALL && CC_CAN_LINK + depends on HEADERS_INSTALL && CC_CAN_LINK help Compile test headers exported to user-space to ensure they are self-contained, i.e. compilable as standalone units. 
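A note on the layout rule spelled out in the removed nes-abi.h header above: it holds for every exported uapi structure, not just this driver. The layout must be identical for 32-bit and 64-bit userspace, so pointers are carried in __u64 fields and 64-bit members are explicitly aligned and padded. A minimal sketch of the pattern (the struct and field names are illustrative, not part of any real ABI):

#include <linux/types.h>

/*
 * Hypothetical request following the nes-abi.h rule: no bare pointers,
 * explicit 64-bit alignment, and manual padding so sizeof() and field
 * offsets match between 32-bit and 64-bit kernels and userspace.
 */
struct example_create_req {
	__aligned_u64 user_buffer;	/* userspace pointer, cast to __u64 */
	__u32 length;
	__u32 reserved;			/* explicit pad keeps the layout stable */
};

On the kernel side such a field is turned back into a pointer with u64_to_user_ptr(), as the bpf hunks later in this patch do for info->jited_prog_insns and attr->info.info.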
diff --git a/init/do_mounts.c b/init/do_mounts.c index 9634ecf3743d..740207ae3897 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -422,6 +422,22 @@ retry: case -EACCES: case -EINVAL: continue; +#ifdef CONFIG_BLOCK + case -ENXIO: +#define OLD_NAME "cciss/c0d0p" +#define OLD_SIZE (sizeof(OLD_NAME)-1) + if( MAJOR(ROOT_DEV) != SCSI_DISK0_MAJOR && + root_device_name != NULL && + !strncmp(root_device_name, OLD_NAME, OLD_SIZE) && + isdigit(root_device_name[OLD_SIZE])) + { + ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, root_device_name[OLD_SIZE] - '0'); + create_dev(name, ROOT_DEV); + printk(KERN_INFO "hpsa: change root device %s to sda%c\n", + root_device_name, root_device_name[OLD_SIZE]); + goto retry; + } +#endif } /* * Allow the user to distinguish between failed sys_open diff --git a/init/main.c b/init/main.c index 91f6ebb30ef0..c0206c507eba 100644 --- a/init/main.c +++ b/init/main.c @@ -553,6 +553,7 @@ static void __init mm_init(void) * bigger than MAX_ORDER unless SPARSEMEM. */ page_ext_init_flatmem(); + init_debug_pagealloc(); report_meminit(); mem_init(); kmem_cache_init(); diff --git a/ipc/msg.c b/ipc/msg.c index 8dec945fa030..767587ab45a3 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -377,7 +377,7 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) * NOTE: no locks must be held, the rwsem is taken inside this function. */ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, - struct msqid64_ds *msqid64) + struct ipc64_perm *perm, int msg_qbytes) { struct kern_ipc_perm *ipcp; struct msg_queue *msq; @@ -387,7 +387,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, rcu_read_lock(); ipcp = ipcctl_obtain_check(ns, &msg_ids(ns), msqid, cmd, - &msqid64->msg_perm, msqid64->msg_qbytes); + perm, msg_qbytes); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; @@ -409,18 +409,18 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, { DEFINE_WAKE_Q(wake_q); - if (msqid64->msg_qbytes > ns->msg_ctlmnb && + if (msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) { err = -EPERM; goto out_unlock1; } ipc_lock_object(&msq->q_perm); - err = ipc_update_perm(&msqid64->msg_perm, ipcp); + err = ipc_update_perm(perm, ipcp); if (err) goto out_unlock0; - msq->q_qbytes = msqid64->msg_qbytes; + msq->q_qbytes = msg_qbytes; msq->q_ctime = ktime_get_real_seconds(); /* @@ -601,9 +601,10 @@ static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int ver case IPC_SET: if (copy_msqid_from_user(&msqid64, buf, version)) return -EFAULT; - /* fallthru */ + return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, + msqid64.msg_qbytes); case IPC_RMID: - return msgctl_down(ns, msqid, cmd, &msqid64); + return msgctl_down(ns, msqid, cmd, NULL, 0); default: return -EINVAL; } @@ -735,9 +736,9 @@ static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int versio case IPC_SET: if (copy_compat_msqid_from_user(&msqid64, uptr, version)) return -EFAULT; - /* fallthru */ + return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); case IPC_RMID: - return msgctl_down(ns, msqid, cmd, &msqid64); + return msgctl_down(ns, msqid, cmd, NULL, 0); default: return -EINVAL; } diff --git a/ipc/sem.c b/ipc/sem.c index ec97a7072413..fe12ea8dd2b3 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -2368,11 +2368,9 @@ void exit_sem(struct task_struct *tsk) ipc_assert_locked_object(&sma->sem_perm); list_del(&un->list_id); - /* we are the last process using this ulp, acquiring ulp->lock - * isn't required. 
Besides that, we are also protected against - * IPC_RMID as we hold sma->sem_perm lock now - */ + spin_lock(&ulp->lock); list_del_rcu(&un->list_proc); + spin_unlock(&ulp->lock); /* perform adjustments registered in un */ for (i = 0; i < sma->sem_nsems; i++) { diff --git a/kernel/Makefile b/kernel/Makefile index daad787fb795..fa543a44810e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -12,6 +12,8 @@ obj-y = fork.o exec_domain.o panic.o \ notifier.o ksysfs.o cred.o reboot.o \ async.o range.o smpboot.o ucount.o +obj-y += tkernel/ + obj-$(CONFIG_MODULES) += kmod.o obj-$(CONFIG_MULTIUSER) += groups.o diff --git a/kernel/audit.c b/kernel/audit.c index da8dc0db5bd3..dfc45063cb56 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1100,13 +1100,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature audit_log_end(ab); } -static int audit_set_feature(struct sk_buff *skb) +static int audit_set_feature(struct audit_features *uaf) { - struct audit_features *uaf; int i; BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names)); - uaf = nlmsg_data(nlmsg_hdr(skb)); /* if there is ever a version 2 we should handle that here */ @@ -1174,6 +1172,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { u32 seq; void *data; + int data_len; int err; struct audit_buffer *ab; u16 msg_type = nlh->nlmsg_type; @@ -1187,6 +1186,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) seq = nlh->nlmsg_seq; data = nlmsg_data(nlh); + data_len = nlmsg_len(nlh); switch (msg_type) { case AUDIT_GET: { @@ -1210,7 +1210,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct audit_status s; memset(&s, 0, sizeof(s)); /* guard against past and future API changes */ - memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh))); + memcpy(&s, data, min_t(size_t, sizeof(s), data_len)); if (s.mask & AUDIT_STATUS_ENABLED) { err = audit_set_enabled(s.enabled); if (err < 0) @@ -1314,7 +1314,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return err; break; case AUDIT_SET_FEATURE: - err = audit_set_feature(skb); + if (data_len < sizeof(struct audit_features)) + return -EINVAL; + err = audit_set_feature(data); if (err) return err; break; @@ -1326,6 +1328,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) err = audit_filter(msg_type, AUDIT_FILTER_USER); if (err == 1) { /* match or error */ + char *str = data; + err = 0; if (msg_type == AUDIT_USER_TTY) { err = tty_audit_push(); @@ -1333,26 +1337,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) break; } audit_log_user_recv_msg(&ab, msg_type); - if (msg_type != AUDIT_USER_TTY) + if (msg_type != AUDIT_USER_TTY) { + /* ensure NULL termination */ + str[data_len - 1] = '\0'; audit_log_format(ab, " msg='%.*s'", AUDIT_MESSAGE_TEXT_MAX, - (char *)data); - else { - int size; - + str); + } else { audit_log_format(ab, " data="); - size = nlmsg_len(nlh); - if (size > 0 && - ((unsigned char *)data)[size - 1] == '\0') - size--; - audit_log_n_untrustedstring(ab, data, size); + if (data_len > 0 && str[data_len - 1] == '\0') + data_len--; + audit_log_n_untrustedstring(ab, str, data_len); } audit_log_end(ab); } break; case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: - if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) + if (data_len < sizeof(struct audit_rule_data)) return -EINVAL; if (audit_enabled == AUDIT_LOCKED) { audit_log_common_recv_msg(audit_context(), &ab, @@ -1364,7 +1366,7 @@ static int 
audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) audit_log_end(ab); return -EPERM; } - err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh)); + err = audit_rule_change(msg_type, seq, data, data_len); break; case AUDIT_LIST_RULES: err = audit_list_rules_send(skb, seq); @@ -1379,7 +1381,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_MAKE_EQUIV: { void *bufp = data; u32 sizes[2]; - size_t msglen = nlmsg_len(nlh); + size_t msglen = data_len; char *old, *new; err = -EINVAL; @@ -1455,7 +1457,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) memset(&s, 0, sizeof(s)); /* guard against past and future API changes */ - memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh))); + memcpy(&s, data, min_t(size_t, sizeof(s), data_len)); /* check if new data is valid */ if ((s.enabled != 0 && s.enabled != 1) || (s.log_passwd != 0 && s.log_passwd != 1)) diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index b0126e9c0743..026e34da4ace 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -456,6 +456,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, bufp = data->buf; for (i = 0; i < data->field_count; i++) { struct audit_field *f = &entry->rule.fields[i]; + u32 f_val; err = -EINVAL; @@ -464,12 +465,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, goto exit_free; f->type = data->fields[i]; - f->val = data->values[i]; + f_val = data->values[i]; /* Support legacy tests for a valid loginuid */ - if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) { + if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) { f->type = AUDIT_LOGINUID_SET; - f->val = 0; + f_val = 0; entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; } @@ -485,7 +486,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_OBJ_UID: - f->uid = make_kuid(current_user_ns(), f->val); + f->uid = make_kuid(current_user_ns(), f_val); if (!uid_valid(f->uid)) goto exit_free; break; @@ -494,11 +495,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: - f->gid = make_kgid(current_user_ns(), f->val); + f->gid = make_kgid(current_user_ns(), f_val); if (!gid_valid(f->gid)) goto exit_free; break; case AUDIT_ARCH: + f->val = f_val; entry->rule.arch_f = f; break; case AUDIT_SUBJ_USER: @@ -511,11 +513,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: - str = audit_unpack_string(&bufp, &remain, f->val); - if (IS_ERR(str)) + str = audit_unpack_string(&bufp, &remain, f_val); + if (IS_ERR(str)) { + err = PTR_ERR(str); goto exit_free; - entry->rule.buflen += f->val; - + } + entry->rule.buflen += f_val; + f->lsm_str = str; err = security_audit_rule_init(f->type, f->op, str, (void **)&f->lsm_rule); /* Keep currently invalid fields around in case they @@ -524,68 +528,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, pr_warn("audit rule for LSM \'%s\' is invalid\n", str); err = 0; - } - if (err) { - kfree(str); + } else if (err) goto exit_free; - } else - f->lsm_str = str; break; case AUDIT_WATCH: - str = audit_unpack_string(&bufp, &remain, f->val); - if (IS_ERR(str)) + str = audit_unpack_string(&bufp, &remain, f_val); + if (IS_ERR(str)) { + err = PTR_ERR(str); goto exit_free; - entry->rule.buflen += f->val; 
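The audit_data_to_entry() rework in this hunk follows one defensive pattern throughout: the value taken from the untrusted netlink payload is kept in a local f_val, validated and consumed there, and only written into the long-lived audit_field for the case labels that actually use it. The error paths therefore never see a half-initialized field that could be freed or interpreted based on garbage. A generic sketch of the idea, using illustrative names rather than the real audit helpers:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct field { char *str; uint32_t val; };

/* Parse untrusted input, committing to *f only once everything passed. */
static int parse_field(struct field *f, uint32_t untrusted_len,
		       const char *payload)
{
	uint32_t len = untrusted_len;	/* work on a local copy */
	char *str;

	if (len > 4096)			/* validate before any allocation */
		return -EINVAL;
	str = strndup(payload, len);
	if (!str)
		return -ENOMEM;		/* *f is still untouched here */

	f->str = str;			/* commit only on success */
	f->val = len;
	return 0;
}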
- - err = audit_to_watch(&entry->rule, str, f->val, f->op); + } + err = audit_to_watch(&entry->rule, str, f_val, f->op); if (err) { kfree(str); goto exit_free; } + entry->rule.buflen += f_val; break; case AUDIT_DIR: - str = audit_unpack_string(&bufp, &remain, f->val); - if (IS_ERR(str)) + str = audit_unpack_string(&bufp, &remain, f_val); + if (IS_ERR(str)) { + err = PTR_ERR(str); goto exit_free; - entry->rule.buflen += f->val; - + } err = audit_make_tree(&entry->rule, str, f->op); kfree(str); if (err) goto exit_free; + entry->rule.buflen += f_val; break; case AUDIT_INODE: + f->val = f_val; err = audit_to_inode(&entry->rule, f); if (err) goto exit_free; break; case AUDIT_FILTERKEY: - if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN) + if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN) goto exit_free; - str = audit_unpack_string(&bufp, &remain, f->val); - if (IS_ERR(str)) - goto exit_free; - entry->rule.buflen += f->val; - entry->rule.filterkey = str; - break; - case AUDIT_EXE: - if (entry->rule.exe || f->val > PATH_MAX) - goto exit_free; - str = audit_unpack_string(&bufp, &remain, f->val); + str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } - entry->rule.buflen += f->val; - - audit_mark = audit_alloc_mark(&entry->rule, str, f->val); + entry->rule.buflen += f_val; + entry->rule.filterkey = str; + break; + case AUDIT_EXE: + if (entry->rule.exe || f_val > PATH_MAX) + goto exit_free; + str = audit_unpack_string(&bufp, &remain, f_val); + if (IS_ERR(str)) { + err = PTR_ERR(str); + goto exit_free; + } + audit_mark = audit_alloc_mark(&entry->rule, str, f_val); if (IS_ERR(audit_mark)) { kfree(str); err = PTR_ERR(audit_mark); goto exit_free; } + entry->rule.buflen += f_val; entry->rule.exe = audit_mark; break; + default: + f->val = f_val; + break; } } diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 29c7c06c6bd6..b03087f110eb 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2309,7 +2309,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env, struct_size = struct_type->size; bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); - if (struct_size - bytes_offset < sizeof(int)) { + if (struct_size - bytes_offset < member_type->size) { btf_verifier_log_member(env, struct_type, member, "Member exceeds struct_size"); return -EINVAL; @@ -3460,7 +3460,7 @@ int btf_get_info_by_fd(const struct btf *btf, union bpf_attr __user *uattr) { struct bpf_btf_info __user *uinfo; - struct bpf_btf_info info = {}; + struct bpf_btf_info info; u32 info_copy, btf_copy; void __user *ubtf; u32 uinfo_len; @@ -3469,6 +3469,7 @@ int btf_get_info_by_fd(const struct btf *btf, uinfo_len = attr->info.info_len; info_copy = min_t(u32, uinfo_len, sizeof(info)); + memset(&info, 0, sizeof(info)); if (copy_from_user(&info, uinfo, info_copy)) return -EFAULT; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index a3eaf08e7dd3..869e2e1860e8 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp) */ static void cgroup_bpf_release(struct work_struct *work) { - struct cgroup *cgrp = container_of(work, struct cgroup, - bpf.release_work); + struct cgroup *p, *cgrp = container_of(work, struct cgroup, + bpf.release_work); enum bpf_cgroup_storage_type stype; struct bpf_prog_array *old_array; unsigned int type; @@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work) mutex_unlock(&cgroup_mutex); + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) + cgroup_bpf_put(p); 
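The cgroup.c changes here pin a cgroup's ancestors for as long as its bpf state exists: cgroup_bpf_inherit() takes a reference on every parent, and cgroup_bpf_release(), along with inherit's own error path, drops the same chain. The apparent motivation is that a child's effective program arrays share entries with its ancestors, and because release is deferred through a percpu refcount, an ancestor's bpf state could otherwise be torn down while a descendant still points into it. The invariant, condensed (not the actual kernel code):

/*
 * Every successful cgroup_bpf_inherit() is balanced by exactly one
 * walk of the same ancestor chain, either in cgroup_bpf_release()
 * or in inherit's cleanup path.
 */
for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
	cgroup_bpf_get(p);	/* inherit: pin ancestors */
/* ... lifetime of cgrp->bpf ... */
for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
	cgroup_bpf_put(p);	/* release/error: unpin the same chain */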
+ percpu_ref_exit(&cgrp->bpf.refcnt); cgroup_put(cgrp); } @@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) */ #define NR ARRAY_SIZE(cgrp->bpf.effective) struct bpf_prog_array *arrays[NR] = {}; + struct cgroup *p; int ret, i; ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, @@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) if (ret) return ret; + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) + cgroup_bpf_get(p); + for (i = 0; i < NR; i++) INIT_LIST_HEAD(&cgrp->bpf.progs[i]); @@ -221,6 +228,9 @@ cleanup: for (i = 0; i < NR; i++) bpf_prog_array_free(arrays[i]); + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) + cgroup_bpf_put(p); + percpu_ref_exit(&cgrp->bpf.refcnt); return -ENOMEM; @@ -293,8 +303,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, { struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE], - *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL}; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; + struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; enum bpf_cgroup_storage_type stype; struct bpf_prog_list *pl; bool pl_was_allocated; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 3867864cdc2f..b4b6b77f309c 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -74,7 +74,7 @@ struct bpf_dtab_netdev { struct bpf_dtab { struct bpf_map map; - struct bpf_dtab_netdev **netdev_map; + struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */ struct list_head __percpu *flush_list; struct list_head list; @@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries) return hash; } +static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, + int idx) +{ + return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; +} + static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) { int err, cpu; @@ -120,8 +126,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) bpf_map_init_from_attr(&dtab->map, attr); /* make sure page count doesn't overflow */ - cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); - cost += sizeof(struct list_head) * num_possible_cpus(); + cost = (u64) sizeof(struct list_head) * num_possible_cpus(); if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); @@ -129,6 +134,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) if (!dtab->n_buckets) /* Overflow check */ return -EINVAL; cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets; + } else { + cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); } /* if map size is larger than memlock limit, reject it */ @@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) for_each_possible_cpu(cpu) INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu)); - dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * - sizeof(struct bpf_dtab_netdev *), - dtab->map.numa_node); - if (!dtab->netdev_map) - goto free_percpu; - if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets); if (!dtab->dev_index_head) - goto free_map_area; + goto free_percpu; spin_lock_init(&dtab->index_lock); + } else { + dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * + sizeof(struct bpf_dtab_netdev *), + dtab->map.numa_node); 
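After this rework each devmap flavor allocates only what it uses: BPF_MAP_TYPE_DEVMAP gets the flat netdev_map array, BPF_MAP_TYPE_DEVMAP_HASH gets the bucket table. The bucket lookup in dev_map_index_hash() reduces the index with idx & (n_buckets - 1), which is why n_buckets is forced to a power of two via roundup_pow_of_two() earlier in the hunk. A standalone illustration of that masking equivalence:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* For power-of-two n, key % n and key & (n - 1) pick the same bucket. */
static uint32_t bucket_of(uint32_t key, uint32_t n_buckets)
{
	assert(n_buckets && (n_buckets & (n_buckets - 1)) == 0);
	return key & (n_buckets - 1);
}

int main(void)
{
	printf("%u %u\n", bucket_of(41, 16), 41u % 16u);	/* 9 9 */
	return 0;
}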
+ if (!dtab->netdev_map) + goto free_percpu; } return 0; -free_map_area: - bpf_map_area_free(dtab->netdev_map); free_percpu: free_percpu(dtab->flush_list); free_charge: @@ -228,21 +233,40 @@ static void dev_map_free(struct bpf_map *map) cond_resched(); } - for (i = 0; i < dtab->map.max_entries; i++) { - struct bpf_dtab_netdev *dev; + if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + for (i = 0; i < dtab->n_buckets; i++) { + struct bpf_dtab_netdev *dev; + struct hlist_head *head; + struct hlist_node *next; - dev = dtab->netdev_map[i]; - if (!dev) - continue; + head = dev_map_index_hash(dtab, i); - free_percpu(dev->bulkq); - dev_put(dev->dev); - kfree(dev); + hlist_for_each_entry_safe(dev, next, head, index_hlist) { + hlist_del_rcu(&dev->index_hlist); + free_percpu(dev->bulkq); + dev_put(dev->dev); + kfree(dev); + } + } + + kfree(dtab->dev_index_head); + } else { + for (i = 0; i < dtab->map.max_entries; i++) { + struct bpf_dtab_netdev *dev; + + dev = dtab->netdev_map[i]; + if (!dev) + continue; + + free_percpu(dev->bulkq); + dev_put(dev->dev); + kfree(dev); + } + + bpf_map_area_free(dtab->netdev_map); } free_percpu(dtab->flush_list); - bpf_map_area_free(dtab->netdev_map); - kfree(dtab->dev_index_head); kfree(dtab); } @@ -263,19 +287,14 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } -static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, - int idx) -{ - return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; -} - struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); struct hlist_head *head = dev_map_index_hash(dtab, key); struct bpf_dtab_netdev *dev; - hlist_for_each_entry_rcu(dev, head, index_hlist) + hlist_for_each_entry_rcu(dev, head, index_hlist, + lockdep_is_held(&dtab->index_lock)) if (dev->idx == key) return dev; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index a70f7209cda3..218c09ff6a27 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -196,6 +196,7 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) void *key = map_iter(m)->key; void *prev_key; + (*pos)++; if (map_iter(m)->done) return NULL; @@ -208,8 +209,6 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) map_iter(m)->done = true; return NULL; } - - ++(*pos); return key; } diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 5b9da0954a27..3668a0bc18ec 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info, ulen = info->jited_prog_len; info->jited_prog_len = aux->offload->jited_len; - if (info->jited_prog_len & ulen) { + if (info->jited_prog_len && ulen) { uinsns = u64_to_user_ptr(info->jited_prog_insns); ulen = min_t(u32, info->jited_prog_len, ulen); if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) { diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 052580c33d26..173e983619d7 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -287,7 +287,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, bool irq_work_busy = false; struct stack_map_irq_work *work = NULL; - if (in_nmi()) { + if (irqs_disabled()) { work = this_cpu_ptr(&up_read_work); if (work->irq_work.flags & IRQ_WORK_BUSY) /* cannot queue more up_read, fallback */ @@ -295,8 +295,9 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } /* - * We cannot do up_read() 
in nmi context. To do build_id lookup - * in nmi context, we need to run up_read() in irq_work. We use + * We cannot do up_read() when the irq is disabled, because of + * risk to deadlock with rq_lock. To do build_id lookup when the + * irqs are disabled, we need to run up_read() in irq_work. We use * a percpu variable to do the irq_work. If the irq_work is * already used by another lookup, we fall back to report ips. * diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ace1cfaa24b6..14f4a76b44d5 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2325,7 +2325,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, union bpf_attr __user *uattr) { struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); - struct bpf_prog_info info = {}; + struct bpf_prog_info info; u32 info_len = attr->info.info_len; struct bpf_prog_stats stats; char __user *uinsns; @@ -2337,6 +2337,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, return err; info_len = min_t(u32, sizeof(info), info_len); + memset(&info, 0, sizeof(info)); if (copy_from_user(&info, uinfo, info_len)) return -EFAULT; @@ -2600,7 +2601,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map, union bpf_attr __user *uattr) { struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); - struct bpf_map_info info = {}; + struct bpf_map_info info; u32 info_len = attr->info.info_len; int err; @@ -2609,6 +2610,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map, return err; info_len = min_t(u32, sizeof(info), info_len); + memset(&info, 0, sizeof(info)); info.type = map->map_type; info.id = map->id; info.key_size = map->key_size; @@ -2836,7 +2838,7 @@ out: SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { - union bpf_attr attr = {}; + union bpf_attr attr; int err; if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) @@ -2848,6 +2850,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz size = min_t(u32, size, sizeof(attr)); /* copy attributes from user space, may be less than sizeof(bpf_attr) */ + memset(&attr, 0, sizeof(attr)); if (copy_from_user(&attr, uattr, size) != 0) return -EFAULT; diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c index ca52b9642943..d4f335a9a899 100644 --- a/kernel/bpf/tnum.c +++ b/kernel/bpf/tnum.c @@ -44,14 +44,19 @@ struct tnum tnum_rshift(struct tnum a, u8 shift) return TNUM(a.value >> shift, a.mask >> shift); } -struct tnum tnum_arshift(struct tnum a, u8 min_shift) +struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness) { /* if a.value is negative, arithmetic shifting by minimum shift * will have larger negative offset compared to more shifting. * If a.value is nonnegative, arithmetic shifting by minimum shift * will have larger positive offset compare to more shifting. 
*/ - return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift); + if (insn_bitness == 32) + return TNUM((u32)(((s32)a.value) >> min_shift), + (u32)(((s32)a.mask) >> min_shift)); + else + return TNUM((s64)a.value >> min_shift, + (s64)a.mask >> min_shift); } struct tnum tnum_add(struct tnum a, struct tnum b) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ffc3e53f5300..a0b76b360d6f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -852,7 +852,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; -static void __mark_reg_not_init(struct bpf_reg_state *reg); +static void __mark_reg_not_init(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. @@ -890,7 +891,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env, verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); return; } __mark_reg_known_zero(regs + regno); @@ -988,7 +989,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg) } /* Mark a register as having a completely unknown (scalar) value. */ -static void __mark_reg_unknown(struct bpf_reg_state *reg) +static void __mark_reg_unknown(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg) { /* * Clear type, id, off, and union(map_ptr, range) and @@ -998,6 +1000,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg) reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; + reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? + true : false; __mark_reg_unbounded(reg); } @@ -1008,19 +1012,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); return; } - regs += regno; - __mark_reg_unknown(regs); - /* constant backtracking is enabled for root without bpf2bpf calls */ - regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
- true : false; + __mark_reg_unknown(env, regs + regno); } -static void __mark_reg_not_init(struct bpf_reg_state *reg) +static void __mark_reg_not_init(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg) { - __mark_reg_unknown(reg); + __mark_reg_unknown(env, reg); reg->type = NOT_INIT; } @@ -1031,10 +1032,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); return; } - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); } #define DEF_NOT_SUBREG (0) @@ -3055,7 +3056,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, } if (state->stack[spi].slot_type[0] == STACK_SPILL && state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { - __mark_reg_unknown(&state->stack[spi].spilled_ptr); + __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) state->stack[spi].slot_type[j] = STACK_MISC; goto mark; @@ -3695,7 +3696,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, if (!reg) continue; if (reg_is_pkt_pointer_any(reg)) - __mark_reg_unknown(reg); + __mark_reg_unknown(env, reg); } } @@ -3723,7 +3724,7 @@ static void release_reg_references(struct bpf_verifier_env *env, if (!reg) continue; if (reg->ref_obj_id == ref_obj_id) - __mark_reg_unknown(reg); + __mark_reg_unknown(env, reg); } } @@ -4346,7 +4347,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ - __mark_reg_unknown(dst_reg); + __mark_reg_unknown(env, dst_reg); return 0; } @@ -4598,13 +4599,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ - __mark_reg_unknown(dst_reg); + __mark_reg_unknown(env, dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { - __mark_reg_unknown(dst_reg); + __mark_reg_unknown(env, dst_reg); return 0; } @@ -4812,9 +4813,16 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ - dst_reg->smin_value >>= umin_val; - dst_reg->smax_value >>= umin_val; - dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); + if (insn_bitness == 32) { + dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val); + dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val); + } else { + dst_reg->smin_value >>= umin_val; + dst_reg->smax_value >>= umin_val; + } + + dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, + insn_bitness); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. 
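The tnum_arshift() and BPF_ARSH changes above boil down to one rule: a 32-bit arithmetic shift must sign-extend from bit 31, not bit 63. A minimal userspace sketch of the distinction (illustrative only, not part of the patch; arsh() is a hypothetical helper mirroring the new insn_bitness parameter):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: for 32-bit instructions, shift within the low
 * 32 bits and zero-extend the result; for 64-bit, use the plain
 * arithmetic shift over the full word. */
static uint64_t arsh(uint64_t v, uint8_t shift, uint8_t insn_bitness)
{
	if (insn_bitness == 32)
		return (uint32_t)(((int32_t)v) >> shift);
	return (uint64_t)(((int64_t)v) >> shift);
}

int main(void)
{
	uint64_t v = 0x80000000ULL; /* negative as s32, positive as s64 */

	/* 64-bit view: prints 0x8000000, no sign bits shifted in */
	printf("arsh64: %#llx\n", (unsigned long long)arsh(v, 4, 64));
	/* 32-bit view: prints 0xf8000000, sign-extended from bit 31 */
	printf("arsh32: %#llx\n", (unsigned long long)arsh(v, 4, 32));
	return 0;
}

Treating the 32-bit case like the 64-bit one (as the old single-variant tnum_arshift() did) is exactly the mis-simulation the verifier hunk above closes, which is also why the smin_value/smax_value updates gained the same insn_bitness split.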
@@ -6019,6 +6027,7 @@ static bool may_access_skb(enum bpf_prog_type type) static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); + static const int ctx_reg = BPF_REG_6; u8 mode = BPF_MODE(insn->code); int i, err; @@ -6052,7 +6061,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check whether implicit source operand (register R6) is readable */ - err = check_reg_arg(env, BPF_REG_6, SRC_OP); + err = check_reg_arg(env, ctx_reg, SRC_OP); if (err) return err; @@ -6071,7 +6080,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } - if (regs[BPF_REG_6].type != PTR_TO_CTX) { + if (regs[ctx_reg].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; @@ -6084,6 +6093,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) return err; } + err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); + if (err < 0) + return err; + /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); @@ -6727,7 +6740,7 @@ static void clean_func_state(struct bpf_verifier_env *env, /* since the register is unused, clear its state * to make further comparison simpler */ - __mark_reg_not_init(&st->regs[i]); + __mark_reg_not_init(env, &st->regs[i]); } for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { @@ -6735,7 +6748,7 @@ static void clean_func_state(struct bpf_verifier_env *env, /* liveness must not touch this stack slot anymore */ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) { - __mark_reg_not_init(&st->stack[i].spilled_ptr); + __mark_reg_not_init(env, &st->stack[i].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) st->stack[i].slot_type[j] = STACK_INVALID; } diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 7f83f4121d8d..f684c82efc2e 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -473,6 +473,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) */ p++; if (p >= end) { + (*pos)++; return NULL; } else { *pos = *p; @@ -783,7 +784,7 @@ void cgroup1_release_agent(struct work_struct *work) pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); - if (!pathbuf || !agentbuf) + if (!pathbuf || !agentbuf || !strlen(agentbuf)) goto out; spin_lock_irq(&css_set_lock); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index ef4242e5d4bc..7c9e97553a00 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3111,8 +3111,6 @@ static int cgroup_apply_control_enable(struct cgroup *cgrp) for_each_subsys(ss, ssid) { struct cgroup_subsys_state *css = cgroup_css(dsct, ss); - WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt)); - if (!(cgroup_ss_mask(dsct) & (1 << ss->id))) continue; @@ -3122,6 +3120,8 @@ static int cgroup_apply_control_enable(struct cgroup *cgrp) return PTR_ERR(css); } + WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt)); + if (css_visible(css)) { ret = css_populate_dir(css); if (ret) @@ -3157,11 +3157,11 @@ static void cgroup_apply_control_disable(struct cgroup *cgrp) for_each_subsys(ss, ssid) { struct cgroup_subsys_state *css = cgroup_css(dsct, ss); - WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt)); - if (!css) continue; + WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt)); + if (css->parent && !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
kill_css(css); @@ -3448,7 +3448,8 @@ static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf, if (strcmp(strstrip(buf), "threaded")) return -EINVAL; - cgrp = cgroup_kn_lock_live(of->kn, false); + /* drain dying csses before we re-apply (threaded) subtree control */ + cgrp = cgroup_kn_lock_live(of->kn, true); if (!cgrp) return -ENOENT; @@ -4460,12 +4461,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it) } } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks)); - if (!list_empty(&cset->tasks)) + if (!list_empty(&cset->tasks)) { it->task_pos = cset->tasks.next; - else if (!list_empty(&cset->mg_tasks)) + it->cur_tasks_head = &cset->tasks; + } else if (!list_empty(&cset->mg_tasks)) { it->task_pos = cset->mg_tasks.next; - else + it->cur_tasks_head = &cset->mg_tasks; + } else { it->task_pos = cset->dying_tasks.next; + it->cur_tasks_head = &cset->dying_tasks; + } it->tasks_head = &cset->tasks; it->mg_tasks_head = &cset->mg_tasks; @@ -4523,10 +4528,14 @@ repeat: else it->task_pos = it->task_pos->next; - if (it->task_pos == it->tasks_head) + if (it->task_pos == it->tasks_head) { it->task_pos = it->mg_tasks_head->next; - if (it->task_pos == it->mg_tasks_head) + it->cur_tasks_head = it->mg_tasks_head; + } + if (it->task_pos == it->mg_tasks_head) { it->task_pos = it->dying_tasks_head->next; + it->cur_tasks_head = it->dying_tasks_head; + } if (it->task_pos == it->dying_tasks_head) css_task_iter_advance_css_set(it); } else { @@ -4545,11 +4554,12 @@ repeat: goto repeat; /* and dying leaders w/o live member threads */ - if (!atomic_read(&task->signal->live)) + if (it->cur_tasks_head == it->dying_tasks_head && + !atomic_read(&task->signal->live)) goto repeat; } else { /* skip all dying ones */ - if (task->flags & PF_EXITING) + if (it->cur_tasks_head == it->dying_tasks_head) goto repeat; } } @@ -4658,6 +4668,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) struct kernfs_open_file *of = s->private; struct css_task_iter *it = of->priv; + if (pos) + (*pos)++; + return css_task_iter_next(it); } @@ -4673,7 +4686,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, * from position 0, so we can simply keep iterating on !0 *pos. */ if (!it) { - if (WARN_ON_ONCE((*pos)++)) + if (WARN_ON_ONCE((*pos))) return ERR_PTR(-EINVAL); it = kzalloc(sizeof(*it), GFP_KERNEL); @@ -4681,10 +4694,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, return ERR_PTR(-ENOMEM); of->priv = it; css_task_iter_start(&cgrp->self, iter_flags, it); - } else if (!(*pos)++) { + } else if (!(*pos)) { css_task_iter_end(it); css_task_iter_start(&cgrp->self, iter_flags, it); - } + } else + return it->cur_task; return cgroup_procs_next(s, NULL, NULL); } @@ -6380,6 +6394,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) return; } + /* Don't associate the sock with unrelated interrupted task's cgroup. */ + if (in_interrupt()) + return; + rcu_read_lock(); while (true) { diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index c87ee6412b36..8d60a0a65086 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -48,6 +48,8 @@ #include #include #include +#include +#include #include #include #include @@ -65,9 +67,14 @@ #include #include #include +#include +#include +#include +#include DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); +int cpuset_cpuinfo_show_realinfo __read_mostly; /* See "Frequency meter" comments, below. 
*/ @@ -160,6 +167,10 @@ struct cpuset { */ int use_parent_ecpus; int child_ecpus_count; + + /* for cpu load calc */ + unsigned long calc_load_tasks; + unsigned long avenrun[3]; }; /* @@ -355,6 +366,9 @@ static struct workqueue_struct *cpuset_migrate_mm_wq; static void cpuset_hotplug_workfn(struct work_struct *work); static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); +static void cpuset_calc_loadavg_workfn(struct work_struct *work); +static DECLARE_DELAYED_WORK(cpuset_calc_loadavg_work, cpuset_calc_loadavg_workfn); + static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); /* @@ -2479,6 +2493,308 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) return 0; } +static u64 get_idle_time(int cpu) +{ + u64 idle, idle_time = -1ULL; + + if (cpu_online(cpu)) + idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ + idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; + else + idle = idle_time * NSEC_PER_USEC; + + return idle; +} + +static u64 get_iowait_time(int cpu) +{ + u64 iowait, iowait_time = -1ULL; + + if (cpu_online(cpu)) + iowait_time = get_cpu_iowait_time_us(cpu, NULL); + + if (iowait_time == -1ULL) + /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ + iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; + else + iowait = iowait_time * NSEC_PER_USEC; + + return iowait; +} + +static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) +{ + struct cpuset *cs = css_cs(seq_css(sf)); + int i, j; + u64 user, nice, system, idle, iowait, irq, softirq, steal; + u64 guest, guest_nice, n_ctx_switch, n_process, n_running, n_blocked; + u64 sum = 0; + u64 sum_softirq = 0; + unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; + struct timespec boottime; + + user = nice = system = idle = iowait = + irq = softirq = steal = 0; + guest = guest_nice = 0; + getboottime(&boottime); + + for_each_cpu(i, cs->cpus_allowed) { + user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; + nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; + system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(i); + iowait += get_iowait_time(i); + irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; + softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; + steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; + guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; + guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; + sum += kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); + + for (j = 0; j < NR_SOFTIRQS; j++) { + unsigned int softirq_stat = kstat_softirqs_cpu(j, i); + + per_softirq_sums[j] += softirq_stat; + sum_softirq += softirq_stat; + } + } + /* sum += arch_irq_stat(); */ + + seq_put_decimal_ull(sf, "cpu ", nsec_to_clock_t(user)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(guest_nice)); + seq_putc(sf, '\n'); + + j = 0; + for_each_cpu(i, cs->cpus_allowed) { + /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ + user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; + nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; + system =
kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(i); + iowait = get_iowait_time(i); + irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; + softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; + steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; + guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; + guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; + seq_printf(sf, "cpu%d", i); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(user)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(sf, " ", nsec_to_clock_t(guest_nice)); + seq_putc(sf, '\n'); + } + seq_put_decimal_ull(sf, "intr ", (unsigned long long)sum); + + /* sum again ? it could be updated? */ + for_each_irq_nr(j) { + sum = 0; + for_each_cpu(i, cs->cpus_allowed) + sum += kstat_irqs_cpu(j, i); + + seq_put_decimal_ull(sf, " ", sum); + } + + n_ctx_switch = 0; + n_process = 0; + n_running = 0; + n_blocked = 0; + for_each_cpu(i, cs->cpus_allowed) { + n_ctx_switch += nr_context_switches_cpu(i); + n_process += per_cpu(total_forks, i); + n_running += nr_running_cpu(i); + n_blocked += nr_iowait_cpu(i); + } + seq_printf(sf, + "\nctxt %llu\n" + "btime %llu\n" + "processes %llu\n" + "procs_running %llu\n" + "procs_blocked %llu\n", + n_ctx_switch, + (unsigned long long)boottime.tv_sec, + n_process, + n_running, + n_blocked); + + seq_put_decimal_ull(sf, "softirq ", (unsigned long long)sum_softirq); + + for (i = 0; i < NR_SOFTIRQS; i++) + seq_put_decimal_ull(sf, " ", per_softirq_sums[i]); + seq_putc(sf, '\n'); + + return 0; +} + +#ifdef CONFIG_X86 +/* + * Get CPU information for use by the procfs. + */ +static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, + unsigned int cpu) +{ +#ifdef CONFIG_SMP + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpumask_weight(topology_core_cpumask(cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + seq_printf(m, "apicid\t\t: %d\n", c->apicid); + seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); +#endif +} + +#ifdef CONFIG_X86_32 +static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) +{ + seq_printf(m, + "fdiv_bug\t: %s\n" + "f00f_bug\t: %s\n" + "coma_bug\t: %s\n" + "fpu\t\t: %s\n" + "fpu_exception\t: %s\n" + "cpuid level\t: %d\n" + "wp\t\t: yes\n", + static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no", + static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no", + static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no", + static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no", + static_cpu_has(X86_FEATURE_FPU) ? 
"yes" : "no", + c->cpuid_level); +} +#else +static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) +{ + seq_printf(m, + "fpu\t\t: yes\n" + "fpu_exception\t: yes\n" + "cpuid level\t: %d\n" + "wp\t\t: yes\n", + c->cpuid_level); +} +#endif + +static int cpuset_cgroup_cpuinfo_show(struct seq_file *sf, void *v) +{ + int i, j, k = 0; + struct cpuset *cs = css_cs(seq_css(sf)); + struct cpuinfo_x86 *c; + unsigned int cpu; + bool is_top_cgrp; + + if (!seq_css(sf)->parent) + is_top_cgrp = true; + else + is_top_cgrp = false; + + for_each_cpu(j, cs->cpus_allowed) + { + c = &cpu_data(j); + if (is_top_cgrp || cpuset_cpuinfo_show_realinfo) + cpu = c->cpu_index; + else + cpu = k; + k++; + seq_printf(sf, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %u\n" + "model name\t: %s\n", + cpu, + c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", + c->x86, + c->x86_model, + c->x86_model_id[0] ? c->x86_model_id : "unknown"); + + if (c->x86_stepping || c->cpuid_level >= 0) + seq_printf(sf, "stepping\t: %d\n", c->x86_stepping); + else + seq_printf(sf, "stepping\t: unknown\n"); + if (c->microcode) + seq_printf(sf, "microcode\t: 0x%x\n", c->microcode); + + if (cpu_has(c, X86_FEATURE_TSC)) { + unsigned int freq = cpufreq_quick_get(cpu); + + if (!freq) + freq = cpu_khz; + seq_printf(sf, "cpu MHz\t\t: %u.%03u\n", + freq / 1000, (freq % 1000)); + } + + /* Cache size */ + if (c->x86_cache_size >= 0) + seq_printf(sf, "cache size\t: %d KB\n", c->x86_cache_size); + + show_cpuinfo_core(sf, c, cpu); + show_cpuinfo_misc(sf, c); + + seq_printf(sf, "flags\t\t:"); + for (i = 0; i < 32*NCAPINTS; i++) + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) + seq_printf(sf, " %s", x86_cap_flags[i]); + + seq_puts(sf, "\nbugs\t\t:"); + for (i = 0; i < 32*NBUGINTS; i++) { + unsigned int bug_bit = 32*NCAPINTS + i; + + if (cpu_has_bug(c, bug_bit) && x86_bug_flags[i]) + seq_printf(sf, " %s", x86_bug_flags[i]); + } + + seq_printf(sf, "\nbogomips\t: %lu.%02lu\n", + c->loops_per_jiffy/(500000/HZ), + (c->loops_per_jiffy/(5000/HZ)) % 100); + +#ifdef CONFIG_X86_64 + if (c->x86_tlbsize > 0) + seq_printf(sf, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); +#endif + seq_printf(sf, "clflush size\t: %u\n", c->x86_clflush_size); + seq_printf(sf, "cache_alignment\t: %d\n", c->x86_cache_alignment); + seq_printf(sf, "address sizes\t: %u bits physical, %u bits virtual\n", + c->x86_phys_bits, c->x86_virt_bits); + + seq_printf(sf, "power management:"); + for (i = 0; i < 32; i++) { + if (c->x86_power & (1 << i)) { + if (i < ARRAY_SIZE(x86_power_flags) && + x86_power_flags[i]) + seq_printf(sf, "%s%s", + x86_power_flags[i][0] ? 
" " : "", + x86_power_flags[i]); + else + seq_printf(sf, " [%d]", i); + } + } + + seq_puts(sf, "\n\n"); + } + return 0; +} +#endif + +static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v); + static int sched_partition_show(struct seq_file *seq, void *v) { struct cpuset *cs = css_cs(seq_css(seq)); @@ -2632,6 +2948,22 @@ static struct cftype legacy_files[] = { .write_u64 = cpuset_write_u64, .private = FILE_MEMORY_PRESSURE_ENABLED, }, + { + .name = "stat", + .seq_show = cpuset_cgroup_stat_show, + }, + +#ifdef CONFIG_X86 + { + .name = "cpuinfo", + .seq_show = cpuset_cgroup_cpuinfo_show, + }, +#endif + + { + .name = "loadavg", + .seq_show = cpuset_cgroup_loadavg_show, + }, { } /* terminate */ }; @@ -3259,6 +3591,8 @@ void __init cpuset_init_smp(void) cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); BUG_ON(!cpuset_migrate_mm_wq); + + schedule_delayed_work(&cpuset_calc_loadavg_work, LOAD_FREQ); } /** @@ -3619,3 +3953,85 @@ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) seq_printf(m, "Mems_allowed_list:\t%*pbl\n", nodemask_pr_args(&task->mems_allowed)); } + +static void __cpuset_calc_load(struct cpuset *cs) +{ + long active = cs->calc_load_tasks; + active = active > 0 ? active * FIXED_1 : 0; + cs->avenrun[0] = calc_load(cs->avenrun[0], EXP_1, active); + cs->avenrun[1] = calc_load(cs->avenrun[1], EXP_5, active); + cs->avenrun[2] = calc_load(cs->avenrun[2], EXP_15, active); + +} +/*calc cpu load for this cpuset*/ +void cgroup_cpuset_calc_load(void) +{ + struct cpuset *cs; + struct cgroup_subsys_state *pos_css; + struct css_task_iter it; + struct task_struct *task; + + rcu_read_lock(); + /* in docker, only one level cpuset */ + cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset){ + if (cs == &top_cpuset) + continue; + + cs->calc_load_tasks = 0; + + css_task_iter_start(&cs->css, 0, &it); + while ((task = css_task_iter_next(&it))) { + if (task->state == TASK_RUNNING || task->state == TASK_UNINTERRUPTIBLE) + cs->calc_load_tasks ++; + } + css_task_iter_end(&it); + + /*calc load*/ + __cpuset_calc_load(cs); + } + rcu_read_unlock(); +} + +static void cpuset_calc_loadavg_workfn(struct work_struct *work) +{ + cgroup_cpuset_calc_load(); + schedule_delayed_work(&cpuset_calc_loadavg_work, LOAD_FREQ); +} +static unsigned long cpuset_nr_running(struct cpuset *cs) +{ + struct css_task_iter it; + struct task_struct *task; + unsigned long nr_running = 0; + + css_task_iter_start(&cs->css, 0, &it); + while ((task = css_task_iter_next(&it))) { + if (task->state == TASK_RUNNING) + nr_running ++; + } + css_task_iter_end(&it); + + return nr_running; +} + +static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v) +{ + struct cpuset *cs = css_cs(seq_css(sf)); + unsigned long loads[3] = {0}; + unsigned long offset = FIXED_1/200; + int shift = 0; + unsigned long n_running = cpuset_nr_running(cs); + + loads[0] = (cs->avenrun[0] + offset) << shift; + loads[1] = (cs->avenrun[1] + offset) << shift; + loads[2] = (cs->avenrun[2] + offset) << shift; +#define LOAD_INT(x) ((x) >> FSHIFT) +#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) + + seq_printf(sf, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n", + LOAD_INT(loads[0]), LOAD_FRAC(loads[0]), + LOAD_INT(loads[1]), LOAD_FRAC(loads[1]), + LOAD_INT(loads[2]), LOAD_FRAC(loads[2]), + n_running, nr_threads, + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); + return 0; +} diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c index 8cf010680678..3984dd6b8ddb 100644 --- 
a/kernel/cgroup/freezer.c +++ b/kernel/cgroup/freezer.c @@ -230,6 +230,15 @@ void cgroup_freezer_migrate_task(struct task_struct *task, if (task->flags & PF_KTHREAD) return; + /* + * No changes are necessary if neither the src nor the dst cgroup is + * freezing and the task is not frozen. + */ + if (!test_bit(CGRP_FREEZE, &src->flags) && + !test_bit(CGRP_FREEZE, &dst->flags) && + !task->frozen) + return; + /* * Adjust counters of freezing and frozen tasks. * Note, that if the task is frozen, but the destination cgroup is not diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c index 8e513a573fe9..138059eb730d 100644 --- a/kernel/cgroup/pids.c +++ b/kernel/cgroup/pids.c @@ -45,7 +45,7 @@ struct pids_cgroup { * %PIDS_MAX = (%PID_MAX_LIMIT + 1). */ atomic64_t counter; - int64_t limit; + atomic64_t limit; /* Handle for "pids.events" */ struct cgroup_file events_file; @@ -73,8 +73,8 @@ pids_css_alloc(struct cgroup_subsys_state *parent) if (!pids) return ERR_PTR(-ENOMEM); - pids->limit = PIDS_MAX; atomic64_set(&pids->counter, 0); + atomic64_set(&pids->limit, PIDS_MAX); atomic64_set(&pids->events_limit, 0); return &pids->css; } @@ -146,13 +146,14 @@ static int pids_try_charge(struct pids_cgroup *pids, int num) for (p = pids; parent_pids(p); p = parent_pids(p)) { int64_t new = atomic64_add_return(num, &p->counter); + int64_t limit = atomic64_read(&p->limit); /* * Since new is capped to the maximum number of pid_t, if * p->limit is %PIDS_MAX then we know that this test will never * fail. */ - if (new > p->limit) + if (new > limit) goto revert; } @@ -277,7 +278,7 @@ set_limit: * Limit updates don't need to be mutex'd, since it isn't * critical that any racing fork()s follow the new limit. */ - pids->limit = limit; + atomic64_set(&pids->limit, limit); return nbytes; } @@ -285,7 +286,7 @@ static int pids_max_show(struct seq_file *sf, void *v) { struct cgroup_subsys_state *css = seq_css(sf); struct pids_cgroup *pids = css_pids(css); - int64_t limit = pids->limit; + int64_t limit = atomic64_read(&pids->limit); if (limit >= PIDS_MAX) seq_printf(sf, "%s\n", PIDS_MAX_STR); diff --git a/kernel/cpu.c b/kernel/cpu.c index e2cad3ee2ead..406828fb3038 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -525,8 +525,7 @@ static int bringup_wait_for_ap(unsigned int cpu) if (WARN_ON_ONCE((!cpu_online(cpu)))) return -ECANCELED; - /* Unpark the stopper thread and the hotplug thread of the target cpu */ - stop_machine_unpark(cpu); + /* Unpark the hotplug thread of the target cpu */ kthread_unpark(st->thread); /* @@ -1089,8 +1088,8 @@ void notify_cpu_starting(unsigned int cpu) /* * Called from the idle task. Wake up the controlling task which brings the - * stopper and the hotplug thread of the upcoming CPU up and then delegates - * the rest of the online bringup to the hotplug thread. + * hotplug thread of the upcoming CPU up and then delegates the rest of the + * online bringup to the hotplug thread. */ void cpuhp_online_idle(enum cpuhp_state state) { @@ -1100,6 +1099,12 @@ void cpuhp_online_idle(enum cpuhp_state state) if (state != CPUHP_AP_ONLINE_IDLE) return; + /* + * Unpark the stopper thread before we start the idle loop (and start + * scheduling); this ensures the stopper task is always available.
+ */ + stop_machine_unpark(smp_processor_id()); + st->state = CPUHP_AP_ONLINE_IDLE; complete_ap_thread(st, true); } @@ -1909,6 +1914,78 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) } EXPORT_SYMBOL(__cpuhp_remove_state); +#ifdef CONFIG_HOTPLUG_SMT +static void cpuhp_offline_cpu_device(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + dev->offline = true; + /* Tell user space about the state change */ + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); +} + +static void cpuhp_online_cpu_device(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + dev->offline = false; + /* Tell user space about the state change */ + kobject_uevent(&dev->kobj, KOBJ_ONLINE); +} + +int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) +{ + int cpu, ret = 0; + + cpu_maps_update_begin(); + for_each_online_cpu(cpu) { + if (topology_is_primary_thread(cpu)) + continue; + ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); + if (ret) + break; + /* + * As this needs to hold the cpu maps lock it's impossible + * to call device_offline() because that ends up calling + * cpu_down() which takes cpu maps lock. cpu maps lock + * needs to be held as this might race against in kernel + * abusers of the hotplug machinery (thermal management). + * + * So nothing would update device:offline state. That would + * leave the sysfs entry stale and prevent onlining after + * smt control has been changed to 'off' again. This is + * called under the sysfs hotplug lock, so it is properly + * serialized against the regular offline usage. + */ + cpuhp_offline_cpu_device(cpu); + } + if (!ret) + cpu_smt_control = ctrlval; + cpu_maps_update_done(); + return ret; +} + +int cpuhp_smt_enable(void) +{ + int cpu, ret = 0; + + cpu_maps_update_begin(); + cpu_smt_control = CPU_SMT_ENABLED; + for_each_present_cpu(cpu) { + /* Skip online CPUs and CPUs on offline nodes */ + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) + continue; + ret = _cpu_up(cpu, 0, CPUHP_ONLINE); + if (ret) + break; + /* See comment in cpuhp_smt_disable() */ + cpuhp_online_cpu_device(cpu); + } + cpu_maps_update_done(); + return ret; +} +#endif + #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU) static ssize_t show_cpuhp_state(struct device *dev, struct device_attribute *attr, char *buf) @@ -2063,77 +2140,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = { #ifdef CONFIG_HOTPLUG_SMT -static void cpuhp_offline_cpu_device(unsigned int cpu) -{ - struct device *dev = get_cpu_device(cpu); - - dev->offline = true; - /* Tell user space about the state change */ - kobject_uevent(&dev->kobj, KOBJ_OFFLINE); -} - -static void cpuhp_online_cpu_device(unsigned int cpu) -{ - struct device *dev = get_cpu_device(cpu); - - dev->offline = false; - /* Tell user space about the state change */ - kobject_uevent(&dev->kobj, KOBJ_ONLINE); -} - -int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) -{ - int cpu, ret = 0; - - cpu_maps_update_begin(); - for_each_online_cpu(cpu) { - if (topology_is_primary_thread(cpu)) - continue; - ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); - if (ret) - break; - /* - * As this needs to hold the cpu maps lock it's impossible - * to call device_offline() because that ends up calling - * cpu_down() which takes cpu maps lock. cpu maps lock - * needs to be held as this might race against in kernel - * abusers of the hotplug machinery (thermal management). - * - * So nothing would update device:offline state. 
That would - * leave the sysfs entry stale and prevent onlining after - * smt control has been changed to 'off' again. This is - * called under the sysfs hotplug lock, so it is properly - * serialized against the regular offline usage. - */ - cpuhp_offline_cpu_device(cpu); - } - if (!ret) - cpu_smt_control = ctrlval; - cpu_maps_update_done(); - return ret; -} - -int cpuhp_smt_enable(void) -{ - int cpu, ret = 0; - - cpu_maps_update_begin(); - cpu_smt_control = CPU_SMT_ENABLED; - for_each_present_cpu(cpu) { - /* Skip online CPUs and CPUs on offline nodes */ - if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) - continue; - ret = _cpu_up(cpu, 0, CPUHP_ONLINE); - if (ret) - break; - /* See comment in cpuhp_smt_disable() */ - cpuhp_online_cpu_device(cpu); - } - cpu_maps_update_done(); - return ret; -} - - static ssize_t __store_smt_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) diff --git a/kernel/cred.c b/kernel/cred.c index c0a4c12d38b2..809a985b1793 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk) put_cred(cred); #ifdef CONFIG_KEYS_REQUEST_CACHE - key_put(current->cached_requested_key); - current->cached_requested_key = NULL; + key_put(tsk->cached_requested_key); + tsk->cached_requested_key = NULL; #endif } @@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void) new->magic = CRED_MAGIC; #endif - if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) + if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0) goto error; return new; @@ -282,7 +282,7 @@ struct cred *prepare_creds(void) new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL) < 0) + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) goto error; validate_creds(new); return new; @@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) #ifdef CONFIG_SECURITY new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL) < 0) + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) goto error; put_cred(old); diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 545e3869b0e3..551b0eb7028a 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -123,8 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, return ret; } -static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, - ssize_t size, dma_addr_t *dma_handle) +static void *__dma_alloc_from_coherent(struct device *dev, + struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) { int order = get_order(size); unsigned long flags; @@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, /* * Memory was found in the coherent area. 
*/ - *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); ret = mem->virt_base + (pageno << PAGE_SHIFT); spin_unlock_irqrestore(&mem->spinlock, flags); memset(ret, 0, size); @@ -175,17 +176,18 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, if (!mem) return 0; - *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle); return 1; } -void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle) { if (!dma_coherent_default_memory) return NULL; - return __dma_alloc_from_coherent(dma_coherent_default_memory, size, - dma_handle); + return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size, + dma_handle); } static int __dma_release_from_coherent(struct dma_coherent_mem *mem, diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 099002d84f46..4ad74f5987ea 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -420,6 +420,7 @@ void debug_dma_dump_mappings(struct device *dev) } spin_unlock_irqrestore(&bucket->lock, flags); + cond_resched(); } } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 8402b29c280f..867fd72cb260 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, { dma_addr_t dma_addr = paddr; - if (unlikely(!dma_direct_possible(dev, dma_addr, size))) { + if (unlikely(!dma_capable(dev, dma_addr, size))) { report_addr(dev, dma_addr, size); return DMA_MAPPING_ERROR; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 00a014670ed0..15b123bdcaf5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5607,10 +5607,8 @@ static void perf_mmap_close(struct vm_area_struct *vma) perf_pmu_output_stop(event); /* now it's safe to free the pages */ - if (!rb->aux_mmap_locked) - atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); - else - atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); + atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm); + atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); /* this has to be the last one */ rb_free_aux(rb); @@ -5825,7 +5823,15 @@ accounting: */ user_lock_limit *= num_online_cpus(); - user_locked = atomic_long_read(&user->locked_vm) + user_extra; + user_locked = atomic_long_read(&user->locked_vm); + + /* + * sysctl_perf_event_mlock may have changed, so that + * user->locked_vm > user_lock_limit + */ + if (user_locked > user_lock_limit) + user_locked = user_lock_limit; + user_locked += user_extra; if (user_locked <= user_lock_limit) { /* charge all to locked_vm */ @@ -11184,8 +11190,10 @@ SYSCALL_DEFINE5(perf_event_open, } } - if (event->attr.aux_output && !perf_get_aux_event(event, group_leader)) + if (event->attr.aux_output && !perf_get_aux_event(event, group_leader)) { + err = -EINVAL; goto err_locked; + } /* * Must be under the same ctx::mutex as perf_install_in_context(), diff --git a/kernel/exit.c b/kernel/exit.c index a46a50d67002..22dfaac9e48c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -437,7 +437,7 @@ static void exit_mm(void) struct mm_struct *mm = current->mm; struct core_state *core_state; - mm_release(current, mm); + exit_mm_release(current, mm); if (!mm) return; sync_mm_rss(mm); @@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father, } 
write_unlock_irq(&tasklist_lock); - if (unlikely(pid_ns == &init_pid_ns)) { - panic("Attempted to kill init! exitcode=0x%08x\n", - father->signal->group_exit_code ?: father->exit_code); - } list_for_each_entry_safe(p, n, dead, ptrace_entry) { list_del_init(&p->ptrace_entry); @@ -746,32 +742,12 @@ void __noreturn do_exit(long code) */ if (unlikely(tsk->flags & PF_EXITING)) { pr_alert("Fixing recursive fault but reboot is needed!\n"); - /* - * We can do this unlocked here. The futex code uses - * this flag just to verify whether the pi state - * cleanup has been done or not. In the worst case it - * loops once more. We pretend that the cleanup was - * done as there is no way to return. Either the - * OWNER_DIED bit is set by now or we push the blocked - * task into the wait for ever nirwana as well. - */ - tsk->flags |= PF_EXITPIDONE; + futex_exit_recursive(tsk); set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } exit_signals(tsk); /* sets PF_EXITING */ - /* - * Ensure that all new tsk->pi_lock acquisitions must observe - * PF_EXITING. Serializes against futex.c:attach_to_pi_owner(). - */ - smp_mb(); - /* - * Ensure that we must observe the pi_state in exit_mm() -> - * mm_release() -> exit_pi_state_list(). - */ - raw_spin_lock_irq(&tsk->pi_lock); - raw_spin_unlock_irq(&tsk->pi_lock); if (unlikely(in_atomic())) { pr_info("note: %s[%d] exited with preempt_count %d\n", @@ -786,6 +762,14 @@ void __noreturn do_exit(long code) acct_update_integrals(tsk); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { + /* + * If the last thread of global init has exited, panic + * immediately to get a useable coredump. + */ + if (unlikely(is_global_init(tsk))) + panic("Attempted to kill init! exitcode=0x%08x\n", + tsk->signal->group_exit_code ?: (int)code); + #ifdef CONFIG_POSIX_TIMERS hrtimer_cancel(&tsk->signal->real_timer); exit_itimers(tsk->signal); @@ -846,12 +830,6 @@ void __noreturn do_exit(long code) * Make sure we are holding no locks: */ debug_check_no_locks_held(); - /* - * We can do this unlocked here. The futex code uses this flag - * just to verify whether the pi state cleanup has been done - * or not. In the worst case it loops once more. - */ - tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index 13b38794efb5..f07a13d85a69 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -120,7 +120,7 @@ /* * Protected counters by write_lock_irq(&tasklist_lock) */ -unsigned long total_forks; /* Handle normal Linux uptimes. */ +DEFINE_PER_CPU(unsigned long, total_forks) = 0; int nr_threads; /* The idle threads do not count.. */ static int max_threads; /* tunable limit on nr_threads */ @@ -146,6 +146,17 @@ int lockdep_tasklist_lock_is_held(void) EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); #endif /* #ifdef CONFIG_PROVE_RCU */ +int nr_forks(void) +{ + int cpu; + int total = 0; + + for_each_possible_cpu(cpu) + total += per_cpu(total_forks, cpu); + + return total; +} + int nr_processes(void) { int cpu; @@ -394,8 +405,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account) mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB, THREAD_SIZE / 1024 * account); - mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB, - account * (THREAD_SIZE / 1024)); + mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB, + account * (THREAD_SIZE / 1024)); } } @@ -1283,24 +1294,8 @@ static int wait_for_vfork_done(struct task_struct *child, * restoring the old one. . . 
* Eric Biederman 10 January 1998 */ -void mm_release(struct task_struct *tsk, struct mm_struct *mm) +static void mm_release(struct task_struct *tsk, struct mm_struct *mm) { - /* Get rid of any futexes when releasing the mm */ -#ifdef CONFIG_FUTEX - if (unlikely(tsk->robust_list)) { - exit_robust_list(tsk); - tsk->robust_list = NULL; - } -#ifdef CONFIG_COMPAT - if (unlikely(tsk->compat_robust_list)) { - compat_exit_robust_list(tsk); - tsk->compat_robust_list = NULL; - } -#endif - if (unlikely(!list_empty(&tsk->pi_state_list))) - exit_pi_state_list(tsk); -#endif - uprobe_free_utask(tsk); /* Get rid of any cached register state */ @@ -1333,6 +1328,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) complete_vfork_done(tsk); } +void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) +{ + futex_exit_release(tsk); + mm_release(tsk, mm); +} + +void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) +{ + futex_exec_release(tsk); + mm_release(tsk, mm); +} + /** * dup_mm() - duplicates an existing mm structure * @tsk: the task_struct with which the new mm will be associated. @@ -2062,14 +2069,8 @@ static __latent_entropy struct task_struct *copy_process( #ifdef CONFIG_BLOCK p->plug = NULL; #endif -#ifdef CONFIG_FUTEX - p->robust_list = NULL; -#ifdef CONFIG_COMPAT - p->compat_robust_list = NULL; -#endif - INIT_LIST_HEAD(&p->pi_state_list); - p->pi_state_cache = NULL; -#endif + futex_init_task(p); + /* * sigaltstack should be cleared when sharing the same VM */ @@ -2217,7 +2218,7 @@ static __latent_entropy struct task_struct *copy_process( attach_pid(p, PIDTYPE_PID); nr_threads++; } - total_forks++; + __this_cpu_inc(total_forks); hlist_del_init(&delayed.node); spin_unlock(&current->sighand->siglock); syscall_tracepoint_update(p); @@ -2523,6 +2524,16 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, #endif #ifdef __ARCH_WANT_SYS_CLONE3 + +/* + * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from + * the registers containing the syscall arguments for clone. This doesn't work + * with clone3 since the TLS value is passed in clone_args instead.
+ */ +#ifndef CONFIG_HAVE_COPY_THREAD_TLS +#error clone3 requires copy_thread_tls support in arch +#endif + noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, struct clone_args __user *uargs, size_t usize) diff --git a/kernel/futex.c b/kernel/futex.c index bd18f60e4c6c..5660c02b01b0 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -325,6 +325,12 @@ static inline bool should_fail_futex(bool fshared) } #endif /* CONFIG_FAIL_FUTEX */ +#ifdef CONFIG_COMPAT +static void compat_exit_robust_list(struct task_struct *curr); +#else +static inline void compat_exit_robust_list(struct task_struct *curr) { } +#endif + static inline void futex_get_mm(union futex_key *key) { mmgrab(key->private.mm); @@ -379,9 +385,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb) */ static struct futex_hash_bucket *hash_futex(union futex_key *key) { - u32 hash = jhash2((u32*)&key->both.word, - (sizeof(key->both.word)+sizeof(key->both.ptr))/4, + u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, key->both.offset); + return &futex_queues[hash & (futex_hashsize - 1)]; } @@ -423,7 +429,7 @@ static void get_futex_key_refs(union futex_key *key) switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: - ihold(key->shared.inode); /* implies smp_mb(); (B) */ + smp_mb(); /* explicit smp_mb(); (B) */ break; case FUT_OFF_MMSHARED: futex_get_mm(key); /* implies smp_mb(); (B) */ @@ -457,7 +463,6 @@ static void drop_futex_key_refs(union futex_key *key) switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: - iput(key->shared.inode); break; case FUT_OFF_MMSHARED: mmdrop(key->private.mm); @@ -499,6 +504,46 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, return timeout; } +/* + * Generate a machine wide unique identifier for this inode. + * + * This relies on u64 not wrapping in the life-time of the machine; which with + * 1ns resolution means almost 585 years. + * + * This further relies on the fact that a well formed program will not unmap + * the file while it has a (shared) futex waiting on it. This mapping will have + * a file reference which pins the mount and inode. + * + * If for some reason an inode gets evicted and read back in again, it will get + * a new sequence number and will _NOT_ match, even though it is the exact same + * file. + * + * It is important that match_futex() will never have a false-positive, esp. + * for PI futexes that can mess up the state. The above argues that false-negatives + * are only possible for malformed programs. + */ +static u64 get_inode_sequence_number(struct inode *inode) +{ + static atomic64_t i_seq; + u64 old; + + /* Does the inode already have a sequence number? */ + old = atomic64_read(&inode->i_sequence); + if (likely(old)) + return old; + + for (;;) { + u64 new = atomic64_add_return(1, &i_seq); + if (WARN_ON_ONCE(!new)) + continue; + + old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new); + if (old) + return old; + return new; + } +} + /** * get_futex_key() - Get parameters which are the keys for a futex * @uaddr: virtual address of the futex @@ -511,9 +556,15 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, * * The key words are stored in @key on success. * - * For shared mappings, it's (page->index, file_inode(vma->vm_file), - * offset_within_page). For private mappings, it's (uaddr, current->mm). - * We can usually work out the index without swapping in the page. 
+ * For shared mappings (when @fshared), the key is: + * ( inode->i_sequence, page->index, offset_within_page ) + * [ also see get_inode_sequence_number() ] + * + * For private mappings (or when !@fshared), the key is: + * ( current->mm, address, 0 ) + * + * This allows (cross process, where applicable) identification of the futex + * without keeping the page pinned for the duration of the FUTEX_WAIT. * * lock_page() might sleep, the caller should not hold a spinlock. */ @@ -653,8 +704,6 @@ again: key->private.mm = mm; key->private.address = address; - get_futex_key_refs(key); /* implies smp_mb(); (B) */ - } else { struct inode *inode; @@ -686,40 +735,14 @@ again: goto again; } - /* - * Take a reference unless it is about to be freed. Previously - * this reference was taken by ihold under the page lock - * pinning the inode in place so i_lock was unnecessary. The - * only way for this check to fail is if the inode was - * truncated in parallel which is almost certainly an - * application bug. In such a case, just retry. - * - * We are not calling into get_futex_key_refs() in file-backed - * cases, therefore a successful atomic_inc return below will - * guarantee that get_futex_key() will still imply smp_mb(); (B). - */ - if (!atomic_inc_not_zero(&inode->i_count)) { - rcu_read_unlock(); - put_page(page); - - goto again; - } - - /* Should be impossible but lets be paranoid for now */ - if (WARN_ON_ONCE(inode->i_mapping != mapping)) { - err = -EFAULT; - rcu_read_unlock(); - iput(inode); - - goto out; - } - key->both.offset |= FUT_OFF_INODE; /* inode-based key */ - key->shared.inode = inode; + key->shared.i_seq = get_inode_sequence_number(inode); key->shared.pgoff = basepage_index(tail); rcu_read_unlock(); } + get_futex_key_refs(key); /* implies smp_mb(); (B) */ + out: put_page(page); return err; @@ -890,7 +913,7 @@ static void put_pi_state(struct futex_pi_state *pi_state) * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) */ -void exit_pi_state_list(struct task_struct *curr) +static void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; @@ -960,7 +983,8 @@ void exit_pi_state_list(struct task_struct *curr) } raw_spin_unlock_irq(&curr->pi_lock); } - +#else +static inline void exit_pi_state_list(struct task_struct *curr) { } #endif /* @@ -1169,16 +1193,47 @@ out_error: return ret; } +/** + * wait_for_owner_exiting - Block until the owner has exited + * @exiting: Pointer to the exiting task + * + * Caller must hold a refcount on @exiting. + */ +static void wait_for_owner_exiting(int ret, struct task_struct *exiting) +{ + if (ret != -EBUSY) { + WARN_ON_ONCE(exiting); + return; + } + + if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) + return; + + mutex_lock(&exiting->futex_exit_mutex); + /* + * No point in doing state checking here. If the waiter got here + * while the task was in exec()->exec_futex_release() then it can + * have any FUTEX_STATE_* value when the waiter has acquired the + * mutex. OK, if running, EXITING or DEAD if it reached exit() + * already. Highly unlikely and not a problem. Just one more round + * through the futex maze. + */ + mutex_unlock(&exiting->futex_exit_mutex); + + put_task_struct(exiting); +} + static int handle_exit_race(u32 __user *uaddr, u32 uval, struct task_struct *tsk) { u32 uval2; /* - * If PF_EXITPIDONE is not yet set, then try again. 
+ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the + * caller that the alleged owner is busy. */ - if (tsk && !(tsk->flags & PF_EXITPIDONE)) - return -EAGAIN; + if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) + return -EBUSY; /* * Reread the user space value to handle the following situation: @@ -1196,8 +1251,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval, * *uaddr = 0xC0000000; tsk = get_task(PID); * } if (!tsk->flags & PF_EXITING) { * ... attach(); - * tsk->flags |= PF_EXITPIDONE; } else { - * if (!(tsk->flags & PF_EXITPIDONE)) + * tsk->futex_state = } else { + * FUTEX_STATE_DEAD; if (tsk->futex_state != + * FUTEX_STATE_DEAD) * return -EAGAIN; * return -ESRCH; <--- FAIL * } @@ -1228,7 +1284,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval, * it after doing proper sanity checks. */ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, - struct futex_pi_state **ps) + struct futex_pi_state **ps, + struct task_struct **exiting) { pid_t pid = uval & FUTEX_TID_MASK; struct futex_pi_state *pi_state; @@ -1253,22 +1310,33 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, } /* - * We need to look at the task state flags to figure out, - * whether the task is exiting. To protect against the do_exit - * change of the task flags, we do this protected by - * p->pi_lock: + * We need to look at the task state to figure out, whether the + * task is exiting. To protect against the change of the task state + * in futex_exit_release(), we do this protected by p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); - if (unlikely(p->flags & PF_EXITING)) { + if (unlikely(p->futex_state != FUTEX_STATE_OK)) { /* - * The task is on the way out. When PF_EXITPIDONE is - * set, we know that the task has finished the - * cleanup: + * The task is on the way out. When the futex state is + * FUTEX_STATE_DEAD, we know that the task has finished + * the cleanup: */ int ret = handle_exit_race(uaddr, uval, p); raw_spin_unlock_irq(&p->pi_lock); - put_task_struct(p); + /* + * If the owner task is between FUTEX_STATE_EXITING and + * FUTEX_STATE_DEAD then store the task pointer and keep + * the reference on the task struct. The calling code will + * drop all locks, wait for the task to reach + * FUTEX_STATE_DEAD and then drop the refcount. This is + * required to prevent a live lock when the current task + * preempted the exiting task between the two states. + */ + if (ret == -EBUSY) + *exiting = p; + else + put_task_struct(p); return ret; } @@ -1307,7 +1375,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, static int lookup_pi_state(u32 __user *uaddr, u32 uval, struct futex_hash_bucket *hb, - union futex_key *key, struct futex_pi_state **ps) + union futex_key *key, struct futex_pi_state **ps, + struct task_struct **exiting) { struct futex_q *top_waiter = futex_top_waiter(hb, key); @@ -1322,7 +1391,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval, * We are the first waiter - try to look up the owner based on * @uval and attach to it. */ - return attach_to_pi_owner(uaddr, uval, key, ps); + return attach_to_pi_owner(uaddr, uval, key, ps, exiting); } static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) @@ -1350,6 +1419,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. 
+ * @exiting: Pointer to store the task pointer of the owner task + * which is in the middle of exiting * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Return: @@ -1358,11 +1429,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) * - <0 - error * * The hb->lock and futex_key refs shall be held by the caller. + * + * @exiting is only set when the return value is -EBUSY. If so, this holds + * a refcount on the exiting task on return and the caller needs to drop it + * after waiting for the exit to complete. */ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, - struct task_struct *task, int set_waiters) + struct task_struct *task, + struct task_struct **exiting, + int set_waiters) { u32 uval, newval, vpid = task_pid_vnr(task); struct futex_q *top_waiter; @@ -1432,7 +1509,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, * attach to the owner. If that fails, no harm done, we only * set the FUTEX_WAITERS bit in the user space variable. */ - return attach_to_pi_owner(uaddr, newval, key, ps); + return attach_to_pi_owner(uaddr, newval, key, ps, exiting); } /** @@ -1850,6 +1927,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, * @key1: the from futex key * @key2: the to futex key * @ps: address to store the pi_state pointer + * @exiting: Pointer to store the task pointer of the owner task + * which is in the middle of exiting * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Try and get the lock on behalf of the top waiter if we can do it atomically. @@ -1857,16 +1936,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. * + * @exiting is only set when the return value is -EBUSY. If so, this holds + * a refcount on the exiting task on return and the caller needs to drop it + * after waiting for the exit to complete. + * * Return: * - 0 - failed to acquire the lock atomically; * - >0 - acquired the lock, return value is vpid of the top_waiter * - <0 - error */ -static int futex_proxy_trylock_atomic(u32 __user *pifutex, - struct futex_hash_bucket *hb1, - struct futex_hash_bucket *hb2, - union futex_key *key1, union futex_key *key2, - struct futex_pi_state **ps, int set_waiters) +static int +futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, + struct futex_hash_bucket *hb2, union futex_key *key1, + union futex_key *key2, struct futex_pi_state **ps, + struct task_struct **exiting, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; @@ -1903,7 +1986,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, */ vpid = task_pid_vnr(top_waiter->task); ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, - set_waiters); + exiting, set_waiters); if (ret == 1) { requeue_pi_wake_futex(top_waiter, key2, hb2); return vpid; @@ -2032,6 +2115,8 @@ retry_private: } if (requeue_pi && (task_count - nr_wake < nr_requeue)) { + struct task_struct *exiting = NULL; + /* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS @@ -2039,7 +2124,8 @@ retry_private: * faults rather in the requeue loop below. 
*/ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, - &key2, &pi_state, nr_requeue); + &key2, &pi_state, + &exiting, nr_requeue); /* * At this point the top_waiter has either taken uaddr2 or is @@ -2066,7 +2152,8 @@ retry_private: * If that call succeeds then we have pi_state and an * initial refcount on it. */ - ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state); + ret = lookup_pi_state(uaddr2, ret, hb2, &key2, + &pi_state, &exiting); } switch (ret) { @@ -2084,17 +2171,24 @@ retry_private: if (!ret) goto retry; goto out; + case -EBUSY: case -EAGAIN: /* * Two reasons for this: - * - Owner is exiting and we just wait for the + * - EBUSY: Owner is exiting and we just wait for the * exit to complete. - * - The user space value changed. + * - EAGAIN: The user space value changed. */ double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); + /* + * Handle the case where the owner is in the middle of + * exiting. Wait for the exit to complete, otherwise + * this task might loop forever, i.e. live-lock. + */ + wait_for_owner_exiting(ret, exiting); cond_resched(); goto retry; default: @@ -2801,6 +2895,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, { struct hrtimer_sleeper timeout, *to; struct futex_pi_state *pi_state = NULL; + struct task_struct *exiting = NULL; struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; @@ -2822,7 +2917,8 @@ retry: retry_private: hb = queue_lock(&q); - ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); + ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, + &exiting, 0); if (unlikely(ret)) { /* * Atomic work succeeded and we got the lock, @@ -2835,15 +2931,22 @@ retry_private: goto out_unlock_put_key; case -EFAULT: goto uaddr_faulted; + case -EBUSY: case -EAGAIN: /* * Two reasons for this: - * - Task is exiting and we just wait for the + * - EBUSY: Task is exiting and we just wait for the * exit to complete. - * - The user space value changed. + * - EAGAIN: The user space value changed. */ queue_unlock(hb); put_futex_key(&q.key); + /* + * Handle the case where the owner is in the middle of + * exiting. Wait for the exit to complete, otherwise + * this task might loop forever, i.e. live-lock. + */ + wait_for_owner_exiting(ret, exiting); cond_resched(); goto retry; default: @@ -3452,11 +3555,16 @@ err_unlock: return ret; } +/* Constants for the pending_op argument of handle_futex_death */ +#define HANDLE_DEATH_PENDING true +#define HANDLE_DEATH_LIST false + /* * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ -static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) +static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, + bool pi, bool pending_op) { u32 uval, uninitialized_var(nval), mval; int err; @@ -3469,6 +3577,42 @@ retry: if (get_user(uval, uaddr)) return -1; + /* + * Special case for regular (non-PI) futexes. The unlock path in + * user space has two race scenarios: + * + * 1. The unlock path releases the user space futex value and + * before it can execute the futex() syscall to wake up + * waiters it is killed. + * + * 2. A woken up waiter is killed before it can acquire the + * futex in user space. + * + * In both cases the TID validation below prevents a wakeup of + * potential waiters which can cause these waiters to block + * forever.
+ * + * In both cases the following conditions are met: + * + * 1) task->robust_list->list_op_pending != NULL + * @pending_op == true + * 2) User space futex value == 0 + * 3) Regular futex: @pi == false + * + * If these conditions are met, it is safe to attempt waking up a + * potential waiter without touching the user space futex value and + * trying to set the OWNER_DIED bit. The user space futex value is + * uncontended and the rest of the user space mutex state is + * consistent, so a woken waiter will just take over the + * uncontended futex. Setting the OWNER_DIED bit would create + * inconsistent state and malfunction of the user space owner died + * handling. + */ + if (pending_op && !pi && !uval) { + futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); + return 0; + } + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) return 0; @@ -3547,7 +3691,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, * * We silently return on any sign of list-walking problem. */ -void exit_robust_list(struct task_struct *curr) +static void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; @@ -3588,10 +3732,11 @@ void exit_robust_list(struct task_struct *curr) * A pending lock might already be on the list, so * don't process it twice: */ - if (entry != pending) + if (entry != pending) { if (handle_futex_death((void __user *)entry + futex_offset, - curr, pi)) + curr, pi, HANDLE_DEATH_LIST)) return; + } if (rc) return; entry = next_entry; @@ -3605,9 +3750,118 @@ void exit_robust_list(struct task_struct *curr) cond_resched(); } - if (pending) + if (pending) { handle_futex_death((void __user *)pending + futex_offset, - curr, pip); + curr, pip, HANDLE_DEATH_PENDING); + } +} + +static void futex_cleanup(struct task_struct *tsk) +{ + if (unlikely(tsk->robust_list)) { + exit_robust_list(tsk); + tsk->robust_list = NULL; + } + +#ifdef CONFIG_COMPAT + if (unlikely(tsk->compat_robust_list)) { + compat_exit_robust_list(tsk); + tsk->compat_robust_list = NULL; + } +#endif + + if (unlikely(!list_empty(&tsk->pi_state_list))) + exit_pi_state_list(tsk); +} + +/** + * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD + * @tsk: task to set the state on + * + * Set the futex exit state of the task lockless. The futex waiter code + * observes that state when a task is exiting and loops until the task has + * actually finished the futex cleanup. The worst case for this is that the + * waiter runs through the wait loop until the state becomes visible. + * + * This is called from the recursive fault handling path in do_exit(). + * + * This is best effort. Either the futex exit code has run already or + * not. If the OWNER_DIED bit has been set on the futex then the waiter can + * take it over. If not, the problem is pushed back to user space. If the + * futex exit code did not run yet, then an already queued waiter might + * block forever, but there is nothing which can be done about that. 
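The two user-space races handled above come from the standard TID-based futex locking convention. A minimal user-space sketch of the unlock path; the helper name and lock word are illustrative, the syscall usage is the generic futex(2) form:

        #include <linux/futex.h>
        #include <stdatomic.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static atomic_uint lock_word;   /* 0 == unlocked, else the owner's TID */

        static void tid_futex_unlock(void)
        {
                /* Release in user space first: the futex word becomes 0. */
                atomic_store(&lock_word, 0);
                /*
                 * If the owner is killed right here (race #1 above), no
                 * FUTEX_WAKE is ever issued; without the pending_op wakeup
                 * in the kernel, a queued waiter would sleep forever on an
                 * uncontended futex.
                 */
                syscall(SYS_futex, &lock_word, FUTEX_WAKE, 1, NULL, NULL, 0);
        }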
+ */ +void futex_exit_recursive(struct task_struct *tsk) +{ + /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ + if (tsk->futex_state == FUTEX_STATE_EXITING) + mutex_unlock(&tsk->futex_exit_mutex); + tsk->futex_state = FUTEX_STATE_DEAD; +} + +static void futex_cleanup_begin(struct task_struct *tsk) +{ + /* + * Prevent various race issues against a concurrent incoming waiter + * including live locks by forcing the waiter to block on + * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in + * attach_to_pi_owner(). + */ + mutex_lock(&tsk->futex_exit_mutex); + + /* + * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. + * + * This ensures that all subsequent checks of tsk->futex_state in + * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with + * tsk->pi_lock held. + * + * It also guarantees that a pi_state which was queued right before + * the state change under tsk->pi_lock by a concurrent waiter must + * be observed in exit_pi_state_list(). + */ + raw_spin_lock_irq(&tsk->pi_lock); + tsk->futex_state = FUTEX_STATE_EXITING; + raw_spin_unlock_irq(&tsk->pi_lock); +} + +static void futex_cleanup_end(struct task_struct *tsk, int state) +{ + /* + * Lockless store. The only side effect is that an observer might + * take another loop until it becomes visible. + */ + tsk->futex_state = state; + /* + * Drop the exit protection. This unblocks waiters which observed + * FUTEX_STATE_EXITING to reevaluate the state. + */ + mutex_unlock(&tsk->futex_exit_mutex); +} + +void futex_exec_release(struct task_struct *tsk) +{ + /* + * The state handling is done for consistency, but in the case of + * exec() there is no way to prevent further damage as the PID stays + * the same. But for the unlikely and arguably buggy case that a + * futex is held on exec(), this provides at least as much state + * consistency protection as possible. + */ + futex_cleanup_begin(tsk); + futex_cleanup(tsk); + /* + * Reset the state to FUTEX_STATE_OK. The task is alive and about to + * exec a new binary. + */ + futex_cleanup_end(tsk, FUTEX_STATE_OK); +} + +void futex_exit_release(struct task_struct *tsk) +{ + futex_cleanup_begin(tsk); + futex_cleanup(tsk); + futex_cleanup_end(tsk, FUTEX_STATE_DEAD); } long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, @@ -3737,7 +3991,7 @@ static void __user *futex_uaddr(struct robust_list __user *entry, * * We silently return on any sign of list-walking problem.
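Taken together, the exit-side helpers above implement a one-way state progression. The state definitions live outside this excerpt, so the enum below is only a condensed restatement for orientation:

        enum futex_exit_state {
                FUTEX_STATE_OK,         /* live task: attach_to_pi_owner() may attach */
                FUTEX_STATE_EXITING,    /* futex_cleanup_begin() ran: attach returns
                                         * -EBUSY, waiter blocks on futex_exit_mutex */
                FUTEX_STATE_DEAD,       /* cleanup done: handle_exit_race() resolves
                                         * to -EAGAIN or -ESRCH */
        };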
*/ -void compat_exit_robust_list(struct task_struct *curr) +static void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *next_entry, *pending; @@ -3784,7 +4038,8 @@ void compat_exit_robust_list(struct task_struct *curr) if (entry != pending) { void __user *uaddr = futex_uaddr(entry, futex_offset); - if (handle_futex_death(uaddr, curr, pi)) + if (handle_futex_death(uaddr, curr, pi, + HANDLE_DEATH_LIST)) return; } if (rc) @@ -3803,7 +4058,7 @@ void compat_exit_robust_list(struct task_struct *curr) if (pending) { void __user *uaddr = futex_uaddr(pending, futex_offset); - handle_futex_death(uaddr, curr, pip); + handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); } } diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 060e8e726755..3941a9c48f83 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -4,7 +4,7 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS - select CONSTRUCTORS + select CONSTRUCTORS if !UML default n ---help--- This option enables gcov-based code profiling (e.g. for code coverage diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index c1eccd4f6520..a949bd39e343 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -114,6 +114,7 @@ static const struct irq_bit_descr irqdata_states[] = { BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED), BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN), BIT_MASK_DESCR(IRQD_CAN_RESERVE), + BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK), BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU), diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 3924fbe829d4..c9d8eb7f5c02 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq, extern bool irq_can_set_affinity_usr(unsigned int irq); -extern int irq_select_affinity_usr(unsigned int irq); - extern void irq_set_thread_affinity(struct irq_desc *desc); extern int irq_do_set_affinity(struct irq_data *data, diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index dd822fd8a7d5..480df3659720 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1459,6 +1459,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg) if (rv) { /* Restore the original irq_data. 
*/ *root_irq_data = *child_irq_data; + kfree(child_irq_data); goto error; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 1753486b440c..b304c17d53a3 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -284,7 +284,11 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); - schedule_work(&desc->affinity_notify->work); + if (!schedule_work(&desc->affinity_notify->work)) { + /* Work was already scheduled, drop our extra ref */ + kref_put(&desc->affinity_notify->kref, + desc->affinity_notify->release); + } } irqd_set(data, IRQD_AFFINITY_SET); @@ -384,7 +388,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) raw_spin_unlock_irqrestore(&desc->lock, flags); if (old_notify) { - cancel_work_sync(&old_notify->work); + if (cancel_work_sync(&old_notify->work)) { + /* Pending work had a ref, put that one too */ + kref_put(&old_notify->kref, old_notify->release); + } kref_put(&old_notify->kref, old_notify->release); } @@ -442,23 +449,9 @@ int irq_setup_affinity(struct irq_desc *desc) { return irq_select_affinity(irq_desc_get_irq(desc)); } -#endif +#endif /* CONFIG_AUTO_IRQ_AFFINITY */ +#endif /* CONFIG_SMP */ -/* - * Called when a bogus affinity is set via /proc/irq - */ -int irq_select_affinity_usr(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - int ret; - - raw_spin_lock_irqsave(&desc->lock, flags); - ret = irq_setup_affinity(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); - return ret; -} -#endif /** * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index ad26fbcfbfc8..eb95f6106a1e 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -453,8 +453,11 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, continue; irq_data = irq_domain_get_irq_data(domain, desc->irq); - if (!can_reserve) + if (!can_reserve) { irqd_clr_can_reserve(irq_data); + if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) + irqd_set_msi_nomask_quirk(irq_data); + } ret = irq_domain_activate_irq(irq_data, can_reserve); if (ret) goto cleanup; diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index cfc4f088a0e7..f5958c55406f 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v) return show_irq_affinity(AFFINITY_LIST, m); } +#ifndef CONFIG_AUTO_IRQ_AFFINITY +static inline int irq_select_affinity_usr(unsigned int irq) +{ + /* + * If the interrupt is started up already then this fails. The + * interrupt is assigned to an online CPU already. There is no + * point in moving it around randomly. Tell user space that the + * selected mask is bogus. + * + * If not then any change to the affinity is pointless because the + * startup code invokes irq_setup_affinity() which will select + * an online CPU anyway. + */ + return -EINVAL; +} +#else +/* ALPHA magic affinity auto selector. Keep it for historical reasons.
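Both manage.c hunks above apply the same reference rule: only keep a reference that the queued work will actually consume. A generic sketch of that pattern, with a hypothetical struct and names:

        #include <linux/kref.h>
        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct notify_like {
                struct kref kref;
                struct work_struct work;
        };

        static void notify_like_release(struct kref *kref)
        {
                kfree(container_of(kref, struct notify_like, kref));
        }

        static void notify_like_queue(struct notify_like *n)
        {
                kref_get(&n->kref);
                /*
                 * schedule_work() returns false if the work was already
                 * queued; only the earlier queuer's ref will be consumed,
                 * so drop ours immediately.
                 */
                if (!schedule_work(&n->work))
                        kref_put(&n->kref, notify_like_release);
        }

        static void notify_like_teardown(struct notify_like *n)
        {
                /*
                 * A cancelled pending work still holds a ref it will never
                 * consume; put it on the canceller's behalf.
                 */
                if (cancel_work_sync(&n->work))
                        kref_put(&n->kref, notify_like_release);
                kref_put(&n->kref, notify_like_release); /* our own ref */
        }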
*/ +static inline int irq_select_affinity_usr(unsigned int irq) +{ + return irq_select_affinity(irq); +} +#endif static ssize_t write_irq_affinity(int type, struct file *file, const char __user *buffer, size_t count, loff_t *pos) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 53534aa258a6..2625c241ac00 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -510,6 +510,8 @@ static void do_unoptimize_kprobes(void) arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); /* Loop free_list for disarming */ list_for_each_entry_safe(op, tmp, &freeing_list, list) { + /* Switching from detour code to origin */ + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; /* Disarm probes if marked disabled */ if (kprobe_disabled(&op->kp)) arch_disarm_kprobe(&op->kp); @@ -610,6 +612,18 @@ void wait_for_kprobe_optimizer(void) mutex_unlock(&kprobe_mutex); } +static bool optprobe_queued_unopt(struct optimized_kprobe *op) +{ + struct optimized_kprobe *_op; + + list_for_each_entry(_op, &unoptimizing_list, list) { + if (op == _op) + return true; + } + + return false; +} + /* Optimize kprobe if p is ready to be optimized */ static void optimize_kprobe(struct kprobe *p) { @@ -631,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p) return; /* Check if it is already optimized. */ - if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) + if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { + if (optprobe_queued_unopt(op)) { + /* This is under unoptimizing. Just dequeue the probe */ + list_del_init(&op->list); + } return; + } op->kp.flags |= KPROBE_FLAG_OPTIMIZED; - if (!list_empty(&op->list)) - /* This is under unoptimizing. Just dequeue the probe */ - list_del_init(&op->list); - else { - list_add(&op->list, &optimizing_list); - kick_kprobe_optimizer(); - } + /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */ + if (WARN_ON_ONCE(!list_empty(&op->list))) + return; + + list_add(&op->list, &optimizing_list); + kick_kprobe_optimizer(); } /* Short cut to direct unoptimizing */ @@ -649,6 +667,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op) { lockdep_assert_cpus_held(); arch_unoptimize_kprobe(op); + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; if (kprobe_disabled(&op->kp)) arch_disarm_kprobe(&op->kp); } @@ -662,31 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force) return; /* This is not an optprobe nor optimized */ op = container_of(p, struct optimized_kprobe, kp); - if (!kprobe_optimized(p)) { - /* Unoptimized or unoptimizing case */ - if (force && !list_empty(&op->list)) { - /* - * Only if this is unoptimizing kprobe and forced, - * forcibly unoptimize it. (No need to unoptimize - * unoptimized kprobe again :) - */ + if (!kprobe_optimized(p)) + return; + + if (!list_empty(&op->list)) { + if (optprobe_queued_unopt(op)) { + /* Queued in unoptimizing queue */ + if (force) { + /* + * Forcibly unoptimize the kprobe here, and queue it + * in the freeing list for release afterwards. 
+ */ + force_unoptimize_kprobe(op); + list_move(&op->list, &freeing_list); + } + } else { + /* Dequeue from the optimizing queue */ list_del_init(&op->list); - force_unoptimize_kprobe(op); + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; } return; } - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; - if (!list_empty(&op->list)) { - /* Dequeue from the optimization queue */ - list_del_init(&op->list); - return; - } /* Optimized kprobe case */ - if (force) + if (force) { /* Forcibly update the code: this is a special case */ force_unoptimize_kprobe(op); - else { + } else { list_add(&op->list, &unoptimizing_list); kick_kprobe_optimizer(); } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 233459c03b5a..35d3b6925b1e 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void) struct lock_trace *trace, *t2; struct hlist_head *hash_head; u32 hash; - unsigned int max_entries; + int max_entries; BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE); BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES); @@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void) trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries); max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - LOCK_TRACE_SIZE_IN_LONGS; - trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); - if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - - LOCK_TRACE_SIZE_IN_LONGS - 1) { + if (max_entries <= 0) { if (!debug_locks_off_graph_unlock()) return NULL; @@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void) return NULL; } + trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); hash = jhash(trace->entries, trace->nr_entries * sizeof(trace->entries[0]), 0); diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index dadb7b7fba37..9bb6d2497b04 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -286,9 +286,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v) seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n", nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES); #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) - seq_printf(m, " number of stack traces: %llu\n", + seq_printf(m, " number of stack traces: %11llu\n", lockdep_stack_trace_count()); - seq_printf(m, " number of stack hash chains: %llu\n", + seq_printf(m, " number of stack hash chains: %11llu\n", lockdep_stack_hash_count()); #endif seq_printf(m, " combined max dependencies: %11u\n", diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index eef04551eae7..baafa1dd9fcc 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1226,8 +1226,8 @@ wait: * In this case, we attempt to acquire the lock again * without sleeping. */ - if ((wstate == WRITER_HANDOFF) && - (rwsem_spin_on_owner(sem, 0) == OWNER_NULL)) + if (wstate == WRITER_HANDOFF && + rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL) goto trylock_again; /* Block until there are no active lockers. 
*/ diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c index 399669f7eba8..472dd462a40c 100644 --- a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -51,19 +51,19 @@ EXPORT_SYMBOL(__rwlock_init); static void spin_dump(raw_spinlock_t *lock, const char *msg) { - struct task_struct *owner = NULL; + struct task_struct *owner = READ_ONCE(lock->owner); - if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) - owner = lock->owner; + if (owner == SPINLOCK_OWNER_INIT) + owner = NULL; printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", msg, raw_smp_processor_id(), current->comm, task_pid_nr(current)); printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, " ".owner_cpu: %d\n", - lock, lock->magic, + lock, READ_ONCE(lock->magic), owner ? owner->comm : "", owner ? task_pid_nr(owner) : -1, - lock->owner_cpu); + READ_ONCE(lock->owner_cpu)); dump_stack(); } @@ -80,16 +80,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg) static inline void debug_spin_lock_before(raw_spinlock_t *lock) { - SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); - SPIN_BUG_ON(lock->owner == current, lock, "recursion"); - SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), + SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic"); + SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion"); + SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(), lock, "cpu recursion"); } static inline void debug_spin_lock_after(raw_spinlock_t *lock) { - lock->owner_cpu = raw_smp_processor_id(); - lock->owner = current; + WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id()); + WRITE_ONCE(lock->owner, current); } static inline void debug_spin_unlock(raw_spinlock_t *lock) @@ -99,8 +99,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock) SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), lock, "wrong CPU"); - lock->owner = SPINLOCK_OWNER_INIT; - lock->owner_cpu = -1; + WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT); + WRITE_ONCE(lock->owner_cpu, -1); } /* @@ -187,8 +187,8 @@ static inline void debug_write_lock_before(rwlock_t *lock) static inline void debug_write_lock_after(rwlock_t *lock) { - lock->owner_cpu = raw_smp_processor_id(); - lock->owner = current; + WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id()); + WRITE_ONCE(lock->owner, current); } static inline void debug_write_unlock(rwlock_t *lock) @@ -197,8 +197,8 @@ static inline void debug_write_unlock(rwlock_t *lock) RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), lock, "wrong CPU"); - lock->owner = SPINLOCK_OWNER_INIT; - lock->owner_cpu = -1; + WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT); + WRITE_ONCE(lock->owner_cpu, -1); } void do_raw_write_lock(rwlock_t *lock) diff --git a/kernel/module.c b/kernel/module.c index ff2d7359a418..b7c4cfd3b718 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -214,7 +214,8 @@ static struct module *mod_find(unsigned long addr) { struct module *mod; - list_for_each_entry_rcu(mod, &modules, list) { + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { if (within_module(addr, mod)) return mod; } @@ -448,7 +449,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) return true; - list_for_each_entry_rcu(mod, &modules, list) { + list_for_each_entry_rcu(mod, &modules, list, + 
lockdep_is_held(&module_mutex)) { struct symsearch arr[] = { { mod->syms, mod->syms + mod->num_syms, mod->crcs, NOT_GPL_ONLY, false }, @@ -616,7 +618,8 @@ static struct module *find_module_all(const char *name, size_t len, module_assert_mutex_or_preempt(); - list_for_each_entry_rcu(mod, &modules, list) { + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) continue; if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) @@ -1033,6 +1036,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); free_module(mod); + /* someone could wait for the module in add_unformed_module() */ + wake_up_all(&module_wq); return 0; out: mutex_unlock(&module_mutex); @@ -1360,9 +1365,16 @@ static inline int check_modstruct_version(const struct load_info *info, static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs) { + int l1, l2; if (has_crcs) { - amagic += strcspn(amagic, " "); - bmagic += strcspn(bmagic, " "); + l1 = strcspn(amagic, " "); + l2 = strcspn(bmagic, " "); + if (l1 != l2) + return false; + if (memcmp(amagic, bmagic, l1)) + return false; + amagic += l1; + bmagic += l2; } return strcmp(amagic, bmagic) == 0; } @@ -1779,6 +1791,8 @@ static int module_add_modinfo_attrs(struct module *mod) error_out: if (i > 0) module_remove_modinfo_attrs(mod, --i); + else + kfree(mod->modinfo_attrs); return error; } @@ -2002,6 +2016,7 @@ void module_disable_ro(const struct module *mod) frob_text(&mod->init_layout, set_memory_rw); frob_rodata(&mod->init_layout, set_memory_rw); } +EXPORT_SYMBOL_GPL(module_disable_ro); void module_enable_ro(const struct module *mod, bool after_init) { @@ -2019,6 +2034,7 @@ void module_enable_ro(const struct module *mod, bool after_init) if (after_init) frob_ro_after_init(&mod->core_layout, set_memory_ro); } +EXPORT_SYMBOL_GPL(module_enable_ro); static void module_enable_nx(const struct module *mod) { @@ -2161,6 +2177,81 @@ static void free_module_elf(struct module *mod) } #endif /* CONFIG_LIVEPATCH */ +#if IS_ENABLED(CONFIG_KPATCH) +/* + * Persist Elf information about a module. Copy the Elf header, + * section header table, section string table, and symtab section + * index from info to mod->kpatch_info. + */ +static int copy_module_elf_kpatch(struct module *mod, struct load_info *info) +{ + unsigned int size, symndx; + int ret; + + size = sizeof(*mod->kpatch_info); + mod->kpatch_info = kmalloc(size, GFP_KERNEL); + if (mod->kpatch_info == NULL) + return -ENOMEM; + + /* Elf header */ + size = sizeof(mod->kpatch_info->hdr); + memcpy(&mod->kpatch_info->hdr, info->hdr, size); + + /* Elf section header table */ + size = sizeof(*info->sechdrs) * info->hdr->e_shnum; + mod->kpatch_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); + if (mod->kpatch_info->sechdrs == NULL) { + ret = -ENOMEM; + goto free_info; + } + + /* Elf section name string table */ + size = info->sechdrs[info->hdr->e_shstrndx].sh_size; + mod->kpatch_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); + if (mod->kpatch_info->secstrings == NULL) { + ret = -ENOMEM; + goto free_sechdrs; + } + + /* Elf symbol section index */ + symndx = info->index.sym; + mod->kpatch_info->symndx = symndx; + + /* + * For livepatch modules, core_kallsyms.symtab is a complete + * copy of the original symbol table. 
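To see what the same_magic() change above tightens: the old code skipped to the first space on both sides before comparing, so magic strings differing only in their leading token compared equal whenever CRCs were present. A small user-space demonstration of the old behaviour (the magic values are made up):

        #include <stdio.h>
        #include <string.h>

        /* Old comparison: ignore everything up to the first space. */
        static int same_magic_old(const char *a, const char *b)
        {
                a += strcspn(a, " ");
                b += strcspn(b, " ");
                return strcmp(a, b) == 0;
        }

        int main(void)
        {
                /* Different leading tokens, same tail: the old check matches. */
                printf("%d\n", same_magic_old("5.4.0 SMP mod_unload",
                                              "5.4.0-dirty SMP mod_unload"));
                return 0;
        }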
Adjust sh_addr to point + * to core_kallsyms.symtab since the copy of the symtab in module + * init memory is freed at the end of do_init_module(). + */ + mod->kpatch_info->sechdrs[symndx].sh_addr = \ + (unsigned long) mod->core_kallsyms.symtab; + + return 0; + +free_sechdrs: + kfree(mod->kpatch_info->sechdrs); +free_info: + kfree(mod->kpatch_info); + return ret; +} + +static void free_module_elf_kpatch(struct module *mod) +{ + kfree(mod->kpatch_info->sechdrs); + kfree(mod->kpatch_info->secstrings); + kfree(mod->kpatch_info); +} +#else /* !CONFIG_KPATCH */ +static int copy_module_elf_kpatch(struct module *mod, struct load_info *info) +{ + return 0; +} + +static void free_module_elf_kpatch(struct module *mod) +{ +} +#endif /* CONFIG_KPATCH */ + void __weak module_memfree(void *module_region) { /* @@ -2207,6 +2298,9 @@ static void free_module(struct module *mod) if (is_livepatch_module(mod)) free_module_elf(mod); + if (is_kpatch_module(mod)) + free_module_elf_kpatch(mod); + /* Now we can delete it from the lists */ mutex_lock(&module_mutex); /* Unlink carefully: kallsyms could be walking list. */ @@ -2958,6 +3052,27 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info) } #endif /* CONFIG_LIVEPATCH */ +#if IS_ENABLED(CONFIG_KPATCH) +static int check_modinfo_kpatch(struct module *mod, struct load_info *info) +{ + if (get_modinfo(info, "kpatch")) + mod->is_kpatch_mod = true; + + return 0; +} +#else /* !CONFIG_KPATCH */ +static int check_modinfo_kpatch(struct module *mod, struct load_info *info) +{ + if (get_modinfo(info, "kpatch")) { + pr_err("%s: module is marked as kpatch module, but kpatch support is disabled", + mod->name); + return -ENOEXEC; + } + + return 0; +} +#endif /* CONFIG_KPATCH */ + static void check_modinfo_retpoline(struct module *mod, struct load_info *info) { if (retpoline_module_ok(get_modinfo(info, "retpoline"))) @@ -3052,9 +3167,7 @@ static int setup_load_info(struct load_info *info, int flags) /* Try to find a name early so we can log errors with a module name */ info->index.info = find_sec(info, ".modinfo"); - if (!info->index.info) - info->name = "(missing .modinfo section)"; - else + if (info->index.info) info->name = get_modinfo(info, "name"); /* Find internal symbols and strings. */ @@ -3069,14 +3182,15 @@ static int setup_load_info(struct load_info *info, int flags) } if (info->index.sym == 0) { - pr_warn("%s: module has no symbols (stripped?)\n", info->name); + pr_warn("%s: module has no symbols (stripped?)\n", + info->name ?: "(missing .modinfo section or name field)"); return -ENOEXEC; } info->index.mod = find_sec(info, ".gnu.linkonce.this_module"); if (!info->index.mod) { pr_warn("%s: No module found in object\n", - info->name ?: "(missing .modinfo name field)"); + info->name ?: "(missing .modinfo section or name field)"); return -ENOEXEC; } /* This is temporary: point mod into copy of data. 
*/ @@ -3137,6 +3251,10 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) if (err) return err; + err = check_modinfo_kpatch(mod, info); + if (err) + return err; + /* Set up license info based on the info section */ set_license(mod, get_modinfo(info, "license")); @@ -3222,7 +3340,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* sechdrs[0].sh_size is always zero */ - mod->ftrace_callsites = section_objs(info, "__mcount_loc", + mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, sizeof(*mod->ftrace_callsites), &mod->num_ftrace_callsites); #endif @@ -3918,6 +4036,12 @@ static int load_module(struct load_info *info, const char __user *uargs, goto sysfs_cleanup; } + if (is_kpatch_module(mod)) { + err = copy_module_elf_kpatch(mod, info); + if (err < 0) + goto livepatch_cleanup; + } + /* Get rid of temporary copy. */ free_copy(info); @@ -3926,6 +4050,9 @@ static int load_module(struct load_info *info, const char __user *uargs, return do_init_module(mod); + livepatch_cleanup: + if (is_livepatch_module(mod)) + free_module_elf(mod); sysfs_cleanup: mod_sysfs_teardown(mod); coming_cleanup: diff --git a/kernel/notifier.c b/kernel/notifier.c index d9f5081d578d..157d7c29f720 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -554,7 +554,7 @@ NOKPROBE_SYMBOL(notify_die); int register_die_notifier(struct notifier_block *nb) { - vmalloc_sync_all(); + vmalloc_sync_mappings(); return atomic_notifier_chain_register(&die_chain, nb); } EXPORT_SYMBOL_GPL(register_die_notifier); diff --git a/kernel/padata.c b/kernel/padata.c index c3fec1413295..c4b774331e46 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -35,6 +35,8 @@ #define MAX_OBJ_NUM 1000 +static void padata_free_pd(struct parallel_data *pd); + static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) { int cpu, target_cpu; @@ -87,7 +89,7 @@ static void padata_parallel_worker(struct work_struct *parallel_work) /** * padata_do_parallel - padata parallelization function * - * @pinst: padata instance + * @ps: padata shell * @padata: object to be parallelized * @cb_cpu: pointer to the CPU that the serialization callback function should * run on. If it's not in the serial cpumask of @pinst @@ -98,16 +100,17 @@ static void padata_parallel_worker(struct work_struct *parallel_work) * Note: Every object which is parallelized by padata_do_parallel * must be seen by padata_do_serial.
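The kernel-doc above boils down to a simple calling convention. A hedged usage sketch: my_parallel/my_serial and the ps handle are placeholders, while the padata_do_serial() hand-off at the end of the parallel callback is the documented requirement:

        #include <linux/padata.h>

        static void my_parallel(struct padata_priv *padata)
        {
                /* ... CPU-intensive part, runs on one of the parallel CPUs ... */
                padata_do_serial(padata);       /* hand off for in-order completion */
        }

        static void my_serial(struct padata_priv *padata)
        {
                /* Runs in submission order on the chosen callback CPU. */
        }

        static int submit(struct padata_shell *ps, struct padata_priv *padata,
                          int *cb_cpu)
        {
                padata->parallel = my_parallel;
                padata->serial = my_serial;
                return padata_do_parallel(ps, padata, cb_cpu);
        }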
*/ -int padata_do_parallel(struct padata_instance *pinst, +int padata_do_parallel(struct padata_shell *ps, struct padata_priv *padata, int *cb_cpu) { + struct padata_instance *pinst = ps->pinst; int i, cpu, cpu_index, target_cpu, err; struct padata_parallel_queue *queue; struct parallel_data *pd; rcu_read_lock_bh(); - pd = rcu_dereference_bh(pinst->pd); + pd = rcu_dereference_bh(ps->pd); err = -EINVAL; if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) @@ -210,10 +213,10 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd, static void padata_reorder(struct parallel_data *pd) { + struct padata_instance *pinst = pd->ps->pinst; int cb_cpu; struct padata_priv *padata; struct padata_serial_queue *squeue; - struct padata_instance *pinst = pd->pinst; struct padata_parallel_queue *next_queue; /* @@ -283,6 +286,7 @@ static void padata_serial_worker(struct work_struct *serial_work) struct padata_serial_queue *squeue; struct parallel_data *pd; LIST_HEAD(local_list); + int cnt; local_bh_disable(); squeue = container_of(serial_work, struct padata_serial_queue, work); @@ -292,6 +296,8 @@ static void padata_serial_worker(struct work_struct *serial_work) list_replace_init(&squeue->serial.list, &local_list); spin_unlock(&squeue->serial.lock); + cnt = 0; + while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -301,9 +307,12 @@ static void padata_serial_worker(struct work_struct *serial_work) list_del_init(&padata->list); padata->serial(padata); - atomic_dec(&pd->refcnt); + cnt++; } local_bh_enable(); + + if (atomic_sub_and_test(cnt, &pd->refcnt)) + padata_free_pd(pd); } /** @@ -341,36 +350,39 @@ void padata_do_serial(struct padata_priv *padata) } EXPORT_SYMBOL(padata_do_serial); -static int padata_setup_cpumasks(struct parallel_data *pd, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +static int padata_setup_cpumasks(struct padata_instance *pinst) { struct workqueue_attrs *attrs; + int err; + + attrs = alloc_workqueue_attrs(); + if (!attrs) + return -ENOMEM; + + /* Restrict parallel_wq workers to pd->cpumask.pcpu. */ + cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu); + err = apply_workqueue_attrs(pinst->parallel_wq, attrs); + free_workqueue_attrs(attrs); + + return err; +} + +static int pd_setup_cpumasks(struct parallel_data *pd, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) +{ int err = -ENOMEM; if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) goto out; - cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); - if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) goto free_pcpu_mask; - cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); - attrs = alloc_workqueue_attrs(); - if (!attrs) - goto free_cbcpu_mask; - - /* Restrict parallel_wq workers to pd->cpumask.pcpu. */ - cpumask_copy(attrs->cpumask, pd->cpumask.pcpu); - err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs); - free_workqueue_attrs(attrs); - if (err < 0) - goto free_cbcpu_mask; + cpumask_copy(pd->cpumask.pcpu, pcpumask); + cpumask_copy(pd->cpumask.cbcpu, cbcpumask); return 0; -free_cbcpu_mask: - free_cpumask_var(pd->cpumask.cbcpu); free_pcpu_mask: free_cpumask_var(pd->cpumask.pcpu); out: @@ -414,12 +426,16 @@ static void padata_init_pqueues(struct parallel_data *pd) } /* Allocate and initialize the internal cpumask dependend resources. 
*/ -static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) { + struct padata_instance *pinst = ps->pinst; + const struct cpumask *cbcpumask; + const struct cpumask *pcpumask; struct parallel_data *pd; + cbcpumask = pinst->rcpumask.cbcpu; + pcpumask = pinst->rcpumask.pcpu; + pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); if (!pd) goto err; @@ -432,15 +448,15 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, if (!pd->squeue) goto err_free_pqueue; - pd->pinst = pinst; - if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) + pd->ps = ps; + if (pd_setup_cpumasks(pd, pcpumask, cbcpumask)) goto err_free_squeue; padata_init_pqueues(pd); padata_init_squeues(pd); atomic_set(&pd->seq_nr, -1); atomic_set(&pd->reorder_objects, 0); - atomic_set(&pd->refcnt, 0); + atomic_set(&pd->refcnt, 1); spin_lock_init(&pd->lock); pd->cpu = cpumask_first(pd->cpumask.pcpu); INIT_WORK(&pd->reorder_work, invoke_padata_reorder); @@ -466,29 +482,6 @@ static void padata_free_pd(struct parallel_data *pd) kfree(pd); } -/* Flush all objects out of the padata queues. */ -static void padata_flush_queues(struct parallel_data *pd) -{ - int cpu; - struct padata_parallel_queue *pqueue; - struct padata_serial_queue *squeue; - - for_each_cpu(cpu, pd->cpumask.pcpu) { - pqueue = per_cpu_ptr(pd->pqueue, cpu); - flush_work(&pqueue->work); - } - - if (atomic_read(&pd->reorder_objects)) - padata_reorder(pd); - - for_each_cpu(cpu, pd->cpumask.cbcpu) { - squeue = per_cpu_ptr(pd->squeue, cpu); - flush_work(&squeue->work); - } - - BUG_ON(atomic_read(&pd->refcnt) != 0); -} - static void __padata_start(struct padata_instance *pinst) { pinst->flags |= PADATA_INIT; @@ -502,39 +495,63 @@ static void __padata_stop(struct padata_instance *pinst) pinst->flags &= ~PADATA_INIT; synchronize_rcu(); - - get_online_cpus(); - padata_flush_queues(pinst->pd); - put_online_cpus(); } /* Replace the internal control structure with a new one. 
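The refcount scheme that replaces padata_flush_queues() above is easier to see in isolation: the parallel_data starts at one reference for its owner, every in-flight object holds one more, and the serial worker releases a whole batch at once; whoever drops the count to zero frees. A standalone sketch with a hypothetical struct:

        #include <linux/atomic.h>
        #include <linux/slab.h>

        struct pd_like {
                atomic_t refcnt;
        };

        static struct pd_like *pd_like_alloc(void)
        {
                struct pd_like *pd = kzalloc(sizeof(*pd), GFP_KERNEL);

                if (pd)
                        atomic_set(&pd->refcnt, 1);     /* the owner's reference */
                return pd;
        }

        /* Drop a batch of cnt references; the last dropper frees. */
        static void pd_like_put_many(struct pd_like *pd, int cnt)
        {
                if (atomic_sub_and_test(cnt, &pd->refcnt))
                        kfree(pd);
        }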
*/ -static void padata_replace(struct padata_instance *pinst, - struct parallel_data *pd_new) +static int padata_replace_one(struct padata_shell *ps) +{ + struct parallel_data *pd_new; + + pd_new = padata_alloc_pd(ps); + if (!pd_new) + return -ENOMEM; + + ps->opd = rcu_dereference_protected(ps->pd, 1); + rcu_assign_pointer(ps->pd, pd_new); + + return 0; +} + +static int padata_replace(struct padata_instance *pinst) { - struct parallel_data *pd_old = pinst->pd; int notification_mask = 0; + struct padata_shell *ps; + int err = 0; pinst->flags |= PADATA_RESET; - rcu_assign_pointer(pinst->pd, pd_new); + cpumask_copy(pinst->omask, pinst->rcpumask.pcpu); + cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu, + cpu_online_mask); + if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu)) + notification_mask |= PADATA_CPU_PARALLEL; + + cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu); + cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu, + cpu_online_mask); + if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu)) + notification_mask |= PADATA_CPU_SERIAL; + + list_for_each_entry(ps, &pinst->pslist, list) { + err = padata_replace_one(ps); + if (err) + break; + } synchronize_rcu(); - if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) - notification_mask |= PADATA_CPU_PARALLEL; - if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) - notification_mask |= PADATA_CPU_SERIAL; - - padata_flush_queues(pd_old); - padata_free_pd(pd_old); + list_for_each_entry_continue_reverse(ps, &pinst->pslist, list) + if (atomic_dec_and_test(&ps->opd->refcnt)) + padata_free_pd(ps->opd); if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, notification_mask, - &pd_new->cpumask); + &pinst->cpumask); pinst->flags &= ~PADATA_RESET; + + return err; } /** @@ -587,7 +604,7 @@ static int __padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t cbcpumask) { int valid; - struct parallel_data *pd; + int err; valid = padata_validate_cpumask(pinst, pcpumask); if (!valid) { @@ -600,19 +617,15 @@ static int __padata_set_cpumasks(struct padata_instance *pinst, __padata_stop(pinst); out_replace: - pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); - if (!pd) - return -ENOMEM; - cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - padata_replace(pinst, pd); + err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst); if (valid) __padata_start(pinst); - return 0; + return err; } /** @@ -630,8 +643,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, struct cpumask *serial_mask, *parallel_mask; int err = -EINVAL; - mutex_lock(&pinst->lock); get_online_cpus(); + mutex_lock(&pinst->lock); switch (cpumask_type) { case PADATA_CPU_PARALLEL: @@ -649,8 +662,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); out: - put_online_cpus(); mutex_unlock(&pinst->lock); + put_online_cpus(); return err; } @@ -695,46 +708,32 @@ EXPORT_SYMBOL(padata_stop); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { - struct parallel_data *pd; + int err = 0; if (cpumask_test_cpu(cpu, cpu_online_mask)) { - pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, - pinst->cpumask.cbcpu); - if (!pd) - return -ENOMEM; - - padata_replace(pinst, pd); + err = padata_replace(pinst); if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_start(pinst); } - return 0; + return 
err; } static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) { - struct parallel_data *pd = NULL; - - if (cpumask_test_cpu(cpu, cpu_online_mask)) { + int err = 0; + if (!cpumask_test_cpu(cpu, cpu_online_mask)) { if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_stop(pinst); - pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, - pinst->cpumask.cbcpu); - if (!pd) - return -ENOMEM; - - padata_replace(pinst, pd); - - cpumask_clear_cpu(cpu, pd->cpumask.cbcpu); - cpumask_clear_cpu(cpu, pd->cpumask.pcpu); + err = padata_replace(pinst); } - return 0; + return err; } /** @@ -793,7 +792,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) return ret; } -static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node) +static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) { struct padata_instance *pinst; int ret; @@ -814,11 +813,16 @@ static enum cpuhp_state hp_online; static void __padata_free(struct padata_instance *pinst) { #ifdef CONFIG_HOTPLUG_CPU + cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node); cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node); #endif + WARN_ON(!list_empty(&pinst->pslist)); + padata_stop(pinst); - padata_free_pd(pinst->pd); + free_cpumask_var(pinst->omask); + free_cpumask_var(pinst->rcpumask.cbcpu); + free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); destroy_workqueue(pinst->serial_wq); @@ -965,7 +969,6 @@ static struct padata_instance *padata_alloc(const char *name, const struct cpumask *cbcpumask) { struct padata_instance *pinst; - struct parallel_data *pd = NULL; pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); if (!pinst) @@ -993,14 +996,22 @@ static struct padata_instance *padata_alloc(const char *name, !padata_validate_cpumask(pinst, cbcpumask)) goto err_free_masks; - pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); - if (!pd) + if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL)) goto err_free_masks; + if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL)) + goto err_free_rcpumask_pcpu; + if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL)) + goto err_free_rcpumask_cbcpu; - rcu_assign_pointer(pinst->pd, pd); + INIT_LIST_HEAD(&pinst->pslist); cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); + cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask); + cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask); + + if (padata_setup_cpumasks(pinst)) + goto err_free_omask; pinst->flags = 0; @@ -1010,12 +1021,20 @@ static struct padata_instance *padata_alloc(const char *name, #ifdef CONFIG_HOTPLUG_CPU cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node); + cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, + &pinst->node); #endif put_online_cpus(); return pinst; +err_free_omask: + free_cpumask_var(pinst->omask); +err_free_rcpumask_cbcpu: + free_cpumask_var(pinst->rcpumask.cbcpu); +err_free_rcpumask_pcpu: + free_cpumask_var(pinst->rcpumask.pcpu); err_free_masks: free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); @@ -1054,6 +1073,61 @@ void padata_free(struct padata_instance *pinst) } EXPORT_SYMBOL(padata_free); +/** + * padata_alloc_shell - Allocate and initialize padata shell. + * + * @pinst: Parent padata_instance object. 
+ */ +struct padata_shell *padata_alloc_shell(struct padata_instance *pinst) +{ + struct parallel_data *pd; + struct padata_shell *ps; + + ps = kzalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + goto out; + + ps->pinst = pinst; + + get_online_cpus(); + pd = padata_alloc_pd(ps); + put_online_cpus(); + + if (!pd) + goto out_free_ps; + + mutex_lock(&pinst->lock); + RCU_INIT_POINTER(ps->pd, pd); + list_add(&ps->list, &pinst->pslist); + mutex_unlock(&pinst->lock); + + return ps; + +out_free_ps: + kfree(ps); +out: + return NULL; +} +EXPORT_SYMBOL(padata_alloc_shell); + +/** + * padata_free_shell - free a padata shell + * + * @ps: padata shell to free + */ +void padata_free_shell(struct padata_shell *ps) +{ + struct padata_instance *pinst = ps->pinst; + + mutex_lock(&pinst->lock); + list_del(&ps->list); + padata_free_pd(rcu_dereference_protected(ps->pd, 1)); + mutex_unlock(&pinst->lock); + + kfree(ps); +} +EXPORT_SYMBOL(padata_free_shell); + #ifdef CONFIG_HOTPLUG_CPU static __init int padata_driver_init(void) @@ -1061,17 +1135,24 @@ static __init int padata_driver_init(void) int ret; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online", - padata_cpu_online, - padata_cpu_prep_down); + padata_cpu_online, NULL); if (ret < 0) return ret; hp_online = ret; + + ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead", + NULL, padata_cpu_dead); + if (ret < 0) { + cpuhp_remove_multi_state(hp_online); + return ret; + } return 0; } module_init(padata_driver_init); static __exit void padata_driver_exit(void) { + cpuhp_remove_multi_state(CPUHP_PADATA_DEAD); cpuhp_remove_multi_state(hp_online); } module_exit(padata_driver_exit); diff --git a/kernel/pid.c b/kernel/pid.c index 0a9f2e437217..cd2c32827b20 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -80,6 +80,7 @@ struct pid_namespace init_pid_ns = { .ns.inum = PROC_PID_INIT_INO, #ifdef CONFIG_PID_NS .ns.ops = &pidns_operations, + .max_map_count = DEFAULT_MAX_MAP_COUNT, #endif }; EXPORT_SYMBOL_GPL(init_pid_ns); diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index a6a79f85c81a..62043dad9d50 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -116,6 +116,8 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns ns->user_ns = get_user_ns(user_ns); ns->ucounts = ucounts; ns->pid_allocated = PIDNS_ADDING; + ns->max_map_count = max_map_count_isolated ? + parent_pid_ns->max_map_count : sysctl_max_map_count; INIT_WORK(&ns->proc_work, proc_cleanup_work); return ns; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 83105874f255..d65f2d5ab694 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -734,8 +734,15 @@ zone_found: * We have found the zone. Now walk the radix tree to find the leaf node * for our PFN. */ + + /* + * If the zone we wish to scan is the current zone and the + * pfn falls into the current node then we do not need to walk + * the tree.
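A typical lifecycle for the shell API above, as a sketch; padata_alloc_possible() is assumed to be the instance allocator exported at this point in the series:

        static struct padata_shell *my_padata_init(void)
        {
                struct padata_instance *pinst;
                struct padata_shell *ps;

                pinst = padata_alloc_possible("my_inst");
                if (!pinst)
                        return NULL;

                ps = padata_alloc_shell(pinst);
                if (!ps) {
                        padata_free(pinst);
                        return NULL;
                }
                /* Tear down with padata_free_shell(ps), then padata_free(pinst). */
                return ps;
        }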
+ */ node = bm->cur.node; - if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) + if (zone == bm->cur.zone && + ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) goto node_found; node = zone->rtree; @@ -1140,24 +1147,24 @@ void free_basic_memory_bitmaps(void) void clear_free_pages(void) { -#ifdef CONFIG_PAGE_POISONING_ZERO struct memory_bitmap *bm = free_pages_map; unsigned long pfn; if (WARN_ON(!(free_pages_map))) return; - memory_bm_position_reset(bm); - pfn = memory_bm_next_pfn(bm); - while (pfn != BM_END_OF_MAP) { - if (pfn_valid(pfn)) - clear_highpage(pfn_to_page(pfn)); - + if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) { + memory_bm_position_reset(bm); pfn = memory_bm_next_pfn(bm); + while (pfn != BM_END_OF_MAP) { + if (pfn_valid(pfn)) + clear_highpage(pfn_to_page(pfn)); + + pfn = memory_bm_next_pfn(bm); + } + memory_bm_position_reset(bm); + pr_info("free pages cleared after restore\n"); } - memory_bm_position_reset(bm); - pr_info("free pages cleared after restore\n"); -#endif /* PAGE_POISONING_ZERO */ } /** diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f3b7239f1892..27f149f5d4a9 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -131,11 +131,12 @@ static void s2idle_loop(void) * to avoid them upfront. */ for (;;) { - if (s2idle_ops && s2idle_ops->wake) - s2idle_ops->wake(); - - if (pm_wakeup_pending()) + if (s2idle_ops && s2idle_ops->wake) { + if (s2idle_ops->wake()) + break; + } else if (pm_wakeup_pending()) { break; + } pm_wakeup_clear(false); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index ca65327a6de8..c0a5b56aea4e 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2770,8 +2770,6 @@ void register_console(struct console *newcon) * for us. */ logbuf_lock_irqsave(flags); - console_seq = syslog_seq; - console_idx = syslog_idx; /* * We're about to replay the log buffer. Only do this to the * just-registered console to avoid excessive message spam to @@ -2783,6 +2781,8 @@ void register_console(struct console *newcon) */ exclusive_console = newcon; exclusive_console_stop_seq = console_seq; + console_seq = syslog_seq; + console_idx = syslog_idx; logbuf_unlock_irqrestore(flags); } console_unlock(); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index cb9ddcc08119..36943f38665b 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -264,12 +264,17 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) return ret; } -static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) +static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns, + unsigned int mode) { + int ret; + if (mode & PTRACE_MODE_NOAUDIT) - return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); + ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT); else - return has_ns_capability(current, ns, CAP_SYS_PTRACE); + ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE); + + return ret == 0; } /* Returns 0 on success, -errno on denial. 
*/ @@ -321,7 +326,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) gid_eq(caller_gid, tcred->sgid) && gid_eq(caller_gid, tcred->gid)) goto ok; - if (ptrace_has_cap(tcred->user_ns, mode)) + if (ptrace_has_cap(cred, tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; @@ -340,7 +345,7 @@ ok: mm = task->mm; if (mm && ((get_dumpable(mm) != SUID_DUMP_USER) && - !ptrace_has_cap(mm->user_ns, mode))) + !ptrace_has_cap(cred, mm->user_ns, mode))) return -EPERM; return security_ptrace_access_check(task, mode); @@ -1234,6 +1239,9 @@ int ptrace_request(struct task_struct *child, long request, #define arch_ptrace_attach(child) do { } while (0) #endif +int (*ptrace_pre_hook)(long request, long pid, struct task_struct *task, long addr, long data); +EXPORT_SYMBOL(ptrace_pre_hook); + SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, unsigned long, data) { @@ -1252,6 +1260,12 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, ret = -ESRCH; goto out; } + + if (ptrace_pre_hook) { + ret = ptrace_pre_hook(request, pid, child, addr, data); + if (ret) + goto out_put_task_struct; + } if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ret = ptrace_attach(child, request, addr, data); diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 5dffade2d7cd..21acdff3bd27 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -530,7 +530,7 @@ static void srcu_gp_end(struct srcu_struct *ssp) idx = rcu_seq_state(ssp->srcu_gp_seq); WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); cbdelay = srcu_get_delay(ssp); - ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); + WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns()); rcu_seq_end(&ssp->srcu_gp_seq); gpseq = rcu_seq_current(&ssp->srcu_gp_seq); if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq)) @@ -762,6 +762,7 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp) unsigned long flags; struct srcu_data *sdp; unsigned long t; + unsigned long tlast; /* If the local srcu_data structure has callbacks, not idle. */ local_irq_save(flags); @@ -780,9 +781,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp) /* First, see if enough time has passed since the last GP. */ t = ktime_get_mono_fast_ns(); + tlast = READ_ONCE(ssp->srcu_last_gp_end); if (exp_holdoff == 0 || - time_in_range_open(t, ssp->srcu_last_gp_end, - ssp->srcu_last_gp_end + exp_holdoff)) + time_in_range_open(t, tlast, tlast + exp_holdoff)) return false; /* Too soon after last GP. */ /* Next, check for probable idleness. */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 81105141b6a8..62e59596a30a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -576,7 +576,7 @@ static void rcu_eqs_enter(bool user) } lockdep_assert_irqs_disabled(); - trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); + trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); rdp = this_cpu_ptr(&rcu_data); do_nocb_deferred_wakeup(rdp); @@ -649,14 +649,15 @@ static __always_inline void rcu_nmi_exit_common(bool irq) * leave it in non-RCU-idle state. */ if (rdp->dynticks_nmi_nesting != 1) { - trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks); + trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, + atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. 
*/ rdp->dynticks_nmi_nesting - 2); return; } /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ - trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks); + trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ if (irq) @@ -743,7 +744,7 @@ static void rcu_eqs_exit(bool user) rcu_dynticks_task_exit(); rcu_dynticks_eqs_exit(); rcu_cleanup_after_idle(); - trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); + trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); WRITE_ONCE(rdp->dynticks_nesting, 1); WARN_ON_ONCE(rdp->dynticks_nmi_nesting); @@ -827,7 +828,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq) } trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), rdp->dynticks_nmi_nesting, - rdp->dynticks_nmi_nesting + incby, rdp->dynticks); + rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ rdp->dynticks_nmi_nesting + incby); barrier(); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index d632cd019597..df90d4d7ad2e 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -134,7 +134,7 @@ static void __maybe_unused sync_exp_reset_tree(void) rcu_for_each_node_breadth_first(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); WARN_ON_ONCE(rnp->expmask); - rnp->expmask = rnp->expmaskinit; + WRITE_ONCE(rnp->expmask, rnp->expmaskinit); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } } @@ -211,7 +211,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp, rnp = rnp->parent; raw_spin_lock_rcu_node(rnp); /* irqs already disabled */ WARN_ON_ONCE(!(rnp->expmask & mask)); - rnp->expmask &= ~mask; + WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); } } @@ -241,7 +241,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return; } - rnp->expmask &= ~mask; + WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */ } @@ -372,12 +372,10 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); /* IPI the remaining CPUs for expedited quiescent state. 
*/ - for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { + for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) { unsigned long mask = leaf_node_cpu_bit(rnp, cpu); struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - if (!(mask_ofl_ipi & mask)) - continue; retry_ipi: if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) { mask_ofl_test |= mask; @@ -491,7 +489,7 @@ static void synchronize_sched_expedited_wait(void) struct rcu_data *rdp; mask = leaf_node_cpu_bit(rnp, cpu); - if (!(rnp->expmask & mask)) + if (!(READ_ONCE(rnp->expmask) & mask)) continue; ndetected++; rdp = per_cpu_ptr(&rcu_data, cpu); @@ -503,7 +501,8 @@ static void synchronize_sched_expedited_wait(void) } pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n", jiffies - jiffies_start, rcu_state.expedited_sequence, - rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]); + READ_ONCE(rnp_root->expmask), + ".T"[!!rnp_root->exp_tasks]); if (ndetected) { pr_err("blocking rcu_node structures:"); rcu_for_each_node_breadth_first(rnp) { @@ -513,7 +512,7 @@ static void synchronize_sched_expedited_wait(void) continue; pr_cont(" l=%u:%d-%d:%#lx/%c", rnp->level, rnp->grplo, rnp->grphi, - rnp->expmask, + READ_ONCE(rnp->expmask), ".T"[!!rnp->exp_tasks]); } pr_cont("\n"); @@ -521,7 +520,7 @@ static void synchronize_sched_expedited_wait(void) rcu_for_each_leaf_node(rnp) { for_each_leaf_node_possible_cpu(rnp, cpu) { mask = leaf_node_cpu_bit(rnp, cpu); - if (!(rnp->expmask & mask)) + if (!(READ_ONCE(rnp->expmask) & mask)) continue; dump_cpu_task(cpu); } @@ -541,15 +540,14 @@ static void rcu_exp_wait_wake(unsigned long s) struct rcu_node *rnp; synchronize_sched_expedited_wait(); + + // Switch over to wakeup mode, allowing the next GP to proceed. + // End the previous grace period only after acquiring the mutex + // to ensure that only one GP runs concurrently with wakeups. + mutex_lock(&rcu_state.exp_wake_mutex); rcu_exp_gp_seq_end(); trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end")); - /* - * Switch over to wakeup mode, allowing the next GP, but -only- the - * next GP, to proceed. - */ - mutex_lock(&rcu_state.exp_wake_mutex); - rcu_for_each_node_breadth_first(rnp) { if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { spin_lock(&rnp->exp_lock); @@ -559,7 +557,7 @@ static void rcu_exp_wait_wake(unsigned long s) spin_unlock(&rnp->exp_lock); } smp_mb(); /* All above changes before wakeup. */ - wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]); + wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]); } trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake")); mutex_unlock(&rcu_state.exp_wake_mutex); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 2defc7fe74c3..f7118842a2b8 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -220,7 +220,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * blocked tasks. */ if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { - rnp->gp_tasks = &t->rcu_node_entry; + WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); } if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) @@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch); */ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) { - return rnp->gp_tasks != NULL; + return READ_ONCE(rnp->gp_tasks) != NULL; } /* Bias and limit values for ->rcu_read_lock_nesting. 
*/ @@ -493,7 +493,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) trace_rcu_unlock_preempted_task(TPS("rcu_preempt"), rnp->gp_seq, t->pid); if (&t->rcu_node_entry == rnp->gp_tasks) - rnp->gp_tasks = np; + WRITE_ONCE(rnp->gp_tasks, np); if (&t->rcu_node_entry == rnp->exp_tasks) rnp->exp_tasks = np; if (IS_ENABLED(CONFIG_RCU_BOOST)) { @@ -612,7 +612,7 @@ static void rcu_read_unlock_special(struct task_struct *t) t->rcu_read_unlock_special.b.exp_hint = false; exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) || - (rdp->grpmask & rnp->expmask) || + (rdp->grpmask & READ_ONCE(rnp->expmask)) || tick_nohz_full_cpu(rdp->cpu); // Need to defer quiescent state until everything is enabled. if (irqs_were_disabled && use_softirq && @@ -663,7 +663,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) dump_blkd_tasks(rnp, 10); if (rcu_preempt_has_tasks(rnp) && (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { - rnp->gp_tasks = rnp->blkd_tasks.next; + WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); t = container_of(rnp->gp_tasks, struct task_struct, rcu_node_entry); trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"), @@ -757,7 +757,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n", __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext); pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n", - __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks); + __func__, READ_ONCE(rnp->gp_tasks), rnp->boost_tasks, + rnp->exp_tasks); pr_info("%s: ->blkd_tasks", __func__); i = 0; list_for_each(lhp, &rnp->blkd_tasks) { @@ -1946,7 +1947,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) int __maybe_unused cpu = my_rdp->cpu; unsigned long cur_gp_seq; unsigned long flags; - bool gotcbs; + bool gotcbs = false; unsigned long j = jiffies; bool needwait_gp = false; // This prevents actual uninitialized use. bool needwake; @@ -2321,6 +2322,8 @@ static void __init rcu_organize_nocb_kthreads(void) { int cpu; bool firsttime = true; + bool gotnocbs = false; + bool gotnocbscbs = true; int ls = rcu_nocb_gp_stride; int nl = 0; /* Next GP kthread. */ struct rcu_data *rdp; @@ -2343,21 +2346,31 @@ static void __init rcu_organize_nocb_kthreads(void) rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->cpu >= nl) { /* New GP kthread, set up for CBs & next GP. */ + gotnocbs = true; nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; rdp->nocb_gp_rdp = rdp; rdp_gp = rdp; - if (!firsttime && dump_tree) - pr_cont("\n"); - firsttime = false; - pr_alert("%s: No-CB GP kthread CPU %d:", __func__, cpu); + if (dump_tree) { + if (!firsttime) + pr_cont("%s\n", gotnocbscbs + ? "" : " (self only)"); + gotnocbscbs = false; + firsttime = false; + pr_alert("%s: No-CB GP kthread CPU %d:", + __func__, cpu); + } } else { /* Another CB kthread, link to previous GP kthread. */ + gotnocbscbs = true; rdp->nocb_gp_rdp = rdp_gp; rdp_prev->nocb_next_cb_rdp = rdp; - pr_alert(" %d", cpu); + if (dump_tree) + pr_cont(" %d", cpu); } rdp_prev = rdp; } + if (gotnocbs && dump_tree) + pr_cont("%s\n", gotnocbscbs ? 
"" : " (self only)"); } /* diff --git a/kernel/reboot.c b/kernel/reboot.c index c4d472b7f1b4..509029cbc820 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -17,6 +17,7 @@ #include #include #include +#include /* * this indicates whether you can reboot with ctrl-alt-del: the default is yes @@ -250,6 +251,48 @@ void kernel_restart(char *cmd) pr_emerg("Restarting system\n"); else pr_emerg("Restarting system with command '%s'\n", cmd); + +#ifdef CONFIG_ARCH_PHYTIUM + /* ---Use GPIO to reset phytium cpu ---*/ + if (is_phytium1500()) { + unsigned long gpio_ctl_base; + unsigned long value; + + gpio_ctl_base = (unsigned long)ioremap(0x28006000, 0x10000); + gpio_ctl_base += 0xC; + + value = 0; + value = readl((void __iomem *)(gpio_ctl_base)+0x4); //GPIO_DDR + value = value&(~0x60); + value = value | 0x60; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x4)); //GPIO_DDR + + pr_info("---system restart by gpio---\n"); + value = readl((void __iomem *)(gpio_ctl_base)+0x0); //GPIO_DR + value = value & (~0x60); + value = value | 0x40; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x0)); //GPIO_DR + } else if (is_phytium2000()) { + unsigned long gpio_ctl_base; + unsigned long value; + + gpio_ctl_base = (unsigned long)ioremap(0x80028006000, 0x10000); + gpio_ctl_base += 0x24; + + value = 0; + value = readl((void __iomem *)(gpio_ctl_base)+0x4); //GPIO_DDR + value = value&(~0x18); + value = value | 0x18; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x4)); //GPIO_DDR + + pr_info("---system restart by gpio---\n"); + value = readl((void __iomem *)(gpio_ctl_base)+0x0); //GPIO_DR + value = value&(~0x18); + value = value | 0x10; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x0)); //GPIO_DR + } +#endif + kmsg_dump(KMSG_DUMP_RESTART); machine_restart(cmd); } @@ -274,6 +317,30 @@ void kernel_halt(void) migrate_to_reboot_cpu(); syscore_shutdown(); pr_emerg("System halted\n"); + +#ifdef CONFIG_ARCH_PHYTIUM + /* ---Use GPIO to halt phytium system ---*/ + if (is_phytium1500()) { + unsigned long gpio_ctl_base; + unsigned long value; + + gpio_ctl_base = (unsigned long)ioremap(0x28006000, 0x10000); + gpio_ctl_base += 0xC; + + value = 0; + value = readl((void __iomem *)(gpio_ctl_base)+0x4); //GPIO_DDR + value = value&(~0x60); + value = value | 0x60; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x4)); //GPIO_DDR + + pr_info("---system halt by gpio---\n"); + value = readl((void __iomem *)(gpio_ctl_base)+0x0); //GPIO_DR + value = value&(~0x60); + value = value | 0x20; + writel(value, ((void __iomem *)(gpio_ctl_base)+0x0)); //GPIO_DR + } +#endif + kmsg_dump(KMSG_DUMP_HALT); machine_halt(); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 44123b4d14e8..ea09ebacce45 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -810,7 +810,7 @@ static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value) return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value); } -static inline enum uclamp_id uclamp_none(enum uclamp_id clamp_id) +static inline unsigned int uclamp_none(enum uclamp_id clamp_id) { if (clamp_id == UCLAMP_MIN) return 0; @@ -853,7 +853,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, } static inline -enum uclamp_id uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, +unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) { struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; @@ -918,7 +918,7 @@ uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) return uc_req; } -enum 
uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) +unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) { struct uclamp_se uc_eff; @@ -1252,7 +1252,8 @@ static void __init init_uclamp(void) mutex_init(&uclamp_mutex); for_each_possible_cpu(cpu) { - memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq)); + memset(&cpu_rq(cpu)->uclamp, 0, + sizeof(struct uclamp_rq)*UCLAMP_CNT); cpu_rq(cpu)->uclamp_flags = 0; } @@ -3403,6 +3404,16 @@ unsigned long nr_running(void) return sum; } +unsigned long nr_running_cpu(int cpu) +{ + return cpu_rq(cpu)->nr_running; +} + +unsigned long long nr_context_switches_cpu(int cpu) +{ + return cpu_rq(cpu)->nr_switches; +} + /* * Check if only the current task is running on the CPU. * @@ -3667,28 +3678,32 @@ static void sched_tick_remote(struct work_struct *work) * statistics and checks timeslices in a time-independent way, regardless * of when exactly it is running. */ - if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) + if (!tick_nohz_tick_stopped_cpu(cpu)) goto out_requeue; rq_lock_irq(rq, &rf); curr = rq->curr; - if (is_idle_task(curr) || cpu_is_offline(cpu)) + if (cpu_is_offline(cpu)) goto out_unlock; + curr = rq->curr; update_rq_clock(rq); - delta = rq_clock_task(rq) - curr->se.exec_start; - /* - * Make sure the next tick runs within a reasonable - * amount of time. - */ - WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); + if (!is_idle_task(curr)) { + /* + * Make sure the next tick runs within a reasonable + * amount of time. + */ + delta = rq_clock_task(rq) - curr->se.exec_start; + WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); + } curr->sched_class->task_tick(rq, curr, 0); + calc_load_nohz_remote(rq); out_unlock: rq_unlock_irq(rq, &rf); - out_requeue: + /* * Run the remote tick once per second (1Hz). This arbitrary * frequency is large enough to avoid overload but short enough @@ -7053,8 +7068,15 @@ void sched_move_task(struct task_struct *tsk) if (queued) enqueue_task(rq, tsk, queue_flags); - if (running) + if (running) { set_next_task(rq, tsk); + /* + * After changing group, the running task may have joined a + * throttled one but it's still the running task. Trigger a + * resched to make sure that task can still run. 
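The init_uclamp() fix above widens a memset() that covered only the first element of the per-rq uclamp array: sizeof(struct uclamp_rq) describes one element, not the UCLAMP_CNT-sized array. A standalone demonstration of the pitfall (the structure and sizes are illustrative):

#include <assert.h>
#include <string.h>

struct uclamp_rq_like { unsigned int value; unsigned int bucket[8]; };
#define UCLAMP_CNT 2

int main(void)
{
	struct uclamp_rq_like uc[UCLAMP_CNT];

	memset(uc, 0xff, sizeof(uc));
	/* Wrong: sizeof(struct ...) clears only uc[0]. */
	memset(uc, 0, sizeof(struct uclamp_rq_like));
	assert(uc[1].value != 0);

	memset(uc, 0xff, sizeof(uc));
	/* Right: scale by the element count, as the fix above does. */
	memset(uc, 0, sizeof(struct uclamp_rq_like) * UCLAMP_CNT);
	assert(uc[1].value == 0);
	return 0;
}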
+ */ + resched_curr(rq); + } task_rq_unlock(rq, tsk, &rf); } @@ -7090,6 +7112,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) if (parent) sched_online_group(tg, parent); + +#ifdef CONFIG_UCLAMP_TASK_GROUP + /* Propagate the effective uclamp value for the new group */ + cpu_util_update_eff(css); +#endif + return 0; } @@ -7244,7 +7272,7 @@ capacity_from_percent(char *buf) &req.percent); if (req.ret) return req; - if (req.percent > UCLAMP_PERCENT_SCALE) { + if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { req.ret = -ERANGE; return req; } diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 9fbb10383434..e9fea3cac9f1 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -30,6 +30,8 @@ struct cpuacct { /* cpuusage holds pointer to a u64-type object on every CPU */ struct cpuacct_usage __percpu *cpuusage; struct kernel_cpustat __percpu *cpustat; + struct timespec64 uptime; + u64 idletime; }; static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) @@ -59,6 +61,7 @@ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) { struct cpuacct *ca; + u32 cpu; if (!parent_css) return &root_cpuacct.css; @@ -75,6 +78,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->cpustat) goto out_free_cpuusage; + ktime_get_boottime_ts64(&ca->uptime); + for_each_possible_cpu(cpu) + ca->idletime += (__force u64) kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; + return &ca->css; out_free_cpuusage: @@ -293,6 +300,29 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) return 0; } +static int cpuacct_uptime_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + struct timespec64 uptime, idle, curr; + u64 idletime = 0; + u32 cpu, rem; + + ktime_get_boottime_ts64(&curr); + uptime = timespec64_sub(curr, ca->uptime); + + for_each_possible_cpu(cpu) + idletime += (__force u64) kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; + + idle.tv_sec = div_u64_rem(idletime - ca->idletime, NSEC_PER_SEC, &rem); + idle.tv_nsec = rem; + seq_printf(sf, "%lu.%02lu %lu.%02lu\n", + (unsigned long) uptime.tv_sec, + (uptime.tv_nsec / (NSEC_PER_SEC / 100)), + (unsigned long) idle.tv_sec, + (idle.tv_nsec / (NSEC_PER_SEC / 100))); + return 0; +} + static struct cftype files[] = { { .name = "usage", @@ -327,6 +357,10 @@ static struct cftype files[] = { .name = "stat", .seq_show = cpuacct_stats_show, }, + { + .name = "uptime", + .seq_show = cpuacct_uptime_show, + }, { } /* terminate */ }; diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index b5dcd1d83c7f..7c2fe50fd76d 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -5,6 +5,8 @@ * Copyright (C) 2016, Intel Corporation * Author: Rafael J. Wysocki */ +#include + #include "sched.h" DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); @@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu) rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL); } EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook); + +/** + * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated. + * @policy: cpufreq policy to check. + * + * Return 'true' if: + * - the local and remote CPUs share @policy, + * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going + * offline (in which case it is not expected to run cpufreq updates any more). 
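cpuacct_uptime_show() above renders nanosecond counters in the seconds.centiseconds format of /proc/uptime, splitting with div_u64_rem() and scaling the remainder. A self-contained model of that conversion (plain C; the helper mirrors div_u64_rem() and the input value is an example):

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors div_u64_rem(): quotient returned, remainder via pointer. */
static uint64_t div_u64_rem_like(uint64_t n, uint32_t base, uint32_t *rem)
{
	*rem = (uint32_t)(n % base);
	return n / base;
}

int main(void)
{
	uint64_t idle_ns = 90500000000ULL;	/* 90.5 s of idle time */
	uint32_t rem;
	uint64_t sec = div_u64_rem_like(idle_ns, NSEC_PER_SEC, &rem);

	/* Two decimal places: remainder scaled to centiseconds. */
	printf("%" PRIu64 ".%02u\n", sec, rem / (uint32_t)(NSEC_PER_SEC / 100));
	return 0;
}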
+ */ +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy) +{ + return cpumask_test_cpu(smp_processor_id(), policy->cpus) || + (policy->dvfs_possible_from_any_cpu && + rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data))); +} diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 86800b4d5453..b6f56e7c8dd1 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) * by the hardware, as calculating the frequency is pointless if * we cannot in fact act on it. * - * For the slow switching platforms, the kthread is always scheduled on - * the right set of CPUs and any CPU can find the next frequency and - * schedule the kthread. + * This is needed on the slow switching platforms too to prevent CPUs + * going offline from leaving stale IRQ work items behind. */ - if (sg_policy->policy->fast_switch_enabled && - !cpufreq_this_cpu_can_update(sg_policy->policy)) + if (!cpufreq_this_cpu_can_update(sg_policy->policy)) return false; if (unlikely(sg_policy->limits_changed)) { diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a8a08030a8f7..08bdee0480b3 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1743,13 +1743,16 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p) } #endif -static void set_next_task_dl(struct rq *rq, struct task_struct *p) +static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) { p->se.exec_start = rq_clock_task(rq); /* You can't push away the running task */ dequeue_pushable_dl_task(rq, p); + if (!first) + return; + if (hrtick_enabled(rq)) start_hrtick_dl(rq, p); @@ -1785,7 +1788,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) dl_se = pick_next_dl_entity(rq, dl_rq); BUG_ON(!dl_se); p = dl_task_of(dl_se); - set_next_task_dl(rq, p); + set_next_task_dl(rq, p, true); return p; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 69a81a5709ff..3f723c87abc4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3504,9 +3504,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif - if (decayed) - cfs_rq_util_change(cfs_rq, 0); - return decayed; } @@ -3616,8 +3613,12 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); update_tg_load_avg(cfs_rq, 0); - } else if (decayed && (flags & UPDATE_TG)) - update_tg_load_avg(cfs_rq, 0); + } else if (decayed) { + cfs_rq_util_change(cfs_rq, 0); + + if (flags & UPDATE_TG) + update_tg_load_avg(cfs_rq, 0); + } } #ifndef CONFIG_64BIT @@ -5434,7 +5435,7 @@ static int wake_wide(struct task_struct *p) if (master < slave) swap(master, slave); - if (slave < factor || master < slave * factor) + if (slave <= factor || master <= slave * factor) return 0; return 1; } @@ -5932,6 +5933,7 @@ static inline int select_idle_smt(struct task_struct *p, int target) */ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) { + struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); struct sched_domain *this_sd; u64 avg_cost, avg_idle; u64 time, cost; @@ -5963,11 +5965,11 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t time = cpu_clock(this); - for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { + cpumask_and(cpus, 
sched_domain_span(sd), p->cpus_ptr); + + for_each_cpu_wrap(cpu, cpus, target) { if (!--nr) return si_cpu; - if (!cpumask_test_cpu(cpu, p->cpus_ptr)) - continue; if (available_idle_cpu(cpu)) break; if (si_cpu == -1 && sched_idle_cpu(cpu)) @@ -7517,6 +7519,28 @@ static inline bool others_have_blocked(struct rq *rq) { return false; } static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} #endif +static bool __update_blocked_others(struct rq *rq, bool *done) +{ + const struct sched_class *curr_class; + u64 now = rq_clock_pelt(rq); + bool decayed; + + /* + * update_load_avg() can call cpufreq_update_util(). Make sure that RT, + * DL and IRQ signals have been updated before updating CFS. + */ + curr_class = rq->curr->sched_class; + + decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | + update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | + update_irq_load_avg(rq, 0); + + if (others_have_blocked(rq)) + *done = false; + + return decayed; +} + #ifdef CONFIG_FAIR_GROUP_SCHED static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) @@ -7536,29 +7560,11 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) return true; } -static void update_blocked_averages(int cpu) +static bool __update_blocked_fair(struct rq *rq, bool *done) { - struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq, *pos; - const struct sched_class *curr_class; - struct rq_flags rf; - bool done = true; - - rq_lock_irqsave(rq, &rf); - update_rq_clock(rq); - - /* - * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure - * that RT, DL and IRQ signals have been updated before updating CFS. - */ - curr_class = rq->curr->sched_class; - update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); - update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); - update_irq_load_avg(rq, 0); - - /* Don't need periodic decay once load/util_avg are null */ - if (others_have_blocked(rq)) - done = false; + bool decayed = false; + int cpu = cpu_of(rq); /* * Iterates the task_group tree in a bottom up fashion, see @@ -7567,9 +7573,13 @@ static void update_blocked_averages(int cpu) for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { struct sched_entity *se; - if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) + if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { update_tg_load_avg(cfs_rq, 0); + if (cfs_rq == &rq->cfs) + decayed = true; + } + /* Propagate pending load changes to the parent, if any: */ se = cfs_rq->tg->se[cpu]; if (se && !skip_blocked_update(se)) @@ -7584,11 +7594,10 @@ static void update_blocked_averages(int cpu) /* Don't need periodic decay once load/util_avg are null */ if (cfs_rq_has_blocked(cfs_rq)) - done = false; + *done = false; } - update_blocked_load_status(rq, !done); - rq_unlock_irqrestore(rq, &rf); + return decayed; } /* @@ -7638,29 +7647,16 @@ static unsigned long task_h_load(struct task_struct *p) cfs_rq_load_avg(cfs_rq) + 1); } #else -static inline void update_blocked_averages(int cpu) +static bool __update_blocked_fair(struct rq *rq, bool *done) { - struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq = &rq->cfs; - const struct sched_class *curr_class; - struct rq_flags rf; + bool decayed; - rq_lock_irqsave(rq, &rf); - update_rq_clock(rq); + decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); + if (cfs_rq_has_blocked(cfs_rq)) + *done = false; - /* - * update_cfs_rq_load_avg() can call cpufreq_update_util(). 
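The select_idle_cpu() hunk above hoists the per-CPU affinity test out of the scan loop: the domain span is intersected with p->cpus_ptr once into the select_idle_mask scratch mask, and the loop then visits only eligible CPUs. The shape of the transformation, sketched:

/* Before: test every candidate's affinity inside the hot loop. */
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		continue;
	/* ... probe @cpu for idleness ... */
}

/* After: intersect once, then walk only the eligible CPUs. */
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
for_each_cpu_wrap(cpu, cpus, target) {
	/* ... probe @cpu for idleness ... */
}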
Make sure - * that RT, DL and IRQ signals have been updated before updating CFS. - */ - curr_class = rq->curr->sched_class; - update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); - update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); - update_irq_load_avg(rq, 0); - - update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); - - update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq)); - rq_unlock_irqrestore(rq, &rf); + return decayed; } static unsigned long task_h_load(struct task_struct *p) @@ -7669,6 +7665,24 @@ static unsigned long task_h_load(struct task_struct *p) } #endif +static void update_blocked_averages(int cpu) +{ + bool decayed = false, done = true; + struct rq *rq = cpu_rq(cpu); + struct rq_flags rf; + + rq_lock_irqsave(rq, &rf); + update_rq_clock(rq); + + decayed |= __update_blocked_others(rq, &done); + decayed |= __update_blocked_fair(rq, &done); + + update_blocked_load_status(rq, !done); + if (decayed) + cpufreq_update_util(rq, 0); + rq_unlock_irqrestore(rq, &rf); +} + /********** Helpers for find_busiest_group ************************/ /* @@ -10151,7 +10165,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p) * This routine is mostly called to set cfs_rq->curr field when a task * migrates between groups/classes. */ -static void set_next_task_fair(struct rq *rq, struct task_struct *p) +static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) { struct sched_entity *se = &p->se; diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index f65ef1e2f204..131e7c86cf06 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -385,7 +385,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { } -static void set_next_task_idle(struct rq *rq, struct task_struct *next) +static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) { update_idle_core(rq); schedstat_inc(rq->sched_goidle); @@ -399,7 +399,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf if (prev) put_prev_task(rq, prev); - set_next_task_idle(rq, next); + set_next_task_idle(rq, next, true); return next; } diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 28a516575c18..de22da666ac7 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -231,16 +231,11 @@ static inline int calc_load_read_idx(void) return calc_load_idx & 1; } -void calc_load_nohz_start(void) +static void calc_load_nohz_fold(struct rq *rq) { - struct rq *this_rq = this_rq(); long delta; - /* - * We're going into NO_HZ mode, if there's any pending delta, fold it - * into the pending NO_HZ delta. - */ - delta = calc_load_fold_active(this_rq, 0); + delta = calc_load_fold_active(rq, 0); if (delta) { int idx = calc_load_write_idx(); @@ -248,6 +243,24 @@ void calc_load_nohz_start(void) } } +void calc_load_nohz_start(void) +{ + /* + * We're going into NO_HZ mode, if there's any pending delta, fold it + * into the pending NO_HZ delta. + */ + calc_load_nohz_fold(this_rq()); +} + +/* + * Keep track of the load for NOHZ_FULL, must be called between + * calc_load_nohz_{start,stop}(). 
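__update_blocked_others() and update_blocked_averages() above combine boolean results with bitwise | and |= rather than ||: logical OR would short-circuit and skip the remaining PELT updates once one class reports decay, while every update must run for its side effects. A compilable demonstration (the helper name is illustrative):

#include <assert.h>
#include <stdbool.h>

static int calls;

static bool update_one(bool decayed)
{
	calls++;
	return decayed;
}

int main(void)
{
	calls = 0;
	/* Bitwise |: every update runs; result is true if any decayed. */
	bool decayed = update_one(true) | update_one(false) | update_one(false);
	assert(decayed && calls == 3);

	calls = 0;
	/* Logical ||: short-circuits after the first true operand. */
	decayed = update_one(true) || update_one(false) || update_one(false);
	assert(decayed && calls == 1);
	return 0;
}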
+ */ +void calc_load_nohz_remote(struct rq *rq) +{ + calc_load_nohz_fold(rq); +} + void calc_load_nohz_stop(void) { struct rq *this_rq = this_rq(); @@ -268,7 +281,7 @@ void calc_load_nohz_stop(void) this_rq->calc_load_update += LOAD_FREQ; } -static long calc_load_nohz_fold(void) +static long calc_load_nohz_read(void) { int idx = calc_load_read_idx(); long delta = 0; @@ -323,7 +336,7 @@ static void calc_global_nohz(void) } #else /* !CONFIG_NO_HZ_COMMON */ -static inline long calc_load_nohz_fold(void) { return 0; } +static inline long calc_load_nohz_read(void) { return 0; } static inline void calc_global_nohz(void) { } #endif /* CONFIG_NO_HZ_COMMON */ @@ -346,7 +359,7 @@ void calc_global_load(unsigned long ticks) /* * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs. */ - delta = calc_load_nohz_fold(); + delta = calc_load_nohz_read(); if (delta) atomic_long_add(delta, &calc_load_tasks); diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 517e3719027e..9154e745f097 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -185,7 +185,8 @@ static void group_init(struct psi_group *group) for_each_possible_cpu(cpu) seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); - group->avg_next_update = sched_clock() + psi_period; + group->avg_last_update = sched_clock(); + group->avg_next_update = group->avg_last_update + psi_period; INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); mutex_init(&group->avgs_lock); /* Init trigger-related members */ @@ -481,7 +482,7 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value) u32 remaining; remaining = win->size - elapsed; - growth += div_u64(win->prev_growth * remaining, win->size); + growth += div64_u64(win->prev_growth * remaining, win->size); } return growth; @@ -1198,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf, if (static_branch_likely(&psi_disabled)) return -EOPNOTSUPP; + if (!nbytes) + return -EINVAL; + buf_size = min(nbytes, sizeof(buf)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 9b8adc01be3d..8201c5a0d23d 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1515,13 +1515,16 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag #endif } -static inline void set_next_task_rt(struct rq *rq, struct task_struct *p) +static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) { p->se.exec_start = rq_clock_task(rq); /* The running task is never eligible for pushing */ dequeue_pushable_task(rq, p); + if (!first) + return; + /* * If prev task was rt, put_prev_task() has already updated the * utilization. We only care of the case where we start to schedule a @@ -1575,7 +1578,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) return NULL; p = _pick_next_task_rt(rq); - set_next_task_rt(rq, p); + set_next_task_rt(rq, p, true); return p; } @@ -2399,7 +2402,9 @@ static DEFINE_MUTEX(rt_constraints_mutex); /* Must be called with tasklist_lock held */ static inline int tg_has_rt_tasks(struct task_group *tg) { - struct task_struct *g, *p; + struct task_struct *task; + struct css_task_iter it; + int ret = 0; /* * Autogroups do not have RT tasks; see autogroup_create(). 
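The tg_has_rt_tasks() rewrite in the next hunk swaps a global for_each_process_thread() walk for a cgroup-local iterator, so the cost scales with the group's task count and the tasklist_lock callers can be dropped. A sketch of the css_task_iter pattern it relies on:

#include <linux/cgroup.h>
#include <linux/sched/rt.h>

/* Sketch: visit only the tasks attached to @css, not every thread. */
static bool css_has_rt_task(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	bool ret = false;

	css_task_iter_start(css, 0, &it);
	while (!ret && (task = css_task_iter_next(&it)))
		ret = rt_task(task);
	css_task_iter_end(&it);

	return ret;
}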
@@ -2407,12 +2412,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg) if (task_group_is_autogroup(tg)) return 0; - for_each_process_thread(g, p) { - if (rt_task(p) && task_group(p) == tg) - return 1; - } + css_task_iter_start(&tg->css, 0, &it); + while (!ret && (task = css_task_iter_next(&it))) + ret |= rt_task(task); + css_task_iter_end(&it); - return 0; + return ret; } struct rt_schedulable_data { @@ -2445,7 +2450,8 @@ static int tg_rt_schedulable(struct task_group *tg, void *data) /* * Ensure we don't starve existing RT tasks. */ - if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) + if (rt_bandwidth_enabled() && !runtime && + tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) return -EBUSY; total = to_ratio(period, runtime); @@ -2511,7 +2517,6 @@ static int tg_set_rt_bandwidth(struct task_group *tg, return -EINVAL; mutex_lock(&rt_constraints_mutex); - read_lock(&tasklist_lock); err = __rt_schedulable(tg, rt_period, rt_runtime); if (err) goto unlock; @@ -2529,7 +2534,6 @@ static int tg_set_rt_bandwidth(struct task_group *tg, } raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); unlock: - read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); return err; @@ -2588,9 +2592,7 @@ static int sched_rt_global_constraints(void) int ret = 0; mutex_lock(&rt_constraints_mutex); - read_lock(&tasklist_lock); ret = __rt_schedulable(NULL, 0, 0); - read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); return ret; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c8870c5bd7df..e5e2605778c9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1728,7 +1728,7 @@ struct sched_class { struct task_struct *prev, struct rq_flags *rf); void (*put_prev_task)(struct rq *rq, struct task_struct *p); - void (*set_next_task)(struct rq *rq, struct task_struct *p); + void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); #ifdef CONFIG_SMP int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); @@ -1780,7 +1780,7 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev) static inline void set_next_task(struct rq *rq, struct task_struct *next) { WARN_ON_ONCE(rq->curr != next); - next->sched_class->set_next_task(rq, next); + next->sched_class->set_next_task(rq, next, false); } #ifdef CONFIG_SMP @@ -2309,7 +2309,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} #endif /* CONFIG_CPU_FREQ */ #ifdef CONFIG_UCLAMP_TASK -enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); +unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); static __always_inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util, diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index c0640739e05e..02dc0a8e3925 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -29,7 +29,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) /* we're never preempted */ } -static void set_next_task_stop(struct rq *rq, struct task_struct *stop) +static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) { stop->se.exec_start = rq_clock_task(rq); } @@ -42,7 +42,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf if (!sched_stop_runnable(rq)) return NULL; - set_next_task_stop(rq, rq->stop); + set_next_task_stop(rq, rq->stop, true); return rq->stop; } diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 
49b835f1305f..1fa1e13a5944 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1882,6 +1882,42 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve return sd; } +/* + * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for + * any two given CPUs at this (non-NUMA) topology level. + */ +static bool topology_span_sane(struct sched_domain_topology_level *tl, + const struct cpumask *cpu_map, int cpu) +{ + int i; + + /* NUMA levels are allowed to overlap */ + if (tl->flags & SDTL_OVERLAP) + return true; + + /* + * Non-NUMA levels cannot partially overlap - they must be either + * completely equal or completely disjoint. Otherwise we can end up + * breaking the sched_group lists - i.e. a later get_group() pass + * breaks the linking done for an earlier span. + */ + for_each_cpu(i, cpu_map) { + if (i == cpu) + continue; + /* + * We should 'and' all those masks with 'cpu_map' to exactly + * match the topology we're about to build, but that can only + * remove CPUs, which only lessens our ability to detect + * overlaps + */ + if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && + cpumask_intersects(tl->mask(cpu), tl->mask(i))) + return false; + } + + return true; +} + /* * Find the sched_domain_topology_level where all CPU capacities are visible * for all CPUs. @@ -1978,6 +2014,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att has_asym = true; } + if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) + goto error; + sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); if (tl == sched_domain_topology) diff --git a/kernel/seccomp.c b/kernel/seccomp.c index dba52a7db5e8..614a557a0814 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -1015,6 +1015,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter, struct seccomp_notif unotif; ssize_t ret; + /* Verify that we're not given garbage to keep struct extensible. */ + ret = check_zeroed_user(buf, sizeof(unotif)); + if (ret < 0) + return ret; + if (!ret) + return -EINVAL; + memset(&unotif, 0, sizeof(unotif)); ret = down_interruptible(&filter->notif->request); diff --git a/kernel/signal.c b/kernel/signal.c index bcd46f547db3..eea748174ade 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -413,27 +413,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi { struct sigqueue *q = NULL; struct user_struct *user; + int sigpending; /* * Protect access to @t credentials. This can go away when all * callers hold rcu read lock. + * + * NOTE! A pending signal will hold on to the user refcount, + * and we get/put the refcount only when the sigpending count + * changes from/to zero. 
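The __sigqueue_alloc()/__sigqueue_free() hunk that follows implements exactly what the NOTE above says: the user_struct reference is taken only on the 0 -> 1 transition of the pending count and dropped only on the return to 0, replacing a get/put per queued signal. The pairing, reduced to a sketch:

#include <linux/atomic.h>
#include <linux/sched/user.h>

static void pending_get(struct user_struct *user)
{
	if (atomic_inc_return(&user->sigpending) == 1)
		get_uid(user);	/* first pending signal pins the user */
}

static void pending_put(struct user_struct *user)
{
	if (atomic_dec_and_test(&user->sigpending))
		free_uid(user);	/* last pending signal releases it */
}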
*/ rcu_read_lock(); - user = get_uid(__task_cred(t)->user); - atomic_inc(&user->sigpending); + user = __task_cred(t)->user; + sigpending = atomic_inc_return(&user->sigpending); + if (sigpending == 1) + get_uid(user); rcu_read_unlock(); - if (override_rlimit || - atomic_read(&user->sigpending) <= - task_rlimit(t, RLIMIT_SIGPENDING)) { + if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { q = kmem_cache_alloc(sigqueue_cachep, flags); } else { print_dropped_signal(sig); } if (unlikely(q == NULL)) { - atomic_dec(&user->sigpending); - free_uid(user); + if (atomic_dec_and_test(&user->sigpending)) + free_uid(user); } else { INIT_LIST_HEAD(&q->list); q->flags = 0; @@ -447,8 +452,8 @@ static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) return; - atomic_dec(&q->user->sigpending); - free_uid(q->user); + if (atomic_dec_and_test(&q->user->sigpending)) + free_uid(q->user); kmem_cache_free(sigqueue_cachep, q); } diff --git a/kernel/softirq.c b/kernel/softirq.c index 0427a86743a4..75d5e6a50489 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #define CREATE_TRACE_POINTS #include @@ -76,6 +78,32 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } +unsigned int sysctl_softirq_accel_target = 2 * 1000 * 1000; //2ms +int sysctl_softirq_accel_mask; +int min_softirq_accel_mask; +int max_softirq_accel_mask = (1 << NR_SOFTIRQS) - 1; +static bool need_softirq_accel(struct task_struct *tsk, unsigned long pending) +{ +#ifdef CONFIG_SCHED_INFO + if (tsk && tsk->state == TASK_RUNNING && + (pending & sysctl_softirq_accel_mask)) { + u64 delta = sched_clock_cpu(smp_processor_id()); + + if (!sched_info_on() || current == tsk || + !tsk->sched_info.last_queued || + delta <= tsk->sched_info.last_queued) + return false; + + delta -= tsk->sched_info.last_queued; + if (delta >= sysctl_softirq_accel_target) { + tsk->sched_info.last_queued += delta; + return true; + } + } +#endif + return false; +} + /* * If ksoftirqd is scheduled, we do not want to process pending softirqs * right now. 
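need_softirq_accel() above lets softirqs be handled inline when ksoftirqd has been runnable-but-waiting longer than a latency target, judged from sched_info.last_queued. A condensed userspace model of the decision (the threshold mirrors the sysctl default; names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define ACCEL_TARGET_NS (2 * 1000 * 1000)	/* 2 ms, the sysctl default */

/* now_ns: current clock; queued_ns: when ksoftirqd was last enqueued. */
static bool should_accelerate(uint64_t now_ns, uint64_t queued_ns,
			      unsigned long pending, int accel_mask)
{
	if (!(pending & accel_mask) || !queued_ns || now_ns <= queued_ns)
		return false;
	/* Handle softirqs inline once ksoftirqd has waited too long. */
	return now_ns - queued_ns >= ACCEL_TARGET_NS;
}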
Let ksoftirqd handle this at its own rate, to get fairness, @@ -88,6 +116,8 @@ static bool ksoftirqd_running(unsigned long pending) if (pending & SOFTIRQ_NOW_MASK) return false; + if (sysctl_softirq_accel_mask && need_softirq_accel(tsk, pending)) + return false; return tsk && (tsk->state == TASK_RUNNING) && !__kthread_should_park(tsk); } diff --git a/kernel/sys.c b/kernel/sys.c index a611d1d58c7d..40dee3547dc4 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -1236,6 +1237,39 @@ static int override_release(char __user *release, size_t len) return ret; } +extern int release_suffix; +/* If running in a container, override the release string. */ +static int override_release_for_container(char __user *release, size_t len) +{ + int ret = 0; + + /* in container */ + if (task_active_pid_ns(current)->level) { + const char *rest = UTS_RELEASE; + const char *suffix = "_docker_guest"; + char buf[__NEW_UTS_LEN + 1]; + int nbar = 0; + int idx = 0; + int rlen = strlen(rest); + int newlen = strlen(suffix); + /* If the result would be too long, do nothing. */ + if (rlen + newlen > __NEW_UTS_LEN) + return ret; + + strncpy(buf, rest, rlen); + while (*rest) { + if (*rest == '-' && ++nbar >= 3) + break; + idx++; + rest++; + } + scnprintf(buf + idx, sizeof(buf) - idx, "%s%s", suffix, rest); + ret = copy_to_user(release, buf, rlen + newlen + 1); + + } + return ret; +} + SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) { struct new_utsname tmp; @@ -1248,6 +1282,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) if (override_release(name->release, sizeof(name->release))) return -EFAULT; + if (release_suffix && override_release_for_container(name->release, sizeof(name->release))) + return -EFAULT; if (override_architecture(name)) return -EFAULT; return 0; @@ -1272,6 +1308,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) if (override_release(name->release, sizeof(name->release))) return -EFAULT; + if (release_suffix && override_release_for_container(name->release, sizeof(name->release))) + return -EFAULT; if (override_architecture(name)) return -EFAULT; return 0; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b6f2f35d0bcf..fa320c937fa7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -68,6 +68,7 @@ #include #include #include +#include #include "../lib/kstrtox.h" @@ -105,6 +106,7 @@ /* External variables not in a header file.
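override_release_for_container() above splices a guest suffix in front of the third '-'-separated field of the release string. A standalone model of that string surgery (plain C; the release value and suffix are examples):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *rest = "5.4.32-1-tlinux4-0001";	/* example release */
	const char *suffix = "_docker_guest";
	char buf[65];
	int nbar = 0, idx = 0;

	/* Find the insertion point: just before the third '-'. */
	while (rest[idx]) {
		if (rest[idx] == '-' && ++nbar >= 3)
			break;
		idx++;
	}
	snprintf(buf, sizeof(buf), "%.*s%s%s", idx, rest, suffix, rest + idx);
	puts(buf);	/* 5.4.32-1-tlinux4_docker_guest-0001 */
	return 0;
}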
*/ extern int suid_dumpable; +extern int cpuset_cpuinfo_show_realinfo; #ifdef CONFIG_COREDUMP extern int core_uses_pid; extern char core_pattern[]; @@ -119,6 +121,12 @@ extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; extern int sysctl_nr_trim_pages; #endif +extern int sysctl_min_epoll_wait_time; +extern unsigned int sysctl_softirq_accel_target; +extern int sysctl_softirq_accel_mask; +extern int min_softirq_accel_mask; +extern int max_softirq_accel_mask; + /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; @@ -214,6 +222,9 @@ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif +static int proc_dointvec_max_map_count(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -231,6 +242,7 @@ static int proc_dopipe_max_size(struct ctl_table *table, int write, #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses its own private copy */ static int __sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; +static int __sysrq_use_leftctrl = 1; static int sysrq_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -248,6 +260,22 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write, return 0; } +static int sysrq_use_leftctrl_sysctl_handler(struct ctl_table * table ,int write, + void __user *buffer,size_t *lenp, + loff_t *ppos) +{ + int error; + + error = proc_dointvec(table, write, buffer, lenp, ppos); + if (error) + return error; + + if (write) + sysrq_toggle_sysrq_key(__sysrq_use_leftctrl); + + return 0; +} + #endif static struct ctl_table kern_table[]; @@ -269,6 +297,12 @@ extern struct ctl_table firmware_config_table[]; int sysctl_legacy_va_layout; #endif +int connect_info_flag; +int accept_info_flag; +int sendto_info_flag; +int recvfrom_info_flag; +int execve_info_flag; + /* The default sysctl tables: */ static struct ctl_table sysctl_base_table[] = { @@ -297,6 +331,13 @@ static struct ctl_table sysctl_base_table[] = { .mode = 0555, .child = dev_table, }, + { + .procname = "min_epoll_wait_time", + .data = &sysctl_min_epoll_wait_time, + .maxlen = sizeof(sysctl_min_epoll_wait_time), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { } }; @@ -317,6 +358,75 @@ static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] = { + { + .procname = "connect_info_switch", + .data = &connect_info_flag, + .maxlen = sizeof(connect_info_flag), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .procname = "accept_info_switch", + .data = &accept_info_flag, + .maxlen = sizeof(accept_info_flag), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .procname = "sendto_info_switch", + .data = &sendto_info_flag, + .maxlen = sizeof(sendto_info_flag), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .procname = "recvfrom_info_switch", + .data = &recvfrom_info_flag, + .maxlen = sizeof(recvfrom_info_flag), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .procname = "execve_info_switch", + .data = &execve_info_flag, + .maxlen = sizeof(execve_info_flag), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .procname = "softirq_accel_target_us", + .data = &sysctl_softirq_accel_target, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = 
"softirq_accel_mask", + .data = &sysctl_softirq_accel_mask, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_softirq_accel_mask, + .extra2 = &max_softirq_accel_mask, + }, +#ifdef CONFIG_PID_NS + { + .procname = "watch_host_pid", + .data = &sysctl_watch_host_pid, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "isolate_max_map_count", + .data = &max_map_count_isolated, + .maxlen = sizeof(max_map_count_isolated), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, @@ -324,6 +434,13 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "cpuset_cpuinfo_show_realinfo", + .data = &cpuset_cpuinfo_show_realinfo, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_SCHED_DEBUG { .procname = "sched_min_granularity_ns", @@ -752,6 +869,14 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sysrq_sysctl_handler, }, + { + .procname = "sysrq_use_leftctrl", + .data = &__sysrq_use_leftctrl, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = sysrq_use_leftctrl_sysctl_handler, + + }, #endif #ifdef CONFIG_PROC_SYSCTL { @@ -1290,9 +1415,36 @@ static struct ctl_table kern_table[] = { .extra2 = SYSCTL_ONE, }, #endif + { + .procname = "track_gpu", + .data = &sysctl_track_gpu, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "nvidia_smi_trap", + .data = &sysctl_nvidia_smi_trap, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; +static int pc_limit_proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + +static int pc_reclaim_limit_proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + +static int pc_limit_async_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + static struct ctl_table vm_table[] = { { .procname = "overcommit_memory", @@ -1414,6 +1566,45 @@ static struct ctl_table vm_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = &one_hundred, }, + { + .procname = "pagecache_limit_ratio", + .data = &vm_pagecache_limit_ratio, + .maxlen = sizeof(vm_pagecache_limit_ratio), + .mode = 0644, + .proc_handler = &pc_limit_proc_dointvec, + .extra1 = SYSCTL_ZERO, + .extra2 = &one_hundred, + }, + { + .procname = "pagecache_limit_reclaim_ratio", + .data = &vm_pagecache_limit_reclaim_ratio, + .maxlen = sizeof(vm_pagecache_limit_reclaim_ratio), + .mode = 0644, + .proc_handler = &pc_reclaim_limit_proc_dointvec, + .extra1 = SYSCTL_ZERO, + .extra2 = &one_hundred, + }, + { + .procname = "pagecache_limit_ignore_dirty", + .data = &vm_pagecache_ignore_dirty, + .maxlen = sizeof(vm_pagecache_ignore_dirty), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .procname = "pagecache_limit_async", + .data = &vm_pagecache_limit_async, + .maxlen = sizeof(vm_pagecache_limit_async), + .mode = 0644, + .proc_handler = &pc_limit_async_handler, + }, + { + .procname = "pagecache_limit_ignore_slab", + .data = &vm_pagecache_ignore_slab, + .maxlen = sizeof(vm_pagecache_ignore_slab), + .mode = 0644, + .proc_handler = 
&proc_dointvec, + }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", @@ -1466,7 +1657,7 @@ static struct ctl_table vm_table[] = { .procname = "drop_caches", .data = &sysctl_drop_caches, .maxlen = sizeof(int), - .mode = 0644, + .mode = 0200, .proc_handler = drop_caches_sysctl_handler, .extra1 = SYSCTL_ONE, .extra2 = &four, @@ -1538,7 +1729,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_dointvec_max_map_count, .extra1 = SYSCTL_ZERO, }, #else @@ -2422,6 +2613,71 @@ static int do_proc_dointvec(struct ctl_table *table, int write, buffer, lenp, ppos, conv, data); } +int setup_pagecache_limit(void) +{ + /* reclaim $ADDITIONAL_RECLAIM_PAGES more than limit. */ + vm_pagecache_limit_reclaim_ratio = vm_pagecache_limit_ratio + ADDITIONAL_RECLAIM_RATIO; + + if (vm_pagecache_limit_reclaim_ratio > 100) + vm_pagecache_limit_reclaim_ratio = 100; + if (vm_pagecache_limit_ratio == 0) + vm_pagecache_limit_reclaim_ratio = 0; + + vm_pagecache_limit_pages = vm_pagecache_limit_ratio * totalram_pages() / 100; + vm_pagecache_limit_reclaim_pages = vm_pagecache_limit_reclaim_ratio * totalram_pages() / 100; + return 0; +} + +static int pc_limit_proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (write && !ret) + ret = setup_pagecache_limit(); + return ret; +} + +static int pc_reclaim_limit_proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int pre_reclaim_ratio = vm_pagecache_limit_reclaim_ratio; + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (write && vm_pagecache_limit_ratio == 0) + return -EINVAL; + + if (write && !ret) { + if (vm_pagecache_limit_reclaim_ratio - vm_pagecache_limit_ratio < ADDITIONAL_RECLAIM_RATIO) { + vm_pagecache_limit_reclaim_ratio = pre_reclaim_ratio; + return -EINVAL; + } + vm_pagecache_limit_reclaim_pages = vm_pagecache_limit_reclaim_ratio * totalram_pages() / 100; + } + return ret; +} + +static int pc_limit_async_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (write && vm_pagecache_limit_ratio == 0) + return -EINVAL; + + if (write && !ret) { + if (vm_pagecache_limit_async > 0) { + if (kpagecache_limitd_run()) { + vm_pagecache_limit_async = 0; + return -EINVAL; + } + } + else { + kpagecache_limitd_stop(); + } + } + return ret; +} + static int do_proc_douintvec_w(unsigned int *tbl_data, struct ctl_table *table, void __user *buffer, @@ -2714,6 +2970,22 @@ int proc_dointvec_minmax(struct ctl_table *table, int write, do_proc_dointvec_minmax_conv, ¶m); } +int proc_dointvec_max_map_count(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + +#ifdef CONFIG_PID_NS + table->data = max_map_count_isolated ? 
+ &task_active_pid_ns(current)->max_map_count : &sysctl_max_map_count; +#endif + struct do_proc_dointvec_minmax_conv_param param = { + .min = (int *) table->extra1, + .max = (int *) table->extra2, + }; + return do_proc_dointvec(table, write, buffer, lenp, ppos, + do_proc_dointvec_minmax_conv, ¶m); +} + /** * struct do_proc_douintvec_minmax_conv_param - proc_douintvec_minmax() range checking structure * @min: pointer to minimum allowable value diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 73c132095a7b..bbd263836127 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -386,6 +386,7 @@ static const struct bin_table bin_net_ipv4_table[] = { { CTL_INT, NET_TCP_TW_REUSE, "tcp_tw_reuse" }, { CTL_INT, NET_TCP_FRTO, "tcp_frto" }, { CTL_INT, NET_TCP_FRTO_RESPONSE, "tcp_frto_response" }, + { CTL_INT, NET_TCP_TW_IGNORE_SYN_TSVAL_ZERO, "tcp_tw_ignore_syn_tsval_zero" }, { CTL_INT, NET_TCP_LOW_LATENCY, "tcp_low_latency" }, { CTL_INT, NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" }, { CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" }, diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 13a0f2e6ebc2..0d90569f6d45 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; - struct taskstats *stats; + struct taskstats *stats_new, *stats; - if (sig->stats || thread_group_empty(tsk)) - goto ret; + /* Pairs with smp_store_release() below. */ + stats = smp_load_acquire(&sig->stats); + if (stats || thread_group_empty(tsk)) + return stats; /* No problem if kmem_cache_zalloc() fails */ - stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); + stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); spin_lock_irq(&tsk->sighand->siglock); - if (!sig->stats) { - sig->stats = stats; - stats = NULL; + stats = sig->stats; + if (!stats) { + /* + * Pairs with smp_store_release() above and order the + * kmem_cache_zalloc(). 
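taskstats_tgid_alloc() here is the classic double-checked publication pattern: a lockless smp_load_acquire() fast path, allocation outside ->siglock, and an smp_store_release() under the lock so fast-path readers observe a fully zeroed object. Its skeleton, as a sketch of the same logic:

/* Sketch: publish a lazily allocated per-group object exactly once. */
static struct taskstats *stats_get(struct signal_struct *sig,
				   spinlock_t *lock)
{
	struct taskstats *stats, *stats_new;

	/* Fast path: acquire pairs with the release below. */
	stats = smp_load_acquire(&sig->stats);
	if (stats)
		return stats;

	stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(lock);
	stats = sig->stats;
	if (!stats && stats_new) {
		/* Orders the zeroing before the pointer becomes visible. */
		smp_store_release(&sig->stats, stats_new);
		stats = stats_new;
		stats_new = NULL;
	}
	spin_unlock_irq(lock);

	if (stats_new)
		kmem_cache_free(taskstats_cache, stats_new);	/* lost the race */
	return stats;
}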
+ */ + smp_store_release(&sig->stats, stats_new); + stats = stats_new; + stats_new = NULL; } spin_unlock_irq(&tsk->sighand->siglock); - if (stats) - kmem_cache_free(taskstats_cache, stats); -ret: - return sig->stats; + if (stats_new) + kmem_cache_free(taskstats_cache, stats_new); + + return stats; } /* Send pid data out on exit */ @@ -679,6 +687,7 @@ static struct genl_family family __ro_after_init = { .version = TASKSTATS_GENL_VERSION, .maxattr = TASKSTATS_CMD_ATTR_MAX, .module = THIS_MODULE, + .netnsok = true, .ops = taskstats_ops, .n_ops = ARRAY_SIZE(taskstats_ops), .pre_doit = taskstats_pre_doit, diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 451f9d05ccfe..b97401f6bc23 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -88,6 +88,8 @@ static int alarmtimer_rtc_add_device(struct device *dev, unsigned long flags; struct rtc_device *rtc = to_rtc_device(dev); struct wakeup_source *__ws; + struct platform_device *pdev; + int ret = 0; if (rtcdev) return -EBUSY; @@ -98,12 +100,14 @@ static int alarmtimer_rtc_add_device(struct device *dev, return -1; __ws = wakeup_source_register(dev, "alarmtimer"); + pdev = platform_device_register_data(dev, "alarmtimer", + PLATFORM_DEVID_AUTO, NULL, 0); spin_lock_irqsave(&rtcdev_lock, flags); - if (!rtcdev) { + if (__ws && !IS_ERR(pdev) && !rtcdev) { if (!try_module_get(rtc->owner)) { - spin_unlock_irqrestore(&rtcdev_lock, flags); - return -1; + ret = -1; + goto unlock; } rtcdev = rtc; @@ -111,12 +115,17 @@ static int alarmtimer_rtc_add_device(struct device *dev, get_device(dev); ws = __ws; __ws = NULL; + pdev = NULL; + } else { + ret = -1; } +unlock: spin_unlock_irqrestore(&rtcdev_lock, flags); + platform_device_unregister(pdev); wakeup_source_unregister(__ws); - return 0; + return ret; } static inline void alarmtimer_rtc_timer_init(void) @@ -874,8 +883,7 @@ static struct platform_driver alarmtimer_driver = { */ static int __init alarmtimer_init(void) { - struct platform_device *pdev; - int error = 0; + int error; int i; alarmtimer_rtc_timer_init(); @@ -898,15 +906,7 @@ static int __init alarmtimer_init(void) if (error) goto out_if; - pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0); - if (IS_ERR(pdev)) { - error = PTR_ERR(pdev); - goto out_drv; - } return 0; - -out_drv: - platform_driver_unregister(&alarmtimer_driver); out_if: alarmtimer_rtc_interface_remove(); return error; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index fff5f64981c6..428beb69426a 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -293,8 +293,15 @@ static void clocksource_watchdog(struct timer_list *unused) next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(cpu_online_mask); - watchdog_timer.expires += WATCHDOG_INTERVAL; - add_timer_on(&watchdog_timer, next_cpu); + + /* + * Arm timer if not already pending: could race with concurrent + * pair clocksource_stop_watchdog() clocksource_start_watchdog(). 
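The clocksource hunk that follows re-arms the watchdog only when it is not already pending, closing the race the comment describes with a concurrent clocksource_stop_watchdog()/clocksource_start_watchdog() pair. The guarded re-arm, in brief (assumes watchdog_lock is held, as in the surrounding code):

/* Sketch: under watchdog_lock, re-arm only if nobody re-armed first. */
if (!timer_pending(&watchdog_timer)) {
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
}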
+ */ + if (!timer_pending(&watchdog_timer)) { + watchdog_timer.expires += WATCHDOG_INTERVAL; + add_timer_on(&watchdog_timer, next_cpu); + } out: spin_unlock(&watchdog_lock); } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 65605530ee34..7f31932216a1 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -966,7 +966,8 @@ static int enqueue_hrtimer(struct hrtimer *timer, base->cpu_base->active_bases |= 1 << base->index; - timer->state = HRTIMER_STATE_ENQUEUED; + /* Pairs with the lockless read in hrtimer_is_queued() */ + WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED); return timerqueue_add(&base->active, &timer->node); } @@ -988,7 +989,8 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_cpu_base *cpu_base = base->cpu_base; u8 state = timer->state; - timer->state = newstate; + /* Pairs with the lockless read in hrtimer_is_queued() */ + WRITE_ONCE(timer->state, newstate); if (!(state & HRTIMER_STATE_ENQUEUED)) return; @@ -1013,8 +1015,9 @@ static void __remove_hrtimer(struct hrtimer *timer, static inline int remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart) { - if (hrtimer_is_queued(timer)) { - u8 state = timer->state; + u8 state = timer->state; + + if (state & HRTIMER_STATE_ENQUEUED) { int reprogram; /* diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index ec960bb939fd..200fb2d3be99 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -14,8 +14,6 @@ #include "posix-timers.h" -static void delete_clock(struct kref *kref); - /* * Returns NULL if the posix_clock instance attached to 'fp' is old and stale. */ @@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp) err = 0; if (!err) { - kref_get(&clk->kref); + get_device(clk->dev); fp->private_data = clk; } out: @@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp) if (clk->ops.release) err = clk->ops.release(clk); - kref_put(&clk->kref, delete_clock); + put_device(clk->dev); fp->private_data = NULL; @@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = { #endif }; -int posix_clock_register(struct posix_clock *clk, dev_t devid) +int posix_clock_register(struct posix_clock *clk, struct device *dev) { int err; - kref_init(&clk->kref); init_rwsem(&clk->rwsem); cdev_init(&clk->cdev, &posix_clock_file_operations); + err = cdev_device_add(&clk->cdev, dev); + if (err) { + pr_err("%s unable to add device %d:%d\n", + dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt)); + return err; + } clk->cdev.owner = clk->ops.owner; - err = cdev_add(&clk->cdev, devid, 1); + clk->dev = dev; - return err; + return 0; } EXPORT_SYMBOL_GPL(posix_clock_register); -static void delete_clock(struct kref *kref) -{ - struct posix_clock *clk = container_of(kref, struct posix_clock, kref); - - if (clk->release) - clk->release(clk); -} - void posix_clock_unregister(struct posix_clock *clk) { - cdev_del(&clk->cdev); + cdev_device_del(&clk->cdev, clk->dev); down_write(&clk->rwsem); clk->zombie = true; up_write(&clk->rwsem); - kref_put(&clk->kref, delete_clock); + put_device(clk->dev); } EXPORT_SYMBOL_GPL(posix_clock_unregister); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 955851748dc3..4eac43db8148 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -58,8 +58,9 @@ static void tick_do_update_jiffies64(ktime_t now) /* * Do a quick check without holding jiffies_lock: + * The READ_ONCE() pairs with two updates done later 
in this function. */ - delta = ktime_sub(now, last_jiffies_update); + delta = ktime_sub(now, READ_ONCE(last_jiffies_update)); if (delta < tick_period) return; @@ -70,8 +71,9 @@ static void tick_do_update_jiffies64(ktime_t now) if (delta >= tick_period) { delta = ktime_sub(delta, tick_period); - last_jiffies_update = ktime_add(last_jiffies_update, - tick_period); + /* Pairs with the lockless read in this function. */ + WRITE_ONCE(last_jiffies_update, + ktime_add(last_jiffies_update, tick_period)); /* Slow path for long timeouts */ if (unlikely(delta >= tick_period)) { @@ -79,8 +81,10 @@ static void tick_do_update_jiffies64(ktime_t now) ticks = ktime_divns(delta, incr); - last_jiffies_update = ktime_add_ns(last_jiffies_update, - incr * ticks); + /* Pairs with the lockless read in this function. */ + WRITE_ONCE(last_jiffies_update, + ktime_add_ns(last_jiffies_update, + incr * ticks)); } do_timer(++ticks); @@ -1309,7 +1313,12 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) return HRTIMER_RESTART; } +#ifdef CONFIG_ARM64 static int sched_skew_tick; +#else +/* value -1 means not set via cmdline */ +static int sched_skew_tick = -1; +#endif static int __init skew_tick(char *str) { @@ -1326,6 +1335,7 @@ void tick_setup_sched_timer(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); ktime_t now = ktime_get(); + int cpus = num_possible_cpus(); /* * Emulate tick processing via per-CPU hrtimers: @@ -1337,9 +1347,9 @@ void tick_setup_sched_timer(void) hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); /* Offset the tick to avert jiffies_lock contention. */ - if (sched_skew_tick) { + if (sched_skew_tick > 0 || (sched_skew_tick < 0 && cpus > 16)) { u64 offset = ktime_to_ns(tick_period) >> 1; - do_div(offset, num_possible_cpus()); + do_div(offset, cpus); offset *= smp_processor_id(); hrtimer_add_expires_ns(&ts->sched_timer, offset); } diff --git a/kernel/time/time.c b/kernel/time/time.c index 5c54ca632d08..83f403e7a15c 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -881,7 +881,8 @@ int get_timespec64(struct timespec64 *ts, ts->tv_sec = kts.tv_sec; /* Zero out the padding for 32 bit systems or in compat mode */ - if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall()) + if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || + in_compat_syscall())) kts.tv_nsec &= 0xFFFFFFFFUL; ts->tv_nsec = kts.tv_nsec; diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c index 5ee0f7709410..9577c89179cd 100644 --- a/kernel/time/vsyscall.c +++ b/kernel/time/vsyscall.c @@ -28,11 +28,6 @@ static inline void update_vdso_data(struct vdso_data *vdata, vdata[CS_RAW].mult = tk->tkr_raw.mult; vdata[CS_RAW].shift = tk->tkr_raw.shift; - /* CLOCK_REALTIME */ - vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME]; - vdso_ts->sec = tk->xtime_sec; - vdso_ts->nsec = tk->tkr_mono.xtime_nsec; - /* CLOCK_MONOTONIC */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC]; vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; @@ -70,12 +65,6 @@ static inline void update_vdso_data(struct vdso_data *vdata, vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; vdso_ts->nsec = tk->tkr_mono.xtime_nsec; - - /* - * Read without the seqlock held by clock_getres(). - * Note: No need to have a second copy. 
- */ - WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution); } void update_vsyscall(struct timekeeper *tk) @@ -84,20 +73,17 @@ void update_vsyscall(struct timekeeper *tk) struct vdso_timestamp *vdso_ts; u64 nsec; - if (__arch_update_vdso_data()) { - /* - * Some architectures might want to skip the update of the - * data page. - */ - return; - } - /* copy vsyscall data */ vdso_write_begin(vdata); vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk); vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk); + /* CLOCK_REALTIME also required for time() */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME]; + vdso_ts->sec = tk->xtime_sec; + vdso_ts->nsec = tk->tkr_mono.xtime_nsec; + /* CLOCK_REALTIME_COARSE */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE]; vdso_ts->sec = tk->xtime_sec; @@ -110,7 +96,18 @@ void update_vsyscall(struct timekeeper *tk) nsec = nsec + tk->wall_to_monotonic.tv_nsec; vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec); - update_vdso_data(vdata, tk); + /* + * Read without the seqlock held by clock_getres(). + * Note: No need to have a second copy. + */ + WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution); + + /* + * Architectures can opt out of updating the high resolution part + * of the VDSO. + */ + if (__arch_update_vdso_data()) + update_vdso_data(vdata, tk); __arch_update_vsyscall(vdata, tk); diff --git a/kernel/tkernel/Makefile b/kernel/tkernel/Makefile new file mode 100644 index 000000000000..d76230e17367 --- /dev/null +++ b/kernel/tkernel/Makefile @@ -0,0 +1,7 @@ +obj-y = base.o netbind.o +obj-y += ttools/ +obj-y += shield_mounts.o +obj-y += release_suffix.o +obj-y += netatop/ +obj-y += trackgpu.o +obj-$(CONFIG_KPATCH) += kpatch/ diff --git a/kernel/tkernel/base.c b/kernel/tkernel/base.c new file mode 100644 index 000000000000..b476af17369a --- /dev/null +++ b/kernel/tkernel/base.c @@ -0,0 +1,21 @@ +#include +#include +#include +#include + +struct proc_dir_entry *proc_tkernel; +const struct ctl_path tkernel_ctl_path[] = { + { .procname = "tkernel", }, + { } +}; +static struct ctl_table placeholder_vars[] = { + {} +}; + +static int __init tkernel_init(void) { + proc_tkernel = proc_mkdir("tkernel", NULL); + register_sysctl_paths(tkernel_ctl_path, placeholder_vars); + return 0; +} + +early_initcall(tkernel_init); diff --git a/kernel/tkernel/kpatch/Kconfig b/kernel/tkernel/kpatch/Kconfig new file mode 100644 index 000000000000..b9e1252d6eae --- /dev/null +++ b/kernel/tkernel/kpatch/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config KPATCH + tristate "Kernel Kpatching" + depends on DYNAMIC_FTRACE_WITH_REGS + depends on MODULES + depends on SYSFS + help + Say M here if you want to support kernel kpatch. + This option has no runtime impact until a kernel "patch" + module uses the interface provided by this option to register + a patch, causing calls to patched functions to be redirected + to new function code contained in the patch module. 
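To make the help text above concrete: a patch module ends up filling in the structures declared in kpatch.h (added below) and handing them to kpatch_register(). A minimal hand-written sketch follows, with a hypothetical replacement symbol; real patch modules are generated by the kpatch-build tool, which also fills in old/new sizes, sympos values and dynrelas from ELF sections:

#include <linux/module.h>
#include "kpatch.h"

/* hypothetical replacement body for the function being patched */
extern void new_cmdline_proc_show(void);

static struct kpatch_module kpmod;
static struct kpatch_object obj = { .name = "vmlinux" };
static struct kpatch_func func = {
	.name     = "cmdline_proc_show",	/* old function, resolved via kallsyms */
	.new_addr = (unsigned long)new_cmdline_proc_show,
};

static int __init patch_init(void)
{
	kpmod.mod = THIS_MODULE;
	INIT_LIST_HEAD(&kpmod.objects);
	INIT_LIST_HEAD(&obj.funcs);
	INIT_LIST_HEAD(&obj.dynrelas);
	list_add_tail(&obj.list, &kpmod.objects);
	list_add_tail(&func.list, &obj.funcs);

	/* false: coexist with, rather than replace, earlier patches */
	return kpatch_register(&kpmod, false);
}

static void __exit patch_exit(void)
{
	WARN_ON(kpatch_unregister(&kpmod));
}

module_init(patch_init);
module_exit(patch_exit);
MODULE_LICENSE("GPL");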
diff --git a/kernel/tkernel/kpatch/Makefile b/kernel/tkernel/kpatch/Makefile new file mode 100644 index 000000000000..980aff735e3e --- /dev/null +++ b/kernel/tkernel/kpatch/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_KPATCH) += kpatch.o +kpatch-objs := core.o shadow.o diff --git a/kernel/tkernel/kpatch/core.c b/kernel/tkernel/kpatch/core.c new file mode 100644 index 000000000000..10d6796e74b6 --- /dev/null +++ b/kernel/tkernel/kpatch/core.c @@ -0,0 +1,1347 @@ +/* + * Copyright (C) 2014 Seth Jennings + * Copyright (C) 2013-2014 Josh Poimboeuf + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * kpatch core module + * + * Patch modules register with this module to redirect old functions to new + * functions. + * + * For each function patched by the module we must: + * - Call stop_machine + * - Ensure that no task has the old function in its call stack + * - Add the new function address to kpatch_func_hash + * + * After that, each call to the old function calls into kpatch_ftrace_handler() + * which finds the new function in kpatch_func_hash table and updates the + * return instruction pointer so that ftrace will return to the new function. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kpatch.h" + +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#endif + +#if !defined(CONFIG_FUNCTION_TRACER) || \ + !defined(CONFIG_MODULES) || \ + !defined(CONFIG_SYSFS) || \ + !defined(CONFIG_STACKTRACE) || \ + !defined(CONFIG_KALLSYMS_ALL) +#error "CONFIG_FUNCTION_TRACER, CONFIG_MODULES, CONFIG_SYSFS, CONFIG_KALLSYMS_ALL kernel config options are required" +#endif + +#define KPATCH_HASH_BITS 8 +static DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS); + +static DEFINE_SEMAPHORE(kpatch_mutex); + +static LIST_HEAD(kpmod_list); + +static int kpatch_num_patched; + +struct kobject *kpatch_root_kobj; +EXPORT_SYMBOL_GPL(kpatch_root_kobj); + +struct kpatch_kallsyms_args { + const char *objname; + const char *name; + unsigned long addr; + unsigned long count; + unsigned long pos; +}; + +/* this is a double loop, use goto instead of break */ +#define do_for_each_linked_func(kpmod, func) { \ + struct kpatch_object *_object; \ + list_for_each_entry(_object, &kpmod->objects, list) { \ + if (!kpatch_object_linked(_object)) \ + continue; \ + list_for_each_entry(func, &_object->funcs, list) { + +#define while_for_each_linked_func() \ + } \ + } \ +} + + +/* + * The kpatch core module has a state machine which allows for proper + * synchronization with kpatch_ftrace_handler() when it runs in NMI context. 
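+ * In words: an (un)patch operation moves IDLE -> UPDATING just before
+ * stop_machine(); the operation then resolves UPDATING -> SUCCESS or
+ * UPDATING -> FAILURE through kpatch_state_finish(), an NMI that races
+ * with the update can force FAILURE the same way, and
+ * kpatch_state_idle() finally returns the machine to IDLE.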
+ *
+ *    +-----------------------------------------------------+
+ *    |                                                     |
+ *    |                                                     +
+ *    v                                          +---> KPATCH_STATE_SUCCESS
+ * KPATCH_STATE_IDLE +---> KPATCH_STATE_UPDATING |
+ *    ^                                          +---> KPATCH_STATE_FAILURE
+ *    |                                                     +
+ *    |                                                     |
+ *    +-----------------------------------------------------+
+ *
+ * KPATCH_STATE_IDLE: No updates are pending. The func hash is valid, and the
+ * reader doesn't need to check func->op.
+ *
+ * KPATCH_STATE_UPDATING: An update is in progress. The reader must call
+ * kpatch_state_finish(KPATCH_STATE_FAILURE) before accessing the func hash.
+ *
+ * KPATCH_STATE_FAILURE: An update failed, and the func hash might be
+ * inconsistent (pending patched funcs might not have been removed yet). If
+ * func->op is KPATCH_OP_PATCH, then rollback to the previous version of the
+ * func.
+ *
+ * KPATCH_STATE_SUCCESS: An update succeeded, but the func hash might be
+ * inconsistent (pending unpatched funcs might not have been removed yet). If
+ * func->op is KPATCH_OP_UNPATCH, then rollback to the previous version of the
+ * func.
+ */
+enum {
+	KPATCH_STATE_IDLE,
+	KPATCH_STATE_UPDATING,
+	KPATCH_STATE_SUCCESS,
+	KPATCH_STATE_FAILURE,
+};
+static atomic_t kpatch_state;
+
+static int (*kpatch_set_memory_rw)(unsigned long addr, int numpages);
+static int (*kpatch_set_memory_ro)(unsigned long addr, int numpages);
+
+extern int static_relocate(struct module *mod, unsigned long type,
+			   void *loc, unsigned long value);
+
+#define MAX_STACK_TRACE_DEPTH 64
+static unsigned long stack_entries[MAX_STACK_TRACE_DEPTH];
+static struct stack_trace trace = {
+	.max_entries	= ARRAY_SIZE(stack_entries),
+	.entries	= &stack_entries[0],
+};
+
+static inline void kpatch_state_idle(void)
+{
+	int state = atomic_read(&kpatch_state);
+
+	WARN_ON(state != KPATCH_STATE_SUCCESS && state != KPATCH_STATE_FAILURE);
+	atomic_set(&kpatch_state, KPATCH_STATE_IDLE);
+}
+
+static inline void kpatch_state_updating(void)
+{
+	WARN_ON(atomic_read(&kpatch_state) != KPATCH_STATE_IDLE);
+	atomic_set(&kpatch_state, KPATCH_STATE_UPDATING);
+}
+
+/* If state is updating, change it to success or failure and return new state */
+static inline int kpatch_state_finish(int state)
+{
+	int result;
+
+	WARN_ON(state != KPATCH_STATE_SUCCESS && state != KPATCH_STATE_FAILURE);
+	result = atomic_cmpxchg(&kpatch_state, KPATCH_STATE_UPDATING, state);
+	return result == KPATCH_STATE_UPDATING ?
state : result; +} + +static struct kpatch_func *kpatch_get_func(unsigned long ip) +{ + struct kpatch_func *f; + + /* Here, we have to use rcu safe hlist because of NMI concurrency */ + hash_for_each_possible_rcu(kpatch_func_hash, f, node, ip) + if (f->old_addr == ip) + return f; + return NULL; +} + +static struct kpatch_func *kpatch_get_prev_func(struct kpatch_func *f, + unsigned long ip) +{ + hlist_for_each_entry_continue_rcu(f, node) + if (f->old_addr == ip) + return f; + return NULL; +} + +static inline bool kpatch_object_linked(struct kpatch_object *object) +{ + return object->mod || !strcmp(object->name, "vmlinux"); +} + +static inline int kpatch_compare_addresses(unsigned long stack_addr, + unsigned long func_addr, + unsigned long func_size, + const char *func_name) +{ + if (stack_addr >= func_addr && stack_addr < func_addr + func_size) { + pr_err("activeness safety check failed for %s\n", func_name); + return -EBUSY; + } + return 0; +} + +static int kpatch_backtrace_address_verify(struct kpatch_module *kpmod, + unsigned long address) +{ + struct kpatch_func *func; + int i; + int ret; + + /* check kpmod funcs */ + do_for_each_linked_func(kpmod, func) { + unsigned long func_addr, func_size; + const char *func_name; + struct kpatch_func *active_func; + + if (func->force) + continue; + + active_func = kpatch_get_func(func->old_addr); + if (!active_func) { + /* patching an unpatched func */ + func_addr = func->old_addr; + func_size = func->old_size; + func_name = func->name; + } else { + /* repatching or unpatching */ + func_addr = active_func->new_addr; + func_size = active_func->new_size; + func_name = active_func->name; + } + + ret = kpatch_compare_addresses(address, func_addr, + func_size, func_name); + if (ret) + return ret; + } while_for_each_linked_func(); + + /* in the replace case, need to check the func hash as well */ + hash_for_each_rcu(kpatch_func_hash, i, func, node) { + if (func->op == KPATCH_OP_UNPATCH && !func->force) { + ret = kpatch_compare_addresses(address, + func->new_addr, + func->new_size, + func->name); + if (ret) + return ret; + } + } + + return ret; +} + +/* + * Verify activeness safety, i.e. that none of the to-be-patched functions are + * on the stack of any task. + * + * This function is called from stop_machine() context. + */ +static int kpatch_verify_activeness_safety(struct kpatch_module *kpmod) +{ + struct task_struct *g, *t; + int i; + int ret = 0; + + /* Check the stacks of all tasks. 
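+	 * An illustrative failure case (hypothetical addresses): if the
+	 * currently active version of a patched function occupies the range
+	 * [0xffffffffa0001000, 0xffffffffa0001200) and some task's saved
+	 * return address falls inside it, kpatch_compare_addresses() above
+	 * returns -EBUSY and the whole transition is aborted.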
*/ + do_each_thread(g, t) { + + trace.nr_entries = 0; + save_stack_trace_tsk(t, &trace); + if (trace.nr_entries >= trace.max_entries) { + ret = -EBUSY; + pr_err("more than %u trace entries!\n", + trace.max_entries); + goto out; + } + + for (i = 0; i < trace.nr_entries; i++) { + if (trace.entries[i] == ULONG_MAX) + break; + ret = kpatch_backtrace_address_verify(kpmod, + trace.entries[i]); + if (ret) + goto out; + } + + } while_each_thread(g, t); + +out: + if (ret) { + pr_err("PID: %d Comm: %.20s\n", t->pid, t->comm); + for (i = 0; i < trace.nr_entries; i++) { + if (trace.entries[i] == ULONG_MAX) + break; + pr_err(" [<%pK>] %pB\n", + (void *)trace.entries[i], + (void *)trace.entries[i]); + } + } + + return ret; +} + +static inline int pre_patch_callback(struct kpatch_object *object) +{ + int ret; + + if (kpatch_object_linked(object) && + object->pre_patch_callback) { + ret = (*object->pre_patch_callback)(object); + if (ret) { + object->callbacks_enabled = false; + return ret; + } + } + object->callbacks_enabled = true; + + return 0; +} + +static inline void post_patch_callback(struct kpatch_object *object) +{ + if (kpatch_object_linked(object) && + object->post_patch_callback && + object->callbacks_enabled) + (*object->post_patch_callback)(object); +} + +static inline void pre_unpatch_callback(struct kpatch_object *object) +{ + if (kpatch_object_linked(object) && + object->pre_unpatch_callback && + object->callbacks_enabled) + (*object->pre_unpatch_callback)(object); +} + +static inline void post_unpatch_callback(struct kpatch_object *object) +{ + if (kpatch_object_linked(object) && + object->post_unpatch_callback && + object->callbacks_enabled) + (*object->post_unpatch_callback)(object); +} + +/* Called from stop_machine */ +static int kpatch_apply_patch(void *data) +{ + struct kpatch_module *kpmod = data; + struct kpatch_func *func; + struct kpatch_object *object; + int ret; + + ret = kpatch_verify_activeness_safety(kpmod); + if (ret) { + kpatch_state_finish(KPATCH_STATE_FAILURE); + return ret; + } + + /* run any user-defined pre-patch callbacks */ + list_for_each_entry(object, &kpmod->objects, list) { + ret = pre_patch_callback(object); + if (ret) { + pr_err("pre-patch callback failed!\n"); + kpatch_state_finish(KPATCH_STATE_FAILURE); + goto err; + } + } + + /* tentatively add the new funcs to the global func hash */ + do_for_each_linked_func(kpmod, func) { + hash_add_rcu(kpatch_func_hash, &func->node, func->old_addr); + } while_for_each_linked_func(); + + /* memory barrier between func hash add and state change */ + smp_wmb(); + + /* + * Check if any inconsistent NMI has happened while updating. If not, + * move to success state. 
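+	 * (An NMI that entered kpatch_ftrace_handler() while the state was
+	 * UPDATING has already forced it to FAILURE via
+	 * kpatch_state_finish(); the cmpxchg inside the call below then
+	 * reports that instead of installing SUCCESS.)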
+ */ + ret = kpatch_state_finish(KPATCH_STATE_SUCCESS); + if (ret == KPATCH_STATE_FAILURE) { + pr_err("NMI activeness safety check failed\n"); + + /* Failed, we have to rollback patching process */ + do_for_each_linked_func(kpmod, func) { + hash_del_rcu(&func->node); + } while_for_each_linked_func(); + + ret = -EBUSY; + goto err; + } + + /* run any user-defined post-patch callbacks */ + list_for_each_entry(object, &kpmod->objects, list) + post_patch_callback(object); + + return 0; +err: + /* undo pre-patch callbacks by calling post-unpatch counterparts */ + list_for_each_entry(object, &kpmod->objects, list) + post_unpatch_callback(object); + + return ret; +} + +/* Called from stop_machine */ +static int kpatch_remove_patch(void *data) +{ + struct kpatch_module *kpmod = data; + struct kpatch_func *func; + struct kpatch_object *object; + int ret; + + ret = kpatch_verify_activeness_safety(kpmod); + if (ret) { + kpatch_state_finish(KPATCH_STATE_FAILURE); + return ret; + } + + /* run any user-defined pre-unpatch callbacks */ + list_for_each_entry(object, &kpmod->objects, list) + pre_unpatch_callback(object); + + /* Check if any inconsistent NMI has happened while updating */ + ret = kpatch_state_finish(KPATCH_STATE_SUCCESS); + if (ret == KPATCH_STATE_FAILURE) { + ret = -EBUSY; + goto err; + } + + /* Succeeded, remove all updating funcs from hash table */ + do_for_each_linked_func(kpmod, func) { + hash_del_rcu(&func->node); + } while_for_each_linked_func(); + + /* run any user-defined post-unpatch callbacks */ + list_for_each_entry(object, &kpmod->objects, list) + post_unpatch_callback(object); + + return 0; + +err: + /* undo pre-unpatch callbacks by calling post-patch counterparts */ + list_for_each_entry(object, &kpmod->objects, list) + post_patch_callback(object); + + return ret; +} + +/* + * This is where the magic happens. Update regs->ip to tell ftrace to return + * to the new function. + * + * If there are multiple patch modules that have registered to patch the same + * function, the last one to register wins, as it'll be first in the hash + * bucket. + */ +static void notrace +kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *fops, struct pt_regs *regs) +{ + struct kpatch_func *func; + int state; + + preempt_disable_notrace(); + +#ifdef CONFIG_ARM64 + ip -= AARCH64_INSN_SIZE; +#endif + + if (likely(!in_nmi())) + func = kpatch_get_func(ip); + else { + /* Checking for NMI inconsistency */ + state = kpatch_state_finish(KPATCH_STATE_FAILURE); + + /* no memory reordering between state and func hash read */ + smp_rmb(); + + func = kpatch_get_func(ip); + + if (likely(state == KPATCH_STATE_IDLE)) + goto done; + + if (state == KPATCH_STATE_SUCCESS) { + /* + * Patching succeeded. If the function was being + * unpatched, roll back to the previous version. + */ + if (func && func->op == KPATCH_OP_UNPATCH) + func = kpatch_get_prev_func(func, ip); + } else { + /* + * Patching failed. If the function was being patched, + * roll back to the previous version. 
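+			 * Rolling back relies on bucket ordering:
+			 * hash_add_rcu() inserts at the head of the chain,
+			 * so kpatch_get_prev_func() simply continues down
+			 * the same bucket to the previously registered
+			 * version of the function.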
+ */ + if (func && func->op == KPATCH_OP_PATCH) + func = kpatch_get_prev_func(func, ip); + } + } +done: + + if (func) +#ifdef CONFIG_X86 + regs->ip = func->new_addr + MCOUNT_INSN_SIZE; +#elif defined(CONFIG_ARM64) + regs->pc = func->new_addr; +#else + pr_err("Not support this arch\n"); +#endif + + preempt_enable_notrace(); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) +#define FTRACE_OPS_FL_IPMODIFY 0 +#endif + +static struct ftrace_ops kpatch_ftrace_ops __read_mostly = { + .func = kpatch_ftrace_handler, + .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, +}; + +static int kpatch_ftrace_add_func(unsigned long ip) +{ + int ret; + + /* check if any other patch modules have also patched this func */ + if (kpatch_get_func(ip)) + return 0; + +#ifdef CONFIG_ARM64 + ip += AARCH64_INSN_SIZE; +#endif + ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, ip, 0, 0); + if (ret) { + pr_err("can't set ftrace filter at address 0x%lx\n", ip); + return ret; + } + + if (!kpatch_num_patched) { + ret = register_ftrace_function(&kpatch_ftrace_ops); + if (ret) { + pr_err("can't register ftrace handler\n"); + ftrace_set_filter_ip(&kpatch_ftrace_ops, ip, 1, 0); + return ret; + } + } + kpatch_num_patched++; + + return 0; +} + +static int kpatch_ftrace_remove_func(unsigned long ip) +{ + int ret; + + /* check if any other patch modules have also patched this func */ + if (kpatch_get_func(ip)) + return 0; + + if (kpatch_num_patched == 1) { + ret = unregister_ftrace_function(&kpatch_ftrace_ops); + if (ret) { + pr_err("can't unregister ftrace handler\n"); + return ret; + } + } + kpatch_num_patched--; + +#ifdef CONFIG_ARM64 + ip += AARCH64_INSN_SIZE; +#endif + ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, ip, 1, 0); + if (ret) { + pr_err("can't remove ftrace filter at address 0x%lx\n", ip); + return ret; + } + + return 0; +} + +static int kpatch_kallsyms_callback(void *data, const char *name, + struct module *mod, + unsigned long addr) +{ + struct kpatch_kallsyms_args *args = data; + bool vmlinux = !strcmp(args->objname, "vmlinux"); + + if ((mod && vmlinux) || (!mod && !vmlinux)) + return 0; + + if (strcmp(args->name, name)) + return 0; + + if (!vmlinux && strcmp(args->objname, mod->name)) + return 0; + + args->addr = addr; + args->count++; + + /* + * Finish the search when the symbol is found for the desired position + * or the position is not defined for a non-unique symbol. + */ + if ((args->pos && (args->count == args->pos)) || + (!args->pos && (args->count > 1))) { + return 1; + } + + return 0; +} + +static int kpatch_find_object_symbol(const char *objname, const char *name, + unsigned long sympos, unsigned long *addr) +{ + struct kpatch_kallsyms_args args = { + .objname = objname, + .name = name, + .addr = 0, + .count = 0, + .pos = sympos, + }; + + mutex_lock(&module_mutex); + kallsyms_on_each_symbol(kpatch_kallsyms_callback, &args); + mutex_unlock(&module_mutex); + + /* + * Ensure an address was found. If sympos is 0, ensure symbol is unique; + * otherwise ensure the symbol position count matches sympos. 
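+	 * Example (hypothetical): with two static functions both named
+	 * 'show_stat' in the object, sympos 0 fails below with the
+	 * ambiguity error, while sympos 2 selects the second occurrence in
+	 * kallsyms iteration order.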
+ */ + if (args.addr == 0) + pr_err("symbol '%s' not found in symbol table\n", name); + else if (args.count > 1 && sympos == 0) { + pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n", + name, objname); + } else if (sympos != args.count && sympos > 0) { + pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n", + sympos, name, objname); + } else { + *addr = args.addr; + return 0; + } + + *addr = 0; + return -EINVAL; +} + +/* + * External symbols are located outside the parent object (where the parent + * object is either vmlinux or the kmod being patched). + */ +static int kpatch_find_external_symbol(const char *objname, const char *name, + unsigned long sympos, unsigned long *addr) + +{ + const struct kernel_symbol *sym; + + /* first, check if it's an exported symbol */ + preempt_disable(); + sym = find_symbol(name, NULL, NULL, true, true); + preempt_enable(); + if (sym) { +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + *addr = (unsigned long)offset_to_ptr(&sym->value_offset); +#else + *addr = sym->value; +#endif + return 0; + } + + /* otherwise check if it's in another .o within the patch module */ + return kpatch_find_object_symbol(objname, name, sympos, addr); +} + +static int kpatch_write_relocations(struct kpatch_module *kpmod, + struct kpatch_object *object) +{ + int ret, size, readonly = 0, numpages; + struct kpatch_dynrela *dynrela; + u64 loc, val; +#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) ) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) && \ + UTS_UBUNTU_RELEASE_ABI >= 7 ) \ + ) + unsigned long core = (unsigned long)kpmod->mod->core_layout.base; + unsigned long core_size = kpmod->mod->core_layout.size; +#else + unsigned long core = (unsigned long)kpmod->mod->module_core; + unsigned long core_size = kpmod->mod->core_size; +#endif + + list_for_each_entry(dynrela, &object->dynrelas, list) { + if (dynrela->external) + ret = kpatch_find_external_symbol(kpmod->mod->name, + dynrela->name, + dynrela->sympos, + &dynrela->src); + else + ret = kpatch_find_object_symbol(object->name, + dynrela->name, + dynrela->sympos, + &dynrela->src); + if (ret) { + pr_err("unable to find symbol '%s'\n", dynrela->name); + return ret; + } + + switch (dynrela->type) { +#ifdef CONFIG_X86 + case R_X86_64_NONE: + continue; + case R_X86_64_PC32: + loc = dynrela->dest; + val = (u32)(dynrela->src + dynrela->addend - + dynrela->dest); + size = 4; + break; + case R_X86_64_32S: + loc = dynrela->dest; + val = (s32)dynrela->src + dynrela->addend; + size = 4; + break; + case R_X86_64_64: + loc = dynrela->dest; + val = dynrela->src; + size = 8; + break; +#endif + default: + pr_err("unsupported rela type %ld for source %s (0x%lx <- 0x%lx)\n", + dynrela->type, dynrela->name, dynrela->dest, + dynrela->src); + return -EINVAL; + } + + if (loc < core || loc >= core + core_size) { + pr_err("bad dynrela location 0x%llx for symbol %s\n", + loc, dynrela->name); + return -EINVAL; + } + + /* + * Skip it if the instruction to be relocated has been + * changed already (paravirt or alternatives may do this). 
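+		 * (Dynrela destinations start out as zero placeholders, so
+		 * memchr_inv() returning non-NULL means the 'size' bytes at
+		 * 'loc' are no longer all zero, i.e. the site was already
+		 * rewritten.)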
+ */ + if (memchr_inv((void *)loc, 0, size)) { + pr_notice("Skipped dynrela for %s (0x%lx <- 0x%lx): the instruction has been changed already.\n", + dynrela->name, dynrela->dest, dynrela->src); + pr_notice_once( +"This is not necessarily a bug but it may indicate in some cases " +"that the binary patch does not handle paravirt operations, alternatives or the like properly.\n"); + continue; + } + +#if defined(CONFIG_DEBUG_SET_MODULE_RONX) || defined(CONFIG_ARCH_HAS_SET_MEMORY) +#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) ) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) && \ + UTS_UBUNTU_RELEASE_ABI >= 7 ) \ + ) + readonly = (loc < core + kpmod->mod->core_layout.ro_size); +#else + readonly = (loc < core + kpmod->mod->core_ro_size); +#endif +#endif + + numpages = (PAGE_SIZE - (loc & ~PAGE_MASK) >= size) ? 1 : 2; + + if (readonly) + kpatch_set_memory_rw(loc & PAGE_MASK, numpages); + + ret = probe_kernel_write((void *)loc, &val, size); + + if (readonly) + kpatch_set_memory_ro(loc & PAGE_MASK, numpages); + + if (ret) { + pr_err("write to 0x%llx failed for symbol %s\n", + loc, dynrela->name); + return ret; + } + } + + return 0; +} + +static int kpatch_write_relocations_arm64(struct kpatch_module *kpmod, + struct kpatch_object *object) +{ + struct module *mod = kpmod->mod; + struct kpatch_dynrela *dynrela; + u64 loc, val; + int type, ret; + + module_disable_ro(mod); + list_for_each_entry(dynrela, &object->dynrelas, list) { + if (dynrela->external) + ret = kpatch_find_external_symbol(kpmod->mod->name, + dynrela->name, + dynrela->sympos, + &dynrela->src); + else + ret = kpatch_find_object_symbol(object->name, + dynrela->name, + dynrela->sympos, + &dynrela->src); + if (ret) { + pr_err("unable to find symbol '%s'\n", dynrela->name); + module_enable_ro(mod, true); + return ret; + } + + loc = dynrela->dest; + val = dynrela->src + dynrela->addend; + type = dynrela->type; + ret = static_relocate(mod, type, (void *)loc, val); + if (ret) { + pr_err("write to 0x%llx failed for symbol %s\n", + loc, dynrela->name); + module_enable_ro(mod, true); + return ret; + } + } + module_enable_ro(mod, true); + return 0; +} + +static int kpatch_unlink_object(struct kpatch_object *object) +{ + struct kpatch_func *func; + int ret; + + list_for_each_entry(func, &object->funcs, list) { + if (!func->old_addr) + continue; + ret = kpatch_ftrace_remove_func(func->old_addr); + if (ret) { + WARN(1, "can't unregister ftrace for address 0x%lx\n", + func->old_addr); + return ret; + } + } + + if (object->mod) { + module_put(object->mod); + object->mod = NULL; + } + + return 0; +} + +/* + * Link to a to-be-patched object in preparation for patching it. + * + * - Find the object module + * - Write patch module relocations which reference the object + * - Calculate the patched functions' addresses + * - Register them with ftrace + */ +static int kpatch_link_object(struct kpatch_module *kpmod, + struct kpatch_object *object) +{ + struct module *mod = NULL; + struct kpatch_func *func, *func_err = NULL; + int ret; + bool vmlinux = !strcmp(object->name, "vmlinux"); + + if (!vmlinux) { + mutex_lock(&module_mutex); + mod = find_module(object->name); + if (!mod) { + /* + * The module hasn't been loaded yet. We can patch it + * later in kpatch_module_notify(). 
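+			 * (kpatch_module_notify_coming() below, registered
+			 * with priority INT_MIN so it runs after other
+			 * notifiers, links this object and patches it once
+			 * the target module loads; returning 0 here is
+			 * therefore deliberate.)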
+ */ + mutex_unlock(&module_mutex); + return 0; + } + + /* should never fail because we have the mutex */ + WARN_ON(!try_module_get(mod)); + mutex_unlock(&module_mutex); + object->mod = mod; + } + +#ifdef CONFIG_X86 + ret = kpatch_write_relocations(kpmod, object); +#elif defined(CONFIG_ARM64) + ret = kpatch_write_relocations_arm64(kpmod, object); +#else + pr_err("Not support this arch relocations\n"); + ret = -EINVAL; +#endif + if (ret) + goto err_put; + + list_for_each_entry(func, &object->funcs, list) { + + /* lookup the old location */ + ret = kpatch_find_object_symbol(object->name, + func->name, + func->sympos, + &func->old_addr); + if (ret) { + func_err = func; + goto err_ftrace; + } + + /* add to ftrace filter and register handler if needed */ + ret = kpatch_ftrace_add_func(func->old_addr); + if (ret) { + func_err = func; + goto err_ftrace; + } + } + + return 0; + +err_ftrace: + list_for_each_entry(func, &object->funcs, list) { + if (func == func_err) + break; + WARN_ON(kpatch_ftrace_remove_func(func->old_addr)); + } +err_put: + if (!vmlinux) + module_put(mod); + return ret; +} + +static int kpatch_module_notify_coming(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct module *mod = data; + struct kpatch_module *kpmod; + struct kpatch_object *object; + struct kpatch_func *func; + int ret = 0; + bool found = false; + + if (action != MODULE_STATE_COMING) + return 0; + + down(&kpatch_mutex); + + list_for_each_entry(kpmod, &kpmod_list, list) { + list_for_each_entry(object, &kpmod->objects, list) { + if (kpatch_object_linked(object)) + continue; + if (!strcmp(object->name, mod->name)) { + found = true; + goto done; + } + } + } +done: + if (!found) + goto out; + + ret = kpatch_link_object(kpmod, object); + if (ret) + goto out; + + BUG_ON(!object->mod); + + pr_notice("patching newly loaded module '%s'\n", object->name); + + /* run user-defined pre-patch callback */ + ret = pre_patch_callback(object); + if (ret) { + pr_err("pre-patch callback failed!\n"); + goto out; /* and WARN */ + } + + /* add to the global func hash */ + list_for_each_entry(func, &object->funcs, list) + hash_add_rcu(kpatch_func_hash, &func->node, func->old_addr); + + /* run user-defined post-patch callback */ + post_patch_callback(object); +out: + up(&kpatch_mutex); + + /* no way to stop the module load on error */ + WARN(ret, "error (%d) patching newly loaded module '%s'\n", ret, + object->name); + + return 0; +} + +static int kpatch_module_notify_going(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct module *mod = data; + struct kpatch_module *kpmod; + struct kpatch_object *object; + struct kpatch_func *func; + bool found = false; + + if (action != MODULE_STATE_GOING) + return 0; + + down(&kpatch_mutex); + + list_for_each_entry(kpmod, &kpmod_list, list) { + list_for_each_entry(object, &kpmod->objects, list) { + if (!kpatch_object_linked(object)) + continue; + if (!strcmp(object->name, mod->name)) { + found = true; + goto done; + } + } + } +done: + if (!found) + goto out; + + /* run user-defined pre-unpatch callback */ + pre_unpatch_callback(object); + + /* remove from the global func hash */ + list_for_each_entry(func, &object->funcs, list) + hash_del_rcu(&func->node); + + /* run user-defined pre-unpatch callback */ + post_unpatch_callback(object); + + kpatch_unlink_object(object); + +out: + up(&kpatch_mutex); + + return 0; +} + +int kpatch_register(struct kpatch_module *kpmod, bool replace) +{ + int ret, i, force = 0; + struct kpatch_object *object, *object_err = NULL; 
+ struct kpatch_func *func; + + if (!kpmod->mod || list_empty(&kpmod->objects)) + return -EINVAL; + + down(&kpatch_mutex); + + if (kpmod->enabled) { + ret = -EINVAL; + goto err_up; + } + + list_add_tail(&kpmod->list, &kpmod_list); + + if (!try_module_get(kpmod->mod)) { + ret = -ENODEV; + goto err_list; + } + + list_for_each_entry(object, &kpmod->objects, list) { + + ret = kpatch_link_object(kpmod, object); + if (ret) { + object_err = object; + goto err_unlink; + } + + if (!kpatch_object_linked(object)) { + pr_notice("delaying patch of unloaded module '%s'\n", + object->name); + continue; + } + + if (strcmp(object->name, "vmlinux")) + pr_notice("patching module '%s'\n", object->name); + + list_for_each_entry(func, &object->funcs, list) + func->op = KPATCH_OP_PATCH; + } + + if (replace) + hash_for_each_rcu(kpatch_func_hash, i, func, node) + func->op = KPATCH_OP_UNPATCH; + + /* memory barrier between func hash and state write */ + smp_wmb(); + + kpatch_state_updating(); + + /* + * Idle the CPUs, verify activeness safety, and atomically make the new + * functions visible to the ftrace handler. + */ + ret = stop_machine(kpatch_apply_patch, kpmod, NULL); + + /* + * For the replace case, remove any obsolete funcs from the hash and + * the ftrace filter, and disable the owning patch module so that it + * can be removed. + */ + if (!ret && replace) { + struct kpatch_module *kpmod2, *safe; + + hash_for_each_rcu(kpatch_func_hash, i, func, node) { + if (func->op != KPATCH_OP_UNPATCH) + continue; + if (func->force) + force = 1; + hash_del_rcu(&func->node); + WARN_ON(kpatch_ftrace_remove_func(func->old_addr)); + } + + list_for_each_entry_safe(kpmod2, safe, &kpmod_list, list) { + if (kpmod == kpmod2) + continue; + + kpmod2->enabled = false; + pr_notice("unloaded patch module '%s'\n", + kpmod2->mod->name); + + /* + * Don't allow modules with forced functions to be + * removed because they might still be in use. + */ + if (!force) + module_put(kpmod2->mod); + + list_del(&kpmod2->list); + } + } + + + /* memory barrier between func hash and state write */ + smp_wmb(); + + /* NMI handlers can return to normal now */ + kpatch_state_idle(); + + /* + * Wait for all existing NMI handlers to complete so that they don't + * see any changes to funcs or funcs->op that might occur after this + * point. + * + * Any NMI handlers starting after this point will see the IDLE state. 
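+	 * (It also gives the RCU hash-chain walks inside
+	 * kpatch_ftrace_handler() time to drain before funcs are modified
+	 * or their hash entries reused below.)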
+ */ + synchronize_rcu(); + + if (ret) + goto err_ops; + + do_for_each_linked_func(kpmod, func) { + func->op = KPATCH_OP_NONE; + } while_for_each_linked_func(); + +/* HAS_MODULE_TAINT - upstream 2992ef29ae01 "livepatch/module: make TAINT_LIVEPATCH module-specific" */ +/* HAS_MODULE_TAINT_LONG - upstream 7fd8329ba502 "taint/module: Clean up global and module taint flags handling" */ +#ifdef RHEL_RELEASE_CODE +# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4) +# define HAS_MODULE_TAINT +# endif +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +# define HAS_MODULE_TAINT_LONG +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) +# define HAS_MODULE_TAINT +#endif + +#ifdef TAINT_LIVEPATCH + pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n"); + add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK); +# ifdef HAS_MODULE_TAINT_LONG + set_bit(TAINT_LIVEPATCH, &kpmod->mod->taints); +# elif defined(HAS_MODULE_TAINT) + kpmod->mod->taints |= (1 << TAINT_LIVEPATCH); +# endif +#else + pr_notice_once("tainting kernel with TAINT_USER\n"); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); +#endif + + pr_notice("loaded patch module '%s'\n", kpmod->mod->name); + + kpmod->enabled = true; + + up(&kpatch_mutex); + return 0; + +err_ops: + if (replace) + hash_for_each_rcu(kpatch_func_hash, i, func, node) + func->op = KPATCH_OP_NONE; +err_unlink: + list_for_each_entry(object, &kpmod->objects, list) { + if (object == object_err) + break; + if (!kpatch_object_linked(object)) + continue; + WARN_ON(kpatch_unlink_object(object)); + } + module_put(kpmod->mod); +err_list: + list_del(&kpmod->list); +err_up: + up(&kpatch_mutex); + return ret; +} +EXPORT_SYMBOL(kpatch_register); + +int kpatch_unregister(struct kpatch_module *kpmod) +{ + struct kpatch_object *object; + struct kpatch_func *func; + int ret, force = 0; + + down(&kpatch_mutex); + + if (!kpmod->enabled) { + ret = -EINVAL; + goto out; + } + + do_for_each_linked_func(kpmod, func) { + func->op = KPATCH_OP_UNPATCH; + if (func->force) + force = 1; + } while_for_each_linked_func(); + + /* memory barrier between func hash and state write */ + smp_wmb(); + + kpatch_state_updating(); + + ret = stop_machine(kpatch_remove_patch, kpmod, NULL); + + /* NMI handlers can return to normal now */ + kpatch_state_idle(); + + /* + * Wait for all existing NMI handlers to complete so that they don't + * see any changes to funcs or funcs->op that might occur after this + * point. + * + * Any NMI handlers starting after this point will see the IDLE state. + */ + synchronize_rcu(); + + if (ret) { + do_for_each_linked_func(kpmod, func) { + func->op = KPATCH_OP_NONE; + } while_for_each_linked_func(); + goto out; + } + + list_for_each_entry(object, &kpmod->objects, list) { + if (!kpatch_object_linked(object)) + continue; + ret = kpatch_unlink_object(object); + if (ret) + goto out; + } + + pr_notice("unloaded patch module '%s'\n", kpmod->mod->name); + + kpmod->enabled = false; + + /* + * Don't allow modules with forced functions to be removed because they + * might still be in use. 
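+	 * (A 'force' func was exempted from the activeness safety check,
+	 * so a task may still be executing its old or new code; skipping
+	 * module_put() intentionally pins the patch module forever.)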
+ */ + if (!force) + module_put(kpmod->mod); + + list_del(&kpmod->list); + +out: + up(&kpatch_mutex); + return ret; +} +EXPORT_SYMBOL(kpatch_unregister); + + +static struct notifier_block kpatch_module_nb_coming = { + .notifier_call = kpatch_module_notify_coming, + .priority = INT_MIN, /* called last */ +}; +static struct notifier_block kpatch_module_nb_going = { + .notifier_call = kpatch_module_notify_going, + .priority = INT_MAX, /* called first */ +}; + +static int kpatch_init(void) +{ + int ret; + + kpatch_set_memory_rw = (void *)kallsyms_lookup_name("set_memory_rw"); + if (!kpatch_set_memory_rw) { + pr_err("can't find set_memory_rw symbol\n"); + return -ENXIO; + } + + kpatch_set_memory_ro = (void *)kallsyms_lookup_name("set_memory_ro"); + if (!kpatch_set_memory_ro) { + pr_err("can't find set_memory_ro symbol\n"); + return -ENXIO; + } + + kpatch_root_kobj = kobject_create_and_add("kpatch", kernel_kobj); + if (!kpatch_root_kobj) + return -ENOMEM; + + ret = register_module_notifier(&kpatch_module_nb_coming); + if (ret) + goto err_root_kobj; + ret = register_module_notifier(&kpatch_module_nb_going); + if (ret) + goto err_unregister_coming; + + return 0; + +err_unregister_coming: + WARN_ON(unregister_module_notifier(&kpatch_module_nb_coming)); +err_root_kobj: + kobject_put(kpatch_root_kobj); + return ret; +} + +static void kpatch_exit(void) +{ + rcu_barrier(); + + WARN_ON(kpatch_num_patched != 0); + WARN_ON(unregister_module_notifier(&kpatch_module_nb_coming)); + WARN_ON(unregister_module_notifier(&kpatch_module_nb_going)); + kobject_put(kpatch_root_kobj); +} + +module_init(kpatch_init); +module_exit(kpatch_exit); +MODULE_LICENSE("GPL"); diff --git a/kernel/tkernel/kpatch/kpatch.h b/kernel/tkernel/kpatch/kpatch.h new file mode 100644 index 000000000000..36f021fa0b1b --- /dev/null +++ b/kernel/tkernel/kpatch/kpatch.h @@ -0,0 +1,102 @@ +/* + * kpatch.h + * + * Copyright (C) 2014 Seth Jennings + * Copyright (C) 2013-2014 Josh Poimboeuf + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ * + * Contains the API for the core kpatch module used by the patch modules + */ + +#ifndef _KPATCH_H_ +#define _KPATCH_H_ + +#include +#include + +enum kpatch_op { + KPATCH_OP_NONE, + KPATCH_OP_PATCH, + KPATCH_OP_UNPATCH, +}; + +struct kpatch_func { + /* public */ + unsigned long new_addr; + unsigned long new_size; + unsigned long old_addr; + unsigned long old_size; + unsigned long sympos; + const char *name; + struct list_head list; + int force; + + /* private */ + struct hlist_node node; + enum kpatch_op op; + struct kobject kobj; +}; + +struct kpatch_dynrela { + unsigned long dest; + unsigned long src; + unsigned long type; + unsigned long sympos; + const char *name; + int addend; + int external; + struct list_head list; +}; + +struct kpatch_object { + struct list_head list; + const char *name; + struct list_head funcs; + struct list_head dynrelas; + + int (*pre_patch_callback)(struct kpatch_object *); + void (*post_patch_callback)(struct kpatch_object *); + void (*pre_unpatch_callback)(struct kpatch_object *); + void (*post_unpatch_callback)(struct kpatch_object *); + bool callbacks_enabled; + + /* private */ + struct module *mod; + struct kobject kobj; +}; + +struct kpatch_module { + /* public */ + struct module *mod; + struct list_head objects; + + /* public read-only */ + bool enabled; + + /* private */ + struct list_head list; + struct kobject kobj; +}; + +extern struct kobject *kpatch_root_kobj; + +extern int kpatch_register(struct kpatch_module *kpmod, bool replace); +extern int kpatch_unregister(struct kpatch_module *kpmod); + +extern void *kpatch_shadow_alloc(void *obj, char *var, size_t size, gfp_t gfp); +extern void kpatch_shadow_free(void *obj, char *var); +extern void *kpatch_shadow_get(void *obj, char *var); + +#endif /* _KPATCH_H_ */ diff --git a/kernel/tkernel/kpatch/shadow.c b/kernel/tkernel/kpatch/shadow.c new file mode 100644 index 000000000000..627928add7dd --- /dev/null +++ b/kernel/tkernel/kpatch/shadow.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2014 Josh Poimboeuf + * Copyright (C) 2014 Seth Jennings + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * kpatch shadow variables + * + * These functions can be used to add new "shadow" fields to existing data + * structures. 
For example, to allocate a "newpid" variable associated with an + * instance of task_struct, and assign it a value of 1000: + * + * struct task_struct *tsk = current; + * int *newpid; + * newpid = kpatch_shadow_alloc(tsk, "newpid", sizeof(int), GFP_KERNEL); + * if (newpid) + * *newpid = 1000; + * + * To retrieve a pointer to the variable: + * + * struct task_struct *tsk = current; + * int *newpid; + * newpid = kpatch_shadow_get(tsk, "newpid"); + * if (newpid) + * printk("task newpid = %d\n", *newpid); // prints "task newpid = 1000" + * + * To free it: + * + * kpatch_shadow_free(tsk, "newpid"); + */ + +#include +#include +#include "kpatch.h" + +static DEFINE_HASHTABLE(kpatch_shadow_hash, 12); +static DEFINE_SPINLOCK(kpatch_shadow_lock); + +struct kpatch_shadow { + struct hlist_node node; + struct rcu_head rcu_head; + void *obj; + union { + char *var; /* assumed to be 4-byte aligned */ + unsigned long flags; + }; + void *data; +}; + +#define SHADOW_FLAG_INPLACE 0x1 +#define SHADOW_FLAG_RESERVED0 0x2 /* reserved for future use */ + +#define SHADOW_FLAG_MASK 0x3 +#define SHADOW_PTR_MASK (~(SHADOW_FLAG_MASK)) + +static inline void shadow_set_inplace(struct kpatch_shadow *shadow) +{ + shadow->flags |= SHADOW_FLAG_INPLACE; +} + +static inline int shadow_is_inplace(struct kpatch_shadow *shadow) +{ + return shadow->flags & SHADOW_FLAG_INPLACE; +} + +static inline char *shadow_var(struct kpatch_shadow *shadow) +{ + return (char *)((unsigned long)shadow->var & SHADOW_PTR_MASK); +} + +void *kpatch_shadow_alloc(void *obj, char *var, size_t size, gfp_t gfp) +{ + unsigned long flags; + struct kpatch_shadow *shadow; + + shadow = kmalloc(sizeof(*shadow), gfp); + if (!shadow) + return NULL; + + shadow->obj = obj; + + shadow->var = kstrdup(var, gfp); + if (!shadow->var) { + kfree(shadow); + return NULL; + } + + if (size <= sizeof(shadow->data)) { + shadow->data = &shadow->data; + shadow_set_inplace(shadow); + } else { + shadow->data = kmalloc(size, gfp); + if (!shadow->data) { + kfree(shadow->var); + kfree(shadow); + return NULL; + } + } + + spin_lock_irqsave(&kpatch_shadow_lock, flags); + hash_add_rcu(kpatch_shadow_hash, &shadow->node, (unsigned long)obj); + spin_unlock_irqrestore(&kpatch_shadow_lock, flags); + + return shadow->data; +} +EXPORT_SYMBOL_GPL(kpatch_shadow_alloc); + +static void kpatch_shadow_rcu_free(struct rcu_head *head) +{ + struct kpatch_shadow *shadow; + + shadow = container_of(head, struct kpatch_shadow, rcu_head); + + if (!shadow_is_inplace(shadow)) + kfree(shadow->data); + kfree(shadow_var(shadow)); + kfree(shadow); +} + +void kpatch_shadow_free(void *obj, char *var) +{ + unsigned long flags; + struct kpatch_shadow *shadow; + + spin_lock_irqsave(&kpatch_shadow_lock, flags); + + hash_for_each_possible(kpatch_shadow_hash, shadow, node, + (unsigned long)obj) { + if (shadow->obj == obj && !strcmp(shadow_var(shadow), var)) { + hash_del_rcu(&shadow->node); + spin_unlock_irqrestore(&kpatch_shadow_lock, flags); + call_rcu(&shadow->rcu_head, kpatch_shadow_rcu_free); + return; + } + } + + spin_unlock_irqrestore(&kpatch_shadow_lock, flags); +} +EXPORT_SYMBOL_GPL(kpatch_shadow_free); + +void *kpatch_shadow_get(void *obj, char *var) +{ + struct kpatch_shadow *shadow; + + rcu_read_lock(); + + hash_for_each_possible_rcu(kpatch_shadow_hash, shadow, node, + (unsigned long)obj) { + if (shadow->obj == obj && !strcmp(shadow_var(shadow), var)) { + rcu_read_unlock(); + if (shadow_is_inplace(shadow)) + return &(shadow->data); + + return shadow->data; + } + } + + rcu_read_unlock(); + + return NULL; +} 
+EXPORT_SYMBOL_GPL(kpatch_shadow_get);
diff --git a/kernel/tkernel/netatop/Makefile b/kernel/tkernel/netatop/Makefile
new file mode 100644
index 000000000000..0a29c3d27060
--- /dev/null
+++ b/kernel/tkernel/netatop/Makefile
@@ -0,0 +1 @@
+obj-m := netatop.o
diff --git a/kernel/tkernel/netatop/netatop.c b/kernel/tkernel/netatop/netatop.c
new file mode 100644
index 000000000000..1af2b47a7d9a
--- /dev/null
+++ b/kernel/tkernel/netatop/netatop.c
@@ -0,0 +1,1793 @@
+/*
+** This module uses the netfilter interface to maintain statistics
+** about the network traffic per task, at the level of the thread group
+** and the individual thread.
+**
+** General setup
+** -------------
+** Once the module is active, it is called for every packet that is
+** transmitted by a local process and every packet that is received
+** from an interface. Not only the packets that contain the user data
+** are passed but also the TCP related protocol packets (SYN, ACK, ...).
+**
+** When the module discovers a packet for a connection (TCP) or local
+** port (UDP) that is new, it creates a sockinfo structure. As soon as
+** possible the sockinfo struct will be connected to a taskinfo struct
+** that represents the process or thread that is related to the socket.
+** However, the task can only be determined when a packet is transmitted,
+** i.e. the module is called during system call handling in the context
+** of the transmitting process. At that moment the tgid (process) and
+** pid (thread) can be obtained from the process administration to
+** be stored in the module's own taskinfo structs (one for the process,
+** one for the thread).
+** For the time that the sockinfo struct cannot be related to a taskinfo
+** struct (e.g. when only packets are received), counters are maintained
+** temporarily in the sockinfo struct. As soon as a related taskinfo struct
+** is discovered when the task transmits, counters will be maintained in
+** the taskinfo struct itself.
+** When packets are only received for a socket (e.g. another machine is
+** sending UDP packets to the local machine) while the local task
+** never responds, no match to a process can be made and the packets
+** remain unidentified by the netatop module. At least one packet should
+** have been sent by a local process to be able to match packets for such
+** a socket.
+** In the file /proc/netatop counters can be found that show the total
+** number of packets sent/received and how many of these packets were
+** unidentified (i.e. not accounted to a process/thread).
+**
+** Garbage collection
+** ------------------
+** The module uses a garbage collector to clean up unused sockinfo
+** structs if connections do not exist any more (TCP) or have not been
+** used for some time (TCP/UDP).
+** Furthermore, the garbage collector checks if the taskinfo structs
+** still represent existing processes or threads. If not, the taskinfo struct
+** is destroyed (in case of a thread) or it is moved to a separate list of
+** finished processes (in case of a process). Analysis programs can read
+** the taskinfo of such a finished process. When the taskinfo of a finished
+** process is not read within 15 seconds, the taskinfo will be destroyed.
+**
+** A garbage collector cycle can be triggered by issuing a getsockopt
+** call from an analysis program (e.g. atop). Apart from that, a time-based
+** garbage collector cycle is issued anyhow every 15 seconds by the
+** knetatop kernel thread.
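+**
+** (Illustrative userspace sketch, assuming only the NETATOP_FORCE_GC
+** command described below; netfilter sockopts are issued at SOL_IP
+** level on any PF_INET socket:
+**
+**	int fd = socket(PF_INET, SOCK_DGRAM, 0);
+**	socklen_t len = 0;
+**	getsockopt(fd, SOL_IP, NETATOP_FORCE_GC, NULL, &len);
+** )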
+**
+** Interface with user mode
+** ------------------------
+** Programs can open an IP socket and use the getsockopt() system call
+** to issue commands to this module. With the command ATOP_GETCNT_TGID
+** the current counters can be obtained on process level (thread group)
+** and with the command ATOP_GETCNT_PID the counters on thread level.
+** For both commands, the tgid/pid of the required thread (group) has to
+** be passed. When the required thread (group) does not exist, an errno
+** ESRCH is given.
+**
+** The command ATOP_GETCNT_EXIT can be issued to obtain the counters of
+** an exited process. As stated above, such a command has to be issued
+** within 15 seconds after a process has been declared 'finished' by
+** the garbage collector. Whenever this command is issued and no exited
+** process is in the exitlist, the requesting process is blocked until
+** an exited process is available.
+**
+** The command NETATOP_FORCE_GC activates the garbage collector of the
+** netatop module to determine if sockinfo's of old connections/ports
+** can be destroyed and if taskinfo's of exited processes can be
+** moved to the exitlist.
+**
+** The command NETATOP_EMPTY_EXIT can be issued to wait until the exitlist
+** with the taskinfo's of exited processes is empty.
+** ----------------------------------------------------------------------
+** Copyright (C) 2012 Gerlof Langeveld (gerlof.langeveld@atoptool.nl)
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License version 2 as
+** published by the Free Software Foundation.
+*/
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "netatop.h"
+#include "netatopversion.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerlof Langeveld <gerlof.langeveld@atoptool.nl>");
+MODULE_DESCRIPTION("Per-task network statistics");
+MODULE_VERSION(NETATOPVERSION);
+
+#define GCINTERVAL	(HZ*15)		// interval garbage collector (jiffies)
+#define GCMAXUDP	(HZ*16)		// max inactivity for UDP (jiffies)
+#define GCMAXTCP	(HZ*1800)	// max inactivity for TCP (jiffies)
+#define GCMAXUNREF	(HZ*60)		// max time without taskref (jiffies)
+
+#define SILIMIT	(4096*1024)	// maximum memory for sockinfo structs
+#define TILIMIT	(2048*1024)	// maximum memory for taskinfo structs
+
+#define NF_IP_PRE_ROUTING	0
+#define NF_IP_LOCAL_IN		1
+#define NF_IP_FORWARD		2
+#define NF_IP_LOCAL_OUT		3
+#define NF_IP_POST_ROUTING	4
+
+/*
+** struct that maintains statistics about the network
+** traffic caused per thread or thread group
+*/
+struct chainer {
+	void	*next;
+	void	*prev;
+};
+
+struct taskinfobucket;
+
+struct taskinfo {
+	struct chainer	ch;
+
+	pid_t		id;		// tgid or pid
+	char		type;		// 'g' (thread group) or
+					// 't' (thread)
+	unsigned char	state;		// see below
+	char		command[COMLEN];
+	unsigned long	btime;		// start time of process
+	unsigned long long exittime;	// time inserted in exitlist
+
+	struct taskcount tc;
+};
+
+static struct kmem_cache	*ticache;	// taskinfo cache
+
+// state values above
+#define CHECKED		1	// verified that task still exists
+#define INDELETE	2	// task exited but still in hash list
+#define FINISHED	3	// task on exit list
+
+/*
+** hash tables to find a particular thread group or thread
+*/
+#define TBUCKS		1024	// must be a power of 2!
+#define THASH(x, t)	(((x)+t)&(TBUCKS-1))
+
+struct taskinfobucket {
+	struct chainer	ch;
+	spinlock_t	lock;
+} thash[TBUCKS];
+
+static unsigned long	nrt;		// current number of taskinfo allocated
+static unsigned long	nrt_ovf;	// no taskinfo allocated due to overflow
+static DEFINE_SPINLOCK(nrtlock);
+
+
+static struct taskinfo	*exithead;	// linked list of exited processes
+static struct taskinfo	*exittail;
+static DEFINE_SPINLOCK(exitlock);
+
+static DECLARE_WAIT_QUEUE_HEAD(exitlist_filled);
+static DECLARE_WAIT_QUEUE_HEAD(exitlist_empty);
+
+static unsigned long	nre;	// current number of taskinfo on exitlist
+
+/*
+** structs that uniquely identify a TCP connection (host endian format)
+*/
+struct tcpv4_ident {
+	uint32_t	laddr;	/* local IP address */
+	uint32_t	raddr;	/* remote IP address */
+	uint16_t	lport;	/* local port number */
+	uint16_t	rport;	/* remote port number */
+};
+
+struct tcpv6_ident {
+	struct in6_addr	laddr;	/* local IP address */
+	struct in6_addr	raddr;	/* remote IP address */
+	uint16_t	lport;	/* local port number */
+	uint16_t	rport;	/* remote port number */
+};
+
+/*
+** struct to maintain the reference from a socket
+** to a thread and thread-group
+*/
+struct sockinfo {
+	struct chainer		ch;
+
+	unsigned char		last_state;	// last known state of socket
+	uint8_t			proto;		// protocol
+
+	union keydef {
+		uint16_t		udp;	// UDP ident (only portnumber)
+		struct tcpv4_ident	tcp4;	// TCP connection ident IPv4
+		struct tcpv6_ident	tcp6;	// TCP connection ident IPv6
+	} key;
+
+	struct taskinfo		*tgp;	// ref to thread group
+	struct taskinfo		*thp;	// ref to thread (or NULL)
+
+	short			tgh;	// hash number of thread group
+	short			thh;	// hash number of thread
+
+	unsigned int		sndpacks;	// temporary counters in case
+	unsigned int		rcvpacks;	// no relation to a process is
+	unsigned long		sndbytes;	// known yet
+	unsigned long		rcvbytes;
+
+	unsigned long long	lastact;	// last updated (jiffies)
+};
+
+static struct kmem_cache	*sicache;	// sockinfo cache
+
+/*
+** hash table to find a socket reference
+*/
+#define SBUCKS		1024	// must be a power of 2!
+#define SHASHTCP4(x) (((x).raddr+(x).lport+(x).rport)&(SBUCKS-1)) +#define SHASHUDP(x) ((x)&(SBUCKS-1)) + +struct { + struct chainer ch; + spinlock_t lock; +} shash[SBUCKS]; + +static unsigned long nrs; // current number sockinfo allocated +static unsigned long nrs_ovf; // no sockinfo allocated due to overflow +static DEFINE_SPINLOCK(nrslock); + +/* +** various static counters +*/ +static unsigned long icmpsndbytes; +static unsigned long icmpsndpacks; +static unsigned long icmprcvbytes; +static unsigned long icmprcvpacks; + +static unsigned long tcpsndpacks; +static unsigned long tcprcvpacks; +static unsigned long udpsndpacks; +static unsigned long udprcvpacks; +static unsigned long unidentudpsndpacks; +static unsigned long unidentudprcvpacks; +static unsigned long unidenttcpsndpacks; +static unsigned long unidenttcprcvpacks; + +static unsigned long unknownproto; + +static DEFINE_MUTEX(gclock); +static unsigned long long gclast; // last garbage collection (jiffies) + +static struct task_struct *knetatop_task; + +static struct timespec boottime; + +/* +** function prototypes +*/ +static void analyze_tcpv4_packet(struct sk_buff *, + const struct net_device *, int, char, + struct iphdr *, void *); + +static void analyze_udp_packet(struct sk_buff *, + const struct net_device *, int, char, + struct iphdr *, void *); + +static int sock2task(char, struct sockinfo *, + struct taskinfo **, short *, + struct sk_buff *, const struct net_device *, + int, char); + +static void update_taskcounters(struct sk_buff *, + const struct net_device *, + struct taskinfo *, char); + +static void update_sockcounters(struct sk_buff *, + const struct net_device *, + struct sockinfo *, char); + +static void sock2task_sync(struct sk_buff *, + struct sockinfo *, struct taskinfo *); + +static void register_unident(struct sockinfo *); + +static int calc_reallen(struct sk_buff *, + const struct net_device *); + +static void get_tcpv4_ident(struct iphdr *, void *, + char, union keydef *); + +static struct sockinfo *find_sockinfo(int, union keydef *, int, int); +static struct sockinfo *make_sockinfo(int, union keydef *, int, int); + +static void wipesockinfo(void); +static void wipetaskinfo(void); +static void wipetaskexit(void); + +static void garbage_collector(void); +static void gctaskexit(void); +static void gcsockinfo(void); +static void gctaskinfo(void); + +static void move_taskinfo(struct taskinfo *); +static void delete_taskinfo(struct taskinfo *); +static void delete_sockinfo(struct sockinfo *); + +static struct taskinfo *get_taskinfo(pid_t, char); + +static int getsockopt(struct sock *, int, void *, int *); + +static int netatop_open(struct inode *inode, struct file *file); + +/* +** hook definitions +*/ +static struct nf_hook_ops hookin_ipv4; +static struct nf_hook_ops hookout_ipv4; + +/* +** getsockopt definitions for communication with user space +*/ +static struct nf_sockopt_ops sockopts = { + .pf = PF_INET, + .get_optmin = NETATOP_BASE_CTL, + .get_optmax = NETATOP_BASE_CTL+6, + .get = getsockopt, + .owner = THIS_MODULE, +}; + +static struct file_operations netatop_proc_fops = { + .open = netatop_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +/* +** hook function to be called for every incoming local packet +*/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +#define HOOK_ARG_TYPE void *priv +#define HOOK_STATE_ARGS const struct nf_hook_state *state + +#define DEV_IN state->in +#define DEV_OUT state->out +#elif LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 1, 0) +#define HOOK_ARG_TYPE const struct nf_hook_ops *ops +#define HOOK_STATE_ARGS const struct nf_hook_state *state + +#define DEV_IN state->in +#define DEV_OUT state->out +#else +#define HOOK_ARG_TYPE unsigned int hooknum +#define HOOK_STATE_ARGS const struct net_device *in, \ + const struct net_device *out, \ + int (*okfn)(struct sk_buff *) + +#define DEV_IN in +#define DEV_OUT out +#endif + +static unsigned int +ipv4_hookin(HOOK_ARG_TYPE, struct sk_buff *skb, HOOK_STATE_ARGS) +{ + struct iphdr *iph; + void *trh; + + if (skb == NULL) // useless socket buffer? + return NF_ACCEPT; + + /* + ** get pointer to IP header and transport header + */ + iph = (struct iphdr *)skb_network_header(skb); + trh = ((char *)iph + (iph->ihl * 4)); + + /* + ** react on protocol number + */ + switch (iph->protocol) { + case IPPROTO_TCP: + tcprcvpacks++; + analyze_tcpv4_packet(skb, DEV_IN, 0, 'i', iph, trh); + break; + + case IPPROTO_UDP: + udprcvpacks++; + analyze_udp_packet(skb, DEV_IN, 0, 'i', iph, trh); + break; + + case IPPROTO_ICMP: + icmprcvpacks++; + icmprcvbytes += skb->len + DEV_IN->hard_header_len + 4; + break; + + default: + unknownproto++; + } + + // accept every packet after stats gathering + return NF_ACCEPT; +} + +/* +** hook function to be called for every outgoing local packet +*/ +static unsigned int +ipv4_hookout(HOOK_ARG_TYPE, struct sk_buff *skb, HOOK_STATE_ARGS) +{ + int in_syscall = !in_interrupt(); + struct iphdr *iph; + void *trh; + + if (skb == NULL) // useless socket buffer? + return NF_ACCEPT; + + /* + ** get pointer to IP header and transport header + */ + iph = (struct iphdr *)skb_network_header(skb); + trh = skb_transport_header(skb); + + /* + ** react on protocol number + */ + switch (iph->protocol) { + case IPPROTO_TCP: + tcpsndpacks++; + analyze_tcpv4_packet(skb, DEV_OUT, in_syscall, 'o', iph, trh); + break; + + case IPPROTO_UDP: + udpsndpacks++; + analyze_udp_packet(skb, DEV_OUT, in_syscall, 'o', iph, trh); + break; + + case IPPROTO_ICMP: + icmpsndpacks++; + icmpsndbytes += skb->len + DEV_OUT->hard_header_len + 4; + break; + + default: + unknownproto++; + } + + // accept every packet after stats gathering + return NF_ACCEPT; +} + +/* +** generic function (for input and output) to analyze the current packet +*/ +static void +analyze_tcpv4_packet(struct sk_buff *skb, + const struct net_device *ndev, // interface description + int in_syscall, // called during system call? 
+ char direction, // incoming ('i') or outgoing ('o') + struct iphdr *iph, void *trh) +{ + union keydef key; + struct sockinfo *sip; + int bs; // hash bucket for sockinfo + unsigned long sflags; + + /* + ** determine tcpv4_ident that identifies this TCP packet + ** and calculate hash bucket in sockinfo hash + */ + get_tcpv4_ident(iph, trh, direction, &key); + + /* + ** check if we have seen this tcpv4_ident before with a + ** corresponding thread and thread group + */ + bs = SHASHTCP4(key.tcp4); + + spin_lock_irqsave(&shash[bs].lock, sflags); + + if ( (sip = find_sockinfo(IPPROTO_TCP, &key, sizeof key.tcp4, bs)) + == NULL) { + // no sockinfo yet: create one + if ( (sip = make_sockinfo(IPPROTO_TCP, &key, + sizeof key.tcp4, bs)) == NULL) { + if (direction == 'i') + unidenttcprcvpacks++; + else + unidenttcpsndpacks++; + goto unlocks; + } + } + + if (skb->sk) + sip->last_state = skb->sk->sk_state; + + /* + ** if needed (re)connect the sockinfo to a taskinfo and update + ** the counters + */ + + // connect to thread group and update + if (sock2task('g', sip, &sip->tgp, &sip->tgh, + skb, ndev, in_syscall, direction)) { + // connect to thread and update + (void) sock2task('t', sip, &sip->thp, &sip->thh, + skb, ndev, in_syscall, direction); + } + +unlocks: + spin_unlock_irqrestore(&shash[bs].lock, sflags); +} + + +/* +** generic function (for input and output) to analyze the current packet +*/ +static void +analyze_udp_packet(struct sk_buff *skb, + const struct net_device *ndev, // interface description + int in_syscall, // called during system call? + char direction, // incoming ('i') or outgoing ('o') + struct iphdr *iph, void *trh) +{ + struct udphdr *udph = (struct udphdr *)trh; + uint16_t udplocal = (direction == 'i' ? + ntohs(udph->dest) : ntohs(udph->source)); + int bs; // hash bucket for sockinfo + + union keydef key; + struct sockinfo *sip; + unsigned long sflags; + + /* + ** check if we have seen this local UDP port before with a + ** corresponding thread and thread group + */ + key.udp = udplocal; + bs = SHASHUDP(udplocal); + + spin_lock_irqsave(&shash[bs].lock, sflags); + + if ( (sip = find_sockinfo(IPPROTO_UDP, &key, sizeof key.udp, bs)) + == NULL) { + // no sockinfo yet: create one + if ( (sip = make_sockinfo(IPPROTO_UDP, &key, + sizeof key.udp, bs)) == NULL) { + if (direction == 'i') + unidentudprcvpacks++; + else + unidentudpsndpacks++; + goto unlocks; + } + } + + /* + ** if needed (re)connect the sockinfo to a taskinfo and update + ** the counters + */ + + // connect to thread group and update + if (sock2task('g', sip, &sip->tgp, &sip->tgh, + skb, ndev, in_syscall, direction)) { + // connect to thread and update + (void) sock2task('t', sip, &sip->thp, &sip->thh, + skb, ndev, in_syscall, direction); + } + +unlocks: + spin_unlock_irqrestore(&shash[bs].lock, sflags); +} + +/* +** connect the sockinfo to the correct taskinfo and update the counters +*/ +static int +sock2task(char idtype, struct sockinfo *sip, struct taskinfo **tipp, + short *hash, struct sk_buff *skb, const struct net_device *ndev, + int in_syscall, char direction) +{ + pid_t curid; + unsigned long tflags; + + if (*tipp == NULL) { + /* + ** no taskinfo connected yet for this reference from + ** sockinfo; to connect to a taskinfo, we must + ** be in system call handling now --> verify + */ + if (!in_syscall) { + if (idtype == 'g') + update_sockcounters(skb, ndev, sip, direction); + + return 0; // failed + } + + /* + ** try to find existing taskinfo or create new taskinfo + */ + curid = (idtype == 'g' ? 
current->tgid : current->pid);
+
+		*hash = THASH(curid, idtype);	// calc hash
+
+		spin_lock_irqsave(&thash[*hash].lock, tflags);
+
+		if ( (*tipp = get_taskinfo(curid, idtype)) == NULL) {
+			/*
+			** not possible to connect
+			*/
+			spin_unlock_irqrestore(&thash[*hash].lock, tflags);
+
+			if (idtype == 'g')
+				update_sockcounters(skb, ndev, sip, direction);
+
+			return 0;	// failed
+		}
+
+		/*
+		** new connection made:
+		** update task counters with sock counters
+		*/
+		sock2task_sync(skb, sip, *tipp);
+	} else {
+		/*
+		** already related to thread group or thread
+		** lock existing task
+		*/
+		spin_lock_irqsave(&thash[*hash].lock, tflags);
+
+		/*
+		** check if the socket has been passed to another process
+		** in the meantime, as programs like xinetd tend to do
+		** if so, connect the sockinfo to the new task
+		*/
+		if (in_syscall) {
+			curid = (idtype == 'g' ? current->tgid : current->pid);
+
+			if ((*tipp)->id != curid) {
+				spin_unlock_irqrestore(&thash[*hash].lock,
+								tflags);
+				*hash = THASH(curid, idtype);
+
+				spin_lock_irqsave(&thash[*hash].lock, tflags);
+
+				if ( (*tipp = get_taskinfo(curid, idtype))
+								== NULL) {
+					spin_unlock_irqrestore(
+						&thash[*hash].lock, tflags);
+					return 0;
+				}
+			}
+		}
+	}
+
+	update_taskcounters(skb, ndev, *tipp, direction);
+
+	spin_unlock_irqrestore(&thash[*hash].lock, tflags);
+
+	return 1;
+}
+
+/*
+** update the statistics of a particular thread group or thread
+*/
+static void
+update_taskcounters(struct sk_buff *skb, const struct net_device *ndev,
+		struct taskinfo *tip, char direction)
+{
+	struct iphdr	*iph = (struct iphdr *)skb_network_header(skb);
+	int		reallen = calc_reallen(skb, ndev);
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		if (direction == 'i') {
+			tip->tc.tcprcvpacks++;
+			tip->tc.tcprcvbytes += reallen;
+		} else {
+			tip->tc.tcpsndpacks++;
+			tip->tc.tcpsndbytes += reallen;
+		}
+		break;
+
+	case IPPROTO_UDP:
+		if (direction == 'i') {
+			tip->tc.udprcvpacks++;
+			tip->tc.udprcvbytes += reallen;
+		} else {
+			tip->tc.udpsndpacks++;
+			tip->tc.udpsndbytes += reallen;
+		}
+	}
+}
+
+/*
+** update the statistics of a sockinfo without a connected task
+*/
+static void
+update_sockcounters(struct sk_buff *skb, const struct net_device *ndev,
+		struct sockinfo *sip, char direction)
+{
+	int	reallen = calc_reallen(skb, ndev);
+
+	if (direction == 'i') {
+		sip->rcvpacks++;
+		sip->rcvbytes += reallen;
+	} else {
+		sip->sndpacks++;
+		sip->sndbytes += reallen;
+	}
+}
+
+/*
+** add the temporary counters in the sockinfo to the newly connected task
+*/
+static void
+sock2task_sync(struct sk_buff *skb, struct sockinfo *sip, struct taskinfo *tip)
+{
+	struct iphdr	*iph = (struct iphdr *)skb_network_header(skb);
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		tip->tc.tcprcvpacks += sip->rcvpacks;
+		tip->tc.tcprcvbytes += sip->rcvbytes;
+		tip->tc.tcpsndpacks += sip->sndpacks;
+		tip->tc.tcpsndbytes += sip->sndbytes;
+		break;
+
+	case IPPROTO_UDP:
+		tip->tc.udprcvpacks += sip->rcvpacks;
+		tip->tc.udprcvbytes += sip->rcvbytes;
+		tip->tc.udpsndpacks += sip->sndpacks;
+		tip->tc.udpsndbytes += sip->sndbytes;
+	}
+}
+
+static void
+register_unident(struct sockinfo *sip)
+{
+	switch (sip->proto) {
+	case IPPROTO_TCP:
+		unidenttcprcvpacks += sip->rcvpacks;
+		unidenttcpsndpacks += sip->sndpacks;
+		break;
+
+	case IPPROTO_UDP:
+		unidentudprcvpacks += sip->rcvpacks;
+		unidentudpsndpacks += sip->sndpacks;
+	}
+}
+
+/*
+** calculate the number of bytes that are really sent or received
+*/
+static int
+calc_reallen(struct sk_buff *skb, const struct net_device *ndev)
+{
+	/*
+	** calculate the real load of this packet on the network:
+	**
+	** - length of IP header, TCP/UDP header and data (skb->len)
+	**
+	**   since packet assembly/disassembly is done by the IP layer
+	**   (we get an input packet that has been assembled already and
+	**   an output packet that still has to be assembled), additional
+	**   IP headers and interface headers have to be accounted
+	**   for packets that are larger than the mtu
+	**
+	** - interface header length + 4 bytes crc
+	*/
+	int reallen = skb->len;
+
+	if (reallen > ndev->mtu)
+		reallen += (reallen / ndev->mtu) *
+			(sizeof(struct iphdr) + ndev->hard_header_len + 4);
+
+	reallen += ndev->hard_header_len + 4;
+
+	return reallen;
+}
+
+/*
+** find the tcpv4_ident for the current packet, represented by
+** the sk_buff
+*/
+static void
+get_tcpv4_ident(struct iphdr *iph, void *trh, char direction, union keydef *key)
+{
+	struct tcphdr	*tcph = (struct tcphdr *)trh;
+
+	memset(key, 0, sizeof *key);	// important for memcmp later on
+
+	/*
+	** determine local/remote IP address and
+	** determine local/remote port number
+	*/
+	switch (direction) {
+	case 'i':		// incoming packet
+		key->tcp4.laddr = ntohl(iph->daddr);
+		key->tcp4.raddr = ntohl(iph->saddr);
+		key->tcp4.lport = ntohs(tcph->dest);
+		key->tcp4.rport = ntohs(tcph->source);
+		break;
+
+	case 'o':		// outgoing packet
+		key->tcp4.laddr = ntohl(iph->saddr);
+		key->tcp4.raddr = ntohl(iph->daddr);
+		key->tcp4.lport = ntohs(tcph->source);
+		key->tcp4.rport = ntohs(tcph->dest);
+	}
+}
+
+/*
+** search for the sockinfo holding the given address info
+** the appropriate hash bucket must have been locked before calling
+*/
+static struct sockinfo *
+find_sockinfo(int proto, union keydef *identp, int identsz, int hash)
+{
+	struct sockinfo	*sip = shash[hash].ch.next;
+
+	/*
+	** search for the appropriate struct
+	*/
+	while (sip != (void *)&shash[hash].ch) {
+		if ( memcmp(&sip->key, identp, identsz) == 0 &&
+						sip->proto == proto) {
+			sip->lastact = jiffies_64;
+			return sip;
+		}
+
+		sip = sip->ch.next;
+	}
+
+	return NULL;	// not found
+}
+
+/*
+** create a new sockinfo and fill it
+** the appropriate hash bucket must have been locked before calling
+*/
+static struct sockinfo *
+make_sockinfo(int proto, union keydef *identp, int identsz, int hash)
+{
+	struct sockinfo	*sip;
+	unsigned long	flags;
+
+	/*
+	** check if the threshold of memory used for sockinfo structs
+	** is reached, to avoid that a fork bomb of processes opening
+	** a socket leads to memory overload
+	*/
+	if ( (nrs+1) * sizeof(struct sockinfo) > SILIMIT) {
+		spin_lock_irqsave(&nrslock, flags);
+		nrs_ovf++;
+		spin_unlock_irqrestore(&nrslock, flags);
+		return NULL;
+	}
+
+	if ( (sip = kmem_cache_alloc(sicache, GFP_ATOMIC)) == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&nrslock, flags);
+	nrs++;
+	spin_unlock_irqrestore(&nrslock, flags);
+
+	/*
+	** insert the new struct into the doubly linked list
+	*/
+	memset(sip, '\0', sizeof *sip);
+
+	sip->ch.next = &shash[hash].ch;
+	sip->ch.prev = shash[hash].ch.prev;
+	((struct sockinfo *)shash[hash].ch.prev)->ch.next = sip;
+	shash[hash].ch.prev = sip;
+
+	sip->proto	= proto;
+	sip->lastact	= jiffies_64;
+	sip->key	= *identp;
+
+	return sip;
+}
+
+/*
+** search the taskinfo structure holding the info about the given id/type
+** if such taskinfo is not yet present, create a new one
+*/
+static struct taskinfo *
+get_taskinfo(pid_t id, char type)
+{
+	int		bt = THASH(id, type);
+	struct taskinfo	*tip = thash[bt].ch.next;
+	unsigned long	tflags;
+
+	/*
+	** check if the id exists already
+	*/
+
+	while (tip != (void *)&thash[bt].ch) {
+		if (tip->id == id && tip->type == type)
+			return tip;
+
+		tip = tip->ch.next;
+	}
+
+	/*
+	** check if the threshold of memory used for taskinfo structs
+	** is reached, to avoid that a fork bomb of processes opening
+	** a socket leads to memory overload
+	*/
+	if ( (nre+nrt+1) * sizeof(struct taskinfo) > TILIMIT) {
+		spin_lock_irqsave(&nrtlock, tflags);
+		nrt_ovf++;
+		spin_unlock_irqrestore(&nrtlock, tflags);
+		return NULL;
+	}
+
+	/*
+	** id not known yet
+	** add new entry to hash list
+	*/
+	if ( (tip = kmem_cache_alloc(ticache, GFP_ATOMIC)) == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&nrtlock, tflags);
+	nrt++;
+	spin_unlock_irqrestore(&nrtlock, tflags);
+
+	/*
+	** insert new struct in doubly linked list
+	** and fill values
+	*/
+	memset(tip, '\0', sizeof *tip);
+
+	tip->ch.next = &thash[bt].ch;
+	tip->ch.prev = thash[bt].ch.prev;
+	((struct taskinfo *)thash[bt].ch.prev)->ch.next = tip;
+	thash[bt].ch.prev = tip;
+
+	tip->id		= id;
+	tip->type	= type;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+	// current->real_start_time is type u64 (nanoseconds)
+	tip->btime = div_u64((current->real_start_time +
+			(boottime.tv_sec * NSEC_PER_SEC +
+			 boottime.tv_nsec)), NSEC_PER_SEC);
+#else
+	// current->real_start_time is a struct timespec here
+	tip->btime = current->real_start_time.tv_sec + boottime.tv_sec;
+
+	if (current->real_start_time.tv_nsec + boottime.tv_nsec > NSEC_PER_SEC)
+		tip->btime++;
+#endif
+
+	strncpy(tip->command, current->comm, COMLEN);
+
+	return tip;
+}
+
+/*
+** garbage collector that removes:
+** - exited tasks that have not been read by user mode programs
+** - sockinfo's that are not used any more
+** - taskinfo's that do not exist any more
+**
+** a mutex prevents the garbage collector from running several times
+** in parallel
+**
+** this function may only be called in process context!
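+**
+** (pacing note, derived from the jiffies check below: a new cycle is
+** refused until HZ/2 jiffies have passed since the previous one, i.e.
+** at most two cycles per second -- with HZ=250, for example, that is
+** 125 jiffies or 500 ms)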
+*/
+static void
+garbage_collector(void)
+{
+	mutex_lock(&gclock);
+
+	if (jiffies_64 < gclast + (HZ/2)) {	// maximum 2 GC cycles per second
+		mutex_unlock(&gclock);
+		return;
+	}
+
+	gctaskexit();	// remove remaining taskinfo structs from exit list
+
+	gcsockinfo();	// clean up sockinfo structs in shash list
+
+	gctaskinfo();	// clean up taskinfo structs in thash list
+
+	gclast = jiffies_64;
+
+	mutex_unlock(&gclock);
+}
+
+/*
+** tasks in the exitlist can be read by a user mode process for a limited
+** amount of time; this function removes all taskinfo structures that have
+** not been read within that period of time
+** notice that exited processes are chained to the tail, so the oldest
+** can be found at the head
+*/
+static void
+gctaskexit()
+{
+	unsigned long	flags;
+	struct taskinfo	*tip;
+
+	spin_lock_irqsave(&exitlock, flags);
+
+	for (tip=exithead; tip;) {
+		if (jiffies_64 < tip->exittime + GCINTERVAL)
+			break;
+
+		// remove taskinfo from exitlist
+		exithead = tip->ch.next;
+		kmem_cache_free(ticache, tip);
+		nre--;
+		tip = exithead;
+	}
+
+	/*
+	** if the list is empty now, exithead and exittail are both NULL
+	** wake up the waiters for an empty list
+	*/
+	if (nre == 0) {
+		exittail = NULL;
+		wake_up_interruptible(&exitlist_empty);
+	}
+
+	spin_unlock_irqrestore(&exitlock, flags);
+}
+
+/*
+** cleanup sockinfo structures that are connected to finished processes
+*/
+static void
+gcsockinfo()
+{
+	int		i;
+	struct sockinfo	*sip, *sipsave;
+	unsigned long	sflags, tflags;
+	struct pid	*pid;
+
+	/*
+	** go through all sockinfo hash buckets
+	*/
+	for (i=0; i < SBUCKS; i++) {
+		if (shash[i].ch.next == (void *)&shash[i].ch)
+			continue;	// quick return without lock
+
+		spin_lock_irqsave(&shash[i].lock, sflags);
+
+		sip = shash[i].ch.next;
+
+		/*
+		** search all sockinfo structs chained in one bucket
+		*/
+		while (sip != (void *)&shash[i].ch) {
+			/*
+			** TCP connections that were not in
+			** state ESTABLISHED or LISTEN can be
+			** eliminated
+			*/
+			if (sip->proto == IPPROTO_TCP) {
+				switch (sip->last_state) {
+				case TCP_ESTABLISHED:
+				case TCP_LISTEN:
+					break;
+
+				default:
+					sipsave = sip->ch.next;
+					delete_sockinfo(sip);
+					sip = sipsave;
+					continue;
+				}
+			}
+
+			/*
+			** check if this sockinfo has had no relation with
+			** a thread group for a while
+			** if so, delete the sockinfo
+			*/
+			if (sip->tgp == NULL) {
+				if (sip->lastact + GCMAXUNREF < jiffies_64) {
+					register_unident(sip);
+					sipsave = sip->ch.next;
+					delete_sockinfo(sip);
+					sip = sipsave;
+				} else {
+					sip = sip->ch.next;
+				}
+				continue;
+			}
+
+			/*
+			** check if the referred thread group is
+			** already marked as 'indelete' during this
+			** sockinfo search
+			** if so, delete this sockinfo
+			*/
+			spin_lock_irqsave(&thash[sip->tgh].lock, tflags);
+
+			if (sip->tgp->state == INDELETE) {
+				spin_unlock_irqrestore(&thash[sip->tgh].lock,
+								tflags);
+				sipsave = sip->ch.next;
+				delete_sockinfo(sip);
+				sip = sipsave;
+				continue;
+			}
+
+			/*
+			** check if the referred thread group still exists;
+			** this step will be skipped if we already verified
+			** the existence of the thread group earlier during
+			** this garbage collection cycle
+			*/
+			if (sip->tgp->state != CHECKED) {
+				/*
+				** connected thread group not yet verified
+				** during this cycle, so check if it still
+				** exists
+				** if not, mark the thread group as 'indelete'
+				** (it can not be deleted right now because
+				** we might find other sockinfo's referring
+				** to this thread group during the current
+				** cycle) and delete this sockinfo
+				** if the thread group exists, just mark
+				** it as 'checked'
for this cycle + */ + rcu_read_lock(); + pid = find_vpid(sip->tgp->id); + rcu_read_unlock(); + + if (pid == NULL) { + sip->tgp->state = INDELETE; + spin_unlock_irqrestore( + &thash[sip->tgh].lock, tflags); + + sipsave = sip->ch.next; + delete_sockinfo(sip); + sip = sipsave; + continue; + } else { + sip->tgp->state = CHECKED; + } + } + + spin_unlock_irqrestore(&thash[sip->tgh].lock, tflags); + + /* + ** check if this sockinfo has a relation with a thread + ** if not, skip further handling of this sockinfo + */ + if (sip->thp == NULL) { + sip = sip->ch.next; + continue; + } + + /* + ** check if referred thread is already marked + ** as 'indelete' during this sockinfo search + ** if so, break connection + */ + spin_lock_irqsave(&thash[sip->thh].lock, tflags); + + if (sip->thp->state == INDELETE) { + spin_unlock_irqrestore(&thash[sip->thh].lock, + tflags); + sip->thp = NULL; + sip = sip->ch.next; + continue; + } + + /* + ** check if referred thread is already checked + ** during this sockinfo search + */ + if (sip->thp->state == CHECKED) { + spin_unlock_irqrestore(&thash[sip->thh].lock, + tflags); + sip = sip->ch.next; + continue; + } + + /* + ** connected thread not yet verified + ** check if it still exists + ** if not, mark it as 'indelete' and break connection + ** if thread exists, mark it 'checked' + */ + rcu_read_lock(); + pid = find_vpid(sip->thp->id); + rcu_read_unlock(); + + if (pid == NULL) { + sip->thp->state = INDELETE; + sip->thp = NULL; + } else { + sip->thp->state = CHECKED; + } + + spin_unlock_irqrestore(&thash[sip->thh].lock, tflags); + + /* + ** check if a TCP port has not been used + ** for some time --> destroy even if the thread + ** (group) is still there + */ + if (sip->proto == IPPROTO_TCP && + sip->lastact + GCMAXTCP < jiffies_64) { + sipsave = sip->ch.next; + delete_sockinfo(sip); + sip = sipsave; + continue; + } + + /* + ** check if a UDP port has not been used + ** for some time --> destroy even if the thread + ** (group) is still there + ** e.g. 
outgoing DNS requests (to remote port 53) are + ** issued every time with another source port being + ** a new object that should not be kept too long; + ** local well-known ports are useful to keep + */ + if (sip->proto == IPPROTO_UDP && + sip->lastact + GCMAXUDP < jiffies_64 && + sip->key.udp > 1024) { + sipsave = sip->ch.next; + delete_sockinfo(sip); + sip = sipsave; + continue; + } + + sip = sip->ch.next; + } + + spin_unlock_irqrestore(&shash[i].lock, sflags); + } +} + +/* +** remove taskinfo structures of finished tasks from hash list +*/ +static void +gctaskinfo() +{ + int i; + struct taskinfo *tip, *tipsave; + unsigned long tflags; + struct pid *pid; + + /* + ** go through all taskinfo hash buckets + */ + for (i=0; i < TBUCKS; i++) { + if (thash[i].ch.next == (void *)&thash[i].ch) + continue; // quick return without lock + + spin_lock_irqsave(&thash[i].lock, tflags); + + tip = thash[i].ch.next; + + /* + ** check all taskinfo structs chained to this bucket + */ + while (tip != (void *)&thash[i].ch) { + switch (tip->state) { + /* + ** remove INDELETE tasks from the hash buckets + ** -- move thread group to exitlist + ** -- destroy thread right away + */ + case INDELETE: + tipsave = tip->ch.next; + + if (tip->type == 'g') + move_taskinfo(tip); // thread group + else + delete_taskinfo(tip); // thread + + tip = tipsave; + break; + + case CHECKED: + tip->state = 0; + tip = tip->ch.next; + break; + + default: // not checked yet + rcu_read_lock(); + pid = find_vpid(tip->id); + rcu_read_unlock(); + + if (pid == NULL) { + tipsave = tip->ch.next; + + if (tip->type == 'g') + move_taskinfo(tip); + else + delete_taskinfo(tip); + + tip = tipsave; + } else { + tip = tip->ch.next; + } + } + } + + spin_unlock_irqrestore(&thash[i].lock, tflags); + } +} + + +/* +** remove all sockinfo structs +*/ +static void +wipesockinfo() +{ + struct sockinfo *sip, *sipsave; + int i; + unsigned long sflags; + + for (i=0; i < SBUCKS; i++) { + spin_lock_irqsave(&shash[i].lock, sflags); + + sip = shash[i].ch.next; + + /* + ** free all structs chained in one bucket + */ + while (sip != (void *)&shash[i].ch) { + sipsave = sip->ch.next; + delete_sockinfo(sip); + sip = sipsave; + } + + spin_unlock_irqrestore(&shash[i].lock, sflags); + } +} + +/* +** remove all taskinfo structs from hash list +*/ +static void +wipetaskinfo() +{ + struct taskinfo *tip, *tipsave; + int i; + unsigned long tflags; + + for (i=0; i < TBUCKS; i++) { + spin_lock_irqsave(&thash[i].lock, tflags); + + tip = thash[i].ch.next; + + /* + ** free all structs chained in one bucket + */ + while (tip != (void *)&thash[i].ch) { + tipsave = tip->ch.next; + delete_taskinfo(tip); + tip = tipsave; + } + + spin_unlock_irqrestore(&thash[i].lock, tflags); + } +} + +/* +** remove all taskinfo structs from exit list +*/ +static void +wipetaskexit() +{ + gctaskexit(); +} + +/* +** move one taskinfo struct from hash bucket to exitlist +*/ +static void +move_taskinfo(struct taskinfo *tip) +{ + unsigned long flags; + + /* + ** remove from hash list + */ + ((struct taskinfo *)tip->ch.next)->ch.prev = tip->ch.prev; + ((struct taskinfo *)tip->ch.prev)->ch.next = tip->ch.next; + + spin_lock_irqsave(&nrtlock, flags); + nrt--; + spin_unlock_irqrestore(&nrtlock, flags); + + /* + ** add to exitlist + */ + tip->ch.next = NULL; + tip->state = FINISHED; + tip->exittime = jiffies_64; + + spin_lock_irqsave(&exitlock, flags); + + if (exittail) { // list filled? 
+		exittail->ch.next = tip;
+		exittail = tip;
+	} else {			// list empty
+		exithead = exittail = tip;
+	}
+
+	nre++;
+
+	wake_up_interruptible(&exitlist_filled);
+
+	spin_unlock_irqrestore(&exitlock, flags);
+}
+
+/*
+** remove one taskinfo struct from the hash bucket chain
+*/
+static void
+delete_taskinfo(struct taskinfo *tip)
+{
+	unsigned long	flags;
+
+	((struct taskinfo *)tip->ch.next)->ch.prev = tip->ch.prev;
+	((struct taskinfo *)tip->ch.prev)->ch.next = tip->ch.next;
+
+	kmem_cache_free(ticache, tip);
+
+	spin_lock_irqsave(&nrtlock, flags);
+	nrt--;
+	spin_unlock_irqrestore(&nrtlock, flags);
+}
+
+/*
+** remove one sockinfo struct from the hash bucket chain
+*/
+static void
+delete_sockinfo(struct sockinfo *sip)
+{
+	unsigned long	flags;
+
+	((struct sockinfo *)sip->ch.next)->ch.prev = sip->ch.prev;
+	((struct sockinfo *)sip->ch.prev)->ch.next = sip->ch.next;
+
+	kmem_cache_free(sicache, sip);
+
+	spin_lock_irqsave(&nrslock, flags);
+	nrs--;
+	spin_unlock_irqrestore(&nrslock, flags);
+}
+
+/*
+** read function for /proc/netatop
+*/
+static int
+netatop_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "tcpsndpacks: %12lu (unident: %9lu)\n"
+		"tcprcvpacks: %12lu (unident: %9lu)\n"
+		"udpsndpacks: %12lu (unident: %9lu)\n"
+		"udprcvpacks: %12lu (unident: %9lu)\n\n"
+		"icmpsndpacks: %12lu\n"
+		"icmprcvpacks: %12lu\n\n"
+		"#sockinfo: %12lu (overflow: %8lu)\n"
+		"#taskinfo: %12lu (overflow: %8lu)\n"
+		"#taskexit: %12lu\n\n"
+		"modversion: %14s\n",
+		tcpsndpacks, unidenttcpsndpacks,
+		tcprcvpacks, unidenttcprcvpacks,
+		udpsndpacks, unidentudpsndpacks,
+		udprcvpacks, unidentudprcvpacks,
+		icmpsndpacks, icmprcvpacks,
+		nrs, nrs_ovf,
+		nrt, nrt_ovf,
+		nre, NETATOPVERSION);
+	return 0;
+}
+
+static int
+netatop_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, netatop_show, NULL);
+}
+
+/*
+** called when user space issues the getsockopt() system call
+*/
+static int
+getsockopt(struct sock *sk, int cmd, void __user *user, int *len)
+{
+	int		bt;
+	struct taskinfo	*tip;
+	char		tasktype = 't';
+	struct netpertask npt;
+	unsigned long	tflags;
+
+	/*
+	** verify the proper privileges
+	*/
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/*
+	** react on command
+	*/
+	switch (cmd) {
+	case NETATOP_PROBE:
+		break;
+
+	case NETATOP_FORCE_GC:
+		garbage_collector();
+		break;
+
+	case NETATOP_EMPTY_EXIT:
+		while (nre > 0) {
+			if (wait_event_interruptible(exitlist_empty, nre == 0))
+				return -ERESTARTSYS;
+		}
+		break;
+
+	case NETATOP_GETCNT_EXIT:
+		if (nre == 0)
+			wake_up_interruptible(&exitlist_empty);
+
+		if (*len < sizeof(pid_t))
+			return -EINVAL;
+
+		if (*len > sizeof npt)
+			*len = sizeof npt;
+
+		spin_lock_irqsave(&exitlock, tflags);
+
+		/*
+		** check if an exited process is present
+		** if not, wait for it...
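+		** (note: the loop below follows the classic pattern of
+		** dropping the spinlock, sleeping on exitlist_filled until
+		** nre > 0, and re-checking after re-acquiring the lock,
+		** since another reader may have emptied the list meanwhile)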
+		*/
+		while (nre == 0) {
+			spin_unlock_irqrestore(&exitlock, tflags);
+
+			if ( wait_event_interruptible(exitlist_filled, nre > 0))
+				return -ERESTARTSYS;
+
+			spin_lock_irqsave(&exitlock, tflags);
+		}
+
+		/*
+		** get the first process from the exitlist and remove it
+		** from there
+		*/
+		tip = exithead;
+
+		if ( (exithead = tip->ch.next) == NULL)
+			exittail = NULL;
+
+		nre--;
+
+		spin_unlock_irqrestore(&exitlock, tflags);
+
+		/*
+		** pass relevant info to user mode
+		** and free taskinfo struct
+		*/
+		npt.id		= tip->id;
+		npt.tc		= tip->tc;
+		npt.btime	= tip->btime;
+		memcpy(npt.command, tip->command, COMLEN);
+
+		if (copy_to_user(user, &npt, *len) != 0)
+			return -EFAULT;
+
+		kmem_cache_free(ticache, tip);
+
+		return 0;
+
+	case NETATOP_GETCNT_TGID:
+		tasktype = 'g';
+		/* fall through */
+
+	case NETATOP_GETCNT_PID:
+		if (*len < sizeof(pid_t))
+			return -EINVAL;
+
+		if (*len > sizeof npt)
+			*len = sizeof npt;
+
+		if (copy_from_user(&npt, user, *len) != 0)
+			return -EFAULT;
+
+		/*
+		** search requested id in taskinfo hash
+		*/
+		bt = THASH(npt.id, tasktype);	// calculate hash
+
+		if (thash[bt].ch.next == (void *)&thash[bt].ch)
+			return -ESRCH;	// quick return without lock
+
+		spin_lock_irqsave(&thash[bt].lock, tflags);
+
+		tip = thash[bt].ch.next;
+
+		while (tip != (void *)&thash[bt].ch) {
+			// is this the one?
+			if (tip->id == npt.id && tip->type == tasktype) {
+				/*
+				** found: copy results to user space
+				*/
+				memcpy(npt.command, tip->command, COMLEN);
+				npt.tc		= tip->tc;
+				npt.btime	= tip->btime;
+
+				spin_unlock_irqrestore(&thash[bt].lock, tflags);
+
+				if (copy_to_user(user, &npt, *len) != 0)
+					return -EFAULT;
+				else
+					return 0;
+			}
+
+			tip = tip->ch.next;
+		}
+
+		spin_unlock_irqrestore(&thash[bt].lock, tflags);
+		return -ESRCH;
+
+	default:
+		printk(KERN_INFO "unknown getsockopt command %d\n", cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+** kernel mode thread: initiate garbage collection every N seconds
+*/
+static int netatop_thread(void *dummy)
+{
+	while (!kthread_should_stop()) {
+		/*
+		** do garbage collection
+		*/
+		garbage_collector();
+
+		/*
+		** wait a while
+		*/
+		(void) schedule_timeout_interruptible(GCINTERVAL);
+	}
+
+	return 0;
+}
+
+/*
+** called when the module is loaded
+*/
+int
+init_module()
+{
+	int	i;
+
+	/*
+	** initialize caches for taskinfo and sockinfo
+	*/
+	ticache = kmem_cache_create("Netatop_taskinfo",
+				sizeof (struct taskinfo), 0, 0, NULL);
+	if (!ticache)
+		return -EFAULT;
+
+	sicache = kmem_cache_create("Netatop_sockinfo",
+				sizeof (struct sockinfo), 0, 0, NULL);
+	if (!sicache) {
+		kmem_cache_destroy(ticache);
+		return -EFAULT;
+	}
+
+	/*
+	** initialize hash tables for taskinfo and sockinfo
+	*/
+	for (i=0; i < TBUCKS; i++) {
+		thash[i].ch.next = &thash[i].ch;
+		thash[i].ch.prev = &thash[i].ch;
+		spin_lock_init(&thash[i].lock);
+	}
+
+	for (i=0; i < SBUCKS; i++) {
+		shash[i].ch.next = &shash[i].ch;
+		shash[i].ch.prev = &shash[i].ch;
+		spin_lock_init(&shash[i].lock);
+	}
+
+	getboottime(&boottime);
+
+	/*
+	** register getsockopt for user space communication
+	*/
+	if (nf_register_sockopt(&sockopts) < 0) {
+		kmem_cache_destroy(ticache);
+		kmem_cache_destroy(sicache);
+		return -1;
+	}
+
+	/*
+	** create a new kernel mode thread for time-driven garbage collection
+	** after creation, the thread waits until it is woken up
+	*/
+	knetatop_task = kthread_create(netatop_thread, NULL, "knetatop");
+
+	if (IS_ERR(knetatop_task)) {
+		nf_unregister_sockopt(&sockopts);
+		kmem_cache_destroy(ticache);
+		kmem_cache_destroy(sicache);
+		return -1;
+	}
+
+	/*
+	** prepare hooks and register
+	*/
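+	/*
+	** note: NF_IP_LOCAL_IN only sees packets delivered to this host
+	** and NF_IP_LOCAL_OUT only packets generated by it; forwarded
+	** traffic does not pass these hooks, so only the traffic of
+	** local processes is accounted
+	*/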
+	hookin_ipv4.hooknum	= NF_IP_LOCAL_IN;	// input packs
+	hookin_ipv4.hook	= ipv4_hookin;		// func to call
+	hookin_ipv4.pf		= PF_INET;		// IPV4 packets
+	hookin_ipv4.priority	= NF_IP_PRI_FIRST;	// highest prio
+
+	hookout_ipv4.hooknum	= NF_IP_LOCAL_OUT;	// output packs
+	hookout_ipv4.hook	= ipv4_hookout;		// func to call
+	hookout_ipv4.pf		= PF_INET;		// IPV4 packets
+	hookout_ipv4.priority	= NF_IP_PRI_FIRST;	// highest prio
+
+	nf_register_net_hook(&init_net, &hookin_ipv4);	// register hook
+	nf_register_net_hook(&init_net, &hookout_ipv4);	// register hook
+
+	/*
+	** create a /proc-entry to produce status-info on request
+	*/
+	proc_create("netatop", 0444, NULL, &netatop_proc_fops);
+
+	/*
+	** all administration prepared; kick off the kernel mode thread
+	*/
+	wake_up_process(knetatop_task);
+
+	return 0;		// return success
+}
+
+/*
+** called when the module is unloaded
+*/
+void
+cleanup_module()
+{
+	/*
+	** tell the kernel daemon to stop
+	*/
+	kthread_stop(knetatop_task);
+
+	/*
+	** unregister netfilter hooks and other miscellaneous stuff
+	*/
+	nf_unregister_net_hook(&init_net, &hookin_ipv4);
+	nf_unregister_net_hook(&init_net, &hookout_ipv4);
+
+	remove_proc_entry("netatop", NULL);
+
+	nf_unregister_sockopt(&sockopts);
+
+	/*
+	** destroy allocated stats
+	*/
+	wipesockinfo();
+	wipetaskinfo();
+	wipetaskexit();
+
+	/*
+	** destroy caches
+	*/
+	kmem_cache_destroy(ticache);
+	kmem_cache_destroy(sicache);
+}
diff --git a/kernel/tkernel/netatop/netatop.h b/kernel/tkernel/netatop/netatop.h
new file mode 100644
index 000000000000..a14e6fa5db52
--- /dev/null
+++ b/kernel/tkernel/netatop/netatop.h
@@ -0,0 +1,47 @@
+#define COMLEN	16
+
+struct taskcount {
+	unsigned long long	tcpsndpacks;
+	unsigned long long	tcpsndbytes;
+	unsigned long long	tcprcvpacks;
+	unsigned long long	tcprcvbytes;
+
+	unsigned long long	udpsndpacks;
+	unsigned long long	udpsndbytes;
+	unsigned long long	udprcvpacks;
+	unsigned long long	udprcvbytes;
+
+	/* space for future extensions */
+};
+
+struct netpertask {
+	pid_t		id;	// tgid or tid (depending on command)
+	unsigned long	btime;
+	char		command[COMLEN];
+
+	struct taskcount	tc;
+};
+
+
+/*
+** getsockopt commands
+*/
+#define NETATOP_BASE_CTL	15661
+
+// just probe if the netatop module is active
+#define NETATOP_PROBE		(NETATOP_BASE_CTL)
+
+// force garbage collection to make finished processes available
+#define NETATOP_FORCE_GC	(NETATOP_BASE_CTL+1)
+
+// wait until all finished processes are read (blocks until done)
+#define NETATOP_EMPTY_EXIT	(NETATOP_BASE_CTL+2)
+
+// get info for finished process (blocks until available)
+#define NETATOP_GETCNT_EXIT	(NETATOP_BASE_CTL+3)
+
+// get counters for thread group (i.e.
process): input is 'id' (pid) +#define NETATOP_GETCNT_TGID (NETATOP_BASE_CTL+4) + +// get counters for thread: input is 'id' (tid) +#define NETATOP_GETCNT_PID (NETATOP_BASE_CTL+5) diff --git a/kernel/tkernel/netatop/netatopversion.h b/kernel/tkernel/netatop/netatopversion.h new file mode 100644 index 000000000000..593dc6a65769 --- /dev/null +++ b/kernel/tkernel/netatop/netatopversion.h @@ -0,0 +1,2 @@ +#define NETATOPVERSION "0.7" +#define NETATOPDATE "2015/10/09 14:26:52" diff --git a/kernel/tkernel/netbind.c b/kernel/tkernel/netbind.c new file mode 100644 index 000000000000..c9f5cdf2e3c4 --- /dev/null +++ b/kernel/tkernel/netbind.c @@ -0,0 +1,89 @@ +#include +#include +#include +#include +#include + +unsigned char prot_sock_flag[PROT_SOCK]; +EXPORT_SYMBOL(prot_sock_flag); + + +static int netbind_proc_show(struct seq_file *m, void *v) +{ + int i; + for(i=1; i<1024; i++) { + if(prot_sock_flag[i]) + seq_printf(m, "%d\n", i); + } + return 0; +} + +static int netbind_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, netbind_proc_show, NULL); +} + +static ssize_t netbind_proc_write(struct file *file, const char __user *buf, + size_t length, loff_t *ppos) +{ + int port, en; + char *buffer, *p; + int err; + + if (!buf || length > PAGE_SIZE) + return -EINVAL; + + buffer = (char *)__get_free_page(GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = -EFAULT; + if (copy_from_user(buffer, buf, length)) + goto out; + + err = -EINVAL; + if (length < PAGE_SIZE) + buffer[length] = '\0'; + else if (buffer[PAGE_SIZE-1]) + goto out; + + en = 1; + p = buffer; + if(*p=='+') { + p++; + } else if(*p=='-') { + en = 0; + p++; + } + + if(*p < '0' || *p > '9') + goto out; + + port = simple_strtoul(p, &p, 0); + if(*p != '\n' && *p != '\r' && *p != '\0') + goto out; + + if(port <= 0 || port >= PROT_SOCK) + goto out; + + prot_sock_flag[port] = en; + err = length; + +out: + free_page((unsigned long)buffer); + return err; +} + +static const struct file_operations netbind_proc_ops = { + .open = netbind_proc_open, + .read = seq_read, + .write = netbind_proc_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init nonpriv_netbind_init(void) { + proc_create("nonpriv_netbind", 0, proc_tkernel, &netbind_proc_ops); + return 0; +} +late_initcall(nonpriv_netbind_init); diff --git a/kernel/tkernel/release_suffix.c b/kernel/tkernel/release_suffix.c new file mode 100644 index 000000000000..1a5904fbf1dc --- /dev/null +++ b/kernel/tkernel/release_suffix.c @@ -0,0 +1,60 @@ +#include +#include +#include +#include +#include +#include + +#define RELEASE_SUFFIX_MAX_LENS 16 +int release_suffix = 0; + +static int release_suffix_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", release_suffix); + return 0; +} + +static int release_suffix_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, release_suffix_proc_show, NULL); +} + +static ssize_t release_suffix_proc_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[8] = {0}; + + if (cnt != 2){ + return -EINVAL; + } + + if (copy_from_user(buf, ubuf, cnt)) + return -EFAULT; + + if (*buf == '0'){ + release_suffix = 0; + }else if(*buf == '1'){ + release_suffix = 1; + }else{ + return -EINVAL; + } + + return cnt; + +} + +static const struct file_operations release_suffix_proc_fops = { + .open = release_suffix_proc_open, + .read = seq_read, + .write = release_suffix_proc_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init 
proc_release_suffix_init(void)
+{
+	proc_create("release_suffix", 0, proc_tkernel, &release_suffix_proc_fops);
+	return 0;
+}
+
+late_initcall(proc_release_suffix_init);
diff --git a/kernel/tkernel/shield_mounts.c b/kernel/tkernel/shield_mounts.c
new file mode 100644
index 000000000000..1a5c9b4567b2
--- /dev/null
+++ b/kernel/tkernel/shield_mounts.c
@@ -0,0 +1,243 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define SHIELD_PATH_MAX 1024
+
+struct mount_pair {
+	struct list_head list;
+	char dev_name[SHIELD_PATH_MAX];
+	char mnt_path[SHIELD_PATH_MAX];
+};
+
+static DEFINE_RWLOCK(shield_mounts_lock);
+static LIST_HEAD(shield_mounts_list);
+static unsigned int shield_mounts_count;
+unsigned int sysctl_shield_mounts_max = 512;
+
+bool is_mount_shielded(const char *dev_path, const char *mount_path)
+{
+	bool ret = false;
+	struct mount_pair *p;
+
+	read_lock(&shield_mounts_lock);
+
+	list_for_each_entry(p, &shield_mounts_list, list) {
+		if (!strcmp(p->dev_name, dev_path) &&
+		    !strcmp(p->mnt_path, mount_path)) {
+			ret = true;
+			break;
+		}
+	}
+
+	read_unlock(&shield_mounts_lock);
+	return ret;
+}
+
+static int shield_mounts_search_and_insert(struct mount_pair *item)
+{
+	int ret = -EEXIST;
+	struct mount_pair *p;
+
+	write_lock(&shield_mounts_lock);
+
+	list_for_each_entry(p, &shield_mounts_list, list) {
+		if (!strcmp(p->dev_name, item->dev_name) &&
+		    !strcmp(p->mnt_path, item->mnt_path))
+			goto unlock;
+	}
+
+	if (shield_mounts_count >= sysctl_shield_mounts_max) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	list_add_tail(&item->list, &shield_mounts_list);
+	shield_mounts_count++;
+	ret = 0;
+
+unlock:
+	write_unlock(&shield_mounts_lock);
+	return ret;
+}
+
+static int shield_mounts_search_and_del(struct mount_pair *item)
+{
+	struct mount_pair *p;
+
+	write_lock(&shield_mounts_lock);
+
+	list_for_each_entry(p, &shield_mounts_list, list) {
+		if (!strcmp(p->dev_name, item->dev_name) &&
+		    !strcmp(p->mnt_path, item->mnt_path)) {
+			list_del(&p->list);
+			kfree(p);
+			shield_mounts_count--;
+			goto unlock;
+		}
+	}
+
+unlock:
+	write_unlock(&shield_mounts_lock);
+	return 0;
+}
+
+/* helper: strip all characters in 'esc' from s, in place */
+static void str_escape(char *s, const char *esc)
+{
+	char *p = s;
+
+	while (*p != '\0') {
+		char c = *p++;
+
+		if (!strchr(esc, c))
+			*s++ = c;
+	}
+	*s = '\0';
+}
+
+
+/*
+ * parse buffer to get shield mount info
+ * "set devpath mountpoint": add a new shield mount entry
+ * "clear devpath mountpoint": delete an existing one
+ */
+static int shield_mounts_parse(char *buf, bool *is_set, struct mount_pair *item)
+{
+	char *token;
+
+	str_escape(buf, "\t\n");
+	buf = skip_spaces(buf);
+	token = strsep(&buf, " ");
+	if (!token || !*token || !buf)
+		goto error;
+
+	if (!strcmp(token, "set")) {
+		/* set */
+		*is_set = true;
+	} else if (!strcmp(token, "clear")) {
+		/* clear */
+		*is_set = false;
+	} else {
+		printk(KERN_ERR "set parse error\n");
+		goto error;
+	}
+
+	buf = skip_spaces(buf);
+	/* dev path */
+	token = strsep(&buf, " ");
+	if (!buf || !token || !*token || strlen(token) > (SHIELD_PATH_MAX-1)) {
+		printk(KERN_ERR "dev path failed\n");
+		goto error;
+	}
+	memcpy(item->dev_name, token, strlen(token)+1);
+
+	/* mnt */
+	buf = strim(buf);
+	if (strlen(buf) > (SHIELD_PATH_MAX-1))
+		goto error;
+	memcpy(item->mnt_path, buf, strlen(buf)+1);
+
+	return 0;
+error:
+	printk(KERN_ERR "Failed to parse shield mounts pair\n");
+	return -EFAULT;
+}
+
+static int shield_mounts_proc_show(struct seq_file *m, void *v)
+{
+	struct mount_pair *p;
+
+	read_lock(&shield_mounts_lock);
+
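+	/* walk the configured pairs while holding the read lock, so
+	 * writers are excluded until the full list has been printed */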
+	list_for_each_entry(p, &shield_mounts_list, list) {
+		seq_printf(m, "%s on %s\n", p->dev_name, p->mnt_path);
+	}
+
+	read_unlock(&shield_mounts_lock);
+	return 0;
+}
+
+static int shield_mounts_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, shield_mounts_proc_show, NULL);
+}
+
+
+static ssize_t shield_mounts_proc_write(struct file *file, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char *buffer = NULL;
+	int ret = cnt;
+	int order = 1;
+	bool is_set;
+	struct mount_pair *item;
+
+	/* max input length: two pages (8 KB), minus one byte for the
+	 * terminating '\0' written below */
+	if (!ubuf || cnt >= PAGE_SIZE * (1 << order))
+		return -EINVAL;
+
+	buffer = (char *)__get_free_pages(GFP_KERNEL, order);
+	if (!buffer)
+		return -ENOMEM;
+
+	if (copy_from_user(buffer, ubuf, cnt)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	buffer[cnt] = 0;
+
+	item = kmalloc(sizeof(struct mount_pair), GFP_KERNEL);
+	if (!item) {
+		printk(KERN_ERR "Failed to malloc mount_pair\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* parse buffer */
+	ret = shield_mounts_parse(buffer, &is_set, item);
+	if (ret)
+		goto out1;
+
+	if (is_set) {
+		/* set */
+		ret = shield_mounts_search_and_insert(item);
+		if (!ret)
+			item = NULL;
+	} else {
+		/* clear */
+		ret = shield_mounts_search_and_del(item);
+	}
+
+	if (!ret)
+		ret = cnt;
+out1:
+	kfree(item);
+out:
+	free_pages((unsigned long) buffer, order);
+	return ret;
+}
+
+
+int shield_mounts_release(struct inode *inode, struct file *file)
+{
+	return single_release(inode, file);
+}
+static const struct file_operations shield_mounts_proc_fops = {
+	.open		= shield_mounts_proc_open,
+	.read		= seq_read,
+	.write		= shield_mounts_proc_write,
+	.llseek		= seq_lseek,
+	.release	= shield_mounts_release,
+};
+
+static int __init proc_shield_mounts_init(void)
+{
+	proc_create("shield_mounts", 0, proc_tkernel, &shield_mounts_proc_fops);
+	return 0;
+}
+
+late_initcall(proc_shield_mounts_init);
diff --git a/kernel/tkernel/trackgpu.c b/kernel/tkernel/trackgpu.c
new file mode 100644
index 000000000000..d59f7375bd59
--- /dev/null
+++ b/kernel/tkernel/trackgpu.c
@@ -0,0 +1,226 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+struct gpu_request_list gpu_list;
+int sysctl_track_gpu __read_mostly = 0;
+atomic_t gpu_req_count;
+
+#define GPU_MAX_REQUEST 4096
+struct gpu_wait *gpu_create_wait(int pid)
+{
+	struct gpu_wait *w;
+	int c = atomic_read(&gpu_req_count);
+
+	if (c >= GPU_MAX_REQUEST) {
+		printk(KERN_ERR "maximum gpu requests detected aborting pid:%d...:%d\n", pid, c);
+		return NULL;
+	}
+	w = kmalloc(sizeof(*w), GFP_KERNEL);
+	if (!w) {
+		printk(KERN_ERR "alloc gpu wait failed\n");
+		return NULL;
+	}
+
+	init_waitqueue_head(&w->wait);
+	w->proceed = false;
+	w->used = false;
+	w->wakup = false;
+	w->pid = pid;
+	w->type = GPU_OPEN;
+
+	mutex_lock(&gpu_list.lock);
+	list_add_tail(&w->node, &gpu_list.list);
+	mutex_unlock(&gpu_list.lock);
+	atomic_inc(&gpu_req_count);
+
+	return w;
+}
+EXPORT_SYMBOL_GPL(gpu_create_wait);
+
+void gpu_clean(int pid)
+{
+	struct gpu_wait *w;
+
+	mutex_lock(&gpu_list.lock);
+	list_for_each_entry(w, &gpu_list.list, node) {
+		if (w->pid == pid) {
+			list_del(&w->node);
+			kfree(w);
+			atomic_dec(&gpu_req_count);
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&gpu_list.lock);
+}
+EXPORT_SYMBOL_GPL(gpu_clean);
+
+static struct gpu_wait *gpu_check_list(int pid)
+{
+	struct gpu_wait *w;
+	struct gpu_wait *wait = NULL;
+
+	mutex_lock(&gpu_list.lock);
+	list_for_each_entry(w, &gpu_list.list, node) {
+		if (w->pid == pid &&
+		    !w->used) {
+			w->used = true;
+			wait = w;
+		}
+	}
+	mutex_unlock(&gpu_list.lock);
+
+	return wait;
+}
+static bool nl_send_msg(int gpu_pid, enum GPU_EVENT_TYPE event_type);
+static struct sock *nl_sk = NULL;
+/* This is the communication pid, usually the gpu_manager pid.
+ * The pid will not change once gpu_manager has started.
+ * We use this pid to communicate with gpu_manager in user space.
+ */
+int comm_pid = -1;
+
+static void nl_recv_msg(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh;
+
+	int msg_size;
+	char msg_ok[32];
+	int recv_pid = 0;
+	char *recv_msg;
+	struct gpu_wait *wait = NULL;
+	char comp_msg[32];
+	int len;
+	int ret;
+
+	nlh = nlmsg_hdr(skb);
+	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+	    skb->len < nlh->nlmsg_len) {
+		printk(KERN_ERR "gpu:nl_recv_msg error\n");
+		return;
+	}
+
+	comm_pid = nlh->nlmsg_pid;
+
+	recv_msg = (char *)nlmsg_data(nlh);
+	msg_size = strnlen(recv_msg, nlmsg_len(nlh));
+
+	memset(msg_ok, 0, sizeof(msg_ok));
+	memset(comp_msg, 0, sizeof(comp_msg));
+	len = min_t(size_t, sizeof(comp_msg) - 1, msg_size);
+	/* During the tests we found a lot of irrelevant characters
+	 * following the received message, so we only use the first
+	 * characters (at most 31) of the received message.
+	 */
+	strncpy(comp_msg, recv_msg, len);
+	/* the format of the received message from gpu_manager is: pid ok xxx
+	 * we retrieve the pid and ok from the received message and check the
+	 * request list to decide whether the open can proceed or not
+	 */
+	ret = sscanf(comp_msg, "%d %s", &recv_pid, msg_ok);
+	if (ret < 2) {
+		return;
+	}
+	wait = gpu_check_list(recv_pid);
+	if (wait != NULL) {
+		if (strncmp(msg_ok, GPU_OK, 2) == 0) {
+			wait->wakup = true;
+			wait->proceed = true;
+			wake_up(&wait->wait);
+		} else {
+			wait->wakup = true;
+			wait->proceed = false;
+			wake_up(&wait->wait);
+		}
+	}
+}
+
+static bool nl_send_msg(int gpu_pid, enum GPU_EVENT_TYPE event_type)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb_out;
+	int msg_size;
+	char msg[64];
+	int res;
+
+	if (comm_pid == -1)
+		return false;
+
+	/* The format sent to gpu_manager (in user space) is: pid, open/close.
+	 * When gpu_manager gets the msg it will respond to the kernel
+	 * with "pid ok xxx" so the open can proceed; otherwise the open is
+	 * rejected.
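+	 * (illustrative example, assuming pid 4242: the kernel sends
+	 * "pid:4242, open"; a permitting gpu_manager replies "4242 ok",
+	 * and any other second word makes the open fail)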
+	 */
+	if (event_type == GPU_OPEN)
+		sprintf(msg, "pid:%d, open", gpu_pid);
+	else
+		sprintf(msg, "pid:%d, close", gpu_pid);
+	msg_size = strlen(msg);
+	skb_out = nlmsg_new(msg_size, GFP_KERNEL);
+	if (!skb_out)
+		return false;
+	nlh = nlmsg_put(skb_out, comm_pid, 0, NLMSG_DONE, msg_size, 0);
+	if (!nlh) {
+		nlmsg_free(skb_out);	/* don't leak the buffer */
+		return false;
+	}
+
+	NETLINK_CB(skb_out).dst_group = 0;
+	strncpy(nlmsg_data(nlh), msg, msg_size);
+
+	res = netlink_unicast(nl_sk, skb_out, comm_pid, 0);
+	if (res < 0)
+		return false;
+
+	return true;
+}
+
+int notify_gpu(struct gpu_wait *wait, enum GPU_EVENT_TYPE event, int pid)
+{
+	if (event == GPU_OPEN) {
+		if (!nl_send_msg(pid, GPU_OPEN)) {
+			printk(KERN_ERR "gpu notifier:nl send msg failed pid:%d\n", wait->pid);
+			wait->proceed = true;
+			wait->wakup = true;
+			wake_up(&wait->wait);
+			return -EINVAL;
+		}
+	} else
+		nl_send_msg(pid, GPU_CLOSE);
+
+	return 0;
+}
+
+static int __init trackgpu_init(void)
+{
+	struct netlink_kernel_cfg cfg = {
+		.groups = 0,
+		.flags = 0,
+		.cb_mutex = NULL,
+		.bind = NULL,
+		.unbind = NULL,
+		.compare = NULL,
+		.input = nl_recv_msg,
+	};
+
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &cfg);
+	if (!nl_sk) {
+		pr_info("Error creating netlink socket\n");
+		return -EINVAL;
+	}
+	INIT_LIST_HEAD(&gpu_list.list);
+	mutex_init(&gpu_list.lock);
+	atomic_set(&gpu_req_count, 0);
+
+	return 0;
+}
+late_initcall(trackgpu_init);
diff --git a/kernel/tkernel/ttools/Makefile b/kernel/tkernel/ttools/Makefile
new file mode 100644
index 000000000000..a8879c8937e9
--- /dev/null
+++ b/kernel/tkernel/ttools/Makefile
@@ -0,0 +1,2 @@
+obj-m := ttools.o
+ttools-y := ttools_module.o
diff --git a/kernel/tkernel/ttools/ttools.h b/kernel/tkernel/ttools/ttools.h
new file mode 100644
index 000000000000..91d220e6b710
--- /dev/null
+++ b/kernel/tkernel/ttools/ttools.h
@@ -0,0 +1,18 @@
+#ifndef __TTOOLS_H__
+#define __TTOOLS_H__
+
+#include
+#include
+
+#define TTOOLS_IO 0xEE
+
+struct ttools_fd_ref {
+	int fd;
+	long ref_cnt;
+};
+
+#define TTOOLS_PTRACE_PROTECT	_IO(TTOOLS_IO, 0x00)
+#define TTOOLS_PTRACE_UNPROTECT	_IO(TTOOLS_IO, 0x01)
+#define TTOOLS_GET_FD_REFS_CNT	_IOWR(TTOOLS_IO, 0x02, struct ttools_fd_ref)
+
+#endif
diff --git a/kernel/tkernel/ttools/ttools_module.c b/kernel/tkernel/ttools/ttools_module.c
new file mode 100644
index 000000000000..a4a7b7c6bdee
--- /dev/null
+++ b/kernel/tkernel/ttools/ttools_module.c
@@ -0,0 +1,222 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "ttools.h"
+
+#define TTOOLS_MINOR 254
+#define TTOOLS_VER "2.0"
+
+
+struct ttools_pid {
+	struct list_head list;
+	struct task_struct *task;
+};
+
+static LIST_HEAD(ttools_protected_pids);
+static DEFINE_SPINLOCK(ttools_pids_lock);
+
+static struct inode **p_anon_inode_inode;
+
+static bool is_annon_inode(struct inode *inode)
+{
+	return inode == *p_anon_inode_inode;
+}
+
+static int ttools_ptrace_protect_task(struct task_struct *p_task)
+{
+	struct ttools_pid *p_item;
+	bool exist = false;
+	struct ttools_pid *pid_item = kmalloc(sizeof(struct ttools_pid), GFP_KERNEL);
+
+	if (!pid_item)
+		return -ENOMEM;
+
+	spin_lock(&ttools_pids_lock);
+	list_for_each_entry(p_item, &ttools_protected_pids, list) {
+		if (p_item->task == p_task) {
+			exist = true;
+			break;
+		}
+	}
+	if (!exist) {
+		pid_item->task = p_task;
+		list_add_tail(&pid_item->list, &ttools_protected_pids);
+	}
+	spin_unlock(&ttools_pids_lock);
+	if (exist) {
+		kfree(pid_item);
+		return -EEXIST;
+	}
+	return 0;
+} + +static int ttools_ptrace_unprotect_task(struct task_struct *p_task) +{ + struct ttools_pid *p_item; + + spin_lock(&ttools_pids_lock); + list_for_each_entry(p_item, &ttools_protected_pids, list) { + if (p_item->task == p_task) { + list_del(&p_item->list); + kfree(p_item); + break; + } + } + spin_unlock(&ttools_pids_lock); + return 0; +} + +static bool ttools_task_ptrace_protected(struct task_struct *p_task) +{ + struct ttools_pid *p_item; + bool ret = false; + + spin_lock(&ttools_pids_lock); + list_for_each_entry(p_item, &ttools_protected_pids, list) { + if (p_item->task == p_task) { + ret = true; + break; + } + } + spin_unlock(&ttools_pids_lock); + return ret; +} + +static int ttools_ptrace_hook(long request, long pid, struct task_struct *task, long addr, long data) +{ + if (ttools_task_ptrace_protected(task->group_leader)) + return -EPERM; + return 0; +} + +static void ttools_clean_task_list(void) +{ + struct ttools_pid *p_item; + struct ttools_pid *p_item2; + + spin_lock(&ttools_pids_lock); + list_for_each_entry_safe(p_item, p_item2, &ttools_protected_pids, list) { + list_del(&p_item->list); + kfree(p_item); + } + spin_unlock(&ttools_pids_lock); +} + +static int ttools_get_fd_refs_cnt(struct ttools_fd_ref *p_ref) +{ + struct dentry *dentry; + long dentry_cnt, f_cnt; + bool is_annon; + struct fd f; + f = fdget(p_ref->fd); + if (!f.file) + return -EBADF; + is_annon = is_annon_inode(f.file->f_inode); + f_cnt = atomic_long_read(&(f.file->f_count)) - (f.flags & FDPUT_FPUT); + dentry = f.file->f_path.dentry; + if (!dentry) + dentry_cnt = 0; + else + dentry_cnt = dentry->d_lockref.count; + fdput(f); + if (is_annon || !dentry_cnt) + p_ref->ref_cnt = f_cnt; + else + p_ref->ref_cnt = dentry_cnt; + return 0; +} + +static long ttools_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + int ret = -EINVAL; + void __user *argp = (void __user *)arg; + struct ttools_fd_ref fd_ref; + + switch (ioctl) { + case TTOOLS_PTRACE_PROTECT: + ret = ttools_ptrace_protect_task(current->group_leader); + break; + case TTOOLS_PTRACE_UNPROTECT: + ret = ttools_ptrace_unprotect_task(current->group_leader); + break; + case TTOOLS_GET_FD_REFS_CNT: + if (copy_from_user(&fd_ref, argp, sizeof(struct ttools_fd_ref))) + return -EFAULT; + ret = ttools_get_fd_refs_cnt(&fd_ref); + if (ret) + return ret; + if (copy_to_user(argp, &fd_ref, sizeof(struct ttools_fd_ref))) + return -EFAULT; + break; + } + return ret; +} + +static struct file_operations ttools_chardev_ops = { + .unlocked_ioctl = ttools_dev_ioctl, + .compat_ioctl = ttools_dev_ioctl, + .llseek = noop_llseek, +}; + +static struct miscdevice ttools_dev = { + TTOOLS_MINOR, + "ttools", + &ttools_chardev_ops, + .mode = 0666, +}; + + +static void flush_icache_1(void *info) +{ + smp_mb(); +#ifdef CONFIG_X86 + sync_core(); +#endif +} + +static int ttools_init(void) +{ + int ret; + unsigned long addr; + + addr = kallsyms_lookup_name("anon_inode_inode"); + if (!addr) + return -ENODEV; + *((void**)&p_anon_inode_inode) = (void**)addr; + + ttools_chardev_ops.owner = THIS_MODULE; + ret = misc_register(&ttools_dev); + ptrace_pre_hook = ttools_ptrace_hook; + smp_wmb(); + smp_call_function(flush_icache_1, NULL, 1); + pr_info("ttools " TTOOLS_VER " loaded\n"); + return ret; +} + +static void ttools_exit(void) +{ + ptrace_pre_hook = NULL; + smp_wmb(); + smp_call_function(flush_icache_1, NULL, 1); + misc_deregister(&ttools_dev); + ttools_clean_task_list(); + pr_info("ttools " TTOOLS_VER " unloaded\n"); +} + +module_init(ttools_init); +module_exit(ttools_exit); 
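+/*
+ * illustrative user-space sketch (not part of this module), assuming
+ * the misc device is exposed as /dev/ttools:
+ *
+ *	int fd = open("/dev/ttools", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, TTOOLS_PTRACE_PROTECT) == 0)
+ *		;	// this process can no longer be ptrace'd
+ */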
+MODULE_DESCRIPTION("ttools " TTOOLS_VER " for " UTS_RELEASE); +MODULE_LICENSE("GPL"); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 2d6e93ab0478..e7e483cdbea6 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -336,6 +336,7 @@ static void put_probe_ref(void) static void blk_trace_cleanup(struct blk_trace *bt) { + synchronize_rcu(); blk_trace_free(bt); put_probe_ref(); } @@ -630,8 +631,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, static int __blk_trace_startstop(struct request_queue *q, int start) { int ret; - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (bt == NULL) return -EINVAL; @@ -741,8 +744,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) void blk_trace_shutdown(struct request_queue *q) { mutex_lock(&q->blk_trace_mutex); - - if (q->blk_trace) { + if (rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex))) { __blk_trace_startstop(q, 0); __blk_trace_remove(q); } @@ -754,8 +757,10 @@ void blk_trace_shutdown(struct request_queue *q) static union kernfs_node_id * blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + /* We don't use the 'bt' value here except as an optimization... */ + bt = rcu_dereference_protected(q->blk_trace, 1); if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; @@ -800,10 +805,14 @@ static void blk_add_trace_rq(struct request *rq, int error, unsigned int nr_bytes, u32 what, union kernfs_node_id *cgid) { - struct blk_trace *bt = rq->q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(rq->q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } if (blk_rq_is_passthrough(rq)) what |= BLK_TC_ACT(BLK_TC_PC); @@ -812,6 +821,7 @@ static void blk_add_trace_rq(struct request *rq, int error, __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), rq->cmd_flags, what, error, 0, NULL, cgid); + rcu_read_unlock(); } static void blk_add_trace_rq_insert(void *ignore, @@ -857,14 +867,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq, static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, what, error, 0, NULL, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); } static void blk_add_trace_bio_bounce(void *ignore, @@ -909,11 +924,14 @@ static void blk_add_trace_getrq(void *ignore, if (bio) blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, NULL, NULL); + rcu_read_unlock(); } } @@ -925,27 +943,35 @@ static void blk_add_trace_sleeprq(void *ignore, if (bio) blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, 0, 0, NULL, NULL); + rcu_read_unlock(); } } static void blk_add_trace_plug(void 
*ignore, struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); + rcu_read_unlock(); } static void blk_add_trace_unplug(void *ignore, struct request_queue *q, unsigned int depth, bool explicit) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(depth); u32 what; @@ -957,14 +983,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q, __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); } + rcu_read_unlock(); } static void blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(pdu); @@ -973,6 +1002,7 @@ static void blk_add_trace_split(void *ignore, BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), &rpdu, blk_trace_bio_get_cgid(q, bio)); } + rcu_read_unlock(); } /** @@ -992,11 +1022,15 @@ static void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(bio_dev(bio)); @@ -1005,6 +1039,7 @@ static void blk_add_trace_bio_remap(void *ignore, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); } /** @@ -1025,11 +1060,15 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); @@ -1038,6 +1077,7 @@ static void blk_add_trace_rq_remap(void *ignore, __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq_data_dir(rq), 0, BLK_TA_REMAP, 0, sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); + rcu_read_unlock(); } /** @@ -1055,14 +1095,19 @@ void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, BLK_TA_DRV_DATA, 0, len, data, blk_trace_request_get_cgid(q, rq)); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(blk_add_driver_data); @@ -1589,6 +1634,7 @@ static int blk_trace_remove_queue(struct request_queue *q) return -EINVAL; put_probe_ref(); + synchronize_rcu(); blk_trace_free(bt); return 0; } @@ -1750,6 +1796,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct hd_struct *p = dev_to_part(dev); struct request_queue *q; struct block_device *bdev; + struct blk_trace *bt; ssize_t ret = -ENXIO; bdev = bdget(part_devt(p)); @@ -1762,21 +1809,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct 
device *dev, mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - ret = sprintf(buf, "%u\n", !!q->blk_trace); + ret = sprintf(buf, "%u\n", !!bt); goto out_unlock_bdev; } - if (q->blk_trace == NULL) + if (bt == NULL) ret = sprintf(buf, "disabled\n"); else if (attr == &dev_attr_act_mask) - ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); + ret = blk_trace_mask2str(buf, bt->act_mask); else if (attr == &dev_attr_pid) - ret = sprintf(buf, "%u\n", q->blk_trace->pid); + ret = sprintf(buf, "%u\n", bt->pid); else if (attr == &dev_attr_start_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); + ret = sprintf(buf, "%llu\n", bt->start_lba); else if (attr == &dev_attr_end_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); + ret = sprintf(buf, "%llu\n", bt->end_lba); out_unlock_bdev: mutex_unlock(&q->blk_trace_mutex); @@ -1793,6 +1842,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct block_device *bdev; struct request_queue *q; struct hd_struct *p; + struct blk_trace *bt; u64 value; ssize_t ret = -EINVAL; @@ -1823,8 +1873,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - if (!!value == !!q->blk_trace) { + if (!!value == !!bt) { ret = 0; goto out_unlock_bdev; } @@ -1836,18 +1888,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, } ret = 0; - if (q->blk_trace == NULL) + if (bt == NULL) { ret = blk_trace_setup_queue(q, bdev); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); + } if (ret == 0) { if (attr == &dev_attr_act_mask) - q->blk_trace->act_mask = value; + bt->act_mask = value; else if (attr == &dev_attr_pid) - q->blk_trace->pid = value; + bt->pid = value; else if (attr == &dev_attr_start_lba) - q->blk_trace->start_lba = value; + bt->start_lba = value; else if (attr == &dev_attr_end_lba) - q->blk_trace->end_lba = value; + bt->end_lba = value; } out_unlock_bdev: diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 44bd08f2443b..89bdac61233d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -163,7 +163,7 @@ static const struct bpf_func_proto bpf_probe_read_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, +BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, u32, size) { /* @@ -186,10 +186,8 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; - if (!access_ok(unsafe_ptr, size)) - return -EPERM; - return probe_kernel_write(unsafe_ptr, src, size); + return probe_user_write(unsafe_ptr, src, size); } static const struct bpf_func_proto bpf_probe_write_user_proto = { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f296d89be757..9495d69460b2 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -524,8 +524,7 @@ static int function_stat_show(struct seq_file *m, void *v) } #ifdef CONFIG_FUNCTION_GRAPH_TRACER - avg = rec->time; - do_div(avg, rec->counter); + avg = div64_ul(rec->time, rec->counter); if (tracing_thresh && (avg < tracing_thresh)) goto out; #endif @@ -551,7 +550,8 @@ static int function_stat_show(struct seq_file *m, void *v) * Divide only 1000 for ns^2 -> us^2 conversion. 
* trace_print_graph_duration will divide 1000 again. */ - do_div(stddev, rec->counter * (rec->counter - 1) * 1000); + stddev = div64_ul(stddev, + rec->counter * (rec->counter - 1) * 1000); } trace_seq_init(&s); @@ -2494,14 +2494,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) } static int -ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) +ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) { int ret; if (unlikely(ftrace_disabled)) return 0; - ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); + ret = ftrace_init_nop(mod, rec); if (ret) { ftrace_bug_type = FTRACE_BUG_INIT; ftrace_bug(ret, rec); @@ -2943,7 +2943,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) * to the NOP instructions. */ if (!__is_defined(CC_USING_NOP_MCOUNT) && - !ftrace_code_disable(mod, p)) + !ftrace_nop_initialize(mod, p)) break; update_cnt++; @@ -5102,8 +5102,8 @@ static const struct file_operations ftrace_notrace_fops = { static DEFINE_MUTEX(graph_lock); -struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; -struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; +struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; +struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; enum graph_filter_type { GRAPH_FILTER_NOTRACE = 0, @@ -5378,8 +5378,15 @@ ftrace_graph_release(struct inode *inode, struct file *file) mutex_unlock(&graph_lock); - /* Wait till all users are no longer using the old hash */ - synchronize_rcu(); + /* + * We need to do a hard force of sched synchronization. + * This is because we use preempt_disable() to do RCU, but + * the function tracers can be called where RCU is not watching + * (like before user_exit()). We can not rely on the RCU + * infrastructure to do the synchronization, thus we must do it + * ourselves. + */ + schedule_on_each_cpu(ftrace_sync); free_ftrace_hash(old_hash); } @@ -6530,9 +6537,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) struct trace_array *tr = m->private; struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); - if (v == FTRACE_NO_PIDS) + if (v == FTRACE_NO_PIDS) { + (*pos)++; return NULL; - + } return trace_pid_next(pid_list, v, pos); } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 66358d66c933..4bf050fcfe3b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -11,6 +11,7 @@ #include <linux/trace_seq.h> #include <linux/spinlock.h> #include <linux/irq_work.h> +#include <linux/security.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include <linux/kthread.h> /* for self test */ @@ -5068,6 +5069,11 @@ static __init int test_ringbuffer(void) int cpu; int ret = 0; + if (security_locked_down(LOCKDOWN_TRACEFS)) { + pr_warning("Lockdown is enabled, skipping ring buffer tests\n"); + return 0; + } + pr_info("Running ring buffer tests...\n"); buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6a0ee9178365..c6ccaf6c62f7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1743,6 +1743,7 @@ static __init int init_trace_selftests(void) pr_info("Running postponed tracer tests:\n"); + tracing_selftest_running = true; list_for_each_entry_safe(p, n, &postponed_selftests, list) { /* This loop can take minutes when sanitizers are enabled, so * lets make sure we allow RCU processing.
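The blktrace hunks earlier in this patch all apply one idiom: q->blk_trace becomes an RCU-managed pointer, readers in the tracing hot path wrap the lookup in rcu_read_lock()/rcu_dereference(), holders of blk_trace_mutex use rcu_dereference_protected(), and teardown calls synchronize_rcu() before blk_trace_free(). The following is a minimal standalone sketch of that idiom, not code from the patch; the names my_trace, my_trace_ptr, my_mutex and consume() are hypothetical stand-ins.

/* Sketch of the RCU publish/read/teardown pattern applied to q->blk_trace. */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_trace { int enabled; };

static struct my_trace __rcu *my_trace_ptr;	/* updates serialized by my_mutex */
static DEFINE_MUTEX(my_mutex);

static void consume(struct my_trace *t) { (void)t; /* emit a trace record */ }

/* Hot path: may race with teardown, so stay inside an RCU read section. */
static void reader_hook(void)
{
	struct my_trace *t;

	rcu_read_lock();
	t = rcu_dereference(my_trace_ptr);
	if (t)
		consume(t);	/* t cannot be freed until rcu_read_unlock() */
	rcu_read_unlock();
}

/* Teardown: unpublish under the mutex, wait out readers, then free. */
static void teardown(void)
{
	struct my_trace *t;

	mutex_lock(&my_mutex);
	t = rcu_dereference_protected(my_trace_ptr,
				      lockdep_is_held(&my_mutex));
	RCU_INIT_POINTER(my_trace_ptr, NULL);
	mutex_unlock(&my_mutex);

	synchronize_rcu();	/* every reader_hook() critical section ends */
	kfree(t);
}

This is why blk_trace_cleanup() above gains a synchronize_rcu() before blk_trace_free(): a tracepoint probe may still be inside its read-side section while the trace is being torn down.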
@@ -1765,6 +1766,7 @@ static __init int init_trace_selftests(void) list_del(&p->list); kfree(p); } + tracing_selftest_running = false; out: mutex_unlock(&trace_types_lock); @@ -1804,6 +1806,12 @@ int __init register_tracer(struct tracer *type) return -1; } + if (security_locked_down(LOCKDOWN_TRACEFS)) { + pr_warning("Can not register tracer %s due to lockdown\n", + type->name); + return -EPERM; + } + mutex_lock(&trace_types_lock); tracing_selftest_running = true; @@ -4590,6 +4598,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) { + if ((mask == TRACE_ITER_RECORD_TGID) || + (mask == TRACE_ITER_RECORD_CMD)) + lockdep_assert_held(&event_mutex); + /* do nothing if flag is already set */ if (!!(tr->trace_flags & mask) == !!enabled) return 0; @@ -4609,7 +4621,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) if (mask == TRACE_ITER_RECORD_TGID) { if (!tgid_map) - tgid_map = kcalloc(PID_MAX_DEFAULT + 1, + tgid_map = kvcalloc(PID_MAX_DEFAULT + 1, sizeof(*tgid_map), GFP_KERNEL); if (!tgid_map) { @@ -4657,6 +4669,7 @@ static int trace_set_options(struct trace_array *tr, char *option) cmp += len; + mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = match_string(trace_options, -1, cmp); @@ -4667,6 +4680,7 @@ static int trace_set_options(struct trace_array *tr, char *option) ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); + mutex_unlock(&event_mutex); /* * If the first trailing whitespace is replaced with '\0' by strstrip, @@ -7972,9 +7986,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, if (val != 0 && val != 1) return -EINVAL; + mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); + mutex_unlock(&event_mutex); if (ret < 0) return ret; @@ -8639,6 +8655,11 @@ struct dentry *tracing_init_dentry(void) { struct trace_array *tr = &global_trace; + if (security_locked_down(LOCKDOWN_TRACEFS)) { + pr_warning("Tracing disabled due to lockdown\n"); + return ERR_PTR(-EPERM); + } + /* The top level trace array uses NULL as parent */ if (tr->dir) return NULL; @@ -9081,6 +9102,12 @@ __init static int tracer_alloc_buffers(void) int ring_buf_size; int ret = -ENOMEM; + + if (security_locked_down(LOCKDOWN_TRACEFS)) { + pr_warning("Tracing disabled due to lockdown\n"); + return -EPERM; + } + /* * Make sure we don't accidently add more trace options * than we have bits for. 
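The trace.c hunks above also establish a locking convention for the option flags: event_mutex is taken before trace_types_lock in trace_set_options() and trace_options_core_write(), while set_tracer_flag() asserts event_mutex for the RECORD_CMD/RECORD_TGID cases instead of having the callees in trace_events.c take it themselves. A rough sketch of that convention, with hypothetical names (outer_mutex standing in for event_mutex, inner_mutex for trace_types_lock):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(outer_mutex);
static DEFINE_MUTEX(inner_mutex);

/*
 * Callee: no longer self-locks; it documents and enforces the requirement
 * via lockdep, so a caller already holding outer_mutex cannot deadlock by
 * recursively acquiring it.
 */
static void update_records(bool enable)
{
	lockdep_assert_held(&outer_mutex);
	(void)enable;	/* walk the event files and flip RECORDED_* bits */
}

/* Caller: acquires both locks in the one fixed order used everywhere. */
static int set_option(unsigned int mask, int enabled)
{
	mutex_lock(&outer_mutex);
	mutex_lock(&inner_mutex);
	update_records(enabled);
	mutex_unlock(&inner_mutex);
	mutex_unlock(&outer_mutex);
	return 0;
}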
@@ -9245,6 +9272,11 @@ __init static int tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { + if (security_locked_down(LOCKDOWN_TRACEFS)) { + pr_warn("Can not set tracing clock due to lockdown\n"); + return -EPERM; + } + printk(KERN_WARNING "Unstable clock detected, switching default tracing clock to \"global\"\n" "If you want to keep using the local clock, then add:\n" diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index d685c61085c0..a3c29d5fcc61 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -932,22 +932,31 @@ extern void __trace_graph_return(struct trace_array *tr, unsigned long flags, int pc); #ifdef CONFIG_DYNAMIC_FTRACE -extern struct ftrace_hash *ftrace_graph_hash; -extern struct ftrace_hash *ftrace_graph_notrace_hash; +extern struct ftrace_hash __rcu *ftrace_graph_hash; +extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash; static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) { unsigned long addr = trace->func; int ret = 0; + struct ftrace_hash *hash; preempt_disable_notrace(); - if (ftrace_hash_empty(ftrace_graph_hash)) { + /* + * Have to open code "rcu_dereference_sched()" because the + * function graph tracer can be called when RCU is not + * "watching". + * Protected with schedule_on_each_cpu(ftrace_sync) + */ + hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); + + if (ftrace_hash_empty(hash)) { ret = 1; goto out; } - if (ftrace_lookup_ip(ftrace_graph_hash, addr)) { + if (ftrace_lookup_ip(hash, addr)) { /* * This needs to be cleared on the return functions @@ -983,10 +992,20 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) static inline int ftrace_graph_notrace_addr(unsigned long addr) { int ret = 0; + struct ftrace_hash *notrace_hash; preempt_disable_notrace(); - if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr)) + /* + * Have to open code "rcu_dereference_sched()" because the + * function graph tracer can be called when RCU is not + * "watching". 
+ * Protected with schedule_on_each_cpu(ftrace_sync) + */ + notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, + !preemptible()); + + if (ftrace_lookup_ip(notrace_hash, addr)) ret = 1; preempt_enable_notrace(); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index fba87d10f0c1..995061bb2dec 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable) struct trace_event_file *file; struct trace_array *tr; - mutex_lock(&event_mutex); + lockdep_assert_held(&event_mutex); + do_for_each_event_file(tr, file) { if (!(file->flags & EVENT_FILE_FL_ENABLED)) @@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable) clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); } } while_for_each_event_file(); - mutex_unlock(&event_mutex); } void trace_event_enable_tgid_record(bool enable) @@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable) struct trace_event_file *file; struct trace_array *tr; - mutex_lock(&event_mutex); + lockdep_assert_held(&event_mutex); + do_for_each_event_file(tr, file) { if (!(file->flags & EVENT_FILE_FL_ENABLED)) continue; @@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable) &file->flags); } } while_for_each_event_file(); - mutex_unlock(&event_mutex); } static int __ftrace_event_enable_disable(struct trace_event_file *file, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index c9a74f82b14a..bf44f6bbd0c3 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1662,7 +1662,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); return -EINVAL; fail_mem: - kfree(filter); + __free_filter(filter); /* If any call succeeded, we still need to sync */ if (!fail) tracepoint_synchronize_unregister(); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 7482a1466ebf..6495800fb92a 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -116,6 +116,7 @@ struct hist_field { struct ftrace_event_field *field; unsigned long flags; hist_field_fn_t fn; + unsigned int ref; unsigned int size; unsigned int offset; unsigned int is_signed; @@ -469,11 +470,12 @@ struct action_data { * When a histogram trigger is hit, the values of any * references to variables, including variables being passed * as parameters to synthetic events, are collected into a - * var_ref_vals array. This var_ref_idx is the index of the - * first param in the array to be passed to the synthetic - * event invocation. + * var_ref_vals array. This var_ref_idx array is an array of + * indices into the var_ref_vals array, one for each synthetic + * event param, and is passed to the synthetic event + * invocation. 
*/ - unsigned int var_ref_idx; + unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; struct synth_event *synth_event; bool use_trace_keyword; char *synth_event_name; @@ -809,6 +811,29 @@ static const char *synth_field_fmt(char *type) return fmt; } +static void print_synth_event_num_val(struct trace_seq *s, + char *print_fmt, char *name, + int size, u64 val, char *space) +{ + switch (size) { + case 1: + trace_seq_printf(s, print_fmt, name, (u8)val, space); + break; + + case 2: + trace_seq_printf(s, print_fmt, name, (u16)val, space); + break; + + case 4: + trace_seq_printf(s, print_fmt, name, (u32)val, space); + break; + + default: + trace_seq_printf(s, print_fmt, name, val, space); + break; + } +} + static enum print_line_t print_synth_event(struct trace_iterator *iter, int flags, struct trace_event *event) @@ -847,10 +872,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, } else { struct trace_print_flags __flags[] = { __def_gfpflag_names, {-1, NULL} }; + char *space = (i == se->n_fields - 1 ? "" : " "); - trace_seq_printf(s, print_fmt, se->fields[i]->name, - entry->fields[n_u64], - i == se->n_fields - 1 ? "" : " "); + print_synth_event_num_val(s, print_fmt, + se->fields[i]->name, + se->fields[i]->size, + entry->fields[n_u64], + space); if (strcmp(se->fields[i]->type, "gfp_t") == 0) { trace_seq_puts(s, " ("); @@ -874,14 +902,14 @@ static struct trace_event_functions synth_event_funcs = { static notrace void trace_event_raw_event_synth(void *__data, u64 *var_ref_vals, - unsigned int var_ref_idx) + unsigned int *var_ref_idx) { struct trace_event_file *trace_file = __data; struct synth_trace_event *entry; struct trace_event_buffer fbuffer; struct ring_buffer *buffer; struct synth_event *event; - unsigned int i, n_u64; + unsigned int i, n_u64, val_idx; int fields_size = 0; event = trace_file->event_call->data; @@ -904,14 +932,34 @@ static notrace void trace_event_raw_event_synth(void *__data, goto out; for (i = 0, n_u64 = 0; i < event->n_fields; i++) { + val_idx = var_ref_idx[i]; if (event->fields[i]->is_string) { - char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i]; + char *str_val = (char *)(long)var_ref_vals[val_idx]; char *str_field = (char *)&entry->fields[n_u64]; strscpy(str_field, str_val, STR_VAR_LEN_MAX); n_u64 += STR_VAR_LEN_MAX / sizeof(u64); } else { - entry->fields[n_u64] = var_ref_vals[var_ref_idx + i]; + struct synth_field *field = event->fields[i]; + u64 val = var_ref_vals[val_idx]; + + switch (field->size) { + case 1: + *(u8 *)&entry->fields[n_u64] = (u8)val; + break; + + case 2: + *(u16 *)&entry->fields[n_u64] = (u16)val; + break; + + case 4: + *(u32 *)&entry->fields[n_u64] = (u32)val; + break; + + default: + entry->fields[n_u64] = val; + break; + } n_u64++; } } @@ -1093,10 +1141,10 @@ static struct tracepoint *alloc_synth_tracepoint(char *name) } typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, - unsigned int var_ref_idx); + unsigned int *var_ref_idx); static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, - unsigned int var_ref_idx) + unsigned int *var_ref_idx) { struct tracepoint *tp = event->tp; @@ -1747,11 +1795,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data, struct event_trigger_data *test; struct hist_field *hist_field; + lockdep_assert_held(&event_mutex); + hist_field = find_var_field(hist_data, var_name); if (hist_field) return hist_field; - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if 
(test->cmd_ops->trigger_type == ETT_EVENT_HIST) { test_data = test->private_data; hist_field = find_var_field(test_data, var_name); @@ -1801,7 +1851,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file, struct event_trigger_data *test; struct hist_field *hist_field; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { test_data = test->private_data; hist_field = find_var_field(test_data, var_name); @@ -2011,12 +2063,6 @@ static int parse_map_size(char *str) unsigned long size, map_bits; int ret; - strsep(&str, "="); - if (!str) { - ret = -EINVAL; - goto out; - } - ret = kstrtoul(str, 0, &size); if (ret) goto out; @@ -2076,25 +2122,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs) static int parse_assignment(struct trace_array *tr, char *str, struct hist_trigger_attrs *attrs) { - int ret = 0; + int len, ret = 0; - if ((str_has_prefix(str, "key=")) || - (str_has_prefix(str, "keys="))) { - attrs->keys_str = kstrdup(str, GFP_KERNEL); + if ((len = str_has_prefix(str, "key=")) || + (len = str_has_prefix(str, "keys="))) { + attrs->keys_str = kstrdup(str + len, GFP_KERNEL); if (!attrs->keys_str) { ret = -ENOMEM; goto out; } - } else if ((str_has_prefix(str, "val=")) || - (str_has_prefix(str, "vals=")) || - (str_has_prefix(str, "values="))) { - attrs->vals_str = kstrdup(str, GFP_KERNEL); + } else if ((len = str_has_prefix(str, "val=")) || + (len = str_has_prefix(str, "vals=")) || + (len = str_has_prefix(str, "values="))) { + attrs->vals_str = kstrdup(str + len, GFP_KERNEL); if (!attrs->vals_str) { ret = -ENOMEM; goto out; } - } else if (str_has_prefix(str, "sort=")) { - attrs->sort_key_str = kstrdup(str, GFP_KERNEL); + } else if ((len = str_has_prefix(str, "sort="))) { + attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL); if (!attrs->sort_key_str) { ret = -ENOMEM; goto out; @@ -2105,12 +2151,8 @@ static int parse_assignment(struct trace_array *tr, ret = -ENOMEM; goto out; } - } else if (str_has_prefix(str, "clock=")) { - strsep(&str, "="); - if (!str) { - ret = -EINVAL; - goto out; - } + } else if ((len = str_has_prefix(str, "clock="))) { + str += len; str = strstrip(str); attrs->clock = kstrdup(str, GFP_KERNEL); @@ -2118,8 +2160,8 @@ static int parse_assignment(struct trace_array *tr, ret = -ENOMEM; goto out; } - } else if (str_has_prefix(str, "size=")) { - int map_bits = parse_map_size(str); + } else if ((len = str_has_prefix(str, "size="))) { + int map_bits = parse_map_size(str + len); if (map_bits < 0) { ret = map_bits; @@ -2159,8 +2201,14 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) while (trigger_str) { char *str = strsep(&trigger_str, ":"); + char *rhs; - if (strchr(str, '=')) { + rhs = strchr(str, '='); + if (rhs) { + if (!strlen(++rhs)) { + ret = -EINVAL; + goto free; + } ret = parse_assignment(tr, str, attrs); if (ret) goto free; @@ -2404,8 +2452,16 @@ static int contains_operator(char *str) return field_op; } +static void get_hist_field(struct hist_field *hist_field) +{ + hist_field->ref++; +} + static void __destroy_hist_field(struct hist_field *hist_field) { + if (--hist_field->ref > 1) + return; + kfree(hist_field->var.name); kfree(hist_field->name); kfree(hist_field->type); @@ -2447,6 +2503,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (!hist_field) return NULL; + hist_field->ref = 1; + hist_field->hist_data = hist_data; 
if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) @@ -2621,6 +2679,22 @@ static int init_var_ref(struct hist_field *ref_field, goto out; } +static int find_var_ref_idx(struct hist_trigger_data *hist_data, + struct hist_field *var_field) +{ + struct hist_field *ref_field; + int i; + + for (i = 0; i < hist_data->n_var_refs; i++) { + ref_field = hist_data->var_refs[i]; + if (ref_field->var.idx == var_field->var.idx && + ref_field->var.hist_data == var_field->hist_data) + return i; + } + + return -ENOENT; +} + /** * create_var_ref - Create a variable reference and attach it to trigger * @hist_data: The trigger that will be referencing the variable @@ -2642,6 +2716,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, { unsigned long flags = HIST_FIELD_FL_VAR_REF; struct hist_field *ref_field; + int i; + + /* Check if the variable already exists */ + for (i = 0; i < hist_data->n_var_refs; i++) { + ref_field = hist_data->var_refs[i]; + if (ref_field->var.idx == var_field->var.idx && + ref_field->var.hist_data == var_field->hist_data) { + get_hist_field(ref_field); + return ref_field; + } + } ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); if (ref_field) { @@ -3096,7 +3181,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data, { struct event_trigger_data *test; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (test->private_data == hist_data) return test->filter_str; @@ -3147,9 +3234,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data, struct event_trigger_data *test; unsigned int n_keys; + lockdep_assert_held(&event_mutex); + n_keys = target_hist_data->n_fields - target_hist_data->n_vals; - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { hist_data = test->private_data; @@ -4179,11 +4268,11 @@ static int trace_action_create(struct hist_trigger_data *hist_data, struct trace_array *tr = hist_data->event_file->tr; char *event_name, *param, *system = NULL; struct hist_field *hist_field, *var_ref; - unsigned int i, var_ref_idx; + unsigned int i; unsigned int field_pos = 0; struct synth_event *event; char *synth_event_name; - int ret = 0; + int var_ref_idx, ret = 0; lockdep_assert_held(&event_mutex); @@ -4200,8 +4289,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data, event->ref++; - var_ref_idx = hist_data->n_var_refs; - for (i = 0; i < data->n_params; i++) { char *p; @@ -4250,6 +4337,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data, goto err; } + var_ref_idx = find_var_ref_idx(hist_data, var_ref); + if (WARN_ON(var_ref_idx < 0)) { + ret = var_ref_idx; + goto err; + } + + data->var_ref_idx[i] = var_ref_idx; + field_pos++; kfree(p); continue; @@ -4268,7 +4363,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data, } data->synth_event = event; - data->var_ref_idx = var_ref_idx; out: return ret; err: @@ -4487,10 +4581,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data, if (!fields_str) goto out; - strsep(&fields_str, "="); - if (!fields_str) - goto out; - for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && j < TRACING_MAP_VALS_MAX; i++) { field_str = strsep(&fields_str, ","); @@ -4585,10 +4675,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data, 
if (!fields_str) goto out; - strsep(&fields_str, "="); - if (!fields_str) - goto out; - for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { field_str = strsep(&fields_str, ","); if (!field_str) @@ -4746,12 +4832,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) if (!fields_str) goto out; - strsep(&fields_str, "="); - if (!fields_str) { - ret = -EINVAL; - goto out; - } - for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { struct hist_field *hist_field; char *field_str, *field_name; @@ -4760,9 +4840,11 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) sort_key = &hist_data->sort_keys[i]; field_str = strsep(&fields_str, ","); - if (!field_str) { - if (i == 0) - ret = -EINVAL; + if (!field_str) + break; + + if (!*field_str) { + ret = -EINVAL; break; } @@ -4772,7 +4854,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) } field_name = strsep(&field_str, "."); - if (!field_name) { + if (!field_name || !*field_name) { ret = -EINVAL; break; } @@ -5509,7 +5591,7 @@ static int hist_show(struct seq_file *m, void *v) goto out_unlock; } - list_for_each_entry_rcu(data, &event_file->triggers, list) { + list_for_each_entry(data, &event_file->triggers, list) { if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) hist_trigger_show(m, data, n++); } @@ -5902,7 +5984,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, if (hist_data->attrs->name && !named_data) goto new; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (!hist_trigger_match(data, test, named_data, false)) continue; @@ -5986,10 +6070,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data, struct event_trigger_data *test, *named_data = NULL; bool match = false; + lockdep_assert_held(&event_mutex); + if (hist_data->attrs->name) named_data = find_named_trigger(hist_data->attrs->name); - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (hist_trigger_match(data, test, named_data, false)) { match = true; @@ -6007,10 +6093,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data, struct hist_trigger_data *hist_data = data->private_data; struct event_trigger_data *test, *named_data = NULL; + lockdep_assert_held(&event_mutex); + if (hist_data->attrs->name) named_data = find_named_trigger(hist_data->attrs->name); - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (!hist_trigger_match(data, test, named_data, false)) continue; @@ -6032,10 +6120,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *test, *named_data = NULL; bool unregistered = false; + lockdep_assert_held(&event_mutex); + if (hist_data->attrs->name) named_data = find_named_trigger(hist_data->attrs->name); - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (!hist_trigger_match(data, test, named_data, false)) continue; @@ -6061,7 +6151,9 @@ static bool hist_file_check_refs(struct trace_event_file *file) struct hist_trigger_data *hist_data; struct event_trigger_data *test; - list_for_each_entry_rcu(test, &file->triggers, 
list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { hist_data = test->private_data; if (check_var_refs(hist_data)) @@ -6304,7 +6396,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec, struct enable_trigger_data *enable_data = data->private_data; struct event_trigger_data *test; - list_for_each_entry_rcu(test, &enable_data->file->triggers, list) { + list_for_each_entry_rcu(test, &enable_data->file->triggers, list, + lockdep_is_held(&event_mutex)) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (enable_data->enable) test->paused = false; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 2cd53ca21b51..287d77eae59b 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) { struct trace_event_file *event_file = event_file_data(m->private); - if (t == SHOW_AVAILABLE_TRIGGERS) + if (t == SHOW_AVAILABLE_TRIGGERS) { + (*pos)++; return NULL; - + } return seq_list_next(t, &event_file->triggers, pos); } @@ -501,7 +502,9 @@ void update_cond_flag(struct trace_event_file *file) struct event_trigger_data *data; bool set_cond = false; - list_for_each_entry_rcu(data, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(data, &file->triggers, list) { if (data->filter || event_command_post_trigger(data->cmd_ops) || event_command_needs_rec(data->cmd_ops)) { set_cond = true; @@ -536,7 +539,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *test; int ret = 0; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) { ret = -EEXIST; goto out; @@ -581,7 +586,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *data; bool unregistered = false; - list_for_each_entry_rcu(data, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(data, &file->triggers, list) { if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) { unregistered = true; list_del_rcu(&data->list); @@ -1497,7 +1504,9 @@ int event_enable_register_trigger(char *glob, struct event_trigger_data *test; int ret = 0; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { test_enable_data = test->private_data; if (test_enable_data && (test->cmd_ops->trigger_type == @@ -1537,7 +1546,9 @@ void event_enable_unregister_trigger(char *glob, struct event_trigger_data *data; bool unregistered = false; - list_for_each_entry_rcu(data, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(data, &file->triggers, list) { enable_data = data->private_data; if (enable_data && (data->cmd_ops->trigger_type == diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1552a95c743b..3f54dc2f6e1c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, INIT_HLIST_NODE(&tk->rp.kp.hlist); INIT_LIST_HEAD(&tk->rp.kp.list); - ret = trace_probe_init(&tk->tp, event, group); + ret = trace_probe_init(&tk->tp, event, group, 
false); if (ret < 0) goto error; @@ -435,11 +435,10 @@ static int disable_trace_kprobe(struct trace_event_call *call, #if defined(CONFIG_KPROBES_ON_FTRACE) && \ !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE) -static bool within_notrace_func(struct trace_kprobe *tk) +static bool __within_notrace_func(unsigned long addr) { - unsigned long offset, size, addr; + unsigned long offset, size; - addr = trace_kprobe_address(tk); if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset)) return false; @@ -452,6 +451,28 @@ static bool within_notrace_func(struct trace_kprobe *tk) */ return !ftrace_location_range(addr, addr + size - 1); } + +static bool within_notrace_func(struct trace_kprobe *tk) +{ + unsigned long addr = trace_kprobe_address(tk); + char symname[KSYM_NAME_LEN], *p; + + if (!__within_notrace_func(addr)) + return false; + + /* Check if the address is on a suffixed-symbol */ + if (!lookup_symbol_name(addr, symname)) { + p = strchr(symname, '.'); + if (!p) + return true; + *p = '\0'; + addr = (unsigned long)kprobe_lookup_name(symname, 0); + if (addr) + return __within_notrace_func(addr); + } + + return true; +} #else #define within_notrace_func(tk) (false) #endif diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 905b10af5d5c..ab8b6436d53f 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -876,7 +876,8 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, for (i = 0; i < tp->nr_args; i++) { parg = tp->args + i; if (parg->count) { - if (strcmp(parg->type->name, "string") == 0) + if ((strcmp(parg->type->name, "string") == 0) || + (strcmp(parg->type->name, "ustring") == 0)) fmt = ", __get_str(%s[%d])"; else fmt = ", REC->%s[%d]"; @@ -884,7 +885,8 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, pos += snprintf(buf + pos, LEN_OR_ZERO, fmt, parg->name, j); } else { - if (strcmp(parg->type->name, "string") == 0) + if ((strcmp(parg->type->name, "string") == 0) || + (strcmp(parg->type->name, "ustring") == 0)) fmt = ", __get_str(%s)"; else fmt = ", REC->%s"; @@ -984,15 +986,19 @@ void trace_probe_cleanup(struct trace_probe *tp) } int trace_probe_init(struct trace_probe *tp, const char *event, - const char *group) + const char *group, bool alloc_filter) { struct trace_event_call *call; + size_t size = sizeof(struct trace_probe_event); int ret = 0; if (!event || !group) return -EINVAL; - tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL); + if (alloc_filter) + size += sizeof(struct trace_uprobe_filter); + + tp->event = kzalloc(size, GFP_KERNEL); if (!tp->event) return -ENOMEM; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 4ee703728aec..a0ff9e200ef6 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -223,6 +223,12 @@ struct probe_arg { const struct fetch_type *type; /* Type of this argument */ }; +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + /* Event call and class holder */ struct trace_probe_event { unsigned int flags; /* For TP_FLAG_* */ @@ -230,6 +236,7 @@ struct trace_probe_event { struct trace_event_call call; struct list_head files; struct list_head probes; + struct trace_uprobe_filter filter[0]; }; struct trace_probe { @@ -322,7 +329,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp) } int trace_probe_init(struct trace_probe *tp, const char *event, - const char *group); + const char *group, bool alloc_filter); void
trace_probe_cleanup(struct trace_probe *tp); int trace_probe_append(struct trace_probe *tp, struct trace_probe *to); void trace_probe_unlink(struct trace_probe *tp); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index e288168661e1..e304196d7c28 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -89,8 +89,10 @@ static void tracing_sched_unregister(void) static void tracing_start_sched_switch(int ops) { - bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref); + bool sched_register; + mutex_lock(&sched_register_mutex); + sched_register = (!sched_cmdline_ref && !sched_tgid_ref); switch (ops) { case RECORD_CMDLINE: diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 5e43b9664eca..617e297f46dc 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr) if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_migrate_task\n"); - return; + goto fail_deprobe_sched_switch; } wakeup_reset(tr); @@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr) printk(KERN_ERR "failed to start wakeup tracer\n"); return; +fail_deprobe_sched_switch: + unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); fail_deprobe_wake_new: unregister_trace_sched_wakeup_new(probe_wakeup, NULL); fail_deprobe: diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4df9a209f7ca..c557f42a9397 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack) local_irq_restore(flags); } +/* Some archs may not define MCOUNT_INSN_SIZE */ +#ifndef MCOUNT_INSN_SIZE +# define MCOUNT_INSN_SIZE 0 +#endif + static void stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 9ab0a1a7ad5e..3c9c17feea33 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -282,18 +282,22 @@ static int tracing_stat_init(void) d_tracing = tracing_init_dentry(); if (IS_ERR(d_tracing)) - return 0; + return -ENODEV; stat_dir = tracefs_create_dir("trace_stat", d_tracing); - if (!stat_dir) + if (!stat_dir) { pr_warn("Could not create tracefs 'trace_stat' entry\n"); + return -ENOMEM; + } return 0; } static int init_stat_file(struct stat_session *session) { - if (!stat_dir && tracing_stat_init()) - return -ENODEV; + int ret; + + if (!stat_dir && (ret = tracing_stat_init())) + return ret; session->file = tracefs_create_file(session->ts->name, 0644, stat_dir, @@ -306,7 +310,7 @@ static int init_stat_file(struct stat_session *session) int register_stat_tracer(struct tracer_stat *trace) { struct stat_session *session, *node; - int ret; + int ret = -EINVAL; if (!trace) return -EINVAL; @@ -317,17 +321,15 @@ int register_stat_tracer(struct tracer_stat *trace) /* Already registered? 
*/ mutex_lock(&all_stat_sessions_mutex); list_for_each_entry(node, &all_stat_sessions, session_list) { - if (node->ts == trace) { - mutex_unlock(&all_stat_sessions_mutex); - return -EINVAL; - } + if (node->ts == trace) + goto out; } - mutex_unlock(&all_stat_sessions_mutex); + ret = -ENOMEM; /* Init the session */ session = kzalloc(sizeof(*session), GFP_KERNEL); if (!session) - return -ENOMEM; + goto out; session->ts = trace; INIT_LIST_HEAD(&session->session_list); @@ -336,15 +338,16 @@ int register_stat_tracer(struct tracer_stat *trace) ret = init_stat_file(session); if (ret) { destroy_session(session); - return ret; + goto out; } + ret = 0; /* Register */ - mutex_lock(&all_stat_sessions_mutex); list_add_tail(&session->session_list, &all_stat_sessions); + out: mutex_unlock(&all_stat_sessions_mutex); - return 0; + return ret; } void unregister_stat_tracer(struct tracer_stat *trace) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 352073d36585..2619bc5ed520 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -34,12 +34,6 @@ struct uprobe_trace_entry_head { #define DATAOF_TRACE_ENTRY(entry, is_return) \ ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return)) -struct trace_uprobe_filter { - rwlock_t rwlock; - int nr_systemwide; - struct list_head perf_events; -}; - static int trace_uprobe_create(int argc, const char **argv); static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev); static int trace_uprobe_release(struct dyn_event *ev); @@ -60,7 +54,6 @@ static struct dyn_event_operations trace_uprobe_ops = { */ struct trace_uprobe { struct dyn_event devent; - struct trace_uprobe_filter filter; struct uprobe_consumer consumer; struct path path; struct inode *inode; @@ -351,7 +344,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) if (!tu) return ERR_PTR(-ENOMEM); - ret = trace_probe_init(&tu->tp, event, group); + ret = trace_probe_init(&tu->tp, event, group, true); if (ret < 0) goto error; @@ -359,7 +352,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) tu->consumer.handler = uprobe_dispatcher; if (is_ret) tu->consumer.ret_handler = uretprobe_dispatcher; - init_trace_uprobe_filter(&tu->filter); + init_trace_uprobe_filter(tu->tp.event->filter); return tu; error: @@ -1067,13 +1060,14 @@ static void __probe_event_disable(struct trace_probe *tp) struct trace_probe *pos; struct trace_uprobe *tu; + tu = container_of(tp, struct trace_uprobe, tp); + WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { tu = container_of(pos, struct trace_uprobe, tp); if (!tu->inode) continue; - WARN_ON(!uprobe_filter_is_empty(&tu->filter)); - uprobe_unregister(tu->inode, tu->offset, &tu->consumer); tu->inode = NULL; } @@ -1108,7 +1102,7 @@ static int probe_event_enable(struct trace_event_call *call, } tu = container_of(tp, struct trace_uprobe, tp); - WARN_ON(!uprobe_filter_is_empty(&tu->filter)); + WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); if (enabled) return 0; @@ -1205,39 +1199,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) } static inline bool -uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) +trace_uprobe_filter_event(struct trace_uprobe_filter *filter, + struct perf_event *event) { - return __uprobe_perf_filter(&tu->filter, event->hw.target->mm); + return __uprobe_perf_filter(filter, event->hw.target->mm); } -static int uprobe_perf_close(struct 
trace_uprobe *tu, struct perf_event *event) +static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter, + struct perf_event *event) { bool done; - write_lock(&tu->filter.rwlock); + write_lock(&filter->rwlock); if (event->hw.target) { list_del(&event->hw.tp_list); - done = tu->filter.nr_systemwide || + done = filter->nr_systemwide || (event->hw.target->flags & PF_EXITING) || - uprobe_filter_event(tu, event); + trace_uprobe_filter_event(filter, event); } else { - tu->filter.nr_systemwide--; - done = tu->filter.nr_systemwide; + filter->nr_systemwide--; + done = filter->nr_systemwide; } - write_unlock(&tu->filter.rwlock); + write_unlock(&filter->rwlock); - if (!done) - return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); - - return 0; + return done; } -static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) +/* This returns true if the filter always covers target mm */ +static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter, + struct perf_event *event) { bool done; - int err; - write_lock(&tu->filter.rwlock); + write_lock(&filter->rwlock); if (event->hw.target) { /* * event->parent != NULL means copy_process(), we can avoid @@ -1247,28 +1241,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) * attr.enable_on_exec means that exec/mmap will install the * breakpoints we need. */ - done = tu->filter.nr_systemwide || + done = filter->nr_systemwide || event->parent || event->attr.enable_on_exec || - uprobe_filter_event(tu, event); - list_add(&event->hw.tp_list, &tu->filter.perf_events); + trace_uprobe_filter_event(filter, event); + list_add(&event->hw.tp_list, &filter->perf_events); } else { - done = tu->filter.nr_systemwide; - tu->filter.nr_systemwide++; + done = filter->nr_systemwide; + filter->nr_systemwide++; } - write_unlock(&tu->filter.rwlock); + write_unlock(&filter->rwlock); - err = 0; - if (!done) { - err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); - if (err) - uprobe_perf_close(tu, event); - } - return err; + return done; } -static int uprobe_perf_multi_call(struct trace_event_call *call, - struct perf_event *event, - int (*op)(struct trace_uprobe *tu, struct perf_event *event)) +static int uprobe_perf_close(struct trace_event_call *call, + struct perf_event *event) { struct trace_probe *pos, *tp; struct trace_uprobe *tu; @@ -1278,25 +1265,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call, if (WARN_ON_ONCE(!tp)) return -ENODEV; + tu = container_of(tp, struct trace_uprobe, tp); + if (trace_uprobe_filter_remove(tu->tp.event->filter, event)) + return 0; + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { tu = container_of(pos, struct trace_uprobe, tp); - ret = op(tu, event); + ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); if (ret) break; } return ret; } + +static int uprobe_perf_open(struct trace_event_call *call, + struct perf_event *event) +{ + struct trace_probe *pos, *tp; + struct trace_uprobe *tu; + int err = 0; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; + + tu = container_of(tp, struct trace_uprobe, tp); + if (trace_uprobe_filter_add(tu->tp.event->filter, event)) + return 0; + + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); + if (err) { + uprobe_perf_close(call, event); + break; + } + } + + return err; +} + static bool uprobe_perf_filter(struct uprobe_consumer *uc, enum uprobe_filter_ctx ctx, 
struct mm_struct *mm) { + struct trace_uprobe_filter *filter; struct trace_uprobe *tu; int ret; tu = container_of(uc, struct trace_uprobe, consumer); - read_lock(&tu->filter.rwlock); - ret = __uprobe_perf_filter(&tu->filter, mm); - read_unlock(&tu->filter.rwlock); + filter = tu->tp.event->filter; + + read_lock(&filter->rwlock); + ret = __uprobe_perf_filter(filter, mm); + read_unlock(&filter->rwlock); return ret; } @@ -1419,10 +1440,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, return 0; case TRACE_REG_PERF_OPEN: - return uprobe_perf_multi_call(event, data, uprobe_perf_open); + return uprobe_perf_open(event, data); case TRACE_REG_PERF_CLOSE: - return uprobe_perf_multi_call(event, data, uprobe_perf_close); + return uprobe_perf_close(event, data); #endif default: diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 9a1c22310323..9e31bfc818ff 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -148,8 +148,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b) #define DEFINE_TRACING_MAP_CMP_FN(type) \ static int tracing_map_cmp_##type(void *val_a, void *val_b) \ { \ - type a = *(type *)val_a; \ - type b = *(type *)val_b; \ + type a = (type)(*(u64 *)val_a); \ + type b = (type)(*(u64 *)val_b); \ \ return (a > b) ? 1 : ((a < b) ? -1 : 0); \ } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f41334ef0971..cbd3cf503c90 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -161,6 +161,8 @@ static void lockup_detector_update_enable(void) #ifdef CONFIG_SOFTLOCKUP_DETECTOR +#define SOFTLOCKUP_RESET ULONG_MAX + /* Global variables, exported for sysctl */ unsigned int __read_mostly softlockup_panic = CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; @@ -274,7 +276,7 @@ notrace void touch_softlockup_watchdog_sched(void) * Preemption can be enabled. It doesn't matter which CPU's timestamp * gets zeroed here, so use the raw_ operation. */ - raw_cpu_write(watchdog_touch_ts, 0); + raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET); } notrace void touch_softlockup_watchdog(void) @@ -298,14 +300,14 @@ void touch_all_softlockup_watchdogs(void) * the softlockup check. */ for_each_cpu(cpu, &watchdog_allowed_mask) - per_cpu(watchdog_touch_ts, cpu) = 0; + per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET; wq_watchdog_touch(-1); } void touch_softlockup_watchdog_sync(void) { __this_cpu_write(softlockup_touch_sync, true); - __this_cpu_write(watchdog_touch_ts, 0); + __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET); } static int is_softlockup(unsigned long touch_ts) @@ -383,7 +385,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) /* .. and repeat */ hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); - if (touch_ts == 0) { + if (touch_ts == SOFTLOCKUP_RESET) { if (unlikely(__this_cpu_read(softlockup_touch_sync))) { /* * If the time stamp was touched atomically diff --git a/kernel/workqueue.c b/kernel/workqueue.c index bc2e09a8ea61..1a0c224af6fb 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -425,7 +425,8 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); * ignored. 
*/ #define for_each_pwq(pwq, wq) \ - list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \ + list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \ + lockdep_is_held(&wq->mutex)) \ if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ else @@ -1416,14 +1417,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, return; rcu_read_lock(); retry: - if (req_cpu == WORK_CPU_UNBOUND) - cpu = wq_select_unbound_cpu(raw_smp_processor_id()); - /* pwq which will be used unless @work is executing elsewhere */ - if (!(wq->flags & WQ_UNBOUND)) - pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); - else + if (wq->flags & WQ_UNBOUND) { + if (req_cpu == WORK_CPU_UNBOUND) + cpu = wq_select_unbound_cpu(raw_smp_processor_id()); pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); + } else { + if (req_cpu == WORK_CPU_UNBOUND) + cpu = raw_smp_processor_id(); + pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); + } /* * If @work was previously on a different pool, it might still be @@ -2532,8 +2535,14 @@ repeat: */ if (need_to_create_worker(pool)) { spin_lock(&wq_mayday_lock); - get_pwq(pwq); - list_move_tail(&pwq->mayday_node, &wq->maydays); + /* + * Queue iff we aren't racing destruction + * and somebody else hasn't queued it already. + */ + if (wq->rescuer && list_empty(&pwq->mayday_node)) { + get_pwq(pwq); + list_add_tail(&pwq->mayday_node, &wq->maydays); + } spin_unlock(&wq_mayday_lock); } } @@ -4325,9 +4334,29 @@ void destroy_workqueue(struct workqueue_struct *wq) struct pool_workqueue *pwq; int node; + /* + * Remove it from sysfs first so that sanity check failure doesn't + * lead to sysfs name conflicts. + */ + workqueue_sysfs_unregister(wq); + /* drain it before proceeding with destruction */ drain_workqueue(wq); + /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ + if (wq->rescuer) { + struct worker *rescuer = wq->rescuer; + + /* this prevents new queueing */ + spin_lock_irq(&wq_mayday_lock); + wq->rescuer = NULL; + spin_unlock_irq(&wq_mayday_lock); + + /* rescuer will empty maydays list before exiting */ + kthread_stop(rescuer->task); + kfree(rescuer); + } + /* sanity checks */ mutex_lock(&wq->mutex); for_each_pwq(pwq, wq) { @@ -4359,11 +4388,6 @@ void destroy_workqueue(struct workqueue_struct *wq) list_del_rcu(&wq->list); mutex_unlock(&wq_pool_mutex); - workqueue_sysfs_unregister(wq); - - if (wq->rescuer) - kthread_stop(wq->rescuer->task); - if (!(wq->flags & WQ_UNBOUND)) { wq_unregister_lockdep(wq); /* @@ -4638,7 +4662,8 @@ static void show_pwq(struct pool_workqueue *pwq) pr_info(" pwq %d:", pool->id); pr_cont_pool_info(pool); - pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active, + pr_cont(" active=%d/%d refcnt=%d%s\n", + pwq->nr_active, pwq->max_active, pwq->refcnt, !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); hash_for_each(pool->busy_hash, bkt, worker, hentry) { diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 93d97f9b0157..f61d834e02fe 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -299,17 +299,6 @@ config HEADERS_INSTALL user-space program samples. It is also needed by some features such as uapi header sanity checks. -config HEADERS_CHECK - bool "Run sanity checks on uapi headers when building 'all'" - depends on HEADERS_INSTALL - help - This option will run basic sanity checks on uapi headers when - building the 'all' target, for example, ensure that they do not - attempt to include files which were not exported, etc. - - If you're making modifications to header files which are - relevant for userspace, say 'Y'. 
- config OPTIMIZE_INLINING def_bool y help diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 61261195f5b6..48054dbf1b51 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -132,14 +132,18 @@ static void fill_pool(void) struct debug_obj *obj; unsigned long flags; - if (likely(obj_pool_free >= debug_objects_pool_min_level)) + if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level)) return; /* * Reuse objs from the global free list; they will be reinitialized * when allocating. + * + * Both obj_nr_tofree and obj_pool_free are checked locklessly; the + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical + * sections. */ - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { + while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { raw_spin_lock_irqsave(&pool_lock, flags); /* * Recheck with the lock held as the worker thread might have @@ -148,9 +152,9 @@ static void fill_pool(void) while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); - obj_nr_tofree--; + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); hlist_add_head(&obj->node, &obj_pool); - obj_pool_free++; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); } @@ -158,7 +162,7 @@ static void fill_pool(void) if (unlikely(!obj_cache)) return; - while (obj_pool_free < debug_objects_pool_min_level) { + while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { struct debug_obj *new[ODEBUG_BATCH_SIZE]; int cnt; @@ -174,7 +178,7 @@ static void fill_pool(void) while (cnt) { hlist_add_head(&new[--cnt]->node, &obj_pool); debug_objects_allocated++; - obj_pool_free++; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); } @@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) obj = __alloc_object(&obj_pool); if (obj) { obj_pool_used++; - obj_pool_free--; + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); /* * Looking ahead, allocate one batch of debug objects and @@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) &percpu_pool->free_objs); percpu_pool->obj_free++; obj_pool_used++; - obj_pool_free--; + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); } } @@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work) obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); hlist_add_head(&obj->node, &obj_pool); - obj_pool_free++; - obj_nr_tofree--; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); return; @@ -324,7 +328,7 @@ free_objs: if (obj_nr_tofree) { hlist_move_list(&obj_to_free, &tofree); debug_objects_freed += obj_nr_tofree; - obj_nr_tofree = 0; + WRITE_ONCE(obj_nr_tofree, 0); } raw_spin_unlock_irqrestore(&pool_lock, flags); @@ -375,10 +379,10 @@ free_to_obj_pool: obj_pool_used--; if (work) { - obj_nr_tofree++; + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); hlist_add_head(&obj->node, &obj_to_free); if (lookahead_count) { - obj_nr_tofree += lookahead_count; + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count); obj_pool_used -= lookahead_count; while (lookahead_count) { hlist_add_head(&objs[--lookahead_count]->node, @@ -396,15 +400,15 @@ free_to_obj_pool: for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { obj = __alloc_object(&obj_pool); hlist_add_head(&obj->node, 
&obj_to_free); - obj_pool_free--; - obj_nr_tofree++; + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); } } } else { - obj_pool_free++; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); hlist_add_head(&obj->node, &obj_pool); if (lookahead_count) { - obj_pool_free += lookahead_count; + WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count); obj_pool_used -= lookahead_count; while (lookahead_count) { hlist_add_head(&objs[--lookahead_count]->node, @@ -423,7 +427,7 @@ free_to_obj_pool: static void free_object(struct debug_obj *obj) { __free_object(obj); - if (!obj_freeing && obj_nr_tofree) { + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { WRITE_ONCE(obj_freeing, true); schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); } @@ -982,7 +986,7 @@ repeat: debug_objects_maxchecked = objs_checked; /* Schedule work to actually kmem_cache_free() objects */ - if (!obj_freeing && obj_nr_tofree) { + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { WRITE_ONCE(obj_freeing, true); schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); } @@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v) seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); seq_printf(m, "warnings :%d\n", debug_objects_warnings); seq_printf(m, "fixups :%d\n", debug_objects_fixups); - seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free); + seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free); seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); - seq_printf(m, "on_free_list :%d\n", obj_nr_tofree); + seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree)); seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); seq_printf(m, "objs_freed :%d\n", debug_objects_freed); return 0; diff --git a/lib/find_bit.c b/lib/find_bit.c index 5c51eb45178a..4e68490fa703 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -149,18 +149,6 @@ EXPORT_SYMBOL(find_last_bit); #ifdef __BIG_ENDIAN -/* include/linux/byteorder does not support "unsigned long" type */ -static inline unsigned long ext2_swab(const unsigned long y) -{ -#if BITS_PER_LONG == 64 - return (unsigned long) __swab64((u64) y); -#elif BITS_PER_LONG == 32 - return (unsigned long) __swab32((u32) y); -#else -#error BITS_PER_LONG not defined -#endif -} - #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) static inline unsigned long _find_next_bit_le(const unsigned long *addr1, const unsigned long *addr2, unsigned long nbits, @@ -177,7 +165,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1, tmp ^= invert; /* Handle 1st word. 
diff --git a/lib/find_bit.c b/lib/find_bit.c index 5c51eb45178a..4e68490fa703 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -149,18 +149,6 @@ EXPORT_SYMBOL(find_last_bit); #ifdef __BIG_ENDIAN -/* include/linux/byteorder does not support "unsigned long" type */ -static inline unsigned long ext2_swab(const unsigned long y) -{ -#if BITS_PER_LONG == 64 - return (unsigned long) __swab64((u64) y); -#elif BITS_PER_LONG == 32 - return (unsigned long) __swab32((u32) y); -#else -#error BITS_PER_LONG not defined -#endif -} - #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) static inline unsigned long _find_next_bit_le(const unsigned long *addr1, const unsigned long *addr2, unsigned long nbits, @@ -177,7 +165,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1, tmp ^= invert; /* Handle 1st word. */ - tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start)); + tmp &= swab(BITMAP_FIRST_WORD_MASK(start)); start = round_down(start, BITS_PER_LONG); while (!tmp) { @@ -191,7 +179,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1, tmp ^= invert; } - return min(start + __ffs(ext2_swab(tmp)), nbits); + return min(start + __ffs(swab(tmp)), nbits); } #endif diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c index 9c485df1308f..f02e10fa6238 100644 --- a/lib/raid6/mktables.c +++ b/lib/raid6/mktables.c @@ -56,8 +56,8 @@ int main(int argc, char *argv[]) uint8_t v; uint8_t exptbl[256], invtbl[256]; - printf("#include <linux/export.h>\n"); printf("#include <linux/raid/pq.h>\n"); + printf("#include <linux/export.h>\n"); /* Compute multiplication table */ printf("\nconst u8 __attribute__((aligned(256)))\n" diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk index c6aa03631df8..0809805a7e23 100644 --- a/lib/raid6/unroll.awk +++ b/lib/raid6/unroll.awk @@ -13,7 +13,7 @@ BEGIN { for (i = 0; i < rep; ++i) { tmp = $0 gsub(/\$\$/, i, tmp) - gsub(/\$\#/, n, tmp) + gsub(/\$#/, n, tmp) gsub(/\$\*/, "$", tmp) print tmp } diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 969e5400a615..ee3ce1494568 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -667,8 +667,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, if (!sbq_wait->sbq) { sbq_wait->sbq = sbq; atomic_inc(&sbq->ws_active); + add_wait_queue(&ws->wait, &sbq_wait->wait); } - add_wait_queue(&ws->wait, &sbq_wait->wait); } EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c2cf2c311b7d..5813072bc589 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -311,7 +311,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, if (prv) table->nents = ++table->orig_nents; - return -ENOMEM; + return -ENOMEM; } sg_init_table(sg, alloc_size); diff --git a/lib/show_mem.c b/lib/show_mem.c index 1c26c14ffbb9..078fba98e402 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -42,3 +42,4 @@ void show_mem(unsigned int filter, nodemask_t *nodemask) printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); #endif } +EXPORT_SYMBOL(show_mem); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index ed717dd08ff3..81c69c08d1d1 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc) return true; if (stack_slabs[depot_index] == NULL) { stack_slabs[depot_index] = *prealloc; + *prealloc = NULL; } else { - stack_slabs[depot_index + 1] = *prealloc; + /* If this is the last depot slab, do not touch the next one. */ + if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) { + stack_slabs[depot_index + 1] = *prealloc; + *prealloc = NULL; + } /* * This smp_store_release pairs with smp_load_acquire() from * |next_slab_inited| above and in stack_depot_save(). */ smp_store_release(&next_slab_inited, 1); } - *prealloc = NULL; return true; }
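The stackdepot hunk above closes an out-of-bounds write: init_stack_slab() unconditionally parked the preallocated buffer in stack_slabs[depot_index + 1], which overruns the array once depot_index reaches the last slot. A small sketch of the corrected idiom, with illustrative names (MAX_SLABS, slabs, slab_index, park) rather than the kernel's:

#include <stddef.h>
#include <stdbool.h>

#define MAX_SLABS 8

static void *slabs[MAX_SLABS];
static int slab_index;

/* Park a caller-preallocated buffer in the current slot or, failing
 * that, in the next one, but only if a next slot actually exists. */
static bool park(void **prealloc)
{
        if (slabs[slab_index] == NULL) {
                slabs[slab_index] = *prealloc;
                *prealloc = NULL;               /* consumed */
        } else if (slab_index + 1 < MAX_SLABS) {
                slabs[slab_index + 1] = *prealloc;
                *prealloc = NULL;
        }
        /* Otherwise *prealloc stays with the caller, mirroring the fixed
         * init_stack_slab(): better to drop a spare buffer than to write
         * past the end of the array. */
        return true;
}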
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index dccb95af6003..706020b06617 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long res = 0; - /* - * Truncate 'max' to the user-specified limit, so that - * we only have one limit we need to check in the loop - */ - if (max > count) - max = count; - if (IS_UNALIGNED(src, dst)) goto byte_at_a_time; @@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count) unsigned long max = max_addr - src_addr; long retval; + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + kasan_check_write(dst, count); check_object_size(dst, count, false); if (user_access_begin(src, max)) { diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 6c0005d5dd5c..41670d4a5816 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long align, res = 0; unsigned long c; - /* - * Truncate 'max' to the user-specified limit, so that - * we only have one limit we need to check in the loop - */ - if (max > count) - max = count; - /* * Do everything aligned. But that means that we * need to also expand the maximum.. @@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count) unsigned long max = max_addr - src_addr; long retval; + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + if (user_access_begin(str, max)) { retval = do_strnlen_user(str, count, max); user_access_end(); diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 49cc4d570a40..bd3d9ef7d39e 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -157,6 +157,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void) if (!ptr1 || !ptr2) { pr_err("Allocation failed\n"); kfree(ptr1); + kfree(ptr2); return; } diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 7df4f7f395bf..8c7d7a8468b8 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -2,6 +2,7 @@ /* * test_xarray.c: Test the XArray API * Copyright (c) 2017-2018 Microsoft Corporation + * Copyright (c) 2019-2020 Oracle * Author: Matthew Wilcox */ @@ -11,6 +12,9 @@ static unsigned int tests_run; static unsigned int tests_passed; +static const unsigned int order_limit = + IS_ENABLED(CONFIG_XARRAY_MULTI) ?
BITS_PER_LONG : 1; + #ifndef XA_DEBUG # ifdef __KERNEL__ void xa_dump(const struct xarray *xa) { } @@ -902,28 +906,34 @@ static noinline void check_store_iter(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_find(struct xarray *xa) +static noinline void check_multi_find_1(struct xarray *xa, unsigned order) { #ifdef CONFIG_XARRAY_MULTI + unsigned long multi = 3 << order; + unsigned long next = 4 << order; unsigned long index; - xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL); - XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL); + xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL); + XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL); index = 0; XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != - xa_mk_value(12)); - XA_BUG_ON(xa, index != 12); - index = 13; + xa_mk_value(multi)); + XA_BUG_ON(xa, index != multi); + index = multi + 1; XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != - xa_mk_value(12)); - XA_BUG_ON(xa, (index < 12) || (index >= 16)); + xa_mk_value(multi)); + XA_BUG_ON(xa, (index < multi) || (index >= next)); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != - xa_mk_value(16)); - XA_BUG_ON(xa, index != 16); + xa_mk_value(next)); + XA_BUG_ON(xa, index != next); + XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); + XA_BUG_ON(xa, index != next); - xa_erase_index(xa, 12); - xa_erase_index(xa, 16); + xa_erase_index(xa, multi); + xa_erase_index(xa, next); + xa_erase_index(xa, next + 1); XA_BUG_ON(xa, !xa_empty(xa)); #endif } @@ -952,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa) } } +static noinline void check_multi_find_3(struct xarray *xa) +{ + unsigned int order; + + for (order = 5; order < order_limit; order++) { + unsigned long index = 1UL << (order - 5); + + XA_BUG_ON(xa, !xa_empty(xa)); + xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); + XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); + xa_erase_index(xa, 0); + } +} + static noinline void check_find_1(struct xarray *xa) { unsigned long i, j, k; @@ -1046,13 +1070,35 @@ static noinline void check_find_3(struct xarray *xa) xa_destroy(xa); } +static noinline void check_find_4(struct xarray *xa) +{ + unsigned long index = 0; + void *entry; + + xa_store_index(xa, ULONG_MAX, GFP_KERNEL); + + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); + XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX)); + + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); + XA_BUG_ON(xa, entry); + + xa_erase_index(xa, ULONG_MAX); +} + static noinline void check_find(struct xarray *xa) { + unsigned i; + check_find_1(xa); check_find_2(xa); check_find_3(xa); - check_multi_find(xa); + check_find_4(xa); + + for (i = 2; i < 10; i++) + check_multi_find_1(xa, i); check_multi_find_2(xa); + check_multi_find_3(xa); } /* See find_swap_entry() in mm/shmem.c */ @@ -1132,6 +1178,27 @@ static noinline void check_move_tiny(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_move_max(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + xa_store_index(xa, ULONG_MAX, GFP_KERNEL); + rcu_read_lock(); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); + rcu_read_unlock(); + + xas_set(&xas, 0); + rcu_read_lock(); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); + xas_pause(&xas); + XA_BUG_ON(xa, xas_find(&xas, 
ULONG_MAX) != NULL); + rcu_read_unlock(); + + xa_erase_index(xa, ULONG_MAX); + XA_BUG_ON(xa, !xa_empty(xa)); +} + static noinline void check_move_small(struct xarray *xa, unsigned long idx) { XA_STATE(xas, xa, 0); @@ -1240,6 +1307,7 @@ static noinline void check_move(struct xarray *xa) xa_destroy(xa); check_move_tiny(xa); + check_move_max(xa); for (i = 0; i < 16; i++) check_move_small(xa, 1UL << i); diff --git a/lib/ubsan.c b/lib/ubsan.c index e7d31735950d..f007a406f89c 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, } } -static DEFINE_SPINLOCK(report_lock); - -static void ubsan_prologue(struct source_location *location, - unsigned long *flags) +static void ubsan_prologue(struct source_location *location) { current->in_ubsan++; - spin_lock_irqsave(&report_lock, *flags); pr_err("========================================" "========================================\n"); print_source_location("UBSAN: Undefined behaviour in", location); } -static void ubsan_epilogue(unsigned long *flags) +static void ubsan_epilogue(void) { dump_stack(); pr_err("========================================" "========================================\n"); - spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; } @@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, { struct type_descriptor *type = data->type; - unsigned long flags; char lhs_val_str[VALUE_LENGTH]; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); @@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, rhs_val_str, type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } void __ubsan_handle_add_overflow(struct overflow_data *data, @@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, void *old_val) { - unsigned long flags; char old_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); pr_err("negation of %s cannot be represented in type %s:\n", old_val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_negate_overflow); @@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); void __ubsan_handle_divrem_overflow(struct overflow_data *data, void *lhs, void *rhs) { - unsigned long flags; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); @@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, else pr_err("division by zero\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); static void handle_null_ptr_deref(struct type_mismatch_data_common *data) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s null pointer of type %s\n", type_check_kinds[data->type_check_kind], data->type->type_name); - 
ubsan_epilogue(&flags); + ubsan_epilogue(); } static void handle_misaligned_access(struct type_mismatch_data_common *data, unsigned long ptr) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s misaligned address %p for type %s\n", type_check_kinds[data->type_check_kind], (void *)ptr, data->type->type_name); pr_err("which requires %ld byte alignment\n", data->alignment); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void handle_object_size_mismatch(struct type_mismatch_data_common *data, unsigned long ptr) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s address %p with insufficient space\n", type_check_kinds[data->type_check_kind], (void *) ptr); pr_err("for an object of type %s\n", data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, @@ -351,34 +338,33 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) { - unsigned long flags; char index_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(index_str, sizeof(index_str), data->index_type, index); pr_err("index %s is out of range for type %s\n", index_str, data->array_type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, void *lhs, void *rhs) { - unsigned long flags; struct type_descriptor *rhs_type = data->rhs_type; struct type_descriptor *lhs_type = data->lhs_type; char rhs_str[VALUE_LENGTH]; char lhs_str[VALUE_LENGTH]; + unsigned long ua_flags = user_access_save(); if (suppress_report(&data->location)) - return; + goto out; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); @@ -401,18 +387,18 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, lhs_str, rhs_str, lhs_type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); +out: + user_access_restore(ua_flags); } EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) { - unsigned long flags; - - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); pr_err("calling __builtin_unreachable()\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); panic("can't return from __builtin_unreachable()"); } EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); @@ -420,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, void *val) { - unsigned long flags; char val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(val_str, sizeof(val_str), data->type, val); pr_err("load of value %s is not a valid value for type %s\n", val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); diff --git a/lib/xarray.c b/lib/xarray.c index 1237c213f52b..acd1fad2e862 100644 --- 
a/lib/xarray.c +++ b/lib/xarray.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0+ /* * XArray implementation - * Copyright (c) 2017 Microsoft Corporation + * Copyright (c) 2017-2018 Microsoft Corporation + * Copyright (c) 2018-2020 Oracle * Author: Matthew Wilcox */ @@ -967,6 +968,7 @@ void xas_pause(struct xa_state *xas) if (xas_invalid(xas)) return; + xas->xa_node = XAS_RESTART; if (node) { unsigned int offset = xas->xa_offset; while (++offset < XA_CHUNK_SIZE) { @@ -974,10 +976,11 @@ void xas_pause(struct xa_state *xas) break; } xas->xa_index += (offset - xas->xa_offset) << node->shift; + if (xas->xa_index == 0) + xas->xa_node = XAS_BOUNDS; } else { xas->xa_index++; } - xas->xa_node = XAS_RESTART; } EXPORT_SYMBOL_GPL(xas_pause); @@ -1079,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max) { void *entry; - if (xas_error(xas)) + if (xas_error(xas) || xas->xa_node == XAS_BOUNDS) return NULL; + if (xas->xa_index > max) + return set_bounds(xas); if (!xas->xa_node) { xas->xa_index = 1; return set_bounds(xas); - } else if (xas_top(xas->xa_node)) { + } else if (xas->xa_node == XAS_RESTART) { entry = xas_load(xas); if (entry || xas_not_node(xas->xa_node)) return entry; @@ -1150,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) if (xas_error(xas)) return NULL; + if (xas->xa_index > max) + goto max; if (!xas->xa_node) { xas->xa_index = 1; @@ -1824,6 +1831,18 @@ void *xa_find(struct xarray *xa, unsigned long *indexp, } EXPORT_SYMBOL(xa_find); +static bool xas_sibling(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + unsigned long mask; + + if (!node) + return false; + mask = (XA_CHUNK_SIZE << node->shift) - 1; + return (xas->xa_index & mask) > + ((unsigned long)xas->xa_offset << node->shift); +} + /** * xa_find_after() - Search the XArray for a present entry. * @xa: XArray. @@ -1847,21 +1866,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp, XA_STATE(xas, xa, *indexp + 1); void *entry; + if (xas.xa_index == 0) + return NULL; + rcu_read_lock(); for (;;) { if ((__force unsigned int)filter < XA_MAX_MARKS) entry = xas_find_marked(&xas, max, filter); else entry = xas_find(&xas, max); - if (xas.xa_node == XAS_BOUNDS) + + if (xas_invalid(&xas)) break; - if (xas.xa_shift) { - if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) - continue; - } else { - if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) - continue; - } + if (xas_sibling(&xas)) + continue; if (!xas_retry(&xas, entry)) break; }
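The xarray changes above tighten two iteration corner cases: xas_pause() now parks the state at XAS_BOUNDS when advancing wraps the index past ULONG_MAX, and xa_find_after() uses the new xas_sibling() helper so a multi-index entry is not reported a second time through one of its sibling slots. A usage sketch of the interface being fixed, assuming kernel context (walk_after() is an illustrative name):

#include <linux/printk.h>
#include <linux/xarray.h>

/* Report every present entry strictly after 'index'. xa_find_after()
 * advances 'index' to each hit, and with the fixes above the loop also
 * terminates correctly when an entry is stored at ULONG_MAX and when an
 * entry spans multiple indices. */
static void walk_after(struct xarray *xa, unsigned long index)
{
        void *entry;

        while ((entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)))
                pr_info("entry %p at index %lu\n", entry, index);
}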
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index c360f6a6c844..62f05f605fb5 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { EXPORT_SYMBOL_GPL(noop_backing_dev_info); static struct class *bdi_class; +const char *bdi_unknown_name = "(unknown)"; /* * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU diff --git a/mm/debug.c b/mm/debug.c index 0461df1207cb..6a52316af839 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -47,6 +47,7 @@ void __dump_page(struct page *page, const char *reason) struct address_space *mapping; bool page_poisoned = PagePoisoned(page); int mapcount; + char *type = ""; /* * If struct page is poisoned don't access Page*() functions as that @@ -78,9 +79,9 @@ void __dump_page(struct page *page, const char *reason) page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); if (PageKsm(page)) - pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags); + type = "ksm "; else if (PageAnon(page)) - pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags); + type = "anon "; else if (mapping) { if (mapping->host && mapping->host->i_dentry.first) { struct dentry *dentry; @@ -88,10 +89,11 @@ void __dump_page(struct page *page, const char *reason) pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry); } else pr_warn("%ps\n", mapping->a_ops); - pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags); } BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); + pr_warn("%sflags: %#lx(%pGp)\n", type, page->flags, &page->flags); + hex_only: print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, diff --git a/mm/filemap.c b/mm/filemap.c index 85b7d087eb45..5376c23c9fc1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -933,12 +933,35 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, } EXPORT_SYMBOL(add_to_page_cache_locked); +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run __set_page_locked() against it. + */ +int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + if (unlikely(vm_pagecache_limit_pages) && pagecache_over_limit() > 0) + shrink_page_cache(gfp_mask, page); + + __SetPageLocked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + __ClearPageLocked(page); + return error; +} +EXPORT_SYMBOL(add_to_page_cache); + int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { void *shadow = NULL; int ret; + if (unlikely(vm_pagecache_limit_pages) && pagecache_over_limit() > 0) + shrink_page_cache(gfp_mask, page); + __SetPageLocked(page); ret = __add_to_page_cache_locked(page, mapping, offset, gfp_mask, &shadow); @@ -2329,27 +2352,6 @@ EXPORT_SYMBOL(generic_file_read_iter); #ifdef CONFIG_MMU #define MMAP_LOTSAMISS (100) -static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, - struct file *fpin) -{ - int flags = vmf->flags; - - if (fpin) - return fpin; - - /* - * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or - * anything, so we only pin the file and drop the mmap_sem if only - * FAULT_FLAG_ALLOW_RETRY is set. - */ - if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) == - FAULT_FLAG_ALLOW_RETRY) { - fpin = get_file(vmf->vma->vm_file); - up_read(&vmf->vma->vm_mm->mmap_sem); - } - return fpin; -} - /* * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem * @vmf - the vm_fault for this fault.
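maybe_unlock_mmap_for_io() disappears from filemap.c here because it moves to mm/internal.h (see the mm/internal.h hunk below), so other fault paths such as fault_dirty_shared_page() later in this series can share it. The caller-side pattern it enables, sketched under the assumption of kernel fault-handler context (do_blocking_fault() is an illustrative name):

#include <linux/fs.h>
#include <linux/mm.h>
#include "internal.h"   /* provides maybe_unlock_mmap_for_io() after this patch */

static vm_fault_t do_blocking_fault(struct vm_fault *vmf)
{
        struct file *fpin = NULL;

        /* Pin the file and drop mmap_sem if the fault flags allow it,
         * so blocking I/O does not stall the whole address space. */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);

        /* ... readahead, page-lock waits, writeback throttling ... */

        if (fpin) {
                /* mmap_sem was dropped: the fault must be retried. */
                fput(fpin);
                return VM_FAULT_RETRY;
        }
        return 0;
}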
diff --git a/mm/gup.c b/mm/gup.c index 8f236a335ae9..745b4036cdfd 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2401,7 +2401,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, unsigned long addr, len, end; int nr = 0, ret = 0; - if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM))) + if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | + FOLL_FORCE))) return -EINVAL; start = untagged_addr(start) & PAGE_MASK; diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c index 7dd602d7f8db..ad9d5b1c4473 100644 --- a/mm/gup_benchmark.c +++ b/mm/gup_benchmark.c @@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd, unsigned long i, nr_pages, addr, next; int nr; struct page **pages; + int ret = 0; if (gup->size > ULONG_MAX) return -EINVAL; @@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd, NULL); break; default: - return -1; + kvfree(pages); + ret = -EINVAL; + goto out; } if (nr <= 0) @@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd, gup->put_delta_usec = ktime_us_delta(end_time, start_time); kvfree(pages); - return 0; +out: + return ret; } static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 13cc93785006..a2d9162543a0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -46,16 +46,7 @@ * Defrag is invoked by khugepaged hugepage allocations and by page faults * for all hugepage allocations. */ -unsigned long transparent_hugepage_flags __read_mostly = -#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS - (1<mm->get_unmapped_area(filp, 0, len_pad, + ret = current->mm->get_unmapped_area(filp, addr, len_pad, off >> PAGE_SHIFT, flags); - if (IS_ERR_VALUE(addr)) + + /* + * The failure might be due to length padding. The caller will retry + * without the padding. + */ + if (IS_ERR_VALUE(ret)) return 0; - addr += (off - addr) & (size - 1); - return addr; + /* + * Do not try to align to THP boundary if allocation at the address + * hint succeeds. 
+ */ + if (ret == addr) + return addr; + + ret += (off - ret) & (size - 1); + return ret; } unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + unsigned long ret; loff_t off = (loff_t)pgoff << PAGE_SHIFT; - if (addr) - goto out; if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) goto out; - addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE); - if (addr) - return addr; - - out: + ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); + if (ret) + return ret; +out: return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); } EXPORT_SYMBOL_GPL(thp_get_unmapped_area); @@ -2702,7 +2695,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) unsigned long flags; pgoff_t end; - VM_BUG_ON_PAGE(is_huge_zero_page(page), page); + VM_BUG_ON_PAGE(is_huge_zero_page(head), head); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageCompound(page), page); @@ -3030,8 +3023,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, return; flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); - pmdval = *pvmw->pmd; - pmdp_invalidate(vma, address, pvmw->pmd); + pmdval = pmdp_invalidate(vma, address, pvmw->pmd); if (pmd_dirty(pmdval)) set_page_dirty(page); entry = make_migration_entry(page, pmd_write(pmdval)); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b45a95363a84..e0afd582ca01 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -27,6 +27,7 @@ #include <linux/swapops.h> #include <linux/jhash.h> #include <linux/numa.h> +#include <linux/llist.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -1255,7 +1256,7 @@ static inline void ClearPageHugeTemporary(struct page *page) page[2].mapping = NULL; } -void free_huge_page(struct page *page) +static void __free_huge_page(struct page *page) { /* * Can't pass hstate in here because it is called from the @@ -1318,6 +1319,54 @@ void free_huge_page(struct page *page) spin_unlock(&hugetlb_lock); } +/* + * As free_huge_page() can be called from a non-task context, we have + * to defer the actual freeing in a workqueue to prevent potential + * hugetlb_lock deadlock. + * + * free_hpage_workfn() locklessly retrieves the linked list of pages to + * be freed and frees them one-by-one. As the page->mapping pointer is + * going to be cleared in __free_huge_page() anyway, it is reused as the + * llist_node structure of a lockless linked list of huge pages to be freed. + */ +static LLIST_HEAD(hpage_freelist); + +static void free_hpage_workfn(struct work_struct *work) +{ + struct llist_node *node; + struct page *page; + + node = llist_del_all(&hpage_freelist); + + while (node) { + page = container_of((struct address_space **)node, + struct page, mapping); + node = node->next; + __free_huge_page(page); + } +} +static DECLARE_WORK(free_hpage_work, free_hpage_workfn); + +void free_huge_page(struct page *page) +{ + /* + * Defer freeing if in non-task context to avoid hugetlb_lock deadlock. + */ + if (!in_task()) { + /* + * Only call schedule_work() if hpage_freelist is previously + * empty. Otherwise, schedule_work() had been called but the + * workfn hasn't retrieved the list yet. + */ + if (llist_add((struct llist_node *)&page->mapping, + &hpage_freelist)) + schedule_work(&free_hpage_work); + return; + } + + __free_huge_page(page); +} + static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) { INIT_LIST_HEAD(&page->lru);
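The free_huge_page() rework above hinges on two llist properties: llist_add() returns true only when the list was previously empty, so exactly one worker is scheduled per batch, and the soon-to-be-cleared page->mapping field can double as the llist_node. A condensed sketch of the deferral pattern, using a dedicated node field instead of the pointer reuse (struct deferred, pending, defer_free are illustrative names; kernel context assumed):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred {
        struct llist_node node;
};

static LLIST_HEAD(pending);

static void free_workfn(struct work_struct *work)
{
        struct llist_node *batch = llist_del_all(&pending);
        struct deferred *d, *tmp;

        /* Drain everything that accumulated since the work was queued. */
        llist_for_each_entry_safe(d, tmp, batch, node)
                kfree(d);       /* stands in for __free_huge_page() */
}
static DECLARE_WORK(free_work, free_workfn);

/* Safe from any context: llist_add() is lock-free, and its true return
 * value (list was previously empty) guarantees one schedule_work() per
 * batch rather than one per freed object. */
static void defer_free(struct deferred *d)
{
        if (llist_add(&d->node, &pending))
                schedule_work(&free_work);
}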
diff --git a/mm/internal.h b/mm/internal.h index 0d5f720c75ab..7dd7fbb577a9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm_area_struct *vma) return max(start, vma->vm_start); } +static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, + struct file *fpin) +{ + int flags = vmf->flags; + + if (fpin) + return fpin; + + /* + * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or + * anything, so we only pin the file and drop the mmap_sem if only + * FAULT_FLAG_ALLOW_RETRY is set. + */ + if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) == + FAULT_FLAG_ALLOW_RETRY) { + fpin = get_file(vmf->vma->vm_file); + up_read(&vmf->vma->vm_mm->mmap_sem); + } + return fpin; +} + #else /* !CONFIG_MMU */ static inline void clear_page_mlock(struct page *page) { } static inline void mlock_vma_page(struct page *page) { } diff --git a/mm/maccess.c b/mm/maccess.c index d065736f6b87..2d3c3d01064c 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -18,6 +18,18 @@ probe_read_common(void *dst, const void __user *src, size_t size) return ret ? -EFAULT : 0; } +static __always_inline long +probe_write_common(void __user *dst, const void *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_to_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data @@ -85,6 +97,7 @@ EXPORT_SYMBOL_GPL(probe_user_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ + long __weak probe_kernel_write(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_write"))); @@ -94,15 +107,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - pagefault_enable(); + ret = probe_write_common((__force void __user *)dst, src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_write); +/** + * probe_user_write(): safely attempt to write to a user-space location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_write(void __user *dst, const void *src, size_t size) + __attribute__((alias("__probe_user_write"))); + +long __probe_user_write(void __user *dst, const void *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(dst, size)) + ret = probe_write_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_write); /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
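probe_user_write() added above completes the probe_*() family: like probe_kernel_write() it runs with page faults disabled, so it can be called where copy_to_user() must not sleep, for instance from tracing callbacks, and it returns -EFAULT rather than faulting the destination page in. A usage sketch (poke_user() is an illustrative name; kernel context assumed):

#include <linux/types.h>
#include <linux/uaccess.h>

static int poke_user(u32 __user *dst)
{
        u32 val = 42;

        /* 0 on success; -EFAULT if 'dst' is not a valid, resident,
         * writable user address. No page is ever faulted in. */
        return probe_user_write(dst, &val, sizeof(val));
}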
diff --git a/mm/madvise.c b/mm/madvise.c index 94c343b4c968..418a8076112d 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -143,7 +143,13 @@ static long madvise_behavior(struct vm_area_struct *vma, *prev = vma; if (start != vma->vm_start) { + #ifdef CONFIG_PID_NS + if (unlikely(mm->map_count >= (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count))) { + #else if (unlikely(mm->map_count >= sysctl_max_map_count)) { + #endif error = -ENOMEM; goto out; } @@ -153,7 +159,13 @@ static long madvise_behavior(struct vm_area_struct *vma, } if (end != vma->vm_end) { + #ifdef CONFIG_PID_NS + if (unlikely(mm->map_count >= (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count))) { + #else if (unlikely(mm->map_count >= sysctl_max_map_count)) { + #endif error = -ENOMEM; goto out; } @@ -335,12 +347,14 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, } page = pmd_page(orig_pmd); + + /* Do not interfere with other mappings of this page */ + if (page_mapcount(page) != 1) + goto huge_unlock; + if (next - addr != HPAGE_PMD_SIZE) { int err; - if (page_mapcount(page) != 1) - goto huge_unlock; - get_page(page); spin_unlock(ptl); lock_page(page); @@ -426,6 +440,10 @@ regular_page: continue; } + /* Do not interfere with other mappings of this page */ + if (page_mapcount(page) != 1) + continue; + VM_BUG_ON_PAGE(PageTransCompound(page), page); if (pte_young(ptent)) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 46ad252e6d6a..9db4f1ae0c9a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -418,8 +418,10 @@ int memcg_expand_shrinker_maps(int new_id) if (mem_cgroup_is_root(memcg)) continue; ret = memcg_expand_one_shrinker_map(memcg, size, old_size); - if (ret) + if (ret) { + mem_cgroup_iter_break(NULL, memcg); goto unlock; + } } unlock: if (!ret) @@ -784,6 +786,17 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) rcu_read_unlock(); } +void mod_memcg_obj_state(void *p, int idx, int val) +{ + struct mem_cgroup *memcg; + + rcu_read_lock(); + memcg = mem_cgroup_from_obj(p); + if (memcg) + mod_memcg_state(memcg, idx, val); + rcu_read_unlock(); +} + /** * __count_memcg_events - account VM events in a cgroup * @memcg: the memory cgroup @@ -2412,28 +2425,41 @@ static void high_work_func(struct work_struct *work) #define MEMCG_DELAY_SCALING_SHIFT 14 /* - * Scheduled by try_charge() to be executed from the userland return path - * and reclaims memory over the high limit. + * Get the number of jiffies that we should penalise a mischievous cgroup which + * is exceeding its memory.high by checking both it and its ancestors. 
*/ -void mem_cgroup_handle_over_high(void) +static unsigned long calculate_high_delay(struct mem_cgroup *memcg, + unsigned int nr_pages) { - unsigned long usage, high, clamped_high; - unsigned long pflags; - unsigned long penalty_jiffies, overage; - unsigned int nr_pages = current->memcg_nr_pages_over_high; - struct mem_cgroup *memcg; + unsigned long penalty_jiffies; + u64 max_overage = 0; - if (likely(!nr_pages)) - return; + do { + unsigned long usage, high; + u64 overage; - memcg = get_mem_cgroup_from_mm(current->mm); - reclaim_high(memcg, nr_pages, GFP_KERNEL); - current->memcg_nr_pages_over_high = 0; + usage = page_counter_read(&memcg->memory); + high = READ_ONCE(memcg->high); + + /* + * Prevent division by 0 in overage calculation by acting as if + * it was a threshold of 1 page + */ + high = max(high, 1UL); + + overage = usage - high; + overage <<= MEMCG_DELAY_PRECISION_SHIFT; + overage = div64_u64(overage, high); + + if (overage > max_overage) + max_overage = overage; + } while ((memcg = parent_mem_cgroup(memcg)) && + !mem_cgroup_is_root(memcg)); + + if (!max_overage) + return 0; /* - * memory.high is breached and reclaim is unable to keep up. Throttle - * allocators proactively to slow down excessive growth. - * * We use overage compared to memory.high to calculate the number of * jiffies to sleep (penalty_jiffies). Ideally this value should be * fairly lenient on small overages, and increasingly harsh when the @@ -2441,24 +2467,9 @@ void mem_cgroup_handle_over_high(void) * its crazy behaviour, so we exponentially increase the delay based on * overage amount. */ - - usage = page_counter_read(&memcg->memory); - high = READ_ONCE(memcg->high); - - if (usage <= high) - goto out; - - /* - * Prevent division by 0 in overage calculation by acting as if it was a - * threshold of 1 page - */ - clamped_high = max(high, 1UL); - - overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT, - clamped_high); - - penalty_jiffies = ((u64)overage * overage * HZ) - >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT); + penalty_jiffies = max_overage * max_overage * HZ; + penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; + penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; /* * Factor in the task's own contribution to the overage, such that four @@ -2475,7 +2486,32 @@ void mem_cgroup_handle_over_high(void) * application moving forwards and also permit diagnostics, albeit * extremely slowly. */ - penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); + return min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); +} + +/* + * Scheduled by try_charge() to be executed from the userland return path + * and reclaims memory over the high limit. + */ +void mem_cgroup_handle_over_high(void) +{ + unsigned long penalty_jiffies; + unsigned long pflags; + unsigned int nr_pages = current->memcg_nr_pages_over_high; + struct mem_cgroup *memcg; + + if (likely(!nr_pages)) + return; + + memcg = get_mem_cgroup_from_mm(current->mm); + reclaim_high(memcg, nr_pages, GFP_KERNEL); + current->memcg_nr_pages_over_high = 0; + + /* + * memory.high is breached and reclaim is unable to keep up. Throttle + * allocators proactively to slow down excessive growth. + */ + penalty_jiffies = calculate_high_delay(memcg, nr_pages); /* * Don't sleep if the amount of jiffies this memcg owes us is so low @@ -2753,6 +2789,33 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, } #ifdef CONFIG_MEMCG_KMEM +/* + * Returns a pointer to the memory cgroup to which the kernel object is charged. 
+ * + * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), + * cgroup_mutex, etc. + */ +struct mem_cgroup *mem_cgroup_from_obj(void *p) +{ + struct page *page; + + if (mem_cgroup_disabled()) + return NULL; + + page = virt_to_head_page(p); + + /* + * Slab pages don't have page->mem_cgroup set because corresponding + * kmem caches can be reparented during the lifetime. That's why + * memcg_from_slab_page() should be used instead. + */ + if (PageSlab(page)) + return memcg_from_slab_page(page); + + /* All other pages use page->mem_cgroup */ + return page->mem_cgroup; +} + static int memcg_alloc_cache_id(void) { int id, size; @@ -3404,49 +3467,34 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } } -static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only) +static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) { - unsigned long stat[MEMCG_NR_STAT]; + unsigned long stat[MEMCG_NR_STAT] = {0}; struct mem_cgroup *mi; int node, cpu, i; - int min_idx, max_idx; - - if (slab_only) { - min_idx = NR_SLAB_RECLAIMABLE; - max_idx = NR_SLAB_UNRECLAIMABLE; - } else { - min_idx = 0; - max_idx = MEMCG_NR_STAT; - } - - for (i = min_idx; i < max_idx; i++) - stat[i] = 0; for_each_online_cpu(cpu) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < MEMCG_NR_STAT; i++) stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < MEMCG_NR_STAT; i++) atomic_long_add(stat[i], &mi->vmstats[i]); - if (!slab_only) - max_idx = NR_VM_NODE_STAT_ITEMS; - for_each_node(node) { struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; struct mem_cgroup_per_node *pi; - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) stat[i] = 0; for_each_online_cpu(cpu) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) stat[i] += per_cpu( pn->lruvec_stat_cpu->count[i], cpu); for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) atomic_long_add(stat[i], &pi->lruvec_stat[i]); } } @@ -3520,13 +3568,9 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) parent = root_mem_cgroup; /* - * Deactivate and reparent kmem_caches. Then flush percpu - * slab statistics to have precise values at the parent and - * all ancestor levels. It's required to keep slab stats - * accurate after the reparenting of kmem_caches. + * Deactivate and reparent kmem_caches. 
*/ memcg_deactivate_kmem_caches(memcg, parent); - memcg_flush_percpu_vmstats(memcg, true); kmemcg_id = memcg->kmemcg_id; BUG_ON(kmemcg_id < 0); @@ -4168,7 +4212,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; unsigned long usage; - int i, j, size; + int i, j, size, entries; mutex_lock(&memcg->thresholds_lock); @@ -4188,14 +4232,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, __mem_cgroup_threshold(memcg, type == _MEMSWAP); /* Calculate new number of threshold */ - size = 0; + size = entries = 0; for (i = 0; i < thresholds->primary->size; i++) { if (thresholds->primary->entries[i].eventfd != eventfd) size++; + else + entries++; } new = thresholds->spare; + /* If no items related to eventfd have been cleared, nothing to do */ + if (!entries) + goto unlock; + /* Set thresholds array to NULL if we don't have thresholds */ if (!size) { kfree(new); @@ -4770,6 +4820,308 @@ out_kfree: return ret; } +static u64 memcg_meminfo_recursive_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return mem_cgroup_from_css(css)->meminfo_recursive; +} + +static int memcg_meminfo_recursive_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + int retval = 0; + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (memcg->meminfo_recursive == val) + return 0; + + if (val == 1 || val == 0) + memcg->meminfo_recursive = val; + else + retval = -EINVAL; + + return retval; +} + +static inline unsigned long memcg_read_stat(struct mem_cgroup *memcg, + int idx) +{ + unsigned long val = 0; + struct mem_cgroup *iter; + + for_each_mem_cgroup_tree(iter, memcg) + val += memcg_page_state(iter, idx); + + return val; +} + +static unsigned long memcg_nr_lru_pages(struct mem_cgroup *memcg, + unsigned int lru_mask) +{ + unsigned long nr = 0; + struct mem_cgroup *iter; + + for_each_mem_cgroup_tree(iter, memcg) + nr += mem_cgroup_nr_lru_pages(iter, lru_mask); + + return nr; +} + +static int mem_cgroup_meminfo_read(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + u64 mem_limit, mem_usage; + u64 mem_swap, mem_swap_usage; + unsigned long mem_cache, mem_swap_cache; + unsigned long mem_active, mem_inactive; + unsigned long mem_active_anon, mem_inactive_anon; + unsigned long mem_active_file, mem_inactive_file; + unsigned long mem_unevictable; + unsigned long mem_rss, mem_rss_huge; + unsigned long mem_file_map, mem_shmem; + + mem_limit = memcg->memory.max; + /* if limit not set, use host ram total size*/ + if (mem_limit == PAGE_COUNTER_MAX) + mem_limit = totalram_pages() * PAGE_SIZE; + else + mem_limit = mem_limit * PAGE_SIZE; + mem_usage = (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; + mem_swap = memcg->memsw.max; + + if (mem_swap == PAGE_COUNTER_MAX) + mem_swap = total_swap_pages * PAGE_SIZE; + else + mem_swap = mem_swap * PAGE_SIZE; + mem_swap_usage = (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE - mem_usage; + + if (!memcg->meminfo_recursive) { + mem_cache = memcg_page_state(memcg, MEMCG_CACHE); + mem_swap_cache = memcg_page_state(memcg, MEMCG_SWAP); + mem_active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)) + + mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); + mem_inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)) + + mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); + mem_active_anon = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); + mem_inactive_anon = 
mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); + mem_active_file = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); + mem_inactive_file = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); + mem_unevictable = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); + mem_rss = memcg_page_state(memcg, MEMCG_RSS); + mem_rss_huge = memcg_page_state(memcg, MEMCG_RSS_HUGE) * HPAGE_PMD_NR; + mem_file_map = memcg_page_state(memcg, NR_FILE_MAPPED); + mem_shmem = 0; + } else { + mem_cache = memcg_read_stat(memcg, MEMCG_CACHE); + mem_swap_cache = memcg_read_stat(memcg, MEMCG_SWAP); + mem_active = memcg_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)) + + memcg_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); + mem_inactive = memcg_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)) + + memcg_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); + mem_active_anon = memcg_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); + mem_inactive_anon = memcg_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); + mem_active_file = memcg_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); + mem_inactive_file = memcg_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); + mem_unevictable = memcg_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); + mem_rss = memcg_read_stat(memcg, MEMCG_RSS); + mem_rss_huge = memcg_read_stat(memcg, MEMCG_RSS_HUGE) * HPAGE_PMD_NR; + mem_file_map = memcg_read_stat(memcg, NR_FILE_MAPPED); + mem_shmem = memcg_read_stat(memcg, NR_SHMEM); + } + + /* + * Tagged format, for easy grepping and expansion. + */ + seq_printf(m, + "MemTotal: %8lu kB\n" + "MemFree: %8lu kB\n" + "Buffers: %8lu kB\n" + "Cached: %8lu kB\n" + "SwapCached: %8lu kB\n" + "Active: %8lu kB\n" + "Inactive: %8lu kB\n" + "Active(anon): %8lu kB\n" + "Inactive(anon): %8lu kB\n" + "Active(file): %8lu kB\n" + "Inactive(file): %8lu kB\n" + "Unevictable: %8lu kB\n" + "Mlocked: %8lu kB\n" +#ifdef CONFIG_HIGHMEM + "HighTotal: %8lu kB\n" + "HighFree: %8lu kB\n" + "LowTotal: %8lu kB\n" + "LowFree: %8lu kB\n" +#endif +#ifndef CONFIG_MMU + "MmapCopy: %8lu kB\n" +#endif + "SwapTotal: %8lu kB\n" + "SwapFree: %8lu kB\n" + "Dirty: %8lu kB\n" + "Writeback: %8lu kB\n" + "AnonPages: %8lu kB\n" + "Mapped: %8lu kB\n" + "Shmem: %8lu kB\n" + "Slab: %8lu kB\n" + "SReclaimable: %8lu kB\n" + "SUnreclaim: %8lu kB\n" + "KernelStack: %8lu kB\n" + "PageTables: %8lu kB\n" +#ifdef CONFIG_QUICKLIST + "Quicklists: %8lu kB\n" +#endif + "NFS_Unstable: %8lu kB\n" + "Bounce: %8lu kB\n" + "WritebackTmp: %8lu kB\n" + "CommitLimit: %8lu kB\n" + "Committed_AS: %8lu kB\n" + "VmallocTotal: %8lu kB\n" + "VmallocUsed: %8lu kB\n" + "VmallocChunk: %8lu kB\n" +#ifdef CONFIG_MEMORY_FAILURE + "HardwareCorrupted: %5lu kB\n" +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + "AnonHugePages: %8lu kB\n" +#endif + , + (unsigned long)(mem_limit / 1024), + (unsigned long)((mem_limit - mem_usage) / 1024), + (unsigned long)0, + K(mem_cache), + K(mem_swap_cache), + K(mem_active),//K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]), + K(mem_inactive),//K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]), + K(mem_active_anon),//K(pages[LRU_ACTIVE_ANON]), + K(mem_inactive_anon),//K(pages[LRU_INACTIVE_ANON]), + K(mem_active_file),//K(pages[LRU_ACTIVE_FILE]), + K(mem_inactive_file),//K(pages[LRU_INACTIVE_FILE]), + K(mem_unevictable),//K(pages[LRU_UNEVICTABLE]), + (unsigned long)0,//K(global_page_state(NR_MLOCK)), +#ifdef CONFIG_HIGHMEM + (unsigned long)0,//K(i.totalhigh), + (unsigned long)0,//K(i.freehigh), + (unsigned long)0,//K(i.totalram-i.totalhigh), + (unsigned long)0,//K(i.freeram-i.freehigh), +#endif +#ifndef CONFIG_MMU + (unsigned 
long)0,//K((unsigned long) atomic_long_read(&mmap_pages_allocated)), +#endif + (unsigned long)(mem_swap / 1024), + (unsigned long)((mem_swap - mem_swap_usage) / 1024), + (unsigned long)0,//K(global_page_state(NR_FILE_DIRTY)), + (unsigned long)0,//K(global_page_state(NR_WRITEBACK)), +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + K(mem_rss + mem_rss_huge), + //K(global_page_state(NR_ANON_PAGES) + // + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * + //HPAGE_PMD_NR), +#else + K(mem_rss), //K(global_page_state(NR_ANON_PAGES)), +#endif + K(mem_file_map),//K(global_page_state(NR_FILE_MAPPED)), + K(mem_shmem), //K(global_page_state(NR_SHMEM)), + (unsigned long)0, //K(global_page_state(NR_SLAB_RECLAIMABLE) + + //global_page_state(NR_SLAB_UNRECLAIMABLE)), + (unsigned long)0, //K(global_page_state(NR_SLAB_RECLAIMABLE)), + (unsigned long)0, //K(global_page_state(NR_SLAB_UNRECLAIMABLE)), + (unsigned long)0, //global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024, + (unsigned long)0, //K(global_page_state(NR_PAGETABLE)), +#ifdef CONFIG_QUICKLIST + (unsigned long)0, //K(quicklist_total_size()), +#endif + (unsigned long)0, //K(global_page_state(NR_UNSTABLE_NFS)), + (unsigned long)0, //K(global_page_state(NR_BOUNCE)), + (unsigned long)0, //K(global_page_state(NR_WRITEBACK_TEMP)), + (unsigned long)0, //K(vm_commit_limit()), + (unsigned long)0, //K(committed), + (unsigned long)0, //(unsigned long)VMALLOC_TOTAL >> 10, + (unsigned long)0, //vmi.used >> 10, + (unsigned long)0, //vmi.largest_chunk >> 10 +#ifdef CONFIG_MEMORY_FAILURE + (unsigned long)0, //,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10) +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + K(mem_rss_huge) +#endif + ); + + //hugetlb_report_meminfo(m); + + //arch_report_meminfo(m); + + return 0; +} + +#define NR_VM_WRITEBACK_STAT_ITEMS 2 +extern const char * const vmstat_text[]; +extern unsigned int vmstat_text_size; + +static int mem_cgroup_vmstat_read(struct seq_file *m, void *vv) +{ + unsigned long *v1, *v; + int i, stat_items_size; + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + u64 mem_limit, mem_usage; + + mem_limit = memcg->memory.max; + if (mem_limit == PAGE_COUNTER_MAX) + mem_limit = totalram_pages() *PAGE_SIZE; + else + mem_limit = mem_limit * PAGE_SIZE; + mem_usage = (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; + + stat_items_size = vmstat_text_size * sizeof(unsigned long); + +#ifdef CONFIG_VM_EVENT_COUNTERS + stat_items_size += sizeof(struct vm_event_state); +#endif + + v1 = v = kzalloc(stat_items_size, GFP_KERNEL); + if (!v) + return -ENOMEM; + + v[NR_FREE_PAGES] = (mem_limit - mem_usage) >> PAGE_SHIFT; + v[NR_ZONE_INACTIVE_ANON] = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); + v[NR_ZONE_ACTIVE_ANON] = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); + v[NR_ZONE_INACTIVE_FILE] = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); + v[NR_ZONE_ACTIVE_FILE] = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); + v[NR_ZONE_UNEVICTABLE] = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); + v[NR_MLOCK] = 0; +#if 0 + v[NR_ANON_PAGES] = v[NR_INACTIVE_ANON] + v[NR_ACTIVE_ANON]; + v[NR_FILE_MAPPED] = memcg_page_state(memcg, memcg1_stats[4]); + v[NR_FILE_PAGES] = memcg_page_state(memcg, memcg1_stats[0]); + v[NR_FILE_DIRTY] = 0; + v[NR_WRITEBACK] = 0; + v[NR_SLAB_RECLAIMABLE] = 0; + v[NR_SLAB_UNRECLAIMABLE] = 0; + v[NR_PAGETABLE] = 0; + v[NR_KERNEL_STACK] = 0; + v[NR_UNSTABLE_NFS] = 0; +#endif + + v += NR_VM_ZONE_STAT_ITEMS; +#if IS_ENABLED(CONFIG_ZSMALLOC) + v += 1; +#endif + v += 
NR_VM_NUMA_STAT_ITEMS; + v += NR_VM_NODE_STAT_ITEMS; + v += NR_VM_WRITEBACK_STAT_ITEMS; + +#ifdef CONFIG_VM_EVENT_COUNTERS + //all_vm_events(v); + v[PGPGIN] = memcg_events_local(memcg, memcg1_events[0]) * (PAGE_SIZE / 1024); /* sectors -> kbytes */ + v[PGPGOUT] = memcg_events_local(memcg, memcg1_events[1]) * (PAGE_SIZE / 1024); +#endif + for ( i = 0; i < vmstat_text_size; i++ ) + { + seq_printf(m, "%s %lu\n", vmstat_text[i], v1[i]); + } + kfree(v1); + return 0; +} + static struct cftype mem_cgroup_legacy_files[] = { { .name = "usage_in_bytes", @@ -4843,6 +5195,19 @@ static struct cftype mem_cgroup_legacy_files[] = { .seq_show = memcg_numa_stat_show, }, #endif + { + .name = "meminfo", + .seq_show = mem_cgroup_meminfo_read, + }, + { + .name = "meminfo_recursive", + .write_u64 = memcg_meminfo_recursive_write, + .read_u64 = memcg_meminfo_recursive_read, + }, + { + .name = "vmstat", + .seq_show = mem_cgroup_vmstat_read, + }, { .name = "kmem.limit_in_bytes", .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), @@ -5037,7 +5402,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg) * Flush percpu vmstats and vmevents to guarantee the value correctness * on parent's and all ancestor levels. */ - memcg_flush_percpu_vmstats(memcg, false); + memcg_flush_percpu_vmstats(memcg); memcg_flush_percpu_vmevents(memcg); __mem_cgroup_free(memcg); } @@ -5484,14 +5849,6 @@ static int mem_cgroup_move_account(struct page *page, __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (compound && !list_empty(page_deferred_list(page))) { - spin_lock(&from->deferred_split_queue.split_queue_lock); - list_del_init(page_deferred_list(page)); - from->deferred_split_queue.split_queue_len--; - spin_unlock(&from->deferred_split_queue.split_queue_lock); - } -#endif /* * It is safe to change page->mem_cgroup here because the page * is referenced, charged, and isolated - we can't race with @@ -5501,16 +5858,6 @@ static int mem_cgroup_move_account(struct page *page, /* caller should have done css_get */ page->mem_cgroup = to; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (compound && list_empty(page_deferred_list(page))) { - spin_lock(&to->deferred_split_queue.split_queue_lock); - list_add_tail(page_deferred_list(page), - &to->deferred_split_queue.split_queue); - to->deferred_split_queue.split_queue_len++; - spin_unlock(&to->deferred_split_queue.split_queue_lock); - } -#endif - spin_unlock_irqrestore(&from->move_lock, flags); ret = 0; @@ -6827,19 +7174,9 @@ void mem_cgroup_sk_alloc(struct sock *sk) if (!mem_cgroup_sockets_enabled) return; - /* - * Socket cloning can throw us here with sk_memcg already - * filled. It won't however, necessarily happen from - * process context. So the test for root memcg given - * the current task's memcg won't help us in this case. - * - * Respecting the original socket's memcg is a better - * decision in this case. - */ - if (sk->sk_memcg) { - css_get(&sk->sk_memcg->css); + /* Do not associate the sock with unrelated interrupted task's memcg. */ + if (in_interrupt()) return; - } rcu_read_lock(); memcg = mem_cgroup_from_task(current); diff --git a/mm/memory.c b/mm/memory.c index b1ca51a079f2..cb7c940cf800 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2227,10 +2227,11 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) * * The function expects the page to be locked and unlocks it. 
*/ -static void fault_dirty_shared_page(struct vm_area_struct *vma, - struct page *page) +static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) { + struct vm_area_struct *vma = vmf->vma; struct address_space *mapping; + struct page *page = vmf->page; bool dirtied; bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; @@ -2245,16 +2246,30 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma, mapping = page_rmapping(page); unlock_page(page); - if ((dirtied || page_mkwrite) && mapping) { - /* - * Some device drivers do not set page.mapping - * but still dirty their pages - */ - balance_dirty_pages_ratelimited(mapping); - } - if (!page_mkwrite) file_update_time(vma->vm_file); + + /* + * Throttle page dirtying rate down to writeback speed. + * + * mapping may be NULL here because some device drivers do not + * set page.mapping but still dirty their pages + * + * Drop the mmap_sem before waiting on IO, if we can. The file + * is pinning the mapping, as per above. + */ + if ((dirtied || page_mkwrite) && mapping) { + struct file *fpin; + + fpin = maybe_unlock_mmap_for_io(vmf, NULL); + balance_dirty_pages_ratelimited(mapping); + if (fpin) { + fput(fpin); + return VM_FAULT_RETRY; + } + } + + return 0; } /* @@ -2497,6 +2512,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret = VM_FAULT_WRITE; get_page(vmf->page); @@ -2520,10 +2536,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf) wp_page_reuse(vmf); lock_page(vmf->page); } - fault_dirty_shared_page(vma, vmf->page); + ret |= fault_dirty_shared_page(vmf); put_page(vmf->page); - return VM_FAULT_WRITE; + return ret; } /* @@ -3567,7 +3583,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf) return ret; } - fault_dirty_shared_page(vma, vmf->page); + ret |= fault_dirty_shared_page(vmf); return ret; } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f307bd82d750..c054945a9a74 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -465,8 +465,9 @@ static void update_pgdat_span(struct pglist_data *pgdat) pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; } -static void __remove_zone(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages) +void __ref remove_pfn_range_from_zone(struct zone *zone, + unsigned long start_pfn, + unsigned long nr_pages) { struct pglist_data *pgdat = zone->zone_pgdat; unsigned long flags; @@ -481,28 +482,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn, return; #endif + clear_zone_contiguous(zone); + pgdat_resize_lock(zone->zone_pgdat, &flags); shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); update_pgdat_span(pgdat); pgdat_resize_unlock(zone->zone_pgdat, &flags); + + set_zone_contiguous(zone); } -static void __remove_section(struct zone *zone, unsigned long pfn, - unsigned long nr_pages, unsigned long map_offset, - struct vmem_altmap *altmap) +static void __remove_section(unsigned long pfn, unsigned long nr_pages, + unsigned long map_offset, + struct vmem_altmap *altmap) { struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn)); if (WARN_ON_ONCE(!valid_section(ms))) return; - __remove_zone(zone, pfn, nr_pages); sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); } /** - * __remove_pages() - remove sections of pages from a zone - * @zone: zone from which pages need to be removed + * __remove_pages() - remove sections of pages * @pfn: starting pageframe (must be aligned to start of a section) * @nr_pages: 
number of pages to remove (must be multiple of section size) * @altmap: alternative device page map or %NULL if default memmap is used @@ -512,16 +515,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn, * sure that pages are marked reserved and zones are adjusted properly by * calling offline_pages(). */ -void __remove_pages(struct zone *zone, unsigned long pfn, - unsigned long nr_pages, struct vmem_altmap *altmap) +void __remove_pages(unsigned long pfn, unsigned long nr_pages, + struct vmem_altmap *altmap) { unsigned long map_offset = 0; unsigned long nr, start_sec, end_sec; map_offset = vmem_altmap_offset(altmap); - clear_zone_contiguous(zone); - if (check_pfn_span(pfn, nr_pages, "remove")) return; @@ -533,13 +534,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn, cond_resched(); pfns = min(nr_pages, PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK)); - __remove_section(zone, pfn, pfns, map_offset, altmap); + __remove_section(pfn, pfns, map_offset, altmap); pfn += pfns; nr_pages -= pfns; map_offset = 0; } - - set_zone_contiguous(zone); } int set_online_page_callback(online_page_callback_t callback) @@ -599,7 +598,13 @@ EXPORT_SYMBOL_GPL(__online_page_free); static void generic_online_page(struct page *page, unsigned int order) { - kernel_map_pages(page, 1 << order, 1); + /* + * Freeing the page with debug_pagealloc enabled will try to unmap it, + * so we should map it first. This is better than introducing a special + * case in page freeing fast path. + */ + if (debug_pagealloc_enabled_static()) + kernel_map_pages(page, 1 << order, 1); __free_pages_core(page, order); totalram_pages_add(1UL << order); #ifdef CONFIG_HIGHMEM @@ -867,6 +872,7 @@ failed_addition: (unsigned long long) pfn << PAGE_SHIFT, (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); memory_notify(MEM_CANCEL_ONLINE, &arg); + remove_pfn_range_from_zone(zone, pfn, nr_pages); mem_hotplug_done(); return ret; } @@ -1602,6 +1608,7 @@ static int __ref __offline_pages(unsigned long start_pfn, writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); + remove_pfn_range_from_zone(zone, start_pfn, nr_pages); mem_hotplug_done(); return 0; @@ -1737,8 +1744,6 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) BUG_ON(check_hotplug_memory_range(start, size)); - mem_hotplug_begin(); - /* * All memory blocks must be offlined before removing memory. Check * whether all memory blocks in question are offline and return error @@ -1753,9 +1758,14 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) memblock_free(start, size); memblock_remove(start, size); - /* remove memory block devices before removing memory */ + /* + * Memory block device removal under the device_hotplug_lock is + * a barrier against racing online attempts.
+ */ remove_memory_block_devices(start, size); + mem_hotplug_begin(); + arch_remove_memory(nid, start, size, NULL); __release_memory_resource(start, size); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index e08c94170ae4..787c5fc91b21 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2802,6 +2802,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) char *flags = strchr(str, '='); int err = 1, mode; + if (flags) + *flags++ = '\0'; /* terminate mode string */ + if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; @@ -2812,9 +2815,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) } else nodes_clear(nodes); - if (flags) - *flags++ = '\0'; /* terminate mode string */ - mode = match_string(policy_modes, MPOL_MAX, str); if (mode < 0) goto out; @@ -2822,7 +2822,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) switch (mode) { case MPOL_PREFERRED: /* - * Insist on a nodelist of one node only + * Insist on a nodelist of one node only, although later + * we use first_node(nodes) to grab a single node, so here + * nodelist (or nodes) cannot be empty. */ if (nodelist) { char *rest = nodelist; @@ -2830,6 +2832,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) rest++; if (*rest) goto out; + if (nodes_empty(nodes)) + goto out; } break; case MPOL_INTERLEAVE: diff --git a/mm/memremap.c b/mm/memremap.c index 03ccbdfeb697..c51c6bd2fe34 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap) mem_hotplug_begin(); if (pgmap->type == MEMORY_DEVICE_PRIVATE) { - __remove_pages(page_zone(first_page), PHYS_PFN(res->start), + __remove_pages(PHYS_PFN(res->start), PHYS_PFN(resource_size(res)), NULL); } else { arch_remove_memory(nid, res->start, resource_size(res), diff --git a/mm/migrate.c b/mm/migrate.c index 4fe45d1428c8..c4c313e47f12 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1516,9 +1516,11 @@ static int do_move_pages_to_node(struct mm_struct *mm, /* * Resolves the given address to a struct page, isolates it from the LRU and * puts it to the given pagelist. - * Returns -errno if the page cannot be found/isolated or 0 when it has been - * queued or the page doesn't need to be migrated because it is already on - * the target node + * Returns: + * errno - if the page cannot be found/isolated + * 0 - when it doesn't have to be migrated because it is already on the + * target node + * 1 - when it has been queued */ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, int node, struct list_head *pagelist, bool migrate_all) @@ -1557,7 +1559,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, if (PageHuge(page)) { if (PageHead(page)) { isolate_huge_page(page, pagelist); - err = 0; + err = 1; } } else { struct page *head; @@ -1567,7 +1569,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, if (err) goto out_putpage; - err = 0; + err = 1; list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_cache(head), @@ -1629,8 +1631,19 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, start = i; } else if (node != current_node) { err = do_move_pages_to_node(mm, &pagelist, current_node); - if (err) + if (err) { + /* + * Positive err means the number of failed + * pages to migrate. Since we are going to + * abort and return the number of non-migrated + * pages, we need to include the rest of the + * nr_pages that have not been attempted as + * well. + */
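The return-value convention changed by these migrate.c hunks is visible to userspace through move_pages(2): add_page_for_migration() now distinguishes "already on the target node" (0) from "queued" (1), and a positive return from the syscall counts pages that were not migrated, with the per-page status array holding either the resulting node id or a negative errno. A minimal userspace sketch of a caller checking both; the two-page setup and target node 0 are arbitrary, and the wrapper comes from libnuma (link with -lnuma):

#include <numaif.h>	/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *pages[2];
	int nodes[2] = { 0, 0 };	/* ask for node 0 for both pages */
	int status[2];

	/* two page-aligned, faulted-in buffers to move */
	for (int i = 0; i < 2; i++) {
		if (posix_memalign(&pages[i], page_size, page_size))
			return 1;
		memset(pages[i], 0, page_size);
	}

	long ret = move_pages(0 /* self */, 2, pages, nodes, status, MPOL_MF_MOVE);
	if (ret < 0) {
		perror("move_pages");
		return 1;
	}
	/*
	 * With this patch applied, a positive return counts pages that could
	 * not be migrated; status[i] is the node the page now resides on,
	 * or a negative errno for that particular page.
	 */
	for (int i = 0; i < 2; i++)
		printf("page %d: status %d\n", i, status[i]);
	printf("non-migrated pages: %ld\n", ret);
	return 0;
}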
+ if (err > 0) + err += nr_pages - i - 1; goto out; + } err = store_status(status, start, current_node, i - start); if (err) goto out; @@ -1644,16 +1657,28 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, */ err = add_page_for_migration(mm, addr, current_node, &pagelist, flags & MPOL_MF_MOVE_ALL); - if (!err) + + if (!err) { + /* The page is already on the target node */ + err = store_status(status, i, current_node, 1); + if (err) + goto out_flush; continue; + } else if (err > 0) { + /* The page is successfully queued for migration */ + continue; + } err = store_status(status, i, err, 1); if (err) goto out_flush; err = do_move_pages_to_node(mm, &pagelist, current_node); - if (err) + if (err) { + if (err > 0) + err += nr_pages - i - 1; goto out; + } if (i > start) { err = store_status(status, start, current_node, i - start); if (err) @@ -1667,9 +1692,16 @@ out_flush: /* Make sure we do not overwrite the existing error */ err1 = do_move_pages_to_node(mm, &pagelist, current_node); + /* + * Don't have to report non-attempted pages here since: + * - If the above loop is done gracefully all pages have been + * attempted. + * - If the above loop is aborted it means a fatal error + * happened, so the error should be returned. + */ if (!err1) err1 = store_status(status, start, current_node, i - start); - if (!err) + if (err >= 0) err = err1; out: return err; diff --git a/mm/mmap.c b/mm/mmap.c index a7d8c84d19b7..e8d3504be68b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm, * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and - * MAP_PRIVATE: - * r: (no) no - * w: (no) no - * x: (yes) yes */ pgprot_t protection_map[16] __ro_after_init = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, @@ -201,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) bool downgraded = false; LIST_HEAD(uf); - brk = untagged_addr(brk); - if (down_write_killable(&mm->mmap_sem)) return -EINTR; @@ -1435,7 +1427,13 @@ unsigned long do_mmap(struct file *file, unsigned long addr, return -EOVERFLOW; /* Too many mappings? */ +#ifdef CONFIG_PID_NS + if (mm->map_count > (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count)) +#else if (mm->map_count > sysctl_max_map_count) +#endif return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure @@ -1589,8 +1587,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, struct file *file = NULL; unsigned long retval; - addr = untagged_addr(addr); - if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); @@ -2730,7 +2726,13 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { +#ifdef CONFIG_PID_NS + if (mm->map_count >= (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count)) +#else if (mm->map_count >= sysctl_max_map_count) +#endif return -ENOMEM; return __split_vma(mm, vma, addr, new_below); @@ -2788,7 +2790,14 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */
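Every map-count check this patch touches (do_mmap(), split_vma(), __do_munmap() here, plus the do_brk_flags(), mremap and nommu sites further down) open-codes the same #ifdef CONFIG_PID_NS ternary. A small helper would keep the call sites readable; current_max_map_count() is an illustrative name, not something the patch defines:

/* sketch only, not part of this patch */
static inline int current_max_map_count(void)
{
#ifdef CONFIG_PID_NS
	/* honor the per-PID-namespace limit when isolation is enabled */
	if (max_map_count_isolated)
		return task_active_pid_ns(current)->max_map_count;
#endif
	return sysctl_max_map_count;
}

Each check would then collapse to a plain if (mm->map_count >= current_max_map_count()) with no per-site preprocessor conditionals.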
+#ifdef CONFIG_PID_NS + if (end < vma->vm_end && mm->map_count >= + (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count)) +#else if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) +#endif return -ENOMEM; error = __split_vma(mm, vma, start, 0); @@ -3033,7 +3042,13 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; +#ifdef CONFIG_PID_NS + if (mm->map_count > (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count)) +#else if (mm->map_count > sysctl_max_map_count) +#endif return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 7d70e5c78f97..7c1b8f67af7b 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -102,14 +102,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ */ static inline void tlb_table_invalidate(struct mmu_gather *tlb) { -#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE - /* - * Invalidate page-table caches used by hardware walkers. Then we still - * need to RCU-sched wait while freeing the pages because software - * walkers can still be in-flight. - */ - tlb_flush_mmu_tlbonly(tlb); -#endif + if (tlb_needs_table_invalidate()) { + /* + * Invalidate page-table caches used by hardware walkers. Then + * we still need to RCU-sched wait while freeing the pages + * because software walkers can still be in-flight. + */ + tlb_flush_mmu_tlbonly(tlb); + } } static void tlb_remove_table_smp_sync(void *arg) diff --git a/mm/mprotect.c b/mm/mprotect.c index 7967825f6d33..95dee88f782b 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, return pages; } +/* + * Used when setting automatic NUMA hinting protection where it is + * critical that a numa hinting PMD is not confused with a bad PMD. + */ +static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) +{ + pmd_t pmdval = pmd_read_atomic(pmd); + + /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier(); +#endif + + if (pmd_none(pmdval)) + return 1; + if (pmd_trans_huge(pmdval)) + return 0; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + return 1; + } + + return 0; +} + static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) @@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, unsigned long this_pages; next = pmd_addr_end(addr, end); - if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) && pmd_none_or_clear_bad(pmd)) + + /* + * Automatic NUMA balancing walks the tables with mmap_sem + * held for read. It's possible for a parallel update to occur + * between pmd_trans_huge() and a pmd_none_or_clear_bad() + * check, leading to a false positive and clearing. + * Hence, it's necessary to atomically read the PMD value + * for all the checks. + */
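The helper above is safe because pmd_read_atomic() takes a single snapshot that every subsequent pmd_none()/pmd_trans_huge()/pmd_bad() test inspects; dereferencing *pmd separately for each test would let a concurrent NUMA-hinting update slip in between them. The same one-snapshot discipline, shown as a self-contained C11 userspace analogy (not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long shared_entry;

/* Racy: two loads; the value can change between them. */
static int racy_check(void)
{
	if (atomic_load(&shared_entry) == 0)	/* load #1 */
		return 1;
	return atomic_load(&shared_entry) & 1;	/* load #2 may see a different value */
}

/* Safe: one load; every test sees the same snapshot. */
static int snapshot_check(void)
{
	unsigned long val = atomic_load(&shared_entry);

	if (val == 0)
		return 1;
	return val & 1;
}

int main(void)
{
	atomic_store(&shared_entry, 2);
	printf("racy=%d snapshot=%d\n", racy_check(), snapshot_check());
	return 0;
}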
+ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) && + pmd_none_or_clear_bad_unless_trans_huge(pmd)) goto next; /* invoke the mmu notifier if the pmd is populated */ diff --git a/mm/mremap.c b/mm/mremap.c index 1fc8a29fbe3f..be399486dc81 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -336,7 +336,13 @@ static unsigned long move_vma(struct vm_area_struct *vma, * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ +#ifdef CONFIG_PID_NS + if (mm->map_count >= (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count) - 3) +#else if (mm->map_count >= sysctl_max_map_count - 3) +#endif return -ENOMEM; /* @@ -531,7 +537,11 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, * Check whether current map count plus 2 still leads us to 4 maps below * the threshold, otherwise return -ENOMEM here to be on the safe side. */ +#ifdef CONFIG_PID_NS + if ((mm->map_count + 2) >= (max_map_count_isolated ? + task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count) - 3) +#else if ((mm->map_count + 2) >= sysctl_max_map_count - 3) +#endif return -ENOMEM; ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); @@ -607,7 +617,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, LIST_HEAD(uf_unmap); addr = untagged_addr(addr); - new_addr = untagged_addr(new_addr); if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) return ret; diff --git a/mm/nommu.c b/mm/nommu.c index 99b7ec318824..8800fc689061 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -359,10 +359,14 @@ void vm_unmap_aliases(void) EXPORT_SYMBOL_GPL(vm_unmap_aliases); /* - * Implement a stub for vmalloc_sync_all() if the architecture chose not to - * have one. + * Implement a stub for vmalloc_sync_[un]mappings() if the architecture + * chose not to have one. */ -void __weak vmalloc_sync_all(void) +void __weak vmalloc_sync_mappings(void) +{ +} + +void __weak vmalloc_sync_unmappings(void) { } @@ -1388,7 +1392,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_file) return -ENOMEM; +#ifdef CONFIG_PID_NS + if (mm->map_count >= (max_map_count_isolated ?
+ task_active_pid_ns(current)->max_map_count : + sysctl_max_map_count)) +#else if (mm->map_count >= sysctl_max_map_count) +#endif return -ENOMEM; region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 71e3acea7817..d58c481b3df8 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message) K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES)), from_kuid(&init_user_ns, task_uid(victim)), - mm_pgtables_bytes(mm), victim->signal->oom_score_adj); + mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); task_unlock(victim); /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 50055d2e4ea8..daeae783ddf5 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -88,7 +88,7 @@ int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ -int vm_dirty_ratio = 20; +int vm_dirty_ratio = 40; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of @@ -201,11 +201,11 @@ static void wb_min_max_ratio(struct bdi_writeback *wb, if (this_bw < tot_bw) { if (min) { min *= this_bw; - do_div(min, tot_bw); + min = div64_ul(min, tot_bw); } if (max < 100) { max *= this_bw; - do_div(max, tot_bw); + max = div64_ul(max, tot_bw); } } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f391c0c4ed1d..266a3fb73b70 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -694,34 +694,27 @@ void prep_compound_page(struct page *page, unsigned int order) #ifdef CONFIG_DEBUG_PAGEALLOC unsigned int _debug_guardpage_minorder; -#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT -DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); -#else +bool _debug_pagealloc_enabled_early __read_mostly + = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); +EXPORT_SYMBOL(_debug_pagealloc_enabled_early); DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); -#endif EXPORT_SYMBOL(_debug_pagealloc_enabled); DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); static int __init early_debug_pagealloc(char *buf) { - bool enable = false; - - if (kstrtobool(buf, &enable)) - return -EINVAL; - - if (enable) - static_branch_enable(&_debug_pagealloc_enabled); - - return 0; + return kstrtobool(buf, &_debug_pagealloc_enabled_early); } early_param("debug_pagealloc", early_debug_pagealloc); -static void init_debug_guardpage(void) +void init_debug_pagealloc(void) { if (!debug_pagealloc_enabled()) return; + static_branch_enable(&_debug_pagealloc_enabled); + if (!debug_guardpage_minorder()) return; @@ -1186,7 +1179,7 @@ static __always_inline bool free_pages_prepare(struct page *page, */ arch_free_page(page, order); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) kernel_map_pages(page, 1 << order, 0); kasan_free_nondeferred_pages(page, order); @@ -1207,7 +1200,7 @@ static bool free_pcp_prepare(struct page *page) static bool bulkfree_pcp_prepare(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return free_pages_check(page); else return false; @@ -1221,7 +1214,7 @@ static bool bulkfree_pcp_prepare(struct page *page) */ static bool free_pcp_prepare(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return free_pages_prepare(page, 0, true); else return free_pages_prepare(page, 0, false); @@ -1973,10 +1966,6 @@ void __init page_alloc_init_late(void) for_each_populated_zone(zone) set_zone_contiguous(zone); - -#ifdef CONFIG_DEBUG_PAGEALLOC - 
init_debug_guardpage(); -#endif } #ifdef CONFIG_CMA @@ -2106,7 +2095,7 @@ static inline bool free_pages_prezeroed(void) */ static inline bool check_pcp_refill(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return check_new_page(page); else return false; @@ -2128,7 +2117,7 @@ static inline bool check_pcp_refill(struct page *page) } static inline bool check_new_pcp(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return check_new_page(page); else return false; @@ -2155,7 +2144,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order, set_page_refcounted(page); arch_alloc_page(page, order); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) kernel_map_pages(page, 1 << order, 1); kasan_alloc_pages(page, order); kernel_poison_pages(page, 1 << order, 1); @@ -6944,7 +6933,8 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn) * This function also addresses a similar issue where struct pages are left * uninitialized because the physical address range is not covered by * memblock.memory or memblock.reserved. That could happen when memblock - * layout is manually configured via memmap=. + * layout is manually configured via memmap=, or when the highest physical + * address (max_pfn) does not end on a section boundary. */ void __init zero_resv_unavail(void) { @@ -6962,7 +6952,16 @@ void __init zero_resv_unavail(void) pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start)); next = end; } - pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn); + + /* + * Early sections always have a fully populated memmap for the whole + * section - see pfn_valid(). If the last section has holes at the + * end and that section is marked "online", the memmap will be + * considered initialized. Make sure that memmap has a well defined + * state. + */ + pgcnt += zero_pfn_range(PFN_DOWN(next), + round_up(max_pfn, PAGES_PER_SECTION)); /* * Struct pages that do not have backing memory. 
This could be because @@ -8550,6 +8549,59 @@ void zone_pcp_reset(struct zone *zone) local_irq_restore(flags); } +/* + * Returns a number that is positive if the page cache is above + * the set limit. + */ +unsigned long pagecache_over_limit(void) +{ + unsigned long should_reclaim_pages = 0; + unsigned long overlimit_pages = 0; + unsigned long delta_pages = 0; + unsigned long pgcache_lru_pages = 0; + /* We only want to limit unmapped and non-shmem page cache pages; + * normally all shmem pages are mapped as well. */ + unsigned long pgcache_pages = global_node_page_state(NR_FILE_PAGES) + - max_t(unsigned long, + global_node_page_state(NR_FILE_MAPPED), + global_node_page_state(NR_SHMEM)); + /* We certainly can't free more than what's on the LRU lists + * minus the dirty ones. */ + if (vm_pagecache_ignore_slab) + pgcache_lru_pages = global_node_page_state(NR_ACTIVE_FILE) + + global_node_page_state(NR_INACTIVE_FILE); + else + pgcache_lru_pages = global_node_page_state(NR_ACTIVE_FILE) + + global_node_page_state(NR_INACTIVE_FILE) + + global_node_page_state(NR_SLAB_RECLAIMABLE) + + global_node_page_state(NR_SLAB_UNRECLAIMABLE); + + if (vm_pagecache_ignore_dirty != 0) + pgcache_lru_pages -= global_node_page_state(NR_FILE_DIRTY) + /vm_pagecache_ignore_dirty; + /* Paranoia */ + if (unlikely(pgcache_lru_pages > LONG_MAX)) + return 0; + + /* Limit it to 94% of LRU (not all of it might be unmapped) */ + pgcache_lru_pages -= pgcache_lru_pages/16; + if (vm_pagecache_ignore_slab) + pgcache_pages = min_t(unsigned long, pgcache_pages, pgcache_lru_pages); + else + pgcache_pages = pgcache_lru_pages; + + /* + * delta_pages: we should reclaim at least 2% more pages than overlimit_pages; + * the value comes from /proc/sys/vm/pagecache_limit_reclaim_pages. + * should_reclaim_pages: the number of pages we will actually reclaim, + * which must not exceed pgcache_pages. + */ + if (pgcache_pages > vm_pagecache_limit_pages) { + overlimit_pages = pgcache_pages - vm_pagecache_limit_pages; + delta_pages = vm_pagecache_limit_reclaim_pages - vm_pagecache_limit_pages; + should_reclaim_pages = min_t(unsigned long, delta_pages, vm_pagecache_limit_pages) + overlimit_pages; + return should_reclaim_pages; + } + return 0; +} + #ifdef CONFIG_MEMORY_HOTREMOVE /* * All pages in the range must be in a single zone and isolated diff --git a/mm/shmem.c b/mm/shmem.c index 220be9fa2c41..3de91bd95b56 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1254,6 +1254,17 @@ int shmem_unuse(unsigned int type, bool frontswap, if (list_empty(&shmem_swaplist)) return 0; + /* + * Try to shrink the page cache proactively even though + * the page might already be in the page cache, making the + * shrinking unnecessary; this is much easier than dropping + * the lock in shmem_unuse_inode before add_to_page_cache_lru. + * GFP_NOWAIT makes sure that we do not shrink when adding + * to the page cache. + */
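To see what pagecache_over_limit(), added in page_alloc.c above and consulted here in shmem_unuse(), actually computes, the same arithmetic as a standalone program with invented counter values (vm_pagecache_ignore_slab=1 is assumed and the dirty-page adjustment is omitted for brevity; all numbers are made up):

#include <stdio.h>

int main(void)
{
	/* invented sample values, in pages */
	unsigned long nr_file_pages  = 500000;	/* NR_FILE_PAGES */
	unsigned long nr_file_mapped = 80000;	/* NR_FILE_MAPPED */
	unsigned long nr_shmem       = 50000;	/* NR_SHMEM */
	unsigned long nr_lru_file    = 450000;	/* active + inactive file LRU */
	unsigned long limit_pages    = 300000;	/* vm_pagecache_limit_pages */
	unsigned long reclaim_pages  = 306000;	/* vm_pagecache_limit_reclaim_pages */

	/* unmapped, non-shmem page cache */
	unsigned long pgcache_pages = nr_file_pages -
		(nr_file_mapped > nr_shmem ? nr_file_mapped : nr_shmem);
	/* same ~94% haircut as the kernel function */
	unsigned long lru_pages = nr_lru_file - nr_lru_file / 16;

	if (pgcache_pages > lru_pages)
		pgcache_pages = lru_pages;

	if (pgcache_pages > limit_pages) {
		unsigned long overlimit = pgcache_pages - limit_pages;
		unsigned long delta = reclaim_pages - limit_pages;
		unsigned long target =
			(delta < limit_pages ? delta : limit_pages) + overlimit;
		printf("reclaim target: %lu pages\n", target);	/* 126000 here */
	} else {
		printf("under the limit, nothing to do\n");
	}
	return 0;
}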
+ if (unlikely(vm_pagecache_limit_pages) && pagecache_over_limit() > 0) + shrink_page_cache(GFP_KERNEL, NULL); + mutex_lock(&shmem_swaplist_mutex); list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { if (!info->swapped) { @@ -1284,6 +1295,9 @@ int shmem_unuse(unsigned int type, bool frontswap, } mutex_unlock(&shmem_swaplist_mutex); + if (!error && (unlikely(vm_pagecache_limit_pages) && pagecache_over_limit() > 0)) + shrink_page_cache(GFP_KERNEL, NULL); + return error; } @@ -2022,16 +2036,14 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf) shmem_falloc->waitq && vmf->pgoff >= shmem_falloc->start && vmf->pgoff < shmem_falloc->next) { + struct file *fpin; wait_queue_head_t *shmem_falloc_waitq; DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); ret = VM_FAULT_NOPAGE; - if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && - !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { - /* It's polite to up mmap_sem if we can */ - up_read(&vma->vm_mm->mmap_sem); + fpin = maybe_unlock_mmap_for_io(vmf, NULL); + if (fpin) ret = VM_FAULT_RETRY; - } shmem_falloc_waitq = shmem_falloc->waitq; prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, @@ -2049,6 +2061,9 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf) spin_lock(&inode->i_lock); finish_wait(shmem_falloc_waitq, &shmem_fault_wait); spin_unlock(&inode->i_lock); + + if (fpin) + fput(fpin); return ret; } spin_unlock(&inode->i_lock); @@ -2105,9 +2120,10 @@ unsigned long shmem_get_unmapped_area(struct file *file, /* * Our priority is to support MAP_SHARED mapped hugely; * and support MAP_PRIVATE mapped hugely too, until it is COWed. - * But if caller specified an address hint, respect that as before. + * But if caller specified an address hint and we allocated area there + * successfully, respect that as before. */ - if (uaddr) + if (uaddr == addr) return addr; if (shmem_huge != SHMEM_HUGE_FORCE) { @@ -2141,7 +2157,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (inflated_len < len) return addr; - inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); + inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); if (IS_ERR_VALUE(inflated_addr)) return addr; if (inflated_addr & ~PAGE_MASK) @@ -2213,11 +2229,14 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) return -EPERM; /* - * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED - * read-only mapping, take care to not allow mprotect to revert - * protections. + * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as + * MAP_SHARED and read-only, take care to not allow mprotect to + * revert protections on such mappings. Do this only for shared + * mappings. For private mappings, don't need to mask + * VM_MAYWRITE as we still want them to be COW-writable.
*/ - vma->vm_flags &= ~(VM_MAYWRITE); + if (vma->vm_flags & VM_SHARED) + vma->vm_flags &= ~(VM_MAYWRITE); } file_accessed(file); @@ -2742,7 +2761,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, } shmem_falloc.waitq = &shmem_falloc_waitq; - shmem_falloc.start = unmap_start >> PAGE_SHIFT; + shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; spin_lock(&inode->i_lock); inode->i_private = &shmem_falloc; diff --git a/mm/slab.c b/mm/slab.c index 66e5d8032bae..d1d7624cec4a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1415,7 +1415,7 @@ static void kmem_rcu_free(struct rcu_head *head) #if DEBUG static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) { - if (debug_pagealloc_enabled() && OFF_SLAB(cachep) && + if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) && (cachep->size % PAGE_SIZE) == 0) return true; @@ -2007,7 +2007,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) * to check size >= 256. It guarantees that all necessary small * sized slab is initialized in current slab initialization sequence. */ - if (debug_pagealloc_enabled() && (flags & SLAB_POISON) && + if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) && size >= 256 && cachep->object_size > cache_line_size()) { if (size < PAGE_SIZE || size % PAGE_SIZE == 0) { size_t tmp_size = ALIGN(size, PAGE_SIZE); diff --git a/mm/slab_common.c b/mm/slab_common.c index f9fb27b4c843..ade6c257d4b4 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -903,7 +903,20 @@ static void flush_memcg_workqueue(struct kmem_cache *s) * deactivates the memcg kmem_caches through workqueue. Make sure all * previous workitems on workqueue are processed. */ - flush_workqueue(memcg_kmem_cache_wq); + if (likely(memcg_kmem_cache_wq)) + flush_workqueue(memcg_kmem_cache_wq); + + /* + * If we're racing with children kmem_cache deactivation, it might + * take another rcu grace period to complete their destruction. + * At this moment the corresponding percpu_ref_kill() call should be + * done, but it might take another rcu grace period to complete + * switching to the atomic mode. + * Please, note that we check without grabbing the slab_mutex. It's safe + * because at this moment the children list can't grow. + */ + if (!list_empty(&s->memcg_params.children)) + rcu_barrier(); } #else static inline int shutdown_memcg_caches(struct kmem_cache *s) diff --git a/mm/slub.c b/mm/slub.c index e72e802fc569..af44807d5b05 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -261,7 +261,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, * freepointer to be restored incorrectly. 
*/ return (void *)((unsigned long)ptr ^ s->random ^ - (unsigned long)kasan_reset_tag((void *)ptr_addr)); + swab((unsigned long)kasan_reset_tag((void *)ptr_addr))); #else return ptr; #endif @@ -290,7 +290,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) unsigned long freepointer_addr; void *p; - if (!debug_pagealloc_enabled()) + if (!debug_pagealloc_enabled_static()) return get_freepointer(s, object); freepointer_addr = (unsigned long)object + s->offset; @@ -1953,8 +1953,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, if (node == NUMA_NO_NODE) searchnode = numa_mem_id(); - else if (!node_present_pages(node)) - searchnode = node_to_mem_node(node); object = get_partial_node(s, get_node(s, searchnode), c, flags); if (object || node != NUMA_NO_NODE) @@ -2543,17 +2541,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, struct page *page; page = c->page; - if (!page) + if (!page) { + /* + * if the node is not online or has no normal memory, just + * ignore the node constraint + */ + if (unlikely(node != NUMA_NO_NODE && + !node_state(node, N_NORMAL_MEMORY))) + node = NUMA_NO_NODE; goto new_slab; + } redo: if (unlikely(!node_match(page, node))) { - int searchnode = node; - - if (node != NUMA_NO_NODE && !node_present_pages(node)) - searchnode = node_to_mem_node(node); - - if (unlikely(!node_match(page, searchnode))) { + /* + * same as above but node_match() being false already + * implies node != NUMA_NO_NODE + */ + if (!node_state(node, N_NORMAL_MEMORY)) { + node = NUMA_NO_NODE; + goto redo; + } else { stat(s, ALLOC_NODE_MISMATCH); deactivate_slab(s, page, c->freelist, c); goto new_slab; @@ -2977,11 +2985,13 @@ redo: barrier(); if (likely(page == c->page)) { - set_freepointer(s, tail_obj, c->freelist); + void **freelist = READ_ONCE(c->freelist); + + set_freepointer(s, tail_obj, freelist); if (unlikely(!this_cpu_cmpxchg_double( s->cpu_slab->freelist, s->cpu_slab->tid, - c->freelist, tid, + freelist, tid, head, next_tid(tid)))) { note_cmpxchg_failure("slab_free", s, tid); @@ -3154,6 +3164,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void *object = c->freelist; if (unlikely(!object)) { + /* + * We may have removed an object from c->freelist using + * the fastpath in the previous iteration; in that case, + * c->tid has not been bumped yet. + * Since ___slab_alloc() may reenable interrupts while + * allocating memory, we should bump c->tid now. 
+ */ + c->tid = next_tid(c->tid); + /* * Invoking slow path likely have side-effect * of re-populating per CPU c->freelist diff --git a/mm/sparse.c b/mm/sparse.c index f6891c1992b1..78bbecd904c3 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -647,7 +647,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP -static struct page *populate_section_memmap(unsigned long pfn, +static struct page * __meminit populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap) { return __populate_section_memmap(pfn, nr_pages, nid, altmap); } @@ -669,7 +669,7 @@ static void free_map_bootmem(struct page *memmap) vmemmap_free(start, end, NULL); } #else -struct page *populate_section_memmap(unsigned long pfn, +struct page * __meminit populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap) { struct page *page, *ret; @@ -742,6 +742,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages, struct mem_section *ms = __pfn_to_section(pfn); bool section_is_early = early_section(ms); struct page *memmap = NULL; + bool empty; unsigned long *subsection_map = ms->usage ? &ms->usage->subsection_map[0] : NULL; @@ -772,21 +773,37 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages, * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified */ bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION); - if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) { + empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION); + if (empty) { unsigned long section_nr = pfn_to_section_nr(pfn); - if (!section_is_early) { + /* + * When removing an early section, the usage map is kept (as the + * usage maps of other sections fall into the same page). It + * will be re-used when re-adding the section - which is then no + * longer an early section. If the usage map is PageReserved, it + * was allocated during boot. + */ + if (!PageReserved(virt_to_page(ms->usage))) { kfree(ms->usage); ms->usage = NULL; } memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); - ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr); + /* + * Mark the section invalid so that valid_section() + * returns false. This prevents code from dereferencing + * the ms->usage array. + */ + ms->section_mem_map &= ~SECTION_HAS_MEM_MAP; } if (section_is_early && memmap) free_map_bootmem(memmap); else depopulate_section_memmap(pfn, nr_pages, altmap); + + if (empty) + ms->section_mem_map = (unsigned long)NULL; } static struct page * __meminit section_activate(int nid, unsigned long pfn, @@ -877,7 +894,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn, * Poison uninitialized struct pages in order to catch invalid flags * combinations.
*/ - page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages); + page_init_poison(memmap, sizeof(struct page) * nr_pages); ms = __nr_to_section(section_nr); set_section_nid(section_nr, nid); diff --git a/mm/swapfile.c b/mm/swapfile.c index dab43523afdd..891a3ef48651 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2892,10 +2892,6 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) p->bdev = inode->i_sb->s_bdev; } - inode_lock(inode); - if (IS_SWAPFILE(inode)) - return -EBUSY; - return 0; } @@ -3150,17 +3146,22 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) mapping = swap_file->f_mapping; inode = mapping->host; - /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */ error = claim_swapfile(p, inode); if (unlikely(error)) goto bad_swap; + inode_lock(inode); + if (IS_SWAPFILE(inode)) { + error = -EBUSY; + goto bad_swap_unlock_inode; + } + /* * Read the swap header. */ if (!mapping->a_ops->readpage) { error = -EINVAL; - goto bad_swap; + goto bad_swap_unlock_inode; } page = read_mapping_page(mapping, 0, swap_file); if (IS_ERR(page)) { @@ -3172,14 +3173,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) maxpages = read_swap_header(p, swap_header, inode); if (unlikely(!maxpages)) { error = -EINVAL; - goto bad_swap; + goto bad_swap_unlock_inode; } /* OK, set up the swap map and apply the bad block list */ swap_map = vzalloc(maxpages); if (!swap_map) { error = -ENOMEM; - goto bad_swap; + goto bad_swap_unlock_inode; } if (bdi_cap_stable_pages_required(inode_to_bdi(inode))) @@ -3204,7 +3205,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) GFP_KERNEL); if (!cluster_info) { error = -ENOMEM; - goto bad_swap; + goto bad_swap_unlock_inode; } for (ci = 0; ci < nr_cluster; ci++) @@ -3213,7 +3214,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->percpu_cluster = alloc_percpu(struct percpu_cluster); if (!p->percpu_cluster) { error = -ENOMEM; - goto bad_swap; + goto bad_swap_unlock_inode; } for_each_possible_cpu(cpu) { struct percpu_cluster *cluster; @@ -3227,13 +3228,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = swap_cgroup_swapon(p->type, maxpages); if (error) - goto bad_swap; + goto bad_swap_unlock_inode; nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, cluster_info, maxpages, &span); if (unlikely(nr_extents < 0)) { error = nr_extents; - goto bad_swap; + goto bad_swap_unlock_inode; } /* frontswap enabled? 
set up bit-per-page map for frontswap */ if (IS_ENABLED(CONFIG_FRONTSWAP)) @@ -3273,7 +3274,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = init_swap_address_space(p->type, maxpages); if (error) - goto bad_swap; + goto bad_swap_unlock_inode; /* * Flush any pending IO and dirty mappings before we start using this @@ -3283,7 +3284,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = inode_drain_writes(inode); if (error) { inode->i_flags &= ~S_SWAPFILE; - goto bad_swap; + goto bad_swap_unlock_inode; } mutex_lock(&swapon_mutex); @@ -3308,6 +3309,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = 0; goto out; +bad_swap_unlock_inode: + inode_unlock(inode); bad_swap: free_percpu(p->percpu_cluster); p->percpu_cluster = NULL; @@ -3315,6 +3318,7 @@ bad_swap: set_blocksize(p->bdev, p->old_block_size); blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); } + inode = NULL; destroy_swap_extents(p); swap_cgroup_swapoff(p->type); spin_lock(&swap_lock); @@ -3326,13 +3330,8 @@ bad_swap: kvfree(frontswap_map); if (inced_nr_rotate_swap) atomic_dec(&nr_rotate_swap); - if (swap_file) { - if (inode) { - inode_unlock(inode); - inode = NULL; - } + if (swap_file) filp_close(swap_file, NULL); - } out: if (page && !IS_ERR(page)) { kunmap(page); diff --git a/mm/util.c b/mm/util.c index 3ad6db9a722e..38a90a8f96f6 100644 --- a/mm/util.c +++ b/mm/util.c @@ -703,6 +703,7 @@ EXPORT_SYMBOL_GPL(__page_mapcount); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio __read_mostly = 50; unsigned long sysctl_overcommit_kbytes __read_mostly; +int max_map_count_isolated = 1; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a3c70e275f4e..7d05834e594c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1259,7 +1259,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) * First make sure the mappings are removed from all page-tables * before they are freed. */ - vmalloc_sync_all(); + vmalloc_sync_unmappings(); /* * TODO: to calculate a flush range without looping. @@ -1349,7 +1349,7 @@ static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); unmap_vmap_area(va); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); free_vmap_area_noflush(va); @@ -1647,7 +1647,7 @@ static void vb_free(const void *addr, unsigned long size) vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range((unsigned long)addr, (unsigned long)addr + size); @@ -3050,16 +3050,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, EXPORT_SYMBOL(remap_vmalloc_range); /* - * Implement a stub for vmalloc_sync_all() if the architecture chose not to - * have one. + * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose + * not to have one. * * The purpose of this function is to make sure the vmalloc area * mappings are identical in all page-tables in the system. 
*/ -void __weak vmalloc_sync_all(void) { } +void __weak vmalloc_sync_mappings(void) { } +void __weak vmalloc_sync_unmappings(void) +{ +} static int f(pte_t *pte, unsigned long addr, void *data) { diff --git a/mm/vmscan.c b/mm/vmscan.c index ee4eecc7e1c2..d7ab98cd98ab 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -165,6 +165,15 @@ struct scan_control { * From 0 .. 100. Higher means more swappy. */ int vm_swappiness = 60; +unsigned long vm_pagecache_limit_pages __read_mostly = 0; +unsigned long vm_pagecache_limit_reclaim_pages = 0; +int vm_pagecache_limit_ratio __read_mostly = 0; +int vm_pagecache_limit_reclaim_ratio __read_mostly = 0; +unsigned int vm_pagecache_ignore_dirty __read_mostly = 1; +unsigned int vm_pagecache_limit_async __read_mostly = 0; +unsigned int vm_pagecache_ignore_slab __read_mostly = 1; +static struct task_struct *kpclimitd = NULL; +static bool kpclimitd_context = false; /* * The total number of pages which are beyond the high watermark within all * zones. @@ -422,7 +431,7 @@ void register_shrinker_prepared(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG if (shrinker->flags & SHRINKER_MEMCG_AWARE) idr_replace(&shrinker_idr, shrinker, shrinker->id); #endif @@ -2530,10 +2539,13 @@ out: /* * Scan types proportional to swappiness and * their relative recent reclaim efficiency. - * Make sure we don't miss the last page - * because of a round-off error. + * Make sure we don't miss the last page on + * the offlined memory cgroups because of a + * round-off error. */ - scan = DIV64_U64_ROUND_UP(scan * fraction[file], + scan = mem_cgroup_online(memcg) ? + div64_u64(scan * fraction[file], denominator) : + DIV64_U64_ROUND_UP(scan * fraction[file], denominator); break; case SCAN_FILE: @@ -3559,6 +3571,8 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, return sc->nr_scanned >= sc->nr_to_reclaim; } +static void __shrink_page_cache(gfp_t mask); + /* * For kswapd, balance_pgdat() will reclaim pages across a node from zones * that are eligible for use by the caller until at least one zone is @@ -3594,6 +3608,10 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) count_vm_event(PAGEOUTRUN); + /* this reclaims from all zones, so don't count it toward sc.nr_reclaimed */ + if (unlikely(vm_pagecache_limit_pages) && pagecache_over_limit() > 0) + __shrink_page_cache(GFP_KERNEL); + /* * Account for the reclaim boost. Note that the zone boost is left in * place so that parallel allocations that are near the watermark will @@ -3844,6 +3862,12 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); } + /* We do not need to loop_again if we have not achieved our + * pagecache target (i.e. pagecache_over_limit() > 0) because + * the limit will be checked next time a page is added to the page + * cache. This might cause a short stall but we should rather not + * keep kswapd awake. + */ /* * After a short sleep, check if it was a premature sleep. If not, then * go fully to sleep until explicitly woken up. @@ -4012,6 +4036,27 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, wake_up_interruptible(&pgdat->kswapd_wait); } +/* + * The reclaimable count would be mostly accurate. + * The less reclaimable pages may be + * - mlocked pages, which will be moved to the unevictable list when encountered + * - mapped pages, which may require several passes to be reclaimed + * - dirty pages, which are not "instantly" reclaimable + */
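The estimate made by the function that follows (global_reclaimable_pages()) can be approximated from userspace with the standard /proc/vmstat counters; this sketch just reproduces the two sums, leaving the "only count anon when swap is free" decision to the caller:

#include <stdio.h>
#include <string.h>

/* Read one counter from /proc/vmstat (standard counter names). */
static unsigned long vmstat_val(const char *key)
{
	char line[128];
	unsigned long val = 0;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		char name[64];
		unsigned long v;

		if (sscanf(line, "%63s %lu", name, &v) == 2 &&
		    strcmp(name, key) == 0) {
			val = v;
			break;
		}
	}
	fclose(f);
	return val;
}

int main(void)
{
	unsigned long file = vmstat_val("nr_active_file") +
			     vmstat_val("nr_inactive_file");
	unsigned long anon = vmstat_val("nr_active_anon") +
			     vmstat_val("nr_inactive_anon");

	/* anon pages only count as reclaimable when swap space is available */
	printf("file LRU: %lu pages, anon LRU: %lu pages\n", file, anon);
	return 0;
}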
+ +static unsigned long global_reclaimable_pages(void) +{ + unsigned long nr; + + nr = global_node_page_state(NR_ACTIVE_FILE) + + global_node_page_state(NR_INACTIVE_FILE); + + if (get_nr_swap_pages() > 0) + nr += global_node_page_state(NR_ACTIVE_ANON) + + global_node_page_state(NR_INACTIVE_ANON); + return nr; +} + #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of @@ -4051,6 +4096,326 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) } #endif /* CONFIG_HIBERNATION */ +/* + * Returns non-zero if the lock has been acquired, and zero if somebody + * else is holding the lock. + */ +static int pagecache_reclaim_lock_zone(struct zone *zone) +{ + return atomic_add_unless(&zone->pagecache_reclaim, 1, 1); +} + +static void pagecache_reclaim_unlock_zone(struct zone *zone) +{ + BUG_ON(atomic_dec_return(&zone->pagecache_reclaim)); +} + +/* + * Potential page cache reclaimers who are not able to take + * reclaim lock on any zone are sleeping on this waitqueue. + * So this is basically a congestion wait queue for them. + */ +DECLARE_WAIT_QUEUE_HEAD(pagecache_reclaim_wq); +DECLARE_WAIT_QUEUE_HEAD(kpagecache_limitd_wq); + +/* + * Similar to shrink_zone but it has a different consumer - pagecache limit + * so we cannot reuse the original function - and we do not want to clobber + * that code path so we have to live with this code duplication. + * + * In short, this simply scans through the given LRU for all cgroups in the + * given zone. + * + * Returns true if we managed to cumulatively reclaim (via nr_reclaimed) + * the given nr_to_reclaim pages, false otherwise. The caller knows that + * it doesn't have to touch other zones if the target was hit already. + * + * DO NOT USE OUTSIDE of shrink_all_zones unless you have a really really + * really good reason. + */ +static bool shrink_zone_per_memcg(struct zone *zone, enum lru_list lru, + unsigned long nr_to_scan, unsigned long nr_to_reclaim, + unsigned long *nr_reclaimed, struct scan_control *sc) +{ + struct mem_cgroup *root = sc->target_mem_cgroup; + struct mem_cgroup *memcg; + struct mem_cgroup_reclaim_cookie reclaim = { + .pgdat = zone->zone_pgdat, + .priority = sc->priority, + }; + + memcg = mem_cgroup_iter(root, NULL, &reclaim); + do { + struct lruvec *lruvec; + + lruvec = mem_cgroup_lruvec(zone->zone_pgdat, memcg); + *nr_reclaimed += shrink_list(lru, nr_to_scan, lruvec, sc); + if (*nr_reclaimed >= nr_to_reclaim) { + mem_cgroup_iter_break(root, memcg); + return true; + } + memcg = mem_cgroup_iter(root, memcg, &reclaim); + } while (memcg); + + return false; +} + +/* + * Tries to reclaim 'nr_pages' pages from LRU lists system-wide, for the + * given pass. + * + * For pass >= 3 we also try to shrink LRU lists that contain only a few pages + * + * Returns the number of scanned zones. + */ +static int shrink_all_zones(unsigned long nr_pages, int pass, + struct scan_control *sc) +{ + struct zone *zone; + unsigned long nr_reclaimed = 0; + unsigned int nr_locked_zones = 0; + DEFINE_WAIT(wait); + + prepare_to_wait(&pagecache_reclaim_wq, &wait, TASK_INTERRUPTIBLE); + + for_each_populated_zone(zone) { + enum lru_list lru; + + /* + * Back off if somebody is already reclaiming this zone + * for the pagecache reclaim. + */
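pagecache_reclaim_lock_zone() above implements a per-zone trylock out of atomic_add_unless(): the counter can only move from 0 to 1, so each zone has at most one pagecache reclaimer, and pagecache_reclaim_unlock_zone() BUG()s if the counter would ever go negative. The equivalent pattern in portable C11, as a runnable userspace sketch rather than the kernel's atomic API:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int zone_reclaim;	/* 0 = free, 1 = owned */

/* returns non-zero if we took ownership, 0 if someone else holds it */
static int reclaim_trylock(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&zone_reclaim, &expected, 1);
}

static void reclaim_unlock(void)
{
	/* mirrors BUG_ON(atomic_dec_return(...)): the count must drop back to 0 */
	assert(atomic_fetch_sub(&zone_reclaim, 1) == 1);
}

int main(void)
{
	printf("first:  %d\n", reclaim_trylock());	/* 1: acquired */
	printf("second: %d\n", reclaim_trylock());	/* 0: already held */
	reclaim_unlock();
	return 0;
}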
+ if (!pagecache_reclaim_lock_zone(zone)) + continue; + + /* + * This reclaimer might scan a zone so it will never + * sleep on pagecache_reclaim_wq + */ + finish_wait(&pagecache_reclaim_wq, &wait); + nr_locked_zones++; + + for_each_evictable_lru(lru) { + enum zone_stat_item ls = NR_ZONE_LRU_BASE + lru; + unsigned long lru_pages = zone_page_state(zone, ls); + + /* For pass = 0, we don't shrink the active list */ + if (pass == 0 && (lru == LRU_ACTIVE_ANON || + lru == LRU_ACTIVE_FILE)) + continue; + + /* Original code relied on nr_saved_scan which is no + * longer present so we are just considering LRU pages. + * This means that the zone has to have quite large + * LRU list for default priority and minimum nr_pages + * size (8*SWAP_CLUSTER_MAX). In the end we will tend + * to reclaim more from large zones relative to small ones. + * This should be OK because shrink_page_cache is called + * when we are getting into a short-memory condition, so + * LRUs tend to be large. + */ + if (((lru_pages >> sc->priority) + 1) >= nr_pages || pass >= 3) { + unsigned long nr_to_scan; + + nr_to_scan = min(nr_pages, lru_pages); + + /* + * A bit of a hack but the code has always been + * updating sc->nr_reclaimed once per shrink_all_zones + * rather than accumulating it for all calls to shrink + * lru. This costs us an additional argument to + * shrink_zone_per_memcg but well... + * + * Let's stick with this for bug-to-bug compatibility + */ + while (nr_to_scan > 0) { + /* shrink_list takes lru_lock with IRQ off so we + * should be careful about really huge nr_to_scan + */ + unsigned long batch = min_t(unsigned long, nr_to_scan, SWAP_CLUSTER_MAX); + + if (shrink_zone_per_memcg(zone, lru, + batch, nr_pages, &nr_reclaimed, sc)) { + pagecache_reclaim_unlock_zone(zone); + goto out_wakeup; + } + nr_to_scan -= batch; + } + } + } + pagecache_reclaim_unlock_zone(zone); + } + + /* + * We have to go to sleep because all the zones are already reclaimed. + * One of the reclaimers will wake us up or __shrink_page_cache will + * do it if there is nothing to be done. + */ + if (!nr_locked_zones) { + if (!kpclimitd_context) + schedule(); + finish_wait(&pagecache_reclaim_wq, &wait); + goto out; + } + +out_wakeup: + wake_up_interruptible(&pagecache_reclaim_wq); + sc->nr_reclaimed += nr_reclaimed; +out: + return nr_locked_zones; +} + +/* + * Function to shrink the page cache + * + * This function calculates the number of pages (nr_pages) the page + * cache is over its limit and shrinks the page cache accordingly. + * + * The maximum number of pages the page cache shrinks in one call of + * this function is limited to SWAP_CLUSTER_MAX pages. Therefore it may + * require a number of calls to actually reach the vm_pagecache_limit_kb. + * + * This function is similar to shrink_all_memory, except that it may never + * swap out mapped pages and only does four passes. + */ +static void __shrink_page_cache(gfp_t mask) +{ + unsigned long ret = 0; + int pass = 0; + struct reclaim_state reclaim_state; + struct scan_control sc = { + .gfp_mask = mask, + .may_swap = 0, + .priority = DEF_PRIORITY, + .may_unmap = 0, + .may_writepage = 0, + .target_mem_cgroup = NULL, + .reclaim_idx = MAX_NR_ZONES, + }; + struct reclaim_state *old_rs = current->reclaim_state; + long nr_pages; + + /* We might sleep during direct reclaim, so calling this from + * atomic context is certainly a bug. + */ + BUG_ON(!(mask & __GFP_RECLAIM)); + +retry: + /* How many pages are we over the limit? */ + nr_pages = pagecache_over_limit(); + + /* + * Return early if there's no work to do.
+ * Wake up reclaimers that couldn't scan any zone due to congestion. + * There is apparently nothing to do so they do not have to sleep. + * This makes sure that no sleeping reclaimer will stay behind. + * Allow breaching the limit if the task is on the way out. + */ + if (nr_pages <= 0 || fatal_signal_pending(current)) { + wake_up_interruptible(&pagecache_reclaim_wq); + goto out; + } + + /* But do a few at least */ + nr_pages = max_t(unsigned long, nr_pages, 8*SWAP_CLUSTER_MAX); + + current->reclaim_state = &reclaim_state; + + /* + * Shrink the LRU in 4 passes: + * 0 = Reclaim from inactive_list only (fast) + * 1 = Reclaim from active list but don't reclaim mapped and dirtied (not that fast) + * 2 = Reclaim from active list but don't reclaim mapped (2nd pass) + * it may reclaim dirtied if vm_pagecache_ignore_dirty = 0 + * 3 = same as pass 2, but it may also reclaim from LRUs with only a few pages; + * see shrink_all_zones() for details + */ + for (; pass <= 3; pass++) { + for (sc.priority = DEF_PRIORITY; sc.priority >= 0; sc.priority--) { + unsigned long nr_to_scan = nr_pages - ret; + struct mem_cgroup *memcg = mem_cgroup_iter(NULL, NULL, NULL); + int nid; + + sc.nr_scanned = 0; + + /* + * No zone reclaimed because of too many reclaimers. Retry to see + * whether there is still something to do + */ + if (!shrink_all_zones(nr_to_scan, pass, &sc)) + goto retry; + + ret += sc.nr_reclaimed; + if (ret >= nr_pages) + goto out; + + reclaim_state.reclaimed_slab = 0; + for_each_online_node(nid) { + do { + shrink_slab(mask, nid, memcg, sc.priority); + } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); + } + ret += reclaim_state.reclaimed_slab; + + if (ret >= nr_pages) + goto out; + + } + if (pass == 1) { + if (vm_pagecache_ignore_dirty == 1 || + (mask & (__GFP_IO | __GFP_FS)) != (__GFP_IO | __GFP_FS)) + break; + else + sc.may_writepage = 1; + } + } + +out: + current->reclaim_state = old_rs; +} + +static int kpagecache_limitd(void *data) +{ + DEFINE_WAIT(wait); + kpclimitd_context = true; + + /* + * make sure all waiting threads are woken up when switching + * to async mode + */ + if (waitqueue_active(&pagecache_reclaim_wq)) + wake_up_interruptible(&pagecache_reclaim_wq); + + for (;;) { + __shrink_page_cache(GFP_KERNEL); + prepare_to_wait(&kpagecache_limitd_wq, &wait, TASK_INTERRUPTIBLE); + + if (!kthread_should_stop()) + schedule(); + else { + finish_wait(&kpagecache_limitd_wq, &wait); + break; + } + finish_wait(&kpagecache_limitd_wq, &wait); + } + kpclimitd_context = false; + return 0; +} + +static void wakeup_kpclimitd(gfp_t mask) +{ + if (!waitqueue_active(&kpagecache_limitd_wq)) + return; + wake_up_interruptible(&kpagecache_limitd_wq); +} + +void shrink_page_cache(gfp_t mask, struct page *page) +{ + if (vm_pagecache_limit_async == 0) + __shrink_page_cache(mask); + else + wakeup_kpclimitd(mask); +} + /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness.
So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, @@ -4125,6 +4490,31 @@ static int __init kswapd_init(void) module_init(kswapd_init) +int kpagecache_limitd_run(void) +{ + int ret = 0; + + if (kpclimitd) + return 0; + + kpclimitd = kthread_run(kpagecache_limitd, NULL, "kpclimitd"); + if (IS_ERR(kpclimitd)) { + pr_err("Failed to start kpagecache_limitd thread\n"); + ret = PTR_ERR(kpclimitd); + kpclimitd = NULL; + } + return ret; + +} + +void kpagecache_limitd_stop(void) +{ + if (kpclimitd) { + kthread_stop(kpclimitd); + kpclimitd = NULL; + } +} + #ifdef CONFIG_NUMA /* * Node reclaim mode diff --git a/mm/vmstat.c b/mm/vmstat.c index a8222041bd44..dca75c6ca98a 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1295,6 +1295,8 @@ const char * const vmstat_text[] = { }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ +unsigned int vmstat_text_size = ARRAY_SIZE(vmstat_text); + #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \ defined(CONFIG_PROC_FS) static void *frag_start(struct seq_file *m, loff_t *pos) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 2b2b9aae8a3c..22d17ecfe7df 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage, zs_pool_dec_isolated(pool); } + if (page_zone(newpage) != page_zone(page)) { + dec_zone_page_state(page, NR_ZSPAGES); + inc_zone_page_state(newpage, NR_ZSPAGES); + } + reset_page(page); put_page(page); page = newpage; diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index c46daf09a501..bb7ec1a3915d 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -126,6 +126,7 @@ int vlan_check_real_dev(struct net_device *real_dev, void vlan_setup(struct net_device *dev); int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack); void unregister_vlan_dev(struct net_device *dev, struct list_head *head); +void vlan_dev_uninit(struct net_device *dev); bool vlan_dev_inherit_address(struct net_device *dev, struct net_device *real_dev); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e5bff5cc6f97..2a78da4072de 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -586,7 +586,8 @@ static int vlan_dev_init(struct net_device *dev) return 0; } -static void vlan_dev_uninit(struct net_device *dev) +/* Note: this function might be called multiple times for the same device. 
*/ +void vlan_dev_uninit(struct net_device *dev) { struct vlan_priority_tci_mapping *pm; struct vlan_dev_priv *vlan = vlan_dev_priv(dev); diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index c482a6fe9393..0db85aeb119b 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -108,11 +108,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], struct ifla_vlan_flags *flags; struct ifla_vlan_qos_mapping *m; struct nlattr *attr; - int rem; + int rem, err; if (data[IFLA_VLAN_FLAGS]) { flags = nla_data(data[IFLA_VLAN_FLAGS]); - vlan_dev_change_flags(dev, flags->flags, flags->mask); + err = vlan_dev_change_flags(dev, flags->flags, flags->mask); + if (err) + return err; } if (data[IFLA_VLAN_INGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { @@ -123,7 +125,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_VLAN_EGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { m = nla_data(attr); - vlan_dev_set_egress_priority(dev, m->from, m->to); + err = vlan_dev_set_egress_priority(dev, m->from, m->to); + if (err) + return err; } } return 0; @@ -179,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; err = vlan_changelink(dev, tb, data, extack); - if (err < 0) - return err; - - return register_vlan_dev(dev, extack); + if (!err) + err = register_vlan_dev(dev, extack); + if (err) + vlan_dev_uninit(dev); + return err; } static inline size_t vlan_qos_map_size(unsigned int n) diff --git a/net/Kconfig b/net/Kconfig index 3101bfcbdd7a..f50104fbcf5a 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -52,6 +52,9 @@ config NET_INGRESS config NET_EGRESS bool +config NET_REDIRECT + bool + config SKB_EXTENSIONS bool @@ -64,6 +67,7 @@ source "net/xfrm/Kconfig" source "net/iucv/Kconfig" source "net/smc/Kconfig" source "net/xdp/Kconfig" +source "net/toa/Kconfig" config INET bool "TCP/IP networking" diff --git a/net/Makefile b/net/Makefile index 449fc0b221f8..84b5bd0fb6c9 100644 --- a/net/Makefile +++ b/net/Makefile @@ -67,6 +67,7 @@ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o endif obj-$(CONFIG_WIMAX) += wimax/ +obj-$(CONFIG_TOA) += toa/ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ obj-$(CONFIG_CEPH_LIB) += ceph/ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 5b0b20e6da95..d88a4de02237 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface) lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex); + /* interface already disabled by batadv_iv_ogm_iface_disable */ + if (!*ogm_buff) + return; + /* the interface gets activated here to avoid race conditions between * the moment of activating the interface in * hardif_activate_interface() where the originator mac is set and diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b0af3a11d406..ec7bf5a4a9fc 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -285,6 +285,7 @@ static u32 batadv_hash_dat(const void *data, u32 size) u32 hash = 0; const struct batadv_dat_entry *dat = data; const unsigned char *key; + __be16 vid; u32 i; key = (const unsigned char *)&dat->ip; @@ -294,7 +295,8 @@ static u32 batadv_hash_dat(const void *data, u32 size) hash ^= (hash >> 6); } - key = (const unsigned char *)&dat->vid; + vid 
= htons(dat->vid); + key = (__force const unsigned char *)&vid; for (i = 0; i < sizeof(dat->vid); i++) { hash += key[i]; hash += (hash << 10); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ad5b0ac1f9ce..87691404d0c6 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -934,6 +934,14 @@ static void hci_req_directed_advertising(struct hci_request *req, return; memset(&cp, 0, sizeof(cp)); + + /* Some controllers might reject command if intervals are not + * within range for undirected advertising. + * BCM20702A0 is known to be affected by this. + */ + cp.min_interval = cpu_to_le16(0x0020); + cp.max_interval = cpu_to_le16(0x0020); + cp.type = LE_ADV_DIRECT_IND; cp.own_address_type = own_addr_type; cp.direct_addr_type = conn->dst_type; @@ -1168,8 +1176,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, if (!conn) return ERR_PTR(-ENOMEM); - if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) + if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { + hci_conn_del(conn); return ERR_PTR(-EBUSY); + } conn->state = BT_CONNECT; set_bit(HCI_CONN_SCANNING, &conn->flags); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 04bc79359a17..9e19d5a3aac8 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -842,8 +842,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { struct hci_cp_le_write_def_data_len cp; - cp.tx_len = hdev->le_max_tx_len; - cp.tx_time = hdev->le_max_tx_time; + cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); + cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp); } @@ -1444,11 +1444,20 @@ static int hci_dev_do_open(struct hci_dev *hdev) if (hci_dev_test_flag(hdev, HCI_SETUP) || test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { + bool invalid_bdaddr; + hci_sock_dev_event(hdev, HCI_DEV_SETUP); if (hdev->setup) ret = hdev->setup(hdev); + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, + &hdev->quirks); + if (ret) goto setup_failed; @@ -1457,20 +1466,33 @@ static int hci_dev_do_open(struct hci_dev *hdev) hci_dev_get_bd_addr_from_property(hdev); if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) + hdev->set_bdaddr) { ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + + /* If setting of the BD_ADDR from the device + * property succeeds, then treat the address + * as valid even if the invalid BD_ADDR + * quirk indicates otherwise. + */ + if (!ret) + invalid_bdaddr = false; + } } setup_failed: /* The transport driver can set these quirks before * creating the HCI device or in its setup callback. * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * * In case any of them is set, the controller has to * start up as unconfigured. 
*/ if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks)) + invalid_bdaddr) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* For an unconfigured controller it is required to @@ -4440,7 +4462,14 @@ static void hci_rx_work(struct work_struct *work) hci_send_to_sock(hdev, skb); } - if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + /* If the device has been opened in HCI_USER_CHANNEL, + * the userspace has exclusive access to device. + * When device is HCI_INIT, we still need to process + * the data packets to the driver in order + * to complete its setup(). + */ + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !test_bit(HCI_INIT, &hdev->flags)) { kfree_skb(skb); continue; } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 7f6a581b5b7e..3d25dbf10b26 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1273,6 +1273,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) instance_flags = get_adv_instance_flags(hdev, instance); + /* If instance already has the flags set skip adding it once + * again. + */ + if (adv_instance && eir_get_data(adv_instance->adv_data, + adv_instance->adv_data_len, EIR_FLAGS, + NULL)) + goto skip_flags; + /* The Add Advertising command allows userspace to set both the general * and limited discoverable flags. */ @@ -1305,6 +1313,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) } } +skip_flags: if (adv_instance) { memcpy(ptr, adv_instance->adv_data, adv_instance->adv_data_len); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d32077b28433..8159b344deef 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -831,6 +831,8 @@ static int hci_sock_release(struct socket *sock) if (!sk) return 0; + lock_sock(sk); + switch (hci_pi(sk)->channel) { case HCI_CHANNEL_MONITOR: atomic_dec(&monitor_promisc); @@ -878,6 +880,7 @@ static int hci_sock_release(struct socket *sock) skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); + release_sock(sk); sock_put(sk); return 0; } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index da7fdbdf9c41..a845786258a0 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4936,10 +4936,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", chan, result, local_amp_id, remote_amp_id); - if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) { - l2cap_chan_unlock(chan); + if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) return; - } if (chan->state != BT_CONNECTED) { l2cap_do_create(chan, result, local_amp_id, remote_amp_id); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 0c7d31c6c18c..a58584949a95 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg) dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel); if (IS_ERR(dlc)) return PTR_ERR(dlc); - else if (dlc) { - rfcomm_dlc_put(dlc); + if (dlc) return -EBUSY; - } dlc = rfcomm_dlc_alloc(GFP_KERNEL); if (!dlc) return -ENOMEM; diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c index 77396a098fbe..efea4874743e 100644 --- a/net/bpfilter/main.c +++ b/net/bpfilter/main.c @@ -10,7 +10,7 @@ #include #include "msgfmt.h" -int debug_fd; +FILE *debug_f; static int handle_get_cmd(struct mbox_request *cmd) { @@ 
-35,9 +35,10 @@ static void loop(void) struct mbox_reply reply; int n; + fprintf(debug_f, "testing the buffer\n"); n = read(0, &req, sizeof(req)); if (n != sizeof(req)) { - dprintf(debug_fd, "invalid request %d\n", n); + fprintf(debug_f, "invalid request %d\n", n); return; } @@ -47,7 +48,7 @@ static void loop(void) n = write(1, &reply, sizeof(reply)); if (n != sizeof(reply)) { - dprintf(debug_fd, "reply failed %d\n", n); + fprintf(debug_f, "reply failed %d\n", n); return; } } @@ -55,9 +56,10 @@ static void loop(void) int main(void) { - debug_fd = open("/dev/kmsg", 00000002); - dprintf(debug_fd, "Started bpfilter\n"); + debug_f = fopen("/dev/kmsg", "w"); + setvbuf(debug_f, 0, _IOLBF, 0); + fprintf(debug_f, "Started bpfilter\n"); loop(); - close(debug_fd); + fclose(debug_f); return 0; } diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index e804a3016902..022dc6e504c4 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -245,6 +245,12 @@ static int br_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + /* dev_set_mac_addr() can be called by a master device on bridge's + * NETDEV_UNREGISTER, but since it's being destroyed do nothing + */ + if (dev->reg_state != NETREG_REGISTERED) + return -EBUSY; + spin_lock_bh(&br->lock); if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { /* Mac address will be changed in br_stp_change_bridge_id(). */ diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 09b1dd8cd853..4fd908c65179 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -272,6 +272,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); p = br_port_get_rcu(skb->dev); + if (!p) + goto drop; if (p->flags & BR_VLAN_TUNNEL) { if (br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p))) diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index af7800103e51..59980ecfc962 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv, nf_bridge_pull_encap_header(skb); } + if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr)))) + return NF_DROP; + if (arp_hdr(skb)->ar_pln != 4) { if (is_vlan_arp(skb, state->net)) nf_bridge_push_encap_header(skb); diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 2cdfc5d6c25d..8c69f0c95a8e 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -22,7 +22,8 @@ #endif static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 4096d8a74a2b..e1256e03a9a8 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) } static int ebt_buf_add(struct ebt_entries_buf_state *state, - void *data, unsigned int sz) + const void *data, unsigned int sz) { if (state->buf_kern_start == NULL) goto count_only; @@ -1901,7 +1901,7 @@ enum compat_mwt { EBT_COMPAT_TARGET, }; -static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, +static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, enum compat_mwt compat_mwt, struct ebt_entries_buf_state *state, const unsigned char *base) @@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct 
compat_ebt_entry_mwt *mwt, /* return size of all matches, watchers or target, including necessary * alignment and padding. */ -static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, +static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { + const char *buf = (const char *)match32; int growth = 0; - char *buf; if (size_left == 0) return 0; - buf = (char *) match32; - - while (size_left >= sizeof(*match32)) { + do { struct ebt_entry_match *match_kern; int ret; + if (size_left < sizeof(*match32)) + return -EINVAL; + match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; @@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, if (match_kern) match_kern->match_size = ret; - /* rule should have no remaining data after target */ - if (type == EBT_COMPAT_TARGET && size_left) - return -EINVAL; - match32 = (struct compat_ebt_entry_mwt *) buf; - } + } while (size_left); return growth; } /* called for all ebt_entry structures. */ -static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, +static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { - unsigned int i, j, startoff, new_offset = 0; + unsigned int i, j, startoff, next_expected_off, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; @@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, return ret; } - startoff = state->buf_user_offset - startoff; - - if (WARN_ON(*total < startoff)) + next_expected_off = state->buf_user_offset - startoff; + if (next_expected_off != entry->next_offset) return -EINVAL; - *total -= startoff; + + if (*total < entry->next_offset) + return -EINVAL; + *total -= entry->next_offset; return 0; } diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index de09b0a65791..f7587428febd 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -423,9 +423,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct j1939_sock *jsk = j1939_sk(sock->sk); - struct j1939_priv *priv = jsk->priv; - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); + struct j1939_priv *priv; + struct sock *sk; + struct net *net; int ret = 0; ret = j1939_sk_sanity_check(addr, len); @@ -434,6 +434,10 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) lock_sock(sock->sk); + priv = jsk->priv; + sk = sock->sk; + net = sock_net(sk); + /* Already bound to an interface? */ if (jsk->state & J1939_SOCK_BOUND) { /* A re-bind() to a different interface is not diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 2d568246803f..9a8de6ef451a 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -29,6 +29,9 @@ #include #include "crypto.h" +static struct ctl_table_header *ceph_table_header; +int sysctl_ceph_ignore_epoch_barrier; +unsigned int sysctl_ceph_epoch_barrier_ceiling; /* * Module compatibility interface. 
For now it doesn't do anything, @@ -274,6 +277,8 @@ enum { Opt_tcp_nodelay, Opt_notcp_nodelay, Opt_abort_on_full, + Opt_req_resend, + Opt_noreq_resend, }; static match_table_t opt_tokens = { @@ -300,6 +305,8 @@ static match_table_t opt_tokens = { {Opt_tcp_nodelay, "tcp_nodelay"}, {Opt_notcp_nodelay, "notcp_nodelay"}, {Opt_abort_on_full, "abort_on_full"}, + {Opt_req_resend, "req_resend"}, + {Opt_noreq_resend, "noreq_resend"}, {-1, NULL} }; @@ -559,6 +566,13 @@ ceph_parse_options(char *options, const char *dev_name, opt->flags |= CEPH_OPT_ABORT_ON_FULL; break; + case Opt_req_resend: + opt->flags |= CEPH_OPT_REQ_RESEND; + break; + case Opt_noreq_resend: + opt->flags &= ~CEPH_OPT_REQ_RESEND; + break; + default: BUG_ON(token); } @@ -601,6 +615,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, seq_puts(m, "notcp_nodelay,"); if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL)) seq_puts(m, "abort_on_full,"); + if ((opt->flags & CEPH_OPT_REQ_RESEND) == 0) + seq_puts(m, "noreq_resend,"); if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) seq_printf(m, "mount_timeout=%d,", @@ -779,6 +795,55 @@ int ceph_open_session(struct ceph_client *client) } EXPORT_SYMBOL(ceph_open_session); +static struct ctl_table ceph_table[] = { + { + .procname = "ceph_ignore_epoch_barrier", + .data = &sysctl_ceph_ignore_epoch_barrier, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax + }, + { + .procname = "ceph_epoch_barrier_ceiling", + .data = &sysctl_ceph_epoch_barrier_ceiling, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + }, + {} +}; + +static struct ctl_table ceph_dir_table[] = { + { + .procname = "ceph", + .mode = 0555, + .child = ceph_table + }, + {} +}; + +static struct ctl_table ceph_root_table[] = { + { + .procname = "net", + .mode = 0555, + .child = ceph_dir_table + }, + {} +}; + +int cephfs_sysctl_register(void) +{ + ceph_table_header = register_sysctl_table(ceph_root_table); + if (!ceph_table_header) + return -ENOMEM; + return 0; +} + +void cephfs_sysctl_unregister(void) +{ + unregister_sysctl_table(ceph_table_header); +} + int ceph_wait_for_latest_osdmap(struct ceph_client *client, unsigned long timeout) { @@ -801,6 +865,10 @@ static int __init init_ceph_lib(void) { int ret = 0; + ret = cephfs_sysctl_register(); + if (ret) + goto out; + ceph_debugfs_init(); ret = ceph_crypto_init(); @@ -826,6 +894,8 @@ out_crypto: ceph_crypto_shutdown(); out_debugfs: ceph_debugfs_cleanup(); + cephfs_sysctl_unregister(); +out: return ret; } @@ -834,6 +904,7 @@ static void __exit exit_ceph_lib(void) dout("exit_ceph_lib\n"); WARN_ON(!ceph_strings_empty()); + cephfs_sysctl_unregister(); ceph_osdc_cleanup(); ceph_msgr_exit(); ceph_crypto_shutdown(); diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 7cb992e55475..bd14d8abe2dd 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -204,10 +204,11 @@ static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t) seq_puts(s, "\tP"); } -static void dump_request(struct seq_file *s, struct ceph_osd_request *req) +static void dump_request(struct seq_file *s, struct ceph_osd *osd, struct ceph_osd_request *req) { int i; + seq_printf(s, "osd %p con %p req %p\t", osd, &osd->o_con, req); seq_printf(s, "%llu\t", req->r_tid); dump_target(s, &req->r_t); @@ -235,7 +236,7 @@ static void dump_requests(struct seq_file *s, struct ceph_osd *osd) struct ceph_osd_request *req = rb_entry(n, struct ceph_osd_request, r_node); - dump_request(s, req); + dump_request(s, osd, req); }
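/*
 * Note on the req_resend switch added above: libceph keeps each boolean
 * mount option as one bit in ceph_options->flags, with an "opt"/"noopt"
 * token pair that sets or clears the bit and ceph_test_opt() as the
 * accessor. A minimal sketch of how a caller consults the new flag (the
 * helper name below is illustrative only, not part of this patch):
 *
 *	static bool want_req_resend(struct ceph_client *client)
 *	{
 *		return ceph_test_opt(client, REQ_RESEND);
 *	}
 *
 * Because the flag lives in client->options->flags, the debugfs handler
 * introduced below can flip the same bit at runtime under osdc->lock, and
 * handle_timeout() picks up the new value on its next pass.
 */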
mutex_unlock(&osd->lock); @@ -389,6 +390,57 @@ CEPH_DEFINE_SHOW_FUNC(monc_show) CEPH_DEFINE_SHOW_FUNC(osdc_show) CEPH_DEFINE_SHOW_FUNC(client_options_show) +static int req_resend_show(struct seq_file *s, void *p) +{ + struct ceph_client *client = s->private; + + if (ceph_test_opt(client, REQ_RESEND)) + seq_puts(s, "1"); + else + seq_puts(s, "0"); + + seq_putc(s, '\n'); + return 0; +} + +static int req_resend_open(struct inode *inode, struct file *file) +{ + return single_open(file, req_resend_show, inode->i_private); +} + +static ssize_t req_resend_write(struct file *filp, const char __user *user_buf, + size_t len, loff_t *offset) +{ + struct ceph_client *client = file_inode(filp)->i_private; + char buffer[16]; + unsigned int req_resend; + + if (len >= sizeof(buffer)) + len = sizeof(buffer) - 1; + if (copy_from_user(buffer, user_buf, len)) + return -EFAULT; + buffer[len] = '\0'; + if (kstrtouint(buffer, 0, &req_resend)) + return -EINVAL; + + down_write(&client->osdc.lock); + if(req_resend) + client->options->flags |= CEPH_OPT_REQ_RESEND; + else + client->options->flags &= ~CEPH_OPT_REQ_RESEND; + up_write(&client->osdc.lock); + + return len; +} + +static const struct file_operations req_resend_fops = { + .open = req_resend_open, + .read = seq_read, + .write = req_resend_write, + .llseek = seq_lseek, + .release = single_release, +}; + void __init ceph_debugfs_init(void) { ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); @@ -439,11 +491,17 @@ void ceph_debugfs_client_init(struct ceph_client *client) client->debugfs_dir, client, &client_options_show_fops); + client->debugfs_req_resend = debugfs_create_file("req_resend", + 0600, + client->debugfs_dir, + client, + &req_resend_fops); } void ceph_debugfs_client_cleanup(struct ceph_client *client) { dout("ceph_debugfs_client_cleanup %p\n", client); + debugfs_remove(client->debugfs_req_resend); debugfs_remove(client->debugfs_options); debugfs_remove(client->debugfs_osdmap); debugfs_remove(client->debugfs_monmap); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index e4cb3db2ee77..4f956fabe462 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -101,6 +101,7 @@ #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */ #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */ #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */ +#define CON_FLAG_FORCE_FAULT 5 static bool con_flag_valid(unsigned long con_flag) { @@ -110,6 +111,7 @@ static bool con_flag_valid(unsigned long con_flag) case CON_FLAG_WRITE_PENDING: case CON_FLAG_SOCK_CLOSED: case CON_FLAG_BACKOFF: + case CON_FLAG_FORCE_FAULT: return true; default: return false; @@ -2967,6 +2969,10 @@ static void ceph_con_workfn(struct work_struct *work) break; /* If we make it to here, we're done */ } + + if (!fault && con_flag_test_and_clear(con, CON_FLAG_FORCE_FAULT)) + fault = true; + if (fault) con_fault(con); mutex_unlock(&con->mutex); @@ -3242,6 +3248,23 @@ bool ceph_con_keepalive_expired(struct ceph_connection *con, return false; } +void ceph_con_fault_force(struct ceph_connection *con) +{ + bool need_queue = false; + + pr_info("%s con %p %s%lld\n", __func__, con, ENTITY_NAME(con->peer_name)); + mutex_lock(&con->mutex); + if (con->state == CON_STATE_OPEN) { + con->error_msg = "osd request timeout and force con fault"; + con_flag_set(con, CON_FLAG_FORCE_FAULT); + need_queue = true; + } + mutex_unlock(&con->mutex); + + if (need_queue && con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) + queue_con(con); +} + static struct ceph_msg_data 
*ceph_msg_data_add(struct ceph_msg *msg) { BUG_ON(msg->num_data_items >= msg->max_data_items); @@ -3250,12 +3273,16 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg) static void ceph_msg_data_destroy(struct ceph_msg_data *data) { - if (data->type == CEPH_MSG_DATA_PAGELIST) + if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) { + int num_pages = calc_pages_for(data->alignment, data->length); + ceph_release_page_vector(data->pages, num_pages); + } else if (data->type == CEPH_MSG_DATA_PAGELIST) { ceph_pagelist_release(data->pagelist); + } } void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, - size_t length, size_t alignment) + size_t length, size_t alignment, bool own_pages) { struct ceph_msg_data *data; @@ -3267,6 +3294,7 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, data->pages = pages; data->length = length; data->alignment = alignment & ~PAGE_MASK; + data->own_pages = own_pages; msg->data_length += length; } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ba45b074a362..76aa45a74439 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -54,6 +54,17 @@ static void unlink_linger(struct ceph_osd *osd, struct ceph_osd_linger_request *lreq); static void clear_backoffs(struct ceph_osd *osd); +bool ceph_epoch_barrier_cmp(struct ceph_osd_client *osdc) +{ + if (sysctl_ceph_ignore_epoch_barrier) { + if (!sysctl_ceph_epoch_barrier_ceiling || osdc->epoch_barrier > sysctl_ceph_epoch_barrier_ceiling) { + net_warn_ratelimited("osdc %p, epoch_barrier: %u ignored\n", osdc, osdc->epoch_barrier); + return false; + } + } + return (osdc->osdmap->epoch < osdc->epoch_barrier); +} + #if 1 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem) { @@ -962,7 +973,7 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg, BUG_ON(length > (u64) SIZE_MAX); if (length) ceph_msg_data_add_pages(msg, osd_data->pages, - length, osd_data->alignment); + length, osd_data->alignment, false); } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { BUG_ON(!length); ceph_msg_data_add_pagelist(msg, osd_data->pagelist); @@ -1348,7 +1359,7 @@ static int reopen_osd(struct ceph_osd *osd) { struct ceph_entity_addr *peer_addr; - dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); + pr_info("%s osd %p osd%d\n", __func__, osd, osd->o_osd); if (RB_EMPTY_ROOT(&osd->o_requests) && RB_EMPTY_ROOT(&osd->o_linger_requests)) { @@ -1361,7 +1372,7 @@ static int reopen_osd(struct ceph_osd *osd) !ceph_con_opened(&osd->o_con)) { struct rb_node *n; - dout("osd addr hasn't changed and connection never opened, " + pr_info("osd addr hasn't changed and connection never opened, " "letting msgr retry\n"); /* touch each r_stamp for handle_timeout()'s benefit */ for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { @@ -1494,7 +1505,7 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc, WARN_ON(pi->id != t->target_oloc.pool); return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) || ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) || - (osdc->osdmap->epoch < osdc->epoch_barrier); + ceph_epoch_barrier_cmp(osdc); } enum calc_target_result { @@ -2301,8 +2312,8 @@ again: if (osdc->abort_err) { dout("req %p abort_err %d\n", req, osdc->abort_err); err = osdc->abort_err; - } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { - dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, + } else if (ceph_epoch_barrier_cmp(osdc)) { + printk(KERN_INFO "req %p osdc %p epoch %u barrier %u\n", req, osdc, osdc->osdmap->epoch,
osdc->epoch_barrier); req->r_t.paused = true; maybe_request_map(osdc); @@ -3275,6 +3286,7 @@ static void handle_timeout(struct work_struct *work) unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; LIST_HEAD(slow_osds); + LIST_HEAD(timeout_osds); struct rb_node *n, *p; dout("%s osdc %p\n", __func__, osdc); @@ -3288,6 +3300,7 @@ static void handle_timeout(struct work_struct *work) for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); bool found = false; + bool found_timeout = false; for (p = rb_first(&osd->o_requests); p; ) { struct ceph_osd_request *req = @@ -3302,9 +3315,12 @@ static void handle_timeout(struct work_struct *work) } if (opts->osd_request_timeout && time_before(req->r_start_stamp, expiry_cutoff)) { - pr_err_ratelimited("tid %llu on osd%d timeout\n", + pr_err("tid %llu on osd%d timeout\n", req->r_tid, osd->o_osd); - abort_request(req, -ETIMEDOUT); + if (ceph_test_opt(osdc->client, REQ_RESEND)) + found_timeout = true; + else + abort_request(req, -ETIMEDOUT); } } for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { @@ -3323,6 +3339,12 @@ static void handle_timeout(struct work_struct *work) if (found) list_move_tail(&osd->o_keepalive_item, &slow_osds); + /* + * On timeout, move the osd onto the timeout list so its requests get re-sent; + * if it was also on the slow_osds list, the forced fault replaces the keepalive. + */ + if (found_timeout) + list_move_tail(&osd->o_keepalive_item, &timeout_osds); } if (opts->osd_request_timeout) { @@ -3340,7 +3362,8 @@ static void handle_timeout(struct work_struct *work) } } - if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) + if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds) + || !list_empty(&timeout_osds)) maybe_request_map(osdc); while (!list_empty(&slow_osds)) { @@ -3351,6 +3374,14 @@ static void handle_timeout(struct work_struct *work) ceph_con_keepalive(&osd->o_con); } + while (!list_empty(&timeout_osds)) { + struct ceph_osd *osd = list_first_entry(&timeout_osds, + struct ceph_osd, + o_keepalive_item); + list_del_init(&osd->o_keepalive_item); + ceph_con_fault_force(&osd->o_con); + } + up_write(&osdc->lock); schedule_delayed_work(&osdc->timeout_work, osdc->client->options->osd_keepalive_timeout); @@ -4042,7 +4073,7 @@ done: ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc); if (was_pauserd || was_pausewr || pauserd || pausewr || - osdc->osdmap->epoch < osdc->epoch_barrier) + ceph_epoch_barrier_cmp(osdc)) maybe_request_map(osdc); kick_requests(osdc, &need_resend, &need_resend_linger); @@ -4097,8 +4128,10 @@ static void osd_fault(struct ceph_connection *con) { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc = osd->o_osdc; + int osd_num = osd->o_osd; + int ret; - dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); + pr_info("%s osd %p osd%d\n", __func__, osd, osd->o_osd); down_write(&osdc->lock); if (!osd_registered(osd)) { @@ -4106,7 +4139,9 @@ static void osd_fault(struct ceph_connection *con) goto out_unlock; } - if (!reopen_osd(osd)) + ret = reopen_osd(osd); + pr_info("%s reopen osd%d ret=%d\n", __func__, osd_num, ret); + if (ret == 0) kick_osd_requests(osd); maybe_request_map(osdc); @@ -4436,9 +4471,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc, CEPH_MSG_DATA_PAGES); *lreq->preply_pages = data->pages; *lreq->preply_len = data->length; - } else { - ceph_release_page_vector(data->pages, - calc_pages_for(0,
data->length)); + data->own_pages = false; } } lreq->notify_finish_error = return_code; @@ -5500,9 +5533,6 @@ out_unlock_osdc: return m; } -/* - * TODO: switch to a msg-owned pagelist - */ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) { struct ceph_msg *m; @@ -5516,7 +5546,6 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) if (data_len) { struct page **pages; - struct ceph_osd_data osd_data; pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), GFP_NOIO); @@ -5525,9 +5554,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) return NULL; } - ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false, - false); - ceph_osdc_msg_data_add(m, &osd_data); + ceph_msg_data_add_pages(m, pages, data_len, 0, true); } return m; diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 4e0de14f80bb..2a6e63a8edbe 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -710,6 +710,15 @@ int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) } EXPORT_SYMBOL(ceph_pg_poolid_by_name); +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id) +{ + struct ceph_pg_pool_info *pi; + + pi = __lookup_pg_pool(&map->pg_pools, id); + return pi ? pi->flags : 0; +} +EXPORT_SYMBOL(ceph_pg_pool_flags); + static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) { rb_erase(&pi->node, root); diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index da5639a5bd3b..0147b26f585a 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) return ERR_PTR(-ENOMEM); bpf_map_init_from_attr(&smap->map, attr); + nbuckets = roundup_pow_of_two(num_possible_cpus()); /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ - smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus()))); - nbuckets = 1U << smap->bucket_log; + nbuckets = max_t(u32, 2, nbuckets); + smap->bucket_log = ilog2(nbuckets); cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); ret = bpf_map_charge_init(&smap->map.memory, cost); diff --git a/net/core/dev.c b/net/core/dev.c index 99ac84ff398f..8c5a9cfd6e6f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -146,7 +146,6 @@ #include "net-sysfs.h" #define MAX_GRO_SKBS 8 -#define MAX_NEST_DEV 8 /* This should be increased if a protocol with a bigger head is added. 
*/ #define GRO_MAX_HEAD (MAX_HEADER + 128) @@ -2796,6 +2795,8 @@ static u16 skb_tx_hash(const struct net_device *dev, if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); + if (hash >= qoffset) + hash -= qoffset; while (unlikely(hash >= qcount)) hash -= qcount; return hash + qoffset; @@ -3386,26 +3387,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_calculate_pkt_len(skb, q); if (q->flags & TCQ_F_NOLOCK) { - if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && - qdisc_run_begin(q)) { - if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, - &q->state))) { - __qdisc_drop(skb, &to_free); - rc = NET_XMIT_DROP; - goto end_run; - } - qdisc_bstats_cpu_update(q, skb); - - rc = NET_XMIT_SUCCESS; - if (sch_direct_xmit(skb, q, dev, txq, NULL, true)) - __qdisc_run(q); - -end_run: - qdisc_run_end(q); - } else { - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; - qdisc_run(q); - } + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + qdisc_run(q); if (unlikely(to_free)) kfree_skb_list(to_free); @@ -3886,6 +3869,7 @@ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ int dev_rx_weight __read_mostly = 64; int dev_tx_weight __read_mostly = 64; +int netdev_pagefrag_enabled __read_mostly = 1; /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ int gro_normal_batch __read_mostly = 8; @@ -4256,14 +4240,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* Reinjected packets coming from act_mirred or similar should * not get XDP generic processing. */ - if (skb_cloned(skb) || skb_is_tc_redirected(skb)) + if (skb_is_redirected(skb)) return XDP_PASS; /* XDP packets must be linear and must have sufficient headroom * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also * native XDP provides, thus we need to do it here as well. */ - if (skb_is_nonlinear(skb) || + if (skb_cloned(skb) || skb_is_nonlinear(skb) || skb_headroom(skb) < XDP_PACKET_HEADROOM) { int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); int troom = skb->tail + skb->data_len - skb->end; @@ -4805,7 +4789,7 @@ skip_taps: goto out; } #endif - skb_reset_tc(skb); + skb_reset_redirect(skb); skip_classify: if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; @@ -5270,9 +5254,29 @@ static void flush_all_backlogs(void) put_online_cpus(); } +/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ +static void gro_normal_list(struct napi_struct *napi) +{ + if (!napi->rx_count) + return; + netif_receive_skb_list_internal(&napi->rx_list); + INIT_LIST_HEAD(&napi->rx_list); + napi->rx_count = 0; +} + +/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, + * pass the whole batch up to the stack. 
+ */ +static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb) +{ + list_add_tail(&skb->list, &napi->rx_list); + if (++napi->rx_count >= gro_normal_batch) + gro_normal_list(napi); +} + INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); -static int napi_gro_complete(struct sk_buff *skb) +static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) { struct packet_offload *ptype; __be16 type = skb->protocol; @@ -5305,7 +5309,8 @@ static int napi_gro_complete(struct sk_buff *skb) } out: - return netif_receive_skb_internal(skb); + gro_normal_one(napi, skb); + return NET_RX_SUCCESS; } static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, @@ -5318,7 +5323,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; skb_list_del_init(skb); - napi_gro_complete(skb); + napi_gro_complete(napi, skb); napi->gro_hash[index].count--; } @@ -5421,7 +5426,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) } } -static void gro_flush_oldest(struct list_head *head) +static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) { struct sk_buff *oldest; @@ -5437,7 +5442,7 @@ static void gro_flush_oldest(struct list_head *head) * SKB to the chain. */ skb_list_del_init(oldest); - napi_gro_complete(oldest); + napi_gro_complete(napi, oldest); } INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, @@ -5513,7 +5518,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (pp) { skb_list_del_init(pp); - napi_gro_complete(pp); + napi_gro_complete(napi, pp); napi->gro_hash[hash].count--; } @@ -5524,7 +5529,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff goto normal; if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { - gro_flush_oldest(gro_head); + gro_flush_oldest(napi, gro_head); } else { napi->gro_hash[hash].count++; } @@ -5672,26 +5677,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi) } EXPORT_SYMBOL(napi_get_frags); -/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ -static void gro_normal_list(struct napi_struct *napi) -{ - if (!napi->rx_count) - return; - netif_receive_skb_list_internal(&napi->rx_list); - INIT_LIST_HEAD(&napi->rx_list); - napi->rx_count = 0; -} - -/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, - * pass the whole batch up to the stack. 
- */ -static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb) -{ - list_add_tail(&skb->list, &napi->rx_list); - if (++napi->rx_count >= gro_normal_batch) - gro_normal_list(napi); -} - static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret) @@ -5979,8 +5964,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done) NAPIF_STATE_IN_BUSY_POLL))) return false; - gro_normal_list(n); - if (n->gro_bitmask) { unsigned long timeout = 0; @@ -5996,6 +5979,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done) hrtimer_start(&n->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); } + + gro_normal_list(n); + if (unlikely(!list_empty(&n->poll_list))) { /* If n->poll_list is not empty, we need to mask irqs */ local_irq_save(flags); @@ -6327,8 +6313,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) goto out_unlock; } - gro_normal_list(n); - if (n->gro_bitmask) { /* flush too old packets * If HZ < 1000, flush all packets. @@ -6336,6 +6320,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) napi_gro_flush(n, HZ >= 1000); } + gro_normal_list(n); + /* Some drivers may have called napi_schedule * prior to exhausting their budget. */ @@ -6930,8 +6916,8 @@ static int __netdev_walk_all_lower_dev(struct net_device *dev, return 0; } -static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, - struct list_head **iter) +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, + struct list_head **iter) { struct netdev_adjacent *lower; @@ -6943,6 +6929,7 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, return lower->dev; } +EXPORT_SYMBOL(netdev_next_lower_dev_rcu); static u8 __netdev_upper_depth(struct net_device *dev) { @@ -7967,11 +7954,28 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu) if (ops->ndo_change_mtu) return ops->ndo_change_mtu(dev, new_mtu); - dev->mtu = new_mtu; + /* Pairs with all the lockless reads of dev->mtu in the stack */ + WRITE_ONCE(dev->mtu, new_mtu); return 0; } EXPORT_SYMBOL(__dev_set_mtu); +int dev_validate_mtu(struct net_device *dev, int new_mtu, + struct netlink_ext_ack *extack) +{ + /* MTU must be positive, and in range */ + if (new_mtu < 0 || new_mtu < dev->min_mtu) { + NL_SET_ERR_MSG(extack, "mtu less than device minimum"); + return -EINVAL; + } + + if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { + NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); + return -EINVAL; + } + return 0; +} + /** * dev_set_mtu_ext - Change maximum transfer unit * @dev: device @@ -7988,16 +7992,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu, if (new_mtu == dev->mtu) return 0; - /* MTU must be positive, and in range */ - if (new_mtu < 0 || new_mtu < dev->min_mtu) { - NL_SET_ERR_MSG(extack, "mtu less than device minimum"); - return -EINVAL; - } - - if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { - NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); - return -EINVAL; - } + err = dev_validate_mtu(dev, new_mtu, extack); + if (err) + return err; if (!netif_device_present(dev)) return -ENODEV; @@ -8952,22 +8949,10 @@ static void netdev_unregister_lockdep_key(struct net_device *dev) void netdev_update_lockdep_key(struct net_device *dev) { - struct netdev_queue *queue; - int i; - - lockdep_unregister_key(&dev->qdisc_xmit_lock_key); lockdep_unregister_key(&dev->addr_list_lock_key); - - lockdep_register_key(&dev->qdisc_xmit_lock_key); lockdep_register_key(&dev->addr_list_lock_key); 
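/*
 * The WRITE_ONCE() now used in __dev_set_mtu() pairs with lockless readers
 * of dev->mtu elsewhere in the stack: paths that sample the MTU without
 * holding RTNL should use READ_ONCE() so the compiler can neither tear nor
 * refetch the load. A minimal sketch of the reader side (the helper below
 * is illustrative only, not part of this patch):
 *
 *	static unsigned int dev_mtu_snapshot(const struct net_device *dev)
 *	{
 *		return READ_ONCE(dev->mtu);
 *	}
 *
 * dev_validate_mtu() is split out of dev_set_mtu_ext() above, evidently so
 * the dev->min_mtu/dev->max_mtu range checks and their extack messages can
 * be reused by other callers instead of being duplicated.
 */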
lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key); - for (i = 0; i < dev->num_tx_queues; i++) { - queue = netdev_get_tx_queue(dev, i); - - lockdep_set_class(&queue->_xmit_lock, - &dev->qdisc_xmit_lock_key); - } } EXPORT_SYMBOL(netdev_update_lockdep_key); @@ -9084,8 +9069,10 @@ int register_netdevice(struct net_device *dev) goto err_uninit; ret = netdev_register_kobject(dev); - if (ret) + if (ret) { + dev->reg_state = NETREG_UNREGISTERED; goto err_uninit; + } dev->reg_state = NETREG_REGISTERED; __netdev_update_features(dev); @@ -9507,9 +9494,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, if (!dev->pcpu_refcnt) goto free_dev; - if (dev_addr_init(dev)) + dev->mib.dev_statistics = alloc_percpu(struct dev_mib); + if (!dev->mib.dev_statistics) goto free_pcpu; + if (dev_addr_init(dev)) + goto free_dev_mib; + dev_mc_init(dev); dev_uc_init(dev); @@ -9564,7 +9555,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, free_all: free_netdev(dev); return NULL; - +free_dev_mib: + free_percpu(dev->mib.dev_statistics); free_pcpu: free_percpu(dev->pcpu_refcnt); free_dev: @@ -9599,6 +9591,7 @@ void free_netdev(struct net_device *dev) netif_napi_del(p); free_percpu(dev->pcpu_refcnt); + free_percpu(dev->mib.dev_statistics); dev->pcpu_refcnt = NULL; netdev_unregister_lockdep_key(dev); diff --git a/net/core/devlink.c b/net/core/devlink.c index 93905dc7c179..4c25f1aa2d37 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3222,34 +3222,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param, struct genl_info *info, union devlink_param_value *value) { + struct nlattr *param_data; int len; - if (param->type != DEVLINK_PARAM_TYPE_BOOL && - !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) + param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]; + + if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data) return -EINVAL; switch (param->type) { case DEVLINK_PARAM_TYPE_U8: - value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u8)) + return -EINVAL; + value->vu8 = nla_get_u8(param_data); break; case DEVLINK_PARAM_TYPE_U16: - value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u16)) + return -EINVAL; + value->vu16 = nla_get_u16(param_data); break; case DEVLINK_PARAM_TYPE_U32: - value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u32)) + return -EINVAL; + value->vu32 = nla_get_u32(param_data); break; case DEVLINK_PARAM_TYPE_STRING: - len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]), - nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); - if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) || + len = strnlen(nla_data(param_data), nla_len(param_data)); + if (len == nla_len(param_data) || len >= __DEVLINK_PARAM_MAX_STRING_VALUE) return -EINVAL; - strcpy(value->vstr, - nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); + strcpy(value->vstr, nla_data(param_data)); break; case DEVLINK_PARAM_TYPE_BOOL: - value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? 
- true : false; + if (param_data && nla_len(param_data)) + return -EINVAL; + value->vbool = nla_get_flag(param_data); break; } return 0; @@ -3863,6 +3870,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, goto out_unlock; } + /* return 0 if there is no further data to read */ + if (start_offset >= region->size) { + err = 0; + goto out_unlock; + } + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, DEVLINK_CMD_REGION_READ); @@ -5791,6 +5804,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 }, + [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 }, + [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 }, [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 }, [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 }, @@ -6280,7 +6295,7 @@ static bool devlink_port_type_should_warn(struct devlink_port *devlink_port) devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA; } -#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30) +#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600) static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port) { diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 536e032d95c8..246a258b1fac 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -1004,8 +1004,10 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack) { int cpu; - if (!monitor_hw) + if (!monitor_hw) { NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled"); + return; + } monitor_hw = false; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index dd220ce7ca7a..bb11fc87bbae 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -967,7 +967,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh = nlmsg_data(nlh); frh->family = ops->family; - frh->table = rule->table; + frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT; if (nla_put_u32(skb, FRA_TABLE, rule->table)) goto nla_put_failure; if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen)) diff --git a/net/core/filter.c b/net/core/filter.c index 3fed5755494b..d59dbc88fef5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2055,6 +2055,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) } skb->dev = dev; + skb->tstamp = 0; dev_xmit_recursion_inc(); ret = dev_queue_xmit(skb); @@ -2230,10 +2231,10 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, /* First find the starting scatterlist element */ i = msg->sg.start; do { + offset += len; len = sk_msg_elem(msg, i)->length; if (start < offset + len) break; - offset += len; sk_msg_iter_var_next(i); } while (i != msg->sg.end); @@ -2299,7 +2300,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, WARN_ON_ONCE(last_sge == first_sge); shift = last_sge > first_sge ? 
last_sge - first_sge - 1 : - MAX_SKB_FRAGS - first_sge + last_sge - 1; + NR_MSG_FRAG_IDS - first_sge + last_sge - 1; if (!shift) goto out; @@ -2308,8 +2309,8 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, do { u32 move_from; - if (i + shift >= MAX_MSG_FRAGS) - move_from = i + shift - MAX_MSG_FRAGS; + if (i + shift >= NR_MSG_FRAG_IDS) + move_from = i + shift - NR_MSG_FRAG_IDS; else move_from = i + shift; if (move_from == msg->sg.end) @@ -2323,7 +2324,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, } while (1); msg->sg.end = msg->sg.end - shift > msg->sg.end ? - msg->sg.end - shift + MAX_MSG_FRAGS : + msg->sg.end - shift + NR_MSG_FRAG_IDS : msg->sg.end - shift; out: msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; @@ -2345,7 +2346,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; - u32 new, i = 0, l, space, copy = 0, offset = 0; + u32 new, i = 0, l = 0, space, copy = 0, offset = 0; u8 *raw, *to, *from; struct page *page; @@ -2355,11 +2356,11 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, /* First find the starting scatterlist element */ i = msg->sg.start; do { + offset += l; l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; - offset += l; sk_msg_iter_var_next(i); } while (i != msg->sg.end); @@ -2414,6 +2415,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, sk_msg_iter_var_next(i); sg_unmark_end(psge); + sg_unmark_end(&rsge); sk_msg_iter_next(msg, end); } @@ -2505,7 +2507,7 @@ static void sk_msg_shift_right(struct sk_msg *msg, int i) BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { - u32 i = 0, l, space, offset = 0; + u32 i = 0, l = 0, space, offset = 0; u64 last = start + len; int pop; @@ -2515,11 +2517,11 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, /* First find the starting scatterlist element */ i = msg->sg.start; do { + offset += l; l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; - offset += l; sk_msg_iter_var_next(i); } while (i != msg->sg.end); @@ -3541,7 +3543,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, return err; } default: - break; + return -EBADRQC; } return 0; } @@ -5304,8 +5306,7 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { - if (!sock_flag(sk, SOCK_RCU_FREE)) - sock_gen_put(sk); + sock_gen_put(sk); return NULL; } } @@ -5342,8 +5343,7 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { - if (!sock_flag(sk, SOCK_RCU_FREE)) - sock_gen_put(sk); + sock_gen_put(sk); return NULL; } } @@ -5410,7 +5410,8 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { BPF_CALL_1(bpf_sk_release, struct sock *, sk) { - if (!sock_flag(sk, SOCK_RCU_FREE)) + /* Only full sockets have sk->sk_flags. 
*/ + if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return 0; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 68eda10d0680..96b2566c298d 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -683,6 +683,31 @@ __skb_flow_dissect_tcp(const struct sk_buff *skb, key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF)); } +static void +__skb_flow_dissect_ports(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, void *data, int nhoff, + u8 ip_proto, int hlen) +{ + enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX; + struct flow_dissector_key_ports *key_ports; + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) + dissector_ports = FLOW_DISSECTOR_KEY_PORTS; + else if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE)) + dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE; + + if (dissector_ports == FLOW_DISSECTOR_KEY_MAX) + return; + + key_ports = skb_flow_dissector_target(flow_dissector, + dissector_ports, + target_container); + key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, + data, hlen); +} + static void __skb_flow_dissect_ipv4(const struct sk_buff *skb, struct flow_dissector *flow_dissector, @@ -733,10 +758,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, struct flow_dissector *flow_dissector, void *target_container) { + struct flow_dissector_key_ports *key_ports = NULL; struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; - struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_tags *key_tags; key_control = skb_flow_dissector_target(flow_dissector, @@ -775,10 +800,17 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } - if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) key_ports = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_PORTS, target_container); + else if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE)) + key_ports = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE, + target_container); + + if (key_ports) { key_ports->src = flow_keys->sport; key_ports->dst = flow_keys->dport; } @@ -852,7 +884,6 @@ bool __skb_flow_dissect(const struct net *net, struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; - struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_icmp *key_icmp; struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_vlan *key_vlan; @@ -870,9 +901,10 @@ bool __skb_flow_dissect(const struct net *net, nhoff = skb_network_offset(skb); hlen = skb_headlen(skb); #if IS_ENABLED(CONFIG_NET_DSA) - if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) { + if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) && + proto == htons(ETH_P_XDSA))) { const struct dsa_device_ops *ops; - int offset; + int offset = 0; ops = skb->dev->dsa_ptr->tag_ops; if (ops->flow_dissect && @@ -1299,14 +1331,9 @@ ip_proto_again: break; } - if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) && - !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) { - key_ports = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS, - target_container); - 
key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, - data, hlen); - } + if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT)) + __skb_flow_dissect_ports(skb, flow_dissector, target_container, + data, nhoff, ip_proto, hlen); if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP)) { diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index cf52d9c422fa..45b6a59ac124 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -283,7 +283,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f, } EXPORT_SYMBOL(flow_block_cb_setup_simple); -static LIST_HEAD(block_ing_cb_list); +static LIST_HEAD(block_cb_list); static struct rhashtable indr_setup_block_ht; @@ -391,20 +391,19 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb) kfree(indr_block_cb); } -static DEFINE_MUTEX(flow_indr_block_ing_cb_lock); +static DEFINE_MUTEX(flow_indr_block_cb_lock); -static void flow_block_ing_cmd(struct net_device *dev, - flow_indr_block_bind_cb_t *cb, - void *cb_priv, - enum flow_block_command command) +static void flow_block_cmd(struct net_device *dev, + flow_indr_block_bind_cb_t *cb, void *cb_priv, + enum flow_block_command command) { - struct flow_indr_block_ing_entry *entry; + struct flow_indr_block_entry *entry; - mutex_lock(&flow_indr_block_ing_cb_lock); - list_for_each_entry(entry, &block_ing_cb_list, list) { + mutex_lock(&flow_indr_block_cb_lock); + list_for_each_entry(entry, &block_cb_list, list) { entry->cb(dev, cb, cb_priv, command); } - mutex_unlock(&flow_indr_block_ing_cb_lock); + mutex_unlock(&flow_indr_block_cb_lock); } int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, @@ -424,8 +423,8 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, if (err) goto err_dev_put; - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, - FLOW_BLOCK_BIND); + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, + FLOW_BLOCK_BIND); return 0; @@ -464,8 +463,8 @@ void __flow_indr_block_cb_unregister(struct net_device *dev, if (!indr_block_cb) return; - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, - FLOW_BLOCK_UNBIND); + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, + FLOW_BLOCK_UNBIND); flow_indr_block_cb_del(indr_block_cb); flow_indr_block_dev_put(indr_dev); @@ -499,21 +498,21 @@ void flow_indr_block_call(struct net_device *dev, } EXPORT_SYMBOL_GPL(flow_indr_block_call); -void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry) +void flow_indr_add_block_cb(struct flow_indr_block_entry *entry) { - mutex_lock(&flow_indr_block_ing_cb_lock); - list_add_tail(&entry->list, &block_ing_cb_list); - mutex_unlock(&flow_indr_block_ing_cb_lock); + mutex_lock(&flow_indr_block_cb_lock); + list_add_tail(&entry->list, &block_cb_list); + mutex_unlock(&flow_indr_block_cb_lock); } -EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb); +EXPORT_SYMBOL_GPL(flow_indr_add_block_cb); -void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry) +void flow_indr_del_block_cb(struct flow_indr_block_entry *entry) { - mutex_lock(&flow_indr_block_ing_cb_lock); + mutex_lock(&flow_indr_block_cb_lock); list_del(&entry->list); - mutex_unlock(&flow_indr_block_ing_cb_lock); + mutex_unlock(&flow_indr_block_cb_lock); } -EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb); +EXPORT_SYMBOL_GPL(flow_indr_del_block_cb); static int __init init_flow_indr_rhashtable(void) { diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 74cfb8b5ab33..99a6de52b21d 100644 --- 
a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -230,9 +230,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb) fl6.daddr = iph6->daddr; fl6.saddr = iph6->saddr; - err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6); - if (unlikely(err)) - goto err; + dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto err; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 5480edff0c86..920784a9b7ff 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -98,9 +98,6 @@ static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb) static void neigh_cleanup_and_release(struct neighbour *neigh) { - if (neigh->parms->neigh_cleanup) - neigh->parms->neigh_cleanup(neigh); - trace_neigh_cleanup_and_release(neigh, 0); __neigh_notify(neigh, RTM_DELNEIGH, 0, 0); call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); @@ -1197,7 +1194,7 @@ static void neigh_update_hhs(struct neighbour *neigh) if (update) { hh = &neigh->hh; - if (hh->hh_len) { + if (READ_ONCE(hh->hh_len)) { write_seqlock_bh(&hh->hh_lock); update(hh, neigh->dev, neigh->ha); write_sequnlock_bh(&hh->hh_lock); @@ -1476,7 +1473,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) struct net_device *dev = neigh->dev; unsigned int seq; - if (dev->header_ops->cache && !neigh->hh.hh_len) + if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len)) neigh_hh_init(neigh); do { @@ -2052,8 +2049,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, goto nla_put_failure; { unsigned long now = jiffies; - unsigned int flush_delta = now - tbl->last_flush; - unsigned int rand_delta = now - tbl->last_rand; + long flush_delta = now - tbl->last_flush; + long rand_delta = now - tbl->last_rand; struct neigh_hash_table *nht; struct ndt_config ndc = { .ndtc_key_len = tbl->key_len, diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index 36347933ec3a..29e17639069d 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -272,6 +272,57 @@ static const struct seq_operations ptype_seq_ops = { .show = ptype_seq_show, }; +static const struct snmp_mib snmp_devstat_list[] = { + SNMP_MIB_ITEM("ipv4_rx_pkts", DEV_MIB_IPV4_RX_PKTS), + SNMP_MIB_ITEM("ipv4_rx_bytes", DEV_MIB_IPV4_RX_BYTES), + SNMP_MIB_ITEM("ipv4_tx_pkts", DEV_MIB_IPV4_TX_PKTS), + SNMP_MIB_ITEM("ipv4_tx_bytes", DEV_MIB_IPV4_TX_BYTES), + SNMP_MIB_ITEM("ipv6_rx_pkts", DEV_MIB_IPV6_RX_PKTS), + SNMP_MIB_ITEM("ipv6_rx_bytes", DEV_MIB_IPV6_RX_BYTES), + SNMP_MIB_ITEM("ipv6_tx_pkts", DEV_MIB_IPV6_TX_PKTS), + SNMP_MIB_ITEM("ipv6_tx_bytes", DEV_MIB_IPV6_TX_BYTES), + SNMP_MIB_SENTINEL +}; + +static void dev_ext_seq_printf_stats(struct seq_file *seq, void *v) +{ + struct net_device *dev = (struct net_device *)v; + int i; + + seq_printf(seq, "%10s ", dev->name); + + for (i = 0; snmp_devstat_list[i].name; i++) + seq_printf(seq, " %12lu", + snmp_fold_field((void __percpu **) + dev->mib.dev_statistics, + snmp_devstat_list[i].entry)); + seq_putc(seq, '\n'); +} + +static int dev_ext_seq_show(struct seq_file *seq, void *v) +{ + int i; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Interface "); + for (i = 0; snmp_devstat_list[i].name; i++) + seq_printf(seq, "%s ", snmp_devstat_list[i].name); + + seq_putc(seq, '\n'); + } else { + dev_ext_seq_printf_stats(seq, v); + } + + return 0; +} + +static const struct seq_operations dev_ext_seq_ops = { + .start = dev_seq_start, + .next = dev_seq_next, + .stop = dev_seq_stop, + .show = dev_ext_seq_show, +}; + static int __net_init 
dev_proc_net_init(struct net *net) { int rc = -ENOMEM; @@ -279,9 +330,12 @@ static int __net_init dev_proc_net_init(struct net *net) if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops, sizeof(struct seq_net_private))) goto out; + if (!proc_create_net("dev_ext", 0444, net->proc_net, &dev_ext_seq_ops, + sizeof(struct seq_net_private))) + goto out_dev; if (!proc_create_seq("softnet_stat", 0444, net->proc_net, &softnet_seq_ops)) - goto out_dev; + goto out_dev_ext; if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, sizeof(struct seq_net_private))) goto out_softnet; @@ -295,6 +349,8 @@ out_ptype: remove_proc_entry("ptype", net->proc_net); out_softnet: remove_proc_entry("softnet_stat", net->proc_net); +out_dev_ext: + remove_proc_entry("dev_ext", net->proc_net); out_dev: remove_proc_entry("dev", net->proc_net); goto out; @@ -306,6 +362,7 @@ static void __net_exit dev_proc_net_exit(struct net *net) { remove_proc_entry("ptype", net->proc_net); remove_proc_entry("softnet_stat", net->proc_net); + remove_proc_entry("dev_ext", net->proc_net); remove_proc_entry("dev", net->proc_net); } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ae3bcb1540ec..4c826b8bf9b1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -919,14 +919,17 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) struct kobject *kobj = &queue->kobj; int error = 0; + /* kobject_put() later will trigger the rx_queue_release() call, which + * decreases the dev refcount: take that reference here + */ + dev_hold(queue->dev); + kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) goto err; - dev_hold(queue->dev); - if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); if (error) @@ -1459,14 +1462,17 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) struct kobject *kobj = &queue->kobj; int error = 0; + /* kobject_put() later will trigger the netdev_queue_release() call, + * which decreases the dev refcount: take that reference here + */ + dev_hold(queue->dev); + kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); if (error) goto err; - dev_hold(queue->dev); - #ifdef CONFIG_BQL error = sysfs_create_group(kobj, &dql_group); if (error) diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 0642f91c4038..b4c87fe31be2 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css) kfree(css_cls_state(css)); } +/* + * To avoid stalling socket creation for tasks with a large number of threads + * and open sockets, release file_lock every 1000 iterated descriptors. + * New sockets will already have been created with the new classid.
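+ * iterate_fd() stops as soon as its callback returns a non-zero value, so
+ * update_classid_sock() below hands back (n + 1) once per batch and
+ * update_classid_task() resumes from that descriptor, roughly:
+ *
+ *	fd = 0;
+ *	do {
+ *		task_lock(p);
+ *		fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
+ *		task_unlock(p);
+ *		cond_resched();
+ *	} while (fd);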
+ */ + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +#define UPDATE_CLASSID_BATCH 1000 + static int update_classid_sock(const void *v, struct file *file, unsigned n) { int err; + struct update_classid_context *ctx = (void *)v; struct socket *sock = sock_from_file(file, &err); if (sock) { spin_lock(&cgroup_sk_update_lock); - sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, - (unsigned long)v); + sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid); spin_unlock(&cgroup_sk_update_lock); } + if (--ctx->batch == 0) { + ctx->batch = UPDATE_CLASSID_BATCH; + return n + 1; + } return 0; } +static void update_classid_task(struct task_struct *p, u32 classid) +{ + struct update_classid_context ctx = { + .classid = classid, + .batch = UPDATE_CLASSID_BATCH + }; + unsigned int fd = 0; + + do { + task_lock(p); + fd = iterate_fd(p->files, fd, update_classid_sock, &ctx); + task_unlock(p); + cond_resched(); + } while (fd); +} + static void cgrp_attach(struct cgroup_taskset *tset) { struct cgroup_subsys_state *css; struct task_struct *p; cgroup_taskset_for_each(p, css, tset) { - task_lock(p); - iterate_fd(p->files, 0, update_classid_sock, - (void *)(unsigned long)css_cls_state(css)->classid); - task_unlock(p); + update_classid_task(p, css_cls_state(css)->classid); } } @@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, css_task_iter_start(css, 0, &it); while ((p = css_task_iter_next(&it))) { - task_lock(p); - iterate_fd(p->files, 0, update_classid_sock, - (void *)(unsigned long)cs->classid); - task_unlock(p); + update_classid_task(p, cs->classid); cond_resched(); } css_task_iter_end(&it); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 5bc65587f1c4..dfc2501c35d9 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -18,6 +18,9 @@ #include +#define DEFER_TIME (msecs_to_jiffies(1000)) +#define DEFER_WARN_INTERVAL (60 * HZ) + static int page_pool_init(struct page_pool *pool, const struct page_pool_params *params) { @@ -193,22 +196,14 @@ static s32 page_pool_inflight(struct page_pool *pool) { u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); - s32 distance; + s32 inflight; - distance = _distance(hold_cnt, release_cnt); + inflight = _distance(hold_cnt, release_cnt); - trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt); - return distance; -} - -static bool __page_pool_safe_to_destroy(struct page_pool *pool) -{ - s32 inflight = page_pool_inflight(pool); - - /* The distance should not be able to become negative */ + trace_page_pool_inflight(pool, inflight, hold_cnt, release_cnt); WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight); - return (inflight == 0); + return inflight; } /* Cleanup page_pool state from page */ @@ -216,6 +211,7 @@ static void __page_pool_clean_page(struct page_pool *pool, struct page *page) { dma_addr_t dma; + int count; if (!(pool->p.flags & PP_FLAG_DMA_MAP)) goto skip_dma_unmap; @@ -227,9 +223,11 @@ static void __page_pool_clean_page(struct page_pool *pool, DMA_ATTR_SKIP_CPU_SYNC); page->dma_addr = 0; skip_dma_unmap: - atomic_inc(&pool->pages_state_release_cnt); - trace_page_pool_state_release(pool, page, - atomic_read(&pool->pages_state_release_cnt)); + /* This may be the last page returned, releasing the pool, so + * it is not safe to reference pool afterwards. 
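+ * That is why the counter below is sampled with a single atomic_inc_return()
+ * instead of the earlier
+ *
+ *	atomic_inc(&pool->pages_state_release_cnt);
+ *	trace_page_pool_state_release(pool, page,
+ *		atomic_read(&pool->pages_state_release_cnt));
+ *
+ * which read pool->pages_state_release_cnt again after the increment may
+ * already have let the last user free the pool.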
+ */ + count = atomic_inc_return(&pool->pages_state_release_cnt); + trace_page_pool_state_release(pool, page, count); } /* unmap the page and clean our state */ @@ -338,31 +336,10 @@ static void __page_pool_empty_ring(struct page_pool *pool) } } -static void __warn_in_flight(struct page_pool *pool) +static void page_pool_free(struct page_pool *pool) { - u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); - u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); - s32 distance; - - distance = _distance(hold_cnt, release_cnt); - - /* Drivers should fix this, but only problematic when DMA is used */ - WARN(1, "Still in-flight pages:%d hold:%u released:%u", - distance, hold_cnt, release_cnt); -} - -void __page_pool_free(struct page_pool *pool) -{ - /* Only last user actually free/release resources */ - if (!page_pool_put(pool)) - return; - - WARN(pool->alloc.count, "API usage violation"); - WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty"); - - /* Can happen due to forced shutdown */ - if (!__page_pool_safe_to_destroy(pool)) - __warn_in_flight(pool); + if (pool->disconnect) + pool->disconnect(pool); ptr_ring_cleanup(&pool->ring, NULL); @@ -371,12 +348,8 @@ void __page_pool_free(struct page_pool *pool) kfree(pool); } -EXPORT_SYMBOL(__page_pool_free); -/* Request to shutdown: release pages cached by page_pool, and check - * for in-flight pages - */ -bool __page_pool_request_shutdown(struct page_pool *pool) +static void page_pool_scrub(struct page_pool *pool) { struct page *page; @@ -393,7 +366,64 @@ bool __page_pool_request_shutdown(struct page_pool *pool) * be in-flight. */ __page_pool_empty_ring(pool); - - return __page_pool_safe_to_destroy(pool); } -EXPORT_SYMBOL(__page_pool_request_shutdown); + +static int page_pool_release(struct page_pool *pool) +{ + int inflight; + + page_pool_scrub(pool); + inflight = page_pool_inflight(pool); + if (!inflight) + page_pool_free(pool); + + return inflight; +} + +static void page_pool_release_retry(struct work_struct *wq) +{ + struct delayed_work *dwq = to_delayed_work(wq); + struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); + int inflight; + + inflight = page_pool_release(pool); + if (!inflight) + return; + + /* Periodic warning */ + if (time_after_eq(jiffies, pool->defer_warn)) { + int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; + + pr_warn("%s() stalled pool shutdown %d inflight %d sec\n", + __func__, inflight, sec); + pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; + } + + /* Still not ready to be disconnected, retry later */ + schedule_delayed_work(&pool->release_dw, DEFER_TIME); +} + +void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *)) +{ + refcount_inc(&pool->user_cnt); + pool->disconnect = disconnect; +} + +void page_pool_destroy(struct page_pool *pool) +{ + if (!pool) + return; + + if (!page_pool_put(pool)) + return; + + if (!page_pool_release(pool)) + return; + + pool->defer_start = jiffies; + pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; + + INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); + schedule_delayed_work(&pool->release_dw, DEFER_TIME); +} +EXPORT_SYMBOL(page_pool_destroy); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 48b1e429857c..cb3b565ff5ad 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) /* skb was 'freed' by stack, so clean few * bits and reuse it */ - skb_reset_tc(skb); + skb_reset_redirect(skb); } while (--burst > 0); goto out; /* Skips 
xmit_mode M_START_XMIT */ } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e4ec575c1fba..944acb1a9f29 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2959,8 +2959,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, dev->rtnl_link_ops = ops; dev->rtnl_link_state = RTNL_LINK_INITIALIZING; - if (tb[IFLA_MTU]) - dev->mtu = nla_get_u32(tb[IFLA_MTU]); + if (tb[IFLA_MTU]) { + u32 mtu = nla_get_u32(tb[IFLA_MTU]); + int err; + + err = dev_validate_mtu(dev, mtu, extack); + if (err) { + free_netdev(dev); + return ERR_PTR(err); + } + dev->mtu = mtu; + } if (tb[IFLA_ADDRESS]) { memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), nla_len(tb[IFLA_ADDRESS])); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 867e61df00db..13f916ae0d29 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -432,7 +432,8 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, len += NET_SKB_PAD; if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || - (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)) || + !netdev_pagefrag_enabled) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); if (!skb) goto skb_fail; @@ -5484,7 +5485,7 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, * Returns 0 on success, -errno otherwise. */ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, - int mac_len) + int mac_len, bool ethernet) { struct mpls_shim_hdr *lse; int err; @@ -5515,7 +5516,7 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, lse->label_stack_entry = mpls_lse; skb_postpush_rcsum(skb, lse, MPLS_HLEN); - if (skb->dev && skb->dev->type == ARPHRD_ETHER) + if (ethernet) skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); skb->protocol = mpls_proto; @@ -5529,12 +5530,14 @@ EXPORT_SYMBOL_GPL(skb_mpls_push); * @skb: buffer * @next_proto: ethertype of header after popped MPLS header * @mac_len: length of the MAC header + * @ethernet: flag to indicate if ethernet header is present in packet * * Expects skb->data at mac header. * * Returns 0 on success, -errno otherwise. */ -int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len) +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, + bool ethernet) { int err; @@ -5553,7 +5556,7 @@ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len) skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); - if (skb->dev && skb->dev->type == ARPHRD_ETHER) { + if (ethernet) { struct ethhdr *hdr; /* use mpls_hdr() to get ethertype to account for VLANs. */ diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ad31e4e53d0a..ded2d5227678 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -421,7 +421,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) copied = skb->len; msg->sg.start = 0; msg->sg.size = copied; - msg->sg.end = num_sge == MAX_MSG_FRAGS ? 
0 : num_sge; + msg->sg.end = num_sge; msg->skb = skb; sk_psock_queue_msg(psock, msg); @@ -793,15 +793,18 @@ static void sk_psock_strp_data_ready(struct sock *sk) static void sk_psock_write_space(struct sock *sk) { struct sk_psock *psock; - void (*write_space)(struct sock *sk); + void (*write_space)(struct sock *sk) = NULL; rcu_read_lock(); psock = sk_psock(sk); - if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))) - schedule_work(&psock->work); - write_space = psock->saved_write_space; + if (likely(psock)) { + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + schedule_work(&psock->work); + write_space = psock->saved_write_space; + } rcu_read_unlock(); - write_space(sk); + if (write_space) + write_space(sk); } int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) diff --git a/net/core/sock.c b/net/core/sock.c index ac78a570e43a..0adf7a9e5a90 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1832,7 +1832,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) atomic_set(&newsk->sk_zckey, 0); sock_reset_flag(newsk, SOCK_DONE); - mem_cgroup_sk_alloc(newsk); + + /* sk->sk_memcg will be populated at accept() time */ + newsk->sk_memcg = NULL; + cgroup_sk_alloc(&newsk->sk_cgrp_data); rcu_read_lock(); @@ -2918,7 +2921,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_max_pacing_rate = ~0UL; sk->sk_pacing_rate = ~0UL; - sk->sk_pacing_shift = 10; + WRITE_ONCE(sk->sk_pacing_shift, 10); sk->sk_incoming_cpu = -1; sk_rx_queue_clear(sk); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index eb114ee419b6..8291568b707f 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -233,20 +233,26 @@ static void sock_map_free(struct bpf_map *map) struct bpf_stab *stab = container_of(map, struct bpf_stab, map); int i; + /* After the sync no updates or deletes will be in-flight so it + * is safe to walk map and remove entries without risking a race + * in EEXIST update case. 
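+ * The resulting teardown order is, in sketch form:
+ *
+ *	synchronize_rcu();		// flush in-flight updates/deletes
+ *	for (i = 0; i < map->max_entries; i++) {
+ *		sk = xchg(&stab->sks[i], NULL);
+ *		if (sk)
+ *			sock_map_unref(sk, psk);	// with sk locked
+ *	}
+ *	synchronize_rcu();		// wait for psock readers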
+ */ synchronize_rcu(); - rcu_read_lock(); - raw_spin_lock_bh(&stab->lock); for (i = 0; i < stab->map.max_entries; i++) { struct sock **psk = &stab->sks[i]; struct sock *sk; sk = xchg(psk, NULL); - if (sk) + if (sk) { + lock_sock(sk); + rcu_read_lock(); sock_map_unref(sk, psk); + rcu_read_unlock(); + release_sock(sk); + } } - raw_spin_unlock_bh(&stab->lock); - rcu_read_unlock(); + + /* wait for psock readers accessing its map link */ synchronize_rcu(); bpf_map_area_free(stab->sks); @@ -413,14 +419,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key, ret = -EINVAL; goto out; } - if (!sock_map_sk_is_suitable(sk) || - sk->sk_state != TCP_ESTABLISHED) { + if (!sock_map_sk_is_suitable(sk)) { ret = -EOPNOTSUPP; goto out; } sock_map_sk_acquire(sk); - ret = sock_map_update_common(map, idx, sk, flags); + if (sk->sk_state != TCP_ESTABLISHED) + ret = -EOPNOTSUPP; + else + ret = sock_map_update_common(map, idx, sk, flags); sock_map_sk_release(sk); out: fput(sock->file); @@ -736,14 +744,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key, ret = -EINVAL; goto out; } - if (!sock_map_sk_is_suitable(sk) || - sk->sk_state != TCP_ESTABLISHED) { + if (!sock_map_sk_is_suitable(sk)) { ret = -EOPNOTSUPP; goto out; } sock_map_sk_acquire(sk); - ret = sock_hash_update_common(map, key, sk, flags); + if (sk->sk_state != TCP_ESTABLISHED) + ret = -EOPNOTSUPP; + else + ret = sock_hash_update_common(map, key, sk, flags); sock_map_sk_release(sk); out: fput(sock->file); @@ -855,18 +865,28 @@ static void sock_hash_free(struct bpf_map *map) struct hlist_node *node; int i; + /* After the sync no updates or deletes will be in-flight so it + * is safe to walk map and remove entries without risking a race + * in EEXIST update case. + */ synchronize_rcu(); - rcu_read_lock(); for (i = 0; i < htab->buckets_num; i++) { bucket = sock_hash_select_bucket(htab, i); - raw_spin_lock_bh(&bucket->lock); hlist_for_each_entry_safe(elem, node, &bucket->head, node) { hlist_del_rcu(&elem->node); + lock_sock(elem->sk); + rcu_read_lock(); sock_map_unref(elem->sk, elem); + rcu_read_unlock(); + release_sock(elem->sk); } - raw_spin_unlock_bh(&bucket->lock); } - rcu_read_unlock(); + + /* wait for psock readers accessing its map link */ + synchronize_rcu(); bpf_map_area_free(htab->buckets); kfree(htab); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index eb29e5adc84d..8dcb6ce98d3a 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -288,6 +288,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, return ret; } +# ifdef CONFIG_HAVE_EBPF_JIT static int proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -298,6 +299,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } +# endif /* CONFIG_HAVE_EBPF_JIT */ static int proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, @@ -575,6 +577,13 @@ static struct ctl_table net_core_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "netdev_pagefrag_enabled", + .data = &netdev_pagefrag_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, { } }; diff --git a/net/core/utils.c b/net/core/utils.c index 6b6e51db9f3b..1f31a39236d5 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -438,6 +438,23 @@
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, } EXPORT_SYMBOL(inet_proto_csum_replace4); +/** + * inet_proto_csum_replace16 - update layer 4 header checksum field + * @sum: Layer 4 header checksum field + * @skb: sk_buff for the packet + * @from: old IPv6 address + * @to: new IPv6 address + * @pseudohdr: True if layer 4 header checksum includes pseudoheader + * + * Update layer 4 header as per the update in IPv6 src/dst address. + * + * There is no need to update skb->csum in this function, because the updates + * to the two fields a) IPv6 src/dst address and b) L4 header checksum cancel + * each other out in the skb->csum calculation. inet_proto_csum_replace4(), in + * contrast, must update skb->csum, because the updates to the three fields + * a) IPv4 src/dst address, b) IPv4 header checksum and c) L4 header checksum + * leave a net diff equal to the L4 header checksum change. + */ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, bool pseudohdr) @@ -449,9 +466,6 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, if (skb->ip_summed != CHECKSUM_PARTIAL) { *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum))); - if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) - skb->csum = ~csum_partial(diff, sizeof(diff), - ~skb->csum); } else if (pseudohdr) *sum = ~csum_fold(csum_partial(diff, sizeof(diff), csum_unfold(*sum))); diff --git a/net/core/xdp.c b/net/core/xdp.c index d7bf62ffbb5e..b3f463c6543f 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -70,10 +70,6 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) xa = container_of(rcu, struct xdp_mem_allocator, rcu); - /* Allocator have indicated safe to remove before this is called */ - if (xa->mem.type == MEM_TYPE_PAGE_POOL) - page_pool_free(xa->page_pool); - /* Allow this ID to be reused */ ida_simple_remove(&mem_id_pool, xa->mem.id); @@ -85,10 +81,41 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) kfree(xa); } -static bool __mem_id_disconnect(int id, bool force) +static void mem_xa_remove(struct xdp_mem_allocator *xa) +{ + trace_mem_disconnect(xa); + + if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) + call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); +} + +static void mem_allocator_disconnect(void *allocator) +{ + struct xdp_mem_allocator *xa; + struct rhashtable_iter iter; + + mutex_lock(&mem_id_lock); + + rhashtable_walk_enter(mem_id_ht, &iter); + do { + rhashtable_walk_start(&iter); + + while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) { + if (xa->allocator == allocator) + mem_xa_remove(xa); + } + + rhashtable_walk_stop(&iter); + + } while (xa == ERR_PTR(-EAGAIN)); + rhashtable_walk_exit(&iter); + + mutex_unlock(&mem_id_lock); +} + +static void mem_id_disconnect(int id) { struct xdp_mem_allocator *xa; - bool safe_to_remove = true; mutex_lock(&mem_id_lock); @@ -96,51 +123,15 @@ static bool __mem_id_disconnect(int id, bool force) if (!xa) { mutex_unlock(&mem_id_lock); WARN(1, "Request remove non-existing id(%d), driver bug?", id); - return true; + return; } - xa->disconnect_cnt++; - /* Detects in-flight packet-pages for page_pool */ - if (xa->mem.type == MEM_TYPE_PAGE_POOL) - safe_to_remove = page_pool_request_shutdown(xa->page_pool); + trace_mem_disconnect(xa); - trace_mem_disconnect(xa, safe_to_remove, force); - - if ((safe_to_remove || force) && - !rhashtable_remove_fast(mem_id_ht, &xa->node,
mem_id_rht_params)) call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); mutex_unlock(&mem_id_lock); - return (safe_to_remove|force); -} - -#define DEFER_TIME (msecs_to_jiffies(1000)) -#define DEFER_WARN_INTERVAL (30 * HZ) -#define DEFER_MAX_RETRIES 120 - -static void mem_id_disconnect_defer_retry(struct work_struct *wq) -{ - struct delayed_work *dwq = to_delayed_work(wq); - struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq); - bool force = false; - - if (xa->disconnect_cnt > DEFER_MAX_RETRIES) - force = true; - - if (__mem_id_disconnect(xa->mem.id, force)) - return; - - /* Periodic warning */ - if (time_after_eq(jiffies, xa->defer_warn)) { - int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ; - - pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n", - __func__, xa->mem.id, xa->disconnect_cnt, sec); - xa->defer_warn = jiffies + DEFER_WARN_INTERVAL; - } - - /* Still not ready to be disconnected, retry later */ - schedule_delayed_work(&xa->defer_wq, DEFER_TIME); } void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) @@ -153,38 +144,21 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) return; } - if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL && - xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) { - return; - } - if (id == 0) return; - if (__mem_id_disconnect(id, false)) - return; + if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY) + return mem_id_disconnect(id); - /* Could not disconnect, defer new disconnect attempt to later */ - mutex_lock(&mem_id_lock); - - xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); - if (!xa) { - mutex_unlock(&mem_id_lock); - return; + if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) { + rcu_read_lock(); + xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); + page_pool_destroy(xa->page_pool); + rcu_read_unlock(); } - xa->defer_start = jiffies; - xa->defer_warn = jiffies + DEFER_WARN_INTERVAL; - - INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry); - mutex_unlock(&mem_id_lock); - schedule_delayed_work(&xa->defer_wq, DEFER_TIME); } EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model); -/* This unregister operation will also cleanup and destroy the - * allocator. The page_pool_free() operation is first called when it's - * safe to remove, possibly deferred to a workqueue. 
- */ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) { /* Simplify driver cleanup code paths, allow unreg "unused" */ @@ -371,7 +345,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, } if (type == MEM_TYPE_PAGE_POOL) - page_pool_get(xdp_alloc->page_pool); + page_pool_use_xdp_mem(allocator, mem_allocator_disconnect); mutex_unlock(&mem_id_lock); @@ -402,15 +376,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); page = virt_to_head_page(data); - if (likely(xa)) { - napi_direct &= !xdp_return_frame_no_direct(); - page_pool_put_page(xa->page_pool, page, napi_direct); - } else { - /* Hopefully stack show who to blame for late return */ - WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id); - trace_mem_return_failed(mem, page); - put_page(page); - } + napi_direct &= !xdp_return_frame_no_direct(); + page_pool_put_page(xa->page_pool, page, napi_direct); rcu_read_unlock(); break; case MEM_TYPE_PAGE_SHARED: diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 25aab672fc99..1e5e08cc0bfc 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -210,7 +210,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; @@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0); @@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index aea918135ec3..08c3dc45f1a4 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how); static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); static void dn_dst_link_failure(struct sk_buff *); static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb , u32 mtu); + struct sk_buff *skb , u32 mtu, + bool confirm_neigh); static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, @@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops) * advertise to the other end). 
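 * (The confirm_neigh parameter added here follows a tree-wide change to
 * the dst_ops->update_pmtu() signature: callers now indicate whether the
 * neighbour entry may be confirmed as a side effect of the PMTU update.)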
*/ static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct dn_route *rt = (struct dn_route *) dst; struct neighbour *n = rt->n; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 12f8c7ee4dd8..bf9947c577b6 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -128,7 +128,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev, /* port.c */ int dsa_port_set_state(struct dsa_port *dp, u8 state, struct switchdev_trans *trans); +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy); int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy); +void dsa_port_disable_rt(struct dsa_port *dp); void dsa_port_disable(struct dsa_port *dp); int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br); void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br); diff --git a/net/dsa/port.c b/net/dsa/port.c index 9b54e5a76297..fa023af69bc4 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state) pr_err("DSA: failed to set STP state %u (%d)\n", state, err); } -int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) { struct dsa_switch *ds = dp->ds; int port = dp->index; @@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_FORWARDING); + if (dp->pl) + phylink_start(dp->pl); + return 0; } -void dsa_port_disable(struct dsa_port *dp) +int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) +{ + int err; + + rtnl_lock(); + err = dsa_port_enable_rt(dp, phy); + rtnl_unlock(); + + return err; +} + +void dsa_port_disable_rt(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; int port = dp->index; + if (dp->pl) + phylink_stop(dp->pl); + if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_DISABLED); @@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp) ds->ops->port_disable(ds, port); } +void dsa_port_disable(struct dsa_port *dp) +{ + rtnl_lock(); + dsa_port_disable_rt(dp); + rtnl_unlock(); +} + int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br) { struct dsa_notifier_bridge_info info = { @@ -615,10 +639,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp) goto err_phy_connect; } - rtnl_lock(); - phylink_start(dp->pl); - rtnl_unlock(); - return 0; err_phy_connect: @@ -629,9 +649,14 @@ err_phy_connect: int dsa_port_link_register_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; + struct device_node *phy_np; - if (!ds->ops->adjust_link) - return dsa_port_phylink_register(dp); + if (!ds->ops->adjust_link) { + phy_np = of_parse_phandle(dp->dn, "phy-handle", 0); + if (of_phy_is_fixed_link(dp->dn) || phy_np) + return dsa_port_phylink_register(dp); + return 0; + } dev_warn(ds->dev, "Using legacy PHYLIB callbacks. 
Please migrate to PHYLINK!\n"); @@ -646,11 +671,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; - if (!ds->ops->adjust_link) { + if (!ds->ops->adjust_link && dp->pl) { rtnl_lock(); phylink_disconnect_phy(dp->pl); rtnl_unlock(); phylink_destroy(dp->pl); + dp->pl = NULL; return; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 028e65f4b5ba..23c2210fa7ec 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -90,12 +90,10 @@ static int dsa_slave_open(struct net_device *dev) goto clear_allmulti; } - err = dsa_port_enable(dp, dev->phydev); + err = dsa_port_enable_rt(dp, dev->phydev); if (err) goto clear_promisc; - phylink_start(dp->pl); - return 0; clear_promisc: @@ -119,9 +117,7 @@ static int dsa_slave_close(struct net_device *dev) cancel_work_sync(&dp->xmit_work); skb_queue_purge(&dp->xmit_queue); - phylink_stop(dp->pl); - - dsa_port_disable(dp); + dsa_port_disable_rt(dp); dev_mc_unsync(master, dev); dev_uc_unsync(master, dev); diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 9e5a883a9f0c..ebe73848d1cf 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -299,49 +299,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, } EXPORT_SYMBOL_GPL(dsa_8021q_xmit); -/* In the DSA packet_type handler, skb->data points in the middle of the VLAN - * tag, after tpid and before tci. This is because so far, ETH_HLEN - * (DMAC, SMAC, EtherType) bytes were pulled. - * There are 2 bytes of VLAN tag left in skb->data, and upper - * layers expect the 'real' EtherType to be consumed as well. - * Coincidentally, a VLAN header is also of the same size as - * the number of bytes that need to be pulled. - * - * skb_mac_header skb->data - * | | - * v v - * | | | | | | | | | | | | | | | | | | | - * +-----------------------+-----------------------+-------+-------+-------+ - * | Destination MAC | Source MAC | TPID | TCI | EType | - * +-----------------------+-----------------------+-------+-------+-------+ - * ^ | | - * |<--VLAN_HLEN-->to <---VLAN_HLEN---> - * from | - * >>>>>>> v - * >>>>>>> | | | | | | | | | | | | | | | - * >>>>>>> +-----------------------+-----------------------+-------+ - * >>>>>>> | Destination MAC | Source MAC | EType | - * +-----------------------+-----------------------+-------+ - * ^ ^ - * (now part of | | - * skb->head) skb_mac_header skb->data - */ -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) -{ - u8 *from = skb_mac_header(skb); - u8 *dest = from + VLAN_HLEN; - - memmove(dest, from, ETH_HLEN - VLAN_HLEN); - skb_pull(skb, VLAN_HLEN); - skb_push(skb, ETH_HLEN); - skb_reset_mac_header(skb); - skb_reset_mac_len(skb); - skb_pull_rcsum(skb, ETH_HLEN); - - return skb; -} -EXPORT_SYMBOL_GPL(dsa_8021q_remove_header); - static const struct dsa_device_ops dsa_8021q_netdev_ops = { .name = "8021q", .proto = DSA_TAG_PROTO_8021Q, diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 9c3114179690..9169b63a89e3 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb, /* Remove Broadcom tag and update checksum */ skb_pull_rcsum(skb, BRCM_TAG_LEN); + skb->offload_fwd_mark = 1; + return skb; } #endif diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c index b678160bbd66..408d4af390a0 100644 --- a/net/dsa/tag_gswip.c +++ b/net/dsa/tag_gswip.c @@ -104,7 +104,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb, } static const struct dsa_device_ops gswip_netdev_ops = { - .name = "gwsip", + .name = 
"gswip", .proto = DSA_TAG_PROTO_GSWIP, .xmit = gswip_tag_xmit, .rcv = gswip_tag_rcv, diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index c95885215525..70db7c909f74 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -33,10 +33,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) struct dsa_port *dp = dsa_slave_to_port(dev); u16 *phdr, hdr; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - if (skb_cow_head(skb, 0) < 0) + if (skb_cow_head(skb, QCA_HDR_LEN) < 0) return NULL; skb_push(skb, QCA_HDR_LEN); diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 63ef2a14c934..12f3ce52e62e 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -238,14 +238,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, { struct sja1105_meta meta = {0}; int source_port, switch_id; - struct vlan_ethhdr *hdr; + struct ethhdr *hdr; u16 tpid, vid, tci; bool is_link_local; bool is_tagged; bool is_meta; - hdr = vlan_eth_hdr(skb); - tpid = ntohs(hdr->h_vlan_proto); + hdr = eth_hdr(skb); + tpid = ntohs(hdr->h_proto); is_tagged = (tpid == ETH_P_SJA1105); is_link_local = sja1105_is_link_local(skb); is_meta = sja1105_is_meta_frame(skb); @@ -254,7 +254,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, if (is_tagged) { /* Normal traffic path. */ - tci = ntohs(hdr->h_vlan_TCI); + skb_push_rcsum(skb, ETH_HLEN); + __skb_vlan_pop(skb, &tci); + skb_pull_rcsum(skb, ETH_HLEN); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + vid = tci & VLAN_VID_MASK; source_port = dsa_8021q_rx_source_port(vid); switch_id = dsa_8021q_rx_switch_id(vid); @@ -283,12 +288,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, return NULL; } - /* Delete/overwrite fake VLAN header, DSA expects to not find - * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN). - */ - if (is_tagged) - skb = dsa_8021q_remove_header(skb); - return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local, is_meta); } diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 17374afee28f..9040fe55e0f5 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -244,7 +244,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 eth->h_proto = type; memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); memcpy(eth->h_dest, neigh->ha, ETH_ALEN); - hh->hh_len = ETH_HLEN; + + /* Pairs with READ_ONCE() in neigh_resolve_output(), + * neigh_hh_output() and neigh_update_hhs(). 
+ */ + smp_store_release(&hh->hh_len, ETH_HLEN); + return 0; } EXPORT_SYMBOL(eth_header_cache); diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index 94447974a3c0..d5f709b940ff 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -20,6 +20,8 @@ #include "hsr_main.h" #include "hsr_framereg.h" +static struct dentry *hsr_debugfs_root_dir; + static void print_mac_address(struct seq_file *sfp, unsigned char *mac) { seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:", @@ -63,8 +65,20 @@ hsr_node_table_open(struct inode *inode, struct file *filp) return single_open(filp, hsr_node_table_show, inode->i_private); } +void hsr_debugfs_rename(struct net_device *dev) +{ + struct hsr_priv *priv = netdev_priv(dev); + struct dentry *d; + + d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root, + hsr_debugfs_root_dir, dev->name); + if (IS_ERR(d)) + netdev_warn(dev, "failed to rename\n"); + else + priv->node_tbl_root = d; +} + static const struct file_operations hsr_fops = { - .owner = THIS_MODULE, .open = hsr_node_table_open, .read = seq_read, .llseek = seq_lseek, @@ -78,15 +92,14 @@ static const struct file_operations hsr_fops = { * When debugfs is configured this routine sets up the node_table file per * hsr device for dumping the node_table entries */ -int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) +void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) { - int rc = -1; struct dentry *de = NULL; - de = debugfs_create_dir(hsr_dev->name, NULL); - if (!de) { - pr_err("Cannot create hsr debugfs root\n"); - return rc; + de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir); + if (IS_ERR(de)) { + pr_err("Cannot create hsr debugfs directory\n"); + return; } priv->node_tbl_root = de; @@ -94,13 +107,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) de = debugfs_create_file("node_table", S_IFREG | 0444, priv->node_tbl_root, priv, &hsr_fops); - if (!de) { - pr_err("Cannot create hsr node_table directory\n"); - return rc; + if (IS_ERR(de)) { + pr_err("Cannot create hsr node_table file\n"); + debugfs_remove(priv->node_tbl_root); + priv->node_tbl_root = NULL; + return; } priv->node_tbl_file = de; - - return 0; } /* hsr_debugfs_term - Tear down debugfs infrastructure @@ -117,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv) debugfs_remove(priv->node_tbl_root); priv->node_tbl_root = NULL; } + +void hsr_debugfs_create_root(void) +{ + hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL); + if (IS_ERR(hsr_debugfs_root_dir)) { + pr_err("Cannot create hsr debugfs root directory\n"); + hsr_debugfs_root_dir = NULL; + } +} + +void hsr_debugfs_remove_root(void) +{ + /* debugfs_remove() internally checks NULL and ERROR */ + debugfs_remove(hsr_debugfs_root_dir); +} diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index f509b495451a..c7bd6c49fadf 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -227,8 +227,13 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) struct hsr_port *master; master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); - skb->dev = master->dev; - hsr_forward_skb(skb, master); + if (master) { + skb->dev = master->dev; + hsr_forward_skb(skb, master); + } else { + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); + } return NETDEV_TX_OK; } @@ -267,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, skb->dev->dev_addr, skb->len) <= 0) goto out; skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb);
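+	/* Presumably needed because the supervision frame is built from
+	 * scratch: without these resets the network/transport offsets
+	 * would still point at whatever the skb allocation left behind.
+	 */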
if (hsr_ver > 0) { hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); @@ -363,7 +370,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) del_timer_sync(&hsr->prune_timer); del_timer_sync(&hsr->announce_timer); - hsr_del_self_node(&hsr->self_node_db); + hsr_del_self_node(hsr); hsr_del_nodes(&hsr->node_db); } @@ -435,11 +442,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], INIT_LIST_HEAD(&hsr->ports); INIT_LIST_HEAD(&hsr->node_db); INIT_LIST_HEAD(&hsr->self_node_db); + spin_lock_init(&hsr->list_lock); ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); /* Make sure we recognize frames from ourselves in hsr_rcv() */ - res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr, + res = hsr_create_self_node(hsr, hsr_dev->dev_addr, slave[1]->dev_addr); if (res < 0) return res; @@ -472,31 +480,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER); if (res) - goto err_add_port; + goto err_add_master; res = register_netdevice(hsr_dev); if (res) - goto fail; + goto err_unregister; res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A); if (res) - goto fail; + goto err_add_slaves; + res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B); if (res) - goto fail; + goto err_add_slaves; + hsr_debugfs_init(hsr, hsr_dev); mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD)); - res = hsr_debugfs_init(hsr, hsr_dev); - if (res) - goto fail; return 0; -fail: +err_add_slaves: + unregister_netdevice(hsr_dev); +err_unregister: list_for_each_entry_safe(port, tmp, &hsr->ports, port_list) hsr_del_port(port); -err_add_port: - hsr_del_self_node(&hsr->self_node_db); +err_add_master: + hsr_del_self_node(hsr); return res; } diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 292be446007b..002f341f3564 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db, /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize * frames from self that have been looped over the HSR ring.
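 * The single self-node entry is replaced rather than modified in place:
 * hsr_create_self_node() swaps a new node in with list_replace_rcu()
 * under hsr->list_lock and frees the old one with kfree_rcu(), so a
 * lockless reader always sees a consistent address pair.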
*/ -int hsr_create_self_node(struct list_head *self_node_db, +int hsr_create_self_node(struct hsr_priv *hsr, unsigned char addr_a[ETH_ALEN], unsigned char addr_b[ETH_ALEN]) { + struct list_head *self_node_db = &hsr->self_node_db; struct hsr_node *node, *oldnode; node = kmalloc(sizeof(*node), GFP_KERNEL); @@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db, ether_addr_copy(node->macaddress_A, addr_a); ether_addr_copy(node->macaddress_B, addr_b); - rcu_read_lock(); + spin_lock_bh(&hsr->list_lock); oldnode = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); if (oldnode) { list_replace_rcu(&oldnode->mac_list, &node->mac_list); - rcu_read_unlock(); - synchronize_rcu(); - kfree(oldnode); + spin_unlock_bh(&hsr->list_lock); + kfree_rcu(oldnode, rcu_head); } else { - rcu_read_unlock(); list_add_tail_rcu(&node->mac_list, self_node_db); + spin_unlock_bh(&hsr->list_lock); } return 0; } -void hsr_del_self_node(struct list_head *self_node_db) +void hsr_del_self_node(struct hsr_priv *hsr) { + struct list_head *self_node_db = &hsr->self_node_db; struct hsr_node *node; - rcu_read_lock(); + spin_lock_bh(&hsr->list_lock); node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); - rcu_read_unlock(); if (node) { list_del_rcu(&node->mac_list); - kfree(node); + kfree_rcu(node, rcu_head); } + spin_unlock_bh(&hsr->list_lock); } void hsr_del_nodes(struct list_head *node_db) @@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db) * seq_out is used to initialize filtering of outgoing duplicate frames * originating from the newly added node. */ -struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], - u16 seq_out) +static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, + struct list_head *node_db, + unsigned char addr[], + u16 seq_out) { - struct hsr_node *node; + struct hsr_node *new_node, *node; unsigned long now; int i; - node = kzalloc(sizeof(*node), GFP_ATOMIC); - if (!node) + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) return NULL; - ether_addr_copy(node->macaddress_A, addr); + ether_addr_copy(new_node->macaddress_A, addr); /* We are only interested in time diffs here, so use current jiffies * as initialization. (0 could trigger a spurious ring error warning).
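 * Comparisons against these timestamps must go through the wrap-safe
 * jiffies helpers, e.g.
 *
 *	if (time_after(jiffies, node->time_in[i] + timeout))
 *		// port silent for at least 'timeout' jiffies
 *
 * with 'timeout' standing in for whatever limit the pruning logic uses.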
*/ now = jiffies; for (i = 0; i < HSR_PT_PORTS; i++) - node->time_in[i] = now; + new_node->time_in[i] = now; for (i = 0; i < HSR_PT_PORTS; i++) - node->seq_out[i] = seq_out; - - list_add_tail_rcu(&node->mac_list, node_db); + new_node->seq_out[i] = seq_out; + spin_lock_bh(&hsr->list_lock); + list_for_each_entry_rcu(node, node_db, mac_list) { + if (ether_addr_equal(node->macaddress_A, addr)) + goto out; + if (ether_addr_equal(node->macaddress_B, addr)) + goto out; + } + list_add_tail_rcu(&new_node->mac_list, node_db); + spin_unlock_bh(&hsr->list_lock); + return new_node; +out: + spin_unlock_bh(&hsr->list_lock); + kfree(new_node); return node; } @@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, bool is_sup) { struct list_head *node_db = &port->hsr->node_db; + struct hsr_priv *hsr = port->hsr; struct hsr_node *node; struct ethhdr *ethhdr; u16 seq_out; @@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, seq_out = HSR_SEQNR_START; } - return hsr_add_node(node_db, ethhdr->h_source, seq_out); + return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out); } /* Use the Supervision frame's info about an eventual macaddress_B for merging @@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, struct hsr_port *port_rcv) { - struct ethhdr *ethhdr; - struct hsr_node *node_real; + struct hsr_priv *hsr = port_rcv->hsr; struct hsr_sup_payload *hsr_sp; + struct hsr_node *node_real; struct list_head *node_db; + struct ethhdr *ethhdr; int i; ethhdr = (struct ethhdr *)skb_mac_header(skb); @@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A); if (!node_real) /* No frame received from AddrA of this node yet */ - node_real = hsr_add_node(node_db, hsr_sp->macaddress_A, + node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A, HSR_SEQNR_START - 1); if (!node_real) goto done; /* No mem */ @@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, } node_real->addr_B_port = port_rcv->type; + spin_lock_bh(&hsr->list_lock); list_del_rcu(&node_curr->mac_list); + spin_unlock_bh(&hsr->list_lock); kfree_rcu(node_curr, rcu_head); done: @@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t) { struct hsr_priv *hsr = from_timer(hsr, t, prune_timer); struct hsr_node *node; + struct hsr_node *tmp; struct hsr_port *port; unsigned long timestamp; unsigned long time_a, time_b; - rcu_read_lock(); - list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { + spin_lock_bh(&hsr->list_lock); + list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) { /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A] * nor time_in[HSR_PT_SLAVE_B], will ever be updated for * the master port. 
Thus the master node will be repeatedly @@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t) kfree_rcu(node, rcu_head); } } - rcu_read_unlock(); + spin_unlock_bh(&hsr->list_lock); /* Restart timer */ mod_timer(&hsr->prune_timer, @@ -463,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *hsr, struct hsr_port *port; unsigned long tdiff; - rcu_read_lock(); node = find_node_by_addr_A(&hsr->node_db, addr); - if (!node) { - rcu_read_unlock(); - return -ENOENT; /* No such entry */ - } + if (!node) + return -ENOENT; ether_addr_copy(addr_b, node->macaddress_B); @@ -503,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *hsr, *addr_b_ifindex = -1; } - rcu_read_unlock(); - return 0; } diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 89a3ce38151d..0f0fa12b4329 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -12,10 +12,8 @@ struct hsr_node; -void hsr_del_self_node(struct list_head *self_node_db); +void hsr_del_self_node(struct hsr_priv *hsr); void hsr_del_nodes(struct list_head *node_db); -struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], - u16 seq_out); struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, bool is_sup); void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, @@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, void hsr_prune_nodes(struct timer_list *t); -int hsr_create_self_node(struct list_head *self_node_db, +int hsr_create_self_node(struct hsr_priv *hsr, unsigned char addr_a[ETH_ALEN], unsigned char addr_b[ETH_ALEN]); diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index b9988a662ee1..9e389accbfc7 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, case NETDEV_CHANGE: /* Link (carrier) state changes */ hsr_check_carrier_and_operstate(hsr); break; + case NETDEV_CHANGENAME: + if (is_hsr_master(dev)) + hsr_debugfs_rename(dev); + break; case NETDEV_CHANGEADDR: if (port->type == HSR_PT_MASTER) { /* This should not happen since there's no @@ -64,7 +68,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, /* Make sure we recognize frames from ourselves in hsr_rcv() */ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); - res = hsr_create_self_node(&hsr->self_node_db, + res = hsr_create_self_node(hsr, master->dev->dev_addr, port ? port->dev->dev_addr : @@ -123,6 +127,7 @@ static void __exit hsr_exit(void) { unregister_netdevice_notifier(&hsr_nb); hsr_netlink_exit(); + hsr_debugfs_remove_root(); } module_init(hsr_init); diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 96fac696a1e1..754d84b217f0 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -160,8 +160,9 @@ struct hsr_priv { int announce_count; u16 sequence_nr; u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ - u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ - spinlock_t seqnr_lock; /* locking for sequence_nr */ + u8 prot_version; /* Indicate if HSRv0 or HSRv1. 
*/ + spinlock_t seqnr_lock; /* locking for sequence_nr */ + spinlock_t list_lock; /* locking for node list */ unsigned char sup_multicast_addr[ETH_ALEN]; #ifdef CONFIG_DEBUG_FS struct dentry *node_tbl_root; @@ -184,17 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb) } #if IS_ENABLED(CONFIG_DEBUG_FS) -int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); +void hsr_debugfs_rename(struct net_device *dev); +void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); void hsr_debugfs_term(struct hsr_priv *priv); +void hsr_debugfs_create_root(void); +void hsr_debugfs_remove_root(void); #else -static inline int hsr_debugfs_init(struct hsr_priv *priv, - struct net_device *hsr_dev) +static inline void hsr_debugfs_rename(struct net_device *dev) { - return 0; } - +static inline void hsr_debugfs_init(struct hsr_priv *priv, + struct net_device *hsr_dev) +{} static inline void hsr_debugfs_term(struct hsr_priv *priv) {} +static inline void hsr_debugfs_create_root(void) +{} +static inline void hsr_debugfs_remove_root(void) +{} #endif #endif /* __HSR_PRIVATE_H */ diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 8f8337f893ba..fae21c863b1f 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) if (!na) goto invalid; - hsr_dev = __dev_get_by_index(genl_info_net(info), - nla_get_u32(info->attrs[HSR_A_IFINDEX])); + rcu_read_lock(); + hsr_dev = dev_get_by_index_rcu(genl_info_net(info), + nla_get_u32(info->attrs[HSR_A_IFINDEX])); if (!hsr_dev) - goto invalid; + goto rcu_unlock; if (!is_hsr_master(hsr_dev)) - goto invalid; + goto rcu_unlock; /* Send reply */ - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; @@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq); if (res < 0) goto nla_put_failure; - rcu_read_lock(); port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); if (port) res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, port->dev->ifindex); - rcu_read_unlock(); if (res < 0) goto nla_put_failure; @@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq); if (res < 0) goto nla_put_failure; - rcu_read_lock(); port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); if (port) res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, port->dev->ifindex); - rcu_read_unlock(); if (res < 0) goto nla_put_failure; + rcu_read_unlock(); + genlmsg_end(skb_out, msg_head); genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid); return 0; +rcu_unlock: + rcu_read_unlock(); invalid: netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; @@ -351,6 +352,7 @@ nla_put_failure: /* Fall through */ fail: + rcu_read_unlock(); return res; } @@ -358,16 +360,14 @@ fail: */ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) { - /* For receiving */ - struct nlattr *na; - struct net_device *hsr_dev; - - /* For sending */ - struct sk_buff *skb_out; - void *msg_head; - struct hsr_priv *hsr; - void *pos; unsigned char addr[ETH_ALEN]; + struct net_device *hsr_dev; + struct sk_buff *skb_out; + struct hsr_priv *hsr; + bool restart = false; + struct nlattr *na; + void *pos = NULL; + void *msg_head; int res; if (!info) @@ -377,15 +377,17 @@ static int 
hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) if (!na) goto invalid; - hsr_dev = __dev_get_by_index(genl_info_net(info), - nla_get_u32(info->attrs[HSR_A_IFINDEX])); + rcu_read_lock(); + hsr_dev = dev_get_by_index_rcu(genl_info_net(info), + nla_get_u32(info->attrs[HSR_A_IFINDEX])); if (!hsr_dev) - goto invalid; + goto rcu_unlock; if (!is_hsr_master(hsr_dev)) - goto invalid; + goto rcu_unlock; +restart: /* Send reply */ - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; @@ -399,18 +401,26 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) goto nla_put_failure; } - res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex); - if (res < 0) - goto nla_put_failure; + if (!restart) { + res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex); + if (res < 0) + goto nla_put_failure; + } hsr = netdev_priv(hsr_dev); - rcu_read_lock(); - pos = hsr_get_next_node(hsr, NULL, addr); + if (!pos) + pos = hsr_get_next_node(hsr, NULL, addr); while (pos) { res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr); if (res < 0) { - rcu_read_unlock(); + if (res == -EMSGSIZE) { + genlmsg_end(skb_out, msg_head); + genlmsg_unicast(genl_info_net(info), skb_out, + info->snd_portid); + restart = true; + goto restart; + } goto nla_put_failure; } pos = hsr_get_next_node(hsr, pos, addr); @@ -422,15 +432,18 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) return 0; +rcu_unlock: + rcu_read_unlock(); invalid: netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; nla_put_failure: - kfree_skb(skb_out); + nlmsg_free(skb_out); /* Fall through */ fail: + rcu_read_unlock(); return res; } @@ -457,6 +470,7 @@ static struct genl_family hsr_genl_family __ro_after_init = { .version = 1, .maxattr = HSR_A_MAX, .policy = hsr_genl_policy, + .netnsok = true, .module = THIS_MODULE, .ops = hsr_ops, .n_ops = ARRAY_SIZE(hsr_ops), @@ -476,6 +490,7 @@ int __init hsr_netlink_init(void) if (rc) goto fail_genl_register_family; + hsr_debugfs_create_root(); return 0; fail_genl_register_family: diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index ee561297d8a7..a9104d42aafb 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -27,6 +27,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) rcu_read_lock(); /* hsr->node_db, hsr->ports */ port = hsr_port_get_rcu(skb->dev); + if (!port) + goto finish_pass; if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) { /* Directly kill frames sent by ourselves */ @@ -143,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, if (!port) return -ENOMEM; + port->hsr = hsr; + port->dev = dev; + port->type = type; + if (type != HSR_PT_MASTER) { res = hsr_portdev_setup(dev, port); if (res) goto fail_dev_setup; } - port->hsr = hsr; - port->dev = dev; - port->type = type; - list_add_tail_rcu(&port->port_list, &hsr->ports); synchronize_rcu(); diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c index 2c7a38d76a3a..0672b2f01586 100644 --- a/net/ieee802154/nl_policy.c +++ b/net/ieee802154/nl_policy.c @@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, }, [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, }, [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, }, + [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, }, + [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, }, + 
[IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, }, + [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, }, + [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, }, [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, }, + [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, }, [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, }, [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, }, [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, }, diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 03381f3e12ba..a926de2e42b5 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -303,6 +303,7 @@ config SYN_COOKIES config NET_IPVTI tristate "Virtual (secure) IP: tunneling" + depends on IPV6 || IPV6=n select INET_TUNNEL select NET_IP_TUNNEL select XFRM diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 70f92aaca411..0a25205ee69a 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -118,6 +118,7 @@ #include #include +#include /* The inetsw table contains everything that inet_create needs to * build a new socket. @@ -495,7 +496,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, snum = ntohs(addr->sin_port); err = -EACCES; - if (snum && snum < inet_prot_sock(net) && + if (snum && snum < PROT_SOCK && !prot_sock_flag[snum] && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) goto out; @@ -980,7 +981,7 @@ static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon } #endif -const struct proto_ops inet_stream_ops = { +struct proto_ops inet_stream_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, @@ -1751,6 +1752,9 @@ static __net_init int ipv4_mib_init_net(struct net *net) net->mib.net_statistics = alloc_percpu(struct linux_mib); if (!net->mib.net_statistics) goto err_net_mib; + net->mib.netdrop_statistics = alloc_percpu(struct linux_drop_mib); + if (!net->mib.netdrop_statistics) + goto err_netdrop_mib; net->mib.udp_statistics = alloc_percpu(struct udp_mib); if (!net->mib.udp_statistics) goto err_udp_mib; @@ -1775,6 +1779,8 @@ err_icmp_mib: err_udplite_mib: free_percpu(net->mib.udp_statistics); err_udp_mib: + free_percpu(net->mib.netdrop_statistics); +err_netdrop_mib: free_percpu(net->mib.net_statistics); err_net_mib: free_percpu(net->mib.ip_statistics); @@ -1790,6 +1796,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net) free_percpu(net->mib.icmp_statistics); free_percpu(net->mib.udplite_statistics); free_percpu(net->mib.udp_statistics); + free_percpu(net->mib.netdrop_statistics); free_percpu(net->mib.net_statistics); free_percpu(net->mib.ip_statistics); free_percpu(net->mib.tcp_statistics); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 376882215919..0bd10a1f477f 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1724,6 +1724,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; + int res; if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; @@ -1735,7 +1736,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) memset(opt, 0, sizeof(struct ip_options)); opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr); - if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL)) + rcu_read_lock(); + res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL); + rcu_read_unlock(); + + if (res) return; if (gateway) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index a4b5bd4d2c89..e4632bd2026d 100644 --- a/net/ipv4/devinet.c +++ 
b/net/ipv4/devinet.c @@ -1496,11 +1496,6 @@ skip: } } -static bool inetdev_valid_mtu(unsigned int mtu) -{ - return mtu >= IPV4_MIN_MTU; -} - static void inetdev_send_gratuitous_arp(struct net_device *dev, struct in_device *in_dev) diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 0e4a7cf6bc87..e2e219c7854a 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head, if (!x) goto out_reset; + skb->mark = xfrm_smark_get(skb->mark, x); + sp->xvec[sp->len++] = x; sp->olen++; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 71c78d223dfd..48bf3b9be475 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1007,7 +1007,9 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) return -ENOENT; } + rcu_read_lock(); err = fib_table_dump(tb, skb, cb, &filter); + rcu_read_unlock(); return skb->len ? : err; } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1ab2fb6bb37d..f12fa8da6127 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2175,6 +2175,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, int count = cb->args[2]; t_key key = cb->args[3]; + /* First time here, count and key are both always 0. Count > 0 + * and key == 0 means the dump has wrapped around and we are done. + */ + if (count && !key) + return skb->len; + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { int err; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 30fa771d382a..dcc79ff54b41 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = { [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, }, [FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, }, [FOU_ATTR_PEER_V4] = { .type = NLA_U32, }, - [FOU_ATTR_LOCAL_V6] = { .type = sizeof(struct in6_addr), }, - [FOU_ATTR_PEER_V6] = { .type = sizeof(struct in6_addr), }, + [FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), }, + [FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), }, [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, }, [FOU_ATTR_IFINDEX] = { .type = NLA_S32, }, }; diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 44bfeecac33e..66fdbfe5447c 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) } EXPORT_SYMBOL_GPL(gre_del_protocol); -/* Fills in tpi and returns header length to be pulled. */ +/* Fills in tpi and returns header length to be pulled. + * Note that caller must use pskb_may_pull() before pulling GRE header. 
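For reference, the WCCP fix in the gre_demux.c hunk that follows swaps a raw dereference of option bytes for skb_header_pointer(). A minimal sketch of the two safe-access idioms for a possibly non-linear skb; the helpers below are illustrative and not part of the patch:

/* Idiom 1: pskb_may_pull() linearizes the first offset+1 bytes; it
 * can reallocate the header, so any cached pointers into skb->data
 * must be reloaded after it succeeds.
 */
static int peek_byte_pull(struct sk_buff *skb, int offset, u8 *out)
{
	if (!pskb_may_pull(skb, offset + 1))
		return -EINVAL;
	*out = skb->data[offset];
	return 0;
}

/* Idiom 2: skb_header_pointer() copies through a stack buffer when
 * the bytes live in a paged fragment, and never modifies the skb.
 */
static int peek_byte_ptr(const struct sk_buff *skb, int offset, u8 *out)
{
	u8 _b;
	const u8 *b = skb_header_pointer(skb, offset, sizeof(_b), &_b);

	if (!b)
		return -EINVAL;
	*out = *b;
	return 0;
}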
+ */ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, bool *csum_err, __be16 proto, int nhs) { @@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header */ if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { + u8 _val, *val; + + val = skb_header_pointer(skb, nhs + hdr_len, + sizeof(_val), &_val); + if (!val) + return -EINVAL; tpi->proto = proto; - if ((*(u8 *)options & 0xF0) != 0x40) + if ((*val & 0xF0) != 0x40) hdr_len += 4; } tpi->hdr_len = hdr_len; @@ -127,7 +135,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr))) return -EINVAL; - ershdr = (struct erspan_base_hdr *)options; + ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len); tpi->key = cpu_to_be32(get_session_id(ershdr)); } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 4298aae74e0e..ac95ba78b903 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -249,10 +249,11 @@ bool icmp_global_allow(void) bool rc = false; /* Check if token bucket is empty and cannot be refilled - * without taking the spinlock. + * without taking the spinlock. The READ_ONCE() are paired + * with the following WRITE_ONCE() in this same function. */ - if (!icmp_global.credit) { - delta = min_t(u32, now - icmp_global.stamp, HZ); + if (!READ_ONCE(icmp_global.credit)) { + delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ); if (delta < HZ / 50) return false; } @@ -262,14 +263,14 @@ bool icmp_global_allow(void) if (delta >= HZ / 50) { incr = sysctl_icmp_msgs_per_sec * delta / HZ ; if (incr) - icmp_global.stamp = now; + WRITE_ONCE(icmp_global.stamp, now); } credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); if (credit) { credit--; rc = true; } - icmp_global.credit = credit; + WRITE_ONCE(icmp_global.credit, credit); spin_unlock(&icmp_global.lock); return rc; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index eb30fc1770de..b0010c710802 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -482,8 +482,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern) } spin_unlock_bh(&queue->fastopenq.lock); } + out: release_sock(sk); + if (newsk && mem_cgroup_sockets_enabled) { + int amt; + + /* atomically get the memory usage, set and charge the + * newsk->sk_memcg. + */ + lock_sock(newsk); + + /* The socket has not been accepted yet, no need to look at + * newsk->sk_wmem_queued. 
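On the inet_csk_accept() change being made here: the child socket may already hold received data before userspace accepts it, so its current footprint is converted to whole pages and charged to the freshly attached memcg. sk_mem_pages() is a page-granular round-up; a sketch of the equivalent arithmetic (SK_MEM_QUANTUM is one page in this kernel):

/* Equivalent of sk_mem_pages(): bytes rounded up to whole
 * SK_MEM_QUANTUM units.
 */
static int mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

/* e.g. with 4 KiB pages, a child socket holding 6000 bytes of
 * receive data is charged as mem_pages(6000) == 2 pages.
 */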
+ */ + amt = sk_mem_pages(newsk->sk_forward_alloc + + atomic_read(&newsk->sk_rmem_alloc)); + mem_cgroup_sk_alloc(newsk); + if (newsk->sk_memcg && amt) + mem_cgroup_charge_skmem(newsk->sk_memcg, amt); + + release_sock(newsk); + } if (req) reqsk_put(req); return newsk; @@ -1086,7 +1106,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu) if (!dst) goto out; } - dst->ops->update_pmtu(dst, sk, NULL, mtu); + dst->ops->update_pmtu(dst, sk, NULL, mtu, true); dst = __sk_dst_check(sk, 0); if (!dst) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 7dc79b973e6e..5b68bdaa8bff 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -100,13 +100,9 @@ static size_t inet_sk_attr_size(struct sock *sk, aux = handler->idiag_get_aux_size(sk, net_admin); return nla_total_size(sizeof(struct tcp_info)) - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ - + nla_total_size(1) /* INET_DIAG_TCLASS */ - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ - + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) + nla_total_size(TCP_CA_NAME_MAX) + nla_total_size(sizeof(struct tcpvegas_info)) @@ -147,6 +143,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark)) goto errout; + if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || + ext & (1 << (INET_DIAG_TCLASS - 1))) { + u32 classid = 0; + +#ifdef CONFIG_SOCK_CGROUP_DATA + classid = sock_cgroup_classid(&sk->sk_cgrp_data); +#endif + /* Fallback to socket priority if class id isn't set. + * Classful qdiscs use it as direct reference to class. + * For cgroup2 classid is always zero. + */ + if (!classid) + classid = sk->sk_priority; + + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) + goto errout; + } + r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->idiag_inode = sock_i_ino(sk); @@ -284,24 +298,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto errout; } - if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || - ext & (1 << (INET_DIAG_TCLASS - 1))) { - u32 classid = 0; - -#ifdef CONFIG_SOCK_CGROUP_DATA - classid = sock_cgroup_classid(&sk->sk_cgrp_data); -#endif - /* Fallback to socket priority if class id isn't set. - * Classful qdiscs use it as direct reference to class. - * For cgroup2 classid is always zero. 
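On the sizing change at the top of this inet_diag.c hunk: every attribute a reply may carry has to be pre-accounted with nla_total_size(), which adds the aligned netlink attribute header to the payload, and inet_diag_msg_attrs_size() now centralizes that sum. A small illustration; the attribute choice is only an example:

/* nla_total_size(payload) == NLA_ALIGN(NLA_HDRLEN + payload), so a
 * reply carrying a u32 mark, a u8 TOS and the meminfo block needs:
 */
static size_t example_attrs_size(void)
{
	return nla_total_size(sizeof(u32)) +	/* e.g. INET_DIAG_MARK */
	       nla_total_size(sizeof(u8)) +	/* e.g. INET_DIAG_TOS */
	       nla_total_size(sizeof(struct inet_diag_meminfo));
}

Under-reservation is exactly what the raw_diag.c hunk later in this patch fixes by switching its nlmsg_new() size from bare sizeof() sums to this accounting.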
- */ - if (!classid) - classid = sk->sk_priority; - - if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) - goto errout; - } - out: nlmsg_end(skb, nlh); return 0; @@ -914,11 +910,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, for (i = s_i; i < INET_LHTABLE_SIZE; i++) { struct inet_listen_hashbucket *ilb; + struct hlist_nulls_node *node; num = 0; ilb = &hashinfo->listening_hash[i]; spin_lock(&ilb->lock); - sk_for_each(sk, &ilb->head) { + sk_nulls_for_each(sk, node, &ilb->nulls_head) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 83fb00153018..d9803a36b86f 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk, struct inet_listen_hashbucket *ilb) { struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; + const struct hlist_nulls_node *node; struct sock *sk2; kuid_t uid = sock_i_uid(sk); - sk_for_each_rcu(sk2, &ilb->head) { + sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) { if (sk2 != sk && sk2->sk_family == sk->sk_family && ipv6_only_sock(sk2) == ipv6_only_sock(sk) && @@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk) } if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && sk->sk_family == AF_INET6) - hlist_add_tail_rcu(&sk->sk_node, &ilb->head); + __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head); else - hlist_add_head_rcu(&sk->sk_node, &ilb->head); + __sk_nulls_add_node_rcu(sk, &ilb->nulls_head); inet_hash2(hashinfo, sk); ilb->count++; sock_set_flag(sk, SOCK_RCU_FREE); @@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk) reuseport_detach_sock(sk); if (ilb) { inet_unhash2(hashinfo, sk); - __sk_del_node_init(sk); - ilb->count--; - } else { - __sk_nulls_del_node_init_rcu(sk); + ilb->count--; } + __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); unlock: spin_unlock_bh(lock); @@ -666,13 +665,17 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, other_parity_scan: port = low + offset; for (i = 0; i < remaining; i += 2, port += 2) { + int ret; + if (unlikely(port >= high)) port -= remaining; if (inet_is_local_reserved_port(net, port)) continue; head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; - spin_lock_bh(&head->lock); + ret = spin_trylock_bh(&head->lock); + if (!ret) + continue; /* Does not bother with rcv_saddr checks, because * the established check is already unique enough. 
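On the listening-hash conversion running through the inet_diag.c and inet_hashtables.c hunks here: moving the bucket from hlist to hlist_nulls lets lockless RCU readers detect that a socket was moved to another chain mid-walk. The canonical reader looks roughly like this (names are illustrative; the per-bucket nulls value corresponds to the i + LISTENING_NULLS_BASE initialization below):

static struct sock *lookup_listener(struct inet_listen_hashbucket *ilb,
				    unsigned long bucket_nulls)
{
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
		if (sk->sk_state == TCP_LISTEN /* && full match checks */)
			return sk;
	}
	/* We ended on a nulls marker that is not this bucket's: the
	 * chain was spliced under us, so restart the walk.
	 */
	if (get_nulls_value(node) != bucket_nulls)
		goto begin;
	return NULL;
}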
@@ -750,7 +753,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h) for (i = 0; i < INET_LHTABLE_SIZE; i++) { spin_lock_init(&h->listening_hash[i].lock); - INIT_HLIST_HEAD(&h->listening_hash[i].head); + INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head, + i + LISTENING_NULLS_BASE); h->listening_hash[i].count = 0; } diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index be778599bfed..ff327a62c9ce 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -160,7 +160,12 @@ static void inet_peer_gc(struct inet_peer_base *base, base->total / inet_peer_threshold * HZ; for (i = 0; i < gc_cnt; i++) { p = gc_stack[i]; - delta = (__u32)jiffies - p->dtime; + + /* The READ_ONCE() pairs with the WRITE_ONCE() + * in inet_putpeer() + */ + delta = (__u32)jiffies - READ_ONCE(p->dtime); + if (delta < ttl || !refcount_dec_if_one(&p->refcnt)) gc_stack[i] = NULL; } @@ -237,7 +242,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer); void inet_putpeer(struct inet_peer *p) { - p->dtime = (__u32)jiffies; + /* The WRITE_ONCE() pairs with itself (we run lockless) + * and the READ_ONCE() in inet_peer_gc() + */ + WRITE_ONCE(p->dtime, (__u32)jiffies); if (refcount_dec_and_test(&p->refcnt)) call_rcu(&p->rcu, inetpeer_free_rcu); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 10636fb6093e..85ba1453ba5c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1149,6 +1149,24 @@ static int ipgre_netlink_parms(struct net_device *dev, if (data[IFLA_GRE_FWMARK]) *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); + return 0; +} + +static int erspan_netlink_parms(struct net_device *dev, + struct nlattr *data[], + struct nlattr *tb[], + struct ip_tunnel_parm *parms, + __u32 *fwmark) +{ + struct ip_tunnel *t = netdev_priv(dev); + int err; + + err = ipgre_netlink_parms(dev, data, tb, parms, fwmark); + if (err) + return err; + if (!data) + return 0; + if (data[IFLA_GRE_ERSPAN_VER]) { t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); @@ -1272,45 +1290,70 @@ static void ipgre_tap_setup(struct net_device *dev) ip_tunnel_setup(dev, gre_tap_net_id); } -static int ipgre_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +static int +ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[]) { - struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; - __u32 fwmark = 0; - int err; if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); - err = ip_tunnel_encap_setup(t, &ipencap); + int err = ip_tunnel_encap_setup(t, &ipencap); if (err < 0) return err; } + return 0; +} + +static int ipgre_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct ip_tunnel_parm p; + __u32 fwmark = 0; + int err; + + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; + err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) return err; return ip_tunnel_newlink(dev, tb, &p, fwmark); } +static int erspan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct ip_tunnel_parm p; + __u32 fwmark = 0; + int err; + + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; + + err = erspan_netlink_parms(dev, data, tb, &p, &fwmark); + if (err) + return err; + return ip_tunnel_newlink(dev, tb, &p, fwmark); +} + static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct 
netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_encap ipencap; __u32 fwmark = t->fwmark; struct ip_tunnel_parm p; int err; - if (ipgre_netlink_encap_parms(data, &ipencap)) { - err = ip_tunnel_encap_setup(t, &ipencap); - - if (err < 0) - return err; - } + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) @@ -1323,8 +1366,34 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], t->parms.i_flags = p.i_flags; t->parms.o_flags = p.o_flags; - if (strcmp(dev->rtnl_link_ops->kind, "erspan")) - ipgre_link_update(dev, !tb[IFLA_MTU]); + ipgre_link_update(dev, !tb[IFLA_MTU]); + + return 0; +} + +static int erspan_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct ip_tunnel *t = netdev_priv(dev); + __u32 fwmark = t->fwmark; + struct ip_tunnel_parm p; + int err; + + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; + + err = erspan_netlink_parms(dev, data, tb, &p, &fwmark); + if (err < 0) + return err; + + err = ip_tunnel_changelink(dev, tb, &p, fwmark); + if (err < 0) + return err; + + t->parms.i_flags = p.i_flags; + t->parms.o_flags = p.o_flags; return 0; } @@ -1515,8 +1584,8 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = { .priv_size = sizeof(struct ip_tunnel), .setup = erspan_setup, .validate = erspan_validate, - .newlink = ipgre_newlink, - .changelink = ipgre_changelink, + .newlink = erspan_newlink, + .changelink = erspan_changelink, .dellink = ip_tunnel_dellink, .get_size = ipgre_get_size, .fill_info = ipgre_fill_info, diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index c59a78a267c3..e665ee937719 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -429,6 +429,12 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) goto drop; __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); + if (skb->dev) { + __SNMP_INC_STATS(skb->dev->mib.dev_statistics, + DEV_MIB_IPV4_RX_PKTS); + __SNMP_ADD_STATS(skb->dev->mib.dev_statistics, + DEV_MIB_IPV4_RX_BYTES, skb->len); + } skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) { @@ -611,5 +617,6 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt, list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ - ip_sublist_rcv(&sublist, curr_dev, curr_net); + if (!list_empty(&sublist)) + ip_sublist_rcv(&sublist, curr_dev, curr_net); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3d8baaaf7086..77189800e539 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -191,6 +191,10 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s struct neighbour *neigh; bool is_v6gw = false; + SNMP_INC_STATS(dev->mib.dev_statistics, DEV_MIB_IPV4_TX_PKTS); + SNMP_ADD_STATS(dev->mib.dev_statistics, DEV_MIB_IPV4_TX_BYTES, + skb->len); + if (rt->rt_type == RTN_MULTICAST) { IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len); } else if (rt->rt_type == RTN_BROADCAST) @@ -1258,15 +1262,18 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->addr = ipc->addr; } - /* - * We steal reference to this route, caller should not release it - */ - *rtp = NULL; cork->fragsize = ip_sk_use_pmtu(sk) ? 
- dst_mtu(&rt->dst) : rt->dst.dev->mtu; + dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu); + + if (!inetdev_valid_mtu(cork->fragsize)) + return -ENETUNREACH; cork->gso_size = ipc->gso_size; + cork->dst = &rt->dst; + /* We stole this route, caller should not release it. */ + *rtp = NULL; + cork->length = 0; cork->ttl = ipc->ttl; cork->tos = ipc->tos; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 38c02bb62e2c..74e1d964a615 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; if (skb_valid_dst(skb)) - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IP)) { if (!skb_is_gso(skb) && @@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev) iph->version = 4; iph->ihl = 5; - if (tunnel->collect_md) { - dev->features |= NETIF_F_NETNS_LOCAL; + if (tunnel->collect_md) netif_keep_dst(dev); - } return 0; } EXPORT_SYMBOL_GPL(ip_tunnel_init); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index cfb025606793..8ecaf0f26973 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -187,8 +187,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, int mtu; if (!dst) { - dev->stats.tx_carrier_errors++; - goto tx_error_icmp; + switch (skb->protocol) { + case htons(ETH_P_IP): { + struct rtable *rt; + + fl->u.ip4.flowi4_oif = dev->ifindex; + fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; + rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); + if (IS_ERR(rt)) { + dev->stats.tx_carrier_errors++; + goto tx_error_icmp; + } + dst = &rt->dst; + skb_dst_set(skb, dst); + break; + } +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + fl->u.ip6.flowi6_oif = dev->ifindex; + fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; + dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); + if (dst->error) { + dst_release(dst); + dst = NULL; + dev->stats.tx_carrier_errors++; + goto tx_error_icmp; + } + skb_dst_set(skb, dst); + break; +#endif + default: + dev->stats.tx_carrier_errors++; + goto tx_error_icmp; + } } dst_hold(dst); @@ -214,7 +245,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(dst); if (skb->len > mtu) { - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IP)) { icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 214154b47d56..f1f78a742b36 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -384,10 +384,11 @@ next: ; return 1; } -static inline int check_target(struct arpt_entry *e, const char *name) +static int check_target(struct arpt_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = arpt_get_target(e); struct xt_tgchk_param par = { + .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, @@ -399,8 +400,9 @@ static inline int check_target(struct arpt_entry *e, const char *name) return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); } -static inline int -find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, +static int +find_check_entry(struct arpt_entry *e, struct net *net, const char *name, + unsigned int size, struct xt_percpu_counter_alloc_state *alloc_state) { struct xt_entry_target *t; @@ -419,7 +421,7 @@ find_check_entry(struct 
arpt_entry *e, const char *name, unsigned int size, } t->u.kernel.target = target; - ret = check_target(e, name); + ret = check_target(e, net, name); if (ret) goto err; return 0; @@ -494,12 +496,13 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, return 0; } -static inline void cleanup_entry(struct arpt_entry *e) +static void cleanup_entry(struct arpt_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; t = arpt_get_target(e); + par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_ARP; @@ -512,7 +515,9 @@ static inline void cleanup_entry(struct arpt_entry *e) /* Checks and translates the user-supplied table segment (held in * newinfo). */ -static int translate_table(struct xt_table_info *newinfo, void *entry0, +static int translate_table(struct net *net, + struct xt_table_info *newinfo, + void *entry0, const struct arpt_replace *repl) { struct xt_percpu_counter_alloc_state alloc_state = { 0 }; @@ -569,7 +574,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { - ret = find_check_entry(iter, repl->name, repl->size, + ret = find_check_entry(iter, net, repl->name, repl->size, &alloc_state); if (ret != 0) break; @@ -580,7 +585,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; - cleanup_entry(iter); + cleanup_entry(iter, net); } return ret; } @@ -923,7 +928,7 @@ static int __do_replace(struct net *net, const char *name, /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries; xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) - cleanup_entry(iter); + cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, @@ -974,7 +979,7 @@ static int do_replace(struct net *net, const void __user *user, goto free_newinfo; } - ret = translate_table(newinfo, loc_cpu_entry, &tmp); + ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; @@ -986,7 +991,7 @@ static int do_replace(struct net *net, const void __user *user, free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) - cleanup_entry(iter); + cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -1149,7 +1154,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, } } -static int translate_compat_table(struct xt_table_info **pinfo, +static int translate_compat_table(struct net *net, + struct xt_table_info **pinfo, void **pentry0, const struct compat_arpt_replace *compatr) { @@ -1217,7 +1223,7 @@ static int translate_compat_table(struct xt_table_info **pinfo, repl.num_counters = 0; repl.counters = NULL; repl.size = newinfo->size; - ret = translate_table(newinfo, entry1, &repl); + ret = translate_table(net, newinfo, entry1, &repl); if (ret) goto free_newinfo; @@ -1270,7 +1276,7 @@ static int compat_do_replace(struct net *net, void __user *user, goto free_newinfo; } - ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp); + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; @@ -1282,7 +1288,7 @@ static int compat_do_replace(struct net *net, void __user *user, free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) - cleanup_entry(iter); + cleanup_entry(iter, net); free_newinfo: 
xt_free_table_info(newinfo); return ret; @@ -1509,7 +1515,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len return ret; } -static void __arpt_unregister_table(struct xt_table *table) +static void __arpt_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; @@ -1521,7 +1527,7 @@ static void __arpt_unregister_table(struct xt_table *table) /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) - cleanup_entry(iter); + cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); @@ -1546,7 +1552,7 @@ int arpt_register_table(struct net *net, loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); - ret = translate_table(newinfo, loc_cpu_entry, repl); + ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; @@ -1561,7 +1567,7 @@ int arpt_register_table(struct net *net, ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { - __arpt_unregister_table(new_table); + __arpt_unregister_table(net, new_table); *res = NULL; } @@ -1576,7 +1582,7 @@ void arpt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); - __arpt_unregister_table(table); + __arpt_unregister_table(net, table); } /* The built-in targets: standard (NULL) and error. */ diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index fc34fd1668d6..3737d32ad11a 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -322,7 +322,9 @@ static size_t nh_nlmsg_size_single(struct nexthop *nh) static size_t nh_nlmsg_size(struct nexthop *nh) { - size_t sz = nla_total_size(4); /* NHA_ID */ + size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg)); + + sz += nla_total_size(4); /* NHA_ID */ if (nh->is_group) sz += nh_nlmsg_size_grp(nh); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index cc90243ccf76..dd9439630d02 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -486,6 +486,46 @@ static int netstat_seq_show(struct seq_file *seq, void *v) return 0; } +static const struct snmp_mib snmp4_dropstat_list[] = { + SNMP_MIB_ITEM("TCPDataQueueNoDataDrop", LINUX_MIB_TCPDQNODATADROP), + SNMP_MIB_ITEM("TCPDataQueueNoMemDrop", LINUX_MIB_TCPDQNOMEMDROP), + SNMP_MIB_ITEM("TCPCheckSeqDrop", LINUX_MIB_TCPCHECKSEQDROP), + SNMP_MIB_ITEM("TCPCheckRstDrop", LINUX_MIB_TCPCHECKRSTDROP), + SNMP_MIB_ITEM("TCPNoAckDrop", LINUX_MIB_TCPNOACKDROP), + SNMP_MIB_ITEM("TCPInvalidAckDrop", LINUX_MIB_TCPINVALIDACKDROP), + SNMP_MIB_ITEM("TCPRstDrop", LINUX_MIB_TCPRSTDROP), + SNMP_MIB_ITEM("TCPNoSynDrop", LINUX_MIB_TCPNOSYNDROP), + SNMP_MIB_ITEM("TCPPawsDrop", LINUX_MIB_TCPPAWSDROP), + SNMP_MIB_ITEM("TCPBadPktDrop", LINUX_MIB_TCPBADPKTDROP), + SNMP_MIB_ITEM("TCPNoSocketDrop", LINUX_MIB_TCPNOSOCKETDROP), + SNMP_MIB_ITEM("TCPXfrmDrop", LINUX_MIB_TCPXFRMDROP), + SNMP_MIB_ITEM("TCPFilterDrop", LINUX_MIB_TCPFILTERDROP), + SNMP_MIB_ITEM("TCPMd5Drop", LINUX_MIB_TCPMD5DROP), + SNMP_MIB_ITEM("TCPTimewaitDrop", LINUX_MIB_TCPTWDROP), + SNMP_MIB_ITEM("TCPRcvEstDrop", LINUX_MIB_TCPRCVESTDROP), + SNMP_MIB_ITEM("TCPNewSkDrop", LINUX_MIB_TCPNSKDROP), + SNMP_MIB_ITEM("TCPChildProcDrop", LINUX_MIB_TCPCHILDPROCDROP), + SNMP_MIB_ITEM("TCPRcvStateProcDrop", LINUX_MIB_TCPRCVSTATEPROCDROP), + SNMP_MIB_ITEM("TCPOfoDupDrop", LINUX_MIB_TCPOFODUPDROP), + SNMP_MIB_ITEM("TCPUrgDrop", 
LINUX_MIB_TCPURGDROP), + SNMP_MIB_ITEM("TCPOutOfWindowDrop", LINUX_MIB_TCPOOWDROP), + SNMP_MIB_SENTINEL +}; + +/* Output /proc/net/dropstat */ +static int dropstat_seq_show(struct seq_file *seq, void *v) +{ + struct net *net = seq->private; + int i; + + for (i = 0; snmp4_dropstat_list[i].name; i++) + seq_printf(seq, "%s : %lu\n", snmp4_dropstat_list[i].name, + snmp_fold_field(net->mib.netdrop_statistics, + snmp4_dropstat_list[i].entry)); + + return 0; +} + static __net_init int ip_proc_init_net(struct net *net) { if (!proc_create_net_single("sockstat", 0444, net->proc_net, @@ -497,9 +537,15 @@ static __net_init int ip_proc_init_net(struct net *net) if (!proc_create_net_single("snmp", 0444, net->proc_net, snmp_seq_show, NULL)) goto out_snmp; + if (!proc_create_net_single("dropstat", 0444, net->proc_net, + dropstat_seq_show, NULL)) + goto out_dropstat; + return 0; +out_dropstat: + remove_proc_entry("snmp", net->proc_net); out_snmp: remove_proc_entry("netstat", net->proc_net); out_netstat: @@ -513,6 +559,7 @@ static __net_exit void ip_proc_exit_net(struct net *net) remove_proc_entry("snmp", net->proc_net); remove_proc_entry("netstat", net->proc_net); remove_proc_entry("sockstat", net->proc_net); + remove_proc_entry("dropstat", net->proc_net); } static __net_initdata struct pernet_operations ip_proc_ops = { diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c index e35736b99300..a93e7d1e1251 100644 --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@ -100,8 +100,9 @@ static int raw_diag_dump_one(struct sk_buff *in_skb, if (IS_ERR(sk)) return PTR_ERR(sk); - rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + 64, + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) { sock_put(sk); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 621f83434b24..fe34e9e0912a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -139,7 +139,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst); static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu); + struct sk_buff *skb, u32 mtu, + bool confirm_neigh); static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static void ipv4_dst_destroy(struct dst_entry *dst); @@ -1043,7 +1044,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) } static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct rtable *rt = (struct rtable *) dst; struct flowi4 fl4; @@ -2648,7 +2650,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) } static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 0902cb32bbad..eee828590abc 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -478,13 +478,6 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write, #endif static struct ctl_table ipv4_table[] = { - { - .procname = "tcp_max_orphans", - .data = &sysctl_tcp_max_orphans, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec 
- }, { .procname = "inet_peer_threshold", .data = &inet_peer_threshold, @@ -584,6 +577,14 @@ static struct ctl_table ipv4_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, + { + .procname = "tcp_init_cwnd", + .data = &sysctl_tcp_init_cwnd, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = SYSCTL_ONE, + }, { .procname = "udp_mem", .data = &sysctl_udp_mem, @@ -612,6 +613,49 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_do_static_key, }, + { + .procname = "tcp_tw_ignore_syn_tsval_zero", + .data = &sysctl_tcp_tw_ignore_syn_tsval_zero, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "tcp_wan_timestamps", + .data = &init_net.ipv4.sysctl_tcp_wan_timestamps, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "tcp_rto_min", + .data = &sysctl_tcp_rto_min, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &four + }, + { + .procname = "tcp_rto_max", + .data = &sysctl_tcp_rto_max, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "udp_proc_sched", + .data = &sysctl_udp_proc_sched, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "tcp_proc_sched", + .data = &sysctl_tcp_proc_sched, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { } }; @@ -1079,6 +1123,13 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_max_orphans", + .data = &init_net.ipv4.sysctl_tcp_max_orphans, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, { .procname = "tcp_early_retrans", .data = &init_net.ipv4.sysctl_tcp_early_retrans, @@ -1232,6 +1283,13 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = SYSCTL_ONE, .extra2 = &gso_max_segs, }, + { + .procname = "tcp_loss_init_cwnd", + .data = &sysctl_tcp_loss_init_cwnd, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "tcp_min_rtt_wlen", .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen, @@ -1323,6 +1381,29 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE }, + { + .procname = "tcp_inherit_buffsize", + .data = &sysctl_tcp_inherit_buffsize, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "tcp_init_rto", + .data = &sysctl_tcp_init_rto, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &four + }, + { + .procname = "tcp_synack_rto_interval", + .data = &sysctl_tcp_synack_rto_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &four + }, { } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d8876f0e9672..8e9e6cddc393 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -425,7 +425,7 @@ void tcp_init_sock(struct sock *sk) * algorithms that we must have the following bandaid to talk * efficiently to them. -DaveM */ - tp->snd_cwnd = TCP_INIT_CWND; + tp->snd_cwnd = sysctl_tcp_init_cwnd; /* There's a bubble in the pipe until at least the first ACK. 
*/ tp->app_limited = ~0U; @@ -1087,8 +1087,7 @@ do_error: goto out; out_err: /* make sure we wake any epoll edge trigger waiter */ - if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && - err == -EAGAIN)) { + if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { sk->sk_write_space(sk); tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } @@ -1419,8 +1418,7 @@ out_err: sock_zerocopy_put_abort(uarg, true); err = sk_stream_error(sk, flags, err); /* make sure we wake any epoll edge trigger waiter */ - if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && - err == -EAGAIN)) { + if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { sk->sk_write_space(sk); tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } @@ -1958,8 +1956,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, struct sk_buff *skb, *last; u32 urg_hole = 0; struct scm_timestamping_internal tss; - bool has_tss = false; - bool has_cmsg; + int cmsg_flags; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); @@ -1974,7 +1971,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, if (sk->sk_state == TCP_LISTEN) goto out; - has_cmsg = tp->recvmsg_inq; + cmsg_flags = tp->recvmsg_inq ? 1 : 0; timeo = sock_rcvtimeo(sk, nonblock); /* Urgent data needs to be handled specially. */ @@ -2157,8 +2154,7 @@ skip_copy: if (TCP_SKB_CB(skb)->has_rxtstamp) { tcp_update_recv_tstamps(skb, &tss); - has_tss = true; - has_cmsg = true; + cmsg_flags |= 2; } if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; @@ -2183,10 +2179,10 @@ found_fin_ok: release_sock(sk); - if (has_cmsg) { - if (has_tss) + if (cmsg_flags) { + if (cmsg_flags & 2) tcp_recv_timestamp(msg, sk, &tss); - if (tp->recvmsg_inq) { + if (cmsg_flags & 1) { inq = tcp_inq_hint(sk); put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); } @@ -2524,6 +2520,7 @@ static void tcp_rtx_queue_purge(struct sock *sk) { struct rb_node *p = rb_first(&sk->tcp_rtx_queue); + tcp_sk(sk)->highest_sack = NULL; while (p) { struct sk_buff *skb = rb_to_skb(p); @@ -2621,10 +2618,12 @@ int tcp_disconnect(struct sock *sk, int flags) tp->snd_cwnd = TCP_INIT_CWND; tp->snd_cwnd_cnt = 0; tp->window_clamp = 0; + tp->delivered = 0; tp->delivered_ce = 0; tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; tcp_clear_retrans(tp); + tp->total_retrans = 0; inet_csk_delack_init(sk); /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 * issue in __tcp_select_window() @@ -2636,10 +2635,14 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_rx_dst = NULL; tcp_saved_syn_free(tp); tp->compressed_ack = 0; + tp->segs_in = 0; + tp->segs_out = 0; tp->bytes_sent = 0; tp->bytes_acked = 0; tp->bytes_received = 0; tp->bytes_retrans = 0; + tp->data_segs_in = 0; + tp->data_segs_out = 0; tp->duplicate_sack[0].start_seq = 0; tp->duplicate_sack[0].end_seq = 0; tp->dsack_dups = 0; @@ -2940,8 +2943,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EPERM; else if (tp->repair_queue == TCP_SEND_QUEUE) WRITE_ONCE(tp->write_seq, val); - else if (tp->repair_queue == TCP_RECV_QUEUE) + else if (tp->repair_queue == TCP_RECV_QUEUE) { WRITE_ONCE(tp->rcv_nxt, val); + WRITE_ONCE(tp->copied_seq, val); + } else err = -EINVAL; break; @@ -3139,6 +3144,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level, tcp_enable_tx_delay(); tp->tcp_tx_delay = val; break; + case TCP_FULLNAT_REAL: + + if (optlen < sizeof(struct tcp_fullnat_real_opt)) + err = -EINVAL; + else { + struct tcp_fullnat_real_opt 
opt; + if (copy_from_user(&opt, optval, sizeof(struct tcp_fullnat_real_opt))) + err = -EFAULT; + else { + tp->fullnat_real_ip = opt.real_ip; + tp->fullnat_real_port = opt.real_port; + } + } + break; default: err = -ENOPROTOOPT; break; @@ -3967,12 +3986,14 @@ void __init tcp_init(void) alloc_large_system_hash("TCP established", sizeof(struct inet_ehash_bucket), thash_entries, - 17, /* one slot per 128 KB of memory */ + ((unsigned long)totalram_pages >= 128 * 1024) ? + 13 : 15, 0, NULL, &tcp_hashinfo.ehash_mask, 0, - thash_entries ? 0 : 512 * 1024); + /* mjqiu: 512 * 1024 -> 1024 * 1024, to double tcp_max_tw_buckets */ + thash_entries ? 0 : 1024 * 1024); for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); @@ -3996,7 +4017,7 @@ void __init tcp_init(void) cnt = tcp_hashinfo.ehash_mask + 1; - sysctl_tcp_max_orphans = cnt / 2; + init_net.ipv4.sysctl_tcp_max_orphans = cnt / 2; tcp_init_mem(); /* Set per-socket limits to no more than 1/128 the pressure threshold */ diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 32772d6ded4e..6c4d79baff26 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk) /* Sort of tcp_tso_autosize() but ignoring * driver provided sk_gso_max_size. */ - bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift, + bytes = min_t(unsigned long, + sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), GSO_MAX_SIZE - 1 - MAX_TCP_HEADER); segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk)); @@ -778,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs) * bandwidth sample. Delivered is in packets and interval_us in uS and * ratio will be <<1 for most connections. So delivered is first scaled. 
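On the division change in the bbr_update_bw() hunk in progress here: both forms compute the same 64-bit quotient, the difference is purely idiom. A sketch, assuming the BW_UNIT fixed-point scale defined elsewhere in tcp_bbr.c:

/* do_div(n, base) divides in place: n becomes the quotient and the
 * macro's value is the remainder.
 */
static u64 bw_estimate_old(u32 delivered, long interval_us)
{
	u64 bw = (u64)delivered * BW_UNIT;

	do_div(bw, interval_us);
	return bw;
}

/* div64_long() returns the quotient directly, keeping the whole
 * computation a single expression.
 */
static u64 bw_estimate_new(u32 delivered, long interval_us)
{
	return div64_long((u64)delivered * BW_UNIT, interval_us);
}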
*/ - bw = (u64)rs->delivered * BW_UNIT; - do_div(bw, rs->interval_us); + bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us); /* If this sample is application-limited, it is likely to have a very * low delivered count that represents application behavior rather than diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 8a56e09cfb0e..8a01428f80c1 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -121,14 +121,14 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, struct sk_psock *psock; int copied, ret; - if (unlikely(flags & MSG_ERRQUEUE)) - return inet_recv_error(sk, msg, len, addr_len); - if (!skb_queue_empty(&sk->sk_receive_queue)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + if (unlikely(flags & MSG_ERRQUEUE)) + return inet_recv_error(sk, msg, len, addr_len); + if (!skb_queue_empty(&sk->sk_receive_queue) && + sk_psock_queue_empty(psock)) + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); lock_sock(sk); msg_bytes_ready: copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags); @@ -139,7 +139,7 @@ msg_bytes_ready: timeo = sock_rcvtimeo(sk, nonblock); data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err); if (data) { - if (skb_queue_empty(&sk->sk_receive_queue)) + if (!sk_psock_queue_empty(psock)) goto msg_bytes_ready; release_sock(sk); sk_psock_put(sk, psock); @@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir); static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, int *copied, int flags) { - bool cork = false, enospc = msg->sg.start == msg->sg.end; + bool cork = false, enospc = sk_msg_full(msg); struct sock *sk_redir; u32 tosend, delta = 0; int ret; @@ -315,10 +315,7 @@ more_data: */ delta = msg->sg.size; psock->eval = sk_psock_msg_verdict(sk, psock, msg); - if (msg->sg.size < delta) - delta -= msg->sg.size; - else - delta = 0; + delta -= msg->sg.size; } if (msg->cork_bytes && diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a2e52ad7cdab..25a66d436222 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -80,7 +80,10 @@ #include #include -int sysctl_tcp_max_orphans __read_mostly = NR_FILE; +int sysctl_tcp_tw_ignore_syn_tsval_zero __read_mostly = 1; +int sysctl_tcp_loss_init_cwnd = 1; +int sysctl_tcp_no_delay_ack = 0; +int sysctl_tcp_init_cwnd = TCP_INIT_CWND; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ @@ -865,7 +868,7 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) __u32 cwnd = (dst ? 
dst_metric(dst, RTAX_INITCWND) : 0); if (!cwnd) - cwnd = TCP_INIT_CWND; + cwnd = sysctl_tcp_init_cwnd; return min_t(__u32, cwnd, tp->snd_cwnd_clamp); } @@ -915,9 +918,10 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, /* This must be called before lost_out is incremented */ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) { - if (!tp->retransmit_skb_hint || - before(TCP_SKB_CB(skb)->seq, - TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) + if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) || + (tp->retransmit_skb_hint && + before(TCP_SKB_CB(skb)->seq, + TCP_SKB_CB(tp->retransmit_skb_hint)->seq))) tp->retransmit_skb_hint = skb; } @@ -1727,8 +1731,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, } /* Ignore very old stuff early */ - if (!after(sp[used_sacks].end_seq, prior_snd_una)) + if (!after(sp[used_sacks].end_seq, prior_snd_una)) { + if (i == 0) + first_sack_index = -1; continue; + } used_sacks++; } @@ -2001,7 +2008,7 @@ void tcp_enter_loss(struct sock *sk) tcp_ca_event(sk, CA_EVENT_LOSS); tcp_init_undo(tp); } - tp->snd_cwnd = tcp_packets_in_flight(tp) + 1; + tp->snd_cwnd = max(sysctl_tcp_loss_init_cwnd, tcp_packets_in_flight(tp) + 1); tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_jiffies32; @@ -3160,6 +3167,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, tp->retransmit_skb_hint = NULL; if (unlikely(skb == tp->lost_skb_hint)) tp->lost_skb_hint = NULL; + tcp_highest_sack_replace(sk, skb, next); tcp_rtx_queue_unlink_and_free(skb, sk); } @@ -3838,6 +3846,7 @@ void tcp_parse_options(const struct net *net, { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); + struct iphdr *iph = ip_hdr(skb); int length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); @@ -3890,8 +3899,9 @@ void tcp_parse_options(const struct net *net, break; case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && - ((estab && opt_rx->tstamp_ok) || - (!estab && net->ipv4.sysctl_tcp_timestamps))) { + ((estab && opt_rx->tstamp_ok) || + (!estab && net->ipv4.sysctl_tcp_timestamps && + (net->ipv4.sysctl_tcp_wan_timestamps || is_private_ip(iph->saddr))))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); @@ -4495,6 +4505,8 @@ static void tcp_ofo_queue(struct sock *sk) if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { tcp_drop(sk, skb); + NET_INC_DROPSTATS(sock_net(sk), + LINUX_MIB_TCPOFODUPDROP); continue; } @@ -4759,6 +4771,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) int eaten; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPDQNODATADROP); __kfree_skb(skb); return; } @@ -4823,6 +4836,7 @@ queue_and_out: tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPOOWDROP); tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); inet_csk_schedule_ack(sk); drop: @@ -5237,6 +5251,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) __tcp_select_window(sk) >= tp->rcv_wnd)) || /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || + /* We need to send ack no delay or ... 
*/ + sysctl_tcp_no_delay_ack || /* Protocol state mandates a one-time immediate ACK */ inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) { send_now: @@ -5354,6 +5370,8 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); + NET_INC_DROPSTATS(sock_net(sk), + LINUX_MIB_TCPURGDROP); } } @@ -5449,6 +5467,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, } else if (tcp_reset_check(sk, skb)) { tcp_reset(sk); } + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPCHECKSEQDROP); goto discard; } @@ -5494,6 +5513,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, tcp_fastopen_active_disable(sk); tcp_send_challenge_ack(sk, skb); } + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPCHECKRSTDROP); goto discard; } @@ -5684,8 +5704,10 @@ slow_path: if (len < (th->doff << 2) || tcp_checksum_complete(skb)) goto csum_error; - if (!th->ack && !th->rst && !th->syn) + if (!th->ack && !th->rst && !th->syn) { + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPNOACKDROP); goto discard; + } /* * Standard slow path. @@ -5695,8 +5717,10 @@ slow_path: return; step5: - if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) + if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) { + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPINVALIDACKDROP); goto discard; + } tcp_rcv_rtt_measure_ts(sk, skb); @@ -5915,8 +5939,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * See note below! * --ANK(990513) */ - if (!th->syn) + if (!th->syn) { + NET_INC_DROPSTATS(sock_net(sk), + LINUX_MIB_TCPNOSYNDROP); goto discard_and_undo; + } /* rfc793: * "If the SYN bit is on ... @@ -5997,6 +6024,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, discard: tcp_drop(sk, skb); + NET_INC_DROPSTATS(sock_net(sk), + LINUX_MIB_TCPRCVSTATEPROCDROP); return 0; } else { tcp_send_ack(sk); @@ -6012,14 +6041,16 @@ discard: * * Otherwise (no ACK) drop the segment and return." */ - + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPRSTDROP); goto discard_and_undo; } /* PAWS check. */ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && - tcp_paws_reject(&tp->rx_opt, 0)) + tcp_paws_reject(&tp->rx_opt, 0)) { + NET_INC_DROPSTATS(sock_net(sk), LINUX_MIB_TCPPAWSDROP); goto discard_and_undo; + } if (th->syn) { /* We see SYN without ACK. It is attempt of @@ -6091,7 +6122,11 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk) { struct request_sock *req; - tcp_try_undo_loss(sk, false); + /* If we are still handling the SYNACK RTO, see if timestamp ECR allows + * undo. If peer SACKs triggered fast recovery, we can't undo here. + */ + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) + tcp_try_undo_loss(sk, false); /* Reset rtx states to prevent spurious retransmits_timed_out() */ tcp_sk(sk)->retrans_stamp = 0; @@ -6592,6 +6627,16 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); + if (sysctl_tcp_tw_ignore_syn_tsval_zero && tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) { + /* Some OSes (seen on certain web servers) carry a timestamp + * option in the SYN whose tsval is zero, i.e. they never send + * a real stamp. This is the easy case: treat the SYN as if it + * carried no timestamps and do not advertise TS support.
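A note on the NET_INC_DROPSTATS()/__NET_INC_DROPSTATS() calls used throughout these tcp_input.c and tcp_ipv4.c hunks: their definitions are outside this excerpt, but given the netdrop_statistics per-cpu mib allocated in af_inet.c and dumped by dropstat_seq_show() in proc.c, they presumably mirror the existing SNMP counter macros, roughly:

/* Assumed shape, by analogy with NET_INC_STATS()/__NET_INC_STATS();
 * the plain variant is BH-safe, the __ variant assumes the caller
 * already runs in softirq context.
 */
#define NET_INC_DROPSTATS(net, field) \
	SNMP_INC_STATS((net)->mib.netdrop_statistics, field)
#define __NET_INC_DROPSTATS(net, field) \
	__SNMP_INC_STATS((net)->mib.netdrop_statistics, field)

Each counter then surfaces as one "Name : value" line in /proc/net/dropstat.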
+ */ + tmp_opt.saw_tstamp = 0; + tmp_opt.tstamp_ok = 0; + } + if (IS_ENABLED(CONFIG_SMC) && want_cookie) tmp_opt.smc_ok = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 67b2dc7a1727..86861041323f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -82,6 +82,8 @@ #include +int sysctl_tcp_proc_sched __read_mostly = 1; + #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); @@ -1576,6 +1578,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (nsk != sk) { if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; + NET_INC_DROPSTATS(sock_net(sk), + LINUX_MIB_TCPCHILDPROCDROP); goto reset; } return 0; @@ -1585,6 +1589,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (tcp_rcv_state_process(sk, skb)) { rsk = sk; + NET_INC_DROPSTATS(sock_net(sk), + LINUX_MIB_TCPRCVSTATEPROCDROP); goto reset; } return 0; @@ -1822,15 +1828,19 @@ int tcp_v4_rcv(struct sk_buff *skb) /* Count it even if it's bad */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); - if (!pskb_may_pull(skb, sizeof(struct tcphdr))) + if (!pskb_may_pull(skb, sizeof(struct tcphdr))) { + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPBADPKTDROP); goto discard_it; + } th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) goto bad_packet; - if (!pskb_may_pull(skb, th->doff * 4)) + if (!pskb_may_pull(skb, th->doff * 4)) { + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPBADPKTDROP); goto discard_it; + } /* An explanation is required here, I think. * Packet length and doff are validated by header prediction, @@ -1861,6 +1871,7 @@ process: if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { sk_drops_add(sk, skb); reqsk_put(req); + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPMD5DROP); goto discard_it; } if (tcp_checksum_complete(skb)) { @@ -1913,16 +1924,22 @@ process: goto discard_and_relse; } - if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPXFRMDROP); goto discard_and_relse; + } - if (tcp_v4_inbound_md5_hash(sk, skb)) + if (tcp_v4_inbound_md5_hash(sk, skb)) { + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPMD5DROP); goto discard_and_relse; + } nf_reset_ct(skb); - if (tcp_filter(sk, skb)) + if (tcp_filter(sk, skb)) { + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPFILTERDROP); goto discard_and_relse; + } th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); tcp_v4_fill_cb(skb, iph, th); @@ -1959,6 +1976,7 @@ put_and_return: return ret; no_tcp_socket: + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPNOSOCKETDROP); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; @@ -1987,6 +2005,7 @@ discard_and_relse: do_time_wait: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPXFRMDROP); goto discard_it; } @@ -2024,6 +2043,7 @@ do_time_wait: goto discard_it; case TCP_TW_SUCCESS:; } + __NET_INC_DROPSTATS(net, LINUX_MIB_TCPTWDROP); goto discard_it; } @@ -2044,7 +2064,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) } EXPORT_SYMBOL(inet_sk_rx_dst_set); -const struct inet_connection_sock_af_ops ipv4_specific = { +struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, @@ -2149,13 +2169,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur) struct tcp_iter_state *st = seq->private; struct net *net = 
seq_file_net(seq); struct inet_listen_hashbucket *ilb; + struct hlist_nulls_node *node; struct sock *sk = cur; if (!sk) { get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock(&ilb->lock); - sk = sk_head(&ilb->head); + sk = sk_nulls_head(&ilb->nulls_head); st->offset = 0; goto get_sk; } @@ -2163,15 +2184,17 @@ get_head: ++st->num; ++st->offset; - sk = sk_next(sk); + sk = sk_nulls_next(sk); get_sk: - sk_for_each_from(sk) { + sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == afinfo->family) return sk; } spin_unlock(&ilb->lock); + if (sysctl_tcp_proc_sched) + cond_resched(); st->offset = 0; if (++st->bucket < INET_LHTABLE_SIZE) goto get_head; @@ -2230,6 +2253,8 @@ static void *established_get_first(struct seq_file *seq) goto out; } spin_unlock_bh(lock); + if (sysctl_tcp_proc_sched) + cond_resched(); } out: return rc; @@ -2711,6 +2736,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_pacing_ss_ratio = 200; net->ipv4.sysctl_tcp_pacing_ca_ratio = 120; if (net != &init_net) { + net->ipv4.sysctl_tcp_max_orphans = NR_FILE; memcpy(net->ipv4.sysctl_tcp_rmem, init_net.ipv4.sysctl_tcp_rmem, sizeof(init_net.ipv4.sysctl_tcp_rmem)); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index c802bc80c400..24ac727df7bd 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -30,6 +30,8 @@ #include #include +int sysctl_tcp_inherit_buffsize __read_mostly = 1; + static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) { if (seq == s_win) return true; @@ -502,6 +504,11 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, tcp_init_xmit_timers(newsk); WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1); + if (!sysctl_tcp_inherit_buffsize && !(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { + newsk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1]; + newsk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; + } + if (sock_flag(newsk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(newsk, keepalive_time_when(newtp)); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 0488607c5cd3..a9c34a4ef216 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) __skb_unlink(skb, &sk->sk_write_queue); tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); + if (tp->highest_sack == NULL) + tp->highest_sack = skb; + tp->packets_out += tcp_skb_pcount(skb); if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); @@ -438,6 +441,20 @@ struct tcp_out_options { struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ }; +#define TCPOPT_ADDR 200 +#define TCPOLEN_ADDR 8 /* |opcode|size|ip+port| = 1 + 1 + 6 */ + +/* + * Insert the client IP in a TCP option. Only IPv4 is supported for + * now; the option must be 4-byte aligned. + */ +struct ip_vs_tcpo_addr { + __u8 opcode; + __u8 opsize; + __u16 port; + __u32 addr; +}; + /* Write previously computed TCP options to the packet. * * Beware: Something in the Internet is very sensitive to the ordering of * (but it may well be that other scenarios fail similarly).
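On the fullnat pieces in this range (the TCP_FULLNAT_REAL case added to do_tcp_setsockopt() above, and the TCPOPT_ADDR option just defined): a userspace caller would prime the socket before connect() so the SYN can carry the real client address. A sketch; the struct layout follows this patch, the numeric value of TCP_FULLNAT_REAL is a placeholder since it is not a mainline uapi constant, and the expected byte order of the fields is not visible in this excerpt:

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdint.h>

#ifndef TCP_FULLNAT_REAL
#define TCP_FULLNAT_REAL 10000	/* placeholder, not a uapi value */
#endif

struct tcp_fullnat_real_opt {
	uint32_t real_ip;	/* original client address */
	uint16_t real_port;	/* original client port */
};

/* Presumably called before connect(), so the initial SYN already
 * carries the TCPOPT_ADDR (200) option written by tcp_options_write().
 */
static int set_real_client(int fd, uint32_t real_ip, uint16_t real_port)
{
	struct tcp_fullnat_real_opt opt = {
		.real_ip = real_ip,
		.real_port = real_port,
	};

	return setsockopt(fd, IPPROTO_TCP, TCP_FULLNAT_REAL,
			  &opt, sizeof(opt));
}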
 /* Write previously computed TCP options to the packet.
  *
  * Beware: Something in the Internet is very sensitive to the ordering of
  *
@@ -452,7 +469,8 @@ struct tcp_out_options {
  * (but it may well be that other scenarios fail similarly).
  */
 static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
-			      struct tcp_out_options *opts)
+			      struct tcp_out_options *opts,
+			      bool fullnat_opt)
 {
 	u16 options = opts->options;	/* mungable copy */
@@ -546,6 +564,11 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 	}
 
 	smc_options_write(ptr, &options);
+
+	if (unlikely(fullnat_opt)) {
+		*ptr++ = htonl(TCPOPT_ADDR << 24 | TCPOLEN_ADDR << 16 | tp->fullnat_real_port);
+		*ptr++ = htonl(tp->fullnat_real_ip);
+	}
 }
 
 static void smc_set_option(const struct tcp_sock *tp,
@@ -586,7 +609,8 @@ static void smc_set_option_cond(const struct tcp_sock *tp,
  */
 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 				    struct tcp_out_options *opts,
-				    struct tcp_md5sig_key **md5)
+				    struct tcp_md5sig_key **md5,
+				    bool fullnat_opt)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -650,6 +674,10 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
 	smc_set_option(tp, opts, &remaining);
 
+	if (fullnat_opt)
+		remaining -= 8;
+
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
@@ -721,7 +749,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
  */
 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 					    struct tcp_out_options *opts,
-					    struct tcp_md5sig_key **md5)
+					    struct tcp_md5sig_key **md5,
+					    bool fullnat_opt)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int size = 0;
@@ -755,10 +784,14 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 			min_t(unsigned int, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
-		size += TCPOLEN_SACK_BASE_ALIGNED +
-			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+		if (likely(opts->num_sack_blocks))
+			size += TCPOLEN_SACK_BASE_ALIGNED +
+				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 	}
 
+	if (unlikely(fullnat_opt))
+		size += 8;
+
 	return size;
 }
@@ -1024,6 +1057,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	struct tcphdr *th;
 	u64 prior_wstamp;
 	int err;
+	bool fullnat_opt = false;
 
 	BUG_ON(!skb || !tcp_skb_pcount(skb));
 	tp = tcp_sk(sk);
@@ -1044,6 +1078,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 
 		if (unlikely(!skb))
 			return -ENOBUFS;
+		/* retransmit skbs might have a non-zero value in skb->dev
+		 * because skb->dev is aliased with skb->rbnode.rb_left
+		 */
+		skb->dev = NULL;
 	}
 
 	inet = inet_sk(sk);
@@ -1051,10 +1089,26 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	memset(&opts, 0, sizeof(opts));
 
 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
-		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
-	} else {
+		if (unlikely(!(tcb->tcp_flags & TCPHDR_ACK) && tp->fullnat_real_ip &&
+			     tp->fullnat_real_port)) {
+			tp->fullnat_first_data_seq = tcb->seq + 1;
+			tp->fullnat_real_ip_inserted = false;
+			fullnat_opt = true;
+		}
+		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5, fullnat_opt);
+	} else {
+		if (unlikely(tp->fullnat_real_ip && tp->fullnat_real_port &&
+			     !(tcb->tcp_flags & TCPHDR_RST) &&
+			     !(tcb->tcp_flags & TCPHDR_FIN))) {
+			if (after(tcb->seq, tp->fullnat_first_data_seq))
+				tp->fullnat_real_ip_inserted = true;
+			if (!tp->fullnat_real_ip_inserted)
+				fullnat_opt = true;
+		}
 		tcp_options_size = tcp_established_options(sk, skb, &opts,
-							   &md5);
+							   &md5,
+							   fullnat_opt);
 		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
 		 * at receiver : This slightly improves GRO performance.
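The SYN and established branches above drive a small per-connection window: when the real IP/port are configured, the option is sent on the bare SYN and on every subsequent non-RST/non-FIN segment until one strictly beyond fullnat_first_data_seq has gone out. A standalone toy model of that decision; the field names follow the patch's fullnat_* members, the configured-IP/port precondition is folded away, and after()-style sequence wraparound handling is elided:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_tp {
            uint32_t first_data_seq;
            bool inserted;
    };

    /* Mirrors the __tcp_transmit_skb() logic above: true = add the option. */
    static bool want_addr_opt(struct toy_tp *tp, uint32_t seq, bool syn,
                              bool rst_or_fin)
    {
            if (syn) {                              /* bare SYN: arm the window */
                    tp->first_data_seq = seq + 1;   /* SYN consumes one seq number */
                    tp->inserted = false;
                    return true;
            }
            if (rst_or_fin)                         /* patch never tags RST/FIN */
                    return false;
            if (seq > tp->first_data_seq)           /* kernel uses after() here */
                    tp->inserted = true;
            return !tp->inserted;
    }

    int main(void)
    {
            struct toy_tp tp = { 0, false };

            printf("SYN:      %d\n", want_addr_opt(&tp, 1000, true, false));  /* 1 */
            printf("1st data: %d\n", want_addr_opt(&tp, 1001, false, false)); /* 1 */
            printf("2nd data: %d\n", want_addr_opt(&tp, 1002, false, false)); /* 0 */
            return 0;
    }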
* Note that we do not force the PSH flag for non GSO packets, @@ -1118,7 +1172,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, } } - tcp_options_write((__be32 *)(th + 1), tp, &opts); + tcp_options_write((__be32 *)(th + 1), tp, &opts, fullnat_opt); skb_shinfo(skb)->gso_type = sk->sk_gso_type; if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { th->window = htons(tcp_select_window(sk)); @@ -1599,7 +1653,7 @@ unsigned int tcp_current_mss(struct sock *sk) mss_now = tcp_sync_mss(sk, mtu); } - header_len = tcp_established_options(sk, NULL, &opts, &md5) + + header_len = tcp_established_options(sk, NULL, &opts, &md5, false) + sizeof(struct tcphdr); /* The mss_cache is sized based on tp->tcp_header_len, which assumes * some common options. If this is an odd packet (because we have SACK @@ -1724,7 +1778,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, u32 bytes, segs; bytes = min_t(unsigned long, - sk->sk_pacing_rate >> sk->sk_pacing_shift, + sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); /* Goal is to send at least one packet per ms, @@ -2259,7 +2313,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, limit = max_t(unsigned long, 2 * skb->truesize, - sk->sk_pacing_rate >> sk->sk_pacing_shift); + sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); if (sk->sk_pacing_status == SK_PACING_NONE) limit = min_t(unsigned long, limit, sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); @@ -2437,6 +2491,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (tcp_small_queue_check(sk, skb, 0)) break; + /* Argh, we hit an empty skb(), presumably a thread + * is sleeping in sendmsg()/sk_stream_wait_memory(). + * We do not want to send a pure-ack packet and have + * a strange looking rtx queue with empty packet(s). + */ + if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) + break; + if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; @@ -2964,8 +3026,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) tcp_skb_tsorted_save(skb) { nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); - err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : - -ENOBUFS; + if (nskb) { + nskb->dev = NULL; + err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC); + } else { + err = -ENOBUFS; + } } tcp_skb_tsorted_restore(skb); if (!err) { @@ -3219,6 +3285,7 @@ int tcp_send_synack(struct sock *sk) if (!nskb) return -ENOMEM; INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); + tcp_highest_sack_replace(sk, skb, nskb); tcp_rtx_queue_unlink_and_free(skb, sk); __skb_header_release(nskb); tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); @@ -3325,7 +3392,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. 
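The READ_ONCE() conversions above touch the pacing-based burst sizing: tcp_tso_autosize() allows roughly sk_pacing_rate >> sk_pacing_shift bytes per burst, about one millisecond of data at the usual shift of 10, with a floor on the segment count. A back-of-the-envelope version; the shift default and the 2-segment floor are assumptions taken from the surrounding kernel defaults, not from this diff:

    #include <stdio.h>

    /* pacing_rate is in bytes/sec; shift 10 ~= one millisecond of data */
    static unsigned long tso_autosize(unsigned long pacing_rate,
                                      unsigned int shift, unsigned int mss,
                                      unsigned long gso_max)
    {
            unsigned long bytes = pacing_rate >> shift;
            unsigned long segs;

            if (bytes > gso_max)
                    bytes = gso_max;
            segs = bytes / mss;
            return segs < 2 ? 2 : segs;     /* tcp_min_tso_segs default floor */
    }

    int main(void)
    {
            /* ~1 Gb/s paced flow, 1448-byte MSS: prints "segs = 45" */
            printf("segs = %lu\n", tso_autosize(125000000UL, 10, 1448, 65536));
            return 0;
    }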
*/ th->window = htons(min(req->rsk_rcv_wnd, 65535U)); - tcp_options_write((__be32 *)(th + 1), NULL, &opts); + tcp_options_write((__be32 *)(th + 1), NULL, &opts, false); th->doff = (tcp_header_size >> 2); __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index dd5a6317a801..3c787dea6603 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -23,6 +23,22 @@ #include #include +/* default value is 1s */ +int sysctl_tcp_init_rto __read_mostly = 1000; +EXPORT_SYMBOL(sysctl_tcp_init_rto); + +/* default value is 200ms */ +int sysctl_tcp_synack_rto_interval __read_mostly = 200; +EXPORT_SYMBOL(sysctl_tcp_synack_rto_interval); + +/* default value is 200ms */ +int sysctl_tcp_rto_min __read_mostly = 200; +EXPORT_SYMBOL(sysctl_tcp_rto_min); + +/* default value is 120s */ +int sysctl_tcp_rto_max __read_mostly = 120; +EXPORT_SYMBOL(sysctl_tcp_rto_max); + static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 4849edb62d52..9168645b760e 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -96,17 +96,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen) rcu_read_unlock(); } -void tcp_update_ulp(struct sock *sk, struct proto *proto) +void tcp_update_ulp(struct sock *sk, struct proto *proto, + void (*write_space)(struct sock *sk)) { struct inet_connection_sock *icsk = inet_csk(sk); if (!icsk->icsk_ulp_ops) { + sk->sk_write_space = write_space; sk->sk_prot = proto; return; } if (icsk->icsk_ulp_ops->update) - icsk->icsk_ulp_ops->update(sk, proto); + icsk->icsk_ulp_ops->update(sk, proto, write_space); } void tcp_cleanup_ulp(struct sock *sk) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 447defbfccdd..6ad20c6b463d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -122,6 +122,8 @@ EXPORT_SYMBOL(sysctl_udp_mem); atomic_long_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); +int sysctl_udp_proc_sched __read_mostly = 1; + #define MAX_UDP_PORTS 65536 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) @@ -1368,7 +1370,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, if (likely(partial)) { up->forward_deficit += size; size = up->forward_deficit; - if (size < (sk->sk_rcvbuf >> 2)) + if (size < (sk->sk_rcvbuf >> 2) && + !skb_queue_empty(&up->reader_queue)) return; } else { size += up->forward_deficit; @@ -1475,7 +1478,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) * queue contains some other skb */ rmem = atomic_add_return(size, &sk->sk_rmem_alloc); - if (rmem > (size + sk->sk_rcvbuf)) + if (rmem > (size + (unsigned int)sk->sk_rcvbuf)) goto uncharge_drop; spin_lock(&list->lock); @@ -1855,8 +1858,12 @@ int __udp_disconnect(struct sock *sk, int flags) inet->inet_dport = 0; sock_rps_reset_rxhash(sk); sk->sk_bound_dev_if = 0; - if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) { inet_reset_saddr(sk); + if (sk->sk_prot->rehash && + (sk->sk_userlocks & SOCK_BINDPORT_LOCK)) + sk->sk_prot->rehash(sk); + } if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { sk->sk_prot->unhash(sk); @@ -2826,6 +2833,8 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) goto found; } spin_unlock_bh(&hslot->lock); + if (sysctl_udp_proc_sched) + cond_resched(); } sk = NULL; found: diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 910555a4d9fe..dccd2286bc28 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c 
@@ -64,8 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, goto out; err = -ENOMEM; - rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + 64, + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) goto out; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 35b84b52b702..9ebd54752e03 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, } static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; - path->ops->update_pmtu(path, sk, skb, mtu); + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); } static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 34ccef18b40e..6e1d200f30c1 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) } static void -cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt) +cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, + bool del_rt, bool del_peer) { struct fib6_info *f6i; - f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, + f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (f6i) { if (del_rt) @@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, expires, - action == CLEANUP_PREFIX_RT_DEL); + action == CLEANUP_PREFIX_RT_DEL, false); } /* clean up prefsrc entries */ @@ -3294,6 +3296,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) if (netif_is_l3_master(idev->dev)) return; + /* no link local addresses on devices flagged as slaves */ + if (idev->dev->flags & IFF_SLAVE) + return; + ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); switch (idev->cnf.addr_gen_mode) { @@ -3345,6 +3351,10 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_NONE) && (dev->type != ARPHRD_RAWIP)) { /* Alas, we support only Ethernet autoconfiguration. */ + idev = __in6_dev_get(dev); + if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP && + dev->flags & IFF_MULTICAST) + ipv6_mc_up(idev); return; } @@ -4586,12 +4596,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, } static int modify_prefix_route(struct inet6_ifaddr *ifp, - unsigned long expires, u32 flags) + unsigned long expires, u32 flags, + bool modify_peer) { struct fib6_info *f6i; u32 prio; - f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, + f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (!f6i) return -ENOENT; @@ -4602,7 +4614,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, ip6_del_rt(dev_net(ifp->idev->dev), f6i); /* add new one */ - addrconf_prefix_route(&ifp->addr, ifp->prefix_len, + addrconf_prefix_route(modify_peer ? 
&ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } else { @@ -4624,6 +4637,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg) unsigned long timeout; bool was_managetempaddr; bool had_prefixroute; + bool new_peer = false; ASSERT_RTNL(); @@ -4655,6 +4669,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg) cfg->preferred_lft = timeout; } + if (cfg->peer_pfx && + memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) { + if (!ipv6_addr_any(&ifp->peer_addr)) + cleanup_prefix_route(ifp, expires, true, true); + new_peer = true; + } + spin_lock_bh(&ifp->lock); was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; had_prefixroute = ifp->flags & IFA_F_PERMANENT && @@ -4670,6 +4691,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg) if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) ifp->rt_priority = cfg->rt_priority; + if (new_peer) + ifp->peer_addr = *cfg->peer_pfx; + spin_unlock_bh(&ifp->lock); if (!(ifp->flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); @@ -4678,7 +4702,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg) int rc = -ENOENT; if (had_prefixroute) - rc = modify_prefix_route(ifp, expires, flags); + rc = modify_prefix_route(ifp, expires, flags, false); /* prefix route could have been deleted; if so restore it */ if (rc == -ENOENT) { @@ -4686,6 +4710,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg) ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } + + if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr)) + rc = modify_prefix_route(ifp, expires, flags, true); + + if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) { + addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len, + ifp->rt_priority, ifp->idev->dev, + expires, flags, GFP_KERNEL); + } } else if (had_prefixroute) { enum cleanup_prefix_rt_t action; unsigned long rt_expires; @@ -4696,7 +4729,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg) if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, rt_expires, - action == CLEANUP_PREFIX_RT_DEL); + action == CLEANUP_PREFIX_RT_DEL, false); } } @@ -5231,16 +5264,16 @@ static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb, return -EINVAL; } + if (!netlink_strict_get_check(skb)) + return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, + ifa_ipv6_policy, extack); + ifm = nlmsg_data(nlh); if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request"); return -EINVAL; } - if (!netlink_strict_get_check(skb)) - return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, - ifa_ipv6_policy, extack); - err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, extack); if (err) @@ -5719,6 +5752,9 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla) struct nlattr *tb[IFLA_INET6_MAX + 1]; int err; + if (!idev) + return -EAFNOSUPPORT; + if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) BUG(); @@ -5981,9 +6017,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); if (!ipv6_addr_any(&ifp->peer_addr)) - addrconf_prefix_route(&ifp->peer_addr, 128, 0, - ifp->idev->dev, 0, 0, - GFP_ATOMIC); + addrconf_prefix_route(&ifp->peer_addr, 128, + 
ifp->rt_priority, ifp->idev->dev, + 0, 0, GFP_ATOMIC); break; case RTM_DELADDR: if (ifp->idev->cnf.forwarding) diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 2fc079284ca4..ea00ce3d4117 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -129,11 +129,12 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) } EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); -static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1, - struct dst_entry **u2, - struct flowi6 *u3) +static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst) { - return -EAFNOSUPPORT; + return ERR_PTR(-EAFNOSUPPORT); } static int eafnosupport_ipv6_route_input(struct sk_buff *skb) @@ -190,7 +191,7 @@ static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt) } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { - .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup, + .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, .ipv6_route_input = eafnosupport_ipv6_route_input, .fib6_get_table = eafnosupport_fib6_get_table, .fib6_table_lookup = eafnosupport_fib6_table_lookup, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index ef37e0574f54..7a976aad30b9 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -62,6 +62,7 @@ #include #include +#include #include "ip6_offload.h" @@ -292,8 +293,8 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, return -EINVAL; snum = ntohs(addr->sin6_port); - if (snum && snum < inet_prot_sock(net) && - !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) + if (snum && snum < PROT_SOCK && !prot_sock_flag[snum] && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; if (with_lock) @@ -597,7 +598,7 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, return err; } -const struct proto_ops inet6_stream_ops = { +struct proto_ops inet6_stream_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, @@ -630,6 +631,7 @@ const struct proto_ops inet6_stream_ops = { #endif .set_rcvlowat = tcp_set_rcvlowat, }; +EXPORT_SYMBOL(inet6_stream_ops); const struct proto_ops inet6_dgram_ops = { .family = PF_INET6, @@ -765,7 +767,7 @@ int inet6_sk_rebuild_header(struct sock *sk) &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { sk->sk_route_caps = 0; sk->sk_err_soft = -PTR_ERR(dst); @@ -889,6 +891,7 @@ static int __net_init inet6_net_init(struct net *net) net->ipv6.sysctl.idgen_retries = 3; net->ipv6.sysctl.idgen_delay = 1 * HZ; net->ipv6.sysctl.flowlabel_state_ranges = 0; + net->ipv6.sysctl.ip6_tunnel_neigh_bypass = 0; net->ipv6.sysctl.max_dst_opts_cnt = IP6_DEFAULT_MAX_DST_OPTS_CNT; net->ipv6.sysctl.max_hbh_opts_cnt = IP6_DEFAULT_MAX_HBH_OPTS_CNT; net->ipv6.sysctl.max_dst_opts_len = IP6_DEFAULT_MAX_DST_OPTS_LEN; @@ -946,7 +949,7 @@ static int ipv6_route_input(struct sk_buff *skb) static const struct ipv6_stub ipv6_stub_impl = { .ipv6_sock_mc_join = ipv6_sock_mc_join, .ipv6_sock_mc_drop = ipv6_sock_mc_drop, - .ipv6_dst_lookup = ip6_dst_lookup, + .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, .ipv6_route_input = ipv6_route_input, .fib6_get_table = fib6_get_table, .fib6_table_lookup = fib6_table_lookup, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 96f939248d2f..390bedde21a5 100644 --- 
a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -85,7 +85,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) final_p = fl6_update_dst(&fl6, opt, &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index e31626ffccd1..fd535053245b 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head, if (!x) goto out_reset; + skb->mark = xfrm_smark_get(skb->mark, x); + sp->xvec[sp->len++] = x; sp->olen++; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 0a0945a5b30d..e315526fa244 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -48,7 +48,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, fl6->flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(fl6)); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(dst)) return NULL; @@ -103,7 +103,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, dst = __inet6_csk_dst_check(sk, np->dst_cookie); if (!dst) { - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!IS_ERR(dst)) ip6_dst_store(sk, dst, NULL, NULL); @@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu) if (IS_ERR(dst)) return NULL; - dst->ops->update_pmtu(dst, sk, NULL, mtu); + dst->ops->update_pmtu(dst, sk, NULL, mtu, true); dst = inet6_csk_route_socket(sk, &fl6); return IS_ERR(dst) ? 
NULL : dst; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 6e2af411cd9c..c75274e0745c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1050,8 +1050,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, found++; break; } - if (rt_can_ecmp) - fallback_ins = fallback_ins ?: ins; + fallback_ins = fallback_ins ?: ins; goto next_iter; } @@ -1094,7 +1093,9 @@ next_iter: } if (fallback_ins && !found) { - /* No ECMP-able route found, replace first non-ECMP one */ + /* No matching route with same ecmp-able-ness found, replace + * first matching route + */ ins = fallback_ins; iter = rcu_dereference_protected(*ins, lockdep_is_held(&rt->fib6_table->tb6_lock)); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 923034c52ce4..9ec05a1df5e1 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, /* TooBig packet may have updated dst->dev's mtu */ if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) - dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu); + dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false); err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, NEXTHDR_GRE); @@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) dev->mtu -= 8; if (tunnel->parms.collect_md) { - dev->features |= NETIF_F_NETNS_LOCAL; netif_keep_dst(dev); } ip6gre_tnl_init_features(dev); @@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev) dev->needs_free_netdev = true; dev->priv_destructor = ip6gre_dev_free; - dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netif_keep_dst(dev); @@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev) dev->needs_free_netdev = true; dev->priv_destructor = ip6gre_dev_free; - dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netif_keep_dst(dev); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 3d71c7d6102c..abf66ee02754 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -132,6 +132,12 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, return NULL; } + if (dev) { + __SNMP_INC_STATS(dev->mib.dev_statistics, DEV_MIB_IPV6_RX_PKTS); + __SNMP_ADD_STATS(dev->mib.dev_statistics, DEV_MIB_IPV6_RX_BYTES, + skb->len); + } + rcu_read_lock(); idev = __in6_dev_get(skb->dev); @@ -325,7 +331,8 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ - ip6_sublist_rcv(&sublist, curr_dev, curr_net); + if (!list_empty(&sublist)) + ip6_sublist_rcv(&sublist, curr_dev, curr_net); } INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *)); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 71827b56c006..f93d3bcbdfbe 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -63,6 +63,10 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * struct neighbour *neigh; int ret; + SNMP_INC_STATS(dev->mib.dev_statistics, DEV_MIB_IPV6_TX_PKTS); + SNMP_ADD_STATS(dev->mib.dev_statistics, DEV_MIB_IPV6_TX_BYTES, + skb->len); + if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); @@ -109,8 +113,21 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * rcu_read_lock_bh(); nexthop = 
rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); - if (unlikely(!neigh)) - neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); + + if (unlikely(!neigh)) { + /* If dev is tunnel, ignore neigh_create. + * For now, use init_net's ip6_tunnel_neigh_bypass. + */ + if (init_net.ipv6.sysctl.ip6_tunnel_neigh_bypass && + (dst->dev->flags & IFF_POINTOPOINT)) { + ret = dev_queue_xmit(skb); + rcu_read_unlock_bh(); + return ret; + } + neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, + false); + } + if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); ret = neigh_output(neigh, skb, false); @@ -1144,19 +1161,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); * It returns a valid dst pointer on success, or a pointer encoded * error code. */ -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { struct dst_entry *dst = NULL; int err; - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); + err = ip6_dst_lookup_tail(net, sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); @@ -1188,7 +1205,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, if (dst) return dst; - dst = ip6_dst_lookup_flow(sk, fl6, final_dst); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); if (connected && !IS_ERR(dst)) ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 754a484d35df..b5dd20c4599b 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (rel_info > dst_mtu(skb_dst(skb2))) goto out; - skb_dst_update_pmtu(skb2, rel_info); + skb_dst_update_pmtu_no_confirm(skb2, rel_info); } icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); @@ -1132,7 +1132,7 @@ route_lookup: mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ? 
IPV6_MIN_MTU : IPV4_MIN_MTU); - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { *pmtu = mtu; err = -EMSGSIZE; @@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev) if (err) return err; ip6_tnl_link_config(t); - if (t->parms.collect_md) { - dev->features |= NETIF_F_NETNS_LOCAL; + if (t->parms.collect_md) netif_keep_dst(dev); - } return 0; } diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 024db17386d2..cc6180e08a4f 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -311,7 +311,7 @@ static int vti6_rcv(struct sk_buff *skb) if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { rcu_read_unlock(); - return 0; + goto discard; } ipv6h = ipv6_hdr(skb); @@ -449,8 +449,35 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) int err = -1; int mtu; - if (!dst) - goto tx_err_link_failure; + if (!dst) { + switch (skb->protocol) { + case htons(ETH_P_IP): { + struct rtable *rt; + + fl->u.ip4.flowi4_oif = dev->ifindex; + fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; + rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); + if (IS_ERR(rt)) + goto tx_err_link_failure; + dst = &rt->dst; + skb_dst_set(skb, dst); + break; + } + case htons(ETH_P_IPV6): + fl->u.ip6.flowi6_oif = dev->ifindex; + fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; + dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); + if (dst->error) { + dst_release(dst); + dst = NULL; + goto tx_err_link_failure; + } + skb_dst_set(skb, dst); + break; + default: + goto tx_err_link_failure; + } + } dst_hold(dst); dst = xfrm_lookup(t->net, dst, fl, NULL, 0); @@ -479,7 +506,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) mtu = dst_mtu(dst); if (skb->len > mtu) { - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { if (mtu < IPV6_MIN_MTU) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 79fc012dd2ca..debdaeba5d8c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -183,9 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -EBUSY; break; } - } else if (sk->sk_protocol != IPPROTO_TCP) + } else if (sk->sk_protocol == IPPROTO_TCP) { + if (sk->sk_prot != &tcpv6_prot) { + retv = -EBUSY; + break; + } break; - + } else { + break; + } if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index a77f6b7d3a7c..dfe5e603ffe1 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -925,7 +925,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3f83ea851ebf..894c7370c1bd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -95,7 +95,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb); static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); static void ip6_link_failure(struct sk_buff *skb); static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu); + struct sk_buff *skb, u32 mtu, + bool confirm_neigh); static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static int 
rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, @@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) } static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { } @@ -2695,7 +2697,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) } static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, - const struct ipv6hdr *iph, u32 mtu) + const struct ipv6hdr *iph, u32 mtu, + bool confirm_neigh) { const struct in6_addr *daddr, *saddr; struct rt6_info *rt6 = (struct rt6_info *)dst; @@ -2713,7 +2716,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, daddr = NULL; saddr = NULL; } - dst_confirm_neigh(dst, daddr); + + if (confirm_neigh) + dst_confirm_neigh(dst, daddr); + mtu = max_t(u32, mtu, IPV6_MIN_MTU); if (mtu >= dst_mtu(dst)) return; @@ -2767,9 +2773,11 @@ out_unlock: } static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { - __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu); + __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu, + confirm_neigh); } void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, @@ -2788,7 +2796,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, dst = ip6_route_output(net, NULL, &fl6); if (!dst->error) - __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu)); + __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true); dst_release(dst); } EXPORT_SYMBOL_GPL(ip6_update_pmtu); @@ -5147,6 +5155,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, */ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | NLM_F_REPLACE); + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; nhn++; } diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index e70567446f28..802eebf8ac4b 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef CONFIG_IPV6_SEG6_HMAC #include #endif @@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto) skb_reset_network_header(skb); skb_reset_transport_header(skb); - skb->encapsulation = 0; + if (iptunnel_pull_offloads(skb)) + return false; return true; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index b2ccbc473127..98954830c40b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, } if (tunnel->parms.iph.daddr) - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->len > mtu && !skb_is_gso(skb)) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 16632e02e9b0..30915f6f31e3 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -235,7 +235,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) fl6.flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) goto out_free; } diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index ec8fcfc60a27..0f02f6dc517c 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -160,6 +160,13 @@ static struct ctl_table ipv6_table_template[] = { .mode = 0644, 
.proc_handler = proc_dointvec }, + { + .procname = "ip6_tunnel_neigh_bypass", + .data = &init_net.ipv6.sysctl.ip6_tunnel_neigh_bypass, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, { } }; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4804b6dc5e65..0f754a2b0258 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -75,7 +75,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); static const struct inet_connection_sock_af_ops ipv6_mapped; -static const struct inet_connection_sock_af_ops ipv6_specific; +struct inet_connection_sock_af_ops ipv6_specific; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_specific; static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; @@ -275,7 +275,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; @@ -906,7 +906,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 * Underlying function will use this to retrieve the network * namespace */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass, @@ -1111,7 +1111,7 @@ static void tcp_v6_restore_cb(struct sk_buff *skb) sizeof(struct inet6_skb_parm)); } -static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, +struct sock * tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, @@ -1315,6 +1315,7 @@ out: tcp_listendrop(sk); return NULL; } +EXPORT_SYMBOL(tcp_v6_syn_recv_sock); /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. 
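The table entry above exposes the new knob as a plain integer, defaulting to 0 per the inet6_net_init() hunk earlier. Assuming the template is registered under the usual ipv6 sysctl root, it reads like any procfs integer; a tiny sketch, with the path being an assumption rather than something the diff confirms:

    #include <stdio.h>

    int main(void)
    {
            /* Path assumed from where ipv6_table_template is registered. */
            const char *p = "/proc/sys/net/ipv6/ip6_tunnel_neigh_bypass";
            FILE *f = fopen(p, "r");
            int val = -1;

            if (!f) {
                    perror(p);
                    return 1;
            }
            if (fscanf(f, "%d", &val) == 1)
                    printf("%s = %d\n", p, val);
            fclose(f);
            return 0;
    }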
@@ -1739,7 +1740,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_destructor = tcp_twsk_destructor, }; -static const struct inet_connection_sock_af_ops ipv6_specific = { +struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, @@ -1758,6 +1759,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { #endif .mtu_reduced = tcp_v6_mtu_reduced, }; +EXPORT_SYMBOL(ipv6_specific); #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 699e0730ce8e..af7a4b8b1e9c 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, } static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; - path->ops->update_pmtu(path, sk, skb, mtu); + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); } static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk, diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f82ea12bac37..425b95eb7e87 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -322,8 +322,13 @@ int l2tp_session_register(struct l2tp_session *session, spin_lock_bh(&pn->l2tp_session_hlist_lock); + /* IP encap expects session IDs to be globally unique, while + * UDP encap doesn't. + */ hlist_for_each_entry(session_walk, g_head, global_hlist) - if (session_walk->session_id == session->session_id) { + if (session_walk->session_id == session->session_id && + (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP || + tunnel->encap == L2TP_ENCAPTYPE_IP)) { err = -EEXIST; goto err_tlock_pnlock; } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 802f19aba7e3..d148766f40d1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -615,7 +615,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 204a8351efff..c29170e767a8 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb) return LLC_PDU_IS_CMD(pdu) && /* command PDU */ LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID && - !pdu->dsap ? 0 : 1; /* NULL DSAP value */ + !pdu->dsap; /* NULL DSAP value */ } static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) @@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) return LLC_PDU_IS_CMD(pdu) && /* command PDU */ LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST && - !pdu->dsap ? 
0 : 1; /* NULL DSAP */ + !pdu->dsap; /* NULL DSAP */ } static int llc_station_ac_send_xid_r(struct sk_buff *skb) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 70739e746c13..0daaf7e37a21 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2954,6 +2954,28 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy, return err; } +static void ieee80211_end_cac(struct wiphy *wiphy, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + + mutex_lock(&local->mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + /* it might be waiting for the local->mtx, but then + * by the time it gets it, sdata->wdev.cac_started + * will no longer be true + */ + cancel_delayed_work(&sdata->dfs_cac_timer_work); + + if (sdata->wdev.cac_started) { + ieee80211_vif_release_channel(sdata); + sdata->wdev.cac_started = false; + } + } + mutex_unlock(&local->mtx); +} + static struct cfg80211_beacon_data * cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) { @@ -4023,6 +4045,7 @@ const struct cfg80211_ops mac80211_config_ops = { #endif .get_channel = ieee80211_cfg_get_channel, .start_radar_detection = ieee80211_start_radar_detection, + .end_cac = ieee80211_end_cac, .channel_switch = ieee80211_channel_switch, .set_qos_map = ieee80211_set_qos_map, .set_ap_chanwidth = ieee80211_set_ap_chanwidth, diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index c8ad20c28c43..70ea4cc126d1 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -5,7 +5,7 @@ * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation */ #include @@ -78,6 +78,7 @@ static const char * const sta_flag_names[] = { FLAG(MPSP_OWNER), FLAG(MPSP_RECIPIENT), FLAG(PS_DELIVER), + FLAG(USES_ENCRYPTION), #undef FLAG }; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 0f889b919b06..efc1acc6543c 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -6,7 +6,7 @@ * Copyright 2007-2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright 2018-2019 Intel Corporation + * Copyright 2018-2020 Intel Corporation */ #include @@ -262,22 +262,29 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sta ? 
sta->sta.addr : bcast_addr, ret); } -int ieee80211_set_tx_key(struct ieee80211_key *key) +static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force) { struct sta_info *sta = key->sta; struct ieee80211_local *local = key->local; assert_key_lock(local); + set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); + sta->ptk_idx = key->conf.keyidx; - if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) + if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_check_fast_xmit(sta); return 0; } +int ieee80211_set_tx_key(struct ieee80211_key *key) +{ + return _ieee80211_set_tx_key(key, false); +} + static void ieee80211_pairwise_rekey(struct ieee80211_key *old, struct ieee80211_key *new) { @@ -441,11 +448,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, if (pairwise) { rcu_assign_pointer(sta->ptk[idx], new); if (new && - !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) { - sta->ptk_idx = idx; - clear_sta_flag(sta, WLAN_STA_BLOCK_BA); - ieee80211_check_fast_xmit(sta); - } + !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) + _ieee80211_set_tx_key(new, true); } else { rcu_assign_pointer(sta->gtk[idx], new); } diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 68af62306385..38a0383dfbcf 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -328,6 +328,9 @@ u32 airtime_link_metric_get(struct ieee80211_local *local, unsigned long fail_avg = ewma_mesh_fail_avg_read(&sta->mesh->fail_avg); + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) + return MAX_METRIC; + /* Try to get rate based on HW/SW RC algorithm. * Rate is returned in units of Kbps, correct this * to comply with airtime calculation units @@ -1149,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, } } - if (!(mpath->flags & MESH_PATH_RESOLVING)) + if (!(mpath->flags & MESH_PATH_RESOLVING) && + mesh_path_sel_is_hwmp(sdata)) mesh_queue_preq(mpath, PREQ_Q_F_START); if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 54dd8849d1cc..c7d8044ff0fa 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -8,7 +8,7 @@ * Copyright 2007, Michael Wu * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation */ #include @@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, if (!res) { ch_switch.timestamp = timestamp; ch_switch.device_timestamp = device_timestamp; - ch_switch.block_tx = beacon ? csa_ie.mode : 0; + ch_switch.block_tx = csa_ie.mode; ch_switch.chandef = csa_ie.chandef; ch_switch.count = csa_ie.count; ch_switch.delay = csa_ie.max_switch_time; @@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sdata->vif.csa_active = true; sdata->csa_chandef = csa_ie.chandef; - sdata->csa_block_tx = ch_switch.block_tx; + sdata->csa_block_tx = csa_ie.mode; ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) @@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, * reset when the disconnection worker runs. 
*/ sdata->vif.csa_active = true; - sdata->csa_block_tx = ch_switch.block_tx; + sdata->csa_block_tx = csa_ie.mode; ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); mutex_unlock(&local->chanctx_mtx); @@ -2959,7 +2959,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, (auth_transaction == 2 && ifmgd->auth_data->expected_transaction == 2)) { if (!ieee80211_mark_sta_auth(sdata, bssid)) - goto out_err; + return; /* ignore frame -- wait for timeout */ } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && auth_transaction == 2) { sdata_info(sdata, "SAE peer confirmed\n"); @@ -2967,10 +2967,6 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, } cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); - return; - out_err: - mutex_unlock(&sdata->local->sta_mtx); - /* ignore frame -- wait for timeout */ } #define case_WLAN(type) \ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0e05ff037672..0ba98ad9bc85 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -4114,7 +4114,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) lockdep_assert_held(&local->sta_mtx); - list_for_each_entry_rcu(sta, &local->sta_list, list) { + list_for_each_entry(sta, &local->sta_list, list) { if (sdata != sta->sdata && (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) continue; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 8d3a2389b055..21b1422b1b1c 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation */ #include @@ -1032,6 +1032,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta) might_sleep(); lockdep_assert_held(&local->sta_mtx); + while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); + WARN_ON_ONCE(ret); + } + /* now keys can no longer be reached */ ieee80211_free_sta_keys(local, sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 369c2dddce52..be1d9dfa760d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -98,6 +98,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_MPSP_OWNER, WLAN_STA_MPSP_RECIPIENT, WLAN_STA_PS_DELIVER, + WLAN_STA_USES_ENCRYPTION, NUM_WLAN_STA_FLAGS, }; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index ab8ba5835ca0..5a3d645fe1bc 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -1030,7 +1030,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, I802_DEBUG_INC(local->dot11FailedCount); } - if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) && + if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && + ieee80211_has_pm(fc) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && !(info->flags & IEEE80211_TX_CTL_INJECTED) && local->ps_sdata && !(local->scanning)) { diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 727dc9f3f3b3..e7f57bb18f6e 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c @@ -263,9 +263,21 @@ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, if ((keyid >> 6) != key->conf.keyidx) return TKIP_DECRYPT_INVALID_KEYIDX; - if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT && - (iv32 < rx_ctx->iv32 || - (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16))) + /* Reject replays if the received TSC is smaller than or equal to the + * 
last received value in a valid message, but with an exception for
+	   the case where a new key has been set and no valid frame using that
+	   key has yet been received and the local RSC was initialized to 0.
+	   This exception allows the very first frame sent by the transmitter
+	   to be accepted even if that transmitter were to use TSC 0 (IEEE
+	   802.11 describes the TSC as being initialized to 1 whenever a new
+	   key is taken into use).
+	 */
+	if (iv32 < rx_ctx->iv32 ||
+	    (iv32 == rx_ctx->iv32 &&
+	     (iv16 < rx_ctx->iv16 ||
+	      (iv16 == rx_ctx->iv16 &&
+	       (rx_ctx->iv32 || rx_ctx->iv16 ||
+		rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
 		return TKIP_DECRYPT_REPLAY;
 
 	if (only_iv) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1fa422782905..41da41cb5c40 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5,7 +5,7 @@
  * Copyright 2006-2007	Jiri Benc
  * Copyright 2007	Johannes Berg
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2020 Intel Corporation
  *
  * Transmit and frame generation functions.
  */
@@ -590,10 +590,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 
-	if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
+	if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
 		tx->key = NULL;
-	else if (tx->sta &&
-		 (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
+		return TX_CONTINUE;
+	}
+
+	if (tx->sta &&
+	    (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
 		tx->key = key;
 	else if (ieee80211_is_group_privacy_action(tx->skb) &&
 		 (key = rcu_dereference(tx->sdata->default_multicast_key)))
@@ -654,6 +657,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 		if (!skip_hw && tx->key &&
 		    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
 			info->control.hw_key = &tx->key->conf;
+	} else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+		   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
+		return TX_DROP;
 	}
 
 	return TX_CONTINUE;
@@ -2263,6 +2269,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
 				      payload[7]);
 	}
 
+	/*
+	 * Initialize skb->priority for QoS frames. This is put in the TID field
+	 * of the frame before passing it to the driver.
+	 */
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *p = ieee80211_get_qos_ctl(hdr);
+		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+	}
+
 	memset(info, 0, sizeof(*info));
 
 	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
@@ -3581,8 +3596,26 @@ begin:
 	tx.skb = skb;
 	tx.sdata = vif_to_sdata(info->control.vif);
 
-	if (txq->sta)
+	if (txq->sta) {
 		tx.sta = container_of(txq->sta, struct sta_info, sta);
+		/*
+		 * Drop unicast frames to unauthorised stations unless they are
+		 * EAPOL frames from the local station.
+ */ + if (unlikely(ieee80211_is_data(hdr->frame_control) && + !ieee80211_vif_is_mesh(&tx.sdata->vif) && + tx.sdata->vif.type != NL80211_IFTYPE_OCB && + !is_multicast_ether_addr(hdr->addr1) && + !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) && + (!(info->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO) || + !ether_addr_equal(tx.sdata->vif.addr, + hdr->addr2)))) { + I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } + } /* * The key can be removed while the packet was queued, so need to call @@ -5052,6 +5085,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ethhdr *ehdr; + u32 ctrl_flags = 0; u32 flags; /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE @@ -5061,6 +5095,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, proto != cpu_to_be16(ETH_P_PREAUTH)) return -EINVAL; + if (proto == sdata->control_port_protocol) + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + if (unencrypted) flags = IEEE80211_TX_INTFL_DONT_ENCRYPT; else @@ -5086,7 +5123,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, skb_reset_mac_header(skb); local_bh_disable(); - __ieee80211_subif_start_xmit(skb, skb->dev, flags, 0); + __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags); local_bh_enable(); return 0; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 32a7a53833c0..decd46b38393 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, elem_parse_failed = true; break; case WLAN_EID_VHT_OPERATION: - if (elen >= sizeof(struct ieee80211_vht_operation)) + if (elen >= sizeof(struct ieee80211_vht_operation)) { elems->vht_operation = (void *)pos; - else - elem_parse_failed = true; + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + break; + } + elem_parse_failed = true; break; case WLAN_EID_OPMODE_NOTIF: - if (elen > 0) + if (elen > 0) { elems->opmode_notif = pos; - else - elem_parse_failed = true; + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + break; + } + elem_parse_failed = true; break; case WLAN_EID_MESH_ID: elems->mesh_id = pos; @@ -2987,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, int cf0, cf1; int ccfs0, ccfs1, ccfs2; int ccf0, ccf1; + u32 vht_cap; + bool support_80_80 = false; + bool support_160 = false; if (!oper || !htop) return false; + vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap; + support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK | + IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)); + support_80_80 = ((vht_cap & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) || + (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && + vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) || + ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >> + IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1)); ccfs0 = oper->center_freq_seg0_idx; ccfs1 = oper->center_freq_seg1_idx; ccfs2 = (le16_to_cpu(htop->operation_mode) & @@ -3018,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, unsigned int diff; diff = abs(ccf1 - ccf0); - if (diff == 8) { + if ((diff == 8) && support_160) { new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf1; - } else if (diff > 8) { + } else if ((diff > 8) && support_80_80) { new.width = NL80211_CHAN_WIDTH_80P80; new.center_freq2 = cf1; } diff --git a/net/mpls/af_mpls.c 
b/net/mpls/af_mpls.c index c312741df2ce..4701edffb1f7 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -617,16 +617,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, struct net_device *dev; struct dst_entry *dst; struct flowi6 fl6; - int err; if (!ipv6_stub) return ERR_PTR(-EAFNOSUPPORT); memset(&fl6, 0, sizeof(fl6)); memcpy(&fl6.daddr, addr, sizeof(struct in6_addr)); - err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6); - if (err) - return ERR_PTR(err); + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return ERR_CAST(dst); dev = dst->dev; dev_hold(dev); diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 5d5bdf450091..78f046ec506f 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -536,6 +536,26 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, } EXPORT_SYMBOL(nf_hook_slow); +void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state, + const struct nf_hook_entries *e) +{ + struct sk_buff *skb, *next; + struct list_head sublist; + int ret; + + INIT_LIST_HEAD(&sublist); + + list_for_each_entry_safe(skb, next, head, list) { + skb_list_del_init(skb); + ret = nf_hook_slow(skb, state, e, 0); + if (ret == 1) + list_add_tail(&skb->list, &sublist); + } + /* Put passed packets back on main list */ + list_splice(&sublist, head); +} +EXPORT_SYMBOL(nf_hook_slow_list); + /* This needs to be compiled in any case to avoid dependencies between the * nfnetlink_queue code and nf_conntrack. */ diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index 063df74b4647..bfd4b42ba305 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -60,9 +60,9 @@ mtype_destroy(struct ip_set *set) if (SET_WITH_TIMEOUT(set)) del_timer_sync(&map->gc); - ip_set_free(map->members); if (set->dsize && set->extensions & IPSET_EXT_DESTROY) mtype_ext_cleanup(set); + ip_set_free(map->members); ip_set_free(map); set->data = NULL; @@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set) if (set->extensions & IPSET_EXT_DESTROY) mtype_ext_cleanup(set); - memset(map->members, 0, map->memsize); + bitmap_zero(map->members, map->elements); set->elements = 0; set->ext_size = 0; } diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index 11ff9d4a7006..d934384f31ad 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip"); /* Type structure */ struct bitmap_ip { - void *members; /* the set members */ + unsigned long *members; /* the set members */ u32 first_ip; /* host byte order, included in range */ u32 last_ip; /* host byte order, included in range */ u32 elements; /* number of max elements in the set */ @@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map, u32 first_ip, u32 last_ip, u32 elements, u32 hosts, u8 netmask) { - map->members = ip_set_alloc(map->memsize); + map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN); if (!map->members) return false; map->first_ip = first_ip; @@ -310,7 +310,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], if (!map) return -ENOMEM; - map->memsize = bitmap_bytes(0, elements - 1); + map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_ip; if (!init_map_ip(set, map, first_ip, last_ip, elements, hosts, netmask)) { diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c 
b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 1d4e63326e68..e8532783b43a 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -42,7 +42,7 @@ enum { /* Type structure */ struct bitmap_ipmac { - void *members; /* the set members */ + unsigned long *members; /* the set members */ u32 first_ip; /* host byte order, included in range */ u32 last_ip; /* host byte order, included in range */ u32 elements; /* number of max elements in the set */ @@ -299,7 +299,7 @@ static bool init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map, u32 first_ip, u32 last_ip, u32 elements) { - map->members = ip_set_alloc(map->memsize); + map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN); if (!map->members) return false; map->first_ip = first_ip; @@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], if (!map) return -ENOMEM; - map->memsize = bitmap_bytes(0, elements - 1); + map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_ipmac; if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { kfree(map); diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 704a0dda1609..e3ac914fff1a 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port"); /* Type structure */ struct bitmap_port { - void *members; /* the set members */ + unsigned long *members; /* the set members */ u16 first_port; /* host byte order, included in range */ u16 last_port; /* host byte order, included in range */ u32 elements; /* number of max elements in the set */ @@ -204,7 +204,7 @@ static bool init_map_port(struct ip_set *set, struct bitmap_port *map, u16 first_port, u16 last_port) { - map->members = ip_set_alloc(map->memsize); + map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN); if (!map->members) return false; map->first_port = first_port; @@ -244,7 +244,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], return -ENOMEM; map->elements = elements; - map->memsize = bitmap_bytes(0, map->elements); + map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_port; if (!init_map_port(set, map, first_port, last_port)) { kfree(map); diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index d73d1828216a..75da200aa5d8 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -557,6 +557,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index) return set; } +static inline void +ip_set_lock(struct ip_set *set) +{ + if (!set->variant->region_lock) + spin_lock_bh(&set->lock); +} + +static inline void +ip_set_unlock(struct ip_set *set) +{ + if (!set->variant->region_lock) + spin_unlock_bh(&set->lock); +} + int ip_set_test(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) @@ -578,9 +592,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb, if (ret == -EAGAIN) { /* Type requests element to be completed */ pr_debug("element must be completed, ADD is triggered\n"); - spin_lock_bh(&set->lock); + ip_set_lock(set); set->variant->kadt(set, skb, par, IPSET_ADD, opt); - spin_unlock_bh(&set->lock); + ip_set_unlock(set); ret = 1; } else { /* --return-nomatch: invert matched element */ @@ -609,9 +623,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb, 
!(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return -IPSET_ERR_TYPE_MISMATCH; - spin_lock_bh(&set->lock); + ip_set_lock(set); ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt); - spin_unlock_bh(&set->lock); + ip_set_unlock(set); return ret; } @@ -631,9 +645,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb, !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return -IPSET_ERR_TYPE_MISMATCH; - spin_lock_bh(&set->lock); + ip_set_lock(set); ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt); - spin_unlock_bh(&set->lock); + ip_set_unlock(set); return ret; } @@ -1098,9 +1112,9 @@ ip_set_flush_set(struct ip_set *set) { pr_debug("set: %s\n", set->name); - spin_lock_bh(&set->lock); + ip_set_lock(set); set->variant->flush(set); - spin_unlock_bh(&set->lock); + ip_set_unlock(set); } static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb, @@ -1293,31 +1307,34 @@ ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = { }; static int -dump_init(struct netlink_callback *cb, struct ip_set_net *inst) +ip_set_dump_start(struct netlink_callback *cb) { struct nlmsghdr *nlh = nlmsg_hdr(cb->skb); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1]; struct nlattr *attr = (void *)nlh + min_len; + struct sk_buff *skb = cb->skb; + struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk)); u32 dump_type; - ip_set_id_t index; int ret; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr, nlh->nlmsg_len - min_len, ip_set_dump_policy, NULL); if (ret) - return ret; + goto error; cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]); if (cda[IPSET_ATTR_SETNAME]) { + ip_set_id_t index; struct ip_set *set; set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]), &index); - if (!set) - return -ENOENT; - + if (!set) { + ret = -ENOENT; + goto error; + } dump_type = DUMP_ONE; cb->args[IPSET_CB_INDEX] = index; } else { @@ -1333,10 +1350,17 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst) cb->args[IPSET_CB_DUMP] = dump_type; return 0; + +error: + /* We have to create and send the error message manually :-( */ + if (nlh->nlmsg_flags & NLM_F_ACK) { + netlink_ack(cb->skb, nlh, ret, NULL); + } + return ret; } static int -ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb) +ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb) { ip_set_id_t index = IPSET_INVALID_ID, max; struct ip_set *set = NULL; @@ -1347,18 +1371,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb) bool is_destroyed; int ret = 0; - if (!cb->args[IPSET_CB_DUMP]) { - ret = dump_init(cb, inst); - if (ret < 0) { - nlh = nlmsg_hdr(cb->skb); - /* We have to create and send the error message - * manually :-( - */ - if (nlh->nlmsg_flags & NLM_F_ACK) - netlink_ack(cb->skb, nlh, ret, NULL); - return ret; - } - } + if (!cb->args[IPSET_CB_DUMP]) + return -EINVAL; if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max) goto out; @@ -1494,7 +1508,8 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb, { struct netlink_dump_control c = { - .dump = ip_set_dump_start, + .start = ip_set_dump_start, + .dump = ip_set_dump_do, .done = ip_set_dump_done, }; return netlink_dump_start(ctnl, skb, nlh, &c); @@ -1522,9 +1537,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, bool eexist = flags & IPSET_FLAG_EXIST, retried = false; do { - spin_lock_bh(&set->lock); + ip_set_lock(set); ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); - 
spin_unlock_bh(&set->lock); + ip_set_unlock(set); retried = true; } while (ret == -EAGAIN && set->variant->resize && @@ -1658,6 +1673,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb, struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; int ret = 0; + u32 lineno; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || @@ -1674,7 +1690,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb, return -IPSET_ERR_PROTOCOL; rcu_read_lock_bh(); - ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0); + ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); rcu_read_unlock_bh(); /* Userspace can't trigger element to be re-added */ if (ret == -EAGAIN) diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index d098d87bc331..2389c9f89e48 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -7,13 +7,21 @@ #include #include #include +#include #include -#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c) -#define ipset_dereference_protected(p, set) \ - __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock)) - -#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1) +#define __ipset_dereference(p) \ + rcu_dereference_protected(p, 1) +#define ipset_dereference_nfnl(p) \ + rcu_dereference_protected(p, \ + lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) +#define ipset_dereference_set(p, set) \ + rcu_dereference_protected(p, \ + lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \ + lockdep_is_held(&(set)->lock)) +#define ipset_dereference_bh_nfnl(p) \ + rcu_dereference_bh_check(p, \ + lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) /* Hashing which uses arrays to resolve clashing. The hash table is resized * (doubled) when searching becomes too long. @@ -72,11 +80,35 @@ struct hbucket { __aligned(__alignof__(u64)); }; +/* Region size for locking == 2^HTABLE_REGION_BITS */ +#define HTABLE_REGION_BITS 10 +#define ahash_numof_locks(htable_bits) \ + ((htable_bits) < HTABLE_REGION_BITS ? 1 \ + : jhash_size((htable_bits) - HTABLE_REGION_BITS)) +#define ahash_sizeof_regions(htable_bits) \ + (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region)) +#define ahash_region(n, htable_bits) \ + ((n) % ahash_numof_locks(htable_bits)) +#define ahash_bucket_start(h, htable_bits) \ + ((htable_bits) < HTABLE_REGION_BITS ? 0 \ + : (h) * jhash_size(HTABLE_REGION_BITS)) +#define ahash_bucket_end(h, htable_bits) \ + ((htable_bits) < HTABLE_REGION_BITS ? 
jhash_size(htable_bits) \ + : ((h) + 1) * jhash_size(HTABLE_REGION_BITS)) + +struct htable_gc { + struct delayed_work dwork; + struct ip_set *set; /* Set the gc belongs to */ + u32 region; /* Last gc run position */ +}; + /* The hash table: the table size stored here in order to make resizing easy */ struct htable { atomic_t ref; /* References for resizing */ - atomic_t uref; /* References for dumping */ + atomic_t uref; /* References for dumping and gc */ u8 htable_bits; /* size of hash table == 2^htable_bits */ + u32 maxelem; /* Maxelem per region */ + struct ip_set_region *hregion; /* Region locks and ext sizes */ struct hbucket __rcu *bucket[0]; /* hashtable buckets */ }; @@ -162,6 +194,10 @@ htable_bits(u32 hashsize) #define NLEN 0 #endif /* IP_SET_HASH_WITH_NETS */ +#define SET_ELEM_EXPIRED(set, d) \ + (SET_WITH_TIMEOUT(set) && \ + ip_set_timeout_expired(ext_timeout(d, set))) + #endif /* _IP_SET_HASH_GEN_H */ #ifndef MTYPE @@ -205,10 +241,12 @@ htable_bits(u32 hashsize) #undef mtype_test_cidrs #undef mtype_test #undef mtype_uref -#undef mtype_expire #undef mtype_resize +#undef mtype_ext_size +#undef mtype_resize_ad #undef mtype_head #undef mtype_list +#undef mtype_gc_do #undef mtype_gc #undef mtype_gc_init #undef mtype_variant @@ -247,10 +285,12 @@ htable_bits(u32 hashsize) #define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs) #define mtype_test IPSET_TOKEN(MTYPE, _test) #define mtype_uref IPSET_TOKEN(MTYPE, _uref) -#define mtype_expire IPSET_TOKEN(MTYPE, _expire) #define mtype_resize IPSET_TOKEN(MTYPE, _resize) +#define mtype_ext_size IPSET_TOKEN(MTYPE, _ext_size) +#define mtype_resize_ad IPSET_TOKEN(MTYPE, _resize_ad) #define mtype_head IPSET_TOKEN(MTYPE, _head) #define mtype_list IPSET_TOKEN(MTYPE, _list) +#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do) #define mtype_gc IPSET_TOKEN(MTYPE, _gc) #define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init) #define mtype_variant IPSET_TOKEN(MTYPE, _variant) @@ -275,8 +315,7 @@ htable_bits(u32 hashsize) /* The generic hash structure */ struct htype { struct htable __rcu *table; /* the hash table */ - struct timer_list gc; /* garbage collection when timeout enabled */ - struct ip_set *set; /* attached to this ip_set */ + struct htable_gc gc; /* gc workqueue */ u32 maxelem; /* max elements in the hash */ u32 initval; /* random jhash init value */ #ifdef IP_SET_HASH_WITH_MARKMASK @@ -288,21 +327,33 @@ struct htype { #ifdef IP_SET_HASH_WITH_NETMASK u8 netmask; /* netmask value for subnets to store */ #endif + struct list_head ad; /* Resize add|del backlist */ struct mtype_elem next; /* temporary storage for uadd */ #ifdef IP_SET_HASH_WITH_NETS struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */ #endif }; +/* ADD|DEL entries saved during resize */ +struct mtype_resize_ad { + struct list_head list; + enum ipset_adt ad; /* ADD|DEL element */ + struct mtype_elem d; /* Element value */ + struct ip_set_ext ext; /* Extensions for ADD */ + struct ip_set_ext mext; /* Target extensions for ADD */ + u32 flags; /* Flags for ADD */ +}; + #ifdef IP_SET_HASH_WITH_NETS /* Network cidr size book keeping when the hash stores different * sized networks. cidr == real cidr + 1 to support /0. 
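The region macros above carve the bucket array into fixed-size lock domains: tables smaller than 2^HTABLE_REGION_BITS buckets keep a single lock, larger ones get one lock per 1024 consecutive buckets. A stand-alone sketch of the arithmetic, assuming jhash_size(n) == 1U << (n) as in <linux/jhash.h>:

#include <stdio.h>

#define jhash_size(n)		((unsigned int)1 << (n))
#define HTABLE_REGION_BITS	10
#define ahash_numof_locks(htable_bits) \
	((htable_bits) < HTABLE_REGION_BITS ? 1 \
	 : jhash_size((htable_bits) - HTABLE_REGION_BITS))

int main(void)
{
	unsigned int bits;

	// e.g. htable_bits=14 -> 16 regions of 1024 buckets each
	for (bits = 8; bits <= 14; bits += 2)
		printf("htable_bits=%u -> regions=%u\n",
		       bits, ahash_numof_locks(bits));
	return 0;
}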
*/ static void -mtype_add_cidr(struct htype *h, u8 cidr, u8 n) +mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) { int i, j; + spin_lock_bh(&set->lock); /* Add in increasing prefix order, so larger cidr first */ for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) { if (j != -1) { @@ -311,7 +362,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n) j = i; } else if (h->nets[i].cidr[n] == cidr) { h->nets[CIDR_POS(cidr)].nets[n]++; - return; + goto unlock; } } if (j != -1) { @@ -320,24 +371,29 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n) } h->nets[i].cidr[n] = cidr; h->nets[CIDR_POS(cidr)].nets[n] = 1; +unlock: + spin_unlock_bh(&set->lock); } static void -mtype_del_cidr(struct htype *h, u8 cidr, u8 n) +mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) { u8 i, j, net_end = NLEN - 1; + spin_lock_bh(&set->lock); for (i = 0; i < NLEN; i++) { if (h->nets[i].cidr[n] != cidr) continue; h->nets[CIDR_POS(cidr)].nets[n]--; if (h->nets[CIDR_POS(cidr)].nets[n] > 0) - return; + goto unlock; for (j = i; j < net_end && h->nets[j].cidr[n]; j++) h->nets[j].cidr[n] = h->nets[j + 1].cidr[n]; h->nets[j].cidr[n] = 0; - return; + goto unlock; } +unlock: + spin_unlock_bh(&set->lock); } #endif @@ -345,7 +401,7 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 n) static size_t mtype_ahash_memsize(const struct htype *h, const struct htable *t) { - return sizeof(*h) + sizeof(*t); + return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits); } /* Get the ith element from the array block n */ @@ -369,24 +425,29 @@ mtype_flush(struct ip_set *set) struct htype *h = set->data; struct htable *t; struct hbucket *n; - u32 i; + u32 r, i; - t = ipset_dereference_protected(h->table, set); - for (i = 0; i < jhash_size(t->htable_bits); i++) { - n = __ipset_dereference_protected(hbucket(t, i), 1); - if (!n) - continue; - if (set->extensions & IPSET_EXT_DESTROY) - mtype_ext_cleanup(set, n); - /* FIXME: use slab cache */ - rcu_assign_pointer(hbucket(t, i), NULL); - kfree_rcu(n, rcu); + t = ipset_dereference_nfnl(h->table); + for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) { + spin_lock_bh(&t->hregion[r].lock); + for (i = ahash_bucket_start(r, t->htable_bits); + i < ahash_bucket_end(r, t->htable_bits); i++) { + n = __ipset_dereference(hbucket(t, i)); + if (!n) + continue; + if (set->extensions & IPSET_EXT_DESTROY) + mtype_ext_cleanup(set, n); + /* FIXME: use slab cache */ + rcu_assign_pointer(hbucket(t, i), NULL); + kfree_rcu(n, rcu); + } + t->hregion[r].ext_size = 0; + t->hregion[r].elements = 0; + spin_unlock_bh(&t->hregion[r].lock); } #ifdef IP_SET_HASH_WITH_NETS memset(h->nets, 0, sizeof(h->nets)); #endif - set->elements = 0; - set->ext_size = 0; } /* Destroy the hashtable part of the set */ @@ -397,7 +458,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy) u32 i; for (i = 0; i < jhash_size(t->htable_bits); i++) { - n = __ipset_dereference_protected(hbucket(t, i), 1); + n = __ipset_dereference(hbucket(t, i)); if (!n) continue; if (set->extensions & IPSET_EXT_DESTROY && ext_destroy) @@ -406,6 +467,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy) kfree(n); } + ip_set_free(t->hregion); ip_set_free(t); } @@ -414,28 +476,21 @@ static void mtype_destroy(struct ip_set *set) { struct htype *h = set->data; + struct list_head *l, *lt; if (SET_WITH_TIMEOUT(set)) - del_timer_sync(&h->gc); + cancel_delayed_work_sync(&h->gc.dwork); - mtype_ahash_destroy(set, - __ipset_dereference_protected(h->table, 1), true); + 
mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true); + list_for_each_safe(l, lt, &h->ad) { + list_del(l); + kfree(l); + } kfree(h); set->data = NULL; } -static void -mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t)) -{ - struct htype *h = set->data; - - timer_setup(&h->gc, gc, 0); - mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ); - pr_debug("gc initialized, run in every %u\n", - IPSET_GC_PERIOD(set->timeout)); -} - static bool mtype_same_set(const struct ip_set *a, const struct ip_set *b) { @@ -454,11 +509,9 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b) a->extensions == b->extensions; } -/* Delete expired elements from the hashtable */ static void -mtype_expire(struct ip_set *set, struct htype *h) +mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r) { - struct htable *t; struct hbucket *n, *tmp; struct mtype_elem *data; u32 i, j, d; @@ -466,10 +519,12 @@ mtype_expire(struct ip_set *set, struct htype *h) #ifdef IP_SET_HASH_WITH_NETS u8 k; #endif + u8 htable_bits = t->htable_bits; - t = ipset_dereference_protected(h->table, set); - for (i = 0; i < jhash_size(t->htable_bits); i++) { - n = __ipset_dereference_protected(hbucket(t, i), 1); + spin_lock_bh(&t->hregion[r].lock); + for (i = ahash_bucket_start(r, htable_bits); + i < ahash_bucket_end(r, htable_bits); i++) { + n = __ipset_dereference(hbucket(t, i)); if (!n) continue; for (j = 0, d = 0; j < n->pos; j++) { @@ -485,58 +540,100 @@ mtype_expire(struct ip_set *set, struct htype *h) smp_mb__after_atomic(); #ifdef IP_SET_HASH_WITH_NETS for (k = 0; k < IPSET_NET_COUNT; k++) - mtype_del_cidr(h, + mtype_del_cidr(set, h, NCIDR_PUT(DCIDR_GET(data->cidr, k)), k); #endif + t->hregion[r].elements--; ip_set_ext_destroy(set, data); - set->elements--; d++; } if (d >= AHASH_INIT_SIZE) { if (d >= n->size) { + t->hregion[r].ext_size -= + ext_size(n->size, dsize); rcu_assign_pointer(hbucket(t, i), NULL); kfree_rcu(n, rcu); continue; } tmp = kzalloc(sizeof(*tmp) + - (n->size - AHASH_INIT_SIZE) * dsize, - GFP_ATOMIC); + (n->size - AHASH_INIT_SIZE) * dsize, + GFP_ATOMIC); if (!tmp) - /* Still try to delete expired elements */ + /* Still try to delete expired elements. 
*/ continue; tmp->size = n->size - AHASH_INIT_SIZE; for (j = 0, d = 0; j < n->pos; j++) { if (!test_bit(j, n->used)) continue; data = ahash_data(n, j, dsize); - memcpy(tmp->value + d * dsize, data, dsize); + memcpy(tmp->value + d * dsize, + data, dsize); set_bit(d, tmp->used); d++; } tmp->pos = d; - set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize); + t->hregion[r].ext_size -= + ext_size(AHASH_INIT_SIZE, dsize); rcu_assign_pointer(hbucket(t, i), tmp); kfree_rcu(n, rcu); } } + spin_unlock_bh(&t->hregion[r].lock); } static void -mtype_gc(struct timer_list *t) +mtype_gc(struct work_struct *work) { - struct htype *h = from_timer(h, t, gc); - struct ip_set *set = h->set; + struct htable_gc *gc; + struct ip_set *set; + struct htype *h; + struct htable *t; + u32 r, numof_locks; + unsigned int next_run; + + gc = container_of(work, struct htable_gc, dwork.work); + set = gc->set; + h = set->data; - pr_debug("called\n"); spin_lock_bh(&set->lock); - mtype_expire(set, h); + t = ipset_dereference_set(h->table, set); + atomic_inc(&t->uref); + numof_locks = ahash_numof_locks(t->htable_bits); + r = gc->region++; + if (r >= numof_locks) { + r = gc->region = 0; + } + next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks; + if (next_run < HZ/10) + next_run = HZ/10; spin_unlock_bh(&set->lock); - h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; - add_timer(&h->gc); + mtype_gc_do(set, h, t, r); + + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) { + pr_debug("Table destroy after resize by expire: %p\n", t); + mtype_ahash_destroy(set, t, false); + } + + queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run); + } +static void +mtype_gc_init(struct htable_gc *gc) +{ + INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc); + queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ); +} + +static int +mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags); +static int +mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags); + /* Resize a hash: create a new hash table with doubling the hashsize * and inserting the elements to it. Repeat until we succeed or * fail due to memory pressures. 
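The gc work above sweeps exactly one region per run, so a full pass over the table still takes roughly one IPSET_GC_PERIOD; the per-run delay is simply the period divided by the number of regions, floored at HZ/10. A minimal sketch of that cadence computation (numbers illustrative, not taken from the kernel):

// 16 regions and a 60 s period give a ~3.75 s cadence, but the work
// is never rescheduled more often than every HZ/10 jiffies (100 ms).
static unsigned int gc_next_run(unsigned int period_jiffies,
				unsigned int numof_locks,
				unsigned int hz)
{
	unsigned int next_run = period_jiffies / numof_locks;

	if (next_run < hz / 10)
		next_run = hz / 10;
	return next_run;
}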
@@ -547,7 +644,7 @@ mtype_resize(struct ip_set *set, bool retried) struct htype *h = set->data; struct htable *t, *orig; u8 htable_bits; - size_t extsize, dsize = set->dsize; + size_t dsize = set->dsize; #ifdef IP_SET_HASH_WITH_NETS u8 flags; struct mtype_elem *tmp; @@ -555,7 +652,9 @@ mtype_resize(struct ip_set *set, bool retried) struct mtype_elem *data; struct mtype_elem *d; struct hbucket *n, *m; - u32 i, j, key; + struct list_head *l, *lt; + struct mtype_resize_ad *x; + u32 i, j, r, nr, key; int ret; #ifdef IP_SET_HASH_WITH_NETS @@ -563,10 +662,8 @@ mtype_resize(struct ip_set *set, bool retried) if (!tmp) return -ENOMEM; #endif - rcu_read_lock_bh(); - orig = rcu_dereference_bh_nfnl(h->table); + orig = ipset_dereference_bh_nfnl(h->table); htable_bits = orig->htable_bits; - rcu_read_unlock_bh(); retry: ret = 0; @@ -583,88 +680,124 @@ retry: ret = -ENOMEM; goto out; } + t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits)); + if (!t->hregion) { + kfree(t); + ret = -ENOMEM; + goto out; + } t->htable_bits = htable_bits; + t->maxelem = h->maxelem / ahash_numof_locks(htable_bits); + for (i = 0; i < ahash_numof_locks(htable_bits); i++) + spin_lock_init(&t->hregion[i].lock); - spin_lock_bh(&set->lock); - orig = __ipset_dereference_protected(h->table, 1); - /* There can't be another parallel resizing, but dumping is possible */ + /* There can't be another parallel resizing, + * but dumping, gc, kernel side add/del are possible + */ + orig = ipset_dereference_bh_nfnl(h->table); atomic_set(&orig->ref, 1); atomic_inc(&orig->uref); - extsize = 0; pr_debug("attempt to resize set %s from %u to %u, t %p\n", set->name, orig->htable_bits, htable_bits, orig); - for (i = 0; i < jhash_size(orig->htable_bits); i++) { - n = __ipset_dereference_protected(hbucket(orig, i), 1); - if (!n) - continue; - for (j = 0; j < n->pos; j++) { - if (!test_bit(j, n->used)) + for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) { + /* Expire may replace a hbucket with another one */ + rcu_read_lock_bh(); + for (i = ahash_bucket_start(r, orig->htable_bits); + i < ahash_bucket_end(r, orig->htable_bits); i++) { + n = __ipset_dereference(hbucket(orig, i)); + if (!n) continue; - data = ahash_data(n, j, dsize); + for (j = 0; j < n->pos; j++) { + if (!test_bit(j, n->used)) + continue; + data = ahash_data(n, j, dsize); + if (SET_ELEM_EXPIRED(set, data)) + continue; #ifdef IP_SET_HASH_WITH_NETS - /* We have readers running parallel with us, - * so the live data cannot be modified. - */ - flags = 0; - memcpy(tmp, data, dsize); - data = tmp; - mtype_data_reset_flags(data, &flags); + /* We have readers running parallel with us, + * so the live data cannot be modified. 
+ */ + flags = 0; + memcpy(tmp, data, dsize); + data = tmp; + mtype_data_reset_flags(data, &flags); #endif - key = HKEY(data, h->initval, htable_bits); - m = __ipset_dereference_protected(hbucket(t, key), 1); - if (!m) { - m = kzalloc(sizeof(*m) + + key = HKEY(data, h->initval, htable_bits); + m = __ipset_dereference(hbucket(t, key)); + nr = ahash_region(key, htable_bits); + if (!m) { + m = kzalloc(sizeof(*m) + AHASH_INIT_SIZE * dsize, GFP_ATOMIC); - if (!m) { - ret = -ENOMEM; - goto cleanup; - } - m->size = AHASH_INIT_SIZE; - extsize += ext_size(AHASH_INIT_SIZE, dsize); - RCU_INIT_POINTER(hbucket(t, key), m); - } else if (m->pos >= m->size) { - struct hbucket *ht; + if (!m) { + ret = -ENOMEM; + goto cleanup; + } + m->size = AHASH_INIT_SIZE; + t->hregion[nr].ext_size += + ext_size(AHASH_INIT_SIZE, + dsize); + RCU_INIT_POINTER(hbucket(t, key), m); + } else if (m->pos >= m->size) { + struct hbucket *ht; - if (m->size >= AHASH_MAX(h)) { - ret = -EAGAIN; - } else { - ht = kzalloc(sizeof(*ht) + + if (m->size >= AHASH_MAX(h)) { + ret = -EAGAIN; + } else { + ht = kzalloc(sizeof(*ht) + (m->size + AHASH_INIT_SIZE) * dsize, GFP_ATOMIC); - if (!ht) - ret = -ENOMEM; + if (!ht) + ret = -ENOMEM; + } + if (ret < 0) + goto cleanup; + memcpy(ht, m, sizeof(struct hbucket) + + m->size * dsize); + ht->size = m->size + AHASH_INIT_SIZE; + t->hregion[nr].ext_size += + ext_size(AHASH_INIT_SIZE, + dsize); + kfree(m); + m = ht; + RCU_INIT_POINTER(hbucket(t, key), ht); } - if (ret < 0) - goto cleanup; - memcpy(ht, m, sizeof(struct hbucket) + - m->size * dsize); - ht->size = m->size + AHASH_INIT_SIZE; - extsize += ext_size(AHASH_INIT_SIZE, dsize); - kfree(m); - m = ht; - RCU_INIT_POINTER(hbucket(t, key), ht); - } - d = ahash_data(m, m->pos, dsize); - memcpy(d, data, dsize); - set_bit(m->pos++, m->used); + d = ahash_data(m, m->pos, dsize); + memcpy(d, data, dsize); + set_bit(m->pos++, m->used); + t->hregion[nr].elements++; #ifdef IP_SET_HASH_WITH_NETS - mtype_data_reset_flags(d, &flags); + mtype_data_reset_flags(d, &flags); #endif + } } + rcu_read_unlock_bh(); } - rcu_assign_pointer(h->table, t); - set->ext_size = extsize; - spin_unlock_bh(&set->lock); + /* There can't be any other writer. */ + rcu_assign_pointer(h->table, t); /* Give time to other readers of the set */ synchronize_rcu(); pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, orig->htable_bits, orig, t->htable_bits, t); - /* If there's nobody else dumping the table, destroy it */ + /* Add/delete elements processed by the SET target during resize. + * Kernel-side add cannot trigger a resize and userspace actions + * are serialized by the mutex. 
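The h->ad list is a write-log-and-replay scheme: while atomic_read(&t->ref) shows a resize in flight, kernel-side ADD/DEL requests are queued instead of racing with the table copy, and the loop below re-applies them once the new table is published. A compressed user-space sketch of the same idea (names hypothetical, list ordering and locking simplified):

#include <stdlib.h>
#include <string.h>

struct resize_ad {		// stands in for struct mtype_resize_ad
	struct resize_ad *next;
	int ad;			// 0 == ADD, 1 == DEL
	char value[16];		// element value
};

static struct resize_ad *backlog;

// Writers call this instead of touching the table while it is being
// resized; the resizer drains the list after swapping the tables.
static int defer_op(int ad, const char *value)
{
	struct resize_ad *x = calloc(1, sizeof(*x));

	if (!x)
		return -1;	// best effort, as in the kernel code
	x->ad = ad;
	strncpy(x->value, value, sizeof(x->value) - 1);
	x->next = backlog;
	backlog = x;
	return 0;
}

static void replay(void (*add)(const char *), void (*del)(const char *))
{
	struct resize_ad *x, *next;

	for (x = backlog; x; x = next) {
		next = x->next;
		(x->ad == 0 ? add : del)(x->value);
		free(x);
	}
	backlog = NULL;
}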
+ */ + list_for_each_safe(l, lt, &h->ad) { + x = list_entry(l, struct mtype_resize_ad, list); + if (x->ad == IPSET_ADD) { + mtype_add(set, &x->d, &x->ext, &x->mext, x->flags); + } else { + mtype_del(set, &x->d, NULL, NULL, 0); + } + list_del(l); + kfree(l); + } + /* If there's nobody else using the table, destroy it */ if (atomic_dec_and_test(&orig->uref)) { pr_debug("Table destroy by resize %p\n", orig); mtype_ahash_destroy(set, orig, false); @@ -677,15 +810,44 @@ out: return ret; cleanup: + rcu_read_unlock_bh(); atomic_set(&orig->ref, 0); atomic_dec(&orig->uref); - spin_unlock_bh(&set->lock); mtype_ahash_destroy(set, t, false); if (ret == -EAGAIN) goto retry; goto out; } +/* Get the current number of elements and ext_size in the set */ +static void +mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size) +{ + struct htype *h = set->data; + const struct htable *t; + u32 i, j, r; + struct hbucket *n; + struct mtype_elem *data; + + t = rcu_dereference_bh(h->table); + for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) { + for (i = ahash_bucket_start(r, t->htable_bits); + i < ahash_bucket_end(r, t->htable_bits); i++) { + n = rcu_dereference_bh(hbucket(t, i)); + if (!n) + continue; + for (j = 0; j < n->pos; j++) { + if (!test_bit(j, n->used)) + continue; + data = ahash_data(n, j, set->dsize); + if (!SET_ELEM_EXPIRED(set, data)) + (*elements)++; + } + } + *ext_size += t->hregion[r].ext_size; + } +} + /* Add an element to a hash and update the internal counters when succeeded, * otherwise report the proper error code. */ @@ -698,32 +860,49 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, const struct mtype_elem *d = value; struct mtype_elem *data; struct hbucket *n, *old = ERR_PTR(-ENOENT); - int i, j = -1; + int i, j = -1, ret; bool flag_exist = flags & IPSET_FLAG_EXIST; bool deleted = false, forceadd = false, reuse = false; - u32 key, multi = 0; + u32 r, key, multi = 0, elements, maxelem; - if (set->elements >= h->maxelem) { - if (SET_WITH_TIMEOUT(set)) - /* FIXME: when set is full, we slow down here */ - mtype_expire(set, h); - if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set)) + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); + key = HKEY(value, h->initval, t->htable_bits); + r = ahash_region(key, t->htable_bits); + atomic_inc(&t->uref); + elements = t->hregion[r].elements; + maxelem = t->maxelem; + if (elements >= maxelem) { + u32 e; + if (SET_WITH_TIMEOUT(set)) { + rcu_read_unlock_bh(); + mtype_gc_do(set, h, t, r); + rcu_read_lock_bh(); + } + maxelem = h->maxelem; + elements = 0; + for (e = 0; e < ahash_numof_locks(t->htable_bits); e++) + elements += t->hregion[e].elements; + if (elements >= maxelem && SET_WITH_FORCEADD(set)) forceadd = true; } + rcu_read_unlock_bh(); - t = ipset_dereference_protected(h->table, set); - key = HKEY(value, h->initval, t->htable_bits); - n = __ipset_dereference_protected(hbucket(t, key), 1); + spin_lock_bh(&t->hregion[r].lock); + n = rcu_dereference_bh(hbucket(t, key)); if (!n) { - if (forceadd || set->elements >= h->maxelem) + if (forceadd || elements >= maxelem) goto set_full; old = NULL; n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize, GFP_ATOMIC); - if (!n) - return -ENOMEM; + if (!n) { + ret = -ENOMEM; + goto unlock; + } n->size = AHASH_INIT_SIZE; - set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize); + t->hregion[r].ext_size += + ext_size(AHASH_INIT_SIZE, set->dsize); goto copy_elem; } for (i = 0; i < n->pos; i++) { @@ -737,38 +916,37 @@ mtype_add(struct ip_set *set, void *value, const 
struct ip_set_ext *ext, } data = ahash_data(n, i, set->dsize); if (mtype_data_equal(data, d, &multi)) { - if (flag_exist || - (SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(data, set)))) { + if (flag_exist || SET_ELEM_EXPIRED(set, data)) { /* Just the extensions could be overwritten */ j = i; goto overwrite_extensions; } - return -IPSET_ERR_EXIST; + ret = -IPSET_ERR_EXIST; + goto unlock; } /* Reuse first timed out entry */ - if (SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(data, set)) && - j == -1) { + if (SET_ELEM_EXPIRED(set, data) && j == -1) { j = i; reuse = true; } } if (reuse || forceadd) { + if (j == -1) + j = 0; data = ahash_data(n, j, set->dsize); if (!deleted) { #ifdef IP_SET_HASH_WITH_NETS for (i = 0; i < IPSET_NET_COUNT; i++) - mtype_del_cidr(h, + mtype_del_cidr(set, h, NCIDR_PUT(DCIDR_GET(data->cidr, i)), i); #endif ip_set_ext_destroy(set, data); - set->elements--; + t->hregion[r].elements--; } goto copy_data; } - if (set->elements >= h->maxelem) + if (elements >= maxelem) goto set_full; /* Create a new slot */ if (n->pos >= n->size) { @@ -776,28 +954,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, if (n->size >= AHASH_MAX(h)) { /* Trigger rehashing */ mtype_data_next(&h->next, d); - return -EAGAIN; + ret = -EAGAIN; + goto resize; } old = n; n = kzalloc(sizeof(*n) + (old->size + AHASH_INIT_SIZE) * set->dsize, GFP_ATOMIC); - if (!n) - return -ENOMEM; + if (!n) { + ret = -ENOMEM; + goto unlock; + } memcpy(n, old, sizeof(struct hbucket) + old->size * set->dsize); n->size = old->size + AHASH_INIT_SIZE; - set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize); + t->hregion[r].ext_size += + ext_size(AHASH_INIT_SIZE, set->dsize); } copy_elem: j = n->pos++; data = ahash_data(n, j, set->dsize); copy_data: - set->elements++; + t->hregion[r].elements++; #ifdef IP_SET_HASH_WITH_NETS for (i = 0; i < IPSET_NET_COUNT; i++) - mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i); + mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i); #endif memcpy(data, d, sizeof(struct mtype_elem)); overwrite_extensions: @@ -820,13 +1002,41 @@ overwrite_extensions: if (old) kfree_rcu(old, rcu); } + ret = 0; +resize: + spin_unlock_bh(&t->hregion[r].lock); + if (atomic_read(&t->ref) && ext->target) { + /* Resize is in process and kernel side add, save values */ + struct mtype_resize_ad *x; + + x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC); + if (!x) + /* Don't bother */ + goto out; + x->ad = IPSET_ADD; + memcpy(&x->d, value, sizeof(struct mtype_elem)); + memcpy(&x->ext, ext, sizeof(struct ip_set_ext)); + memcpy(&x->mext, mext, sizeof(struct ip_set_ext)); + x->flags = flags; + spin_lock_bh(&set->lock); + list_add_tail(&x->list, &h->ad); + spin_unlock_bh(&set->lock); + } + goto out; - return 0; set_full: if (net_ratelimit()) pr_warn("Set %s is full, maxelem %u reached\n", - set->name, h->maxelem); - return -IPSET_ERR_HASH_FULL; + set->name, maxelem); + ret = -IPSET_ERR_HASH_FULL; +unlock: + spin_unlock_bh(&t->hregion[r].lock); +out: + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) { + pr_debug("Table destroy after resize by add: %p\n", t); + mtype_ahash_destroy(set, t, false); + } + return ret; } /* Delete an element from the hash and free up space if possible. 
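One detail of the add path above worth spelling out: maxelem is now enforced per region first (t->maxelem = h->maxelem / ahash_numof_locks(htable_bits)), and the exact, summed count is recomputed only when the cheap per-region test fails. A sketch of that two-level check, with hypothetical numbers:

// e.g. maxelem = 65536 over 16 regions -> 4096 per region
static int set_has_room(const unsigned int *region_elems,
			unsigned int numof_locks,
			unsigned int r, unsigned int maxelem)
{
	unsigned int i, total = 0;

	if (region_elems[r] < maxelem / numof_locks)
		return 1;		// fast path: current region only
	for (i = 0; i < numof_locks; i++)
		total += region_elems[i];	// slow path: exact count
	return total < maxelem;
}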
@@ -840,13 +1050,23 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, const struct mtype_elem *d = value; struct mtype_elem *data; struct hbucket *n; - int i, j, k, ret = -IPSET_ERR_EXIST; + struct mtype_resize_ad *x = NULL; + int i, j, k, r, ret = -IPSET_ERR_EXIST; u32 key, multi = 0; size_t dsize = set->dsize; - t = ipset_dereference_protected(h->table, set); + /* Userspace add and resize are excluded by the mutex. + * Kernelspace add does not trigger a resize. + */ + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); key = HKEY(value, h->initval, t->htable_bits); - n = __ipset_dereference_protected(hbucket(t, key), 1); + r = ahash_region(key, t->htable_bits); + atomic_inc(&t->uref); + rcu_read_unlock_bh(); + + spin_lock_bh(&t->hregion[r].lock); + n = rcu_dereference_bh(hbucket(t, key)); if (!n) goto out; for (i = 0, k = 0; i < n->pos; i++) { @@ -857,8 +1077,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, data = ahash_data(n, i, dsize); if (!mtype_data_equal(data, d, &multi)) continue; - if (SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(data, set))) + if (SET_ELEM_EXPIRED(set, data)) goto out; ret = 0; @@ -866,20 +1085,33 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, smp_mb__after_atomic(); if (i + 1 == n->pos) n->pos--; - set->elements--; + t->hregion[r].elements--; #ifdef IP_SET_HASH_WITH_NETS for (j = 0; j < IPSET_NET_COUNT; j++) - mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)), - j); + mtype_del_cidr(set, h, + NCIDR_PUT(DCIDR_GET(d->cidr, j)), j); #endif ip_set_ext_destroy(set, data); + if (atomic_read(&t->ref) && ext->target) { + /* Resize is in process and kernel side del, + * save values + */ + x = kzalloc(sizeof(struct mtype_resize_ad), + GFP_ATOMIC); + if (x) { + x->ad = IPSET_DEL; + memcpy(&x->d, value, + sizeof(struct mtype_elem)); + x->flags = flags; + } + } for (; i < n->pos; i++) { if (!test_bit(i, n->used)) k++; } if (n->pos == 0 && k == 0) { - set->ext_size -= ext_size(n->size, dsize); + t->hregion[r].ext_size -= ext_size(n->size, dsize); rcu_assign_pointer(hbucket(t, key), NULL); kfree_rcu(n, rcu); } else if (k >= AHASH_INIT_SIZE) { @@ -898,7 +1130,8 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, k++; } tmp->pos = k; - set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize); + t->hregion[r].ext_size -= + ext_size(AHASH_INIT_SIZE, dsize); rcu_assign_pointer(hbucket(t, key), tmp); kfree_rcu(n, rcu); } @@ -906,6 +1139,16 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, out: + spin_unlock_bh(&t->hregion[r].lock); + if (x) { + spin_lock_bh(&set->lock); + list_add(&x->list, &h->ad); + spin_unlock_bh(&set->lock); + } + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) { + pr_debug("Table destroy after resize by del: %p\n", t); + mtype_ahash_destroy(set, t, false); + } return ret; } @@ -991,6 +1234,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, int i, ret = 0; u32 key, multi = 0; + rcu_read_lock_bh(); t = rcu_dereference_bh(h->table); #ifdef IP_SET_HASH_WITH_NETS /* If we test an IP address and not a network address, @@ -1022,6 +1266,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, goto out; } out: + rcu_read_unlock_bh(); return ret; } @@ -1033,23 +1278,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) const struct htable *t; struct nlattr *nested; size_t memsize; + u32 elements = 0; + size_t ext_size = 0; u8 htable_bits; - /* If any members
have expired, set->elements will be wrong - * mytype_expire function will update it with the right count. - * we do not hold set->lock here, so grab it first. - * set->elements can still be incorrect in the case of a huge set, - * because elements might time out during the listing. - */ - if (SET_WITH_TIMEOUT(set)) { - spin_lock_bh(&set->lock); - mtype_expire(set, h); - spin_unlock_bh(&set->lock); - } - rcu_read_lock_bh(); - t = rcu_dereference_bh_nfnl(h->table); - memsize = mtype_ahash_memsize(h, t) + set->ext_size; + t = rcu_dereference_bh(h->table); + mtype_ext_size(set, &elements, &ext_size); + memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size; htable_bits = t->htable_bits; rcu_read_unlock_bh(); @@ -1071,7 +1307,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) #endif if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) || - nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements))) + nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements))) goto nla_put_failure; if (unlikely(ip_set_put_flags(skb, set))) goto nla_put_failure; @@ -1091,15 +1327,15 @@ mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start) if (start) { rcu_read_lock_bh(); - t = rcu_dereference_bh_nfnl(h->table); + t = ipset_dereference_bh_nfnl(h->table); atomic_inc(&t->uref); cb->args[IPSET_CB_PRIVATE] = (unsigned long)t; rcu_read_unlock_bh(); } else if (cb->args[IPSET_CB_PRIVATE]) { t = (struct htable *)cb->args[IPSET_CB_PRIVATE]; if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) { - /* Resizing didn't destroy the hash table */ - pr_debug("Table destroy by dump: %p\n", t); + pr_debug("Table destroy after resize" + " by dump: %p\n", t); mtype_ahash_destroy(set, t, false); } cb->args[IPSET_CB_PRIVATE] = 0; @@ -1141,8 +1377,7 @@ mtype_list(const struct ip_set *set, if (!test_bit(i, n->used)) continue; e = ahash_data(n, i, set->dsize); - if (SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(e, set))) + if (SET_ELEM_EXPIRED(set, e)) continue; pr_debug("list hash %lu hbucket %p i %u, data %p\n", cb->args[IPSET_CB_ARG0], n, i, e); @@ -1208,6 +1443,7 @@ static const struct ip_set_type_variant mtype_variant = { .uref = mtype_uref, .resize = mtype_resize, .same_set = mtype_same_set, + .region_lock = true, }; #ifdef IP_SET_EMIT_CREATE @@ -1226,6 +1462,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, size_t hsize; struct htype *h; struct htable *t; + u32 i; pr_debug("Create set %s with family %s\n", set->name, set->family == NFPROTO_IPV4 ?
"inet" : "inet6"); @@ -1294,6 +1531,15 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, kfree(h); return -ENOMEM; } + t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits)); + if (!t->hregion) { + kfree(t); + kfree(h); + return -ENOMEM; + } + h->gc.set = set; + for (i = 0; i < ahash_numof_locks(hbits); i++) + spin_lock_init(&t->hregion[i].lock); h->maxelem = maxelem; #ifdef IP_SET_HASH_WITH_NETMASK h->netmask = netmask; @@ -1304,9 +1550,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, get_random_bytes(&h->initval, sizeof(h->initval)); t->htable_bits = hbits; + t->maxelem = h->maxelem / ahash_numof_locks(hbits); RCU_INIT_POINTER(h->table, t); - h->set = set; + INIT_LIST_HEAD(&h->ad); set->data = h; #ifndef IP_SET_PROTO_UNDEF if (set->family == NFPROTO_IPV4) { @@ -1329,12 +1576,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, #ifndef IP_SET_PROTO_UNDEF if (set->family == NFPROTO_IPV4) #endif - IPSET_TOKEN(HTYPE, 4_gc_init)(set, - IPSET_TOKEN(HTYPE, 4_gc)); + IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc); #ifndef IP_SET_PROTO_UNDEF else - IPSET_TOKEN(HTYPE, 6_gc_init)(set, - IPSET_TOKEN(HTYPE, 6_gc)); + IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc); #endif } pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 02f2f636798d..f75ea126d5c8 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -48,10 +48,26 @@ static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444); MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size"); +bool share_ns; +EXPORT_SYMBOL_GPL(share_ns); +module_param_named(sharens, share_ns, bool, 0444); +MODULE_PARM_DESC(sharens, "share network namespace in IPVS"); + /* size and mask values */ int ip_vs_conn_tab_size __read_mostly; static int ip_vs_conn_tab_mask __read_mostly; +/* retrieve origin net in skb for xmit + * local-out: ip_queue_xmit->skb_dst_set_noref + * local-in: ip_route_input_slow set it + */ +struct net *ip_vs_skb_net(struct sk_buff *skb) +{ + if (skb_dst(skb)) + return dev_net(skb_dst(skb)->dev); + return NULL; +} + /* * Connection hash table: for input and output packets lookups of IPVS */ diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 8b80ab794a92..a28f64c4dc57 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -738,11 +738,15 @@ static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs, static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, unsigned int hooknum) { + struct net *net; if (!sysctl_snat_reroute(ipvs)) return 0; /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */ if (NF_INET_LOCAL_IN == hooknum) return 0; + net = ipvs->net; + if (share_ns) + net = ip_vs_skb_net(skb); #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { struct dst_entry *dst = skb_dst(skb); @@ -753,7 +757,7 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af, } else #endif if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && - ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0) + ip_route_me_harder(net, skb, RTN_LOCAL) != 0) return 1; return 0; @@ -1341,6 +1345,12 @@ drop: return NF_STOLEN; } +static void switch_netns(struct netns_ipvs **ipvs, struct sk_buff *skb) +{ + if (share_ns) + *ipvs = net_ipvs(&init_net); +} + /* * Check if outgoing packet belongs to the established ip_vs_conn. 
*/ @@ -1354,7 +1364,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in struct sock *sk; EnterFunction(11); - + switch_netns(&ipvs, skb); /* Already marked as IPVS request or reply? */ if (skb->ipvs_property) return NF_ACCEPT; @@ -1507,10 +1517,30 @@ ip_vs_reply4(void *priv, struct sk_buff *skb, * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT. * Check if packet is reply for established ip_vs_conn. */ + static unsigned int ip_vs_local_reply4(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { + /* Skip the IPVS nf local_out hook for response packets. + * Consider the following steps: + * 1. curl vip:vport on a VM. The packet is (nodeip:tmpport->vip:vport). + * 2. IPVS does DNAT and chooses a POD on this VM. + * The packet is (nodeip:tmpport->rsip:rsport). + * 3. The POD replies. The packet is (rsip:rsport -> nodeip:tmpport). + * In nf local-out, the IPVS ip_vs_local_reply4 hook + * does reverse DNAT and modifies the packet to be + * (VIP:VPORT->nodeip:tmpport). + * 4. The packet goes out of the POD's ENI to the IaaS switch. + * 5. The IaaS switch will drop the packet, as it expects the source + * to be the ENI's IP. + * Any side effect of skipping the hook? + * If a client outside the cluster accesses the service on a CVM, + * and the CVM chooses a process running in the default net ns as + * the target, this breaks. However, it doesn't matter, as we have + * no such case. + */ + if (share_ns) + return NF_ACCEPT; return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET); } @@ -1664,6 +1694,8 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related, bool ipip, new_cp = false; union nf_inet_addr *raddr; + switch_netns(&ipvs, skb); + *related = 1; /* reassemble IP fragments */ @@ -1985,6 +2017,8 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int int conn_reuse_mode; struct sock *sk; + switch_netns(&ipvs, skb); + /* Already marked as IPVS request or reply? */ if (skb->ipvs_property) return NF_ACCEPT; @@ -2061,14 +2095,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { - bool uses_ct = false, resched = false; + bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); } else if (is_new_conn_expected(cp, conn_reuse_mode)) { - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; } else { @@ -2076,15 +2110,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int * that uses conntrack while it is still * referenced by controlled connection(s). */ - resched = !uses_ct; + resched = !old_ct; } } if (resched) { + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; if (!atomic_read(&cp->n_control)) ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); - if (uses_ct) + if (old_ct) return NF_DROP; cp = NULL; } @@ -2345,6 +2381,7 @@ static const struct nf_hook_ops ip_vs_ops[] = { }, #endif }; + /* * Initialize IP Virtual Server netns mem.
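Note that sharens is registered above via module_param_named(sharens, share_ns, bool, 0444), so it is read-only at runtime and must be chosen at load time, e.g. modprobe ip_vs sharens=1 when ip_vs is built as a module; the pr_info() added to ip_vs_init() in the next hunk then reports the effective value.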
*/ @@ -2488,6 +2525,7 @@ static int __init ip_vs_init(void) } pr_info("ipvs loaded.\n"); + pr_info("share ns is %d.\n", share_ns); return ret; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 3cccc88ef817..9644321713f8 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1944,6 +1944,12 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "estimation_timer_work", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "pmtu_disc", .maxlen = sizeof(int), @@ -1974,6 +1980,18 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "ignore_no_rs_error", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { + .procname = "tunnel_df_force_zero", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", @@ -4039,6 +4057,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3); tbl[idx++].data = &ipvs->sysctl_sync_retries; tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; + tbl[idx++].data = &ipvs->sysctl_estimation_timer_work; ipvs->sysctl_pmtu_disc = 1; tbl[idx++].data = &ipvs->sysctl_pmtu_disc; tbl[idx++].data = &ipvs->sysctl_backup_only; @@ -4046,6 +4065,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; tbl[idx++].data = &ipvs->sysctl_schedule_icmp; tbl[idx++].data = &ipvs->sysctl_ignore_tunneled; + tbl[idx++].data = &ipvs->sysctl_ignore_no_rs_error; + tbl[idx++].data = &ipvs->sysctl_tunnel_df_force_zero; ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); if (ipvs->sysctl_hdr == NULL) { diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 05b8112ffb37..66c51c6be37f 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c @@ -100,6 +100,9 @@ static void estimation_timer(struct timer_list *t) u64 rate; struct netns_ipvs *ipvs = from_timer(ipvs, t, est_timer); + if (!ipvs->sysctl_estimation_timer_work) + goto out; + spin_lock(&ipvs->est_lock); list_for_each_entry(e, &ipvs->est_list, list) { s = container_of(e, struct ip_vs_stats, est); @@ -131,6 +134,8 @@ static void estimation_timer(struct timer_list *t) spin_unlock(&s->lock); } spin_unlock(&ipvs->est_lock); + +out: mod_timer(&ipvs->est_timer, jiffies + 2*HZ); } diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c index 38495c6f6c7c..8711e704c558 100644 --- a/net/netfilter/ipvs/ip_vs_rr.c +++ b/net/netfilter/ipvs/ip_vs_rr.c @@ -82,7 +82,8 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, stop: spin_unlock_bh(&svc->sched_lock); - ip_vs_scheduler_err(svc, "no destination available"); + if (svc->ipvs && !sysctl_ignore_no_rs_error(svc->ipvs)) + ip_vs_scheduler_err(svc, "no destination available"); return NULL; out: diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 888d3068a492..a57fffd92274 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu) struct rtable *ort = skb_rtable(skb); if (!skb->dev && sk && sk_fullsock(sk)) - ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); + 
ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true); } static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af, @@ -314,8 +314,13 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, struct rtable *rt; /* Route to the other host */ int mtu; int local, noref = 1; + if (share_ns) + net = ip_vs_skb_net(skb); + if (!net) + return -1; - if (dest) { + /* when sharing the netns, the per-dest dst cache cannot be trusted */ + if (dest && !share_ns) { dest_dst = __ip_vs_dst_check(dest); if (likely(dest_dst)) rt = (struct rtable *) dest_dst->dst_cache; @@ -367,7 +372,10 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, goto err_put; } - if (unlikely(local)) { + /* traffic to a local address shall be routed via the lo dev + * so that traffic from a POD can choose itself as rs. + */ + if (!share_ns && unlikely(local)) { /* skb to local stack, preserve old route */ if (!noref) ip_rt_put(rt); @@ -639,6 +647,11 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local) { int ret = NF_STOLEN; + struct net *net; + + net = cp->ipvs->net; + if (share_ns) + net = ip_vs_skb_net(skb); skb->ipvs_property = 1; if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) @@ -655,7 +668,7 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, if (!local) { skb_forward_csum(skb); - NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, + NF_HOOK(pf, NF_INET_LOCAL_OUT, net, NULL, skb, NULL, skb_dst(skb)->dev, dst_output); } else ret = NF_ACCEPT; @@ -668,14 +681,18 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local) { int ret = NF_STOLEN; + struct net *net; + net = cp->ipvs->net; + if (share_ns) + net = ip_vs_skb_net(skb); skb->ipvs_property = 1; if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) ip_vs_notrack(skb); if (!local) { ip_vs_drop_early_demux_sk(skb); skb_forward_csum(skb); - NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, + NF_HOOK(pf, NF_INET_LOCAL_OUT, net, NULL, skb, NULL, skb_dst(skb)->dev, dst_output); } else ret = NF_ACCEPT; diff --git a/net/netfilter/ipvs/testcase.sh b/net/netfilter/ipvs/testcase.sh new file mode 100755 index 000000000000..2fb643be2381 --- /dev/null +++ b/net/netfilter/ipvs/testcase.sh @@ -0,0 +1,49 @@ + + # test share network ns + + ## setup the network +gw=192.168.1.1 +# podip="192.168.1.14" +# devname="ens18" +# nsname=sharens1 +# +# ip netns add $nsname +# ip link set $devname netns $nsname +# ip netns exec $nsname ip link set dev $devname up +# ip netns exec $nsname ip link set dev lo up +# ip netns exec $nsname ip addr add $podip dev $devname +# ip netns exec $nsname ip route add 192.168.1.0/24 dev $devname +# ip netns exec $nsname ip route add default via $gw dev $devname +# podip="192.168.1.15" +# devname="ens17" +# nsname=sharens2 +# +# ip netns add $nsname +# ip link set $devname netns $nsname +# ip netns exec $nsname ip link set dev $devname up +# ip netns exec $nsname ip link set dev lo up +# ip netns exec $nsname ip addr add $podip dev $devname +# ip netns exec $nsname ip route add 192.168.1.0/24 dev $devname +# ip netns exec $nsname ip route add default via $gw dev $devname + +#ip netns exec sharens1 python3 -m http.server & +#ip netns exec sharens2 python3 -m http.server & + +vip=1.2.3.4 +vport=80 + +tip1=192.168.1.3 +tip2=192.168.1.14 +tip3=192.168.1.15 +tport1=80 +tport2=8000 +tport3=8000 + +ipvsadm -A -t $vip:$vport -s rr +ipvsadm -a -t $vip:$vport -r $tip1:$tport1 -m +ipvsadm -a -t $vip:$vport -r $tip2:$tport2 -m
+ipvsadm -a -t $vip:$vport -r $tip3:$tport3 -m + +curl 1.2.3.4:80 +ip netns exec sharens1 curl 1.2.3.4:80 +ip netns exec sharens2 curl 1.2.3.4:80 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 5cd610b547e0..459e10ce042d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -2391,8 +2391,8 @@ int nf_conntrack_init_start(void) nf_conntrack_htable_size = (((nr_pages << PAGE_SHIFT) / 16384) / sizeof(struct hlist_head)); - if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE))) - nf_conntrack_htable_size = 65536; + if (nr_pages > (4ULL * (1024 * 1024 * 1024 / PAGE_SIZE))) + nf_conntrack_htable_size = 262144; else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) nf_conntrack_htable_size = 16384; if (nf_conntrack_htable_size < 32) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index e2d13cd18875..aa8adf930b3c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -3602,6 +3602,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list) list_for_each_entry(net, net_exit_list, exit_list) ctnetlink_net_exit(net); + + /* wait for other cpus until they are done with ctnl_notifiers */ + synchronize_rcu(); } static struct pernet_operations ctnetlink_net_ops = { diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index b6b14db3955b..b3f4a334f9d7 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -677,6 +677,9 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], unsigned int *timeouts = data; int i; + if (!timeouts) + timeouts = dn->dccp_timeout; + /* set default DCCP timeouts. */ for (i=0; i<CT_DCCP_MAX; i++) timeouts[i] = dn->dccp_timeout[i]; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index fce3d93f1541..4f897b14b606 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -114,7 +114,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */ -/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA}, +/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS}, @@ -130,7 +130,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { /* REPLY */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */ /* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */ -/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA}, +/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA}, /* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL}, /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR}, /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA}, @@ -316,7 +316,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb, ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; } - ct->proto.sctp.state = new_state; + ct->proto.sctp.state = SCTP_CONNTRACK_NONE; } return true; @@ -594,6 +594,9 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], struct nf_sctp_net *sn = nf_sctp_pernet(net); int i; + if (!timeouts) + timeouts = sn->timeouts; + /* set default SCTP timeouts.
*/ for (i=0; i<SCTP_CONNTRACK_MAX; i++) timeouts[i] = sn->timeouts[i]; diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 410809c669e1..c6d29db08bae 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -29,6 +29,7 @@ MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks"); module_param(enable_hooks, bool, 0000); unsigned int nf_conntrack_net_id __read_mostly; +int sysctl_nf_ct_proc_sched __read_mostly = 1; #ifdef CONFIG_NF_CONNTRACK_PROCFS void @@ -140,8 +141,11 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) struct hlist_nulls_node *head = ct_get_first(seq); if (head) - while (pos && (head = ct_get_next(seq, head))) + while (pos && (head = ct_get_next(seq, head))) { pos--; + if (sysctl_nf_ct_proc_sched) + cond_resched_rcu(); + } return pos ? NULL : head; } @@ -411,7 +415,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu + 1; return per_cpu_ptr(net->ct.stat, cpu); } - + (*pos)++; return NULL; } @@ -593,6 +597,7 @@ enum nf_ct_sysctl_index { NF_SYSCTL_CT_PROTO_TIMEOUT_GRE, NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM, #endif + NF_SYSCTL_CT_PROTO_PROC_SCHED, __NF_SYSCTL_CT_LAST_SYSCTL, }; @@ -920,6 +925,13 @@ static struct ctl_table nf_ct_sysctl_table[] = { .proc_handler = proc_dointvec_jiffies, }, #endif + [NF_SYSCTL_CT_PROTO_PROC_SCHED] = { + .procname = "nf_conntrack_proc_sched", + .data = &sysctl_nf_ct_proc_sched, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, {} }; diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index b9e7dd6e60ce..e92aa6b7eb80 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -189,6 +189,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; + iph = ip_hdr(skb); ports = (struct flow_ports *)(skb_network_header(skb) + thoff); tuple->src_v4.s_addr = iph->saddr; @@ -449,6 +450,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; + ip6h = ipv6_hdr(skb); ports = (struct flow_ports *)(skb_network_header(skb) + thoff); tuple->src_v6 = ip6h->saddr; diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c index 0a59c14b5177..64eedc17037a 100644 --- a/net/netfilter/nf_nat_proto.c +++ b/net/netfilter/nf_nat_proto.c @@ -233,6 +233,19 @@ icmp_manip_pkt(struct sk_buff *skb, return false; hdr = (struct icmphdr *)(skb->data + hdroff); + switch (hdr->type) { + case ICMP_ECHO: + case ICMP_ECHOREPLY: + case ICMP_TIMESTAMP: + case ICMP_TIMESTAMPREPLY: + case ICMP_INFO_REQUEST: + case ICMP_INFO_REPLY: + case ICMP_ADDRESS: + case ICMP_ADDRESSREPLY: + break; + default: + return true; + } inet_proto_csum_replace2(&hdr->checksum, skb, hdr->un.echo.id, tuple->src.u.icmp.id, false); hdr->un.echo.id = tuple->src.u.icmp.id; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index a2b58de82600..f8f52ff99cfb 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, goto err; } - if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) { + if (skb_dst(skb) && !skb_dst_force(skb)) { status = -ENETDOWN; goto err; } diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index b0930d4aba22..b9cbe1e2453e 100644 ---
a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu + 1; return per_cpu_ptr(snet->stats, cpu); } - + (*pos)++; return NULL; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 712a428509ad..068daff41f6e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -22,6 +22,8 @@ #include #include +#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-")) + static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); @@ -486,47 +488,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table) static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX]; +static const struct nft_chain_type * +__nft_chain_type_get(u8 family, enum nft_chain_types type) +{ + if (family >= NFPROTO_NUMPROTO || + type >= NFT_CHAIN_T_MAX) + return NULL; + + return chain_type[family][type]; +} + static const struct nft_chain_type * __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family) { + const struct nft_chain_type *type; int i; for (i = 0; i < NFT_CHAIN_T_MAX; i++) { - if (chain_type[family][i] != NULL && - !nla_strcmp(nla, chain_type[family][i]->name)) - return chain_type[family][i]; + type = __nft_chain_type_get(family, i); + if (!type) + continue; + if (!nla_strcmp(nla, type->name)) + return type; } return NULL; } -/* - * Loading a module requires dropping mutex that guards the - * transaction. - * We first need to abort any pending transactions as once - * mutex is unlocked a different client could start a new - * transaction. It must not see any 'future generation' - * changes * as these changes will never happen. - */ -#ifdef CONFIG_MODULES -static int __nf_tables_abort(struct net *net); +struct nft_module_request { + struct list_head list; + char module[MODULE_NAME_LEN]; + bool done; +}; -static void nft_request_module(struct net *net, const char *fmt, ...) +#ifdef CONFIG_MODULES +static int nft_request_module(struct net *net, const char *fmt, ...) { char module_name[MODULE_NAME_LEN]; + struct nft_module_request *req; va_list args; int ret; - __nf_tables_abort(net); - va_start(args, fmt); ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); va_end(args); - if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret)) - return; + if (ret >= MODULE_NAME_LEN) + return 0; - mutex_unlock(&net->nft.commit_mutex); - request_module("%s", module_name); - mutex_lock(&net->nft.commit_mutex); + list_for_each_entry(req, &net->nft.module_list, list) { + if (!strcmp(req->module, module_name)) { + if (req->done) + return 0; + + /* A request to load this module already exists. 
*/ + return -EAGAIN; + } + } + + req = kmalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->done = false; + strlcpy(req->module, module_name, MODULE_NAME_LEN); + list_add_tail(&req->list, &net->nft.module_list); + + return -EAGAIN; } #endif @@ -550,10 +576,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla, lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (autoload) { - nft_request_module(net, "nft-chain-%u-%.*s", family, - nla_len(nla), (const char *)nla_data(nla)); - type = __nf_tables_chain_type_lookup(nla, family); - if (type != NULL) + if (nft_request_module(net, "nft-chain-%u-%.*s", family, + nla_len(nla), + (const char *)nla_data(nla)) == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif @@ -978,12 +1003,18 @@ static int nft_flush_table(struct nft_ctx *ctx) } list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) { + if (!nft_is_active_next(ctx->net, flowtable)) + continue; + err = nft_delflowtable(ctx, flowtable); if (err < 0) goto out; } list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) { + if (!nft_is_active_next(ctx->net, obj)) + continue; + err = nft_delobj(ctx, obj); if (err < 0) goto out; @@ -1086,11 +1117,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx) void nft_register_chain_type(const struct nft_chain_type *ctype) { - if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO)) - return; - nfnl_lock(NFNL_SUBSYS_NFTABLES); - if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) { + if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) { nfnl_unlock(NFNL_SUBSYS_NFTABLES); return; } @@ -1174,7 +1202,8 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { .len = NFT_CHAIN_MAXNAMELEN - 1 }, [NFTA_CHAIN_HOOK] = { .type = NLA_NESTED }, [NFTA_CHAIN_POLICY] = { .type = NLA_U32 }, - [NFTA_CHAIN_TYPE] = { .type = NLA_STRING }, + [NFTA_CHAIN_TYPE] = { .type = NLA_STRING, + .len = NFT_MODULE_AUTOLOAD_LIMIT }, [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED }, [NFTA_CHAIN_FLAGS] = { .type = NLA_U32 }, }; @@ -1280,6 +1309,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, lockdep_commit_lock_is_held(net)); if (nft_dump_stats(skb, stats)) goto nla_put_failure; + + if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) && + nla_put_be32(skb, NFTA_CHAIN_FLAGS, + htonl(NFT_CHAIN_HW_OFFLOAD))) + goto nla_put_failure; } if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use))) @@ -1541,7 +1575,10 @@ static int nft_chain_parse_hook(struct net *net, hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM])); hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY])); - type = chain_type[family][NFT_CHAIN_T_DEFAULT]; + type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT); + if (!type) + return -EOPNOTSUPP; + if (nla[NFTA_CHAIN_TYPE]) { type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE], family, autoload); @@ -2050,9 +2087,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family, static int nft_expr_type_request_module(struct net *net, u8 family, struct nlattr *nla) { - nft_request_module(net, "nft-expr-%u-%.*s", family, - nla_len(nla), (char *)nla_data(nla)); - if (__nft_expr_type_get(family, nla)) + if (nft_request_module(net, "nft-expr-%u-%.*s", family, + nla_len(nla), (char *)nla_data(nla)) == -EAGAIN) return -EAGAIN; return 0; @@ -2078,9 +2114,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net, if (nft_expr_type_request_module(net, family, nla) == -EAGAIN) return ERR_PTR(-EAGAIN); - nft_request_module(net, 
"nft-expr-%.*s", - nla_len(nla), (char *)nla_data(nla)); - if (__nft_expr_type_get(family, nla)) + if (nft_request_module(net, "nft-expr-%.*s", + nla_len(nla), + (char *)nla_data(nla)) == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif @@ -2088,7 +2124,8 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net, } static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = { - [NFTA_EXPR_NAME] = { .type = NLA_STRING }, + [NFTA_EXPR_NAME] = { .type = NLA_STRING, + .len = NFT_MODULE_AUTOLOAD_LIMIT }, [NFTA_EXPR_DATA] = { .type = NLA_NESTED }, }; @@ -2170,9 +2207,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, err = PTR_ERR(ops); #ifdef CONFIG_MODULES if (err == -EAGAIN) - nft_expr_type_request_module(ctx->net, - ctx->family, - tb[NFTA_EXPR_NAME]); + if (nft_expr_type_request_module(ctx->net, + ctx->family, + tb[NFTA_EXPR_NAME]) != -EAGAIN) + err = -ENOENT; #endif goto err1; } @@ -3009,8 +3047,7 @@ nft_select_set_ops(const struct nft_ctx *ctx, lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (list_empty(&nf_tables_set_types)) { - nft_request_module(ctx->net, "nft-set"); - if (!list_empty(&nf_tables_set_types)) + if (nft_request_module(ctx->net, "nft-set") == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif @@ -3931,7 +3968,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = { [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY, .len = NFT_USERDATA_MAXLEN }, [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED }, - [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING }, + [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING, + .len = NFT_OBJ_MAXNAMELEN - 1 }, }; static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { @@ -4252,8 +4290,10 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; err = -EINVAL; - if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) + if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) { + nft_data_release(&elem.key.val, desc.type); return err; + } priv = set->ops->get(ctx->net, set, &elem, flags); if (IS_ERR(priv)) @@ -4489,14 +4529,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (nla[NFTA_SET_ELEM_DATA] == NULL && !(flags & NFT_SET_ELEM_INTERVAL_END)) return -EINVAL; - if (nla[NFTA_SET_ELEM_DATA] != NULL && - flags & NFT_SET_ELEM_INTERVAL_END) - return -EINVAL; } else { if (nla[NFTA_SET_ELEM_DATA] != NULL) return -EINVAL; } + if ((flags & NFT_SET_ELEM_INTERVAL_END) && + (nla[NFTA_SET_ELEM_DATA] || + nla[NFTA_SET_ELEM_OBJREF] || + nla[NFTA_SET_ELEM_TIMEOUT] || + nla[NFTA_SET_ELEM_EXPIRATION] || + nla[NFTA_SET_ELEM_USERDATA] || + nla[NFTA_SET_ELEM_EXPR])) + return -EINVAL; + timeout = 0; if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) @@ -5127,8 +5173,7 @@ nft_obj_type_get(struct net *net, u32 objtype) lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (type == NULL) { - nft_request_module(net, "nft-obj-%u", objtype); - if (__nft_obj_type_get(objtype)) + if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif @@ -5209,7 +5254,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - type = nft_obj_type_get(net, objtype); + type = __nft_obj_type_get(objtype); nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj); @@ -5744,8 +5789,7 @@ nft_flowtable_type_get(struct net *net, u8 family) 
lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (type == NULL) { - nft_request_module(net, "nf-flowtable-%u", family); - if (__nft_flowtable_type_get(family)) + if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif @@ -6692,6 +6736,18 @@ static void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } +static void nf_tables_module_autoload_cleanup(struct net *net) +{ + struct nft_module_request *req, *next; + + WARN_ON_ONCE(!list_empty(&net->nft.commit_list)); + list_for_each_entry_safe(req, next, &net->nft.module_list, list) { + WARN_ON_ONCE(!req->done); + list_del(&req->list); + kfree(req); + } +} + static void nf_tables_commit_release(struct net *net) { struct nft_trans *trans; @@ -6704,6 +6760,7 @@ static void nf_tables_commit_release(struct net *net) * to prevent expensive synchronize_rcu() in commit phase. */ if (list_empty(&net->nft.commit_list)) { + nf_tables_module_autoload_cleanup(net); mutex_unlock(&net->nft.commit_mutex); return; } @@ -6718,6 +6775,7 @@ static void nf_tables_commit_release(struct net *net) list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list); spin_unlock(&nf_tables_destroy_list_lock); + nf_tables_module_autoload_cleanup(net); mutex_unlock(&net->nft.commit_mutex); schedule_work(&trans_destroy_work); @@ -6909,6 +6967,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) return 0; } +static void nf_tables_module_autoload(struct net *net) +{ + struct nft_module_request *req, *next; + LIST_HEAD(module_list); + + list_splice_init(&net->nft.module_list, &module_list); + mutex_unlock(&net->nft.commit_mutex); + list_for_each_entry_safe(req, next, &module_list, list) { + request_module("%s", req->module); + req->done = true; + } + mutex_lock(&net->nft.commit_mutex); + list_splice(&module_list, &net->nft.module_list); +} + static void nf_tables_abort_release(struct nft_trans *trans) { switch (trans->msg_type) { @@ -6938,7 +7011,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) kfree(trans); } -static int __nf_tables_abort(struct net *net) +static int __nf_tables_abort(struct net *net, bool autoload) { struct nft_trans *trans, *next; struct nft_trans_elem *te; @@ -7060,6 +7133,11 @@ static int __nf_tables_abort(struct net *net) nf_tables_abort_release(trans); } + if (autoload) + nf_tables_module_autoload(net); + else + nf_tables_module_autoload_cleanup(net); + return 0; } @@ -7068,9 +7146,9 @@ static void nf_tables_cleanup(struct net *net) nft_validate_state_update(net, NFT_VALIDATE_SKIP); } -static int nf_tables_abort(struct net *net, struct sk_buff *skb) +static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload) { - int ret = __nf_tables_abort(net); + int ret = __nf_tables_abort(net, autoload); mutex_unlock(&net->nft.commit_mutex); @@ -7665,6 +7743,7 @@ static int __net_init nf_tables_init_net(struct net *net) { INIT_LIST_HEAD(&net->nft.tables); INIT_LIST_HEAD(&net->nft.commit_list); + INIT_LIST_HEAD(&net->nft.module_list); mutex_init(&net->nft.commit_mutex); net->nft.base_seq = 1; net->nft.validate_state = NFT_VALIDATE_SKIP; @@ -7676,10 +7755,11 @@ static void __net_exit nf_tables_exit_net(struct net *net) { mutex_lock(&net->nft.commit_mutex); if (!list_empty(&net->nft.commit_list)) - __nf_tables_abort(net); + __nf_tables_abort(net, false); __nft_release_tables(net); mutex_unlock(&net->nft.commit_mutex); WARN_ON_ONCE(!list_empty(&net->nft.tables)); + WARN_ON_ONCE(!list_empty(&net->nft.module_list)); } static struct 
pernet_operations nf_tables_net_ops = { diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index e25dab8128db..914cd0618d5a 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -44,6 +44,9 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, expr = nft_expr_next(expr); } + if (num_actions == 0) + return ERR_PTR(-EOPNOTSUPP); + flow = nft_flow_rule_alloc(num_actions); if (!flow) return ERR_PTR(-ENOMEM); @@ -355,14 +358,14 @@ int nft_flow_rule_offload_commit(struct net *net) continue; if (trans->ctx.flags & NLM_F_REPLACE || - !(trans->ctx.flags & NLM_F_APPEND)) - return -EOPNOTSUPP; - + !(trans->ctx.flags & NLM_F_APPEND)) { + err = -EOPNOTSUPP; + break; + } err = nft_flow_offload_rule(trans->ctx.chain, nft_trans_rule(trans), nft_trans_flow_rule(trans), FLOW_CLS_REPLACE); - nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_DELRULE: if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) @@ -376,7 +379,23 @@ int nft_flow_rule_offload_commit(struct net *net) } if (err) - return err; + break; + } + + list_for_each_entry(trans, &net->nft.commit_list, list) { + if (trans->ctx.family != NFPROTO_NETDEV) + continue; + + switch (trans->msg_type) { + case NFT_MSG_NEWRULE: + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + continue; + + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + break; + default: + break; + } } return err; @@ -418,7 +437,7 @@ static void nft_indr_block_cb(struct net_device *dev, mutex_lock(&net->nft.commit_mutex); chain = __nft_offload_get_chain(dev); - if (chain) { + if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) { struct nft_base_chain *basechain; basechain = nft_base_chain(chain); @@ -446,6 +465,9 @@ static int nft_offload_netdev_event(struct notifier_block *this, struct net *net = dev_net(dev); struct nft_chain *chain; + if (event != NETDEV_UNREGISTER) + return NOTIFY_DONE; + mutex_lock(&net->nft.commit_mutex); chain = __nft_offload_get_chain(dev); if (chain) @@ -455,7 +477,7 @@ static int nft_offload_netdev_event(struct notifier_block *this, return NOTIFY_DONE; } -static struct flow_indr_block_ing_entry block_ing_entry = { +static struct flow_indr_block_entry block_ing_entry = { .cb = nft_indr_block_cb, .list = LIST_HEAD_INIT(block_ing_entry.list), }; @@ -472,13 +494,13 @@ int nft_offload_init(void) if (err < 0) return err; - flow_indr_add_block_ing_cb(&block_ing_entry); + flow_indr_add_block_cb(&block_ing_entry); return 0; } void nft_offload_exit(void) { - flow_indr_del_block_ing_cb(&block_ing_entry); + flow_indr_del_block_cb(&block_ing_entry); unregister_netdevice_notifier(&nft_offload_netdev_notifier); } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 4abbb452cf6c..99127e2d95a8 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -476,7 +476,7 @@ ack: } done: if (status & NFNL_BATCH_REPLAY) { - ss->abort(net, oskb); + ss->abort(net, oskb, true); nfnl_err_reset(&err_list); kfree_skb(skb); module_put(ss->owner); @@ -487,11 +487,11 @@ done: status |= NFNL_BATCH_REPLAY; goto done; } else if (err) { - ss->abort(net, oskb); + ss->abort(net, oskb, false); netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL); } } else { - ss->abort(net, oskb); + ss->abort(net, oskb, false); } if (ss->cleanup) ss->cleanup(net); diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 7525063c25f5..60838d5fb8e0 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -742,6 +742,8 
@@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { [NFCTH_NAME] = { .type = NLA_NUL_STRING, .len = NF_CT_HELPER_NAME_LEN-1 }, [NFCTH_QUEUE_NUM] = { .type = NLA_U32, }, + [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, }, + [NFCTH_STATUS] = { .type = NLA_U32, }, }; static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = { diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 02afa752dd2e..10e9d50e4e19 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -80,7 +80,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, tb[NFTA_BITWISE_MASK]); if (err < 0) return err; - if (d1.len != priv->len) { + if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) { err = -EINVAL; goto err1; } @@ -89,7 +89,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, tb[NFTA_BITWISE_XOR]); if (err < 0) goto err1; - if (d2.len != priv->len) { + if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) { err = -EINVAL; goto err2; } diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c index ff9ac8ae0031..eac4a901233f 100644 --- a/net/netfilter/nft_chain_nat.c +++ b/net/netfilter/nft_chain_nat.c @@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = { .name = "nat", .type = NFT_CHAIN_T_NAT, .family = NFPROTO_INET, + .owner = THIS_MODULE, .hook_mask = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT) | diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 0744b2bb46da..ae730dba60c8 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -80,6 +80,12 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (err < 0) return err; + if (desc.type != NFT_DATA_VALUE) { + err = -EINVAL; + nft_data_release(&priv->data, desc.type); + return err; + } + priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); err = nft_validate_register_load(priv->sreg, desc.len); if (err < 0) diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index f29bbc74c4bf..ff5ac173e897 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -197,9 +197,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx, static void nft_flow_offload_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { - struct nft_flow_offload *priv = nft_expr_priv(expr); - - priv->flowtable->use--; nf_ct_netns_put(ctx->net, ctx->family); } diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index aba11c2333f3..3087e23297db 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -28,6 +28,9 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr, struct nft_fwd_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; + /* This is used by ifb only. 
*/ + skb_set_redirected(pkt->skb, true); + nf_fwd_netdev_egress(pkt, oif); regs->verdict.code = NF_STOLEN; } @@ -190,6 +193,13 @@ nla_put_failure: return -1; } +static int nft_fwd_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS)); +} + static struct nft_expr_type nft_fwd_netdev_type; static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = { .type = &nft_fwd_netdev_type, @@ -197,6 +207,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = { .eval = nft_fwd_neigh_eval, .init = nft_fwd_neigh_init, .dump = nft_fwd_neigh_dump, + .validate = nft_fwd_validate, }; static const struct nft_expr_ops nft_fwd_netdev_ops = { @@ -205,6 +216,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = { .eval = nft_fwd_netdev_eval, .init = nft_fwd_netdev_init, .dump = nft_fwd_netdev_dump, + .validate = nft_fwd_validate, .offload = nft_fwd_netdev_offload, }; diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 317e3a9e8c5b..dda1e55d5801 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -33,19 +33,19 @@ static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); -static u8 nft_meta_weekday(unsigned long secs) +static u8 nft_meta_weekday(time64_t secs) { unsigned int dse; u8 wday; secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest; - dse = secs / NFT_META_SECS_PER_DAY; + dse = div_u64(secs, NFT_META_SECS_PER_DAY); wday = (4 + dse) % NFT_META_DAYS_PER_WEEK; return wday; } -static u32 nft_meta_hour(unsigned long secs) +static u32 nft_meta_hour(time64_t secs) { struct tm tm; @@ -250,10 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr, nft_reg_store64(dest, ktime_get_real_ns()); break; case NFT_META_TIME_DAY: - nft_reg_store8(dest, nft_meta_weekday(get_seconds())); + nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds())); break; case NFT_META_TIME_HOUR: - *dest = nft_meta_hour(get_seconds()); + *dest = nft_meta_hour(ktime_get_real_seconds()); break; default: WARN_ON(1); diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index f54d6ae15bb1..b42247aa48a9 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx, int err; u8 ttl; + if (!tb[NFTA_OSF_DREG]) + return -EINVAL; + if (tb[NFTA_OSF_TTL]) { ttl = nla_get_u8(tb[NFTA_OSF_TTL]); if (ttl > 2) diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 5cb2d8908d2a..0e3bfbc26e79 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -121,6 +121,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = { [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 }, [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 }, [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 }, + [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 }, }; static int nft_payload_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 4701fa8a45e7..89efcc5a533d 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -66,11 +66,21 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr if (err < 0) return err; + if (desc_from.type != NFT_DATA_VALUE) { + err = -EINVAL; + goto err1; + } + err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to), &desc_to, tb[NFTA_RANGE_TO_DATA]); if (err < 0) goto err1; + if (desc_to.type != NFT_DATA_VALUE) { + err = -EINVAL; + goto err2; + } + 
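/*
 * Context for the NFT_DATA_VALUE checks added in this hunk and in the
 * nft_cmp and nft_bitwise hunks above (illustrative note, not part of
 * the patch): nft_data_init() may parse either a value or a verdict,
 * and a verdict must never end up in a plain data register comparison.
 * The shape of the fix, as in nft_cmp_init():
 *
 *	if (desc.type != NFT_DATA_VALUE) {
 *		nft_data_release(&priv->data, desc.type);
 *		return -EINVAL;
 *	}
 */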
if (desc_from.len != desc_to.len) { err = -EINVAL; goto err2; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 57123259452f..a9f804f7a04a 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -74,8 +74,13 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set parent = rcu_dereference_raw(parent->rb_left); continue; } - if (nft_rbtree_interval_end(rbe)) - goto out; + if (nft_rbtree_interval_end(rbe)) { + if (nft_set_is_anonymous(set)) + return false; + parent = rcu_dereference_raw(parent->rb_left); + interval = NULL; + continue; + } *ext = &rbe->ext; return true; @@ -88,7 +93,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set *ext = &interval->ext; return true; } -out: + return false; } @@ -139,8 +144,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, if (flags & NFT_SET_ELEM_INTERVAL_END) interval = rbe; } else { - if (!nft_set_elem_active(&rbe->ext, genmask)) + if (!nft_set_elem_active(&rbe->ext, genmask)) { parent = rcu_dereference_raw(parent->rb_left); + continue; + } if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) || (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) == @@ -148,7 +155,11 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, *elem = rbe; return true; } - return false; + + if (nft_rbtree_interval_end(rbe)) + interval = NULL; + + parent = rcu_dereference_raw(parent->rb_left); } } diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index f92a82c73880..95980154ef02 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr, taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr); if (priv->sreg_port) - tport = regs->data[priv->sreg_port]; + tport = nft_reg_load16(®s->data[priv->sreg_port]); if (!tport) tport = hp->dest; @@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr, taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); if (priv->sreg_port) - tport = regs->data[priv->sreg_port]; + tport = nft_reg_load16(®s->data[priv->sreg_port]); if (!tport) tport = hp->dest; diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 3d4c2ae605a8..1effd4878619 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -76,7 +76,7 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx, struct nft_tunnel *priv = nft_expr_priv(expr); u32 len; - if (!tb[NFTA_TUNNEL_KEY] && + if (!tb[NFTA_TUNNEL_KEY] || !tb[NFTA_TUNNEL_DREG]) return -EINVAL; @@ -248,8 +248,9 @@ static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr, } static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = { + [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 }, [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 }, - [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 }, + [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 }, [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 }, }; @@ -266,6 +267,9 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, if (err < 0) return err; + if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]) + return -EINVAL; + version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])); switch (version) { case ERSPAN_VERSION: @@ -335,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, }, 
[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, }, [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, }, + [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, }, + [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, }, [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, }, }; @@ -501,8 +507,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, static int nft_tunnel_ports_dump(struct sk_buff *skb, struct ip_tunnel_info *info) { - if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 || - nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0) + if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 || + nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0) return -1; return 0; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index ce70c2576bb2..44f971f31992 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file)); struct nf_mttg_trav *trav = seq->private; + if (ppos != NULL) + ++(*ppos); + switch (trav->class) { case MTTG_TRAV_INIT: trav->class = MTTG_TRAV_NFP_UNSPEC; @@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, default: return NULL; } - - if (ppos != NULL) - ++*ppos; return trav; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index ced3fc8fad7c..8c835ad63729 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -36,6 +36,7 @@ #include #include #include +#include <linux/refcount.h> #include #define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ @@ -114,7 +115,7 @@ struct dsthash_ent { struct xt_hashlimit_htable { struct hlist_node node; /* global list of all htables */ - int use; + refcount_t use; u_int8_t family; bool rnd_initialized; @@ -315,7 +316,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, for (i = 0; i < hinfo->cfg.size; i++) INIT_HLIST_HEAD(&hinfo->hash[i]); - hinfo->use = 1; + refcount_set(&hinfo->use, 1); hinfo->count = 0; hinfo->family = family; hinfo->rnd_initialized = false; @@ -357,21 +358,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, return 0; } -static bool select_all(const struct xt_hashlimit_htable *ht, - const struct dsthash_ent *he) -{ - return true; -} - -static bool select_gc(const struct xt_hashlimit_htable *ht, - const struct dsthash_ent *he) -{ - return time_after_eq(jiffies, he->expires); -} - -static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, - bool (*select)(const struct xt_hashlimit_htable *ht, - const struct dsthash_ent *he)) +static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all) { unsigned int i; @@ -381,7 +368,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, spin_lock_bh(&ht->lock); hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) { - if ((*select)(ht, dh)) + if (time_after_eq(jiffies, dh->expires) || select_all) dsthash_free(ht, dh); } spin_unlock_bh(&ht->lock); @@ -395,7 +382,7 @@ static void htable_gc(struct work_struct *work) ht = container_of(work, struct xt_hashlimit_htable, gc_work.work); - htable_selective_cleanup(ht, select_gc); + htable_selective_cleanup(ht, false); queue_delayed_work(system_power_efficient_wq, &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval)); @@ -415,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo) remove_proc_entry(hinfo->name, parent);
} -static void htable_destroy(struct xt_hashlimit_htable *hinfo) -{ - cancel_delayed_work_sync(&hinfo->gc_work); - htable_remove_proc_entry(hinfo); - htable_selective_cleanup(hinfo, select_all); - kfree(hinfo->name); - vfree(hinfo); -} - static struct xt_hashlimit_htable *htable_find_get(struct net *net, const char *name, u_int8_t family) @@ -434,7 +412,7 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net, hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) { if (!strcmp(name, hinfo->name) && hinfo->family == family) { - hinfo->use++; + refcount_inc(&hinfo->use); return hinfo; } } @@ -443,12 +421,16 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net, static void htable_put(struct xt_hashlimit_htable *hinfo) { - mutex_lock(&hashlimit_mutex); - if (--hinfo->use == 0) { + if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) { hlist_del(&hinfo->node); - htable_destroy(hinfo); + htable_remove_proc_entry(hinfo); + mutex_unlock(&hashlimit_mutex); + + cancel_delayed_work_sync(&hinfo->gc_work); + htable_selective_cleanup(hinfo, true); + kfree(hinfo->name); + vfree(hinfo); } - mutex_unlock(&hashlimit_mutex); } /* The algorithm used is the Simple Token Bucket Filter (TBF) @@ -851,6 +833,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3); } +#define HASHLIMIT_MAX_SIZE 1048576 + static int hashlimit_mt_check_common(const struct xt_mtchk_param *par, struct xt_hashlimit_htable **hinfo, struct hashlimit_cfg3 *cfg, @@ -861,6 +845,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par, if (cfg->gc_interval == 0 || cfg->expire == 0) return -EINVAL; + if (cfg->size > HASHLIMIT_MAX_SIZE) { + cfg->size = HASHLIMIT_MAX_SIZE; + pr_info_ratelimited("size too large, truncated to %u\n", cfg->size); + } + if (cfg->max > HASHLIMIT_MAX_SIZE) { + cfg->max = HASHLIMIT_MAX_SIZE; + pr_info_ratelimited("max too large, truncated to %u\n", cfg->max); + } if (par->family == NFPROTO_IPV4) { if (cfg->srcmask > 32 || cfg->dstmask > 32) return -EINVAL; diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 781e0b482189..6c2582a19766 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos) const struct recent_entry *e = v; const struct list_head *head = e->list.next; + (*pos)++; while (head == &t->iphash[st->bucket]) { if (++st->bucket >= ip_list_hash_size) return NULL; head = t->iphash[st->bucket].next; } - (*pos)++; return list_entry(head, struct recent_entry, list); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 90b2ab9dd449..474723bde1cc 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -670,7 +670,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, netlink_lock_table(); #ifdef CONFIG_MODULES - if (!nl_table[protocol].registered) { + if (!nl_table[protocol].registered && protocol != NETLINK_AUDIT && protocol != NETLINK_SELINUX) { netlink_unlock_table(); request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol); netlink_lock_table(); @@ -1014,7 +1014,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, if (nlk->netlink_bind && groups) { int group; - for (group = 0; group < nlk->ngroups; group++) { + /* nl_groups is a u32, so cap the maximum groups we can bind */ + for (group = 0; group < BITS_PER_TYPE(u32); group++) { if (!test_bit(group, &groups)) 
continue; err = nlk->netlink_bind(net, group + 1); @@ -1033,7 +1034,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, netlink_insert(sk, nladdr->nl_pid) : netlink_autobind(sock); if (err) { - netlink_undo_bind(nlk->ngroups, groups, sk); + netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk); goto unlock; } } @@ -2433,7 +2434,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, in_skb->len)) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, (u8 *)extack->bad_attr - - in_skb->data)); + (u8 *)nlh)); } else { if (extack->cookie_len) WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 6f1b096e601c..43811b5219b5 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -181,13 +181,20 @@ exit: void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, struct sk_buff *skb) { - u8 gate = hdev->pipes[pipe].gate; u8 status = NFC_HCI_ANY_OK; struct hci_create_pipe_resp *create_info; struct hci_delete_pipe_noti *delete_info; struct hci_all_pipe_cleared_noti *cleared_info; + u8 gate; - pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd); + pr_debug("from pipe %x cmd %x\n", pipe, cmd); + + if (pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + + gate = hdev->pipes[pipe].gate; switch (cmd) { case NFC_HCI_ADM_NOTIFY_PIPE_CREATED: @@ -375,8 +382,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) { int r = 0; - u8 gate = hdev->pipes[pipe].gate; + u8 gate; + if (pipe >= NFC_HCI_MAX_PIPES) { + pr_err("Discarded event %x to invalid pipe %x\n", event, pipe); + goto exit; + } + + gate = hdev->pipes[pipe].gate; if (gate == NFC_HCI_INVALID_GATE) { pr_err("Discarded event %x to unopened pipe %x\n", event, pipe); goto exit; diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c index 78fe622eba65..11b554ce07ff 100644 --- a/net/nfc/nci/uart.c +++ b/net/nfc/nci/uart.c @@ -346,7 +346,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data, nu->rx_packet_len = -1; nu->rx_skb = nci_skb_alloc(nu->ndev, NCI_MAX_PACKET_SIZE, - GFP_KERNEL); + GFP_ATOMIC); if (!nu->rx_skb) return -ENOMEM; } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index afde0d763039..1b261375722e 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -32,6 +32,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING, .len = NFC_DEVICE_NAME_MAXSIZE }, [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, + [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 }, [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, @@ -43,7 +44,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED }, [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING, .len = NFC_FIRMWARE_NAME_MAXSIZE }, + [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 }, [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY }, + [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 }, + [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, }; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 1c77f520f474..99352f09deaa 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -166,7 +166,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, int err; err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype, - skb->mac_len); + skb->mac_len, + 
ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET); if (err) return err; @@ -179,7 +180,8 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, { int err; - err = skb_mpls_pop(skb, ethertype, skb->mac_len); + err = skb_mpls_pop(skb, ethertype, skb->mac_len, + ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET); if (err) return err; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 05249eb45082..283e8f9a5fd2 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -903,6 +903,17 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); + if (err == NF_ACCEPT && + ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { + if (maniptype == NF_NAT_MANIP_SRC) + maniptype = NF_NAT_MANIP_DST; + else + maniptype = NF_NAT_MANIP_SRC; + + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, + maniptype); + } + /* Mark NAT done if successful and update the flow key. */ if (err == NF_ACCEPT) ovs_nat_update_key(key, skb, maniptype); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d8c364d637b1..3eed90bfa2bf 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -704,9 +704,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts, { size_t len = NLMSG_ALIGN(sizeof(struct ovs_header)); - /* OVS_FLOW_ATTR_UFID */ + /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback + * see ovs_nla_put_identifier() + */ if (sfid && ovs_identifier_is_ufid(sfid)) len += nla_total_size(sfid->ufid_len); + else + len += nla_total_size(ovs_key_attr_size()); /* OVS_FLOW_ATTR_KEY */ if (!sfid || should_fill_key(sfid, ufid_flags)) @@ -882,7 +886,10 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow, retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb, info->snd_portid, info->snd_seq, 0, cmd, ufid_flags); - BUG_ON(retval < 0); + if (WARN_ON_ONCE(retval < 0)) { + kfree_skb(skb); + skb = ERR_PTR(retval); + } return skb; } @@ -1346,7 +1353,10 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) OVS_FLOW_CMD_DEL, ufid_flags); rcu_read_unlock(); - BUG_ON(err < 0); + if (WARN_ON_ONCE(err < 0)) { + kfree_skb(reply); + goto out_free; + } ovs_notify(&dp_flow_genl_family, reply, info); } else { @@ -1354,6 +1364,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) } } +out_free: ovs_flow_free(flow, true); return 0; unlock: @@ -1656,6 +1667,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_dp_reset_user_features(skb, info); } + ovs_unlock(); goto err_destroy_meters; } @@ -1672,7 +1684,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) return 0; err_destroy_meters: - ovs_unlock(); ovs_meters_exit(dp); err_destroy_ports_array: kfree(dp->ports); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 82a50e850245..56084a16d0f9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -544,7 +544,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po, msec = 1; div = ecmd.base.speed / 1000; } - } + } else + return DEFAULT_PRB_RETIRE_TOV; mbits = (blk_size_in_bytes * 8) / (1024 * 1024); @@ -1295,15 +1296,21 @@ static void packet_sock_destruct(struct sock *sk) static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) { - u32 rxhash; + u32 *history = po->rollover->history; + u32 victim, rxhash; int i, count = 0; rxhash = skb_get_hash(skb); for (i = 0; i < ROLLOVER_HLEN; i++) 
- if (po->rollover->history[i] == rxhash) + if (READ_ONCE(history[i]) == rxhash) count++; - po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; + victim = prandom_u32() % ROLLOVER_HLEN; + + /* Avoid dirtying the cache line if possible */ + if (READ_ONCE(history[victim]) != rxhash) + WRITE_ONCE(history[victim], rxhash); + return count > (ROLLOVER_HLEN >> 1); } @@ -2160,11 +2167,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; - unsigned short macoff, netoff, hdrlen; + unsigned short macoff, hdrlen; + unsigned int netoff; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; + unsigned int slot_id = 0; bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. @@ -2228,6 +2237,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } macoff = netoff - maclen; } + if (netoff > USHRT_MAX) { + atomic_inc(&po->tp_drops); + goto drop_n_restore; + } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && @@ -2266,6 +2279,20 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, TP_STATUS_KERNEL, (macoff+snaplen)); if (!h.raw) goto drop_n_account; + + if (po->tp_version <= TPACKET_V2) { + slot_id = po->rx_ring.head; + if (test_bit(slot_id, po->rx_ring.rx_owner_map)) + goto drop_n_account; + __set_bit(slot_id, po->rx_ring.rx_owner_map); + } + + if (do_vnet && + virtio_net_hdr_from_skb(skb, h.raw + macoff - + sizeof(struct virtio_net_hdr), + vio_le(), true, 0)) + goto drop_n_account; + if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); /* @@ -2278,12 +2305,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, status |= TP_STATUS_LOSING; } - if (do_vnet && - virtio_net_hdr_from_skb(skb, h.raw + macoff - - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) - goto drop_n_account; - po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; @@ -2371,7 +2392,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, #endif if (po->tp_version <= TPACKET_V2) { + spin_lock(&sk->sk_receive_queue.lock); __packet_set_status(po, h.raw, status); + __clear_bit(slot_id, po->rx_ring.rx_owner_map); + spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); @@ -4268,6 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); + unsigned long *rx_owner_map = NULL; int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; @@ -4353,6 +4378,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } break; default: + if (!tx_ring) { + rx_owner_map = bitmap_alloc(req->tp_frame_nr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!rx_owner_map) + goto out_free_pg_vec; + } break; } } @@ -4382,6 +4413,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); + if (po->tp_version <= TPACKET_V2) + swap(rb->rx_owner_map, rx_owner_map); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; @@ -4413,6 +4446,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } out_free_pg_vec: + bitmap_free(rx_owner_map); if (pg_vec) 
free_pg_vec(pg_vec, order, req->tp_block_nr); out: diff --git a/net/packet/internal.h b/net/packet/internal.h index 82fb2b10f790..907f4cd2a718 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -70,7 +70,10 @@ struct packet_ring_buffer { unsigned int __percpu *pending_refcnt; - struct tpacket_kbdq_core prb_bdqc; + union { + unsigned long *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; }; extern struct mutex fanout_mutex; diff --git a/net/psample/psample.c b/net/psample/psample.c index a6ceb0533b5b..6f2fbc6b9eb2 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -229,7 +229,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN - NLA_ALIGNTO; - nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC); + nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC); if (unlikely(!nl_skb)) return; diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 88f98f27ad88..3d24d45be5f4 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -196,7 +196,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->size = cpu_to_le32(len); hdr->confirm_rx = 0; - skb_put_padto(skb, ALIGN(len, 4)); + skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); mutex_lock(&node->ep_lock); if (node->ep) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index f9b08a6d8dbe..6c089320ae4f 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1002,10 +1002,13 @@ static void rfkill_sync_work(struct work_struct *work) int __must_check rfkill_register(struct rfkill *rfkill) { static unsigned long rfkill_no; - struct device *dev = &rfkill->dev; + struct device *dev; int error; - BUG_ON(!rfkill); + if (!rfkill) + return -EINVAL; + + dev = &rfkill->dev; mutex_lock(&rfkill_global_mutex); @@ -1316,10 +1319,12 @@ static const struct file_operations rfkill_fops = { .llseek = no_llseek, }; +#define RFKILL_NAME "rfkill" + static struct miscdevice rfkill_miscdev = { - .name = "rfkill", .fops = &rfkill_fops, - .minor = MISC_DYNAMIC_MINOR, + .name = RFKILL_NAME, + .minor = RFKILL_MINOR, }; static int __init rfkill_init(void) @@ -1371,3 +1376,6 @@ static void __exit rfkill_exit(void) class_unregister(&rfkill_class); } module_exit(rfkill_exit); + +MODULE_ALIAS_MISCDEV(RFKILL_MINOR); +MODULE_ALIAS("devname:" RFKILL_NAME); diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index d72ddb67bb74..a293238fe1e7 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -194,6 +194,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) service_in_use: write_unlock(&local->services_lock); rxrpc_unuse_local(local); + rxrpc_put_local(local); ret = -EADDRINUSE; error_unlock: release_sock(&rx->sk); @@ -370,44 +371,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call); * rxrpc_kernel_check_life - Check to see whether a call is still alive * @sock: The socket the call is on * @call: The call to check - * @_life: Where to store the life value * - * Allow a kernel service to find out whether a call is still alive - ie. we're - * getting ACKs from the server. Passes back in *_life a number representing - * the life state which can be compared to that returned by a previous call and - * return true if the call is still alive. - * - * If the life state stalls, rxrpc_kernel_probe_life() should be called and - * then 2RTT waited. + * Allow a kernel service to find out whether a call is still alive - + * ie. whether it has completed. 
*/ bool rxrpc_kernel_check_life(const struct socket *sock, - const struct rxrpc_call *call, - u32 *_life) + const struct rxrpc_call *call) { - *_life = call->acks_latest; return call->state != RXRPC_CALL_COMPLETE; } EXPORT_SYMBOL(rxrpc_kernel_check_life); -/** - * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive - * @sock: The socket the call is on - * @call: The call to check - * - * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to - * find out whether a call is still alive by pinging it. This should cause the - * life state to be bumped in about 2*RTT. - * - * The must be called in TASK_RUNNING state on pain of might_sleep() objecting. - */ -void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call) -{ - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, - rxrpc_propose_ack_ping_for_check_life); - rxrpc_send_ack_packet(call, true, NULL); -} -EXPORT_SYMBOL(rxrpc_kernel_probe_life); - /** * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call. * @sock: The socket the call is on @@ -899,6 +873,7 @@ static int rxrpc_release_sock(struct sock *sk) rxrpc_purge_queue(&sk->sk_receive_queue); rxrpc_unuse_local(rx->local); + rxrpc_put_local(rx->local); rx->local = NULL; key_put(rx->key); rx->key = NULL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7c7d10f2e0c1..394d18857979 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -209,6 +209,7 @@ struct rxrpc_skb_priv { struct rxrpc_security { const char *name; /* name of this service */ u8 security_index; /* security type provided */ + u32 no_key_abort; /* Abort code indicating no key */ /* Initialise a security service */ int (*init)(void); @@ -489,6 +490,7 @@ enum rxrpc_call_flag { RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */ RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */ RXRPC_CALL_IS_INTR, /* The call is interruptible */ + RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */ }; /* @@ -673,7 +675,6 @@ struct rxrpc_call { /* transmission-phase ACK management */ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ - rxrpc_serial_t acks_latest; /* serial number of latest ACK received */ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */ rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */ @@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn, struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *, struct sk_buff *); struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t); -void rxrpc_new_incoming_connection(struct rxrpc_sock *, - struct rxrpc_connection *, struct sk_buff *); +void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *, + const struct rxrpc_security *, struct key *, + struct sk_buff *); void rxrpc_unpublish_service_conn(struct rxrpc_connection *); /* @@ -1019,6 +1021,16 @@ void rxrpc_unuse_local(struct rxrpc_local *); void rxrpc_queue_local(struct rxrpc_local *); void rxrpc_destroy_all_locals(struct rxrpc_net *); +static inline bool __rxrpc_unuse_local(struct rxrpc_local *local) +{ + return atomic_dec_return(&local->active_users) == 0; +} + +static inline bool __rxrpc_use_local(struct rxrpc_local *local) +{ + return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0; +} + /* * misc.c */ @@ -1103,7 +1115,9 @@ extern const struct rxrpc_security rxkad; int 
__init rxrpc_init_security(void); void rxrpc_exit_security(void); int rxrpc_init_client_conn_security(struct rxrpc_connection *); -int rxrpc_init_server_conn_security(struct rxrpc_connection *); +bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *, + const struct rxrpc_security **, struct key **, + struct sk_buff *); /* * sendmsg.c diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 135bf5cd8dd5..70e44abf106c 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) kfree(b); } +/* + * Ping the other end to fill our RTT cache and to retrieve the rwind + * and MTU parameters. + */ +static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + ktime_t now = skb->tstamp; + + if (call->peer->rtt_usage < 3 || + ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, + true, true, + rxrpc_propose_ack_ping_for_params); +} + /* * Allocate a new incoming call from the prealloc pool, along with a connection * and a peer as necessary. @@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_local *local, struct rxrpc_peer *peer, struct rxrpc_connection *conn, + const struct rxrpc_security *sec, + struct key *key, struct sk_buff *skb) { struct rxrpc_backlog *b = rx->backlog; @@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, conn->params.local = rxrpc_get_local(local); conn->params.peer = peer; rxrpc_see_connection(conn); - rxrpc_new_incoming_connection(rx, conn, skb); + rxrpc_new_incoming_connection(rx, conn, sec, key, skb); } else { rxrpc_get_connection(conn); } @@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + const struct rxrpc_security *sec = NULL; struct rxrpc_connection *conn; struct rxrpc_peer *peer = NULL; - struct rxrpc_call *call; + struct rxrpc_call *call = NULL; + struct key *key = NULL; _enter(""); @@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; skb->priority = RX_INVALID_OPERATION; - _leave(" = NULL [close]"); - call = NULL; - goto out; + goto no_call; } /* The peer, connection and call may all have sprung into existence due @@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, */ conn = rxrpc_find_connection_rcu(local, skb, &peer); - call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); + if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb)) + goto no_call; + + call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb); + key_put(key); if (!call) { skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; - _leave(" = NULL [busy]"); - call = NULL; - goto out; + goto no_call; } trace_rxrpc_receive(call, rxrpc_receive_incoming, sp->hdr.serial, sp->hdr.seq); - /* Lock the call to prevent rxrpc_kernel_send/recv_data() and - * sendmsg()/recvmsg() inconveniently stealing the mutex once the - * notification is generated. - * - * The BUG should never happen because the kernel should be well - * behaved enough not to access the call before the first notification - * event and userspace is prevented from doing so until the state is - * appropriate. 
- */ - if (!mutex_trylock(&call->user_mutex)) - BUG(); - /* Make the call live. */ rxrpc_incoming_call(rx, call, skb); conn = call->conn; @@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, BUG(); } spin_unlock(&conn->state_lock); + spin_unlock(&rx->incoming_lock); + + rxrpc_send_ping(call, skb); if (call->state == RXRPC_CALL_SERVER_ACCEPTING) rxrpc_notify_socket(call); @@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, rxrpc_put_call(call, rxrpc_call_put); _leave(" = %p{%d}", call, call->debug_id); -out: - spin_unlock(&rx->incoming_lock); return call; + +no_call: + spin_unlock(&rx->incoming_lock); + _leave(" = NULL [%u]", skb->mark); + return NULL; } /* diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index a31c18c09894..c9f34b0a11df 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -493,7 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); - if (conn) + if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) rxrpc_disconnect_call(call); if (call->security) call->security->free_call_crypto(call); @@ -562,13 +562,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op) } /* - * Final call destruction under RCU. + * Final call destruction - but must be done in process context. */ -static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) +static void rxrpc_destroy_call(struct work_struct *work) { - struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); + struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); struct rxrpc_net *rxnet = call->rxnet; + rxrpc_put_connection(call->conn); rxrpc_put_peer(call->peer); kfree(call->rxtx_buffer); kfree(call->rxtx_annotations); @@ -577,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) wake_up_var(&rxnet->nr_calls); } +/* + * Final call destruction under RCU. + */ +static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) +{ + struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); + + if (in_softirq()) { + INIT_WORK(&call->processor, rxrpc_destroy_call); + if (!rxrpc_queue_work(&call->processor)) + BUG(); + } else { + rxrpc_destroy_call(&call->processor); + } +} + /* * clean up a call */ @@ -590,7 +607,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); - ASSERTCMP(call->conn, ==, NULL); rxrpc_cleanup_ring(call); rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 376370cd9285..ea7d4c21f889 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -785,6 +785,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) u32 cid; spin_lock(&conn->channel_lock); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); cid = call->cid; if (cid) { @@ -792,7 +793,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) chan = &conn->channels[channel]; } trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); - call->conn = NULL; /* Calls that have never actually been assigned a channel can simply be * discarded. 
If the conn didn't get used either, it will follow @@ -908,7 +908,6 @@ out: spin_unlock(&rxnet->client_conn_cache_lock); out_2: spin_unlock(&conn->channel_lock); - rxrpc_put_connection(conn); _leave(""); return; diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index a1ceef4f5cd0..06fcff2ebbba 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) _enter("{%d}", conn->debug_id); ASSERT(conn->security_ix != 0); - - if (!conn->params.key) { - _debug("set up security"); - ret = rxrpc_init_server_conn_security(conn); - switch (ret) { - case 0: - break; - case -ENOENT: - abort_code = RX_CALL_DEAD; - goto abort; - default: - abort_code = RXKADNOAUTH; - goto abort; - } - } + ASSERT(conn->server_key); if (conn->security->issue_challenge(conn) < 0) { abort_code = RX_CALL_DEAD; @@ -452,16 +438,12 @@ again: /* * connection-level event processor */ -void rxrpc_process_connection(struct work_struct *work) +static void rxrpc_do_process_connection(struct rxrpc_connection *conn) { - struct rxrpc_connection *conn = - container_of(work, struct rxrpc_connection, processor); struct sk_buff *skb; u32 abort_code = RX_PROTOCOL_ERROR; int ret; - rxrpc_see_connection(conn); - if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) rxrpc_secure_connection(conn); @@ -489,18 +471,32 @@ void rxrpc_process_connection(struct work_struct *work) } } -out: - rxrpc_put_connection(conn); - _leave(""); return; requeue_and_leave: skb_queue_head(&conn->rx_queue, skb); - goto out; + return; protocol_error: if (rxrpc_abort_connection(conn, ret, abort_code) < 0) goto requeue_and_leave; rxrpc_free_skb(skb, rxrpc_skb_freed); - goto out; + return; +} + +void rxrpc_process_connection(struct work_struct *work) +{ + struct rxrpc_connection *conn = + container_of(work, struct rxrpc_connection, processor); + + rxrpc_see_connection(conn); + + if (__rxrpc_use_local(conn->params.local)) { + rxrpc_do_process_connection(conn); + rxrpc_unuse_local(conn->params.local); + } + + rxrpc_put_connection(conn); + _leave(""); + return; } diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 38d718e90dc6..19e141eeed17 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -223,9 +223,8 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) __rxrpc_disconnect_call(conn, call); spin_unlock(&conn->channel_lock); - call->conn = NULL; + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); conn->idle_timestamp = jiffies; - rxrpc_put_connection(conn); } /* diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c index 123d6ceab15c..21da48e3d2e5 100644 --- a/net/rxrpc/conn_service.c +++ b/net/rxrpc/conn_service.c @@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn */ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, struct rxrpc_connection *conn, + const struct rxrpc_security *sec, + struct key *key, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); @@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, conn->service_id = sp->hdr.serviceId; conn->security_ix = sp->hdr.securityIndex; conn->out_clientflag = 0; + conn->security = sec; + conn->server_key = key_get(key); if (conn->security_ix) conn->state = RXRPC_CONN_SERVICE_UNSECURED; else diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 157be1ff8697..69e09d69c896 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -192,22 +192,6 @@ send_extra_data: goto 
out_no_clear_ca; } -/* - * Ping the other end to fill our RTT cache and to retrieve the rwind - * and MTU parameters. - */ -static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) -{ - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - ktime_t now = skb->tstamp; - - if (call->peer->rtt_usage < 3 || - ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, - true, true, - rxrpc_propose_ack_ping_for_params); -} - /* * Apply a hard ACK by advancing the Tx window. */ @@ -429,7 +413,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); enum rxrpc_call_state state; - unsigned int j; + unsigned int j, nr_subpackets; rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; bool immediate_ack = false, jumbo_bad = false; @@ -473,7 +457,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) call->ackr_prev_seq = seq0; hard_ack = READ_ONCE(call->rx_hard_ack); - if (sp->nr_subpackets > 1) { + nr_subpackets = sp->nr_subpackets; + if (nr_subpackets > 1) { if (call->nr_jumbo_bad > 3) { ack = RXRPC_ACK_NOSPACE; ack_serial = serial; @@ -481,11 +466,11 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) } } - for (j = 0; j < sp->nr_subpackets; j++) { + for (j = 0; j < nr_subpackets; j++) { rxrpc_serial_t serial = sp->hdr.serial + j; rxrpc_seq_t seq = seq0 + j; unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK; - bool terminal = (j == sp->nr_subpackets - 1); + bool terminal = (j == nr_subpackets - 1); bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST); u8 flags, annotation = j; @@ -522,7 +507,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) } if (call->rxtx_buffer[ix]) { - rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1, + rxrpc_input_dup_data(call, seq, nr_subpackets > 1, &jumbo_bad); if (ack != RXRPC_ACK_DUPLICATE) { ack = RXRPC_ACK_DUPLICATE; @@ -580,6 +565,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) * ring. 
*/ skb = NULL; + sp = NULL; } if (last) { @@ -613,10 +599,8 @@ ack: false, true, rxrpc_propose_ack_input_data); - if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) { - trace_rxrpc_notify_socket(call->debug_id, serial); - rxrpc_notify_socket(call); - } + trace_rxrpc_notify_socket(call->debug_id, serial); + rxrpc_notify_socket(call); unlock: spin_unlock(&call->input_lock); @@ -898,7 +882,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) before(prev_pkt, call->ackr_prev_seq)) goto out; call->acks_latest_ts = skb->tstamp; - call->acks_latest = sp->hdr.serial; call->ackr_first_seq = first_soft_ack; call->ackr_prev_seq = prev_pkt; @@ -1396,8 +1379,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) call = rxrpc_new_incoming_call(local, rx, skb); if (!call) goto reject_packet; - rxrpc_send_ping(call, skb); - mutex_unlock(&call->user_mutex); } /* Process a call packet; this either discards or passes on the ref diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 36587260cabd..a6c1349e965d 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -364,11 +364,14 @@ void rxrpc_queue_local(struct rxrpc_local *local) void rxrpc_put_local(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); + unsigned int debug_id; int n; if (local) { + debug_id = local->debug_id; + n = atomic_dec_return(&local->usage); - trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here); + trace_rxrpc_local(debug_id, rxrpc_local_put, n, here); if (n == 0) call_rcu(&local->rcu, rxrpc_local_rcu); @@ -380,14 +383,11 @@ void rxrpc_put_local(struct rxrpc_local *local) */ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local) { - unsigned int au; - local = rxrpc_get_local_maybe(local); if (!local) return NULL; - au = atomic_fetch_add_unless(&local->active_users, 1, 0); - if (au == 0) { + if (!__rxrpc_use_local(local)) { rxrpc_put_local(local); return NULL; } @@ -401,14 +401,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local) */ void rxrpc_unuse_local(struct rxrpc_local *local) { - unsigned int au; - if (local) { - au = atomic_dec_return(&local->active_users); - if (au == 0) + if (__rxrpc_unuse_local(local)) { + rxrpc_get_local(local); rxrpc_queue_local(local); - else - rxrpc_put_local(local); + } } } @@ -465,7 +462,7 @@ static void rxrpc_local_processor(struct work_struct *work) do { again = false; - if (atomic_read(&local->active_users) == 0) { + if (!__rxrpc_use_local(local)) { rxrpc_local_destroyer(local); break; } @@ -479,6 +476,8 @@ static void rxrpc_local_processor(struct work_struct *work) rxrpc_process_local_events(local); again = true; } + + __rxrpc_unuse_local(local); } while (again); rxrpc_put_local(local); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 935bb60fff56..bad3d2420344 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -129,7 +129,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, rxrpc_serial_t *_serial) { - struct rxrpc_connection *conn = NULL; + struct rxrpc_connection *conn; struct rxrpc_ack_buffer *pkt; struct msghdr msg; struct kvec iov[2]; @@ -139,18 +139,14 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, int ret; u8 reason; - spin_lock_bh(&call->lock); - if (call->conn) - conn = rxrpc_get_connection_maybe(call->conn); - spin_unlock_bh(&call->lock); - if (!conn) + if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) return -ECONNRESET; pkt = kzalloc(sizeof(*pkt), 
GFP_KERNEL); - if (!pkt) { - rxrpc_put_connection(conn); + if (!pkt) return -ENOMEM; - } + + conn = call->conn; msg.msg_name = &call->peer->srx.transport; msg.msg_namelen = call->peer->srx.transport_len; @@ -244,7 +240,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, } out: - rxrpc_put_connection(conn); kfree(pkt); return ret; } @@ -254,7 +249,7 @@ out: */ int rxrpc_send_abort_packet(struct rxrpc_call *call) { - struct rxrpc_connection *conn = NULL; + struct rxrpc_connection *conn; struct rxrpc_abort_buffer pkt; struct msghdr msg; struct kvec iov[1]; @@ -271,13 +266,11 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call) test_bit(RXRPC_CALL_TX_LAST, &call->flags)) return 0; - spin_lock_bh(&call->lock); - if (call->conn) - conn = rxrpc_get_connection_maybe(call->conn); - spin_unlock_bh(&call->lock); - if (!conn) + if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) return -ECONNRESET; + conn = call->conn; + msg.msg_name = &call->peer->srx.transport; msg.msg_namelen = call->peer->srx.transport_len; msg.msg_control = NULL; @@ -312,8 +305,6 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call) trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr, rxrpc_tx_point_call_abort); rxrpc_tx_backoff(call, ret); - - rxrpc_put_connection(conn); return ret; } diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 48f67a9b1037..923b263c401b 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet, if (!rxrpc_get_peer_maybe(peer)) continue; - spin_unlock_bh(&rxnet->peer_hash_lock); + if (__rxrpc_use_local(peer->local)) { + spin_unlock_bh(&rxnet->peer_hash_lock); - keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME; - slot = keepalive_at - base; - _debug("%02x peer %u t=%d {%pISp}", - cursor, peer->debug_id, slot, &peer->srx.transport); + keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME; + slot = keepalive_at - base; + _debug("%02x peer %u t=%d {%pISp}", + cursor, peer->debug_id, slot, &peer->srx.transport); - if (keepalive_at <= base || - keepalive_at > base + RXRPC_KEEPALIVE_TIME) { - rxrpc_send_keepalive(peer); - slot = RXRPC_KEEPALIVE_TIME; + if (keepalive_at <= base || + keepalive_at > base + RXRPC_KEEPALIVE_TIME) { + rxrpc_send_keepalive(peer); + slot = RXRPC_KEEPALIVE_TIME; + } + + /* A transmission to this peer occurred since last we + * examined it so put it into the appropriate future + * bucket. + */ + slot += cursor; + slot &= mask; + spin_lock_bh(&rxnet->peer_hash_lock); + list_add_tail(&peer->keepalive_link, + &rxnet->peer_keepalive[slot & mask]); + rxrpc_unuse_local(peer->local); } - - /* A transmission to this peer occurred since last we examined - * it so put it into the appropriate future bucket. 
- */ - slot += cursor; - slot &= mask; - spin_lock_bh(&rxnet->peer_hash_lock); - list_add_tail(&peer->keepalive_link, - &rxnet->peer_keepalive[slot & mask]); rxrpc_put_peer_locked(peer); } diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 8d8aa3c230b5..098f1f9ec53b 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) u32 serial; int ret; - _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); + _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); - ret = key_validate(conn->params.key); + ret = key_validate(conn->server_key); if (ret < 0) return ret; @@ -1293,6 +1293,7 @@ static void rxkad_exit(void) const struct rxrpc_security rxkad = { .name = "rxkad", .security_index = RXRPC_SECURITY_RXKAD, + .no_key_abort = RXKADUNKNOWNKEY, .init = rxkad_init, .exit = rxkad_exit, .init_connection_security = rxkad_init_connection_security, diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c index a4c47d2b7054..9b1fb9ed0717 100644 --- a/net/rxrpc/security.c +++ b/net/rxrpc/security.c @@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) } /* - * initialise the security on a server connection + * Find the security key for a server connection. */ -int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) +bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx, + const struct rxrpc_security **_sec, + struct key **_key, + struct sk_buff *skb) { const struct rxrpc_security *sec; - struct rxrpc_local *local = conn->params.local; - struct rxrpc_sock *rx; - struct key *key; - key_ref_t kref; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + key_ref_t kref = NULL; char kdesc[5 + 1 + 3 + 1]; _enter(""); - sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); + sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex); - sec = rxrpc_security_lookup(conn->security_ix); + sec = rxrpc_security_lookup(sp->hdr.securityIndex); if (!sec) { - _leave(" = -ENOKEY [lookup]"); - return -ENOKEY; + trace_rxrpc_abort(0, "SVS", + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + RX_INVALID_OPERATION, EKEYREJECTED); + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; + skb->priority = RX_INVALID_OPERATION; + return false; } - /* find the service */ - read_lock(&local->services_lock); - rx = rcu_dereference_protected(local->service, - lockdep_is_held(&local->services_lock)); - if (rx && (rx->srx.srx_service == conn->service_id || - rx->second_service == conn->service_id)) - goto found_service; + if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE) + goto out; - /* the service appears to have died */ - read_unlock(&local->services_lock); - _leave(" = -ENOENT"); - return -ENOENT; - -found_service: if (!rx->securities) { - read_unlock(&local->services_lock); - _leave(" = -ENOKEY"); - return -ENOKEY; + trace_rxrpc_abort(0, "SVR", + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + RX_INVALID_OPERATION, EKEYREJECTED); + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; + skb->priority = RX_INVALID_OPERATION; + return false; } /* look through the service's keyring */ kref = keyring_search(make_key_ref(rx->securities, 1UL), &key_type_rxrpc_s, kdesc, true); if (IS_ERR(kref)) { - read_unlock(&local->services_lock); - _leave(" = %ld [search]", PTR_ERR(kref)); - return PTR_ERR(kref); + trace_rxrpc_abort(0, "SVK", + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + sec->no_key_abort, EKEYREJECTED); + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; + skb->priority = 
sec->no_key_abort; + return false; } - key = key_ref_to_ptr(kref); - read_unlock(&local->services_lock); - - conn->server_key = key; - conn->security = sec; - - _leave(" = 0"); - return 0; +out: + *_sec = sec; + *_key = key_ref_to_ptr(kref); + return true; } diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 813fd6888142..136eb465bfcb 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -58,8 +58,8 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, rtt = READ_ONCE(call->peer->rtt); rtt2 = nsecs_to_jiffies64(rtt) * 2; - if (rtt2 < 1) - rtt2 = 1; + if (rtt2 < 2) + rtt2 = 2; timeout = rtt2; tx_start = READ_ONCE(call->tx_hard_ack); diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index fcc46025e790..0586546c20d7 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -329,6 +329,7 @@ static int tcf_ct_act_nat(struct sk_buff *skb, bool commit) { #if IS_ENABLED(CONFIG_NF_NAT) + int err; enum nf_nat_manip_type maniptype; if (!(ct_action & TCA_CT_ACT_NAT)) @@ -359,7 +360,17 @@ static int tcf_ct_act_nat(struct sk_buff *skb, return NF_ACCEPT; } - return ct_nat_execute(skb, ct, ctinfo, range, maniptype); + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype); + if (err == NF_ACCEPT && + ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { + if (maniptype == NF_NAT_MANIP_SRC) + maniptype = NF_NAT_MANIP_DST; + else + maniptype = NF_NAT_MANIP_SRC; + + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype); + } + return err; #else return NF_ACCEPT; #endif @@ -728,7 +739,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); if (params) - kfree_rcu(params, rcu); + call_rcu(¶ms->rcu, tcf_ct_params_free); if (res == ACT_P_CREATED) tcf_idr_insert(tn, *a); diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 0dbcfd1dca7b..f45995a6237a 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -360,6 +360,16 @@ static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static void tcf_ctinfo_cleanup(struct tc_action *a) +{ + struct tcf_ctinfo *ci = to_ctinfo(a); + struct tcf_ctinfo_params *cp; + + cp = rcu_dereference_protected(ci->params, 1); + if (cp) + kfree_rcu(cp, rcu); +} + static struct tc_action_ops act_ctinfo_ops = { .kind = "ctinfo", .id = TCA_ID_CTINFO, @@ -367,6 +377,7 @@ static struct tc_action_ops act_ctinfo_ops = { .act = tcf_ctinfo_act, .dump = tcf_ctinfo_dump, .init = tcf_ctinfo_init, + .cleanup= tcf_ctinfo_cleanup, .walk = tcf_ctinfo_walker, .lookup = tcf_ctinfo_search, .size = sizeof(struct tcf_ctinfo), diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 3a31e241c647..a0cfb4793c93 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -536,6 +536,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, } ife = to_ife(*a); + if (ret == ACT_P_CREATED) + INIT_LIST_HEAD(&ife->metalist); + err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; @@ -565,10 +568,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, p->eth_type = ife_type; } - - if (ret == ACT_P_CREATED) - INIT_LIST_HEAD(&ife->metalist); - if (tb[TCA_IFE_METALST]) { err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], NULL, diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 08923b21e566..27f624971121 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, 
const struct tc_action *a, bool use_reinsert; bool want_ingress; bool is_redirect; + bool expects_nh; int m_eaction; int mac_len; + bool at_nh; rec_level = __this_cpu_inc_return(mirred_rec_level); if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) { @@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, goto out; } - /* If action's target direction differs than filter's direction, - * and devices expect a mac header on xmit, then mac push/pull is - * needed. - */ want_ingress = tcf_mirred_act_wants_ingress(m_eaction); - if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) { - if (!skb_at_tc_ingress(skb)) { - /* caught at egress, act ingress: pull mac */ - mac_len = skb_network_header(skb) - skb_mac_header(skb); + + expects_nh = want_ingress || !m_mac_header_xmit; + at_nh = skb->data == skb_network_header(skb); + if (at_nh != expects_nh) { + mac_len = skb_at_tc_ingress(skb) ? skb->mac_len : + skb_network_header(skb) - skb_mac_header(skb); + if (expects_nh) { + /* target device/action expect data at nh */ skb_pull_rcsum(skb2, mac_len); } else { - /* caught at ingress, act egress: push mac */ - skb_push_rcsum(skb2, skb->mac_len); + /* target device/action expect data at mac */ + skb_push_rcsum(skb2, mac_len); } } @@ -282,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, /* mirror is always swallowed */ if (is_redirect) { - skb2->tc_redirected = 1; - skb2->tc_from_ingress = skb2->tc_at_ingress; - if (skb2->tc_from_ingress) - skb2->tstamp = 0; + skb_set_redirected(skb2, skb2->tc_at_ingress); + /* let's the caller reinsert the packet, if possible */ if (use_reinsert) { res->ingress = want_ingress; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 4cf6c553bb0b..db570d2bd0e0 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2019 Netronome Systems, Inc. 
*/ +#include <linux/if_arp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -76,12 +77,14 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, switch (p->tcfm_action) { case TCA_MPLS_ACT_POP: - if (skb_mpls_pop(skb, p->tcfm_proto, mac_len)) + if (skb_mpls_pop(skb, p->tcfm_proto, mac_len, + skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; break; case TCA_MPLS_ACT_PUSH: new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); - if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len)) + if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, + skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; break; case TCA_MPLS_ACT_MODIFY: diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 20d60b8fcb70..c2cdd0fc2e70 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held, tcf_proto_destroy(tp, rtnl_held, true, extack); } -static int walker_check_empty(struct tcf_proto *tp, void *fh, - struct tcf_walker *arg) +static bool tcf_proto_check_delete(struct tcf_proto *tp) { - if (fh) { - arg->nonempty = true; - return -1; - } - return 0; -} + if (tp->ops->delete_empty) + return tp->ops->delete_empty(tp); -static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held) -{ - struct tcf_walker walker = { .fn = walker_check_empty, }; - - if (tp->ops->walk) { - tp->ops->walk(tp, &walker, rtnl_held); - return !walker.nonempty; - } - return true; -} - -static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held) -{ - spin_lock(&tp->lock); - if (tcf_proto_is_empty(tp, rtnl_held)) - tp->deleting = true; - spin_unlock(&tp->lock); + tp->deleting = true; return tp->deleting; } @@ -626,15 +605,15 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) static int tcf_block_setup(struct tcf_block *block, struct flow_block_offload *bo); -static void tc_indr_block_ing_cmd(struct net_device *dev, - struct tcf_block *block, - flow_indr_block_bind_cb_t *cb, - void *cb_priv, - enum flow_block_command command) +static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block, + flow_indr_block_bind_cb_t *cb, void *cb_priv, + enum flow_block_command command, bool ingress) { struct flow_block_offload bo = { .command = command, - .binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + .binder_type = ingress ? + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS : + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, .net = dev_net(dev), .block_shared = tcf_block_non_null_shared(block), }; @@ -652,9 +631,10 @@ static void tc_indr_block_ing_cmd(struct net_device *dev, up_write(&block->cb_lock); } -static struct tcf_block *tc_dev_ingress_block(struct net_device *dev) +static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress) { const struct Qdisc_class_ops *cops; + const struct Qdisc_ops *ops; struct Qdisc *qdisc; if (!dev_ingress_queue(dev)) @@ -664,24 +644,37 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev) if (!qdisc) return NULL; - cops = qdisc->ops->cl_ops; + ops = qdisc->ops; + if (!ops) + return NULL; + + if (!ingress && !strcmp("ingress", ops->id)) + return NULL; + + cops = ops->cl_ops; if (!cops) return NULL; if (!cops->tcf_block) return NULL; - return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL); + return cops->tcf_block(qdisc, + ingress ? 
TC_H_MIN_INGRESS : TC_H_MIN_EGRESS, + NULL); } -static void tc_indr_block_get_and_ing_cmd(struct net_device *dev, - flow_indr_block_bind_cb_t *cb, - void *cb_priv, - enum flow_block_command command) +static void tc_indr_block_get_and_cmd(struct net_device *dev, + flow_indr_block_bind_cb_t *cb, + void *cb_priv, + enum flow_block_command command) { - struct tcf_block *block = tc_dev_ingress_block(dev); + struct tcf_block *block; - tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command); + block = tc_dev_block(dev, true); + tc_indr_block_cmd(dev, block, cb, cb_priv, command, true); + + block = tc_dev_block(dev, false); + tc_indr_block_cmd(dev, block, cb, cb_priv, command, false); } static void tc_indr_block_call(struct tcf_block *block, @@ -1737,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, * concurrently. * Mark tp for deletion if it is empty. */ - if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) { + if (!tp_iter || !tcf_proto_check_delete(tp)) { mutex_unlock(&chain->filter_chain_lock); return; } @@ -2062,9 +2055,8 @@ replay: &chain_info)); mutex_unlock(&chain->filter_chain_lock); - tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]), - protocol, prio, chain, rtnl_held, - extack); + tp_new = tcf_proto_create(name, protocol, prio, chain, + rtnl_held, extack); if (IS_ERR(tp_new)) { err = PTR_ERR(tp_new); goto errout_tp; @@ -2721,13 +2713,19 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, struct netlink_ext_ack *extack) { const struct tcf_proto_ops *ops; + char name[IFNAMSIZ]; void *tmplt_priv; /* If kind is not set, user did not specify template. */ if (!tca[TCA_KIND]) return 0; - ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack); + if (tcf_proto_check_kind(tca[TCA_KIND], name)) { + NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); + return -EINVAL; + } + + ops = tcf_proto_lookup_ops(name, true, extack); if (IS_ERR(ops)) return PTR_ERR(ops); if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { @@ -3626,9 +3624,9 @@ static struct pernet_operations tcf_net_ops = { .size = sizeof(struct tcf_net), }; -static struct flow_indr_block_ing_entry block_ing_entry = { - .cb = tc_indr_block_get_and_ing_cmd, - .list = LIST_HEAD_INIT(block_ing_entry.list), +static struct flow_indr_block_entry block_entry = { + .cb = tc_indr_block_get_and_cmd, + .list = LIST_HEAD_INIT(block_entry.list), }; static int __init tc_filter_init(void) @@ -3643,7 +3641,7 @@ static int __init tc_filter_init(void) if (err) goto err_register_pernet_subsys; - flow_indr_add_block_ing_cb(&block_ing_entry); + flow_indr_add_block_cb(&block_entry); rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, RTNL_FLAG_DOIT_UNLOCKED); diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 4aafbe3d435c..f256a7c69093 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -263,12 +263,17 @@ skip: } } -static void basic_bind_class(void *fh, u32 classid, unsigned long cl) +static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q, + unsigned long base) { struct basic_filter *f = fh; - if (f && f->res.classid == classid) - f->res.class = cl; + if (f && f->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &f->res, base); + else + __tcf_unbind_filter(q, &f->res); + } } static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh, diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 8229ed4a67be..6e3e63db0e01 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ 
-631,12 +631,17 @@ nla_put_failure: return -1; } -static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl) +static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl, + void *q, unsigned long base) { struct cls_bpf_prog *prog = fh; - if (prog && prog->res.classid == classid) - prog->res.class = cl; + if (prog && prog->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &prog->res, base); + else + __tcf_unbind_filter(q, &prog->res); + } } static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg, diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 74221e3351c3..1d270540e74d 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -54,8 +54,13 @@ struct fl_flow_key { struct flow_dissector_key_ip ip; struct flow_dissector_key_ip enc_ip; struct flow_dissector_key_enc_opts enc_opts; - struct flow_dissector_key_ports tp_min; - struct flow_dissector_key_ports tp_max; + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + } tp_range; struct flow_dissector_key_ct ct; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ @@ -198,19 +203,19 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, { __be16 min_mask, max_mask, min_val, max_val; - min_mask = htons(filter->mask->key.tp_min.dst); - max_mask = htons(filter->mask->key.tp_max.dst); - min_val = htons(filter->key.tp_min.dst); - max_val = htons(filter->key.tp_max.dst); + min_mask = htons(filter->mask->key.tp_range.tp_min.dst); + max_mask = htons(filter->mask->key.tp_range.tp_max.dst); + min_val = htons(filter->key.tp_range.tp_min.dst); + max_val = htons(filter->key.tp_range.tp_max.dst); if (min_mask && max_mask) { - if (htons(key->tp.dst) < min_val || - htons(key->tp.dst) > max_val) + if (htons(key->tp_range.tp.dst) < min_val || + htons(key->tp_range.tp.dst) > max_val) return false; /* skb does not have min and max values */ - mkey->tp_min.dst = filter->mkey.tp_min.dst; - mkey->tp_max.dst = filter->mkey.tp_max.dst; + mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; + mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; } return true; } @@ -221,19 +226,19 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, { __be16 min_mask, max_mask, min_val, max_val; - min_mask = htons(filter->mask->key.tp_min.src); - max_mask = htons(filter->mask->key.tp_max.src); - min_val = htons(filter->key.tp_min.src); - max_val = htons(filter->key.tp_max.src); + min_mask = htons(filter->mask->key.tp_range.tp_min.src); + max_mask = htons(filter->mask->key.tp_range.tp_max.src); + min_val = htons(filter->key.tp_range.tp_min.src); + max_val = htons(filter->key.tp_range.tp_max.src); if (min_mask && max_mask) { - if (htons(key->tp.src) < min_val || - htons(key->tp.src) > max_val) + if (htons(key->tp_range.tp.src) < min_val || + htons(key->tp_range.tp.src) > max_val) return false; /* skb does not have min and max values */ - mkey->tp_min.src = filter->mkey.tp_min.src; - mkey->tp_max.src = filter->mkey.tp_max.src; + mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; + mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; } return true; } @@ -298,6 +303,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct cls_fl_filter *f; list_for_each_entry_rcu(mask, &head->masks, list) { + flow_dissector_init_keys(&skb_key.control, &skb_key.basic); fl_clear_masked_range(&skb_key, mask); 
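/* [Editor's aside -- an illustrative sketch, not part of the diff above.
 * The fl_flow_key change wraps flower's port keys in a union so the
 * exact-match key (tp) and the range bounds (tp_min/tp_max) share
 * storage: the dissector keeps writing the packet's ports to a single
 * offset (tp aliases tp_min), while range filters read the same bytes
 * as a lower bound followed by an adjacent upper bound. The types and
 * helper below are simplified stand-ins (host byte order, no masks),
 * not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdint.h>

struct ports_sketch {                    /* cf. flow_dissector_key_ports */
	uint16_t src;
	uint16_t dst;
};

struct flow_key_sketch {
	union {
		struct ports_sketch tp;              /* packet's dissected ports */
		struct {
			struct ports_sketch tp_min;  /* filter: lower bound */
			struct ports_sketch tp_max;  /* filter: upper bound */
		};
	} tp_range;                              /* tp overlays tp_min */
};

/* In the spirit of fl_range_port_dst_cmp(): the packet matches when its
 * dst port falls within the filter's [tp_min.dst, tp_max.dst] interval.
 */
static bool port_dst_in_range(const struct flow_key_sketch *filter,
			      const struct flow_key_sketch *pkt)
{
	return pkt->tp_range.tp.dst >= filter->tp_range.tp_min.dst &&
	       pkt->tp_range.tp.dst <= filter->tp_range.tp_max.dst;
}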
skb_flow_dissect_meta(skb, &mask->dissector, &skb_key); @@ -684,6 +690,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { .len = 128 / BITS_PER_BYTE }, [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, .len = 128 / BITS_PER_BYTE }, + [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, }; static const struct nla_policy @@ -715,23 +722,25 @@ static void fl_set_key_val(struct nlattr **tb, static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask) { - fl_set_key_val(tb, &key->tp_min.dst, - TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst, - TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst)); - fl_set_key_val(tb, &key->tp_max.dst, - TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst, - TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst)); - fl_set_key_val(tb, &key->tp_min.src, - TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src, - TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src)); - fl_set_key_val(tb, &key->tp_max.src, - TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src, - TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src)); + fl_set_key_val(tb, &key->tp_range.tp_min.dst, + TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); + fl_set_key_val(tb, &key->tp_range.tp_max.dst, + TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); + fl_set_key_val(tb, &key->tp_range.tp_min.src, + TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); + fl_set_key_val(tb, &key->tp_range.tp_max.src, + TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); - if ((mask->tp_min.dst && mask->tp_max.dst && - htons(key->tp_max.dst) <= htons(key->tp_min.dst)) || - (mask->tp_min.src && mask->tp_max.src && - htons(key->tp_max.src) <= htons(key->tp_min.src))) + if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && + htons(key->tp_range.tp_max.dst) <= + htons(key->tp_range.tp_min.dst)) || + (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && + htons(key->tp_range.tp_max.src) <= + htons(key->tp_range.tp_min.src))) return -EINVAL; return 0; @@ -1320,9 +1329,10 @@ static void fl_init_dissector(struct flow_dissector *dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); - if (FL_KEY_IS_MASKED(mask, tp) || - FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max)) - FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IP, ip); FL_KEY_SET_IF_MASKED(mask, keys, cnt, @@ -1371,8 +1381,10 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, fl_mask_copy(newmask, mask); - if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) || - (newmask->key.tp_min.src && newmask->key.tp_max.src)) + if ((newmask->key.tp_range.tp_min.dst && + newmask->key.tp_range.tp_max.dst) || + (newmask->key.tp_range.tp_min.src && + newmask->key.tp_range.tp_max.src)) newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; err = fl_init_mask_hashtable(newmask); @@ -1970,18 +1982,22 @@ static int fl_dump_key_val(struct sk_buff *skb, static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, struct fl_flow_key *mask) { - if (fl_dump_key_val(skb, &key->tp_min.dst, 
TCA_FLOWER_KEY_PORT_DST_MIN, - &mask->tp_min.dst, TCA_FLOWER_UNSPEC, - sizeof(key->tp_min.dst)) || - fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX, - &mask->tp_max.dst, TCA_FLOWER_UNSPEC, - sizeof(key->tp_max.dst)) || - fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN, - &mask->tp_min.src, TCA_FLOWER_UNSPEC, - sizeof(key->tp_min.src)) || - fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX, - &mask->tp_max.src, TCA_FLOWER_UNSPEC, - sizeof(key->tp_max.src))) + if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, + TCA_FLOWER_KEY_PORT_DST_MIN, + &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, + sizeof(key->tp_range.tp_min.dst)) || + fl_dump_key_val(skb, &key->tp_range.tp_max.dst, + TCA_FLOWER_KEY_PORT_DST_MAX, + &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, + sizeof(key->tp_range.tp_max.dst)) || + fl_dump_key_val(skb, &key->tp_range.tp_min.src, + TCA_FLOWER_KEY_PORT_SRC_MIN, + &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, + sizeof(key->tp_range.tp_min.src)) || + fl_dump_key_val(skb, &key->tp_range.tp_max.src, + TCA_FLOWER_KEY_PORT_SRC_MAX, + &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, + sizeof(key->tp_range.tp_max.src))) return -1; return 0; @@ -2497,12 +2513,28 @@ nla_put_failure: return -EMSGSIZE; } -static void fl_bind_class(void *fh, u32 classid, unsigned long cl) +static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, + unsigned long base) { struct cls_fl_filter *f = fh; - if (f && f->res.classid == classid) - f->res.class = cl; + if (f && f->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &f->res, base); + else + __tcf_unbind_filter(q, &f->res); + } +} + +static bool fl_delete_empty(struct tcf_proto *tp) +{ + struct cls_fl_head *head = fl_head_dereference(tp); + + spin_lock(&tp->lock); + tp->deleting = idr_is_empty(&head->handle_idr); + spin_unlock(&tp->lock); + + return tp->deleting; } static struct tcf_proto_ops cls_fl_ops __read_mostly = { @@ -2514,6 +2546,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = { .put = fl_put, .change = fl_change, .delete = fl_delete, + .delete_empty = fl_delete_empty, .walk = fl_walk, .reoffload = fl_reoffload, .hw_add = fl_hw_add, diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index c9496c920d6f..ec945294626a 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -419,12 +419,17 @@ nla_put_failure: return -1; } -static void fw_bind_class(void *fh, u32 classid, unsigned long cl) +static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q, + unsigned long base) { struct fw_filter *f = fh; - if (f && f->res.classid == classid) - f->res.class = cl; + if (f && f->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &f->res, base); + else + __tcf_unbind_filter(q, &f->res); + } } static struct tcf_proto_ops cls_fw_ops __read_mostly = { diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 7fc2eb62aa98..610a0b728161 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -157,6 +157,7 @@ static void *mall_get(struct tcf_proto *tp, u32 handle) static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { [TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC }, [TCA_MATCHALL_CLASSID] = { .type = NLA_U32 }, + [TCA_MATCHALL_FLAGS] = { .type = NLA_U32 }, }; static int mall_set_parms(struct net *net, struct tcf_proto *tp, @@ -393,12 +394,17 @@ nla_put_failure: return -1; } -static void mall_bind_class(void *fh, u32 classid, unsigned long cl) +static void mall_bind_class(void *fh, u32 classid, unsigned 
long cl, void *q, + unsigned long base) { struct cls_mall_head *head = fh; - if (head && head->res.classid == classid) - head->res.class = cl; + if (head && head->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &head->res, base); + else + __tcf_unbind_filter(q, &head->res); + } } static struct tcf_proto_ops cls_mall_ops __read_mostly = { diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 2d9e0b4484ea..5efa3e7ace15 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -534,8 +534,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, fp = &b->ht[h]; for (pfp = rtnl_dereference(*fp); pfp; fp = &pfp->next, pfp = rtnl_dereference(*fp)) { - if (pfp == f) { - *fp = f->next; + if (pfp == fold) { + rcu_assign_pointer(*fp, fold->next); break; } } @@ -641,12 +641,17 @@ nla_put_failure: return -1; } -static void route4_bind_class(void *fh, u32 classid, unsigned long cl) +static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q, + unsigned long base) { struct route4_filter *f = fh; - if (f && f->res.classid == classid) - f->res.class = cl; + if (f && f->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &f->res, base); + else + __tcf_unbind_filter(q, &f->res); + } } static struct tcf_proto_ops cls_route4_ops __read_mostly = { diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 2f3c03b25d5d..d36949d9382c 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -463,10 +463,8 @@ static u32 gen_tunnel(struct rsvp_head *data) static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = { [TCA_RSVP_CLASSID] = { .type = NLA_U32 }, - [TCA_RSVP_DST] = { .type = NLA_BINARY, - .len = RSVP_DST_LEN * sizeof(u32) }, - [TCA_RSVP_SRC] = { .type = NLA_BINARY, - .len = RSVP_DST_LEN * sizeof(u32) }, + [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) }, + [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) }, [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, }; @@ -738,12 +736,17 @@ nla_put_failure: return -1; } -static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl) +static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q, + unsigned long base) { struct rsvp_filter *f = fh; - if (f && f->res.classid == classid) - f->res.class = cl; + if (f && f->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &f->res, base); + else + __tcf_unbind_filter(q, &f->res); + } } static struct tcf_proto_ops RSVP_OPS __read_mostly = { diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index e573e5a5c794..61e95029c18f 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -11,6 +11,7 @@ #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/slab.h> +#include <linux/refcount.h> #include <net/act_api.h> #include <net/netlink.h> #include <net/pkt_cls.h> @@ -26,9 +27,12 @@ #define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */ +struct tcindex_data; + struct tcindex_filter_result { struct tcf_exts exts; struct tcf_result res; + struct tcindex_data *p; struct rcu_work rwork; }; @@ -49,6 +53,7 @@ struct tcindex_data { u32 hash; /* hash table size; 0 if undefined */ u32 alloc_hash; /* allocated size */ u32 fall_through; /* 0: only classify if explicit match */ + refcount_t refcnt; /* a temporary refcnt for perfect hash */ struct rcu_work rwork; }; @@ -57,6 +62,20 @@ static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) { return tcf_exts_has_actions(&r->exts) || r->res.classid; } +static void tcindex_data_get(struct tcindex_data *p) +{ + refcount_inc(&p->refcnt); +} + +static void tcindex_data_put(struct tcindex_data *p) +{ + if 
(refcount_dec_and_test(&p->refcnt)) { + kfree(p->perfect); + kfree(p->h); + kfree(p); + } +} + static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p, u16 key) { @@ -132,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp) p->mask = 0xffff; p->hash = DEFAULT_HASH_SIZE; p->fall_through = 1; + refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */ rcu_assign_pointer(tp->root, p); return 0; @@ -141,6 +161,7 @@ static void __tcindex_destroy_rexts(struct tcindex_filter_result *r) { tcf_exts_destroy(&r->exts); tcf_exts_put_net(&r->exts); + tcindex_data_put(r->p); } static void tcindex_destroy_rexts_work(struct work_struct *work) @@ -212,6 +233,8 @@ found: else __tcindex_destroy_fexts(f); } else { + tcindex_data_get(p); + if (tcf_exts_get_net(&r->exts)) tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work); else @@ -228,9 +251,7 @@ static void tcindex_destroy_work(struct work_struct *work) struct tcindex_data, rwork); - kfree(p->perfect); - kfree(p->h); - kfree(p); + tcindex_data_put(p); } static inline int @@ -248,9 +269,11 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { }; static int tcindex_filter_result_init(struct tcindex_filter_result *r, + struct tcindex_data *p, struct net *net) { memset(r, 0, sizeof(*r)); + r->p = p; return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); } @@ -261,8 +284,10 @@ static void tcindex_partial_destroy_work(struct work_struct *work) struct tcindex_data, rwork); + rtnl_lock(); kfree(p->perfect); kfree(p); + rtnl_unlock(); } static void tcindex_free_perfect_hash(struct tcindex_data *cp) @@ -288,6 +313,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); if (err < 0) goto errout; + cp->perfect[i].p = cp; } return 0; @@ -332,23 +358,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, cp->alloc_hash = p->alloc_hash; cp->fall_through = p->fall_through; cp->tp = tp; - - if (p->perfect) { - int i; - - if (tcindex_alloc_perfect_hash(net, cp) < 0) - goto errout; - for (i = 0; i < cp->hash; i++) - cp->perfect[i].res = p->perfect[i].res; - balloc = 1; - } - cp->h = p->h; - - err = tcindex_filter_result_init(&new_filter_result, net); - if (err < 0) - goto errout1; - if (old_r) - cr = r->res; + refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */ if (tb[TCA_TCINDEX_HASH]) cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); @@ -359,6 +369,34 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (tb[TCA_TCINDEX_SHIFT]) cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); + if (!cp->hash) { + /* Hash not specified, use perfect hash if the upper limit + * of the hashing index is below the threshold. 
+ */ + if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) + cp->hash = (cp->mask >> cp->shift) + 1; + else + cp->hash = DEFAULT_HASH_SIZE; + } + + if (p->perfect) { + int i; + + if (tcindex_alloc_perfect_hash(net, cp) < 0) + goto errout; + cp->alloc_hash = cp->hash; + for (i = 0; i < min(cp->hash, p->hash); i++) + cp->perfect[i].res = p->perfect[i].res; + balloc = 1; + } + cp->h = p->h; + + err = tcindex_filter_result_init(&new_filter_result, cp, net); + if (err < 0) + goto errout_alloc; + if (old_r) + cr = r->res; + err = -EBUSY; /* Hash already allocated, make sure that we still meet the @@ -376,16 +414,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (tb[TCA_TCINDEX_FALL_THROUGH]) cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); - if (!cp->hash) { - /* Hash not specified, use perfect hash if the upper limit - * of the hashing index is below the threshold. - */ - if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) - cp->hash = (cp->mask >> cp->shift) + 1; - else - cp->hash = DEFAULT_HASH_SIZE; - } - if (!cp->perfect && !cp->h) cp->alloc_hash = cp->hash; @@ -431,7 +459,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, goto errout_alloc; f->key = handle; f->next = NULL; - err = tcindex_filter_result_init(&f->result, net); + err = tcindex_filter_result_init(&f->result, cp, net); if (err < 0) { kfree(f); goto errout_alloc; @@ -444,7 +472,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, } if (old_r && old_r != r) { - err = tcindex_filter_result_init(old_r, net); + err = tcindex_filter_result_init(old_r, cp, net); if (err < 0) { kfree(f); goto errout_alloc; @@ -484,7 +512,6 @@ errout_alloc: tcindex_free_perfect_hash(cp); else if (balloc == 2) kfree(cp->h); -errout1: tcf_exts_destroy(&new_filter_result.exts); errout: kfree(cp); @@ -569,6 +596,14 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held, for (i = 0; i < p->hash; i++) { struct tcindex_filter_result *r = p->perfect + i; + /* tcf_queue_work() does not guarantee the ordering we + * want, so we have to take this refcnt temporarily to + * ensure 'p' is freed after all tcindex_filter_result + * here. Imperfect hash does not need this, because it + * uses linked lists rather than an array. 
+ */ + tcindex_data_get(p); + tcf_unbind_filter(tp, &r->res); if (tcf_exts_get_net(&r->exts)) tcf_queue_work(&r->rwork, @@ -654,12 +689,17 @@ nla_put_failure: return -1; } -static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl) +static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl, + void *q, unsigned long base) { struct tcindex_filter_result *r = fh; - if (r && r->res.classid == classid) - r->res.class = cl; + if (r && r->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &r->res, base); + else + __tcf_unbind_filter(q, &r->res); + } } static struct tcf_proto_ops cls_tcindex_ops __read_mostly = { diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index a0e6fac613de..e15ff335953d 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -1255,12 +1255,17 @@ static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, return 0; } -static void u32_bind_class(void *fh, u32 classid, unsigned long cl) +static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q, + unsigned long base) { struct tc_u_knode *n = fh; - if (n && n->res.classid == classid) - n->res.class = cl; + if (n && n->res.classid == classid) { + if (cl) + __tcf_bind_filter(q, &n->res, base); + else + __tcf_unbind_filter(q, &n->res); + } } static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh, diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 8f2ad706784d..dd3b8c11a2e0 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -238,6 +238,9 @@ static int tcf_em_validate(struct tcf_proto *tp, goto errout; if (em->ops->change) { + err = -EINVAL; + if (em_hdr->flags & TCF_EM_SIMPLE) + goto errout; err = em->ops->change(net, data, data_len, em); if (err < 0) goto errout; @@ -263,12 +266,12 @@ static int tcf_em_validate(struct tcf_proto *tp, } em->data = (unsigned long) v; } + em->datalen = data_len; } } em->matchid = em_hdr->matchid; em->flags = em_hdr->flags; - em->datalen = data_len; em->net = net; err = 0; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 1047825d9f48..50794125bf02 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1891,8 +1891,9 @@ static int tclass_del_notify(struct net *net, struct tcf_bind_args { struct tcf_walker w; - u32 classid; + unsigned long base; unsigned long cl; + u32 classid; }; static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg) @@ -1903,28 +1904,30 @@ static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg) struct Qdisc *q = tcf_block_q(tp->chain->block); sch_tree_lock(q); - tp->ops->bind_class(n, a->classid, a->cl); + tp->ops->bind_class(n, a->classid, a->cl, q, a->base); sch_tree_unlock(q); } return 0; } -static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, - unsigned long new_cl) +struct tc_bind_class_args { + struct qdisc_walker w; + unsigned long new_cl; + u32 portid; + u32 clid; +}; + +static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl, + struct qdisc_walker *w) { + struct tc_bind_class_args *a = (struct tc_bind_class_args *)w; const struct Qdisc_class_ops *cops = q->ops->cl_ops; struct tcf_block *block; struct tcf_chain *chain; - unsigned long cl; - cl = cops->find(q, portid); - if (!cl) - return; - if (!cops->tcf_block) - return; block = cops->tcf_block(q, cl, NULL); if (!block) - return; + return 0; for (chain = tcf_get_next_chain(block, NULL); chain; chain = tcf_get_next_chain(block, chain)) { @@ -1935,11 +1938,29 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, 
struct tcf_bind_args arg = {}; arg.w.fn = tcf_node_bind; - arg.classid = clid; - arg.cl = new_cl; + arg.classid = a->clid; + arg.base = cl; + arg.cl = a->new_cl; tp->ops->walk(tp, &arg.w, true); } } + + return 0; +} + +static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, + unsigned long new_cl) +{ + const struct Qdisc_class_ops *cops = q->ops->cl_ops; + struct tc_bind_class_args args = {}; + + if (!cops->tcf_block) + return; + args.portid = portid; + args.clid = clid; + args.new_cl = new_cl; + args.w.fn = tc_bind_class_walker; + q->ops->cl_ops->walk(q, &args.w); } #else diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 53a80bc6b13a..2277369feae5 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1769,7 +1769,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, q->avg_window_begin)); u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; - do_div(b, window_interval); + b = div64_u64(b, window_interval); q->avg_peak_bandwidth = cake_ewma(q->avg_peak_bandwidth, b, b > q->avg_peak_bandwidth ? 2 : 8); @@ -2184,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = { [TCA_CAKE_MPU] = { .type = NLA_U32 }, [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, + [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 }, [TCA_CAKE_FWMARK] = { .type = NLA_U32 }, }; diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index b2905b03a432..2eaac2ff380f 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) s64 credits; int len; + /* The previous packet is still being sent */ + if (now < q->last) { + qdisc_watchdog_schedule_ns(&q->watchdog, q->last); + return NULL; + } if (q->credits < 0) { credits = timediff_to_credits(now - q->last, q->idleslope); @@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) credits += q->credits; q->credits = max_t(s64, credits, q->locredit); - q->last = now; + /* Estimate of the transmission of the last byte of the packet in ns */ + if (unlikely(atomic64_read(&q->port_rate) == 0)) + q->last = now; + else + q->last = now + div64_s64(len * NSEC_PER_SEC, + atomic64_read(&q->port_rate)); return skb; } diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 98dd87ce1510..f757ea90aba6 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) f->socket_hash != sk->sk_hash)) { f->credit = q->initial_quantum; f->socket_hash = sk->sk_hash; + if (q->rate_enable) + smp_store_release(&sk->sk_pacing_status, + SK_PACING_FQ); if (fq_flow_is_throttled(f)) fq_flow_unset_throttled(q, f); f->time_next_packet = 0ULL; @@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) fq_flow_set_detached(f); f->sk = sk; - if (skb->sk == sk) + if (skb->sk == sk) { f->socket_hash = sk->sk_hash; + if (q->rate_enable) + smp_store_release(&sk->sk_pacing_status, + SK_PACING_FQ); + } f->credit = q->initial_quantum; rb_link_node(&f->fq_node, parent, p); @@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, f->qlen++; qdisc_qstats_backlog_inc(sch, skb); if (fq_flow_is_detached(f)) { - struct sock *sk = skb->sk; - fq_flow_add_tail(&q->new_flows, f); if (time_after(jiffies, f->age + q->flow_refill_delay)) f->credit = max_t(u32, f->credit, q->quantum); - if (sk && q->rate_enable) { - if (unlikely(smp_load_acquire(&sk->sk_pacing_status) != - 
SK_PACING_FQ)) - smp_store_release(&sk->sk_pacing_status, - SK_PACING_FQ); - } q->inactive_flows--; } @@ -746,6 +745,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = { [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 }, [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 }, + [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 }, [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 }, [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 }, }; @@ -788,10 +788,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_FQ_QUANTUM]) { u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); - if (quantum > 0) + if (quantum > 0 && quantum <= (1 << 20)) { q->quantum = quantum; - else + } else { + NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); err = -EINVAL; + } } if (tb[TCA_FQ_INITIAL_QUANTUM]) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 8769b4b8807d..7c3c5fdb82a9 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -657,7 +657,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) if (likely(skb)) { qdisc_update_stats_at_dequeue(qdisc, skb); } else { - qdisc->empty = true; + WRITE_ONCE(qdisc->empty, true); } return skb; diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 0d578333e967..e79f1afe0cfd 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -153,6 +153,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) __gnet_stats_copy_queue(&sch->qstats, qdisc->cpu_qstats, &qdisc->qstats, qlen); + sch->q.qlen += qlen; } else { sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; @@ -245,7 +246,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct netdev_queue *dev_queue = mq_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || + if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats, + &sch->bstats) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; return 0; diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 46980b8d66c5..8766ab5b8788 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -411,6 +411,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) __gnet_stats_copy_queue(&sch->qstats, qdisc->cpu_qstats, &qdisc->qstats, qlen); + sch->q.qlen += qlen; } else { sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; @@ -433,7 +434,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) opt.offset[tc] = dev->tc_to_txq[tc].offset; } - if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt)) + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) goto nla_put_failure; if ((priv->flags & TC_MQPRIO_F_MODE) && @@ -557,8 +558,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &sch->bstats) < 0 || + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, + sch->cpu_bstats, &sch->bstats) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; } diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index b2b7fdb06fc6..1330ad224931 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -339,7 +339,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, cl_q = q->queues[cl - 1]; if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl_q->bstats) < 0 || + d, 
cl_q->cpu_bstats, &cl_q->bstats) < 0 || qdisc_qstats_copy(d, cl_q) < 0) return -1; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 0f8fedb8809a..647941702f9f 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -292,8 +292,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct tc_prio_qopt_offload graft_offload; unsigned long band = arg - 1; - if (new == NULL) - new = &noop_qdisc; + if (!new) { + new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + TC_H_MAKE(sch->handle, arg), extack); + if (!new) + new = &noop_qdisc; + else + qdisc_hash_add(new, true); + } *old = qdisc_replace(sch, new, &q->queues[band]); @@ -356,7 +362,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, cl_q = q->queues[cl - 1]; if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl_q->bstats) < 0 || + d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || qdisc_qstats_copy(d, cl_q) < 0) return -1; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index c609373c8661..b1eb12d33b9a 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(taprio_list_lock); #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) +#define TAPRIO_FLAGS_INVALID U32_MAX struct sched_entry { struct list_head list; @@ -563,8 +564,10 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch) prio = skb->priority; tc = netdev_get_prio_tc_map(dev, prio); - if (!(gate_mask & BIT(tc))) + if (!(gate_mask & BIT(tc))) { + skb = NULL; continue; + } len = qdisc_pkt_len(skb); guard = ktime_add_ns(taprio_get_time(q), @@ -574,13 +577,17 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch) * guard band ... */ if (gate_mask != TAPRIO_ALL_GATES_OPEN && - ktime_after(guard, entry->close_time)) + ktime_after(guard, entry->close_time)) { + skb = NULL; continue; + } /* ... and no budget. */ if (gate_mask != TAPRIO_ALL_GATES_OPEN && - atomic_sub_return(len, &entry->budget) < 0) + atomic_sub_return(len, &entry->budget) < 0) { + skb = NULL; continue; + } skb = child->ops->dequeue(child); if (unlikely(!skb)) @@ -766,6 +773,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, + [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 }, + [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, }; static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, @@ -1367,6 +1376,33 @@ static int taprio_mqprio_cmp(const struct net_device *dev, return 0; } +/* The semantics of the 'flags' argument in relation to 'change()' + * requests, are interpreted following two rules (which are applied in + * this order): (1) an omitted 'flags' argument is interpreted as + * zero; (2) the 'flags' of a "running" taprio instance cannot be + * changed. 
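+ * For example (an illustration of the two rules above, not new
+ * behaviour): if a running instance was created with full offload
+ * enabled, a later 'change' request that omits 'flags' is treated as
+ * flags == 0 and is rejected with -EOPNOTSUPP, because the flags of a
+ * running instance may not change.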
+ */ +static int taprio_new_flags(const struct nlattr *attr, u32 old, + struct netlink_ext_ack *extack) +{ + u32 new = 0; + + if (attr) + new = nla_get_u32(attr); + + if (old != TAPRIO_FLAGS_INVALID && old != new) { + NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); + return -EOPNOTSUPP; + } + + if (!taprio_flags_valid(new)) { + NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); + return -EINVAL; + } + + return new; +} + static int taprio_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { @@ -1375,7 +1411,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct tc_mqprio_qopt *mqprio = NULL; - u32 taprio_flags = 0; unsigned long flags; ktime_t start; int i, err; @@ -1388,21 +1423,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); - if (tb[TCA_TAPRIO_ATTR_FLAGS]) { - taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]); + err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS], + q->flags, extack); + if (err < 0) + return err; - if (q->flags != 0 && q->flags != taprio_flags) { - NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); - return -EOPNOTSUPP; - } else if (!taprio_flags_valid(taprio_flags)) { - NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); - return -EINVAL; - } + q->flags = err; - q->flags = taprio_flags; - } - - err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags); + err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); if (err < 0) return err; @@ -1444,7 +1472,20 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, taprio_set_picos_per_byte(dev, q); - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) + if (mqprio) { + netdev_set_num_tc(dev, mqprio->num_tc); + for (i = 0; i < mqprio->num_tc; i++) + netdev_set_tc_queue(dev, i, + mqprio->count[i], + mqprio->offset[i]); + + /* Always use supplied priority mappings */ + for (i = 0; i <= TC_BITMASK; i++) + netdev_set_prio_tc_map(dev, i, + mqprio->prio_tc_map[i]); + } + + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); else err = taprio_disable_offload(dev, q, extack); @@ -1464,27 +1505,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); } - if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) && - !FULL_OFFLOAD_IS_ENABLED(taprio_flags) && + if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && + !FULL_OFFLOAD_IS_ENABLED(q->flags) && !hrtimer_active(&q->advance_timer)) { hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); q->advance_timer.function = advance_sched; } - if (mqprio) { - netdev_set_num_tc(dev, mqprio->num_tc); - for (i = 0; i < mqprio->num_tc; i++) - netdev_set_tc_queue(dev, i, - mqprio->count[i], - mqprio->offset[i]); - - /* Always use supplied priority mappings */ - for (i = 0; i <= TC_BITMASK; i++) - netdev_set_prio_tc_map(dev, i, - mqprio->prio_tc_map[i]); - } - - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) { + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { q->dequeue = taprio_dequeue_offload; q->peek = taprio_peek_offload; } else { @@ -1501,9 +1529,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, goto unlock; } - if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) { - setup_txtime(q, new_admin, start); + setup_txtime(q, new_admin, start); + 
if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { if (!oper) { rcu_assign_pointer(q->oper_sched, new_admin); err = 0; @@ -1528,7 +1556,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, spin_unlock_irqrestore(&q->current_entry_lock, flags); - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) taprio_offload_config_changed(q); } @@ -1567,7 +1595,7 @@ static void taprio_destroy(struct Qdisc *sch) } q->qdiscs = NULL; - netdev_set_num_tc(dev, 0); + netdev_reset_tc(dev); if (q->oper_sched) call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); @@ -1597,6 +1625,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, * and get the valid one on taprio_change(). */ q->clockid = -1; + q->flags = TAPRIO_FLAGS_INVALID; spin_lock(&taprio_list_lock); list_add(&q->taprio_list, &taprio_list); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index d2ffc9a0ba3a..41839b85c268 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -64,6 +64,7 @@ static struct sctp_association *sctp_association_init( /* Discarding const is appropriate here. */ asoc->ep = (struct sctp_endpoint *)ep; asoc->base.sk = (struct sock *)sk; + asoc->base.net = sock_net(sk); sctp_endpoint_hold(asoc->ep); sock_hold(asoc->base.sk); diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 0851166b9175..ba9f64fdfd23 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -237,15 +237,11 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc) addrcnt++; return nla_total_size(sizeof(struct sctp_info)) - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ - + nla_total_size(1) /* INET_DIAG_TCLASS */ - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ + nla_total_size(addrlen * asoc->peer.transport_count) + nla_total_size(addrlen * addrcnt) - + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64; } diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index ea53049d1db6..3067deb0fbec 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -110,6 +110,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Remember who we are attached to. 
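 * (With this change the endpoint also caches its network namespace in
 * base.net; the SCTP rhashtable callbacks can then hash and compare on
 * asoc->base.net directly instead of going through sock_net(base.sk),
 * as the net/sctp/input.c hunks below show.)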
*/ ep->base.sk = sk; + ep->base.net = sock_net(sk); sock_hold(ep->base.sk); return ep; diff --git a/net/sctp/input.c b/net/sctp/input.c index 2277981559d0..4d2bcfc9d7f8 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -882,7 +882,7 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, if (!sctp_transport_hold(t)) return err; - if (!net_eq(sock_net(t->asoc->base.sk), x->net)) + if (!net_eq(t->asoc->base.net, x->net)) goto out; if (x->lport != htons(t->asoc->base.bind_addr.port)) goto out; @@ -897,7 +897,7 @@ static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed) { const struct sctp_transport *t = data; - return sctp_hashfn(sock_net(t->asoc->base.sk), + return sctp_hashfn(t->asoc->base.net, htons(t->asoc->base.bind_addr.port), &t->ipaddr, seed); } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index dd860fea0148..bc734cfaa29e 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -275,7 +275,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!asoc || saddr) goto out; @@ -328,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->saddr = laddr->a.v6.sin6_addr; fl6->fl6_sport = laddr->a.v6.sin6_port; final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); - bdst = ip6_dst_lookup_flow(sk, fl6, final_p); + bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(bdst)) continue; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 08d14d86ecfb..681ffb3545db 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -227,6 +227,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, sa->sin_port = sh->dest; sa->sin_addr.s_addr = ip_hdr(skb)->daddr; } + memset(sa->sin_zero, 0, sizeof(sa->sin_zero)); } /* Initialize an sctp_addr from a socket. */ @@ -235,6 +236,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) addr->v4.sin_family = AF_INET; addr->v4.sin_port = 0; addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr; + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); } /* Initialize sk->sk_rcv_saddr from sctp_addr. */ @@ -257,6 +259,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr, addr->v4.sin_family = AF_INET; addr->v4.sin_port = port; addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); } /* Initialize an address parameter from a sctp_addr and return the length @@ -281,6 +284,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4, saddr->v4.sin_family = AF_INET; saddr->v4.sin_port = port; saddr->v4.sin_addr.s_addr = fl4->saddr; + memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero)); } /* Compare two addresses exactly. */ @@ -303,6 +307,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port) addr->v4.sin_family = AF_INET; addr->v4.sin_addr.s_addr = htonl(INADDR_ANY); addr->v4.sin_port = port; + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); } /* Is this a wildcard address? */ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index e52b2128e43b..b06cae508158 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1358,8 +1358,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, /* Generate an INIT ACK chunk. 
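 * (Note the error-handling change in this function: instead of jumping
 * to a shared 'nomem' label, allocation failures now record
 * error = -ENOMEM and break, so the drain loop at the bottom of the
 * command interpreter can free any SCTP_CMD_REPLY chunks still queued.)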
*/ new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, 0); - if (!new_obj) - goto nomem; + if (!new_obj) { + error = -ENOMEM; + break; + } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); @@ -1381,7 +1383,8 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, if (!new_obj) { if (cmd->obj.chunk) sctp_chunk_free(cmd->obj.chunk); - goto nomem; + error = -ENOMEM; + break; } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); @@ -1428,8 +1431,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, /* Generate a SHUTDOWN chunk. */ new_obj = sctp_make_shutdown(asoc, chunk); - if (!new_obj) - goto nomem; + if (!new_obj) { + error = -ENOMEM; + break; + } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); break; @@ -1765,11 +1770,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, break; } - if (error) + if (error) { + cmd = sctp_next_cmd(commands); + while (cmd) { + if (cmd->verb == SCTP_CMD_REPLY) + sctp_chunk_free(cmd->obj.chunk); + cmd = sctp_next_cmd(commands); + } break; + } } -out: /* If this is in response to a received chunk, wait until * we are done with the packet to open the queue so that we don't * send multiple packets in response to a single request. @@ -1784,7 +1795,4 @@ out: sp->data_ready_signalled = 0; return error; -nomem: - error = -ENOMEM; - goto out; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 0c21c52fc408..c6d83a64eac3 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -170,6 +170,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, return true; } +/* Check for format error in an ABORT chunk */ +static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk) +{ + struct sctp_errhdr *err; + + sctp_walk_errors(err, chunk->chunk_hdr); + + return (void *)err == (void *)chunk->chunk_end; +} + /********************************************************** * These are the state functions for handling chunk events. **********************************************************/ @@ -2160,8 +2170,10 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook( /* Update socket peer label if first association. */ if (security_sctp_assoc_request((struct sctp_endpoint *)ep, - chunk->skb)) + chunk->skb)) { + sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } /* Set temp so that it won't be added into hashtable */ new_asoc->temp = 1; @@ -2253,6 +2265,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort( sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + if (!sctp_err_chunk_valid(chunk)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); } @@ -2296,6 +2311,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort( sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + if (!sctp_err_chunk_valid(chunk)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + /* Stop the T2-shutdown timer. 
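 * (This point is now reached only for ABORT chunks whose error causes
 * passed the sctp_err_chunk_valid() check added above; malformed
 * ABORTs are discarded via sctp_sf_pdiscard() instead.)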
*/ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); @@ -2563,6 +2581,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort( sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + if (!sctp_err_chunk_valid(chunk)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); } @@ -2580,16 +2601,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort( /* See if we have an error cause code in the chunk. */ len = ntohs(chunk->chunk_hdr->length); - if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) { - struct sctp_errhdr *err; - - sctp_walk_errors(err, chunk->chunk_hdr); - if ((void *)err != (void *)chunk->chunk_end) - return sctp_sf_pdiscard(net, ep, asoc, type, arg, - commands); - + if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) error = ((struct sctp_errhdr *)chunk->skb->data)->cause; - } sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); /* ASSOC_FAILED will DELETE_TCB. */ diff --git a/net/sctp/stream.c b/net/sctp/stream.c index e83cdaa2ab76..c1a100d2fed3 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -119,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, * a new one with new outcnt to save memory if needed. */ if (outcnt == stream->outcnt) - goto in; + goto handle_in; /* Filter out chunks queued on streams that won't exist anymore */ sched->unsched_all(stream); @@ -128,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, ret = sctp_stream_alloc_out(stream, outcnt, gfp); if (ret) - goto out; + goto out_err; for (i = 0; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; -in: +handle_in: sctp_stream_interleave_init(stream); if (!incnt) goto out; ret = sctp_stream_alloc_in(stream, incnt, gfp); - if (ret) { - sched->free(stream); - genradix_free(&stream->out); - stream->outcnt = 0; - goto out; - } + if (ret) + goto in_err; + goto out; + +in_err: + sched->free(stream); + genradix_free(&stream->in); +out_err: + genradix_free(&stream->out); + stream->outcnt = 0; out: return ret; } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 7235a6032671..3bbe1a58ec87 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) pf->af->from_sk(&addr, sk); pf->to_sk_daddr(&t->ipaddr, sk); - dst->ops->update_pmtu(dst, sk, NULL, pmtu); + dst->ops->update_pmtu(dst, sk, NULL, pmtu, true); pf->to_sk_daddr(&addr, sk); dst = sctp_transport_dst_check(t); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 737b49909a7a..dc09a72f8110 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -467,6 +467,8 @@ static void smc_switch_to_fallback(struct smc_sock *smc) if (smc->sk.sk_socket && smc->sk.sk_socket->file) { smc->clcsock->file = smc->sk.sk_socket->file; smc->clcsock->file->private_data = smc->clcsock; + smc->clcsock->wq.fasync_list = + smc->sk.sk_socket->wq.fasync_list; } } @@ -854,6 +856,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, goto out; sock_hold(&smc->sk); /* sock put in passive closing */ + if (smc->use_fallback) + goto out; if (flags & O_NONBLOCK) { if (schedule_work(&smc->connect_work)) smc->connect_nonblock = 1; @@ -1716,8 +1720,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, sk->sk_err = 
smc->clcsock->sk->sk_err; sk->sk_error_report(sk); } - if (rc) - return rc; if (optlen < sizeof(int)) return -EINVAL; @@ -1725,6 +1727,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; lock_sock(sk); + if (rc || smc->use_fallback) + goto out; switch (optname) { case TCP_ULP: case TCP_FASTOPEN: @@ -1736,15 +1740,14 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, smc_switch_to_fallback(smc); smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; } else { - if (!smc->use_fallback) - rc = -EINVAL; + rc = -EINVAL; } break; case TCP_NODELAY: if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_CLOSED) { - if (val && !smc->use_fallback) + if (val) mod_delayed_work(system_wq, &smc->conn.tx_work, 0); } @@ -1753,7 +1756,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_CLOSED) { - if (!val && !smc->use_fallback) + if (!val) mod_delayed_work(system_wq, &smc->conn.tx_work, 0); } @@ -1764,6 +1767,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, default: break; } +out: release_sock(sk); return rc; diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 49bcebff6378..aee9ccfa99c2 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -372,7 +372,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline)); dclc.hdr.version = SMC_CLC_V1; dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0; - memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid)); + if (smc->conn.lgr && !smc->conn.lgr->is_smcd) + memcpy(dclc.id_for_peer, local_systemid, + sizeof(local_systemid)); dclc.peer_diagnosis = htonl(peer_diag_info); memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2ba97ff325a5..0c5fcb8ed404 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -231,10 +231,12 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) lgr->conns_all = RB_ROOT; if (ini->is_smcd) { /* SMC-D specific settings */ + get_device(&ini->ism_dev->dev); lgr->peer_gid = ini->ism_gid; lgr->smcd = ini->ism_dev; } else { /* SMC-R specific settings */ + get_device(&ini->ib_dev->ibdev->dev); lgr->role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer, SMC_SYSTEMID_LEN); @@ -433,10 +435,13 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr) static void smc_lgr_free(struct smc_link_group *lgr) { smc_lgr_free_bufs(lgr); - if (lgr->is_smcd) + if (lgr->is_smcd) { smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); - else + put_device(&lgr->smcd->dev); + } else { smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); + put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev); + } kfree(lgr); } diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index f38727ecf8b2..e1f64f4ba236 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -39,16 +39,15 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk) { struct smc_sock *smc = smc_sk(sk); + memset(r, 0, sizeof(*r)); r->diag_family = sk->sk_family; + sock_diag_save_cookie(sk, r->id.idiag_cookie); if (!smc->clcsock) return; r->id.idiag_sport = htons(smc->clcsock->sk->sk_num); r->id.idiag_dport = smc->clcsock->sk->sk_dport; r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if; - sock_diag_save_cookie(sk, r->id.idiag_cookie); if (sk->sk_protocol == SMCPROTO_SMC) { - memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); - memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr; r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index d14ca4af6f94..d74a71dff5b8 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -560,12 +560,15 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) struct smc_ib_device *smcibdev; smcibdev = ib_get_client_data(ibdev, &smc_ib_client); + if (!smcibdev || smcibdev->ibdev != ibdev) + return; ib_set_client_data(ibdev, &smc_ib_client, NULL); spin_lock(&smc_ib_devices.lock); list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ spin_unlock(&smc_ib_devices.lock); smc_ib_cleanup_per_ibdev(smcibdev); ib_unregister_event_handler(&smcibdev->event_handler); + cancel_work_sync(&smcibdev->port_event_work); kfree(smcibdev); } diff --git a/net/socket.c b/net/socket.c index 6a9ab7a8b1d2..f5784503b865 100644 --- a/net/socket.c +++ b/net/socket.c @@ -104,6 +104,24 @@ #include #include +#define HOOK_FUN_NUM 4 +typedef void (*GET_INFO_HOOK_FUNC)(struct socket *sock, int fd, struct sockaddr_storage *address, int err); +static GET_INFO_HOOK_FUNC get_info_func[HOOK_FUN_NUM]; +enum get_infotype { CONNECT_INFO, ACCEPT_INFO, SENDTO_INFO, RECVFROM_INFO }; + +static DEFINE_PER_CPU(long, sendto_count) = { 0 }; +static DEFINE_PER_CPU(long, recvfrom_count) = { 0 }; +static DEFINE_PER_CPU(long, connect_count) = { 0 }; +static DEFINE_PER_CPU(long, accept_count) = { 0 }; + +extern int connect_info_flag; +extern int accept_info_flag; +extern int sendto_info_flag; +extern int recvfrom_info_flag; + +int hook_info_flag; +EXPORT_SYMBOL(hook_info_flag); + #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sysctl_net_busy_read __read_mostly; unsigned int sysctl_net_busy_poll __read_mostly; @@ -158,6 +176,41 @@ static const struct file_operations socket_file_ops = { static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; +long nr_hook_count(int flag) +{ + int cpu; + long total = 0; + flag &= 0x1111; + + for_each_possible_cpu(cpu) + { + if (flag & 0x0001) + total += per_cpu(connect_count, cpu); + if (flag & 0x0010) + total += per_cpu(accept_count, cpu); + if (flag & 0x0100) + total += 
per_cpu(sendto_count, cpu); + if (flag & 0x1000) + total += per_cpu(recvfrom_count, cpu); + } + return total; +} +EXPORT_SYMBOL(nr_hook_count); + +int set_get_info_func(GET_INFO_HOOK_FUNC func_addr[], int num) +{ + int i; + + if (num > HOOK_FUN_NUM || hook_info_flag != 0 || nr_hook_count(0x1111) != 0) + return -EINVAL; + + for (i = 0; i < num; i++) + get_info_func[i] = func_addr[i]; + + return 0; +} +EXPORT_SYMBOL(set_get_info_func); + /* * Support routines. * Move socket addresses back and forth across the kernel/user @@ -955,7 +1008,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to) .msg_iocb = iocb}; ssize_t res; - if (file->f_flags & O_NONBLOCK) + if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT)) msg.msg_flags = MSG_DONTWAIT; if (iocb->ki_pos != 0) @@ -980,7 +1033,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos != 0) return -ESPIPE; - if (file->f_flags & O_NONBLOCK) + if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT)) msg.msg_flags = MSG_DONTWAIT; if (sock->type == SOCK_SEQPACKET) @@ -1755,6 +1808,13 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, if (err < 0) goto out_fd; + if (accept_info_flag && hook_info_flag) { + __this_cpu_inc(accept_count); + if (get_info_func[ACCEPT_INFO] != NULL) + get_info_func[ACCEPT_INFO](newsock, newfd, NULL, err); + __this_cpu_dec(accept_count); + } + if (upeer_sockaddr) { len = newsock->ops->getname(newsock, (struct sockaddr *)&address, 2); @@ -1827,6 +1887,14 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen) err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, sock->file->f_flags); + + if (connect_info_flag && hook_info_flag) { + __this_cpu_inc(connect_count); + if (get_info_func[CONNECT_INFO] != NULL) + get_info_func[CONNECT_INFO](sock, fd, &address, err); + __this_cpu_dec(connect_count); + } + out_put: fput_light(sock->file, fput_needed); out: @@ -1951,6 +2019,13 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, msg.msg_flags = flags; err = sock_sendmsg(sock, &msg); + if (sendto_info_flag && hook_info_flag && err >= 0) { + __this_cpu_inc(sendto_count); + if (get_info_func[SENDTO_INFO] != NULL) + get_info_func[SENDTO_INFO](sock, fd, &address, err); + __this_cpu_dec(sendto_count); + } + out_put: fput_light(sock->file, fput_needed); out: @@ -2015,6 +2090,13 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags, err = err2; } + if (recvfrom_info_flag && hook_info_flag && err >= 0) { + __this_cpu_inc(recvfrom_count); + if (get_info_func[RECVFROM_INFO] != NULL) + get_info_func[RECVFROM_INFO](sock, fd, &address, err); + __this_cpu_dec(recvfrom_count); + } + fput_light(sock->file, fput_needed); out: return err; @@ -2232,15 +2314,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, return err < 0 ? 
err : 0; } -static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, - struct msghdr *msg_sys, unsigned int flags, - struct used_address *used_address, - unsigned int allowed_msghdr_flags) +static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys, + unsigned int flags, struct used_address *used_address, + unsigned int allowed_msghdr_flags) { - struct compat_msghdr __user *msg_compat = - (struct compat_msghdr __user *)msg; - struct sockaddr_storage address; - struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20] __aligned(sizeof(__kernel_size_t)); /* 20 is size of ipv6_pktinfo */ @@ -2248,19 +2325,10 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, int ctl_len; ssize_t err; - msg_sys->msg_name = &address; - - if (MSG_CMSG_COMPAT & flags) - err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov); - else - err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov); - if (err < 0) - return err; - err = -ENOBUFS; if (msg_sys->msg_controllen > INT_MAX) - goto out_freeiov; + goto out; flags |= (msg_sys->msg_flags & allowed_msghdr_flags); ctl_len = msg_sys->msg_controllen; if ((MSG_CMSG_COMPAT & flags) && ctl_len) { @@ -2268,7 +2336,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, sizeof(ctl)); if (err) - goto out_freeiov; + goto out; ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { @@ -2277,7 +2345,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, if (ctl_len > sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) - goto out_freeiov; + goto out; } err = -EFAULT; /* @@ -2323,7 +2391,47 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, out_freectl: if (ctl_buf != ctl) sock_kfree_s(sock->sk, ctl_buf, ctl_len); -out_freeiov: +out: + return err; +} + +static int sendmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct iovec **iov) +{ + int err; + + if (flags & MSG_CMSG_COMPAT) { + struct compat_msghdr __user *msg_compat; + + msg_compat = (struct compat_msghdr __user *) umsg; + err = get_compat_msghdr(msg, msg_compat, NULL, iov); + } else { + err = copy_msghdr_from_user(msg, umsg, NULL, iov); + } + if (err < 0) + return err; + + return 0; +} + +static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, + struct msghdr *msg_sys, unsigned int flags, + struct used_address *used_address, + unsigned int allowed_msghdr_flags) +{ + struct sockaddr_storage address; + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + ssize_t err; + + msg_sys->msg_name = &address; + + err = sendmsg_copy_msghdr(msg_sys, msg, flags, &iov); + if (err < 0) + return err; + + err = ____sys_sendmsg(sock, msg_sys, flags, used_address, + allowed_msghdr_flags); kfree(iov); return err; } @@ -2331,12 +2439,27 @@ out_freeiov: /* * BSD sendmsg interface */ -long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *msg, +long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *umsg, unsigned int flags) { - struct msghdr msg_sys; + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + struct sockaddr_storage address; + struct msghdr msg = { .msg_name = &address }; + ssize_t err; - return ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0); + err = sendmsg_copy_msghdr(&msg, umsg, flags, &iov); + if (err) + 
return err; + /* disallow ancillary data requests from this path */ + if (msg.msg_control || msg.msg_controllen) { + err = -EINVAL; + goto out; + } + + err = ____sys_sendmsg(sock, &msg, flags, NULL, 0); +out: + kfree(iov); + return err; } long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, @@ -2442,33 +2565,41 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, return __sys_sendmmsg(fd, mmsg, vlen, flags, true); } -static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, - struct msghdr *msg_sys, unsigned int flags, int nosec) +static int recvmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct sockaddr __user **uaddr, + struct iovec **iov) +{ + ssize_t err; + + if (MSG_CMSG_COMPAT & flags) { + struct compat_msghdr __user *msg_compat; + + msg_compat = (struct compat_msghdr __user *) umsg; + err = get_compat_msghdr(msg, msg_compat, uaddr, iov); + } else { + err = copy_msghdr_from_user(msg, umsg, uaddr, iov); + } + if (err < 0) + return err; + + return 0; +} + +static int ____sys_recvmsg(struct socket *sock, struct msghdr *msg_sys, + struct user_msghdr __user *msg, + struct sockaddr __user *uaddr, + unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = - (struct compat_msghdr __user *)msg; - struct iovec iovstack[UIO_FASTIOV]; - struct iovec *iov = iovstack; + (struct compat_msghdr __user *) msg; + int __user *uaddr_len = COMPAT_NAMELEN(msg); + struct sockaddr_storage addr; unsigned long cmsg_ptr; int len; ssize_t err; - /* kernel mode address */ - struct sockaddr_storage addr; - - /* user mode address pointers */ - struct sockaddr __user *uaddr; - int __user *uaddr_len = COMPAT_NAMELEN(msg); - msg_sys->msg_name = &addr; - - if (MSG_CMSG_COMPAT & flags) - err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov); - else - err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); - if (err < 0) - return err; - cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); @@ -2479,7 +2610,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags); if (err < 0) - goto out_freeiov; + goto out; len = err; if (uaddr != NULL) { @@ -2487,12 +2618,12 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) - goto out_freeiov; + goto out; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) - goto out_freeiov; + goto out; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); @@ -2500,10 +2631,25 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) - goto out_freeiov; + goto out; err = len; +out: + return err; +} -out_freeiov: +static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, + struct msghdr *msg_sys, unsigned int flags, int nosec) +{ + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + /* user mode address pointers */ + struct sockaddr __user *uaddr; + ssize_t err; + + err = recvmsg_copy_msghdr(msg_sys, msg, flags, &uaddr, &iov); + if (err < 0) + return err; + + err = ____sys_recvmsg(sock, msg_sys, msg, uaddr, flags, nosec); kfree(iov); return err; } @@ -2512,12 +2658,28 @@ out_freeiov: * BSD recvmsg interface */ -long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *msg, +long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *umsg, unsigned int flags) { - struct msghdr msg_sys; + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + struct sockaddr_storage address; + struct msghdr msg = { .msg_name = &address }; + struct sockaddr __user *uaddr; + ssize_t err; - return ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); + err = recvmsg_copy_msghdr(&msg, umsg, flags, &uaddr, &iov); + if (err) + return err; + /* disallow ancillary data requests from this path */ + if (msg.msg_control || msg.msg_controllen) { + err = -EINVAL; + goto out; + } + + err = ____sys_recvmsg(sock, &msg, umsg, uaddr, flags, 0); +out: + kfree(iov); + return err; } long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, @@ -3452,6 +3614,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCSARP: case SIOCGARP: case SIOCDARP: + case SIOCOUTQNSD: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 8be2f209982b..ed20fa8a6f70 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1075,24 +1075,32 @@ gss_read_verf(struct rpc_gss_wire_cred *gc, return 0; } -/* Ok this is really heavily depending on a set of semantics in - * how rqstp is set up by svc_recv and pages laid down by the - * server when reading a request. We are basically guaranteed that - * the token lays all down linearly across a set of pages, starting - * at iov_base in rq_arg.head[0] which happens to be the first of a - * set of pages stored in rq_pages[]. - * rq_arg.head[0].iov_base will provide us the page_base to pass - * to the upcall. 
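 * (The rewrite below drops this layout assumption: the incoming token
 * is copied into pages allocated in gss_read_proxy_verf() itself, so
 * it no longer has to lie linearly across rq_pages[].)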
- */ -static inline int -gss_read_proxy_verf(struct svc_rqst *rqstp, - struct rpc_gss_wire_cred *gc, __be32 *authp, - struct xdr_netobj *in_handle, - struct gssp_in_token *in_token) +static void gss_free_in_token_pages(struct gssp_in_token *in_token) +{ + u32 inlen; + int i; + + i = 0; + inlen = in_token->page_len; + while (inlen) { + if (in_token->pages[i]) + put_page(in_token->pages[i]); + inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen; + i++; /* advance to the next token page */ + } + + kfree(in_token->pages); + in_token->pages = NULL; +} + +static int gss_read_proxy_verf(struct svc_rqst *rqstp, + struct rpc_gss_wire_cred *gc, __be32 *authp, + struct xdr_netobj *in_handle, + struct gssp_in_token *in_token) { struct kvec *argv = &rqstp->rq_arg.head[0]; - u32 inlen; - int res; + unsigned int page_base, length; + int pages, i, res; + size_t inlen; res = gss_read_common_verf(gc, argv, authp, in_handle); if (res) @@ -1102,10 +1110,36 @@ gss_read_proxy_verf, if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) return SVC_DENIED; - in_token->pages = rqstp->rq_pages; - in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK; + pages = DIV_ROUND_UP(inlen, PAGE_SIZE); + in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL); + if (!in_token->pages) + return SVC_DENIED; + in_token->page_base = 0; in_token->page_len = inlen; + for (i = 0; i < pages; i++) { + in_token->pages[i] = alloc_page(GFP_KERNEL); + if (!in_token->pages[i]) { + gss_free_in_token_pages(in_token); + return SVC_DENIED; + } + } + length = min_t(unsigned int, inlen, argv->iov_len); + memcpy(page_address(in_token->pages[0]), argv->iov_base, length); + inlen -= length; + + i = 1; + page_base = rqstp->rq_arg.page_base; + while (inlen) { + length = min_t(unsigned int, inlen, PAGE_SIZE); + memcpy(page_address(in_token->pages[i]), + page_address(rqstp->rq_arg.pages[i]) + page_base, + length); + + inlen -= length; + page_base = 0; + i++; + } return 0; } @@ -1211,6 +1245,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd, dprintk("RPC: No creds found!\n"); goto out; } else { + struct timespec64 boot; /* steal creds */ rsci.cred = ud->creds; @@ -1231,6 +1266,9 @@ static int gss_proxy_save_rsc(struct cache_detail *cd, &expiry, GFP_KERNEL); if (status) goto out; + + getboottime64(&boot); + expiry -= boot.tv_sec; } rsci.h.expiry_time = expiry; @@ -1280,8 +1318,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp, break; case GSS_S_COMPLETE: status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle); - if (status) + if (status) { + pr_info("%s: gss_proxy_save_rsc failed (%d)\n", + __func__, status); goto out; + } cli_handle.data = (u8 *)&handle; cli_handle.len = sizeof(handle); break; @@ -1292,15 +1333,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp, /* Got an answer to the upcall; use it: */ if (gss_write_init_verf(sn->rsc_cache, rqstp, - &cli_handle, &ud.major_status)) + &cli_handle, &ud.major_status)) { + pr_info("%s: gss_write_init_verf failed\n", __func__); goto out; + } if (gss_write_resv(resv, PAGE_SIZE, &cli_handle, &ud.out_token, - ud.major_status, ud.minor_status)) + ud.major_status, ud.minor_status)) { + pr_info("%s: gss_write_resv failed\n", __func__); goto out; + } ret = SVC_COMPLETE; out: + gss_free_in_token_pages(&ud.in_token); gssp_free_upcall_data(&ud); return ret; } diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index a349094f6fb7..7ede1e52fd81 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -53,9 +53,6 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now; } -static inline int cache_is_valid(struct cache_head *h); -static void cache_fresh_locked(struct cache_head *head, time_t expiry, - struct cache_detail *detail); static void cache_fresh_unlocked(struct cache_head *head, struct cache_detail *detail); @@ -105,9 +102,6 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, if (cache_is_expired(detail, tmp)) { hlist_del_init_rcu(&tmp->cache_list); detail->entries --; - if (cache_is_valid(tmp) == -EAGAIN) - set_bit(CACHE_NEGATIVE, &tmp->flags); - cache_fresh_locked(tmp, 0, detail); freeme = tmp; break; } @@ -1894,7 +1888,9 @@ void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h) if (!hlist_unhashed(&h->cache_list)){ hlist_del_init_rcu(&h->cache_list); cd->entries--; + set_bit(CACHE_CLEANED, &h->flags); spin_unlock(&cd->hash_lock); + cache_fresh_unlocked(h, cd); cache_put(h, cd); } else spin_unlock(&cd->hash_lock); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 360afe153193..987c4b1f0b17 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -260,7 +260,7 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c rpc_reset_waitqueue_priority(queue); queue->qlen = 0; queue->timer_list.expires = 0; - INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn); + INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn); INIT_LIST_HEAD(&queue->timer_list.list); rpc_assign_waitqueue_name(queue, qname); } diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 14ba9e72a204..f3104be8ff5d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -436,13 +436,12 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len) } /** - * xdr_shrink_pagelen + * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes * @buf: xdr_buf * @len: bytes to remove from buf->pages * - * Shrinks XDR buffer's page array buf->pages by - * 'len' bytes. The extra data is not lost, but is instead - * moved into the tail. + * The extra data is not lost, but is instead moved into buf->tail. + * Returns the actual number of bytes moved. 
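+ * A request to shrink by more than buf->page_len is clamped to
+ * buf->page_len rather than hitting the old BUG_ON(), so callers can
+ * see a shorter count than they asked for.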
*/ static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, size_t len) @@ -455,8 +454,8 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len) result = 0; tail = buf->tail; - BUG_ON (len > pglen); - + if (len > buf->page_len) + len = buf->page_len; tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len; /* Shift the tail first */ diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 30065a28628c..0ad45a8fe3fb 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -326,8 +326,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, { struct rpcrdma_ia *ia = &r_xprt->rx_ia; struct ib_reg_wr *reg_wr; + int i, n, dma_nents; struct ib_mr *ibmr; - int i, n; u8 key; if (nsegs > ia->ri_max_frwr_depth) @@ -351,15 +351,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, break; } mr->mr_dir = rpcrdma_data_dir(writing); + mr->mr_nents = i; - mr->mr_nents = - ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir); - if (!mr->mr_nents) + dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents, + mr->mr_dir); + if (!dma_nents) goto out_dmamap_err; ibmr = mr->frwr.fr_mr; - n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE); - if (unlikely(n != mr->mr_nents)) + n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE); + if (n != dma_nents) goto out_mapmr_err; ibmr->iova &= 0x00000000ffffffff; @@ -570,7 +571,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) */ bad_wr = NULL; rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr); - trace_xprtrdma_post_send(req, rc); /* The final LOCAL_INV WR in the chain is supposed to * do the wake. If it was never posted, the wake will @@ -583,6 +583,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) /* Recycle MRs in the LOCAL_INV chain that did not get posted.
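 * (The tracepoint is also moved here: trace_xprtrdma_post_send is
 * replaced by trace_xprtrdma_post_linv next to the recycle loop, in
 * both the sync and async unmap paths, so the ib_post_send() result is
 * recorded in one place.)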
*/ + trace_xprtrdma_post_linv(req, rc); while (bad_wr) { frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr); mr = container_of(frwr, struct rpcrdma_mr, frwr); diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index b86b5fd62d9f..ef5102b60589 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1362,6 +1362,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) xprt->cwnd = credits << RPC_CWNDSHIFT; spin_unlock(&xprt->transport_lock); } + rpcrdma_post_recvs(r_xprt, false); req = rpcr_to_rdmar(rqst); if (req->rl_reply) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index d1fcc41d5eb5..908e78bb87c6 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -195,6 +195,7 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer); #endif + rqst->rq_xtime = ktime_get(); rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); if (rc) { svc_rdma_send_ctxt_put(rdma, ctxt); diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 160558b4135e..c67d465dc062 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -428,8 +428,11 @@ void xprt_rdma_close(struct rpc_xprt *xprt) /* Prepare @xprt for the next connection by reinitializing * its credit grant to one (see RFC 8166, Section 3.3.3). */ + spin_lock(&xprt->transport_lock); r_xprt->rx_buf.rb_credits = 1; + xprt->cong = 0; xprt->cwnd = RPC_CWNDSHIFT; + spin_unlock(&xprt->transport_lock); out: xprt->reestablish_timeout = 0; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 3a907537e2cf..0f4d39fdb48f 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -75,16 +75,15 @@ * internal functions */ static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc); -static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf); +static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt); +static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt); static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt); static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf); -static void rpcrdma_mr_free(struct rpcrdma_mr *mr); static struct rpcrdma_regbuf * rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, gfp_t flags); static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb); static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb); -static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp); /* Wait for outstanding transport work to finish. 
ib_drain_qp * handles the drains in the wrong order for us, so open code @@ -170,7 +169,6 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) rdmab_addr(rep->rr_rdmabuf), wc->byte_len, DMA_FROM_DEVICE); - rpcrdma_post_recvs(r_xprt, false); rpcrdma_reply_handler(rep); return; @@ -247,6 +245,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) ia->ri_id->device->name, rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt)); #endif + init_completion(&ia->ri_remove_done); set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags); ep->rep_connected = -ENODEV; xprt_force_disconnect(xprt); @@ -301,7 +300,6 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia) trace_xprtrdma_conn_start(xprt); init_completion(&ia->ri_done); - init_completion(&ia->ri_remove_done); id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler, xprt, RDMA_PS_TCP, IB_QPT_RC); @@ -431,7 +429,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia) /* The ULP is responsible for ensuring all DMA * mappings and MRs are gone. */ - rpcrdma_reps_destroy(buf); + rpcrdma_reps_unmap(r_xprt); list_for_each_entry(req, &buf->rb_allreqs, rl_all) { rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf); rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); @@ -609,6 +607,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, struct ib_qp_init_attr *qp_init_attr) { struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct rpcrdma_ep *ep = &r_xprt->rx_ep; int rc, err; trace_xprtrdma_reinsert(r_xprt); @@ -623,6 +622,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err); goto out2; } + memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr)); rc = -ENETUNREACH; err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr); @@ -780,6 +780,7 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) trace_xprtrdma_disconnect(r_xprt, rc); rpcrdma_xprt_drain(r_xprt); + rpcrdma_reqs_reset(r_xprt); } /* Fixed-size circular FIFO queue. This implementation is wait-free and @@ -965,7 +966,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) mr->mr_xprt = r_xprt; spin_lock(&buf->rb_lock); - list_add(&mr->mr_list, &buf->rb_mrs); + rpcrdma_mr_push(mr, &buf->rb_mrs); list_add(&mr->mr_all, &buf->rb_all_mrs); spin_unlock(&buf->rb_lock); } @@ -1042,6 +1043,26 @@ out1: return NULL; } +/** + * rpcrdma_reqs_reset - Reset all reqs owned by a transport + * @r_xprt: controlling transport instance + * + * ASSUMPTION: the rb_allreqs list is stable for the duration, + * and thus can be walked without holding rb_lock. Eg. the + * caller is holding the transport send lock to exclude + * device removal or disconnection. 
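+ * (Called from rpcrdma_ep_disconnect() after draining the QP, so that
+ * per-request congestion state (rq_cong) from the old connection does
+ * not leak into the next one.)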
+ */ +static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct rpcrdma_req *req; + + list_for_each_entry(req, &buf->rb_allreqs, rl_all) { + /* Credits are valid only for one connection */ + req->rl_slot.rq_cong = 0; + } +} + static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp) { @@ -1065,6 +1086,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; rep->rr_recv_wr.num_sge = 1; rep->rr_temp = temp; + list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps); return rep; out_free: @@ -1075,6 +1097,7 @@ out: static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep) { + list_del(&rep->rr_all); rpcrdma_regbuf_free(rep->rr_rdmabuf); kfree(rep); } @@ -1093,10 +1116,16 @@ static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf) static void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep) { - if (!rep->rr_temp) - llist_add(&rep->rr_node, &buf->rb_free_reps); - else - rpcrdma_rep_destroy(rep); + llist_add(&rep->rr_node, &buf->rb_free_reps); +} + +static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct rpcrdma_rep *rep; + + list_for_each_entry(rep, &buf->rb_all_reps, rr_all) + rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf); } static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf) @@ -1129,6 +1158,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) INIT_LIST_HEAD(&buf->rb_send_bufs); INIT_LIST_HEAD(&buf->rb_allreqs); + INIT_LIST_HEAD(&buf->rb_all_reps); rc = -ENOMEM; for (i = 0; i < buf->rb_max_requests; i++) { @@ -1163,10 +1193,19 @@ out: */ void rpcrdma_req_destroy(struct rpcrdma_req *req) { + struct rpcrdma_mr *mr; + list_del(&req->rl_all); - while (!list_empty(&req->rl_free_mrs)) - rpcrdma_mr_free(rpcrdma_mr_pop(&req->rl_free_mrs)); + while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) { + struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; + + spin_lock(&buf->rb_lock); + list_del(&mr->mr_all); + spin_unlock(&buf->rb_lock); + + frwr_release_mr(mr); + } rpcrdma_regbuf_free(req->rl_recvbuf); rpcrdma_regbuf_free(req->rl_sendbuf); @@ -1174,24 +1213,28 @@ void rpcrdma_req_destroy(struct rpcrdma_req *req) kfree(req); } -static void -rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) +/** + * rpcrdma_mrs_destroy - Release all of a transport's MRs + * @buf: controlling buffer instance + * + * Relies on caller holding the transport send lock to protect + * removing mr->mr_list from req->rl_free_mrs safely. 
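+ * (rb_lock is dropped around each frwr_release_mr() call, presumably
+ * because releasing an MR can sleep; each MR is unlinked from both
+ * rb_all_mrs and its mr_list while the lock is still held.)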
+ */ +static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) { struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf); struct rpcrdma_mr *mr; - unsigned int count; - count = 0; spin_lock(&buf->rb_lock); while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, struct rpcrdma_mr, mr_all)) != NULL) { + list_del(&mr->mr_list); list_del(&mr->mr_all); spin_unlock(&buf->rb_lock); frwr_release_mr(mr); - count++; spin_lock(&buf->rb_lock); } spin_unlock(&buf->rb_lock); @@ -1264,17 +1307,6 @@ void rpcrdma_mr_put(struct rpcrdma_mr *mr) rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); } -static void rpcrdma_mr_free(struct rpcrdma_mr *mr) -{ - struct rpcrdma_xprt *r_xprt = mr->mr_xprt; - struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - - mr->mr_req = NULL; - spin_lock(&buf->rb_lock); - rpcrdma_mr_push(mr, &buf->rb_mrs); - spin_unlock(&buf->rb_lock); -} - /** * rpcrdma_buffer_get - Get a request buffer * @buffers: Buffer pool from which to obtain a buffer @@ -1455,8 +1487,13 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia, return 0; } -static void -rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) +/** + * rpcrdma_post_recvs - Refill the Receive Queue + * @r_xprt: controlling transport instance + * @temp: mark Receive buffers to be deleted after use + * + */ +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) { struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_ep *ep = &r_xprt->rx_ep; @@ -1478,6 +1515,10 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) wr = NULL; while (needed) { rep = rpcrdma_rep_get_locked(buf); + if (rep && rep->rr_temp) { + rpcrdma_rep_destroy(rep); + continue; + } if (!rep) rep = rpcrdma_rep_create(r_xprt, temp); if (!rep) diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 65e6b0eb862e..fc761679487c 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -203,6 +203,7 @@ struct rpcrdma_rep { struct xdr_stream rr_stream; struct llist_node rr_node; struct ib_recv_wr rr_recv_wr; + struct list_head rr_all; }; /* To reduce the rate at which a transport invokes ib_post_recv @@ -372,6 +373,7 @@ struct rpcrdma_buffer { struct list_head rb_allreqs; struct list_head rb_all_mrs; + struct list_head rb_all_reps; struct llist_head rb_free_reps; @@ -474,6 +476,7 @@ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *); int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *, struct rpcrdma_req *); +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp); /* * Buffer calls - xprtrdma/verbs.c diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 70e52f567b2a..5361b98f31ae 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2659,6 +2659,8 @@ static int bc_sendto(struct rpc_rqst *req) .iov_len = sizeof(marker), }; + req->rq_xtime = ktime_get(); + len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len); if (len != iov.iov_len) return -EAGAIN; @@ -2684,7 +2686,6 @@ static int bc_send_request(struct rpc_rqst *req) struct svc_xprt *xprt; int len; - dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); /* * Get the server socket associated with this callback xprt */ diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 6ef1abdd525f..885ecf6ea65a 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -305,17 +305,17 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, * @skb: socket buffer to copy * @method: send method to be used * @dests: destination nodes for message. 
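 * (@cong_link_cnt is removed from the parameter list below: the count
 * is now kept in a local variable, and the function returns the
 * transmit result instead of always returning 0.)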
- * @cong_link_cnt: returns number of encountered congested destination links * Returns 0 if success, otherwise errno */ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, struct tipc_mc_method *method, - struct tipc_nlist *dests, - u16 *cong_link_cnt) + struct tipc_nlist *dests) { struct tipc_msg *hdr, *_hdr; struct sk_buff_head tmpq; struct sk_buff *_skb; + u16 cong_link_cnt; + int rc = 0; /* Is a cluster supporting with new capabilities ? */ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL)) @@ -343,18 +343,19 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, _hdr = buf_msg(_skb); msg_set_size(_hdr, MCAST_H_SIZE); msg_set_is_rcast(_hdr, !msg_is_rcast(hdr)); + msg_set_errcode(_hdr, TIPC_ERR_NO_PORT); __skb_queue_head_init(&tmpq); __skb_queue_tail(&tmpq, _skb); if (method->rcast) - tipc_bcast_xmit(net, &tmpq, cong_link_cnt); + rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt); else - tipc_rcast_xmit(net, &tmpq, dests, cong_link_cnt); + rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt); /* This queue should normally be empty by now */ __skb_queue_purge(&tmpq); - return 0; + return rc; } /* tipc_mcast_xmit - deliver message to indicated destination nodes @@ -396,9 +397,14 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, msg_set_is_rcast(hdr, method->rcast); /* Switch method ? */ - if (rcast != method->rcast) - tipc_mcast_send_sync(net, skb, method, - dests, cong_link_cnt); + if (rcast != method->rcast) { + rc = tipc_mcast_send_sync(net, skb, method, dests); + if (unlikely(rc)) { + pr_err("Unable to send SYN: method %d, rc %d\n", + rcast, rc); + goto exit; + } + } if (method->rcast) rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt); diff --git a/net/tipc/core.c b/net/tipc/core.c index 8f35060a24e1..12192e7f4050 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -125,14 +125,6 @@ static int __init tipc_init(void) sysctl_tipc_rmem[1] = RCVBUF_DEF; sysctl_tipc_rmem[2] = RCVBUF_MAX; - err = tipc_netlink_start(); - if (err) - goto out_netlink; - - err = tipc_netlink_compat_start(); - if (err) - goto out_netlink_compat; - err = tipc_register_sysctl(); if (err) goto out_sysctl; @@ -153,8 +145,21 @@ static int __init tipc_init(void) if (err) goto out_bearer; + err = tipc_netlink_start(); + if (err) + goto out_netlink; + + err = tipc_netlink_compat_start(); + if (err) + goto out_netlink_compat; + pr_info("Started in single node mode\n"); return 0; + +out_netlink_compat: + tipc_netlink_stop(); +out_netlink: + tipc_bearer_cleanup(); out_bearer: unregister_pernet_device(&tipc_topsrv_net_ops); out_pernet_topsrv: @@ -164,22 +169,18 @@ out_socket: out_pernet: tipc_unregister_sysctl(); out_sysctl: - tipc_netlink_compat_stop(); -out_netlink_compat: - tipc_netlink_stop(); -out_netlink: pr_err("Unable to start in single node mode\n"); return err; } static void __exit tipc_exit(void) { + tipc_netlink_compat_stop(); + tipc_netlink_stop(); tipc_bearer_cleanup(); unregister_pernet_device(&tipc_topsrv_net_ops); tipc_socket_stop(); unregister_pernet_device(&tipc_net_ops); - tipc_netlink_stop(); - tipc_netlink_compat_stop(); tipc_unregister_sysctl(); pr_info("Deactivated\n"); diff --git a/net/tipc/link.c b/net/tipc/link.c index 999eab592de8..a9d8a81e80cf 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1084,7 +1084,7 @@ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, return false; if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp + - msecs_to_jiffies(r->tolerance))) + msecs_to_jiffies(r->tolerance * 
10))) return false; hdr = buf_msg(skb); diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 6a6eae88442f..58708b4c7719 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -665,6 +665,21 @@ void tipc_mon_delete(struct net *net, int bearer_id) kfree(mon); } +void tipc_mon_reinit_self(struct net *net) +{ + struct tipc_monitor *mon; + int bearer_id; + + for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { + mon = tipc_monitor(net, bearer_id); + if (!mon) + continue; + write_lock_bh(&mon->lock); + mon->self->addr = tipc_own_addr(net); + write_unlock_bh(&mon->lock); + } +} + int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size) { struct tipc_net *tn = tipc_net(net); diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h index 2a21b93e0d04..ed63d2e650b0 100644 --- a/net/tipc/monitor.h +++ b/net/tipc/monitor.h @@ -77,6 +77,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg, u32 bearer_id); int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg, u32 bearer_id, u32 *prev_node); +void tipc_mon_reinit_self(struct net *net); extern const int tipc_max_domain_size; #endif diff --git a/net/tipc/net.c b/net/tipc/net.c index 85707c185360..2de3cec9929d 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -42,6 +42,7 @@ #include "node.h" #include "bcast.h" #include "netlink.h" +#include "monitor.h" /* * The TIPC locking policy is designed to ensure a very fine locking @@ -136,6 +137,7 @@ static void tipc_net_finalize(struct net *net, u32 addr) tipc_set_node_addr(net, addr); tipc_named_reinit(net); tipc_sk_reinit(net); + tipc_mon_reinit_self(net); tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, TIPC_CLUSTER_SCOPE, 0, addr); } diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index d6165ad384c0..e9bbf4a00881 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -111,6 +111,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 }, [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 }, [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }, + [TIPC_NLA_PROP_MTU] = { .type = NLA_U32 }, [TIPC_NLA_PROP_BROADCAST] = { .type = NLA_U32 }, [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 } }; diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index e135d4e11231..d4d2928424e2 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -550,7 +550,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, if (len <= 0) return -EINVAL; - len = min_t(int, len, TIPC_MAX_BEARER_NAME); + len = min_t(int, len, TIPC_MAX_LINK_NAME); if (!string_is_valid(name, len)) return -EINVAL; @@ -822,7 +822,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, if (len <= 0) return -EINVAL; - len = min_t(int, len, TIPC_MAX_BEARER_NAME); + len = min_t(int, len, TIPC_MAX_LINK_NAME); if (!string_is_valid(name, len)) return -EINVAL; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 4b92b196cfa6..aea951a1f805 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -504,7 +504,7 @@ static void __tipc_shutdown(struct socket *sock, int error) struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); struct net *net = sock_net(sk); - long timeout = CONN_TIMEOUT_DEFAULT; + long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT); u32 dnode = tsk_peer_node(tsk); struct sk_buff *skb; @@ -1306,8 +1306,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) struct tipc_msg *hdr = &tsk->phdr; struct tipc_name_seq *seq; 
struct sk_buff_head pkts; - u32 dport, dnode = 0; - u32 type, inst; + u32 dport = 0, dnode = 0; + u32 type = 0, inst = 0; int mtu, rc; if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) @@ -1360,23 +1360,11 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) type = dest->addr.name.name.type; inst = dest->addr.name.name.instance; dnode = dest->addr.name.domain; - msg_set_type(hdr, TIPC_NAMED_MSG); - msg_set_hdr_sz(hdr, NAMED_H_SIZE); - msg_set_nametype(hdr, type); - msg_set_nameinst(hdr, inst); - msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); dport = tipc_nametbl_translate(net, type, inst, &dnode); - msg_set_destnode(hdr, dnode); - msg_set_destport(hdr, dport); if (unlikely(!dport && !dnode)) return -EHOSTUNREACH; } else if (dest->addrtype == TIPC_ADDR_ID) { dnode = dest->addr.id.node; - msg_set_type(hdr, TIPC_DIRECT_MSG); - msg_set_lookup_scope(hdr, 0); - msg_set_destnode(hdr, dnode); - msg_set_destport(hdr, dest->addr.id.ref); - msg_set_hdr_sz(hdr, BASIC_H_SIZE); } else { return -EINVAL; } @@ -1387,13 +1375,31 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) if (unlikely(rc)) return rc; + if (dest->addrtype == TIPC_ADDR_NAME) { + msg_set_type(hdr, TIPC_NAMED_MSG); + msg_set_hdr_sz(hdr, NAMED_H_SIZE); + msg_set_nametype(hdr, type); + msg_set_nameinst(hdr, inst); + msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); + msg_set_destnode(hdr, dnode); + msg_set_destport(hdr, dport); + } else { /* TIPC_ADDR_ID */ + msg_set_type(hdr, TIPC_DIRECT_MSG); + msg_set_lookup_scope(hdr, 0); + msg_set_destnode(hdr, dnode); + msg_set_destport(hdr, dest->addr.id.ref); + msg_set_hdr_sz(hdr, BASIC_H_SIZE); + } + __skb_queue_head_init(&pkts); mtu = tipc_node_get_mtu(net, dnode, tsk->portid); rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); if (unlikely(rc != dlen)) return rc; - if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) + if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) { + __skb_queue_purge(&pkts); return -ENOMEM; + } trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " "); rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); @@ -2681,6 +2687,7 @@ static void tipc_sk_timeout(struct timer_list *t) if (sock_owned_by_user(sk)) { sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); bh_unlock_sock(sk); + sock_put(sk); return; } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 287df68721df..186c78431217 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -195,10 +195,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, .saddr = src->ipv6, .flowi6_proto = IPPROTO_UDP }; - err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, - &ndst, &fl6); - if (err) + ndst = ipv6_stub->ipv6_dst_lookup_flow(net, + ub->ubsock->sk, + &fl6, NULL); + if (IS_ERR(ndst)) { + err = PTR_ERR(ndst); goto tx_error; + } dst_cache_set_ip6(cache, ndst, &fl6.saddr); } ttl = ip6_dst_hoplimit(ndst); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 683d00837693..1adeb1c0473b 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -417,7 +417,7 @@ static int tls_push_data(struct sock *sk, if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (sk->sk_err) return -sk->sk_err; @@ -560,7 +560,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, lock_sock(sk); if (flags & MSG_OOB) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out; } @@ -581,7 +581,7 @@ struct tls_record_info *tls_get_record(struct 
tls_offload_context_tx *context, u32 seq, u64 *p_record_sn) { u64 record_sn = context->hint_record_sn; - struct tls_record_info *info; + struct tls_record_info *info, *last; info = context->retransmit_hint; if (!info || @@ -593,6 +593,24 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, struct tls_record_info, list); if (!info) return NULL; + /* send the start_marker record if the seq number is before the + * tls offload start marker sequence number. This record is + * required to handle TCP packets sent before TLS offload + * started. + * If it is not the start marker, check whether this seq number + * belongs to the list. + */ + if (likely(!tls_record_is_start_marker(info))) { + /* we have the first record, get the last record to see + * if this seq number belongs to the list. + */ + last = list_last_entry(&context->records_list, + struct tls_record_info, list); + + if (!between(seq, tls_record_start_seq(info), + last->end_seq)) + return NULL; + } record_sn = context->unacked_record_sn; } @@ -999,7 +1017,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) } if (!(netdev->features & NETIF_F_HW_TLS_TX)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto release_netdev; } @@ -1071,7 +1089,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) } if (!(netdev->features & NETIF_F_HW_TLS_RX)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto release_netdev; } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index f874cc0da45d..7aba4ee77aba 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -209,24 +209,15 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, return tls_push_sg(sk, ctx, sg, offset, flags); } -bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx) +void tls_free_partial_record(struct sock *sk, struct tls_context *ctx) { struct scatterlist *sg; - sg = ctx->partially_sent_record; - if (!sg) - return false; - - while (1) { + for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) { put_page(sg_page(sg)); sk_mem_uncharge(sk, sg->length); - - if (sg_is_last(sg)) - break; - sg++; } ctx->partially_sent_record = NULL; - return true; } static void tls_write_space(struct sock *sk) @@ -491,7 +482,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, /* check version */ if (crypto_info->version != TLS_1_2_VERSION && crypto_info->version != TLS_1_3_VERSION) { - rc = -ENOTSUPP; + rc = -EINVAL; goto err_crypto_info; } @@ -787,7 +778,7 @@ static int tls_init(struct sock *sk) * share the ulp context. 
*/ if (sk->sk_state != TCP_ESTABLISHED) - return -ENOTSUPP; + return -ENOTCONN; tls_build_proto(sk); @@ -807,15 +798,19 @@ out: return rc; } -static void tls_update(struct sock *sk, struct proto *p) +static void tls_update(struct sock *sk, struct proto *p, + void (*write_space)(struct sock *sk)) { struct tls_context *ctx; ctx = tls_get_ctx(sk); - if (likely(ctx)) + if (likely(ctx)) { + ctx->sk_write_space = write_space; ctx->sk_proto = p; - else + } else { sk->sk_prot = p; + sk->sk_write_space = write_space; + } } static int tls_get_info(const struct sock *sk, struct sk_buff *skb) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 319735d5c084..41e9c2932b34 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -677,12 +677,32 @@ static int tls_push_record(struct sock *sk, int flags, split_point = msg_pl->apply_bytes; split = split_point && split_point < msg_pl->sg.size; + if (unlikely((!split && + msg_pl->sg.size + + prot->overhead_size > msg_en->sg.size) || + (split && + split_point + + prot->overhead_size > msg_en->sg.size))) { + split = true; + split_point = msg_en->sg.size; + } if (split) { rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en, split_point, prot->overhead_size, &orig_end); if (rc < 0) return rc; + /* This can happen if above tls_split_open_record allocates + * a single large encryption buffer instead of two smaller + * ones. In this case adjust pointers and continue without + * split. + */ + if (!msg_pl->sg.size) { + tls_merge_open_record(sk, rec, tmp, orig_end); + msg_pl = &rec->msg_plaintext; + msg_en = &rec->msg_encrypted; + split = false; + } sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size); } @@ -704,9 +724,14 @@ static int tls_push_record(struct sock *sk, int flags, sg_mark_end(sk_msg_elem(msg_pl, i)); } + if (msg_pl->sg.end < msg_pl->sg.start) { + sg_chain(&msg_pl->sg.data[msg_pl->sg.start], + MAX_SKB_FRAGS - msg_pl->sg.start + 1, + msg_pl->sg.data); + } + i = msg_pl->sg.start; - sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ? 
- &msg_en->sg.data[i] : &msg_pl->sg.data[i]); + sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]); i = msg_en->sg.end; sk_msg_iter_var_prev(i); @@ -766,17 +791,20 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, policy = !(flags & MSG_SENDPAGE_NOPOLICY); psock = sk_psock_get(sk); - if (!psock || !policy) - return tls_push_record(sk, flags, record_type); + if (!psock || !policy) { + err = tls_push_record(sk, flags, record_type); + if (err && err != -EINPROGRESS) { + *copied -= sk_msg_free(sk, msg); + tls_free_open_rec(sk); + } + return err; + } more_data: enospc = sk_msg_full(msg); if (psock->eval == __SK_NONE) { delta = msg->sg.size; psock->eval = sk_psock_msg_verdict(sk, psock, msg); - if (delta < msg->sg.size) - delta -= msg->sg.size; - else - delta = 0; + delta -= msg->sg.size; } if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && !enospc && !full_record) { @@ -791,7 +819,7 @@ more_data: switch (psock->eval) { case __SK_PASS: err = tls_push_record(sk, flags, record_type); - if (err < 0) { + if (err && err != -EINPROGRESS) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); goto out_err; @@ -895,7 +923,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int ret = 0; if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) - return -ENOTSUPP; + return -EOPNOTSUPP; mutex_lock(&tls_ctx->tx_lock); lock_sock(sk); @@ -965,8 +993,6 @@ alloc_encrypted: if (ret) goto fallback_to_reg_send; - rec->inplace_crypto = 0; - num_zc++; copied += try_to_copy; @@ -979,7 +1005,7 @@ alloc_encrypted: num_async++; else if (ret == -ENOMEM) goto wait_for_memory; - else if (ret == -ENOSPC) + else if (ctx->open_rec && ret == -ENOSPC) goto rollback_iter; else if (ret != -EAGAIN) goto send_end; @@ -1048,11 +1074,12 @@ wait_for_memory: ret = sk_stream_wait_memory(sk, &timeo); if (ret) { trim_sgl: - tls_trim_both_msgs(sk, orig_size); + if (ctx->open_rec) + tls_trim_both_msgs(sk, orig_size); goto send_end; } - if (msg_en->sg.size < required_size) + if (ctx->open_rec && msg_en->sg.size < required_size) goto alloc_encrypted; } @@ -1164,7 +1191,6 @@ alloc_payload: tls_ctx->pending_open_record_frags = true; if (full_record || eor || sk_msg_full(msg_pl)) { - rec->inplace_crypto = 0; ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, record_type, &copied, flags); if (ret) { @@ -1185,11 +1211,13 @@ wait_for_sndbuf: wait_for_memory: ret = sk_stream_wait_memory(sk, &timeo); if (ret) { - tls_trim_both_msgs(sk, msg_pl->sg.size); + if (ctx->open_rec) + tls_trim_both_msgs(sk, msg_pl->sg.size); goto sendpage_end; } - goto alloc_payload; + if (ctx->open_rec) + goto alloc_payload; } if (num_async) { @@ -1210,7 +1238,7 @@ int tls_sw_sendpage_locked(struct sock *sk, struct page *page, if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | MSG_NO_SHARED_FRAGS)) - return -ENOTSUPP; + return -EOPNOTSUPP; return tls_sw_do_sendpage(sk, page, offset, size, flags); } @@ -1223,7 +1251,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) - return -ENOTSUPP; + return -EOPNOTSUPP; mutex_lock(&tls_ctx->tx_lock); lock_sock(sk); @@ -1922,7 +1950,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, /* splice does not support reading control messages */ if (ctx->control != TLS_RECORD_TYPE_DATA) { - err = -ENOTSUPP; + err = -EINVAL; goto splice_read_end; } @@ -2079,7 +2107,8 @@ void tls_sw_release_resources_tx(struct sock 
*sk) /* Free up un-sent records in tx_list. First, free * the partially sent record if any at head of tx_list. */ - if (tls_free_partial_record(sk, tls_ctx)) { + if (tls_ctx->partially_sent_record) { + tls_free_partial_record(sk, tls_ctx); rec = list_first_entry(&ctx->tx_list, struct tls_rec, list); list_del(&rec->list); diff --git a/net/toa/Kconfig b/net/toa/Kconfig new file mode 100644 index 000000000000..7bdf874bbe82 --- /dev/null +++ b/net/toa/Kconfig @@ -0,0 +1,9 @@ +config TOA + tristate "The private TCP option for supporting the Taobao LVS full-NAT feature" + default m + ---help--- + This option saves the original IP address and source port of a TCP segment + after LVS has performed NAT on it. So far, this module only supports IPv4 + and IPv6-mapped IPv4. + + Say m if unsure. diff --git a/net/toa/Makefile b/net/toa/Makefile new file mode 100644 index 000000000000..a0e3620ded48 --- /dev/null +++ b/net/toa/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for TOA module. +# +obj-$(CONFIG_TOA) += toa.o diff --git a/net/toa/toa.c b/net/toa/toa.c new file mode 100644 index 000000000000..82084e59597a --- /dev/null +++ b/net/toa/toa.c @@ -0,0 +1,426 @@ +#include "toa.h" + +/* + * TOA: a new TCP Option as Address, + * where "address" means both IP and port. + * The real {IP, port} can be carried in the option field of the TCP header, + * so that with the LVS FULLNAT model the real servers are still able to + * receive the real {IP, port} info. + * So far, this module only supports IPv4 and IPv6-mapped IPv4. + * + * Authors: + * Wen Li + * Yan Tian + * Jiaming Wu + * Jiajun Chen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +unsigned long sk_data_ready_addr = 0; + +/* + * Statistics of toa in proc /proc/net/toa_stats + */ + +struct toa_stats_entry toa_stats[] = { + TOA_STAT_ITEM("syn_recv_sock_toa", SYN_RECV_SOCK_TOA_CNT), + TOA_STAT_ITEM("syn_recv_sock_no_toa", SYN_RECV_SOCK_NO_TOA_CNT), + TOA_STAT_ITEM("getname_toa_ok", GETNAME_TOA_OK_CNT), + TOA_STAT_ITEM("getname_toa_mismatch", GETNAME_TOA_MISMATCH_CNT), + TOA_STAT_ITEM("getname_toa_bypass", GETNAME_TOA_BYPASS_CNT), + TOA_STAT_ITEM("getname_toa_empty", GETNAME_TOA_EMPTY_CNT), + TOA_STAT_END +}; + +DEFINE_TOA_STAT(struct toa_stat_mib, ext_stats); + +/* + * Funcs for toa hooks + */ + +/* Parse the TCP options in skb, try to get the client ip and port + * @param skb [in] received skb; it should be the handshake ACK packet. + * @return NULL if we don't get client ip/port; + * value of toa_data in ret_ptr if we get client ip/port. 
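 + * + * For illustration, a sketch of the 8-byte TOA option as laid out by + * struct toa_data in toa.h (the concrete bytes are an assumed example, + * not taken from a real trace): + * |opcode=200|opsize=8|port (2 bytes)|ip (4 bytes)|, all in network order; + * e.g. the bytes c8 08 1f 90 c0 a8 01 02 decode to client 192.168.1.2:8080.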
+ */ +static void * get_toa_data(struct sk_buff *skb) +{ + struct tcphdr *th; + int length; + unsigned char *ptr; + + struct toa_data tdata; + + void *ret_ptr = NULL; + + //TOA_DBG("get_toa_data called\n"); + + if (NULL != skb) { + th = tcp_hdr(skb); + length = (th->doff * 4) - sizeof (struct tcphdr); + ptr = (unsigned char *) (th + 1); + + while (length > 0) { + int opcode = *ptr++; + int opsize; + switch (opcode) { + case TCPOPT_EOL: + return NULL; + case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ + length--; + continue; + default: + if (length < 2) /* no room left for an opsize byte */ + return NULL; + opsize = *ptr++; + if (opsize < 2) /* "silly options" */ + return NULL; + if (opsize > length) + return NULL; /* don't parse partial options */ + if (TCPOPT_TOA == opcode && TCPOLEN_TOA == opsize) { + memcpy(&tdata, ptr - 2, sizeof (tdata)); + //TOA_DBG("find toa data: ip = %u.%u.%u.%u, port = %u\n", NIPQUAD(tdata.ip), + //ntohs(tdata.port)); + memcpy(&ret_ptr, &tdata, sizeof (ret_ptr)); + //TOA_DBG("coded toa data: %p\n", ret_ptr); + return ret_ptr; + } + ptr += opsize - 2; + length -= opsize; + } + } + } + return NULL; +} + +/* get the client ip and port from a socket + * @param sock [in] the socket to getpeername() or getsockname() + * @param uaddr [out] the place to put the client ip and port + * @param peer [in] if peer, try to get the remote address; if !peer, the local one + * @return whatever the original inet_getname() returns (the address length on success). + */ +static int +inet_getname_toa(struct socket *sock, struct sockaddr *uaddr, int peer) +{ + int retval = 0; + struct sock *sk = sock->sk; + struct sockaddr_in *sin = (struct sockaddr_in *) uaddr; + struct toa_data tdata; + + //TOA_DBG("inet_getname_toa called, sk->sk_user_data is %p\n", sk->sk_user_data); + + /* call the original one */ + retval = inet_getname(sock, uaddr, peer); + + /* set our value if needed */ + if (retval == 0 && NULL != sk->sk_user_data && peer) { + if (sk_data_ready_addr == (unsigned long) sk->sk_data_ready) { + memcpy(&tdata, &sk->sk_user_data, sizeof (tdata)); + if (TCPOPT_TOA == tdata.opcode && TCPOLEN_TOA == tdata.opsize) { + TOA_INC_STATS(ext_stats, GETNAME_TOA_OK_CNT); + //TOA_DBG("inet_getname_toa: set new sockaddr, ip %u.%u.%u.%u -> %u.%u.%u.%u, port %u -> %u\n", + // NIPQUAD(sin->sin_addr.s_addr), NIPQUAD(tdata.ip), ntohs(sin->sin_port), + // ntohs(tdata.port)); + sin->sin_port = tdata.port; + sin->sin_addr.s_addr = tdata.ip; + } else { /* sk_user_data doesn't belong to us */ + TOA_INC_STATS(ext_stats, GETNAME_TOA_MISMATCH_CNT); + //TOA_DBG("inet_getname_toa: invalid toa data, ip %u.%u.%u.%u port %u opcode %u opsize %u\n", + // NIPQUAD(tdata.ip), ntohs(tdata.port), tdata.opcode, tdata.opsize); + } + } else { + TOA_INC_STATS(ext_stats, GETNAME_TOA_BYPASS_CNT); + } + } else { /* no need to get the client ip */ + TOA_INC_STATS(ext_stats, GETNAME_TOA_EMPTY_CNT); + } + + return retval; +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int +inet6_getname_toa(struct socket *sock, struct sockaddr *uaddr, int peer) +{ + int retval = 0; + struct sock *sk = sock->sk; + struct sockaddr_in6 *sin = (struct sockaddr_in6 *) uaddr; + struct toa_data tdata; + + //TOA_DBG("inet6_getname_toa called, sk->sk_user_data is %p\n", sk->sk_user_data); + + /* call the original one */ + retval = inet6_getname(sock, uaddr, peer); + + /* set our value if needed */ + if (retval == 0 && NULL != sk->sk_user_data && peer) { + if (sk_data_ready_addr == (unsigned long) sk->sk_data_ready) { + memcpy(&tdata, &sk->sk_user_data, sizeof (tdata)); + if (TCPOPT_TOA == tdata.opcode && TCPOLEN_TOA == 
tdata.opsize) { + TOA_INC_STATS(ext_stats, GETNAME_TOA_OK_CNT); + sin->sin6_port = tdata.port; + ipv6_addr_set(&sin->sin6_addr, 0, 0, htonl(0x0000FFFF), tdata.ip); + } else { /* sk_user_data doesn't belong to us */ + TOA_INC_STATS(ext_stats, GETNAME_TOA_MISMATCH_CNT); + } + } else { + TOA_INC_STATS(ext_stats, GETNAME_TOA_BYPASS_CNT); + } + } else { /* no need to get the client ip */ + TOA_INC_STATS(ext_stats, GETNAME_TOA_EMPTY_CNT); + } + + return retval; +} +#endif + +/* The three-way handshake has completed - we got a valid synack - + * now create the new socket. + * We need to save the toa data into the new socket. + * @param sk [out] the socket + * @param skb [in] the handshake ACK packet + * @param req [in] the open request for this connection + * @param dst [out] route cache entry + * @return NULL on failure, the new socket on success. + */ +static struct sock * +tcp_v4_syn_recv_sock_toa(const struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) +{ + struct sock *newsock = NULL; + + //TOA_DBG("tcp_v4_syn_recv_sock_toa called\n"); + + /* call the original one */ + newsock = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); + + /* set our value if needed */ + if (NULL != newsock && NULL == newsock->sk_user_data) { + newsock->sk_user_data = get_toa_data(skb); + if (NULL != newsock->sk_user_data) { + TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_TOA_CNT); + } else { + TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_NO_TOA_CNT); + } + //TOA_DBG("tcp_v4_syn_recv_sock_toa: set sk->sk_user_data to %p\n", newsock->sk_user_data); + } + return newsock; +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static struct sock * +tcp_v6_syn_recv_sock_toa(const struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) +{ + struct sock *newsock = NULL; + + //TOA_DBG("tcp_v6_syn_recv_sock_toa called\n"); + + /* call the original one */ + newsock = tcp_v6_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); + + /* set our value if needed */ + if (NULL != newsock && NULL == newsock->sk_user_data) { + newsock->sk_user_data = get_toa_data(skb); + if (NULL != newsock->sk_user_data) { + TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_TOA_CNT); + } else { + TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_NO_TOA_CNT); + } + } + return newsock; +} +#endif + +/* + * HOOK FUNCS + */ + +/* replace the functions with our functions */ +static inline int +hook_toa_functions(void) +{ + struct proto_ops *inet_stream_ops_p; + struct proto_ops *inet6_stream_ops_p; + struct inet_connection_sock_af_ops *ipv4_specific_p; + struct inet_connection_sock_af_ops *ipv6_specific_p; + + /* hook inet_getname for ipv4 */ + inet_stream_ops_p = (struct proto_ops *)&inet_stream_ops; + inet_stream_ops_p->getname = inet_getname_toa; + TOA_INFO("CPU [%u] hooked inet_getname <%p> --> <%p>\n", smp_processor_id(), inet_getname, + inet_stream_ops_p->getname); + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + /* hook inet6_getname for ipv6 */ + inet6_stream_ops_p = (struct proto_ops *)&inet6_stream_ops; + inet6_stream_ops_p->getname = inet6_getname_toa; + TOA_INFO("CPU [%u] hooked inet6_getname <%p> --> <%p>\n", smp_processor_id(), inet6_getname, + inet6_stream_ops_p->getname); +#endif + + /* hook tcp_v4_syn_recv_sock for ipv4 */ + ipv4_specific_p = (struct inet_connection_sock_af_ops *)&ipv4_specific; + ipv4_specific_p->syn_recv_sock = tcp_v4_syn_recv_sock_toa; + TOA_INFO("CPU [%u] hooked 
tcp_v4_syn_recv_sock <%p> --> <%p>\n", smp_processor_id(), tcp_v4_syn_recv_sock, + ipv4_specific_p->syn_recv_sock); + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + /* hook tcp_v6_syn_recv_sock for ipv6 */ + ipv6_specific_p = (struct inet_connection_sock_af_ops *)&ipv6_specific; + ipv6_specific_p->syn_recv_sock = tcp_v6_syn_recv_sock_toa; + TOA_INFO("CPU [%u] hooked tcp_v6_syn_recv_sock <%p> --> <%p>\n", smp_processor_id(), tcp_v6_syn_recv_sock, + ipv6_specific_p->syn_recv_sock); +#endif + + return 0; +} + +/* restore the original functions */ +static int +unhook_toa_functions(void) +{ + struct proto_ops *inet_stream_ops_p; + struct proto_ops *inet6_stream_ops_p; + struct inet_connection_sock_af_ops *ipv4_specific_p; + struct inet_connection_sock_af_ops *ipv6_specific_p; + + /* unhook inet_getname for ipv4 */ + inet_stream_ops_p = (struct proto_ops *)&inet_stream_ops; + inet_stream_ops_p->getname = inet_getname; + TOA_INFO("CPU [%u] unhooked inet_getname\n", smp_processor_id()); + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + /* unhook inet6_getname for ipv6 */ + inet6_stream_ops_p = (struct proto_ops *)&inet6_stream_ops; + inet6_stream_ops_p->getname = inet6_getname; + TOA_INFO("CPU [%u] unhooked inet6_getname\n", smp_processor_id()); +#endif + + /* unhook tcp_v4_syn_recv_sock for ipv4 */ + ipv4_specific_p = (struct inet_connection_sock_af_ops *)&ipv4_specific; + ipv4_specific_p->syn_recv_sock = tcp_v4_syn_recv_sock; + TOA_INFO("CPU [%u] unhooked tcp_v4_syn_recv_sock\n", smp_processor_id()); + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + /* unhook tcp_v6_syn_recv_sock for ipv6 */ + ipv6_specific_p = (struct inet_connection_sock_af_ops *)&ipv6_specific; + ipv6_specific_p->syn_recv_sock = tcp_v6_syn_recv_sock; + TOA_INFO("CPU [%u] unhooked tcp_v6_syn_recv_sock\n", smp_processor_id()); +#endif + + return 0; +} + +/* + * Statistics of toa in proc /proc/net/toa_stats + */ +static int toa_stats_show(struct seq_file *seq, void *v) +{ + int i, j; + + /* print CPU first */ + seq_printf(seq, " "); + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i)) + seq_printf(seq, "CPU%d ", i); + seq_putc(seq, '\n'); + + i = 0; + while (NULL != toa_stats[i].name) { + seq_printf(seq, "%-25s:", toa_stats[i].name); + for (j = 0; j < NR_CPUS; j++) { + if (cpu_online(j)) { + seq_printf(seq, "%10lu ", + *(((unsigned long *) per_cpu_ptr(ext_stats, j)) + toa_stats[i].entry)); + } + } + seq_putc(seq, '\n'); + i++; + } + return 0; +} + +static int toa_stats_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, toa_stats_show, NULL); +} + +static const struct file_operations toa_stats_fops = { + .owner = THIS_MODULE, + .open = toa_stats_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * TOA module init and destroy + */ + +/* module init */ +static int __init +toa_init(void) +{ + + TOA_INFO("TOA " TOA_VERSION " by pukong.wjm\n"); + + /* alloc the statistics array for toa */ + if (NULL == (ext_stats = alloc_percpu(struct toa_stat_mib))) + return -ENOMEM; + proc_create("toa_stats", 0, init_net.proc_net, &toa_stats_fops); + + /* get the address of function sock_def_readable + * so later we can know whether the sock is for rpc, tux or others + */ + sk_data_ready_addr = kallsyms_lookup_name("sock_def_readable"); + TOA_INFO("CPU [%u] sk_data_ready_addr = kallsyms_lookup_name(sock_def_readable) = %lu\n", + smp_processor_id(), sk_data_ready_addr); + if (0 == sk_data_ready_addr) { + TOA_INFO("cannot find sock_def_readable.\n"); + 
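/* sk_data_ready_addr is how inet_getname_toa() later verifies that + * sk_user_data was really set by this module, so refuse to load + * without it. */ +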
goto err; + } + + /* hook funcs to parse and fetch toa */ + hook_toa_functions(); + + TOA_INFO("toa loaded\n"); + return 0; + +err: + remove_proc_entry("toa_stats", init_net.proc_net); + if (NULL != ext_stats) { + free_percpu(ext_stats); + ext_stats = NULL; + } + + return -ENODEV; +} + +/* module cleanup */ +static void __exit +toa_exit(void) +{ + unhook_toa_functions(); + synchronize_net(); + + remove_proc_entry("toa_stats", init_net.proc_net); + if (NULL != ext_stats) { + free_percpu(ext_stats); + ext_stats = NULL; + } + TOA_INFO("toa unloaded\n"); +} + +module_init(toa_init); +module_exit(toa_exit); +MODULE_LICENSE("GPL"); + diff --git a/net/toa/toa.h b/net/toa/toa.h new file mode 100644 index 000000000000..615ae8b25bc2 --- /dev/null +++ b/net/toa/toa.h @@ -0,0 +1,87 @@ +#ifndef __NET__TOA_H__ +#define __NET__TOA_H__ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/tcp.h> +#include <linux/kallsyms.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/percpu.h> +#include <linux/netdevice.h> +#include <net/net_namespace.h> +#include <net/sock.h> +#include <net/protocol.h> +#include <net/tcp.h> +#include <net/inet_common.h> +#include <net/ipv6.h> +#include <net/transp_v6.h> + +#define TOA_VERSION "1.0.0.0" + +#define TOA_DBG(msg...) \ + do { \ + printk(KERN_DEBUG "[DEBUG] TOA: " msg); \ + } while (0) + +#define TOA_INFO(msg...) \ + do { \ + if (net_ratelimit()) \ + printk(KERN_INFO "TOA: " msg); \ + } while (0) + +#define TCPOPT_TOA 200 + +/* MUST be 4n !!!! */ +#define TCPOLEN_TOA 8 /* |opcode|size|ip+port| = 1 + 1 + 6 */ + +/* MUST be 4-byte aligned */ +struct toa_data { + __u8 opcode; + __u8 opsize; + __u16 port; + __u32 ip; +}; + +/* statistics about toa in proc /proc/net/toa_stats */ +enum { + SYN_RECV_SOCK_TOA_CNT = 1, + SYN_RECV_SOCK_NO_TOA_CNT, + GETNAME_TOA_OK_CNT, + GETNAME_TOA_MISMATCH_CNT, + GETNAME_TOA_BYPASS_CNT, + GETNAME_TOA_EMPTY_CNT, + TOA_STAT_LAST +}; + +struct toa_stats_entry { + char *name; + int entry; +}; + +#define TOA_STAT_ITEM(_name, _entry) { \ + .name = _name, \ + .entry = _entry, \ +} + +#define TOA_STAT_END { \ + NULL, \ + 0, \ +} + +struct toa_stat_mib { + unsigned long mibs[TOA_STAT_LAST]; +}; + +#define DEFINE_TOA_STAT(type, name) \ + __typeof__(type) *name +#define TOA_INC_STATS(mib, field) \ + (per_cpu_ptr(mib, smp_processor_id())->mibs[field]++) + + +#endif + + diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 0d8da809bea2..b3369d678f1a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -646,6 +646,9 @@ static __poll_t unix_poll(struct file *, struct socket *, poll_table *); static __poll_t unix_dgram_poll(struct file *, struct socket *, poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); +#ifdef CONFIG_COMPAT +static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +#endif static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); @@ -687,6 +690,9 @@ static const struct proto_ops unix_stream_ops = { .getname = unix_getname, .poll = unix_poll, .ioctl = unix_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unix_compat_ioctl, +#endif .listen = unix_listen, .shutdown = unix_shutdown, .setsockopt = sock_no_setsockopt, @@ -710,6 +716,9 @@ static const struct proto_ops unix_dgram_ops = { .getname = unix_getname, .poll = unix_dgram_poll, .ioctl = unix_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unix_compat_ioctl, +#endif .listen = sock_no_listen, .shutdown = unix_shutdown, .setsockopt = sock_no_setsockopt, @@ -732,6 +741,9 @@ static const struct proto_ops unix_seqpacket_ops = { .getname = unix_getname, .poll = 
unix_dgram_poll, .ioctl = unix_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unix_compat_ioctl, +#endif .listen = unix_listen, .shutdown = unix_shutdown, .setsockopt = sock_no_setsockopt, @@ -2582,6 +2594,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return err; } +#ifdef CONFIG_COMPAT +static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index c443db7af8d4..463cefc1e5ae 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -136,28 +136,15 @@ struct hvsock { **************************************************************************** * The only valid Service GUIDs, from the perspectives of both the host and * * Linux VM, that can be connected by the other end, must conform to this * - * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in * - * this range [0, 0x7FFFFFFF]. * + * format: <port>-facb-11e6-bd58-64006a7986d3. * **************************************************************************** * * When we write apps on the host to connect(), the GUID ServiceID is used. * When we write apps in Linux VM to connect(), we only need to specify the * port and the driver will form the GUID and use that to request the host. * - * From the perspective of Linux VM: - * 1. the local ephemeral port (i.e. the local auto-bound port when we call - * connect() without explicit bind()) is generated by __vsock_bind_stream(), - * and the range is [1024, 0xFFFFFFFF). - * 2. the remote ephemeral port (i.e. the auto-generated remote port for - * a connect request initiated by the host's connect()) is generated by - * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF). */ -#define MAX_LISTEN_PORT ((u32)0x7FFFFFFF) -#define MAX_VM_LISTEN_PORT MAX_LISTEN_PORT -#define MAX_HOST_LISTEN_PORT MAX_LISTEN_PORT -#define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1) - /* 00000000-facb-11e6-bd58-64006a7986d3 */ static const guid_t srv_id_template = GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58, @@ -180,33 +167,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id) vsock_addr_init(addr, VMADDR_CID_ANY, port); } -static void hvs_remote_addr_init(struct sockaddr_vm *remote, - struct sockaddr_vm *local) -{ - static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT; - struct sock *sk; - - vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY); - - while (1) { - /* Wrap around ? 
*/ - if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT || - host_ephemeral_port == VMADDR_PORT_ANY) - host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT; - - remote->svm_port = host_ephemeral_port++; - - sk = vsock_find_connected_socket(remote, local); - if (!sk) { - /* Found an available ephemeral port */ - return; - } - - /* Release refcnt got in vsock_find_connected_socket */ - sock_put(sk); - } -} - static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan) { set_channel_pending_send_size(chan, @@ -336,12 +296,7 @@ static void hvs_open_connection(struct vmbus_channel *chan) if_type = &chan->offermsg.offer.if_type; if_instance = &chan->offermsg.offer.if_instance; conn_from_host = chan->offermsg.offer.u.pipe.user_def[0]; - - /* The host or the VM should only listen on a port in - * [0, MAX_LISTEN_PORT] - */ - if (!is_valid_srv_id(if_type) || - get_port_by_srv_id(if_type) > MAX_LISTEN_PORT) + if (!is_valid_srv_id(if_type)) return; hvs_addr_init(&addr, conn_from_host ? if_type : if_instance); @@ -365,6 +320,13 @@ static void hvs_open_connection(struct vmbus_channel *chan) new->sk_state = TCP_SYN_SENT; vnew = vsock_sk(new); + + hvs_addr_init(&vnew->local_addr, if_type); + + /* Remote peer is always the host */ + vsock_addr_init(&vnew->remote_addr, + VMADDR_CID_HOST, VMADDR_PORT_ANY); + vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance); hvs_new = vnew->trans; hvs_new->chan = chan; } else { @@ -429,8 +391,6 @@ static void hvs_open_connection(struct vmbus_channel *chan) sk->sk_ack_backlog++; hvs_addr_init(&vnew->local_addr, if_type); - hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr); - hvs_new->vm_srv_id = *if_type; hvs_new->host_srv_id = *if_instance; @@ -753,16 +713,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk) static bool hvs_stream_allow(u32 cid, u32 port) { - /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is - * reserved as ephemeral ports, which are used as the host's ports - * when the host initiates connections. - * - * Perform this check in the guest so an immediate error is produced - * instead of a timeout. 
- */ - if (port > MAX_HOST_LISTEN_PORT) - return false; - if (cid == VMADDR_CID_HOST) return true; diff --git a/net/wireless/core.c b/net/wireless/core.c index 350513744575..3e25229a059d 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1102,6 +1102,7 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) #ifdef CONFIG_CFG80211_WEXT kzfree(wdev->wext.keys); + wdev->wext.keys = NULL; #endif /* only initialized if we have a netdev */ if (wdev->netdev) diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c index a9c0f368db5d..24e18405cdb4 100644 --- a/net/wireless/ethtool.c +++ b/net/wireless/ethtool.c @@ -7,9 +7,13 @@ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct wireless_dev *wdev = dev->ieee80211_ptr; + struct device *pdev = wiphy_dev(wdev->wiphy); - strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, - sizeof(info->driver)); + if (pdev->driver) + strlcpy(info->driver, pdev->driver->name, + sizeof(info->driver)); + else + strlcpy(info->driver, "N/A", sizeof(info->driver)); strlcpy(info->version, init_utsname()->release, sizeof(info->version)); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7b72286922f7..dbb6a14968ef 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -437,6 +437,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG }, [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, + [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, @@ -468,6 +469,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_PLINK_STATE] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1), + [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 }, + [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG }, [NL80211_ATTR_MESH_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, @@ -529,6 +532,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 }, + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 }, [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, @@ -559,6 +564,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, + [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_MAC_MASK] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN @@ -4794,8 +4800,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) err = nl80211_parse_he_obss_pd( info->attrs[NL80211_ATTR_HE_OBSS_PD], &params.he_obss_pd); if (err) - return err; + goto out; } nl80211_calculate_ap_params(&params); @@ -4817,6 +4822,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) } wdev_unlock(wdev); +out: kfree(params.acl); return err; @@ -10834,6 +10840,7 @@ static int cfg80211_cqm_rssi_update(struct 
cfg80211_registered_device *rdev, if (err) return err; + cfg80211_sinfo_release_content(&sinfo); if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) wdev->cqm_config->last_rssi_event_value = (s8) sinfo.rx_beacon_signal_avg; @@ -13787,6 +13794,8 @@ static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info) if (err) return err; + cfg80211_sinfo_release_content(&sinfo); + return rdev_probe_mesh_link(rdev, dev, dest, buf, len); } @@ -16398,7 +16407,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac, goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) && - nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw)) + nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) && diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index e853a4fe6f97..e0d34f796d0b 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -538,6 +538,10 @@ static inline int rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) { int ret; + + if (!rdev->ops->set_wiphy_params) + return -EOPNOTSUPP; + trace_rdev_set_wiphy_params(&rdev->wiphy, changed); ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); trace_rdev_return_int(&rdev->wiphy, ret); @@ -1167,6 +1171,16 @@ rdev_start_radar_detection(struct cfg80211_registered_device *rdev, return ret; } +static inline void +rdev_end_cac(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + trace_rdev_end_cac(&rdev->wiphy, dev); + if (rdev->ops->end_cac) + rdev->ops->end_cac(&rdev->wiphy, dev); + trace_rdev_return_void(&rdev->wiphy); +} + static inline int rdev_set_mcast_rate(struct cfg80211_registered_device *rdev, struct net_device *dev, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 446c76d44e65..1a8218f1bbe0 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2261,21 +2261,22 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) static void handle_channel_custom(struct wiphy *wiphy, struct ieee80211_channel *chan, - const struct ieee80211_regdomain *regd) + const struct ieee80211_regdomain *regd, + u32 min_bw) { u32 bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; u32 bw; - for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) { + for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) { reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq), regd, bw); if (!IS_ERR(reg_rule)) break; } - if (IS_ERR(reg_rule)) { + if (IS_ERR_OR_NULL(reg_rule)) { pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n", chan->center_freq); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { @@ -2324,8 +2325,14 @@ static void handle_band_custom(struct wiphy *wiphy, if (!sband) return; + /* + * We currently assume that you always want at least 20 MHz, + * otherwise channel 12 might get enabled if this rule is + * compatible to US, which permits 2402 - 2472 MHz. 
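+ * + * To make that arithmetic concrete (illustrative values only): channel 12 + * is centred on 2467 MHz, so a 20 MHz channel would span 2457 - 2477 MHz + * and fall outside 2402 - 2472 MHz, while a 10 MHz fit (2462 - 2472 MHz) + * would match such a rule and wrongly enable the channel.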
+ */ for (i = 0; i < sband->n_channels; i++) - handle_channel_custom(wiphy, &sband->channels[i], regd); + handle_channel_custom(wiphy, &sband->channels[i], regd, + MHZ_TO_KHZ(20)); } /* Used by drivers prior to wiphy registration */ @@ -3885,6 +3892,25 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy) } EXPORT_SYMBOL(regulatory_pre_cac_allowed); +static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev) +{ + struct wireless_dev *wdev; + /* If we finished CAC or received radar, we should end any + * CAC running on the same channels. + * The check !cfg80211_chandef_dfs_usable covers 2 cases: + * either all channels are available - then the CAC_FINISHED + * event has affected another wdev's state, or there is a channel + * in the unavailable state in the wdev's chandef - then the + * RADAR_DETECTED event has affected another wdev's state. + * In both cases we should end the CAC on the wdev. + */ + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + if (wdev->cac_started && + !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef)) + rdev_end_cac(rdev, wdev->netdev); + } +} + void regulatory_propagate_dfs_state(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state, @@ -3911,8 +3937,10 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy, cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state); if (event == NL80211_RADAR_DETECTED || - event == NL80211_RADAR_CAC_FINISHED) + event == NL80211_RADAR_CAC_FINISHED) { cfg80211_sched_dfs_chan_update(rdev); + cfg80211_check_and_end_cac(rdev); + } nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 7a6c38ddc65a..d32a2ec4d96a 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1307,14 +1307,14 @@ void cfg80211_autodisconnect_wk(struct work_struct *work) if (wdev->conn_owner_nlportid) { switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: - cfg80211_leave_ibss(rdev, wdev->netdev, false); + __cfg80211_leave_ibss(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - cfg80211_stop_ap(rdev, wdev->netdev, false); + __cfg80211_stop_ap(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_MESH_POINT: - cfg80211_leave_mesh(rdev, wdev->netdev); + __cfg80211_leave_mesh(rdev, wdev->netdev); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: diff --git a/net/wireless/trace.h b/net/wireless/trace.h index d98ad2b3143b..8677d7ab7d69 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -646,6 +646,11 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); + +DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), + TP_ARGS(wiphy, netdev) +); + DECLARE_EVENT_CLASS(station_add_change, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), diff --git a/net/wireless/util.c b/net/wireless/util.c index 5b4ed5bbc542..8481e9ac33da 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -564,7 +564,7 @@ __frame_add_frag(struct sk_buff *skb, struct page *page, struct skb_shared_info *sh = skb_shinfo(skb); int page_offset; - page_ref_inc(page); + get_page(page); page_offset = ptr - page_address(page); skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size); } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 5e677dac2a0c..69102fda9ebd 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -657,7 +657,8 @@ 
struct iw_statistics *get_wireless_stats(struct net_device *dev) return NULL; } -static int iw_handler_get_iwstats(struct net_device * dev, +/* noinline to avoid a bogus warning with -O3 */ +static noinline int iw_handler_get_iwstats(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 6aee9f5e8e71..256f3e97d1f3 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -760,6 +760,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, if (sk->sk_state == TCP_ESTABLISHED) goto out; + rc = -EALREADY; /* Do nothing if call is already in progress */ + if (sk->sk_state == TCP_SYN_SENT) + goto out; + sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; @@ -806,7 +810,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) - goto out_put_neigh; + goto out; rc = x25_wait_for_connection_establishment(sk); if (rc) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 9044073fbf22..d426fc01c529 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -305,12 +305,21 @@ out: } EXPORT_SYMBOL(xsk_umem_consume_tx); -static int xsk_zc_xmit(struct xdp_sock *xs) +static int xsk_wakeup(struct xdp_sock *xs, u8 flags) { struct net_device *dev = xs->dev; + int err; - return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, - XDP_WAKEUP_TX); + rcu_read_lock(); + err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags); + rcu_read_unlock(); + + return err; +} + +static int xsk_zc_xmit(struct xdp_sock *xs) +{ + return xsk_wakeup(xs, XDP_WAKEUP_TX); } static void xsk_destruct_skb(struct sk_buff *skb) @@ -424,19 +433,16 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock, unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); - struct net_device *dev; struct xdp_umem *umem; if (unlikely(!xsk_is_bound(xs))) return mask; - dev = xs->dev; umem = xs->umem; if (umem->need_wakeup) { - if (dev->netdev_ops->ndo_xsk_wakeup) - dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, - umem->need_wakeup); + if (xs->zc) + xsk_wakeup(xs, umem->need_wakeup); else /* Poll needs to drive Tx also in copy mode */ __xsk_sendmsg(sk); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 189ef15acbbc..64486ad81341 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -390,6 +390,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void return xfrm_dev_feat_change(dev); case NETDEV_DOWN: + case NETDEV_UNREGISTER: return xfrm_dev_down(dev); } return NOTIFY_DONE; diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 0f5131bc3342..4d5627e274fe 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -268,9 +268,6 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) int err = -1; int mtu; - if (!dst) - goto tx_err_link_failure; - dst_hold(dst); dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id); if (IS_ERR(dst)) { @@ -297,7 +294,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) mtu = dst_mtu(dst); if (!skb->ignore_df && skb->len > mtu) { - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { if (mtu < IPV6_MIN_MTU) @@ -343,6 +340,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) { struct 
xfrm_if *xi = netdev_priv(dev); struct net_device_stats *stats = &xi->dev->stats; + struct dst_entry *dst = skb_dst(skb); struct flowi fl; int ret; @@ -352,10 +350,33 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) case htons(ETH_P_IPV6): xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + if (!dst) { + fl.u.ip6.flowi6_oif = dev->ifindex; + fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; + dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6); + if (dst->error) { + dst_release(dst); + stats->tx_carrier_errors++; + goto tx_err; + } + skb_dst_set(skb, dst); + } break; case htons(ETH_P_IP): xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + if (!dst) { + struct rtable *rt; + + fl.u.ip4.flowi4_oif = dev->ifindex; + fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; + rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4); + if (IS_ERR(rt)) { + stats->tx_carrier_errors++; + goto tx_err; + } + skb_dst_set(skb, &rt->dst); + } break; default: goto tx_err; @@ -563,12 +584,9 @@ static void xfrmi_dev_setup(struct net_device *dev) { dev->netdev_ops = &xfrmi_netdev_ops; dev->type = ARPHRD_NONE; - dev->hard_header_len = ETH_HLEN; - dev->min_header_len = ETH_HLEN; dev->mtu = ETH_DATA_LEN; dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = ETH_DATA_LEN; - dev->addr_len = ETH_ALEN; + dev->max_mtu = IP_MAX_MTU; dev->flags = IFF_NOARP; dev->needs_free_netdev = true; dev->priv_destructor = xfrmi_dev_free; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f2d1e573ea55..264cf05a4eaa 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -431,7 +431,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy); static void xfrm_policy_kill(struct xfrm_policy *policy) { + write_lock_bh(&policy->lock); policy->walk.dead = 1; + write_unlock_bh(&policy->lock); atomic_inc(&policy->genid); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index b88ba45ff1ac..e6cfaa680ef3 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -110,7 +110,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs) return 0; uctx = nla_data(rt); - if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) + if (uctx->len > nla_len(rt) || + uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) return -EINVAL; return 0; @@ -2273,6 +2274,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, xfrm_mark_get(attrs, &mark); err = verify_newpolicy_info(&ua->policy); + if (err) + goto free_state; + err = verify_sec_ctx_len(attrs); if (err) goto free_state; diff --git a/package/arm/config.default b/package/arm/config.default new file mode 100644 index 000000000000..b8107d3471eb --- /dev/null +++ b/package/arm/config.default @@ -0,0 +1,6068 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm64 5.4.32-1 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80301 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-tlinux4" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y 
+CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=16 +CONFIG_ARM64_CONT_SHIFT=5 +CONFIG_ARCH_MMAP_RND_BITS_MIN=14 +CONFIG_ARCH_MMAP_RND_BITS_MAX=29 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=7 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_AGILEX is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_QCOM is not set +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_SEATTLE is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +# 
CONFIG_ARCH_THUNDER is not set +# CONFIG_ARCH_THUNDER2 is not set +# CONFIG_ARCH_UNIPHIER is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_XGENE is not set +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +CONFIG_KPATCH=m + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_845719=y +# CONFIG_ARM64_ERRATUM_843419 is not set +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_FUJITSU_ERRATUM_010001=y +# end of ARM errata workarounds via the alternatives framework + +# CONFIG_ARM64_4K_PAGES is not set +# CONFIG_ARM64_16K_PAGES is not set +CONFIG_ARM64_64K_PAGES=y +# CONFIG_ARM64_VA_BITS_42 is not set +CONFIG_ARM64_VA_BITS_48=y +# CONFIG_ARM64_VA_BITS_52 is not set +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +# CONFIG_ARM64_PA_BITS_52 is not set +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=256 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=4 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_SECCOMP=y +# CONFIG_PARAVIRT is not set +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +CONFIG_KEXEC=y +# CONFIG_KEXEC_FILE is not set +CONFIG_CRASH_DUMP=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=14 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +CONFIG_RODATA_FULL_DEFAULT_ENABLED=y +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +# CONFIG_ARM64_HW_AFDBM is not set +# CONFIG_ARM64_PAN is not set +# CONFIG_ARM64_VHE is not set +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# end of ARMv8.3 architectural features + +CONFIG_ARM64_MODULE_PLTS=y +# CONFIG_ARM64_PSEUDO_NMI is not set +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +# end of Kernel Features + +# +# 
Boot options +# +# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set +CONFIG_CMDLINE="" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +CONFIG_SYSVIPC_COMPAT=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=100 +CONFIG_PM_WAKELOCKS_GC=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_DPM_WATCHDOG is not set +CONFIG_PM_CLK=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_CPUIDLE is not set +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_QORIQ_CPUFREQ is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +# +# Firmware Drivers +# +# CONFIG_ARM_SDE_INTERFACE is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_MCFG=y +# CONFIG_ACPI_PROCESSOR is not set +# CONFIG_ACPI_IPMI is not set +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# 
CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +# CONFIG_ACPI_CONTAINER is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_HMAT is not set +CONFIG_HAVE_ACPI_APEI=y +# CONFIG_ACPI_APEI is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +# CONFIG_VHOST_VSOCK is not set +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +# CONFIG_CRYPTO_SM3_ARM64_CE is not set +# CONFIG_CRYPTO_SM4_ARM64_CE is not set +CONFIG_CRYPTO_GHASH_ARM64_CE=m +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=m +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KPROBES=y +# CONFIG_JUMP_LABEL is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y 
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_RELR=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +CONFIG_AIX_PARTITION=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_SYSV68_PARTITION=y +CONFIG_CMDLINE_PARTITION=y +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y 
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +CONFIG_ZPOOL=m +CONFIG_ZBUD=m +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=m +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +# CONFIG_NET_KEY_MIGRATE is not set +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_TOA=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y 
+CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +# CONFIG_NF_CT_PROTO_DCCP is not set +CONFIG_NF_CT_PROTO_GRE=y +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m 
+CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m 
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +# CONFIG_IP_SET_HASH_IPMARK is not set +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=m +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +# CONFIG_IP_NF_TARGET_NETMAP is not set +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +# CONFIG_IP6_NF_NAT is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m + +# +# DECnet: Netfilter Configuration +# 
+CONFIG_DECNET_NF_GRABULATOR=m +# end of DECnet: Netfilter Configuration + +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +# CONFIG_IP_DCCP_CCID3 is not set +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +# CONFIG_DECNET_ROUTER is not set +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +# CONFIG_IPDDP_ENCAP is not set +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_DEBUGFS=y +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +# 
CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +# CONFIG_CLS_U32_PERF is not set +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +# CONFIG_BATMAN_ADV_BATMAN_V is not set +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +CONFIG_BATMAN_ADV_DEBUGFS=y +# CONFIG_BATMAN_ADV_DEBUG is not set +CONFIG_BATMAN_ADV_SYSFS=y +# CONFIG_BATMAN_ADV_TRACING is not set +# CONFIG_OPENVSWITCH is not set +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +# CONFIG_VIRTIO_VSOCKETS is not set +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +# CONFIG_NET_NSH is not set +CONFIG_HSR=m +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +CONFIG_HAMRADIO=y + +# +# Packet Radio protocols +# +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_NETROM=m +CONFIG_ROSE=m + +# +# AX.25 network device drivers +# +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +# CONFIG_BAYCOM_PAR is not set +CONFIG_YAM=m +# end of AX.25 network device drivers + +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_FLEXCAN is not set +CONFIG_CAN_GRCAN=m +# CONFIG_CAN_KVASER_PCIEFD is not set +CONFIG_CAN_XILINXCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +CONFIG_CAN_M_CAN=m +# CONFIG_CAN_M_CAN_PLATFORM is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# 
CONFIG_CAN_F81601 is not set +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +CONFIG_CAN_MCP251X=m +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +# end of CAN Device Drivers + +# CONFIG_BT is not set +CONFIG_AF_RXRPC=m +CONFIG_AF_RXRPC_IPV6=y +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +# CONFIG_AF_RXRPC_DEBUG is not set +# CONFIG_RXKAD is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +CONFIG_WIMAX=y +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=y +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_9P_RDMA=m +# CONFIG_NET_9P_DEBUG is not set +CONFIG_CAIF=m +# CONFIG_CAIF_DEBUG is not set +CONFIG_CAIF_NETDEV=m +CONFIG_CAIF_USB=m +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_SPI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_SHDLC=y + +# +# Near Field Communication (NFC) devices +# +CONFIG_NFC_TRF7970A=m +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_FDP=m +CONFIG_NFC_FDP_I2C=m +CONFIG_NFC_PN544=m +CONFIG_NFC_PN544_I2C=m +# CONFIG_NFC_PN533_USB is not set +# CONFIG_NFC_PN533_I2C is not set +CONFIG_NFC_MICROREAD=m +CONFIG_NFC_MICROREAD_I2C=m +CONFIG_NFC_MRVL=m +CONFIG_NFC_MRVL_USB=m +CONFIG_NFC_MRVL_UART=m +CONFIG_NFC_MRVL_I2C=m +CONFIG_NFC_MRVL_SPI=m +CONFIG_NFC_ST21NFCA=m +CONFIG_NFC_ST21NFCA_I2C=m +CONFIG_NFC_ST_NCI=m +CONFIG_NFC_ST_NCI_I2C=m +CONFIG_NFC_ST_NCI_SPI=m +CONFIG_NFC_NXP_NCI=m +CONFIG_NFC_NXP_NCI_I2C=m +CONFIG_NFC_S3FWRN5=m +CONFIG_NFC_S3FWRN5_I2C=m +# CONFIG_NFC_ST95HF is not set +# end of Near Field Communication (NFC) devices + +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=m +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_ACPI is not set +CONFIG_HOTPLUG_PCI_CPCI=y +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# end of Cadence 
PCIe controllers support + +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +# CONFIG_PCI_XGENE is not set +# CONFIG_PCIE_ALTERA is not set +# CONFIG_PCI_HOST_THUNDER_PEM is not set +# CONFIG_PCI_HOST_THUNDER_ECAM is not set + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_HISI=y +CONFIG_PCIE_KIRIN=y +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_AL is not set +# end of DesignWare PCI Core Support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +# CONFIG_HISILICON_LPC is not set +# CONFIG_SIMPLE_PM_BUS is not set +# CONFIG_VEXPRESS_CONFIG is not set +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is 
not set +# end of Self-contained MTD device drivers + +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +CONFIG_PARPORT=m +# CONFIG_PARPORT_AX88796 is not set +# CONFIG_PARPORT_1284 is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_ZRAM is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_DRBD=m +CONFIG_DRBD_FAULT_INJECTION=y +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +# CONFIG_NVME_RDMA is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# CONFIG_NVME_TARGET is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +CONFIG_EEPROM_93CX6=y +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +CONFIG_TI_ST=m +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor 
State Management (COSM) Drivers +# + +# +# VOP Driver +# +CONFIG_VHOST_RING=m +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=y +CONFIG_SCSI_HISI_SAS_PCI=y +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVSAS_DEBUG=y +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=m +# CONFIG_SCSI_ADVANSYS is not set +CONFIG_SCSI_ARCMSR=m +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set +# CONFIG_SCSI_QLA_ISCSI is not set +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +# CONFIG_SCSI_CHELSIO_FCOE is not set +# CONFIG_SCSI_DH is not set +# end of SCSI device support + +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_CEVA is not set +# CONFIG_AHCI_QORIQ is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF 
controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +# CONFIG_DM_INTEGRITY is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_FUSION=y +# CONFIG_FUSION_SPI is not set +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# 
CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=m +# CONFIG_MACVTAP is not set +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NLMON is not set +# CONFIG_NET_VRF is not set +# CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +# CONFIG_ATM_TCP is not set +# CONFIG_ATM_LANAI is not set +# CONFIG_ATM_ENI is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E is not set +# CONFIG_ATM_HE is not set +# CONFIG_ATM_SOLOS is not set + +# +# CAIF transport drivers +# +# CONFIG_CAIF_TTY is not set +# CONFIG_CAIF_SPI_SLAVE is not set +# CONFIG_CAIF_HSI is not set +CONFIG_CAIF_VIRTIO=m + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +# CONFIG_BNX2 is not set +# CONFIG_CNIC is not set +# CONFIG_TIGON3 is not set +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_GEMINI_ETHERNET is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +CONFIG_NET_VENDOR_HISILICON=y +CONFIG_HIX5HD2_GMAC=m +CONFIG_HISI_FEMAC=m +CONFIG_HIP04_ETH=m +# CONFIG_HI13X1_GMAC is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=y +CONFIG_IGB_HWMON=y +# CONFIG_IGBVF is not set +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +# CONFIG_IXGBE_DCB is not set +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set 
+CONFIG_IAVF=m +CONFIG_I40EVF=m +# CONFIG_ICE is not set +CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_ENC28J60 is not set +# CONFIG_ENCX24J600 is not set +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=y +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +CONFIG_STMMAC_PCI=m +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +CONFIG_MDIO_HISI_FEMAC=y +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_SFP is not set +# CONFIG_ADIN_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +CONFIG_REALTEK_PHY=m +# 
CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PLIP=m +CONFIG_PPP=m +# CONFIG_PPP_BSDCOMP is not set +# CONFIG_PPP_DEFLATE is not set +# CONFIG_PPP_FILTER is not set +# CONFIG_PPP_MPPE is not set +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPPOATM is not set +CONFIG_PPPOE=m +# CONFIG_PPTP is not set +# CONFIG_PPPOL2TP is not set +# CONFIG_PPP_ASYNC is not set +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_SLIP=m +CONFIG_SLHC=m +# CONFIG_SLIP_COMPRESSED is not set +# CONFIG_SLIP_SMART is not set +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=m +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +CONFIG_USB_RTL8152=m +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# WiMAX Wireless Broadband devices +# +# CONFIG_WIMAX_I2400M_USB is not set +# end of WiMAX Wireless Broadband devices + +# CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=m +CONFIG_CAPI_TRACE=y +# CONFIG_ISDN_CAPI_CAPI20 is not set +# CONFIG_MISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_LEDS is not set +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y 
+CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set +CONFIG_MOUSE_PS2_SMBUS=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +CONFIG_TOUCHSCREEN_USB_COMPOSITE=y +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y +CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# 
CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_REGULATOR_HAPTIC is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_INPUT_HISI_POWERKEY=y +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_PARKBD is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=0 +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_DMA is not set +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set 
+CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_PRINTER is not set +# CONFIG_PPDEV is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=m +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_APPLICOM is not set +CONFIG_RAW_DRIVER=m +CONFIG_MAX_RAW_DEVS=256 +CONFIG_TCG_TPM=y +# CONFIG_TCG_TIS is not set +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +# CONFIG_TCG_ATMEL is not set +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_PINCTRL is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PCI=y +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_NOMADIK is not set +# 
CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_THUNDERX is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +CONFIG_I2C_SLAVE=y +# CONFIG_I2C_SLAVE_EEPROM is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_NXP_FLEXSPI is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PL022 is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +# CONFIG_DP83640_PHY is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_OCELOT is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SAMA5D2_PIOBU is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XGENE is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# 
CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_XGENE is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_UCS1002 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set 
+# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +CONFIG_SENSORS_W83795=y +# CONFIG_SENSORS_W83795_FANCTRL is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is 
not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# 
CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD70528 is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=m +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ANATOP is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +CONFIG_REGULATOR_GPIO=m +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_IR_RCMM_DECODER is not set +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set + +# +# Media drivers +# +# CONFIG_MEDIA_USB_SUPPORT is not set +# CONFIG_MEDIA_PCI_SUPPORT is not set + +# +# Supported MMC/SDIO adapters +# +CONFIG_CYPRESS_FIRMWARE=m + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# + +# +# Media SPI Adapters +# +# end of Media SPI Adapters + 
+# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# +# end of Customise DVB Frontends + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=y +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VRAM_HELPER=y +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SCHED=y + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=y +# CONFIG_DRM_RADEON_USERPTR is not set +CONFIG_DRM_AMDGPU=y +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set +CONFIG_DRM_AMDGPU_GART_DEBUGFS=y + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +# CONFIG_DEBUG_KERNEL_DC is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +# CONFIG_DRM_NOUVEAU is not set +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_UDL=y +CONFIG_DRM_AST=y +# CONFIG_DRM_MGAG200 is not set +CONFIG_DRM_CIRRUS_QEMU=y +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +CONFIG_DRM_RCAR_WRITEBACK=y +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=y +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set +# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set +# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set +# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set +# CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set +# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_RAYDIUM_RM67191 is not set +# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set +# CONFIG_DRM_PANEL_ROCKTECH_JH057N00900 is not set +# CONFIG_DRM_PANEL_RONBO_RB070D30 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set +# 
CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=y +CONFIG_DRM_HISI_KIRIN=y +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_GM12U320 is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +CONFIG_FB_CIRRUS=y +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +CONFIG_FB_UVESA=m +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +CONFIG_FB_RADEON_DEBUG=y +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +CONFIG_FB_VIRTUAL=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# 
CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +# CONFIG_SND_HRTIMER is not set +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +# CONFIG_SND_PORTMAN2X4 is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# 
CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +# CONFIG_SND_HDA_INTEL is not set +# end of HD-Audio + +CONFIG_SND_HDA_PREALLOC_SIZE=64 +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +# CONFIG_SND_SOC is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_ASUS is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_BIGBEN_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_LOGITECH is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# 
CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +CONFIG_USB_DYNAMIC_MINORS=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=y +CONFIG_USB_PRINTER=m +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_USS720 is not set +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=y +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# 
CONFIG_USB_SERIAL_FTDI_SIO is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MXUPORT is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +CONFIG_USB_SERIAL_PL2303=m +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_XSENS_MT is not set +# CONFIG_USB_SERIAL_WISHBONE is not set +# CONFIG_USB_SERIAL_SSU100 is not set +# CONFIG_USB_SERIAL_QT2 is not set +# CONFIG_USB_SERIAL_UPD78F0730 is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ATM is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of 
USB Peripheral Controller + +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_U_ETHER=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_F_RNDIS=m +# CONFIG_USB_CONFIGFS is not set +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_GADGET_TARGET is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_TI_LMU_COMMON is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +# CONFIG_INFINIBAND_USER_MAD is not set +# CONFIG_INFINIBAND_USER_ACCESS is not set +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_I40IW is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_HNS=m +# CONFIG_INFINIBAND_HNS_HIP06 is not set +# CONFIG_INFINIBAND_HNS_HIP08 is 
not set +# CONFIG_INFINIBAND_BNXT_RE is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +# CONFIG_INFINIBAND_SRP is not set +# CONFIG_INFINIBAND_SRPT is not set +# CONFIG_INFINIBAND_ISER is not set +# CONFIG_INFINIBAND_ISERT is not set +CONFIG_EDAC_SUPPORT=y +# CONFIG_EDAC is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=y +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# 
+CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_DW_EDMA_PCIE is not set + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_SELFTESTS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +CONFIG_VFIO_AMBA=m +CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET=m +CONFIG_VFIO_PLATFORM_AMDXGBE_RESET=m +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# end of Android + +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_FB_TFT is not set +# CONFIG_MOST is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# end of Gasket devices + +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set + +# +# ISDN CAPI drivers +# +# CONFIG_CAPI_AVM is not set +# CONFIG_ISDN_DRV_GIGASET is not set +# CONFIG_HYSDN is not set +# end of ISDN CAPI drivers + +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_UWB is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_QLGE is not set +# CONFIG_GOLDFISH is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is 
not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +CONFIG_COMMON_CLK_HI6220=y +CONFIG_RESET_HISI=y +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_V3=y +CONFIG_VIRTIO_IOMMU=y +CONFIG_SMMU_BYPASS_DEV=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_XGENE is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_CADENCE_DP is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_FSL_IMX8MQ_USB is not set +# CONFIG_PHY_MIXEL_MIPI_DPHY is not set +# CONFIG_PHY_HI6220_USB is not set +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# 
CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +# CONFIG_ARM_CCN is not set +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +# CONFIG_ARM_SMMU_V3_PMU is not set +# CONFIG_ARM_DSU_PMU is not set +# CONFIG_HISI_PMU is not set +# CONFIG_ARM_SPE_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +# CONFIG_TEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +CONFIG_JFS_STATISTICS=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_F2FS_STAT_FS=y +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y +# CONFIG_F2FS_CHECK_FS is not set +# CONFIG_F2FS_IO_TRACE is not set +# CONFIG_F2FS_FAULT_INJECTION is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=m +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=m +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=y +CONFIG_CUSE=m +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# 
+CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +CONFIG_ADFS_FS=m +# CONFIG_ADFS_FS_RW is not set +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +# CONFIG_BEFS_DEBUG is not set +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_JFFS2_CMODE_NONE is not set +# CONFIG_JFFS2_CMODE_PRIORITY is not set +# CONFIG_JFFS2_CMODE_SIZE is not set +CONFIG_JFFS2_CMODE_FAVOURLZO=y +CONFIG_UBIFS_FS=m +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +CONFIG_UBIFS_ATIME_SUPPORT=y +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +# CONFIG_QNX6FS_DEBUG is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# 
CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_UFS_FS_WRITE=y +# CONFIG_UFS_DEBUG is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FSCACHE=y +# CONFIG_AFS_DEBUG_CURSOR is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_TRUSTED_KEYS is not set 
+CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_INFINIBAND is not set +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=32768 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +# CONFIG_IMA_WRITE_POLICY is not set +# CONFIG_IMA_READ_POLICY is not set +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_EVM is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y 
+CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=m +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +# CONFIG_CRYPTO_XXHASH is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +# CONFIG_INDIRECT_PIO is not set +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m 
+CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +CONFIG_XZ_DEC_TEST=m +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y +CONFIG_SWIOTLB=y +CONFIG_DMA_REMAP=y +CONFIG_DMA_DIRECT_REMAP=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_6x10 is not set +# CONFIG_FONT_10x18 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_TER16x32 is not set +CONFIG_FONT_AUTOSELECT=y +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_BTF is not set +CONFIG_GDB_SCRIPTS=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set 
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +# end of Memory Debugging + +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_NOTIFIER_ERROR_INJECTION=m +CONFIG_PM_NOTIFIER_ERROR_INJECT=m +# CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT is not set +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +CONFIG_RBTREE_TEST=m +# CONFIG_REED_SOLOMON_TEST is not set +CONFIG_INTERVAL_TREE_TEST=m +CONFIG_PERCPU_TEST=m +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +# CONFIG_TEST_STRSCPY is not set +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +CONFIG_TEST_LKM=m +# CONFIG_TEST_VMALLOC is not set +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +CONFIG_TEST_FIRMWARE=m +# CONFIG_TEST_SYSCTL is not set +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +# 
CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +CONFIG_MEMTEST=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +# CONFIG_KGDB_TESTS is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set +# end of Kernel hacking diff --git a/package/arm/config.default_kasan b/package/arm/config.default_kasan new file mode 100644 index 000000000000..0fc5e7ca2f00 --- /dev/null +++ b/package/arm/config.default_kasan @@ -0,0 +1,6074 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 5.4.32-1 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80301 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-tlinux4" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# 
CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=16 +CONFIG_ARM64_CONT_SHIFT=5 +CONFIG_ARCH_MMAP_RND_BITS_MIN=14 +CONFIG_ARCH_MMAP_RND_BITS_MAX=29 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=7 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y 
+CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_KASAN_SHADOW_OFFSET=0xdfffa00000000000 + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_AGILEX is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_QCOM is not set +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_SEATTLE is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +# CONFIG_ARCH_THUNDER is not set +# CONFIG_ARCH_THUNDER2 is not set +# CONFIG_ARCH_UNIPHIER is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_XGENE is not set +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +CONFIG_KPATCH=m + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_845719=y +# CONFIG_ARM64_ERRATUM_843419 is not set +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_FUJITSU_ERRATUM_010001=y +# end of ARM errata workarounds via the alternatives framework + +# CONFIG_ARM64_4K_PAGES is not set +# CONFIG_ARM64_16K_PAGES is not set +CONFIG_ARM64_64K_PAGES=y +# CONFIG_ARM64_VA_BITS_42 is not set +CONFIG_ARM64_VA_BITS_48=y +# CONFIG_ARM64_VA_BITS_52 is not set +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +# CONFIG_ARM64_PA_BITS_52 is not set +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=256 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=4 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y 
+CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_SECCOMP=y +# CONFIG_PARAVIRT is not set +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +CONFIG_KEXEC=y +# CONFIG_KEXEC_FILE is not set +CONFIG_CRASH_DUMP=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=14 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +CONFIG_RODATA_FULL_DEFAULT_ENABLED=y +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +# CONFIG_ARM64_HW_AFDBM is not set +# CONFIG_ARM64_PAN is not set +# CONFIG_ARM64_VHE is not set +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# end of ARMv8.3 architectural features + +CONFIG_ARM64_MODULE_PLTS=y +# CONFIG_ARM64_PSEUDO_NMI is not set +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +# end of Kernel Features + +# +# Boot options +# +# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set +CONFIG_CMDLINE="" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +CONFIG_SYSVIPC_COMPAT=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=100 +CONFIG_PM_WAKELOCKS_GC=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_DPM_WATCHDOG is not set +CONFIG_PM_CLK=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_CPUIDLE is not set +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_QORIQ_CPUFREQ is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +# +# Firmware Drivers +# +# CONFIG_ARM_SDE_INTERFACE is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS is not 
set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_MCFG=y +# CONFIG_ACPI_PROCESSOR is not set +# CONFIG_ACPI_IPMI is not set +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +# CONFIG_ACPI_CONTAINER is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_HMAT is not set +CONFIG_HAVE_ACPI_APEI=y +# CONFIG_ACPI_APEI is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +# CONFIG_VHOST_VSOCK is not set +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +# CONFIG_CRYPTO_SM3_ARM64_CE is not set +# CONFIG_CRYPTO_SM4_ARM64_CE is not set +CONFIG_CRYPTO_GHASH_ARM64_CE=m +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=m +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KPROBES=y +# CONFIG_JUMP_LABEL is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y 
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_RELR=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +CONFIG_AIX_PARTITION=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_SYSV68_PARTITION=y +CONFIG_CMDLINE_PARTITION=y +# end of 
Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set 
+# CONFIG_CMA is not set +CONFIG_ZPOOL=m +CONFIG_ZBUD=m +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=m +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +# CONFIG_NET_KEY_MIGRATE is not set +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_TOA=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y 
+CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +# CONFIG_NF_CT_PROTO_DCCP is not set +CONFIG_NF_CT_PROTO_GRE=y +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m 
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +# CONFIG_IP_SET_HASH_IPMARK is not set +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=m +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +# CONFIG_IP_NF_TARGET_NETMAP is not set 
+CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +# CONFIG_IP6_NF_NAT is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m + +# +# DECnet: Netfilter Configuration +# +CONFIG_DECNET_NF_GRABULATOR=m +# end of DECnet: Netfilter Configuration + +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +# CONFIG_IP_DCCP_CCID3 is not set +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +# CONFIG_DECNET_ROUTER is not set +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +# CONFIG_IPDDP_ENCAP is not set +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m 
+CONFIG_6LOWPAN_DEBUGFS=y +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +# CONFIG_CLS_U32_PERF is not set +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +# CONFIG_BATMAN_ADV_BATMAN_V is not set +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +CONFIG_BATMAN_ADV_DEBUGFS=y +# CONFIG_BATMAN_ADV_DEBUG is not set +CONFIG_BATMAN_ADV_SYSFS=y +# CONFIG_BATMAN_ADV_TRACING is not set +# CONFIG_OPENVSWITCH is not set +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +# CONFIG_VIRTIO_VSOCKETS is not set +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +# CONFIG_NET_NSH is not set +CONFIG_HSR=m +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y 
+CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +CONFIG_HAMRADIO=y + +# +# Packet Radio protocols +# +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_NETROM=m +CONFIG_ROSE=m + +# +# AX.25 network device drivers +# +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +# CONFIG_BAYCOM_PAR is not set +CONFIG_YAM=m +# end of AX.25 network device drivers + +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_FLEXCAN is not set +CONFIG_CAN_GRCAN=m +# CONFIG_CAN_KVASER_PCIEFD is not set +CONFIG_CAN_XILINXCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +CONFIG_CAN_M_CAN=m +# CONFIG_CAN_M_CAN_PLATFORM is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +CONFIG_CAN_MCP251X=m +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +# end of CAN Device Drivers + +# CONFIG_BT is not set +CONFIG_AF_RXRPC=m +CONFIG_AF_RXRPC_IPV6=y +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +# CONFIG_AF_RXRPC_DEBUG is not set +# CONFIG_RXKAD is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +CONFIG_WIMAX=y +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=y +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_9P_RDMA=m +# CONFIG_NET_9P_DEBUG is not set +CONFIG_CAIF=m +# CONFIG_CAIF_DEBUG is not set +CONFIG_CAIF_NETDEV=m +CONFIG_CAIF_USB=m +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_SPI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_SHDLC=y + +# +# Near Field Communication (NFC) devices +# +CONFIG_NFC_TRF7970A=m +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_FDP=m +CONFIG_NFC_FDP_I2C=m +CONFIG_NFC_PN544=m +CONFIG_NFC_PN544_I2C=m +# CONFIG_NFC_PN533_USB is not set +# CONFIG_NFC_PN533_I2C is not set +CONFIG_NFC_MICROREAD=m +CONFIG_NFC_MICROREAD_I2C=m +CONFIG_NFC_MRVL=m +CONFIG_NFC_MRVL_USB=m +CONFIG_NFC_MRVL_UART=m +CONFIG_NFC_MRVL_I2C=m +CONFIG_NFC_MRVL_SPI=m +CONFIG_NFC_ST21NFCA=m +CONFIG_NFC_ST21NFCA_I2C=m +CONFIG_NFC_ST_NCI=m +CONFIG_NFC_ST_NCI_I2C=m +CONFIG_NFC_ST_NCI_SPI=m +CONFIG_NFC_NXP_NCI=m +CONFIG_NFC_NXP_NCI_I2C=m +CONFIG_NFC_S3FWRN5=m +CONFIG_NFC_S3FWRN5_I2C=m +# CONFIG_NFC_ST95HF is not set +# end of Near Field Communication (NFC) devices + +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y 
+CONFIG_GRO_CELLS=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=m +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_ACPI is not set +CONFIG_HOTPLUG_PCI_CPCI=y +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# end of Cadence PCIe controllers support + +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +# CONFIG_PCI_XGENE is not set +# CONFIG_PCIE_ALTERA is not set +# CONFIG_PCI_HOST_THUNDER_PEM is not set +# CONFIG_PCI_HOST_THUNDER_ECAM is not set + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_HISI=y +CONFIG_PCIE_KIRIN=y +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_AL is not set +# end of DesignWare PCI Core Support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +# CONFIG_HISILICON_LPC is not set +# CONFIG_SIMPLE_PM_BUS is not set +# CONFIG_VEXPRESS_CONFIG is not set +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +# 
CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +CONFIG_PARPORT=m +# CONFIG_PARPORT_AX88796 is not set +# CONFIG_PARPORT_1284 is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_ZRAM is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_DRBD=m +CONFIG_DRBD_FAULT_INJECTION=y +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +# CONFIG_NVME_RDMA is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# CONFIG_NVME_TARGET is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# 
CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +CONFIG_EEPROM_93CX6=y +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +CONFIG_TI_ST=m +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +CONFIG_VHOST_RING=m +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=y +CONFIG_SCSI_HISI_SAS_PCI=y +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVSAS_DEBUG=y +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=m +# CONFIG_SCSI_ADVANSYS is not set +CONFIG_SCSI_ARCMSR=m +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# 
CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set +# CONFIG_SCSI_QLA_ISCSI is not set +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +# CONFIG_SCSI_CHELSIO_FCOE is not set +# CONFIG_SCSI_DH is not set +# end of SCSI device support + +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_CEVA is not set +# CONFIG_AHCI_QORIQ is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +CONFIG_BCACHE=m +# 
CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +# CONFIG_DM_INTEGRITY is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_FUSION=y +# CONFIG_FUSION_SPI is not set +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=m +# CONFIG_MACVTAP is not set +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NLMON is not set +# CONFIG_NET_VRF is not set +# CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +# CONFIG_ATM_TCP is not set +# CONFIG_ATM_LANAI is not set +# CONFIG_ATM_ENI is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E is not set +# CONFIG_ATM_HE is not set +# CONFIG_ATM_SOLOS is not set + +# +# CAIF transport drivers +# +# CONFIG_CAIF_TTY is not set +# CONFIG_CAIF_SPI_SLAVE is not set +# CONFIG_CAIF_HSI is not set +CONFIG_CAIF_VIRTIO=m + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +# CONFIG_BNX2 is not set +# CONFIG_CNIC is not set +# CONFIG_TIGON3 is not set +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_CADENCE=y 
+# CONFIG_MACB is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_GEMINI_ETHERNET is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +CONFIG_NET_VENDOR_HISILICON=y +CONFIG_HIX5HD2_GMAC=m +CONFIG_HISI_FEMAC=m +CONFIG_HIP04_ETH=m +# CONFIG_HI13X1_GMAC is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=y +CONFIG_IGB_HWMON=y +# CONFIG_IGBVF is not set +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +# CONFIG_IXGBE_DCB is not set +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +# CONFIG_ICE is not set +CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_ENC28J60 is not set +# CONFIG_ENCX24J600 is not set +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=y +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +CONFIG_STMMAC_PCI=m +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +# 
CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +CONFIG_MDIO_HISI_FEMAC=y +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_SFP is not set +# CONFIG_ADIN_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PLIP=m +CONFIG_PPP=m +# CONFIG_PPP_BSDCOMP is not set +# CONFIG_PPP_DEFLATE is not set +# CONFIG_PPP_FILTER is not set +# CONFIG_PPP_MPPE is not set +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPPOATM is not set +CONFIG_PPPOE=m +# CONFIG_PPTP is not set +# CONFIG_PPPOL2TP is not set +# CONFIG_PPP_ASYNC is not set +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_SLIP=m +CONFIG_SLHC=m +# CONFIG_SLIP_COMPRESSED is not set +# CONFIG_SLIP_SMART is not set +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=m +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +CONFIG_USB_RTL8152=m +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# WiMAX Wireless Broadband devices +# +# CONFIG_WIMAX_I2400M_USB is not set +# end of WiMAX Wireless Broadband devices + +# CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=m +CONFIG_CAPI_TRACE=y +# CONFIG_ISDN_CAPI_CAPI20 is not set +# CONFIG_MISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_LEDS is not set +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is 
not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set +CONFIG_MOUSE_PS2_SMBUS=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# 
CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +CONFIG_TOUCHSCREEN_USB_COMPOSITE=y +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y +CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_REGULATOR_HAPTIC is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_INPUT_HISI_POWERKEY=y +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_PARKBD is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y 
+CONFIG_LEGACY_PTY_COUNT=0 +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_DMA is not set +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_PRINTER is not set +# CONFIG_PPDEV is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=m +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_APPLICOM is not set +CONFIG_RAW_DRIVER=m +CONFIG_MAX_RAW_DEVS=256 +CONFIG_TCG_TPM=y +# CONFIG_TCG_TIS is not set +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +# CONFIG_TCG_ATMEL is not set +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set 
+# CONFIG_I2C_MUX_PINCTRL is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PCI=y +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_THUNDERX is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +CONFIG_I2C_SLAVE=y +# CONFIG_I2C_SLAVE_EEPROM is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_NXP_FLEXSPI is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PL022 is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +# CONFIG_DP83640_PHY is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not 
set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_OCELOT is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SAMA5D2_PIOBU is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XGENE is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_XGENE is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_UCS1002 is not set 
+CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is 
not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +CONFIG_SENSORS_W83795=y +# CONFIG_SENSORS_W83795_FANCTRL is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# 
CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD70528 is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=m +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ANATOP is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +CONFIG_REGULATOR_GPIO=m +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# 
CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_IR_RCMM_DECODER is not set +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set + +# +# Media drivers +# +# CONFIG_MEDIA_USB_SUPPORT is not set +# CONFIG_MEDIA_PCI_SUPPORT is not set + +# +# Supported MMC/SDIO adapters +# +CONFIG_CYPRESS_FIRMWARE=m + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# + +# +# Media SPI Adapters +# +# end of Media SPI Adapters + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# +# end of Customise DVB Frontends + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=y +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VRAM_HELPER=y +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SCHED=y + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=y +# CONFIG_DRM_RADEON_USERPTR is not set +CONFIG_DRM_AMDGPU=y +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set +CONFIG_DRM_AMDGPU_GART_DEBUGFS=y + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +# CONFIG_DEBUG_KERNEL_DC is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +# CONFIG_DRM_NOUVEAU is not set +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_UDL=y +CONFIG_DRM_AST=y +# CONFIG_DRM_MGAG200 is not set +CONFIG_DRM_CIRRUS_QEMU=y +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +CONFIG_DRM_RCAR_WRITEBACK=y +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=y +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# 
CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set +# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set +# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set +# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set +# CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set +# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_RAYDIUM_RM67191 is not set +# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set +# CONFIG_DRM_PANEL_ROCKTECH_JH057N00900 is not set +# CONFIG_DRM_PANEL_RONBO_RB070D30 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=y +CONFIG_DRM_HISI_KIRIN=y +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_GM12U320 is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +CONFIG_FB_CIRRUS=y +# CONFIG_FB_PM2 is not set +# 
CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +CONFIG_FB_UVESA=m +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +CONFIG_FB_RADEON_DEBUG=y +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +CONFIG_FB_VIRTUAL=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +# CONFIG_SND_HRTIMER is not set +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +# CONFIG_SND_PORTMAN2X4 is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# 
CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +# CONFIG_SND_HDA_INTEL is not set +# end of HD-Audio + +CONFIG_SND_HDA_PREALLOC_SIZE=64 +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +# CONFIG_SND_SOC is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_ASUS is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_BIGBEN_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# 
CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_LOGITECH is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +CONFIG_USB_DYNAMIC_MINORS=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=y +CONFIG_USB_PRINTER=m +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set 
+# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_USS720 is not set +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=y +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# CONFIG_USB_SERIAL_FTDI_SIO is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MXUPORT is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +CONFIG_USB_SERIAL_PL2303=m +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_XSENS_MT is not set +# CONFIG_USB_SERIAL_WISHBONE is not set +# CONFIG_USB_SERIAL_SSU100 is not set +# CONFIG_USB_SERIAL_QT2 is not set +# CONFIG_USB_SERIAL_UPD78F0730 is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# 
CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ATM is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of USB Peripheral Controller + +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_U_ETHER=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_F_RNDIS=m +# CONFIG_USB_CONFIGFS is not set +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_GADGET_TARGET is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is 
not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_TI_LMU_COMMON is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +# CONFIG_INFINIBAND_USER_MAD is not set +# CONFIG_INFINIBAND_USER_ACCESS is not set +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_I40IW is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_HNS=m +# CONFIG_INFINIBAND_HNS_HIP06 is not set +# CONFIG_INFINIBAND_HNS_HIP08 is not set +# CONFIG_INFINIBAND_BNXT_RE is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +# CONFIG_INFINIBAND_SRP is not set +# CONFIG_INFINIBAND_SRPT is not set +# CONFIG_INFINIBAND_ISER is not set +# CONFIG_INFINIBAND_ISERT is not set +CONFIG_EDAC_SUPPORT=y +# CONFIG_EDAC is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=y +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# 
CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_DW_EDMA_PCIE is not set + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_SELFTESTS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +CONFIG_VFIO_AMBA=m +CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET=m +CONFIG_VFIO_PLATFORM_AMDXGBE_RESET=m +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# end 
of Android + +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_FB_TFT is not set +# CONFIG_MOST is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# end of Gasket devices + +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set + +# +# ISDN CAPI drivers +# +# CONFIG_CAPI_AVM is not set +# CONFIG_ISDN_DRV_GIGASET is not set +# CONFIG_HYSDN is not set +# end of ISDN CAPI drivers + +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_UWB is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_QLGE is not set +# CONFIG_GOLDFISH is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +CONFIG_COMMON_CLK_HI6220=y +CONFIG_RESET_HISI=y +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_V3=y +CONFIG_VIRTIO_IOMMU=y +CONFIG_SMMU_BYPASS_DEV=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx 
SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_XGENE is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_CADENCE_DP is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_FSL_IMX8MQ_USB is not set +# CONFIG_PHY_MIXEL_MIPI_DPHY is not set +# CONFIG_PHY_HI6220_USB is not set +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +# CONFIG_ARM_CCN is not set +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +# CONFIG_ARM_SMMU_V3_PMU is not set +# CONFIG_ARM_DSU_PMU is not set +# CONFIG_HISI_PMU is not set +# CONFIG_ARM_SPE_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +# CONFIG_TEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +CONFIG_JFS_STATISTICS=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# 
CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_F2FS_STAT_FS=y +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y +# CONFIG_F2FS_CHECK_FS is not set +# CONFIG_F2FS_IO_TRACE is not set +# CONFIG_F2FS_FAULT_INJECTION is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=m +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=m +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=y +CONFIG_CUSE=m +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +CONFIG_ADFS_FS=m +# CONFIG_ADFS_FS_RW is not set +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +# CONFIG_BEFS_DEBUG is not set +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_JFFS2_CMODE_NONE is not set +# CONFIG_JFFS2_CMODE_PRIORITY is not set +# CONFIG_JFFS2_CMODE_SIZE is not set +CONFIG_JFFS2_CMODE_FAVOURLZO=y +CONFIG_UBIFS_FS=m +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +CONFIG_UBIFS_ATIME_SUPPORT=y +CONFIG_UBIFS_FS_XATTR=y 
+CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +# CONFIG_QNX6FS_DEBUG is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_UFS_FS_WRITE=y +# CONFIG_UFS_DEBUG is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FSCACHE=y +# CONFIG_AFS_DEBUG_CURSOR is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m 
+CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_TRUSTED_KEYS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_INFINIBAND is not set +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=32768 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +# CONFIG_IMA_WRITE_POLICY is not set +# CONFIG_IMA_READ_POLICY is not set +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_EVM is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m 
+CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=m +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +# CONFIG_CRYPTO_XXHASH is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not 
set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +# CONFIG_INDIRECT_PIO is not set +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +CONFIG_XZ_DEC_TEST=m +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y +CONFIG_SWIOTLB=y +CONFIG_DMA_REMAP=y +CONFIG_DMA_DIRECT_REMAP=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not 
set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_6x10 is not set +# CONFIG_FONT_10x18 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_TER16x32 is not set +CONFIG_FONT_AUTOSELECT=y +CONFIG_SG_POOL=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_BTF is not set +CONFIG_GDB_SCRIPTS=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_KASAN=y +CONFIG_KASAN_GENERIC=y +CONFIG_KASAN_OUTLINE=y +# CONFIG_KASAN_INLINE is not set +CONFIG_KASAN_STACK=1 +# CONFIG_TEST_KASAN is not set +# end of Memory Debugging + +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_NOTIFIER_ERROR_INJECTION=m +CONFIG_PM_NOTIFIER_ERROR_INJECT=m +# CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT is not set +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +CONFIG_RBTREE_TEST=m +# CONFIG_REED_SOLOMON_TEST is not set +CONFIG_INTERVAL_TREE_TEST=m +CONFIG_PERCPU_TEST=m +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +# CONFIG_TEST_STRSCPY is not set +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +# 
CONFIG_TEST_BITMAP is not set
+# CONFIG_TEST_BITFIELD is not set
+# CONFIG_TEST_UUID is not set
+# CONFIG_TEST_XARRAY is not set
+# CONFIG_TEST_OVERFLOW is not set
+# CONFIG_TEST_RHASHTABLE is not set
+# CONFIG_TEST_HASH is not set
+# CONFIG_TEST_IDA is not set
+CONFIG_TEST_LKM=m
+# CONFIG_TEST_VMALLOC is not set
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+# CONFIG_TEST_BLACKHOLE_DEV is not set
+# CONFIG_FIND_BIT_BENCHMARK is not set
+CONFIG_TEST_FIRMWARE=m
+# CONFIG_TEST_SYSCTL is not set
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+# CONFIG_TEST_KMOD is not set
+# CONFIG_TEST_MEMCAT_P is not set
+# CONFIG_TEST_STACKINIT is not set
+# CONFIG_TEST_MEMINIT is not set
+CONFIG_MEMTEST=y
+# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_KGDB=y
+CONFIG_KGDB_SERIAL_CONSOLE=y
+# CONFIG_KGDB_TESTS is not set
+CONFIG_KGDB_KDB=y
+CONFIG_KDB_DEFAULT_ENABLE=0x1
+CONFIG_KDB_KEYBOARD=y
+CONFIG_KDB_CONTINUE_CATASTROPHIC=0
+CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
+# CONFIG_UBSAN is not set
+CONFIG_UBSAN_ALIGNMENT=y
+CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
+CONFIG_STRICT_DEVMEM=y
+# CONFIG_IO_STRICT_DEVMEM is not set
+# CONFIG_ARM64_PTDUMP_DEBUGFS is not set
+CONFIG_PID_IN_CONTEXTIDR=y
+# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
+# CONFIG_DEBUG_WX is not set
+# CONFIG_DEBUG_ALIGN_RODATA is not set
+# CONFIG_DEBUG_EFI is not set
+# CONFIG_ARM64_RELOC_TEST is not set
+# CONFIG_CORESIGHT is not set
+# end of Kernel hacking
diff --git a/package/arm/generate-rpms.sh b/package/arm/generate-rpms.sh
new file mode 100755
index 000000000000..a5096afb579e
--- /dev/null
+++ b/package/arm/generate-rpms.sh
@@ -0,0 +1,345 @@
+#!/bin/bash
+
+# ==============================================================================
+# Description: Automatically generate a kernel rpm package for the specified
+# branch according to the HEAD tag.
+#
+# Author: David Shan
+# ===============================================================================
+
+#variables
+kernel_full_name=""
+kernel_version=""
+tlinux_branch=""
+tlinux_release=""
+extra_version=""
+curdir=`pwd`
+build_specdir="/usr/src/packages/SPECS"
+build_srcdir="/usr/src/packages/SOURCES"
+kernel_type="default"
+spec_file_name=""
+make_jobs=""
+nosign_suffix=""
+build_src_only=0
+strip_sign_token=0
+raw_tagged_name=""
+# to control whether to build rpm for xen domU
+isXenU=""
+build_perf=""
+kernel_default_types=(default)
+
+#functions
+usage()
+{
+ echo "generate-rpms [-t \$tag_name ] [-j \$jobs ][-h]"
+ echo " -t tag_name"
+ echo " Tagged name in the git tree."
+ echo " -j jobs"
+ echo " Specifies the number of jobs (threads) to run make."
+ echo " -p"
+ echo " build perf rpm only."
+
+ echo -e "\n"
+ echo " Note: By default, we only generate the latest tagged kernel for standard kernels"
+ echo " when -t is not given."
+ echo " So, if you want a specific tagged kernel, please use -t."
+}
+
+get_kernel_version()
+{
+ local makefile="$1/Makefile"
+
+ if [ ! -e $makefile ]; then
+ echo "Error: No Makefile found."
+ exit 1
+ fi
+
+ kernel_version+=`grep "^VERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
+ kernel_version+="."
+ kernel_version+=`grep "^PATCHLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
+ kernel_version+="."
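+ # For example (hypothetical values): VERSION=4 and PATCHLEVEL=14 above plus
+ # SUBLEVEL=99 below give kernel_version="4.14.99", the version this tree's
+ # changelog starts from.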
+ kernel_version+=`grep "^SUBLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
+ #kernel_version+=`grep "^EXTRAVERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
+
+ echo "kernel version: $kernel_version"
+}
+
+get_tlinux_name()
+{
+ if [ -n "$tag_name" ]; then
+ raw_tagged_name="$tag_name"
+ else
+ raw_tagged_name=`git describe --tags`
+ fi
+
+ if [ -z "${raw_tagged_name}" ];then
+ echo "Error: Can't get kernel version from git tree."
+ exit 1
+ fi
+
+ if [[ $raw_tagged_name != public-arm64-* ]]; then
+ echo "$raw_tagged_name not valid tag name for public-arm64"
+ exit 1
+ fi
+
+ tagged_name=${raw_tagged_name#public-arm64-}
+
+ echo "${tagged_name}" | grep 'kvm_guest'
+ if [ $? -eq 0 ]; then
+ echo "start kvm guest build"
+ kvm_guest="yes"
+ fi
+
+ echo "${tagged_name}" | grep 'xen_guest'
+ if [ $? -eq 0 ]; then
+ echo "start xen guest build"
+ xen_guest="yes"
+ fi
+
+ echo "${tagged_name}" | grep 'ec'
+ if [ $? -eq 0 ]; then
+ echo "start EC kernel build"
+ ec="yes"
+ fi
+
+ echo "${tagged_name}" | grep 'kasan'
+ if [ $? -eq 0 ]; then
+ nosign_suffix="_kasan"
+ fi
+
+
+ #if [ "${tagged_name#*-*-*}" != ${tagged_name} ];then
+ #echo "Error: bad tag name:$tagged_name."
+ #exit 1
+ #fi
+
+ if [ "$build_perf" = "yes" ]; then
+ rpm_name="perf"
+ else
+ rpm_name="kernel"
+ fi
+ #tlinux_release=${tagged_name#*-}
+ tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3`
+ extra_version=`echo $tagged_name | cut -d- -f2`
+ kernel_full_name=${rpm_name}-${kernel_version}
+
+ echo "$tlinux_release"
+ echo "$kernel_full_name"
+ echo "extra_version $extra_version"
+
+}
+
+prepare_tlinux_spec()
+{
+ local sign_token
+ spec_file_name=${build_specdir}/${kernel_full_name}.spec
+ spec_contents="%define name ${rpm_name} \n"
+ spec_contents+="%define version ${kernel_version} \n"
+ spec_contents+="%define release_os ${tlinux_release} \n"
+ spec_contents+="%define tagged_name ${tagged_name} \n"
+ spec_contents+="%define extra_version ${extra_version} \n"
+ sign_token=
+ if [ "$sign_token" = "" ]; then
+ sign_token=0
+ fi
+ spec_contents+="%define sign_token ${sign_token} \n"
+
+
+ if [ -z "${kernel_type}" ];then
+ spec_contents+="%define kernel_all_types ${kernel_default_types[@]} \n"
+ else
+ spec_contents+="%define kernel_all_types ${kernel_type} \n"
+ fi
+
+ if [ -n "${make_jobs}" ];then
+ spec_contents+="%define jobs ${make_jobs} \n\n"
+ fi
+
+ if [ -e /etc/tlinux-release ];then
+ spec_contents+="%define debug_package %{nil}\n"
+ spec_contents+="%define __os_install_post %{nil}\n"
+
+ fi
+
+ if [[ $isXenU == "32" && `uname -m` == "x86_64" ]];then
+ spec_contents+="%define make_flag \"ARCH=i386\" \n"
+ fi
+
+ echo -e "${spec_contents}" > $spec_file_name
+
+ if [ "$build_perf" = "yes" ]; then
+ cat $curdir/perf-tlinux.spec >> $spec_file_name
+ else
+ cat $curdir/kernel-tlinux.spec >> $spec_file_name
+ fi
+}
+
+#main function
+
+#Parse Parameters
+while getopts ":t:j:hdp" option "$@"
+do
+ case $option in
+ "t" )
+ tag_name=$OPTARG
+ ;;
+ "j" )
+ make_jobs=$OPTARG
+ ;;
+ "h" )
+ usage
+ exit 0
+ ;;
+ "p" )
+ build_perf="yes"
+ ;;
+ "?" )
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+
+#test if parameter of kernel type is valid
+if [ -n "${kernel_type}" ] && [ ! -e config."${kernel_type}" ]
+then
+ echo "Error: bad kernel type name. Config file for ${kernel_type} does not exist."
+ usage
+ exit 1
+elif [ -n "${kernel_type}" ] && [ -n "$isXenU" ] && [ ! -e config."${kernel_type}".xenU ]
+then
+ echo "Error: bad kernel type name. Config file for ${kernel_type}.xenU does not exist."
+ usage
+ exit 1
+
+fi
+
+
+if [ -e /etc/SuSE-release ];then
+ build_specdir="/usr/src/packages/SPECS"
+ build_srcdir="/usr/src/packages/SOURCES"
+elif [ -e /etc/tlinux-release ];then
+ build_specdir="$HOME/rpmbuild/SPECS"
+ build_srcdir="$HOME/rpmbuild/SOURCES"
+else
+ echo "Error: Compile on unknown system."
+ exit 1
+fi
+
+
+if [ ! -e "${build_srcdir}" ]; then
+ mkdir $build_srcdir
+ [ $? == 1 ] && exit 1
+fi
+
+if [ ! -e "${build_specdir}" ]; then
+ mkdir $build_specdir
+ [ $? == 1 ] && exit 1
+fi
+
+cd ../../
+origsrc=`pwd`
+
+#if [ -n "$tag_name" ]; then
+# local_valid_branch=`git branch | grep "*" | awk '{print $2 }'`
+# git checkout "${tag_name}"
+# if [ $? -ne 0 ];then
+# echo "Error: bad tag name [$tag_name] "
+# exit 1
+# fi
+#fi
+
+
+get_kernel_version $origsrc
+get_tlinux_name
+echo "Prepare $kernel_full_name source."
+prepare_tlinux_spec
+
+cp package/default/tlinux_cciss_link.modules $build_srcdir
+if [ "$kvm_guest" = "yes" ] ; then
+ cp package/default/kvm_guest/*.patch $build_srcdir
+fi
+
+if [ "$ec" = "yes" ] ; then
+ cp package/default/ec/*.patch $build_srcdir
+fi
+
+if test -e ${build_srcdir}/${kernel_full_name}; then
+ rm -fr ${build_srcdir}/${kernel_full_name}
+fi
+
+#tagged_name is a confirmed tag name and will be used for final source.
+git archive --format=tar --prefix=${kernel_full_name}/ ${raw_tagged_name} | (cd ${build_srcdir} && tar xf -)
+if [ $? -ne 0 ];then
+ echo "Error: can't prepare $kernel_full_name source with git archive!"
+ exit 1
+fi
+
+cd ${build_srcdir}/${kernel_full_name}
+
+if [[ $isXenU == "32" ]]
+then
+ config_suffix="xenU32"
+ target="--target i686"
+elif [[ $isXenU == "64" ]]
+then
+ config_suffix="xenU"
+fi
+echo $config_suffix
+
+if [ -z "${kernel_type}" ];then
+ for type_name in "${kernel_default_types[@]}"
+ do
+ if [ -n "$isXenU" ]; then
+ echo mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name${nosign_suffix}
+ mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name${nosign_suffix}
+ fi
+ done
+else
+ if [ -n "$isXenU" ]; then
+ echo mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix}
+ mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix}
+
+ elif [ "$kvm_guest" = "yes" ] ; then
+ echo \
+ cp -f ${build_srcdir}/${kernel_full_name}/package/default/kvm_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix}
+ cp -f ${build_srcdir}/${kernel_full_name}/package/default/kvm_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix}
+ elif [ "$xen_guest" = "yes" ] ; then
+ echo \
+ cp -f ${build_srcdir}/${kernel_full_name}/package/default/xen_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix}
+ cp -f ${build_srcdir}/${kernel_full_name}/package/default/xen_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix}
+ fi
+fi
+
+cd ..
+tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name}
+rm -fr ${kernel_full_name}
+
+#if [ -n "$tag_name" ];then
+# cd $origsrc
+# git checkout $local_valid_branch > /dev/null
+#fi
+
+
+echo "Build kernel rpm package."
+if [ $build_src_only -eq 0 ]; then
+ rpmbuild -bb ${target} ${spec_file_name}
+ sed -i 's/^%define sign_token.*/%define sign_token please_enter_your_token_here/' ${spec_file_name}
+else
+ if [ $strip_sign_token -ne 0 ]; then
+ sed -i 's/^%define sign_token.*/%define sign_token please_enter_your_token_here/' ${spec_file_name}
+ fi
+fi
+
+echo "Build kernel source rpm package."
+
+cd ${build_srcdir}
+
+tar -zxf ${kernel_full_name}.tar.gz
+
+rm -fr ${kernel_full_name}/package/default/generate-rpms.sh
+tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name}
+
+rpmbuild -bs ${target} ${spec_file_name}
diff --git a/package/arm/kernel-tlinux.spec b/package/arm/kernel-tlinux.spec
new file mode 100644
index 000000000000..aa506255f210
--- /dev/null
+++ b/package/arm/kernel-tlinux.spec
@@ -0,0 +1,551 @@
+%global with_debuginfo 0
+%global with_perf 1
+%if 0%{?rhel} == 6
+%global rdist .tl1
+%global debug_path /usr/lib/debug/lib/
+%else
+%global debug_path /usr/lib/debug/usr/lib/
+%if 0%{?rhel} == 7
+%global rdist .tl2
+%endif
+%if 0%{?rhel} == 8
+%global rdist .tl3
+%global __python /usr/bin/python2
+%global _enable_debug_packages %{nil}
+%global debug_package %{nil}
+%global __debug_package %{nil}
+%global __debug_install_post %{nil}
+%global _build_id_links none
+%endif
+%endif
+
+%global dist %{nil}
+
+Summary: Kernel for Tencent physical machine
+Name: %{name}
+Version: %{version}
+Release: %{release_os}%{?rdist}
+License: GPLv2
+Vendor: Tencent
+Packager: tlinux team
+Provides: kernel = %{version}-%{release}
+Group: System Environment/Kernel
+Source0: %{name}-%{version}.tar.gz
+Source1: tlinux_cciss_link.modules
+URL: http://www.tencent.com
+ExclusiveArch: aarch64
+Distribution: Tencent Linux
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build
+BuildRequires: wget bc module-init-tools curl
+%if %{with_perf}
+BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex
+BuildRequires: xmlto asciidoc hmaccalc
+BuildRequires: audit-libs-devel
+#BuildRequires: uboot-tools
+%if 0%{?rhel} == 8
+BuildRequires: python2-devel
+%else
+BuildRequires: python-devel
+%endif
+
+
+%ifnarch s390 s390x
+BuildRequires: numactl-devel
+%endif
+%endif
+Requires(pre): linux-firmware >= 20100806-2
+
+# for the 'hostname' command
+%if 0%{?rhel} == 7
+BuildRequires: hostname
+%else
+BuildRequires: net-tools
+%endif
+
+%description
+This package contains the tlinux kernel for arm64 Bare-Metal & VM
+
+%package debuginfo-common
+Summary: tlinux kernel vmlinux for crash debug
+Release: %{release}
+Group: System Environment/Kernel
+Provides: kernel-debuginfo-common kernel-debuginfo
+AutoReqprov: no
+
+%description debuginfo-common
+This package contains vmlinux, System.map and config files for kernel
+debugging.
+
+%package devel
+Summary: Development package for building kernel modules to match the %{version}-%{release} kernel
+Release: %{release}
+Group: System Environment/Kernel
+AutoReqprov: no
+
+%description devel
+This package provides kernel headers and makefiles sufficient to build modules
+against the %{version}-%{release} kernel package.
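+# Illustrative sketch only (not part of the build): an out-of-tree module can
+# be compiled against the tree this subpackage installs, e.g.
+#   make -C /usr/src/kernels/<tagged_name> M=$PWD modules
+# where <tagged_name> stands for whatever tag the kernel was built from.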
+ + +%package headers +Summary: Header files for the Linux kernel for use by glibc +Group: Development/System +Obsoletes: glibc-kernheaders +Provides: glibc-kernheaders = 3.0-46 +AutoReqprov: no + +%description headers +Kernel-headers includes the C header files that specify the interface +between the Linux kernel and userspace libraries and programs. The +header files define structures and constants that are needed for +building most standard programs and are also needed for rebuilding the +glibc package. + +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Group: Development/Debug +#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|XXX' -o perf-debuginfo.list} + +%package -n python-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python-perf +The python-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} + +%package -n python-perf-debuginfo +Summary: Debug information for package perf python bindings +Group: Development/Debug +#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release} +AutoReqProv: no +%description -n python-perf-debuginfo +This package provides debug information for the perf python bindings. 
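+# Hypothetical smoke test of the bindings (the "perf" module is installed
+# into %%{python_sitearch} by the install-python_ext step in %%install):
+#   python2 -c "import perf; print perf.__doc__"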
+
+# the python_sitearch macro should already be defined from above
+%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{python_sitearch}/perf.so(\.debug)?|XXX' -o python-perf-debuginfo.list}
+
+
+%endif # with_perf
+
+# prep #########################################################################
+%prep
+%setup -q -c -T -a 0
+
+# build ########################################################################
+%build
+
+
+cd %{name}-%{version}
+
+all_types="%{kernel_all_types}"
+no_sign="yes"
+for type_name in $all_types
+do
+ sed -i '/^EXTRAVERSION/cEXTRAVERSION = -%{extra_version}' Makefile
+ objdir=../%{name}-%{version}-obj-${type_name}
+ [ -d ${objdir} ] || mkdir ${objdir}
+ echo "include $PWD/Makefile" > ${objdir}/Makefile
+ cp ./package/arm/config.${type_name} ${objdir}/.config
+ localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"'
+ sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config
+ make -C ${objdir} olddefconfig > /dev/null
+ make -j16 -C ${objdir} RPM_BUILD_MODULE=y %{?jobs:-j %jobs} all > /dev/null
+
+# dealing with files under lib/modules
+ make -j16 -C ${objdir} RPM_BUILD_MODULE=y modules > /dev/null
+
+ %global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix}
+ %if %{with_perf}
+ # perf
+ %{perf_make} all
+ %{perf_make} man
+ %endif
+done
+
+# install ######################################################################
+%install
+
+
+echo install %{version}
+arch=`uname -m`
+if [ "$arch" != "aarch64" ];then
+ echo "Unexpected error. Cannot build this rpm on a non-aarch64 platform"
+ exit 1
+fi
+mkdir -p %buildroot/boot
+cd %{name}-%{version}
+
+all_types="%{kernel_all_types}"
+for type_name in $all_types
+do
+ elfname=vmlinuz-%{tagged_name}%{?dist}
+ mapname=System.map-%{tagged_name}%{?dist}
+ configname=config-%{tagged_name}%{?dist}
+ builddir=../%{name}-%{version}-obj-${type_name}
+%if 0%{?rhel} == 7
+ cp -rpf ${builddir}/arch/arm64/boot/Image %buildroot/boot/${elfname}
+%endif
+ #mkimage -A arm64 -O linux -T kernel -C none -a 0x80080000 -e 0x80080000 -n %{tagged_name} -d ${builddir}/arch/arm64/boot/Image %buildroot/boot/uImage-%{tagged_name}%{?dist}
+ cp -rpf ${builddir}/System.map %buildroot/boot/${mapname}
+ cp -rpf ${builddir}/.config %buildroot/boot/${configname}
+ cp -rpf ${builddir}/vmlinux %buildroot/boot/vmlinux-%{tagged_name}%{?dist}
+ sha512hmac %buildroot/boot/${elfname} | sed -e "s,$RPM_BUILD_ROOT,," > %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac
+
+ # dealing with kernel-devel package
+ objdir=${builddir}
+ #KernelVer=%{version}-%{title}-%{release_os}-${type_name}
+ KernelVer=%{tagged_name}%{?dist}
+
+
+
+ ####### make kernel-devel package: methods come from rhel6 (kernel.spec) #######
+ make -C ${objdir} RPM_BUILD_MODULE=y INSTALL_MOD_PATH=%buildroot modules_install > /dev/null
+ #don't package firmware, use linux-firmware rpm instead
+ rm -rf %buildroot/lib/firmware
+ # And save the headers/makefiles etc for building modules against
+ #
+ # This all looks scary, but the end result is supposed to be:
+ # * all arch relevant include/ files
+ # * all Makefile/Kconfig files
+ # * all script/ files
+ rm -rf %buildroot/lib/modules/${KernelVer}/build
+ rm -rf %buildroot/lib/modules/${KernelVer}/source
+ mkdir -p %buildroot/lib/modules/${KernelVer}/build
+ (cd $RPM_BUILD_ROOT/lib/modules/${KernelVer} ; ln -s build source)
+ mkdir -p %buildroot/lib/modules/${KernelVer}/source/arch/arm64/kernel && cp -pf arch/arm64/kernel/module.lds %buildroot/lib/modules/${KernelVer}/source/arch/arm64/kernel/
+ # dirs for additional modules per module-init-tools, kbuild/modules.txt
+ mkdir -p %buildroot/lib/modules/${KernelVer}/extra
+ mkdir -p %buildroot/lib/modules/${KernelVer}/updates
+ mkdir -p %buildroot/lib/modules/${KernelVer}/weak-updates
+%if 0%{?rhel} == 8
+ cp -rpf ${builddir}/arch/arm64/boot/Image %buildroot/lib/modules/${KernelVer}/vmlinuz
+%endif
+ # first copy everything
+ cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` %buildroot/lib/modules/${KernelVer}/build
+ cp ${builddir}/Module.symvers %buildroot/lib/modules/${KernelVer}/build
+ cp ${builddir}/System.map %buildroot/lib/modules/${KernelVer}/build
+
+ if [ -s ${builddir}/Module.markers ]; then
+ cp ${builddir}/Module.markers %buildroot/lib/modules/${KernelVer}/build
+ fi
+
+ # create the kABI metadata for use in packaging
+ echo "**** GENERATING kernel ABI metadata ****"
+ gzip -c9 < ${builddir}/Module.symvers > %buildroot/boot/symvers-${KernelVer}.gz
+ # chmod 0755 %_sourcedir/kabitool
+ # rm -f %{_tmppath}/kernel-$KernelVer-kabideps
+ # %_sourcedir/kabitool -s Module.symvers -o %{_tmppath}/kernel-$KernelVer-kabideps
+
+ rm -rf %buildroot/lib/modules/${KernelVer}/build/Documentation
+ rm -rf %buildroot/lib/modules/${KernelVer}/build/scripts
+ rm -rf %buildroot/lib/modules/${KernelVer}/build/include
+ cp ${builddir}/.config %buildroot/lib/modules/${KernelVer}/build
+ cp -a scripts %buildroot/lib/modules/${KernelVer}/build
+ cp -a ${builddir}/scripts %buildroot/lib/modules/${KernelVer}/build
+ rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*.o
+ rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*/*.o
+ cp -a --parents arch/arm64/include %buildroot/lib/modules/${KernelVer}/build/
+ (cd ${builddir} ; cp -a --parents arch/arm64/include/generated %buildroot/lib/modules/${KernelVer}/build/)
+ mkdir -p %buildroot/lib/modules/${KernelVer}/build/include
+
+ cp -a ${builddir}/include/config %buildroot/lib/modules/${KernelVer}/build/include/
+ cp -a ${builddir}/include/generated %buildroot/lib/modules/${KernelVer}/build/include/
+
+ cd include
+ cp -a acpi asm-generic clocksource crypto drm dt-bindings keys kvm linux math-emu media misc net pcmcia ras rdma scsi soc sound target trace uapi video xen %buildroot/lib/modules/${KernelVer}/build/include
+
+ cp -a ../arch/arm64 $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/asm-arm64
+ pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include
+ ln -s asm-arm64 asm
+ popd
+
+ # Make sure the Makefile and version.h have a matching timestamp so that
+ # external modules can be built
+ touch -r %buildroot/lib/modules/${KernelVer}/build/Makefile %buildroot/lib/modules/${KernelVer}/build/include/generated/uapi/linux/version.h
+ #touch -r %buildroot/lib/modules/${KernelVer}/build/.config %buildroot/lib/modules/${KernelVer}/build/include/linux/autoconf.h
+ # Copy .config to include/config/auto.conf so "make prepare" is unnecessary.
+ cp %buildroot/lib/modules/$KernelVer/build/.config %buildroot/lib/modules/${KernelVer}/build/include/config/auto.conf
+ cd ..
+
+
+ find %buildroot/lib/modules/${KernelVer} -name "*.ko" -type f >modnames
+
+
+ mkdir -p %buildroot%debug_path/modules/${KernelVer}
+ cp -r %buildroot/lib/modules/${KernelVer}/kernel %buildroot%debug_path/modules/${KernelVer}
+ xargs --no-run-if-empty strip -g < modnames
+
+ xargs --no-run-if-empty chmod u+x < modnames
+
+ # Generate a list of modules for block and networking.
+ fgrep /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + if [ ! -z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + fi + } + + collect_modules_list networking 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt2x00(pci|usb)_probe|register_netdevice' + collect_modules_list block 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm 'drm_open|drm_init' + collect_modules_list modesetting 'drm_crtc_init' + # detect missing or incorrect license tags + rm -f modinfo + while read i + do + echo -n "${i#%buildroot/lib/modules/${KernelVer}/} " >> modinfo + /sbin/modinfo -l $i >> modinfo + done < modnames + + egrep -v \ + 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \ + modinfo && exit 1 + + rm -f modinfo modnames + # remove files that will be auto generated by depmod at rpm -i time + #for i in alias alias.bin ccwmap dep dep.bin ieee1394map inputmap isapnpmap ofmap pcimap seriomap symbols symbols.bin usbmap + #do + # rm -f %buildroot/lib/modules/${KernelVer}/modules.$i + #done + + # Move the devel headers out of the root file system + mkdir -p %buildroot/usr/src/kernels + mv %buildroot/lib/modules/${KernelVer}/build %buildroot/usr/src/kernels/${KernelVer} + ln -sf /usr/src/kernels/${KernelVer} %buildroot/lib/modules/${KernelVer}/build + # + # Generate the kernel-modules-* files lists + # +%if 0%{?rhel} == 8 + mkdir -p %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/boot/$configname %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/boot/System.map-$KernelVer %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/lib/modules/$KernelVer/dist_compat +%endif + + + # Copy the System.map file for depmod to use, and create a backup of the + # full module tree so we can restore it after we're done filtering + cp ${builddir}/System.map $RPM_BUILD_ROOT/. + pushd $RPM_BUILD_ROOT + mkdir restore + + + cp -r lib/modules/$KernelVer/* restore/. 
+
+ find lib/modules/$KernelVer/ -not -type d | sort -n > modules.list
+ find lib/modules/$KernelVer/ -type d | sort -n > module-dirs.list
+
+ # Run depmod on the resulting module tree and make sure it isn't broken
+ depmod -b . -aeF ./System.map $KernelVer &> depmod.out
+ if [ -s depmod.out ]; then
+ echo "Depmod failure"
+ cat depmod.out
+ exit 1
+ else
+ rm depmod.out
+ fi
+
+ # Cleanup
+ rm System.map
+ cp -r restore/* lib/modules/$KernelVer/.
+ rm -rf restore
+ popd
+
+%if 0%{?rhel} == 8
+ mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat
+ mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat
+%endif
+
+ # Make sure the files lists start with absolute paths or rpmbuild fails.
+ # Also add in the dir entries
+ sed -e 's/^lib*/%dir \/lib/' $RPM_BUILD_ROOT/module-dirs.list > ../core.list
+ sed -e 's/^lib*/\/lib/' $RPM_BUILD_ROOT/modules.list >> ../core.list
+
+ # Cleanup
+ rm -f $RPM_BUILD_ROOT/modules.list
+ rm -f $RPM_BUILD_ROOT/module-dirs.list
+
+ %if %{with_perf}
+ # perf tool binary and supporting scripts/binaries
+ %{perf_make} DESTDIR=$RPM_BUILD_ROOT install
+
+ # perf-python extension
+ %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext
+
+ # perf man pages (note: implicit rpm magic compresses them later)
+ %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man
+ %endif
+done
+
+mkdir -p %buildroot/etc/sysconfig/modules
+install -m755 %SOURCE1 %buildroot/etc/sysconfig/modules
+
+#### Header
+make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install
+# Do headers_check but don't die if it fails.
+make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_check \
+ > hdrwarnings.txt || :
+if grep -q exist hdrwarnings.txt; then
+ sed s:^$RPM_BUILD_ROOT/usr/include/:: hdrwarnings.txt
+ # Temporarily cause a build failure if there are header inconsistencies.
+ # exit 1
+fi
+
+find $RPM_BUILD_ROOT/usr/include \
+ \( -name .install -o -name .check -o \
+ -name ..install.cmd -o -name ..check.cmd \) | xargs rm -f
+
+# glibc provides scsi headers for itself, for now
+rm -rf $RPM_BUILD_ROOT/usr/include/scsi
+rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h
+rm -f $RPM_BUILD_ROOT/usr/include/asm*/io.h
+rm -f $RPM_BUILD_ROOT/usr/include/asm*/irq.h
+
+
+%pre
+# pre #########################################################################
+system_arch=`uname -m`
+
+if [ %{_target_cpu} != ${system_arch} ]; then
+ echo "ERROR: Failed installing this rpm!!!!"
+ echo "This rpm is intended for the %{_target_cpu} platform. It seems your system is ${system_arch}.";
+ exit 1;
+fi;
+
+%post
+# post #########################################################################
+echo "Install tlinux kernel"
+%if 0%{?rhel} == 8
+kernel-install add %{tagged_name} /lib/modules/%{tagged_name}/vmlinuz
+cp -p /lib/modules/%{tagged_name}/dist_compat/config-%{tagged_name}%{?dist} /boot
+cp -p /lib/modules/%{tagged_name}/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac /boot
+cp -p /lib/modules/%{tagged_name}/dist_compat/System.map-%{tagged_name}%{?dist} /boot
+%else
+%if 0%{?rhel} == 7
+/sbin/new-kernel-pkg --package kernel --install %{tagged_name}%{?dist} --make-default || exit $?
+echo -e "Set Grub default to \"%{tagged_name}%{?dist}\" Done."
+%endif
+%endif
+
+%preun
+# preun #######################################################################
+%if 0%{?rhel} == 8
+kernel-install remove %{tagged_name}
+%else
+%if 0%{?rhel} == 7
+/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $?
+echo -e "Remove \"%{tagged_name}%{?dist}\" Done."
+%endif
+%endif
+
+%posttrans
+# posttrans ####################################################################
+%if 0%{?rhel} == 7
+/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $?
+/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $?
+%endif
+
+%files -f core.list
+# files ########################################################################
+%defattr(-,root,root)
+%if 0%{?rhel} == 7
+/boot/vmlinuz-%{tagged_name}%{?dist}
+/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac
+#/boot/uImage-%%{tagged_name}%%{?dist}
+/boot/System.map-%{tagged_name}%{?dist}
+/boot/config-%{tagged_name}%{?dist}
+%endif
+/boot/symvers-%{tagged_name}%{?dist}*
+/etc/sysconfig/modules/tlinux_cciss_link.modules
+#/lib/modules/%%{tagged_name}/vmlinuz
+#do not package firmware, use linux-firmware rpm instead
+#/lib/firmware/
+
+%files debuginfo-common
+%defattr(-,root,root)
+/boot/vmlinux-%{tagged_name}%{?dist}
+%dir %debug_path
+%debug_path/modules
+#/boot/System.map-%%{version}-%%{title}-%%{release_os}-*
+#/boot/config-%%{version}-%%{title}-%%{release_os}-*
+
+%files headers
+%defattr(-,root,root)
+/usr/include/*
+
+%files devel
+%defattr(-,root,root)
+/usr/src/kernels/%{tagged_name}%{?dist}
+
+%if %{with_perf}
+%files -n perf
+%defattr(-,root,root)
+%{_bindir}/*
+/usr/lib64/*
+/usr/lib/traceevent/plugins/*.so
+/usr/lib/perf/examples/bpf/*
+/usr/lib/perf/include/bpf/*
+/usr/share/*
+%dir %{_libexecdir}/perf-core
+%{_libexecdir}/perf-core/*
+%{_sysconfdir}/bash_completion.d/perf
+
+%files -n python-perf
+%defattr(-,root,root)
+%{python_sitearch}
+
+%if %{with_debuginfo}
+%files -f perf-debuginfo.list -n perf-debuginfo
+%defattr(-,root,root)
+
+%files -f python-perf-debuginfo.list -n python-perf-debuginfo
+%defattr(-,root,root)
+%endif
+%endif # with_perf
+
+# changelog ###################################################################
+%changelog
+* Mon Mar 11 2019 Jia Wang
+ - Initial 4.14.99 repository
diff --git a/package/arm/perf-tlinux.spec b/package/arm/perf-tlinux.spec
new file mode 100644
index 000000000000..049c128c0be4
--- /dev/null
+++ b/package/arm/perf-tlinux.spec
@@ -0,0 +1,154 @@
+%define sign_server 10.12.65.118
+
+%global with_debuginfo 0
+%if 0%{?rhel} == 6
+%global dist .tl1
+%global debug_path /usr/lib/debug/lib/
+%else
+%global debug_path /usr/lib/debug/usr/lib/
+%if 0%{?rhel} == 7
+%global dist .tl2
+%endif
+%endif
+
+Summary: Performance monitoring for the Linux kernel
+Group: Development/System +Name: %{name} +Version: %{version} +Release: %{release_os}%{?dist} +License: GPLv2 +Vendor: Tencent +Packager: tlinux team +Provides: perf = %{version}-%{release} +Source0: %{name}-%{version}.tar.gz +URL: http://www.tencent.com +ExclusiveArch: aarch64 +Distribution: Tencent Linux +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build +BuildRequires: wget bc module-init-tools curl +BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel python-devel perl(ExtUtils::Embed) bison flex +BuildRequires: xmlto asciidoc +BuildRequires: audit-libs-devel +%ifnarch s390 s390x +BuildRequires: numactl-devel +%endif + +# for the 'hostname' command +%if 0%{?rhel} == 7 +BuildRequires: hostname +%else +BuildRequires: net-tools +%endif + +%description +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + + +%package -n python-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python-perf +The python-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} + + +# prep ######################################################################### +%prep + +%setup -q -c -T -a 0 + +# build ######################################################################## +%build + + +if [ ! -f /etc/tlinux-release ]; then + echo "Error: please build this rpm on tlinux\n" + exit 1 +fi + + +cd %{name}-%{version} +all_types="%{kernel_all_types}" +num_processor=`cat /proc/cpuinfo | grep 'processor' | wc -l` +if [ $num_processor -gt 8 ]; then + num_processor=8 +fi + +type_name=default + +objdir=../%{name}-%{version}-obj-${type_name} +[ -d ${objdir} ] || mkdir ${objdir} +bash scripts/mkmakefile $PWD ${objdir} 2 6 +if [ "$no_sign" != "yes" ]; then +cp ./package/default/config.${type_name} ${objdir}/.config +else +cp ./package/default/config.${type_name}_nosign ${objdir}/.config +fi +localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"' +sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config + + +%global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix} +# perf +%{perf_make} all +%{perf_make} man + +# install ###################################################################### +%install +cd %{name}-%{version} +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install + +# perf-python extension +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext + +# perf man pages (note: implicit rpm magic compresses them later) +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man + +%pre +# pre ######################################################################### +system_arch=`uname -m` + +if [ %{_target_cpu} != ${system_arch} ]; then + echo "ERROR: Failed installing this rpm!!!!" + echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; + exit 1; +fi; + +if [ ! -f /etc/tlinux-release -o ! 
+    echo "Error: cannot install this rpm on a non-tlinux OS"
+    exit 1;
+fi
+
+%post
+# post #########################################################################
+
+
+%postun
+
+%files
+%defattr(-,root,root)
+%{_bindir}/perf
+%dir %{_libexecdir}/perf-core
+%{_libexecdir}/perf-core/*
+%{_mandir}/man[1-8]/perf*
+%{_sysconfdir}/bash_completion.d/perf
+/usr/bin/trace
+/usr/lib/traceevent/plugins/*
+/usr/lib/perf
+/usr/share/doc/*
+/usr/share/perf-core/strace/groups
+
+%files -n python-perf
+%defattr(-,root,root)
+%{python_sitearch}
+
+
+# changelog ###################################################################
+%changelog
+* Thu Feb 2 2012 Samuel Liao
+ - Initial 3.0.18 repository
diff --git a/package/arm/repackage/generate-rpms.sh b/package/arm/repackage/generate-rpms.sh
new file mode 100755
index 000000000000..69506d3d38a2
--- /dev/null
+++ b/package/arm/repackage/generate-rpms.sh
@@ -0,0 +1,223 @@
+#!/bin/bash
+# ==============================================================================
+# Description: Automatically generate a kernel rpm package for a specified
+# branch according to the HEAD tag.
+#
+# Author: David Shan
+# ===============================================================================
+
+# variables
+kernel_full_name=""
+kernel_version=""
+tlinux_branch=""
+tlinux_release=""
+curdir=`pwd`
+
+
+build_specdir=`readlink -f ~/rpmbuild/SPECS`
+build_srcdir=`readlink -f ~/rpmbuild/SOURCES`
+dist="tl2"
+
+kernel_type="default"
+spec_file_name=""
+make_jobs=""
+tag_name=""
+nosign_suffix=""
+build_src_only=0
+strip_sign_token=0
+# controls whether to build the rpm for xen domU
+isXenU=""
+kernel_default_types=(default)
+
+# functions
+usage()
+{
+    echo "generate-rpms [-t \$tag_name] [-k \$kernel_type] [-j \$jobs] [-h] [-x \$arch]"
+    echo "    -t tag_name"
+    echo "       Tagged name in the git tree."
+    echo "    -k kernel_type"
+    echo "       Kernel type: default, state, container, or a user-specified type."
+    echo "       If -k is not set, the three standard kernels (default, state"
+    echo "       and container) are compiled. A user-specified type is compiled"
+    echo "       individually."
+    echo "    -j jobs"
+    echo "       Specifies the number of jobs (threads) to run make."
+    echo "    -x arch"
+    echo "       Build the rpm for xen domU; arch is 32 or 64."
+    echo "    -d"
+    echo "       Disable module digital signatures."
+
+    echo -e "\n"
+    echo "    Note: by default, without -t and -k, only the latest tagged kernel"
+    echo "    is generated for the standard kernels. To build a specific tag,"
+    echo "    use -t; to build only the state kernel, use -k state."
+}
+
+get_kernel_version()
+{
+    local tagged_name
+    raw_tagged_name=$tag_name
+
+    if [[ $raw_tagged_name != public-arm64-* ]]; then
+        echo "$raw_tagged_name is not a valid tag name for public-arm64"
+        exit 1
+    fi
+
+    tagged_name=${raw_tagged_name#public-arm64-}
+
+    kernel_version=`echo $tagged_name|cut -d- -f1`
+    #kernel_version=${kernel_version}-1
+    #echo "kernel version: $kernel_version"
+    echo "kernel version: ${kernel_version}"
+}
+
+get_tlinux_name()
+{
+    if [ -n "$tag_name" ]; then
+        raw_tagged_name="$tag_name"
+    else
+        raw_tagged_name=`git describe --tags`
+    fi
+
+    if [ -z "${raw_tagged_name}" ]; then
+        echo "Error: can't get the kernel version from the git tree."
+        exit 1
+    fi
+
+    if [[ $raw_tagged_name != public-arm64-* ]]; then
+        echo "$raw_tagged_name is not a valid tag name for public-arm64"
+        exit 1
+    fi
+
+    tagged_name=${raw_tagged_name#public-arm64-}
+
+
+    echo "${tagged_name}" | grep -q 'kvm_guest'
+    if [ $? -eq 0 ]; then
+        echo "start kvm guest build"
+        kvm_guest="yes"
+    fi
+
+    #if [ "${tagged_name#*-*-*}" != ${tagged_name} ]; then
+    #echo "Error: bad tag name:$tagged_name."
+    #exit 1
+    #fi
+
+
+    release=`echo $tagged_name|cut -d- -f2`
+
+    tlinux_branch=`echo $tagged_name|cut -d- -f3`
+
+    if [ $release -eq 19 ]; then
+        rpm_name="kernel"
+        tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3`
+    else
+        rpm_name="kernel"-${tlinux_branch}
+        tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f4`
+    fi
+
+    echo "rpm_name $rpm_name"
+    #tlinux_release=${tagged_name#*-}
+    kernel_full_name=${rpm_name}-${kernel_version}
+    echo $kernel_full_name
+    #exit 1
+}
+
+prepare_tlinux_spec()
+{
+    spec_file_name=${build_specdir}/${kernel_full_name}_${dist}.spec
+    spec_contents="%define name ${rpm_name} \n"
+    spec_contents+="%define version ${kernel_version} \n"
+    spec_contents+="%define release_os ${tlinux_release} \n"
+    spec_contents+="%define title ${tlinux_branch} \n"
+    spec_contents+="%define tagged_name ${tagged_name} \n"
+
+    echo -e "${spec_contents}" > $spec_file_name
+
+    if [ "$kvm_guest" = "yes" ] ; then
+        cat $curdir/kernel-tlinux_kvmguest_${dist}.spec >> $spec_file_name
+    else
+        cat $curdir/kernel-tlinux_${dist}.spec >> $spec_file_name
+        #cp $curdir/${kernel_full_name}_${dist}.spec $spec_file_name
+    fi
+}
+
+prepare_tlinux_bin_sources()
+{
+    local kernel_tl3_rpm=${rpm_name}-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm
+    local kernel_debuginfo_tl3_rpm=${rpm_name}-debuginfo-common-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm
+    local kernel_headers_tl3_rpm=${rpm_name}-headers-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm
+    local kernel_devel_tl3_rpm=${rpm_name}-devel-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm
+
+    if [ ! -f $kernel_tl3_rpm ]; then
+        echo "$kernel_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    if [ ! -f $kernel_debuginfo_tl3_rpm ]; then
+        echo "$kernel_debuginfo_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    if [ ! -f $kernel_headers_tl3_rpm ]; then
+        echo "$kernel_headers_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    if [ ! -f $kernel_devel_tl3_rpm ]; then
+        echo "$kernel_devel_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    rm -rf ${rpm_name}-${kernel_version}
+    rm -rf ${rpm_name}-${kernel_version}_bin.tar.gz
+    mkdir ${rpm_name}-${kernel_version}
+    pushd .
+    cd ${rpm_name}-${kernel_version}
+    rpm2cpio ../$kernel_tl3_rpm | cpio -divm
+    rpm2cpio ../$kernel_debuginfo_tl3_rpm | cpio -divm
+    rpm2cpio ../$kernel_headers_tl3_rpm | cpio -divm
+    rpm2cpio ../$kernel_devel_tl3_rpm | cpio -divm
+    popd
+    tar -czvf ${rpm_name}-${kernel_version}_bin.tar.gz ${rpm_name}-${kernel_version}
+}
+
+# main
+
+if [ $# -eq 0 ]; then
+    echo "please specify a tag"
+    exit 1
+fi
+
+tag_name=$1
+
+if [ ! -e "${build_srcdir}" ]; then
+    mkdir $build_srcdir || exit 1
+fi
+
+if [ ! -e "${build_specdir}" ]; then
+    mkdir $build_specdir || exit 1
+fi
+
+origsrc=`pwd`
+
+get_kernel_version $origsrc
+get_tlinux_name
+prepare_tlinux_bin_sources
+echo "Prepare $kernel_full_name source."
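+# Example: a hypothetical tag "public-arm64-4.14.99-19-0001" decomposes, per
+# get_kernel_version/get_tlinux_name above, as:
+#   tagged_name     4.14.99-19-0001   (public-arm64- prefix stripped)
+#   kernel_version  4.14.99           (field 1)
+#   release         19                (field 2)
+#   tlinux_branch   0001              (field 3)
+#   tlinux_release  19.0001           with rpm_name "kernel" (release-19 branch)
+# so prepare_tlinux_bin_sources above expects kernel-4.14.99-19.0001.tl3.aarch64.rpm
+# and its debuginfo/headers/devel siblings in the current directory.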
+prepare_tlinux_spec
+
+
+if test -e ${build_srcdir}/${kernel_full_name}; then
+    rm -fr ${build_srcdir}/${kernel_full_name}
+fi
+
+rm -rf /var/tmp/${kernel_full_name}
+
+cp ${kernel_full_name}_bin.tar.gz ${build_srcdir}/
+
+echo "Build kernel rpm package."
+
+rpmbuild -bb ${target} ${spec_file_name}
diff --git a/package/arm/repackage/kernel-tlinux_tl2.spec b/package/arm/repackage/kernel-tlinux_tl2.spec
new file mode 100644
index 000000000000..1de3e5e399b0
--- /dev/null
+++ b/package/arm/repackage/kernel-tlinux_tl2.spec
@@ -0,0 +1,231 @@
+%global with_debuginfo 0
+%global with_perf 1
+%if 0%{?rhel} == 6
+%global rdist .tl1
+%global debug_path /usr/lib/debug/lib/
+%else
+%global debug_path /usr/lib/debug/usr/lib/
+%if 0%{?rhel} == 7
+%global rdist .tl2
+%global _enable_debug_packages %{nil}
+%global debug_package %{nil}
+%global __debug_package %{nil}
+%global __debug_install_post %{nil}
+%global _build_id_links none
+%endif
+%if 0%{?rhel} == 8
+%global rdist .tl3
+%global __python /usr/bin/python2
+%global _enable_debug_packages %{nil}
+%global debug_package %{nil}
+%global __debug_package %{nil}
+%global __debug_install_post %{nil}
+%global _build_id_links none
+%endif
+%endif
+
+%global dist %{nil}
+
+Summary: Kernel for Tencent physical machine
+Name: %{name}
+Version: %{version}
+Release: %{release_os}%{?rdist}
+License: GPLv2
+Vendor: Tencent
+Packager: tlinux team
+Provides: kernel = %{version}-%{release}
+Group: System Environment/Kernel
+Source0: %{name}-%{version}_bin.tar.gz
+Source1: tlinux_cciss_link.modules
+URL: http://www.tencent.com
+ExclusiveArch: aarch64
+Distribution: Tencent Linux
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build
+BuildRequires: wget bc module-init-tools curl
+%if %{with_perf}
+BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex
+BuildRequires: xmlto asciidoc hmaccalc
+BuildRequires: audit-libs-devel
+#BuildRequires: uboot-tools
+%if 0%{?rhel} == 8
+BuildRequires: python2-devel
+%else
+BuildRequires: python-devel
+%endif
+
+
+%ifnarch s390 s390x
+BuildRequires: numactl-devel
+%endif
+%endif
+Requires(pre): linux-firmware >= 20100806-2
+
+# for the 'hostname' command
+%if 0%{?rhel} == 7
+BuildRequires: hostname
+%else
+BuildRequires: net-tools
+%endif
+
+%description
+This package contains the tlinux kernel for arm64 bare-metal & VM.
+
+%package debuginfo-common
+Summary: tlinux kernel vmlinux for crash debugging
+Release: %{release}
+Group: System Environment/Kernel
+Provides: kernel-debuginfo-common kernel-debuginfo
+AutoReqprov: no
+
+%description debuginfo-common
+This package contains the vmlinux, System.map and config files for kernel
+debugging.
+
+%package devel
+Summary: Development package for building kernel modules to match the %{version}-%{release} kernel
+Release: %{release}
+Group: System Environment/Kernel
+AutoReqprov: no
+
+%description devel
+This package provides kernel headers and makefiles sufficient to build modules
+against the %{version}-%{release} kernel package.
+
+
+%package headers
+Summary: Header files for the Linux kernel for use by glibc
+Group: Development/System
+Obsoletes: glibc-kernheaders
+Provides: glibc-kernheaders = 3.0-46
+AutoReqprov: no
+
+%description headers
+Kernel-headers includes the C header files that specify the interface
+between the Linux kernel and userspace libraries and programs. The
+header files define structures and constants that are needed for
+building most standard programs and are also needed for rebuilding the
+glibc package.
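+
+# Note: Source0, the *_bin.tar.gz tarball, is produced by repackage/generate-rpms.sh
+# (prepare_tlinux_bin_sources): the already-built .tl3 rpms are unpacked with
+# rpm2cpio and re-tarred, so the tarball root looks roughly like (hypothetical
+# version 4.14.99):
+#   kernel-4.14.99/lib/modules/<tagged_name>/...
+#   kernel-4.14.99/boot/...
+#   kernel-4.14.99/usr/include/...
+#   kernel-4.14.99/usr/src/kernels/...
+# %%prep below unpacks it and %%install copies the tree into the buildroot.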
+
+
+# prep #########################################################################
+%prep
+%setup -q -c -T -a 0
+
+# build ########################################################################
+%build
+
+
+# install ######################################################################
+%install
+
+
+echo install %{version}
+arch=`uname -m`
+if [ "$arch" != "aarch64" ]; then
+    echo "Unexpected error: cannot build this rpm on a non-aarch64 platform"
+    exit 1
+fi
+
+cd %{name}-%{version}
+
+cp -rp * %buildroot/
+
+elfname=vmlinuz-%{tagged_name}%{?dist}
+mapname=System.map-%{tagged_name}%{?dist}
+configname=config-%{tagged_name}%{?dist}
+
+mkdir -p %buildroot/boot
+mv %buildroot/lib/modules/%tagged_name/vmlinuz %buildroot/boot/${elfname}
+mv %buildroot/lib/modules/%tagged_name/dist_compat/${mapname} %buildroot/boot/${mapname}
+mv %buildroot/lib/modules/%tagged_name/dist_compat/${configname} %buildroot/boot/${configname}
+mv %buildroot/lib/modules/%tagged_name/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/boot/
+
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols.bin %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.softdep %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.devname %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep.bin %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.builtin.bin %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias.bin %buildroot/lib/modules/%tagged_name/
+mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias %buildroot/lib/modules/%tagged_name/
+
+
+
+%pre
+# pre #########################################################################
+system_arch=`uname -m`
+
+if [ %{_target_cpu} != ${system_arch} ]; then
+    echo "ERROR: failed to install this rpm."
+    echo "This rpm is intended for the %{_target_cpu} platform. It seems your system is ${system_arch}.";
+    exit 1;
+fi;
+
+%post
+# post #########################################################################
+echo "Install tlinux kernel"
+%if 0%{?rhel} == 8
+kernel-install add %{tagged_name} /lib/modules/%{tagged_name}/vmlinuz
+cp -p /lib/modules/%{tagged_name}/dist_compat/config-%{tagged_name}%{?dist} /boot
+cp -p /lib/modules/%{tagged_name}/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac /boot
+cp -p /lib/modules/%{tagged_name}/dist_compat/System.map-%{tagged_name}%{?dist} /boot
+%else
+%if 0%{?rhel} == 7
+/sbin/new-kernel-pkg --package kernel --install %{tagged_name}%{?dist} --make-default || exit $?
+echo -e "Set Grub default to \"%{tagged_name}%{?dist}\" Done."
+%endif
+%endif
+
+%preun
+# preun #######################################################################
+%if 0%{?rhel} == 8
+kernel-install remove %{tagged_name}
+%else
+%if 0%{?rhel} == 7
+/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $?
+echo -e "Remove \"%{tagged_name}%{?dist}\" Done."
+%endif
+%endif
+
+%posttrans
+# posttrans ####################################################################
+%if 0%{?rhel} == 7
+/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $?
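+# For reference (assumed equivalence, not part of the original flow):
+# --mkinitrd --dracut regenerates the initramfs roughly the way a manual
+#   dracut -f /boot/initramfs-%%{tagged_name}%%{?dist}.img %%{tagged_name}%%{?dist}
+# would, and --depmod reruns depmod for the new module tree.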
+/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $?
+%endif
+
+%files
+# files ########################################################################
+%defattr(-,root,root)
+/boot/vmlinuz-%{tagged_name}%{?dist}
+/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac
+#/boot/uImage-%%{tagged_name}%%{?dist}
+/boot/System.map-%{tagged_name}%{?dist}
+/boot/config-%{tagged_name}%{?dist}
+/boot/symvers-%{tagged_name}%{?dist}*
+/etc/sysconfig/modules/tlinux_cciss_link.modules
+/lib/modules/%{tagged_name}/*
+#do not package firmware, use the linux-firmware rpm instead
+#/lib/firmware/
+
+%files debuginfo-common
+%defattr(-,root,root)
+/boot/vmlinux-%{tagged_name}%{?dist}
+%dir %debug_path
+%debug_path/modules
+#/boot/System.map-%%{version}-%%{title}-%%{release_os}-*
+#/boot/config-%%{version}-%%{title}-%%{release_os}-*
+
+%files headers
+%defattr(-,root,root)
+/usr/include/*
+
+%files devel
+%defattr(-,root,root)
+/usr/src/kernels/%{tagged_name}%{?dist}
+
+# changelog ###################################################################
+%changelog
+* Mon Mar 11 2019 Jia Wang
+ - Initial 4.14.99 repository
diff --git a/package/arm/tlinux_cciss_link.modules b/package/arm/tlinux_cciss_link.modules
new file mode 100644
index 000000000000..8d15da12d851
--- /dev/null
+++ b/package/arm/tlinux_cciss_link.modules
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+if [ -b /dev/sda -a ! -b /dev/cciss/c0d0 ]; then
+    mkdir -p /dev/cciss
+    set -- 0 a 1 b 2 c 3 d 4 e 5 f 6 g 7 h 8 i 9 j 10 k 11 l 12 m 13 n 14 o 15 p
+    while [ $# -ge 2 ]; do
+        c=/dev/cciss/c0d$1; shift
+        s=/dev/sd$1; shift
+        ln -s ${s} ${c}
+        ln -s ${s}1 ${c}p1
+        ln -s ${s}2 ${c}p2
+        ln -s ${s}3 ${c}p3
+        ln -s ${s}4 ${c}p4
+    done
+fi
+
diff --git a/package/default/config.default b/package/default/config.default
new file mode 100644
index 000000000000..c592bfa82ed4
--- /dev/null
+++ b/package/default/config.default
@@ -0,0 +1,5077 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/x86 5.4.32-1 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80301 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-tlinux4-0001" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=19 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y 
+CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +CONFIG_RD_LZMA=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +CONFIG_SLUB_MEMCG_SYSFS_ON=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_SLAB_MERGE_DEFAULT is not set +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y 
+CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=5 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +# CONFIG_RETPOLINE is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +CONFIG_X86_NUMACHIP=y +# CONFIG_X86_VSMP is not set +# CONFIG_X86_UV is not set +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_LPSS is not set +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=m +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_XEN is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_PVH=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_JAILHOUSE_GUEST=y +# CONFIG_ACRN_GUEST is not set +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +CONFIG_PERF_EVENTS_AMD_POWER=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +CONFIG_X86_INTEL_MPX=y 
+CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_MIXED is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +# CONFIG_KEXEC_SIG is not set +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +CONFIG_COMPAT_VDSO=y +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_XONLY is not set +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +# CONFIG_ACPI_AC is not set +# CONFIG_ACPI_BATTERY is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NFIT=y +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +# CONFIG_DPTF_POWER is not set +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_STAT is not set 
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +CONFIG_X86_SPEEDSTEP_CENTRINO=m +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_MMCONF_FAM10H=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +# end of Binary Emulations + +CONFIG_X86_DEV_DMA_OPS=y + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_RCI2_TABLE is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +# CONFIG_JUMP_LABEL is not set +CONFIG_OPTPROBES=y 
+CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set 
+CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not 
set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DEVICE=y +CONFIG_DEV_PAGEMAP_OPS=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +# CONFIG_TLS_DEVICE is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_TOA=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_NETLABEL is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y 
+CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +# CONFIG_NF_CT_PROTO_DCCP is not set +CONFIG_NF_CT_PROTO_GRE=y +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=m +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +# CONFIG_IP_NF_TARGET_NETMAP is not set 
+CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +# CONFIG_IP6_NF_SECURITY is not set +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m 
+CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +# 
CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support + +CONFIG_VMD=y + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Bus devices +# +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_FD=m +CONFIG_CDROM=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=y +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +CONFIG_BLK_DEV_SX8=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# CONFIG_NVME_TARGET is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_HP_ILO=m +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not 
set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +CONFIG_ALTERA_STAPL=m +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_INTEL_MEI_HDCP is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=y +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=y +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=y +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=y +CONFIG_SCSI_AACRAID=y +CONFIG_SCSI_AIC7XXX=y +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=y +CONFIG_AIC79XX_CMDS_PER_DEVICE=4 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_AIC79XX_DEBUG_MASK=0 +# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set +CONFIG_SCSI_AIC94XX=y +CONFIG_AIC94XX_DEBUG=y +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +CONFIG_SCSI_ARCMSR=y +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y 
+CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_SMARTPQI is not set +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +# CONFIG_SCSI_UFS_BSG is not set +CONFIG_SCSI_HPTIOP=y +CONFIG_SCSI_BUSLOGIC=y +# CONFIG_SCSI_FLASHPOINT is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=m +CONFIG_LIBFC=y +CONFIG_LIBFCOE=y +CONFIG_FCOE=y +CONFIG_FCOE_FNIC=y +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +CONFIG_SCSI_GDTH=y +CONFIG_SCSI_ISCI=y +CONFIG_SCSI_IPS=y +CONFIG_SCSI_INITIO=y +# CONFIG_SCSI_INIA100 is not set +CONFIG_SCSI_STEX=y +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=y +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=y +CONFIG_SCSI_LPFC=y +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=y +CONFIG_SCSI_PM8001=y +CONFIG_SCSI_BFA_FC=y +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_CHELSIO_FCOE is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_SATA_INIC162X is not set +CONFIG_SATA_ACARD_AHCI=y +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=y +CONFIG_SATA_QSTOR=y +CONFIG_SATA_SX4=y +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +CONFIG_SATA_MV=y +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_SVW=y +CONFIG_SATA_ULI=y +CONFIG_SATA_VIA=y +CONFIG_SATA_VITESSE=y + +# +# PATA SFF controllers with BMDMA +# +CONFIG_PATA_ALI=y +CONFIG_PATA_AMD=y +CONFIG_PATA_ARTOP=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_ATP867X=y +CONFIG_PATA_CMD64X=y +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +CONFIG_PATA_HPT366=y +CONFIG_PATA_HPT37X=y +CONFIG_PATA_HPT3X2N=y +CONFIG_PATA_HPT3X3=y +# CONFIG_PATA_HPT3X3_DMA is not set +CONFIG_PATA_IT8213=y +CONFIG_PATA_IT821X=y +CONFIG_PATA_JMICRON=y +CONFIG_PATA_MARVELL=y +CONFIG_PATA_NETCELL=y +CONFIG_PATA_NINJA32=y +# CONFIG_PATA_NS87415 is not set +CONFIG_PATA_OLDPIIX=y +# CONFIG_PATA_OPTIDMA is not set +CONFIG_PATA_PDC2027X=y +CONFIG_PATA_PDC_OLD=y +# CONFIG_PATA_RADISYS is not set +CONFIG_PATA_RDC=y +CONFIG_PATA_SCH=y +CONFIG_PATA_SERVERWORKS=y +CONFIG_PATA_SIL680=y +CONFIG_PATA_SIS=y +CONFIG_PATA_TOSHIBA=y +# CONFIG_PATA_TRIFLEX is not set +CONFIG_PATA_VIA=y +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_LEGACY=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=y 
+CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +# CONFIG_TCM_USER2 is not set +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +# CONFIG_ISCSI_TARGET_CXGB4 is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +CONFIG_FUSION_FC=y +CONFIG_FUSION_SAS=y +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=y +# CONFIG_FUSION_LAN is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NLMON is not set +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m 
+CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m +CONFIG_NET_VENDOR_CADENCE=y +CONFIG_MACB=m +CONFIG_MACB_USE_HWSTAMP=y +# CONFIG_MACB_PCI is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +# CONFIG_CAVIUM_PTP is not set +CONFIG_LIQUIDIO=m +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +# CONFIG_TULIP_NAPI is not set +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_HINIC is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +CONFIG_JME=m +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_NETXEN_NIC=m +# CONFIG_QED is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCOM_EMAC is not set +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# 
CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_EPIC100=m +# CONFIG_SMSC911X is not set +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=y +CONFIG_SWPHY=y + +# +# MII PHY device drivers +# +# CONFIG_ADIN_PHY is not set +CONFIG_AMD_PHY=m +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_TJA11XX_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +# CONFIG_TERANETICS_PHY is not set +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_HWSIM is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is 
not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=y +CONFIG_KEYBOARD_XTKBD=y +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_PCSPKR is not set +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=y +CONFIG_SERIO_ALTERA_PS2=y +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=y +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ 
is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_NVRAM=y +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +CONFIG_HPET_MMAP_DEFAULT=y +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_CRB is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +CONFIG_TELCLOCK=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +# CONFIG_I2C_HELPER_AUTO is not set +# CONFIG_I2C_SMBUS is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_ISMT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + 
+# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=m +# CONFIG_DP83640_PHY is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# end of PTP clock support + +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_BAYTRAIL is not set +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_BROXTON is not set +# CONFIG_PINCTRL_CANNONLAKE is not set +# CONFIG_PINCTRL_CEDARFORK is not set +# CONFIG_PINCTRL_DENVERTON is not set +# CONFIG_PINCTRL_GEMINILAKE is not set +# CONFIG_PINCTRL_ICELAKE is not set +# CONFIG_PINCTRL_LEWISBURG is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_DELL_SMM is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set 
+CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_I5500 is not set +# CONFIG_SENSORS_CORETEMP is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS 
is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +# CONFIG_INTEL_POWERCLAMP is not set +CONFIG_X86_PKG_TEMP_THERMAL=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# end of ACPI INT340X thermal drivers + +# CONFIG_INTEL_PCH_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_IR_RCMM_DECODER is not set +# CONFIG_RC_DEVICES is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_AGP=y +# CONFIG_AGP_AMD64 is not set +CONFIG_AGP_INTEL=y +# CONFIG_AGP_SIS is not set +# CONFIG_AGP_VIA is not set +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +# CONFIG_VGA_SWITCHEROO is not set +CONFIG_DRM=m 
+CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +# CONFIG_DRM_RADEON is not set +# CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE is not set +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# end of ACP (Audio CoProcessor) Configuration + +# CONFIG_DRM_NOUVEAU is not set +CONFIG_DRM_I915=m +# CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +# CONFIG_DRM_I915_GVT is not set + +# +# drm/i915 Debugging +# +# CONFIG_DRM_I915_WERROR is not set +# CONFIG_DRM_I915_DEBUG is not set +# CONFIG_DRM_I915_DEBUG_MMIO is not set +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set +# CONFIG_DRM_I915_DEBUG_GUC is not set +# CONFIG_DRM_I915_SELFTEST is not set +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set +# end of drm/i915 Debugging + +# +# drm/i915 Profile Guided Optimisation +# +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_SPIN_REQUEST=5 +# end of drm/i915 Profile Guided Optimisation + +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +# CONFIG_DRM_VMWGFX is not set +# CONFIG_DRM_GMA500 is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_VESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 
is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_INTEL is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=m +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_APPLE is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# 
CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +CONFIG_USB_HIDDEV=y +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +# end of Intel ISH HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_ARCH_PHYTIUM_1500A_USB_RESET_PROBE_REMOVE is not set +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +# CONFIG_USB_UAS is 
not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +# CONFIG_INFINIBAND_CXGB3 is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_I40IW is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_MLX5_INFINIBAND is not set +CONFIG_INFINIBAND_OCRDMA=m +# CONFIG_INFINIBAND_USNIC is not set +# CONFIG_INFINIBAND_BNXT_RE is not set +# CONFIG_INFINIBAND_RDMAVT is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_OPA_VNIC is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=m +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y 
+CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=y +CONFIG_INTEL_IOATDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_DW_EDMA_PCIE is not set +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_SELFTESTS is not set +# end of DMABUF options + +CONFIG_DCA=y +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI_IGD=y +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m 
+# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ASHMEM=y +# CONFIG_ANDROID_VSOC is not set +# CONFIG_ION is not set +# end of Android + +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_MOST is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# end of Gasket devices + +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_UWB is not set +# CONFIG_EXFAT_FS is not set +CONFIG_QLGE=m +# CONFIG_X86_PLATFORM_DEVICES is not set +CONFIG_PMC_ATOM=y +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y +# CONFIG_SMMU_BYPASS_DEV is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS 
is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_RAS_CEC is not set +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +# CONFIG_ANDROID_BINDERFS is not set +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_BLK=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_DEV_DAX_PMEM_COMPAT=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_PM_OPP=y +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=m +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is 
not set +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +# CONFIG_SQUASHFS_ZLIB is not set +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=m +CONFIG_PSTORE_LZO_COMPRESS=m +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y 
+CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +# CONFIG_NFSD_V4_SECURITY_LABEL is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +# CONFIG_CIFS_WEAK_PW_HASH is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +# CONFIG_UNICODE is not set +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_TRUSTED_KEYS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +# CONFIG_SECURITY_INFINIBAND is not set +# CONFIG_SECURITY_NETWORK_XFRM is not set +CONFIG_SECURITY_PATH=y +# CONFIG_INTEL_TXT is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# 
CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_SELINUX is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_INTEGRITY is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_GLUE_HELPER_X86=m +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +# CONFIG_CRYPTO_XXHASH is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# 
CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_DES=m +# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CHELSIO is not set +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y 
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_IOMMU_HELPER=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_BTF is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# 
CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +# end of Memory Debugging + +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is 
not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_LIVEPATCH is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +# CONFIG_UNWINDER_ORC is not set +CONFIG_UNWINDER_FRAME_POINTER=y +# CONFIG_UNWINDER_GUESS is not set +# end of Kernel hacking diff --git a/package/default/config.default_kasan b/package/default/config.default_kasan new file mode 100644 index 000000000000..de4d63d7ab42 --- /dev/null +++ b/package/default/config.default_kasan @@ -0,0 +1,5052 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 5.4.32-1 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80301 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-tlinux4-0001" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=19 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y 
+CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +CONFIG_RD_LZMA=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +CONFIG_SLUB_MEMCG_SYSFS_ON=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_SLAB_MERGE_DEFAULT is not set +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y 
+CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=5 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +# CONFIG_RETPOLINE is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +CONFIG_X86_NUMACHIP=y +# CONFIG_X86_VSMP is not set +# CONFIG_X86_UV is not set +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_LPSS is not set +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=m +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_XEN is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_PVH=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_JAILHOUSE_GUEST=y +# CONFIG_ACRN_GUEST is not set +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +CONFIG_PERF_EVENTS_AMD_POWER=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y 
+CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +CONFIG_X86_INTEL_MPX=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_MIXED is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +# CONFIG_KEXEC_SIG is not set +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +CONFIG_COMPAT_VDSO=y +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_XONLY is not set +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +# CONFIG_ACPI_AC is not set +# CONFIG_ACPI_BATTERY is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NFIT=y +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +# CONFIG_DPTF_POWER is not set +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y 
+CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +CONFIG_X86_SPEEDSTEP_CENTRINO=m +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_MMCONF_FAM10H=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +# end of Binary Emulations + +CONFIG_X86_DEV_DMA_OPS=y + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_RCI2_TABLE is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y 
+CONFIG_KPROBES=y +# CONFIG_JUMP_LABEL is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# 
CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# 
CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DEVICE=y +CONFIG_DEV_PAGEMAP_OPS=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +# CONFIG_TLS_DEVICE is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_TOA=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y 
+CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +# CONFIG_NF_CT_PROTO_DCCP is not set +CONFIG_NF_CT_PROTO_GRE=y +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=m +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +# CONFIG_IP_NF_TARGET_NETMAP is not set 
+CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# 
+CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support + +CONFIG_VMD=y + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Bus devices +# +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_FD=m +CONFIG_CDROM=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=y +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +CONFIG_BLK_DEV_SX8=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# CONFIG_NVME_TARGET is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_HP_ILO=m +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +CONFIG_ALTERA_STAPL=m +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_INTEL_MEI_HDCP is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=y +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=y +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=y +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=y +CONFIG_SCSI_AACRAID=y +CONFIG_SCSI_AIC7XXX=y +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=y +CONFIG_AIC79XX_CMDS_PER_DEVICE=4 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_AIC79XX_DEBUG_MASK=0 +# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set +CONFIG_SCSI_AIC94XX=y +CONFIG_AIC94XX_DEBUG=y +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +CONFIG_SCSI_ARCMSR=y +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_SMARTPQI is not set +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +# CONFIG_SCSI_UFS_BSG is not set +CONFIG_SCSI_HPTIOP=y +CONFIG_SCSI_BUSLOGIC=y +# CONFIG_SCSI_FLASHPOINT is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=m +CONFIG_LIBFC=y +CONFIG_LIBFCOE=y +CONFIG_FCOE=y +CONFIG_FCOE_FNIC=y +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +CONFIG_SCSI_GDTH=y +CONFIG_SCSI_ISCI=y +CONFIG_SCSI_IPS=y +CONFIG_SCSI_INITIO=y +# CONFIG_SCSI_INIA100 is not set +CONFIG_SCSI_STEX=y +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=y +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=y +CONFIG_SCSI_LPFC=y +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=y +CONFIG_SCSI_PM8001=y +CONFIG_SCSI_BFA_FC=y +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_CHELSIO_FCOE is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_SATA_INIC162X is not set +CONFIG_SATA_ACARD_AHCI=y +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=y +CONFIG_SATA_QSTOR=y +CONFIG_SATA_SX4=y +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +CONFIG_SATA_MV=y +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_SVW=y +CONFIG_SATA_ULI=y +CONFIG_SATA_VIA=y +CONFIG_SATA_VITESSE=y + +# +# PATA SFF controllers with BMDMA +# +CONFIG_PATA_ALI=y +CONFIG_PATA_AMD=y +CONFIG_PATA_ARTOP=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_ATP867X=y +CONFIG_PATA_CMD64X=y +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +CONFIG_PATA_HPT366=y +CONFIG_PATA_HPT37X=y +CONFIG_PATA_HPT3X2N=y +CONFIG_PATA_HPT3X3=y +# CONFIG_PATA_HPT3X3_DMA is not set +CONFIG_PATA_IT8213=y +CONFIG_PATA_IT821X=y +CONFIG_PATA_JMICRON=y +CONFIG_PATA_MARVELL=y +CONFIG_PATA_NETCELL=y +CONFIG_PATA_NINJA32=y +# CONFIG_PATA_NS87415 is not set +CONFIG_PATA_OLDPIIX=y +# CONFIG_PATA_OPTIDMA is not set +CONFIG_PATA_PDC2027X=y +CONFIG_PATA_PDC_OLD=y +# CONFIG_PATA_RADISYS is not set +CONFIG_PATA_RDC=y +CONFIG_PATA_SCH=y +CONFIG_PATA_SERVERWORKS=y +CONFIG_PATA_SIL680=y +CONFIG_PATA_SIS=y +CONFIG_PATA_TOSHIBA=y +# CONFIG_PATA_TRIFLEX is not set +CONFIG_PATA_VIA=y +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_LEGACY=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m 
+# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +# CONFIG_TCM_USER2 is not set +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +# CONFIG_ISCSI_TARGET_CXGB4 is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +CONFIG_FUSION_FC=y +CONFIG_FUSION_SAS=y +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=y +# CONFIG_FUSION_LAN is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NLMON is not set +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m 
+CONFIG_NET_VENDOR_CADENCE=y +CONFIG_MACB=m +CONFIG_MACB_USE_HWSTAMP=y +# CONFIG_MACB_PCI is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +# CONFIG_CAVIUM_PTP is not set +CONFIG_LIQUIDIO=m +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +# CONFIG_TULIP_NAPI is not set +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_HINIC is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +CONFIG_JME=m +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_NETXEN_NIC=m +# CONFIG_QED is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCOM_EMAC is not set +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set 
+CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_EPIC100=m +# CONFIG_SMSC911X is not set +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=y +CONFIG_SWPHY=y + +# +# MII PHY device drivers +# +# CONFIG_ADIN_PHY is not set +CONFIG_AMD_PHY=m +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_TJA11XX_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +# CONFIG_TERANETICS_PHY is not set +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_HWSIM is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=y +CONFIG_KEYBOARD_XTKBD=y +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_PCSPKR is not set +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=y +CONFIG_SERIO_ALTERA_PS2=y +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=y +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +# CONFIG_SERIAL_8250_DW is not set
+# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_NVRAM=y +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +CONFIG_HPET_MMAP_DEFAULT=y +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_CRB is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +CONFIG_TELCLOCK=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +# CONFIG_I2C_HELPER_AUTO is not set +# CONFIG_I2C_SMBUS is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_ISMT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# end of I2C Hardware Bus support
+ +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=m +# CONFIG_DP83640_PHY is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# end of PTP clock support + +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_BAYTRAIL is not set +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_BROXTON is not set +# CONFIG_PINCTRL_CANNONLAKE is not set +# CONFIG_PINCTRL_CEDARFORK is not set +# CONFIG_PINCTRL_DENVERTON is not set +# CONFIG_PINCTRL_GEMINILAKE is not set +# CONFIG_PINCTRL_ICELAKE is not set +# CONFIG_PINCTRL_LEWISBURG is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_DELL_SMM is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_I5500 is not set +# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +# CONFIG_INTEL_POWERCLAMP is not set +CONFIG_X86_PKG_TEMP_THERMAL=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# end of ACPI INT340X thermal drivers + +# CONFIG_INTEL_PCH_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_IR_RCMM_DECODER is not set +# CONFIG_RC_DEVICES is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_AGP=y +# CONFIG_AGP_AMD64 is not set +CONFIG_AGP_INTEL=y +# CONFIG_AGP_SIS is not set +# CONFIG_AGP_VIA is not set +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +# CONFIG_VGA_SWITCHEROO is not set +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set 
+CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +# CONFIG_DRM_RADEON is not set +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# end of ACP (Audio CoProcessor) Configuration + +# CONFIG_DRM_NOUVEAU is not set +CONFIG_DRM_I915=m +# CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +# CONFIG_DRM_I915_GVT is not set + +# +# drm/i915 Debugging +# +# CONFIG_DRM_I915_WERROR is not set +# CONFIG_DRM_I915_DEBUG is not set +# CONFIG_DRM_I915_DEBUG_MMIO is not set +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set +# CONFIG_DRM_I915_DEBUG_GUC is not set +# CONFIG_DRM_I915_SELFTEST is not set +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set +# end of drm/i915 Debugging + +# +# drm/i915 Profile Guided Optimisation +# +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_SPIN_REQUEST=5 +# end of drm/i915 Profile Guided Optimisation + +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +# CONFIG_DRM_VMWGFX is not set +# CONFIG_DRM_GMA500 is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_VESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_INTEL is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=m +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_APPLE is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +CONFIG_USB_HIDDEV=y +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +# end of Intel ISH HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +# CONFIG_INFINIBAND_CXGB3 is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_I40IW is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_MLX5_INFINIBAND is not set +CONFIG_INFINIBAND_OCRDMA=m +# CONFIG_INFINIBAND_USNIC is not set +# CONFIG_INFINIBAND_BNXT_RE is not set +# CONFIG_INFINIBAND_RDMAVT is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_OPA_VNIC is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=m +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is
not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=y +CONFIG_INTEL_IOATDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_DW_EDMA_PCIE is not set +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_SELFTESTS is not set +# end of DMABUF options + +CONFIG_DCA=y +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI_IGD=y +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support 
+ +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ASHMEM=y +# CONFIG_ANDROID_VSOC is not set +# CONFIG_ION is not set +# end of Android + +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_MOST is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# end of Gasket devices + +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_UWB is not set +# CONFIG_EXFAT_FS is not set +CONFIG_QLGE=m +# CONFIG_X86_PLATFORM_DEVICES is not set +CONFIG_PMC_ATOM=y +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y +# CONFIG_SMMU_BYPASS_DEV is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# 
CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_RAS_CEC is not set +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +# CONFIG_ANDROID_BINDERFS is not set +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_BLK=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_DEV_DAX_PMEM_COMPAT=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_PM_OPP=y +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=m +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +CONFIG_OVERLAY_FS_METACOPY=y + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# 
CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +# CONFIG_SQUASHFS_ZLIB is not set +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=m +CONFIG_PSTORE_LZO_COMPRESS=m +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m 
+CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +# CONFIG_CIFS_WEAK_PW_HASH is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +# CONFIG_UNICODE is not set +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_TRUSTED_KEYS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +CONFIG_SECURITYFS=y +CONFIG_PAGE_TABLE_ISOLATION=y +# CONFIG_INTEL_TXT is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y 
+CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_GLUE_HELPER_X86=m +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +# CONFIG_CRYPTO_XXHASH is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_DES=m +# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m 
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CHELSIO is not set +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y 
+CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_IOMMU_HELPER=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_BTF is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_KASAN=y +CONFIG_KASAN_GENERIC=y +CONFIG_KASAN_OUTLINE=y +# CONFIG_KASAN_INLINE is not set +CONFIG_KASAN_STACK=1 +# CONFIG_TEST_KASAN is not set +# end of Memory Debugging + +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y 
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set 
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_LIVEPATCH is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +# CONFIG_UNWINDER_ORC is not set +CONFIG_UNWINDER_FRAME_POINTER=y +# end of Kernel hacking diff --git a/package/default/config.default_nosign b/package/default/config.default_nosign new file mode 100644 index 000000000000..f58f31116dec --- /dev/null +++ b/package/default/config.default_nosign @@ -0,0 +1,5047 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 5.4.32-1 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80301 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-tlinux4-0001" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=19 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y 
+CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +CONFIG_RD_LZMA=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +CONFIG_SLUB_MEMCG_SYSFS_ON=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_SLAB_MERGE_DEFAULT is not set +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y 
+CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=5 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +# CONFIG_RETPOLINE is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +CONFIG_X86_NUMACHIP=y +# CONFIG_X86_VSMP is not set +# CONFIG_X86_UV is not set +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_LPSS is not set +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=m +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_XEN is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_PVH=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_JAILHOUSE_GUEST=y +# CONFIG_ACRN_GUEST is not set +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +CONFIG_PERF_EVENTS_AMD_POWER=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +CONFIG_X86_INTEL_MPX=y 
+CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_MIXED is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +# CONFIG_KEXEC_SIG is not set +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +CONFIG_COMPAT_VDSO=y +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_XONLY is not set +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +# CONFIG_ACPI_AC is not set +# CONFIG_ACPI_BATTERY is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NFIT=y +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +# CONFIG_DPTF_POWER is not set +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_STAT is not set 
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +CONFIG_X86_SPEEDSTEP_CENTRINO=m +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_MMCONF_FAM10H=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +# end of Binary Emulations + +CONFIG_X86_DEV_DMA_OPS=y + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_RCI2_TABLE is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +# CONFIG_JUMP_LABEL is not set +CONFIG_OPTPROBES=y 
+CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set 
+CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set
+# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DEVICE=y +CONFIG_DEV_PAGEMAP_OPS=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +# CONFIG_TLS_DEVICE is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_TOA=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +# CONFIG_NF_CT_PROTO_DCCP is not set +CONFIG_NF_CT_PROTO_GRE=y +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=m +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +# CONFIG_IP_NF_TARGET_NETMAP is not set 
+CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# 
+CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support + +CONFIG_VMD=y + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Bus devices +# +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_FD=m +CONFIG_CDROM=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=y +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +CONFIG_BLK_DEV_SX8=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# CONFIG_NVME_TARGET is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_HP_ILO=m +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +CONFIG_ALTERA_STAPL=m +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_INTEL_MEI_HDCP is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=y +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=y +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=y +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=y +CONFIG_SCSI_AACRAID=y +CONFIG_SCSI_AIC7XXX=y +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=y +CONFIG_AIC79XX_CMDS_PER_DEVICE=4 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_AIC79XX_DEBUG_MASK=0 +# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set +CONFIG_SCSI_AIC94XX=y +CONFIG_AIC94XX_DEBUG=y +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +CONFIG_SCSI_ARCMSR=y +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_SMARTPQI is not set +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +# CONFIG_SCSI_UFS_BSG is not set +CONFIG_SCSI_HPTIOP=y +CONFIG_SCSI_BUSLOGIC=y +# CONFIG_SCSI_FLASHPOINT is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=m +CONFIG_LIBFC=y +CONFIG_LIBFCOE=y +CONFIG_FCOE=y +CONFIG_FCOE_FNIC=y +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +CONFIG_SCSI_GDTH=y +CONFIG_SCSI_ISCI=y +CONFIG_SCSI_IPS=y +CONFIG_SCSI_INITIO=y +# CONFIG_SCSI_INIA100 is not set +CONFIG_SCSI_STEX=y +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=y +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=y +CONFIG_SCSI_LPFC=y +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=y +CONFIG_SCSI_PM8001=y +CONFIG_SCSI_BFA_FC=y +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_CHELSIO_FCOE is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_SATA_INIC162X is not set +CONFIG_SATA_ACARD_AHCI=y +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=y +CONFIG_SATA_QSTOR=y +CONFIG_SATA_SX4=y +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +CONFIG_SATA_MV=y +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_SVW=y +CONFIG_SATA_ULI=y +CONFIG_SATA_VIA=y +CONFIG_SATA_VITESSE=y + +# +# PATA SFF controllers with BMDMA +# +CONFIG_PATA_ALI=y +CONFIG_PATA_AMD=y +CONFIG_PATA_ARTOP=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_ATP867X=y +CONFIG_PATA_CMD64X=y +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +CONFIG_PATA_HPT366=y +CONFIG_PATA_HPT37X=y +CONFIG_PATA_HPT3X2N=y +CONFIG_PATA_HPT3X3=y +# CONFIG_PATA_HPT3X3_DMA is not set +CONFIG_PATA_IT8213=y +CONFIG_PATA_IT821X=y +CONFIG_PATA_JMICRON=y +CONFIG_PATA_MARVELL=y +CONFIG_PATA_NETCELL=y +CONFIG_PATA_NINJA32=y +# CONFIG_PATA_NS87415 is not set +CONFIG_PATA_OLDPIIX=y +# CONFIG_PATA_OPTIDMA is not set +CONFIG_PATA_PDC2027X=y +CONFIG_PATA_PDC_OLD=y +# CONFIG_PATA_RADISYS is not set +CONFIG_PATA_RDC=y +CONFIG_PATA_SCH=y +CONFIG_PATA_SERVERWORKS=y +CONFIG_PATA_SIL680=y +CONFIG_PATA_SIS=y +CONFIG_PATA_TOSHIBA=y +# CONFIG_PATA_TRIFLEX is not set +CONFIG_PATA_VIA=y +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_LEGACY=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m 
+# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +# CONFIG_TCM_USER2 is not set +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +# CONFIG_ISCSI_TARGET_CXGB4 is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +CONFIG_FUSION_FC=y +CONFIG_FUSION_SAS=y +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=y +# CONFIG_FUSION_LAN is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NLMON is not set +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m 
+CONFIG_NET_VENDOR_CADENCE=y +CONFIG_MACB=m +CONFIG_MACB_USE_HWSTAMP=y +# CONFIG_MACB_PCI is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +# CONFIG_CAVIUM_PTP is not set +CONFIG_LIQUIDIO=m +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +# CONFIG_TULIP_NAPI is not set +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_HINIC is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +CONFIG_JME=m +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_NETXEN_NIC=m +# CONFIG_QED is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCOM_EMAC is not set +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set 
+CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_EPIC100=m +# CONFIG_SMSC911X is not set +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=y +CONFIG_SWPHY=y + +# +# MII PHY device drivers +# +# CONFIG_ADIN_PHY is not set +CONFIG_AMD_PHY=m +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_TJA11XX_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +# CONFIG_TERANETICS_PHY is not set +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_HWSIM is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=y +CONFIG_KEYBOARD_XTKBD=y +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_PCSPKR is not set +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=y +CONFIG_SERIO_ALTERA_PS2=y +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=y +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +# CONFIG_SERIAL_8250_DW is not set
+# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_NVRAM=y +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +CONFIG_HPET_MMAP_DEFAULT=y +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_CRB is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +CONFIG_TELCLOCK=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +# CONFIG_I2C_HELPER_AUTO is not set +# CONFIG_I2C_SMBUS is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_ISMT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# end of I2C Hardware Bus support
+ +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=m +# CONFIG_DP83640_PHY is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# end of PTP clock support + +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_BAYTRAIL is not set +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_BROXTON is not set +# CONFIG_PINCTRL_CANNONLAKE is not set +# CONFIG_PINCTRL_CEDARFORK is not set +# CONFIG_PINCTRL_DENVERTON is not set +# CONFIG_PINCTRL_GEMINILAKE is not set +# CONFIG_PINCTRL_ICELAKE is not set +# CONFIG_PINCTRL_LEWISBURG is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_DELL_SMM is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_I5500 is not set +# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +# CONFIG_INTEL_POWERCLAMP is not set +CONFIG_X86_PKG_TEMP_THERMAL=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# end of ACPI INT340X thermal drivers + +# CONFIG_INTEL_PCH_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_IR_RCMM_DECODER is not set +# CONFIG_RC_DEVICES is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_AGP=y +# CONFIG_AGP_AMD64 is not set +CONFIG_AGP_INTEL=y +# CONFIG_AGP_SIS is not set +# CONFIG_AGP_VIA is not set +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +# CONFIG_VGA_SWITCHEROO is not set +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set 
+CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +# CONFIG_DRM_RADEON is not set +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# end of ACP (Audio CoProcessor) Configuration + +# CONFIG_DRM_NOUVEAU is not set +CONFIG_DRM_I915=m +# CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +# CONFIG_DRM_I915_GVT is not set + +# +# drm/i915 Debugging +# +# CONFIG_DRM_I915_WERROR is not set +# CONFIG_DRM_I915_DEBUG is not set +# CONFIG_DRM_I915_DEBUG_MMIO is not set +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set +# CONFIG_DRM_I915_DEBUG_GUC is not set +# CONFIG_DRM_I915_SELFTEST is not set +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set +# end of drm/i915 Debugging + +# +# drm/i915 Profile Guided Optimisation +# +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_SPIN_REQUEST=5 +# end of drm/i915 Profile Guided Optimisation + +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +# CONFIG_DRM_VMWGFX is not set +# CONFIG_DRM_GMA500 is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_VESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_INTEL is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is 
not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=m +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_APPLE is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# 
CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +CONFIG_USB_HIDDEV=y +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +# end of Intel ISH HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# 
CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +# CONFIG_INFINIBAND_CXGB3 is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_I40IW is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_MLX5_INFINIBAND is not set +CONFIG_INFINIBAND_OCRDMA=m +# CONFIG_INFINIBAND_USNIC is not set +# CONFIG_INFINIBAND_BNXT_RE is not set +# CONFIG_INFINIBAND_RDMAVT is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_OPA_VNIC is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=m +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is 
not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=y +CONFIG_INTEL_IOATDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_DW_EDMA_PCIE is not set +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_SELFTESTS is not set +# end of DMABUF options + +CONFIG_DCA=y +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI_IGD=y +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support 
+ +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ASHMEM=y +# CONFIG_ANDROID_VSOC is not set +# CONFIG_ION is not set +# end of Android + +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_MOST is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# end of Gasket devices + +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_UWB is not set +# CONFIG_EXFAT_FS is not set +CONFIG_QLGE=m +# CONFIG_X86_PLATFORM_DEVICES is not set +CONFIG_PMC_ATOM=y +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y +# CONFIG_SMMU_BYPASS_DEV is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# 
CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_RAS_CEC is not set +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +# CONFIG_ANDROID_BINDERFS is not set +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_BLK=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_DEV_DAX_PMEM_COMPAT=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_PM_OPP=y +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=m +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +CONFIG_OVERLAY_FS_METACOPY=y + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# 
CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +# CONFIG_SQUASHFS_ZLIB is not set +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=m +CONFIG_PSTORE_LZO_COMPRESS=m +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m 
+CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +# CONFIG_CIFS_WEAK_PW_HASH is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +# CONFIG_UNICODE is not set +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_TRUSTED_KEYS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +CONFIG_SECURITYFS=y +CONFIG_PAGE_TABLE_ISOLATION=y +# CONFIG_INTEL_TXT is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y 
+CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_GLUE_HELPER_X86=m +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +# CONFIG_CRYPTO_XXHASH is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_DES=m +# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m 
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CHELSIO is not set +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y 
+CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_IOMMU_HELPER=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_BTF is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +# end of Memory Debugging + +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# 
CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT 
is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_LIVEPATCH is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +# CONFIG_UNWINDER_ORC is not set +CONFIG_UNWINDER_FRAME_POINTER=y +# CONFIG_UNWINDER_GUESS is not set +# end of Kernel hacking diff --git a/package/default/generate-rpms.sh b/package/default/generate-rpms.sh new file mode 100755 index 000000000000..ec17f3e8651c --- /dev/null +++ b/package/default/generate-rpms.sh @@ -0,0 +1,256 @@ +#!/bin/bash + +# ============================================================================== +# Description: Automatically generate a kernel rpm package for the specified branch +# according to the HEAD tag.
+# +# Author: David Shan +# =============================================================================== + +#variables +kernel_full_name="" +kernel_version="" +tlinux_release="" +curdir=`pwd` +build_specdir="/usr/src/packages/SPECS" +build_srcdir="/usr/src/packages/SOURCES" +kernel_type="default" +spec_file_name="" +make_jobs="" +nosign_suffix="" +build_src_only=0 +strip_sign_token=0 +raw_tagged_name="" +kasan=0 +# to control whether to build rpm for xen domU +isXenU="" +build_perf="" +kernel_default_types=(default) + +#functions +usage() +{ + echo "generate-rpms [-t \$tag_name ] [-j \$jobs ] [-d] [-p] [-h]" + echo " -t tag_name" + echo " Tagged name in the git tree." + echo " -j jobs" + echo " Specifies the number of jobs (threads) to run make." + echo " -d" + echo " Disable module digital signature." + echo " -p" + echo " Build the perf rpm only." + + echo -e "\n" + echo " Note: By default, we only generate the latest tagged kernel for standard kernels" + echo " without the -t and -k options." + echo " So, if you need a specific tagged kernel, please use -t." + echo " Also, for a state-type kernel only, please use the -k option with a state value." +} + +get_kernel_version() +{ + local makefile="$1/Makefile" + + if [ ! -e $makefile ]; then + echo "Error: No Makefile found." + exit 1 + fi + + kernel_version+=`grep "^VERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '` + kernel_version+="." + kernel_version+=`grep "^PATCHLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '` + kernel_version+="." + kernel_version+=`grep "^SUBLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '` + #kernel_version+=`grep "^EXTRAVERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '` + + echo "kernel version: $kernel_version" +} + +get_tlinux_name() +{ + if [ -n "$tag_name" ]; then + raw_tagged_name="$tag_name" + else + raw_tagged_name=`git describe --tags` + fi + + if [ -z "${raw_tagged_name}" ];then + echo "Error: Can't get kernel version from git tree." + exit 1 + fi + + if [[ $raw_tagged_name != public-x86-* ]]; then + echo "$raw_tagged_name is not a valid tag name for public-x86" + exit 11 + fi + + tagged_name=${raw_tagged_name#public-x86-} + + echo "${tagged_name}" | grep 'kasan' + if [ $?
-eq 0 ]; then + kasan=1 + fi + + if [ "$build_perf" = "yes" ]; then + rpm_name="perf" + else + rpm_name="kernel" + fi + + tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3` + extra_version=`echo $tagged_name | cut -d- -f2` + kernel_full_name=${rpm_name}-${kernel_version} + + echo "$kernel_version" + echo "$tlinux_release" + echo "$kernel_full_name" + echo "extra_version $extra_version" +} + +prepare_tlinux_spec() +{ + local sign_token + spec_file_name=${build_specdir}/${kernel_full_name}.spec + spec_contents="%define name ${rpm_name} \n" + spec_contents+="%define version ${kernel_version} \n" + spec_contents+="%define release_os ${tlinux_release} \n" + spec_contents+="%define tagged_name ${tagged_name} \n" + spec_contents+="%define extra_version ${extra_version} \n" + + if [ -z "${kernel_type}" ];then + spec_contents+="%define kernel_all_types ${kernel_default_types[@]} \n" + else + spec_contents+="%define kernel_all_types ${kernel_type} \n" + fi + + if [ -n "${make_jobs}" ];then + spec_contents+="%define jobs ${make_jobs} \n\n" + fi + + if [ -e /etc/redhat-release ];then + spec_contents+="%define debug_package %{nil}\n" + spec_contents+="%define __os_install_post %{nil}\n" + + fi + + echo -e "${spec_contents}" > $spec_file_name + + if [ "$build_perf" = "yes" ]; then + cat $curdir/perf-tlinux.spec >> $spec_file_name + else + cat $curdir/kernel-tlinux.spec >> $spec_file_name + fi +} + +#main + +#Parse Parameters +while getopts ":t:k:j:hdpx:" option "$@" +do + case $option in + "t" ) + tag_name=$OPTARG + ;; + "j" ) + make_jobs=$OPTARG + ;; + "h" ) + usage + exit 0 + ;; + "p" ) + build_perf="yes" + ;; + "?" ) + usage + exit 1 + ;; + esac +done + +#test if the kernel type parameter is valid +if [ -n "${kernel_type}" ] && [ ! -e config."${kernel_type}" ] +then + echo "Error: bad kernel type name. Config file for ${kernel_type} does not exist." + usage + exit 1 +elif [ -n "${kernel_type}" ] && [ -n "$isXenU" ] && [ ! -e config."${kernel_type}".xenU ] +then + echo "Error: bad kernel type name. Config file for ${kernel_type}.xenU does not exist." + usage + exit 1 + +fi + +if [ -e /etc/SuSE-release ];then + build_specdir="/usr/src/packages/SPECS" + build_srcdir="/usr/src/packages/SOURCES" +elif [ -e /etc/redhat-release ];then + build_specdir="$HOME/rpmbuild/SPECS" + build_srcdir="$HOME/rpmbuild/SOURCES" +else + echo "Error: Compiling on an unknown system." + exit 1 +fi + + +if [ ! -e "${build_srcdir}" ]; then + mkdir $build_srcdir + [ $? == 1 ] && exit 1 +fi + +if [ ! -e "${build_specdir}" ]; then + mkdir $build_specdir + [ $? == 1 ] && exit 1 +fi + +cd ../../ +origsrc=`pwd` + + +if [ $kasan -eq 1 ]; then + nosign_suffix="_kasan" +else + nosign_suffix="" +fi + +get_kernel_version $origsrc +get_tlinux_name +echo "Prepare $kernel_full_name source." +prepare_tlinux_spec + +cp package/default/tlinux_cciss_link.modules $build_srcdir + +if test -e ${build_srcdir}/${kernel_full_name}; then + rm -fr ${build_srcdir}/${kernel_full_name} +fi + +#tagged_name is a confirmed tag name and will be used for the final source. +git archive --format=tar --prefix=${kernel_full_name}/ ${raw_tagged_name} | (cd ${build_srcdir} && tar xf -) +if [ $? -ne 0 ];then + echo "Error: can't prepare $kernel_full_name source with git archive!" + exit 1 +fi + +cd ${build_srcdir}/${kernel_full_name} + +echo $config_suffix + +cd .. +tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name} +rm -fr ${kernel_full_name} + +echo "Build kernel rpm package."
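+# Illustrative example (editor's note, not part of the original script): given a hypothetical tag such as "public-x86-4.14.105-19-0010" and an invocation like "./generate-rpms.sh -t public-x86-4.14.105-19-0010 -j 8", get_tlinux_name above derives tagged_name="4.14.105-19-0010", extra_version="19" and tlinux_release="19.0010"; rpmbuild below then builds the binary rpms from the generated spec file.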
+if [ $build_src_only -eq 0 ]; then
+    rpmbuild -bb ${target} ${spec_file_name}
+fi
+
+echo "Build kernel source rpm package."
+pwd
+tar -zxf ${kernel_full_name}.tar.gz
+
+rm -fr ${kernel_full_name}/package/default/generate-rpms.sh
+tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name}
+
+rpmbuild -bs ${target} ${spec_file_name}
+
diff --git a/package/default/kernel-tlinux.spec b/package/default/kernel-tlinux.spec
new file mode 100644
index 000000000000..53501eb71faa
--- /dev/null
+++ b/package/default/kernel-tlinux.spec
@@ -0,0 +1,553 @@
+%global with_debuginfo 0
+%global with_perf 1
+%if 0%{?rhel} == 6
+%global rdist .tl1
+%global debug_path /usr/lib/debug/lib/
+%else
+%global debug_path /usr/lib/debug/usr/lib/
+%if 0%{?rhel} == 7
+%global rdist .tl2
+%else
+%if 0%{?rhel} == 8
+%global rdist .tl3
+%global __python /usr/bin/python2
+%global _enable_debug_packages %{nil}
+%global debug_package %{nil}
+%global __debug_package %{nil}
+%global __debug_install_post %{nil}
+%global _build_id_links none
+%endif
+%endif
+%endif
+
+%global dist %{nil}
+
+Summary: Kernel for Tencent physical machine
+Name: %{name}
+Version: %{version}
+Release: %{release_os}%{?rdist}
+License: GPLv2
+Vendor: Tencent
+Packager: tlinux team
+Provides: kernel = %{version}-%{release}
+Group: System Environment/Kernel
+Source0: %{name}-%{version}.tar.gz
+Source1: tlinux_cciss_link.modules
+URL: http://www.tencent.com
+ExclusiveArch: x86_64
+Distribution: Tencent Linux
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build
+BuildRequires: wget bc module-init-tools curl
+%if %{with_perf}
+BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex openssl-devel
+BuildRequires: xmlto asciidoc
+BuildRequires: audit-libs-devel
+%if 0%{?rhel} == 8
+BuildRequires: python2-devel
+%else
+BuildRequires: python-devel
+%endif
+%ifnarch s390 s390x
+BuildRequires: numactl-devel
+%endif
+%endif
+Requires(pre): linux-firmware >= 20150904-44
+
+# for the 'hostname' command
+%if 0%{?rhel} >= 7
+BuildRequires: hostname
+%else
+BuildRequires: net-tools
+%endif
+
+%description
+This package contains the tlinux kernel for physical machines.
+
+%package debuginfo-common
+Summary: tlinux kernel vmlinux for crash debug
+Release: %{release}
+Group: System Environment/Kernel
+Provides: kernel-debuginfo-common kernel-debuginfo
+Obsoletes: kernel-debuginfo
+AutoReqprov: no
+
+%description debuginfo-common
+This package contains the vmlinux, System.map and config files needed for
+kernel debugging.
+
+%package devel
+Summary: Development package for building kernel modules to match the %{version}-%{release} kernel
+Release: %{release}
+Group: System Environment/Kernel
+Provides: kernel-devel
+Obsoletes: kernel-devel
+AutoReqprov: no
+
+%description devel
+This package provides kernel headers and makefiles sufficient to build modules
+against the %{version}-%{release} kernel package.
+
+
+%package headers
+Summary: Header files for the Linux kernel for use by glibc
+Group: Development/System
+Obsoletes: glibc-kernheaders
+Provides: glibc-kernheaders = 3.0-46
+Provides: kernel-headers
+Obsoletes: kernel-headers
+AutoReqprov: no
+
+%description headers
+Kernel-headers includes the C header files that specify the interface
+between the Linux kernel and userspace libraries and programs. The
+header files define structures and constants that are needed for
+building most standard programs and are also needed for rebuilding the
+glibc package.
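+# (For reference, a sketch of how the built subpackages are typically
+#  inspected; the version-release string below is hypothetical:
+#    rpm -qpl kernel-headers-4.14.105-19.tl2.x86_64.rpm
+#  lists the sanitized uapi headers installed under /usr/include by the
+#  headers_install step in %%install.)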
+ +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Group: Development/Debug +#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|XXX' -o perf-debuginfo.list} + +%package -n python-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python-perf +The python-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} + +%package -n python-perf-debuginfo +Summary: Debug information for package perf python bindings +Group: Development/Debug +#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release} +AutoReqProv: no +%description -n python-perf-debuginfo +This package provides debug information for the perf python bindings. + +# the python_sitearch macro should already be defined from above +%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{python_sitearch}/perf.so(\.debug)?|XXX' -o python-perf-debuginfo.list} + + +%endif # with_perf + +# prep ######################################################################### +%prep + +%setup -q -c -T -a 0 + +# build ######################################################################## +%build + +cd %{name}-%{version} + +all_types="%{kernel_all_types}" +num_processor=`cat /proc/cpuinfo | grep 'processor' | wc -l` +if [ $num_processor -gt 8 ]; then + num_processor=8 +fi +for type_name in $all_types +do + sed -i '/^EXTRAVERSION/cEXTRAVERSION = -%{extra_version}' Makefile + objdir=../%{name}-%{version}-obj-${type_name} + [ -d ${objdir} ] || mkdir ${objdir} + echo "include $PWD/Makefile" > ${objdir}/Makefile + cp ./package/default/config.${type_name} ${objdir}/.config + localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"' + sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config + make -C ${objdir} olddefconfig > /dev/null + smp_jobs=%{?jobs:%jobs} + if [ "$smp_jobs" = "" ]; then + make -j ${num_processor} -C ${objdir} RPM_BUILD_MODULE=y all + else + make -C ${objdir} RPM_BUILD_MODULE=y %{?jobs:-j%jobs} all + fi +# dealing with files under lib/modules + make -C ${objdir} RPM_BUILD_MODULE=y modules + + %global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix} + %if %{with_perf} + # perf + %{perf_make} all + %{perf_make} man + %endif +done + +# install ###################################################################### +%install + +echo install %{version} +arch=`uname -m` +if [ "$arch" != "x86_64" ];then + echo "Unexpected 
error. Cannot build this rpm in non-x86_64 platform\n" + exit 1 +fi +mkdir -p %buildroot/boot +cd %{name}-%{version} + +all_types="%{kernel_all_types}" +for type_name in $all_types +do + elfname=vmlinuz-%{tagged_name}%{?dist} + mapname=System.map-%{tagged_name}%{?dist} + configname=config-%{tagged_name}%{?dist} + builddir=../%{name}-%{version}-obj-${type_name} +%if 0%{?rhel} == 7 + cp -rpf ${builddir}/arch/x86/boot/bzImage %buildroot/boot/${elfname} +%endif + cp -rpf ${builddir}/System.map %buildroot/boot/${mapname} + cp -rpf ${builddir}/.config %buildroot/boot/${configname} + cp -rpf ${builddir}/vmlinux %buildroot/boot/vmlinux-%{tagged_name}%{?dist} + sha512hmac %buildroot/boot/${elfname} | sed -e "s,$RPM_BUILD_ROOT,," > %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac + + # dealing with kernel-devel package + objdir=${builddir} + #KernelVer=%{version}-%{title}-%{release_os}-${type_name} + KernelVer=%{tagged_name}%{?dist} + + ####### make kernel-devel package: methods comes from rhel6(kernel.spec) ####### + make -C ${objdir} RPM_BUILD_MODULE=y INSTALL_MOD_PATH=%buildroot modules_install > /dev/null + #don't package firmware, use linux-firmware rpm instead + rm -rf %buildroot/lib/firmware + # And save the headers/makefiles etc for building modules against + # + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + rm -rf %buildroot/lib/modules/${KernelVer}/build + rm -rf %buildroot/lib/modules/${KernelVer}/source + mkdir -p %buildroot/lib/modules/${KernelVer}/build + (cd $RPM_BUILD_ROOT/lib/modules/${KernelVer} ; ln -s build source) + # dirs for additional modules per module-init-tools, kbuild/modules.txt + mkdir -p %buildroot/lib/modules/${KernelVer}/extra + mkdir -p %buildroot/lib/modules/${KernelVer}/updates + mkdir -p %buildroot/lib/modules/${KernelVer}/weak-updates + +%if 0%{?rhel} == 8 + cp -rpf ${builddir}/arch/x86/boot/bzImage %buildroot/lib/modules/${KernelVer}/vmlinuz +%endif + + # first copy everything + cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` %buildroot/lib/modules/${KernelVer}/build + cp ${builddir}/Module.symvers %buildroot/lib/modules/${KernelVer}/build + cp ${builddir}/System.map %buildroot/lib/modules/${KernelVer}/build + + if [ -s ${builddir}/Module.markers ]; then + cp ${builddir}/Module.markers %buildroot/lib/modules/${KernelVer}/build + fi + + # create the kABI metadata for use in packaging + echo "**** GENERATING kernel ABI metadata ****" + gzip -c9 < ${builddir}/Module.symvers > %buildroot/boot/symvers-${KernelVer}.gz + # chmod 0755 %_sourcedir/kabitool + # rm -f %{_tmppath}/kernel-$KernelVer-kabideps + # %_sourcedir/kabitool -s Module.symvers -o %{_tmppath}/kernel-$KernelVer-kabideps + + rm -rf %buildroot/lib/modules/${KernelVer}/build/Documentation + rm -rf %buildroot/lib/modules/${KernelVer}/build/scripts + rm -rf %buildroot/lib/modules/${KernelVer}/build/include + cp ${builddir}/.config %buildroot/lib/modules/${KernelVer}/build + cp -a scripts %buildroot/lib/modules/${KernelVer}/build + cp -a ${builddir}/scripts %buildroot/lib/modules/${KernelVer}/build + rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*.o + rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*/*.o + cp -a --parents arch/x86/include %buildroot/lib/modules/${KernelVer}/build/ + (cd ${builddir} ; cp -a --parents arch/x86/include/generated %buildroot/lib/modules/${KernelVer}/build/) + mkdir -p %buildroot/lib/modules/${KernelVer}/build/include + + cp -a 
${builddir}/include/config %buildroot/lib/modules/${KernelVer}/build/include/ + cp -a ${builddir}/include/generated %buildroot/lib/modules/${KernelVer}/build/include/ + + cd include + cp -a * %buildroot/lib/modules/${KernelVer}/build/include + + cp -a ../arch/x86 $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/asm-x86 + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + ln -s asm-x86 asm + popd + + # Make sure the Makefile and version.h have a matching timestamp so that + # external modules can be built + touch -r %buildroot/lib/modules/${KernelVer}/build/Makefile %buildroot/lib/modules/${KernelVer}/build/include/generated/uapi/linux/version.h + #touch -r %buildroot/lib/modules/${KernelVer}/build/.config %buildroot/lib/modules/${KernelVer}/build/include/linux/autoconf.h + # Copy .config to include/config/auto.conf so "make prepare" is unnecessary. + cp %buildroot/lib/modules/$KernelVer/build/.config %buildroot/lib/modules/${KernelVer}/build/include/config/auto.conf + cd .. + + + find %buildroot/lib/modules/${KernelVer} -name "*.ko" -type f >modnames + + + mkdir -p %buildroot%debug_path/modules/${KernelVer} + cp -r %buildroot/lib/modules/${KernelVer}/kernel %buildroot%debug_path/modules/${KernelVer} + xargs --no-run-if-empty strip -g < modnames + + xargs --no-run-if-empty chmod u+x < modnames + + # Generate a list of modules for block and networking. + fgrep /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + if [ ! -z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + fi + } + + collect_modules_list networking 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt2x00(pci|usb)_probe|register_netdevice' + collect_modules_list block 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm 'drm_open|drm_init' + collect_modules_list modesetting 'drm_crtc_init' + # detect missing or incorrect license tags + rm -f modinfo + while read i + do + echo -n "${i#%buildroot/lib/modules/${KernelVer}/} " >> modinfo + /sbin/modinfo -l $i >> modinfo + done < modnames + + egrep -v \ + 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \ + modinfo && exit 1 + + rm -f modinfo modnames + cp tools_key.pub %buildroot/lib/modules/${KernelVer}/kernel + # remove files that will be auto generated by depmod at rpm -i time + #for i in alias alias.bin ccwmap dep dep.bin ieee1394map inputmap isapnpmap ofmap pcimap seriomap symbols symbols.bin usbmap + #do + # rm -f %buildroot/lib/modules/${KernelVer}/modules.$i + #done + + # Move the devel headers out of the root file system + mkdir -p %buildroot/usr/src/kernels + mkdir -p %buildroot/lib/modules/${KernelVer}/build/tools/objtool + cp ${builddir}/tools/objtool/objtool %buildroot/lib/modules/${KernelVer}/build/tools/objtool/objtool + mv %buildroot/lib/modules/${KernelVer}/build %buildroot/usr/src/kernels/${KernelVer} + ln -sf /usr/src/kernels/${KernelVer} %buildroot/lib/modules/${KernelVer}/build + # + # Generate the kernel-modules-* files lists + # + +%if 0%{?rhel} == 8 + mkdir -p %buildroot/lib/modules/$KernelVer/dist_compat + mv 
%buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/boot/$configname %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/boot/System.map-$KernelVer %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/lib/modules/$KernelVer/dist_compat +%endif + + + # Copy the System.map file for depmod to use, and create a backup of the + # full module tree so we can restore it after we're done filtering + cp ${builddir}/System.map $RPM_BUILD_ROOT/. + pushd $RPM_BUILD_ROOT + mkdir restore + cp -r lib/modules/$KernelVer/* restore/. + + find lib/modules/$KernelVer/ -not -type d | sort -n > modules.list + find lib/modules/$KernelVer/ -type d | sort -n > module-dirs.list + + # Run depmod on the resulting module tree and make sure it isn't broken + depmod -b . -aeF ./System.map $KernelVer &> depmod.out + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + + # Cleanup + rm System.map + cp -r restore/* lib/modules/$KernelVer/. + rm -rf restore + popd + +%if 0%{?rhel} == 8 + mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat + mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat +%endif + + # Make sure the files lists start with absolute paths or rpmbuild fails. 
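+	# (modules.list and module-dirs.list were produced by the find
+	#  commands above, with paths relative to $RPM_BUILD_ROOT, hence
+	#  the rewrite below.)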
+ # Also add in the dir entries + sed -e 's/^lib*/%dir \/lib/' $RPM_BUILD_ROOT/module-dirs.list > ../core.list + sed -e 's/^lib*/\/lib/' $RPM_BUILD_ROOT/modules.list >> ../core.list + + # Cleanup + rm -f $RPM_BUILD_ROOT/modules.list + rm -f $RPM_BUILD_ROOT/module-dirs.list + + %if %{with_perf} + # perf tool binary and supporting scripts/binaries + %{perf_make} DESTDIR=$RPM_BUILD_ROOT install + + # perf-python extension + %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext + + # perf man pages (note: implicit rpm magic compresses them later) + %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man + %endif +done + +mkdir -p %buildroot/etc/sysconfig/modules +install -m755 %SOURCE1 %buildroot/etc/sysconfig/modules + +#### Header +make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install +# Do headers_check but don't die if it fails. +make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_check \ + > hdrwarnings.txt || : +if grep -q exist hdrwarnings.txt; then + sed s:^$RPM_BUILD_ROOT/usr/include/:: hdrwarnings.txt + # Temporarily cause a build failure if header inconsistencies. + # exit 1 +fi + +find $RPM_BUILD_ROOT/usr/include \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) | xargs rm -f + +# glibc provides scsi headers for itself, for now +rm -rf $RPM_BUILD_ROOT/usr/include/scsi +rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h +rm -f $RPM_BUILD_ROOT/usr/include/asm*/io.h +rm -f $RPM_BUILD_ROOT/usr/include/asm*/irq.h + +%pre +# pre ######################################################################### +system_arch=`uname -m` + +if [ %{_target_cpu} != ${system_arch} ]; then + echo "ERROR: Failed installing this rpm!!!!" + echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; + exit 1; +fi; + +%post +# post ######################################################################### +echo "Install tlinux kernel" +%if 0%{?rhel} == 8 +kernel-install add %{tagged_name} /lib/modules/%{tagged_name}/vmlinuz +cp -p /lib/modules/%{tagged_name}/dist_compat/config-%{tagged_name}%{?dist} /boot +cp -p /lib/modules/%{tagged_name}/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac /boot +cp -p /lib/modules/%{tagged_name}/dist_compat/System.map-%{tagged_name}%{?dist} /boot +%else +%if 0%{?rhel} == 7 +/sbin/new-kernel-pkg --package kernel-tlinux4 --install %{tagged_name}%{?dist} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --make-default|| exit $? +%endif +%endif + +%preun +# preun ####################################################################### +%if 0%{?rhel} == 8 +kernel-install remove %{tagged_name} +%else +%if 0%{?rhel} == 7 +/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $? +echo -e "Remove \"%{tagged_name}%{?dist}\" Done." +%endif +%endif + +%posttrans +# posttrans #################################################################### +%if 0%{?rhel} == 7 +/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $? +/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $? 
+%endif + +%files -f core.list +# files ######################################################################## +%defattr(-,root,root) +%if 0%{?rhel} == 7 +/boot/vmlinuz-%{tagged_name}%{?dist} +/boot/System.map-%{tagged_name}%{?dist} +/boot/config-%{tagged_name}%{?dist} +%endif +/boot/symvers-%{tagged_name}%{?dist}* +/etc/sysconfig/modules/tlinux_cciss_link.modules +#do not package firmware ,use linux-firmware rpm instead +#/lib/firmware/ + +%files debuginfo-common +%defattr(-,root,root) +/boot/vmlinux-%{tagged_name}%{?dist} +%dir %debug_path +%debug_path/modules +#/boot/System.map-%%{version}-%%{title}-%%{release_os}-* +#/boot/config-%%{version}-%%{title}-%%{release_os}-* + +%files headers +%defattr(-,root,root) +/usr/include/* + +%files devel +%defattr(-,root,root) +/usr/src/kernels/%{tagged_name}%{?dist} + +%if %{with_perf} +%files -n perf +%defattr(-,root,root) +%{_bindir}/* +/usr/lib64/* +/usr/lib/perf/* +/usr/share/* +%dir %{_libexecdir}/perf-core +%{_libexecdir}/perf-core/* +%{_sysconfdir}/bash_completion.d/perf + +%files -n python-perf +%defattr(-,root,root) +%{python_sitearch} + +%if %{with_debuginfo} +%files -f perf-debuginfo.list -n perf-debuginfo +%defattr(-,root,root) + +%files -f python-perf-debuginfo.list -n python-perf-debuginfo +%defattr(-,root,root) +%endif +%endif # with_perf + +# changelog ################################################################### +%changelog +* Thu Feb 2 2012 Samuel Liao + - Initial 3.0.18 repository diff --git a/package/default/perf-tlinux.spec b/package/default/perf-tlinux.spec new file mode 100644 index 000000000000..f4a426197b7b --- /dev/null +++ b/package/default/perf-tlinux.spec @@ -0,0 +1,154 @@ +%define sign_server 10.12.65.118 + +%global with_debuginfo 0 +%if 0%{?rhel} == 6 +%global dist .tl1 +%global debug_path /usr/lib/debug/lib/ +%else +%global debug_path /usr/lib/debug/usr/lib/ +%if 0%{?rhel} == 7 +%global dist .tl2 +%endif +%endif + +Summary: Performance monitoring for the Linux kernel +Group: Development/System +Name: %{name} +Version: %{version} +Release: %{release_os}%{?dist} +License: GPLv2 +Vendor: Tencent +Packager: tlinux team +Provides: perf = %{version}-%{release} +Source0: %{name}-%{version}.tar.gz +URL: http://www.tencent.com +ExclusiveArch: x86_64 +Distribution: Tencent Linux +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build +BuildRequires: wget bc module-init-tools curl +BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel python-devel perl(ExtUtils::Embed) bison flex +BuildRequires: xmlto asciidoc +BuildRequires: audit-libs-devel +%ifnarch s390 s390x +BuildRequires: numactl-devel +%endif + +# for the 'hostname' command +%if 0%{?rhel} == 7 +BuildRequires: hostname +%else +BuildRequires: net-tools +%endif + +%description +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + + +%package -n python-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python-perf +The python-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} + + +# prep ######################################################################### +%prep + +%setup -q -c -T -a 0 + +# build ######################################################################## +%build + + +if [ ! 
-f /etc/tlinux-release ]; then
+    echo "Error: please build this rpm on tlinux."
+    exit 1
+fi
+
+
+cd %{name}-%{version}
+all_types="%{kernel_all_types}"
+num_processor=`cat /proc/cpuinfo | grep 'processor' | wc -l`
+if [ $num_processor -gt 8 ]; then
+    num_processor=8
+fi
+
+type_name=default
+
+objdir=../%{name}-%{version}-obj-${type_name}
+[ -d ${objdir} ] || mkdir ${objdir}
+bash scripts/mkmakefile $PWD ${objdir} 2 6
+if [ "$no_sign" != "yes" ]; then
+cp ./package/default/config.${type_name} ${objdir}/.config
+else
+cp ./package/default/config.${type_name}_nosign ${objdir}/.config
+fi
+localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"'
+sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config
+
+
+%global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix}
+# perf
+%{perf_make} all
+%{perf_make} man
+
+# install ######################################################################
+%install
+cd %{name}-%{version}
+%{perf_make} DESTDIR=$RPM_BUILD_ROOT install
+
+# perf-python extension
+%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext
+
+# perf man pages (note: implicit rpm magic compresses them later)
+%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man
+
+%pre
+# pre #########################################################################
+system_arch=`uname -m`
+
+if [ %{_target_cpu} != ${system_arch} ]; then
+    echo "ERROR: Failed installing this rpm!!!!"
+    echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}.";
+    exit 1;
+fi;
+
+if [ ! -f /etc/tlinux-release -o ! -f /etc/redhat-release ]; then
+    echo "Error: Cannot install this rpm on a non-tlinux OS."
+    exit 1;
+fi
+
+%post
+# post #########################################################################
+
+
+%postun
+
+%files
+%defattr(-,root,root)
+%{_bindir}/perf
+%dir %{_libexecdir}/perf-core
+%{_libexecdir}/perf-core/*
+%{_mandir}/man[1-8]/perf*
+%{_sysconfdir}/bash_completion.d/perf
+/usr/bin/trace
+/usr/bin/perf*
+/usr/lib64/traceevent/plugins/*
+/usr/share/doc/*
+/usr/share/perf-core/*
+/usr/lib/perf/*
+
+%files -n python-perf
+%defattr(-,root,root)
+%{python_sitearch}
+
+
+# changelog ###################################################################
+%changelog
+* Thu Feb 2 2012 Samuel Liao
+ - Initial 3.0.18 repository
diff --git a/package/default/repackage/generate-rpms.sh b/package/default/repackage/generate-rpms.sh
new file mode 100755
index 000000000000..23a9f494c8ca
--- /dev/null
+++ b/package/default/repackage/generate-rpms.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+# ==============================================================================
+# Description: Automatically generate a kernel rpm package for the specified
+#              branch according to the HEAD tag.
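+#              (This repackage variant does not rebuild from source: it
+#              unpacks previously built tl3 binary rpms and rewraps them
+#              with the tl2 spec; see prepare_tlinux_bin_sources below.)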
+#
+# Author: David Shan
+# ===============================================================================
+
+#variables
+kernel_full_name=""
+kernel_version=""
+tlinux_release=""
+curdir=`pwd`
+
+
+build_specdir=`readlink -f ~/rpmbuild/SPECS`
+build_srcdir=`readlink -f ~/rpmbuild/SOURCES`
+dist="tl2"
+
+kernel_type="default"
+spec_file_name=""
+make_jobs=""
+tag_name=""
+nosign_suffix=""
+build_src_only=0
+strip_sign_token=0
+# to control whether to build rpm for xen domU
+isXenU=""
+kernel_default_types=(default)
+
+get_kernel_version()
+{
+    local tagged_name
+    raw_tagged_name=$tag_name
+
+    if [[ $raw_tagged_name != public-x86-* ]]; then
+        echo "$raw_tagged_name is not a valid tag name for public-x86"
+        exit 1
+    fi
+
+    tagged_name=${raw_tagged_name#public-x86-}
+
+    kernel_version=`echo $tagged_name|cut -d- -f1`
+    #kernel_version=${kernel_version}-1
+    #echo "kernel version: $kernel_version"
+    echo "kernel version: ${kernel_version}"
+}
+
+get_tlinux_name()
+{
+    if [ -n "$tag_name" ]; then
+        raw_tagged_name="$tag_name"
+    else
+        raw_tagged_name=`git describe --tags`
+    fi
+
+    if [ -z "${raw_tagged_name}" ];then
+        echo "Error: Can't get kernel version from git tree."
+        exit 1
+    fi
+
+    if [[ $raw_tagged_name != public-x86-* ]]; then
+        echo "$raw_tagged_name is not a valid tag name for public-x86"
+        exit 1
+    fi
+
+    tagged_name=${raw_tagged_name#public-x86-}
+
+    #if [ "${tagged_name#*-*-*}" != ${tagged_name} ];then
+    #echo "Error: bad tag name:$tagged_name."
+    #exit 1
+    #fi
+
+    rpm_name="kernel"
+    echo "rpm_name $rpm_name"
+
+    tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3`
+    extra_version=`echo $tagged_name | cut -d- -f2`
+    kernel_full_name=${rpm_name}-${kernel_version}
+    #tlinux_release=${tagged_name#*-}
+}
+
+prepare_tlinux_spec()
+{
+    spec_file_name=${build_specdir}/${kernel_full_name}_${dist}.spec
+    spec_contents="%define name ${rpm_name} \n"
+    spec_contents+="%define version ${kernel_version} \n"
+    spec_contents+="%define release_os ${tlinux_release} \n"
+    spec_contents+="%define tagged_name ${tagged_name} \n"
+
+    echo -e "${spec_contents}" > $spec_file_name
+
+    if [ "$kvm_guest" = "yes" ] ; then
+        cat $curdir/kernel-tlinux_kvmguest_${dist}.spec >> $spec_file_name
+    else
+        cat $curdir/kernel-tlinux_${dist}.spec >> $spec_file_name
+        #cp $curdir/${kernel_full_name}_${dist}.spec $spec_file_name
+    fi
+}
+
+prepare_tlinux_bin_sources()
+{
+    local kernel_tl3_rpm=${rpm_name}-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm
+    local kernel_debuginfo_tl3_rpm=${rpm_name}-debuginfo-common-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm
+    local kernel_headers_tl3_rpm=${rpm_name}-headers-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm
+    local kernel_devel_tl3_rpm=${rpm_name}-devel-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm
+
+    if [ ! -f $kernel_tl3_rpm ]; then
+        echo "$kernel_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    if [ ! -f $kernel_debuginfo_tl3_rpm ]; then
+        echo "$kernel_debuginfo_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    if [ ! -f $kernel_headers_tl3_rpm ]; then
+        echo "$kernel_headers_tl3_rpm does not exist"
+        exit 1
+    fi
+
+    rm -rf ${rpm_name}-${kernel_version}
+    rm -rf ${rpm_name}-${kernel_version}_bin.tar.gz
+    mkdir ${rpm_name}-${kernel_version}
+    pushd .
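+    # Unpack the payload of each rpm built earlier into the staging tree;
+    # cpio -divm creates leading directories, extracts verbosely and
+    # preserves file modification times.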
+    cd ${rpm_name}-${kernel_version}
+    rpm2cpio ../$kernel_tl3_rpm | cpio -divm
+    rpm2cpio ../$kernel_debuginfo_tl3_rpm | cpio -divm
+    rpm2cpio ../$kernel_headers_tl3_rpm | cpio -divm
+    rpm2cpio ../$kernel_devel_tl3_rpm | cpio -divm
+    popd
+    tar -czvf ${rpm_name}-${kernel_version}_bin.tar.gz ${rpm_name}-${kernel_version}
+}
+
+#main function
+
+if [ $# -eq 0 ]; then
+    echo "please specify a tag"
+    exit 1
+fi
+
+tag_name=$1
+
+if [ ! -e "${build_srcdir}" ]; then
+    mkdir $build_srcdir
+    [ $? == 1 ] && exit 1
+fi
+
+if [ ! -e "${build_specdir}" ]; then
+    mkdir $build_specdir
+    [ $? == 1 ] && exit 1
+fi
+
+origsrc=`pwd`
+
+get_kernel_version $origsrc
+get_tlinux_name
+prepare_tlinux_bin_sources
+echo "Prepare $kernel_full_name source."
+prepare_tlinux_spec
+
+
+if test -e ${build_srcdir}/${kernel_full_name}; then
+    rm -fr ${build_srcdir}/${kernel_full_name}
+fi
+
+rm -rf /var/tmp/${kernel_full_name}
+
+cp ${kernel_full_name}_bin.tar.gz ${build_srcdir}/
+
+echo "Build kernel rpm package."
+
+rpmbuild -bb ${target} ${spec_file_name}
diff --git a/package/default/repackage/kernel-tlinux_tl2.spec b/package/default/repackage/kernel-tlinux_tl2.spec
new file mode 100644
index 000000000000..aa1c0b0de0ae
--- /dev/null
+++ b/package/default/repackage/kernel-tlinux_tl2.spec
@@ -0,0 +1,268 @@
+%global with_debuginfo 0
+%global with_perf 1
+%if 0%{?rhel} == 6
+%global rdist .tl1
+%global debug_path /usr/lib/debug/lib/
+%else
+%global debug_path /usr/lib/debug/usr/lib/
+%if 0%{?rhel} == 7
+%global rdist .tl2
+%global _enable_debug_packages %{nil}
+%global debug_package %{nil}
+%global __debug_package %{nil}
+%global __debug_install_post %{nil}
+%else
+%if 0%{?rhel} == 8
+%global rdist .tl3
+%global __python /usr/bin/python2
+%global _enable_debug_packages %{nil}
+%global debug_package %{nil}
+%global __debug_package %{nil}
+%global __debug_install_post %{nil}
+%endif
+%endif
+%endif
+
+%global dist %{nil}
+
+Summary: Kernel for Tencent physical machine
+Name: %{name}
+Version: %{version}
+Release: %{release_os}%{?rdist}
+License: GPLv2
+Vendor: Tencent
+Packager: tlinux team
+Provides: kernel = %{version}-%{release}
+Group: System Environment/Kernel
+Source0: %{name}-%{version}_bin.tar.gz
+Source1: tlinux_cciss_link.modules
+URL: http://www.tencent.com
+ExclusiveArch: x86_64
+Distribution: Tencent Linux
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build
+BuildRequires: wget bc module-init-tools curl
+Requires(pre): linux-firmware >= 20150904-44
+
+# for the 'hostname' command
+%if 0%{?rhel} >= 7
+BuildRequires: hostname
+%else
+BuildRequires: net-tools
+%endif
+
+%description
+This package contains the tlinux kernel for physical machines.
+
+%package debuginfo-common
+Summary: tlinux kernel vmlinux for crash debug
+Release: %{release}
+Group: System Environment/Kernel
+Provides: kernel-debuginfo-common kernel-debuginfo
+Obsoletes: kernel-debuginfo
+AutoReqprov: no
+
+%description debuginfo-common
+This package contains the vmlinux, System.map and config files needed for
+kernel debugging.
+
+%package devel
+Summary: Development package for building kernel modules to match the %{version}-%{release} kernel
+Release: %{release}
+Group: System Environment/Kernel
+AutoReqprov: no
+
+%description devel
+This package provides kernel headers and makefiles sufficient to build modules
+against the %{version}-%{release} kernel package.
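+# (A hypothetical out-of-tree module build against the installed tree:
+#    make -C /usr/src/kernels/<tagged_name> M=$PWD modules
+#  the exact directory name matches the %%files devel entry below.)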
+ +%package headers +Summary: Header files for the Linux kernel for use by glibc +Group: Development/System +Obsoletes: glibc-kernheaders +Provides: glibc-kernheaders = 3.0-46 +Provides: kernel-headers +Obsoletes: kernel-headers +AutoReqprov: no + +%description headers +Kernel-headers includes the C header files that specify the interface +between the Linux kernel and userspace libraries and programs. The +header files define structures and constants that are needed for +building most standard programs and are also needed for rebuilding the +glibc package. + +# prep ######################################################################### +%prep + +%setup -q -c -T -a 0 + +# build ######################################################################## +%build + +# install ###################################################################### +%install + +echo install %{version} +arch=`uname -m` +if [ "$arch" != "x86_64" ];then + echo "Unexpected error. Cannot build this rpm in non-x86_64 platform\n" + exit 1 +fi + +pwd + +cd %{name}-%{version} + +cp -rp * %buildroot/ + +elfname=vmlinuz-%{tagged_name}%{?dist} +mapname=System.map-%{tagged_name}%{?dist} +configname=config-%{tagged_name}%{?dist} + +mkdir -p %buildroot/boot +mv %buildroot/lib/modules/%tagged_name/vmlinuz %buildroot/boot/${elfname} +mv %buildroot/lib/modules/%tagged_name/dist_compat/${mapname} %buildroot/boot/${mapname} +mv %buildroot/lib/modules/%tagged_name/dist_compat/${configname} %buildroot/boot/${configname} +mv %buildroot/lib/modules/%tagged_name/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/boot/ + +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols.bin %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.softdep %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.devname %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep.bin %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.builtin.bin %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias.bin %buildroot/lib/modules/%tagged_name/ +mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias %buildroot/lib/modules/%tagged_name/ + +%pre +# pre ######################################################################### +system_arch=`uname -m` + +if [ %{_target_cpu} != ${system_arch} ]; then + echo "ERROR: Failed installing this rpm!!!!" + echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; + exit 1; +fi; + +%post +# post ######################################################################### +echo "Install tlinux kernel" +%if 0%{?rhel} >= 7 +/sbin/new-kernel-pkg --package kernel --install %{tagged_name}%{?dist} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --make-default|| exit $? +%else +/sbin/new-kernel-pkg --install %{tagged_name}%{?dist} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --banner="tkernel 3.0" || exit $? 
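+# The remainder of this %%post edits the legacy grub.conf directly to make
+# the newly installed kernel the default menu entry on systems not handled
+# by new-kernel-pkg above.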
+
+# check release info to get suffix
+suffix=default
+
+# modify boot menu
+grubconfig=/boot/grub/grub.conf
+if [ -d /sys/firmware/efi -a -f /boot/efi/EFI/tencent/grub.efi ]; then
+    grubconfig=/boot/efi/EFI/tencent/grub.conf
+fi
+
+# reduce multiple blank lines to one
+sed '/^$/{
+    N
+    /^\n$/D
+}' ${grubconfig} > ${grubconfig}.tmp
+mv ${grubconfig}.tmp ${grubconfig}
+rm -f ${grubconfig}.tmp
+
+# set grub default according to $suffix we already got
+count=-1
+defcount=0
+while read line
+do
+    echo $line | grep -q "title"
+    if [ $? -eq 0 ]; then
+        (( count++ ))
+    fi
+    if [ -f /etc/SuSE-release ]; then
+        echo $line | grep -q "%{tagged_name}%{?dist}"
+    elif [ -f /etc/redhat-release ]; then
+        echo $line | grep -q "title.*\(%{tagged_name}%{?dist}\).*"
+    fi
+    result=$?
+    if [ $result -eq 0 ] ; then
+        break
+    fi
+    if [ -f /etc/SuSE-release ]; then
+        echo $line | grep -q "title %{tagged_name}%{?dist}"
+    elif [ -f /etc/redhat-release ]; then
+        echo $line | grep -q "title.*(%{tagged_name}%{?dist}).*"
+    fi
+    if [ $? -eq 0 ] ; then
+        defcount=$count
+    fi
+done < $grubconfig
+
+if [ $result -eq 0 ] ; then
+    index=$count
+else
+    index=$defcount
+fi
+
+if [ -f /etc/SuSE-release ]; then
+    sed -n "/^default /!p;
+    {/^default /c\
+    \default $index
+    }
+    " $grubconfig > $grubconfig.NEW
+    mv $grubconfig.NEW $grubconfig
+elif [ -f /etc/redhat-release ]; then
+    sed -n "/^default=/!p;
+    {/^default=/c\
+    \default=$index
+    }
+    " $grubconfig > $grubconfig.NEW
+    mv $grubconfig.NEW $grubconfig
+fi
+%endif
+echo -e "Set Grub default to \"%{tagged_name}%{?dist}\" Done."
+
+%postun
+# postun #######################################################################
+/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $?
+echo -e "Remove \"%{tagged_name}%{?dist}\" Done."
+
+%if 0%{?rhel} >= 7
+%posttrans
+# posttrans ####################################################################
+/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $?
+/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $?
+%endif
+
+%files
+# files ########################################################################
+%defattr(-,root,root)
+/boot/vmlinuz-%{tagged_name}%{?dist}
+/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac
+/boot/System.map-%{tagged_name}%{?dist}
+/boot/config-%{tagged_name}%{?dist}
+/boot/symvers-%{tagged_name}%{?dist}*
+/etc/sysconfig/modules/tlinux_cciss_link.modules
+/lib/modules/%{tagged_name}/*
+
+%files debuginfo-common
+%defattr(-,root,root)
+/boot/vmlinux-%{tagged_name}%{?dist}
+%dir %debug_path
+%debug_path/modules
+#/boot/System.map-%%{version}-%%{title}-%%{release_os}-*
+#/boot/config-%%{version}-%%{title}-%%{release_os}-*
+
+
+%files devel
+%defattr(-,root,root)
+/usr/src/kernels/%{tagged_name}%{?dist}
+
+%files headers
+%defattr(-,root,root)
+/usr/include/*
+
+# changelog ###################################################################
+%changelog
+* Thu Feb 2 2012 Samuel Liao
+ - Initial 3.0.18 repository
diff --git a/package/default/tlinux_cciss_link.modules b/package/default/tlinux_cciss_link.modules
new file mode 100644
index 000000000000..8d15da12d851
--- /dev/null
+++ b/package/default/tlinux_cciss_link.modules
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+if [ -b /dev/sda -a !
-b /dev/cciss/c0d0 ]; then + mkdir -p /dev/cciss + set -- 0 a 1 b 2 c 3 d 4 e 5 f 6 g 7 h 8 i 9 j 10 k 11 l 12 m 13 n 14 o 15 p + while [ $# -ge 2 ]; do + c=/dev/cciss/c0d$1; shift + s=/dev/sd$1; shift + ln -s ${s} ${c} + ln -s ${s}1 ${c}p1 + ln -s ${s}2 ${c}p2 + ln -s ${s}3 ${c}p3 + ln -s ${s}4 ${c}p4 + done +fi + diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 42b571cde177..6d1df7117e11 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -219,6 +219,7 @@ BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \ readelf -S ./llvm_btf_verify.o | grep BTF; \ /bin/rm -f ./llvm_btf_verify.o) +BPF_EXTRA_CFLAGS += -fno-stack-protector ifneq ($(BTF_LLVM_PROBE),) EXTRA_CFLAGS += -g else @@ -236,7 +237,7 @@ all: clean: $(MAKE) -C ../../ M=$(CURDIR) clean - @rm -f *~ + @find $(CURDIR) -type f -name '*~' -delete $(LIBBPF): FORCE # Fix up variables inherited from Kbuild that tools/ build system won't like diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c index ed18e9a4909c..43e38ce594d4 100644 --- a/samples/bpf/sockex1_kern.c +++ b/samples/bpf/sockex1_kern.c @@ -4,12 +4,12 @@ #include #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") my_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 256); +} my_map SEC(".maps"); SEC("socket1") int bpf_prog1(struct __sk_buff *skb) diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c index f2f9dbc021b0..ae4bdc89b599 100644 --- a/samples/bpf/sockex2_kern.c +++ b/samples/bpf/sockex2_kern.c @@ -189,12 +189,12 @@ struct pair { long bytes; }; -struct bpf_map_def SEC("maps") hash_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__be32), - .value_size = sizeof(struct pair), - .max_entries = 1024, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __be32); + __type(value, struct pair); + __uint(max_entries, 1024); +} hash_map SEC(".maps"); SEC("socket2") int bpf_prog2(struct __sk_buff *skb) diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c index 1d78819ffef1..630ce8c4d5a2 100644 --- a/samples/bpf/syscall_tp_kern.c +++ b/samples/bpf/syscall_tp_kern.c @@ -47,13 +47,27 @@ static __always_inline void count(void *map) SEC("tracepoint/syscalls/sys_enter_open") int trace_enter_open(struct syscalls_enter_open_args *ctx) { - count((void *)&enter_open_map); + count(&enter_open_map); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_openat") +int trace_enter_open_at(struct syscalls_enter_open_args *ctx) +{ + count(&enter_open_map); return 0; } SEC("tracepoint/syscalls/sys_exit_open") int trace_enter_exit(struct syscalls_exit_open_args *ctx) { - count((void *)&exit_open_map); + count(&exit_open_map); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_openat") +int trace_enter_exit_at(struct syscalls_exit_open_args *ctx) +{ + count(&exit_open_map); return 0; } diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c index 16a16eadd509..749a50f2f9f3 100644 --- a/samples/bpf/trace_event_user.c +++ b/samples/bpf/trace_event_user.c @@ -37,9 +37,9 @@ static void print_ksym(__u64 addr) } printf("%s;", sym->name); - if (!strcmp(sym->name, "sys_read")) + if (!strstr(sym->name, "sys_read")) sys_read_seen = true; - else if (!strcmp(sym->name, "sys_write")) + else if (!strstr(sym->name, "sys_write")) sys_write_seen = true; } diff --git a/samples/bpf/xdp1_kern.c 
b/samples/bpf/xdp1_kern.c index 219742106bfd..db6870aee42c 100644 --- a/samples/bpf/xdp1_kern.c +++ b/samples/bpf/xdp1_kern.c @@ -14,12 +14,12 @@ #include #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); static int parse_ipv4(void *data, u64 nh_off, void *data_end) { diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c index e01288867d15..c74b52c6d945 100644 --- a/samples/bpf/xdp2_kern.c +++ b/samples/bpf/xdp2_kern.c @@ -14,12 +14,12 @@ #include #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); static void swap_src_dst_mac(void *data) { diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c index 411fdb21f8bc..cd9ff2a40a39 100644 --- a/samples/bpf/xdp_adjust_tail_kern.c +++ b/samples/bpf/xdp_adjust_tail_kern.c @@ -25,12 +25,12 @@ #define ICMP_TOOBIG_SIZE 98 #define ICMP_TOOBIG_PAYLOAD_SIZE 92 -struct bpf_map_def SEC("maps") icmpcnt = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 1); +} icmpcnt SEC(".maps"); static __always_inline void count_icmp(void) { diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c index 701a30f258b1..d013029aeaa2 100644 --- a/samples/bpf/xdp_fwd_kern.c +++ b/samples/bpf/xdp_fwd_kern.c @@ -23,13 +23,12 @@ #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) -/* For TX-traffic redirect requires net_device ifindex to be in this devmap */ -struct bpf_map_def SEC("maps") xdp_tx_ports = { - .type = BPF_MAP_TYPE_DEVMAP, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 64, -}; +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 64); +} xdp_tx_ports SEC(".maps"); /* from include/net/ip.h */ static __always_inline int ip_decrease_ttl(struct iphdr *iph) diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index a306d1c75622..cfcc31e51197 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -18,12 +18,12 @@ #define MAX_CPUS 64 /* WARNING - sync with _user.c */ /* Special map type that can XDP_REDIRECT frames to another CPU */ -struct bpf_map_def SEC("maps") cpu_map = { - .type = BPF_MAP_TYPE_CPUMAP, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = MAX_CPUS, -}; +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u32)); + __uint(max_entries, MAX_CPUS); +} cpu_map SEC(".maps"); /* Common stats data record to keep userspace more simple */ struct datarec { @@ -35,67 +35,67 @@ struct datarec { /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success * feedback. Redirect TX errors can be caught via a tracepoint. 
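 * (The companion loader in this patch attaches the xdp:xdp_redirect_err,
 * xdp:xdp_exception and cpumap tracepoints for exactly this purpose; see
 * init_tracepoints() in xdp_redirect_cpu_user.c further down.)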
*/ -struct bpf_map_def SEC("maps") rx_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} rx_cnt SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") redirect_err_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 2, +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 2); /* TODO: have entries for all possible errno's */ -}; +} redirect_err_cnt SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = MAX_CPUS, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, MAX_CPUS); +} cpumap_enqueue_cnt SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") cpumap_kthread_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} cpumap_kthread_cnt SEC(".maps"); /* Set of maps controlling available CPU, and for iterating through * selectable redirect CPUs. */ -struct bpf_map_def SEC("maps") cpus_available = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = MAX_CPUS, -}; -struct bpf_map_def SEC("maps") cpus_count = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = 1, -}; -struct bpf_map_def SEC("maps") cpus_iterator = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, MAX_CPUS); +} cpus_available SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 1); +} cpus_count SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 1); +} cpus_iterator SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") exception_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} exception_cnt SEC(".maps"); /* Helper parse functions */ diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index 0da6e9e7132e..8b862a7a6c6a 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -16,6 +16,10 @@ static const char *__doc__ = #include #include #include +#include + +#define __must_check +#include #include #include @@ -46,6 +50,10 @@ static int cpus_count_map_fd; static int cpus_iterator_map_fd; static int exception_cnt_map_fd; +#define NUM_TP 5 +struct bpf_link *tp_links[NUM_TP] = { 0 }; +static int tp_cnt = 0; + /* Exit return codes */ #define EXIT_OK 0 
#define EXIT_FAIL 1 @@ -88,6 +96,10 @@ static void int_exit(int sig) printf("program on interface changed, not removing\n"); } } + /* Detach tracepoints */ + while (tp_cnt) + bpf_link__destroy(tp_links[--tp_cnt]); + exit(EXIT_OK); } @@ -588,23 +600,61 @@ static void stats_poll(int interval, bool use_separators, char *prog_name, free_stats_record(prev); } +static struct bpf_link * attach_tp(struct bpf_object *obj, + const char *tp_category, + const char* tp_name) +{ + struct bpf_program *prog; + struct bpf_link *link; + char sec_name[PATH_MAX]; + int len; + + len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s", + tp_category, tp_name); + if (len < 0) + exit(EXIT_FAIL); + + prog = bpf_object__find_program_by_title(obj, sec_name); + if (!prog) { + fprintf(stderr, "ERR: finding progsec: %s\n", sec_name); + exit(EXIT_FAIL_BPF); + } + + link = bpf_program__attach_tracepoint(prog, tp_category, tp_name); + if (IS_ERR(link)) + exit(EXIT_FAIL_BPF); + + return link; +} + +static void init_tracepoints(struct bpf_object *obj) { + tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_err"); + tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_map_err"); + tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_exception"); + tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_enqueue"); + tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_kthread"); +} + static int init_map_fds(struct bpf_object *obj) { - cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map"); - rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt"); + /* Maps updated by tracepoints */ redirect_err_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt"); + exception_cnt_map_fd = + bpf_object__find_map_fd_by_name(obj, "exception_cnt"); cpumap_enqueue_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt"); cpumap_kthread_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt"); + + /* Maps used by XDP */ + rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt"); + cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map"); cpus_available_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_available"); cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count"); cpus_iterator_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_iterator"); - exception_cnt_map_fd = - bpf_object__find_map_fd_by_name(obj, "exception_cnt"); if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 || redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 || @@ -662,6 +712,7 @@ int main(int argc, char **argv) strerror(errno)); return EXIT_FAIL; } + init_tracepoints(obj); if (init_map_fds(obj) < 0) { fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n"); return EXIT_FAIL; diff --git a/samples/bpf/xdp_redirect_kern.c b/samples/bpf/xdp_redirect_kern.c index 8abb151e385f..1f0b7d05abb2 100644 --- a/samples/bpf/xdp_redirect_kern.c +++ b/samples/bpf/xdp_redirect_kern.c @@ -19,22 +19,22 @@ #include #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") tx_port = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} tx_port SEC(".maps"); /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success * feedback. Redirect TX errors can be caught via a tracepoint. 
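 * (Note on the conversion below: array and hash maps declare key/value with
 * __type() so BTF carries full type information, while special map types
 * such as DEVMAP keep plain __uint(key_size/value_size) sizes, matching the
 * other BTF-defined map conversions in this series.)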
*/ -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} rxcnt SEC(".maps"); static void swap_src_dst_mac(void *data) { diff --git a/samples/bpf/xdp_redirect_map_kern.c b/samples/bpf/xdp_redirect_map_kern.c index 740a529ba84f..4631b484c432 100644 --- a/samples/bpf/xdp_redirect_map_kern.c +++ b/samples/bpf/xdp_redirect_map_kern.c @@ -19,22 +19,22 @@ #include #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") tx_port = { - .type = BPF_MAP_TYPE_DEVMAP, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 100, -}; +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 100); +} tx_port SEC(".maps"); /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success * feedback. Redirect TX errors can be caught via a tracepoint. */ -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} rxcnt SEC(".maps"); static void swap_src_dst_mac(void *data) { diff --git a/samples/bpf/xdp_router_ipv4_kern.c b/samples/bpf/xdp_router_ipv4_kern.c index 993f56bc7b9a..bf11efc8e949 100644 --- a/samples/bpf/xdp_router_ipv4_kern.c +++ b/samples/bpf/xdp_router_ipv4_kern.c @@ -42,44 +42,44 @@ struct direct_map { }; /* Map for trie implementation*/ -struct bpf_map_def SEC("maps") lpm_map = { - .type = BPF_MAP_TYPE_LPM_TRIE, - .key_size = 8, - .value_size = sizeof(struct trie_value), - .max_entries = 50, - .map_flags = BPF_F_NO_PREALLOC, -}; +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(key_size, 8); + __uint(value_size, sizeof(struct trie_value)); + __uint(max_entries, 50); + __uint(map_flags, BPF_F_NO_PREALLOC); +} lpm_map SEC(".maps"); /* Map for counter*/ -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u64), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, u64); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); /* Map for ARP table*/ -struct bpf_map_def SEC("maps") arp_table = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__be32), - .value_size = sizeof(__be64), - .max_entries = 50, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __be32); + __type(value, __be64); + __uint(max_entries, 50); +} arp_table SEC(".maps"); /* Map to keep the exact match entries in the route table*/ -struct bpf_map_def SEC("maps") exact_match = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__be32), - .value_size = sizeof(struct direct_map), - .max_entries = 50, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __be32); + __type(value, struct direct_map); + __uint(max_entries, 50); +} exact_match SEC(".maps"); -struct bpf_map_def SEC("maps") tx_port = { - .type = BPF_MAP_TYPE_DEVMAP, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 100, -}; +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 100); +} tx_port SEC(".maps"); /* Function to set source and destination mac of the packet 
*/ static inline void set_src_dst_mac(void *data, void *src, void *dst) diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c index 222a83eed1cb..272d0f82a6b5 100644 --- a/samples/bpf/xdp_rxq_info_kern.c +++ b/samples/bpf/xdp_rxq_info_kern.c @@ -23,12 +23,13 @@ enum cfg_options_flags { READ_MEM = 0x1U, SWAP_MAC = 0x2U, }; -struct bpf_map_def SEC("maps") config_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(struct config), - .max_entries = 1, -}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct config); + __uint(max_entries, 1); +} config_map SEC(".maps"); /* Common stats data record (shared with userspace) */ struct datarec { @@ -36,22 +37,22 @@ struct datarec { __u64 issue; }; -struct bpf_map_def SEC("maps") stats_global_map = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} stats_global_map SEC(".maps"); #define MAX_RXQs 64 /* Stats per rx_queue_index (per CPU) */ -struct bpf_map_def SEC("maps") rx_queue_index_map = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = MAX_RXQs + 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, MAX_RXQs + 1); +} rx_queue_index_map SEC(".maps"); static __always_inline void swap_src_dst_mac(void *data) diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index c7e4e45d824a..b88df17853b8 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -489,9 +489,9 @@ int main(int argc, char **argv) if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) return EXIT_FAIL; - map = bpf_map__next(NULL, obj); - stats_global_map = bpf_map__next(map, obj); - rx_queue_index_map = bpf_map__next(stats_global_map, obj); + map = bpf_object__find_map_by_name(obj, "config_map"); + stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map"); + rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map"); if (!map || !stats_global_map || !rx_queue_index_map) { printf("finding a map in obj file failed\n"); return EXIT_FAIL; diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c index 0f4f6e8c8611..6db450a5c1ca 100644 --- a/samples/bpf/xdp_tx_iptunnel_kern.c +++ b/samples/bpf/xdp_tx_iptunnel_kern.c @@ -19,19 +19,19 @@ #include "bpf_helpers.h" #include "xdp_tx_iptunnel_common.h" -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); -struct bpf_map_def SEC("maps") vip2tnl = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct vip), - .value_size = sizeof(struct iptnl_info), - .max_entries = MAX_IPTNL_ENTRIES, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, struct vip); + __type(value, struct iptnl_info); + __uint(max_entries, MAX_IPTNL_ENTRIES); +} vip2tnl SEC(".maps"); static __always_inline void count_tx(u32 protocol) { diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh index 4af4046d71be..40873a5d1461 
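The xdp_rxq_info_user.c hunk stops depending on map iteration order: bpf_map__next() returns maps in the order libbpf discovered them, which the move to the .maps section no longer keeps in sync with declaration order, so the loader now asks for each map by name. A minimal userspace sketch of that lookup, assuming the object has already been loaded (error handling trimmed):

#include <bpf/libbpf.h>

static int get_map_fd(struct bpf_object *obj)
{
	struct bpf_map *map;

	/* the name must match the variable declared with SEC(".maps") */
	map = bpf_object__find_map_by_name(obj, "rxcnt");
	if (!map)
		return -1;
	return bpf_map__fd(map);	/* fd usable with bpf_map_lookup_elem() */
}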
100644 --- a/samples/pktgen/functions.sh +++ b/samples/pktgen/functions.sh @@ -5,6 +5,8 @@ # Author: Jesper Dangaaard Brouer # License: GPL +set -o errexit + ## -- General shell logging cmds -- function err() { local exitcode=$1 @@ -58,6 +60,7 @@ function pg_set() { function proc_cmd() { local result local proc_file=$1 + local status=0 # after shift, the remaining args are contained in $@ shift local proc_ctrl=${PROC_DIR}/$proc_file @@ -73,13 +76,13 @@ function proc_cmd() { echo "cmd: $@ > $proc_ctrl" fi # Quoting of "$@" is important for space expansion - echo "$@" > "$proc_ctrl" - local status=$? + echo "$@" > "$proc_ctrl" || status=$? - result=$(grep "Result: OK:" $proc_ctrl) - # Due to pgctrl, cannot use exit code $? from grep - if [[ "$result" == "" ]]; then - grep "Result:" $proc_ctrl >&2 + if [[ "$proc_file" != "pgctrl" ]]; then + result=$(grep "Result: OK:" $proc_ctrl) || true + if [[ "$result" == "" ]]; then + grep "Result:" $proc_ctrl >&2 + fi fi if (( $status != 0 )); then err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\"" @@ -105,6 +108,8 @@ function pgset() { fi } +[[ $EUID -eq 0 ]] && trap 'pg_ctrl "reset"' EXIT + ## -- General shell tricks -- function root_check_run_with_sudo() { diff --git a/samples/seccomp/user-trap.c b/samples/seccomp/user-trap.c index 6d0125ca8af7..20291ec6489f 100644 --- a/samples/seccomp/user-trap.c +++ b/samples/seccomp/user-trap.c @@ -298,14 +298,14 @@ int main(void) req = malloc(sizes.seccomp_notif); if (!req) goto out_close; - memset(req, 0, sizeof(*req)); resp = malloc(sizes.seccomp_notif_resp); if (!resp) goto out_req; - memset(resp, 0, sizeof(*resp)); + memset(resp, 0, sizes.seccomp_notif_resp); while (1) { + memset(req, 0, sizes.seccomp_notif); if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) { perror("ioctl recv"); goto out_resp; diff --git a/samples/trace_printk/trace-printk.c b/samples/trace_printk/trace-printk.c index 7affc3b50b61..cfc159580263 100644 --- a/samples/trace_printk/trace-printk.c +++ b/samples/trace_printk/trace-printk.c @@ -36,6 +36,7 @@ static int __init trace_printk_init(void) /* Kick off printing in irq context */ irq_work_queue(&irqwork); + irq_work_sync(&irqwork); trace_printk("This is a %s that will use trace_bprintk()\n", "static string"); diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 10ba926ae292..d1dd4a6b6adb 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -55,14 +55,13 @@ kecho := $($(quiet)kecho) # - stdin is piped in from the first prerequisite ($<) so one has # to specify a valid file as first prerequisite (often the kbuild file) define filechk - $(Q)set -e; \ - mkdir -p $(dir $@); \ - { $(filechk_$(1)); } > $@.tmp; \ - if [ -r $@ ] && cmp -s $@ $@.tmp; then \ - rm -f $@.tmp; \ - else \ - $(kecho) ' UPD $@'; \ - mv -f $@.tmp $@; \ + $(Q)set -e; \ + mkdir -p $(dir $@); \ + trap "rm -f $(dot-target).tmp" EXIT; \ + { $(filechk_$(1)); } > $(dot-target).tmp; \ + if [ ! -r $@ ] || ! 
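The user-trap.c change is about buffer sizing: struct seccomp_notif can grow in newer kernels, so the buffers must be allocated and zeroed with the sizes the running kernel reports via SECCOMP_GET_NOTIF_SIZES, not with sizeof(), and the request must be re-zeroed before every SECCOMP_IOCTL_NOTIF_RECV or the ioctl can reject stale bytes. A condensed sketch of the intended receive loop (listener fd assumed already set up; a raw syscall stands in for the sample's seccomp() wrapper):

#include <linux/seccomp.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int notify_loop(int listener)
{
	struct seccomp_notif_sizes sizes;
	struct seccomp_notif *req;

	if (syscall(__NR_seccomp, SECCOMP_GET_NOTIF_SIZES, 0, &sizes) < 0)
		return -1;
	req = malloc(sizes.seccomp_notif);	/* kernel-reported size */
	if (!req)
		return -1;
	for (;;) {
		memset(req, 0, sizes.seccomp_notif);	/* fresh before each recv */
		if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req))
			break;
		/* handle req->data.nr here, then send a response */
	}
	free(req);
	return 0;
}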
cmp -s $@ $(dot-target).tmp; then \ + $(kecho) ' UPD $@'; \ + mv -f $(dot-target).tmp $@; \ fi endef diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include index d4adfbe42690..77a69ba9cd19 100644 --- a/scripts/Kconfig.include +++ b/scripts/Kconfig.include @@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y) # $(cc-option,<flag>) # Return y if the compiler supports <flag>, n otherwise -cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null) +cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /dev/null) # $(ld-option,<flag>) # Return y if the linker supports <flag>, n otherwise @@ -40,3 +40,10 @@ $(error-if,$(success, $(LD) -v | grep -q gold), gold linker '$(LD)' not supporte # gcc version including patch level gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh $(CC)) + +# machine bit flags +# $(m32-flag): -m32 if the compiler supports it, or an empty string otherwise. +# $(m64-flag): -m64 if the compiler supports it, or an empty string otherwise. +cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1)) +m32-flag := $(cc-option-bit,-m32) +m64-flag := $(cc-option-bit,-m64) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index a9e47953ca53..24a33c01bbf7 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -283,15 +283,6 @@ quiet_cmd_cc_lst_c = MKLST $@ $(obj)/%.lst: $(src)/%.c FORCE $(call if_changed_dep,cc_lst_c) -# header test (header-test-y, header-test-m target) -# --------------------------------------------------------------------------- - -quiet_cmd_cc_s_h = CC $@ - cmd_cc_s_h = $(CC) $(c_flags) -S -o $@ -x c /dev/null -include $< - -$(obj)/%.h.s: $(src)/%.h FORCE - $(call if_changed_dep,cc_s_h) - # Compile assembler sources (.S) # --------------------------------------------------------------------------- diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index ecddf83ac142..ca08f2fe7c34 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -48,6 +48,7 @@ KBUILD_CFLAGS += -Wno-initializer-overrides KBUILD_CFLAGS += -Wno-format KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += -Wno-format-zero-length +KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) endif endif diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index 1b405a7ed14f..708fbd08a2c5 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst @@ -56,9 +56,6 @@ new-dirs := $(filter-out $(existing-dirs), $(wanted-dirs)) $(if $(new-dirs), $(shell mkdir -p $(new-dirs))) # Rules - -ifndef HDRCHECK - quiet_cmd_install = HDRINST $@ cmd_install = $(CONFIG_SHELL) $(srctree)/scripts/headers_install.sh $< $@ @@ -81,21 +78,6 @@ existing-headers := $(filter $(old-headers), $(all-headers)) -include $(foreach f,$(existing-headers),$(dir $(f)).$(notdir $(f)).cmd) -else - -quiet_cmd_check = HDRCHK $< - cmd_check = $(PERL) $(srctree)/scripts/headers_check.pl $(dst) $(SRCARCH) $<; touch $@ - -check-files := $(addsuffix .chk, $(all-headers)) - -$(check-files): $(dst)/%.chk : $(dst)/% $(srctree)/scripts/headers_check.pl - $(call cmd,check) - -__headers: $(check-files) - @: - -endif - PHONY += FORCE FORCE: diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 179d55af5852..a66fc0acad1e 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -65,20 +65,6 @@ extra-y += $(patsubst %.dtb,%.dt.yaml, $(dtb-y)) extra-$(CONFIG_OF_ALL_DTBS) += $(patsubst %.dtb,%.dt.yaml, $(dtb-)) endif -# Test self-contained headers - -# Wildcard searches in
$(srctree)/$(src)/, but not in $(objtree)/$(obj)/. -# Stale generated headers are often left over, so pattern matching should -# be avoided. Please notice $(srctree)/$(src)/ and $(objtree)/$(obj) point -# to the same location for in-tree building. So, header-test-pattern-y should -# be used with care. -header-test-y += $(filter-out $(header-test-), \ - $(patsubst $(srctree)/$(src)/%, %, \ - $(wildcard $(addprefix $(srctree)/$(src)/, \ - $(header-test-pattern-y))))) - -extra-$(CONFIG_HEADER_TEST) += $(addsuffix .s, $(header-test-y) $(header-test-m)) - # Add subdir path extra-y := $(addprefix $(obj)/,$(extra-y)) @@ -305,13 +291,13 @@ DT_TMP_SCHEMA := $(objtree)/$(DT_BINDING_DIR)/processed-schema.yaml quiet_cmd_dtb_check = CHECK $@ cmd_dtb_check = $(DT_CHECKER) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@ ; -define rule_dtc_dt_yaml +define rule_dtc $(call cmd_and_fixdep,dtc,yaml) $(call cmd,dtb_check) endef $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE - $(call if_changed_rule,dtc_dt_yaml) + $(call if_changed_rule,dtc) dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile index 82160808765c..b5a5b1c548c9 100644 --- a/scripts/dtc/Makefile +++ b/scripts/dtc/Makefile @@ -11,7 +11,7 @@ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o # Source files need to get at the userspace version of libfdt_env.h to compile HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt -ifeq ($(wildcard /usr/include/yaml.h),) +ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),) ifneq ($(CHECK_DTBS),) $(error dtc needs libyaml for DT schema validation support. \ Install the necessary libyaml development package.) @@ -19,7 +19,7 @@ endif HOST_EXTRACFLAGS += -DNO_YAML else dtc-objs += yamltree.o -HOSTLDLIBS_dtc := -lyaml +HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs) endif # Generated files need one more search path to include headers in source tree diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l index 5c6c3fd557d7..b3b7270300de 100644 --- a/scripts/dtc/dtc-lexer.l +++ b/scripts/dtc/dtc-lexer.l @@ -23,7 +23,6 @@ LINECOMMENT "//".*\n #include "srcpos.h" #include "dtc-parser.tab.h" -YYLTYPE yylloc; extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ diff --git a/scripts/export_report.pl b/scripts/export_report.pl index 548330e8c4e7..feb3d5542a62 100755 --- a/scripts/export_report.pl +++ b/scripts/export_report.pl @@ -94,7 +94,7 @@ if (defined $opt{'o'}) { # while ( <$module_symvers> ) { chomp; - my (undef, $symbol, $namespace, $module, $gpl) = split('\t'); + my (undef, $symbol, $module, $gpl, $namespace) = split('\t'); $SYMBOL { $symbol } = [ $module , "0" , $symbol, $gpl]; } close($module_symvers); diff --git a/scripts/find-unused-docs.sh b/scripts/find-unused-docs.sh index 3f46f8977dc4..ee6a50e33aba 100755 --- a/scripts/find-unused-docs.sh +++ b/scripts/find-unused-docs.sh @@ -54,7 +54,7 @@ for file in `find $1 -name '*.c'`; do if [[ ${FILES_INCLUDED[$file]+_} ]]; then continue; fi - str=$(scripts/kernel-doc -text -export "$file" 2>/dev/null) + str=$(scripts/kernel-doc -export "$file" 2>/dev/null) if [[ -n "$str" ]]; then echo "$file" fi diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index d33de0b9f4f5..e3569543bdac 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS An arch should select this symbol if it supports building with GCC plugins. 
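The dtc-lexer.l deletion removes a duplicate definition: yylloc is already defined by the bison-generated parser, and a second definition in the lexer only ever linked because tentative definitions were merged under -fcommon. GCC 10 makes -fno-common the default, turning the duplicate into a multiple-definition link error. The failure mode in miniature, as a sketch:

/* a.c */
int counter;		/* tentative definition */

/* b.c */
int counter;		/* merged silently under -fcommon; with -fno-common
			 * the link fails: multiple definition of 'counter' */

/* the fix, as in the hunk above: keep exactly one definition and make
 * every other reference a declaration */
extern int counter;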
-config GCC_PLUGINS - bool +menuconfig GCC_PLUGINS + bool "GCC plugins" depends on HAVE_GCC_PLUGINS depends on PLUGIN_HOSTCC != "" default y @@ -25,8 +25,7 @@ config GCC_PLUGINS See Documentation/core-api/gcc-plugins.rst for details. -menu "GCC plugins" - depends on GCC_PLUGINS +if GCC_PLUGINS config GCC_PLUGIN_CYC_COMPLEXITY bool "Compute the cyclomatic complexity of a function" if EXPERT @@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK bool depends on GCC_PLUGINS && ARM -endmenu +endif diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index ae6504d07fd6..fb15f09e0e38 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -489,6 +489,8 @@ static void build_initial_tok_table(void) table[pos] = table[i]; learn_symbol(table[pos].sym, table[pos].len); pos++; + } else { + free(table[i].sym); } } table_cnt = pos; diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 3569d2dec37c..17298239e363 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -1353,7 +1353,7 @@ bool conf_set_all_new_symbols(enum conf_def_mode mode) sym_calc_value(csym); if (mode == def_random) - has_changed = randomize_choice_values(csym); + has_changed |= randomize_choice_values(csym); else { set_all_choice_values(csym); has_changed = true; diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c index 77ffff3a053c..9f1de58e9f0c 100644 --- a/scripts/kconfig/expr.c +++ b/scripts/kconfig/expr.c @@ -254,6 +254,13 @@ static int expr_eq(struct expr *e1, struct expr *e2) { int res, old_count; + /* + * A NULL expr is taken to be yes, but there's also a different way to + * represent yes. expr_is_yes() checks for either representation. + */ + if (!e1 || !e2) + return expr_is_yes(e1) && expr_is_yes(e2); + if (e1->type != e2->type) return 0; switch (e1->type) { diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 06495379fcd8..408b5c0b99b1 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -108,13 +108,13 @@ gen_btf() local bin_arch if ! [ -x "$(command -v ${PAHOLE})" ]; then - info "BTF" "${1}: pahole (${PAHOLE}) is not available" + echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" return 1 fi pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/') if [ "${pahole_ver}" -lt "113" ]; then - info "BTF" "${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13" + echo >&2 "BTF: ${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13" return 1 fi @@ -127,7 +127,9 @@ gen_btf() cut -d, -f1 | cut -d' ' -f2) bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \ awk '{print $4}') - ${OBJCOPY} --dump-section .BTF=.btf.vmlinux.bin ${1} 2>/dev/null + ${OBJCOPY} --change-section-address .BTF=0 \ + --set-section-flags .BTF=alloc -O binary \ + --only-section=.BTF ${1} .btf.vmlinux.bin ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \ --rename-section .data=.BTF .btf.vmlinux.bin ${2} } @@ -253,6 +255,10 @@ btf_vmlinux_bin_o="" if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then btf_vmlinux_bin_o=.btf.vmlinux.bin.o + else + echo >&2 "Failed to generate BTF for vmlinux" + echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF" + exit 1 fi fi diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index d2a30a7b3f07..52f1152c9838 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -2434,7 +2434,7 @@ static void write_if_changed(struct buffer *b, const char *fname) } /* parse Module.symvers file. 
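The expr_eq() guard is needed because kconfig has two encodings of a constant "yes": a NULL expression pointer and an E_SYMBOL node pointing at symbol_yes. Dereferencing e1->type before the NULL check therefore crashed on legal input. The expr_is_yes() helper that the new check leans on is, as defined in scripts/kconfig/expr.h of this era, essentially:

#define expr_is_yes(e)	(!(e) || ((e)->type == E_SYMBOL && (e)->left.sym == &symbol_yes))

so a NULL on one side now compares equal exactly when the other side is the alternative spelling of yes.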
line format: - * 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something] + * 0x12345678<tab>symbol<tab>module<tab>export<tab>namespace **/ static void read_dump(const char *fname, unsigned int kernel) { @@ -2447,7 +2447,7 @@ static void read_dump(const char *fname, unsigned int kernel) return; while ((line = get_next_line(&pos, file, size))) { - char *symname, *namespace, *modname, *d, *export, *end; + char *symname, *namespace, *modname, *d, *export; unsigned int crc; struct module *mod; struct symbol *s; @@ -2455,16 +2455,16 @@ if (!(symname = strchr(line, '\t'))) goto fail; *symname++ = '\0'; - if (!(namespace = strchr(symname, '\t'))) - goto fail; - *namespace++ = '\0'; - if (!(modname = strchr(namespace, '\t'))) + if (!(modname = strchr(symname, '\t'))) goto fail; *modname++ = '\0'; - if ((export = strchr(modname, '\t')) != NULL) - *export++ = '\0'; - if (export && ((end = strchr(export, '\t')) != NULL)) - *end = '\0'; + if (!(export = strchr(modname, '\t'))) + goto fail; + *export++ = '\0'; + if (!(namespace = strchr(export, '\t'))) + goto fail; + *namespace++ = '\0'; + crc = strtoul(line, &d, 16); if (*symname == '\0' || *modname == '\0' || *d != '\0') goto fail; @@ -2516,9 +2516,9 @@ static void write_dump(const char *fname) namespace = symbol->namespace; buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n", symbol->crc, symbol->name, - namespace ? namespace : "", symbol->module->name, - export_str(symbol->export)); + export_str(symbol->export), + namespace ? namespace : ""); } symbol = symbol->next; } diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index e0750b70453f..357dc56bcf30 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -136,7 +136,7 @@ mkdir -p debian/source/ echo "1.0" > debian/source/format echo $debarch > debian/arch -extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)" +extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)" extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)" # Generate a simple changelog template @@ -174,7 +174,7 @@ Source: $sourcename Section: kernel Priority: optional Maintainer: $maintainer -Build-Depends: bc, kmod, cpio, bison, flex | flex:native $extra_build_depends +Build-Depends: bc, rsync, kmod, cpio, bison, flex | flex:native $extra_build_depends Homepage: http://www.kernel.org/ Package: $packagename diff --git a/scripts/parse-maintainers.pl b/scripts/parse-maintainers.pl old mode 100644 new mode 100755 diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 612268eabef4..7225107a9aaf 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -38,6 +38,10 @@ #define R_AARCH64_ABS64 257 #endif +#define R_ARM_PC24 1 +#define R_ARM_THM_CALL 10 +#define R_ARM_CALL 28 + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. */ static char gpfx; /* prefix for global symbol name (sometimes '_') */ @@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done! #define RECORD_MCOUNT_64 #include "recordmcount.h" +static int arm_is_fake_mcount(Elf32_Rel const *rp) +{ + switch (ELF32_R_TYPE(w(rp->r_info))) { + case R_ARM_THM_CALL: + case R_ARM_CALL: + case R_ARM_PC24: + return 0; + } + + return 1; +} + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
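After the modpost reordering, each Module.symvers line carries five tab-separated columns with the optional namespace moved last, so external parsers that stop reading after the export column keep working. A representative line (symbol, module and namespace values are illustrative):

0x12345678	usb_stor_suspend	drivers/usb/storage/usb-storage	EXPORT_SYMBOL_GPL	USB_STORAGE

read_dump() above splits in exactly that order (crc, symbol, module, export, namespace) and now fails the parse when any of the first four tab separators is missing, where the old code treated the trailing fields as optional.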
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] @@ -523,6 +539,7 @@ static int do_file(char const *const fname) altmcount = "__gnu_mcount_nc"; make_nop = make_nop_arm; rel_type_nop = R_ARM_NONE; + is_fake_mcount32 = arm_is_fake_mcount; gpfx = 0; break; case EM_AARCH64: diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 45d13b6462aa..90d21675c3ad 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -593,7 +593,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt) void __aa_bump_ns_revision(struct aa_ns *ns) { - ns->revision++; + WRITE_ONCE(ns->revision, ns->revision + 1); wake_up_interruptible(&ns->wait); } diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 9e0492795267..039ca71872ce 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm, if (!bprm || !profile->xattr_count) return 0; + might_sleep(); /* transition from exec match to xattr set */ state = aa_dfa_null_transition(profile->xmatch, state); @@ -361,10 +362,11 @@ out: } /** - * __attach_match_ - find an attachment match + * find_attach - do attachment search for unconfined processes * @bprm - binprm structure of transitioning task - * @name - to match against (NOT NULL) + * @ns: the current namespace (NOT NULL) * @head - profile list to walk (NOT NULL) + * @name - to match against (NOT NULL) * @info - info message if there was an error (NOT NULL) * * Do a linear search on the profiles in the list. There is a matching @@ -374,12 +376,11 @@ out: * * Requires: @head not be shared or have appropriate locks held * - * Returns: profile or NULL if no match found + * Returns: label or NULL if no match found */ -static struct aa_profile *__attach_match(const struct linux_binprm *bprm, - const char *name, - struct list_head *head, - const char **info) +static struct aa_label *find_attach(const struct linux_binprm *bprm, + struct aa_ns *ns, struct list_head *head, + const char *name, const char **info) { int candidate_len = 0, candidate_xattrs = 0; bool conflict = false; @@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, AA_BUG(!name); AA_BUG(!head); + rcu_read_lock(); +restart: list_for_each_entry_rcu(profile, head, base.list) { if (profile->label.flags & FLAG_NULL && &profile->label == ns_unconfined(profile->ns)) @@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, perm = dfa_user_allow(profile->xmatch, state); /* any accepting state means a valid match. 
*/ if (perm & MAY_EXEC) { - int ret; + int ret = 0; if (count < candidate_len) continue; - ret = aa_xattrs_match(bprm, profile, state); - /* Fail matching if the xattrs don't match */ - if (ret < 0) - continue; + if (bprm && profile->xattr_count) { + long rev = READ_ONCE(ns->revision); + if (!aa_get_profile_not0(profile)) + goto restart; + rcu_read_unlock(); + ret = aa_xattrs_match(bprm, profile, + state); + rcu_read_lock(); + aa_put_profile(profile); + if (rev != + READ_ONCE(ns->revision)) + /* policy changed */ + goto restart; + /* + * Fail matching if the xattrs don't + * match + */ + if (ret < 0) + continue; + } /* * TODO: allow for more flexible best match * @@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, candidate_xattrs = ret; conflict = false; } - } else if (!strcmp(profile->base.name, name)) + } else if (!strcmp(profile->base.name, name)) { /* * old exact non-re match, without conditionals such * as xattrs. no more searching required */ - return profile; + candidate = profile; + goto out; + } } - if (conflict) { - *info = "conflicting profile attachments"; + if (!candidate || conflict) { + if (conflict) + *info = "conflicting profile attachments"; + rcu_read_unlock(); return NULL; } - return candidate; -} - -/** - * find_attach - do attachment search for unconfined processes - * @bprm - binprm structure of transitioning task - * @ns: the current namespace (NOT NULL) - * @list: list to search (NOT NULL) - * @name: the executable name to match against (NOT NULL) - * @info: info message if there was an error - * - * Returns: label or NULL if no match found - */ -static struct aa_label *find_attach(const struct linux_binprm *bprm, - struct aa_ns *ns, struct list_head *list, - const char *name, const char **info) -{ - struct aa_profile *profile; - - rcu_read_lock(); - profile = aa_get_profile(__attach_match(bprm, name, list, info)); +out: + candidate = aa_get_newest_profile(candidate); rcu_read_unlock(); - return profile ? 
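The find_attach() rework is an instance of a general pattern for doing blocking work (aa_xattrs_match() now asserts might_sleep()) from inside an RCU list walk: pin the current element with a reference, leave the RCU section, sleep, re-enter, and use a generation counter to detect whether the walk went stale. Stripped to a skeleton, with get_ref_not_zero(), put_ref() and do_blocking_work() as hypothetical stand-ins for the aa_get_profile_not0(), aa_put_profile() and aa_xattrs_match() calls above:

rcu_read_lock();
restart:
list_for_each_entry_rcu(p, head, list) {
	long rev = READ_ONCE(ns->revision);

	if (!get_ref_not_zero(p))	/* element already dying: rescan */
		goto restart;
	rcu_read_unlock();
	ret = do_blocking_work(p);	/* sleeping is safe here */
	rcu_read_lock();
	put_ref(p);
	if (rev != READ_ONCE(ns->revision))
		goto restart;		/* policy changed while we slept */
}
rcu_read_unlock();

This is also why __aa_bump_ns_revision() above switched to WRITE_ONCE(): the counter is now read locklessly by these walkers.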
&profile->label : NULL; + return &candidate->label; } static const char *next_name(int xtype, const char *name) diff --git a/security/apparmor/label.c b/security/apparmor/label.c index 59f1cc2557a7..470693239e64 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c @@ -1458,11 +1458,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label, /* helper macro for snprint routines */ #define update_for_len(total, len, size, str) \ do { \ + size_t ulen = len; \ + \ AA_BUG(len < 0); \ - total += len; \ - len = min(len, size); \ - size -= len; \ - str += len; \ + total += ulen; \ + ulen = min(ulen, size); \ + size -= ulen; \ + str += ulen; \ } while (0) /** @@ -1597,7 +1599,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns, struct aa_ns *prev_ns = NULL; struct label_it i; int count = 0, total = 0; - size_t len; + ssize_t len; AA_BUG(!str && size != 0); AA_BUG(!label); diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index ade333074c8e..06355717ee84 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -1124,8 +1124,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ mutex_lock_nested(&ns->parent->lock, ns->level); - __aa_remove_ns(ns); __aa_bump_ns_revision(ns); + __aa_remove_ns(ns); mutex_unlock(&ns->parent->lock); } else { /* remove profile */ @@ -1137,9 +1137,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, goto fail_ns_lock; } name = profile->base.hname; + __aa_bump_ns_revision(ns); __remove_profile(profile); __aa_labelset_update_subtree(ns); - __aa_bump_ns_revision(ns); mutex_unlock(&ns->lock); } diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 5380aca2b351..ee9aec5e98f0 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -263,7 +263,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry) static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) { struct ima_rule_entry *nentry; - int i, result; + int i; nentry = kmalloc(sizeof(*nentry), GFP_KERNEL); if (!nentry) @@ -277,7 +277,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm)); for (i = 0; i < MAX_LSM_RULES; i++) { - if (!entry->lsm[i].rule) + if (!entry->lsm[i].args_p) continue; nentry->lsm[i].type = entry->lsm[i].type; @@ -286,13 +286,13 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) if (!nentry->lsm[i].args_p) goto out_err; - result = security_filter_rule_init(nentry->lsm[i].type, - Audit_equal, - nentry->lsm[i].args_p, - &nentry->lsm[i].rule); - if (result == -EINVAL) - pr_warn("ima: rule for LSM \'%d\' is undefined\n", - entry->lsm[i].type); + security_filter_rule_init(nentry->lsm[i].type, + Audit_equal, + nentry->lsm[i].args_p, + &nentry->lsm[i].rule); + if (!nentry->lsm[i].rule) + pr_warn("rule for LSM \'%s\' is undefined\n", + (char *)entry->lsm[i].args_p); } return nentry; @@ -329,7 +329,7 @@ static void ima_lsm_update_rules(void) list_for_each_entry_safe(entry, e, &ima_policy_rules, list) { needs_update = 0; for (i = 0; i < MAX_LSM_RULES; i++) { - if (entry->lsm[i].rule) { + if (entry->lsm[i].args_p) { needs_update = 1; break; } @@ -339,8 +339,7 @@ static void ima_lsm_update_rules(void) result = ima_lsm_update_rule(entry); if (result) { - pr_err("ima: lsm rule 
update error %d\n", - result); + pr_err("lsm rule update error %d\n", result); return; } } @@ -357,7 +356,7 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event, } /** - * ima_match_rules - determine whether an inode matches the measure rule. + * ima_match_rules - determine whether an inode matches the policy rule. * @rule: a pointer to a rule * @inode: a pointer to an inode * @cred: a pointer to a credentials structure for user validation @@ -415,9 +414,12 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, int rc = 0; u32 osid; - if (!rule->lsm[i].rule) - continue; - + if (!rule->lsm[i].rule) { + if (!rule->lsm[i].args_p) + continue; + else + return false; + } switch (i) { case LSM_OBJ_USER: case LSM_OBJ_ROLE: @@ -822,8 +824,14 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry, entry->lsm[lsm_rule].args_p, &entry->lsm[lsm_rule].rule); if (!entry->lsm[lsm_rule].rule) { - kfree(entry->lsm[lsm_rule].args_p); - return -EINVAL; + pr_warn("rule for LSM \'%s\' is undefined\n", + (char *)entry->lsm[lsm_rule].args_p); + + if (ima_rules == &ima_default_rules) { + kfree(entry->lsm[lsm_rule].args_p); + result = -EINVAL; + } else + result = 0; } return result; diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c index 81b19c52832b..020fc7a11ef0 100644 --- a/security/integrity/platform_certs/load_uefi.c +++ b/security/integrity/platform_certs/load_uefi.c @@ -39,16 +39,18 @@ static __init bool uefi_check_ignore_db(void) * Get a certificate list blob from the named EFI variable. */ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid, - unsigned long *size) + unsigned long *size, efi_status_t *status) { - efi_status_t status; unsigned long lsize = 4; unsigned long tmpdb[4]; void *db; - status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb); - if (status != EFI_BUFFER_TOO_SMALL) { - pr_err("Couldn't get size: 0x%lx\n", status); + *status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb); + if (*status == EFI_NOT_FOUND) + return NULL; + + if (*status != EFI_BUFFER_TOO_SMALL) { + pr_err("Couldn't get size: 0x%lx\n", *status); return NULL; } @@ -56,10 +58,10 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid, if (!db) return NULL; - status = efi.get_variable(name, guid, NULL, &lsize, db); - if (status != EFI_SUCCESS) { + *status = efi.get_variable(name, guid, NULL, &lsize, db); + if (*status != EFI_SUCCESS) { kfree(db); - pr_err("Error reading db var: 0x%lx\n", status); + pr_err("Error reading db var: 0x%lx\n", *status); return NULL; } @@ -144,6 +146,7 @@ static int __init load_uefi_certs(void) efi_guid_t mok_var = EFI_SHIM_LOCK_GUID; void *db = NULL, *dbx = NULL, *mok = NULL; unsigned long dbsize = 0, dbxsize = 0, moksize = 0; + efi_status_t status; int rc = 0; if (!efi.get_variable) @@ -153,9 +156,12 @@ static int __init load_uefi_certs(void) * an error if we can't get them. 
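The new status out-parameter lets the callers (shown in the next hunks) distinguish "variable absent" from "variable unreadable": EFI_NOT_FOUND is a perfectly normal state on systems without Secure Boot key databases and is now logged at debug level only, instead of alarming users with an error. Caller-side, the intended shape is:

efi_status_t status;
unsigned long dbsize = 0;
void *db;

db = get_cert_list(L"db", &secure_var, &dbsize, &status);
if (!db) {
	if (status == EFI_NOT_FOUND)
		pr_debug("MODSIGN: db variable wasn't found\n");	/* expected */
	else
		pr_err("MODSIGN: Couldn't get UEFI db list\n");		/* real failure */
}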
*/ if (!uefi_check_ignore_db()) { - db = get_cert_list(L"db", &secure_var, &dbsize); + db = get_cert_list(L"db", &secure_var, &dbsize, &status); if (!db) { - pr_err("MODSIGN: Couldn't get UEFI db list\n"); + if (status == EFI_NOT_FOUND) + pr_debug("MODSIGN: db variable wasn't found\n"); + else + pr_err("MODSIGN: Couldn't get UEFI db list\n"); } else { rc = parse_efi_signature_list("UEFI:db", db, dbsize, get_handler_for_db); @@ -166,9 +172,12 @@ static int __init load_uefi_certs(void) } } - mok = get_cert_list(L"MokListRT", &mok_var, &moksize); + mok = get_cert_list(L"MokListRT", &mok_var, &moksize, &status); if (!mok) { - pr_info("Couldn't get UEFI MokListRT\n"); + if (status == EFI_NOT_FOUND) + pr_debug("MokListRT variable wasn't found\n"); + else + pr_info("Couldn't get UEFI MokListRT\n"); } else { rc = parse_efi_signature_list("UEFI:MokListRT", mok, moksize, get_handler_for_db); @@ -177,9 +186,12 @@ static int __init load_uefi_certs(void) kfree(mok); } - dbx = get_cert_list(L"dbx", &secure_var, &dbxsize); + dbx = get_cert_list(L"dbx", &secure_var, &dbxsize, &status); if (!dbx) { - pr_info("Couldn't get UEFI dbx list\n"); + if (status == EFI_NOT_FOUND) + pr_debug("dbx variable wasn't found\n"); + else + pr_info("Couldn't get UEFI dbx list\n"); } else { rc = parse_efi_signature_list("UEFI:dbx", dbx, dbxsize, diff --git a/security/selinux/avc.c b/security/selinux/avc.c index ecd3829996aa..d18cb32a242a 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state, if (likely(!audited)) return 0; return slow_avc_audit(state, ssid, tsid, tclass, requested, - audited, denied, result, ad, 0); + audited, denied, result, ad); } static void avc_node_free(struct rcu_head *rhead) @@ -617,40 +617,37 @@ static struct avc_node *avc_insert(struct selinux_avc *avc, struct avc_node *pos, *node = NULL; int hvalue; unsigned long flag; + spinlock_t *lock; + struct hlist_head *head; if (avc_latest_notif_update(avc, avd->seqno, 1)) - goto out; + return NULL; node = avc_alloc_node(avc); - if (node) { - struct hlist_head *head; - spinlock_t *lock; - int rc = 0; + if (!node) + return NULL; - hvalue = avc_hash(ssid, tsid, tclass); - avc_node_populate(node, ssid, tsid, tclass, avd); - rc = avc_xperms_populate(node, xp_node); - if (rc) { - kmem_cache_free(avc_node_cachep, node); - return NULL; - } - head = &avc->avc_cache.slots[hvalue]; - lock = &avc->avc_cache.slots_lock[hvalue]; - - spin_lock_irqsave(lock, flag); - hlist_for_each_entry(pos, head, list) { - if (pos->ae.ssid == ssid && - pos->ae.tsid == tsid && - pos->ae.tclass == tclass) { - avc_node_replace(avc, node, pos); - goto found; - } - } - hlist_add_head_rcu(&node->list, head); -found: - spin_unlock_irqrestore(lock, flag); + avc_node_populate(node, ssid, tsid, tclass, avd); + if (avc_xperms_populate(node, xp_node)) { + avc_node_kill(avc, node); + return NULL; } -out: + + hvalue = avc_hash(ssid, tsid, tclass); + head = &avc->avc_cache.slots[hvalue]; + lock = &avc->avc_cache.slots_lock[hvalue]; + spin_lock_irqsave(lock, flag); + hlist_for_each_entry(pos, head, list) { + if (pos->ae.ssid == ssid && + pos->ae.tsid == tsid && + pos->ae.tclass == tclass) { + avc_node_replace(avc, node, pos); + goto found; + } + } + hlist_add_head_rcu(&node->list, head); +found: + spin_unlock_irqrestore(lock, flag); return node; } @@ -758,8 +755,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a) noinline int slow_avc_audit(struct selinux_state *state, u32 ssid, u32 tsid, 
u16 tclass, u32 requested, u32 audited, u32 denied, int result, - struct common_audit_data *a, - unsigned int flags) + struct common_audit_data *a) { struct common_audit_data stack_data; struct selinux_audit_data sad; @@ -772,17 +768,6 @@ noinline int slow_avc_audit(struct selinux_state *state, a->type = LSM_AUDIT_DATA_NONE; } - /* - * When in a RCU walk do the audit on the RCU retry. This is because - * the collection of the dname in an inode audit message is not RCU - * safe. Note this may drop some audits when the situation changes - * during retry. However this is logically just as if the operation - * happened a little later. - */ - if ((a->type == LSM_AUDIT_DATA_INODE) && - (flags & MAY_NOT_BLOCK)) - return -ECHILD; - sad.tclass = tclass; sad.requested = requested; sad.ssid = ssid; @@ -855,15 +840,14 @@ static int avc_update_node(struct selinux_avc *avc, /* * If we are in a non-blocking code path, e.g. VFS RCU walk, * then we must not add permissions to a cache entry - * because we cannot safely audit the denial. Otherwise, + * because we will not audit the denial. Otherwise, * during the subsequent blocking retry (e.g. VFS ref walk), we * will find the permissions already granted in the cache entry * and won't audit anything at all, leading to silent denials in * permissive mode that only appear when in enforcing mode. * - * See the corresponding handling in slow_avc_audit(), and the - * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag, - * which is transliterated into AVC_NONBLOCKING. + * See the corresponding handling of MAY_NOT_BLOCK in avc_audit() + * and selinux_inode_permission(). */ if (flags & AVC_NONBLOCKING) return 0; @@ -907,7 +891,7 @@ static int avc_update_node(struct selinux_avc *avc, if (orig->ae.xp_node) { rc = avc_xperms_populate(node, orig->ae.xp_node); if (rc) { - kmem_cache_free(avc_node_cachep, node); + avc_node_kill(avc, node); goto out_unlock; } } @@ -1205,6 +1189,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass, return rc; } +int avc_has_perm_flags(struct selinux_state *state, + u32 ssid, u32 tsid, u16 tclass, u32 requested, + struct common_audit_data *auditdata, + int flags) +{ + struct av_decision avd; + int rc, rc2; + + rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, + (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0, + &avd); + + rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc, + auditdata, flags); + if (rc2) + return rc2; + return rc; +} + u32 avc_policy_seqno(struct selinux_state *state) { return state->avc->avc_cache.latest_notif; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 9625b99e677f..39410913a694 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2766,6 +2766,14 @@ static int selinux_mount(const char *dev_name, return path_has_perm(cred, path, FILE__MOUNTON); } +static int selinux_move_mount(const struct path *from_path, + const struct path *to_path) +{ + const struct cred *cred = current_cred(); + + return path_has_perm(cred, to_path, FILE__MOUNTON); +} + static int selinux_umount(struct vfsmount *mnt, int flags) { const struct cred *cred = current_cred(); @@ -3008,14 +3016,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode, if (IS_ERR(isec)) return PTR_ERR(isec); - return avc_has_perm(&selinux_state, - sid, isec->sid, isec->sclass, FILE__READ, &ad); + return avc_has_perm_flags(&selinux_state, + sid, isec->sid, isec->sclass, FILE__READ, &ad, + rcu ? 
MAY_NOT_BLOCK : 0); } static noinline int audit_inode_permission(struct inode *inode, u32 perms, u32 audited, u32 denied, - int result, - unsigned flags) + int result) { struct common_audit_data ad; struct inode_security_struct *isec = selinux_inode(inode); @@ -3026,7 +3034,7 @@ static noinline int audit_inode_permission(struct inode *inode, rc = slow_avc_audit(&selinux_state, current_sid(), isec->sid, isec->sclass, perms, - audited, denied, result, &ad, flags); + audited, denied, result, &ad); if (rc) return rc; return 0; @@ -3073,7 +3081,11 @@ static int selinux_inode_permission(struct inode *inode, int mask) if (likely(!audited)) return rc; - rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags); + /* fall back to ref-walk if we have to generate audit */ + if (flags & MAY_NOT_BLOCK) + return -ECHILD; + + rc2 = audit_inode_permission(inode, perms, audited, denied, rc); if (rc2) return rc2; return rc; @@ -6834,6 +6846,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts), LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt), + LSM_HOOK_INIT(move_mount, selinux_move_mount), + LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security), LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as), diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index 7be0e1e90e8b..cf4cc3ef959b 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h @@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested, int slow_avc_audit(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass, u32 requested, u32 audited, u32 denied, int result, - struct common_audit_data *a, - unsigned flags); + struct common_audit_data *a); /** * avc_audit - Audit the granting or denial of permissions. @@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state, audited = avc_audit_required(requested, avd, result, 0, &denied); if (likely(!audited)) return 0; + /* fall back to ref-walk if we have to generate audit */ + if (flags & MAY_NOT_BLOCK) + return -ECHILD; return slow_avc_audit(state, ssid, tsid, tclass, requested, audited, denied, result, - a, flags); + a); } #define AVC_STRICT 1 /* Ignore permissive mode. 
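The avc_audit() and selinux_inode_permission() changes converge on one rule: building an audit record is not RCU-walk safe (collecting the dentry name may block), so when the hook runs under MAY_NOT_BLOCK the code returns -ECHILD instead of auditing, and the VFS transparently retries the lookup in ref-walk mode where blocking is permitted. The contract, as a fragment:

/* in a permission hook that may be called during RCU path walk */
if (audited) {
	if (flags & MAY_NOT_BLOCK)
		return -ECHILD;		/* VFS retries in ref-walk mode */
	rc2 = slow_avc_audit(...);	/* blocking is safe on the retry */
	if (rc2)
		return rc2;
}
return rc;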
*/ @@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata); +int avc_has_perm_flags(struct selinux_state *state, + u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct common_audit_data *auditdata, + int flags); int avc_has_extended_perms(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass, u32 requested, diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index abeb09c30633..ad22066eba04 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2832,42 +2832,39 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, int addrlen) { int rc = 0; -#if IS_ENABLED(CONFIG_IPV6) - struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; -#endif -#ifdef SMACK_IPV6_SECMARK_LABELING - struct smack_known *rsp; - struct socket_smack *ssp; -#endif if (sock->sk == NULL) return 0; - + if (sock->sk->sk_family != PF_INET && + (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6)) + return 0; + if (addrlen < offsetofend(struct sockaddr, sa_family)) + return 0; + if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; #ifdef SMACK_IPV6_SECMARK_LABELING - ssp = sock->sk->sk_security; + struct smack_known *rsp; #endif - switch (sock->sk->sk_family) { - case PF_INET: - if (addrlen < sizeof(struct sockaddr_in) || - sap->sa_family != AF_INET) - return -EINVAL; - rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap); - break; - case PF_INET6: - if (addrlen < SIN6_LEN_RFC2133 || sap->sa_family != AF_INET6) - return -EINVAL; + if (addrlen < SIN6_LEN_RFC2133) + return 0; #ifdef SMACK_IPV6_SECMARK_LABELING rsp = smack_ipv6host_label(sip); - if (rsp != NULL) + if (rsp != NULL) { + struct socket_smack *ssp = sock->sk->sk_security; + rc = smk_ipv6_check(ssp->smk_out, rsp, sip, - SMK_CONNECTING); + SMK_CONNECTING); + } #endif #ifdef SMACK_IPV6_PORT_LABELING rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); #endif - break; + return rc; } + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) + return 0; + rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap); return rc; } diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index dd3d5942e669..1b467381986f 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -951,7 +951,8 @@ static bool tomoyo_manager(void) exe = tomoyo_get_exe(); if (!exe) return false; - list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) { + list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (!ptr->head.is_deleted && (!tomoyo_pathcmp(domainname, ptr->manager) || !strcmp(exe, ptr->manager->name))) { @@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname) if (mutex_lock_interruptible(&tomoyo_policy_lock)) return -EINTR; /* Is there an active domain? */ - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, + srcu_read_lock_held(&tomoyo_ss)) { /* Never delete tomoyo_kernel_domain */ if (domain == &tomoyo_kernel_domain) continue; @@ -2320,9 +2322,9 @@ static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = { [TOMOYO_MEMORY_QUERY] = "query message:", }; -/* Timestamp counter for last updated. 
*/ -static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT]; /* Counter for number of updates. */ +static atomic_t tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT]; +/* Timestamp counter for last updated. */ static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT]; /** @@ -2334,10 +2336,7 @@ static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT]; */ void tomoyo_update_stat(const u8 index) { - /* - * I don't use atomic operations because race condition is not fatal. - */ - tomoyo_stat_updated[index]++; + atomic_inc(&tomoyo_stat_updated[index]); tomoyo_stat_modified[index] = ktime_get_real_seconds(); } @@ -2358,7 +2357,7 @@ static void tomoyo_read_stat(struct tomoyo_io_buffer *head) for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) { tomoyo_io_printf(head, "Policy %-30s %10u", tomoyo_policy_headers[i], - tomoyo_stat_updated[i]); + atomic_read(&tomoyo_stat_updated[i])); if (tomoyo_stat_modified[i]) { struct tomoyo_time stamp; @@ -2778,7 +2777,8 @@ void tomoyo_check_profile(void) tomoyo_policy_loaded = true; pr_info("TOMOYO: 2.6.0\n"); - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, + srcu_read_lock_held(&tomoyo_ss)) { const u8 profile = domain->profile; struct tomoyo_policy_namespace *ns = domain->ns; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 8526a0a74023..7869d6a9980b 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, if (mutex_lock_interruptible(&tomoyo_policy_lock)) return -ENOMEM; - list_for_each_entry_rcu(entry, list, list) { + list_for_each_entry_rcu(entry, list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) continue; if (!check_duplicate(entry, new_entry)) @@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, } if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry_rcu(entry, list, list) { + list_for_each_entry_rcu(entry, list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) continue; if (!tomoyo_same_acl_head(entry, new_entry) || @@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r, u16 i = 0; retry: - list_for_each_entry_rcu(ptr, list, list) { + list_for_each_entry_rcu(ptr, list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (ptr->is_deleted || ptr->type != r->param_type) continue; if (!check_entry(r, ptr)) @@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition { const struct tomoyo_transition_control *ptr; - list_for_each_entry_rcu(ptr, list, head.list) { + list_for_each_entry_rcu(ptr, list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (ptr->head.is_deleted || ptr->type != type) continue; if (ptr->domainname) { @@ -735,7 +739,8 @@ retry: /* Check 'aggregator' directive. 
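These TOMOYO hunks (continuing into group.c, realpath.c and util.c below) all pass the optional lockdep condition that list_for_each_entry_rcu() grew for exactly this case: TOMOYO guards its policy lists with SRCU rather than plain rcu_read_lock(), so the macro checks the stated condition instead of rcu_read_lock_held(), silencing false-positive RCU-lockdep splats under CONFIG_PROVE_RCU_LIST. The usage pattern, sketched:

struct tomoyo_domain_info *domain;
int idx = srcu_read_lock(&tomoyo_ss);

list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
			srcu_read_lock_held(&tomoyo_ss)) {
	/* reads of domain are protected by the SRCU read-side section */
}
srcu_read_unlock(&tomoyo_ss, idx);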
*/ candidate = &exename; - list_for_each_entry_rcu(ptr, list, head.list) { + list_for_each_entry_rcu(ptr, list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (ptr->head.is_deleted || !tomoyo_path_matches_pattern(&exename, ptr->original_name)) diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c index a37c7dc66e44..1cecdd797597 100644 --- a/security/tomoyo/group.c +++ b/security/tomoyo/group.c @@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, { struct tomoyo_path_group *member; - list_for_each_entry_rcu(member, &group->member_list, head.list) { + list_for_each_entry_rcu(member, &group->member_list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (member->head.is_deleted) continue; if (!tomoyo_path_matches_pattern(pathname, member->member_name)) @@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min, struct tomoyo_number_group *member; bool matched = false; - list_for_each_entry_rcu(member, &group->member_list, head.list) { + list_for_each_entry_rcu(member, &group->member_list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (member->head.is_deleted) continue; if (min > member->number.values[1] || @@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, bool matched = false; const u8 size = is_ipv6 ? 16 : 4; - list_for_each_entry_rcu(member, &group->member_list, head.list) { + list_for_each_entry_rcu(member, &group->member_list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (member->head.is_deleted) continue; if (member->address.is_ipv6 != is_ipv6) diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index e7832448d721..bf38fc1b59b2 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -217,31 +217,6 @@ out: return ERR_PTR(-ENOMEM); } -/** - * tomoyo_get_socket_name - Get the name of a socket. - * - * @path: Pointer to "struct path". - * @buffer: Pointer to buffer to return value in. - * @buflen: Sizeof @buffer. - * - * Returns the buffer. - */ -static char *tomoyo_get_socket_name(const struct path *path, char * const buffer, - const int buflen) -{ - struct inode *inode = d_backing_inode(path->dentry); - struct socket *sock = inode ? SOCKET_I(inode) : NULL; - struct sock *sk = sock ? sock->sk : NULL; - - if (sk) { - snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]", - sk->sk_family, sk->sk_type, sk->sk_protocol); - } else { - snprintf(buffer, buflen, "socket:[unknown]"); - } - return buffer; -} - /** * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root. * @@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path) break; /* To make sure that pos is '\0' terminated. */ buf[buf_len - 1] = '\0'; - /* Get better name for socket. */ - if (sb->s_magic == SOCKFS_MAGIC) { - pos = tomoyo_get_socket_name(path, buf, buf_len - 1); - goto encode; - } - /* For "pipe:[\$]". */ + /* For "pipe:[\$]" and "socket:[\$]". 
*/ if (dentry->d_op && dentry->d_op->d_dname) { pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1); goto encode; diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 52752e1a84ed..eba0b3395851 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname) name.name = domainname; tomoyo_fill_path_info(&name); - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (!domain->is_deleted && !tomoyo_pathcmp(&name, domain->domainname)) return domain; @@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) return false; if (!domain) return true; - list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { + list_for_each_entry_rcu(ptr, &domain->acl_info_list, list, + srcu_read_lock_held(&tomoyo_ss)) { u16 perm; u8 i; diff --git a/sound/core/control.c b/sound/core/control.c index 7a4d8690ce41..08ca7666e84c 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1430,8 +1430,9 @@ static int call_tlv_handler(struct snd_ctl_file *file, int op_flag, if (kctl->tlv.c == NULL) return -ENXIO; - /* When locked, this is unavailable. */ - if (vd->owner != NULL && vd->owner != file) + /* Write and command operations are not allowed for locked element. */ + if (op_flag != SNDRV_CTL_TLV_OP_READ && + vd->owner != NULL && vd->owner != file) return -EPERM; return kctl->tlv.c(kctl, op_flag, size, buf); diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c index 2045697f449d..797d838a2f9e 100644 --- a/sound/core/oss/linear.c +++ b/sound/core/oss/linear.c @@ -107,6 +107,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin, } } #endif + if (frames > dst_channels[0].frames) + frames = dst_channels[0].frames; convert(plugin, src_channels, dst_channels, frames); return frames; } diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 7915564bd394..3788906421a7 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -269,6 +269,8 @@ static snd_pcm_sframes_t mulaw_transfer(struct snd_pcm_plugin *plugin, } } #endif + if (frames > dst_channels[0].frames) + frames = dst_channels[0].frames; data = (struct mulaw_priv *)plugin->extra_data; data->func(plugin, src_channels, dst_channels, frames); return frames; diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index 31cb2acf8afc..732bbede7ebf 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames) while (plugin->next) { if (plugin->dst_frames) frames = plugin->dst_frames(plugin, frames); - if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0)) + if ((snd_pcm_sframes_t)frames <= 0) return -ENXIO; plugin = plugin->next; err = snd_pcm_plugin_alloc(plugin, frames); @@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames) while (plugin->prev) { if (plugin->src_frames) frames = plugin->src_frames(plugin, frames); - if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0)) + if ((snd_pcm_sframes_t)frames <= 0) return -ENXIO; plugin = plugin->prev; err = snd_pcm_plugin_alloc(plugin, frames); @@ -209,6 +209,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p if (stream == SNDRV_PCM_STREAM_PLAYBACK) { plugin = snd_pcm_plug_last(plug); while (plugin && drv_frames > 0) { + if (drv_frames > 
plugin->buf_frames) + drv_frames = plugin->buf_frames; plugin_prev = plugin->prev; if (plugin->src_frames) drv_frames = plugin->src_frames(plugin, drv_frames); @@ -220,6 +222,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p plugin_next = plugin->next; if (plugin->dst_frames) drv_frames = plugin->dst_frames(plugin, drv_frames); + if (drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; plugin = plugin_next; } } else @@ -248,11 +252,15 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc if (frames < 0) return frames; } + if (frames > plugin->buf_frames) + frames = plugin->buf_frames; plugin = plugin_next; } } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { plugin = snd_pcm_plug_last(plug); while (plugin) { + if (frames > plugin->buf_frames) + frames = plugin->buf_frames; plugin_prev = plugin->prev; if (plugin->src_frames) { frames = plugin->src_frames(plugin, frames); diff --git a/sound/core/oss/route.c b/sound/core/oss/route.c index c8171f5783c8..72dea04197ef 100644 --- a/sound/core/oss/route.c +++ b/sound/core/oss/route.c @@ -57,6 +57,8 @@ static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin, return -ENXIO; if (frames == 0) return 0; + if (frames > dst_channels[0].frames) + frames = dst_channels[0].frames; nsrcs = plugin->src_format.channels; ndsts = plugin->dst_format.channels; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 91c6ad58729f..5c74ea2bb44b 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -222,7 +222,8 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream) return false; if (substream->ops->mmap || - substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV) + (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV && + substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC)) return true; return dma_can_mmap(substream->dma_buffer.dev.dev); @@ -705,6 +706,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size) runtime->boundary *= 2; + /* clear the buffer for avoiding possible kernel info leaks */ + if (runtime->dma_area && !substream->ops->copy_user) + memset(runtime->dma_area, 0, runtime->dma_bytes); + snd_pcm_timer_resolution_change(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP); @@ -3403,7 +3408,8 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, #endif /* CONFIG_GENERIC_ALLOCATOR */ #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */ if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page && - substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) + (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV || + substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC)) return dma_mmap_coherent(substream->dma_buffer.dev.dev, area, substream->runtime->dma_area, diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c index a88c235b2ea3..2ddfe2226651 100644 --- a/sound/core/seq/oss/seq_oss_midi.c +++ b/sound/core/seq/oss/seq_oss_midi.c @@ -602,6 +602,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq len = snd_seq_oss_timer_start(dp->timer); if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev); + snd_midi_event_reset_decode(mdev->coder); } else { len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev); if (len > 0) diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 
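The linear.c, mulaw.c, route.c and pcm_plugin.c hunks above all add the same clamp: a transfer or size-conversion step must never process more frames than the destination plugin buffer holds, because the counts derive from client-controlled OSS parameters and an oversized count meant an out-of-bounds write. The recurring guard, as a fragment:

if (frames > dst_channels[0].frames)
	frames = dst_channels[0].frames;	/* cap at the dst plugin buffer */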
6d9592f0ae1d..cc93157fa950 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event, event->queue = queue; event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK; if (real_time) { - event->time.time = snd_seq_timer_get_cur_time(q->timer); + event->time.time = snd_seq_timer_get_cur_time(q->timer, true); event->flags |= SNDRV_SEQ_TIME_STAMP_REAL; } else { event->time.tick = snd_seq_timer_get_cur_tick(q->timer); @@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client, tmr = queue->timer; status->events = queue->tickq->cells + queue->timeq->cells; - status->time = snd_seq_timer_get_cur_time(tmr); + status->time = snd_seq_timer_get_cur_time(tmr, true); status->tick = snd_seq_timer_get_cur_tick(tmr); status->running = tmr->running; diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index caf68bf42f13..71a6ea62c3be 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) { unsigned long flags; struct snd_seq_event_cell *cell; + snd_seq_tick_time_t cur_tick; + snd_seq_real_time_t cur_time; if (q == NULL) return; @@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) __again: /* Process tick queue... */ + cur_tick = snd_seq_timer_get_cur_tick(q->timer); for (;;) { - cell = snd_seq_prioq_cell_out(q->tickq, - &q->timer->tick.cur_tick); + cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick); if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); } /* Process time queue... */ + cur_time = snd_seq_timer_get_cur_time(q->timer, false); for (;;) { - cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time); + cell = snd_seq_prioq_cell_out(q->timeq, &cur_time); if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); @@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client) int snd_seq_queue_set_owner(int queueid, int client, int locked) { struct snd_seq_queue *q = queueptr(queueid); + unsigned long flags; if (q == NULL) return -EINVAL; @@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked) return -EPERM; } + spin_lock_irqsave(&q->owner_lock, flags); q->locked = locked ? 
1 : 0; q->owner = client; + spin_unlock_irqrestore(&q->owner_lock, flags); queue_access_unlock(q); queuefree(q); @@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client) unsigned long flags; int i; struct snd_seq_queue *q; + bool matched; for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) { if ((q = queueptr(i)) == NULL) continue; spin_lock_irqsave(&q->owner_lock, flags); - if (q->owner == client) + matched = (q->owner == client); + if (matched) q->klocked = 1; spin_unlock_irqrestore(&q->owner_lock, flags); - if (q->owner == client) { + if (matched) { if (q->timer->running) snd_seq_timer_stop(q->timer); snd_seq_timer_reset(q->timer); @@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry, int i, bpm; struct snd_seq_queue *q; struct snd_seq_timer *tmr; + bool locked; + int owner; for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) { if ((q = queueptr(i)) == NULL) @@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry, else bpm = 0; + spin_lock_irq(&q->owner_lock); + locked = q->locked; + owner = q->owner; + spin_unlock_irq(&q->owner_lock); + snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name); - snd_iprintf(buffer, "owned by client : %d\n", q->owner); - snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free"); + snd_iprintf(buffer, "owned by client : %d\n", owner); + snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free"); snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq)); snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq)); snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped"); diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c index 161f3170bd7e..0b43fc5fe349 100644 --- a/sound/core/seq/seq_timer.c +++ b/sound/core/seq/seq_timer.c @@ -422,14 +422,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr) } /* return current 'real' time. use timeofday() to get better granularity. 
*/ -snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr) +snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr, + bool adjust_ktime) { snd_seq_real_time_t cur_time; unsigned long flags; spin_lock_irqsave(&tmr->lock, flags); cur_time = tmr->cur_time; - if (tmr->running) { + if (adjust_ktime && tmr->running) { struct timespec64 tm; ktime_get_ts64(&tm); @@ -446,7 +447,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr) high PPQ values) */ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr) { - return tmr->tick.cur_tick; + snd_seq_tick_time_t cur_tick; + unsigned long flags; + + spin_lock_irqsave(&tmr->lock, flags); + cur_tick = tmr->tick.cur_tick; + spin_unlock_irqrestore(&tmr->lock, flags); + return cur_tick; } @@ -465,15 +472,19 @@ void snd_seq_info_timer_read(struct snd_info_entry *entry, q = queueptr(idx); if (q == NULL) continue; - if ((tmr = q->timer) == NULL || - (ti = tmr->timeri) == NULL) { - queuefree(q); - continue; - } + mutex_lock(&q->timer_mutex); + tmr = q->timer; + if (!tmr) + goto unlock; + ti = tmr->timeri; + if (!ti) + goto unlock; snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name); resolution = snd_timer_resolution(ti) * tmr->ticks; snd_iprintf(buffer, " Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000); snd_iprintf(buffer, " Skew : %u / %u\n", tmr->skew, tmr->skew_base); +unlock: + mutex_unlock(&q->timer_mutex); queuefree(q); } } diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h index 66c3e344eae3..4bec57df8158 100644 --- a/sound/core/seq/seq_timer.h +++ b/sound/core/seq/seq_timer.h @@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq); int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position); int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position); int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base); -snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr); +snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr, + bool adjust_ktime); snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr); extern int seq_default_timer_class; diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c index 626d87c1539b..77d7037d1476 100644 --- a/sound/core/seq/seq_virmidi.c +++ b/sound/core/seq/seq_virmidi.c @@ -81,6 +81,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) continue; snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream); + snd_midi_event_reset_decode(vmidi->parser); } else { len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev); if (len > 0) diff --git a/sound/core/timer.c b/sound/core/timer.c index 59ae21b0bb93..013f0e69ff0f 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -74,6 +74,9 @@ static LIST_HEAD(snd_timer_slave_list); /* lock for slave active lists */ static DEFINE_SPINLOCK(slave_active_lock); +#define MAX_SLAVE_INSTANCES 1000 +static int num_slaves; + static DEFINE_MUTEX(register_mutex); static int snd_timer_free(struct snd_timer *timer); @@ -252,6 +255,10 @@ int snd_timer_open(struct snd_timer_instance **ti, err = -EINVAL; goto unlock; } + if (num_slaves >= MAX_SLAVE_INSTANCES) { + err = -EBUSY; + goto unlock; + } timeri 
= snd_timer_instance_new(owner, NULL); if (!timeri) { err = -ENOMEM; @@ -261,6 +268,7 @@ int snd_timer_open(struct snd_timer_instance **ti, timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); + num_slaves++; err = snd_timer_check_slave(timeri); if (err < 0) { snd_timer_close_locked(timeri, &card_dev_to_put); @@ -356,6 +364,8 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri, } list_del(&timeri->open_list); + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) + num_slaves--; /* force to stop the timer */ snd_timer_stop(timeri); diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index aee7c04d49e5..b61ba0321a72 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c @@ -915,7 +915,7 @@ static void print_formats(struct snd_dummy *dummy, { int i; - for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) { + for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) { if (dummy->pcm_hw.formats & (1ULL << i)) snd_iprintf(buffer, " %s", snd_pcm_format_name(i)); } diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index 6c1497d9f52b..ce07ea0d4e71 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c @@ -415,15 +415,16 @@ static int make_both_connections(struct snd_bebob *bebob) return 0; } -static void -break_both_connections(struct snd_bebob *bebob) +static void break_both_connections(struct snd_bebob *bebob) { cmp_connection_break(&bebob->in_conn); cmp_connection_break(&bebob->out_conn); - /* These models seems to be in transition state for a longer time. */ - if (bebob->maudio_special_quirk != NULL) - msleep(200); + // These models seem to be in a transition state for a longer time. When + // accessed in this state, any transaction is corrupted. In the worst + // case, the device is going to reboot. + if (bebob->version < 2) + msleep(600); } static int diff --git a/sound/firewire/dice/dice-extension.c b/sound/firewire/dice/dice-extension.c index a63fcbc875ad..02f4a8318e38 100644 --- a/sound/firewire/dice/dice-extension.c +++ b/sound/firewire/dice/dice-extension.c @@ -159,8 +159,11 @@ int snd_dice_detect_extension_formats(struct snd_dice *dice) int j; for (j = i + 1; j < 9; ++j) { - if (pointers[i * 2] == pointers[j * 2]) + if (pointers[i * 2] == pointers[j * 2]) { + // Fallback to limited functionality.
+ err = -ENXIO; goto end; + } } } diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c index 9eab3ad283ce..df6ff2df0124 100644 --- a/sound/firewire/fireface/ff-pcm.c +++ b/sound/firewire/fireface/ff-pcm.c @@ -219,7 +219,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream, mutex_unlock(&ff->mutex); } - return 0; + return err; } static int pcm_hw_free(struct snd_pcm_substream *substream) diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c index ea46fb4c1b5a..126a7bd187bb 100644 --- a/sound/firewire/motu/motu-proc.c +++ b/sound/firewire/motu/motu-proc.c @@ -16,7 +16,7 @@ static const char *const clock_names[] = { [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT] = "S/PDIF on optical interface", [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A] = "S/PDIF on optical interface A", [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B] = "S/PDIF on optical interface B", - [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PCIF on coaxial interface", + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PDIF on coaxial interface", [SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR] = "AESEBU on XLR interface", [SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC] = "Word clock on BNC interface", }; diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c index 7c6d1c277d4d..78d906af9c00 100644 --- a/sound/firewire/oxfw/oxfw-pcm.c +++ b/sound/firewire/oxfw/oxfw-pcm.c @@ -255,7 +255,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream, mutex_unlock(&oxfw->mutex); } - return 0; + return err; } static int pcm_capture_hw_free(struct snd_pcm_substream *substream) diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c index e80bb84c43f6..f823a2ab3544 100644 --- a/sound/firewire/tascam/amdtp-tascam.c +++ b/sound/firewire/tascam/amdtp-tascam.c @@ -157,14 +157,15 @@ static void read_status_messages(struct amdtp_stream *s, if ((before ^ after) & mask) { struct snd_firewire_tascam_change *entry = &tscm->queue[tscm->push_pos]; + unsigned long flag; - spin_lock_irq(&tscm->lock); + spin_lock_irqsave(&tscm->lock, flag); entry->index = index; entry->before = before; entry->after = after; if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT) tscm->push_pos = 0; - spin_unlock_irq(&tscm->lock); + spin_unlock_irqrestore(&tscm->lock, flag); wake_up(&tscm->hwdep_wait); } diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c index cfab60d88c92..09ff209df4a3 100644 --- a/sound/hda/ext/hdac_ext_controller.c +++ b/sound/hda/ext/hdac_ext_controller.c @@ -254,6 +254,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all); int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link) { + unsigned long codec_mask; int ret = 0; mutex_lock(&bus->lock); @@ -280,9 +281,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, * HDA spec section 4.3 - Codec Discovery */ udelay(521); - bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS); - dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask); - snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask); + codec_mask = snd_hdac_chip_readw(bus, STATESTS); + dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask); + snd_hdac_chip_writew(bus, STATESTS, codec_mask); + if (!bus->codec_mask) + bus->codec_mask = codec_mask; } mutex_unlock(&bus->lock); diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index d8fe7ff0cd58..682ed39f79b0 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -96,12 +96,14 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool 
fresh_start) 1 << azx_dev->index, 1 << azx_dev->index); /* set stripe control */ - if (azx_dev->substream) - stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream); - else - stripe_ctl = 0; - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, - stripe_ctl); + if (azx_dev->stripe) { + if (azx_dev->substream) + stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream); + else + stripe_ctl = 0; + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + stripe_ctl); + } /* set DMA start and interrupt mask */ snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_DMA_START | SD_INT_MASK); @@ -118,7 +120,8 @@ void snd_hdac_stream_clear(struct hdac_stream *azx_dev) snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_DMA_START | SD_INT_MASK, 0); snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + if (azx_dev->stripe) + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); azx_dev->running = false; } EXPORT_SYMBOL_GPL(snd_hdac_stream_clear); diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c index 886cb7811bd6..2efee794cac6 100644 --- a/sound/hda/hdmi_chmap.c +++ b/sound/hda/hdmi_chmap.c @@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen) for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) { if (spk_alloc & (1 << i)) - j += snprintf(buf + j, buflen - j, " %s", + j += scnprintf(buf + j, buflen - j, " %s", cea_speaker_allocation_names[i]); } buf[j] = '\0'; /* necessary when j == 0 */ diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c index 78dd213589b4..fa3c39cff5f8 100644 --- a/sound/isa/cs423x/cs4236.c +++ b/sound/isa/cs423x/cs4236.c @@ -278,7 +278,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev) } else { mpu_port[dev] = pnp_port_start(pdev, 0); if (mpu_irq[dev] >= 0 && - pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) { + pnp_irq_valid(pdev, 0) && + pnp_irq(pdev, 0) != (resource_size_t)-1) { mpu_irq[dev] = pnp_irq(pdev, 0); } else { mpu_irq[dev] = -1; /* disable interrupt */ diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index 8272b50b8349..6a8564566375 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c @@ -43,6 +43,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) { struct hda_codec *codec = container_of(dev, struct hda_codec, core); + /* ignore unsol events during shutdown */ + if (codec->bus->shutdown) + return; + if (codec->patch_ops.unsol_event) codec->patch_ops.unsol_event(codec, ev); } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index a2fb19129219..6cb72336433a 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen) for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++) if (pcm & (AC_SUPPCM_BITS_8 << i)) - j += snprintf(buf + j, buflen - j, " %d", bits[i]); + j += scnprintf(buf + j, buflen - j, " %d", bits[i]); buf[j] = '\0'; /* necessary when j == 0 */ } diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 6387c7e90918..76b507058cb4 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -884,7 +884,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr, return -EAGAIN; /* give a chance to retry */ } - dev_WARN(chip->card->dev, + dev_err(chip->card->dev, "azx_get_response timeout, 
switching to single_cmd mode: last cmd=0x%08x\n", bus->last_cmd[addr]); chip->single_cmd = 1; diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index d081fb2880a0..82cf1da2ff12 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen) for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++) if (pcm & (1 << i)) - j += snprintf(buf + j, buflen - j, " %d", + j += scnprintf(buf + j, buflen - j, " %d", alsa_rates[i]); buf[j] = '\0'; /* necessary when j == 0 */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c52419376c74..85beb172d810 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -280,12 +280,13 @@ enum { /* quirks for old Intel chipsets */ #define AZX_DCAPS_INTEL_ICH \ - (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE) + (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\ + AZX_DCAPS_SYNC_WRITE) /* quirks for Intel PCH */ #define AZX_DCAPS_INTEL_PCH_BASE \ (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\ - AZX_DCAPS_SNOOP_TYPE(SCH)) + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) /* PCH up to IVB; no runtime PM; bind with i915 gfx */ #define AZX_DCAPS_INTEL_PCH_NOPM \ @@ -300,13 +301,13 @@ enum { #define AZX_DCAPS_INTEL_HASWELL \ (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ - AZX_DCAPS_SNOOP_TYPE(SCH)) + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */ #define AZX_DCAPS_INTEL_BROADWELL \ (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ - AZX_DCAPS_SNOOP_TYPE(SCH)) + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) #define AZX_DCAPS_INTEL_BAYTRAIL \ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT) @@ -1280,11 +1281,17 @@ static void init_vga_switcheroo(struct azx *chip) { struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct pci_dev *p = get_bound_vga(chip->pci); + struct pci_dev *parent; if (p) { dev_info(chip->card->dev, "Handle vga_switcheroo audio client\n"); hda->use_vga_switcheroo = 1; - chip->bus.keep_power = 1; /* cleared in either gpu_bound op or codec probe */ + + /* cleared in either gpu_bound op or codec probe, or when its + * upstream port has _PR3 (i.e. dGPU). + */ + parent = pci_upstream_bridge(p); + chip->bus.keep_power = parent ? 
!pci_pr3_present(parent) : 1; chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; pci_dev_put(p); } @@ -1382,8 +1389,11 @@ static int azx_free(struct azx *chip) static int azx_dev_disconnect(struct snd_device *device) { struct azx *chip = device->device_data; + struct hdac_bus *bus = azx_bus(chip); chip->bus.shutdown = 1; + cancel_work_sync(&bus->unsol_work); + return 0; } @@ -2146,6 +2156,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */ SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ + SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0), + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ SND_PCI_QUIRK(0x1028, 0x0497, "Dell Precision T3600", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ /* Note the P55A-UD3 and Z87-D3HP share the subsys id for the HDA dev */ @@ -2405,6 +2417,8 @@ static const struct pci_device_id azx_ids[] = { /* Jasperlake */ { PCI_DEVICE(0x8086, 0x38c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4dc8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Tigerlake */ { PCI_DEVICE(0x8086, 0xa0c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c index fcc34417cbce..6dbe99131bc4 100644 --- a/sound/pci/hda/hda_sysfs.c +++ b/sound/pci/hda/hda_sysfs.c @@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev, int i, len = 0; mutex_lock(&codec->user_mutex); snd_array_for_each(&codec->init_verbs, i, v) { - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "0x%02x 0x%03x 0x%04x\n", v->nid, v->verb, v->param); } @@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev, int i, len = 0; mutex_lock(&codec->user_mutex); snd_array_for_each(&codec->hints, i, hint) { - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "%s = %s\n", hint->key, hint->val); } mutex_unlock(&codec->user_mutex); diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 8350954b7986..e5191584638a 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -398,6 +398,7 @@ static int hda_tegra_create(struct snd_card *card, return err; chip->bus.needs_damn_long_delay = 1; + chip->bus.core.aligned_mmio = 1; err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index b7a1abb3e231..adad3651889e 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1180,6 +1180,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), + SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI), SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5), {} @@ -1809,13 +1810,14 @@ struct scp_msg { static void dspio_clear_response_queue(struct hda_codec *codec) { + unsigned long timeout = jiffies + msecs_to_jiffies(1000); unsigned int dummy = 0; - int status = -1; + int status; /* clear all from the response queue */ do { status = dspio_read(codec, &dummy); - } while (status == 0); + } while (status == 0 && time_before(jiffies, timeout)); } static int 
dspio_get_response_data(struct hda_codec *codec) @@ -7588,12 +7590,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec, struct ca0132_spec *spec = codec->spec; codec_dbg(codec, "ca0132_process_dsp_response\n"); + snd_hda_power_up_pm(codec); if (spec->wait_scp) { if (dspio_get_response_data(codec) >= 0) spec->wait_scp = 0; } dspio_clear_response_queue(codec); + snd_hda_power_down_pm(codec); } static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) @@ -7604,11 +7608,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) /* Delay enabling the HP amp, to let the mic-detection * state machine run. */ - cancel_delayed_work(&spec->unsol_hp_work); - schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); tbl = snd_hda_jack_tbl_get(codec, cb->nid); if (tbl) tbl->block_report = 1; + schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); } static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) @@ -8454,12 +8457,25 @@ static void ca0132_reboot_notify(struct hda_codec *codec) codec->patch_ops.free(codec); } +#ifdef CONFIG_PM +static int ca0132_suspend(struct hda_codec *codec) +{ + struct ca0132_spec *spec = codec->spec; + + cancel_delayed_work_sync(&spec->unsol_hp_work); + return 0; +} +#endif + static const struct hda_codec_ops ca0132_patch_ops = { .build_controls = ca0132_build_controls, .build_pcms = ca0132_build_pcms, .init = ca0132_init, .free = ca0132_free, .unsol_event = snd_hda_jack_unsol_event, +#ifdef CONFIG_PM + .suspend = ca0132_suspend, +#endif .reboot_notify = ca0132_reboot_notify, }; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 968d3caab6ac..1e20e85e9b46 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -910,6 +910,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE), @@ -921,6 +922,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x21d2, "Lenovo T420s", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 78bd2e3722c7..307ca1f03676 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -32,6 +32,7 @@ #include #include "hda_local.h" #include "hda_jack.h" +#include "hda_controller.h" static bool static_hdmi_pcm; module_param(static_hdmi_pcm, bool, 0644); @@ -1240,6 +1241,10 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, per_pin->cvt_nid = per_cvt->cvt_nid; hinfo->nid = per_cvt->cvt_nid; + /* flip stripe flag for the 
assigned stream if supported */ + if (get_wcaps(codec, per_cvt->cvt_nid) & AC_WCAP_STRIPE) + azx_stream(get_azx_dev(substream))->stripe = 1; + snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0, AC_VERB_SET_CONNECT_SEL, @@ -1978,6 +1983,8 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, per_cvt->assigned = 0; hinfo->nid = 0; + azx_stream(get_azx_dev(substream))->stripe = 0; + mutex_lock(&spec->pcm_lock); snd_hda_spdif_ctls_unassign(codec, pcm_idx); clear_bit(pcm_idx, &spec->pcm_in_use); @@ -2787,9 +2794,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec) /* parse and post-process for Intel codecs */ static int parse_intel_hdmi(struct hda_codec *codec) { - int err; + int err, retries = 3; + + do { + err = hdmi_parse_codec(codec); + } while (err < 0 && retries--); - err = hdmi_parse_codec(codec); if (err < 0) { generic_spec_free(codec); return err; @@ -3454,26 +3464,6 @@ static int nvhdmi_chmap_validate(struct hdac_chmap *chmap, return 0; } -/* map from pin NID to port; port is 0-based */ -/* for Nvidia: assume widget NID starting from 4, with step 1 (4, 5, 6, ...) */ -static int nvhdmi_pin2port(void *audio_ptr, int pin_nid) -{ - return pin_nid - 4; -} - -/* reverse-map from port to pin NID: see above */ -static int nvhdmi_port2pin(struct hda_codec *codec, int port) -{ - return port + 4; -} - -static const struct drm_audio_component_audio_ops nvhdmi_audio_ops = { - .pin2port = nvhdmi_pin2port, - .pin_eld_notify = generic_acomp_pin_eld_notify, - .master_bind = generic_acomp_master_bind, - .master_unbind = generic_acomp_master_unbind, -}; - static int patch_nvhdmi(struct hda_codec *codec) { struct hdmi_spec *spec; @@ -3492,8 +3482,6 @@ static int patch_nvhdmi(struct hda_codec *codec) codec->link_down_at_suspend = 1; - generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin); - return 0; } @@ -4168,6 +4156,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), +HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 80f66ba85f87..128db2e6bc64 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -367,9 +367,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0215: case 0x10ec0233: case 0x10ec0235: - case 0x10ec0236: case 0x10ec0255: - case 0x10ec0256: case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: @@ -381,6 +379,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0300: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x36, 0x5757); + alc_update_coef_idx(codec, 0x10, 1<<9, 0); + break; case 0x10ec0275: alc_update_coef_idx(codec, 0xe, 0, 1<<0); break; @@ -409,6 +412,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0672: alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */ break; + case 0x10ec0222: case 0x10ec0623: alc_update_coef_idx(codec, 0x19, 1<<13, 0); break; @@ -427,6 +431,7 @@ static void alc_fill_eapd_coef(struct hda_codec 
*codec) break; case 0x10ec0899: case 0x10ec0900: + case 0x10ec0b00: case 0x10ec1168: case 0x10ec1220: alc_update_coef_idx(codec, 0x7, 1<<1, 0); @@ -498,6 +503,7 @@ static void alc_shutup_pins(struct hda_codec *codec) struct alc_spec *spec = codec->spec; switch (codec->core.vendor_id) { + case 0x10ec0283: case 0x10ec0286: case 0x10ec0288: case 0x10ec0298: @@ -943,7 +949,7 @@ struct alc_codec_rename_pci_table { const char *name; }; -static struct alc_codec_rename_table rename_tbl[] = { +static const struct alc_codec_rename_table rename_tbl[] = { { 0x10ec0221, 0xf00f, 0x1003, "ALC231" }, { 0x10ec0269, 0xfff0, 0x3010, "ALC277" }, { 0x10ec0269, 0xf0f0, 0x2010, "ALC259" }, @@ -964,7 +970,7 @@ static struct alc_codec_rename_table rename_tbl[] = { { } /* terminator */ }; -static struct alc_codec_rename_pci_table rename_pci_tbl[] = { +static const struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0280, 0x1028, 0, "ALC3220" }, { 0x10ec0282, 0x1028, 0, "ALC3221" }, { 0x10ec0283, 0x1028, 0, "ALC3223" }, @@ -2441,6 +2447,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), @@ -2522,6 +2532,7 @@ static int patch_alc882(struct hda_codec *codec) case 0x10ec0882: case 0x10ec0885: case 0x10ec0900: + case 0x10ec0b00: case 0x10ec1220: break; default: @@ -2989,7 +3000,7 @@ static void alc269_shutup(struct hda_codec *codec) alc_shutup_pins(codec); } -static struct coef_fw alc282_coefs[] = { +static const struct coef_fw alc282_coefs[] = { WRITE_COEF(0x03, 0x0002), /* Power Down Control */ UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */ WRITE_COEF(0x07, 0x0200), /* DMIC control */ @@ -3101,7 +3112,7 @@ static void alc282_shutup(struct hda_codec *codec) alc_write_coef_idx(codec, 0x78, coef78); } -static struct coef_fw alc283_coefs[] = { +static const struct coef_fw alc283_coefs[] = { WRITE_COEF(0x03, 0x0002), /* Power Down Control */ UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */ WRITE_COEF(0x07, 0x0200), /* DMIC control */ @@ -4177,7 +4188,7 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, } } -static struct coef_fw alc225_pre_hsmode[] = { +static const struct coef_fw alc225_pre_hsmode[] = { UPDATE_COEF(0x4a, 1<<8, 0), UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), UPDATE_COEF(0x63, 3<<14, 3<<14), @@ -4190,7 +4201,7 @@ static struct coef_fw alc225_pre_hsmode[] = { static void alc_headset_mode_unplugged(struct hda_codec *codec) { - static struct coef_fw coef0255[] = { + static const struct coef_fw coef0255[] = { WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ @@ -4198,7 +4209,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) 
WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ {} }; - static struct coef_fw coef0256[] = { + static const struct coef_fw coef0256[] = { WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */ WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ @@ -4206,7 +4217,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ {} }; - static struct coef_fw coef0233[] = { + static const struct coef_fw coef0233[] = { WRITE_COEF(0x1b, 0x0c0b), WRITE_COEF(0x45, 0xc429), UPDATE_COEF(0x35, 0x4000, 0), @@ -4216,7 +4227,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) WRITE_COEF(0x32, 0x42a3), {} }; - static struct coef_fw coef0288[] = { + static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0xfcc0, 0xc400), UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), @@ -4224,18 +4235,18 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) UPDATE_COEF(0x67, 0x2000, 0), {} }; - static struct coef_fw coef0298[] = { + static const struct coef_fw coef0298[] = { UPDATE_COEF(0x19, 0x1300, 0x0300), {} }; - static struct coef_fw coef0292[] = { + static const struct coef_fw coef0292[] = { WRITE_COEF(0x76, 0x000e), WRITE_COEF(0x6c, 0x2400), WRITE_COEF(0x18, 0x7308), WRITE_COEF(0x6b, 0xc429), {} }; - static struct coef_fw coef0293[] = { + static const struct coef_fw coef0293[] = { UPDATE_COEF(0x10, 7<<8, 6<<8), /* SET Line1 JD to 0 */ UPDATE_COEFEX(0x57, 0x05, 1<<15|1<<13, 0x0), /* SET charge pump by verb */ UPDATE_COEFEX(0x57, 0x03, 1<<10, 1<<10), /* SET EN_OSW to 1 */ @@ -4244,16 +4255,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */ {} }; - static struct coef_fw coef0668[] = { + static const struct coef_fw coef0668[] = { WRITE_COEF(0x15, 0x0d40), WRITE_COEF(0xb7, 0x802b), {} }; - static struct coef_fw coef0225[] = { + static const struct coef_fw coef0225[] = { UPDATE_COEF(0x63, 3<<14, 0), {} }; - static struct coef_fw coef0274[] = { + static const struct coef_fw coef0274[] = { UPDATE_COEF(0x4a, 0x0100, 0), UPDATE_COEFEX(0x57, 0x05, 0x4000, 0), UPDATE_COEF(0x6b, 0xf000, 0x5000), @@ -4318,25 +4329,25 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, hda_nid_t mic_pin) { - static struct coef_fw coef0255[] = { + static const struct coef_fw coef0255[] = { WRITE_COEFEX(0x57, 0x03, 0x8aa6), WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ {} }; - static struct coef_fw coef0256[] = { + static const struct coef_fw coef0256[] = { UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/ WRITE_COEFEX(0x57, 0x03, 0x09a3), WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ {} }; - static struct coef_fw coef0233[] = { + static const struct coef_fw coef0233[] = { UPDATE_COEF(0x35, 0, 1<<14), WRITE_COEF(0x06, 0x2100), WRITE_COEF(0x1a, 0x0021), WRITE_COEF(0x26, 0x008c), {} }; - static struct coef_fw coef0288[] = { + static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0x00c0, 0), UPDATE_COEF(0x50, 0x2000, 0), UPDATE_COEF(0x56, 0x0006, 0), @@ -4345,30 +4356,30 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, UPDATE_COEF(0x67, 0x2000, 0x2000), {} }; - static struct coef_fw coef0292[] = { + static const struct coef_fw 
coef0292[] = { WRITE_COEF(0x19, 0xa208), WRITE_COEF(0x2e, 0xacf0), {} }; - static struct coef_fw coef0293[] = { + static const struct coef_fw coef0293[] = { UPDATE_COEFEX(0x57, 0x05, 0, 1<<15|1<<13), /* SET charge pump by verb */ UPDATE_COEFEX(0x57, 0x03, 1<<10, 0), /* SET EN_OSW to 0 */ UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */ {} }; - static struct coef_fw coef0688[] = { + static const struct coef_fw coef0688[] = { WRITE_COEF(0xb7, 0x802b), WRITE_COEF(0xb5, 0x1040), UPDATE_COEF(0xc3, 0, 1<<12), {} }; - static struct coef_fw coef0225[] = { + static const struct coef_fw coef0225[] = { UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), UPDATE_COEF(0x4a, 3<<4, 2<<4), UPDATE_COEF(0x63, 3<<14, 0), {} }; - static struct coef_fw coef0274[] = { + static const struct coef_fw coef0274[] = { UPDATE_COEFEX(0x57, 0x05, 0x4000, 0x4000), UPDATE_COEF(0x4a, 0x0010, 0), UPDATE_COEF(0x6b, 0xf000, 0), @@ -4454,7 +4465,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, static void alc_headset_mode_default(struct hda_codec *codec) { - static struct coef_fw coef0225[] = { + static const struct coef_fw coef0225[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x30<<10), UPDATE_COEF(0x45, 0x3f<<10, 0x31<<10), UPDATE_COEF(0x49, 3<<8, 0<<8), @@ -4463,14 +4474,14 @@ static void alc_headset_mode_default(struct hda_codec *codec) UPDATE_COEF(0x67, 0xf000, 0x3000), {} }; - static struct coef_fw coef0255[] = { + static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xc089), WRITE_COEF(0x45, 0xc489), WRITE_COEFEX(0x57, 0x03, 0x8ea6), WRITE_COEF(0x49, 0x0049), {} }; - static struct coef_fw coef0256[] = { + static const struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xc489), WRITE_COEFEX(0x57, 0x03, 0x0da3), WRITE_COEF(0x49, 0x0049), @@ -4478,12 +4489,12 @@ static void alc_headset_mode_default(struct hda_codec *codec) WRITE_COEF(0x06, 0x6100), {} }; - static struct coef_fw coef0233[] = { + static const struct coef_fw coef0233[] = { WRITE_COEF(0x06, 0x2100), WRITE_COEF(0x32, 0x4ea3), {} }; - static struct coef_fw coef0288[] = { + static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0xfcc0, 0xc400), /* Set to TRS type */ UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), @@ -4491,26 +4502,26 @@ static void alc_headset_mode_default(struct hda_codec *codec) UPDATE_COEF(0x67, 0x2000, 0), {} }; - static struct coef_fw coef0292[] = { + static const struct coef_fw coef0292[] = { WRITE_COEF(0x76, 0x000e), WRITE_COEF(0x6c, 0x2400), WRITE_COEF(0x6b, 0xc429), WRITE_COEF(0x18, 0x7308), {} }; - static struct coef_fw coef0293[] = { + static const struct coef_fw coef0293[] = { UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */ WRITE_COEF(0x45, 0xC429), /* Set to TRS type */ UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */ {} }; - static struct coef_fw coef0688[] = { + static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0041), WRITE_COEF(0x15, 0x0d40), WRITE_COEF(0xb7, 0x802b), {} }; - static struct coef_fw coef0274[] = { + static const struct coef_fw coef0274[] = { WRITE_COEF(0x45, 0x4289), UPDATE_COEF(0x4a, 0x0010, 0x0010), UPDATE_COEF(0x6b, 0x0f00, 0), @@ -4573,53 +4584,53 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) { int val; - static struct coef_fw coef0255[] = { + static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ WRITE_COEF(0x1b, 0x0c2b), WRITE_COEFEX(0x57, 0x03, 0x8ea6), {} }; - static struct coef_fw coef0256[] = { + static const struct coef_fw coef0256[] = { 
WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ WRITE_COEF(0x1b, 0x0e6b), {} }; - static struct coef_fw coef0233[] = { + static const struct coef_fw coef0233[] = { WRITE_COEF(0x45, 0xd429), WRITE_COEF(0x1b, 0x0c2b), WRITE_COEF(0x32, 0x4ea3), {} }; - static struct coef_fw coef0288[] = { + static const struct coef_fw coef0288[] = { UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), UPDATE_COEF(0x66, 0x0008, 0), UPDATE_COEF(0x67, 0x2000, 0), {} }; - static struct coef_fw coef0292[] = { + static const struct coef_fw coef0292[] = { WRITE_COEF(0x6b, 0xd429), WRITE_COEF(0x76, 0x0008), WRITE_COEF(0x18, 0x7388), {} }; - static struct coef_fw coef0293[] = { + static const struct coef_fw coef0293[] = { WRITE_COEF(0x45, 0xd429), /* Set to ctia type */ UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */ {} }; - static struct coef_fw coef0688[] = { + static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0001), WRITE_COEF(0x15, 0x0d60), WRITE_COEF(0xc3, 0x0000), {} }; - static struct coef_fw coef0225_1[] = { + static const struct coef_fw coef0225_1[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10), UPDATE_COEF(0x63, 3<<14, 2<<14), {} }; - static struct coef_fw coef0225_2[] = { + static const struct coef_fw coef0225_2[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10), UPDATE_COEF(0x63, 3<<14, 1<<14), {} @@ -4691,48 +4702,48 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) /* Nokia type */ static void alc_headset_mode_omtp(struct hda_codec *codec) { - static struct coef_fw coef0255[] = { + static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ WRITE_COEF(0x1b, 0x0c2b), WRITE_COEFEX(0x57, 0x03, 0x8ea6), {} }; - static struct coef_fw coef0256[] = { + static const struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ WRITE_COEF(0x1b, 0x0e6b), {} }; - static struct coef_fw coef0233[] = { + static const struct coef_fw coef0233[] = { WRITE_COEF(0x45, 0xe429), WRITE_COEF(0x1b, 0x0c2b), WRITE_COEF(0x32, 0x4ea3), {} }; - static struct coef_fw coef0288[] = { + static const struct coef_fw coef0288[] = { UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), UPDATE_COEF(0x66, 0x0008, 0), UPDATE_COEF(0x67, 0x2000, 0), {} }; - static struct coef_fw coef0292[] = { + static const struct coef_fw coef0292[] = { WRITE_COEF(0x6b, 0xe429), WRITE_COEF(0x76, 0x0008), WRITE_COEF(0x18, 0x7388), {} }; - static struct coef_fw coef0293[] = { + static const struct coef_fw coef0293[] = { WRITE_COEF(0x45, 0xe429), /* Set to omtp type */ UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */ {} }; - static struct coef_fw coef0688[] = { + static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0001), WRITE_COEF(0x15, 0x0d50), WRITE_COEF(0xc3, 0x0000), {} }; - static struct coef_fw coef0225[] = { + static const struct coef_fw coef0225[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10), UPDATE_COEF(0x63, 3<<14, 2<<14), {} @@ -4792,17 +4803,17 @@ static void alc_determine_headset_type(struct hda_codec *codec) int val; bool is_ctia = false; struct alc_spec *spec = codec->spec; - static struct coef_fw coef0255[] = { + static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xd089), /* combo jack auto switch control(Check type)*/ WRITE_COEF(0x49, 0x0149), /* combo jack auto switch control(Vref conteol) */ {} }; - static struct coef_fw coef0288[] = { + static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */ {} }; - static struct coef_fw coef0298[] = { + static const struct coef_fw coef0298[] = { 
UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), UPDATE_COEF(0x66, 0x0008, 0), @@ -4810,19 +4821,19 @@ static void alc_determine_headset_type(struct hda_codec *codec) UPDATE_COEF(0x19, 0x1300, 0x1300), {} }; - static struct coef_fw coef0293[] = { + static const struct coef_fw coef0293[] = { UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */ WRITE_COEF(0x45, 0xD429), /* Set to ctia type */ {} }; - static struct coef_fw coef0688[] = { + static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0001), WRITE_COEF(0xb7, 0x802b), WRITE_COEF(0x15, 0x0d60), WRITE_COEF(0xc3, 0x0c00), {} }; - static struct coef_fw coef0274[] = { + static const struct coef_fw coef0274[] = { UPDATE_COEF(0x4a, 0x0010, 0), UPDATE_COEF(0x4a, 0x8000, 0), WRITE_COEF(0x45, 0xd289), @@ -5109,7 +5120,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, static void alc255_set_default_jack_type(struct hda_codec *codec) { /* Set to iphone type */ - static struct coef_fw alc255fw[] = { + static const struct coef_fw alc255fw[] = { WRITE_COEF(0x1b, 0x880b), WRITE_COEF(0x45, 0xd089), WRITE_COEF(0x1b, 0x080b), @@ -5117,7 +5128,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec) WRITE_COEF(0x1b, 0x0c0b), {} }; - static struct coef_fw alc256fw[] = { + static const struct coef_fw alc256fw[] = { WRITE_COEF(0x1b, 0x884b), WRITE_COEF(0x45, 0xd089), WRITE_COEF(0x1b, 0x084b), @@ -5544,6 +5555,16 @@ static void alc295_fixup_disable_dac3(struct hda_codec *codec, } } +/* force NID 0x17 (Bass Speaker) to DAC1 to share it with the main speaker */ +static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + hda_nid_t conn[1] = { 0x02 }; + snd_hda_override_conn_list(codec, 0x17, 1, conn); + } +} + /* Hook to update amp GPIO4 for automute */ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, struct hda_jack_callback *jack) @@ -5684,8 +5705,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec, break; case HDA_FIXUP_ACT_INIT: switch (codec->core.vendor_id) { + case 0x10ec0215: case 0x10ec0225: + case 0x10ec0285: case 0x10ec0295: + case 0x10ec0289: case 0x10ec0299: alc_write_coef_idx(codec, 0x48, 0xd011); alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); @@ -5831,6 +5855,7 @@ enum { ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ALC288_FIXUP_DELL_XPS_13, ALC288_FIXUP_DISABLE_AAMIX, + ALC292_FIXUP_DELL_E7X_AAMIX, ALC292_FIXUP_DELL_E7X, ALC292_FIXUP_DISABLE_AAMIX, ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, @@ -5846,6 +5871,7 @@ enum { ALC225_FIXUP_DISABLE_MIC_VREF, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC295_FIXUP_DISABLE_DAC3, + ALC285_FIXUP_SPEAKER2_TO_DAC1, ALC280_FIXUP_HP_HEADSET_MIC, ALC221_FIXUP_HP_FRONT_MIC, ALC292_FIXUP_TPT460, @@ -5890,8 +5916,13 @@ enum { ALC256_FIXUP_ASUS_HEADSET_MIC, ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, ALC299_FIXUP_PREDATOR_SPK, - ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC, ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, + ALC289_FIXUP_DELL_SPK2, + ALC289_FIXUP_DUAL_SPK, + ALC294_FIXUP_SPK2_TO_DAC1, + ALC294_FIXUP_ASUS_DUAL_SPK, + ALC285_FIXUP_THINKPAD_HEADSET_JACK, + ALC294_FIXUP_ASUS_HPE, }; static const struct hda_fixup alc269_fixups[] = { @@ -6521,12 +6552,19 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE }, - [ALC292_FIXUP_DELL_E7X] = { + [ALC292_FIXUP_DELL_E7X_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_dell_xps13, .chained = true, .chain_id = ALC292_FIXUP_DISABLE_AAMIX 
}, + [ALC292_FIXUP_DELL_E7X] = { + .type = HDA_FIXUP_FUNC, + .v.func = snd_hda_gen_fixup_micmute_led, + /* micmute fixup must be applied at last */ + .chained_before = true, + .chain_id = ALC292_FIXUP_DELL_E7X_AAMIX, + }, [ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -6645,6 +6683,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc295_fixup_disable_dac3, }, + [ALC285_FIXUP_SPEAKER2_TO_DAC1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI + }, [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -6962,16 +7006,6 @@ static const struct hda_fixup alc269_fixups[] = { { } } }, - [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = { - .type = HDA_FIXUP_PINS, - .v.pins = (const struct hda_pintbl[]) { - { 0x14, 0x411111f0 }, /* disable confusing internal speaker */ - { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */ - { } - }, - .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC - }, [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -6982,6 +7016,51 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE }, + [ALC289_FIXUP_DELL_SPK2] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x17, 0x90170130 }, /* bass spk */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE + }, + [ALC289_FIXUP_DUAL_SPK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC289_FIXUP_DELL_SPK2 + }, + [ALC294_FIXUP_SPK2_TO_DAC1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, + [ALC294_FIXUP_ASUS_DUAL_SPK] = { + .type = HDA_FIXUP_FUNC, + /* The GPIO must be pulled to initialize the AMP */ + .v.func = alc_fixup_gpio4, + .chained = true, + .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 + }, + [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_headset_jack, + .chained = true, + .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1 + }, + [ALC294_FIXUP_ASUS_HPE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Set EAPD high */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 }, + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7054,6 +7133,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", 
ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -7141,8 +7224,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), - SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), @@ -7213,6 +7297,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), @@ -7248,6 +7334,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ + SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), #if 0 @@ -7396,6 +7483,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, + {.id = ALC285_FIXUP_SPEAKER2_TO_DAC1, .name = "alc285-speaker2-to-dac1"}, {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, {.id = ALC298_FIXUP_SPK_VOLUME, .name = "alc298-spk-volume"}, @@ -7512,20 +7600,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x19, 0x02a11020}, {0x1a, 0x02a11030}, {0x21, 0x0221101f}), - SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, - {0x12, 0x90a60140}, - {0x14, 0x90170110}, - {0x21, 0x02211020}), - SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, - {0x12, 0x90a60140}, - {0x14, 0x90170150}, - {0x21, 0x02211020}), - SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, - {0x21, 0x02211020}), - SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, - {0x12, 0x40000000}, - {0x14, 0x90170110}, - {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x21, 0x02211020}), @@ -7660,11 +7734,6 
@@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x1a, 0x90a70130}, {0x1b, 0x90170110}, {0x21, 0x03211020}), - SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, - {0x12, 0xb7a60130}, - {0x13, 0xb8a61140}, - {0x16, 0x90170110}, - {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, {0x14, 0x90170110}, @@ -7852,6 +7921,12 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, {0x19, 0x40000000}, {0x1b, 0x40000000}), + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, + {0x19, 0x40000000}, + {0x1a, 0x40000000}), + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x19, 0x40000000}, + {0x1a, 0x40000000}), {} }; @@ -8005,6 +8080,8 @@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; break; case 0x10ec0225: + codec->power_save_node = 1; + /* fall through */ case 0x10ec0295: case 0x10ec0299: spec->codec_variant = ALC269_TYPE_ALC225; @@ -8455,6 +8532,8 @@ static void alc662_fixup_aspire_ethos_hp(struct hda_codec *codec, case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_jack_detect_enable_callback(codec, 0x1b, alc662_aspire_ethos_mute_speakers); + /* subwoofer needs an extra GPIO setting to become audible */ + alc_setup_gpio(codec, 0x02); break; case HDA_FIXUP_ACT_INIT: /* Make sure to start in a correct state, i.e. if @@ -8465,7 +8544,30 @@ static void alc662_fixup_aspire_ethos_hp(struct hda_codec *codec, } } -static struct coef_fw alc668_coefs[] = { +static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + static const struct hda_pintbl pincfgs[] = { + { 0x19, 0x02a11040 }, /* use as headset mic, with its own jack detect */ + { 0x1b, 0x0181304f }, + { } + }; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + spec->gen.mixer_nid = 0; + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; + snd_hda_apply_pincfgs(codec, pincfgs); + break; + case HDA_FIXUP_ACT_INIT: + alc_write_coef_idx(codec, 0x19, 0xa054); + break; + } +} + +static const struct coef_fw alc668_coefs[] = { WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), WRITE_COEF(0x08, 0x0031), WRITE_COEF(0x0a, 0x0060), WRITE_COEF(0x0b, 0x0), @@ -8537,8 +8639,10 @@ enum { ALC662_FIXUP_USI_HEADSET_MODE, ALC662_FIXUP_LENOVO_MULTI_CODECS, ALC669_FIXUP_ACER_ASPIRE_ETHOS, - ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER, ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET, + ALC671_FIXUP_HP_HEADSET_MIC2, + ALC662_FIXUP_ACER_X2660G_HEADSET_MODE, + ALC662_FIXUP_ACER_NITRO_HEADSET_MODE, }; static const struct hda_fixup alc662_fixups[] = { @@ -8869,18 +8973,6 @@ static const struct hda_fixup alc662_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc662_fixup_aspire_ethos_hp, }, - [ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER] = { - .type = HDA_FIXUP_VERBS, - /* subwoofer needs an extra GPIO setting to become audible */ - .v.verbs = (const struct hda_verb[]) { - {0x01, AC_VERB_SET_GPIO_MASK, 0x02}, - {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02}, - {0x01, AC_VERB_SET_GPIO_DATA, 0x00}, - { } - }, - .chained = true, - .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET - }, [ALC669_FIXUP_ACER_ASPIRE_ETHOS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -8890,7 +8982,30 @@ static const 
struct hda_fixup alc662_fixups[] = { { } }, .chained = true, - .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER + .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET + }, + [ALC671_FIXUP_HP_HEADSET_MIC2] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc671_fixup_hp_headset_mic2, + }, + [ALC662_FIXUP_ACER_X2660G_HEADSET_MODE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x02a1113c }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC662_FIXUP_USI_FUNC + }, + [ALC662_FIXUP_ACER_NITRO_HEADSET_MODE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */ + { 0x1b, 0x0221144f }, + { } + }, + .chained = true, + .chain_id = ALC662_FIXUP_USI_FUNC }, }; @@ -8903,6 +9018,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), + SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE), + SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE), SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13), @@ -9074,6 +9191,23 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = { {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x15, 0x0321101f}), + SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2, + {0x14, 0x01014010}, + {0x17, 0x90170150}, + {0x19, 0x02a11060}, + {0x1b, 0x01813030}, + {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2, + {0x14, 0x01014010}, + {0x18, 0x01a19040}, + {0x1b, 0x01813030}, + {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2, + {0x14, 0x01014020}, + {0x17, 0x90170110}, + {0x18, 0x01a19050}, + {0x1b, 0x01813040}, + {0x21, 0x02211030}), {} }; @@ -9261,6 +9395,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662), HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882), HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882), + HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882), HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882), HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882), {} /* terminator */ diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c index e62c11816683..f360b33a1042 100644 --- a/sound/pci/ice1712/ice1724.c +++ b/sound/pci/ice1712/ice1724.c @@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, unsigned long flags; unsigned char mclk_change; unsigned int i, old_rate; + bool call_set_rate = false; if (rate > ice->hw_rates->list[ice->hw_rates->count - 1]) return -EINVAL; @@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, * setting clock rate for internal clock mode */ old_rate = ice->get_rate(ice); if (force || (old_rate != rate)) - ice->set_rate(ice, rate); + call_set_rate = true; else if (rate == ice->cur_rate) { spin_unlock_irqrestore(&ice->reg_lock, flags); return 0; @@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, 
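/*
 * The "use as headset mic, without its own jack detect" pincfgs above encode
 * that property in the default-config word itself. Decoding 0x02a1113c by
 * the HD Audio default-configuration layout (an informal reading, field
 * names per the HDA spec):
 *
 *   [31:30] port connectivity = 0x0 -> jack
 *   [29:24] location          = 0x02 -> front
 *   [23:20] default device    = 0xa -> mic in
 *   [19:16] connection type   = 0x1 -> 1/8" jack
 *   [15:12] color             = 0x1 -> black
 *   [11:8]  misc              = 0x1 -> jack-detect override (no presence)
 *   [7:4]/[3:0] assoc/seq     = 0x3 / 0xc
 */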
unsigned int rate, } ice->cur_rate = rate; + spin_unlock_irqrestore(&ice->reg_lock, flags); + + if (call_set_rate) + ice->set_rate(ice, rate); /* setting master clock */ mclk_change = ice->set_mclk(ice, rate); - spin_unlock_irqrestore(&ice->reg_lock, flags); - if (mclk_change && ice->gpio.i2s_mclk_changed) ice->gpio.i2s_mclk_changed(ice); if (ice->gpio.set_pro_rate) diff --git a/sound/sh/aica.c b/sound/sh/aica.c index 52e9cfb4f819..8421b2f9c9f3 100644 --- a/sound/sh/aica.c +++ b/sound/sh/aica.c @@ -101,10 +101,10 @@ static void spu_memset(u32 toi, u32 what, int length) } /* spu_memload - write to SPU address space */ -static void spu_memload(u32 toi, void *from, int length) +static void spu_memload(u32 toi, const void *from, int length) { unsigned long flags; - u32 *froml = from; + const u32 *froml = from; u32 __iomem *to = (u32 __iomem *) (SPU_MEMORY_BASE + toi); int i; u32 val; diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c index ed877a138965..7c46494466ff 100644 --- a/sound/sh/sh_dac_audio.c +++ b/sound/sh/sh_dac_audio.c @@ -175,7 +175,6 @@ static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream, { /* channel is not used (interleaved data) */ struct snd_sh_dac *chip = snd_pcm_substream_chip(substream); - struct snd_pcm_runtime *runtime = substream->runtime; if (copy_from_user_toio(chip->data_buffer + pos, src, count)) return -EFAULT; @@ -195,7 +194,6 @@ static int snd_sh_dac_pcm_copy_kernel(struct snd_pcm_substream *substream, { /* channel is not used (interleaved data) */ struct snd_sh_dac *chip = snd_pcm_substream_chip(substream); - struct snd_pcm_runtime *runtime = substream->runtime; memcpy_toio(chip->data_buffer + pos, src, count); chip->buffer_end = chip->data_buffer + pos + count; @@ -214,7 +212,6 @@ static int snd_sh_dac_pcm_silence(struct snd_pcm_substream *substream, { /* channel is not used (interleaved data) */ struct snd_sh_dac *chip = snd_pcm_substream_chip(substream); - struct snd_pcm_runtime *runtime = substream->runtime; memset_io(chip->data_buffer + pos, 0, count); chip->buffer_end = chip->data_buffer + pos + count; diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig index f118c229ed82..71f2d42188c4 100644 --- a/sound/soc/atmel/Kconfig +++ b/sound/soc/atmel/Kconfig @@ -10,15 +10,17 @@ config SND_ATMEL_SOC if SND_ATMEL_SOC config SND_ATMEL_SOC_PDC - tristate + bool depends on HAS_DMA config SND_ATMEL_SOC_DMA - tristate + bool select SND_SOC_GENERIC_DMAENGINE_PCM config SND_ATMEL_SOC_SSC tristate + select SND_ATMEL_SOC_DMA + select SND_ATMEL_SOC_PDC config SND_ATMEL_SOC_SSC_PDC tristate "SoC PCM DAI support for AT91 SSC controller using PDC" diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile index 1f6890ed3738..c7d2989791be 100644 --- a/sound/soc/atmel/Makefile +++ b/sound/soc/atmel/Makefile @@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o snd-soc-atmel-i2s-objs := atmel-i2s.o snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o -obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o -obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o +# pdc and dma need to both be built-in if any user of +# ssc is built-in. 
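/*
 * The snd_vt1724_set_pro_rate() rework above exists because ice->set_rate()
 * and ice->set_mclk() may sleep, while the old code called them with
 * reg_lock held (a spinlock, interrupts off). The decision is now taken
 * under the lock and the sleeping call is made after it is dropped. A
 * generic sketch of the pattern, with hypothetical names (struct chip and
 * its members are placeholders):
 */
static void example_set_rate(struct chip *chip, unsigned int rate)
{
	unsigned long flags;
	bool call_set_rate = false;

	spin_lock_irqsave(&chip->lock, flags);
	if (chip->cur_rate != rate) {		/* decide under the lock... */
		chip->cur_rate = rate;
		call_set_rate = true;
	}
	spin_unlock_irqrestore(&chip->lock, flags);

	if (call_set_rate)			/* ...but sleep outside it */
		chip->set_rate(chip, rate);
}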
+ifdef CONFIG_SND_ATMEL_SOC_PDC +obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o +endif +ifdef CONFIG_SND_ATMEL_SOC_DMA +obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o +endif obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index 4570f662fb48..d78f4d856aaf 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c @@ -498,7 +498,9 @@ static int hdac_hda_dev_remove(struct hdac_device *hdev) struct hdac_hda_priv *hda_pvt; hda_pvt = dev_get_drvdata(&hdev->dev); - cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work); + if (hda_pvt && hda_pvt->codec.registered) + cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work); + return 0; } diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index f6bf4cfbea23..45da2b51543e 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2103,10 +2103,8 @@ static void max98090_pll_det_disable_work(struct work_struct *work) M98090_IULK_MASK, 0); } -static void max98090_pll_work(struct work_struct *work) +static void max98090_pll_work(struct max98090_priv *max98090) { - struct max98090_priv *max98090 = - container_of(work, struct max98090_priv, pll_work); struct snd_soc_component *component = max98090->component; if (!snd_soc_component_is_active(component)) @@ -2259,7 +2257,7 @@ static irqreturn_t max98090_interrupt(int irq, void *data) if (active & M98090_ULK_MASK) { dev_dbg(component->dev, "M98090_ULK_MASK\n"); - schedule_work(&max98090->pll_work); + max98090_pll_work(max98090); } if (active & M98090_JDET_MASK) { @@ -2422,7 +2420,6 @@ static int max98090_probe(struct snd_soc_component *component) max98090_pll_det_enable_work); INIT_WORK(&max98090->pll_det_disable_work, max98090_pll_det_disable_work); - INIT_WORK(&max98090->pll_work, max98090_pll_work); /* Enable jack detection */ snd_soc_component_write(component, M98090_REG_JACK_DETECT, @@ -2475,7 +2472,6 @@ static void max98090_remove(struct snd_soc_component *component) cancel_delayed_work_sync(&max98090->jack_work); cancel_delayed_work_sync(&max98090->pll_det_enable_work); cancel_work_sync(&max98090->pll_det_disable_work); - cancel_work_sync(&max98090->pll_work); max98090->component = NULL; } diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 57965cd678b4..a197114b0dad 100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h @@ -1530,7 +1530,6 @@ struct max98090_priv { struct delayed_work jack_work; struct delayed_work pll_det_enable_work; struct work_struct pll_det_disable_work; - struct work_struct pll_work; struct snd_soc_jack *jack; unsigned int dai_fmt; int tdm_slots; diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index e3d311fb510e..84289ebeae87 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -391,9 +391,6 @@ static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_component switch (event) { case SND_SOC_DAPM_PRE_PMU: - snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS, - MICB_1_INT_TX2_INT_RBIAS_EN_MASK, - MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE); snd_soc_component_update_bits(component, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0); snd_soc_component_update_bits(component, CDC_A_MICB_1_EN, MICB_1_EN_OPA_STG2_TAIL_CURR_MASK, @@ -443,6 +440,14 @@ static int 
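/*
 * On max98090 the PLL-unlock workaround moves from a scheduled work item to
 * a direct call: the interrupt handler runs as a threaded IRQ, so it is
 * already in sleepable process context and the extra work_struct only left
 * a work item that could still be pending at remove time. Threaded
 * registration looks roughly like this (illustrative, not the exact
 * in-tree call site):
 */
ret = devm_request_threaded_irq(dev, irq,
				NULL,			/* no hard-IRQ half */
				max98090_interrupt,	/* sleepable thread fn */
				IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				"max98090_interrupt", max98090);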
pm8916_wcd_analog_enable_micbias_int1(struct struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component); + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS, + MICB_1_INT_TX1_INT_RBIAS_EN_MASK, + MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE); + break; + } + return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg, wcd->micbias1_cap_mode); } @@ -553,6 +558,11 @@ static int pm8916_wcd_analog_enable_micbias_int2(struct struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component); switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS, + MICB_1_INT_TX2_INT_RBIAS_EN_MASK, + MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE); + break; case SND_SOC_DAPM_POST_PMU: pm8916_mbhc_configure_bias(wcd, true); break; @@ -888,10 +898,10 @@ static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0, pm8916_wcd_analog_enable_micbias_ext1, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0, pm8916_wcd_analog_enable_micbias_ext2, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0, pm8916_wcd_analog_enable_adc, diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index 58b2468fb2a7..09fccacadd6b 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -586,6 +586,12 @@ static int msm8916_wcd_digital_enable_interpolator( snd_soc_component_write(component, rx_gain_reg[w->shift], snd_soc_component_read32(component, rx_gain_reg[w->shift])); break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL, + 1 << w->shift, 1 << w->shift); + snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL, + 1 << w->shift, 0x0); + break; } return 0; } diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index 861210f6bf4f..4cbef9affffd 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -1564,13 +1564,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) } pcm512x->sclk = devm_clk_get(dev, NULL); - if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err; + } if (!IS_ERR(pcm512x->sclk)) { ret = clk_prepare_enable(pcm512x->sclk); if (ret != 0) { dev_err(dev, "Failed to enable SCLK: %d\n", ret); - return ret; + goto err; } } diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index adbae1f36a8a..747ca248bf10 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -2432,6 +2432,13 @@ static void rt5640_disable_jack_detect(struct snd_soc_component *component) { struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component); + /* + * soc_remove_component() force-disables jack and thus rt5640->jack + * could be NULL at the time of driver's module unloading. 
+ */ + if (!rt5640->jack) + return; + disable_irq(rt5640->irq); rt5640_cancel_work(rt5640); diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 1c06b3b9218c..19662ee330d6 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -3270,6 +3270,9 @@ static void rt5645_jack_detect_work(struct work_struct *work) snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE); return; + case 4: + val = snd_soc_component_read32(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020; + break; default: /* read rt5645 jd1_1 status */ val = snd_soc_component_read32(rt5645->component, RT5645_INT_IRQ_ST) & 0x1000; break; @@ -3603,7 +3606,7 @@ static const struct rt5645_platform_data intel_braswell_platform_data = { static const struct rt5645_platform_data buddy_platform_data = { .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5, .dmic2_data_pin = RT5645_DMIC_DATA_IN2P, - .jd_mode = 3, + .jd_mode = 4, .level_trigger_irq = true, }; @@ -3999,6 +4002,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c, RT5645_JD1_MODE_1); break; case 3: + case 4: regmap_update_bits(rt5645->regmap, RT5645_A_JD_CTRL1, RT5645_JD1_MODE_MASK, RT5645_JD1_MODE_2); diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 315a3d39bc09..8bc9450da79c 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -298,6 +298,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg) case RT5677_I2C_MASTER_CTRL7: case RT5677_I2C_MASTER_CTRL8: case RT5677_HAP_GENE_CTRL2: + case RT5677_PWR_ANLG2: /* Modified by DSP firmware */ case RT5677_PWR_DSP_ST: case RT5677_PRIV_DATA: case RT5677_ASRC_22: diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index c50b75ce82e0..05e883a65d7a 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -72,6 +72,7 @@ struct rt5682_priv { static const struct reg_sequence patch_list[] = { {RT5682_HP_IMP_SENS_CTRL_19, 0x1000}, {RT5682_DAC_ADC_DIG_VOL1, 0xa020}, + {RT5682_I2C_CTRL, 0x000f}, }; static const struct reg_default rt5682_reg[] = { @@ -2481,6 +2482,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682) mutex_lock(&rt5682->calibrate_mutex); rt5682_reset(rt5682->regmap); + regmap_write(rt5682->regmap, RT5682_I2C_CTRL, 0x000f); regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2af); usleep_range(15000, 20000); regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2af); diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index aa1f9637d895..e949b372cead 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1344,7 +1344,8 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component) * if vddio == vdda the source of charge pump should be * assigned manually to VDDIO */ - if (vddio == vdda) { + if (regulator_is_equal(sgtl5000->supplies[VDDA].consumer, + sgtl5000->supplies[VDDIO].consumer)) { lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << SGTL5000_VDDC_MAN_ASSN_SHIFT; diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c index cf64e109c658..7b087d94141b 100644 --- a/sound/soc/codecs/wm2200.c +++ b/sound/soc/codecs/wm2200.c @@ -2410,6 +2410,8 @@ static int wm2200_i2c_probe(struct i2c_client *i2c, err_pm_runtime: pm_runtime_disable(&i2c->dev); + if (i2c->irq) + free_irq(i2c->irq, wm2200); err_reset: if (wm2200->pdata.reset) gpio_set_value_cansleep(wm2200->pdata.reset, 0); @@ -2426,12 +2428,15 @@ static int wm2200_i2c_remove(struct i2c_client *i2c) { struct wm2200_priv *wm2200 = 
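/*
 * The sgtl5000 change above replaces a voltage comparison with
 * regulator_is_equal(): two physically distinct rails can report the same
 * voltage, so "vddio == vdda" misidentifies them, while regulator_is_equal()
 * checks that both consumers resolve to the same physical supply. The
 * failure mode, sketched with plausible values:
 */
int vdda  = regulator_get_voltage(sgtl5000->supplies[VDDA].consumer);
int vddio = regulator_get_voltage(sgtl5000->supplies[VDDIO].consumer);
/* two separate 3.3 V rails: vdda == vddio == 3300000, yet the supplies differ */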
i2c_get_clientdata(i2c); + pm_runtime_disable(&i2c->dev); if (i2c->irq) free_irq(i2c->irq, wm2200); if (wm2200->pdata.reset) gpio_set_value_cansleep(wm2200->pdata.reset, 0); if (wm2200->pdata.ldo_ena) gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0); + regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies), + wm2200->core_supplies); return 0; } diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c index 4af0e519e623..91cc63c5a51f 100644 --- a/sound/soc/codecs/wm5100.c +++ b/sound/soc/codecs/wm5100.c @@ -2617,6 +2617,7 @@ static int wm5100_i2c_probe(struct i2c_client *i2c, return ret; err_reset: + pm_runtime_disable(&i2c->dev); if (i2c->irq) free_irq(i2c->irq, wm5100); wm5100_free_gpio(i2c); @@ -2640,6 +2641,7 @@ static int wm5100_i2c_remove(struct i2c_client *i2c) { struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c); + pm_runtime_disable(&i2c->dev); if (i2c->irq) free_irq(i2c->irq, wm5100); wm5100_free_gpio(i2c); diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index bcb3c9d5abf0..9e8c564f6e9c 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -1917,6 +1917,7 @@ static int wm8904_set_bias_level(struct snd_soc_component *component, snd_soc_component_update_bits(component, WM8904_BIAS_CONTROL_0, WM8904_BIAS_ENA, 0); + snd_soc_component_write(component, WM8904_SW_RESET_AND_ID, 0); regcache_cache_only(wm8904->regmap, true); regcache_mark_dirty(wm8904->regmap); diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 3e5c69fbc33a..d9d59f45833f 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -2788,7 +2788,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref, if (target % Fref == 0) { fll_div->theta = 0; - fll_div->lambda = 0; + fll_div->lambda = 1; } else { gcd_fll = gcd(target, fratio * Fref); @@ -2858,7 +2858,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s return -EINVAL; } - if (fll_div.theta || fll_div.lambda) + if (fll_div.theta) fll1 |= WM8962_FLL_FRAC; /* Stop the FLL while we reconfigure */ diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c index c7e4e9757dce..5faecbeb5497 100644 --- a/sound/soc/fsl/fsl_audmix.c +++ b/sound/soc/fsl/fsl_audmix.c @@ -286,6 +286,7 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct fsl_audmix *priv = snd_soc_dai_get_drvdata(dai); + unsigned long lock_flags; /* Capture stream shall not be handled */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) @@ -295,12 +296,16 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + spin_lock_irqsave(&priv->lock, lock_flags); priv->tdms |= BIT(dai->driver->id); + spin_unlock_irqrestore(&priv->lock, lock_flags); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + spin_lock_irqsave(&priv->lock, lock_flags); priv->tdms &= ~BIT(dai->driver->id); + spin_unlock_irqrestore(&priv->lock, lock_flags); break; default: return -EINVAL; @@ -491,6 +496,7 @@ static int fsl_audmix_probe(struct platform_device *pdev) return PTR_ERR(priv->ipg_clk); } + spin_lock_init(&priv->lock); platform_set_drvdata(pdev, priv); pm_runtime_enable(dev); @@ -499,15 +505,20 @@ static int fsl_audmix_probe(struct platform_device *pdev) ARRAY_SIZE(fsl_audmix_dai)); if (ret) { dev_err(dev, "failed to register ASoC DAI\n"); - return ret; + goto 
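/*
 * In the wm8962 FLL hunks above, the N.K ratio is N + theta/lambda. For an
 * integer ratio the remainder theta is 0, but lambda is the denominator of
 * the fractional term and must not be programmed as 0, so it is now
 * initialised to 1 and fractional mode is keyed off theta alone:
 */
if (target % Fref == 0) {		/* integer ratio: no remainder */
	fll_div->theta = 0;
	fll_div->lambda = 1;		/* denominator, never 0 */
}
/* later, when programming the FLL: */
if (fll_div.theta)			/* fractional mode only with a remainder */
	fll1 |= WM8962_FLL_FRAC;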
err_disable_pm; } priv->pdev = platform_device_register_data(dev, mdrv, 0, NULL, 0); if (IS_ERR(priv->pdev)) { ret = PTR_ERR(priv->pdev); dev_err(dev, "failed to register platform %s: %d\n", mdrv, ret); + goto err_disable_pm; } + return 0; + +err_disable_pm: + pm_runtime_disable(dev); return ret; } @@ -515,6 +526,8 @@ static int fsl_audmix_remove(struct platform_device *pdev) { struct fsl_audmix *priv = dev_get_drvdata(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (priv->pdev) platform_device_unregister(priv->pdev); diff --git a/sound/soc/fsl/fsl_audmix.h b/sound/soc/fsl/fsl_audmix.h index 7812ffec45c5..479f05695d53 100644 --- a/sound/soc/fsl/fsl_audmix.h +++ b/sound/soc/fsl/fsl_audmix.h @@ -96,6 +96,7 @@ struct fsl_audmix { struct platform_device *pdev; struct regmap *regmap; struct clk *ipg_clk; + spinlock_t lock; /* Protect tdms */ u8 tdms; }; diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index a78e4ab478df..c7a49d03463a 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -33,6 +33,7 @@ * @fsysclk: system clock source to derive HCK, SCK and FS * @spbaclk: SPBA clock (optional, depending on SoC design) * @task: tasklet to handle the reset operation + * @lock: spin lock between hw_reset() and trigger() * @fifo_depth: depth of tx/rx FIFO * @slot_width: width of each DAI slot * @slots: number of slots @@ -56,6 +57,7 @@ struct fsl_esai { struct clk *fsysclk; struct clk *spbaclk; struct tasklet_struct task; + spinlock_t lock; /* Protect hw_reset and trigger */ u32 fifo_depth; u32 slot_width; u32 slots; @@ -676,8 +678,10 @@ static void fsl_esai_hw_reset(unsigned long arg) { struct fsl_esai *esai_priv = (struct fsl_esai *)arg; bool tx = true, rx = false, enabled[2]; + unsigned long lock_flags; u32 tfcr, rfcr; + spin_lock_irqsave(&esai_priv->lock, lock_flags); /* Save the registers */ regmap_read(esai_priv->regmap, REG_ESAI_TFCR, &tfcr); regmap_read(esai_priv->regmap, REG_ESAI_RFCR, &rfcr); @@ -715,6 +719,8 @@ static void fsl_esai_hw_reset(unsigned long arg) fsl_esai_trigger_start(esai_priv, tx); if (enabled[rx]) fsl_esai_trigger_start(esai_priv, rx); + + spin_unlock_irqrestore(&esai_priv->lock, lock_flags); } static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, @@ -722,6 +728,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, { struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai); bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; + unsigned long lock_flags; esai_priv->channels[tx] = substream->runtime->channels; @@ -729,12 +736,16 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + spin_lock_irqsave(&esai_priv->lock, lock_flags); fsl_esai_trigger_start(esai_priv, tx); + spin_unlock_irqrestore(&esai_priv->lock, lock_flags); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + spin_lock_irqsave(&esai_priv->lock, lock_flags); fsl_esai_trigger_stop(esai_priv, tx); + spin_unlock_irqrestore(&esai_priv->lock, lock_flags); break; default: return -EINVAL; @@ -1002,6 +1013,7 @@ static int fsl_esai_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, esai_priv); + spin_lock_init(&esai_priv->lock); ret = fsl_esai_hw_init(esai_priv); if (ret) return ret; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index b517e4bc1b87..41b83ecaf008 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ 
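/*
 * The fsl_audmix and fsl_esai hunks above add spinlocks because trigger()
 * runs in atomic context and, for ESAI, races the hw_reset() tasklet, so a
 * mutex is not an option. The unprotected read-modify-write of priv->tdms
 * is a textbook lost update:
 *
 *   CPU0 (DAI 0 start)            CPU1 (DAI 1 start)
 *   tmp = priv->tdms;             tmp = priv->tdms;
 *   tmp |= BIT(0);                tmp |= BIT(1);
 *   priv->tdms = tmp;             priv->tdms = tmp;   <- BIT(0) lost
 *
 * Taking priv->lock around each |= / &= makes the update atomic.
 */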
-1019,12 +1019,24 @@ static int fsl_sai_probe(struct platform_device *pdev) ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component, &fsl_sai_dai, 1); if (ret) - return ret; + goto err_pm_disable; - if (sai->soc_data->use_imx_pcm) - return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE); - else - return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); + if (sai->soc_data->use_imx_pcm) { + ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE); + if (ret) + goto err_pm_disable; + } else { + ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); + if (ret) + goto err_pm_disable; + } + + return ret; + +err_pm_disable: + pm_runtime_disable(&pdev->dev); + + return ret; } static int fsl_sai_remove(struct platform_device *pdev) diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 01c99750212a..ef493cae78ff 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -59,6 +59,9 @@ config SND_SOC_INTEL_HASWELL If you have a Intel Haswell or Broadwell platform connected to an I2S codec, then enable this option by saying Y or m. This is typically used for Chromebooks. This is a recommended option. + This option is mutually exclusive with the SOF support on + Broadwell. If you want to enable SOF on Broadwell, you need to + deselect this option first. config SND_SOC_INTEL_BAYTRAIL tristate "Baytrail (legacy) Platforms" diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 46612331f5ea..54e97455d7f6 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -442,7 +442,8 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"), DMI_MATCH(DMI_PRODUCT_NAME, "NB41"), }, - .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP + .driver_data = (void *)(BYT_CHT_ES8316_SSP0 + | BYT_CHT_ES8316_INTMIC_IN2_MAP | BYT_CHT_ES8316_JD_INVERTED), }, { /* Teclast X98 Plus II */ diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 9c1aa4ec9cba..243f683bc02a 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -405,10 +405,12 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), }, - .driver_data = (void *)(BYT_RT5640_IN1_MAP | - BYT_RT5640_MCLK_EN | - BYT_RT5640_SSP0_AIF1), - + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), }, { .matches = { @@ -705,13 +707,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_MCLK_EN), }, { + /* Teclast X89 */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), DMI_MATCH(DMI_BOARD_NAME, "tPAD"), }, .driver_data = (void *)(BYT_RT5640_IN3_MAP | - BYT_RT5640_MCLK_EN | - BYT_RT5640_SSP0_AIF1), + BYT_RT5640_JD_SRC_JD1_IN4P | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_1P0 | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), }, { /* Toshiba Satellite Click Mini L9W-B */ .matches = { diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c index 8879c3be29d5..c68a5b85a4a0 100644 --- a/sound/soc/intel/boards/cht_bsw_rt5645.c +++ b/sound/soc/intel/boards/cht_bsw_rt5645.c @@ -48,6 +48,7 @@ struct cht_mc_private { #define CHT_RT5645_SSP2_AIF2 BIT(16) /* default is using AIF1 */ #define CHT_RT5645_SSP0_AIF1 BIT(17) #define CHT_RT5645_SSP0_AIF2 
BIT(18) +#define CHT_RT5645_PMC_PLT_CLK_0 BIT(19) static unsigned long cht_rt5645_quirk = 0; @@ -59,6 +60,8 @@ static void log_quirks(struct device *dev) dev_info(dev, "quirk SSP0_AIF1 enabled"); if (cht_rt5645_quirk & CHT_RT5645_SSP0_AIF2) dev_info(dev, "quirk SSP0_AIF2 enabled"); + if (cht_rt5645_quirk & CHT_RT5645_PMC_PLT_CLK_0) + dev_info(dev, "quirk PMC_PLT_CLK_0 enabled"); } static int platform_clock_control(struct snd_soc_dapm_widget *w, @@ -226,15 +229,21 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream, return 0; } -/* uncomment when we have a real quirk static int cht_rt5645_quirk_cb(const struct dmi_system_id *id) { cht_rt5645_quirk = (unsigned long)id->driver_data; return 1; } -*/ static const struct dmi_system_id cht_rt5645_quirk_table[] = { + { + /* Strago family Chromebooks */ + .callback = cht_rt5645_quirk_cb, + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), + }, + .driver_data = (void *)CHT_RT5645_PMC_PLT_CLK_0, + }, { }, }; @@ -526,6 +535,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev) int dai_index = 0; int ret_val = 0; int i; + const char *mclk_name; drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); if (!drv) @@ -662,11 +672,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev) if (ret_val) return ret_val; - drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3"); + if (cht_rt5645_quirk & CHT_RT5645_PMC_PLT_CLK_0) + mclk_name = "pmc_plt_clk_0"; + else + mclk_name = "pmc_plt_clk_3"; + + drv->mclk = devm_clk_get(&pdev->dev, mclk_name); if (IS_ERR(drv->mclk)) { - dev_err(&pdev->dev, - "Failed to get MCLK from pmc_plt_clk_3: %ld\n", - PTR_ERR(drv->mclk)); + dev_err(&pdev->dev, "Failed to get MCLK from %s: %ld\n", + mclk_name, PTR_ERR(drv->mclk)); return PTR_ERR(drv->mclk); } diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c index 74dda8784f1a..67b276a65a8d 100644 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c @@ -400,6 +400,9 @@ static int kabylake_dmic_startup(struct snd_pcm_substream *substream) snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, dmic_constraints); + runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; + snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16); + return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_rates); } diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.c b/sound/soc/intel/boards/skl_hda_dsp_common.c index 58409b6e476e..e3d405e57c5f 100644 --- a/sound/soc/intel/boards/skl_hda_dsp_common.c +++ b/sound/soc/intel/boards/skl_hda_dsp_common.c @@ -38,16 +38,19 @@ int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device) return 0; } -SND_SOC_DAILINK_DEFS(idisp1, - DAILINK_COMP_ARRAY(COMP_CPU("iDisp1 Pin")), +SND_SOC_DAILINK_DEF(idisp1_cpu, + DAILINK_COMP_ARRAY(COMP_CPU("iDisp1 Pin"))); +SND_SOC_DAILINK_DEF(idisp1_codec, DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi1"))); -SND_SOC_DAILINK_DEFS(idisp2, - DAILINK_COMP_ARRAY(COMP_CPU("iDisp2 Pin")), +SND_SOC_DAILINK_DEF(idisp2_cpu, + DAILINK_COMP_ARRAY(COMP_CPU("iDisp2 Pin"))); +SND_SOC_DAILINK_DEF(idisp2_codec, DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi2"))); -SND_SOC_DAILINK_DEFS(idisp3, - DAILINK_COMP_ARRAY(COMP_CPU("iDisp3 Pin")), +SND_SOC_DAILINK_DEF(idisp3_cpu, + DAILINK_COMP_ARRAY(COMP_CPU("iDisp3 Pin"))); +SND_SOC_DAILINK_DEF(idisp3_codec, DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", 
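/*
 * The Strago entry above revives the quirk path that had been commented
 * out: dmi_check_system() walks the table and invokes the callback of each
 * matching entry, which latches driver_data into the module-wide quirk
 * word. The shape of such a callback (names as in the driver):
 */
static int cht_rt5645_quirk_cb(const struct dmi_system_id *id)
{
	cht_rt5645_quirk = (unsigned long)id->driver_data;
	return 1;	/* non-zero stops the table scan at this match */
}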
"intel-hdmi-hifi3"))); SND_SOC_DAILINK_DEF(analog_cpu, @@ -80,21 +83,21 @@ struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS] = { .id = 1, .dpcm_playback = 1, .no_pcm = 1, - SND_SOC_DAILINK_REG(idisp1), + SND_SOC_DAILINK_REG(idisp1_cpu, idisp1_codec, platform), }, { .name = "iDisp2", .id = 2, .dpcm_playback = 1, .no_pcm = 1, - SND_SOC_DAILINK_REG(idisp2), + SND_SOC_DAILINK_REG(idisp2_cpu, idisp2_codec, platform), }, { .name = "iDisp3", .id = 3, .dpcm_playback = 1, .no_pcm = 1, - SND_SOC_DAILINK_REG(idisp3), + SND_SOC_DAILINK_REG(idisp3_cpu, idisp3_codec, platform), }, { .name = "Analog Playback and Capture", diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c index 4f6e58c3954a..06b7d6c6c9a0 100644 --- a/sound/soc/intel/boards/sof_rt5682.c +++ b/sound/soc/intel/boards/sof_rt5682.c @@ -34,6 +34,10 @@ #define SOF_RT5682_SSP_AMP(quirk) \ (((quirk) << SOF_RT5682_SSP_AMP_SHIFT) & SOF_RT5682_SSP_AMP_MASK) #define SOF_RT5682_MCLK_BYTCHT_EN BIT(9) +#define SOF_RT5682_NUM_HDMIDEV_SHIFT 10 +#define SOF_RT5682_NUM_HDMIDEV_MASK (GENMASK(12, 10)) +#define SOF_RT5682_NUM_HDMIDEV(quirk) \ + ((quirk << SOF_RT5682_NUM_HDMIDEV_SHIFT) & SOF_RT5682_NUM_HDMIDEV_MASK) /* Default: MCLK on, MCLK 19.2M, SSP0 */ static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN | @@ -585,6 +589,19 @@ static int sof_audio_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; + if (pdev->id_entry && pdev->id_entry->driver_data) + sof_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data; + + dmi_check_system(sof_rt5682_quirk_table); + + mach = (&pdev->dev)->platform_data; + + /* A speaker amp might not be present when the quirk claims one is. + * Detect this via whether the machine driver match includes quirk_data. + */ + if ((sof_rt5682_quirk & SOF_SPEAKER_AMP_PRESENT) && !mach->quirk_data) + sof_rt5682_quirk &= ~SOF_SPEAKER_AMP_PRESENT; + if (soc_intel_is_byt() || soc_intel_is_cht()) { is_legacy_cpu = 1; dmic_be_num = 0; @@ -595,11 +612,13 @@ static int sof_audio_probe(struct platform_device *pdev) SOF_RT5682_SSP_CODEC(2); } else { dmic_be_num = 2; - hdmi_num = 3; + hdmi_num = (sof_rt5682_quirk & SOF_RT5682_NUM_HDMIDEV_MASK) >> + SOF_RT5682_NUM_HDMIDEV_SHIFT; + /* default number of HDMI DAI's */ + if (!hdmi_num) + hdmi_num = 3; } - dmi_check_system(sof_rt5682_quirk_table); - /* need to get main clock from pmc */ if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) { ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3"); @@ -643,7 +662,6 @@ static int sof_audio_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ctx->hdmi_pcm_list); sof_audio_card_rt5682.dev = &pdev->dev; - mach = (&pdev->dev)->platform_data; /* set platform name for each dailink */ ret = snd_soc_fixup_dai_links_platform_name(&sof_audio_card_rt5682, @@ -672,6 +690,21 @@ static int sof_rt5682_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id board_ids[] = { + { + .name = "sof_rt5682", + }, + { + .name = "tgl_max98357a_rt5682", + .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN | + SOF_RT5682_SSP_CODEC(0) | + SOF_SPEAKER_AMP_PRESENT | + SOF_RT5682_SSP_AMP(1) | + SOF_RT5682_NUM_HDMIDEV(4)), + }, + { } +}; + static struct platform_driver sof_audio = { .probe = sof_audio_probe, .remove = sof_rt5682_remove, @@ -679,6 +712,7 @@ static struct platform_driver sof_audio = { .name = "sof_rt5682", .pm = &snd_soc_pm_ops, }, + .id_table = board_ids, }; module_platform_driver(sof_audio) @@ -688,3 +722,4 @@ MODULE_AUTHOR("Bard Liao "); MODULE_AUTHOR("Sathya Prakash M R 
"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sof_rt5682"); +MODULE_ALIAS("platform:tgl_max98357a_rt5682"); diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c index 3466675f2678..a15aa2ffa681 100644 --- a/sound/soc/intel/skylake/skl-debug.c +++ b/sound/soc/intel/skylake/skl-debug.c @@ -34,8 +34,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf, int i; ssize_t ret = 0; - for (i = 0; i < max_pin; i++) - ret += snprintf(buf + size, MOD_BUF - size, + for (i = 0; i < max_pin; i++) { + ret += scnprintf(buf + size, MOD_BUF - size, "%s %d\n\tModule %d\n\tInstance %d\n\t" "In-used %s\n\tType %s\n" "\tState %d\n\tIndex %d\n", @@ -45,13 +45,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf, m_pin[i].in_use ? "Used" : "Unused", m_pin[i].is_dynamic ? "Dynamic" : "Static", m_pin[i].pin_state, i); + size += ret; + } return ret; } static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf, ssize_t size, bool direction) { - return snprintf(buf + size, MOD_BUF - size, + return scnprintf(buf + size, MOD_BUF - size, "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t" "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t" "Sample Type %d\n\tCh Map %#x\n", @@ -75,16 +77,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; - ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n" + ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n" "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid, mconfig->id.module_id, mconfig->id.instance_id, mconfig->id.pvt_id); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n", res->cpc, res->ibs, res->obs); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "Module data:\n\tCore %d\n\tIn queue %d\n\t" "Out queue %d\n\tType %s\n", mconfig->core_id, mconfig->max_in_queue, @@ -94,38 +96,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf, ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true); ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "Fixup:\n\tParams %#x\n\tConverter %#x\n", mconfig->params_fixup, mconfig->converter); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n", mconfig->dev_type, mconfig->vbus_id, mconfig->hw_conn_type, mconfig->time_slot); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t" "Pages %#x\n", mconfig->pipe->ppl_id, mconfig->pipe->pipe_priority, mconfig->pipe->conn_type, mconfig->pipe->memory_pages); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n", mconfig->pipe->p_params->host_dma_id, mconfig->pipe->p_params->link_dma_id); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n", mconfig->pipe->p_params->ch, mconfig->pipe->p_params->s_freq, mconfig->pipe->p_params->s_fmt); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "\tLink %#x\n\tStream %#x\n", mconfig->pipe->p_params->linktype, mconfig->pipe->p_params->stream); - ret += 
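/*
 * The snprintf() -> scnprintf() conversions below fix the accumulate-offset
 * idiom: snprintf() returns the length the output *would* have taken, so
 * once the buffer fills, ret can exceed the buffer size, buf + ret points
 * past the end, and size - ret underflows to a huge size_t. scnprintf()
 * returns the bytes actually written, so the idiom stays bounded:
 */
ret += snprintf(buf + ret, size - ret, "%s", s);  /* ret may grow past size */
ret += scnprintf(buf + ret, size - ret, "%s", s); /* ret never exceeds size */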
snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "\tState %d\n\tPassthru %s\n", mconfig->pipe->state, mconfig->pipe->passthru ? "true" : "false"); @@ -135,7 +137,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf, ret += skl_print_pins(mconfig->m_out_pin, buf, mconfig->max_out_queue, ret, false); - ret += snprintf(buf + ret, MOD_BUF - ret, + ret += scnprintf(buf + ret, MOD_BUF - ret, "Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t" "Homogeneous Output %s\n\tIn Queue Mask %d\n\t" "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t" @@ -191,7 +193,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf, __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2); for (offset = 0; offset < FW_REG_SIZE; offset += 16) { - ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset); + ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset); hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4, tmp + ret, FW_REG_BUF - ret, 0); ret += strlen(tmp + ret); diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c index 1c0e5226cb5b..bd43885f3805 100644 --- a/sound/soc/intel/skylake/skl-ssp-clk.c +++ b/sound/soc/intel/skylake/skl-ssp-clk.c @@ -384,9 +384,11 @@ static int skl_clk_dev_probe(struct platform_device *pdev) &clks[i], clk_pdata, i); if (IS_ERR(data->clk[data->avail_clk_cnt])) { - ret = PTR_ERR(data->clk[data->avail_clk_cnt++]); + ret = PTR_ERR(data->clk[data->avail_clk_cnt]); goto err_unreg_skl_clk; } + + data->avail_clk_cnt++; } platform_set_drvdata(pdev, data); diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 13408de34055..0bbd86390be5 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -83,7 +83,7 @@ #define JZ_AIC_I2S_STATUS_BUSY BIT(2) #define JZ_AIC_CLK_DIV_MASK 0xf -#define I2SDIV_DV_SHIFT 8 +#define I2SDIV_DV_SHIFT 0 #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT) #define I2SDIV_IDV_SHIFT 8 #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT) diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c index 5a3749938900..d286dff3171d 100644 --- a/sound/soc/meson/axg-fifo.c +++ b/sound/soc/meson/axg-fifo.c @@ -108,10 +108,12 @@ static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss, { struct snd_pcm_runtime *runtime = ss->runtime; struct axg_fifo *fifo = axg_fifo_data(ss); + unsigned int burst_num, period, threshold; dma_addr_t end_ptr; - unsigned int burst_num; int ret; + period = params_period_bytes(params); + ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params)); if (ret < 0) return ret; @@ -122,9 +124,25 @@ static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss, regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr); /* Setup interrupt periodicity */ - burst_num = params_period_bytes(params) / AXG_FIFO_BURST; + burst_num = period / AXG_FIFO_BURST; regmap_write(fifo->map, FIFO_INT_ADDR, burst_num); + /* + * Start the fifo request on the smallest of the following: + * - Half the fifo size + * - Half the period size + */ + threshold = min(period / 2, + (unsigned int)AXG_FIFO_MIN_DEPTH / 2); + + /* + * With the threshold in bytes, register value is: + * V = (threshold / burst) - 1 + */ + threshold /= AXG_FIFO_BURST; + regmap_field_write(fifo->field_threshold, + threshold ? 
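/*
 * The jz4740 change above is a plain field-offset fix: in the divider
 * register, DV occupies bits 3:0 while IDV occupies bits 11:8. Both shifts
 * were defined as 8, so the DV mask landed on top of IDV and the playback
 * divider was never written to its own bits:
 *
 *   #define I2SDIV_DV_SHIFT  0    (bits  3:0, was wrongly 8)
 *   #define I2SDIV_IDV_SHIFT 8    (bits 11:8)
 */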
threshold - 1 : 0); + /* Enable block count irq */ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), @@ -360,6 +378,11 @@ int axg_fifo_probe(struct platform_device *pdev) return fifo->irq; } + fifo->field_threshold = + devm_regmap_field_alloc(dev, fifo->map, data->field_threshold); + if (IS_ERR(fifo->field_threshold)) + return PTR_ERR(fifo->field_threshold); + return devm_snd_soc_register_component(dev, data->component_drv, data->dai_drv, 1); } diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h index bb1e2ce50256..ab546a3cf940 100644 --- a/sound/soc/meson/axg-fifo.h +++ b/sound/soc/meson/axg-fifo.h @@ -9,7 +9,9 @@ struct clk; struct platform_device; +struct reg_field; struct regmap; +struct regmap_field; struct reset_control; struct snd_soc_component_driver; @@ -50,8 +52,6 @@ struct snd_soc_pcm_runtime; #define CTRL1_STATUS2_SEL_MASK GENMASK(11, 8) #define CTRL1_STATUS2_SEL(x) ((x) << 8) #define STATUS2_SEL_DDR_READ 0 -#define CTRL1_THRESHOLD_MASK GENMASK(23, 16) -#define CTRL1_THRESHOLD(x) ((x) << 16) #define CTRL1_FRDDR_DEPTH_MASK GENMASK(31, 24) #define CTRL1_FRDDR_DEPTH(x) ((x) << 24) #define FIFO_START_ADDR 0x08 @@ -67,12 +67,14 @@ struct axg_fifo { struct regmap *map; struct clk *pclk; struct reset_control *arb; + struct regmap_field *field_threshold; int irq; }; struct axg_fifo_match_data { const struct snd_soc_component_driver *component_drv; struct snd_soc_dai_driver *dai_drv; + struct reg_field field_threshold; }; extern const struct snd_pcm_ops axg_fifo_pcm_ops; diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c index 6ab111c31b28..09773a9ae964 100644 --- a/sound/soc/meson/axg-frddr.c +++ b/sound/soc/meson/axg-frddr.c @@ -50,7 +50,7 @@ static int axg_frddr_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai); - unsigned int fifo_depth, fifo_threshold; + unsigned int fifo_depth; int ret; /* Enable pclk to access registers and clock the fifo ip */ @@ -68,11 +68,8 @@ static int axg_frddr_dai_startup(struct snd_pcm_substream *substream, * Depth and threshold are zero based. 
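 * The hw_params() threshold math above, worked through once for a
 * 4096-byte period (assuming the usual AXG constants, AXG_FIFO_BURST = 8
 * and AXG_FIFO_MIN_DEPTH = 512 bytes):
 *
 *   threshold = min(4096 / 2, 512 / 2) = 256 bytes
 *   register  = 256 / AXG_FIFO_BURST - 1 = 31
 *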
*/ fifo_depth = AXG_FIFO_MIN_CNT - 1; - fifo_threshold = (AXG_FIFO_MIN_CNT / 2) - 1; - regmap_update_bits(fifo->map, FIFO_CTRL1, - CTRL1_FRDDR_DEPTH_MASK | CTRL1_THRESHOLD_MASK, - CTRL1_FRDDR_DEPTH(fifo_depth) | - CTRL1_THRESHOLD(fifo_threshold)); + regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH_MASK, + CTRL1_FRDDR_DEPTH(fifo_depth)); return 0; } @@ -153,8 +150,9 @@ static const struct snd_soc_component_driver axg_frddr_component_drv = { }; static const struct axg_fifo_match_data axg_frddr_match_data = { - .component_drv = &axg_frddr_component_drv, - .dai_drv = &axg_frddr_dai_drv + .field_threshold = REG_FIELD(FIFO_CTRL1, 16, 23), + .component_drv = &axg_frddr_component_drv, + .dai_drv = &axg_frddr_dai_drv }; static const struct snd_soc_dai_ops g12a_frddr_ops = { @@ -271,8 +269,9 @@ static const struct snd_soc_component_driver g12a_frddr_component_drv = { }; static const struct axg_fifo_match_data g12a_frddr_match_data = { - .component_drv = &g12a_frddr_component_drv, - .dai_drv = &g12a_frddr_dai_drv + .field_threshold = REG_FIELD(FIFO_CTRL1, 16, 23), + .component_drv = &g12a_frddr_component_drv, + .dai_drv = &g12a_frddr_dai_drv }; /* On SM1, the output selection in on CTRL2 */ @@ -335,8 +334,9 @@ static const struct snd_soc_component_driver sm1_frddr_component_drv = { }; static const struct axg_fifo_match_data sm1_frddr_match_data = { - .component_drv = &sm1_frddr_component_drv, - .dai_drv = &g12a_frddr_dai_drv + .field_threshold = REG_FIELD(FIFO_CTRL1, 16, 23), + .component_drv = &sm1_frddr_component_drv, + .dai_drv = &g12a_frddr_dai_drv }; static const struct of_device_id axg_frddr_of_match[] = { diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c index c8ea2145f576..ecf41c7549a6 100644 --- a/sound/soc/meson/axg-toddr.c +++ b/sound/soc/meson/axg-toddr.c @@ -89,7 +89,6 @@ static int axg_toddr_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai); - unsigned int fifo_threshold; int ret; /* Enable pclk to access registers and clock the fifo ip */ @@ -107,11 +106,6 @@ static int axg_toddr_dai_startup(struct snd_pcm_substream *substream, /* Apply single buffer mode to the interface */ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_PP_MODE, 0); - /* TODDR does not have a configurable fifo depth */ - fifo_threshold = AXG_FIFO_MIN_CNT - 1; - regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_THRESHOLD_MASK, - CTRL1_THRESHOLD(fifo_threshold)); - return 0; } @@ -185,8 +179,9 @@ static const struct snd_soc_component_driver axg_toddr_component_drv = { }; static const struct axg_fifo_match_data axg_toddr_match_data = { - .component_drv = &axg_toddr_component_drv, - .dai_drv = &axg_toddr_dai_drv + .field_threshold = REG_FIELD(FIFO_CTRL1, 16, 23), + .component_drv = &axg_toddr_component_drv, + .dai_drv = &axg_toddr_dai_drv }; static const struct snd_soc_dai_ops g12a_toddr_ops = { @@ -218,8 +213,9 @@ static const struct snd_soc_component_driver g12a_toddr_component_drv = { }; static const struct axg_fifo_match_data g12a_toddr_match_data = { - .component_drv = &g12a_toddr_component_drv, - .dai_drv = &g12a_toddr_dai_drv + .field_threshold = REG_FIELD(FIFO_CTRL1, 16, 23), + .component_drv = &g12a_toddr_component_drv, + .dai_drv = &g12a_toddr_dai_drv }; static const char * const sm1_toddr_sel_texts[] = { @@ -282,8 +278,9 @@ static const struct snd_soc_component_driver sm1_toddr_component_drv = { }; static const struct axg_fifo_match_data sm1_toddr_match_data = { - .component_drv = 
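/*
 * The REG_FIELD() entries above are the motivation for the regmap_field
 * conversion: the threshold field sits at FIFO_CTRL1 bits 23:16 on
 * AXG/G12A but SM1 TODDR widens it to bits 23:12, so one fixed
 * CTRL1_THRESHOLD_MASK cannot serve both. A regmap_field captures the
 * per-SoC position once and the common code just writes values:
 */
field = devm_regmap_field_alloc(dev, map, REG_FIELD(FIFO_CTRL1, 16, 23));
if (IS_ERR(field))
	return PTR_ERR(field);
regmap_field_write(field, val);	/* regmap applies the shift and mask */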
&sm1_toddr_component_drv, - .dai_drv = &g12a_toddr_dai_drv + .field_threshold = REG_FIELD(FIFO_CTRL1, 12, 23), + .component_drv = &sm1_toddr_component_drv, + .dai_drv = &g12a_toddr_dai_drv }; static const struct of_device_id axg_toddr_of_match[] = { diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index e9596c2096cd..a6c1cf987e6e 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -376,6 +376,17 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io) */ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io) { + static const u32 dalign_values[8][2] = { + {0x76543210, 0x67452301}, + {0x00000032, 0x00000023}, + {0x00007654, 0x00006745}, + {0x00000076, 0x00000067}, + {0xfedcba98, 0xefcdab89}, + {0x000000ba, 0x000000ab}, + {0x0000fedc, 0x0000efcd}, + {0x000000fe, 0x000000ef}, + }; + int id = 0, inv; struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io); struct rsnd_mod *target; struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); @@ -411,13 +422,18 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io) target = cmd ? cmd : ssiu; } + if (mod == ssiu) + id = rsnd_mod_id_sub(mod); + /* Non target mod or non 16bit needs normal DALIGN */ if ((snd_pcm_format_width(runtime->format) != 16) || (mod != target)) - return 0x76543210; + inv = 0; /* Target mod needs inverted DALIGN when 16bit */ else - return 0x67452301; + inv = 1; + + return dalign_values[id][inv]; } u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 88978a3036c4..9d3b546bae7b 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1886,6 +1886,8 @@ match: /* convert non BE into BE */ dai_link->no_pcm = 1; + dai_link->dpcm_playback = 1; + dai_link->dpcm_capture = 1; /* override any BE fixups */ dai_link->be_hw_params_fixup = diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index b6378f025836..ebd785f9aa46 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3888,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w, runtime->rate = params_rate(params); out: - if (ret < 0) - kfree(runtime); - kfree(params); return ret; } @@ -4752,7 +4749,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm) continue; if (w->power) { dapm_seq_insert(w, &down_list, false); - w->power = 0; + w->new_power = 0; powerdown = 1; } } diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c index a71d2340eb05..b5748dcd490f 100644 --- a/sound/soc/soc-jack.c +++ b/sound/soc/soc-jack.c @@ -82,10 +82,9 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) unsigned int sync = 0; int enable; - trace_snd_soc_jack_report(jack, mask, status); - if (!jack) return; + trace_snd_soc_jack_report(jack, mask, status); dapm = &jack->card->dapm; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index b600d3eaaf5c..d978df95c5c6 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -877,6 +877,11 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, int i, ret = 0; mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass); + + ret = soc_pcm_params_symmetry(substream, params); + if (ret) + goto out; + if (rtd->dai_link->ops->hw_params) { ret = rtd->dai_link->ops->hw_params(substream, params); if (ret < 0) { @@ -958,9 +963,6 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, } component = NULL; - ret = soc_pcm_params_symmetry(substream, params); - if (ret) - goto 
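/*
 * In rsnd_get_dalign() above, the hard-coded pair 0x76543210/0x67452301 is
 * replaced by a table indexed by the SSIU sub-id and by whether a 16-bit
 * swap is needed. Reading each DALIGN nibble as a byte-lane index, most
 * significant first: 0x76543210 maps every lane to itself, while
 * 0x67452301 swaps each adjacent byte pair, which is the 16-bit sample
 * swap. The narrower per-SSIU entries (0x00000032, ...) appear to cover
 * busif slots that only span the low lanes (an informal reading of the
 * table, not a statement from the patch).
 */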
component_err; out: mutex_unlock(&rtd->card->pcm_mutex); return ret; @@ -1146,7 +1148,9 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe, { struct snd_soc_dpcm *dpcm; unsigned long flags; +#ifdef CONFIG_DEBUG_FS char *name; +#endif /* only add new dpcms */ for_each_dpcm_be(fe, stream, dpcm) { @@ -1385,6 +1389,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget *widget; struct snd_soc_dai *dai; int prune = 0; + int do_prune; /* Destroy any old FE <--> BE connections */ for_each_dpcm_be(fe, stream, dpcm) { @@ -1398,13 +1403,16 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream, continue; /* is there a valid CODEC DAI widget for this BE */ + do_prune = 1; for_each_rtd_codec_dai(dpcm->be, i, dai) { widget = dai_get_widget(dai, stream); /* prune the BE if it's no longer in our active list */ if (widget && widget_in_list(list, widget)) - continue; + do_prune = 0; } + if (!do_prune) + continue; dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n", stream ? "capture" : "playback", @@ -2289,42 +2297,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, } EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); +static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream, + int cmd, bool fe_first) +{ + struct snd_soc_pcm_runtime *fe = substream->private_data; + int ret; + + /* call trigger on the frontend before the backend. */ + if (fe_first) { + dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n", + fe->dai_link->name, cmd); + + ret = soc_pcm_trigger(substream, cmd); + if (ret < 0) + return ret; + + ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); + return ret; + } + + /* call trigger on the frontend after the backend. */ + ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); + if (ret < 0) + return ret; + + dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n", + fe->dai_link->name, cmd); + + ret = soc_pcm_trigger(substream, cmd); + + return ret; +} + static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *fe = substream->private_data; - int stream = substream->stream, ret; + int stream = substream->stream; + int ret = 0; enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; switch (trigger) { case SND_SOC_DPCM_TRIGGER_PRE: - /* call trigger on the frontend before the backend. */ - - dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n", - fe->dai_link->name, cmd); - - ret = soc_pcm_trigger(substream, cmd); - if (ret < 0) { - dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret); - goto out; + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + ret = dpcm_dai_trigger_fe_be(substream, cmd, true); + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + ret = dpcm_dai_trigger_fe_be(substream, cmd, false); + break; + default: + ret = -EINVAL; + break; } - - ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); break; case SND_SOC_DPCM_TRIGGER_POST: - /* call trigger on the frontend after the backend. 
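/*
 * dpcm_dai_trigger_fe_be() above factors the FE/BE ordering so it can be
 * inverted per command. The resulting order, read straight from the two
 * switch statements:
 *
 *   trigger config   START/RESUME/PAUSE_RELEASE   STOP/SUSPEND/PAUSE_PUSH
 *   TRIGGER_PRE      FE, then BEs                 BEs, then FE
 *   TRIGGER_POST     BEs, then FE                 FE, then BEs
 *
 * That is, the configured order applies to start-type commands and is
 * reversed for stop-type ones, and the "trigger FE failed" reporting
 * collapses into a single place after the switch.
 */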
*/ - - ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); - if (ret < 0) { - dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret); - goto out; + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + ret = dpcm_dai_trigger_fe_be(substream, cmd, false); + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + ret = dpcm_dai_trigger_fe_be(substream, cmd, true); + break; + default: + ret = -EINVAL; + break; } - - dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n", - fe->dai_link->name, cmd); - - ret = soc_pcm_trigger(substream, cmd); break; case SND_SOC_DPCM_TRIGGER_BESPOKE: /* bespoke trigger() - handles both FE and BEs */ @@ -2333,10 +2380,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) fe->dai_link->name, cmd); ret = soc_pcm_bespoke_trigger(substream, cmd); - if (ret < 0) { - dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret); - goto out; - } break; default: dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd, @@ -2345,6 +2388,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) goto out; } + if (ret < 0) { + dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n", + cmd, ret); + goto out; + } + switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: @@ -3120,16 +3169,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, unsigned long flags; /* FE state */ - offset += snprintf(buf + offset, size - offset, + offset += scnprintf(buf + offset, size - offset, "[%s - %s]\n", fe->dai_link->name, stream ? "Capture" : "Playback"); - offset += snprintf(buf + offset, size - offset, "State: %s\n", + offset += scnprintf(buf + offset, size - offset, "State: %s\n", dpcm_state_string(fe->dpcm[stream].state)); if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) && (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP)) - offset += snprintf(buf + offset, size - offset, + offset += scnprintf(buf + offset, size - offset, "Hardware Params: " "Format = %s, Channels = %d, Rate = %d\n", snd_pcm_format_name(params_format(params)), @@ -3137,10 +3186,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, params_rate(params)); /* BEs state */ - offset += snprintf(buf + offset, size - offset, "Backends:\n"); + offset += scnprintf(buf + offset, size - offset, "Backends:\n"); if (list_empty(&fe->dpcm[stream].be_clients)) { - offset += snprintf(buf + offset, size - offset, + offset += scnprintf(buf + offset, size - offset, " No active DSP links\n"); goto out; } @@ -3150,16 +3199,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, struct snd_soc_pcm_runtime *be = dpcm->be; params = &dpcm->hw_params; - offset += snprintf(buf + offset, size - offset, + offset += scnprintf(buf + offset, size - offset, "- %s\n", be->dai_link->name); - offset += snprintf(buf + offset, size - offset, + offset += scnprintf(buf + offset, size - offset, " State: %s\n", dpcm_state_string(be->dpcm[stream].state)); if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP)) - offset += snprintf(buf + offset, size - offset, + offset += scnprintf(buf + offset, size - offset, " Hardware Params: " "Format = %s, Channels = %d, Rate = %d\n", snd_pcm_format_name(params_format(params)), diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 0fd032914a31..b19ecaf0febf 100644 --- a/sound/soc/soc-topology.c 
+++ b/sound/soc/soc-topology.c @@ -548,12 +548,12 @@ static void remove_link(struct snd_soc_component *comp, if (dobj->ops && dobj->ops->link_unload) dobj->ops->link_unload(comp, dobj); + list_del(&dobj->list); + snd_soc_remove_dai_link(comp->card, link); + kfree(link->name); kfree(link->stream_name); kfree(link->cpus->dai_name); - - list_del(&dobj->list); - snd_soc_remove_dai_link(comp->card, link); kfree(link); } @@ -604,9 +604,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, ext_ops = tplg->bytes_ext_ops; num_ops = tplg->bytes_ext_ops_count; for (i = 0; i < num_ops; i++) { - if (!sbe->put && ext_ops[i].id == be->ext_ops.put) + if (!sbe->put && + ext_ops[i].id == le32_to_cpu(be->ext_ops.put)) sbe->put = ext_ops[i].put; - if (!sbe->get && ext_ops[i].id == be->ext_ops.get) + if (!sbe->get && + ext_ops[i].id == le32_to_cpu(be->ext_ops.get)) sbe->get = ext_ops[i].get; } @@ -621,11 +623,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, num_ops = tplg->io_ops_count; for (i = 0; i < num_ops; i++) { - if (k->put == NULL && ops[i].id == hdr->ops.put) + if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put)) k->put = ops[i].put; - if (k->get == NULL && ops[i].id == hdr->ops.get) + if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get)) k->get = ops[i].get; - if (k->info == NULL && ops[i].id == hdr->ops.info) + if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info)) k->info = ops[i].info; } @@ -638,11 +640,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, num_ops = ARRAY_SIZE(io_ops); for (i = 0; i < num_ops; i++) { - if (k->put == NULL && ops[i].id == hdr->ops.put) + if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put)) k->put = ops[i].put; - if (k->get == NULL && ops[i].id == hdr->ops.get) + if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get)) k->get = ops[i].get; - if (k->info == NULL && ops[i].id == hdr->ops.info) + if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info)) k->info = ops[i].info; } @@ -931,7 +933,7 @@ static int soc_tplg_denum_create_texts(struct soc_enum *se, if (se->dobj.control.dtexts == NULL) return -ENOMEM; - for (i = 0; i < ec->items; i++) { + for (i = 0; i < le32_to_cpu(ec->items); i++) { if (strnlen(ec->texts[i], SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == SNDRV_CTL_ELEM_ID_NAME_MAXLEN) { @@ -1325,7 +1327,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( if (kc[i].name == NULL) goto err_sm; kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; - kc[i].access = mc->hdr.access; + kc[i].access = le32_to_cpu(mc->hdr.access); /* we only support FL/FR channel mapping atm */ sm->reg = tplc_chan_get_reg(tplg, mc->channel, @@ -1337,10 +1339,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( sm->rshift = tplc_chan_get_shift(tplg, mc->channel, SNDRV_CHMAP_FR); - sm->max = mc->max; - sm->min = mc->min; - sm->invert = mc->invert; - sm->platform_max = mc->platform_max; + sm->max = le32_to_cpu(mc->max); + sm->min = le32_to_cpu(mc->min); + sm->invert = le32_to_cpu(mc->invert); + sm->platform_max = le32_to_cpu(mc->platform_max); sm->dobj.index = tplg->index; INIT_LIST_HEAD(&sm->dobj.list); @@ -1401,7 +1403,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create( goto err_se; tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) + - ec->priv.size); + le32_to_cpu(ec->priv.size)); dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n", ec->hdr.name); @@ -1411,7 +1413,7 @@ static struct snd_kcontrol_new 
*soc_tplg_dapm_widget_denum_create( if (kc[i].name == NULL) goto err_se; kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; - kc[i].access = ec->hdr.access; + kc[i].access = le32_to_cpu(ec->hdr.access); /* we only support FL/FR channel mapping atm */ se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL); @@ -1420,8 +1422,8 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create( se->shift_r = tplc_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FR); - se->items = ec->items; - se->mask = ec->mask; + se->items = le32_to_cpu(ec->items); + se->mask = le32_to_cpu(ec->mask); se->dobj.index = tplg->index; switch (le32_to_cpu(ec->hdr.ops.info)) { @@ -1523,9 +1525,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create( if (kc[i].name == NULL) goto err_sbe; kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; - kc[i].access = be->hdr.access; + kc[i].access = le32_to_cpu(be->hdr.access); - sbe->max = be->max; + sbe->max = le32_to_cpu(be->max); INIT_LIST_HEAD(&sbe->dobj.list); /* map standard io handlers and check for external handlers */ @@ -1891,6 +1893,10 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg, link->num_codecs = 1; link->num_platforms = 1; + link->dobj.index = tplg->index; + link->dobj.ops = tplg->ops; + link->dobj.type = SND_SOC_DOBJ_DAI_LINK; + if (strlen(pcm->pcm_name)) { link->name = kstrdup(pcm->pcm_name, GFP_KERNEL); link->stream_name = kstrdup(pcm->pcm_name, GFP_KERNEL); @@ -1918,20 +1924,24 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg, ret = soc_tplg_dai_link_load(tplg, link, NULL); if (ret < 0) { dev_err(tplg->comp->dev, "ASoC: FE link loading failed\n"); - kfree(link->name); - kfree(link->stream_name); - kfree(link->cpus->dai_name); - kfree(link); - return ret; + goto err; + } + + ret = snd_soc_add_dai_link(tplg->comp->card, link); + if (ret < 0) { + dev_err(tplg->comp->dev, "ASoC: adding FE link failed\n"); + goto err; } - link->dobj.index = tplg->index; - link->dobj.ops = tplg->ops; - link->dobj.type = SND_SOC_DOBJ_DAI_LINK; list_add(&link->dobj.list, &tplg->comp->dobj_list); - snd_soc_add_dai_link(tplg->comp->card, link); return 0; +err: + kfree(link->name); + kfree(link->stream_name); + kfree(link->cpus->dai_name); + kfree(link); + return ret; } /* create a FE DAI and DAI link from the PCM object */ @@ -2024,6 +2034,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, int size; int i; bool abi_match; + int ret; count = le32_to_cpu(hdr->count); @@ -2065,7 +2076,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, } /* create the FE DAIs and DAI links */ - soc_tplg_pcm_create(tplg, _pcm); + ret = soc_tplg_pcm_create(tplg, _pcm); + if (ret < 0) { + if (!abi_match) + kfree(_pcm); + return ret; + } /* offset by version-specific struct size and * real priv data size @@ -2304,8 +2320,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg, } ret = soc_tplg_link_config(tplg, _link); - if (ret < 0) + if (ret < 0) { + if (!abi_match) + kfree(_link); return ret; + } /* offset by version-specific struct size and * real priv data size @@ -2469,7 +2488,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg, { struct snd_soc_tplg_manifest *manifest, *_manifest; bool abi_match; - int err; + int ret = 0; if (tplg->pass != SOC_TPLG_PASS_MANIFEST) return 0; @@ -2482,19 +2501,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg, _manifest = manifest; } else { abi_match = false; - err = manifest_new_ver(tplg, manifest, &_manifest); - if (err < 0) - return err; + ret = manifest_new_ver(tplg, manifest, 
&_manifest); + if (ret < 0) + return ret; } /* pass control to component driver for optional further init */ if (tplg->comp && tplg->ops && tplg->ops->manifest) - return tplg->ops->manifest(tplg->comp, tplg->index, _manifest); + ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest); if (!abi_match) /* free the duplicated one */ kfree(_manifest); - return 0; + return ret; } /* validate header magic, size and type */ diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c index 81f28f7ff1a0..12aec140819a 100644 --- a/sound/soc/sof/core.c +++ b/sound/soc/sof/core.c @@ -288,6 +288,46 @@ static int sof_machine_check(struct snd_sof_dev *sdev) #endif } +/* + * FW Boot State Transition Diagram + * + * +-----------------------------------------------------------------------+ + * | | + * ------------------ ------------------ | + * | | | | | + * | BOOT_FAILED | | READY_FAILED |-------------------------+ | + * | | | | | | + * ------------------ ------------------ | | + * ^ ^ | | + * | | | | + * (FW Boot Timeout) (FW_READY FAIL) | | + * | | | | + * | | | | + * ------------------ | ------------------ | | + * | | | | | | | + * | IN_PROGRESS |---------------+------------->| COMPLETE | | | + * | | (FW Boot OK) (FW_READY OK) | | | | + * ------------------ ------------------ | | + * ^ | | | + * | | | | + * (FW Loading OK) (System Suspend/Runtime Suspend) + * | | | | + * | | | | + * ------------------ ------------------ | | | + * | | | |<-----+ | | + * | PREPARE | | NOT_STARTED |<---------------------+ | + * | | | |<---------------------------+ + * ------------------ ------------------ + * | ^ | ^ + * | | | | + * | +-----------------------+ | + * | (DSP Probe OK) | + * | | + * | | + * +------------------------------------+ + * (System Suspend/Runtime Suspend) + */ + static int sof_probe_continue(struct snd_sof_dev *sdev) { struct snd_sof_pdata *plat_data = sdev->pdata; @@ -303,6 +343,8 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) return ret; } + sdev->fw_state = SOF_FW_BOOT_PREPARE; + /* check machine info */ ret = sof_machine_check(sdev); if (ret < 0) { @@ -342,7 +384,12 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) goto fw_load_err; } - /* boot the firmware */ + sdev->fw_state = SOF_FW_BOOT_IN_PROGRESS; + + /* + * Boot the firmware. The FW boot status will be modified + * in snd_sof_run_firmware() depending on the outcome. + */ ret = snd_sof_run_firmware(sdev); if (ret < 0) { dev_err(sdev->dev, "error: failed to boot DSP firmware %d\n", @@ -368,7 +415,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) if (ret < 0) { dev_err(sdev->dev, "error: failed to register DSP DAI driver %d\n", ret); - goto fw_run_err; + goto fw_trace_err; } drv_name = plat_data->machine->drv_name; @@ -382,7 +429,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) if (IS_ERR(plat_data->pdev_mach)) { ret = PTR_ERR(plat_data->pdev_mach); - goto fw_run_err; + goto fw_trace_err; } dev_dbg(sdev->dev, "created machine %s\n", @@ -393,7 +440,8 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) return 0; -#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE) +fw_trace_err: + snd_sof_free_trace(sdev); fw_run_err: snd_sof_fw_unload(sdev); fw_load_err: @@ -402,21 +450,10 @@ ipc_err: snd_sof_free_debug(sdev); dbg_err: snd_sof_remove(sdev); -#else - /* - * when the probe_continue is handled in a work queue, the - * probe does not fail so we don't release resources here. 
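With that work-queue special case removed, both probe paths now fail through a single unwind ladder and leave the device in a well-defined state. Reduced to its shape, with hypothetical step_*/undo_* names standing in for the debug/IPC/firmware stages above:

static int probe_sketch(struct snd_sof_dev *sdev)
{
        int ret;

        ret = step_load(sdev);          /* hypothetical: e.g. firmware load */
        if (ret < 0)
                goto load_err;

        ret = step_run(sdev);           /* hypothetical: e.g. firmware boot */
        if (ret < 0)
                goto run_err;

        return 0;

run_err:
        undo_load(sdev);                /* unwind only what succeeded */
load_err:
        /* all resources freed, update state to match */
        sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
        sdev->first_boot = true;
        return ret;
}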
- * They will be released with an explicit call to - * snd_sof_device_remove() when the PCI/ACPI device is removed */ -fw_run_err: -fw_load_err: -ipc_err: -dbg_err: - -#endif + /* all resources freed, update state to match */ + sdev->fw_state = SOF_FW_BOOT_NOT_STARTED; + sdev->first_boot = true; return ret; } @@ -447,6 +484,7 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data) sdev->pdata = plat_data; sdev->first_boot = true; + sdev->fw_state = SOF_FW_BOOT_NOT_STARTED; dev_set_drvdata(dev, sdev); /* check all mandatory ops */ @@ -494,10 +532,12 @@ int snd_sof_device_remove(struct device *dev) if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) cancel_work_sync(&sdev->probe_work); - snd_sof_fw_unload(sdev); - snd_sof_ipc_free(sdev); - snd_sof_free_debug(sdev); - snd_sof_free_trace(sdev); + if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) { + snd_sof_fw_unload(sdev); + snd_sof_ipc_free(sdev); + snd_sof_free_debug(sdev); + snd_sof_free_trace(sdev); + } /* * Unregister machine driver. This will unbind the snd_card which @@ -513,7 +553,8 @@ int snd_sof_device_remove(struct device *dev) * scheduled on, when they are unloaded. Therefore, the DSP must be * removed only after the topology has been unloaded. */ - snd_sof_remove(sdev); + if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) + snd_sof_remove(sdev); /* release firmware */ release_firmware(pdata->fw); diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig index 5acae75f5750..71f318bc2c74 100644 --- a/sound/soc/sof/imx/Kconfig +++ b/sound/soc/sof/imx/Kconfig @@ -11,8 +11,8 @@ config SND_SOC_SOF_IMX_TOPLEVEL if SND_SOC_SOF_IMX_TOPLEVEL -config SND_SOC_SOF_IMX8 - tristate "SOF support for i.MX8" +config SND_SOC_SOF_IMX8_SUPPORT + bool "SOF support for i.MX8" depends on IMX_SCU depends on IMX_DSP help @@ -20,4 +20,8 @@ config SND_SOC_SOF_IMX8 Say Y if you have such a device. If unsure select "N". +config SND_SOC_SOF_IMX8 + def_tristate SND_SOC_SOF_OF + depends on SND_SOC_SOF_IMX8_SUPPORT + endif ## SND_SOC_SOF_IMX_IMX_TOPLEVEL diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c index 2a22b18e5ec0..69785f688ddf 100644 --- a/sound/soc/sof/imx/imx8.c +++ b/sound/soc/sof/imx/imx8.c @@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev) priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains, sizeof(*priv->pd_dev), GFP_KERNEL); - if (!priv) + if (!priv->pd_dev) return -ENOMEM; priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains, @@ -304,6 +304,9 @@ static int imx8_probe(struct snd_sof_dev *sdev) } sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM; + /* set default mailbox offset for FW ready message */ + sdev->dsp_box.offset = MBOX_OFFSET; + return 0; exit_pdev_unregister: diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig index d62f51d33be1..8421b97d949e 100644 --- a/sound/soc/sof/intel/Kconfig +++ b/sound/soc/sof/intel/Kconfig @@ -76,10 +76,18 @@ config SND_SOC_SOF_BAYTRAIL config SND_SOC_SOF_BROADWELL_SUPPORT bool "SOF support for Broadwell" + depends on SND_SOC_INTEL_HASWELL=n help This adds support for Sound Open Firmware for Intel(R) platforms using the Broadwell processors. - Say Y if you have such a device. + This option is mutually exclusive with the Haswell/Broadwell legacy + driver. If you want to enable SOF on Broadwell you need to deselect + the legacy driver first. + SOF does not fully support Broadwell yet, so this option is not 
At some point all legacy drivers will be + deprecated but not before all userspace firmware/topology/UCM files + are made available to downstream distros. + Say Y if you want to enable SOF on Broadwell If unsure select "N". config SND_SOC_SOF_BROADWELL diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c index a1e514f71739..41008c974ac6 100644 --- a/sound/soc/sof/intel/byt.c +++ b/sound/soc/sof/intel/byt.c @@ -24,7 +24,8 @@ #define DRAM_OFFSET 0x100000 #define DRAM_SIZE (160 * 1024) #define SHIM_OFFSET 0x140000 -#define SHIM_SIZE 0x100 +#define SHIM_SIZE_BYT 0x100 +#define SHIM_SIZE_CHT 0x118 #define MBOX_OFFSET 0x144000 #define MBOX_SIZE 0x1000 #define EXCEPT_OFFSET 0x800 @@ -75,7 +76,7 @@ static const struct snd_sof_debugfs_map byt_debugfs[] = { SOF_DEBUGFS_ACCESS_D0_ONLY}, {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE, SOF_DEBUGFS_ACCESS_D0_ONLY}, - {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE, + {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT, SOF_DEBUGFS_ACCESS_ALWAYS}, }; @@ -102,7 +103,7 @@ static const struct snd_sof_debugfs_map cht_debugfs[] = { SOF_DEBUGFS_ACCESS_D0_ONLY}, {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE, SOF_DEBUGFS_ACCESS_D0_ONLY}, - {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE, + {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT, SOF_DEBUGFS_ACCESS_ALWAYS}, }; diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c index 3ca6795a89ba..9e8233c10d86 100644 --- a/sound/soc/sof/intel/hda-codec.c +++ b/sound/soc/sof/intel/hda-codec.c @@ -24,19 +24,18 @@ #define IDISP_VID_INTEL 0x80860000 /* load the legacy HDA codec driver */ -#ifdef MODULE -static void hda_codec_load_module(struct hda_codec *codec) +static int hda_codec_load_module(struct hda_codec *codec) { +#ifdef MODULE char alias[MODULE_NAME_LEN]; const char *module = alias; snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias)); dev_dbg(&codec->core.dev, "loading codec module: %s\n", module); request_module(module); -} -#else -static void hda_codec_load_module(struct hda_codec *codec) {} #endif + return device_attach(hda_codec_dev(codec)); +} /* enable controller wake up event for all codecs with jack connectors */ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) @@ -116,10 +115,16 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address) /* use legacy bus only for HDA codecs, idisp uses ext bus */ if ((resp & 0xFFFF0000) != IDISP_VID_INTEL) { hdev->type = HDA_DEV_LEGACY; - hda_codec_load_module(&hda_priv->codec); + ret = hda_codec_load_module(&hda_priv->codec); + /* + * handle ret==0 (no driver bound) as an error, but pass + * other return codes without modification + */ + if (ret == 0) + ret = -ENOENT; } - return 0; + return ret; #else hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index 8796f385be76..3f645200d3a5 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -216,6 +216,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, link_dev = hda_link_stream_assign(bus, substream); if (!link_dev) return -EBUSY; + + snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev); } stream_tag = hdac_stream(link_dev)->stream_tag; @@ -228,8 +230,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev); - link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name); if (!link) return -EINVAL; @@ -261,14 +261,11 @@ 
static int hda_link_pcm_prepare(struct snd_pcm_substream *substream, { struct hdac_ext_stream *link_dev = snd_soc_dai_get_dma_data(dai, substream); - struct sof_intel_hda_stream *hda_stream; struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(dai->component); struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); int stream = substream->stream; - hda_stream = hstream_to_sof_hda_stream(link_dev); - if (link_dev->link_prepared) return 0; @@ -361,6 +358,13 @@ static int hda_link_hw_free(struct snd_pcm_substream *substream, bus = hstream->bus; rtd = snd_pcm_substream_chip(substream); link_dev = snd_soc_dai_get_dma_data(dai, substream); + + if (!link_dev) { + dev_dbg(dai->dev, + "%s: link_dev is not assigned\n", __func__); + return -EINVAL; + } + hda_stream = hstream_to_sof_hda_stream(link_dev); /* free the link DMA channel in the FW */ @@ -439,6 +443,10 @@ struct snd_soc_dai_driver skl_dai[] = { .name = "iDisp3 Pin", .ops = &hda_link_dai_ops, }, +{ + .name = "iDisp4 Pin", + .ops = &hda_link_dai_ops, +}, { .name = "Analog CPU DAI", .ops = &hda_link_dai_ops, diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index 65c2af3fcaab..356bb134ae93 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -278,7 +278,6 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev) /* init for booting wait */ init_waitqueue_head(&sdev->boot_wait); - sdev->boot_complete = false; /* prepare DMA for code loader stream */ tag = cl_stream_prepare(sdev, 0x40, stripped_firmware.size, diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 06e84679087b..3c4b604412f0 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -166,7 +166,7 @@ void hda_dsp_dump_skl(struct snd_sof_dev *sdev, u32 flags) panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_ADSP_ERROR_CODE_SKL + 0x4); - if (sdev->boot_complete) { + if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) { hda_dsp_get_registers(sdev, &xoops, &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE); snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, @@ -193,7 +193,7 @@ void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags) HDA_DSP_SRAM_REG_FW_STATUS); panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP); - if (sdev->boot_complete) { + if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) { hda_dsp_get_registers(sdev, &xoops, &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE); snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, @@ -268,6 +268,7 @@ static int hda_init(struct snd_sof_dev *sdev) bus->use_posbuf = 1; bus->bdl_pos_adj = 0; + bus->sync_write = 1; mutex_init(&hbus->prepare_mutex); hbus->pci = pci; diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h index 23e430d3e056..4be53ef2eab6 100644 --- a/sound/soc/sof/intel/hda.h +++ b/sound/soc/sof/intel/hda.h @@ -336,7 +336,7 @@ /* Number of DAIs */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) -#define SOF_SKL_NUM_DAIS 14 +#define SOF_SKL_NUM_DAIS 15 #else #define SOF_SKL_NUM_DAIS 8 #endif diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c index 086eeeab8679..e7b1a80e2a14 100644 --- a/sound/soc/sof/ipc.c +++ b/sound/soc/sof/ipc.c @@ -348,19 +348,12 @@ void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev) break; case SOF_IPC_FW_READY: /* check for FW boot completion */ - if (!sdev->boot_complete) { + if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS) { err = sof_ops(sdev)->fw_ready(sdev, cmd); - if (err < 0) { - /* - * this indicates a mismatch in ABI - * between the driver and fw - */ - 
dev_err(sdev->dev, "error: ABI mismatch %d\n", - err); - } else { - /* firmware boot completed OK */ - sdev->boot_complete = true; - } + if (err < 0) + sdev->fw_state = SOF_FW_BOOT_READY_FAILED; + else + sdev->fw_state = SOF_FW_BOOT_COMPLETE; /* wake up firmware loader */ wake_up(&sdev->boot_wait); @@ -504,7 +497,7 @@ int snd_sof_ipc_stream_posn(struct snd_sof_dev *sdev, /* send IPC to the DSP */ err = sof_ipc_tx_message(sdev->ipc, - stream.hdr.cmd, &stream, sizeof(stream), &posn, + stream.hdr.cmd, &stream, sizeof(stream), posn, sizeof(*posn)); if (err < 0) { dev_err(sdev->dev, "error: failed to get stream %d position\n", @@ -834,6 +827,9 @@ void snd_sof_ipc_free(struct snd_sof_dev *sdev) { struct snd_sof_ipc *ipc = sdev->ipc; + if (!ipc) + return; + /* disable sending of ipc's */ mutex_lock(&ipc->tx_mutex); ipc->disable_ipc_tx = true; diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c index 9a9a381a908d..ce114df5e4fc 100644 --- a/sound/soc/sof/loader.c +++ b/sound/soc/sof/loader.c @@ -66,6 +66,8 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset) ret = get_ext_windows(sdev, ext_hdr); break; default: + dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n", + ext_hdr->type, ext_hdr->hdr.size); break; } @@ -509,7 +511,6 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev) int init_core_mask; init_waitqueue_head(&sdev->boot_wait); - sdev->boot_complete = false; /* create read-only fw_version debugfs to store boot version info */ if (sdev->first_boot) { @@ -541,19 +542,27 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev) init_core_mask = ret; - /* now wait for the DSP to boot */ - ret = wait_event_timeout(sdev->boot_wait, sdev->boot_complete, + /* + * now wait for the DSP to boot. There are 3 possible outcomes: + * 1. Boot wait times out indicating FW boot failure. + * 2. FW boots successfully and fw_ready op succeeds. + * 3. FW boots but fw_ready op fails. 
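Condensed, the wait below maps those three outcomes onto fw_state as follows (a sketch of the surrounding hunk, not a drop-in; outcome 3's state is set by the IPC rx handler above):

/* outcome 1: timeout, the DSP never signalled -> BOOT_FAILED, -EIO    */
/* outcome 2: fw_ready op succeeded            -> BOOT_COMPLETE, go on */
/* outcome 3: fw_ready op failed               -> READY_FAILED, -EIO   */
if (!wait_event_timeout(sdev->boot_wait,
                        sdev->fw_state > SOF_FW_BOOT_IN_PROGRESS,
                        msecs_to_jiffies(sdev->boot_timeout))) {
        sdev->fw_state = SOF_FW_BOOT_FAILED;
        return -EIO;
}
if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
        return -EIO;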
+ */ + ret = wait_event_timeout(sdev->boot_wait, + sdev->fw_state > SOF_FW_BOOT_IN_PROGRESS, msecs_to_jiffies(sdev->boot_timeout)); if (ret == 0) { dev_err(sdev->dev, "error: firmware boot failure\n"); snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX | SOF_DBG_TEXT | SOF_DBG_PCI); - /* after this point FW_READY msg should be ignored */ - sdev->boot_complete = true; + sdev->fw_state = SOF_FW_BOOT_FAILED; return -EIO; } - dev_info(sdev->dev, "firmware boot complete\n"); + if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) + dev_info(sdev->dev, "firmware boot complete\n"); + else + return -EIO; /* FW boots but fw_ready op failed */ /* perform post fw run operations */ ret = snd_sof_dsp_post_fw_run(sdev); diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c index e23beaeefe00..195af259e78e 100644 --- a/sound/soc/sof/pm.c +++ b/sound/soc/sof/pm.c @@ -269,6 +269,10 @@ static int sof_resume(struct device *dev, bool runtime_resume) if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume) return 0; + /* DSP was never successfully started, nothing to resume */ + if (sdev->first_boot) + return 0; + /* * if the runtime_resume flag is set, call the runtime_resume routine * or else call the system resume routine @@ -283,6 +287,8 @@ static int sof_resume(struct device *dev, bool runtime_resume) return ret; } + sdev->fw_state = SOF_FW_BOOT_PREPARE; + /* load the firmware */ ret = snd_sof_load_firmware(sdev); if (ret < 0) { @@ -292,7 +298,12 @@ static int sof_resume(struct device *dev, bool runtime_resume) return ret; } - /* boot the firmware */ + sdev->fw_state = SOF_FW_BOOT_IN_PROGRESS; + + /* + * Boot the firmware. The FW boot status will be modified + * in snd_sof_run_firmware() depending on the outcome. + */ ret = snd_sof_run_firmware(sdev); if (ret < 0) { dev_err(sdev->dev, @@ -338,6 +349,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) if (!sof_ops(sdev)->suspend) return 0; + if (sdev->fw_state != SOF_FW_BOOT_COMPLETE) + goto power_down; + /* release trace */ snd_sof_release_trace(sdev); @@ -375,6 +389,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) ret); } +power_down: + + /* return if the DSP was not probed successfully */ + if (sdev->fw_state == SOF_FW_BOOT_NOT_STARTED) + return 0; + /* power down all DSP cores */ if (runtime_suspend) ret = snd_sof_dsp_runtime_suspend(sdev); @@ -385,6 +405,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) "error: failed to power down DSP during suspend %d\n", ret); + /* reset FW state */ + sdev->fw_state = SOF_FW_BOOT_NOT_STARTED; + return ret; } diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h index 730f3259dd02..7b329bd99674 100644 --- a/sound/soc/sof/sof-priv.h +++ b/sound/soc/sof/sof-priv.h @@ -356,6 +356,15 @@ struct snd_sof_dai { struct list_head list; /* list in sdev dai list */ }; +enum snd_sof_fw_state { + SOF_FW_BOOT_NOT_STARTED = 0, + SOF_FW_BOOT_PREPARE, + SOF_FW_BOOT_IN_PROGRESS, + SOF_FW_BOOT_FAILED, + SOF_FW_BOOT_READY_FAILED, /* firmware booted but fw_ready op failed */ + SOF_FW_BOOT_COMPLETE, +}; + /* * SOF Device Level. 
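One property of this enum is worth calling out: its declaration order is load-bearing, because call sites in this series compare states relationally rather than testing for equality. Two examples lifted from the hunks above:

/* "resources were allocated at some point, safe to free" */
if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED)
        snd_sof_remove(sdev);

/* "boot finished, successfully or not": any later state wakes the waiter */
wait_event_timeout(sdev->boot_wait,
                   sdev->fw_state > SOF_FW_BOOT_IN_PROGRESS,
                   msecs_to_jiffies(sdev->boot_timeout));

A new state therefore has to be placed carefully relative to IN_PROGRESS and the failure states, not simply appended at the end.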
*/ @@ -372,7 +381,7 @@ struct snd_sof_dev { /* DSP firmware boot */ wait_queue_head_t boot_wait; - u32 boot_complete; + enum snd_sof_fw_state fw_state; u32 first_boot; /* work queue in case the probe is implemented in two steps */ diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index 4452594c2e17..fa299e078156 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -2828,6 +2828,10 @@ static int sof_link_load(struct snd_soc_component *scomp, int index, if (!link->no_pcm) { link->nonatomic = true; + /* set trigger order */ + link->trigger[0] = SND_SOC_DPCM_TRIGGER_POST; + link->trigger[1] = SND_SOC_DPCM_TRIGGER_POST; + /* nothing more to do for FE dai links */ return 0; } diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index 48ea915b24ba..2ed92c990b97 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c @@ -226,7 +226,6 @@ static void uni_player_set_channel_status(struct uniperif *player, * sampling frequency. If no sample rate is already specified, then * set one. */ - mutex_lock(&player->ctrl_lock); if (runtime) { switch (runtime->rate) { case 22050: @@ -303,7 +302,6 @@ static void uni_player_set_channel_status(struct uniperif *player, player->stream_settings.iec958.status[3 + (n * 4)] << 24; SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status); } - mutex_unlock(&player->ctrl_lock); /* Update the channel status */ if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) @@ -365,8 +363,10 @@ static int uni_player_prepare_iec958(struct uniperif *player, SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player); + mutex_lock(&player->ctrl_lock); /* Update the channel status */ uni_player_set_channel_status(player, runtime); + mutex_unlock(&player->ctrl_lock); /* Clear the user validity user bits */ SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0); @@ -598,7 +598,6 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol, iec958->status[1] = ucontrol->value.iec958.status[1]; iec958->status[2] = ucontrol->value.iec958.status[2]; iec958->status[3] = ucontrol->value.iec958.status[3]; - mutex_unlock(&player->ctrl_lock); spin_lock_irqsave(&player->irq_lock, flags); if (player->substream && player->substream->runtime) @@ -608,6 +607,8 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol, uni_player_set_channel_status(player, NULL); spin_unlock_irqrestore(&player->irq_lock, flags); + mutex_unlock(&player->ctrl_lock); + return 0; } diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c index 3c9a9deec9af..4ecea4913f42 100644 --- a/sound/soc/stm/stm32_adfsdm.c +++ b/sound/soc/stm/stm32_adfsdm.c @@ -153,13 +153,13 @@ static const struct snd_soc_component_driver stm32_adfsdm_dai_component = { .name = "stm32_dfsdm_audio", }; -static void memcpy_32to16(void *dest, const void *src, size_t n) +static void stm32_memcpy_32to16(void *dest, const void *src, size_t n) { unsigned int i = 0; u16 *d = (u16 *)dest, *s = (u16 *)src; s++; - for (i = n; i > 0; i--) { + for (i = n >> 1; i > 0; i--) { *d++ = *s++; s++; } @@ -186,8 +186,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private) if ((priv->pos + src_size) > buff_size) { if (format == SNDRV_PCM_FORMAT_S16_LE) - memcpy_32to16(&pcm_buff[priv->pos], src_buff, - buff_size - priv->pos); + stm32_memcpy_32to16(&pcm_buff[priv->pos], src_buff, + buff_size - priv->pos); else memcpy(&pcm_buff[priv->pos], src_buff, buff_size - priv->pos); @@ -196,8 +196,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void 
*private) } if (format == SNDRV_PCM_FORMAT_S16_LE) - memcpy_32to16(&pcm_buff[priv->pos], - &src_buff[src_size - cur_size], cur_size); + stm32_memcpy_32to16(&pcm_buff[priv->pos], + &src_buff[src_size - cur_size], cur_size); else memcpy(&pcm_buff[priv->pos], &src_buff[src_size - cur_size], cur_size); diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index 48e629ac2d88..10eb4b8e8e7e 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -184,6 +184,56 @@ static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg) } } +static int stm32_sai_sub_reg_up(struct stm32_sai_sub_data *sai, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; + + ret = regmap_update_bits(sai->regmap, reg, mask, val); + + clk_disable(sai->pdata->pclk); + + return ret; +} + +static int stm32_sai_sub_reg_wr(struct stm32_sai_sub_data *sai, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; + + ret = regmap_write_bits(sai->regmap, reg, mask, val); + + clk_disable(sai->pdata->pclk); + + return ret; +} + +static int stm32_sai_sub_reg_rd(struct stm32_sai_sub_data *sai, + unsigned int reg, unsigned int *val) +{ + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; + + ret = regmap_read(sai->regmap, reg, val); + + clk_disable(sai->pdata->pclk); + + return ret; +} + static const struct regmap_config stm32_sai_sub_regmap_config_f4 = { .reg_bits = 32, .reg_stride = 4, @@ -295,7 +345,7 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai, mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version)); cr1 = SAI_XCR1_MCKDIV_SET(div); - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, mask, cr1); if (ret < 0) dev_err(&sai->pdev->dev, "Failed to update CR1 register\n"); @@ -372,8 +422,8 @@ static int stm32_sai_mclk_enable(struct clk_hw *hw) dev_dbg(&sai->pdev->dev, "Enable master clock\n"); - return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_MCKEN, SAI_XCR1_MCKEN); + return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_MCKEN, SAI_XCR1_MCKEN); } static void stm32_sai_mclk_disable(struct clk_hw *hw) @@ -383,7 +433,7 @@ static void stm32_sai_mclk_disable(struct clk_hw *hw) dev_dbg(&sai->pdev->dev, "Disable master clock\n"); - regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0); + stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0); } static const struct clk_ops mclk_ops = { @@ -446,15 +496,15 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid) unsigned int sr, imr, flags; snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING; - regmap_read(sai->regmap, STM_SAI_IMR_REGX, &imr); - regmap_read(sai->regmap, STM_SAI_SR_REGX, &sr); + stm32_sai_sub_reg_rd(sai, STM_SAI_IMR_REGX, &imr); + stm32_sai_sub_reg_rd(sai, STM_SAI_SR_REGX, &sr); flags = sr & imr; if (!flags) return IRQ_NONE; - regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK, - SAI_XCLRFR_MASK); + stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK, + SAI_XCLRFR_MASK); if (!sai->substream) { dev_err(&pdev->dev, "Device stopped. 
Spurious IRQ 0x%x\n", sr); @@ -503,8 +553,8 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai, int ret; if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) { - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_NODIV, + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_NODIV, freq ? 0 : SAI_XCR1_NODIV); if (ret < 0) return ret; @@ -583,7 +633,7 @@ static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask, slotr_mask |= SAI_XSLOTR_SLOTEN_MASK; - regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, slotr_mask, slotr); + stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, slotr_mask, slotr); sai->slot_width = slot_width; sai->slots = slots; @@ -665,7 +715,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) cr1_mask |= SAI_XCR1_CKSTR; frcr_mask |= SAI_XFRCR_FSPOL; - regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr); + stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr); /* DAI clock master masks */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { @@ -693,7 +743,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) cr1_mask |= SAI_XCR1_SLAVE; conf_update: - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1); if (ret < 0) { dev_err(cpu_dai->dev, "Failed to update CR1 register\n"); return ret; @@ -730,12 +780,12 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream, } /* Enable ITs */ - regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, - SAI_XCLRFR_MASK, SAI_XCLRFR_MASK); + stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, + SAI_XCLRFR_MASK, SAI_XCLRFR_MASK); imr = SAI_XIMR_OVRUDRIE; if (STM_SAI_IS_CAPTURE(sai)) { - regmap_read(sai->regmap, STM_SAI_CR2_REGX, &cr2); + stm32_sai_sub_reg_rd(sai, STM_SAI_CR2_REGX, &cr2); if (cr2 & SAI_XCR2_MUTECNT_MASK) imr |= SAI_XIMR_MUTEDETIE; } @@ -745,8 +795,8 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream, else imr |= SAI_XIMR_AFSDETIE | SAI_XIMR_LFSDETIE; - regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, - SAI_XIMR_MASK, imr); + stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, + SAI_XIMR_MASK, imr); return 0; } @@ -763,10 +813,10 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai, * SAI fifo threshold is set to half fifo, to keep enough space * for DMA incoming bursts. 
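A note on why these wrappers use clk_enable()/clk_disable() rather than clk_prepare_enable(): prepare may sleep, while the same helpers are also called from atomic context (stm32_sai_isr() reads IMR/SR through them). The driver therefore pairs a one-time prepare at probe with an unprepare at remove, both taken from hunks in this patch:

/* stm32_sai_sub_parse_of(), once at probe time */
ret = clk_prepare(sai->pdata->pclk);
if (ret < 0)
        return ret;

/* stm32_sai_sub_remove(), the mirror on the way out */
clk_unprepare(sai->pdata->pclk);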
*/ - regmap_write_bits(sai->regmap, STM_SAI_CR2_REGX, - SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK, - SAI_XCR2_FFLUSH | - SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF)); + stm32_sai_sub_reg_wr(sai, STM_SAI_CR2_REGX, + SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK, + SAI_XCR2_FFLUSH | + SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF)); /* DS bits in CR1 not set for SPDIF (size forced to 24 bits).*/ if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) { @@ -795,7 +845,7 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai, if ((sai->slots == 2) && (params_channels(params) == 1)) cr1 |= SAI_XCR1_MONO; - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1); if (ret < 0) { dev_err(cpu_dai->dev, "Failed to update CR1 register\n"); return ret; @@ -809,7 +859,7 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai) struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); int slotr, slot_sz; - regmap_read(sai->regmap, STM_SAI_SLOTR_REGX, &slotr); + stm32_sai_sub_reg_rd(sai, STM_SAI_SLOTR_REGX, &slotr); /* * If SLOTSZ is set to auto in SLOTR, align slot width on data size @@ -831,16 +881,16 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai) sai->slots = 2; /* The number of slots in the audio frame is equal to NBSLOT[3:0] + 1*/ - regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, - SAI_XSLOTR_NBSLOT_MASK, - SAI_XSLOTR_NBSLOT_SET((sai->slots - 1))); + stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, + SAI_XSLOTR_NBSLOT_MASK, + SAI_XSLOTR_NBSLOT_SET((sai->slots - 1))); /* Set default slots mask if not already set from DT */ if (!(slotr & SAI_XSLOTR_SLOTEN_MASK)) { sai->slot_mask = (1 << sai->slots) - 1; - regmap_update_bits(sai->regmap, - STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK, - SAI_XSLOTR_SLOTEN_SET(sai->slot_mask)); + stm32_sai_sub_reg_up(sai, + STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK, + SAI_XSLOTR_SLOTEN_SET(sai->slot_mask)); } dev_dbg(cpu_dai->dev, "Slots %d, slot width %d\n", @@ -870,14 +920,14 @@ static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai) dev_dbg(cpu_dai->dev, "Frame length %d, frame active %d\n", sai->fs_length, fs_active); - regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr); + stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr); if ((sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LSB) { offset = sai->slot_width - sai->data_size; - regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, - SAI_XSLOTR_FBOFF_MASK, - SAI_XSLOTR_FBOFF_SET(offset)); + stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, + SAI_XSLOTR_FBOFF_MASK, + SAI_XSLOTR_FBOFF_SET(offset)); } } @@ -994,9 +1044,9 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai, return -EINVAL; } - regmap_update_bits(sai->regmap, - STM_SAI_CR1_REGX, - SAI_XCR1_OSR, cr1); + stm32_sai_sub_reg_up(sai, + STM_SAI_CR1_REGX, + SAI_XCR1_OSR, cr1); div = stm32_sai_get_clk_div(sai, sai_clk_rate, sai->mclk_rate); @@ -1058,12 +1108,12 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: dev_dbg(cpu_dai->dev, "Enable DMA and SAI\n"); - regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_DMAEN, SAI_XCR1_DMAEN); + stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_DMAEN, SAI_XCR1_DMAEN); /* Enable SAI */ - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_SAIEN, SAI_XCR1_SAIEN); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_SAIEN, SAI_XCR1_SAIEN); if (ret < 0) dev_err(cpu_dai->dev, "Failed to update CR1 
register\n"); break; @@ -1072,16 +1122,16 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_STOP: dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n"); - regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, - SAI_XIMR_MASK, 0); + stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, + SAI_XIMR_MASK, 0); - regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_SAIEN, - (unsigned int)~SAI_XCR1_SAIEN); + stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_SAIEN, + (unsigned int)~SAI_XCR1_SAIEN); - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_DMAEN, - (unsigned int)~SAI_XCR1_DMAEN); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_DMAEN, + (unsigned int)~SAI_XCR1_DMAEN); if (ret < 0) dev_err(cpu_dai->dev, "Failed to update CR1 register\n"); @@ -1101,7 +1151,7 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream, struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); unsigned long flags; - regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0); + stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0); clk_disable_unprepare(sai->sai_ck); @@ -1169,7 +1219,7 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai) cr1_mask |= SAI_XCR1_SYNCEN_MASK; cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync); - return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1); + return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1); } static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = { @@ -1322,8 +1372,13 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, if (STM_SAI_HAS_PDM(sai) && STM_SAI_IS_SUB_A(sai)) sai->regmap_config = &stm32_sai_sub_regmap_config_h7; - sai->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "sai_ck", - base, sai->regmap_config); + /* + * Do not manage peripheral clock through regmap framework as this + * can lead to circular locking issue with sai master clock provider. + * Manage peripheral clock directly in driver instead. 
+ */ + sai->regmap = devm_regmap_init_mmio(&pdev->dev, base, + sai->regmap_config); if (IS_ERR(sai->regmap)) { dev_err(&pdev->dev, "Failed to initialize MMIO\n"); return PTR_ERR(sai->regmap); @@ -1420,6 +1475,10 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, return PTR_ERR(sai->sai_ck); } + ret = clk_prepare(sai->pdata->pclk); + if (ret < 0) + return ret; + if (STM_SAI_IS_F4(sai->pdata)) return 0; @@ -1484,19 +1543,30 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) return ret; } - ret = devm_snd_soc_register_component(&pdev->dev, &stm32_component, - &sai->cpu_dai_drv, 1); + ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0); + if (ret) { + dev_err(&pdev->dev, "Could not register pcm dma\n"); + return ret; + } + + ret = snd_soc_register_component(&pdev->dev, &stm32_component, + &sai->cpu_dai_drv, 1); if (ret) return ret; if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) conf = &stm32_sai_pcm_config_spdif; - ret = devm_snd_dmaengine_pcm_register(&pdev->dev, conf, 0); - if (ret) { - dev_err(&pdev->dev, "Could not register pcm dma\n"); - return ret; - } + return 0; +} + +static int stm32_sai_sub_remove(struct platform_device *pdev) +{ + struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev); + + clk_unprepare(sai->pdata->pclk); + snd_dmaengine_pcm_unregister(&pdev->dev); + snd_soc_unregister_component(&pdev->dev); return 0; } @@ -1505,18 +1575,35 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) static int stm32_sai_sub_suspend(struct device *dev) { struct stm32_sai_sub_data *sai = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; regcache_cache_only(sai->regmap, true); regcache_mark_dirty(sai->regmap); + + clk_disable(sai->pdata->pclk); + return 0; } static int stm32_sai_sub_resume(struct device *dev) { struct stm32_sai_sub_data *sai = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; regcache_cache_only(sai->regmap, false); - return regcache_sync(sai->regmap); + ret = regcache_sync(sai->regmap); + + clk_disable(sai->pdata->pclk); + + return ret; } #endif /* CONFIG_PM_SLEEP */ @@ -1531,6 +1618,7 @@ static struct platform_driver stm32_sai_sub_driver = { .pm = &stm32_sai_sub_pm_ops, }, .probe = stm32_sai_sub_probe, + .remove = stm32_sai_sub_remove, }; module_platform_driver(stm32_sai_sub_driver); diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c index cd4b235fce57..e53fb4bd66b3 100644 --- a/sound/soc/stm/stm32_spdifrx.c +++ b/sound/soc/stm/stm32_spdifrx.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -220,6 +219,7 @@ * @slave_config: dma slave channel runtime config pointer * @phys_addr: SPDIFRX registers physical base address * @lock: synchronization enabling lock + * @irq_lock: prevent race condition with IRQ on stream state * @cs: channel status buffer * @ub: user data buffer * @irq: SPDIFRX interrupt line @@ -240,6 +240,7 @@ struct stm32_spdifrx_data { struct dma_slave_config slave_config; dma_addr_t phys_addr; spinlock_t lock; /* Sync enabling lock */ + spinlock_t irq_lock; /* Prevent race condition on stream state */ unsigned char cs[SPDIFRX_CS_BYTES_NB]; unsigned char ub[SPDIFRX_UB_BYTES_NB]; int irq; @@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx) static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) { int cr, cr_mask, imr, ret; + unsigned long flags; /* Enable IRQs */ imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | 
SPDIFRX_IMR_PERRIE; @@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) if (ret) return ret; - spin_lock(&spdifrx->lock); + spin_lock_irqsave(&spdifrx->lock, flags); spdifrx->refcount++; @@ -360,7 +362,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) "Failed to start synchronization\n"); } - spin_unlock(&spdifrx->lock); + spin_unlock_irqrestore(&spdifrx->lock, flags); return ret; } @@ -368,11 +370,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx) { int cr, cr_mask, reg; + unsigned long flags; - spin_lock(&spdifrx->lock); + spin_lock_irqsave(&spdifrx->lock, flags); if (--spdifrx->refcount) { - spin_unlock(&spdifrx->lock); + spin_unlock_irqrestore(&spdifrx->lock, flags); return; } @@ -391,7 +394,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx) regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, ®); regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, ®); - spin_unlock(&spdifrx->lock); + spin_unlock_irqrestore(&spdifrx->lock, flags); } static int stm32_spdifrx_dma_ctrl_register(struct device *dev, @@ -478,8 +481,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx) memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB); memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB); - pinctrl_pm_select_default_state(&spdifrx->pdev->dev); - ret = stm32_spdifrx_dma_ctrl_start(spdifrx); if (ret < 0) return ret; @@ -511,7 +512,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx) end: clk_disable_unprepare(spdifrx->kclk); - pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev); return ret; } @@ -663,7 +663,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = { static irqreturn_t stm32_spdifrx_isr(int irq, void *devid) { struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid; - struct snd_pcm_substream *substream = spdifrx->substream; struct platform_device *pdev = spdifrx->pdev; unsigned int cr, mask, sr, imr; unsigned int flags; @@ -731,14 +730,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid) regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR, SPDIFRX_CR_SPDIFEN_MASK, cr); - if (substream) - snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); + spin_lock(&spdifrx->irq_lock); + if (spdifrx->substream) + snd_pcm_stop(spdifrx->substream, + SNDRV_PCM_STATE_DISCONNECTED); + spin_unlock(&spdifrx->irq_lock); return IRQ_HANDLED; } - if (err_xrun && substream) - snd_pcm_stop_xrun(substream); + spin_lock(&spdifrx->irq_lock); + if (err_xrun && spdifrx->substream) + snd_pcm_stop_xrun(spdifrx->substream); + spin_unlock(&spdifrx->irq_lock); return IRQ_HANDLED; } @@ -747,9 +751,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai); + unsigned long flags; int ret; + spin_lock_irqsave(&spdifrx->irq_lock, flags); spdifrx->substream = substream; + spin_unlock_irqrestore(&spdifrx->irq_lock, flags); ret = clk_prepare_enable(spdifrx->kclk); if (ret) @@ -825,8 +832,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai); + unsigned long flags; + spin_lock_irqsave(&spdifrx->irq_lock, flags); spdifrx->substream = NULL; + spin_unlock_irqrestore(&spdifrx->irq_lock, flags); + clk_disable_unprepare(spdifrx->kclk); } @@ -930,6 +941,7 @@ 
static int stm32_spdifrx_probe(struct platform_device *pdev) spdifrx->pdev = pdev; init_completion(&spdifrx->cs_completion); spin_lock_init(&spdifrx->lock); + spin_lock_init(&spdifrx->irq_lock); platform_set_drvdata(pdev, spdifrx); diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c index 55798bc8eae2..686561df8e13 100644 --- a/sound/soc/sunxi/sun8i-codec.c +++ b/sound/soc/sunxi/sun8i-codec.c @@ -80,6 +80,7 @@ #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12) #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8) +#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2) #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4) #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6) #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9) @@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return -EINVAL; } regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL, - BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT), + SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK, value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT); return 0; diff --git a/sound/usb/card.c b/sound/usb/card.c index db91dc76cc91..54f9ce38471e 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -597,6 +597,10 @@ static int usb_audio_probe(struct usb_interface *intf, } } if (! chip) { + err = snd_usb_apply_boot_quirk_once(dev, intf, quirk, id); + if (err < 0) + goto __error; + /* it's a fresh one. * now look for an empty slot and create a new card instance */ diff --git a/sound/usb/card.h b/sound/usb/card.h index 2991b9986f66..395403a2d33f 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -145,6 +145,7 @@ struct snd_usb_substream { struct snd_usb_endpoint *sync_endpoint; unsigned long flags; bool need_setup_ep; /* (re)configure EP at prepare? */ + bool need_setup_fmt; /* (re)configure fmt after resume? */ unsigned int speed; /* USB_SPEED_XXX */ u64 formats; /* format bitmasks (all or'ed) */ diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 6b8c14f9b5d4..a48313dfa967 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i return ret; } +/* + * Assume the clock is valid if the clock source supports only a single sample + * rate, the terminal is connected directly to it (there is no clock selector) + * and the clock type is internal. This is to deal with some Denon DJ controllers + * that always report that the clock is invalid. 
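The three conditions compress into a single predicate; this is a condensed view of uac_clock_source_is_valid_quirk() below, applicable to UAC2 descriptors only:

return fmt->nr_rates == 1 &&                       /* a single fixed rate  */
       (fmt->clock & 0xff) == cs_desc->bClockID && /* wired straight to it */
       (cs_desc->bmAttributes & 0x3) !=            /* internal clock type  */
               UAC_CLOCK_SOURCE_TYPE_EXT;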
+ */ +static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, + struct audioformat *fmt, + int source_id) +{ + if (fmt->protocol == UAC_VERSION_2) { + struct uac_clock_source_descriptor *cs_desc = + snd_usb_find_clock_source(chip->ctrl_intf, source_id); + + if (!cs_desc) + return false; + + return (fmt->nr_rates == 1 && + (fmt->clock & 0xff) == cs_desc->bClockID && + (cs_desc->bmAttributes & 0x3) != + UAC_CLOCK_SOURCE_TYPE_EXT); + } + + return false; +} + static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, - int protocol, + struct audioformat *fmt, int source_id) { int err; @@ -160,26 +186,26 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, struct usb_device *dev = chip->dev; u32 bmControls; - if (protocol == UAC_VERSION_3) { + if (fmt->protocol == UAC_VERSION_3) { struct uac3_clock_source_descriptor *cs_desc = snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id); if (!cs_desc) - return 0; + return false; bmControls = le32_to_cpu(cs_desc->bmControls); } else { /* UAC_VERSION_1/2 */ struct uac_clock_source_descriptor *cs_desc = snd_usb_find_clock_source(chip->ctrl_intf, source_id); if (!cs_desc) - return 0; + return false; bmControls = cs_desc->bmControls; } /* If a clock source can't tell us whether it's valid, we assume it is */ if (!uac_v2v3_control_is_readable(bmControls, UAC2_CS_CONTROL_CLOCK_VALID)) - return 1; + return true; err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, @@ -191,13 +217,17 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, dev_warn(&dev->dev, "%s(): cannot get clock validity for id %d\n", __func__, source_id); - return 0; + return false; } - return !!data; + if (data) + return true; + else + return uac_clock_source_is_valid_quirk(chip, fmt, source_id); } -static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, +static int __uac_clock_find_source(struct snd_usb_audio *chip, + struct audioformat *fmt, int entity_id, unsigned long *visited, bool validate) { struct uac_clock_source_descriptor *source; @@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id); if (source) { entity_id = source->bClockID; - if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2, + if (validate && !uac_clock_source_is_valid(chip, fmt, entity_id)) { usb_audio_err(chip, "clock source %d is not valid, cannot use\n", @@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, } cur = ret; - ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1], - visited, validate); + ret = __uac_clock_find_source(chip, fmt, + selector->baCSourceID[ret - 1], + visited, validate); if (!validate || ret > 0 || !chip->autoclock) return ret; @@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, if (i == cur) continue; - ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1], - visited, true); + ret = __uac_clock_find_source(chip, fmt, + selector->baCSourceID[i - 1], + visited, true); if (ret < 0) continue; @@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, /* FIXME: multipliers only act as pass-thru element for now */ multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id); if (multiplier) - return __uac_clock_find_source(chip, multiplier->bCSourceID, - visited, validate); + return 
__uac_clock_find_source(chip, fmt, + multiplier->bCSourceID, + visited, validate); return -EINVAL; } -static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, - unsigned long *visited, bool validate) +static int __uac3_clock_find_source(struct snd_usb_audio *chip, + struct audioformat *fmt, int entity_id, + unsigned long *visited, bool validate) { struct uac3_clock_source_descriptor *source; struct uac3_clock_selector_descriptor *selector; @@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id); if (source) { entity_id = source->bClockID; - if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3, + if (validate && !uac_clock_source_is_valid(chip, fmt, entity_id)) { usb_audio_err(chip, "clock source %d is not valid, cannot use\n", @@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, } cur = ret; - ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1], + ret = __uac3_clock_find_source(chip, fmt, + selector->baCSourceID[ret - 1], visited, validate); if (!validate || ret > 0 || !chip->autoclock) return ret; @@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, if (i == cur) continue; - ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1], - visited, true); + ret = __uac3_clock_find_source(chip, fmt, + selector->baCSourceID[i - 1], + visited, true); if (ret < 0) continue; @@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf, entity_id); if (multiplier) - return __uac3_clock_find_source(chip, multiplier->bCSourceID, + return __uac3_clock_find_source(chip, fmt, + multiplier->bCSourceID, visited, validate); return -EINVAL; @@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, * * Returns the clock source UnitID (>=0) on success, or an error. */ -int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol, - int entity_id, bool validate) +int snd_usb_clock_find_source(struct snd_usb_audio *chip, + struct audioformat *fmt, bool validate) { DECLARE_BITMAP(visited, 256); memset(visited, 0, sizeof(visited)); - switch (protocol) { + switch (fmt->protocol) { case UAC_VERSION_2: - return __uac_clock_find_source(chip, entity_id, visited, + return __uac_clock_find_source(chip, fmt, fmt->clock, visited, validate); case UAC_VERSION_3: - return __uac3_clock_find_source(chip, entity_id, visited, + return __uac3_clock_find_source(chip, fmt, fmt->clock, visited, validate); default: return -EINVAL; @@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, * automatic clock selection if the current clock is not * valid. */ - clock = snd_usb_clock_find_source(chip, fmt->protocol, - fmt->clock, true); + clock = snd_usb_clock_find_source(chip, fmt, true); if (clock < 0) { /* We did not find a valid clock, but that might be * because the current sample rate does not match an @@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, * and we will do another validation after setting the * rate. 
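Put together, set_sample_rate_v2v3() now follows a three-step pattern, condensed here with the rate programming elided:

clock = snd_usb_clock_find_source(chip, fmt, true);      /* 1: prefer a valid clock */
if (clock < 0) {
        clock = snd_usb_clock_find_source(chip, fmt, false); /* 2: settle for any   */
        if (clock < 0)
                return clock;
}
/* ... program the new sample rate ... */
if (!uac_clock_source_is_valid(chip, fmt, clock))        /* 3: re-check at new rate */
        return -ENXIO;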
*/ - clock = snd_usb_clock_find_source(chip, fmt->protocol, - fmt->clock, false); + clock = snd_usb_clock_find_source(chip, fmt, false); if (clock < 0) return clock; } @@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, validation: /* validate clock after rate change */ - if (!uac_clock_source_is_valid(chip, fmt->protocol, clock)) + if (!uac_clock_source_is_valid(chip, fmt, clock)) return -ENXIO; return 0; } diff --git a/sound/usb/clock.h b/sound/usb/clock.h index 076e31b79ee0..68df0fbe09d0 100644 --- a/sound/usb/clock.h +++ b/sound/usb/clock.h @@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt, int rate); -int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol, - int entity_id, bool validate); +int snd_usb_clock_find_source(struct snd_usb_audio *chip, + struct audioformat *fmt, bool validate); #endif /* __USBAUDIO_CLOCK_H */ diff --git a/sound/usb/format.c b/sound/usb/format.c index d79db71305f6..f4f0cf3deaf0 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -296,6 +296,9 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip, case USB_ID(0x0E41, 0x4242): /* Line6 Helix Rack */ case USB_ID(0x0E41, 0x4244): /* Line6 Helix LT */ case USB_ID(0x0E41, 0x4246): /* Line6 HX-Stomp */ + case USB_ID(0x0E41, 0x4248): /* Line6 Helix >= fw 2.82 */ + case USB_ID(0x0E41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */ + case USB_ID(0x0E41, 0x424a): /* Line6 Helix LT >= fw 2.82 */ /* supported rates: 48Khz */ kfree(fp->rate_table); fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL); @@ -322,8 +325,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip, struct usb_device *dev = chip->dev; unsigned char tmp[2], *data; int nr_triplets, data_size, ret = 0, ret_l6; - int clock = snd_usb_clock_find_source(chip, fp->protocol, - fp->clock, false); + int clock = snd_usb_clock_find_source(chip, fp, false); if (clock < 0) { dev_err(&dev->dev, diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index b5a3f754a4f1..4f096685ed65 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -305,7 +305,7 @@ static void line6_data_received(struct urb *urb) line6_midibuf_read(mb, line6->buffer_message, LINE6_MIDI_MESSAGE_MAXLEN); - if (done == 0) + if (done <= 0) break; line6->message_length = done; diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c index 8d6eefa0d936..6a70463f82c4 100644 --- a/sound/usb/line6/midibuf.c +++ b/sound/usb/line6/midibuf.c @@ -159,7 +159,7 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data, int midi_length_prev = midibuf_message_length(this->command_prev); - if (midi_length_prev > 0) { + if (midi_length_prev > 1) { midi_length = midi_length_prev - 1; repeat = 1; } else diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 45eee5cc312e..d2a050bb8341 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state, return 0; } +static int parse_term_effect_unit(struct mixer_build *state, + struct usb_audio_term *term, + void *p1, int id) +{ + term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */ + term->id = id; + return 0; +} + static int parse_term_uac2_clock_source(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) @@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id, UAC3_PROCESSING_UNIT); case PTYPE(UAC_VERSION_2, 
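
/*
 * The fallback added above is deliberately narrow: a UAC2 clock that
 * answers "invalid" to the CLOCK_VALID request is still accepted, but
 * only when the format offers exactly one sample rate, the descriptor
 * found really is the clock the format refers to, and the clock is not
 * an external source. A minimal standalone sketch of that acceptance
 * rule follows; all names here are illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

/* UAC2 encodes "external clock" as attribute bits 00 */
#define CLOCK_TYPE_EXTERNAL 0

struct fmt { int nr_rates; int clock_type; };

static bool clock_usable(bool reported_valid, const struct fmt *f)
{
	if (reported_valid)
		return true;
	/* quirk path: trust a fixed-rate, non-external clock anyway */
	return f->nr_rates == 1 && f->clock_type != CLOCK_TYPE_EXTERNAL;
}

int main(void)
{
	struct fmt f = { .nr_rates = 1, .clock_type = 1 };
	printf("%d\n", clock_usable(false, &f)); /* 1: accepted via quirk */
	return 0;
}
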
UAC2_EFFECT_UNIT): case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT): - return parse_term_proc_unit(state, term, p1, id, - UAC3_EFFECT_UNIT); + return parse_term_effect_unit(state, term, p1, id); case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT): case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2): case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT): @@ -2930,6 +2938,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, continue; iface = usb_ifnum_to_if(dev, intf); + if (!iface) + continue; + num = iface->num_altsetting; if (num < 2) diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c index 7d460b1f1735..74c00c905d24 100644 --- a/sound/usb/mixer_scarlett_gen2.c +++ b/sound/usb/mixer_scarlett_gen2.c @@ -261,34 +261,34 @@ static const struct scarlett2_device_info s6i6_gen2_info = { }, .ports = { - { + [SCARLETT2_PORT_TYPE_NONE] = { .id = 0x000, .num = { 1, 0, 8, 8, 8 }, .src_descr = "Off", .src_num_offset = 0, }, - { + [SCARLETT2_PORT_TYPE_ANALOGUE] = { .id = 0x080, .num = { 4, 4, 4, 4, 4 }, .src_descr = "Analogue %d", .src_num_offset = 1, .dst_descr = "Analogue Output %02d Playback" }, - { + [SCARLETT2_PORT_TYPE_SPDIF] = { .id = 0x180, .num = { 2, 2, 2, 2, 2 }, .src_descr = "S/PDIF %d", .src_num_offset = 1, .dst_descr = "S/PDIF Output %d Playback" }, - { + [SCARLETT2_PORT_TYPE_MIX] = { .id = 0x300, .num = { 10, 18, 18, 18, 18 }, .src_descr = "Mix %c", .src_num_offset = 65, .dst_descr = "Mixer Input %02d Capture" }, - { + [SCARLETT2_PORT_TYPE_PCM] = { .id = 0x600, .num = { 6, 6, 6, 6, 6 }, .src_descr = "PCM %d", @@ -317,44 +317,44 @@ static const struct scarlett2_device_info s18i8_gen2_info = { }, .ports = { - { + [SCARLETT2_PORT_TYPE_NONE] = { .id = 0x000, .num = { 1, 0, 8, 8, 4 }, .src_descr = "Off", .src_num_offset = 0, }, - { + [SCARLETT2_PORT_TYPE_ANALOGUE] = { .id = 0x080, .num = { 8, 6, 6, 6, 6 }, .src_descr = "Analogue %d", .src_num_offset = 1, .dst_descr = "Analogue Output %02d Playback" }, - { + [SCARLETT2_PORT_TYPE_SPDIF] = { + .id = 0x180, /* S/PDIF outputs aren't available at 192KHz * but are included in the USB mux I/O * assignment message anyway */ - .id = 0x180, .num = { 2, 2, 2, 2, 2 }, .src_descr = "S/PDIF %d", .src_num_offset = 1, .dst_descr = "S/PDIF Output %d Playback" }, - { + [SCARLETT2_PORT_TYPE_ADAT] = { .id = 0x200, .num = { 8, 0, 0, 0, 0 }, .src_descr = "ADAT %d", .src_num_offset = 1, }, - { + [SCARLETT2_PORT_TYPE_MIX] = { .id = 0x300, .num = { 10, 18, 18, 18, 18 }, .src_descr = "Mix %c", .src_num_offset = 65, .dst_descr = "Mixer Input %02d Capture" }, - { + [SCARLETT2_PORT_TYPE_PCM] = { .id = 0x600, .num = { 20, 18, 18, 14, 10 }, .src_descr = "PCM %d", @@ -387,20 +387,20 @@ static const struct scarlett2_device_info s18i20_gen2_info = { }, .ports = { - { + [SCARLETT2_PORT_TYPE_NONE] = { .id = 0x000, .num = { 1, 0, 8, 8, 6 }, .src_descr = "Off", .src_num_offset = 0, }, - { + [SCARLETT2_PORT_TYPE_ANALOGUE] = { .id = 0x080, .num = { 8, 10, 10, 10, 10 }, .src_descr = "Analogue %d", .src_num_offset = 1, .dst_descr = "Analogue Output %02d Playback" }, - { + [SCARLETT2_PORT_TYPE_SPDIF] = { /* S/PDIF outputs aren't available at 192KHz * but are included in the USB mux I/O * assignment message anyway @@ -411,21 +411,21 @@ static const struct scarlett2_device_info s18i20_gen2_info = { .src_num_offset = 1, .dst_descr = "S/PDIF Output %d Playback" }, - { + [SCARLETT2_PORT_TYPE_ADAT] = { .id = 0x200, .num = { 8, 8, 8, 4, 0 }, .src_descr = "ADAT %d", .src_num_offset = 1, .dst_descr = "ADAT Output %d Playback" }, - { + [SCARLETT2_PORT_TYPE_MIX] 
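
/*
 * Rewriting the port tables with designated initializers pins every
 * row to its SCARLETT2_PORT_TYPE_* slot, so reordering or extending
 * the enum can no longer silently shift rows onto the wrong type.
 * A compact illustration of the idiom with made-up names:
 */
#include <stdio.h>

enum port_type { PORT_NONE, PORT_ANALOGUE, PORT_SPDIF, PORT_TYPE_COUNT };

struct port_info { int id; const char *descr; };

static const struct port_info ports[PORT_TYPE_COUNT] = {
	[PORT_NONE]     = { .id = 0x000, .descr = "Off"         },
	[PORT_SPDIF]    = { .id = 0x180, .descr = "S/PDIF %d"   },
	[PORT_ANALOGUE] = { .id = 0x080, .descr = "Analogue %d" },
	/* textual order no longer matters; the index names the slot */
};

int main(void)
{
	printf("%#x\n", ports[PORT_SPDIF].id); /* 0x180 */
	return 0;
}
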
= { .id = 0x300, .num = { 10, 18, 18, 18, 18 }, .src_descr = "Mix %c", .src_num_offset = 65, .dst_descr = "Mixer Input %02d Capture" }, - { + [SCARLETT2_PORT_TYPE_PCM] = { .id = 0x600, .num = { 20, 18, 18, 14, 10 }, .src_descr = "PCM %d", @@ -558,11 +558,11 @@ static const struct scarlett2_config /* proprietary request/response format */ struct scarlett2_usb_packet { - u32 cmd; - u16 size; - u16 seq; - u32 error; - u32 pad; + __le32 cmd; + __le16 size; + __le16 seq; + __le32 error; + __le32 pad; u8 data[]; }; @@ -664,11 +664,11 @@ static int scarlett2_usb( "Scarlett Gen 2 USB invalid response; " "cmd tx/rx %d/%d seq %d/%d size %d/%d " "error %d pad %d\n", - le16_to_cpu(req->cmd), le16_to_cpu(resp->cmd), + le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd), le16_to_cpu(req->seq), le16_to_cpu(resp->seq), resp_size, le16_to_cpu(resp->size), - le16_to_cpu(resp->error), - le16_to_cpu(resp->pad)); + le32_to_cpu(resp->error), + le32_to_cpu(resp->pad)); err = -EINVAL; goto unlock; } @@ -687,7 +687,7 @@ error: /* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */ static void scarlett2_config_save(struct usb_mixer_interface *mixer) { - u32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE); + __le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE); scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD, &req, sizeof(u32), @@ -713,11 +713,11 @@ static int scarlett2_usb_set_config( const struct scarlett2_config config_item = scarlett2_config_items[config_item_num]; struct { - u32 offset; - u32 bytes; - s32 value; + __le32 offset; + __le32 bytes; + __le32 value; } __packed req; - u32 req2; + __le32 req2; int err; struct scarlett2_mixer_data *private = mixer->private_data; @@ -753,8 +753,8 @@ static int scarlett2_usb_get( int offset, void *buf, int size) { struct { - u32 offset; - u32 size; + __le32 offset; + __le32 size; } __packed req; req.offset = cpu_to_le32(offset); @@ -794,8 +794,8 @@ static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer, const struct scarlett2_device_info *info = private->info; struct { - u16 mix_num; - u16 data[SCARLETT2_INPUT_MIX_MAX]; + __le16 mix_num; + __le16 data[SCARLETT2_INPUT_MIX_MAX]; } __packed req; int i, j; @@ -850,9 +850,9 @@ static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer) }; struct { - u16 pad; - u16 num; - u32 data[SCARLETT2_MUX_MAX]; + __le16 pad; + __le16 num; + __le32 data[SCARLETT2_MUX_MAX]; } __packed req; req.pad = 0; @@ -911,9 +911,9 @@ static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer, u16 *levels) { struct { - u16 pad; - u16 num_meters; - u32 magic; + __le16 pad; + __le16 num_meters; + __le32 magic; } __packed req; u32 resp[SCARLETT2_NUM_METERS]; int i, err; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index ff5ab24f3bd1..ad8f38380aa3 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -348,6 +348,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ep = 0x84; ifnum = 0; goto add_sync_ep_from_ifnum; + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + ep = 0x81; + ifnum = 2; + goto add_sync_ep_from_ifnum; case USB_ID(0x0582, 0x01d8): /* BOSS Katana */ /* BOSS Katana amplifiers do not need quirks */ return 0; @@ -370,7 +374,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, add_sync_ep_from_ifnum: iface = usb_ifnum_to_if(dev, ifnum); - if (!iface || iface->num_altsetting == 0) + if (!iface || iface->num_altsetting < 2) return -EINVAL; alts = &iface->altsetting[1]; @@ -506,15 +510,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat 
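
/*
 * Retyping the on-the-wire fields as __le32/__le16 records the
 * protocol's endianness in the type system, so sparse can flag any
 * access that bypasses cpu_to_le32()/le32_to_cpu(); the same hunk
 * corrects the error-path prints that decoded 32-bit fields with
 * le16_to_cpu(). A userspace analogue of the discipline, using
 * glibc's <endian.h> and an illustrative struct (not the driver's):
 */
#include <endian.h>   /* htole32()/le32toh(), glibc */
#include <stdint.h>
#include <stdio.h>

struct wire_req {                  /* fields stored little-endian */
	uint32_t cmd;
	uint16_t size;
} __attribute__((packed));

int main(void)
{
	struct wire_req req;
	req.cmd  = htole32(0x800006u);           /* host order -> wire */
	req.size = htole16(8);
	printf("cmd = %#x\n", le32toh(req.cmd)); /* back to host order */
	return 0;
}
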
*fmt) if (WARN_ON(!iface)) return -EINVAL; alts = usb_altnum_to_altsetting(iface, fmt->altsetting); - altsd = get_iface_desc(alts); - if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting)) + if (WARN_ON(!alts)) return -EINVAL; + altsd = get_iface_desc(alts); - if (fmt == subs->cur_audiofmt) + if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt) return 0; /* close the old interface */ - if (subs->interface >= 0 && subs->interface != fmt->iface) { + if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) { if (!subs->stream->chip->keep_iface) { err = usb_set_interface(subs->dev, subs->interface, 0); if (err < 0) { @@ -528,6 +532,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) subs->altset_idx = 0; } + if (subs->need_setup_fmt) + subs->need_setup_fmt = false; + /* set interface */ if (iface->cur_altsetting != alts) { err = snd_usb_select_mode_quirk(subs, fmt); @@ -1735,6 +1742,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea subs->data_endpoint->retire_data_urb = retire_playback_urb; subs->running = 0; return 0; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (subs->stream->chip->setup_fmt_after_resume_quirk) { + stop_endpoints(subs, true); + subs->need_setup_fmt = true; + return 0; + } + break; } return -EINVAL; @@ -1767,6 +1781,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream subs->data_endpoint->retire_data_urb = retire_capture_urb; subs->running = 1; return 0; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (subs->stream->chip->setup_fmt_after_resume_quirk) { + stop_endpoints(subs, true); + subs->need_setup_fmt = true; + return 0; + } + break; } return -EINVAL; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 70c338f3ae24..d187aa6d50db 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), .vendor_name = "Dell", .product_name = "WD19 Dock", .profile_name = "Dell-WD15-Dock", - .ifnum = QUIRK_NO_INTERFACE + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_SETUP_FMT_AFTER_RESUME } }, /* MOTU Microbook II */ diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 349e1e52996d..7448ab07bd36 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip, return snd_usb_create_mixer(chip, quirk->ifnum, 0); } + +static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip, + struct usb_interface *iface, + struct usb_driver *driver, + const struct snd_usb_audio_quirk *quirk) +{ + chip->setup_fmt_after_resume_quirk = 1; + return 1; /* Continue with creating streams and mixer */ +} + /* * audio-interface quirks * @@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk, [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk, + [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk, }; if (quirk->type < QUIRK_TYPE_COUNT) { @@ -1102,6 +1113,31 @@ free_buf: return err; } +static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev) +{ + int ret; + + if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0))) + return -EINVAL; + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + 1, USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0, 0, NULL, 0, 1000); + + if (ret < 0) + return ret; + + msleep(2000); + + ret = usb_control_msg(dev, 
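
/*
 * The need_setup_fmt flag is a sticky "dirty" bit: the SUSPEND trigger
 * sets it, and set_format() then refuses its usual same-format shortcut
 * so the interface is fully reprogrammed on resume. A self-contained
 * sketch of the pattern, with illustrative names:
 */
#include <stdbool.h>
#include <stdio.h>

struct stream { int cur_fmt; bool need_setup_fmt; };

static void on_suspend(struct stream *s)
{
	s->need_setup_fmt = true;   /* force a full setup on next start */
}

static void set_format(struct stream *s, int fmt)
{
	if (fmt == s->cur_fmt && !s->need_setup_fmt)
		return;                       /* nothing changed, skip */
	s->need_setup_fmt = false;
	s->cur_fmt = fmt;
	printf("reprogrammed format %d\n", fmt);
}

int main(void)
{
	struct stream s = { .cur_fmt = -1, .need_setup_fmt = false };
	set_format(&s, 48000);   /* programs */
	set_format(&s, 48000);   /* skipped */
	on_suspend(&s);
	set_format(&s, 48000);   /* programs again despite same format */
	return 0;
}
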
usb_sndctrlpipe(dev, 0), + 1, USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x20, 0, NULL, 0, 1000); + + if (ret < 0) + return ret; + + return 0; +} + /* * Setup quirks */ @@ -1286,6 +1322,19 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev, return 0; } +int snd_usb_apply_boot_quirk_once(struct usb_device *dev, + struct usb_interface *intf, + const struct snd_usb_audio_quirk *quirk, + unsigned int id) +{ + switch (id) { + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + return snd_usb_motu_m_series_boot_quirk(dev); + } + + return 0; +} + /* * check if the device uses big-endian samples */ @@ -1386,10 +1435,12 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ + case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */ case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ + case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */ return true; } diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h index a80e0ddd0736..df0355843a4c 100644 --- a/sound/usb/quirks.h +++ b/sound/usb/quirks.h @@ -20,6 +20,11 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev, const struct snd_usb_audio_quirk *quirk, unsigned int usb_id); +int snd_usb_apply_boot_quirk_once(struct usb_device *dev, + struct usb_interface *intf, + const struct snd_usb_audio_quirk *quirk, + unsigned int usb_id); + void snd_usb_set_format_quirk(struct snd_usb_substream *subs, struct audioformat *fmt); diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index feb30f9c1716..e360680f45f3 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -33,7 +33,7 @@ struct snd_usb_audio { wait_queue_head_t shutdown_wait; unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ unsigned int tx_length_quirk:1; /* Put length specifier in transfers */ - + unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */ int num_interfaces; int num_suspended_intf; int sample_rate_read_error; @@ -98,6 +98,7 @@ enum quirk_type { QUIRK_AUDIO_EDIROL_UAXX, QUIRK_AUDIO_ALIGN_TRANSFER, QUIRK_AUDIO_STANDARD_MIXER, + QUIRK_SETUP_FMT_AFTER_RESUME, QUIRK_TYPE_COUNT }; diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c index d1caa8ed9e68..9985fc139487 100644 --- a/sound/usb/usx2y/usX2Yhwdep.c +++ b/sound/usb/usx2y/usX2Yhwdep.c @@ -119,7 +119,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw, info->num_dsps = 2; // 0: Prepad Data, 1: FPGA Code if (us428->chip_status & USX2Y_STAT_CHIP_INIT) info->chip_ready = 1; - info->version = USX2Y_DRIVER_VERSION; + info->version = USX2Y_DRIVER_VERSION; return 0; } diff --git a/sound/usb/validate.c b/sound/usb/validate.c index 389e8657434a..5a3c4f7882b0 100644 --- a/sound/usb/validate.c +++ b/sound/usb/validate.c @@ -110,7 +110,7 @@ static bool validate_processing_unit(const void *p, default: if (v->type == UAC1_EXTENSION_UNIT) return true; /* OK */ - switch (d->wProcessType) { + switch (le16_to_cpu(d->wProcessType)) { case UAC_PROCESS_UP_DOWNMIX: case UAC_PROCESS_DOLBY_PROLOGIC: if (d->bLength < len + 1) /* bNrModes */ @@ -125,7 +125,7 @@ static bool validate_processing_unit(const void *p, case UAC_VERSION_2: if (v->type == UAC2_EXTENSION_UNIT_V2) return true; /* OK */ - switch 
(d->wProcessType) { + switch (le16_to_cpu(d->wProcessType)) { case UAC2_PROCESS_UP_DOWNMIX: case UAC2_PROCESS_DOLBY_PROLOCIC: /* SiC! */ if (d->bLength < len + 1) /* bNrModes */ @@ -142,7 +142,7 @@ static bool validate_processing_unit(const void *p, len += 2; /* wClusterDescrID */ break; } - switch (d->wProcessType) { + switch (le16_to_cpu(d->wProcessType)) { case UAC3_PROCESS_UP_DOWNMIX: if (d->bLength < len + 1) /* bNrModes */ return false; diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c index 8cb504d30384..5ef1c15e88ad 100644 --- a/tools/accounting/getdelays.c +++ b/tools/accounting/getdelays.c @@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid, msg.g.version = 0x1; na = (struct nlattr *) GENLMSG_DATA(&msg); na->nla_type = nla_type; - na->nla_len = nla_len + 1 + NLA_HDRLEN; + na->nla_len = nla_len + NLA_HDRLEN; memcpy(NLA_DATA(na), nla_data, nla_len); msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len); diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt index e0b85930dd77..5cb9f009f2be 100644 --- a/tools/arch/x86/lib/x86-opcode-map.txt +++ b/tools/arch/x86/lib/x86-opcode-map.txt @@ -333,7 +333,7 @@ AVXcode: 1 06: CLTS 07: SYSRET (o64) 08: INVD -09: WBINVD +09: WBINVD | WBNOINVD (F3) 0a: 0b: UD2 (1B) 0c: @@ -364,7 +364,7 @@ AVXcode: 1 # a ModR/M byte. 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv -1c: +1c: Grp20 (1A),(1C) 1d: 1e: 1f: NOP Ev @@ -792,6 +792,8 @@ f3: Grp17 (1A) f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) +f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3) +f9: MOVDIRI My,Gy EndTable Table: 3-byte opcode 2 (0x0f 0x3a) @@ -907,7 +909,7 @@ EndTable GrpTable: Grp3_2 0: TEST Ev,Iz -1: +1: TEST Ev,Iz 2: NOT Ev 3: NEG Ev 4: MUL rAX,Ev @@ -943,9 +945,9 @@ GrpTable: Grp6 EndTable GrpTable: Grp7 -0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) -1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) -2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) +0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) +1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B) 3: LIDT Ms 4: SMSW Mw/Rv 5: rdpkru (110),(11B) | wrpkru (111),(11B) @@ -1020,7 +1022,7 @@ GrpTable: Grp15 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) 4: XSAVE | ptwrite Ey (F3),(11B) 5: XRSTOR | lfence (11B) -6: XSAVEOPT | clwb (66) | mfence (11B) +6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B) 7: clflush | clflushopt (66) | sfence (11B) EndTable @@ -1051,6 +1053,10 @@ GrpTable: Grp19 6: vscatterpf1qps/d Wx (66),(ev) EndTable +GrpTable: Grp20 +0: cldemote Mb +EndTable + # AMD's Prefetch Group GrpTable: GrpP 0: PREFETCH diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk index b02a36b2c14f..a42015b305f4 
100644 --- a/tools/arch/x86/tools/gen-insn-attr-x86.awk +++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk @@ -69,7 +69,7 @@ BEGIN { lprefix1_expr = "\\((66|!F3)\\)" lprefix2_expr = "\\(F3\\)" - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" + lprefix3_expr = "\\((F2|!F3|66&F2)\\)" lprefix_expr = "\\((66|F2|F3)\\)" max_lprefix = 4 @@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod) return add_flags(imm, mod) } -/^[0-9a-f]+\:/ { +/^[0-9a-f]+:/ { if (NR == 1) next # get index diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 5d1995fd369c..5535650800ab 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -16,7 +16,13 @@ CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include # isn't set and when invoked from selftests build, where srctree # is set to ".". building_out_of_srctree is undefined for in srctree # builds +ifeq ($(srctree),) +update_srctree := 1 +endif ifndef building_out_of_srctree +update_srctree := 1 +endif +ifeq ($(update_srctree),1) srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index d66131f69689..397e5716ab6d 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -26,7 +26,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw, bool is_plain_text) { if (is_plain_text) - jsonw_printf(jw, "%p", data); + jsonw_printf(jw, "%p", *(void **)data); else jsonw_printf(jw, "%lu", *(unsigned long *)data); } diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 1ef45e55039e..2f017caa678d 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -117,6 +117,25 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) return prog_cnt; } +static int cgroup_has_attached_progs(int cgroup_fd) +{ + enum bpf_attach_type type; + bool no_prog = true; + + for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { + int count = count_attached_bpf_progs(cgroup_fd, type); + + if (count < 0 && errno != EINVAL) + return -1; + + if (count > 0) { + no_prog = false; + break; + } + } + + return no_prog ? 
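
/*
 * cgroup_has_attached_progs() gives both show paths one shared answer
 * with a <0 / 0 / 1 contract instead of two copies of the counting
 * loop; the real helper additionally treats errno == EINVAL as "attach
 * type unknown to this kernel" rather than a failure. A runnable
 * sketch of the contract, with a stubbed query in place of the kernel
 * call (both names hypothetical):
 */
#include <stdio.h>

#define NUM_TYPES 4

/* stand-in for the per-type kernel query */
static int query_prog_count(int cgroup_fd, int type)
{
	(void)cgroup_fd;
	return type == 2 ? 3 : 0;   /* pretend type 2 has programs */
}

/* <0 on error, 0 if nothing attached, 1 if something is */
static int has_attached_progs(int cgroup_fd)
{
	for (int type = 0; type < NUM_TYPES; type++) {
		int count = query_prog_count(cgroup_fd, type);
		if (count < 0)
			return -1;
		if (count > 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", has_attached_progs(3)); /* 1 with this stub */
	return 0;
}
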
0 : 1; +} static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, int level) { @@ -161,6 +180,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, static int do_show(int argc, char **argv) { enum bpf_attach_type type; + int has_attached_progs; const char *path; int cgroup_fd; int ret = -1; @@ -192,6 +212,16 @@ static int do_show(int argc, char **argv) goto exit; } + has_attached_progs = cgroup_has_attached_progs(cgroup_fd); + if (has_attached_progs < 0) { + p_err("can't query bpf programs attached to %s: %s", + path, strerror(errno)); + goto exit_cgroup; + } else if (!has_attached_progs) { + ret = 0; + goto exit_cgroup; + } + if (json_output) jsonw_start_array(json_wtr); else @@ -212,6 +242,7 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); +exit_cgroup: close(cgroup_fd); exit: return ret; @@ -228,7 +259,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftw) { enum bpf_attach_type type; - bool skip = true; + int has_attached_progs; int cgroup_fd; if (typeflag != FTW_D) @@ -240,22 +271,13 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb, return SHOW_TREE_FN_ERR; } - for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { - int count = count_attached_bpf_progs(cgroup_fd, type); - - if (count < 0 && errno != EINVAL) { - p_err("can't query bpf programs attached to %s: %s", - fpath, strerror(errno)); - close(cgroup_fd); - return SHOW_TREE_FN_ERR; - } - if (count > 0) { - skip = false; - break; - } - } - - if (skip) { + has_attached_progs = cgroup_has_attached_progs(cgroup_fd); + if (has_attached_progs < 0) { + p_err("can't query bpf programs attached to %s: %s", + fpath, strerror(errno)); + close(cgroup_fd); + return SHOW_TREE_FN_ERR; + } else if (!has_attached_progs) { close(cgroup_fd); return 0; } diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 43fdbbfe41bb..2e388421c32f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -493,14 +493,14 @@ static int do_dump(int argc, char **argv) info = &info_linear->info; if (mode == DUMP_JITED) { - if (info->jited_prog_len == 0) { + if (info->jited_prog_len == 0 || !info->jited_prog_insns) { p_info("no instructions returned"); goto err_free; } buf = (unsigned char *)(info->jited_prog_insns); member_len = info->jited_prog_len; } else { /* DUMP_XLATED */ - if (info->xlated_prog_len == 0) { + if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) { p_err("error retrieving insn dump: kernel.kptr_restrict set?"); goto err_free; } diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index 494d7ae3614d..5b91ee65a080 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -174,7 +174,7 @@ static const char *print_call(void *private_data, struct kernel_sym *sym; if (insn->src_reg == BPF_PSEUDO_CALL && - (__u32) insn->imm < dd->nr_jited_ksyms) + (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms) address = dd->jited_ksyms[insn->imm]; sym = kernel_syms_search(dd, address); diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h index 980cb9266718..5e9e781905ed 100644 --- a/tools/include/linux/string.h +++ b/tools/include/linux/string.h @@ -17,7 +17,15 @@ int strtobool(const char *s, bool *res); * However uClibc headers also define __GLIBC__ hence the hack below */ #if defined(__GLIBC__) && !defined(__UCLIBC__) +// pragma diagnostic was introduced in gcc 4.6 +#if __GNUC__ > 4 
|| (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wredundant-decls" +#endif extern size_t strlcpy(char *dest, const char *src, size_t size); +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif #endif char *str_error_r(int errnum, char *buf, size_t buflen); diff --git a/tools/include/uapi/linux/netlink.h b/tools/include/uapi/linux/netlink.h index 0a4d73317759..21db87138514 100644 --- a/tools/include/uapi/linux/netlink.h +++ b/tools/include/uapi/linux/netlink.h @@ -32,6 +32,9 @@ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG +/* For tkernel GPU track feature */ +#define NETLINK_USER 30 + #define MAX_LINKS 32 struct sockaddr_nl { diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index ad1b9e646c49..4cf93110c259 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -270,6 +270,7 @@ class ArchX86(Arch): def __init__(self, exit_reasons): self.sc_perf_evt_open = 298 self.ioctl_numbers = IOCTL_NUMBERS + self.exit_reason_field = 'exit_reason' self.exit_reasons = exit_reasons def debugfs_is_child(self, field): @@ -289,6 +290,7 @@ class ArchPPC(Arch): # numbers depend on the wordsize. char_ptr_size = ctypes.sizeof(ctypes.c_char_p) self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16 + self.exit_reason_field = 'exit_nr' self.exit_reasons = {} def debugfs_is_child(self, field): @@ -300,6 +302,7 @@ class ArchA64(Arch): def __init__(self): self.sc_perf_evt_open = 241 self.ioctl_numbers = IOCTL_NUMBERS + self.exit_reason_field = 'esr_ec' self.exit_reasons = AARCH64_EXIT_REASONS def debugfs_is_child(self, field): @@ -311,6 +314,7 @@ class ArchS390(Arch): def __init__(self): self.sc_perf_evt_open = 331 self.ioctl_numbers = IOCTL_NUMBERS + self.exit_reason_field = None self.exit_reasons = None def debugfs_is_child(self, field): @@ -541,8 +545,8 @@ class TracepointProvider(Provider): """ filters = {} filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS) - if ARCH.exit_reasons: - filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons) + if ARCH.exit_reason_field and ARCH.exit_reasons: + filters['kvm_exit'] = (ARCH.exit_reason_field, ARCH.exit_reasons) return filters def _get_available_fields(self): diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 7aba8243a0e7..bd021a0eeef8 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -210,6 +210,7 @@ static bool fs__env_override(struct fs *fs) size_t name_len = strlen(fs->name); /* name + "_PATH" + '\0' */ char upper_name[name_len + 5 + 1]; + memcpy(upper_name, fs->name, name_len); mem_toupper(upper_name, name_len); strcpy(&upper_name[name_len], "_PATH"); @@ -219,7 +220,8 @@ static bool fs__env_override(struct fs *fs) return false; fs->found = true; - strncpy(fs->path, override_path, sizeof(fs->path)); + strncpy(fs->path, override_path, sizeof(fs->path) - 1); + fs->path[sizeof(fs->path) - 1] = '\0'; return true; } diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 56ce6292071b..33e2638ef7f0 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -215,7 +215,7 @@ check_abi: $(OUTPUT)libbpf.so "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ "Please make sure all LIBBPF_API symbols are" \ "versioned in $(VERSION_SCRIPT)." 
>&2; \ - readelf -s --wide $(OUTPUT)libbpf-in.o | \ + readelf -s --wide $(BPF_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index cbb933532981..9d0485959308 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -189,7 +189,7 @@ static void * alloc_zero_tailing_info(const void *orecord, __u32 cnt, __u32 actual_rec_size, __u32 expected_rec_size) { - __u64 info_len = actual_rec_size * cnt; + __u64 info_len = (__u64)actual_rec_size * cnt; void *info, *nrecord; int i; diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c index 8c67561c93b0..3ed1a27b5f7c 100644 --- a/tools/lib/bpf/bpf_prog_linfo.c +++ b/tools/lib/bpf/bpf_prog_linfo.c @@ -101,6 +101,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) { struct bpf_prog_linfo *prog_linfo; __u32 nr_linfo, nr_jited_func; + __u64 data_sz; nr_linfo = info->nr_line_info; @@ -122,11 +123,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) /* Copy xlated line_info */ prog_linfo->nr_linfo = nr_linfo; prog_linfo->rec_size = info->line_info_rec_size; - prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size); + data_sz = (__u64)nr_linfo * prog_linfo->rec_size; + prog_linfo->raw_linfo = malloc(data_sz); if (!prog_linfo->raw_linfo) goto err_free; - memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, - nr_linfo * prog_linfo->rec_size); + memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz); nr_jited_func = info->nr_jited_ksyms; if (!nr_jited_func || @@ -142,13 +143,12 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) /* Copy jited_line_info */ prog_linfo->nr_jited_func = nr_jited_func; prog_linfo->jited_rec_size = info->jited_line_info_rec_size; - prog_linfo->raw_jited_linfo = malloc(nr_linfo * - prog_linfo->jited_rec_size); + data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size; + prog_linfo->raw_jited_linfo = malloc(data_sz); if (!prog_linfo->raw_jited_linfo) goto err_free; memcpy(prog_linfo->raw_jited_linfo, - (void *)(long)info->jited_line_info, - nr_linfo * prog_linfo->jited_rec_size); + (void *)(long)info->jited_line_info, data_sz); /* Number of jited_line_info per jited func */ prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func * diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 1aa189a9112a..d606a358480d 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -269,10 +269,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) t = btf__type_by_id(btf, type_id); } +done: if (size < 0) return -EINVAL; - -done: if (nelems && size > UINT32_MAX / nelems) return -E2BIG; diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index ede55fec3618..87f27e2664c5 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -876,7 +876,6 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, __u16 vlen = btf_vlen(t); packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0; - align = packed ? 1 : btf_align_of(d->btf, id); btf_dump_printf(d, "%s%s%s {", is_struct ? "struct" : "union", @@ -906,6 +905,13 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, ";"); } + /* pad at the end, if necessary */ + if (is_struct) { + align = packed ? 
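
/*
 * Both allocation fixes above close the same trap: the product of two
 * 32-bit values is computed in 32 bits even when the result is stored
 * in a __u64, so one operand must be widened before the multiply.
 * A short demonstration:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rec_size = 0x10000, cnt = 0x10000;

	uint64_t wrapped = rec_size * cnt;           /* 32-bit multiply: wraps to 0 */
	uint64_t correct = (uint64_t)rec_size * cnt; /* widened first */

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped,
	       (unsigned long long)correct);         /* 0 vs 4294967296 */
	return 0;
}
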
1 : btf_align_of(d->btf, id); + btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align, + lvl + 1); + } + if (vlen) btf_dump_printf(d, "\n"); btf_dump_printf(d, "%s}", pfx(lvl)); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e0276520171b..b6403712c2f4 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1897,16 +1897,22 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) return -errno; new_fd = open("/", O_RDONLY | O_CLOEXEC); - if (new_fd < 0) + if (new_fd < 0) { + err = -errno; goto err_free_new_name; + } new_fd = dup3(fd, new_fd, O_CLOEXEC); - if (new_fd < 0) + if (new_fd < 0) { + err = -errno; goto err_close_new_fd; + } err = zclose(map->fd); - if (err) + if (err) { + err = -errno; goto err_close_new_fd; + } free(map->name); map->fd = new_fd; @@ -1925,7 +1931,7 @@ err_close_new_fd: close(new_fd); err_free_new_name: free(new_name); - return -errno; + return err; } int bpf_map__resize(struct bpf_map *map, __u32 max_entries) @@ -2535,7 +2541,9 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf, if (strncmp(local_name, targ_name, local_essent_len) == 0) { pr_debug("[%d] %s: found candidate [%d] %s\n", local_type_id, local_name, i, targ_name); - new_ids = realloc(cand_ids->data, cand_ids->len + 1); + new_ids = reallocarray(cand_ids->data, + cand_ids->len + 1, + sizeof(*cand_ids->data)); if (!new_ids) { err = -ENOMEM; goto err_out; @@ -3214,6 +3222,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, pr_warning("oom in prog realloc\n"); return -ENOMEM; } + prog->insns = new_insn; if (obj->btf_ext) { err = bpf_program_reloc_btf_ext(prog, obj, @@ -3225,7 +3234,6 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, memcpy(new_insn + prog->insns_cnt, text->insns, text->insns_cnt * sizeof(*insn)); - prog->insns = new_insn; prog->main_prog_cnt = prog->insns_cnt; prog->insns_cnt = new_cnt; pr_debug("added %zd insn from %s to prog %s\n", diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index a902838f9fcc..0c7386b0e42e 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -73,6 +73,21 @@ struct xsk_nl_info { int fd; }; +/* Up until and including Linux 5.3 */ +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +/* Up until and including Linux 5.3 */ +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + int xsk_umem__fd(const struct xsk_umem *umem) { return umem ? umem->fd : -EINVAL; @@ -133,6 +148,58 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, return 0; } +static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) +{ + struct xdp_mmap_offsets_v1 off_v1; + + /* getsockopt on a kernel <= 5.3 has no flags fields. + * Copy over the offsets to the correct places in the >=5.4 format + * and put the flags where they would have been on that kernel. 
+ */ + memcpy(&off_v1, off, sizeof(off_v1)); + + off->rx.producer = off_v1.rx.producer; + off->rx.consumer = off_v1.rx.consumer; + off->rx.desc = off_v1.rx.desc; + off->rx.flags = off_v1.rx.consumer + sizeof(__u32); + + off->tx.producer = off_v1.tx.producer; + off->tx.consumer = off_v1.tx.consumer; + off->tx.desc = off_v1.tx.desc; + off->tx.flags = off_v1.tx.consumer + sizeof(__u32); + + off->fr.producer = off_v1.fr.producer; + off->fr.consumer = off_v1.fr.consumer; + off->fr.desc = off_v1.fr.desc; + off->fr.flags = off_v1.fr.consumer + sizeof(__u32); + + off->cr.producer = off_v1.cr.producer; + off->cr.consumer = off_v1.cr.consumer; + off->cr.desc = off_v1.cr.desc; + off->cr.flags = off_v1.cr.consumer + sizeof(__u32); +} + +static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) +{ + socklen_t optlen; + int err; + + optlen = sizeof(*off); + err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen); + if (err) + return err; + + if (optlen == sizeof(*off)) + return 0; + + if (optlen == sizeof(struct xdp_mmap_offsets_v1)) { + xsk_mmap_offsets_v1(off); + return 0; + } + + return -EINVAL; +} + int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, @@ -141,7 +208,6 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, struct xdp_mmap_offsets off; struct xdp_umem_reg mr; struct xsk_umem *umem; - socklen_t optlen; void *map; int err; @@ -163,6 +229,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, umem->umem_area = umem_area; xsk_set_umem_config(&umem->config, usr_config); + memset(&mr, 0, sizeof(mr)); mr.addr = (uintptr_t)umem_area; mr.len = size; mr.chunk_size = umem->config.frame_size; @@ -189,8 +256,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, goto out_socket; } - optlen = sizeof(off); - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(umem->fd, &off); if (err) { err = -errno; goto out_socket; @@ -343,13 +409,18 @@ static int xsk_get_max_queues(struct xsk_socket *xsk) goto out; } - if (err || channels.max_combined == 0) + if (err) { /* If the device says it has no channels, then all traffic * is sent to a single stream, so max queues = 1. */ ret = 1; - else - ret = channels.max_combined; + } else { + /* Take the max of rx, tx, combined. Drivers return + * the number of channels in different ways. 
+ */ + ret = max(channels.max_rx, channels.max_tx); + ret = max(ret, (int)channels.max_combined); + } out: close(fd); @@ -465,6 +536,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk) } } else { xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (xsk->prog_fd < 0) + return -errno; err = xsk_lookup_bpf_maps(xsk); if (err) { close(xsk->prog_fd); @@ -491,7 +564,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, struct sockaddr_xdp sxdp = {}; struct xdp_mmap_offsets off; struct xsk_socket *xsk; - socklen_t optlen; int err; if (!umem || !xsk_ptr || !rx || !tx) @@ -550,8 +622,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, } } - optlen = sizeof(off); - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (err) { err = -errno; goto out_socket; @@ -637,7 +708,6 @@ out_xsk_alloc: int xsk_umem__delete(struct xsk_umem *umem) { struct xdp_mmap_offsets off; - socklen_t optlen; int err; if (!umem) @@ -646,8 +716,7 @@ int xsk_umem__delete(struct xsk_umem *umem) if (umem->refcount) return -EBUSY; - optlen = sizeof(off); - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(umem->fd, &off); if (!err) { munmap(umem->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * sizeof(__u64)); @@ -665,7 +734,6 @@ void xsk_socket__delete(struct xsk_socket *xsk) { size_t desc_sz = sizeof(struct xdp_desc); struct xdp_mmap_offsets off; - socklen_t optlen; int err; if (!xsk) @@ -676,8 +744,7 @@ void xsk_socket__delete(struct xsk_socket *xsk) close(xsk->prog_fd); } - optlen = sizeof(off); - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { munmap(xsk->rx->ring - off.rx.desc, diff --git a/tools/lib/string.c b/tools/lib/string.c index f2ae1b87c719..f645343815de 100644 --- a/tools/lib/string.c +++ b/tools/lib/string.c @@ -96,6 +96,10 @@ int strtobool(const char *s, bool *res) * If libc has strlcpy() then that version will override this * implementation: */ +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wignored-attributes" +#endif size_t __weak strlcpy(char *dest, const char *src, size_t size) { size_t ret = strlen(src); @@ -107,6 +111,9 @@ size_t __weak strlcpy(char *dest, const char *src, size_t size) } return ret; } +#ifdef __clang__ +#pragma clang diagnostic pop +#endif /** * skip_spaces - Removes leading whitespace from @str. diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile index 5b2cd5e58df0..5dbb0dde208c 100644 --- a/tools/lib/subcmd/Makefile +++ b/tools/lib/subcmd/Makefile @@ -28,7 +28,9 @@ ifeq ($(DEBUG),0) endif endif -ifeq ($(CC_NO_CLANG), 0) +ifeq ($(DEBUG),1) + CFLAGS += -O0 +else ifeq ($(CC_NO_CLANG), 0) CFLAGS += -O3 else CFLAGS += -O6 diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 5315f3787f8d..ecf882308d8a 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -97,6 +97,7 @@ EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION) LIB_INSTALL = libtraceevent.a libtraceevent.so* +LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL)) INCLUDES = -I. 
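
/*
 * xsk_get_mmap_offsets() tells kernels apart purely by how many bytes
 * getsockopt() wrote back: a v1-sized reply is up-converted in place,
 * synthesizing each flags offset just past the corresponding consumer
 * index. A sketch of the optlen dispatch, reduced to a single ring and
 * using illustrative struct names rather than the xdp ones:
 */
#include <stddef.h>
#include <stdio.h>

struct ring_off_v1 { unsigned long producer, consumer, desc; };
struct ring_off_v2 { unsigned long producer, consumer, desc, flags; };

/* up-convert in place when the kernel filled only a v1-sized reply */
static int normalize(struct ring_off_v2 *off, size_t optlen)
{
	if (optlen == sizeof(*off))
		return 0;                             /* new kernel */
	if (optlen == sizeof(struct ring_off_v1)) {
		/* old kernel: flags lived right after the consumer index */
		off->flags = off->consumer + sizeof(unsigned int);
		return 0;
	}
	return -1;                                    /* unknown layout */
}

int main(void)
{
	struct ring_off_v2 off = { 0, 128, 256, 0 };
	normalize(&off, sizeof(struct ring_off_v1));
	printf("flags offset = %lu\n", off.flags);    /* 132 */
	return 0;
}
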
-I $(srctree)/tools/include $(CONFIG_INCLUDES) @@ -207,10 +208,11 @@ define do_install $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2' endef -PKG_CONFIG_FILE = libtraceevent.pc +PKG_CONFIG_SOURCE_FILE = libtraceevent.pc +PKG_CONFIG_FILE := $(addprefix $(OUTPUT),$(PKG_CONFIG_SOURCE_FILE)) define do_install_pkgconfig_file if [ -n "${pkgconfig_dir}" ]; then \ - cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE}; \ + cp -f ${PKG_CONFIG_SOURCE_FILE}.template ${PKG_CONFIG_FILE}; \ sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE}; \ sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \ sed -i "s|LIB_DIR|${libdir}|g" ${PKG_CONFIG_FILE}; \ diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c index 552592d153fb..20eed719542e 100644 --- a/tools/lib/traceevent/parse-filter.c +++ b/tools/lib/traceevent/parse-filter.c @@ -1228,8 +1228,10 @@ filter_event(struct tep_event_filter *filter, struct tep_event *event, } filter_type = add_filter_type(filter, event->id); - if (filter_type == NULL) + if (filter_type == NULL) { + free_arg(arg); return TEP_ERRNO__MEM_ALLOC_FAILED; + } if (filter_type->filter) free_arg(filter_type->filter); @@ -1473,8 +1475,10 @@ static int copy_filter_type(struct tep_event_filter *filter, if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) { /* Add trivial event */ arg = allocate_arg(); - if (arg == NULL) + if (arg == NULL) { + free(str); return -1; + } arg->type = TEP_FILTER_ARG_BOOLEAN; if (strcmp(str, "TRUE") == 0) @@ -1483,8 +1487,11 @@ static int copy_filter_type(struct tep_event_filter *filter, arg->boolean.value = 0; filter_type = add_filter_type(filter, event->id); - if (filter_type == NULL) + if (filter_type == NULL) { + free(str); + free_arg(arg); return -1; + } filter_type->filter = arg; diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat index ea2ff4b94074..2a9b4fe4a84e 100644 --- a/tools/memory-model/linux-kernel.cat +++ b/tools/memory-model/linux-kernel.cat @@ -197,7 +197,7 @@ empty (wr-incoh | rw-incoh | ww-incoh) as plain-coherence (* Actual races *) let ww-nonrace = ww-vis & ((Marked * W) | rw-xbstar) & ((W * Marked) | wr-vis) let ww-race = (pre-race & co) \ ww-nonrace -let wr-race = (pre-race & (co? ; rf)) \ wr-vis +let wr-race = (pre-race & (co? 
; rf)) \ wr-vis \ rw-xbstar^-1 let rw-race = (pre-race & fr) \ rw-xbstar flag ~empty (ww-race | wr-race | rw-race) as data-race diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index d2a19b0bc05a..ee08aeff30a1 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -2,10 +2,6 @@ include ../scripts/Makefile.include include ../scripts/Makefile.arch -ifeq ($(ARCH),x86_64) -ARCH := x86 -endif - # always use the host compiler HOSTAR ?= ar HOSTCC ?= gcc @@ -33,7 +29,7 @@ all: $(OBJTOOL) INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ - -I$(srctree)/tools/arch/$(ARCH)/include + -I$(srctree)/tools/arch/$(SRCARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 044c9a3cb247..f53d3c515cdc 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -481,6 +481,7 @@ static const char *uaccess_safe_builtin[] = { "ubsan_type_mismatch_common", "__ubsan_handle_type_mismatch", "__ubsan_handle_type_mismatch_v1", + "__ubsan_handle_shift_out_of_bounds", /* misc */ "csum_partial_copy_generic", "__memcpy_mcsafe", diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 0a832e265a50..c3ae1e8ae119 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -47,5 +47,3 @@ check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[ check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"' check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"' check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"' - -cd - diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c index cb1e51fcc84e..32b7c6f9043d 100644 --- a/tools/pci/pcitest.c +++ b/tools/pci/pcitest.c @@ -129,6 +129,7 @@ static int run_test(struct pci_test *test) } fflush(stdout); + close(fd); return (ret < 0) ? 
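
/*
 * The parse-filter changes above all enforce one rule: an error path
 * must free everything it already owns (the parsed arg, the duplicated
 * string) before returning. A compact standalone rendering of that
 * rule, with malloc() standing in for add_filter_type():
 */
#include <stdlib.h>
#include <string.h>

static int install_filter(const char *expr)
{
	char *copy = strdup(expr);
	if (!copy)
		return -1;

	void *type = malloc(32);     /* stand-in for add_filter_type() */
	if (!type) {
		free(copy);          /* the added free: no leak on error */
		return -1;
	}

	/* ... would hand 'copy' and 'type' off on success ... */
	free(type);
	free(copy);
	return 0;
}

int main(void) { return install_filter("TRUE"); }
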
ret : 1 - ret; /* return 0 if test succeeded */ } diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 7902a5681fc8..b8fc7d972be9 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -35,7 +35,7 @@ endif # Only pass canonical directory names as the output directory: # ifneq ($(O),) - FULL_O := $(shell readlink -f $(O) || echo $(O)) + FULL_O := $(shell cd $(PWD); readlink -f $(O) || echo $(O)) endif # diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index ede040cf82ad..20e9a189ad92 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -865,9 +865,12 @@ static int cs_etm_read_finish(struct auxtrace_record *itr, int idx) struct evsel *evsel; evlist__for_each_entry(ptr->evlist, evsel) { - if (evsel->core.attr.type == ptr->cs_etm_pmu->type) + if (evsel->core.attr.type == ptr->cs_etm_pmu->type) { + if (evsel->disabled) + return 0; return perf_evlist__enable_event_idx(ptr->evlist, evsel, idx); + } } return -EINVAL; diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c index eba6541ec0f1..1d993c27242b 100644 --- a/tools/perf/arch/arm64/util/arm-spe.c +++ b/tools/perf/arch/arm64/util/arm-spe.c @@ -165,9 +165,12 @@ static int arm_spe_read_finish(struct auxtrace_record *itr, int idx) struct evsel *evsel; evlist__for_each_entry(sper->evlist, evsel) { - if (evsel->core.attr.type == sper->arm_spe_pmu->type) + if (evsel->core.attr.type == sper->arm_spe_pmu->type) { + if (evsel->disabled) + return 0; return perf_evlist__enable_event_idx(sper->evlist, evsel, idx); + } } return -EINVAL; } diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c index 5df788985130..8dfa3e5229f1 100644 --- a/tools/perf/arch/arm64/util/sym-handling.c +++ b/tools/perf/arch/arm64/util/sym-handling.c @@ -6,9 +6,10 @@ #include "symbol.h" // for the elf__needs_adjust_symbols() prototype #include -#include #ifdef HAVE_LIBELF_SUPPORT +#include + bool elf__needs_adjust_symbols(GElf_Ehdr ehdr) { return ehdr.e_type == ET_EXEC || diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c index f7f68a50a5cd..85799b5fa01d 100644 --- a/tools/perf/arch/x86/util/intel-bts.c +++ b/tools/perf/arch/x86/util/intel-bts.c @@ -415,9 +415,12 @@ static int intel_bts_read_finish(struct auxtrace_record *itr, int idx) struct evsel *evsel; evlist__for_each_entry(btsr->evlist, evsel) { - if (evsel->core.attr.type == btsr->intel_bts_pmu->type) + if (evsel->core.attr.type == btsr->intel_bts_pmu->type) { + if (evsel->disabled) + return 0; return perf_evlist__enable_event_idx(btsr->evlist, evsel, idx); + } } return -EINVAL; } diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index d6d26256915f..d43f9dec6998 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -1099,9 +1099,12 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx) struct evsel *evsel; evlist__for_each_entry(ptr->evlist, evsel) { - if (evsel->core.attr.type == ptr->intel_pt_pmu->type) + if (evsel->core.attr.type == ptr->intel_pt_pmu->type) { + if (evsel->disabled) + return 0; return perf_evlist__enable_event_idx(ptr->evlist, evsel, idx); + } } return -EINVAL; } diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c index df810096abfe..58906e9499bb 100644 --- a/tools/perf/bench/futex-wake.c +++ b/tools/perf/bench/futex-wake.c @@ -43,7 +43,7 @@ static bool done = false, silent = false, fshared 
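
/*
 * The identical guard lands in all four *_read_finish() implementations
 * above: if the matching event was left disabled on purpose, return
 * success instead of force-enabling it. A standalone sketch of that
 * loop; the types and the enable helper are stand-ins:
 */
#include <stdbool.h>
#include <stdio.h>

struct evsel { int type; bool disabled; };

static int enable_event(struct evsel *ev)    /* stand-in */
{
	printf("enabling event type %d\n", ev->type);
	return 0;
}

static int read_finish(struct evsel *evlist, int n, int pmu_type)
{
	for (int i = 0; i < n; i++) {
		if (evlist[i].type != pmu_type)
			continue;
		if (evlist[i].disabled)   /* the fix: leave it off */
			return 0;
		return enable_event(&evlist[i]);
	}
	return -1;
}

int main(void)
{
	struct evsel evs[] = { { 7, true } };
	return read_finish(evs, 1, 7);   /* 0, without enabling anything */
}
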
= false; static pthread_mutex_t thread_lock; static pthread_cond_t thread_parent, thread_worker; static struct stats waketime_stats, wakeup_stats; -static unsigned int ncpus, threads_starting, nthreads = 0; +static unsigned int threads_starting, nthreads = 0; static int futex_flag = 0; static const struct option options[] = { @@ -141,7 +141,7 @@ int bench_futex_wake(int argc, const char **argv) sigaction(SIGINT, &act, NULL); if (!nthreads) - nthreads = ncpus; + nthreads = cpu->nr; worker = calloc(nthreads, sizeof(*worker)); if (!worker) diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index e69f44941aad..f2e9d2b1b913 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -595,8 +595,8 @@ tot_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused, { struct c2c_hist_entry *c2c_left; struct c2c_hist_entry *c2c_right; - unsigned int tot_hitm_left; - unsigned int tot_hitm_right; + uint64_t tot_hitm_left; + uint64_t tot_hitm_right; c2c_left = container_of(left, struct c2c_hist_entry, he); c2c_right = container_of(right, struct c2c_hist_entry, he); @@ -629,7 +629,8 @@ __f ## _cmp(struct perf_hpp_fmt *fmt __maybe_unused, \ \ c2c_left = container_of(left, struct c2c_hist_entry, he); \ c2c_right = container_of(right, struct c2c_hist_entry, he); \ - return c2c_left->stats.__f - c2c_right->stats.__f; \ + return (uint64_t) c2c_left->stats.__f - \ + (uint64_t) c2c_right->stats.__f; \ } #define STAT_FN(__f) \ @@ -682,7 +683,8 @@ ld_llcmiss_cmp(struct perf_hpp_fmt *fmt __maybe_unused, c2c_left = container_of(left, struct c2c_hist_entry, he); c2c_right = container_of(right, struct c2c_hist_entry, he); - return llc_miss(&c2c_left->stats) - llc_miss(&c2c_right->stats); + return (uint64_t) llc_miss(&c2c_left->stats) - + (uint64_t) llc_miss(&c2c_right->stats); } static uint64_t total_records(struct c2c_stats *stats) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index c37a78677955..265682296836 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -575,8 +575,8 @@ static int64_t block_cycles_diff_cmp(struct hist_entry *left, if (!pairs_left && !pairs_right) return 0; - l = labs(left->diff.cycles); - r = labs(right->diff.cycles); + l = llabs(left->diff.cycles); + r = llabs(right->diff.cycles); return r - l; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index aae0e57c60fb..6407dff405d9 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -399,6 +399,13 @@ static int report__setup_sample_type(struct report *rep) PERF_SAMPLE_BRANCH_ANY)) rep->nonany_branch_mode = true; +#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT) + if (dwarf_callchain_users) { + ui__warning("Please install libunwind or libdw " + "development packages during the perf build.\n"); + } +#endif + return 0; } @@ -1024,6 +1031,7 @@ int cmd_report(int argc, const char **argv) struct stat st; bool has_br_stack = false; int branch_mode = -1; + int last_key = 0; bool branch_call_mode = false; #define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent" static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n" @@ -1389,7 +1397,8 @@ repeat: sort_order = sort_tmp; } - if (setup_sorting(session->evlist) < 0) { + if ((last_key != K_SWITCH_INPUT_DATA) && + (setup_sorting(session->evlist) < 0)) { if (sort_order) parse_options_usage(report_usage, options, "s", 1); if (field_order) @@ -1468,6 +1477,7 @@ repeat: ret = __cmd_report(&report); if (ret == K_SWITCH_INPUT_DATA) { 
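
/*
 * The c2c and diff comparator fixes are both width bugs: subtracting
 * two unsigned 32-bit stats wraps before the result ever reaches the
 * 64-bit return type, and labs() truncates a 64-bit argument where
 * long is 32 bits. Casting one operand to uint64_t first, as the patch
 * does, lets the wraparound happen in 64 bits, where reinterpretation
 * as signed recovers the correct sign (two's complement assumed).
 * A small demonstration:
 */
#include <inttypes.h>
#include <stdio.h>

static int64_t cmp_narrow(uint32_t l, uint32_t r)
{
	return l - r;   /* 32-bit subtract wraps, then zero-extends */
}

static int64_t cmp_wide(uint32_t l, uint32_t r)
{
	return (uint64_t)l - (uint64_t)r;  /* wraps in 64 bits: sign survives */
}

int main(void)
{
	/* l < r should compare negative */
	printf("narrow: %" PRId64 "\n", cmp_narrow(1, 2)); /* 4294967295 */
	printf("wide:   %" PRId64 "\n", cmp_wide(1, 2));   /* -1 */
	return 0;
}
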
perf_session__delete(session); + last_key = K_SWITCH_INPUT_DATA; goto repeat; } else ret = 0; diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 67be8d31afab..da016f398aa8 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -448,7 +448,7 @@ static int perf_evsel__check_attr(struct evsel *evsel, "selected. Hence, no address to lookup the source line number.\n"); return -EINVAL; } - if (PRINT_FIELD(BRSTACKINSN) && + if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set && !(perf_evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) { pr_err("Display of branch stack assembler requested, but non all-branch filter set\n" @@ -1084,7 +1084,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, insn++; } } - if (off != (unsigned)len) + if (off != end - start) printed += fprintf(fp, "\tmismatch of LBR data and executable\n"); } @@ -3605,11 +3605,6 @@ int cmd_script(int argc, const char **argv) } } - if (script.time_str && reltime) { - fprintf(stderr, "Don't combine --reltime with --time\n"); - return -1; - } - if (itrace_synth_opts.callchain && itrace_synth_opts.callchain_sz > scripting_max_stack) scripting_max_stack = itrace_synth_opts.callchain_sz; @@ -3869,10 +3864,11 @@ int cmd_script(int argc, const char **argv) goto out_delete; if (script.time_str) { - err = perf_time__parse_for_ranges(script.time_str, session, + err = perf_time__parse_for_ranges_reltime(script.time_str, session, &script.ptime_range, &script.range_size, - &script.range_num); + &script.range_num, + reltime); if (err < 0) goto out_delete; diff --git a/tools/perf/lib/include/perf/core.h b/tools/perf/lib/include/perf/core.h deleted file mode 100644 index cfd70e720c1c..000000000000 --- a/tools/perf/lib/include/perf/core.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LIBPERF_CORE_H -#define __LIBPERF_CORE_H - -#include - -#ifndef LIBPERF_API -#define LIBPERF_API __attribute__((visibility("default"))) -#endif - -enum libperf_print_level { - LIBPERF_WARN, - LIBPERF_INFO, - LIBPERF_DEBUG, -}; - -typedef int (*libperf_print_fn_t)(enum libperf_print_level level, - const char *, va_list ap); - -LIBPERF_API void libperf_init(libperf_print_fn_t fn); - -#endif /* __LIBPERF_CORE_H */ diff --git a/tools/perf/lib/include/perf/cpumap.h b/tools/perf/lib/include/perf/cpumap.h deleted file mode 100644 index ac9aa497f84a..000000000000 --- a/tools/perf/lib/include/perf/cpumap.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LIBPERF_CPUMAP_H -#define __LIBPERF_CPUMAP_H - -#include -#include -#include - -struct perf_cpu_map; - -LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); -LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); -LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); -LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); -LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); -LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); -LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); -LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); -LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map); - -#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \ - for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \ - (idx) < perf_cpu_map__nr(cpus); \ - (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx)) - -#endif /* __LIBPERF_CPUMAP_H */ diff --git 
a/tools/perf/lib/include/perf/event.h b/tools/perf/lib/include/perf/event.h deleted file mode 100644 index 18106899cb4e..000000000000 --- a/tools/perf/lib/include/perf/event.h +++ /dev/null @@ -1,385 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LIBPERF_EVENT_H -#define __LIBPERF_EVENT_H - -#include -#include -#include -#include -#include /* pid_t */ - -struct perf_record_mmap { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; -}; - -struct perf_record_mmap2 { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - __u32 maj; - __u32 min; - __u64 ino; - __u64 ino_generation; - __u32 prot; - __u32 flags; - char filename[PATH_MAX]; -}; - -struct perf_record_comm { - struct perf_event_header header; - __u32 pid, tid; - char comm[16]; -}; - -struct perf_record_namespaces { - struct perf_event_header header; - __u32 pid, tid; - __u64 nr_namespaces; - struct perf_ns_link_info link_info[]; -}; - -struct perf_record_fork { - struct perf_event_header header; - __u32 pid, ppid; - __u32 tid, ptid; - __u64 time; -}; - -struct perf_record_lost { - struct perf_event_header header; - __u64 id; - __u64 lost; -}; - -struct perf_record_lost_samples { - struct perf_event_header header; - __u64 lost; -}; - -/* - * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID - */ -struct perf_record_read { - struct perf_event_header header; - __u32 pid, tid; - __u64 value; - __u64 time_enabled; - __u64 time_running; - __u64 id; -}; - -struct perf_record_throttle { - struct perf_event_header header; - __u64 time; - __u64 id; - __u64 stream_id; -}; - -#ifndef KSYM_NAME_LEN -#define KSYM_NAME_LEN 256 -#endif - -struct perf_record_ksymbol { - struct perf_event_header header; - __u64 addr; - __u32 len; - __u16 ksym_type; - __u16 flags; - char name[KSYM_NAME_LEN]; -}; - -struct perf_record_bpf_event { - struct perf_event_header header; - __u16 type; - __u16 flags; - __u32 id; - - /* for bpf_prog types */ - __u8 tag[BPF_TAG_SIZE]; // prog tag -}; - -struct perf_record_sample { - struct perf_event_header header; - __u64 array[]; -}; - -struct perf_record_switch { - struct perf_event_header header; - __u32 next_prev_pid; - __u32 next_prev_tid; -}; - -struct perf_record_header_attr { - struct perf_event_header header; - struct perf_event_attr attr; - __u64 id[]; -}; - -enum { - PERF_CPU_MAP__CPUS = 0, - PERF_CPU_MAP__MASK = 1, -}; - -struct cpu_map_entries { - __u16 nr; - __u16 cpu[]; -}; - -struct perf_record_record_cpu_map { - __u16 nr; - __u16 long_size; - unsigned long mask[]; -}; - -struct perf_record_cpu_map_data { - __u16 type; - char data[]; -}; - -struct perf_record_cpu_map { - struct perf_event_header header; - struct perf_record_cpu_map_data data; -}; - -enum { - PERF_EVENT_UPDATE__UNIT = 0, - PERF_EVENT_UPDATE__SCALE = 1, - PERF_EVENT_UPDATE__NAME = 2, - PERF_EVENT_UPDATE__CPUS = 3, -}; - -struct perf_record_event_update_cpus { - struct perf_record_cpu_map_data cpus; -}; - -struct perf_record_event_update_scale { - double scale; -}; - -struct perf_record_event_update { - struct perf_event_header header; - __u64 type; - __u64 id; - char data[]; -}; - -#define MAX_EVENT_NAME 64 - -struct perf_trace_event_type { - __u64 event_id; - char name[MAX_EVENT_NAME]; -}; - -struct perf_record_header_event_type { - struct perf_event_header header; - struct perf_trace_event_type event_type; -}; - -struct perf_record_header_tracing_data { - struct perf_event_header header; - __u32 size; -}; - 
-struct perf_record_header_build_id { - struct perf_event_header header; - pid_t pid; - __u8 build_id[24]; - char filename[]; -}; - -struct id_index_entry { - __u64 id; - __u64 idx; - __u64 cpu; - __u64 tid; -}; - -struct perf_record_id_index { - struct perf_event_header header; - __u64 nr; - struct id_index_entry entries[0]; -}; - -struct perf_record_auxtrace_info { - struct perf_event_header header; - __u32 type; - __u32 reserved__; /* For alignment */ - __u64 priv[]; -}; - -struct perf_record_auxtrace { - struct perf_event_header header; - __u64 size; - __u64 offset; - __u64 reference; - __u32 idx; - __u32 tid; - __u32 cpu; - __u32 reserved__; /* For alignment */ -}; - -#define MAX_AUXTRACE_ERROR_MSG 64 - -struct perf_record_auxtrace_error { - struct perf_event_header header; - __u32 type; - __u32 code; - __u32 cpu; - __u32 pid; - __u32 tid; - __u32 fmt; - __u64 ip; - __u64 time; - char msg[MAX_AUXTRACE_ERROR_MSG]; -}; - -struct perf_record_aux { - struct perf_event_header header; - __u64 aux_offset; - __u64 aux_size; - __u64 flags; -}; - -struct perf_record_itrace_start { - struct perf_event_header header; - __u32 pid; - __u32 tid; -}; - -struct perf_record_thread_map_entry { - __u64 pid; - char comm[16]; -}; - -struct perf_record_thread_map { - struct perf_event_header header; - __u64 nr; - struct perf_record_thread_map_entry entries[]; -}; - -enum { - PERF_STAT_CONFIG_TERM__AGGR_MODE = 0, - PERF_STAT_CONFIG_TERM__INTERVAL = 1, - PERF_STAT_CONFIG_TERM__SCALE = 2, - PERF_STAT_CONFIG_TERM__MAX = 3, -}; - -struct perf_record_stat_config_entry { - __u64 tag; - __u64 val; -}; - -struct perf_record_stat_config { - struct perf_event_header header; - __u64 nr; - struct perf_record_stat_config_entry data[]; -}; - -struct perf_record_stat { - struct perf_event_header header; - - __u64 id; - __u32 cpu; - __u32 thread; - - union { - struct { - __u64 val; - __u64 ena; - __u64 run; - }; - __u64 values[3]; - }; -}; - -struct perf_record_stat_round { - struct perf_event_header header; - __u64 type; - __u64 time; -}; - -struct perf_record_time_conv { - struct perf_event_header header; - __u64 time_shift; - __u64 time_mult; - __u64 time_zero; -}; - -struct perf_record_header_feature { - struct perf_event_header header; - __u64 feat_id; - char data[]; -}; - -struct perf_record_compressed { - struct perf_event_header header; - char data[]; -}; - -enum perf_user_event_type { /* above any possible kernel type */ - PERF_RECORD_USER_TYPE_START = 64, - PERF_RECORD_HEADER_ATTR = 64, - PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */ - PERF_RECORD_HEADER_TRACING_DATA = 66, - PERF_RECORD_HEADER_BUILD_ID = 67, - PERF_RECORD_FINISHED_ROUND = 68, - PERF_RECORD_ID_INDEX = 69, - PERF_RECORD_AUXTRACE_INFO = 70, - PERF_RECORD_AUXTRACE = 71, - PERF_RECORD_AUXTRACE_ERROR = 72, - PERF_RECORD_THREAD_MAP = 73, - PERF_RECORD_CPU_MAP = 74, - PERF_RECORD_STAT_CONFIG = 75, - PERF_RECORD_STAT = 76, - PERF_RECORD_STAT_ROUND = 77, - PERF_RECORD_EVENT_UPDATE = 78, - PERF_RECORD_TIME_CONV = 79, - PERF_RECORD_HEADER_FEATURE = 80, - PERF_RECORD_COMPRESSED = 81, - PERF_RECORD_HEADER_MAX -}; - -union perf_event { - struct perf_event_header header; - struct perf_record_mmap mmap; - struct perf_record_mmap2 mmap2; - struct perf_record_comm comm; - struct perf_record_namespaces namespaces; - struct perf_record_fork fork; - struct perf_record_lost lost; - struct perf_record_lost_samples lost_samples; - struct perf_record_read read; - struct perf_record_throttle throttle; - struct perf_record_sample sample; - struct perf_record_bpf_event 
bpf; - struct perf_record_ksymbol ksymbol; - struct perf_record_header_attr attr; - struct perf_record_event_update event_update; - struct perf_record_header_event_type event_type; - struct perf_record_header_tracing_data tracing_data; - struct perf_record_header_build_id build_id; - struct perf_record_id_index id_index; - struct perf_record_auxtrace_info auxtrace_info; - struct perf_record_auxtrace auxtrace; - struct perf_record_auxtrace_error auxtrace_error; - struct perf_record_aux aux; - struct perf_record_itrace_start itrace_start; - struct perf_record_switch context_switch; - struct perf_record_thread_map thread_map; - struct perf_record_cpu_map cpu_map; - struct perf_record_stat_config stat_config; - struct perf_record_stat stat; - struct perf_record_stat_round stat_round; - struct perf_record_time_conv time_conv; - struct perf_record_header_feature feat; - struct perf_record_compressed pack; -}; - -#endif /* __LIBPERF_EVENT_H */ diff --git a/tools/perf/lib/include/perf/evlist.h b/tools/perf/lib/include/perf/evlist.h deleted file mode 100644 index 8a2ce0757ab2..000000000000 --- a/tools/perf/lib/include/perf/evlist.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LIBPERF_EVLIST_H -#define __LIBPERF_EVLIST_H - -#include - -struct perf_evlist; -struct perf_evsel; -struct perf_cpu_map; -struct perf_thread_map; - -LIBPERF_API void perf_evlist__init(struct perf_evlist *evlist); -LIBPERF_API void perf_evlist__add(struct perf_evlist *evlist, - struct perf_evsel *evsel); -LIBPERF_API void perf_evlist__remove(struct perf_evlist *evlist, - struct perf_evsel *evsel); -LIBPERF_API struct perf_evlist *perf_evlist__new(void); -LIBPERF_API void perf_evlist__delete(struct perf_evlist *evlist); -LIBPERF_API struct perf_evsel* perf_evlist__next(struct perf_evlist *evlist, - struct perf_evsel *evsel); -LIBPERF_API int perf_evlist__open(struct perf_evlist *evlist); -LIBPERF_API void perf_evlist__close(struct perf_evlist *evlist); -LIBPERF_API void perf_evlist__enable(struct perf_evlist *evlist); -LIBPERF_API void perf_evlist__disable(struct perf_evlist *evlist); - -#define perf_evlist__for_each_evsel(evlist, pos) \ - for ((pos) = perf_evlist__next((evlist), NULL); \ - (pos) != NULL; \ - (pos) = perf_evlist__next((evlist), (pos))) - -LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist, - struct perf_cpu_map *cpus, - struct perf_thread_map *threads); -LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout); - -#endif /* __LIBPERF_EVLIST_H */ diff --git a/tools/perf/lib/include/perf/evsel.h b/tools/perf/lib/include/perf/evsel.h deleted file mode 100644 index 4388667f265c..000000000000 --- a/tools/perf/lib/include/perf/evsel.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LIBPERF_EVSEL_H -#define __LIBPERF_EVSEL_H - -#include -#include - -struct perf_evsel; -struct perf_event_attr; -struct perf_cpu_map; -struct perf_thread_map; - -struct perf_counts_values { - union { - struct { - uint64_t val; - uint64_t ena; - uint64_t run; - }; - uint64_t values[3]; - }; -}; - -LIBPERF_API void perf_evsel__init(struct perf_evsel *evsel, - struct perf_event_attr *attr); -LIBPERF_API struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr); -LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel); -LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, - struct perf_thread_map *threads); -LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel); -LIBPERF_API int 
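An aside for readers tracking the libperf header moves above: consumers of these record layouts dispatch on perf_event_header.type to pick the matching union member. A hedged sketch of that pattern; union sample_event below is an illustrative subset standing in for the full union perf_event from the header, and only the perf_event_header layout and PERF_RECORD_* constants come from the UAPI header:

#include <limits.h>
#include <linux/perf_event.h>

/* Illustrative subset of union perf_event from the header above. */
union sample_event {
	struct perf_event_header header;
	struct {
		struct perf_event_header header;
		__u32 pid, tid;
		char filename[PATH_MAX];
	} mmap;
	struct {
		struct perf_event_header header;
		__u32 pid, tid;
		char comm[16];
	} comm;
};

static void handle_event(union sample_event *ev)
{
	switch (ev->header.type) {
	case PERF_RECORD_MMAP:
		/* ev->mmap describes a new file mapping of pid/tid */
		break;
	case PERF_RECORD_COMM:
		/* ev->comm carries the 16-byte task name */
		break;
	default:
		/* tool-level types begin at PERF_RECORD_USER_TYPE_START (64) */
		break;
	}
}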
perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, - struct perf_counts_values *count); -LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); -LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel); -LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); -LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); -LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); - -#endif /* __LIBPERF_EVSEL_H */ diff --git a/tools/perf/lib/include/perf/threadmap.h b/tools/perf/lib/include/perf/threadmap.h deleted file mode 100644 index a7c50de8d010..000000000000 --- a/tools/perf/lib/include/perf/threadmap.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LIBPERF_THREADMAP_H -#define __LIBPERF_THREADMAP_H - -#include -#include - -struct perf_thread_map; - -LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void); - -LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid); -LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread); -LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads); -LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread); - -LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map); -LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map); - -#endif /* __LIBPERF_THREADMAP_H */ diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json index 0d1556fcdffe..99f4fc425564 100644 --- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json +++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json @@ -15,7 +15,7 @@ }, { "EventCode": "0x04", - "EventName": "uncore_hisi_ddrc.flux_wr", + "EventName": "uncore_hisi_ddrc.pre_cmd", "BriefDescription": "DDRC precharge commands", "PublicDescription": "DDRC precharge commands", "Unit": "hisi_sccl,ddrc", diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json index 68618152ea2c..89e070727e1b 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json @@ -4,7 +4,7 @@ "EventCode": "128", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", - "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" + "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { "Unit": "CPU-M-CF", diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index e2837260ca4d..99e3fd04a5cb 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -758,6 +758,7 @@ static int process_mapfile(FILE *outfp, char *fpath) char *line, *p; int line_num; char *tblname; + int ret = 0; pr_info("%s: Processing mapfile %s\n", prog, fpath); @@ -769,6 +770,7 @@ static int process_mapfile(FILE *outfp, char *fpath) if (!mapfp) { pr_info("%s: Error %s opening %s\n", prog, strerror(errno), fpath); + free(line); return -1; } @@ -795,7 +797,8 @@ static int 
process_mapfile(FILE *outfp, char *fpath) /* TODO Deal with lines longer than 16K */ pr_info("%s: Mapfile %s: line %d too long, aborting\n", prog, fpath, line_num); - return -1; + ret = -1; + goto out; } line[strlen(line)-1] = '\0'; @@ -825,7 +828,9 @@ static int process_mapfile(FILE *outfp, char *fpath) out: print_mapping_table_suffix(outfp); - return 0; + fclose(mapfp); + free(line); + return ret; } /* @@ -1122,6 +1127,7 @@ int main(int argc, char *argv[]) goto empty_map; } else if (rc < 0) { /* Make build fail */ + fclose(eventsfp); free_arch_std_events(); return 1; } else if (rc) { @@ -1134,6 +1140,7 @@ int main(int argc, char *argv[]) goto empty_map; } else if (rc < 0) { /* Make build fail */ + fclose(eventsfp); free_arch_std_events(); return 1; } else if (rc) { @@ -1151,6 +1158,8 @@ int main(int argc, char *argv[]) if (process_mapfile(eventsfp, mapfile)) { pr_info("%s: Error processing mapfile %s\n", prog, mapfile); /* Make build fail */ + fclose(eventsfp); + free_arch_std_events(); return 1; } diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index 61b3911d91e6..4b28c9d08d5a 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py @@ -625,7 +625,7 @@ class CallGraphRootItem(CallGraphLevelItemBase): self.query_done = True if_has_calls = "" if IsSelectable(glb.db, "comms", columns = "has_calls"): - if_has_calls = " WHERE has_calls = TRUE" + if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE query = QSqlQuery(glb.db) QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls) while query.next(): @@ -905,7 +905,7 @@ class CallTreeRootItem(CallGraphLevelItemBase): self.query_done = True if_has_calls = "" if IsSelectable(glb.db, "comms", columns = "has_calls"): - if_has_calls = " WHERE has_calls = TRUE" + if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE query = QSqlQuery(glb.db) QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls) while query.next(): @@ -3509,6 +3509,12 @@ class DBRef(): def __init__(self, is_sqlite3, dbname): self.is_sqlite3 = is_sqlite3 self.dbname = dbname + self.TRUE = "TRUE" + self.FALSE = "FALSE" + # SQLite prior to version 3.23 does not support TRUE and FALSE + if self.is_sqlite3: + self.TRUE = "1" + self.FALSE = "0" def Open(self, connection_name): dbname = self.dbname diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c index 338cd9faa835..5128f727c0ef 100644 --- a/tools/perf/tests/backward-ring-buffer.c +++ b/tools/perf/tests/backward-ring-buffer.c @@ -147,6 +147,15 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m goto out_delete_evlist; } + evlist__close(evlist); + + err = evlist__open(evlist); + if (err < 0) { + pr_debug("perf_evlist__open: %s\n", + str_error_r(errno, sbuf, sizeof(sbuf))); + goto out_delete_evlist; + } + err = do_test(evlist, 1, &sample_count, &comm_count); if (err != TEST_OK) goto out_delete_evlist; diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c index c1c2c13de254..166f411568a5 100644 --- a/tools/perf/tests/bp_signal.c +++ b/tools/perf/tests/bp_signal.c @@ -49,14 +49,6 @@ asm ( "__test_function:\n" "incq (%rdi)\n" "ret\n"); -#elif defined (__aarch64__) -extern void __test_function(volatile long *ptr); -asm ( - ".globl __test_function\n" - "__test_function:\n" - "str x30, [x0]\n" - "ret\n"); - #else static void __test_function(volatile long *ptr) { @@ -302,10 +294,15 @@ bool 
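The jevents changes above enforce one rule: every exit from process_mapfile() must release the FILE handle and the line buffer, so early returns become jumps to a single cleanup label. A minimal sketch of that idiom, unrelated to the jevents mapfile format (process() and the length limit are illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative: one cleanup label owns the FILE and the line buffer. */
static int process(const char *path)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;
	int ret = 0;

	fp = fopen(path, "r");
	if (!fp)
		return -1;

	while (getline(&line, &len, fp) != -1) {
		if (strlen(line) > 16384) {
			ret = -1;	/* line too long: bail via cleanup */
			goto out;
		}
		/* ...process the line... */
	}
out:
	free(line);	/* free(NULL) is a no-op */
	fclose(fp);
	return ret;
}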
test__bp_signal_is_supported(void) * stepping into the SIGIO handler and getting stuck on the * breakpointed instruction. * + * Since arm64 has the same issue as arm for the single-step + * handling, this case also gets stuck on the breakpointed + * instruction. + * * Just disable the test for these architectures until these * issues are resolved. */ -#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) +#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || \ + defined(__aarch64__) return false; #else return true; diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index bce3a4cb4c89..d85c9f608564 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -53,6 +53,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused struct perf_cpu_map *cpus; struct perf_thread_map *threads; struct mmap *md; + int retry_count = 0; signal(SIGCHLD, sig_handler); @@ -110,6 +111,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused if (evlist__mmap(evlist, 128) < 0) { pr_debug("failed to mmap events: %d (%s)\n", errno, str_error_r(errno, sbuf, sizeof(sbuf))); + err = -1; goto out_delete_evlist; } @@ -131,6 +133,13 @@ retry: out_init: if (!exited || !nr_exit) { evlist__poll(evlist, -1); + + if (retry_count++ > 1000) { + pr_debug("Failed after retrying 1000 times\n"); + err = -1; + goto out_free_maps; + } + goto retry; } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 7a7187e069b4..88c3df24b748 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -3054,6 +3054,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events, continue; } + actions->ms.map = map; top = pstack__peek(browser->pstack); if (top == &browser->hists->dso_filter) { /* diff --git a/tools/perf/ui/gtk/Build b/tools/perf/ui/gtk/Build index ec22e899a224..9b5d5cbb7af7 100644 --- a/tools/perf/ui/gtk/Build +++ b/tools/perf/ui/gtk/Build @@ -7,3 +7,8 @@ gtk-y += util.o gtk-y += helpline.o gtk-y += progress.o gtk-y += annotate.o +gtk-y += zalloc.o + +$(OUTPUT)ui/gtk/zalloc.o: ../lib/zalloc.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 4ba0f871f086..f5f855fff412 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -110,7 +110,7 @@ static int cs_etm__decode_data_block(struct cs_etm_queue *etmq); * encode the etm queue number as the upper 16 bit and the channel as * the lower 16 bit. */ -#define TO_CS_QUEUE_NR(queue_nr, trace_id_chan) \ +#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \ (queue_nr << 16 | trace_chan_id) #define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16) #define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff) @@ -819,7 +819,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm, * Note that packets decoded above are still in the traceID's packet * queue and will be processed in cs_etm__process_queues().
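The TO_CS_QUEUE_NR hunk above is a macro-hygiene fix worth spelling out: the parameter was declared trace_id_chan while the body referenced trace_chan_id, so the argument was silently ignored and the macro captured whatever trace_chan_id variable happened to be in scope at the call site. A minimal reproduction, with illustrative macro names:

#include <stdio.h>

/* Misspelled parameter: the body captures a caller-side variable instead. */
#define TO_QUEUE_BAD(queue_nr, trace_id_chan)	(queue_nr << 16 | trace_chan_id)
/* Fixed: parameter name matches the body, so the argument is really used. */
#define TO_QUEUE_OK(queue_nr, trace_chan_id)	(queue_nr << 16 | trace_chan_id)

int main(void)
{
	int trace_chan_id = 7;	/* in scope, as in cs_etm__setup_queue() */

	/* The caller passes 42, but the BAD macro still uses 7. */
	printf("bad: %#x, ok: %#x\n", TO_QUEUE_BAD(1, 42), TO_QUEUE_OK(1, 42));
	return 0;
}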
*/ - cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_id_chan); + cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id); ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp); out: return ret; diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index df6cee5c071f..5544bfbd0f6c 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -307,21 +307,51 @@ bool die_is_func_def(Dwarf_Die *dw_die) dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL); } +/** + * die_entrypc - Returns entry PC (the lowest address) of a DIE + * @dw_die: a DIE + * @addr: where to store entry PC + * + * Since dwarf_entrypc() does not return entry PC if the DIE has only address + * range, we have to use this to retrieve the lowest address from the address + * range attribute. + */ +int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr) +{ + Dwarf_Addr base, end; + + if (!addr) + return -EINVAL; + + if (dwarf_entrypc(dw_die, addr) == 0) + return 0; + + return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0; +} + /** * die_is_func_instance - Ensure that this DIE is an instance of a subprogram * @dw_die: a DIE * * Ensure that this DIE is an instance (which has an entry address). - * This returns true if @dw_die is a function instance. If not, you need to - * call die_walk_instances() to find actual instances. + * This returns true if @dw_die is a function instance. If not, the @dw_die + * must be a prototype. You can use die_walk_instances() to find actual + * instances. **/ bool die_is_func_instance(Dwarf_Die *dw_die) { Dwarf_Addr tmp; + Dwarf_Attribute attr_mem; + int tag = dwarf_tag(dw_die); - /* Actually gcc optimizes non-inline as like as inlined */ - return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0; + if (tag != DW_TAG_subprogram && + tag != DW_TAG_inlined_subroutine) + return false; + + return dwarf_entrypc(dw_die, &tmp) == 0 || + dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL; } + /** * die_get_data_member_location - Get the data-member offset * @mb_die: a DIE of a member of a data structure @@ -598,6 +628,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data) Dwarf_Die *origin; int tmp; + if (!die_is_func_instance(inst)) + return DIE_FIND_CB_CONTINUE; + attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem); if (attr == NULL) return DIE_FIND_CB_CONTINUE; @@ -669,15 +702,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { fname = die_get_call_file(in_die); lineno = die_get_call_lineno(in_die); - if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { + if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) { lw->retval = lw->callback(fname, lineno, addr, lw->data); if (lw->retval != 0) return DIE_FIND_CB_END; } + if (!lw->recursive) + return DIE_FIND_CB_SIBLING; } - if (!lw->recursive) - /* Don't need to search recursively */ - return DIE_FIND_CB_SIBLING; if (addr) { fname = dwarf_decl_file(in_die); @@ -710,7 +742,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive, /* Handle function declaration line */ fname = dwarf_decl_file(sp_die); if (fname && dwarf_decl_line(sp_die, &lineno) == 0 && - dwarf_entrypc(sp_die, &addr) == 0) { + die_entrypc(sp_die, &addr) == 0) { lw.retval = callback(fname, lineno, addr, data); if (lw.retval != 0) goto done; @@ -724,6 +756,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) { struct __line_walk_param *lw = data; + /* + * Since inlined function can include 
another inlined function in + * the same file, we need to walk into it recursively. + */ lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data); if (lw->retval != 0) return DWARF_CB_ABORT; @@ -748,11 +784,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) Dwarf_Lines *lines; Dwarf_Line *line; Dwarf_Addr addr; - const char *fname, *decf = NULL; + const char *fname, *decf = NULL, *inf = NULL; int lineno, ret = 0; int decl = 0, inl; Dwarf_Die die_mem, *cu_die; size_t nlines, i; + bool flag; /* Get the CU die */ if (dwarf_tag(rt_die) != DW_TAG_compile_unit) { @@ -783,6 +820,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) "Possible error in debuginfo.\n"); continue; } + /* Skip end-of-sequence */ + if (dwarf_lineendsequence(line, &flag) != 0 || flag) + continue; + /* Skip non-statement line info */ + if (dwarf_linebeginstatement(line, &flag) != 0 || !flag) + continue; /* Filter lines based on address */ if (rt_die != cu_die) { /* @@ -792,13 +835,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) */ if (!dwarf_haspc(rt_die, addr)) continue; + if (die_find_inlinefunc(rt_die, addr, &die_mem)) { + /* Call-site check */ + inf = die_get_call_file(&die_mem); + if ((inf && !strcmp(inf, decf)) && + die_get_call_lineno(&die_mem) == lineno) + goto found; + dwarf_decl_line(&die_mem, &inl); if (inl != decl || decf != dwarf_decl_file(&die_mem)) continue; } } +found: /* Get source line */ fname = dwarf_linesrc(line, NULL, NULL); @@ -813,8 +864,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) */ if (rt_die != cu_die) /* - * Don't need walk functions recursively, because nested - * inlined functions don't have lines of the specified DIE. + * Don't need to walk inlined functions recursively, because + * inner inlined functions don't have the lines of the + * specified function.
*/ ret = __die_walk_funclines(rt_die, false, callback, data); else { @@ -989,7 +1041,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die, bool first = true; const char *name; - ret = dwarf_entrypc(sp_die, &entry); + ret = die_entrypc(sp_die, &entry); if (ret) return ret; @@ -1052,7 +1104,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf) bool first = true; const char *name; - ret = dwarf_entrypc(sp_die, &entry); + ret = die_entrypc(sp_die, &entry); if (ret) return ret; diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index f204e5892403..506006e0cf66 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -29,6 +29,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, /* Get DW_AT_linkage_name (should be NULL for C binary) */ const char *die_get_linkage_name(Dwarf_Die *dw_die); +/* Get the lowest PC in DIE (including range list) */ +int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr); + /* Ensure that this DIE is a subprogram and definition (not declaration) */ bool die_is_func_def(Dwarf_Die *dw_die); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index becc2d109423..d3412f2c0d18 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1089,21 +1089,18 @@ static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); } -static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) +#define MAX_CACHE_LVL 4 + +static int build_caches(struct cpu_cache_level caches[], u32 *cntp) { u32 i, cnt = 0; - long ncpus; u32 nr, cpu; u16 level; - ncpus = sysconf(_SC_NPROCESSORS_CONF); - if (ncpus < 0) - return -1; - - nr = (u32)(ncpus & UINT_MAX); + nr = cpu__max_cpu(); for (cpu = 0; cpu < nr; cpu++) { - for (level = 0; level < 10; level++) { + for (level = 0; level < MAX_CACHE_LVL; level++) { struct cpu_cache_level c; int err; @@ -1123,18 +1120,12 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) caches[cnt++] = c; else cpu_cache_level__free(&c); - - if (WARN_ONCE(cnt == size, "way too many cpu caches..")) - goto out; } } - out: *cntp = cnt; return 0; } -#define MAX_CACHE_LVL 4 - static int write_cache(struct feat_fd *ff, struct evlist *evlist __maybe_unused) { @@ -1143,7 +1134,7 @@ static int write_cache(struct feat_fd *ff, u32 cnt = 0, i, version = 1; int ret; - ret = build_caches(caches, max_caches, &cnt); + ret = build_caches(caches, &cnt); if (ret) goto out; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 6a186b668303..479273130794 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -339,10 +339,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format) list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list) #define hists__for_each_format(hists, format) \ - perf_hpp_list__for_each_format((hists)->hpp_list, fmt) + perf_hpp_list__for_each_format((hists)->hpp_list, format) #define hists__for_each_sort_list(hists, format) \ - perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt) + perf_hpp_list__for_each_sort_list((hists)->hpp_list, format) extern struct perf_hpp_fmt perf_hpp__format[]; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 70a9f8716a4b..ea277ce63a46 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -767,24 +767,6 @@ int machine__process_ksymbol(struct machine *machine __maybe_unused, return 
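On the die_walk_lines() filtering just above: end-of-sequence rows carry only an address, and rows that are not statement boundaries make poor probe lines, so both are now skipped. A hedged sketch of that filtering in isolation, assuming elfutils libdw and a DW_TAG_compile_unit DIE obtained elsewhere (walk_lines() is illustrative):

#include <elfutils/libdw.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Walk a CU's line table, dropping the rows die_walk_lines() now skips. */
static void walk_lines(Dwarf_Die *cu_die)
{
	Dwarf_Lines *lines;
	size_t nlines, i;

	if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0)
		return;

	for (i = 0; i < nlines; i++) {
		Dwarf_Line *line = dwarf_onesrcline(lines, i);
		Dwarf_Addr addr;
		bool flag;
		int lineno;

		if (!line || dwarf_lineaddr(line, &addr) != 0)
			continue;
		/* End-of-sequence rows mark a range end, not real code. */
		if (dwarf_lineendsequence(line, &flag) != 0 || flag)
			continue;
		/* Only statement boundaries are useful probe lines. */
		if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
			continue;
		if (dwarf_lineno(line, &lineno) == 0)
			printf("%#" PRIx64 " line %d\n", (uint64_t)addr, lineno);
	}
}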
machine__process_ksymbol_register(machine, event, sample); } -static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename) -{ - const char *dup_filename; - - if (!filename || !dso || !dso->long_name) - return; - if (dso->long_name[0] != '[') - return; - if (!strchr(filename, '/')) - return; - - dup_filename = strdup(filename); - if (!dup_filename) - return; - - dso__set_long_name(dso, dup_filename, true); -} - struct map *machine__findnew_module_map(struct machine *machine, u64 start, const char *filename) { @@ -796,15 +778,8 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start, return NULL; map = map_groups__find_by_name(&machine->kmaps, m.name); - if (map) { - /* - * If the map's dso is an offline module, give dso__load() - * a chance to find the file path of that module by fixing - * long_name. - */ - dso__adjust_kmod_long_name(map->dso, filename); + if (map) goto out; - } dso = machine__findnew_module_dso(machine, &m, filename); if (dso == NULL) @@ -2403,7 +2378,7 @@ static int thread__resolve_callchain_sample(struct thread *thread, } check_calls: - if (callchain_param.order != ORDER_CALLEE) { + if (chain && callchain_param.order != ORDER_CALLEE) { err = find_prev_cpumode(chain, thread, cursor, parent, root_al, &cpumode, chain->nr - first_call); if (err) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index eec9b282c047..4b07b1cc22dc 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -90,7 +90,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) return true; } - if (!strncmp(filename, "/system/lib/", 11)) { + if (!strncmp(filename, "/system/lib/", 12)) { char *ndk, *app; const char *arch; size_t ndk_length; diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index a7c0424dbda3..940a6e7a6854 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -103,8 +103,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, if (!strcmp(ev->name, ids[i])) { if (!metric_events[i]) metric_events[i] = ev; + i++; + if (i == idnum) + break; } else { - if (++i == idnum) { + if (i + 1 == idnum) { /* Discard the whole match and start again */ i = 0; memset(metric_events, 0, @@ -124,7 +127,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, } } - if (i != idnum - 1) { + if (i != idnum) { /* Not whole match */ return NULL; } diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index b5e2adef49de..422ad1888e74 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1365,8 +1365,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, if (get_config_terms(head_config, &config_terms)) return -ENOMEM; - if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) + if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) { + struct perf_evsel_config_term *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &config_terms, list) { + list_del_init(&pos->list); + free(pos); + } return -EINVAL; + } evsel = __add_event(list, &parse_state->idx, &attr, get_config_name(head_config), pmu, @@ -1927,15 +1934,20 @@ int parse_events(struct evlist *evlist, const char *str, ret = parse_events__scanner(str, &parse_state, PE_START_EVENTS); perf_pmu__parse_cleanup(); + + if (!ret && list_empty(&parse_state.list)) { + WARN_ONCE(true, "WARNING: event parser found nothing\n"); + return -1; + } + + /* + * Add list to the evlist even with errors to allow callers to clean up. 
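On the parse_events_add_pmu() hunk above: when perf_pmu__config() fails, the freshly built config_terms list would leak, so it is torn down with the _safe iterator, which caches the next node before the current one is freed. A minimal sketch, assuming the kernel-style linux/list.h from tools/include (struct term is illustrative):

#include <linux/list.h>
#include <stdlib.h>

struct term {
	struct list_head list;
	/* ...term payload... */
};

/* Tear down a list safely: tmp holds the lookahead node so freeing pos
 * cannot break the traversal. */
static void free_terms(struct list_head *terms)
{
	struct term *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, terms, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}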
+ */ + perf_evlist__splice_list_tail(evlist, &parse_state.list); + if (!ret) { struct evsel *last; - if (list_empty(&parse_state.list)) { - WARN_ONCE(true, "WARNING: event parser found nothing\n"); - return -1; - } - - perf_evlist__splice_list_tail(evlist, &parse_state.list); evlist->nr_groups += parse_state.nr_groups; last = evlist__last(evlist); last->cmdline_group_boundary = true; diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 47fe34e5f7d5..ec7640cc4c91 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -41,7 +41,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); static inline const char *perf_reg_name(int id __maybe_unused) { - return NULL; + return "unknown"; } static inline int perf_reg_value(u64 *valp __maybe_unused, diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index b659466ea498..bf50f464234f 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -206,6 +206,9 @@ static struct strlist *__probe_file__get_namelist(int fd, bool include_group) } else ret = strlist__add(sl, tev.event); clear_probe_trace_event(&tev); + /* Skip if a same-name multi-probe event is already in the list */ + if (ret == -EEXIST) + ret = 0; if (ret < 0) break; } diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index cd9f95e5044e..aaf3b24fffa4 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -604,38 +604,31 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, const char *function, struct probe_trace_point *tp) { - Dwarf_Addr eaddr, highaddr; + Dwarf_Addr eaddr; GElf_Sym sym; const char *symbol; /* Verify the address is correct */ - if (dwarf_entrypc(sp_die, &eaddr) != 0) { - pr_warning("Failed to get entry address of %s\n", - dwarf_diename(sp_die)); - return -ENOENT; - } - if (dwarf_highpc(sp_die, &highaddr) != 0) { - pr_warning("Failed to get end address of %s\n", - dwarf_diename(sp_die)); - return -ENOENT; - } - if (paddr > highaddr) { - pr_warning("Offset specified is greater than size of %s\n", + if (!dwarf_haspc(sp_die, paddr)) { + pr_warning("Specified offset is out of %s\n", dwarf_diename(sp_die)); return -EINVAL; } - symbol = dwarf_diename(sp_die); - if (!symbol) { - /* Try to get the symbol name from symtab */ + if (dwarf_entrypc(sp_die, &eaddr) == 0) { + /* If the DIE has entrypc, use it.
*/ + symbol = dwarf_diename(sp_die); + } else { + /* Try to get actual symbol name and address from symtab */ symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL); - if (!symbol) { - pr_warning("Failed to find symbol at 0x%lx\n", - (unsigned long)paddr); - return -ENOENT; - } eaddr = sym.st_value; } + if (!symbol) { + pr_warning("Failed to find symbol at 0x%lx\n", + (unsigned long)paddr); + return -ENOENT; + } + tp->offset = (unsigned long)(paddr - eaddr); tp->address = (unsigned long)paddr; tp->symbol = strdup(symbol); @@ -756,6 +749,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data) return 0; } +/* Return innermost DIE */ +static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data) +{ + struct find_scope_param *fsp = data; + + memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); + fsp->found = true; + return 1; +} + /* Find an appropriate scope fits to given conditions */ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem) { @@ -767,8 +770,13 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem) .die_mem = die_mem, .found = false, }; + int ret; - cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp); + ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, + &fsp); + if (!ret && !fsp.found) + cu_walk_functions_at(&pf->cu_die, pf->addr, + find_inner_scope_cb, &fsp); return fsp.found ? die_mem : NULL; } @@ -942,7 +950,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) ret = find_probe_point_lazy(in_die, pf); else { /* Get probe address */ - if (dwarf_entrypc(in_die, &addr) != 0) { + if (die_entrypc(in_die, &addr) != 0) { pr_warning("Failed to get entry address of %s.\n", dwarf_diename(in_die)); return -ENOENT; @@ -994,7 +1002,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) param->retval = find_probe_point_by_line(pf); } else if (die_is_func_instance(sp_die)) { /* Instances always have the entry address */ - dwarf_entrypc(sp_die, &pf->addr); + die_entrypc(sp_die, &pf->addr); /* But in some case the entry address is 0 */ if (pf->addr == 0) { pr_debug("%s has no entry PC. Skipped\n", @@ -1425,6 +1433,18 @@ error: return DIE_FIND_CB_END; } +static bool available_var_finder_overlap(struct available_var_finder *af) +{ + int i; + + for (i = 0; i < af->nvls; i++) { + if (af->pf.addr == af->vls[i].point.address) + return true; + } + return false; + +} + /* Add a found vars into available variables list */ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) { @@ -1435,6 +1455,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) Dwarf_Die die_mem; int ret; + /* + * For some reason (e.g. different columns assigned to the same address), + * this callback can be called with an address which has already been + * processed. Ignore such duplicates.
+ */ + if (available_var_finder_overlap(af)) + return 0; + /* Check number of tevs */ if (af->nvls == af->max_vls) { pr_warning("Too many( > %d) probe point found.\n", af->max_vls); @@ -1578,7 +1606,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, /* Get function entry information */ func = basefunc = dwarf_diename(&spdie); if (!func || - dwarf_entrypc(&spdie, &baseaddr) != 0 || + die_entrypc(&spdie, &baseaddr) != 0 || dwarf_decl_line(&spdie, &baseline) != 0) { lineno = 0; goto post; @@ -1595,7 +1623,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr, &indie)) { /* There is an inline function */ - if (dwarf_entrypc(&indie, &_addr) == 0 && + if (die_entrypc(&indie, &_addr) == 0 && _addr == addr) { /* * addr is at an inline function entry. diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 061bb4d6a3f5..5c172845fa5a 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1954,8 +1954,8 @@ out_err: } static union perf_event * -fetch_mmaped_event(struct perf_session *session, - u64 head, size_t mmap_size, char *buf) +prefetch_event(char *buf, u64 head, size_t mmap_size, + bool needs_swap, union perf_event *error) { union perf_event *event; @@ -1967,20 +1967,32 @@ fetch_mmaped_event(struct perf_session *session, return NULL; event = (union perf_event *)(buf + head); - - if (session->header.needs_swap) + if (needs_swap) perf_event_header__bswap(&event->header); - if (head + event->header.size > mmap_size) { - /* We're not fetching the event so swap back again */ - if (session->header.needs_swap) - perf_event_header__bswap(&event->header); - pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n", - __func__, head, event->header.size, mmap_size); - return ERR_PTR(-EINVAL); - } + if (head + event->header.size <= mmap_size) + return event; - return event; + /* We're not fetching the event so swap back again */ + if (needs_swap) + perf_event_header__bswap(&event->header); + + pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:" + " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size); + + return error; +} + +static union perf_event * +fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) +{ + return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL)); +} + +static union perf_event * +fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) +{ + return prefetch_event(buf, head, mmap_size, needs_swap, NULL); } static int __perf_session__process_decomp_events(struct perf_session *session) @@ -1993,10 +2005,8 @@ static int __perf_session__process_decomp_events(struct perf_session *session) return 0; while (decomp->head < decomp->size && !session_done()) { - union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data); - - if (IS_ERR(event)) - return PTR_ERR(event); + union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data, + session->header.needs_swap); if (!event) break; @@ -2096,7 +2106,7 @@ remap: } more: - event = fetch_mmaped_event(session, head, mmap_size, buf); + event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap); if (IS_ERR(event)) return PTR_ERR(event); diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 2c41d47f6f83..90d23cc3c8d4 100644 --- a/tools/perf/util/stat-shadow.c +++ 
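The prefetch_event() refactor above preserves one safety property: both the fixed-size header and the size the header claims must lie inside the mapped buffer before the record is touched, otherwise a fuzzed or compressed perf.data could drive reads past the mapping. A standalone sketch of that check, eliding the byte-swap step (fetch() is illustrative):

#include <linux/perf_event.h>
#include <stddef.h>

/* Return the record at buf+head only if it fits inside mmap_size. */
static struct perf_event_header *fetch(char *buf, size_t head, size_t mmap_size)
{
	struct perf_event_header *hdr;

	if (head + sizeof(*hdr) > mmap_size)
		return NULL;		/* no room left even for a header */

	hdr = (struct perf_event_header *)(buf + head);
	if (head + hdr->size > mmap_size)
		return NULL;		/* truncated or fuzzed record */

	return hdr;
}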
b/tools/perf/util/stat-shadow.c @@ -18,7 +18,6 @@ * AGGR_NONE: Use matching CPU * AGGR_THREAD: Not supported? */ -static bool have_frontend_stalled; struct runtime_stat rt_stat; struct stats walltime_nsecs_stats; @@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st) void perf_stat__init_shadow_stats(void) { - have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend"); runtime_stat__init(&rt_stat); } @@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, NULL, "%7.2f ", "stalled cycles per insn", ratio); - } else if (have_frontend_stalled) { - out->new_line(config, ctxp); - print_metric(config, ctxp, NULL, "%7.2f ", - "stalled cycles per insn", 0); } } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) { if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0) diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c index 9796a2e43f67..302443921681 100644 --- a/tools/perf/util/time-utils.c +++ b/tools/perf/util/time-utils.c @@ -458,10 +458,11 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf, return true; } -int perf_time__parse_for_ranges(const char *time_str, +int perf_time__parse_for_ranges_reltime(const char *time_str, struct perf_session *session, struct perf_time_interval **ranges, - int *range_size, int *range_num) + int *range_size, int *range_num, + bool reltime) { bool has_percent = strchr(time_str, '%'); struct perf_time_interval *ptime_range; @@ -471,7 +472,7 @@ int perf_time__parse_for_ranges(const char *time_str, if (!ptime_range) return -ENOMEM; - if (has_percent) { + if (has_percent || reltime) { if (session->evlist->first_sample_time == 0 && session->evlist->last_sample_time == 0) { pr_err("HINT: no first/last sample time found in perf data.\n" @@ -479,7 +480,9 @@ int perf_time__parse_for_ranges(const char *time_str, "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n"); goto error; } + } + if (has_percent) { num = perf_time__percent_parse_str( ptime_range, size, time_str, @@ -492,6 +495,15 @@ int perf_time__parse_for_ranges(const char *time_str, if (num < 0) goto error_invalid; + if (reltime) { + int i; + + for (i = 0; i < num; i++) { + ptime_range[i].start += session->evlist->first_sample_time; + ptime_range[i].end += session->evlist->first_sample_time; + } + } + *range_size = size; *range_num = num; *ranges = ptime_range; @@ -504,6 +516,15 @@ error: return ret; } +int perf_time__parse_for_ranges(const char *time_str, + struct perf_session *session, + struct perf_time_interval **ranges, + int *range_size, int *range_num) +{ + return perf_time__parse_for_ranges_reltime(time_str, session, ranges, + range_size, range_num, false); +} + int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz) { u64 sec = timestamp / NSEC_PER_SEC; diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h index 4f42988eb2f7..1142b0bddd5e 100644 --- a/tools/perf/util/time-utils.h +++ b/tools/perf/util/time-utils.h @@ -26,6 +26,11 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf, struct perf_session; +int perf_time__parse_for_ranges_reltime(const char *str, struct perf_session *session, + struct perf_time_interval **ranges, + int *range_size, int *range_num, + bool reltime); + int perf_time__parse_for_ranges(const char *str, struct perf_session *session, struct perf_time_interval **ranges, int *range_size, int *range_num); diff --git a/tools/power/acpi/Makefile.config 
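On the time-utils hunks above: with --reltime the user writes offsets from the start of the trace, so after parsing, each range is rebased onto the session's first sample timestamp. A minimal sketch of the rebasing with a simplified stand-in for struct perf_time_interval:

#include <stdint.h>

struct range { uint64_t start, end; };	/* stand-in for perf_time_interval */

/* Rebase trace-relative ranges to absolute timestamps, as the reltime
 * branch of perf_time__parse_for_ranges_reltime() does. */
static void rebase_ranges(struct range *r, int num, uint64_t first_sample_time)
{
	for (int i = 0; i < num; i++) {
		r[i].start += first_sample_time;
		r[i].end += first_sample_time;
	}
}

With that rebasing in place, a range written as seconds from trace start selects the intended absolute window, which is why the old "don't combine --reltime with --time" rejection could be dropped.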
b/tools/power/acpi/Makefile.config index 0111d246d1ca..54a2857c2510 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include OUTPUT=$(srctree)/ ifeq ("$(origin O)", "command line") - OUTPUT := $(O)/power/acpi/ + OUTPUT := $(O)/tools/power/acpi/ endif #$(info Determined 'OUTPUT' to be $(OUTPUT)) diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c index 2f55d4d23446..6e04304560ca 100644 --- a/tools/power/cpupower/lib/cpufreq.c +++ b/tools/power/cpupower/lib/cpufreq.c @@ -332,21 +332,18 @@ void cpufreq_put_available_governors(struct cpufreq_available_governors *any) } -struct cpufreq_frequencies -*cpufreq_get_frequencies(const char *type, unsigned int cpu) +struct cpufreq_available_frequencies +*cpufreq_get_available_frequencies(unsigned int cpu) { - struct cpufreq_frequencies *first = NULL; - struct cpufreq_frequencies *current = NULL; + struct cpufreq_available_frequencies *first = NULL; + struct cpufreq_available_frequencies *current = NULL; char one_value[SYSFS_PATH_MAX]; char linebuf[MAX_LINE_LEN]; - char fname[MAX_LINE_LEN]; unsigned int pos, i; unsigned int len; - snprintf(fname, MAX_LINE_LEN, "scaling_%s_frequencies", type); - - len = sysfs_cpufreq_read_file(cpu, fname, - linebuf, sizeof(linebuf)); + len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies", + linebuf, sizeof(linebuf)); if (len == 0) return NULL; @@ -391,9 +388,65 @@ struct cpufreq_frequencies return NULL; } -void cpufreq_put_frequencies(struct cpufreq_frequencies *any) +struct cpufreq_available_frequencies +*cpufreq_get_boost_frequencies(unsigned int cpu) { - struct cpufreq_frequencies *tmp, *next; + struct cpufreq_available_frequencies *first = NULL; + struct cpufreq_available_frequencies *current = NULL; + char one_value[SYSFS_PATH_MAX]; + char linebuf[MAX_LINE_LEN]; + unsigned int pos, i; + unsigned int len; + + len = sysfs_cpufreq_read_file(cpu, "scaling_boost_frequencies", + linebuf, sizeof(linebuf)); + if (len == 0) + return NULL; + + pos = 0; + for (i = 0; i < len; i++) { + if (linebuf[i] == ' ' || linebuf[i] == '\n') { + if (i - pos < 2) + continue; + if (i - pos >= SYSFS_PATH_MAX) + goto error_out; + if (current) { + current->next = malloc(sizeof(*current)); + if (!current->next) + goto error_out; + current = current->next; + } else { + first = malloc(sizeof(*first)); + if (!first) + goto error_out; + current = first; + } + current->first = first; + current->next = NULL; + + memcpy(one_value, linebuf + pos, i - pos); + one_value[i - pos] = '\0'; + if (sscanf(one_value, "%lu", ¤t->frequency) != 1) + goto error_out; + + pos = i + 1; + } + } + + return first; + + error_out: + while (first) { + current = first->next; + free(first); + first = current; + } + return NULL; +} + +void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies *any) +{ + struct cpufreq_available_frequencies *tmp, *next; if (!any) return; @@ -406,6 +459,11 @@ void cpufreq_put_frequencies(struct cpufreq_frequencies *any) } } +void cpufreq_put_boost_frequencies(struct cpufreq_available_frequencies *any) +{ + cpufreq_put_available_frequencies(any); +} + static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu, const char *file) { diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h index a55f0d19215b..95f4fd9e2656 100644 --- a/tools/power/cpupower/lib/cpufreq.h +++ b/tools/power/cpupower/lib/cpufreq.h @@ -20,10 +20,10 @@ struct 
cpufreq_available_governors { struct cpufreq_available_governors *first; }; -struct cpufreq_frequencies { +struct cpufreq_available_frequencies { unsigned long frequency; - struct cpufreq_frequencies *next; - struct cpufreq_frequencies *first; + struct cpufreq_available_frequencies *next; + struct cpufreq_available_frequencies *first; }; @@ -124,11 +124,17 @@ void cpufreq_put_available_governors( * cpufreq_put_frequencies after use. */ -struct cpufreq_frequencies -*cpufreq_get_frequencies(const char *type, unsigned int cpu); +struct cpufreq_available_frequencies +*cpufreq_get_available_frequencies(unsigned int cpu); -void cpufreq_put_frequencies( - struct cpufreq_frequencies *first); +void cpufreq_put_available_frequencies( + struct cpufreq_available_frequencies *first); + +struct cpufreq_available_frequencies +*cpufreq_get_boost_frequencies(unsigned int cpu); + +void cpufreq_put_boost_frequencies( + struct cpufreq_available_frequencies *first); /* determine affected CPUs diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c index e63cf55f81cf..6efc0f6b1b11 100644 --- a/tools/power/cpupower/utils/cpufreq-info.c +++ b/tools/power/cpupower/utils/cpufreq-info.c @@ -244,14 +244,14 @@ static int get_boost_mode_x86(unsigned int cpu) static int get_boost_mode(unsigned int cpu) { - struct cpufreq_frequencies *freqs; + struct cpufreq_available_frequencies *freqs; if (cpupower_cpu_info.vendor == X86_VENDOR_AMD || cpupower_cpu_info.vendor == X86_VENDOR_HYGON || cpupower_cpu_info.vendor == X86_VENDOR_INTEL) return get_boost_mode_x86(cpu); - freqs = cpufreq_get_frequencies("boost", cpu); + freqs = cpufreq_get_boost_frequencies(cpu); if (freqs) { printf(_(" boost frequency steps: ")); while (freqs->next) { @@ -261,7 +261,7 @@ static int get_boost_mode(unsigned int cpu) } print_speed(freqs->frequency); printf("\n"); - cpufreq_put_frequencies(freqs); + cpufreq_put_available_frequencies(freqs); } return 0; @@ -475,7 +475,7 @@ static int get_latency(unsigned int cpu, unsigned int human) static void debug_output_one(unsigned int cpu) { - struct cpufreq_frequencies *freqs; + struct cpufreq_available_frequencies *freqs; get_driver(cpu); get_related_cpus(cpu); @@ -483,7 +483,7 @@ static void debug_output_one(unsigned int cpu) get_latency(cpu, 1); get_hardware_limits(cpu, 1); - freqs = cpufreq_get_frequencies("available", cpu); + freqs = cpufreq_get_available_frequencies(cpu); if (freqs) { printf(_(" available frequency steps: ")); while (freqs->next) { @@ -493,7 +493,7 @@ static void debug_output_one(unsigned int cpu) } print_speed(freqs->frequency); printf("\n"); - cpufreq_put_frequencies(freqs); + cpufreq_put_available_frequencies(freqs); } get_available_governors(cpu); diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c index 3f893b99b337..555cb338a71a 100644 --- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c +++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c @@ -82,7 +82,7 @@ static struct pci_access *pci_acc; static struct pci_dev *amd_fam14h_pci_dev; static int nbp1_entered; -struct timespec start_time; +static struct timespec start_time; static unsigned long long timediff; #ifdef DEBUG diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c index f634aeb65c5f..7fb4f7a291ad 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c +++ 
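A usage note on the renamed cpupower API above: the frequency helpers return a singly linked list whose nodes expose frequency and next, and the caller must release it with the matching put function. A hedged sketch against the declarations in cpufreq.h as patched here (print_freqs() is illustrative):

#include <stdio.h>

#include "cpufreq.h"	/* tools/power/cpupower/lib header, as patched above */

static void print_freqs(unsigned int cpu)
{
	struct cpufreq_available_frequencies *freqs, *f;

	freqs = cpufreq_get_available_frequencies(cpu);
	if (!freqs)
		return;		/* sysfs attribute missing or unreadable */

	for (f = freqs; f; f = f->next)
		printf("%lu kHz\n", f->frequency);

	cpufreq_put_available_frequencies(freqs);
}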
b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c @@ -19,7 +19,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor; static unsigned long long **previous_count; static unsigned long long **current_count; -struct timespec start_time; +static struct timespec start_time; static unsigned long long timediff; static int cpuidle_get_count_percent(unsigned int id, double *percent, diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index d3c3e6e7aa26..3d54fd433626 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c @@ -27,6 +27,8 @@ struct cpuidle_monitor *all_monitors[] = { 0 }; +int cpu_count; + static struct cpuidle_monitor *monitors[MONITORS_MAX]; static unsigned int avail_monitors; diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h index a2d901d3bfaf..eafef38f1982 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h @@ -25,7 +25,7 @@ #endif #define CSTATE_DESC_LEN 60 -int cpu_count; +extern int cpu_count; /* Hard to define the right names ...: */ enum power_range_e { diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c index 7c7451d3f494..58dbdfd4fa13 100644 --- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c +++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c @@ -39,7 +39,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = { { .name = "PC9", .desc = N_("Processor Package C9"), - .desc = N_("Processor Package C2"), .id = PC9, .range = RANGE_PACKAGE, .get_count_percent = hsw_ext_get_count_percent, diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c index 2a9890c8395a..21fcfe621d3a 100644 --- a/tools/power/x86/intel-speed-select/isst-config.c +++ b/tools/power/x86/intel-speed-select/isst-config.c @@ -169,7 +169,7 @@ int get_topo_max_cpus(void) static void set_cpu_online_offline(int cpu, int state) { char buffer[128]; - int fd; + int fd, ret; snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/cpu%d/online", cpu); @@ -179,9 +179,12 @@ static void set_cpu_online_offline(int cpu, int state) err(-1, "%s open failed", buffer); if (state) - write(fd, "1\n", 2); + ret = write(fd, "1\n", 2); else - write(fd, "0\n", 2); + ret = write(fd, "0\n", 2); + + if (ret == -1) + perror("Online/Offline: Operation failed\n"); close(fd); } diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c index 6dee5332c9d3..fde3f9cefc6d 100644 --- a/tools/power/x86/intel-speed-select/isst-core.c +++ b/tools/power/x86/intel-speed-select/isst-core.c @@ -553,7 +553,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev) i); ctdp_level = &pkg_dev->ctdp_level[i]; - ctdp_level->processed = 1; ctdp_level->level = i; ctdp_level->control_cpu = cpu; ctdp_level->pkg_id = get_physical_package_id(cpu); @@ -561,7 +560,10 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev) ret = isst_get_ctdp_control(cpu, i, ctdp_level); if (ret) - return ret; + continue; + + pkg_dev->processed = 1; + ctdp_level->processed = 1; ret = isst_get_tdp_info(cpu, i, ctdp_level); if (ret) @@ -614,8 +616,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, 
struct isst_pkg_ctdp *pkg_dev) } } - pkg_dev->processed = 1; - return 0; } diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c index 40346d534f78..b11575c3e886 100644 --- a/tools/power/x86/intel-speed-select/isst-display.c +++ b/tools/power/x86/intel-speed-select/isst-display.c @@ -314,7 +314,8 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level, char value[256]; int i, base_level = 1; - print_package_info(cpu, outf); + if (pkg_dev->processed) + print_package_info(cpu, outf); for (i = 0; i <= pkg_dev->levels; ++i) { struct isst_pkg_ctdp_level_info *ctdp_level; diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile index 13f1e8b9ac52..2b6551269e43 100644 --- a/tools/power/x86/turbostat/Makefile +++ b/tools/power/x86/turbostat/Makefile @@ -16,7 +16,7 @@ override CFLAGS += -D_FORTIFY_SOURCE=2 %: %.c @mkdir -p $(BUILD_OUTPUT) - $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) + $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) -lcap .PHONY : clean clean : diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 5d0fddda842c..988326b67a91 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include @@ -304,6 +304,10 @@ int *irqs_per_cpu; /* indexed by cpu_num */ void setup_all_buffers(void); +char *sys_lpi_file; +char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"; +char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec"; + int cpu_is_not_present(int cpu) { return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); @@ -2916,8 +2920,6 @@ int snapshot_gfx_mhz(void) * * record snapshot of * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us - * - * return 1 if config change requires a restart, else return 0 */ int snapshot_cpu_lpi_us(void) { @@ -2941,17 +2943,14 @@ int snapshot_cpu_lpi_us(void) /* * snapshot_sys_lpi() * - * record snapshot of - * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us - * - * return 1 if config change requires a restart, else return 0 + * record snapshot of sys_lpi_file */ int snapshot_sys_lpi_us(void) { FILE *fp; int retval; - fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); + fp = fopen_or_die(sys_lpi_file, "r"); retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); if (retval != 1) { @@ -3151,27 +3150,41 @@ void check_dev_msr() err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } -void check_permissions() +/* + * check for CAP_SYS_RAWIO + * return 0 on success + * return 1 on fail + */ +int check_for_cap_sys_rawio(void) +{ + cap_t caps; + cap_flag_value_t cap_flag_value; + + caps = cap_get_proc(); + if (caps == NULL) + err(-6, "cap_get_proc\n"); + + if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) + err(-6, "cap_get\n"); + + if (cap_flag_value != CAP_SET) { + warnx("capget(CAP_SYS_RAWIO) failed," + " try \"# setcap cap_sys_rawio=ep %s\"", progname); + return 1; + } + + if (cap_free(caps) == -1) + err(-6, "cap_free\n"); + + return 0; +} +void check_permissions(void) { - struct __user_cap_header_struct cap_header_data; - cap_user_header_t cap_header = &cap_header_data; - struct __user_cap_data_struct cap_data_data; - cap_user_data_t cap_data = &cap_data_data; - extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); 
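The turbostat change above swaps the raw capget(2) probe for the documented libcap API, which is why the Makefile hunk now links -lcap. A minimal standalone version of the same probe (have_sys_rawio() is illustrative; the libcap calls themselves are the documented ones):

#include <err.h>
#include <sys/capability.h>	/* link with -lcap */

/* Return 0 when CAP_SYS_RAWIO is effective for the current process. */
static int have_sys_rawio(void)
{
	cap_flag_value_t val;
	cap_t caps = cap_get_proc();

	if (!caps)
		err(1, "cap_get_proc");
	if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &val))
		err(1, "cap_get_flag");
	cap_free(caps);

	return val == CAP_SET ? 0 : 1;
}

int main(void)
{
	return have_sys_rawio();
}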
int do_exit = 0; char pathname[32]; /* check for CAP_SYS_RAWIO */ - cap_header->pid = getpid(); - cap_header->version = _LINUX_CAPABILITY_VERSION; - if (capget(cap_header, cap_data) < 0) - err(-6, "capget(2) failed"); - - if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) { - do_exit++; - warnx("capget(CAP_SYS_RAWIO) failed," - " try \"# setcap cap_sys_rawio=ep %s\"", progname); - } + do_exit += check_for_cap_sys_rawio(); /* test file permissions */ sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); @@ -4907,10 +4920,16 @@ void process_cpuid() else BIC_NOT_PRESENT(BIC_CPU_LPI); - if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK)) + if (!access(sys_lpi_file_sysfs, R_OK)) { + sys_lpi_file = sys_lpi_file_sysfs; BIC_PRESENT(BIC_SYS_LPI); - else + } else if (!access(sys_lpi_file_debugfs, R_OK)) { + sys_lpi_file = sys_lpi_file_debugfs; + BIC_PRESENT(BIC_SYS_LPI); + } else { + sys_lpi_file_sysfs = NULL; BIC_NOT_PRESENT(BIC_SYS_LPI); + } if (!quiet) decode_misc_feature_control(); @@ -5323,9 +5342,9 @@ int add_counter(unsigned int msr_num, char *path, char *name, } msrp->msr_num = msr_num; - strncpy(msrp->name, name, NAME_BYTES); + strncpy(msrp->name, name, NAME_BYTES - 1); if (path) - strncpy(msrp->path, path, PATH_BYTES); + strncpy(msrp->path, path, PATH_BYTES - 1); msrp->width = width; msrp->type = type; msrp->format = format; diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index ded7a950dc40..6d2f3a1b2249 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 ifneq ($(O),) ifeq ($(origin O), command line) - dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) - ABSOLUTE_O := $(shell cd $(O) ; pwd) + dummy := $(if $(shell cd $(PWD); test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) + ABSOLUTE_O := $(shell cd $(PWD); cd $(O) ; pwd) OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) COMMAND_O := O=$(ABSOLUTE_O) ifeq ($(objtree),) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 220d04f958a6..42b6cd41d2ea 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1383,7 +1383,7 @@ sub reboot { } else { # Make sure everything has been written to disk - run_ssh("sync"); + run_ssh("sync", 10); if (defined($time)) { start_monitor; diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 4cdbae6f4e61..612f6757015d 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -213,7 +213,7 @@ ifdef INSTALL_PATH @# included in the generated runlist. for TARGET in $(TARGETS); do \ BUILD_TARGET=$$BUILD/$$TARGET; \ - [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \ + [ ! 
-d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \ echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \ echo "cd $$TARGET" >> $(ALL_SCRIPT); \ echo -n "run_many" >> $(ALL_SCRIPT); \ diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 54a50699bbfd..9f77cbaac01c 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -3,7 +3,7 @@ #define __BPF_HELPERS__ #define __uint(name, val) int (*name)[val] -#define __type(name, val) val *name +#define __type(name, val) typeof(val) *name /* helper macro to print out debug messages */ #define bpf_printk(fmt, ...) \ diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index e95c33e333a4..b29a73fe64db 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -98,7 +98,7 @@ int enable_all_controllers(char *cgroup_path) */ int setup_cgroup_environment(void) { - char cgroup_workdir[PATH_MAX + 1]; + char cgroup_workdir[PATH_MAX - 24]; format_cgroup_path(cgroup_workdir, ""); diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 5ecc267d98b0..fad615c22e4d 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -2,7 +2,7 @@ #include ssize_t get_base_addr() { - size_t start; + size_t start, offset; char buf[256]; FILE *f; @@ -10,10 +10,11 @@ ssize_t get_base_addr() { if (!f) return -errno; - while (fscanf(f, "%zx-%*x %s %*s\n", &start, buf) == 2) { + while (fscanf(f, "%zx-%*x %s %zx %*[^\n]\n", + &start, buf, &offset) == 3) { if (strcmp(buf, "r-xp") == 0) { fclose(f); - return start; + return start - offset; } } diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 3003fddc0613..cf6c87936c69 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -4,6 +4,7 @@ #include #include #include +#include "libbpf_internal.h" static void on_sample(void *ctx, int cpu, void *data, __u32 size) { @@ -19,7 +20,7 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_perf_buffer(void) { - int err, prog_fd, nr_cpus, i, duration = 0; + int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; const char *prog_name = "kprobe/sys_nanosleep"; const char *file = "./test_perf_buffer.o"; struct perf_buffer_opts pb_opts = {}; @@ -29,15 +30,27 @@ void test_perf_buffer(void) struct bpf_object *obj; struct perf_buffer *pb; struct bpf_link *link; + bool *online; nr_cpus = libbpf_num_possible_cpus(); if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus)) return; + err = parse_cpu_mask_file("/sys/devices/system/cpu/online", + &online, &on_len); + if (CHECK(err, "nr_on_cpus", "err %d\n", err)) + return; + + for (i = 0; i < on_len; i++) + if (online[i]) + nr_on_cpus++; + /* load program */ err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) - return; + if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out_close; + } prog = bpf_object__find_program_by_title(obj, prog_name); if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) @@ -64,6 +77,11 @@ void 
test_perf_buffer(void) /* trigger kprobe on every CPU */ CPU_ZERO(&cpu_seen); for (i = 0; i < nr_cpus; i++) { + if (i >= on_len || !online[i]) { + printf("skipping offline CPU #%d\n", i); + continue; + } + CPU_ZERO(&cpu_set); CPU_SET(i, &cpu_set); @@ -81,8 +99,8 @@ void test_perf_buffer(void) if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) goto out_free_pb; - if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt", - "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen))) + if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt", + "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen))) goto out_free_pb; out_free_pb: @@ -91,4 +109,5 @@ out_detach: bpf_link__destroy(link); out_close: bpf_object__close(obj); + free(online); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c new file mode 100644 index 000000000000..aa43e0bd210c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Cloudflare + +#include "test_progs.h" + +#define TCP_REPAIR 19 /* TCP sock is under repair right now */ + +#define TCP_REPAIR_ON 1 +#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ + +static int connected_socket_v4(void) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_port = htons(80), + .sin_addr = { inet_addr("127.0.0.1") }, + }; + socklen_t len = sizeof(addr); + int s, repair, err; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK_FAIL(s == -1)) + goto error; + + repair = TCP_REPAIR_ON; + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); + if (CHECK_FAIL(err)) + goto error; + + err = connect(s, (struct sockaddr *)&addr, len); + if (CHECK_FAIL(err)) + goto error; + + repair = TCP_REPAIR_OFF_NO_WP; + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); + if (CHECK_FAIL(err)) + goto error; + + return s; +error: + perror(__func__); + close(s); + return -1; +} + +/* Create a map, populate it with one socket, and free the map. */ +static void test_sockmap_create_update_free(enum bpf_map_type map_type) +{ + const int zero = 0; + int s, map, err; + + s = connected_socket_v4(); + if (CHECK_FAIL(s == -1)) + return; + + map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); + if (CHECK_FAIL(map == -1)) { + perror("bpf_create_map"); + goto out; + } + + err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); + if (CHECK_FAIL(err)) { + perror("bpf_map_update"); + goto out; + } + +out: + close(map); + close(s); +} + +void test_sockmap_basic(void) +{ + if (test__start_subtest("sockmap create_update_free")) + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); + if (test__start_subtest("sockhash create_update_free")) + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index f62aa0eb959b..1735faf17536 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -49,8 +49,12 @@ retry: pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, -1 /* group id */, 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", - "err %d errno %d. 
Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n", + if (pmu_fd < 0 && errno == ENOENT) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); + test__skip(); + goto cleanup; + } + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, errno)) goto close_prog; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c index 3a62119c7498..35c512818a56 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -62,6 +62,10 @@ struct padded_a_lot { * long: 64; * long: 64; * int b; + * long: 32; + * long: 64; + * long: 64; + * long: 64; *}; * */ @@ -95,7 +99,6 @@ struct zone_padding { struct zone { int a; short b; - short: 16; struct zone_padding __pad__; }; diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c index f8ffa3f3d44b..6cc4479ac9df 100644 --- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c @@ -47,12 +47,11 @@ struct { * issue and avoid complicated C programming massaging. * This is an acceptable workaround since there is one entry here. */ -typedef __u64 raw_stack_trace_t[2 * MAX_STACK_RAWTP]; struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 1); __type(key, __u32); - __type(value, raw_stack_trace_t); + __type(value, __u64[2 * MAX_STACK_RAWTP]); } rawdata_map SEC(".maps"); SEC("raw_tracepoint/sys_enter") diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c index c4d104428643..69880c1e7700 100644 --- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c +++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c @@ -132,8 +132,10 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb, *pad_off = 0; // we can only go as far as ~10 TLVs due to the BPF max stack size + // workaround: define induction variable "i" as "long" instead + // of "int" to prevent alu32 sub-register spilling. #pragma clang loop unroll(disable) - for (int i = 0; i < 100; i++) { + for (long i = 0; i < 100; i++) { struct sr6_tlv_t tlv; if (cur_off == *tlv_off) diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index ea7d84f01235..e6be383a003f 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -113,6 +113,12 @@ int _select_by_skb_data(struct sk_reuseport_md *reuse_md) data_check.skb_ports[0] = th->source; data_check.skb_ports[1] = th->dest; + if (th->fin) + /* The connection is being torn down at the end of a + * test. It can't contain a cmd, so return early. 
+ */ + return SK_PASS; + if ((th->doff << 2) + sizeof(*cmd) > data_check.len) GOTO_DONE(DROP_ERR_SKB_DATA); if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy, diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index 608a06871572..d22e438198cf 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -44,7 +44,10 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) unsigned long tcp_mem[TCP_MEM_LOOPS] = {}; char value[MAX_VALUE_STR_LEN]; unsigned char i, off = 0; - int ret; + /* a workaround to prevent compiler from generating + * code the verifier cannot handle yet. + */ + volatile int ret; if (ctx->write) return 0; diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index af75a1c7a458..3bf18364c67c 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -20,7 +20,7 @@ struct prog_test_def { bool tested; bool need_cgroup_cleanup; - const char *subtest_name; + char *subtest_name; int subtest_num; /* store counts before subtest started */ @@ -81,16 +81,17 @@ void test__end_subtest() fprintf(env.stdout, "#%d/%d %s:%s\n", test->test_num, test->subtest_num, test->subtest_name, sub_error_cnt ? "FAIL" : "OK"); + + free(test->subtest_name); + test->subtest_name = NULL; } bool test__start_subtest(const char *name) { struct prog_test_def *test = env.test; - if (test->subtest_name) { + if (test->subtest_name) test__end_subtest(); - test->subtest_name = NULL; - } test->subtest_num++; @@ -104,7 +105,13 @@ bool test__start_subtest(const char *name) if (!should_run(&env.subtest_selector, test->subtest_num, name)) return false; - test->subtest_name = name; + test->subtest_name = strdup(name); + if (!test->subtest_name) { + fprintf(env.stderr, + "Subtest #%d: failed to copy subtest name!\n", + test->subtest_num); + return false; + } env.test->old_error_cnt = env.test->error_cnt; return true; diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c index 7566c13eb51a..079d0f5a2909 100644 --- a/tools/testing/selftests/bpf/test_select_reuseport.c +++ b/tools/testing/selftests/bpf/test_select_reuseport.c @@ -30,7 +30,7 @@ #define REUSEPORT_ARRAY_SIZE 32 static int result_map, tmp_index_ovr_map, linum_map, data_check_map; -static enum result expected_results[NR_RESULTS]; +static __u32 expected_results[NR_RESULTS]; static int sk_fds[REUSEPORT_ARRAY_SIZE]; static int reuseport_array, outer_map; static int select_by_skb_data_prog; @@ -662,7 +662,19 @@ static void setup_per_test(int type, unsigned short family, bool inany) static void cleanup_per_test(void) { - int i, err; + int i, err, zero = 0; + + memset(expected_results, 0, sizeof(expected_results)); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY); + RET_IF(err, "reset elem in result_map", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY); + RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n", + err, errno); for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) close(sk_fds[i]); diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 3845144e2c91..779e11da979c 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -240,14 +240,14 @@ static int
sockmap_init_sockets(int verbose) addr.sin_port = htons(S1_PORT); err = bind(s1, (struct sockaddr *)&addr, sizeof(addr)); if (err < 0) { - perror("bind s1 failed()\n"); + perror("bind s1 failed()"); return errno; } addr.sin_port = htons(S2_PORT); err = bind(s2, (struct sockaddr *)&addr, sizeof(addr)); if (err < 0) { - perror("bind s2 failed()\n"); + perror("bind s2 failed()"); return errno; } @@ -255,14 +255,14 @@ static int sockmap_init_sockets(int verbose) addr.sin_port = htons(S1_PORT); err = listen(s1, 32); if (err < 0) { - perror("listen s1 failed()\n"); + perror("listen s1 failed()"); return errno; } addr.sin_port = htons(S2_PORT); err = listen(s2, 32); if (err < 0) { - perror("listen s1 failed()\n"); + perror("listen s1 failed()"); return errno; } @@ -270,14 +270,14 @@ static int sockmap_init_sockets(int verbose) addr.sin_port = htons(S1_PORT); err = connect(c1, (struct sockaddr *)&addr, sizeof(addr)); if (err < 0 && errno != EINPROGRESS) { - perror("connect c1 failed()\n"); + perror("connect c1 failed()"); return errno; } addr.sin_port = htons(S2_PORT); err = connect(c2, (struct sockaddr *)&addr, sizeof(addr)); if (err < 0 && errno != EINPROGRESS) { - perror("connect c2 failed()\n"); + perror("connect c2 failed()"); return errno; } else if (err < 0) { err = 0; @@ -286,13 +286,13 @@ static int sockmap_init_sockets(int verbose) /* Accept Connecrtions */ p1 = accept(s1, NULL, NULL); if (p1 < 0) { - perror("accept s1 failed()\n"); + perror("accept s1 failed()"); return errno; } p2 = accept(s2, NULL, NULL); if (p2 < 0) { - perror("accept s1 failed()\n"); + perror("accept s1 failed()"); return errno; } @@ -331,25 +331,29 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, FILE *file; int i, fp; - file = fopen(".sendpage_tst.tmp", "w+"); + file = tmpfile(); + if (!file) { + perror("create file for sendpage"); + return 1; + } for (i = 0; i < iov_length * cnt; i++, k++) fwrite(&k, sizeof(char), 1, file); fflush(file); fseek(file, 0, SEEK_SET); - fclose(file); - fp = open(".sendpage_tst.tmp", O_RDONLY); + fp = fileno(file); + clock_gettime(CLOCK_MONOTONIC, &s->start); for (i = 0; i < cnt; i++) { int sent = sendfile(fd, fp, NULL, iov_length); if (!drop && sent < 0) { - perror("send loop error:"); - close(fp); + perror("send loop error"); + fclose(file); return sent; } else if (drop && sent >= 0) { printf("sendpage loop error expected: %i\n", sent); - close(fp); + fclose(file); return -EIO; } @@ -357,7 +361,7 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, s->bytes_sent += sent; } clock_gettime(CLOCK_MONOTONIC, &s->end); - close(fp); + fclose(file); return 0; } @@ -463,7 +467,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, int sent = sendmsg(fd, &msg, flags); if (!drop && sent < 0) { - perror("send loop error:"); + perror("send loop error"); goto out_errno; } else if (drop && sent >= 0) { printf("send loop error expected: %i\n", sent); @@ -499,7 +503,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, total_bytes -= txmsg_pop_total; err = clock_gettime(CLOCK_MONOTONIC, &s->start); if (err < 0) - perror("recv start time: "); + perror("recv start time"); while (s->bytes_recvd < total_bytes) { if (txmsg_cork) { timeout.tv_sec = 0; @@ -543,7 +547,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, if (recv < 0) { if (errno != EWOULDBLOCK) { clock_gettime(CLOCK_MONOTONIC, &s->end); - perror("recv failed()\n"); + perror("recv failed()"); goto out_errno; } } @@ -557,7 +561,7 @@ static int msg_loop(int fd, 
int iov_count, int iov_length, int cnt, errno = msg_verify_data(&msg, recv, chunk_sz); if (errno) { - perror("data verify msg failed\n"); + perror("data verify msg failed"); goto out_errno; } if (recvp) { @@ -565,7 +569,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, recvp, chunk_sz); if (errno) { - perror("data verify msg_peek failed\n"); + perror("data verify msg_peek failed"); goto out_errno; } } @@ -654,7 +658,7 @@ static int sendmsg_test(struct sockmap_options *opt) err = 0; exit(err ? 1 : 0); } else if (rxpid == -1) { - perror("msg_loop_rx: "); + perror("msg_loop_rx"); return errno; } @@ -681,7 +685,7 @@ static int sendmsg_test(struct sockmap_options *opt) s.bytes_recvd, recvd_Bps, recvd_Bps/giga); exit(err ? 1 : 0); } else if (txpid == -1) { - perror("msg_loop_tx: "); + perror("msg_loop_tx"); return errno; } @@ -715,7 +719,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt) /* Ping/Pong data from client to server */ sc = send(c1, buf, sizeof(buf), 0); if (sc < 0) { - perror("send failed()\n"); + perror("send failed()"); return sc; } @@ -748,7 +752,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt) rc = recv(i, buf, sizeof(buf), 0); if (rc < 0) { if (errno != EWOULDBLOCK) { - perror("recv failed()\n"); + perror("recv failed()"); return rc; } } @@ -760,7 +764,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt) sc = send(i, buf, rc, 0); if (sc < 0) { - perror("send failed()\n"); + perror("send failed()"); return sc; } } diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index ff0d31d38061..7c76b841b17b 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -62,6 +62,10 @@ cleanup() { if [[ -f "${infile}" ]]; then rm "${infile}" fi + + if [[ -n $server_pid ]]; then + kill $server_pid 2> /dev/null + fi } server_listen() { @@ -77,6 +81,7 @@ client_connect() { verify_data() { wait "${server_pid}" + server_pid= # sha1sum returns two fields [sha1] [filepath] # convert to bash array and access first elem insum=($(sha1sum ${infile})) diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c index d60a343b1371..842d9155d36c 100644 --- a/tools/testing/selftests/bpf/xdping.c +++ b/tools/testing/selftests/bpf/xdping.c @@ -45,7 +45,7 @@ static int get_stats(int fd, __u16 count, __u32 raddr) printf("\nXDP RTT data:\n"); if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) { - perror("bpf_map_lookup elem: "); + perror("bpf_map_lookup elem"); return 1; } diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c index 0fc1b6d4b0f9..62a27ab3c2f3 100644 --- a/tools/testing/selftests/cgroup/test_freezer.c +++ b/tools/testing/selftests/cgroup/test_freezer.c @@ -72,6 +72,7 @@ static int cg_prepare_for_wait(const char *cgroup) if (ret == -1) { debug("Error: inotify_add_watch() failed\n"); close(fd); + fd = -1; } return fd; diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 47315fe48d5a..24dd8ed48580 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -232,7 +232,7 @@ test_mc_aware() stop_traffic local ucth1=${uc_rate[1]} - start_traffic $h1 own bc bc + start_traffic $h1 192.0.2.65 bc bc local d0=$(date +%s) local t0=$(ethtool_stats_get $h3 rx_octets_prio_0) @@ 
-254,7 +254,11 @@ test_mc_aware() ret = 100 * ($ucth1 - $ucth2) / $ucth1 if (ret > 0) { ret } else { 0 } ") - check_err $(bc <<< "$deg > 25") + + # Minimum shaper of 200Mbps on MC TCs should cause about 20% of + # degradation on 1Gbps link. + check_err $(bc <<< "$deg < 15") "Minimum shaper not in effect" + check_err $(bc <<< "$deg > 25") "MC traffic degrades UC performance too much" local interval=$((d1 - d0)) local mc_ir=$(rate $u0 $u1 $interval) diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh index b879305a766d..5b8c0fedee76 100755 --- a/tools/testing/selftests/firmware/fw_lib.sh +++ b/tools/testing/selftests/firmware/fw_lib.sh @@ -34,6 +34,12 @@ test_modprobe() check_mods() { + local uid=$(id -u) + if [ $uid -ne 0 ]; then + echo "skip all tests: must be run as root" >&2 + exit $ksft_skip + fi + trap "test_modprobe" EXIT if [ ! -d $DIR ]; then modprobe test_firmware diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile index cd1f5b3a7774..d6e106fbce11 100644 --- a/tools/testing/selftests/ftrace/Makefile +++ b/tools/testing/selftests/ftrace/Makefile @@ -2,7 +2,7 @@ all: TEST_PROGS := ftracetest -TEST_FILES := test.d +TEST_FILES := test.d settings EXTRA_CLEAN := $(OUTPUT)/logs/* include ../lib.mk diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc index 36fb59f886ea..1a52f2883fe0 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc @@ -3,6 +3,8 @@ # description: ftrace - stacktrace filter command # flags: instance +[ ! -f set_ftrace_filter ] && exit_unsupported + echo _do_fork:stacktrace >> set_ftrace_filter grep -q "_do_fork:stacktrace:unlimited" set_ftrace_filter diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc index 86a1f07ef2ca..71fa3f49e35e 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc @@ -15,6 +15,11 @@ if [ $NP -eq 1 ] ;then exit_unresolved fi +if ! grep -q "function" available_tracers ; then + echo "Function trace is not enabled" + exit_unsupported +fi + ORIG_CPUMASK=`cat tracing_cpumask` do_reset() { diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 86986c4bba54..5d4550591ff9 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -46,6 +46,9 @@ reset_events_filter() { # reset all current setting filters } reset_ftrace_filter() { # reset all triggers in set_ftrace_filter + if [ ! 
-f set_ftrace_filter ]; then + return 0 + fi echo > set_ftrace_filter grep -v '^#' set_ftrace_filter | while read t; do tr=`echo $t | cut -d: -f2` @@ -93,7 +96,7 @@ initialize_ftrace() { # Reset ftrace to initial-state disable_events [ -f set_event_pid ] && echo > set_event_pid [ -f set_ftrace_pid ] && echo > set_ftrace_pid - [ -f set_ftrace_filter ] && echo | tee set_ftrace_* + [ -f set_ftrace_notrace ] && echo > set_ftrace_notrace [ -f set_graph_function ] && echo | tee set_graph_* [ -f stack_trace_filter ] && echo > stack_trace_filter [ -f kprobe_events ] && echo > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc index 5862eee91e1d..6e3dbe5f96b7 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc @@ -20,9 +20,9 @@ while read i; do test $N -eq 256 && break done -L=`wc -l kprobe_events` -if [ $L -ne $N ]; then - echo "The number of kprobes events ($L) is not $N" +L=`cat kprobe_events | wc -l` +if [ $L -ne 256 ]; then + echo "The number of kprobes events ($L) is not 256" exit_fail fi diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc index 1221240f8cf6..3f2aee115f6e 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc @@ -21,10 +21,10 @@ grep -q "snapshot()" README || exit_unsupported # version issue echo "Test expected snapshot action failure" -echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail +echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> events/sched/sched_waking/trigger && exit_fail echo "Test expected save action failure" -echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail +echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> events/sched/sched_waking/trigger && exit_fail exit_xfail diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc index 064a284e4e75..c80007aa9f86 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc @@ -16,7 +16,7 @@ grep -q "onchange(var)" README || exit_unsupported # version issue echo "Test onchange action" -echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger +echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> events/sched/sched_waking/trigger ping $LOCALHOST -c 3 nice -n 1 ping $LOCALHOST -c 3 diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc index 18fff69fc433..f546c1b66a9b 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc +++ 
b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc @@ -23,9 +23,9 @@ grep -q "snapshot()" README || exit_unsupported # version issue echo "Test snapshot action" -echo 1 > /sys/kernel/debug/tracing/events/sched/enable +echo 1 > events/sched/enable -echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger +echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> events/sched/sched_waking/trigger ping $LOCALHOST -c 3 nice -n 1 ping $LOCALHOST -c 3 diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh index a27e2eec3586..8b2b6088540d 100755 --- a/tools/testing/selftests/gen_kselftest_tar.sh +++ b/tools/testing/selftests/gen_kselftest_tar.sh @@ -38,16 +38,21 @@ main() esac fi - install_dir=./kselftest + # Create working directory. + dest=`pwd` + install_work="$dest"/kselftest_install + install_name=kselftest + install_dir="$install_work"/"$install_name" + mkdir -p "$install_dir" -# Run install using INSTALL_KSFT_PATH override to generate install -# directory -./kselftest_install.sh -tar $copts kselftest${ext} $install_dir -echo "Kselftest archive kselftest${ext} created!" + # Run install using INSTALL_KSFT_PATH override to generate install + # directory + ./kselftest_install.sh "$install_dir" + (cd "$install_work"; tar $copts "$dest"/kselftest${ext} $install_name) + echo "Kselftest archive kselftest${ext} created!" -# clean up install directory -rm -rf kselftest + # clean up top-level install work directory + rm -rf "$install_work" } main "$@" diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl index ec7e48118183..31f7c2a0a8bd 100755 --- a/tools/testing/selftests/kselftest/prefix.pl +++ b/tools/testing/selftests/kselftest/prefix.pl @@ -3,6 +3,7 @@ # Prefix all lines with "# ", unbuffered. Command being piped in may need # to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd". use strict; +use IO::Handle; binmode STDIN; binmode STDOUT; diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh index 84de7bc74f2c..e84d901f8567 100644 --- a/tools/testing/selftests/kselftest/runner.sh +++ b/tools/testing/selftests/kselftest/runner.sh @@ -79,6 +79,7 @@ run_one() if [ $rc -eq $skip_rc ]; then \ echo "not ok $test_num $TEST_HDR_MSG # SKIP" elif [ $rc -eq $timeout_rc ]; then \ + echo "#" echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT" else echo "not ok $test_num $TEST_HDR_MSG # exit=$rc" @@ -90,7 +91,7 @@ run_one() run_many() { echo "TAP version 13" - DIR=$(basename "$PWD") + DIR="${PWD#${BASE_DIR}/}" test_num=0 total=$(echo "$@" | wc -w) echo "1..$total" diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh index e2e1911d62d5..407af7da7037 100755 --- a/tools/testing/selftests/kselftest_install.sh +++ b/tools/testing/selftests/kselftest_install.sh @@ -6,30 +6,30 @@ # Author: Shuah Khan # Copyright (C) 2015 Samsung Electronics Co., Ltd. -install_loc=`pwd` - main() { - if [ $(basename $install_loc) != "selftests" ]; then + base_dir=`pwd` + install_dir="$base_dir"/kselftest_install + + # Make sure we're in the selftests top-level directory. + if [ $(basename "$base_dir") != "selftests" ]; then echo "$0: Please run it in selftests directory ..." 
exit 1; fi + + # Only allow installation into an existing location. if [ "$#" -eq 0 ]; then - echo "$0: Installing in default location - $install_loc ..." + echo "$0: Installing in default location - $install_dir ..." elif [ ! -d "$1" ]; then echo "$0: $1 doesn't exist!!" exit 1; else - install_loc=$1 - echo "$0: Installing in specified location - $install_loc ..." + install_dir="$1" + echo "$0: Installing in specified location - $install_dir ..." fi - install_dir=$install_loc/kselftest_install - -# Create install directory - mkdir -p $install_dir -# Build tests - KSFT_INSTALL_PATH=$install_dir make install + # Build tests + KSFT_INSTALL_PATH="$install_dir" make install } main "$@" diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 1c8a1963d03f..3ed0134a764d 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -83,17 +83,20 @@ else $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS)) endif +define INSTALL_SINGLE_RULE + $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH)) + $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/) + $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/) +endef + define INSTALL_RULE - @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ - mkdir -p ${INSTALL_PATH}; \ - echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ - rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ - fi - @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \ - mkdir -p ${INSTALL_PATH}; \ - echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \ - rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \ - fi + $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE) + $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE) + $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE) + $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE) + $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE) + $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE) + $(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE) endef install: all diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile index fd405402c3ff..485696a01989 100644 --- a/tools/testing/selftests/livepatch/Makefile +++ b/tools/testing/selftests/livepatch/Makefile @@ -6,4 +6,6 @@ TEST_PROGS := \ test-callbacks.sh \ test-shadow-vars.sh +TEST_FILES := settings + include ../lib.mk diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 76c1897e6352..09854f8a0b57 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -910,6 +910,12 @@ ipv6_rt_replace_mpath() check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024" log_test $? 0 "Multipath with single path via multipath attribute" + # multipath with dev-only + add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" + run_cmd "$IP -6 ro replace 2001:db8:104::/64 dev veth1" + check_route6 "2001:db8:104::/64 dev veth1 metric 1024" + log_test $? 
0 "Multipath with dev-only" + # route replace fails - invalid nexthop 1 add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3" @@ -1035,6 +1041,27 @@ ipv6_addr_metric_test() fi log_test $rc 0 "Prefix route with metric on link up" + # verify peer metric added correctly + set -e + run_cmd "$IP -6 addr flush dev dummy2" + run_cmd "$IP -6 addr add dev dummy2 2001:db8:104::1 peer 2001:db8:104::2 metric 260" + set +e + + check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260" + log_test $? 0 "Set metric with peer route on local side" + log_test $? 0 "User specified metric on local address" + check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260" + log_test $? 0 "Set metric with peer route on peer side" + + set -e + run_cmd "$IP -6 addr change dev dummy2 2001:db8:104::1 peer 2001:db8:104::3 metric 261" + set +e + + check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 261" + log_test $? 0 "Modify metric and peer address on local side" + check_route6 "2001:db8:104::3 dev dummy2 proto kernel metric 261" + log_test $? 0 "Modify metric and peer address on peer side" + $IP li del dummy1 $IP li del dummy2 cleanup @@ -1451,13 +1478,20 @@ ipv4_addr_metric_test() run_cmd "$IP addr flush dev dummy2" run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260" - run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261" rc=$? if [ $rc -eq 0 ]; then - check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261" + check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 260" rc=$? fi - log_test $rc 0 "Modify metric of address with peer route" + log_test $rc 0 "Set metric of address with peer route" + + run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.3 metric 261" + rc=$? + if [ $rc -eq 0 ]; then + check_route "172.16.104.3 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261" + rc=$? + fi + log_test $rc 0 "Modify metric and peer address for peer route" $IP li del dummy1 $IP li del dummy2 diff --git a/tools/testing/selftests/net/forwarding/loopback.sh b/tools/testing/selftests/net/forwarding/loopback.sh index 6e4626ae71b0..8f4057310b5b 100755 --- a/tools/testing/selftests/net/forwarding/loopback.sh +++ b/tools/testing/selftests/net/forwarding/loopback.sh @@ -1,6 +1,9 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + ALL_TESTS="loopback_test" NUM_NETIFS=2 source tc_common.sh @@ -72,6 +75,11 @@ setup_prepare() h1_create h2_create + + if ethtool -k $h1 | grep loopback | grep -q fixed; then + log_test "SKIP: dev $h1 does not support loopback feature" + exit $ksft_skip + fi } cleanup() diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh index e6fd7a18c655..0266443601bc 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh @@ -63,22 +63,23 @@ test_span_gre_mac() { local tundev=$1; shift local direction=$1; shift - local prot=$1; shift local what=$1; shift - local swp3mac=$(mac_get $swp3) - local h3mac=$(mac_get $h3) + case "$direction" in + ingress) local src_mac=$(mac_get $h1); local dst_mac=$(mac_get $h2) + ;; + egress) local src_mac=$(mac_get $h2); local dst_mac=$(mac_get $h1) + ;; + esac RET=0 mirror_install $swp1 $direction $tundev "matchall $tcflags" - tc filter add dev $h3 ingress pref 77 prot $prot \ - flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \ - action pass + icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac" - mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10 + mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10 - tc filter del dev $h3 ingress pref 77 + icmp_capture_uninstall h3-${tundev} mirror_uninstall $swp1 $direction log_test "$direction $what: envelope MAC ($tcflags)" @@ -120,14 +121,14 @@ test_ip6gretap() test_gretap_mac() { - test_span_gre_mac gt4 ingress ip "mirror to gretap" - test_span_gre_mac gt4 egress ip "mirror to gretap" + test_span_gre_mac gt4 ingress "mirror to gretap" + test_span_gre_mac gt4 egress "mirror to gretap" } test_ip6gretap_mac() { - test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap" - test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap" + test_span_gre_mac gt6 ingress "mirror to ip6gretap" + test_span_gre_mac gt6 egress "mirror to ip6gretap" } test_all() diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh index fef88eb4b873..fa6a88c50750 100755 --- a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh +++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh @@ -36,7 +36,7 @@ h2_destroy() { ip -6 route del 2001:db8:1::/64 vrf v$h2 ip -4 route del 192.0.2.0/28 vrf v$h2 - simple_if_fini $h2 192.0.2.130/28 + simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64 } router_create() diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh index bb10e33690b2..ce6bea9675c0 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh @@ -516,9 +516,9 @@ test_tos() RET=0 tc filter add dev v1 egress pref 77 prot ip \ - flower ip_tos 0x40 action pass - vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10 - vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0 + flower ip_tos 0x14 action pass + vxlan_ping_test $h1 192.0.2.3 "-Q 0x14" v1 egress 77 10 + vxlan_ping_test $h1 192.0.2.3 "-Q 0x18" v1 egress 77 0 tc filter del dev v1 egress pref 77 prot ip log_test "VXLAN: envelope TOS inheritance" diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index ab367e75f095..71a62e7e35b1 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -11,9 +11,9 @@ # R1 and R2 (also 
implemented with namespaces), with different MTUs: # # segment a_r1 segment b_r1 a_r1: 2000 -# .--------------R1--------------. a_r2: 1500 -# A B a_r3: 2000 -# '--------------R2--------------' a_r4: 1400 +# .--------------R1--------------. b_r1: 1400 +# A B a_r2: 2000 +# '--------------R2--------------' b_r2: 1500 # segment a_r2 segment b_r2 # # Check that PMTU exceptions with the correct PMTU are created. Then @@ -1249,8 +1249,7 @@ test_list_flush_ipv4_exception() { done run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst2}" - # Each exception is printed as two lines - if [ "$(${ns_a} ip route list cache | wc -l)" -ne 202 ]; then + if [ "$(${ns_a} ip -oneline route list cache | wc -l)" -ne 101 ]; then err " can't list cached exceptions" fail=1 fi @@ -1300,7 +1299,7 @@ test_list_flush_ipv6_exception() { run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst_prefix1}${i}" done run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst2}" - if [ "$(${ns_a} ip -6 route list cache | wc -l)" -ne 101 ]; then + if [ "$(${ns_a} ip -oneline -6 route list cache | wc -l)" -ne 101 ]; then err " can't list cached exceptions" fail=1 fi diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c index 53f598f06647..383bac05ac32 100644 --- a/tools/testing/selftests/net/so_txtime.c +++ b/tools/testing/selftests/net/so_txtime.c @@ -12,7 +12,11 @@ #include #include #include +#include #include +#include +#include +#include #include #include #include @@ -28,7 +32,7 @@ static int cfg_clockid = CLOCK_TAI; static bool cfg_do_ipv4; static bool cfg_do_ipv6; static uint16_t cfg_port = 8000; -static int cfg_variance_us = 2000; +static int cfg_variance_us = 4000; static uint64_t glob_tstart; @@ -43,6 +47,9 @@ static struct timed_send cfg_in[MAX_NUM_PKT]; static struct timed_send cfg_out[MAX_NUM_PKT]; static int cfg_num_pkt; +static int cfg_errq_level; +static int cfg_errq_type; + static uint64_t gettime_ns(void) { struct timespec ts; @@ -90,13 +97,15 @@ static void do_send_one(int fdt, struct timed_send *ts) } -static void do_recv_one(int fdr, struct timed_send *ts) +static bool do_recv_one(int fdr, struct timed_send *ts) { int64_t tstop, texpect; char rbuf[2]; int ret; ret = recv(fdr, rbuf, sizeof(rbuf), 0); + if (ret == -1 && errno == EAGAIN) + return true; if (ret == -1) error(1, errno, "read"); if (ret != 1) @@ -105,14 +114,16 @@ static void do_recv_one(int fdr, struct timed_send *ts) tstop = (gettime_ns() - glob_tstart) / 1000; texpect = ts->delay_us >= 0 ? ts->delay_us : 0; - fprintf(stderr, "payload:%c delay:%ld expected:%ld (us)\n", - rbuf[0], tstop, texpect); + fprintf(stderr, "payload:%c delay:%lld expected:%lld (us)\n", + rbuf[0], (long long)tstop, (long long)texpect); if (rbuf[0] != ts->data) error(1, 0, "payload mismatch. 
expected %c", ts->data); if (labs(tstop - texpect) > cfg_variance_us) error(1, 0, "exceeds variance (%d us)", cfg_variance_us); + + return false; } static void do_recv_verify_empty(int fdr) @@ -125,12 +136,70 @@ static void do_recv_verify_empty(int fdr) error(1, 0, "recv: not empty as expected (%d, %d)", ret, errno); } +static void do_recv_errqueue_timeout(int fdt) +{ + char control[CMSG_SPACE(sizeof(struct sock_extended_err)) + + CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0}; + char data[sizeof(struct ipv6hdr) + + sizeof(struct tcphdr) + 1]; + struct sock_extended_err *err; + struct msghdr msg = {0}; + struct iovec iov = {0}; + struct cmsghdr *cm; + int64_t tstamp = 0; + int ret; + + iov.iov_base = data; + iov.iov_len = sizeof(data); + + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + while (1) { + ret = recvmsg(fdt, &msg, MSG_ERRQUEUE); + if (ret == -1 && errno == EAGAIN) + break; + if (ret == -1) + error(1, errno, "errqueue"); + if (msg.msg_flags != MSG_ERRQUEUE) + error(1, 0, "errqueue: flags 0x%x\n", msg.msg_flags); + + cm = CMSG_FIRSTHDR(&msg); + if (cm->cmsg_level != cfg_errq_level || + cm->cmsg_type != cfg_errq_type) + error(1, 0, "errqueue: type 0x%x.0x%x\n", + cm->cmsg_level, cm->cmsg_type); + + err = (struct sock_extended_err *)CMSG_DATA(cm); + if (err->ee_origin != SO_EE_ORIGIN_TXTIME) + error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin); + if (err->ee_code != ECANCELED) + error(1, 0, "errqueue: code 0x%x\n", err->ee_code); + + tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info; + tstamp -= (int64_t) glob_tstart; + tstamp /= 1000 * 1000; + fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n", + data[ret - 1], tstamp); + + msg.msg_flags = 0; + msg.msg_controllen = sizeof(control); + } + + error(1, 0, "recv: timeout"); +} + static void setsockopt_txtime(int fd) { struct sock_txtime so_txtime_val = { .clockid = cfg_clockid }; struct sock_txtime so_txtime_val_read = { 0 }; socklen_t vallen = sizeof(so_txtime_val); + so_txtime_val.flags = SOF_TXTIME_REPORT_ERRORS; + if (setsockopt(fd, SOL_SOCKET, SO_TXTIME, &so_txtime_val, sizeof(so_txtime_val))) error(1, errno, "setsockopt txtime"); @@ -194,7 +263,8 @@ static void do_test(struct sockaddr *addr, socklen_t alen) for (i = 0; i < cfg_num_pkt; i++) do_send_one(fdt, &cfg_in[i]); for (i = 0; i < cfg_num_pkt; i++) - do_recv_one(fdr, &cfg_out[i]); + if (do_recv_one(fdr, &cfg_out[i])) + do_recv_errqueue_timeout(fdt); do_recv_verify_empty(fdr); @@ -280,6 +350,10 @@ int main(int argc, char **argv) addr6.sin6_family = AF_INET6; addr6.sin6_port = htons(cfg_port); addr6.sin6_addr = in6addr_loopback; + + cfg_errq_level = SOL_IPV6; + cfg_errq_type = IPV6_RECVERR; + do_test((void *)&addr6, sizeof(addr6)); } @@ -289,6 +363,10 @@ int main(int argc, char **argv) addr4.sin_family = AF_INET; addr4.sin_port = htons(cfg_port); addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + + cfg_errq_level = SOL_IP; + cfg_errq_type = IP_RECVERR; + do_test((void *)&addr4, sizeof(addr4)); } diff --git a/tools/testing/selftests/net/so_txtime.sh b/tools/testing/selftests/net/so_txtime.sh index 5aa519328a5b..3f7800eaecb1 100755 --- a/tools/testing/selftests/net/so_txtime.sh +++ b/tools/testing/selftests/net/so_txtime.sh @@ -5,7 +5,12 @@ # Run in network namespace if [[ $# -eq 0 ]]; then - ./in_netns.sh $0 __subprocess + if ! ./in_netns.sh $0 __subprocess; then + # test is time sensitive, can be flaky + echo "test failed: retry once" + ./in_netns.sh $0 __subprocess + fi + exit $? 
fi @@ -18,7 +23,7 @@ tc qdisc add dev lo root fq ./so_txtime -4 -6 -c mono a,10,b,20 a,10,b,20 ./so_txtime -4 -6 -c mono a,20,b,10 b,20,a,20 -if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 200000; then +if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 400000; then ! ./so_txtime -4 -6 -c tai a,-1 a,-1 ! ./so_txtime -4 -6 -c tai a,0 a,0 ./so_txtime -4 -6 -c tai a,10 a,10 diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 1c8f194d6556..0ea44d975b6c 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -25,10 +25,6 @@ #define TLS_PAYLOAD_MAX_LEN 16384 #define SOL_TLS 282 -#ifndef ENOTSUPP -#define ENOTSUPP 524 -#endif - FIXTURE(tls_basic) { int fd, cfd; @@ -268,6 +264,38 @@ TEST_F(tls, sendmsg_single) EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } +#define MAX_FRAGS 64 +#define SEND_LEN 13 +TEST_F(tls, sendmsg_fragmented) +{ + char const *test_str = "test_sendmsg"; + char buf[SEND_LEN * MAX_FRAGS]; + struct iovec vec[MAX_FRAGS]; + struct msghdr msg; + int i, frags; + + for (frags = 1; frags <= MAX_FRAGS; frags++) { + for (i = 0; i < frags; i++) { + vec[i].iov_base = (char *)test_str; + vec[i].iov_len = SEND_LEN; + } + + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = vec; + msg.msg_iovlen = frags; + + EXPECT_EQ(sendmsg(self->fd, &msg, 0), SEND_LEN * frags); + EXPECT_EQ(recv(self->cfd, buf, SEND_LEN * frags, MSG_WAITALL), + SEND_LEN * frags); + + for (i = 0; i < frags; i++) + EXPECT_EQ(memcmp(buf + SEND_LEN * i, + test_str, SEND_LEN), 0); + } +} +#undef MAX_FRAGS +#undef SEND_LEN + TEST_F(tls, sendmsg_large) { void *mem = malloc(16384); @@ -1145,11 +1173,11 @@ TEST(non_established) { /* TLS ULP not supported */ if (errno == ENOENT) return; - EXPECT_EQ(errno, ENOTSUPP); + EXPECT_EQ(errno, ENOTCONN); ret = setsockopt(sfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); EXPECT_EQ(ret, -1); - EXPECT_EQ(errno, ENOTSUPP); + EXPECT_EQ(errno, ENOTCONN); ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c index 614b31aad168..c66da6ffd6d8 100644 --- a/tools/testing/selftests/net/udpgso.c +++ b/tools/testing/selftests/net/udpgso.c @@ -440,7 +440,8 @@ static bool __send_one(int fd, struct msghdr *msg, int flags) if (ret == -1) error(1, errno, "sendmsg"); if (ret != msg->msg_iov->iov_len) - error(1, 0, "sendto: %d != %lu", ret, msg->msg_iov->iov_len); + error(1, 0, "sendto: %d != %llu", ret, + (unsigned long long)msg->msg_iov->iov_len); if (msg->msg_flags) error(1, 0, "sendmsg: return flags 0x%x\n", msg->msg_flags); diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c index ada99496634a..17512a43885e 100644 --- a/tools/testing/selftests/net/udpgso_bench_tx.c +++ b/tools/testing/selftests/net/udpgso_bench_tx.c @@ -405,7 +405,8 @@ static int send_udp_segment(int fd, char *data) if (ret == -1) error(1, errno, "sendmsg"); if (ret != iov.iov_len) - error(1, 0, "sendmsg: %u != %lu\n", ret, iov.iov_len); + error(1, 0, "sendmsg: %u != %llu\n", ret, + (unsigned long long)iov.iov_len); return 1; } diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 1be55e705780..d7e07f4c3d7f 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -8,9 +8,14 @@ ksft_skip=4 ret=0 test_inet_nat=true +sfx=$(mktemp -u "XXXXXXXX") +ns0="ns0-$sfx" +ns1="ns1-$sfx" +ns2="ns2-$sfx" + 
cleanup() { - for i in 0 1 2; do ip netns del ns$i;done + for i in 0 1 2; do ip netns del ns$i-"$sfx";done } nft --version > /dev/null 2>&1 @@ -25,40 +30,49 @@ if [ $? -ne 0 ];then exit $ksft_skip fi -ip netns add ns0 +ip netns add "$ns0" if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace" + echo "SKIP: Could not create net namespace $ns0" exit $ksft_skip fi trap cleanup EXIT -ip netns add ns1 -ip netns add ns2 +ip netns add "$ns1" +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace $ns1" + exit $ksft_skip +fi -ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1 +ip netns add "$ns2" +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace $ns2" + exit $ksft_skip +fi + +ip link add veth0 netns "$ns0" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: No virtual ethernet pair device support in kernel" exit $ksft_skip fi -ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 +ip link add veth1 netns "$ns0" type veth peer name eth0 netns "$ns2" -ip -net ns0 link set lo up -ip -net ns0 link set veth0 up -ip -net ns0 addr add 10.0.1.1/24 dev veth0 -ip -net ns0 addr add dead:1::1/64 dev veth0 +ip -net "$ns0" link set lo up +ip -net "$ns0" link set veth0 up +ip -net "$ns0" addr add 10.0.1.1/24 dev veth0 +ip -net "$ns0" addr add dead:1::1/64 dev veth0 -ip -net ns0 link set veth1 up -ip -net ns0 addr add 10.0.2.1/24 dev veth1 -ip -net ns0 addr add dead:2::1/64 dev veth1 +ip -net "$ns0" link set veth1 up +ip -net "$ns0" addr add 10.0.2.1/24 dev veth1 +ip -net "$ns0" addr add dead:2::1/64 dev veth1 for i in 1 2; do - ip -net ns$i link set lo up - ip -net ns$i link set eth0 up - ip -net ns$i addr add 10.0.$i.99/24 dev eth0 - ip -net ns$i route add default via 10.0.$i.1 - ip -net ns$i addr add dead:$i::99/64 dev eth0 - ip -net ns$i route add default via dead:$i::1 + ip -net ns$i-$sfx link set lo up + ip -net ns$i-$sfx link set eth0 up + ip -net ns$i-$sfx addr add 10.0.$i.99/24 dev eth0 + ip -net ns$i-$sfx route add default via 10.0.$i.1 + ip -net ns$i-$sfx addr add dead:$i::99/64 dev eth0 + ip -net ns$i-$sfx route add default via dead:$i::1 done bad_counter() @@ -66,8 +80,9 @@ bad_counter() local ns=$1 local counter=$2 local expect=$3 + local tag=$4 - echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 + echo "ERROR: $counter counter in $ns has unexpected value (expected $expect) at $tag" 1>&2 ip netns exec $ns nft list counter inet filter $counter 1>&2 } @@ -78,24 +93,24 @@ check_counters() cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") if [ $? -ne 0 ]; then - bad_counter $ns ns0in "packets 1 bytes 84" + bad_counter $ns ns0in "packets 1 bytes 84" "check_counters 1" lret=1 fi cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") if [ $? -ne 0 ]; then - bad_counter $ns ns0out "packets 1 bytes 84" + bad_counter $ns ns0out "packets 1 bytes 84" "check_counters 2" lret=1 fi expect="packets 1 bytes 104" cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter $ns ns0in6 "$expect" + bad_counter $ns ns0in6 "$expect" "check_counters 3" lret=1 fi cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter $ns ns0out6 "$expect" + bad_counter $ns ns0out6 "$expect" "check_counters 4" lret=1 fi @@ -107,41 +122,41 @@ check_ns0_counters() local ns=$1 local lret=0 - cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0in "packets 0 bytes 0" + bad_counter "$ns0" ns0in "packets 0 bytes 0" "check_ns0_counters 1" lret=1 fi - cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0in6 "packets 0 bytes 0" + bad_counter "$ns0" ns0in6 "packets 0 bytes 0" lret=1 fi - cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0out "packets 0 bytes 0" + bad_counter "$ns0" ns0out "packets 0 bytes 0" "check_ns0_counters 2" lret=1 fi - cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0out6 "packets 0 bytes 0" + bad_counter "$ns0" ns0out6 "packets 0 bytes 0" "check_ns0_counters3 " lret=1 fi for dir in "in" "out" ; do expect="packets 1 bytes 84" - cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ${ns}${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 $ns$dir "$expect" + bad_counter "$ns0" $ns$dir "$expect" "check_ns0_counters 4" lret=1 fi expect="packets 1 bytes 104" - cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 $ns$dir6 "$expect" + bad_counter "$ns0" $ns$dir6 "$expect" "check_ns0_counters 5" lret=1 fi done @@ -152,7 +167,7 @@ check_ns0_counters() reset_counters() { for i in 0 1 2;do - ip netns exec ns$i nft reset counters inet > /dev/null + ip netns exec ns$i-$sfx nft reset counters inet > /dev/null done } @@ -166,7 +181,7 @@ test_local_dnat6() IPF="ip6" fi -ip netns exec ns0 nft -f - < /dev/null + ip netns exec "$ns0" ping -q -c 1 dead:1::99 > /dev/null if [ $? -ne 0 ]; then lret=1 echo "ERROR: ping6 failed" @@ -189,18 +204,18 @@ EOF expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat6 1" lret=1 fi done expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns0 ns2$dir "$expect" + bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat6 2" lret=1 fi done @@ -208,9 +223,9 @@ EOF # expect 0 count in ns1 expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat6 3" lret=1 fi done @@ -218,15 +233,15 @@ EOF # expect 1 packet in ns2 expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns0$dir "$expect" + bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat6 4" lret=1 fi done - test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was $family NATted to ns2" - ip netns exec ns0 nft flush chain ip6 nat output + test $lret -eq 0 && echo "PASS: ipv6 ping to $ns1 was $family NATted to $ns2" + ip netns exec "$ns0" nft flush chain ip6 nat output return $lret } @@ -241,7 +256,7 @@ test_local_dnat() IPF="ip" fi -ip netns exec ns0 nft -f - </dev/null +ip netns exec "$ns0" nft -f /dev/stdin </dev/null table $family nat { chain output { type nat hook output priority 0; policy accept; @@ -260,7 +275,7 @@ EOF fi # ping netns1, expect rewrite to netns2 - ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null + ip netns exec "$ns0" ping -q -c 1 10.0.1.99 > /dev/null if [ $? -ne 0 ]; then lret=1 echo "ERROR: ping failed" @@ -269,18 +284,18 @@ EOF expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat 1" lret=1 fi done expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns2$dir "$expect" + bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat 2" lret=1 fi done @@ -288,9 +303,9 @@ EOF # expect 0 count in ns1 expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat 3" lret=1 fi done @@ -298,19 +313,19 @@ EOF # expect 1 packet in ns2 expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns2 ns0$dir "$expect" + bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat 4" lret=1 fi done - test $lret -eq 0 && echo "PASS: ping to ns1 was $family NATted to ns2" + test $lret -eq 0 && echo "PASS: ping to $ns1 was $family NATted to $ns2" - ip netns exec ns0 nft flush chain $family nat output + ip netns exec "$ns0" nft flush chain $family nat output reset_counters - ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null + ip netns exec "$ns0" ping -q -c 1 10.0.1.99 > /dev/null if [ $? -ne 0 ]; then lret=1 echo "ERROR: ping failed" @@ -319,17 +334,17 @@ EOF expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns1$dir "$expect" + bad_counter "$ns1" ns1$dir "$expect" "test_local_dnat 5" lret=1 fi done expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns2$dir "$expect" + bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat 6" lret=1 fi done @@ -337,9 +352,9 @@ EOF # expect 1 count in ns1 expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns0$dir "$expect" + bad_counter "$ns0" ns0$dir "$expect" "test_local_dnat 7" lret=1 fi done @@ -347,14 +362,14 @@ EOF # expect 0 packet in ns2 expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns2$dir "$expect" + bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat 8" lret=1 fi done - test $lret -eq 0 && echo "PASS: ping to ns1 OK after $family nat output chain flush" + test $lret -eq 0 && echo "PASS: ping to $ns1 OK after $family nat output chain flush" return $lret } @@ -366,26 +381,26 @@ test_masquerade6() local natflags=$2 local lret=0 - ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 via ipv6" + echo "ERROR: cannot ping $ns1 from $ns2 via ipv6" return 1 lret=1 fi expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" ns2$dir "$expect" "test_masquerade6 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade6 2" lret=1 fi done @@ -393,7 +408,7 @@ test_masquerade6() reset_counters # add masquerading rule -ip netns exec ns0 nft -f - < /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags" + echo "ERROR: cannot ping $ns1 from $ns2 with active $family masquerade $natflags" lret=1 fi # ns1 should have seen packets from ns0, due to masquerade expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade6 3" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade6 4" lret=1 fi done @@ -431,32 +446,32 @@ EOF # ns1 should not have seen packets from ns2, due to masquerade expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade6 5" lret=1 fi - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_masquerade6 6" lret=1 fi done - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)" + echo "ERROR: cannot ping $ns1 from $ns2 with active ipv6 masquerade $natflags (attempt 2)" lret=1 fi - ip netns exec ns0 nft flush chain $family nat postrouting + ip netns exec "$ns0" nft flush chain $family nat postrouting if [ $? -ne 0 ]; then echo "ERROR: Could not flush $family nat postrouting" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for ns2" + test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for $ns2" return $lret } @@ -467,26 +482,26 @@ test_masquerade() local natflags=$2 local lret=0 - ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null - ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? 
-ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 $natflags" + echo "ERROR: cannot ping $ns1 from "$ns2" $natflags" lret=1 fi expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" ns2$dir "$expect" "test_masquerade 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade 2" lret=1 fi done @@ -494,7 +509,7 @@ test_masquerade() reset_counters # add masquerading rule -ip netns exec ns0 nft -f - < /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags" + echo "ERROR: cannot ping $ns1 from $ns2 with active $family masquerade $natflags" lret=1 fi # ns1 should have seen packets from ns0, due to masquerade expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade 3" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade 4" lret=1 fi done @@ -532,32 +547,32 @@ EOF # ns1 should not have seen packets from ns2, due to masquerade expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade 5" lret=1 fi - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_masquerade 6" lret=1 fi done - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)" + echo "ERROR: cannot ping $ns1 from $ns2 with active ip masquerade $natflags (attempt 2)" lret=1 fi - ip netns exec ns0 nft flush chain $family nat postrouting + ip netns exec "$ns0" nft flush chain $family nat postrouting if [ $? 
-ne 0 ]; then echo "ERROR: Could not flush $family nat postrouting" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for ns2" + test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for $ns2" return $lret } @@ -567,25 +582,25 @@ test_redirect6() local family=$1 local lret=0 - ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 via ipv6" + echo "ERROR: cannot ping $ns1 from $ns2 via ipv6" lret=1 fi expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" ns2$dir "$expect" "test_redirect6 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_redirect6 2" lret=1 fi done @@ -593,7 +608,7 @@ test_redirect6() reset_counters # add redirect rule -ip netns exec ns0 nft -f - < /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 via ipv6 with active $family redirect" + echo "ERROR: cannot ping $ns1 from $ns2 via ipv6 with active $family redirect" lret=1 fi # ns1 should have seen no packets from ns2, due to redirection expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_redirect6 3" lret=1 fi done @@ -625,20 +640,20 @@ EOF # ns0 should have seen packets from ns2, due to masquerade expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_redirect6 4" lret=1 fi done - ip netns exec ns0 nft delete table $family nat + ip netns exec "$ns0" nft delete table $family nat if [ $? -ne 0 ]; then echo "ERROR: Could not delete $family nat table" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IPv6 redirection for ns2" + test $lret -eq 0 && echo "PASS: $family IPv6 redirection for $ns2" return $lret } @@ -648,26 +663,26 @@ test_redirect() local family=$1 local lret=0 - ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null - ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $?
-ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2" + echo "ERROR: cannot ping $ns1 from $ns2" lret=1 fi expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" $ns2$dir "$expect" "test_redirect 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_redirect 2" lret=1 fi done @@ -675,7 +690,7 @@ test_redirect() reset_counters # add redirect rule -ip netns exec ns0 nft -f - < /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active $family ip redirect" + echo "ERROR: cannot ping $ns1 from $ns2 with active $family ip redirect" lret=1 fi @@ -698,9 +713,9 @@ EOF expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_redirect 3" lret=1 fi done @@ -708,28 +723,28 @@ EOF # ns0 should have seen packets from ns2, due to masquerade expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns0" ns0$dir "$expect" "test_redirect 4" lret=1 fi done - ip netns exec ns0 nft delete table $family nat + ip netns exec "$ns0" nft delete table $family nat if [ $? -ne 0 ]; then echo "ERROR: Could not delete $family nat table" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IP redirection for ns2" + test $lret -eq 0 && echo "PASS: $family IP redirection for $ns2" return $lret } -# ip netns exec ns0 ping -c 1 -q 10.0.$i.99 +# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99 for i in 0 1 2; do -ip netns exec ns$i nft -f - < /dev/null + ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99 > /dev/null if [ $? -ne 0 ];then echo "ERROR: Could not reach other namespace(s)" 1>&2 ret=1 fi - ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null + ip netns exec "$ns0" ping -c 1 -q dead:$i::99 > /dev/null if [ $? -ne 0 ];then echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2 ret=1 fi - check_counters ns$i + check_counters ns$i-$sfx if [ $? 
-ne 0 ]; then ret=1 fi @@ -820,7 +835,7 @@ for i in 1 2; do done if [ $ret -eq 0 ];then - echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2" + echo "PASS: netns routing/connectivity: $ns0 can reach $ns1 and $ns2" fi reset_counters @@ -846,4 +861,9 @@ reset_counters $test_inet_nat && test_redirect inet $test_inet_nat && test_redirect6 inet +if [ $ret -ne 0 ];then + echo -n "FAIL: " + nft --version +fi + exit $ret diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh index 26112ab5cdf4..f52ed92b53e7 100755 --- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh @@ -53,9 +53,13 @@ eeh_one_dev() { # is a no-op. echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check - # Enforce a 30s timeout for recovery. Even the IPR, which is infamously - # slow to reset, should recover within 30s. - max_wait=30 + # Default to a 60s timeout when waiting for a device to recover. This + # is an arbitrary default which can be overridden by setting the + # EEH_MAX_WAIT environment variable when required. + + # The current record holder for longest recovery time is: + # "Adaptec Series 8 12G SAS/PCIe 3" at 39 seconds + max_wait=${EEH_MAX_WAIT:=60} for i in `seq 0 ${max_wait}` ; do if pe_ok $dev ; then diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c index 25e23e73c72e..2ecfa1158e2b 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c @@ -73,7 +73,7 @@ trans: [sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2), [tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3) - : "memory", "r0", "r1", "r3", "r4", "r5", "r6" + : "memory", "r0", "r3", "r4", "r5", "r6", "lr" ); /* TM failed, analyse */ diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c index f603fe5a445b..6f7fb51f0809 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c @@ -74,8 +74,8 @@ trans: "3: ;" : [res] "=r" (result), [texasr] "=r" (texasr) : [sprn_texasr] "i" (SPRN_TEXASR) - : "memory", "r0", "r1", "r3", "r4", - "r7", "r8", "r9", "r10", "r11" + : "memory", "r0", "r3", "r4", + "r7", "r8", "r9", "r10", "r11", "lr" ); if (result) { diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c index e0d37f07bdeb..46ef378a15ec 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c @@ -62,7 +62,7 @@ trans: [sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2), [cptr1] "b" (&cptr[1]) - : "memory", "r0", "r1", "r3", "r4", "r5", "r6" + : "memory", "r0", "r3", "r4", "r5", "r6" ); /* TM failed, analyse */ diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c index 8027457b97b7..70ca01234f79 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c @@ -62,8 +62,8 @@ trans: "3: ;" : [res] "=r" (result), [texasr] "=r" (texasr) : [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "b" (&cptr[1]) - : "memory", "r0", 
"r1", "r3", "r4", - "r7", "r8", "r9", "r10", "r11" + : "memory", "r0", "r3", "r4", + "r7", "r8", "r9", "r10", "r11", "lr" ); if (result) { diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c index 56fbf9f6bbf3..07c388147b75 100644 --- a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c +++ b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c @@ -10,10 +10,12 @@ */ #define _GNU_SOURCE +#include #include #include #include "utils.h" +#include "tm.h" void trap_signal_handler(int signo, siginfo_t *si, void *uc) { @@ -29,6 +31,8 @@ int tm_signal_sigreturn_nt(void) { struct sigaction trap_sa; + SKIP_IF(!have_htm()); + trap_sa.sa_flags = SA_SIGINFO; trap_sa.sa_sigaction = trap_signal_handler; diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c index 47b7473dedef..e6aa00a183bc 100644 --- a/tools/testing/selftests/proc/proc-self-map-files-002.c +++ b/tools/testing/selftests/proc/proc-self-map-files-002.c @@ -47,7 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b) int main(void) { const int PAGE_SIZE = sysconf(_SC_PAGESIZE); - const unsigned long va_max = 1UL << 32; + /* + * va_max must be enough bigger than vm.mmap_min_addr, which is + * 64KB/32KB by default. (depends on CONFIG_LSM_MMAP_MIN_ADDR) + */ + const unsigned long va_max = 1UL << 20; unsigned long va; void *p; int fd; diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index d6469535630a..2af9d39a9716 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as endif -CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ \ +CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ $(CLANG_FLAGS) LDLIBS += -lpthread @@ -19,6 +19,8 @@ TEST_GEN_PROGS_EXTENDED = librseq.so TEST_PROGS = run_param_test.sh +TEST_FILES := settings + include ../lib.mk $(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c index eec2663261f2..e8a657a5f48a 100644 --- a/tools/testing/selftests/rseq/param_test.c +++ b/tools/testing/selftests/rseq/param_test.c @@ -15,7 +15,7 @@ #include #include -static inline pid_t gettid(void) +static inline pid_t rseq_gettid(void) { return syscall(__NR_gettid); } @@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg) rseq_percpu_unlock(&data->lock, cpu); #ifndef BENCHMARK if (i != 0 && !(i % (reps / 10))) - printf_verbose("tid %d: count %lld\n", (int) gettid(), i); + printf_verbose("tid %d: count %lld\n", + (int) rseq_gettid(), i); #endif } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && thread_data->reg && rseq_unregister_current_thread()) abort(); @@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg) } while (rseq_unlikely(ret)); #ifndef BENCHMARK if (i != 0 && !(i % (reps / 10))) - printf_verbose("tid %d: count %lld\n", (int) gettid(), i); + printf_verbose("tid %d: count %lld\n", + (int) rseq_gettid(), i); #endif } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) 
rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && thread_data->reg && rseq_unregister_current_thread()) abort(); @@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg) } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && rseq_unregister_current_thread()) abort(); @@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg) } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && rseq_unregister_current_thread()) abort(); @@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg) } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && rseq_unregister_current_thread()) abort(); diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/rseq/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile index de9c8566672a..90fa1a346908 100644 --- a/tools/testing/selftests/rtc/Makefile +++ b/tools/testing/selftests/rtc/Makefile @@ -6,4 +6,6 @@ TEST_GEN_PROGS = rtctest TEST_GEN_PROGS_EXTENDED = setdate +TEST_FILES := settings + include ../lib.mk diff --git a/tools/testing/selftests/safesetid/Makefile b/tools/testing/selftests/safesetid/Makefile index 98da7a504737..fa02c4d5ec13 100644 --- a/tools/testing/selftests/safesetid/Makefile +++ b/tools/testing/selftests/safesetid/Makefile @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for mount selftests. 
-CFLAGS = -Wall -lcap -O2 +CFLAGS = -Wall -O2 +LDLIBS = -lcap -TEST_PROGS := run_tests.sh +TEST_PROGS := safesetid-test.sh TEST_GEN_FILES := safesetid-test include ../lib.mk diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c index 8f40c6ecdad1..0c4d50644c13 100644 --- a/tools/testing/selftests/safesetid/safesetid-test.c +++ b/tools/testing/selftests/safesetid/safesetid-test.c @@ -213,7 +213,8 @@ static void test_setuid(uid_t child_uid, bool expect_success) } if (cpid == 0) { /* Code executed by child */ - setuid(child_uid); + if (setuid(child_uid) < 0) + exit(EXIT_FAILURE); if (getuid() == child_uid) exit(EXIT_SUCCESS); else @@ -291,8 +292,10 @@ int main(int argc, char **argv) // First test to make sure we can write userns mappings from a user // that doesn't have any restrictions (as long as it has CAP_SETUID); - setuid(NO_POLICY_USER); - setgid(NO_POLICY_USER); + if (setuid(NO_POLICY_USER) < 0) + die("Error with set uid(%d)\n", NO_POLICY_USER); + if (setgid(NO_POLICY_USER) < 0) + die("Error with set gid(%d)\n", NO_POLICY_USER); // Take away all but setid caps drop_caps(true); @@ -306,8 +309,10 @@ int main(int argc, char **argv) die("test_userns failed when it should work\n"); } - setuid(RESTRICTED_PARENT); - setgid(RESTRICTED_PARENT); + if (setuid(RESTRICTED_PARENT) < 0) + die("Error with set uid(%d)\n", RESTRICTED_PARENT); + if (setgid(RESTRICTED_PARENT) < 0) + die("Error with set gid(%d)\n", RESTRICTED_PARENT); test_setuid(ROOT_USER, false); test_setuid(ALLOWED_CHILD1, true); diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 7f8b5c8982e3..96bbda4f10fc 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -3077,7 +3078,7 @@ static int user_trap_syscall(int nr, unsigned int flags) return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog); } -#define USER_NOTIF_MAGIC 116983961184613L +#define USER_NOTIF_MAGIC INT_MAX TEST(user_notification_basic) { pid_t pid; @@ -3146,7 +3147,18 @@ TEST(user_notification_basic) EXPECT_GT(poll(&pollfd, 1, -1), 0); EXPECT_EQ(pollfd.revents, POLLIN); - EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + /* Test that we can't pass garbage to the kernel. */ + memset(&req, 0, sizeof(req)); + req.pid = -1; + errno = 0; + ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + if (ret) { + req.pid = 0; + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + } pollfd.fd = listener; pollfd.events = POLLIN | POLLOUT; @@ -3266,6 +3278,7 @@ TEST(user_notification_signal) close(sk_pair[1]); + memset(&req, 0, sizeof(req)); EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); EXPECT_EQ(kill(pid, SIGUSR1), 0); @@ -3284,6 +3297,7 @@ TEST(user_notification_signal) EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); EXPECT_EQ(errno, ENOENT); + memset(&req, 0, sizeof(req)); EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); resp.id = req.id; diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c index 2ad45b944355..2980b1a63366 100644 --- a/tools/testing/selftests/size/get_size.c +++ b/tools/testing/selftests/size/get_size.c @@ -11,23 +11,35 @@ * own execution. It also attempts to have as few dependencies * on kernel features as possible. 
* - * It should be statically linked, with startup libs avoided. - * It uses no library calls, and only the following 3 syscalls: + * It should be statically linked, with startup libs avoided. It uses + * no library calls except the syscall() function for the following 3 + * syscalls: * sysinfo(), write(), and _exit() * * For output, it avoids printf (which in some C libraries * has large external dependencies) by implementing it's own * number output and print routines, and using __builtin_strlen() + * + * The test may crash if any of the above syscalls fails because in some + * libc implementations (e.g. the GNU C Library) errno is saved in + * thread-local storage, which does not get initialized due to avoiding + * startup libs. */ #include #include +#include #define STDOUT_FILENO 1 static int print(const char *s) { - return write(STDOUT_FILENO, s, __builtin_strlen(s)); + size_t len = 0; + + while (s[len] != '\0') + len++; + + return syscall(SYS_write, STDOUT_FILENO, s, len); } static inline char *num_to_str(unsigned long num, char *buf, int len) @@ -79,12 +91,12 @@ void _start(void) print("TAP version 13\n"); print("# Testing system size.\n"); - ccode = sysinfo(&info); + ccode = syscall(SYS_sysinfo, &info); if (ccode < 0) { print("not ok 1"); print(test_name); print(" ---\n reason: \"could not get sysinfo\"\n ...\n"); - _exit(ccode); + syscall(SYS_exit, ccode); } print("ok 1"); print(test_name); @@ -100,5 +112,5 @@ void _start(void) print(" ...\n"); print("1..1\n"); - _exit(0); + syscall(SYS_exit, 0); } diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py index e98c36750fae..d34fe06268d2 100644 --- a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py +++ b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py @@ -54,7 +54,7 @@ class SubPlugin(TdcPlugin): shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - env=ENVIR) + env=os.environ.copy()) (rawout, serr) = proc.communicate() if proc.returncode != 0 and len(serr) > 0: diff --git a/tools/testing/selftests/tc-testing/plugins/__init__.py b/tools/testing/selftests/tc-testing/plugins/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config index 1c0d76cb5adf..93b90a9b1eeb 100644 --- a/tools/testing/selftests/vm/config +++ b/tools/testing/selftests/vm/config @@ -1,2 +1,3 @@ CONFIG_SYSVIPC=y CONFIG_USERFAULTFD=y +CONFIG_TEST_VMALLOC=m diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c index 3c3a022654f3..6da0ac3f0135 100644 --- a/tools/testing/selftests/x86/mov_ss_trap.c +++ b/tools/testing/selftests/x86/mov_ss_trap.c @@ -257,7 +257,8 @@ int main() err(1, "sigaltstack"); sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK); nr = SYS_getpid; - asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr) + /* Clear EBP first to make sure we segfault cleanly. 
*/ + asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr) : [ss] "m" (ss) : "flags", "rcx" #ifdef __x86_64__ , "r11" diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c index 3e49a7873f3e..57c4f67f16ef 100644 --- a/tools/testing/selftests/x86/sigreturn.c +++ b/tools/testing/selftests/x86/sigreturn.c @@ -451,6 +451,19 @@ static void sigusr1(int sig, siginfo_t *info, void *ctx_void) ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL; ctx->uc_mcontext.gregs[REG_CX] = 0; +#ifdef __i386__ + /* + * Make sure the kernel doesn't inadvertently use DS or ES-relative + * accesses in a region where user DS or ES is loaded. + * + * Skip this for 64-bit builds because long mode doesn't care about + * DS and ES and skipping it increases test coverage a little bit, + * since 64-bit kernels can still run the 32-bit build. + */ + ctx->uc_mcontext.gregs[REG_DS] = 0; + ctx->uc_mcontext.gregs[REG_ES] = 0; +#endif + memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t)); requested_regs[REG_CX] = *ssptr(ctx); /* The asm code does this. */ diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c index 2813aa821c82..d1d8ba2a4a40 100644 --- a/tools/usb/usbip/libsrc/usbip_host_common.c +++ b/tools/usb/usbip/libsrc/usbip_host_common.c @@ -57,7 +57,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) } value = atoi(status); - + close(fd); return value; } diff --git a/tools/usb/usbip/src/usbip_network.c b/tools/usb/usbip/src/usbip_network.c index d595d72693fb..ed4dc8c14269 100644 --- a/tools/usb/usbip/src/usbip_network.c +++ b/tools/usb/usbip/src/usbip_network.c @@ -50,39 +50,39 @@ void usbip_setup_port_number(char *arg) info("using port %d (\"%s\")", usbip_port, usbip_port_string); } -void usbip_net_pack_uint32_t(int pack, uint32_t *num) +uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num) { uint32_t i; if (pack) - i = htonl(*num); + i = htonl(num); else - i = ntohl(*num); + i = ntohl(num); - *num = i; + return i; } -void usbip_net_pack_uint16_t(int pack, uint16_t *num) +uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num) { uint16_t i; if (pack) - i = htons(*num); + i = htons(num); else - i = ntohs(*num); + i = ntohs(num); - *num = i; + return i; } void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev) { - usbip_net_pack_uint32_t(pack, &udev->busnum); - usbip_net_pack_uint32_t(pack, &udev->devnum); - usbip_net_pack_uint32_t(pack, &udev->speed); + udev->busnum = usbip_net_pack_uint32_t(pack, udev->busnum); + udev->devnum = usbip_net_pack_uint32_t(pack, udev->devnum); + udev->speed = usbip_net_pack_uint32_t(pack, udev->speed); - usbip_net_pack_uint16_t(pack, &udev->idVendor); - usbip_net_pack_uint16_t(pack, &udev->idProduct); - usbip_net_pack_uint16_t(pack, &udev->bcdDevice); + udev->idVendor = usbip_net_pack_uint16_t(pack, udev->idVendor); + udev->idProduct = usbip_net_pack_uint16_t(pack, udev->idProduct); + udev->bcdDevice = usbip_net_pack_uint16_t(pack, udev->bcdDevice); } void usbip_net_pack_usb_interface(int pack __attribute__((unused)), @@ -129,6 +129,14 @@ ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen) return usbip_net_xmit(sockfd, buff, bufflen, 1); } +static inline void usbip_net_pack_op_common(int pack, + struct op_common *op_common) +{ + op_common->version = usbip_net_pack_uint16_t(pack, op_common->version); + op_common->code = usbip_net_pack_uint16_t(pack, op_common->code); + op_common->status = 
usbip_net_pack_uint32_t(pack, op_common->status); +} + int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status) { struct op_common op_common; @@ -140,7 +148,7 @@ int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status) op_common.code = code; op_common.status = status; - PACK_OP_COMMON(1, &op_common); + usbip_net_pack_op_common(1, &op_common); rc = usbip_net_send(sockfd, &op_common, sizeof(op_common)); if (rc < 0) { @@ -164,7 +172,7 @@ int usbip_net_recv_op_common(int sockfd, uint16_t *code, int *status) goto err; } - PACK_OP_COMMON(0, &op_common); + usbip_net_pack_op_common(0, &op_common); if (op_common.version != USBIP_VERSION) { err("USBIP Kernel and tool version mismatch: %d %d:", diff --git a/tools/usb/usbip/src/usbip_network.h b/tools/usb/usbip/src/usbip_network.h index 555215eae43e..83b4c5344f72 100644 --- a/tools/usb/usbip/src/usbip_network.h +++ b/tools/usb/usbip/src/usbip_network.h @@ -32,12 +32,6 @@ struct op_common { } __attribute__((packed)); -#define PACK_OP_COMMON(pack, op_common) do {\ - usbip_net_pack_uint16_t(pack, &(op_common)->version);\ - usbip_net_pack_uint16_t(pack, &(op_common)->code);\ - usbip_net_pack_uint32_t(pack, &(op_common)->status);\ -} while (0) - /* ---------------------------------------------------------------------- */ /* Dummy Code */ #define OP_UNSPEC 0x00 @@ -163,11 +157,11 @@ struct op_devlist_reply_extra { } while (0) #define PACK_OP_DEVLIST_REPLY(pack, reply) do {\ - usbip_net_pack_uint32_t(pack, &(reply)->ndev);\ + (reply)->ndev = usbip_net_pack_uint32_t(pack, (reply)->ndev);\ } while (0) -void usbip_net_pack_uint32_t(int pack, uint32_t *num); -void usbip_net_pack_uint16_t(int pack, uint16_t *num); +uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num); +uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num); void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev); void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf); diff --git a/tools_key.pub b/tools_key.pub new file mode 100644 index 000000000000..bc5f919e381b Binary files /dev/null and b/tools_key.pub differ diff --git a/usr/Kconfig b/usr/Kconfig index a6b68503d177..a80cc7972274 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -131,17 +131,6 @@ choice If in doubt, select 'None' -config INITRAMFS_COMPRESSION_NONE - bool "None" - help - Do not compress the built-in initramfs at all. This may sound wasteful - in space, but, you should be aware that the built-in initramfs will be - compressed at a later stage anyways along with the rest of the kernel, - on those architectures that support this. However, not compressing the - initramfs may lead to slightly higher memory consumption during a - short time at boot, while both the cpio image and the unpacked - filesystem image will be present in memory simultaneously - config INITRAMFS_COMPRESSION_GZIP bool "Gzip" depends on RD_GZIP @@ -214,6 +203,17 @@ config INITRAMFS_COMPRESSION_LZ4 If you choose this, keep in mind that most distros don't provide lz4 by default which could cause a build failure. +config INITRAMFS_COMPRESSION_NONE + bool "None" + help + Do not compress the built-in initramfs at all. This may sound wasteful + in space, but, you should be aware that the built-in initramfs will be + compressed at a later stage anyways along with the rest of the kernel, + on those architectures that support this. 
However, not compressing the + initramfs may lead to slightly higher memory consumption during a + short time at boot, while both the cpio image and the unpacked + filesystem image will be present in memory simultaneously + endchoice config INITRAMFS_COMPRESSION diff --git a/usr/gen_initramfs_list.sh b/usr/gen_initramfs_list.sh index 0aad760fcd8c..2bbac73e6477 100755 --- a/usr/gen_initramfs_list.sh +++ b/usr/gen_initramfs_list.sh @@ -128,7 +128,7 @@ parse() { str="${ftype} ${name} ${location} ${str}" ;; "nod") - local dev=`LC_ALL=C ls -l "${location}"` + local dev="`LC_ALL=C ls -l "${location}"`" local maj=`field 5 ${dev}` local min=`field 6 ${dev}` maj=${maj%,} diff --git a/usr/include/Makefile b/usr/include/Makefile index 57b20f7b6729..e2840579156a 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -99,9 +99,16 @@ endif # asm-generic/*.h is used by asm/*.h, and should not be included directly header-test- += asm-generic/% -# The rest are compile-tested -header-test-y += $(filter-out $(header-test-), \ - $(patsubst $(obj)/%,%, $(wildcard \ - $(addprefix $(obj)/, *.h */*.h */*/*.h */*/*/*.h)))) +extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null)) + +quiet_cmd_hdrtest = HDRTEST $< + cmd_hdrtest = \ + $(CC) $(c_flags) -S -o /dev/null -x c /dev/null \ + $(if $(filter-out $(header-test-), $*.h), -include $<); \ + $(PERL) $(srctree)/scripts/headers_check.pl $(obj) $(SRCARCH) $<; \ + touch $@ + +$(obj)/%.hdrtest: $(obj)/%.h FORCE + $(call if_changed_dep,hdrtest) clean-files += $(filter-out Makefile, $(notdir $(wildcard $(obj)/*))) diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c index c4c57ba99e90..0a356aa91aa1 100644 --- a/virt/kvm/arm/aarch32.c +++ b/virt/kvm/arm/aarch32.c @@ -10,10 +10,15 @@ * Author: Christoffer Dall */ +#include #include #include #include +#define DFSR_FSC_EXTABT_LPAE 0x10 +#define DFSR_FSC_EXTABT_nLPAE 0x08 +#define DFSR_LPAE BIT(9) + /* * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. */ @@ -28,25 +33,115 @@ static const u8 return_offsets[8][2] = { [7] = { 4, 4 }, /* FIQ, unused */ }; +/* + * When an exception is taken, most CPSR fields are left unchanged in the + * handler. However, some are explicitly overridden (e.g. M[4:0]). + * + * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with + * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was + * obsoleted by the ARMv7 virtualization extensions and is RES0. + * + * For the SPSR layout seen from AArch32, see: + * - ARM DDI 0406C.d, page B1-1148 + * - ARM DDI 0487E.a, page G8-6264 + * + * For the SPSR_ELx layout for AArch32 seen from AArch64, see: + * - ARM DDI 0487E.a, page C5-426 + * + * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from + * MSB to LSB. 
+ */ +static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode) +{ + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); + unsigned long old, new; + + old = *vcpu_cpsr(vcpu); + new = 0; + + new |= (old & PSR_AA32_N_BIT); + new |= (old & PSR_AA32_Z_BIT); + new |= (old & PSR_AA32_C_BIT); + new |= (old & PSR_AA32_V_BIT); + new |= (old & PSR_AA32_Q_BIT); + + // CPSR.IT[7:0] are set to zero upon any exception + // See ARM DDI 0487E.a, section G1.12.3 + // See ARM DDI 0406C.d, section B1.8.3 + + new |= (old & PSR_AA32_DIT_BIT); + + // CPSR.SSBS is set to SCTLR.DSSBS upon any exception + // See ARM DDI 0487E.a, page G8-6244 + if (sctlr & BIT(31)) + new |= PSR_AA32_SSBS_BIT; + + // CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0 + // SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented + // See ARM DDI 0487E.a, page G8-6246 + new |= (old & PSR_AA32_PAN_BIT); + if (!(sctlr & BIT(23))) + new |= PSR_AA32_PAN_BIT; + + // SS does not exist in AArch32, so ignore + + // CPSR.IL is set to zero upon any exception + // See ARM DDI 0487E.a, page G1-5527 + + new |= (old & PSR_AA32_GE_MASK); + + // CPSR.IT[7:0] are set to zero upon any exception + // See prior comment above + + // CPSR.E is set to SCTLR.EE upon any exception + // See ARM DDI 0487E.a, page G8-6245 + // See ARM DDI 0406C.d, page B4-1701 + if (sctlr & BIT(25)) + new |= PSR_AA32_E_BIT; + + // CPSR.A is unchanged upon an exception to Undefined, Supervisor + // CPSR.A is set upon an exception to other modes + // See ARM DDI 0487E.a, pages G1-5515 to G1-5516 + // See ARM DDI 0406C.d, page B1-1182 + new |= (old & PSR_AA32_A_BIT); + if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC) + new |= PSR_AA32_A_BIT; + + // CPSR.I is set upon any exception + // See ARM DDI 0487E.a, pages G1-5515 to G1-5516 + // See ARM DDI 0406C.d, page B1-1182 + new |= PSR_AA32_I_BIT; + + // CPSR.F is set upon an exception to FIQ + // CPSR.F is unchanged upon an exception to other modes + // See ARM DDI 0487E.a, pages G1-5515 to G1-5516 + // See ARM DDI 0406C.d, page B1-1182 + new |= (old & PSR_AA32_F_BIT); + if (mode == PSR_AA32_MODE_FIQ) + new |= PSR_AA32_F_BIT; + + // CPSR.T is set to SCTLR.TE upon any exception + // See ARM DDI 0487E.a, page G8-5514 + // See ARM DDI 0406C.d, page B1-1181 + if (sctlr & BIT(30)) + new |= PSR_AA32_T_BIT; + + new |= mode; + + return new; +} + static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) { - unsigned long cpsr; - unsigned long new_spsr_value = *vcpu_cpsr(vcpu); - bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT); + unsigned long spsr = *vcpu_cpsr(vcpu); + bool is_thumb = (spsr & PSR_AA32_T_BIT); u32 return_offset = return_offsets[vect_offset >> 2][is_thumb]; u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); - cpsr = mode | PSR_AA32_I_BIT; - - if (sctlr & (1 << 30)) - cpsr |= PSR_AA32_T_BIT; - if (sctlr & (1 << 25)) - cpsr |= PSR_AA32_E_BIT; - - *vcpu_cpsr(vcpu) = cpsr; + *vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode); /* Note: These now point to the banked copies */ - vcpu_write_spsr(vcpu, new_spsr_value); + vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr)); *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; /* Branch to exception vector */ @@ -84,16 +179,18 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, fsr = &vcpu_cp15(vcpu, c5_DFSR); } - prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset); + prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset); *far = addr; /* Give the guest an IMPLEMENTATION DEFINED exception */ is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); - if 
(is_lpae) - *fsr = 1 << 9 | 0x34; - else - *fsr = 0x14; + if (is_lpae) { + *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE; + } else { + /* no need to shuffle FS[4] into DFSR[10] as its 0 */ + *fsr = DFSR_FSC_EXTABT_nLPAE; + } } void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index e2bb5bd60227..6b222100608f 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, switch (treg) { case TIMER_REG_TVAL: val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff; + val &= lower_32_bits(val); break; case TIMER_REG_CTL: @@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, { switch (treg) { case TIMER_REG_TVAL: - timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val; + timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val; break; case TIMER_REG_CTL: diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c index 6af5c91337f2..f274fabb4301 100644 --- a/virt/kvm/arm/mmio.c +++ b/virt/kvm/arm/mmio.c @@ -105,6 +105,9 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) data = (data ^ mask) - mask; } + if (!vcpu->arch.mmio_decode.sixty_four) + data = data & 0xffffffff; + trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, &data); data = vcpu_data_host_to_guest(vcpu, data, len); @@ -125,6 +128,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) unsigned long rt; int access_size; bool sign_extend; + bool sixty_four; if (kvm_vcpu_dabt_iss1tw(vcpu)) { /* page table accesses IO mem: tell guest to fix its TTBR */ @@ -138,11 +142,13 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) *is_write = kvm_vcpu_dabt_iswrite(vcpu); sign_extend = kvm_vcpu_dabt_issext(vcpu); + sixty_four = kvm_vcpu_dabt_issf(vcpu); rt = kvm_vcpu_dabt_get_rd(vcpu); *len = access_size; vcpu->arch.mmio_decode.sign_extend = sign_extend; vcpu->arch.mmio_decode.rt = rt; + vcpu->arch.mmio_decode.sixty_four = sixty_four; return 0; } diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 38b4c910b6c3..ce7fa37987e1 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -38,6 +38,11 @@ static unsigned long io_map_base; #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1) +static bool is_iomap(unsigned long flags) +{ + return flags & KVM_S2PTE_FLAG_IS_IOMAP; +} + static bool memslot_is_logging(struct kvm_memory_slot *memslot) { return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); @@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, vma_pagesize = vma_kernel_pagesize(vma); if (logging_active || + (vma->vm_flags & VM_PFNMAP) || !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { force_pte = true; vma_pagesize = PAGE_SIZE; @@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, writable = false; } + if (exec_fault && is_iomap(flags)) + return -ENOEXEC; + spin_lock(&kvm->mmu_lock); if (mmu_notifier_retry(kvm, mmu_seq)) goto out_unlock; @@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (writable) kvm_set_pfn_dirty(pfn); - if (fault_status != FSC_PERM) + if (fault_status != FSC_PERM && !is_iomap(flags)) clean_dcache_guest_page(pfn, vma_pagesize); if (exec_fault) @@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) if 
(kvm_is_error_hva(hva) || (write_fault && !writable)) { if (is_iabt) { /* Prefetch Abort on I/O address */ - kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - ret = 1; - goto out_unlock; + ret = -ENOEXEC; + goto out; } /* @@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); if (ret == 0) ret = 1; +out: + if (ret == -ENOEXEC) { + kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); + ret = 1; + } out_unlock: srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; @@ -2134,7 +2147,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) if (!kvm->arch.pgd) return 0; trace_kvm_test_age_hva(hva); - return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); + return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, + kvm_test_age_hva_handler, NULL); } void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 8731dfeced8b..4c08fd009768 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -480,25 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, */ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) { + struct kvm_pmu *pmu = &vcpu->arch.pmu; int i; - u64 type, enable, reg; - if (val == 0) + if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) return; - enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + /* Weed out disabled counters */ + val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) { + u64 type, reg; + if (!(val & BIT(i))) continue; - type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) - & ARMV8_PMU_EVTYPE_EVENT; - if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR) - && (enable & BIT(i))) { - reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; + + /* PMSWINC only applies to ... SW_INC! 
*/ + type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i); + type &= ARMV8_PMU_EVTYPE_EVENT; + if (type != ARMV8_PMUV3_PERFCTR_SW_INCR) + continue; + + /* increment this even SW_INC counter */ + reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; + reg = lower_32_bits(reg); + __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; + + if (reg) /* no overflow on the low part */ + continue; + + if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { + /* increment the high counter */ + reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1; reg = lower_32_bits(reg); - __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; - if (!reg) - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); + __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg; + if (!reg) /* mark overflow on the high counter */ + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1); + } else { + /* mark overflow on low counter */ + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); } } } diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 2be6b66b3856..f8ad7096555d 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -2472,7 +2472,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT); coll_id = val & KVM_ITS_CTE_ICID_MASK; - if (target_addr >= atomic_read(&kvm->online_vcpus)) + if (target_addr != COLLECTION_NOT_MAPPED && + target_addr >= atomic_read(&kvm->online_vcpus)) return -EINVAL; collection = find_collection(its, coll_id); diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 8d69f007dd0c..b3a97dcaa30d 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -363,8 +363,8 @@ retry: int vgic_v3_save_pending_tables(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; - int last_byte_offset = -1; struct vgic_irq *irq; + gpa_t last_ptr = ~(gpa_t)0; int ret; u8 val; @@ -384,11 +384,11 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) bit_nr = irq->intid % BITS_PER_BYTE; ptr = pendbase + byte_offset; - if (byte_offset != last_byte_offset) { + if (ptr != last_ptr) { ret = kvm_read_guest_lock(kvm, ptr, &val, 1); if (ret) return ret; - last_byte_offset = byte_offset; + last_ptr = ptr; } stored = val & (1U << bit_nr); diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 35305d6e68cc..d8ef708a2ef6 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -64,7 +64,7 @@ static void async_pf_execute(struct work_struct *work) struct mm_struct *mm = apf->mm; struct kvm_vcpu *vcpu = apf->vcpu; unsigned long addr = apf->addr; - gva_t gva = apf->gva; + gpa_t cr2_or_gpa = apf->cr2_or_gpa; int locked = 1; might_sleep(); @@ -92,7 +92,7 @@ static void async_pf_execute(struct work_struct *work) * this point */ - trace_kvm_async_pf_completed(addr, gva); + trace_kvm_async_pf_completed(addr, cr2_or_gpa); if (swq_has_sleeper(&vcpu->wq)) swake_up_one(&vcpu->wq); @@ -165,8 +165,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) } } -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, - struct kvm_arch_async_pf *arch) +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch) { struct kvm_async_pf *work; @@ -185,7 +185,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, work->wakeup_all = false; work->vcpu = vcpu; - work->gva = gva; + work->cr2_or_gpa = cr2_or_gpa; work->addr = hva; work->arch = *arch; work->mm = current->mm; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 35305d6e68cc..d8ef708a2ef6 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -64,7 +64,7 @@ static void async_pf_execute(struct work_struct *work)
 	struct mm_struct *mm = apf->mm;
 	struct kvm_vcpu *vcpu = apf->vcpu;
 	unsigned long addr = apf->addr;
-	gva_t gva = apf->gva;
+	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 	int locked = 1;
 
 	might_sleep();
@@ -92,7 +92,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, gva);
+	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 
 	if (swq_has_sleeper(&vcpu->wq))
 		swake_up_one(&vcpu->wq);
@@ -165,8 +165,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
-		       struct kvm_arch_async_pf *arch)
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+		       unsigned long hva, struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
 
@@ -185,7 +185,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
-	work->gva = gva;
+	work->cr2_or_gpa = cr2_or_gpa;
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 13efc291b1c7..03c681568ab1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1394,14 +1394,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct vm_area_struct *vma;
 	unsigned long addr, size;
 
 	size = PAGE_SIZE;
 
-	addr = gfn_to_hva(kvm, gfn);
+	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return PAGE_SIZE;
 
@@ -1809,26 +1809,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
-			 struct kvm_host_map *map)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+			cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1843,14 +1889,25 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
 	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+		NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			struct kvm_host_map *map,
+			struct gfn_to_pfn_cache *cache,
+			bool dirty, bool atomic)
 {
 	if (!map)
 		return;
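Together with kvm_release_pfn() and kvm_cache_gfn_to_pfn() above, this gives callers an optional gfn_to_pfn_cache: repeated maps of the same gfn skip the pfn lookup, and the atomic flag only succeeds when the request can be served from the cache via kmap_atomic() (the memremap() path is never taken in atomic context). A hypothetical caller could look like the sketch below; steal_time_gfn and the surrounding logic are invented for illustration and are not part of this patch:

    struct gfn_to_pfn_cache cache = {};
    struct kvm_host_map map;

    if (!kvm_map_gfn(vcpu, steal_time_gfn, &map, &cache, false)) {
            /* read or update guest memory through map.hva ... */
            kvm_unmap_gfn(vcpu, &map, &cache, true, false);  /* dirty, may sleep */
    }
    /* once the gfn is no longer of interest, drop the cached pfn */
    kvm_release_pfn(cache.pfn, cache.dirty, &cache);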
@@ -1858,23 +1915,45 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+	if (dirty)
+		mark_page_dirty_in_slot(memslot, map->gfn);
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+			cache, dirty, atomic);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -2196,12 +2275,12 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	if (slots->generation != ghc->generation)
 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
-	if (unlikely(!ghc->memslot))
-		return kvm_write_guest(kvm, gpa, data, len);
-
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, gpa, data, len);
+
 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
 	if (r)
 		return -EFAULT;
@@ -2229,12 +2308,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	if (slots->generation != ghc->generation)
 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
-	if (unlikely(!ghc->memslot))
-		return kvm_read_guest(kvm, ghc->gpa, data, len);
-
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
+
 	r = __copy_from_user(data, (void __user *)ghc->hva, len);
 	if (r)
 		return -EFAULT;
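The final two hunks reorder the sanity checks in the cached guest accessors: a cache whose hva failed to resolve now returns -EFAULT before the memslot-less fallback is considered, so the slow kvm_write_guest()/kvm_read_guest() path is reserved for caches that legitimately span memslots instead of masking a bad translation. Reduced to booleans, the decision order looks roughly like this sketch (not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* -14 stands in for -EFAULT; 1 marks the uncached slow path */
    static int cached_access(bool hva_bad, bool have_memslot)
    {
            if (hva_bad)
                    return -14;     /* checked first after this change */
            if (!have_memslot)
                    return 1;       /* fall back to the uncached path */
            return 0;               /* fast path: copy through ghc->hva */
    }

    int main(void)
    {
            /* bad hva and no memslot: used to take the slow path, now fails fast */
            printf("%d\n", cached_access(true, false));
            return 0;
    }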